mirror of
https://github.com/spacedriveapp/spacedrive.git
synced 2026-02-20 07:37:26 -05:00
feat: Add script to remove 3D/colorful emojis from Rust files
- Introduced `remove_emojis.sh`, a Bash script designed to identify and remove 3D/colorful emojis followed by a space in Rust files while preserving simple symbols. - The script counts the number of Rust files processed and reports modifications made. - Updated various Rust files to remove emoji characters from log messages and print statements for a cleaner output. - Enhanced user experience by providing color-coded output for progress and results during the emoji removal process.
This commit is contained in:
@@ -140,7 +140,7 @@ async fn run_simple_job_monitor(ctx: &Context, args: JobMonitorArgs) -> Result<(
|
||||
use sd_core::infra::event::Event;
|
||||
use std::collections::HashMap;
|
||||
|
||||
println!("📡 Monitoring jobs (real-time mode) - Press Ctrl+C to exit");
|
||||
println!("Monitoring jobs (real-time mode) - Press Ctrl+C to exit");
|
||||
println!("═══════════════════════════════════════════════════════");
|
||||
|
||||
// Subscribe to job events
|
||||
@@ -195,7 +195,7 @@ async fn run_simple_job_monitor(ctx: &Context, args: JobMonitorArgs) -> Result<(
|
||||
while let Some(event) = event_stream.recv().await {
|
||||
match event {
|
||||
Event::JobStarted { job_id, job_type } => {
|
||||
println!("🚀 Job started: {} [{}]", job_type, &job_id[..8]);
|
||||
println!("Job started: {} [{}]", job_type, &job_id[..8]);
|
||||
let pb = crate::ui::create_simple_progress(&job_type, 100);
|
||||
pb.set_message(format!("{} [{}] - Starting...", job_type, &job_id[..8]));
|
||||
progress_bars.insert(job_id, pb);
|
||||
@@ -232,10 +232,10 @@ async fn run_simple_job_monitor(ctx: &Context, args: JobMonitorArgs) -> Result<(
|
||||
|
||||
Event::JobCancelled { job_id, job_type } => {
|
||||
if let Some(pb) = progress_bars.get(&job_id) {
|
||||
pb.finish_with_message(format!("🚫 {} [{}] - Cancelled", job_type, &job_id[..8]));
|
||||
pb.finish_with_message(format!("{} [{}] - Cancelled", job_type, &job_id[..8]));
|
||||
progress_bars.remove(&job_id);
|
||||
}
|
||||
println!("🚫 Job cancelled: {} [{}]", job_type, &job_id[..8]);
|
||||
println!("Job cancelled: {} [{}]", job_type, &job_id[..8]);
|
||||
}
|
||||
|
||||
Event::JobPaused { job_id } => {
|
||||
@@ -331,7 +331,7 @@ async fn run_polling_job_monitor(ctx: &Context, args: JobMonitorArgs) -> Result<
|
||||
|
||||
/// Run TUI job monitor
|
||||
async fn run_tui_job_monitor(_ctx: &Context, _args: JobMonitorArgs) -> Result<()> {
|
||||
println!("📡 TUI Job Monitor");
|
||||
println!("TUI Job Monitor");
|
||||
println!("TUI implementation is being refined. Use --simple for now:");
|
||||
println!(" sd job monitor --simple");
|
||||
Ok(())
|
||||
|
||||
@@ -253,11 +253,11 @@ async fn main() -> Result<()> {
|
||||
{
|
||||
Ok(sd_core::infra::daemon::types::DaemonResponse::Pong) => {
|
||||
println!("Daemon is ready and responding");
|
||||
println!("💡 Use 'sd logs follow' to view daemon logs");
|
||||
println!("Use 'sd logs follow' to view daemon logs");
|
||||
}
|
||||
_ => {
|
||||
println!("Warning: Daemon may not be fully initialized yet");
|
||||
println!("💡 Use 'sd logs follow' to check daemon status");
|
||||
println!("Use 'sd logs follow' to check daemon status");
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -351,7 +351,7 @@ async fn main() -> Result<()> {
|
||||
} else {
|
||||
// Run in background
|
||||
let child = cmd.spawn()?;
|
||||
println!("🚀 Daemon restarted (PID: {})", child.id());
|
||||
println!("Daemon restarted (PID: {})", child.id());
|
||||
|
||||
// Wait a moment and check if it's still running
|
||||
tokio::time::sleep(std::time::Duration::from_millis(1000)).await;
|
||||
@@ -365,7 +365,7 @@ async fn main() -> Result<()> {
|
||||
Ok(_) => println!("✅ Daemon restart successful"),
|
||||
Err(e) => {
|
||||
println!("⚠️ Warning: Could not verify daemon status: {}", e);
|
||||
println!("💡 Use 'sd status' to check daemon status");
|
||||
println!("Use 'sd status' to check daemon status");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -15,12 +15,12 @@ pub fn print_logo() {
|
||||
// [48;2;220;180;255m [48;2;180;120;255m [48;2;220;180;255m [0m
|
||||
// [48;2;235;220;255m [48;2;220;180;255m [48;2;235;220;255m [0m"#);
|
||||
println!();
|
||||
println!(" 🚀 Spacedrive CLI v2");
|
||||
println!(" Spacedrive CLI v2");
|
||||
println!(" Cross-platform file management");
|
||||
println!();
|
||||
}
|
||||
|
||||
/// Display a compact version of the logo
|
||||
pub fn print_compact_logo() {
|
||||
println!("🚀 Spacedrive CLI v2");
|
||||
println!("Spacedrive CLI v2");
|
||||
}
|
||||
|
||||
@@ -84,7 +84,7 @@ impl JobProgressBar {
|
||||
}
|
||||
JobStatus::Cancelled => {
|
||||
ProgressStyle::with_template(
|
||||
"🚫 {msg} [{bar:40.grey/grey}] {percent}%"
|
||||
"{msg} [{bar:40.grey/grey}] {percent}%"
|
||||
)
|
||||
.unwrap()
|
||||
.progress_chars("█▉▊▋▌▍▎▏ ")
|
||||
|
||||
@@ -318,7 +318,7 @@ fn render_help<B: Backend>(f: &mut Frame<B>, area: Rect) {
|
||||
Line::from(""),
|
||||
Line::from("Job Status Icons:"),
|
||||
Line::from(" ⏳ Queued ⚡ Running ⏸️ Paused"),
|
||||
Line::from(" Completed ❌ Failed 🚫 Cancelled"),
|
||||
Line::from(" Completed ❌ Failed Cancelled"),
|
||||
];
|
||||
|
||||
let help = Paragraph::new(help_text)
|
||||
|
||||
@@ -32,8 +32,8 @@ impl fmt::Display for CliError {
|
||||
"Multiple libraries exist. Please specify one with --library or switch to it with 'library switch'"
|
||||
),
|
||||
Self::DaemonNotRunning => {
|
||||
write!(f, "🚫 Spacedrive daemon is not running\n\n")?;
|
||||
write!(f, "💡 To start the daemon, run:\n")?;
|
||||
write!(f, "Spacedrive daemon is not running\n\n")?;
|
||||
write!(f, "To start the daemon, run:\n")?;
|
||||
write!(f, " sd start\n\n")?;
|
||||
write!(f, " Or start with networking enabled:\n")?;
|
||||
write!(f, " sd start --enable-networking")
|
||||
|
||||
@@ -30,10 +30,10 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
.with_env_filter("sd_core=debug,desktop_indexing_demo=info")
|
||||
.init();
|
||||
|
||||
println!("🚀 === Spacedrive 2 Desktop Indexing Demo ===\n");
|
||||
println!("=== Spacedrive 2 Desktop Indexing Demo ===\n");
|
||||
|
||||
// 1. Initialize Spacedrive Core with job logging enabled
|
||||
println!("1. 🔧 Initializing Spacedrive Core...");
|
||||
println!("1. Initializing Spacedrive Core...");
|
||||
let data_dir = PathBuf::from("./data/spacedrive-desktop-demo");
|
||||
|
||||
// Enable job logging by modifying the config before core initialization
|
||||
@@ -51,22 +51,22 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
|
||||
config.save()?;
|
||||
println!(
|
||||
" 📝 Job logging enabled to: {}",
|
||||
" Job logging enabled to: {}",
|
||||
config.job_logs_dir().display()
|
||||
);
|
||||
}
|
||||
|
||||
let core = Core::new_with_config(data_dir.clone()).await?;
|
||||
println!(" Core initialized with job logging");
|
||||
println!(" 📱 Device ID: {}", core.device.device_id()?);
|
||||
println!(" 💾 Data directory: {:?}", data_dir);
|
||||
println!(" Device ID: {}", core.device.device_id()?);
|
||||
println!(" Data directory: {:?}", data_dir);
|
||||
println!(
|
||||
" 📝 Job logs directory: {:?}\n",
|
||||
" Job logs directory: {:?}\n",
|
||||
data_dir.join("job_logs")
|
||||
);
|
||||
|
||||
// 2. Get or create library
|
||||
println!("2. 📚 Setting up library...");
|
||||
println!("2. Setting up library...");
|
||||
let library = if core.libraries.list().await.is_empty() {
|
||||
println!(" Creating new library...");
|
||||
let lib = core
|
||||
@@ -82,10 +82,10 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
lib
|
||||
};
|
||||
println!(" 🆔 Library ID: {}", library.id());
|
||||
println!(" 📂 Library path: {}\n", library.path().display());
|
||||
println!(" Library path: {}\n", library.path().display());
|
||||
|
||||
// 3. Set up desktop location
|
||||
println!("3. 📍 Adding Desktop as a location...");
|
||||
println!("3. Adding Desktop as a location...");
|
||||
let desktop_path = dirs::desktop_dir().ok_or("Could not find desktop directory")?;
|
||||
println!(" 🖥️ Desktop path: {}", desktop_path.display());
|
||||
|
||||
@@ -104,7 +104,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
existing
|
||||
}
|
||||
None => {
|
||||
println!(" 📱 Registering device...");
|
||||
println!(" Registering device...");
|
||||
let device_model: entities::device::ActiveModel = device.into();
|
||||
let inserted = device_model.insert(db.conn()).await?;
|
||||
println!(" Device registered with ID: {}", inserted.id);
|
||||
@@ -113,7 +113,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
};
|
||||
|
||||
// Use production location management to create location and dispatch indexer job
|
||||
println!(" 📍 Creating location with production job dispatch...");
|
||||
println!(" Creating location with production job dispatch...");
|
||||
let location_args = LocationCreateArgs {
|
||||
path: desktop_path.clone(),
|
||||
name: Some("Desktop".to_string()),
|
||||
@@ -129,7 +129,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
.await?;
|
||||
|
||||
println!(" Location created with DB ID: {}", location_db_id);
|
||||
println!(" 🚀 Indexer job dispatched through production job manager!");
|
||||
println!(" Indexer job dispatched through production job manager!");
|
||||
|
||||
// Add to file watcher (optional - for real-time monitoring)
|
||||
// Note: location_id here would need to be retrieved from the database record
|
||||
@@ -137,16 +137,16 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
println!(" 👁️ Production job system is now running indexing...\n");
|
||||
|
||||
// 4. Monitor production indexer with new features
|
||||
println!("4. 🔍 Production Indexer in Action!");
|
||||
println!("4. Production Indexer in Action!");
|
||||
println!(" ✨ New Features Showcase:");
|
||||
println!(" 📁 Smart Filtering - Skips system files, caches, node_modules");
|
||||
println!(" 🔄 Incremental Indexing - Detects changes via inode tracking");
|
||||
println!(" 📊 Performance Metrics - Detailed timing and throughput");
|
||||
println!(" 🎯 Multi-phase Processing - Discovery → Processing → Content");
|
||||
println!(" 📂 Target: {}", desktop_path.display());
|
||||
println!(" Smart Filtering - Skips system files, caches, node_modules");
|
||||
println!(" Incremental Indexing - Detects changes via inode tracking");
|
||||
println!(" Performance Metrics - Detailed timing and throughput");
|
||||
println!(" Multi-phase Processing - Discovery → Processing → Content");
|
||||
println!(" Target: {}", desktop_path.display());
|
||||
|
||||
// Set up event monitoring to track job progress
|
||||
println!(" 📡 Setting up real-time job monitoring...");
|
||||
println!(" Setting up real-time job monitoring...");
|
||||
let mut event_subscriber = core.events.subscribe();
|
||||
|
||||
// Spawn event listener to monitor indexing progress
|
||||
@@ -154,7 +154,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
while let Ok(event) = event_subscriber.recv().await {
|
||||
match event {
|
||||
Event::IndexingStarted { location_id } => {
|
||||
println!(" 🔄 Indexing started for location: {}", location_id);
|
||||
println!(" Indexing started for location: {}", location_id);
|
||||
}
|
||||
Event::IndexingCompleted {
|
||||
location_id,
|
||||
@@ -162,8 +162,8 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
total_dirs,
|
||||
} => {
|
||||
println!(" Indexing completed for location: {}", location_id);
|
||||
println!(" 📄 Files indexed: {}", total_files);
|
||||
println!(" 📁 Directories indexed: {}", total_dirs);
|
||||
println!(" Files indexed: {}", total_files);
|
||||
println!(" Directories indexed: {}", total_dirs);
|
||||
break; // Exit the event loop when indexing is done
|
||||
}
|
||||
Event::IndexingFailed { location_id, error } => {
|
||||
@@ -174,7 +174,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
break;
|
||||
}
|
||||
Event::FilesIndexed { count, .. } => {
|
||||
println!(" 📈 Progress: {} files processed", count);
|
||||
println!(" Progress: {} files processed", count);
|
||||
}
|
||||
Event::JobProgress {
|
||||
job_id,
|
||||
@@ -186,7 +186,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
// Show production indexer progress details
|
||||
if let Some(msg) = message {
|
||||
println!(
|
||||
" 📊 Job {} [{}]: {} ({}%)",
|
||||
" Job {} [{}]: {} ({}%)",
|
||||
job_id,
|
||||
job_type,
|
||||
msg,
|
||||
@@ -194,7 +194,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
);
|
||||
} else {
|
||||
println!(
|
||||
" 📊 Job {} [{}]: {}%",
|
||||
" Job {} [{}]: {}%",
|
||||
job_id,
|
||||
job_type,
|
||||
(progress * 100.0) as u8
|
||||
@@ -207,23 +207,23 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
});
|
||||
|
||||
println!(" ⏳ Waiting for indexing to complete...");
|
||||
println!(" 💡 Production Indexer Features Active:");
|
||||
println!(" 🚫 Smart Filtering - Automatically skipping:");
|
||||
println!(" Production Indexer Features Active:");
|
||||
println!(" Smart Filtering - Automatically skipping:");
|
||||
println!(" • Hidden files (.DS_Store, Thumbs.db)");
|
||||
println!(" • Dev directories (node_modules, .git, target)");
|
||||
println!(" • Cache folders (__pycache__, .cache)");
|
||||
println!(" • Large files (>4GB)");
|
||||
println!(" 🔄 Change Detection - Using inode tracking for:");
|
||||
println!(" Change Detection - Using inode tracking for:");
|
||||
println!(" • Fast incremental updates");
|
||||
println!(" • Move/rename detection");
|
||||
println!(" • Modified file tracking");
|
||||
println!(" 📊 Performance Optimization:");
|
||||
println!(" Performance Optimization:");
|
||||
println!(" • Batch processing (1000 items/batch)");
|
||||
println!(" • Path prefix deduplication");
|
||||
println!(" • Parallel content processing");
|
||||
|
||||
// Let's show what files are actually in the desktop
|
||||
println!("\n 📁 Desktop contents preview:");
|
||||
println!("\n Desktop contents preview:");
|
||||
let mut file_count = 0;
|
||||
let mut dir_count = 0;
|
||||
let mut total_size = 0u64;
|
||||
@@ -237,13 +237,13 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
total_size += metadata.len();
|
||||
if file_count <= 5 {
|
||||
// Show first 5 files
|
||||
println!(" 📄 {}", entry.file_name().to_string_lossy());
|
||||
println!(" {}", entry.file_name().to_string_lossy());
|
||||
}
|
||||
} else if metadata.is_dir() {
|
||||
dir_count += 1;
|
||||
if dir_count <= 3 {
|
||||
// Show first 3 dirs
|
||||
println!(" 📁 {}/", entry.file_name().to_string_lossy());
|
||||
println!(" {}/", entry.file_name().to_string_lossy());
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -257,17 +257,17 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
println!(" ... and {} more directories", dir_count - 3);
|
||||
}
|
||||
|
||||
println!("\n 📊 Discovery Summary:");
|
||||
println!(" 📄 Files found: {}", file_count);
|
||||
println!(" 📁 Directories found: {}", dir_count);
|
||||
println!("\n Discovery Summary:");
|
||||
println!(" Files found: {}", file_count);
|
||||
println!(" Directories found: {}", dir_count);
|
||||
println!(
|
||||
" 💾 Total size: {:.2} MB",
|
||||
" Total size: {:.2} MB",
|
||||
total_size as f64 / 1024.0 / 1024.0
|
||||
);
|
||||
|
||||
// Smart job completion monitoring with checkpoint-based timeout
|
||||
println!("\n ⏰ Monitoring job completion with smart timeout...");
|
||||
println!(" 💡 Will track checkpoint progress and wait for actual completion");
|
||||
println!(" Will track checkpoint progress and wait for actual completion");
|
||||
|
||||
let mut last_checkpoint_size = 0u64;
|
||||
let mut stall_time = std::time::Instant::now();
|
||||
@@ -281,7 +281,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
// Check if the event listener got completion
|
||||
if !events_completed && events_handle.is_finished() {
|
||||
events_completed = true;
|
||||
println!(" 🎯 Event listener detected job completion!");
|
||||
println!(" Event listener detected job completion!");
|
||||
}
|
||||
|
||||
// Poll job status from the job manager
|
||||
@@ -296,12 +296,12 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
.unwrap_or(0);
|
||||
|
||||
println!(
|
||||
" 📊 Job Status: {} running, {} completed, {} total",
|
||||
" Job Status: {} running, {} completed, {} total",
|
||||
running_jobs.len(),
|
||||
completed_jobs.len(),
|
||||
job_status.len()
|
||||
);
|
||||
println!(" 📄 Database entries so far: {}", current_entry_count);
|
||||
println!(" Database entries so far: {}", current_entry_count);
|
||||
|
||||
// Check checkpoint progress by querying actual checkpoint data
|
||||
let checkpoint_estimate = {
|
||||
@@ -317,7 +317,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
|
||||
if checkpoint_estimate > last_checkpoint_size {
|
||||
println!(
|
||||
" 📈 Progress detected: {} bytes checkpoint data",
|
||||
" Progress detected: {} bytes checkpoint data",
|
||||
checkpoint_estimate
|
||||
);
|
||||
last_checkpoint_size = checkpoint_estimate;
|
||||
@@ -336,7 +336,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
" ⚠️ Job appears stalled (no progress for {} seconds)",
|
||||
stall_timeout.as_secs()
|
||||
);
|
||||
println!(" 📊 Final checkpoint size: {} bytes", checkpoint_estimate);
|
||||
println!(" Final checkpoint size: {} bytes", checkpoint_estimate);
|
||||
break;
|
||||
}
|
||||
|
||||
@@ -350,7 +350,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
}
|
||||
|
||||
// 5. Show production indexer results
|
||||
println!("\n5. 🎯 Production Indexer Results:");
|
||||
println!("\n5. Production Indexer Results:");
|
||||
|
||||
// Check database for our location
|
||||
let location_record = entities::location::Entity::find()
|
||||
@@ -407,20 +407,20 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
.count(db.conn())
|
||||
.await?;
|
||||
|
||||
println!(" 📊 Indexing Statistics:");
|
||||
println!(" 📄 Files indexed: {}", file_count_db);
|
||||
println!(" 📁 Directories indexed: {}", dir_count_db);
|
||||
println!(" Indexing Statistics:");
|
||||
println!(" Files indexed: {}", file_count_db);
|
||||
println!(" Directories indexed: {}", dir_count_db);
|
||||
println!(
|
||||
" 🔄 Entries with inode tracking: {} ({:.1}%)",
|
||||
" Entries with inode tracking: {} ({:.1}%)",
|
||||
entries_with_inodes,
|
||||
(entries_with_inodes as f64 / entry_count.max(1) as f64) * 100.0
|
||||
);
|
||||
println!(
|
||||
" 🔗 Content identities created: {}",
|
||||
" Content identities created: {}",
|
||||
content_identity_count
|
||||
);
|
||||
|
||||
println!("\n 🚫 Smart Filtering Validation:");
|
||||
println!("\n Smart Filtering Validation:");
|
||||
println!(" Checking indexed files don't include filtered patterns...");
|
||||
|
||||
let mut filtered_correctly = true;
|
||||
@@ -446,7 +446,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
println!(" All sampled entries passed filtering validation!");
|
||||
}
|
||||
|
||||
println!("\n 📝 Sample Indexed Entries:");
|
||||
println!("\n Sample Indexed Entries:");
|
||||
for (i, entry) in sample_entries.iter().take(5).enumerate() {
|
||||
let kind = match entry.kind {
|
||||
0 => "📄",
|
||||
@@ -471,37 +471,37 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
let running_jobs = library.jobs().list_jobs(Some(JobStatus::Running)).await?;
|
||||
let completed_jobs = library.jobs().list_jobs(Some(JobStatus::Completed)).await?;
|
||||
|
||||
println!("\n 💼 Job System Status:");
|
||||
println!(" 🔄 Running jobs: {}", running_jobs.len());
|
||||
println!("\n Job System Status:");
|
||||
println!(" Running jobs: {}", running_jobs.len());
|
||||
println!(" Completed jobs: {}", completed_jobs.len());
|
||||
|
||||
println!("\n ✨ Production Indexer Features Demonstrated:");
|
||||
println!(" 🚫 Smart Filtering - Automatically skipped system/cache files");
|
||||
println!(" Smart Filtering - Automatically skipped system/cache files");
|
||||
println!(
|
||||
" 🔄 Incremental Ready - {} entries have inode tracking",
|
||||
" Incremental Ready - {} entries have inode tracking",
|
||||
entries_with_inodes
|
||||
);
|
||||
println!(" 📊 Batch Processing - Efficient memory usage");
|
||||
println!(" 🎯 Multi-phase - Discovery → Processing → Content");
|
||||
println!(" Batch Processing - Efficient memory usage");
|
||||
println!(" Multi-phase - Discovery → Processing → Content");
|
||||
println!(
|
||||
" 🔍 Content Deduplication - {} unique content IDs",
|
||||
" Content Deduplication - {} unique content IDs",
|
||||
content_identity_count
|
||||
);
|
||||
|
||||
// 6. Show volume integration
|
||||
println!("\n6. 💾 Volume Management:");
|
||||
println!(" 🔍 Volume detection: Active");
|
||||
println!(" 📊 Volume tracking: Ready");
|
||||
println!("\n6. Volume Management:");
|
||||
println!(" Volume detection: Active");
|
||||
println!(" Volume tracking: Ready");
|
||||
println!(" ⚡ Speed testing: Available");
|
||||
println!(" 🔄 Mount monitoring: Active");
|
||||
println!(" Mount monitoring: Active");
|
||||
|
||||
// 7. Event system demo
|
||||
println!("\n7. 📡 Event System:");
|
||||
println!("\n7. Event System:");
|
||||
println!(
|
||||
" 🎯 Event subscribers: {}",
|
||||
" Event subscribers: {}",
|
||||
core.events.subscriber_count()
|
||||
);
|
||||
println!(" 📨 Events ready for:");
|
||||
println!(" Events ready for:");
|
||||
println!(" - File operations (copy, move, delete)");
|
||||
println!(" - Library changes");
|
||||
println!(" - Volume events");
|
||||
@@ -509,7 +509,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
println!(" - Job status updates");
|
||||
|
||||
// 8. Production indexer achievements
|
||||
println!("\n8. 🎯 Production Indexer Achievements:");
|
||||
println!("\n8. Production Indexer Achievements:");
|
||||
println!(" This demo showcased the new production indexer:");
|
||||
println!(" Smart filtering skipped system files automatically");
|
||||
println!(" Inode tracking enabled incremental indexing");
|
||||
@@ -521,7 +521,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
println!(" Non-critical error collection and reporting");
|
||||
|
||||
// Show example of what would happen on re-index
|
||||
println!("\n 🔄 Incremental Indexing Preview:");
|
||||
println!("\n Incremental Indexing Preview:");
|
||||
println!(" Next run would:");
|
||||
println!(" • Use inode tracking to detect moved/renamed files");
|
||||
println!(" • Only process modified files (compare timestamps)");
|
||||
@@ -532,20 +532,20 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
let final_running = library.jobs().list_jobs(Some(JobStatus::Running)).await?;
|
||||
let final_completed = library.jobs().list_jobs(Some(JobStatus::Completed)).await?;
|
||||
|
||||
println!("\n 📋 Final Job Summary:");
|
||||
println!(" 🔄 Still running: {}", final_running.len());
|
||||
println!("\n Final Job Summary:");
|
||||
println!(" Still running: {}", final_running.len());
|
||||
println!(" Completed: {}", final_completed.len());
|
||||
|
||||
if !final_running.is_empty() {
|
||||
println!(" 💡 Remaining jobs will continue in background");
|
||||
println!(" 🔄 Run the demo again to see persisted results!");
|
||||
println!(" Remaining jobs will continue in background");
|
||||
println!(" Run the demo again to see persisted results!");
|
||||
}
|
||||
|
||||
// Brief pause to see final status
|
||||
sleep(Duration::from_secs(2)).await;
|
||||
|
||||
// 9. Show job logs created during the demo
|
||||
println!("\n9. 📋 Job Logs Created:");
|
||||
println!("\n9. Job Logs Created:");
|
||||
let job_logs_dir = data_dir.join("job_logs");
|
||||
if let Ok(mut entries) = tokio::fs::read_dir(&job_logs_dir).await {
|
||||
let mut log_files = Vec::new();
|
||||
@@ -558,7 +558,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
}
|
||||
|
||||
if !log_files.is_empty() {
|
||||
println!(" 📝 Found {} job log file(s):", log_files.len());
|
||||
println!(" Found {} job log file(s):", log_files.len());
|
||||
for (i, log_file) in log_files.iter().enumerate() {
|
||||
let log_path = job_logs_dir.join(log_file);
|
||||
if let Ok(metadata) = tokio::fs::metadata(&log_path).await {
|
||||
@@ -582,34 +582,34 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
}
|
||||
}
|
||||
}
|
||||
println!("\n 💡 Full logs available at: {:?}", job_logs_dir);
|
||||
println!("\n Full logs available at: {:?}", job_logs_dir);
|
||||
} else {
|
||||
println!(" ⚠️ No job logs found (jobs may have completed too quickly)");
|
||||
}
|
||||
}
|
||||
|
||||
// 10. Graceful shutdown
|
||||
println!("\n10. 🛑 Shutting down gracefully...");
|
||||
println!("\n10. Shutting down gracefully...");
|
||||
core.shutdown().await?;
|
||||
|
||||
println!("\n=== Desktop Indexing Demo Complete! ===");
|
||||
println!("🎉 Spacedrive 2 Production Job System Working!");
|
||||
println!("Spacedrive 2 Production Job System Working!");
|
||||
println!();
|
||||
println!("📁 Demo data stored at: {:?}", data_dir);
|
||||
println!("📝 Job logs stored at: {:?}", job_logs_dir);
|
||||
println!("🔄 Run again to see library auto-loading and job persistence!");
|
||||
println!("Demo data stored at: {:?}", data_dir);
|
||||
println!("Job logs stored at: {:?}", job_logs_dir);
|
||||
println!("Run again to see library auto-loading and job persistence!");
|
||||
println!();
|
||||
println!("🚀 Production system achievements:");
|
||||
println!("Production system achievements:");
|
||||
println!(" ✨ Full core lifecycle with real job dispatch");
|
||||
println!(" 🗄️ Database integration with actual file indexing");
|
||||
println!(" 📂 Production job manager dispatching real jobs");
|
||||
println!(" 💾 Real-time progress monitoring via events");
|
||||
println!(" 📡 Event system with live job status updates");
|
||||
println!(" Production job manager dispatching real jobs");
|
||||
println!(" Real-time progress monitoring via events");
|
||||
println!(" Event system with live job status updates");
|
||||
println!(" 👁️ File watching integration ready");
|
||||
println!(" 🏷️ User metadata innovation (every file taggable)");
|
||||
println!(" 🔄 Content deduplication with CAS IDs");
|
||||
println!(" Content deduplication with CAS IDs");
|
||||
println!(" 🗂️ Path optimization for efficient storage");
|
||||
println!(" 🔧 Production-ready architecture patterns");
|
||||
println!(" Production-ready architecture patterns");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -17,7 +17,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
.with_env_filter("sd_core=debug")
|
||||
.init();
|
||||
|
||||
println!("🚀 Job Logging Test\n");
|
||||
println!("Job Logging Test\n");
|
||||
|
||||
// 1. Initialize Core with job logging
|
||||
println!("1. Setting up with job logging enabled...");
|
||||
@@ -41,7 +41,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
|
||||
let core = Core::new_with_config(data_dir.clone()).await?;
|
||||
let job_logs_dir = data_dir.join("job_logs");
|
||||
println!(" 📝 Job logs directory: {:?}", job_logs_dir);
|
||||
println!(" Job logs directory: {:?}", job_logs_dir);
|
||||
|
||||
// 2. Create library
|
||||
println!("\n2. Creating library...");
|
||||
@@ -102,7 +102,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
match event {
|
||||
Event::JobProgress { job_id, message, .. } => {
|
||||
if let Some(msg) = message {
|
||||
println!(" 📊 Job {}: {}", job_id, msg);
|
||||
println!(" Job {}: {}", job_id, msg);
|
||||
}
|
||||
}
|
||||
Event::IndexingCompleted { .. } => {
|
||||
@@ -126,7 +126,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
count += 1;
|
||||
let log_path = job_logs_dir.join(name);
|
||||
if let Ok(contents) = tokio::fs::read_to_string(&log_path).await {
|
||||
println!("\n 📄 Log file: {}", name);
|
||||
println!("\n Log file: {}", name);
|
||||
println!(" Size: {} bytes", contents.len());
|
||||
println!(" Lines: {}", contents.lines().count());
|
||||
|
||||
@@ -156,8 +156,8 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
core.shutdown().await?;
|
||||
|
||||
println!("\nTest complete!");
|
||||
println!("📁 Data: {:?}", data_dir);
|
||||
println!("📝 Logs: {:?}", job_logs_dir);
|
||||
println!("Data: {:?}", data_dir);
|
||||
println!("Logs: {:?}", job_logs_dir);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -156,7 +156,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
println!(" ✓ Core shutdown complete");
|
||||
|
||||
println!("\nLifecycle demo completed!");
|
||||
println!("\n📁 Data stored at: {:?}", data_dir);
|
||||
println!("\nData stored at: {:?}", data_dir);
|
||||
println!(" Run again to see library auto-loading in action!");
|
||||
|
||||
Ok(())
|
||||
|
||||
@@ -52,7 +52,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
entry_id,
|
||||
} => {
|
||||
info!(
|
||||
"📁 File created - Library: {}, Entry: {}",
|
||||
"File created - Library: {}, Entry: {}",
|
||||
library_id, entry_id
|
||||
);
|
||||
}
|
||||
@@ -81,7 +81,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
new_path,
|
||||
} => {
|
||||
info!(
|
||||
"📦 File moved - Library: {}, Entry: {}, {} -> {}",
|
||||
"File moved - Library: {}, Entry: {}, {} -> {}",
|
||||
library_id, entry_id, old_path, new_path
|
||||
);
|
||||
}
|
||||
|
||||
@@ -60,7 +60,7 @@ async fn main() -> Result<()> {
|
||||
println!("✓ FTS5 migration completed");
|
||||
|
||||
// Add desktop as a location
|
||||
println!("\n📍 Adding Desktop as a location...");
|
||||
println!("\nAdding Desktop as a location...");
|
||||
let desktop_path =
|
||||
dirs::desktop_dir().ok_or_else(|| anyhow::anyhow!("Could not find desktop directory"))?;
|
||||
println!(" Desktop path: {}", desktop_path.display());
|
||||
@@ -77,7 +77,7 @@ async fn main() -> Result<()> {
|
||||
existing
|
||||
}
|
||||
None => {
|
||||
println!(" 📱 Registering device...");
|
||||
println!(" Registering device...");
|
||||
let device_model: entities::device::ActiveModel = device.into();
|
||||
let inserted = device_model.insert(db.conn()).await?;
|
||||
println!(" ✓ Device registered with ID: {}", inserted.id);
|
||||
@@ -101,14 +101,14 @@ async fn main() -> Result<()> {
|
||||
.await?;
|
||||
|
||||
println!(" Location created with DB ID: {}", location_db_id);
|
||||
println!(" 🚀 Indexer job dispatched!");
|
||||
println!(" Indexer job dispatched!");
|
||||
|
||||
// Wait a bit for indexing to start
|
||||
println!("\n⏳ Waiting for indexing to process some files...");
|
||||
tokio::time::sleep(tokio::time::Duration::from_secs(5)).await;
|
||||
|
||||
// Now let's search for "screenshot" files
|
||||
println!("\n🔍 Searching for 'screenshot' files...");
|
||||
println!("\nSearching for 'screenshot' files...");
|
||||
|
||||
// Test different search modes
|
||||
let search_modes = vec![
|
||||
@@ -141,13 +141,13 @@ async fn main() -> Result<()> {
|
||||
mode_name, output.execution_time_ms
|
||||
);
|
||||
println!(
|
||||
" 📊 Found {} results ({} total)",
|
||||
" Found {} results ({} total)",
|
||||
output.results.len(),
|
||||
output.total_found
|
||||
);
|
||||
|
||||
if !output.results.is_empty() {
|
||||
println!(" 📁 Top results:");
|
||||
println!(" Top results:");
|
||||
for (i, result) in output.results.iter().take(5).enumerate() {
|
||||
println!(
|
||||
" {}. {} (score: {:.2})",
|
||||
@@ -172,7 +172,7 @@ async fn main() -> Result<()> {
|
||||
|
||||
// Show facets if available
|
||||
if !output.facets.file_types.is_empty() {
|
||||
println!(" 📈 File types found:");
|
||||
println!(" File types found:");
|
||||
for (file_type, count) in &output.facets.file_types {
|
||||
println!(" {}: {}", file_type, count);
|
||||
}
|
||||
@@ -180,7 +180,7 @@ async fn main() -> Result<()> {
|
||||
|
||||
// Show suggestions
|
||||
if !output.suggestions.is_empty() {
|
||||
println!(" 💡 Suggestions:");
|
||||
println!(" Suggestions:");
|
||||
for suggestion in &output.suggestions {
|
||||
println!(" {}", suggestion);
|
||||
}
|
||||
@@ -196,7 +196,7 @@ async fn main() -> Result<()> {
|
||||
}
|
||||
|
||||
// Test with different search scopes
|
||||
println!("\n🎯 Testing different search scopes...");
|
||||
println!("\nTesting different search scopes...");
|
||||
|
||||
// Test location-specific search
|
||||
// Note: We need to get the UUID from the database record
|
||||
@@ -230,7 +230,7 @@ async fn main() -> Result<()> {
|
||||
}
|
||||
|
||||
// Test with file type filters
|
||||
println!("\n🔧 Testing with file type filters...");
|
||||
println!("\nTesting with file type filters...");
|
||||
|
||||
let mut filters = sd_core::ops::search::input::SearchFilters::default();
|
||||
filters.file_types = Some(vec![
|
||||
@@ -265,7 +265,7 @@ async fn main() -> Result<()> {
|
||||
}
|
||||
}
|
||||
|
||||
println!("\n🎉 End-to-end search test completed!");
|
||||
println!("\nEnd-to-end search test completed!");
|
||||
println!("Search module is fully functional with real data");
|
||||
println!("FTS5 integration working with actual file indexing");
|
||||
println!("Multiple search modes and scopes tested");
|
||||
|
||||
@@ -135,7 +135,7 @@ async fn emit_test_log_events(event_bus: &Arc<crate::infra::event::EventBus>) {
|
||||
timestamp: Utc::now(),
|
||||
level: "INFO".to_string(),
|
||||
target: "sd_core::daemon".to_string(),
|
||||
message: "🚀 Spacedrive daemon started successfully".to_string(),
|
||||
message: "Spacedrive daemon started successfully".to_string(),
|
||||
job_id: None,
|
||||
library_id: None,
|
||||
},
|
||||
@@ -143,7 +143,7 @@ async fn emit_test_log_events(event_bus: &Arc<crate::infra::event::EventBus>) {
|
||||
timestamp: Utc::now(),
|
||||
level: "INFO".to_string(),
|
||||
target: "sd_core::event".to_string(),
|
||||
message: "📡 Log event streaming initialized".to_string(),
|
||||
message: "Log event streaming initialized".to_string(),
|
||||
job_id: None,
|
||||
library_id: None,
|
||||
},
|
||||
@@ -175,7 +175,7 @@ async fn emit_test_log_events(event_bus: &Arc<crate::infra::event::EventBus>) {
|
||||
timestamp: Utc::now(),
|
||||
level: "DEBUG".to_string(),
|
||||
target: "sd_core::daemon".to_string(),
|
||||
message: "💓 Daemon heartbeat".to_string(),
|
||||
message: "Daemon heartbeat".to_string(),
|
||||
job_id: None,
|
||||
library_id: None,
|
||||
};
|
||||
|
||||
@@ -135,7 +135,7 @@ impl RpcServer {
|
||||
timestamp: Utc::now(),
|
||||
level: "INFO".to_string(),
|
||||
target: "sd_core::daemon".to_string(),
|
||||
message: "🚀 Spacedrive daemon started successfully".to_string(),
|
||||
message: "Spacedrive daemon started successfully".to_string(),
|
||||
job_id: None,
|
||||
library_id: None,
|
||||
},
|
||||
@@ -143,7 +143,7 @@ impl RpcServer {
|
||||
timestamp: Utc::now(),
|
||||
level: "INFO".to_string(),
|
||||
target: "sd_core::event".to_string(),
|
||||
message: "📡 Log event streaming initialized".to_string(),
|
||||
message: "Log event streaming initialized".to_string(),
|
||||
job_id: None,
|
||||
library_id: None,
|
||||
},
|
||||
@@ -175,7 +175,7 @@ impl RpcServer {
|
||||
timestamp: Utc::now(),
|
||||
level: "DEBUG".to_string(),
|
||||
target: "sd_core::daemon".to_string(),
|
||||
message: "💓 Daemon heartbeat".to_string(),
|
||||
message: "Daemon heartbeat".to_string(),
|
||||
job_id: None,
|
||||
library_id: None,
|
||||
};
|
||||
|
||||
@@ -228,7 +228,7 @@ pub async fn run_processing_phase(
|
||||
match EntryProcessor::update_entry(ctx, entry_id, &entry).await {
|
||||
Ok(()) => {
|
||||
ctx.log(format!(
|
||||
"📝 Updated entry {}: {}",
|
||||
"Updated entry {}: {}",
|
||||
entry_id,
|
||||
entry.path.display()
|
||||
));
|
||||
@@ -258,7 +258,7 @@ pub async fn run_processing_phase(
|
||||
}) => {
|
||||
// Handle move - update path in database
|
||||
ctx.log(format!(
|
||||
"🔄 Detected move: {} -> {}",
|
||||
"Detected move: {} -> {}",
|
||||
old_path.display(),
|
||||
new_path.display()
|
||||
));
|
||||
|
||||
@@ -136,7 +136,7 @@ impl NetworkingEventLoop {
|
||||
|
||||
/// Main event loop
|
||||
async fn run(&mut self) -> Result<()> {
|
||||
self.logger.info("🚀 Networking event loop started").await;
|
||||
self.logger.info("Networking event loop started").await;
|
||||
|
||||
loop {
|
||||
tokio::select! {
|
||||
|
||||
@@ -209,7 +209,7 @@ impl DevicePersistence {
|
||||
.await
|
||||
.map_err(NetworkingError::Io)?;
|
||||
|
||||
println!("🔐 Saved {} paired devices (encrypted)", devices.len());
|
||||
println!("Saved {} paired devices (encrypted)", devices.len());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
||||
@@ -39,10 +39,10 @@ impl PairingProtocolHandler {
|
||||
|
||||
// Hold the write lock for the entire duration to prevent any scoping issues
|
||||
let mut sessions = self.active_sessions.write().await;
|
||||
self.log_debug(&format!("🔍 INITIATOR_HANDLER_DEBUG: Looking for session {} in {} total sessions", session_id, sessions.len())).await;
|
||||
self.log_debug(&format!("INITIATOR_HANDLER_DEBUG: Looking for session {} in {} total sessions", session_id, sessions.len())).await;
|
||||
|
||||
if let Some(existing_session) = sessions.get_mut(&session_id) {
|
||||
self.log_debug(&format!("🔍 INITIATOR_HANDLER_DEBUG: Found existing session {} in state {:?}", session_id, existing_session.state)).await;
|
||||
self.log_debug(&format!("INITIATOR_HANDLER_DEBUG: Found existing session {} in state {:?}", session_id, existing_session.state)).await;
|
||||
self.log_debug(&format!("Transitioning existing session {} to ChallengeReceived", session_id)).await;
|
||||
|
||||
// Update the existing session in place
|
||||
@@ -53,7 +53,7 @@ impl PairingProtocolHandler {
|
||||
existing_session.remote_device_info = Some(device_info.clone());
|
||||
existing_session.remote_public_key = Some(public_key.clone());
|
||||
} else {
|
||||
self.log_debug(&format!("🔍 INITIATOR_HANDLER_DEBUG: No existing session found for {}, creating new session", session_id)).await;
|
||||
self.log_debug(&format!("INITIATOR_HANDLER_DEBUG: No existing session found for {}, creating new session", session_id)).await;
|
||||
self.log_debug(&format!("Creating new session {} for pairing request", session_id)).await;
|
||||
|
||||
// Create new session only if none exists
|
||||
|
||||
@@ -162,7 +162,7 @@ mod tests {
|
||||
let challenge = [2u8; 32];
|
||||
let signature = signing_key.sign(&challenge);
|
||||
|
||||
println!("🔐 Testing REAL Ed25519 signature verification:");
|
||||
println!("Testing REAL Ed25519 signature verification:");
|
||||
println!(" Public key: {} bytes", public_key_bytes.len());
|
||||
println!(" Challenge: {} bytes", challenge.len());
|
||||
println!(" Signature: {} bytes", signature.to_bytes().len());
|
||||
@@ -190,7 +190,7 @@ mod tests {
|
||||
let wrong_challenge = [3u8; 32];
|
||||
let signature = signing_key.sign(&wrong_challenge);
|
||||
|
||||
println!("🔒 Testing REAL Ed25519 signature rejection:");
|
||||
println!("Testing REAL Ed25519 signature rejection:");
|
||||
println!(" Signed data: {:?}", &wrong_challenge[..4]);
|
||||
println!(" Verify data: {:?}", &challenge[..4]);
|
||||
|
||||
@@ -204,6 +204,6 @@ mod tests {
|
||||
assert!(!result.unwrap()); // Should be false
|
||||
|
||||
println!("REAL cryptographic signature rejection PASSED!");
|
||||
println!(" 🎯 This proves we're doing REAL crypto verification!");
|
||||
println!(" This proves we're doing REAL crypto verification!");
|
||||
}
|
||||
}
|
||||
|
||||
@@ -118,7 +118,7 @@ impl CargoTestRunner {
|
||||
|
||||
process.child = Some(child);
|
||||
println!(
|
||||
"🚀 Spawned cargo test process: {} (test: {})",
|
||||
"Spawned cargo test process: {} (test: {})",
|
||||
process.name, process.test_function_name
|
||||
);
|
||||
|
||||
@@ -164,7 +164,7 @@ impl CargoTestRunner {
|
||||
|
||||
process.child = Some(child);
|
||||
println!(
|
||||
"🚀 Spawned cargo test process: {} (test: {})",
|
||||
"Spawned cargo test process: {} (test: {})",
|
||||
process.name, process.test_function_name
|
||||
);
|
||||
}
|
||||
@@ -248,7 +248,7 @@ impl CargoTestRunner {
|
||||
let _ = child.wait().await;
|
||||
}
|
||||
}
|
||||
println!("🧹 Killed all cargo test processes");
|
||||
println!("Killed all cargo test processes");
|
||||
}
|
||||
|
||||
/// Get output from a specific process
|
||||
|
||||
@@ -31,11 +31,11 @@ async fn alice_cross_device_copy_scenario() {
|
||||
let data_dir = PathBuf::from("/tmp/spacedrive-cross-device-copy-test/alice");
|
||||
let device_name = "Alice's Test Device";
|
||||
|
||||
println!("🟦 Alice: Starting Core cross-device copy test (sender)");
|
||||
println!("📁 Alice: Data dir: {:?}", data_dir);
|
||||
println!("Alice: Starting Core cross-device copy test (sender)");
|
||||
println!("Alice: Data dir: {:?}", data_dir);
|
||||
|
||||
// Initialize Core
|
||||
println!("🔧 Alice: Initializing Core...");
|
||||
println!("Alice: Initializing Core...");
|
||||
let mut core = timeout(
|
||||
Duration::from_secs(10),
|
||||
Core::new_with_config(data_dir.clone()),
|
||||
@@ -50,7 +50,7 @@ async fn alice_cross_device_copy_scenario() {
|
||||
core.device.set_name(device_name.to_string()).unwrap();
|
||||
|
||||
// Initialize networking
|
||||
println!("🌐 Alice: Initializing networking...");
|
||||
println!("Alice: Initializing networking...");
|
||||
timeout(Duration::from_secs(10), core.init_networking())
|
||||
.await
|
||||
.unwrap()
|
||||
@@ -61,7 +61,7 @@ async fn alice_cross_device_copy_scenario() {
|
||||
println!("Alice: Networking initialized successfully");
|
||||
|
||||
// Create a library for job dispatch
|
||||
println!("📚 Alice: Creating library for copy operations...");
|
||||
println!("Alice: Creating library for copy operations...");
|
||||
let library = core
|
||||
.libraries
|
||||
.create_library("Alice Copy Library", None, core.context.clone())
|
||||
@@ -74,7 +74,7 @@ async fn alice_cross_device_copy_scenario() {
|
||||
);
|
||||
|
||||
// Start pairing as initiator
|
||||
println!("🔑 Alice: Starting pairing as initiator...");
|
||||
println!("Alice: Starting pairing as initiator...");
|
||||
let (pairing_code, expires_in) = if let Some(networking) = core.networking() {
|
||||
timeout(
|
||||
Duration::from_secs(15),
|
||||
@@ -105,7 +105,7 @@ async fn alice_cross_device_copy_scenario() {
|
||||
)
|
||||
.unwrap();
|
||||
println!(
|
||||
"📝 Alice: Pairing code written to /tmp/spacedrive-cross-device-copy-test/pairing_code.txt"
|
||||
"Alice: Pairing code written to /tmp/spacedrive-cross-device-copy-test/pairing_code.txt"
|
||||
);
|
||||
|
||||
// Wait for pairing completion
|
||||
@@ -121,16 +121,16 @@ async fn alice_cross_device_copy_scenario() {
|
||||
if !connected_devices.is_empty() {
|
||||
bob_device_id = Some(connected_devices[0].device_id);
|
||||
println!(
|
||||
"🎉 Alice: Bob connected! Device ID: {}",
|
||||
"Alice: Bob connected! Device ID: {}",
|
||||
connected_devices[0].device_id
|
||||
);
|
||||
println!(
|
||||
"📱 Alice: Connected device: {} ({})",
|
||||
"Alice: Connected device: {} ({})",
|
||||
connected_devices[0].device_name, connected_devices[0].device_id
|
||||
);
|
||||
|
||||
// Wait for session keys to be established
|
||||
println!("🔑 Alice: Allowing extra time for session key establishment...");
|
||||
println!("Alice: Allowing extra time for session key establishment...");
|
||||
tokio::time::sleep(Duration::from_secs(2)).await;
|
||||
break;
|
||||
}
|
||||
@@ -141,14 +141,14 @@ async fn alice_cross_device_copy_scenario() {
|
||||
}
|
||||
|
||||
if attempts % 5 == 0 {
|
||||
println!("🔍 Alice: Pairing status check {} - waiting", attempts / 5);
|
||||
println!("Alice: Pairing status check {} - waiting", attempts / 5);
|
||||
}
|
||||
}
|
||||
|
||||
let bob_id = bob_device_id.unwrap();
|
||||
|
||||
// Create test files to copy
|
||||
println!("📝 Alice: Creating test files for cross-device copy...");
|
||||
println!("Alice: Creating test files for cross-device copy...");
|
||||
let test_files_dir = data_dir.join("test_files");
|
||||
std::fs::create_dir_all(&test_files_dir).unwrap();
|
||||
|
||||
@@ -165,7 +165,7 @@ async fn alice_cross_device_copy_scenario() {
|
||||
for (filename, content) in &test_files {
|
||||
let file_path = test_files_dir.join(filename);
|
||||
std::fs::write(&file_path, content).unwrap();
|
||||
println!(" 📄 Created: {} ({} bytes)", filename, content.len());
|
||||
println!(" Created: {} ({} bytes)", filename, content.len());
|
||||
source_paths.push(file_path);
|
||||
}
|
||||
|
||||
@@ -185,7 +185,7 @@ async fn alice_cross_device_copy_scenario() {
|
||||
println!("🆔 Alice: My device ID is {}", alice_device_id);
|
||||
|
||||
// Prepare copy operations using the action system
|
||||
println!("🚀 Alice: Dispatching cross-device copy actions...");
|
||||
println!("Alice: Dispatching cross-device copy actions...");
|
||||
|
||||
// Get the action manager from context
|
||||
let action_manager = core
|
||||
@@ -196,7 +196,7 @@ async fn alice_cross_device_copy_scenario() {
|
||||
|
||||
// Copy each file individually to test the routing
|
||||
for (i, (source_path, (filename, _))) in source_paths.iter().zip(&test_files).enumerate() {
|
||||
println!("📋 Alice: Preparing copy action {} for {}", i + 1, filename);
|
||||
println!("Alice: Preparing copy action {} for {}", i + 1, filename);
|
||||
|
||||
// Create source SdPath (on Alice's device)
|
||||
let source_sdpath = SdPath::physical(alice_device_id, source_path);
|
||||
@@ -206,12 +206,12 @@ async fn alice_cross_device_copy_scenario() {
|
||||
let dest_sdpath = SdPath::physical(bob_id, &dest_path);
|
||||
|
||||
println!(
|
||||
" 📍 Source: {} (device: {})",
|
||||
" Source: {} (device: {})",
|
||||
source_path.display(),
|
||||
alice_device_id
|
||||
);
|
||||
println!(
|
||||
" 📍 Destination: {} (device: {})",
|
||||
" Destination: {} (device: {})",
|
||||
dest_path.display(),
|
||||
bob_id
|
||||
);
|
||||
@@ -235,7 +235,7 @@ async fn alice_cross_device_copy_scenario() {
|
||||
{
|
||||
Ok(output) => {
|
||||
println!("Alice: Copy action {} dispatched successfully", i + 1);
|
||||
println!(" 📊 Output: {:?}", output);
|
||||
println!(" Output: {:?}", output);
|
||||
}
|
||||
Err(e) => {
|
||||
println!("❌ Alice: Copy action {} failed: {}", i + 1, e);
|
||||
@@ -262,7 +262,7 @@ async fn alice_cross_device_copy_scenario() {
|
||||
|
||||
if attempt % 10 == 0 {
|
||||
println!(
|
||||
"🔍 Alice: Still waiting for Bob's confirmation... ({}s)",
|
||||
"Alice: Still waiting for Bob's confirmation... ({}s)",
|
||||
attempt
|
||||
);
|
||||
}
|
||||
@@ -280,7 +280,7 @@ async fn alice_cross_device_copy_scenario() {
|
||||
panic!("Alice: Bob did not confirm file receipt within timeout");
|
||||
}
|
||||
|
||||
println!("🧹 Alice: Cross-device copy test completed");
|
||||
println!("Alice: Cross-device copy test completed");
|
||||
}
|
||||
|
||||
/// Bob's cross-device copy scenario - receiver role
|
||||
@@ -301,11 +301,11 @@ async fn bob_cross_device_copy_scenario() {
|
||||
let data_dir = PathBuf::from("/tmp/spacedrive-cross-device-copy-test/bob");
|
||||
let device_name = "Bob's Test Device";
|
||||
|
||||
println!("🟦 Bob: Starting Core cross-device copy test (receiver)");
|
||||
println!("📁 Bob: Data dir: {:?}", data_dir);
|
||||
println!("Bob: Starting Core cross-device copy test (receiver)");
|
||||
println!("Bob: Data dir: {:?}", data_dir);
|
||||
|
||||
// Initialize Core
|
||||
println!("🔧 Bob: Initializing Core...");
|
||||
println!("Bob: Initializing Core...");
|
||||
let mut core = timeout(Duration::from_secs(10), Core::new_with_config(data_dir))
|
||||
.await
|
||||
.unwrap()
|
||||
@@ -317,7 +317,7 @@ async fn bob_cross_device_copy_scenario() {
|
||||
core.device.set_name(device_name.to_string()).unwrap();
|
||||
|
||||
// Initialize networking
|
||||
println!("🌐 Bob: Initializing networking...");
|
||||
println!("Bob: Initializing networking...");
|
||||
timeout(Duration::from_secs(10), core.init_networking())
|
||||
.await
|
||||
.unwrap()
|
||||
@@ -328,7 +328,7 @@ async fn bob_cross_device_copy_scenario() {
|
||||
println!("Bob: Networking initialized successfully");
|
||||
|
||||
// Create a library for job dispatch
|
||||
println!("📚 Bob: Creating library for copy operations...");
|
||||
println!("Bob: Creating library for copy operations...");
|
||||
let _library = core
|
||||
.libraries
|
||||
.create_library("Bob Copy Library", None, core.context.clone())
|
||||
@@ -337,7 +337,7 @@ async fn bob_cross_device_copy_scenario() {
|
||||
println!("Bob: Library created successfully");
|
||||
|
||||
// Wait for Alice to create pairing code
|
||||
println!("🔍 Bob: Looking for pairing code from Alice...");
|
||||
println!("Bob: Looking for pairing code from Alice...");
|
||||
let pairing_code = loop {
|
||||
if let Ok(code) =
|
||||
std::fs::read_to_string("/tmp/spacedrive-cross-device-copy-test/pairing_code.txt")
|
||||
@@ -346,10 +346,10 @@ async fn bob_cross_device_copy_scenario() {
|
||||
}
|
||||
tokio::time::sleep(Duration::from_millis(500)).await;
|
||||
};
|
||||
println!("📋 Bob: Found pairing code");
|
||||
println!("Bob: Found pairing code");
|
||||
|
||||
// Join pairing session
|
||||
println!("🤝 Bob: Joining pairing with Alice...");
|
||||
println!("Bob: Joining pairing with Alice...");
|
||||
if let Some(networking) = core.networking() {
|
||||
timeout(
|
||||
Duration::from_secs(15),
|
||||
@@ -373,14 +373,14 @@ async fn bob_cross_device_copy_scenario() {
|
||||
|
||||
let connected_devices = core.get_connected_devices_info().await.unwrap();
|
||||
if !connected_devices.is_empty() {
|
||||
println!("🎉 Bob: Pairing completed successfully!");
|
||||
println!("Bob: Pairing completed successfully!");
|
||||
println!(
|
||||
"📱 Bob: Connected to {} ({})",
|
||||
"Bob: Connected to {} ({})",
|
||||
connected_devices[0].device_name, connected_devices[0].device_id
|
||||
);
|
||||
|
||||
// Wait for session keys
|
||||
println!("🔑 Bob: Allowing extra time for session key establishment...");
|
||||
println!("Bob: Allowing extra time for session key establishment...");
|
||||
tokio::time::sleep(Duration::from_secs(2)).await;
|
||||
break;
|
||||
}
|
||||
@@ -391,7 +391,7 @@ async fn bob_cross_device_copy_scenario() {
|
||||
}
|
||||
|
||||
if attempts % 5 == 0 {
|
||||
println!("🔍 Bob: Pairing status check {} - waiting", attempts / 5);
|
||||
println!("Bob: Pairing status check {} - waiting", attempts / 5);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -399,12 +399,12 @@ async fn bob_cross_device_copy_scenario() {
|
||||
let received_dir = std::path::Path::new("/tmp/received_files");
|
||||
std::fs::create_dir_all(received_dir).unwrap();
|
||||
println!(
|
||||
"📁 Bob: Created directory for received files: {:?}",
|
||||
"Bob: Created directory for received files: {:?}",
|
||||
received_dir
|
||||
);
|
||||
|
||||
// Load expected files
|
||||
println!("📋 Bob: Loading expected file list...");
|
||||
println!("Bob: Loading expected file list...");
|
||||
let expected_files = loop {
|
||||
if let Ok(content) =
|
||||
std::fs::read_to_string("/tmp/spacedrive-cross-device-copy-test/expected_files.txt")
|
||||
@@ -421,11 +421,11 @@ async fn bob_cross_device_copy_scenario() {
|
||||
};
|
||||
|
||||
println!(
|
||||
"📋 Bob: Expecting {} files via cross-device copy",
|
||||
"Bob: Expecting {} files via cross-device copy",
|
||||
expected_files.len()
|
||||
);
|
||||
for (filename, size) in &expected_files {
|
||||
println!(" 📄 Expecting: {} ({} bytes)", filename, size);
|
||||
println!(" Expecting: {} ({} bytes)", filename, size);
|
||||
}
|
||||
|
||||
// Monitor for received files
|
||||
@@ -446,7 +446,7 @@ async fn bob_cross_device_copy_scenario() {
|
||||
if let Ok(metadata) = entry.metadata() {
|
||||
received_files.push(filename.clone());
|
||||
println!(
|
||||
"📥 Bob: Received file: {} ({} bytes)",
|
||||
"Bob: Received file: {} ({} bytes)",
|
||||
filename,
|
||||
metadata.len()
|
||||
);
|
||||
@@ -473,7 +473,7 @@ async fn bob_cross_device_copy_scenario() {
|
||||
|
||||
let elapsed = start_time.elapsed().as_secs();
|
||||
if elapsed > 0 && elapsed % 10 == 0 && received_files.is_empty() {
|
||||
println!("🔍 Bob: Still waiting for files... ({}s elapsed)", elapsed);
|
||||
println!("Bob: Still waiting for files... ({}s elapsed)", elapsed);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -505,7 +505,7 @@ async fn bob_cross_device_copy_scenario() {
|
||||
panic!("Bob: Not all files were received");
|
||||
}
|
||||
|
||||
println!("🧹 Bob: Cross-device copy test completed");
|
||||
println!("Bob: Cross-device copy test completed");
|
||||
}
|
||||
|
||||
/// Main test orchestrator - spawns cargo test subprocesses
|
||||
@@ -516,7 +516,7 @@ async fn test_cross_device_copy() {
|
||||
let _ = std::fs::remove_dir_all("/tmp/received_files");
|
||||
std::fs::create_dir_all("/tmp/spacedrive-cross-device-copy-test").unwrap();
|
||||
|
||||
println!("🧪 Testing cross-device copy with action system routing");
|
||||
println!("Testing cross-device copy with action system routing");
|
||||
|
||||
let mut runner = CargoTestRunner::for_test_file("cross_device_copy_test")
|
||||
.with_timeout(Duration::from_secs(180))
|
||||
@@ -524,7 +524,7 @@ async fn test_cross_device_copy() {
|
||||
.add_subprocess("bob", "bob_cross_device_copy_scenario");
|
||||
|
||||
// Spawn Alice first
|
||||
println!("🚀 Starting Alice as copy action dispatcher...");
|
||||
println!("Starting Alice as copy action dispatcher...");
|
||||
runner
|
||||
.spawn_single_process("alice")
|
||||
.await
|
||||
@@ -534,7 +534,7 @@ async fn test_cross_device_copy() {
|
||||
tokio::time::sleep(Duration::from_secs(8)).await;
|
||||
|
||||
// Start Bob as receiver
|
||||
println!("🚀 Starting Bob as copy receiver...");
|
||||
println!("Starting Bob as copy receiver...");
|
||||
runner
|
||||
.spawn_single_process("bob")
|
||||
.await
|
||||
@@ -559,7 +559,7 @@ async fn test_cross_device_copy() {
|
||||
match result {
|
||||
Ok(_) => {
|
||||
println!(
|
||||
"🎉 Cross-device copy test successful! Action system routing works correctly."
|
||||
"Cross-device copy test successful! Action system routing works correctly."
|
||||
);
|
||||
}
|
||||
Err(e) => {
|
||||
|
||||
@@ -25,11 +25,11 @@ async fn alice_pairing_scenario() {
|
||||
let data_dir = PathBuf::from("/tmp/spacedrive-pairing-test/alice");
|
||||
let device_name = "Alice's Test Device";
|
||||
|
||||
println!("🟦 Alice: Starting Core pairing test");
|
||||
println!("📁 Alice: Data dir: {:?}", data_dir);
|
||||
println!("Alice: Starting Core pairing test");
|
||||
println!("Alice: Data dir: {:?}", data_dir);
|
||||
|
||||
// Initialize Core
|
||||
println!("🔧 Alice: Initializing Core...");
|
||||
println!("Alice: Initializing Core...");
|
||||
let mut core = timeout(Duration::from_secs(10), Core::new_with_config(data_dir))
|
||||
.await
|
||||
.unwrap()
|
||||
@@ -41,7 +41,7 @@ async fn alice_pairing_scenario() {
|
||||
core.device.set_name(device_name.to_string()).unwrap();
|
||||
|
||||
// Initialize networking
|
||||
println!("🌐 Alice: Initializing networking...");
|
||||
println!("Alice: Initializing networking...");
|
||||
timeout(Duration::from_secs(10), core.init_networking())
|
||||
.await
|
||||
.unwrap()
|
||||
@@ -52,7 +52,7 @@ async fn alice_pairing_scenario() {
|
||||
println!("Alice: Networking initialized successfully");
|
||||
|
||||
// Start pairing as initiator
|
||||
println!("🔑 Alice: Starting pairing as initiator...");
|
||||
println!("Alice: Starting pairing as initiator...");
|
||||
let (pairing_code, expires_in) = if let Some(networking) = core.networking() {
|
||||
timeout(
|
||||
Duration::from_secs(15),
|
||||
@@ -82,7 +82,7 @@ async fn alice_pairing_scenario() {
|
||||
&pairing_code,
|
||||
)
|
||||
.unwrap();
|
||||
println!("📝 Alice: Pairing code written to /tmp/spacedrive-pairing-test/pairing_code.txt");
|
||||
println!("Alice: Pairing code written to /tmp/spacedrive-pairing-test/pairing_code.txt");
|
||||
|
||||
// Wait for pairing completion (Alice waits for Bob to connect)
|
||||
println!("⏳ Alice: Waiting for pairing to complete...");
|
||||
@@ -94,15 +94,15 @@ async fn alice_pairing_scenario() {
|
||||
|
||||
let connected_devices = core.get_connected_devices().await.unwrap();
|
||||
if !connected_devices.is_empty() {
|
||||
println!("🎉 Alice: Pairing completed successfully!");
|
||||
println!("🔗 Alice: Checking connected devices...");
|
||||
println!("Alice: Pairing completed successfully!");
|
||||
println!("Alice: Checking connected devices...");
|
||||
println!("Alice: Connected {} devices", connected_devices.len());
|
||||
|
||||
// Get detailed device info
|
||||
let device_info = core.get_connected_devices_info().await.unwrap();
|
||||
for device in &device_info {
|
||||
println!(
|
||||
"📱 Alice sees: {} (ID: {}, OS: {}, App: {})",
|
||||
"Alice sees: {} (ID: {}, OS: {}, App: {})",
|
||||
device.device_name, device.device_id, device.os_version, device.app_version
|
||||
);
|
||||
}
|
||||
@@ -124,11 +124,11 @@ async fn alice_pairing_scenario() {
|
||||
}
|
||||
|
||||
if attempts % 5 == 0 {
|
||||
println!("🔍 Alice: Pairing status check {} - waiting", attempts / 5);
|
||||
println!("Alice: Pairing status check {} - waiting", attempts / 5);
|
||||
}
|
||||
}
|
||||
|
||||
println!("🧹 Alice: Test completed");
|
||||
println!("Alice: Test completed");
|
||||
}
|
||||
|
||||
/// Bob's pairing scenario - ALL logic stays in this test file!
|
||||
@@ -146,11 +146,11 @@ async fn bob_pairing_scenario() {
|
||||
let data_dir = PathBuf::from("/tmp/spacedrive-pairing-test/bob");
|
||||
let device_name = "Bob's Test Device";
|
||||
|
||||
println!("🟦 Bob: Starting Core pairing test");
|
||||
println!("📁 Bob: Data dir: {:?}", data_dir);
|
||||
println!("Bob: Starting Core pairing test");
|
||||
println!("Bob: Data dir: {:?}", data_dir);
|
||||
|
||||
// Initialize Core
|
||||
println!("🔧 Bob: Initializing Core...");
|
||||
println!("Bob: Initializing Core...");
|
||||
let mut core = timeout(Duration::from_secs(10), Core::new_with_config(data_dir))
|
||||
.await
|
||||
.unwrap()
|
||||
@@ -162,7 +162,7 @@ async fn bob_pairing_scenario() {
|
||||
core.device.set_name(device_name.to_string()).unwrap();
|
||||
|
||||
// Initialize networking
|
||||
println!("🌐 Bob: Initializing networking...");
|
||||
println!("Bob: Initializing networking...");
|
||||
timeout(Duration::from_secs(10), core.init_networking())
|
||||
.await
|
||||
.unwrap()
|
||||
@@ -173,17 +173,17 @@ async fn bob_pairing_scenario() {
|
||||
println!("Bob: Networking initialized successfully");
|
||||
|
||||
// Wait for initiator to create pairing code
|
||||
println!("🔍 Bob: Looking for pairing code...");
|
||||
println!("Bob: Looking for pairing code...");
|
||||
let pairing_code = loop {
|
||||
if let Ok(code) = std::fs::read_to_string("/tmp/spacedrive-pairing-test/pairing_code.txt") {
|
||||
break code.trim().to_string();
|
||||
}
|
||||
tokio::time::sleep(Duration::from_millis(500)).await;
|
||||
};
|
||||
println!("📋 Bob: Found pairing code");
|
||||
println!("Bob: Found pairing code");
|
||||
|
||||
// Join pairing session
|
||||
println!("🤝 Bob: Joining pairing with code...");
|
||||
println!("Bob: Joining pairing with code...");
|
||||
if let Some(networking) = core.networking() {
|
||||
timeout(
|
||||
Duration::from_secs(15),
|
||||
@@ -208,15 +208,15 @@ async fn bob_pairing_scenario() {
|
||||
// Check pairing status by looking at connected devices
|
||||
let connected_devices = core.get_connected_devices().await.unwrap();
|
||||
if !connected_devices.is_empty() {
|
||||
println!("🎉 Bob: Pairing completed successfully!");
|
||||
println!("🔗 Bob: Checking connected devices...");
|
||||
println!("Bob: Pairing completed successfully!");
|
||||
println!("Bob: Checking connected devices...");
|
||||
println!("Bob: Connected {} devices", connected_devices.len());
|
||||
|
||||
// Get detailed device info
|
||||
let device_info = core.get_connected_devices_info().await.unwrap();
|
||||
for device in &device_info {
|
||||
println!(
|
||||
"📱 Bob sees: {} (ID: {}, OS: {}, App: {})",
|
||||
"Bob sees: {} (ID: {}, OS: {}, App: {})",
|
||||
device.device_name, device.device_id, device.os_version, device.app_version
|
||||
);
|
||||
}
|
||||
@@ -234,11 +234,11 @@ async fn bob_pairing_scenario() {
|
||||
}
|
||||
|
||||
if attempts % 5 == 0 {
|
||||
println!("🔍 Bob: Pairing status check {} - waiting", attempts / 5);
|
||||
println!("Bob: Pairing status check {} - waiting", attempts / 5);
|
||||
}
|
||||
}
|
||||
|
||||
println!("🧹 Bob: Test completed");
|
||||
println!("Bob: Test completed");
|
||||
}
|
||||
|
||||
/// Main test orchestrator - spawns cargo test subprocesses
|
||||
@@ -250,9 +250,9 @@ async fn test_device_pairing() {
|
||||
// This prevents Bob from reading old data and fixes the file I/O race condition
|
||||
if std::path::Path::new(PAIRING_CODE_PATH).exists() {
|
||||
let _ = std::fs::remove_file(PAIRING_CODE_PATH);
|
||||
println!("🧹 Cleaned up stale pairing code file");
|
||||
println!("Cleaned up stale pairing code file");
|
||||
}
|
||||
println!("🧪 Testing Core pairing with cargo test subprocess framework");
|
||||
println!("Testing Core pairing with cargo test subprocess framework");
|
||||
|
||||
// Clean up any old pairing files to avoid race conditions
|
||||
let _ = std::fs::remove_dir_all("/tmp/spacedrive-pairing-test");
|
||||
@@ -264,7 +264,7 @@ async fn test_device_pairing() {
|
||||
.add_subprocess("bob", "bob_pairing_scenario");
|
||||
|
||||
// Spawn Alice first
|
||||
println!("🚀 Starting Alice as initiator...");
|
||||
println!("Starting Alice as initiator...");
|
||||
runner
|
||||
.spawn_single_process("alice")
|
||||
.await
|
||||
@@ -274,7 +274,7 @@ async fn test_device_pairing() {
|
||||
tokio::time::sleep(Duration::from_secs(8)).await;
|
||||
|
||||
// Start Bob as joiner
|
||||
println!("🚀 Starting Bob as joiner...");
|
||||
println!("Starting Bob as joiner...");
|
||||
runner
|
||||
.spawn_single_process("bob")
|
||||
.await
|
||||
@@ -299,7 +299,7 @@ async fn test_device_pairing() {
|
||||
match result {
|
||||
Ok(_) => {
|
||||
println!(
|
||||
"🎉 Cargo test subprocess pairing test successful with mutual device recognition!"
|
||||
"Cargo test subprocess pairing test successful with mutual device recognition!"
|
||||
);
|
||||
}
|
||||
Err(e) => {
|
||||
|
||||
@@ -32,11 +32,11 @@ async fn alice_persistence_scenario() {
|
||||
let is_restart = role == "alice_restart";
|
||||
|
||||
if is_restart {
|
||||
println!("🔄 Alice: RESTART PHASE - Testing automatic reconnection");
|
||||
println!("📁 Alice: Data dir: {:?}", data_dir);
|
||||
println!("Alice: RESTART PHASE - Testing automatic reconnection");
|
||||
println!("Alice: Data dir: {:?}", data_dir);
|
||||
|
||||
// Initialize Core - this should load persisted devices
|
||||
println!("🔧 Alice: Initializing Core after restart...");
|
||||
println!("Alice: Initializing Core after restart...");
|
||||
let mut core = timeout(Duration::from_secs(10), Core::new_with_config(data_dir))
|
||||
.await
|
||||
.unwrap()
|
||||
@@ -50,7 +50,7 @@ async fn alice_persistence_scenario() {
|
||||
assert_eq!(current_name, device_name, "Device name not persisted");
|
||||
|
||||
// Initialize networking - this should trigger auto-reconnection
|
||||
println!("🌐 Alice: Initializing networking (should auto-reconnect)...");
|
||||
println!("Alice: Initializing networking (should auto-reconnect)...");
|
||||
timeout(Duration::from_secs(10), core.init_networking())
|
||||
.await
|
||||
.unwrap()
|
||||
@@ -70,7 +70,7 @@ async fn alice_persistence_scenario() {
|
||||
|
||||
let connected_devices = core.get_connected_devices().await.unwrap();
|
||||
if !connected_devices.is_empty() {
|
||||
println!("🎉 Alice: Auto-reconnection successful!");
|
||||
println!("Alice: Auto-reconnection successful!");
|
||||
println!(
|
||||
"Alice: Connected {} devices after restart",
|
||||
connected_devices.len()
|
||||
@@ -86,7 +86,7 @@ async fn alice_persistence_scenario() {
|
||||
|
||||
for device in &device_info {
|
||||
println!(
|
||||
"📱 Alice sees after restart: {} (ID: {})",
|
||||
"Alice sees after restart: {} (ID: {})",
|
||||
device.device_name, device.device_id
|
||||
);
|
||||
}
|
||||
@@ -108,18 +108,18 @@ async fn alice_persistence_scenario() {
|
||||
|
||||
if attempts % 5 == 0 {
|
||||
println!(
|
||||
"🔍 Alice: Auto-reconnection check {} - waiting for Bob",
|
||||
"Alice: Auto-reconnection check {} - waiting for Bob",
|
||||
attempts / 5
|
||||
);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Initial pairing phase
|
||||
println!("🟦 Alice: INITIAL PHASE - Starting pairing");
|
||||
println!("📁 Alice: Data dir: {:?}", data_dir);
|
||||
println!("Alice: INITIAL PHASE - Starting pairing");
|
||||
println!("Alice: Data dir: {:?}", data_dir);
|
||||
|
||||
// Initialize Core
|
||||
println!("🔧 Alice: Initializing Core...");
|
||||
println!("Alice: Initializing Core...");
|
||||
let mut core = timeout(Duration::from_secs(10), Core::new_with_config(data_dir))
|
||||
.await
|
||||
.unwrap()
|
||||
@@ -131,7 +131,7 @@ async fn alice_persistence_scenario() {
|
||||
core.device.set_name(device_name.to_string()).unwrap();
|
||||
|
||||
// Initialize networking
|
||||
println!("🌐 Alice: Initializing networking...");
|
||||
println!("Alice: Initializing networking...");
|
||||
timeout(Duration::from_secs(10), core.init_networking())
|
||||
.await
|
||||
.unwrap()
|
||||
@@ -141,7 +141,7 @@ async fn alice_persistence_scenario() {
|
||||
println!("Alice: Networking initialized successfully");
|
||||
|
||||
// Start pairing as initiator
|
||||
println!("🔑 Alice: Starting pairing as initiator...");
|
||||
println!("Alice: Starting pairing as initiator...");
|
||||
let (pairing_code, expires_in) = if let Some(networking) = core.networking() {
|
||||
timeout(
|
||||
Duration::from_secs(15),
|
||||
@@ -177,7 +177,7 @@ async fn alice_persistence_scenario() {
|
||||
|
||||
let connected_devices = core.get_connected_devices().await.unwrap();
|
||||
if !connected_devices.is_empty() {
|
||||
println!("🎉 Alice: Initial pairing completed!");
|
||||
println!("Alice: Initial pairing completed!");
|
||||
println!("Alice: Connected {} devices", connected_devices.len());
|
||||
|
||||
// Verify devices are properly persisted
|
||||
@@ -213,7 +213,7 @@ async fn alice_persistence_scenario() {
|
||||
}
|
||||
|
||||
// Gracefully shutdown to ensure persistence
|
||||
println!("🛑 Alice: Shutting down gracefully to ensure persistence...");
|
||||
println!("Alice: Shutting down gracefully to ensure persistence...");
|
||||
drop(core);
|
||||
tokio::time::sleep(Duration::from_secs(2)).await;
|
||||
println!("Alice: Initial phase completed");
|
||||
@@ -239,11 +239,11 @@ async fn bob_persistence_scenario() {
|
||||
let is_restart = role == "bob_restart";
|
||||
|
||||
if is_restart {
|
||||
println!("🔄 Bob: RESTART PHASE - Testing automatic reconnection");
|
||||
println!("📁 Bob: Data dir: {:?}", data_dir);
|
||||
println!("Bob: RESTART PHASE - Testing automatic reconnection");
|
||||
println!("Bob: Data dir: {:?}", data_dir);
|
||||
|
||||
// Initialize Core - this should load persisted devices
|
||||
println!("🔧 Bob: Initializing Core after restart...");
|
||||
println!("Bob: Initializing Core after restart...");
|
||||
let mut core = timeout(Duration::from_secs(10), Core::new_with_config(data_dir))
|
||||
.await
|
||||
.unwrap()
|
||||
@@ -256,7 +256,7 @@ async fn bob_persistence_scenario() {
|
||||
assert_eq!(current_name, device_name, "Device name not persisted");
|
||||
|
||||
// Initialize networking - this should trigger auto-reconnection
|
||||
println!("🌐 Bob: Initializing networking (should auto-reconnect)...");
|
||||
println!("Bob: Initializing networking (should auto-reconnect)...");
|
||||
timeout(Duration::from_secs(10), core.init_networking())
|
||||
.await
|
||||
.unwrap()
|
||||
@@ -276,7 +276,7 @@ async fn bob_persistence_scenario() {
|
||||
|
||||
let connected_devices = core.get_connected_devices().await.unwrap();
|
||||
if !connected_devices.is_empty() {
|
||||
println!("🎉 Bob: Auto-reconnection successful!");
|
||||
println!("Bob: Auto-reconnection successful!");
|
||||
println!(
|
||||
"Bob: Connected {} devices after restart",
|
||||
connected_devices.len()
|
||||
@@ -292,7 +292,7 @@ async fn bob_persistence_scenario() {
|
||||
|
||||
for device in &device_info {
|
||||
println!(
|
||||
"📱 Bob sees after restart: {} (ID: {})",
|
||||
"Bob sees after restart: {} (ID: {})",
|
||||
device.device_name, device.device_id
|
||||
);
|
||||
}
|
||||
@@ -314,18 +314,18 @@ async fn bob_persistence_scenario() {
|
||||
|
||||
if attempts % 5 == 0 {
|
||||
println!(
|
||||
"🔍 Bob: Auto-reconnection check {} - waiting for Alice",
|
||||
"Bob: Auto-reconnection check {} - waiting for Alice",
|
||||
attempts / 5
|
||||
);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Initial pairing phase
|
||||
println!("🟦 Bob: INITIAL PHASE - Starting pairing");
|
||||
println!("📁 Bob: Data dir: {:?}", data_dir);
|
||||
println!("Bob: INITIAL PHASE - Starting pairing");
|
||||
println!("Bob: Data dir: {:?}", data_dir);
|
||||
|
||||
// Initialize Core
|
||||
println!("🔧 Bob: Initializing Core...");
|
||||
println!("Bob: Initializing Core...");
|
||||
let mut core = timeout(Duration::from_secs(10), Core::new_with_config(data_dir))
|
||||
.await
|
||||
.unwrap()
|
||||
@@ -337,7 +337,7 @@ async fn bob_persistence_scenario() {
|
||||
core.device.set_name(device_name.to_string()).unwrap();
|
||||
|
||||
// Initialize networking
|
||||
println!("🌐 Bob: Initializing networking...");
|
||||
println!("Bob: Initializing networking...");
|
||||
timeout(Duration::from_secs(10), core.init_networking())
|
||||
.await
|
||||
.unwrap()
|
||||
@@ -347,7 +347,7 @@ async fn bob_persistence_scenario() {
|
||||
println!("Bob: Networking initialized successfully");
|
||||
|
||||
// Wait for pairing code from Alice
|
||||
println!("🔍 Bob: Looking for pairing code...");
|
||||
println!("Bob: Looking for pairing code...");
|
||||
let pairing_code = loop {
|
||||
if let Ok(code) =
|
||||
std::fs::read_to_string("/tmp/spacedrive-persistence-test/pairing_code.txt")
|
||||
@@ -356,10 +356,10 @@ async fn bob_persistence_scenario() {
|
||||
}
|
||||
tokio::time::sleep(Duration::from_millis(500)).await;
|
||||
};
|
||||
println!("📋 Bob: Found pairing code");
|
||||
println!("Bob: Found pairing code");
|
||||
|
||||
// Join pairing session
|
||||
println!("🤝 Bob: Joining pairing session...");
|
||||
println!("Bob: Joining pairing session...");
|
||||
if let Some(networking) = core.networking() {
|
||||
timeout(
|
||||
Duration::from_secs(15),
|
||||
@@ -382,7 +382,7 @@ async fn bob_persistence_scenario() {
|
||||
|
||||
let connected_devices = core.get_connected_devices().await.unwrap();
|
||||
if !connected_devices.is_empty() {
|
||||
println!("🎉 Bob: Initial pairing completed!");
|
||||
println!("Bob: Initial pairing completed!");
|
||||
println!("Bob: Connected {} devices", connected_devices.len());
|
||||
|
||||
// Verify devices are properly persisted
|
||||
@@ -415,7 +415,7 @@ async fn bob_persistence_scenario() {
|
||||
}
|
||||
|
||||
// Gracefully shutdown to ensure persistence
|
||||
println!("🛑 Bob: Shutting down gracefully to ensure persistence...");
|
||||
println!("Bob: Shutting down gracefully to ensure persistence...");
|
||||
drop(core);
|
||||
tokio::time::sleep(Duration::from_secs(2)).await;
|
||||
println!("Bob: Initial phase completed");
|
||||
@@ -425,7 +425,7 @@ async fn bob_persistence_scenario() {
|
||||
/// Main test orchestrator - tests device persistence and auto-reconnection
|
||||
#[tokio::test]
|
||||
async fn test_device_persistence() {
|
||||
println!("🧪 Testing device persistence and automatic reconnection");
|
||||
println!("Testing device persistence and automatic reconnection");
|
||||
|
||||
// Clean up any previous test artifacts
|
||||
let _ = std::fs::remove_dir_all("/tmp/spacedrive-persistence-test");
|
||||
@@ -439,8 +439,8 @@ async fn test_device_persistence() {
|
||||
.add_subprocess("bob_restart", "bob_persistence_scenario");
|
||||
|
||||
// Phase 1: Initial pairing
|
||||
println!("\\n📍 PHASE 1: Initial pairing");
|
||||
println!("🚀 Starting Alice for initial pairing...");
|
||||
println!("\\nPHASE 1: Initial pairing");
|
||||
println!("Starting Alice for initial pairing...");
|
||||
runner
|
||||
.spawn_single_process("alice")
|
||||
.await
|
||||
@@ -449,7 +449,7 @@ async fn test_device_persistence() {
|
||||
// Wait for Alice to initialize
|
||||
tokio::time::sleep(Duration::from_secs(8)).await;
|
||||
|
||||
println!("🚀 Starting Bob for initial pairing...");
|
||||
println!("Starting Bob for initial pairing...");
|
||||
runner
|
||||
.spawn_single_process("bob")
|
||||
.await
|
||||
@@ -484,12 +484,12 @@ async fn test_device_persistence() {
|
||||
tokio::time::sleep(Duration::from_secs(5)).await;
|
||||
|
||||
// Phase 2: Restart both devices and verify auto-reconnection
|
||||
println!("\\n📍 PHASE 2: Testing automatic reconnection after restart");
|
||||
println!("\\nPHASE 2: Testing automatic reconnection after restart");
|
||||
|
||||
// Clear the pairing code to ensure devices aren't re-pairing
|
||||
let _ = std::fs::remove_file("/tmp/spacedrive-persistence-test/pairing_code.txt");
|
||||
|
||||
println!("🔄 Restarting Alice...");
|
||||
println!("Restarting Alice...");
|
||||
runner
|
||||
.spawn_single_process("alice_restart")
|
||||
.await
|
||||
@@ -498,7 +498,7 @@ async fn test_device_persistence() {
|
||||
// Give Alice just a small head start
|
||||
tokio::time::sleep(Duration::from_secs(2)).await;
|
||||
|
||||
println!("🔄 Restarting Bob...");
|
||||
println!("Restarting Bob...");
|
||||
runner
|
||||
.spawn_single_process("bob_restart")
|
||||
.await
|
||||
@@ -525,7 +525,7 @@ async fn test_device_persistence() {
|
||||
|
||||
match reconnection_result {
|
||||
Ok(_) => {
|
||||
println!("\\n🎉 Device persistence test successful!");
|
||||
println!("\\nDevice persistence test successful!");
|
||||
println!("Devices automatically reconnected after restart");
|
||||
}
|
||||
Err(e) => {
|
||||
|
||||
@@ -51,7 +51,7 @@ async fn find_entry_by_name(
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_entry_metadata_preservation_on_move() {
|
||||
println!("🧪 Starting entry metadata preservation test");
|
||||
println!("Starting entry metadata preservation test");
|
||||
|
||||
// 1. Clean slate - delete entire data directory first
|
||||
let data_dir = std::path::PathBuf::from("core/data/move-integrity-test");
|
||||
@@ -60,7 +60,7 @@ async fn test_entry_metadata_preservation_on_move() {
|
||||
println!("🗑️ Deleted existing data directory for clean test");
|
||||
}
|
||||
std::fs::create_dir_all(&data_dir).unwrap();
|
||||
println!("📁 Created fresh data directory: {:?}", data_dir);
|
||||
println!("Created fresh data directory: {:?}", data_dir);
|
||||
|
||||
let core = Arc::new(Core::new_with_config(data_dir.clone()).await.unwrap());
|
||||
println!("Core initialized successfully");
|
||||
@@ -72,7 +72,7 @@ async fn test_entry_metadata_preservation_on_move() {
|
||||
.await
|
||||
.unwrap();
|
||||
let library_id = library.id();
|
||||
println!("📚 Created fresh library with ID: {}", library_id);
|
||||
println!("Created fresh library with ID: {}", library_id);
|
||||
|
||||
let action_manager = core
|
||||
.context
|
||||
@@ -97,7 +97,7 @@ async fn test_entry_metadata_preservation_on_move() {
|
||||
.unwrap();
|
||||
fs::create_dir_all(&dest_dir).await.unwrap();
|
||||
|
||||
println!("📁 Created test file structure");
|
||||
println!("Created test file structure");
|
||||
|
||||
// 3. Dispatch LocationAddAction to index the source
|
||||
let _add_output = action_manager
|
||||
@@ -136,7 +136,7 @@ async fn test_entry_metadata_preservation_on_move() {
|
||||
|
||||
// Debug: List all entries in the database
|
||||
let all_entries = entry::Entity::find().all(db).await.unwrap();
|
||||
println!("🔍 Found {} entries in database:", all_entries.len());
|
||||
println!("Found {} entries in database:", all_entries.len());
|
||||
for entry in &all_entries {
|
||||
println!(
|
||||
" - ID: {}, Name: '{}', UUID: {:?}, Parent: {:?}",
|
||||
@@ -158,7 +158,7 @@ async fn test_entry_metadata_preservation_on_move() {
|
||||
let _original_metadata_id = parent_dir_entry.metadata_id;
|
||||
|
||||
println!(
|
||||
"🔍 Found parent_dir entry with ID: {}",
|
||||
"Found parent_dir entry with ID: {}",
|
||||
original_parent_dir_id
|
||||
);
|
||||
|
||||
@@ -210,10 +210,10 @@ async fn test_entry_metadata_preservation_on_move() {
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
println!("📦 Move operation completed");
|
||||
println!("Move operation completed");
|
||||
|
||||
// 8. Verification assertions
|
||||
println!("🔍 Starting verification phase...");
|
||||
println!("Starting verification phase...");
|
||||
|
||||
// 1. Verify Entry Preservation
|
||||
let moved_entry = entry::Entity::find_by_id(original_parent_dir_id)
|
||||
@@ -235,7 +235,7 @@ async fn test_entry_metadata_preservation_on_move() {
|
||||
// 2. Verify Metadata Preservation
|
||||
// Debug: Check all user_metadata_tag records
|
||||
let all_tag_links = user_metadata_tag::Entity::find().all(db).await.unwrap();
|
||||
println!("🔍 Found {} tag links in database:", all_tag_links.len());
|
||||
println!("Found {} tag links in database:", all_tag_links.len());
|
||||
for link in &all_tag_links {
|
||||
println!(
|
||||
" - Link ID: {}, MetadataID: {}, TagID: {}",
|
||||
@@ -245,7 +245,7 @@ async fn test_entry_metadata_preservation_on_move() {
|
||||
|
||||
// Debug: Check all user_metadata records
|
||||
let all_metadata = user_metadata::Entity::find().all(db).await.unwrap();
|
||||
println!("🔍 Found {} user_metadata records:", all_metadata.len());
|
||||
println!("Found {} user_metadata records:", all_metadata.len());
|
||||
for meta in &all_metadata {
|
||||
println!(
|
||||
" - Meta ID: {}, UUID: {}, Entry UUID: {:?}",
|
||||
@@ -350,7 +350,7 @@ async fn test_entry_metadata_preservation_on_move() {
|
||||
}
|
||||
|
||||
// Final Summary
|
||||
println!("\n🎯 Test Results Summary:");
|
||||
println!("\nTest Results Summary:");
|
||||
println!("Entry ID preservation: WORKING - Entry maintains stable identity during moves");
|
||||
println!("TagManager SQL issues: RESOLVED - Can create and apply semantic tags");
|
||||
println!(
|
||||
@@ -373,19 +373,19 @@ async fn test_entry_metadata_preservation_on_move() {
|
||||
println!("⚠️ Filesystem move: ISSUE - Files not moved properly");
|
||||
}
|
||||
|
||||
println!("\n🔬 Test Framework: COMPLETE");
|
||||
println!("\nTest Framework: COMPLETE");
|
||||
println!(" This integration test successfully validates the core concern:");
|
||||
println!(" 📌 Entry identity preservation during move operations");
|
||||
println!(" 📌 Metadata link preservation (when semantic tagging works)");
|
||||
println!(" 📌 Comprehensive verification of all database state");
|
||||
println!(" Entry identity preservation during move operations");
|
||||
println!(" Metadata link preservation (when semantic tagging works)");
|
||||
println!(" Comprehensive verification of all database state");
|
||||
|
||||
println!("\n🎉 Integration test implementation is working correctly!");
|
||||
println!("\nIntegration test implementation is working correctly!");
|
||||
}
|
||||
|
||||
/// Additional test to verify that child entries also maintain their metadata
|
||||
#[tokio::test]
|
||||
async fn test_child_entry_metadata_preservation_on_parent_move() {
|
||||
println!("🧪 Starting child entry metadata preservation test");
|
||||
println!("Starting child entry metadata preservation test");
|
||||
|
||||
// Setup similar to main test - use same persistent database
|
||||
let data_dir = std::path::PathBuf::from("core/data/spacedrive-search-demo");
|
||||
|
||||
@@ -25,11 +25,11 @@ async fn alice_file_transfer_scenario() {
|
||||
let data_dir = PathBuf::from("/tmp/spacedrive-file-transfer-test/alice");
|
||||
let device_name = "Alice's Test Device";
|
||||
|
||||
println!("🟦 Alice: Starting Core file transfer test (sender)");
|
||||
println!("📁 Alice: Data dir: {:?}", data_dir);
|
||||
println!("Alice: Starting Core file transfer test (sender)");
|
||||
println!("Alice: Data dir: {:?}", data_dir);
|
||||
|
||||
// Initialize Core
|
||||
println!("🔧 Alice: Initializing Core...");
|
||||
println!("Alice: Initializing Core...");
|
||||
let mut core = timeout(
|
||||
Duration::from_secs(10),
|
||||
Core::new_with_config(data_dir.clone()),
|
||||
@@ -44,7 +44,7 @@ async fn alice_file_transfer_scenario() {
|
||||
core.device.set_name(device_name.to_string()).unwrap();
|
||||
|
||||
// Initialize networking
|
||||
println!("🌐 Alice: Initializing networking...");
|
||||
println!("Alice: Initializing networking...");
|
||||
timeout(Duration::from_secs(10), core.init_networking())
|
||||
.await
|
||||
.unwrap()
|
||||
@@ -55,7 +55,7 @@ async fn alice_file_transfer_scenario() {
|
||||
println!("Alice: Networking initialized successfully");
|
||||
|
||||
// Create a library for job dispatch (required for file transfers)
|
||||
println!("📚 Alice: Creating library for file transfer jobs...");
|
||||
println!("Alice: Creating library for file transfer jobs...");
|
||||
let _library = core
|
||||
.libraries
|
||||
.create_library("Alice Transfer Library", None, core.context.clone())
|
||||
@@ -64,7 +64,7 @@ async fn alice_file_transfer_scenario() {
|
||||
println!("Alice: Library created successfully");
|
||||
|
||||
// Start pairing as initiator
|
||||
println!("🔑 Alice: Starting pairing as initiator for file transfer...");
|
||||
println!("Alice: Starting pairing as initiator for file transfer...");
|
||||
let (pairing_code, expires_in) = if let Some(networking) = core.networking() {
|
||||
timeout(
|
||||
Duration::from_secs(15),
|
||||
@@ -95,7 +95,7 @@ async fn alice_file_transfer_scenario() {
|
||||
)
|
||||
.unwrap();
|
||||
println!(
|
||||
"📝 Alice: Pairing code written to /tmp/spacedrive-file-transfer-test/pairing_code.txt"
|
||||
"Alice: Pairing code written to /tmp/spacedrive-file-transfer-test/pairing_code.txt"
|
||||
);
|
||||
|
||||
// Wait for pairing completion
|
||||
@@ -111,12 +111,12 @@ async fn alice_file_transfer_scenario() {
|
||||
if !connected_devices.is_empty() {
|
||||
receiver_device_id = Some(connected_devices[0]);
|
||||
println!(
|
||||
"🎉 Alice: Bob connected! Device ID: {}",
|
||||
"Alice: Bob connected! Device ID: {}",
|
||||
connected_devices[0]
|
||||
);
|
||||
|
||||
// Wait a bit longer to ensure session keys are properly established
|
||||
println!("🔑 Alice: Allowing extra time for session key establishment...");
|
||||
println!("Alice: Allowing extra time for session key establishment...");
|
||||
tokio::time::sleep(Duration::from_secs(2)).await;
|
||||
break;
|
||||
}
|
||||
@@ -127,17 +127,17 @@ async fn alice_file_transfer_scenario() {
|
||||
let registry = device_registry.read().await;
|
||||
let paired_devices = registry.get_paired_devices();
|
||||
if !paired_devices.is_empty() {
|
||||
println!("🎉 Alice: Found {} paired devices!", paired_devices.len());
|
||||
println!("Alice: Found {} paired devices!", paired_devices.len());
|
||||
for device in &paired_devices {
|
||||
println!(
|
||||
" 📱 Paired: {} (ID: {})",
|
||||
" Paired: {} (ID: {})",
|
||||
device.device_name, device.device_id
|
||||
);
|
||||
}
|
||||
// Use the first paired device as the receiver
|
||||
receiver_device_id = Some(paired_devices[0].device_id);
|
||||
println!(
|
||||
"🔑 Alice: Using paired device as receiver: {}",
|
||||
"Alice: Using paired device as receiver: {}",
|
||||
paired_devices[0].device_id
|
||||
);
|
||||
break;
|
||||
@@ -150,14 +150,14 @@ async fn alice_file_transfer_scenario() {
|
||||
}
|
||||
|
||||
if attempts % 5 == 0 {
|
||||
println!("🔍 Alice: Pairing status check {} - waiting", attempts / 5);
|
||||
println!("Alice: Pairing status check {} - waiting", attempts / 5);
|
||||
}
|
||||
}
|
||||
|
||||
let receiver_id = receiver_device_id.unwrap();
|
||||
|
||||
// Create test files to transfer
|
||||
println!("📝 Alice: Creating test files for transfer...");
|
||||
println!("Alice: Creating test files for transfer...");
|
||||
let test_files_dir = data_dir.join("test_files");
|
||||
std::fs::create_dir_all(&test_files_dir).unwrap();
|
||||
|
||||
@@ -180,7 +180,7 @@ async fn alice_file_transfer_scenario() {
|
||||
match ContentHashGenerator::generate_content_hash(&file_path).await {
|
||||
Ok(checksum) => {
|
||||
println!(
|
||||
" 📄 Created: {} ({} bytes, checksum: {})",
|
||||
" Created: {} ({} bytes, checksum: {})",
|
||||
filename,
|
||||
content.len(),
|
||||
checksum
|
||||
@@ -188,7 +188,7 @@ async fn alice_file_transfer_scenario() {
|
||||
}
|
||||
Err(e) => {
|
||||
println!(
|
||||
" 📄 Created: {} ({} bytes, checksum error: {})",
|
||||
" Created: {} ({} bytes, checksum error: {})",
|
||||
filename,
|
||||
content.len(),
|
||||
e
|
||||
@@ -212,17 +212,17 @@ async fn alice_file_transfer_scenario() {
|
||||
|
||||
// Debug: Show Alice's view of connected devices
|
||||
let alice_devices = core.get_connected_devices_info().await.unwrap();
|
||||
println!("🔍 Alice: Connected devices before transfer:");
|
||||
println!("Alice: Connected devices before transfer:");
|
||||
for device in &alice_devices {
|
||||
println!(
|
||||
" 📱 Device: {} (ID: {})",
|
||||
" Device: {} (ID: {})",
|
||||
device.device_name, device.device_id
|
||||
);
|
||||
}
|
||||
|
||||
// Initiate cross-device file transfer
|
||||
println!("🚀 Alice: Starting cross-device file transfer...");
|
||||
println!("🎯 Alice: Sending files to device ID: {}", receiver_id);
|
||||
println!("Alice: Starting cross-device file transfer...");
|
||||
println!("Alice: Sending files to device ID: {}", receiver_id);
|
||||
|
||||
let transfer_results = core
|
||||
.services
|
||||
@@ -237,7 +237,7 @@ async fn alice_file_transfer_scenario() {
|
||||
match transfer_results {
|
||||
Ok(transfer_id) => {
|
||||
println!("Alice: File transfer initiated successfully!");
|
||||
println!("📋 Alice: Transfer ID: {:?}", transfer_id);
|
||||
println!("Alice: Transfer ID: {:?}", transfer_id);
|
||||
|
||||
// Wait for transfer to complete
|
||||
println!("⏳ Alice: Waiting for transfer to complete...");
|
||||
@@ -274,7 +274,7 @@ async fn alice_file_transfer_scenario() {
|
||||
// Still in progress
|
||||
if status.progress.bytes_transferred > 0 {
|
||||
println!(
|
||||
"📊 Alice: Transfer progress: {} / {} bytes",
|
||||
"Alice: Transfer progress: {} / {} bytes",
|
||||
status.progress.bytes_transferred,
|
||||
status.progress.total_bytes
|
||||
);
|
||||
@@ -310,7 +310,7 @@ async fn alice_file_transfer_scenario() {
|
||||
|
||||
if attempt % 10 == 0 {
|
||||
println!(
|
||||
"🔍 Alice: Still waiting for Bob's confirmation... ({}s)",
|
||||
"Alice: Still waiting for Bob's confirmation... ({}s)",
|
||||
attempt
|
||||
);
|
||||
}
|
||||
@@ -342,7 +342,7 @@ async fn alice_file_transfer_scenario() {
|
||||
}
|
||||
}
|
||||
|
||||
println!("🧹 Alice: File transfer sender test completed");
|
||||
println!("Alice: File transfer sender test completed");
|
||||
}
|
||||
|
||||
/// Bob's file transfer scenario - receiver role
|
||||
@@ -360,11 +360,11 @@ async fn bob_file_transfer_scenario() {
|
||||
let data_dir = PathBuf::from("/tmp/spacedrive-file-transfer-test/bob");
|
||||
let device_name = "Bob's Test Device";
|
||||
|
||||
println!("🟦 Bob: Starting Core file transfer test (receiver)");
|
||||
println!("📁 Bob: Data dir: {:?}", data_dir);
|
||||
println!("Bob: Starting Core file transfer test (receiver)");
|
||||
println!("Bob: Data dir: {:?}", data_dir);
|
||||
|
||||
// Initialize Core
|
||||
println!("🔧 Bob: Initializing Core...");
|
||||
println!("Bob: Initializing Core...");
|
||||
let mut core = timeout(Duration::from_secs(10), Core::new_with_config(data_dir))
|
||||
.await
|
||||
.unwrap()
|
||||
@@ -376,7 +376,7 @@ async fn bob_file_transfer_scenario() {
|
||||
core.device.set_name(device_name.to_string()).unwrap();
|
||||
|
||||
// Initialize networking
|
||||
println!("🌐 Bob: Initializing networking...");
|
||||
println!("Bob: Initializing networking...");
|
||||
timeout(Duration::from_secs(10), core.init_networking())
|
||||
.await
|
||||
.unwrap()
|
||||
@@ -387,7 +387,7 @@ async fn bob_file_transfer_scenario() {
|
||||
println!("Bob: Networking initialized successfully");
|
||||
|
||||
// Create a library for job dispatch (required for file transfers)
|
||||
println!("📚 Bob: Creating library for file transfer jobs...");
|
||||
println!("Bob: Creating library for file transfer jobs...");
|
||||
let _library = core
|
||||
.libraries
|
||||
.create_library("Bob Transfer Library", None, core.context.clone())
|
||||
@@ -396,7 +396,7 @@ async fn bob_file_transfer_scenario() {
|
||||
println!("Bob: Library created successfully");
|
||||
|
||||
// Wait for Alice to create pairing code
|
||||
println!("🔍 Bob: Looking for pairing code from Alice...");
|
||||
println!("Bob: Looking for pairing code from Alice...");
|
||||
let pairing_code = loop {
|
||||
if let Ok(code) =
|
||||
std::fs::read_to_string("/tmp/spacedrive-file-transfer-test/pairing_code.txt")
|
||||
@@ -405,10 +405,10 @@ async fn bob_file_transfer_scenario() {
|
||||
}
|
||||
tokio::time::sleep(Duration::from_millis(500)).await;
|
||||
};
|
||||
println!("📋 Bob: Found pairing code");
|
||||
println!("Bob: Found pairing code");
|
||||
|
||||
// Join pairing session
|
||||
println!("🤝 Bob: Joining pairing with Alice...");
|
||||
println!("Bob: Joining pairing with Alice...");
|
||||
if let Some(networking) = core.networking() {
|
||||
timeout(
|
||||
Duration::from_secs(15),
|
||||
@@ -433,21 +433,21 @@ async fn bob_file_transfer_scenario() {
|
||||
// Check pairing status by looking at connected devices
|
||||
let connected_devices = core.get_connected_devices().await.unwrap();
|
||||
if !connected_devices.is_empty() {
|
||||
println!("🎉 Bob: Pairing completed successfully!");
|
||||
println!("Bob: Pairing completed successfully!");
|
||||
println!("Bob: Connected {} devices", connected_devices.len());
|
||||
|
||||
// Debug: Show Bob's view of connected devices
|
||||
let bob_devices = core.get_connected_devices_info().await.unwrap();
|
||||
println!("🔍 Bob: Connected devices after pairing:");
|
||||
println!("Bob: Connected devices after pairing:");
|
||||
for device in &bob_devices {
|
||||
println!(
|
||||
" 📱 Device: {} (ID: {})",
|
||||
" Device: {} (ID: {})",
|
||||
device.device_name, device.device_id
|
||||
);
|
||||
}
|
||||
|
||||
// Wait a bit longer to ensure session keys are properly established
|
||||
println!("🔑 Bob: Allowing extra time for session key establishment...");
|
||||
println!("Bob: Allowing extra time for session key establishment...");
|
||||
tokio::time::sleep(Duration::from_secs(2)).await;
|
||||
break;
|
||||
}
|
||||
@@ -458,10 +458,10 @@ async fn bob_file_transfer_scenario() {
|
||||
let registry = device_registry.read().await;
|
||||
let paired_devices = registry.get_paired_devices();
|
||||
if !paired_devices.is_empty() {
|
||||
println!("🎉 Bob: Found {} paired devices!", paired_devices.len());
|
||||
println!("Bob: Found {} paired devices!", paired_devices.len());
|
||||
for device in &paired_devices {
|
||||
println!(
|
||||
" 📱 Paired: {} (ID: {})",
|
||||
" Paired: {} (ID: {})",
|
||||
device.device_name, device.device_id
|
||||
);
|
||||
}
|
||||
@@ -476,7 +476,7 @@ async fn bob_file_transfer_scenario() {
|
||||
}
|
||||
|
||||
if attempts % 5 == 0 {
|
||||
println!("🔍 Bob: Pairing status check {} - waiting", attempts / 5);
|
||||
println!("Bob: Pairing status check {} - waiting", attempts / 5);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -487,7 +487,7 @@ async fn bob_file_transfer_scenario() {
|
||||
let received_dir = std::path::Path::new("/tmp/received_files");
|
||||
std::fs::create_dir_all(received_dir).unwrap();
|
||||
println!(
|
||||
"📁 Bob: Created directory for received files: {:?}",
|
||||
"Bob: Created directory for received files: {:?}",
|
||||
received_dir
|
||||
);
|
||||
|
||||
@@ -508,11 +508,11 @@ async fn bob_file_transfer_scenario() {
|
||||
};
|
||||
|
||||
println!(
|
||||
"📋 Bob: Expecting {} files to be received",
|
||||
"Bob: Expecting {} files to be received",
|
||||
expected_files.len()
|
||||
);
|
||||
for (filename, size) in &expected_files {
|
||||
println!(" 📄 Expecting: {} ({} bytes)", filename, size);
|
||||
println!(" Expecting: {} ({} bytes)", filename, size);
|
||||
}
|
||||
|
||||
// Monitor for received files
|
||||
@@ -532,7 +532,7 @@ async fn bob_file_transfer_scenario() {
|
||||
if let Ok(metadata) = entry.metadata() {
|
||||
received_files.push(filename.clone());
|
||||
println!(
|
||||
"📥 Bob: Received file: {} ({} bytes)",
|
||||
"Bob: Received file: {} ({} bytes)",
|
||||
filename,
|
||||
metadata.len()
|
||||
);
|
||||
@@ -545,11 +545,11 @@ async fn bob_file_transfer_scenario() {
|
||||
// Debug: Show directory contents periodically
|
||||
let elapsed = start_time.elapsed().as_secs();
|
||||
if elapsed > 0 && elapsed % 10 == 0 && received_files.is_empty() {
|
||||
println!("🔍 Bob: Still waiting for files... checking directory:");
|
||||
println!("Bob: Still waiting for files... checking directory:");
|
||||
if let Ok(entries) = std::fs::read_dir(received_dir) {
|
||||
let file_count = entries.count();
|
||||
println!(
|
||||
" 📁 Found {} items in {}",
|
||||
" Found {} items in {}",
|
||||
file_count,
|
||||
received_dir.display()
|
||||
);
|
||||
@@ -558,7 +558,7 @@ async fn bob_file_transfer_scenario() {
|
||||
|
||||
if received_files.len() > 0 && received_files.len() % 2 == 0 {
|
||||
println!(
|
||||
"📊 Bob: Progress: {}/{} files received",
|
||||
"Bob: Progress: {}/{} files received",
|
||||
received_files.len(),
|
||||
expected_files.len()
|
||||
);
|
||||
@@ -645,7 +645,7 @@ async fn bob_file_transfer_scenario() {
|
||||
panic!("Bob: Not all files were received");
|
||||
}
|
||||
|
||||
println!("🧹 Bob: File transfer receiver test completed");
|
||||
println!("Bob: File transfer receiver test completed");
|
||||
}
|
||||
|
||||
/// Main test orchestrator - spawns cargo test subprocesses for file transfer
|
||||
@@ -656,7 +656,7 @@ async fn test_file_transfer() {
|
||||
let _ = std::fs::remove_dir_all("/tmp/received_files");
|
||||
std::fs::create_dir_all("/tmp/spacedrive-file-transfer-test").unwrap();
|
||||
|
||||
println!("🧪 Testing Core file transfer with cargo test subprocess framework");
|
||||
println!("Testing Core file transfer with cargo test subprocess framework");
|
||||
|
||||
let mut runner = CargoTestRunner::for_test_file("file_transfer_test")
|
||||
.with_timeout(Duration::from_secs(240)) // 4 minutes for file transfer test
|
||||
@@ -664,7 +664,7 @@ async fn test_file_transfer() {
|
||||
.add_subprocess("bob", "bob_file_transfer_scenario");
|
||||
|
||||
// Spawn Alice first (sender)
|
||||
println!("🚀 Starting Alice as file sender...");
|
||||
println!("Starting Alice as file sender...");
|
||||
runner
|
||||
.spawn_single_process("alice")
|
||||
.await
|
||||
@@ -674,7 +674,7 @@ async fn test_file_transfer() {
|
||||
tokio::time::sleep(Duration::from_secs(8)).await;
|
||||
|
||||
// Start Bob as receiver
|
||||
println!("🚀 Starting Bob as file receiver...");
|
||||
println!("Starting Bob as file receiver...");
|
||||
runner
|
||||
.spawn_single_process("bob")
|
||||
.await
|
||||
@@ -699,7 +699,7 @@ async fn test_file_transfer() {
|
||||
match result {
|
||||
Ok(_) => {
|
||||
println!(
|
||||
"🎉 Cargo test subprocess file transfer test successful with complete file verification!"
|
||||
"Cargo test subprocess file transfer test successful with complete file verification!"
|
||||
);
|
||||
}
|
||||
Err(e) => {
|
||||
|
||||
@@ -412,7 +412,7 @@ async fn test_full_copy_workflow_simulation() {
|
||||
|
||||
// Only test if both paths exist
|
||||
if source_path.exists() && dest_path.exists() {
|
||||
println!("\n📋 Scenario: {}", scenario_name);
|
||||
println!("\nScenario: {}", scenario_name);
|
||||
|
||||
// Step 1: Check if paths are on same physical storage
|
||||
let same_storage = volume_manager
|
||||
|
||||
@@ -102,7 +102,7 @@ async fn demo_semantic_variants() -> Result<()> {
|
||||
println!();
|
||||
|
||||
// Test name matching
|
||||
println!("🔍 Name matching tests:");
|
||||
println!("Name matching tests:");
|
||||
println!(" Matches 'JavaScript': {}", js_tag.matches_name("JavaScript"));
|
||||
println!(" Matches 'js' (case insensitive): {}", js_tag.matches_name("js"));
|
||||
println!(" Matches 'ECMAScript': {}", js_tag.matches_name("ECMAScript"));
|
||||
@@ -138,7 +138,7 @@ async fn demo_hierarchical_relationships() -> Result<()> {
|
||||
// service.create_relationship(programming.id, web_dev.id, RelationshipType::ParentChild, None).await?;
|
||||
// etc.
|
||||
|
||||
println!("📊 Benefits of hierarchy:");
|
||||
println!("Benefits of hierarchy:");
|
||||
println!(" • Tagging 'Quarterly Report' with 'Business Documents' automatically inherits 'Documents'");
|
||||
println!(" • Searching 'Technology' finds all descendant content (React components, etc.)");
|
||||
println!(" • Emergent patterns reveal organizational connections");
|
||||
@@ -154,7 +154,7 @@ async fn demo_context_resolution() -> Result<()> {
|
||||
let device_id = Uuid::new_v4();
|
||||
|
||||
// Simulate context resolution scenario
|
||||
println!("🤔 Scenario: User types 'Phoenix' while working with geographic data");
|
||||
println!("Scenario: User types 'Phoenix' while working with geographic data");
|
||||
println!();
|
||||
|
||||
// Context tags that user already has on this file
|
||||
@@ -162,12 +162,12 @@ async fn demo_context_resolution() -> Result<()> {
|
||||
let usa_tag = SemanticTag::new("USA".to_string(), device_id);
|
||||
let context_tags = vec![arizona_tag, usa_tag];
|
||||
|
||||
println!("📍 Context tags already present: Arizona, USA");
|
||||
println!("🎯 System would resolve 'Phoenix' to 'Geography::Phoenix' (city)");
|
||||
println!("Context tags already present: Arizona, USA");
|
||||
println!("System would resolve 'Phoenix' to 'Geography::Phoenix' (city)");
|
||||
println!(" rather than 'Mythology::Phoenix' (mythical bird)");
|
||||
println!();
|
||||
|
||||
println!("🧠 Resolution factors:");
|
||||
println!("Resolution factors:");
|
||||
println!(" • Namespace compatibility (Geography matches Arizona/USA)");
|
||||
println!(" • Usage patterns (Phoenix often used with Arizona)");
|
||||
println!(" • Hierarchical relationships (Phoenix is a US city)");
|
||||
@@ -189,7 +189,7 @@ async fn demo_ai_tagging() -> Result<()> {
|
||||
ai_tag_app.set_instance_attribute("detected_objects".to_string(), vec!["dog", "beach", "sunset"]).unwrap();
|
||||
ai_tag_app.set_instance_attribute("model_version".to_string(), "v2.1").unwrap();
|
||||
|
||||
println!("🤖 AI analyzed vacation photo and applied tag:");
|
||||
println!("AI analyzed vacation photo and applied tag:");
|
||||
println!(" Confidence: {:.1}%", ai_tag_app.confidence * 100.0);
|
||||
println!(" Context: {}", ai_tag_app.applied_context.as_ref().unwrap());
|
||||
println!(" Detected objects: {:?}", ai_tag_app.get_attribute::<Vec<String>>("detected_objects").unwrap());
|
||||
@@ -197,7 +197,7 @@ async fn demo_ai_tagging() -> Result<()> {
|
||||
println!();
|
||||
|
||||
// User can review and modify AI suggestions
|
||||
println!("👤 User can:");
|
||||
println!("User can:");
|
||||
println!(" • Accept AI tags automatically (high confidence)");
|
||||
println!(" • Review low confidence tags before accepting");
|
||||
println!(" • Add additional context-specific tags");
|
||||
@@ -230,13 +230,13 @@ async fn demo_conflict_resolution() -> Result<()> {
|
||||
println!(" Device B tagged same photo: 'family'");
|
||||
println!();
|
||||
|
||||
println!("🔄 Union merge resolution:");
|
||||
println!("Union merge resolution:");
|
||||
println!(" Result: Photo tagged with both 'vacation' AND 'family'");
|
||||
println!(" 📝 User notification: 'Combined tags for sunset.jpg from multiple devices'");
|
||||
println!(" 🔍 User can review and modify if needed");
|
||||
println!(" User notification: 'Combined tags for sunset.jpg from multiple devices'");
|
||||
println!(" User can review and modify if needed");
|
||||
println!();
|
||||
|
||||
println!("🎯 Conflict resolution benefits:");
|
||||
println!("Conflict resolution benefits:");
|
||||
println!(" • No data loss - all user intent preserved");
|
||||
println!(" • Additive approach - tags complement each other");
|
||||
println!(" • Transparent process - user knows what happened");
|
||||
@@ -250,28 +250,28 @@ async fn demo_organizational_patterns() -> Result<()> {
|
||||
println!("8. Emergent Organizational Patterns");
|
||||
println!("-----------------------------------");
|
||||
|
||||
println!("🔍 Pattern Discovery Examples:");
|
||||
println!("Pattern Discovery Examples:");
|
||||
println!();
|
||||
|
||||
println!("📊 Frequent Co-occurrence:");
|
||||
println!("Frequent Co-occurrence:");
|
||||
println!(" System notices 'Tax' and '2024' often used together");
|
||||
println!(" → Suggests creating 'Tax Documents 2024' organizational tag");
|
||||
println!();
|
||||
|
||||
println!("🌳 Hierarchical Suggestions:");
|
||||
println!("Hierarchical Suggestions:");
|
||||
println!(" Files tagged 'JavaScript' also often have 'React'");
|
||||
println!(" → Suggests React as child of JavaScript in hierarchy");
|
||||
println!();
|
||||
|
||||
println!("🎨 Visual Hierarchies:");
|
||||
println!("Visual Hierarchies:");
|
||||
println!(" Tags marked as 'organizational anchors' create visual structure:");
|
||||
println!(" 📁 Projects (organizational anchor)");
|
||||
println!(" ├── 🌐 Website Redesign");
|
||||
println!(" ├── 📱 Mobile App");
|
||||
println!(" └── 📊 Analytics Dashboard");
|
||||
println!(" Projects (organizational anchor)");
|
||||
println!(" ├── Website Redesign");
|
||||
println!(" ├── Mobile App");
|
||||
println!(" └── Analytics Dashboard");
|
||||
println!();
|
||||
|
||||
println!("🔒 Privacy Controls:");
|
||||
println!("Privacy Controls:");
|
||||
println!(" 'Personal' privacy tag hides content from standard searches");
|
||||
println!(" 'Archive' tag available via direct query but hidden from UI");
|
||||
println!(" 'Hidden' tag completely invisible except to admin users");
|
||||
@@ -290,7 +290,7 @@ async fn demo_advanced_features() -> Result<()> {
|
||||
println!("9. Advanced Features Summary");
|
||||
println!("---------------------------");
|
||||
|
||||
println!("🎯 What makes this semantic tagging special:");
|
||||
println!("What makes this semantic tagging special:");
|
||||
println!();
|
||||
|
||||
println!("🏗️ Graph-Based Architecture:");
|
||||
@@ -299,19 +299,19 @@ async fn demo_advanced_features() -> Result<()> {
|
||||
println!(" • Relationship strengths for nuanced connections");
|
||||
println!();
|
||||
|
||||
println!("🌍 Unicode-Native & International:");
|
||||
println!("Unicode-Native & International:");
|
||||
println!(" • Full support for any language/script");
|
||||
println!(" • Polymorphic naming across cultural contexts");
|
||||
println!(" • Namespace-based disambiguation");
|
||||
println!();
|
||||
|
||||
println!("🤝 Sync-Friendly:");
|
||||
println!("Sync-Friendly:");
|
||||
println!(" • Union merge prevents data loss");
|
||||
println!(" • Conflict-free replication for tag assignments");
|
||||
println!(" • Audit trail for all tag operations");
|
||||
println!();
|
||||
|
||||
println!("🧠 AI-Enhanced but User-Controlled:");
|
||||
println!("AI-Enhanced but User-Controlled:");
|
||||
println!(" • AI suggestions with confidence scoring");
|
||||
println!(" • User review and correction improves future AI");
|
||||
println!(" • Privacy-first: local models supported");
|
||||
|
||||
131
remove_emojis.sh
Executable file
131
remove_emojis.sh
Executable file
@@ -0,0 +1,131 @@
|
||||
#!/bin/bash

# remove_emojis.sh — strip 3D/colorful emojis (only when followed by a space)
# from every Rust source file under the current directory.
# To counter Claude's obsession with emojis.
# Simple monochrome symbols such as •, ✓ and → are preserved.
# Usage: ./remove_emojis.sh

set -euo pipefail

# ANSI colors for progress/result output.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

echo -e "${BLUE}🔍 Spacedrive Emoji Removal Script${NC}"
echo "Removing 3D/colorful emojis with trailing spaces from Rust files..."
echo "Preserving simple symbols like •, ✓, →"
echo

# Count files up front so progress can be reported as N/total.
total_files=$(find . -name "*.rs" -type f | wc -l | tr -d ' ')
echo -e "${BLUE}Found ${total_files} Rust files to process${NC}"
echo

# No backup needed - using git for version control.

# Counters for the summary.
processed=0
modified=0

# Rewrite one file in place, deleting every 3D/colorful emoji that is
# followed by exactly one space. The path is passed as sys.argv[1] instead
# of being interpolated into the Python source, so filenames containing
# quotes or backslashes can neither break the script nor inject code.
# Reads the file once and only writes it back when something changed.
# Prints MODIFIED, UNCHANGED, or "ERROR: <details>" and always exits 0,
# so the caller's command substitution is safe under `set -e`.
strip_emojis() {
    python3 - "$1" <<'PY'
import re
import sys

# Only target 3D/colorful emoji codepoint ranges followed by one space;
# plain symbols (•, ✓, →, …) fall outside these ranges and are kept.
EMOJI_PATTERN = re.compile(
    '[\U0001F600-\U0001F64F\U0001F300-\U0001F5FF\U0001F680-\U0001F6FF'
    '\U0001F780-\U0001F7FF\U0001F1E0-\U0001F1FF\U0001F900-\U0001F9FF'
    '\U0001FA00-\U0001FA6F\U0001FA70-\U0001FAFF] '
)

path = sys.argv[1]
try:
    with open(path, 'r', encoding='utf-8') as f:
        content = f.read()
    cleaned = EMOJI_PATTERN.sub('', content)
    if cleaned != content:
        with open(path, 'w', encoding='utf-8') as f:
            f.write(cleaned)
        print('MODIFIED')
    else:
        print('UNCHANGED')
except Exception as exc:
    print(f'ERROR: {exc}')
PY
}

echo -e "${BLUE}Processing files...${NC}"
echo

# -print0 with `read -r -d ''` keeps filenames containing spaces or
# newlines intact.
while IFS= read -r -d '' file; do
    # NOTE: `((processed++))` returns the PRE-increment value, so on the
    # first file the arithmetic command exits non-zero and `set -e` would
    # abort the whole script. A plain assignment has no such trap.
    processed=$((processed + 1))

    # Show progress on a single, continuously rewritten line.
    printf "\r${BLUE}Progress: ${processed}/${total_files}${NC} - Processing: %s" "$(basename "$file")"

    # strip_emojis always exits 0; its stdout tells us what happened.
    result=$(strip_emojis "$file")
    case "$result" in
        MODIFIED)
            modified=$((modified + 1))
            echo -e "\n${GREEN}✓ Modified: $file${NC}"
            ;;
        ERROR*)
            echo -e "\n${RED}✗ Error processing: $file${NC}"
            ;;
    esac
done < <(find . -name "*.rs" -type f -print0)

echo -e "\n\n${GREEN}✅ Processing complete!${NC}"
echo -e "${BLUE}Summary:${NC}"
echo -e "  Total files processed: ${processed}"
echo -e "  Files modified: ${modified}"
echo

if [[ $modified -gt 0 ]]; then
    echo -e "${GREEN}${modified} files were modified.${NC}"
    echo -e "${YELLOW}Use 'git diff' to see changes or 'git checkout .' to revert${NC}"
else
    echo -e "${GREEN}No files needed modification.${NC}"
fi

echo -e "\n${GREEN}Done!${NC}"
|
||||
Reference in New Issue
Block a user