Merge pull request #3002 from spacedriveapp/tembo/upgrade-iroh-v0-95-1

Upgrade Iroh to v0.95.1
This commit is contained in:
Jamie Pine
2026-01-24 16:18:22 -08:00
committed by GitHub
92 changed files with 1153 additions and 907 deletions

BIN
Cargo.lock generated
View File

Binary file not shown.

View File

@@ -59,7 +59,7 @@ globset = "0.4.15"
http = "1.2.0"
hyper = "1.5.2"
image = "0.25.5"
iroh = "0.29.0"
iroh = "0.95.1"
itertools = "0.13.0"
lending-stream = "1.0"
libc = "0.2.169"

View File

@@ -99,9 +99,7 @@ fn display_event(event: &Event, args: &EventsMonitorArgs) {
// Determine output format (new format flag takes precedence over legacy flags)
let use_json_pretty = matches!(args.format, OutputFormat::JsonPretty) || args.pretty;
let use_json = matches!(args.format, OutputFormat::Json)
|| use_json_pretty
|| args.verbose;
let use_json = matches!(args.format, OutputFormat::Json) || use_json_pretty || args.verbose;
if use_json {
// JSON mode: show full JSON
@@ -441,25 +439,28 @@ fn summarize_event(event: &Event) -> String {
format!("Sync error: {}", message)
}
// Proxy pairing events
Event::ProxyPairingConfirmationRequired {
vouchee_device_name,
voucher_device_name,
..
} => {
format!(
"Proxy pairing confirmation required: {} vouched by {}",
vouchee_device_name, voucher_device_name
)
}
Event::ProxyPairingVouchingReady {
vouchee_device_id, ..
} => {
format!("Proxy pairing vouching ready for device {}", vouchee_device_id)
}
// Proxy pairing events
Event::ProxyPairingConfirmationRequired {
vouchee_device_name,
voucher_device_name,
..
} => {
format!(
"Proxy pairing confirmation required: {} vouched by {}",
vouchee_device_name, voucher_device_name
)
}
Event::ProxyPairingVouchingReady {
vouchee_device_id, ..
} => {
format!(
"Proxy pairing vouching ready for device {}",
vouchee_device_id
)
}
// Config events
Event::ConfigChanged { .. } => "Configuration changed".to_string(),
// Config events
Event::ConfigChanged { .. } => "Configuration changed".to_string(),
// Custom events
Event::Custom { event_type, data } => {
@@ -472,4 +473,3 @@ fn summarize_event(event: &Event) -> String {
}
}
}
}

View File

@@ -332,7 +332,8 @@ pub async fn run(ctx: &Context, cmd: IndexCmd) -> Result<()> {
&format_bytes(breakdown.path_index_overhead as u64),
&format_bytes(breakdown.path_index_entries as u64),
&format_bytes(
(breakdown.path_index_overhead + breakdown.path_index_entries) as u64
(breakdown.path_index_overhead + breakdown.path_index_entries)
as u64,
),
]);
breakdown_table.add_row(vec![
@@ -340,7 +341,8 @@ pub async fn run(ctx: &Context, cmd: IndexCmd) -> Result<()> {
&format_bytes(breakdown.entry_uuids_overhead as u64),
&format_bytes(breakdown.entry_uuids_entries as u64),
&format_bytes(
(breakdown.entry_uuids_overhead + breakdown.entry_uuids_entries) as u64
(breakdown.entry_uuids_overhead + breakdown.entry_uuids_entries)
as u64,
),
]);
breakdown_table.add_row(vec![
@@ -349,13 +351,12 @@ pub async fn run(ctx: &Context, cmd: IndexCmd) -> Result<()> {
&format_bytes(breakdown.content_kinds_entries as u64),
&format_bytes(
(breakdown.content_kinds_overhead + breakdown.content_kinds_entries)
as u64
as u64,
),
]);
let total = breakdown.arena
+ breakdown.cache
+ breakdown.registry
+ breakdown.cache + breakdown.registry
+ breakdown.path_index_overhead
+ breakdown.path_index_entries
+ breakdown.entry_uuids_overhead

View File

@@ -620,7 +620,11 @@ async fn show_partners(ctx: &Context) -> Result<()> {
println!(" - Paired devices do not have sync_enabled=true");
println!();
} else {
println!(" {} {} sync partner(s) available", "".green(), output.partners.len());
println!(
" {} {} sync partner(s) available",
"".green(),
output.partners.len()
);
println!();
let mut table = Table::new();
@@ -653,10 +657,22 @@ async fn show_partners(ctx: &Context) -> Result<()> {
println!("{}", "Library Membership Debug".dark_grey().bold());
println!("{}", "".repeat(60).dark_grey());
println!();
println!(" Total devices in library: {}", output.debug_info.total_devices);
println!(" Devices with sync_enabled: {}", output.debug_info.sync_enabled_devices);
println!(" Devices with NodeId mapping: {}", output.debug_info.paired_devices);
println!(" Final sync partners: {}", output.debug_info.final_sync_partners);
println!(
" Total devices in library: {}",
output.debug_info.total_devices
);
println!(
" Devices with sync_enabled: {}",
output.debug_info.sync_enabled_devices
);
println!(
" Devices with NodeId mapping: {}",
output.debug_info.paired_devices
);
println!(
" Final sync partners: {}",
output.debug_info.final_sync_partners
);
println!();
if !output.debug_info.device_details.is_empty() {
@@ -665,10 +681,18 @@ async fn show_partners(ctx: &Context) -> Result<()> {
.load_preset(UTF8_FULL)
.set_content_arrangement(ContentArrangement::Dynamic)
.set_header(Row::from(vec![
Cell::new("Device").add_attribute(Attribute::Bold).fg(Color::DarkGrey),
Cell::new("Sync Enabled").add_attribute(Attribute::Bold).fg(Color::DarkGrey),
Cell::new("Has NodeId").add_attribute(Attribute::Bold).fg(Color::DarkGrey),
Cell::new("NodeId").add_attribute(Attribute::Bold).fg(Color::DarkGrey),
Cell::new("Device")
.add_attribute(Attribute::Bold)
.fg(Color::DarkGrey),
Cell::new("Sync Enabled")
.add_attribute(Attribute::Bold)
.fg(Color::DarkGrey),
Cell::new("Has NodeId")
.add_attribute(Attribute::Bold)
.fg(Color::DarkGrey),
Cell::new("NodeId")
.add_attribute(Attribute::Bold)
.fg(Color::DarkGrey),
]));
for device in &output.debug_info.device_details {

View File

@@ -86,9 +86,9 @@ pub async fn run(ctx: &Context, cmd: VolumeCmd) -> Result<()> {
println!(" ID: {}", volume.id);
println!(" Fingerprint: {}", volume.fingerprint);
println!(" Type: {:?}", volume.volume_type);
println!(" Mount: {}", volume.mount_point.display());
println!(" Mounted: {}", volume.is_mounted);
println!(" Tracked: {}", volume.is_tracked);
println!(" Mount: {}", volume.mount_point.display());
println!(" Mounted: {}", volume.is_mounted);
println!(" Tracked: {}", volume.is_tracked);
println!();
}
}

View File

@@ -43,7 +43,6 @@ pub mod commands {
/// Platform-specific data directory resolution
pub fn default_data_dir() -> anyhow::Result<std::path::PathBuf> {
#[cfg(target_os = "macos")]
let dir = dirs::data_dir()
.ok_or_else(|| anyhow::anyhow!("Could not determine data directory"))?

View File

@@ -130,10 +130,8 @@ hound = "3.5" # WAV file reading
rubato = "0.16" # Audio resampling to 16kHz
# Networking
# Iroh P2P networking with iOS support (using Oscar's patches)
iroh = { git = "https://github.com/n0-computer/iroh", rev = "e0c5091008d42f4c577f72b1085dfb26c28bd56f", features = [
"discovery-local-network"
] }
# Iroh P2P networking
iroh = { version = "0.95.1", features = ["discovery-local-network"] }
# Serialization for protocols
serde_cbor = "0.11"
@@ -249,7 +247,5 @@ tempfile = "3.14"
# Patches for iOS compatibility
[patch.crates-io]
# https://github.com/n0-computer/iroh/pull/3409
iroh = { git = "https://github.com/n0-computer/iroh", rev = "e0c5091008d42f4c577f72b1085dfb26c28bd56f" }
# https://github.com/shellrow/netdev/pull/125
netdev = { git = "https://github.com/shellrow/netdev", rev = "b6ef275d2a72143b3c7d5845ee2f5a70b0e97771" }

View File

@@ -32,11 +32,18 @@ async fn main() {
for vol in &volumes {
println!("Volume: {}", vol.name);
println!(" Display name: {}", vol.display_name.as_ref().unwrap_or(&"None".to_string()));
println!(
" Display name: {}",
vol.display_name.as_ref().unwrap_or(&"None".to_string())
);
println!(" Mount point: {}", vol.mount_point.display());
println!(" Type: {:?}", vol.volume_type);
println!(" Filesystem: {}", vol.file_system);
println!(" Fingerprint: {} ({})", vol.fingerprint.short_id(), vol.fingerprint.0);
println!(
" Fingerprint: {} ({})",
vol.fingerprint.short_id(),
vol.fingerprint.0
);
println!(" Is user visible: {}", vol.is_user_visible);
println!(" Auto-track eligible: {}", vol.auto_track_eligible);
println!(" Is tracked: {}", vol.is_tracked);
@@ -44,14 +51,15 @@ async fn main() {
}
// Show specifically which volumes are auto-track eligible
let auto_track: Vec<_> = volumes
.iter()
.filter(|v| v.auto_track_eligible)
.collect();
let auto_track: Vec<_> = volumes.iter().filter(|v| v.auto_track_eligible).collect();
println!("=== Auto-Track Eligible Volumes ({}) ===", auto_track.len());
for vol in auto_track {
println!(" - {} ({})", vol.display_name.as_ref().unwrap_or(&vol.name), vol.mount_point.display());
println!(
" - {} ({})",
vol.display_name.as_ref().unwrap_or(&vol.name),
vol.mount_point.display()
);
}
// Show Primary volumes specifically

View File

@@ -28,7 +28,11 @@ fn main() {
let fp_ext1 = VolumeFingerprint::from_external_volume(spacedrive_id, device_id);
let fp_ext2 = VolumeFingerprint::from_external_volume(spacedrive_id, device_id);
println!(" With same dotfile UUID: {} == {}", fp_ext1.short_id(), fp_ext2.short_id());
println!(
" With same dotfile UUID: {} == {}",
fp_ext1.short_id(),
fp_ext2.short_id()
);
println!(" Match: {}\n", fp_ext1 == fp_ext2);
// Test 3: Network volume stability
@@ -53,7 +57,10 @@ fn main() {
println!(" Mount at /Volumes/MyDrive: {}", fp_mount1.short_id());
println!(" Mount at /Volumes/MyDrive1: {}", fp_mount2.short_id());
println!(" Different: {} (expected for primary volumes)\n", fp_mount1 != fp_mount2);
println!(
" Different: {} (expected for primary volumes)\n",
fp_mount1 != fp_mount2
);
// Test 5: External volume - Same dotfile UUID, different mount points
println!("Test 5: External volume - Dotfile UUID stable across remounts");
@@ -63,7 +70,10 @@ fn main() {
println!(" Mounted at /Volumes/USB: {}", fp_at_mount1.short_id());
println!(" Mounted at /Volumes/USB1: {}", fp_at_mount2.short_id());
println!(" Match: {} (dotfile UUID is stable!)\n", fp_at_mount1 == fp_at_mount2);
println!(
" Match: {} (dotfile UUID is stable!)\n",
fp_at_mount1 == fp_at_mount2
);
// Summary
println!("=== Summary ===");

View File

@@ -132,7 +132,9 @@ impl RpcServer {
}
/// Start the event broadcaster that forwards core events to subscribed connections
async fn start_event_broadcaster(&self) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
async fn start_event_broadcaster(
&self,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
let core = self.core.clone();
// Make the core's LogBus globally available to the LogEventLayer

View File

@@ -251,156 +251,143 @@ impl crate::infra::sync::Syncable for Model {
.unwrap_or(serde_json::Value::String("Unknown".to_string())),
)
.unwrap_or_else(|_| "Unknown".to_string())),
os_version: Set(
data.get("os_version")
.filter(|v| !v.is_null())
.map(|v| {
serde_json::from_value::<String>(v.clone()).map_err(|e| {
sea_orm::DbErr::Custom(format!("Invalid os_version: {}", e))
})
os_version: Set(data
.get("os_version")
.filter(|v| !v.is_null())
.map(|v| {
serde_json::from_value::<String>(v.clone()).map_err(|e| {
sea_orm::DbErr::Custom(format!("Invalid os_version: {}", e))
})
.transpose()?,
),
hardware_model: Set(
data.get("hardware_model")
.filter(|v| !v.is_null())
.map(|v| {
serde_json::from_value::<String>(v.clone()).map_err(|e| {
sea_orm::DbErr::Custom(format!("Invalid hardware_model: {}", e))
})
})
.transpose()?),
hardware_model: Set(data
.get("hardware_model")
.filter(|v| !v.is_null())
.map(|v| {
serde_json::from_value::<String>(v.clone()).map_err(|e| {
sea_orm::DbErr::Custom(format!("Invalid hardware_model: {}", e))
})
.transpose()?,
),
cpu_model: Set(
data.get("cpu_model")
.filter(|v| !v.is_null())
.map(|v| {
serde_json::from_value::<String>(v.clone()).map_err(|e| {
sea_orm::DbErr::Custom(format!("Invalid cpu_model: {}", e))
})
})
.transpose()?),
cpu_model: Set(data
.get("cpu_model")
.filter(|v| !v.is_null())
.map(|v| {
serde_json::from_value::<String>(v.clone()).map_err(|e| {
sea_orm::DbErr::Custom(format!("Invalid cpu_model: {}", e))
})
.transpose()?,
),
cpu_architecture: Set(
data.get("cpu_architecture")
.filter(|v| !v.is_null())
.map(|v| {
serde_json::from_value::<String>(v.clone()).map_err(|e| {
sea_orm::DbErr::Custom(format!("Invalid cpu_architecture: {}", e))
})
})
.transpose()?),
cpu_architecture: Set(data
.get("cpu_architecture")
.filter(|v| !v.is_null())
.map(|v| {
serde_json::from_value::<String>(v.clone()).map_err(|e| {
sea_orm::DbErr::Custom(format!("Invalid cpu_architecture: {}", e))
})
.transpose()?,
),
cpu_cores_physical: Set(
data.get("cpu_cores_physical")
.filter(|v| !v.is_null())
.map(|v| {
serde_json::from_value::<u32>(v.clone()).map_err(|e| {
sea_orm::DbErr::Custom(format!("Invalid cpu_cores_physical: {}", e))
})
})
.transpose()?),
cpu_cores_physical: Set(data
.get("cpu_cores_physical")
.filter(|v| !v.is_null())
.map(|v| {
serde_json::from_value::<u32>(v.clone()).map_err(|e| {
sea_orm::DbErr::Custom(format!("Invalid cpu_cores_physical: {}", e))
})
.transpose()?,
),
cpu_cores_logical: Set(
data.get("cpu_cores_logical")
.filter(|v| !v.is_null())
.map(|v| {
serde_json::from_value::<u32>(v.clone()).map_err(|e| {
sea_orm::DbErr::Custom(format!("Invalid cpu_cores_logical: {}", e))
})
})
.transpose()?),
cpu_cores_logical: Set(data
.get("cpu_cores_logical")
.filter(|v| !v.is_null())
.map(|v| {
serde_json::from_value::<u32>(v.clone()).map_err(|e| {
sea_orm::DbErr::Custom(format!("Invalid cpu_cores_logical: {}", e))
})
.transpose()?,
),
cpu_frequency_mhz: Set(
data.get("cpu_frequency_mhz")
.filter(|v| !v.is_null())
.map(|v| {
serde_json::from_value::<i64>(v.clone()).map_err(|e| {
sea_orm::DbErr::Custom(format!("Invalid cpu_frequency_mhz: {}", e))
})
})
.transpose()?),
cpu_frequency_mhz: Set(data
.get("cpu_frequency_mhz")
.filter(|v| !v.is_null())
.map(|v| {
serde_json::from_value::<i64>(v.clone()).map_err(|e| {
sea_orm::DbErr::Custom(format!("Invalid cpu_frequency_mhz: {}", e))
})
.transpose()?,
),
memory_total_bytes: Set(
data.get("memory_total_bytes")
.filter(|v| !v.is_null())
.map(|v| {
serde_json::from_value::<i64>(v.clone()).map_err(|e| {
sea_orm::DbErr::Custom(format!("Invalid memory_total_bytes: {}", e))
})
})
.transpose()?),
memory_total_bytes: Set(data
.get("memory_total_bytes")
.filter(|v| !v.is_null())
.map(|v| {
serde_json::from_value::<i64>(v.clone()).map_err(|e| {
sea_orm::DbErr::Custom(format!("Invalid memory_total_bytes: {}", e))
})
.transpose()?,
),
form_factor: Set(
data.get("form_factor")
.filter(|v| !v.is_null())
.map(|v| {
serde_json::from_value::<String>(v.clone()).map_err(|e| {
sea_orm::DbErr::Custom(format!("Invalid form_factor: {}", e))
})
})
.transpose()?),
form_factor: Set(data
.get("form_factor")
.filter(|v| !v.is_null())
.map(|v| {
serde_json::from_value::<String>(v.clone()).map_err(|e| {
sea_orm::DbErr::Custom(format!("Invalid form_factor: {}", e))
})
.transpose()?,
),
manufacturer: Set(
data.get("manufacturer")
.filter(|v| !v.is_null())
.map(|v| {
serde_json::from_value::<String>(v.clone()).map_err(|e| {
sea_orm::DbErr::Custom(format!("Invalid manufacturer: {}", e))
})
})
.transpose()?),
manufacturer: Set(data
.get("manufacturer")
.filter(|v| !v.is_null())
.map(|v| {
serde_json::from_value::<String>(v.clone()).map_err(|e| {
sea_orm::DbErr::Custom(format!("Invalid manufacturer: {}", e))
})
.transpose()?,
),
gpu_models: Set(
data.get("gpu_models")
.filter(|v| !v.is_null())
.map(|v| {
serde_json::from_value::<Json>(v.clone()).map_err(|e| {
sea_orm::DbErr::Custom(format!("Invalid gpu_models: {}", e))
})
})
.transpose()?),
gpu_models: Set(data
.get("gpu_models")
.filter(|v| !v.is_null())
.map(|v| {
serde_json::from_value::<Json>(v.clone()).map_err(|e| {
sea_orm::DbErr::Custom(format!("Invalid gpu_models: {}", e))
})
.transpose()?,
),
boot_disk_type: Set(
data.get("boot_disk_type")
.filter(|v| !v.is_null())
.map(|v| {
serde_json::from_value::<String>(v.clone()).map_err(|e| {
sea_orm::DbErr::Custom(format!("Invalid boot_disk_type: {}", e))
})
})
.transpose()?),
boot_disk_type: Set(data
.get("boot_disk_type")
.filter(|v| !v.is_null())
.map(|v| {
serde_json::from_value::<String>(v.clone()).map_err(|e| {
sea_orm::DbErr::Custom(format!("Invalid boot_disk_type: {}", e))
})
.transpose()?,
),
boot_disk_capacity_bytes: Set(
data.get("boot_disk_capacity_bytes")
.filter(|v| !v.is_null())
.map(|v| {
serde_json::from_value::<i64>(v.clone()).map_err(|e| {
sea_orm::DbErr::Custom(format!("Invalid boot_disk_capacity_bytes: {}", e))
})
})
.transpose()?),
boot_disk_capacity_bytes: Set(data
.get("boot_disk_capacity_bytes")
.filter(|v| !v.is_null())
.map(|v| {
serde_json::from_value::<i64>(v.clone()).map_err(|e| {
sea_orm::DbErr::Custom(format!(
"Invalid boot_disk_capacity_bytes: {}",
e
))
})
.transpose()?,
),
swap_total_bytes: Set(
data.get("swap_total_bytes")
.filter(|v| !v.is_null())
.map(|v| {
serde_json::from_value::<i64>(v.clone()).map_err(|e| {
sea_orm::DbErr::Custom(format!("Invalid swap_total_bytes: {}", e))
})
})
.transpose()?),
swap_total_bytes: Set(data
.get("swap_total_bytes")
.filter(|v| !v.is_null())
.map(|v| {
serde_json::from_value::<i64>(v.clone()).map_err(|e| {
sea_orm::DbErr::Custom(format!("Invalid swap_total_bytes: {}", e))
})
.transpose()?,
),
network_addresses: Set(
serde_json::from_value(
data.get("network_addresses")
.cloned()
.unwrap_or(serde_json::json!([])),
)
.map_err(|e| {
sea_orm::DbErr::Custom(format!("Invalid network_addresses: {}", e))
})?,
),
})
.transpose()?),
network_addresses: Set(serde_json::from_value(
data.get("network_addresses")
.cloned()
.unwrap_or(serde_json::json!([])),
)
.map_err(|e| {
sea_orm::DbErr::Custom(format!("Invalid network_addresses: {}", e))
})?),
is_online: Set(serde_json::from_value(
data.get("is_online")
.cloned()
@@ -413,16 +400,12 @@ impl crate::infra::sync::Syncable for Model {
.unwrap_or_else(|| serde_json::json!(chrono::Utc::now())),
)
.unwrap_or_else(|_| chrono::Utc::now().into())),
capabilities: Set(
serde_json::from_value(
data.get("capabilities")
.cloned()
.unwrap_or(serde_json::json!({})),
)
.map_err(|e| {
sea_orm::DbErr::Custom(format!("Invalid capabilities: {}", e))
})?,
),
capabilities: Set(serde_json::from_value(
data.get("capabilities")
.cloned()
.unwrap_or(serde_json::json!({})),
)
.map_err(|e| sea_orm::DbErr::Custom(format!("Invalid capabilities: {}", e)))?),
created_at: Set(chrono::Utc::now().into()),
updated_at: Set(chrono::Utc::now().into()),
sync_enabled: Set(serde_json::from_value(

View File

@@ -117,13 +117,11 @@ impl Syncable for Model {
// Cursor-based pagination with tie-breaker
if let Some((cursor_ts, cursor_uuid)) = cursor {
query = query.filter(
Condition::any()
.add(Column::CreatedAt.gt(cursor_ts))
.add(
Condition::all()
.add(Column::CreatedAt.eq(cursor_ts))
.add(Column::Uuid.gt(cursor_uuid)),
),
Condition::any().add(Column::CreatedAt.gt(cursor_ts)).add(
Condition::all()
.add(Column::CreatedAt.eq(cursor_ts))
.add(Column::Uuid.gt(cursor_uuid)),
),
);
}

View File

@@ -113,7 +113,12 @@ impl MigrationTrait for Migration {
.auto_increment()
.primary_key(),
)
.col(ColumnDef::new(MimeTypes::Uuid).uuid().not_null().unique_key())
.col(
ColumnDef::new(MimeTypes::Uuid)
.uuid()
.not_null()
.unique_key(),
)
.col(
ColumnDef::new(MimeTypes::MimeType)
.string()

View File

@@ -40,10 +40,8 @@ impl MigrationTrait for Migration {
// Restore columns for rollback
let db = manager.get_connection();
db.execute_unprepared(
"ALTER TABLE devices ADD COLUMN last_sync_at TEXT DEFAULT NULL",
)
.await?;
db.execute_unprepared("ALTER TABLE devices ADD COLUMN last_sync_at TEXT DEFAULT NULL")
.await?;
db.execute_unprepared(
"ALTER TABLE devices ADD COLUMN last_state_watermark TEXT DEFAULT NULL",

View File

@@ -419,13 +419,16 @@ impl JobManager {
};
// Emit final progress event if one exists (may have been throttled)
if let Some(final_progress) = latest_progress_for_monitor.lock().await.as_ref() {
if let Some(final_progress) =
latest_progress_for_monitor.lock().await.as_ref()
{
let generic_progress = match final_progress {
Progress::Structured(value) => {
// Try to deserialize CopyProgress and convert to GenericProgress
if let Ok(copy_progress) = serde_json::from_value::<
crate::ops::files::copy::CopyProgress,
>(value.clone())
if let Ok(copy_progress) =
serde_json::from_value::<
crate::ops::files::copy::CopyProgress,
>(value.clone())
{
use crate::infra::job::generic_progress::ToGenericProgress;
Some(copy_progress.to_generic_progress())
@@ -441,7 +444,8 @@ impl JobManager {
job_id: job_id_clone.to_string(),
job_type: job_type_str.to_string(),
device_id,
progress: final_progress.as_percentage().unwrap_or(0.0) as f64,
progress: final_progress.as_percentage().unwrap_or(0.0)
as f64,
message: Some(final_progress.to_string()),
generic_progress,
});
@@ -646,7 +650,8 @@ impl JobManager {
// Database persistence (only for non-ephemeral jobs)
if should_persist {
if last_db_update.elapsed() >= DB_UPDATE_INTERVAL {
if let Err(e) = job_db_clone.update_progress(job_id_clone, &progress).await {
if let Err(e) = job_db_clone.update_progress(job_id_clone, &progress).await
{
debug!("Failed to persist job progress to database: {}", e);
}
last_db_update = std::time::Instant::now();
@@ -845,13 +850,16 @@ impl JobManager {
};
// Emit final progress event if one exists (may have been throttled)
if let Some(final_progress) = latest_progress_for_monitor.lock().await.as_ref() {
if let Some(final_progress) =
latest_progress_for_monitor.lock().await.as_ref()
{
let generic_progress = match final_progress {
Progress::Structured(value) => {
// Try to deserialize CopyProgress and convert to GenericProgress
if let Ok(copy_progress) = serde_json::from_value::<
crate::ops::files::copy::CopyProgress,
>(value.clone())
if let Ok(copy_progress) =
serde_json::from_value::<
crate::ops::files::copy::CopyProgress,
>(value.clone())
{
use crate::infra::job::generic_progress::ToGenericProgress;
Some(copy_progress.to_generic_progress())
@@ -867,7 +875,8 @@ impl JobManager {
job_id: job_id_clone.to_string(),
job_type: job_type_str.to_string(),
device_id,
progress: final_progress.as_percentage().unwrap_or(0.0) as f64,
progress: final_progress.as_percentage().unwrap_or(0.0)
as f64,
message: Some(final_progress.to_string()),
generic_progress,
});
@@ -1071,7 +1080,9 @@ impl JobManager {
};
// Get job data from in-memory struct (for non-persisted jobs) or database
let (job_name, action_type, action_context) = if let Some(ctx) = &running_job.action_context {
let (job_name, action_type, action_context) = if let Some(ctx) =
&running_job.action_context
{
// Use in-memory action_context (for ephemeral volume jobs)
let action_context_info = ActionContextInfo {
action_type: ctx.action_type.clone(),
@@ -1508,11 +1519,15 @@ impl JobManager {
let latest_progress_for_monitor = latest_progress.clone();
// Deserialize action context from database if available
let action_context = if let Some(context_data) = &job_record.action_context {
rmp_serde::from_slice::<crate::infra::action::context::ActionContext>(context_data).ok()
} else {
None
};
let action_context =
if let Some(context_data) = &job_record.action_context {
rmp_serde::from_slice::<
crate::infra::action::context::ActionContext,
>(context_data)
.ok()
} else {
None
};
self.running_jobs.write().await.insert(
job_id,
@@ -1807,7 +1822,10 @@ impl JobManager {
.await?;
if result.rows_affected == 0 {
return Err(JobError::NotFound(format!("Job {} not found in database", job_id)));
return Err(JobError::NotFound(format!(
"Job {} not found in database",
job_id
)));
}
}
@@ -1816,7 +1834,12 @@ impl JobManager {
return Err(JobError::NotFound(format!("Job {} not found", job_id)));
}
info!("Job {} cancelled (in memory: {}, in db: {})", job_id, is_in_memory, db_job.is_some());
info!(
"Job {} cancelled (in memory: {}, in db: {})",
job_id,
is_in_memory,
db_job.is_some()
);
Ok(())
}
@@ -1855,12 +1878,19 @@ impl JobManager {
// Deserialize action context from database if available
let action_context = if let Some(context_data) = &job_record.action_context {
rmp_serde::from_slice::<crate::infra::action::context::ActionContext>(context_data).ok()
rmp_serde::from_slice::<crate::infra::action::context::ActionContext>(
context_data,
)
.ok()
} else {
None
};
Some((job_record.name.clone(), job_record.state.clone(), action_context))
Some((
job_record.name.clone(),
job_record.state.clone(),
action_context,
))
}
};

View File

@@ -923,7 +923,11 @@ mod tests {
println!("Registered syncable models ({}):", models.len());
for model in &models {
let reg = registry.get(model).unwrap();
let sync_type = if reg.is_device_owned { "device-owned" } else { "shared" };
let sync_type = if reg.is_device_owned {
"device-owned"
} else {
"shared"
};
println!(" - {} ({})", model, sync_type);
}

View File

@@ -481,7 +481,9 @@ impl Core {
}
/// Initialize networking using master key
pub async fn init_networking(&mut self) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
pub async fn init_networking(
&mut self,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
self.init_networking_with_logger(Arc::new(service::network::SilentLogger))
.await
}
@@ -519,27 +521,30 @@ impl Core {
if let Some(networking_service) = self.services.networking() {
// Register default protocol handlers only if networking was just initialized
// (if networking was already initialized during Core::new(), protocols are already registered)
if !already_initialized {
logger.info("Registering protocol handlers...").await;
self.register_default_protocols(&networking_service).await?;
} else {
logger
.info("Protocol handlers already registered during initialization")
.await;
// Reload protocol configs even when networking is already initialized
// This allows tests and runtime config changes to take effect
logger.info("Reloading protocol configs from disk...").await;
if let Err(e) = reload_protocol_configs(&networking_service, &self.config.read().await.data_dir).await {
logger
.warn(&format!("Failed to reload some protocol configs: {}", e))
.await;
if !already_initialized {
logger.info("Registering protocol handlers...").await;
self.register_default_protocols(&networking_service).await?;
} else {
logger.info("Protocol configs reloaded successfully").await;
}
}
logger
.info("Protocol handlers already registered during initialization")
.await;
// Set up event bridge to integrate with core event system (only if not already done)
// Reload protocol configs even when networking is already initialized
// This allows tests and runtime config changes to take effect
logger.info("Reloading protocol configs from disk...").await;
if let Err(e) =
reload_protocol_configs(&networking_service, &self.config.read().await.data_dir)
.await
{
logger
.warn(&format!("Failed to reload some protocol configs: {}", e))
.await;
} else {
logger.info("Protocol configs reloaded successfully").await;
}
}
// Set up event bridge to integrate with core event system (only if not already done)
if !already_initialized {
let event_bridge = NetworkEventBridge::new(
networking_service.subscribe_events(),
@@ -759,8 +764,8 @@ async fn reload_protocol_configs(
if let Some(handler) = guard.get_handler("pairing") {
if let Some(pairing_handler) = handler
.as_any()
.downcast_ref::<crate::service::network::protocol::PairingProtocolHandler>()
{
.downcast_ref::<crate::service::network::protocol::PairingProtocolHandler>(
) {
pairing_handler
.set_proxy_config(app_config.proxy_pairing)
.await;

View File

@@ -1409,7 +1409,9 @@ impl Library {
// Count and size files in thumbnails directory (legacy structure)
if thumbnails_dir.exists() {
let (count, size) = self.count_and_size_recursive(thumbnails_dir.clone()).await?;
let (count, size) = self
.count_and_size_recursive(thumbnails_dir.clone())
.await?;
total_count += count;
total_size += size;
debug!(
@@ -1441,7 +1443,8 @@ impl Library {
while let Some(entry) = entries.next_entry().await? {
let file_type = entry.file_type().await?;
if file_type.is_dir() {
let (sub_count, sub_size) = Box::pin(self.count_and_size_recursive_impl(entry.path())).await?;
let (sub_count, sub_size) =
Box::pin(self.count_and_size_recursive_impl(entry.path())).await?;
count += sub_count;
size += sub_size;
} else if file_type.is_file() {
@@ -1796,7 +1799,8 @@ impl Library {
while let Some(entry) = entries.next_entry().await? {
let file_type = entry.file_type().await?;
if file_type.is_dir() {
let (sub_count, sub_size) = Box::pin(Self::count_and_size_recursive_static_impl(entry.path())).await?;
let (sub_count, sub_size) =
Box::pin(Self::count_and_size_recursive_static_impl(entry.path())).await?;
count += sub_count;
size += sub_size;
} else if file_type.is_file() {

View File

@@ -103,7 +103,11 @@ impl LocationManager {
}
}
Err(e) => {
warn!("Failed to get metadata for location root {}: {}", path.display(), e);
warn!(
"Failed to get metadata for location root {}: {}",
path.display(),
e
);
None
}
}
@@ -132,14 +136,19 @@ impl LocationManager {
// Resolve volume for this location path BEFORE creating the entry
// Volume detection is required - all locations must have a volume
let volume_id = match volume_manager.resolve_volume_for_sdpath(&sd_path, &library).await {
let volume_id = match volume_manager
.resolve_volume_for_sdpath(&sd_path, &library)
.await
{
Ok(Some(volume)) => {
info!("Resolved volume '{}' for location path", volume.name);
// Ensure volume is in database and get its ID
volume_manager
.ensure_volume_in_db(&volume, &library)
.await
.map_err(|e| LocationError::Other(format!("Failed to register volume: {}", e)))?
.map_err(|e| {
LocationError::Other(format!("Failed to register volume: {}", e))
})?
}
Ok(None) => {
return Err(LocationError::Other(format!(
@@ -173,8 +182,8 @@ impl LocationManager {
indexed_at: Set(Some(now)), // Record when location root was created
permissions: Set(None),
inode: Set(inode.map(|i| i as i64)), // Use extracted inode
parent_id: Set(None), // Location root has no parent
volume_id: Set(Some(volume_id)), // Volume is required for all locations
parent_id: Set(None), // Location root has no parent
volume_id: Set(Some(volume_id)), // Volume is required for all locations
..Default::default()
};

View File

@@ -6,6 +6,7 @@ use crate::{
domain::Device,
infra::query::{LibraryQuery, QueryError, QueryResult},
};
use iroh::Watcher;
use sea_orm::{ColumnTrait, EntityTrait, QueryFilter, QueryOrder};
use serde::{Deserialize, Serialize};
use specta::Type;
@@ -154,14 +155,21 @@ impl LibraryQuery for ListLibraryDevicesQuery {
// Query Iroh directly for actual connection status and method
let (is_actually_connected, connection_method) = if let Some(ep) = endpoint {
// Get node ID for this device
let node_id = registry.get_node_id_for_device(device_id);
if let Some(node_id) = node_id {
// Query Iroh for connection info
if let Some(remote_info) = ep.remote_info(node_id) {
let conn_method = crate::domain::device::ConnectionMethod::from_iroh_connection_type(remote_info.conn_type);
let is_connected = conn_method.is_some();
if let Some(node_id) = registry.get_node_id_for_device(device_id) {
// Use conn_type() API (replaces remote_info() removed in v0.93+)
if let Some(mut conn_type_watcher) = ep.conn_type(node_id) {
// Get current connection type from watcher using the Watcher trait's get() method
let conn_type = conn_type_watcher.get();
// Check connection status first (before conn_type is moved)
let is_connected =
!matches!(conn_type, iroh::endpoint::ConnectionType::None);
let conn_method =
crate::domain::device::ConnectionMethod::from_iroh_connection_type(
conn_type,
);
(is_connected, conn_method)
} else {
// No address information exists for this endpoint (never connected)
(false, None)
}
} else {
@@ -187,6 +195,7 @@ impl LibraryQuery for ListLibraryDevicesQuery {
// Always update online/connected status based on current network state
// (database is_online column can be stale for remote devices)
// TODO: remove that column imo
existing.is_connected = is_actually_connected;
existing.is_online = is_actually_connected;
existing.connection_method = connection_method;
@@ -213,7 +222,8 @@ impl LibraryQuery for ListLibraryDevicesQuery {
}
// Convert network DeviceInfo to domain Device
let device = Device::from_network_info(&info, is_actually_connected, connection_method);
let device =
Device::from_network_info(&info, is_actually_connected, connection_method);
result.push(device);
}
}

View File

@@ -66,17 +66,16 @@ impl CoreAction for UpdateDeviceAction {
}
// Validate slug format (alphanumeric + hyphens only)
if !slug.chars().all(|c| c.is_alphanumeric() || c == '-') {
return Err("Device slug can only contain letters, numbers, and hyphens".to_string());
return Err(
"Device slug can only contain letters, numbers, and hyphens".to_string()
);
}
}
Ok(Self { input })
}
async fn execute(
self,
context: Arc<CoreContext>,
) -> Result<Self::Output, ActionError> {
async fn execute(self, context: Arc<CoreContext>) -> Result<Self::Output, ActionError> {
// Load current device config
let mut device_config = DeviceConfig::load_from(&context.data_dir)
.map_err(|e| ActionError::Internal(format!("Failed to load device config: {}", e)))?;
@@ -105,10 +104,7 @@ impl CoreAction for UpdateDeviceAction {
})
}
async fn validate(
&self,
_context: Arc<CoreContext>,
) -> Result<ValidationResult, ActionError> {
async fn validate(&self, _context: Arc<CoreContext>) -> Result<ValidationResult, ActionError> {
// Basic validation is done in from_input
Ok(ValidationResult::Success { metadata: None })
}

View File

@@ -299,14 +299,15 @@ impl LibraryAction for FileCopyAction {
// Get strategy metadata for rich UI display
let first_source = &self.sources.paths[0];
let (_, strategy_metadata) = super::routing::CopyStrategyRouter::select_strategy_with_metadata(
first_source,
&self.destination,
self.options.delete_after_copy,
&self.options.copy_method,
Some(&*context.volume_manager),
)
.await;
let (_, strategy_metadata) =
super::routing::CopyStrategyRouter::select_strategy_with_metadata(
first_source,
&self.destination,
self.options.delete_after_copy,
&self.options.copy_method,
Some(&*context.volume_manager),
)
.await;
// Calculate file counts and total bytes
let (file_count, total_bytes) = self.calculate_totals().await?;
@@ -453,9 +454,9 @@ impl FileCopyAction {
let mut stack = vec![path.to_path_buf()];
while let Some(current) = stack.pop() {
let metadata = tokio::fs::metadata(&current).await.map_err(|e| {
ActionError::Internal(format!("Failed to read metadata: {}", e))
})?;
let metadata = tokio::fs::metadata(&current)
.await
.map_err(|e| ActionError::Internal(format!("Failed to read metadata: {}", e)))?;
if metadata.is_file() {
count += 1;

View File

@@ -35,7 +35,7 @@ impl CopyDatabaseQuery {
match PathResolver::resolve_to_entry(&self.db, source).await {
Ok(Some(entry)) => {
let (file_count, total_size) = match entry.kind {
0 => (1u64, entry.size as u64), // File
0 => (1u64, entry.size as u64), // File
1 => (entry.file_count as u64, entry.aggregate_size as u64), // Directory
_ => (0, 0),
};
@@ -316,7 +316,8 @@ impl CopyDatabaseQuery {
for component in components {
if let Some(parent_id) = current_parent_id {
// Remove extension for entry lookup (extensions stored separately)
let component_without_ext = if let Some(dot_pos) = component.rfind('.') {
let component_without_ext = if let Some(dot_pos) = component.rfind('.')
{
&component[..dot_pos]
} else {
component
@@ -325,8 +326,8 @@ impl CopyDatabaseQuery {
// Normalize Unicode spaces (macOS uses special space characters)
// Replace narrow no-break space (\u{202f}) and other space variants with regular space
let normalized_name = component_without_ext
.replace('\u{202f}', " ") // Narrow no-break space
.replace('\u{00a0}', " ") // Non-breaking space
.replace('\u{202f}', " ") // Narrow no-break space
.replace('\u{00a0}', " ") // Non-breaking space
.replace('\u{2009}', " "); // Thin space
let child = entry::Entity::find()

View File

@@ -375,7 +375,8 @@ impl JobHandler for FileCopyJob {
copied_count += files_in_source; // Count actual files as copied for progress tracking
// Mark as completed in metadata (already done during previous run)
self.job_metadata.update_status(&resolved_source, super::metadata::CopyFileStatus::Completed);
self.job_metadata
.update_status(&resolved_source, super::metadata::CopyFileStatus::Completed);
continue;
}
@@ -422,7 +423,8 @@ impl JobHandler for FileCopyJob {
};
// Mark file as currently copying in metadata
self.job_metadata.update_status(&resolved_source, super::metadata::CopyFileStatus::Copying);
self.job_metadata
.update_status(&resolved_source, super::metadata::CopyFileStatus::Copying);
// Persist immediately so UI can show "copying" status in real-time
self.persist_job_state_to_db(&ctx).await?;
@@ -506,7 +508,10 @@ impl JobHandler for FileCopyJob {
ctx.log(format!("Skipping existing file: {}", dest_path.display()));
// Mark as skipped in metadata
self.job_metadata.update_status(&resolved_source, super::metadata::CopyFileStatus::Skipped);
self.job_metadata.update_status(
&resolved_source,
super::metadata::CopyFileStatus::Skipped,
);
// Skip this file
progress_aggregator.complete_source();
@@ -572,7 +577,10 @@ impl JobHandler for FileCopyJob {
self.completed_indices.push(index);
// Mark as completed in metadata
self.job_metadata.update_status(&resolved_source, super::metadata::CopyFileStatus::Completed);
self.job_metadata.update_status(
&resolved_source,
super::metadata::CopyFileStatus::Completed,
);
// If this is a move operation and the strategy didn't handle deletion,
// we need to delete the source after successful copy
@@ -1058,8 +1066,8 @@ impl FileCopyJob {
match PathResolver::resolve_to_entry(ctx.library_db(), source).await {
Ok(Some(entry)) => {
let size = match entry.kind {
0 => entry.size as u64, // File
1 => entry.aggregate_size as u64, // Directory
0 => entry.size as u64, // File
1 => entry.aggregate_size as u64, // Directory
_ => 0,
};
total += size;
@@ -1094,8 +1102,9 @@ impl FileCopyJob {
use sea_orm::{ActiveModelTrait, ActiveValue::Set};
// Serialize current job state
let job_state = rmp_serde::to_vec(self)
.map_err(|e| JobError::serialization(format!("Failed to serialize job state: {}", e)))?;
let job_state = rmp_serde::to_vec(self).map_err(|e| {
JobError::serialization(format!("Failed to serialize job state: {}", e))
})?;
// Update the jobs.state field in the database
let job_db = ctx.library.jobs().database();
@@ -1105,7 +1114,9 @@ impl FileCopyJob {
..Default::default()
};
job_model.update(job_db.conn()).await
job_model
.update(job_db.conn())
.await
.map_err(|e| JobError::execution(format!("Failed to persist job state: {}", e)))?;
ctx.log(format!(
@@ -1129,7 +1140,9 @@ impl FileCopyJob {
JobError::execution(format!("Failed to resolve source path: {}", e))
})?;
let (size_bytes, is_directory, entry_id) = if let Some(local_path) = resolved_source.as_local_path() {
let (size_bytes, is_directory, entry_id) = if let Some(local_path) =
resolved_source.as_local_path()
{
// Local path - get from filesystem
let metadata = tokio::fs::metadata(local_path)
.await

View File

@@ -491,7 +491,7 @@ impl RemoteTransferStrategy {
));
// Connect to remote device
let node_addr = iroh::NodeAddr::new(node_id);
let node_addr = iroh::EndpointAddr::new(node_id);
let connection = endpoint
.connect(node_addr, b"spacedrive/filetransfer/1")
.await
@@ -1125,7 +1125,7 @@ async fn stream_file_data<'a>(
node_id, destination_device_id
));
let node_addr = iroh::NodeAddr::new(node_id);
let node_addr = iroh::EndpointAddr::new(node_id);
let connection = endpoint
.connect(node_addr, b"spacedrive/filetransfer/1")
.await

View File

@@ -316,7 +316,9 @@ impl FileByPathQuery {
PathResolver::resolve_to_entry(db, sd_path)
.await
.map_err(|e| QueryError::Internal(format!("Database error: {}", e)))?
.ok_or_else(|| QueryError::Internal(format!("Entry not found for path: {}", sd_path.display())))
.ok_or_else(|| {
QueryError::Internal(format!("Entry not found for path: {}", sd_path.display()))
})
}
}

View File

@@ -95,7 +95,8 @@ impl LibraryAction for IndexingAction {
}
IndexPersistence::Persistent => {
// Persistent mode stores entries in the database but doesn't require a location binding yet.
let mut c = IndexerJobConfig::ephemeral_browse(sd_path, self.input.scope, false);
let mut c =
IndexerJobConfig::ephemeral_browse(sd_path, self.input.scope, false);
c.persistence = IndexPersistence::Persistent;
c
}

View File

@@ -830,7 +830,10 @@ impl DatabaseStorage {
.await
.map_err(|e| JobError::execution(format!("Failed to query content identity: {}", e)))?;
let (content_model, is_new_content, mime_type_model, is_new_mime_type) = if let Some(existing) = existing {
let (content_model, is_new_content, mime_type_model, is_new_mime_type) = if let Some(
existing,
) = existing
{
let mut existing_active: entities::content_identity::ActiveModel = existing.into();
existing_active.entry_count = Set(existing_active.entry_count.unwrap() + 1);
existing_active.last_verified_at = Set(chrono::Utc::now());
@@ -855,11 +858,13 @@ impl DatabaseStorage {
let file_type_result = registry.identify(path).await;
let (kind_id, mime_type_id, mime_type_model, is_new_mime_type) = match file_type_result {
let (kind_id, mime_type_id, mime_type_model, is_new_mime_type) = match file_type_result
{
Ok(result) => {
let kind_id = result.file_type.category as i32;
let (mime_type_id, mime_type_model, is_new_mime_type) = if let Some(mime_str) = result.file_type.primary_mime_type()
let (mime_type_id, mime_type_model, is_new_mime_type) = if let Some(mime_str) =
result.file_type.primary_mime_type()
{
let existing = entities::mime_type::Entity::find()
.filter(entities::mime_type::Column::MimeType.eq(mime_str))

View File

@@ -138,7 +138,8 @@ impl EphemeralIndexCache {
// Try to load from snapshot
if let Ok(snapshot_cache_dir) = super::snapshot::get_snapshot_cache_dir() {
if let Ok(snapshot_path) = super::snapshot::snapshot_path_for(path, &snapshot_cache_dir) {
if let Ok(snapshot_path) = super::snapshot::snapshot_path_for(path, &snapshot_cache_dir)
{
if let Ok(Some(loaded_index)) = EphemeralIndex::load_snapshot(&snapshot_path) {
// Replace the global index with the loaded one
let mut index = self.index.write().await;
@@ -161,7 +162,8 @@ impl EphemeralIndexCache {
/// Save the current index to a snapshot file
pub async fn save_snapshot(&self, path: &Path) -> anyhow::Result<()> {
if let Ok(snapshot_cache_dir) = super::snapshot::get_snapshot_cache_dir() {
if let Ok(snapshot_path) = super::snapshot::snapshot_path_for(path, &snapshot_cache_dir) {
if let Ok(snapshot_path) = super::snapshot::snapshot_path_for(path, &snapshot_cache_dir)
{
let index = self.index.read().await;
index.save_snapshot(&snapshot_path)?;
tracing::info!("Saved snapshot for path: {}", path.display());

View File

@@ -635,7 +635,7 @@ impl EphemeralIndex {
self.entry_uuids.remove(&id);
self.content_kinds.remove(&id);
// Also remove from parent's children list in arena
// Also remove from parent's children list in arena
// Get the parent's entry ID
if let Some(parent_path) = path.parent() {
if let Some(&parent_id) = self.path_index.get(parent_path) {
@@ -734,7 +734,11 @@ impl EphemeralIndex {
///
/// The root path is stored in the snapshot so it can be restored to
/// indexed_paths when loaded, making the cached data queryable.
pub fn save_snapshot_with_root(&self, snapshot_path: &Path, _root_path: &Path) -> anyhow::Result<()> {
pub fn save_snapshot_with_root(
&self,
snapshot_path: &Path,
_root_path: &Path,
) -> anyhow::Result<()> {
super::snapshot::save_snapshot_impl(self, snapshot_path)
}

View File

@@ -11,7 +11,9 @@ use sea_orm::{prelude::*, ConnectionTrait, QuerySelect, Statement};
use crate::{
domain::addressing::SdPath,
infra::db::entities::{device, directory_paths, entry, location, volume, DirectoryPaths, Entry},
infra::db::entities::{
device, directory_paths, entry, location, volume, DirectoryPaths, Entry,
},
};
pub struct PathResolver;

View File

@@ -529,15 +529,16 @@ pub async fn run_processing_phase(
// Check if the root entry needs updating
let needs_update = root_entry.inode.is_none()
|| inode.map(|i| i != root_entry.inode.unwrap_or(-1) as u64).unwrap_or(false)
|| inode
.map(|i| i != root_entry.inode.unwrap_or(-1) as u64)
.unwrap_or(false)
|| root_entry.size != metadata.len() as i64
|| {
if let Ok(modified) = metadata.modified() {
if let Ok(duration) = modified.duration_since(std::time::UNIX_EPOCH) {
if let Some(timestamp) = chrono::DateTime::from_timestamp(
duration.as_secs() as i64,
0,
) {
if let Some(timestamp) =
chrono::DateTime::from_timestamp(duration.as_secs() as i64, 0)
{
root_entry.modified_at != timestamp
} else {
false
@@ -576,16 +577,16 @@ pub async fn run_processing_phase(
JobError::execution(format!("Failed to begin root update transaction: {}", e))
})?;
if let Err(e) = DatabaseStorage::update_entry_in_conn(
location_entry_id,
&root_dir_entry,
&txn,
)
.await
if let Err(e) =
DatabaseStorage::update_entry_in_conn(location_entry_id, &root_dir_entry, &txn)
.await
{
ctx.add_non_critical_error(format!("Failed to update root entry: {}", e));
if let Err(rollback_err) = txn.rollback().await {
warn!("Failed to rollback root update transaction: {}", rollback_err);
warn!(
"Failed to rollback root update transaction: {}",
rollback_err
);
}
} else {
txn.commit().await.map_err(|e| {

View File

@@ -151,7 +151,14 @@ impl ContentHashProcessor {
let content_hash = ContentHashGenerator::generate_content_hash(&entry.path).await?;
debug!("✓ Generated content hash: {}", content_hash);
DatabaseStorage::link_to_content_identity(db, entry.id, &entry.path, content_hash, registry).await?;
DatabaseStorage::link_to_content_identity(
db,
entry.id,
&entry.path,
content_hash,
registry,
)
.await?;
debug!("✓ Linked content identity for entry {}", entry.id);

View File

@@ -75,9 +75,8 @@ impl LibraryQuery for CopyMetadataQuery {
}
// Deserialize the job state
let copy_job: FileCopyJob = rmp_serde::from_slice(&job_record.state).map_err(|e| {
QueryError::Internal(format!("Failed to deserialize job state: {}", e))
})?;
let copy_job: FileCopyJob = rmp_serde::from_slice(&job_record.state)
.map_err(|e| QueryError::Internal(format!("Failed to deserialize job state: {}", e)))?;
// Build File domain objects from entry UUIDs
let mut metadata = copy_job.job_metadata;
@@ -91,11 +90,8 @@ impl LibraryQuery for CopyMetadataQuery {
// Batch load File objects
if !entry_uuids.is_empty() {
match crate::domain::file::File::from_entry_uuids(
library.db().conn(),
&entry_uuids,
)
.await
match crate::domain::file::File::from_entry_uuids(library.db().conn(), &entry_uuids)
.await
{
Ok(files) => {
metadata.file_objects = files;

View File

@@ -63,7 +63,15 @@ impl LibraryQuery for ValidateLocationPathQuery {
let volume_manager = &context.volume_manager;
let volume_opt = volume_manager.volume_for_path(path).await;
tracing::info!("Volume lookup for path {}: {:?}", path.display(), volume_opt.as_ref().map(|v| (v.name.as_str(), &v.volume_type, v.fingerprint.0.as_str())));
tracing::info!(
"Volume lookup for path {}: {:?}",
path.display(),
volume_opt.as_ref().map(|v| (
v.name.as_str(),
&v.volume_type,
v.fingerprint.0.as_str()
))
);
let is_primary = volume_opt
.as_ref()
@@ -87,7 +95,11 @@ impl LibraryQuery for ValidateLocationPathQuery {
}
matches
});
tracing::info!("is_system_dir: {}, is_primary: {}", is_system_dir, is_primary);
tracing::info!(
"is_system_dir: {}, is_primary: {}",
is_system_dir,
is_primary
);
// Determine risk level using hybrid approach (depth + system directory check)
let risk_level = if is_system_dir || depth <= 1 {

View File

@@ -243,9 +243,9 @@ impl OcrJob {
if let Ok(Some(mime)) = mime_type::Entity::find_by_id(mime_id).one(db).await
{
if super::is_ocr_supported(
&mime.mime_type,
ctx.library().core_context().file_type_registry(),
) {
&mime.mime_type,
ctx.library().core_context().file_type_registry(),
) {
if let Ok(path) = crate::ops::indexing::PathResolver::get_full_path(
db,
entry_model.id,
@@ -305,9 +305,9 @@ impl OcrJob {
mime_type::Entity::find_by_id(mime_id).one(db).await
{
if super::is_ocr_supported(
&mime.mime_type,
ctx.library().core_context().file_type_registry(),
) {
&mime.mime_type,
ctx.library().core_context().file_type_registry(),
) {
// Get full path
if let Ok(path) =
crate::ops::indexing::PathResolver::get_full_path(

View File

@@ -264,9 +264,9 @@ impl SpeechToTextJob {
if let Ok(Some(mime)) = mime_type::Entity::find_by_id(mime_id).one(db).await
{
if super::is_speech_supported(
&mime.mime_type,
ctx.library().core_context().file_type_registry(),
) {
&mime.mime_type,
ctx.library().core_context().file_type_registry(),
) {
if let Ok(path) = crate::ops::indexing::PathResolver::get_full_path(
db,
entry_model.id,
@@ -310,9 +310,9 @@ impl SpeechToTextJob {
if let Ok(Some(mime)) = mime_type::Entity::find_by_id(mime_id).one(db).await
{
if super::is_speech_supported(
&mime.mime_type,
ctx.library().core_context().file_type_registry(),
) {
&mime.mime_type,
ctx.library().core_context().file_type_registry(),
) {
if let Ok(path) = crate::ops::indexing::PathResolver::get_full_path(
db,
entry_model.id,

View File

@@ -1,6 +1,9 @@
//! Thumbnail generation action handlers
use super::{job::{ThumbnailJob, ThumbnailJobConfig}, processor::ThumbnailProcessor};
use super::{
job::{ThumbnailJob, ThumbnailJobConfig},
processor::ThumbnailProcessor,
};
use crate::{
context::CoreContext,
infra::action::{error::ActionError, LibraryAction},

View File

@@ -144,10 +144,9 @@ impl LibraryAction for GenerateThumbstripAction {
}
// Process the file
let result = processor
.process(db, &proc_entry)
.await
.map_err(|e| ActionError::Internal(format!("Thumbstrip generation failed: {}", e)))?;
let result = processor.process(db, &proc_entry).await.map_err(|e| {
ActionError::Internal(format!("Thumbstrip generation failed: {}", e))
})?;
if !result.success {
return Err(ActionError::Internal(

View File

@@ -40,7 +40,7 @@ impl CoreAction for PairJoinAction {
// If node_id provided separately, add it to enable relay fallback
if let Some(node_id_str) = &self.node_id {
let node_id: iroh::NodeId = node_id_str
let node_id: iroh::EndpointId = node_id_str
.parse()
.map_err(|e| ActionError::Internal(format!("Invalid node ID: {}", e)))?;
pairing_code = pairing_code.with_node_id(node_id);

View File

@@ -30,9 +30,7 @@ impl CoreQuery for NetworkStatusQuery {
if let Some(net) = networking {
let node_id = net.node_id().to_string();
let addresses = if let Ok(Some(addr)) = net.get_node_addr() {
addr.direct_addresses()
.map(|a| a.to_string())
.collect::<Vec<_>>()
addr.ip_addrs().map(|a| a.to_string()).collect::<Vec<_>>()
} else {
Vec::new()
};

View File

@@ -187,8 +187,8 @@ impl FileSearchInput {
/// Validate the search input
pub fn validate(&self) -> Result<(), String> {
// Allow empty queries when sorting by IndexedAt (for recents view)
let is_recents_query = self.query.trim().is_empty()
&& matches!(self.sort.field, SortField::IndexedAt);
let is_recents_query =
self.query.trim().is_empty() && matches!(self.sort.field, SortField::IndexedAt);
if self.query.trim().is_empty() && !is_recents_query {
return Err("Query cannot be empty".to_string());

View File

@@ -115,9 +115,9 @@ impl LibraryQuery for FileSearchQuery {
// Get actual total count for pagination
let total_count = self
.get_total_count(db.conn(), context.file_type_registry())
.await
.unwrap_or(0);
.get_total_count(db.conn(), context.file_type_registry())
.await
.unwrap_or(0);
// Create output with persistent index type
let output = FileSearchOutput::new_persistent(
@@ -867,7 +867,8 @@ impl FileSearchQuery {
// Get volume to find device info
let Some(volume) = crate::infra::db::entities::volume::Entity::find_by_id(volume_id)
.one(db)
.await? else {
.await?
else {
return Ok(None);
};
@@ -875,7 +876,8 @@ impl FileSearchQuery {
let Some(device) = crate::infra::db::entities::device::Entity::find()
.filter(crate::infra::db::entities::device::Column::Uuid.eq(volume.device_id))
.one(db)
.await? else {
.await?
else {
return Ok(None);
};
@@ -895,11 +897,7 @@ impl FileSearchQuery {
file,
score,
score_breakdown: crate::ops::search::output::ScoreBreakdown::new(
score,
None,
0.0,
0.0,
0.0,
score, None, 0.0, 0.0, 0.0,
),
highlights: Vec::new(),
matched_content: None,
@@ -951,10 +949,7 @@ impl FileSearchQuery {
// Execute query
let entries = query.all(db).await?;
tracing::info!(
"Fast search without FTS returned {} entries",
entries.len()
);
tracing::info!("Fast search without FTS returned {} entries", entries.len());
// Convert entries to FileSearchResult using helper
let mut results = Vec::new();

View File

@@ -5,8 +5,8 @@ use crate::infra::query::{LibraryQuery, QueryError, QueryResult};
use crate::infra::sync::NetworkTransport;
use std::sync::Arc;
use super::{GetSyncPartnersInput, GetSyncPartnersOutput};
use super::output::{DeviceDebugInfo, SyncPartnerInfo, SyncPartnersDebugInfo};
use super::{GetSyncPartnersInput, GetSyncPartnersOutput};
/// Get computed sync partners for the current library
pub struct GetSyncPartners {
@@ -27,7 +27,7 @@ impl LibraryQuery for GetSyncPartners {
session: crate::infra::api::SessionContext,
) -> QueryResult<Self::Output> {
use crate::infra::db::entities;
use sea_orm::{EntityTrait};
use sea_orm::EntityTrait;
// Get library from session
let library_id = session

View File

@@ -246,4 +246,4 @@ async fn lookup_entry_uuid(
entry_model
.uuid
.ok_or_else(|| format!("Entry {} has no UUID assigned", entry_id))
}
}

View File

@@ -1,11 +1,7 @@
//! Volume eject action
use super::{VolumeEjectInput, VolumeEjectOutput};
use crate::{
context::CoreContext,
infra::action::error::ActionError,
volume::VolumeFingerprint,
};
use crate::{context::CoreContext, infra::action::error::ActionError, volume::VolumeFingerprint};
use serde::{Deserialize, Serialize};
use std::sync::Arc;
use tracing::{error, info};
@@ -45,9 +41,7 @@ impl crate::infra::action::LibraryAction for VolumeEjectAction {
.volume_manager
.get_volume(&fingerprint)
.await
.ok_or_else(|| {
ActionError::Internal(format!("Volume not found: {}", fingerprint))
})?;
.ok_or_else(|| ActionError::Internal(format!("Volume not found: {}", fingerprint)))?;
// Check if volume is mounted
if !volume.is_mounted {

View File

@@ -128,7 +128,6 @@ impl VolumeListQuery {
_ => Ok(None),
}
}
}
impl LibraryQuery for VolumeListQuery {

View File

@@ -94,9 +94,7 @@ impl LibraryAction for VolumeSpeedTestAction {
use crate::domain::resource::EventEmitter;
volume
.emit_changed(&context.events)
.map_err(|e| {
ActionError::Internal(format!("Failed to emit volume event: {}", e))
})?;
.map_err(|e| ActionError::Internal(format!("Failed to emit volume event: {}", e)))?;
// Return native output directly
Ok(VolumeSpeedTestOutput::new(

View File

@@ -11,8 +11,8 @@ use crate::service::network::{
NetworkingError, Result,
};
use iroh::endpoint::Connection;
use iroh::NodeId;
use iroh::{Endpoint, NodeAddr};
use iroh::EndpointId;
use iroh::{Endpoint, EndpointAddr};
use std::sync::Arc;
use tokio::io::AsyncWriteExt;
use tokio::sync::{broadcast, mpsc, RwLock};
@@ -24,15 +24,15 @@ pub enum EventLoopCommand {
// Connection management
ConnectionEstablished {
device_id: Uuid,
node_id: NodeId,
node_id: EndpointId,
},
ConnectionLost {
device_id: Uuid,
node_id: NodeId,
node_id: EndpointId,
reason: String,
},
TrackOutboundConnection {
node_id: NodeId,
node_id: EndpointId,
conn: Connection,
},
@@ -43,7 +43,7 @@ pub enum EventLoopCommand {
data: Vec<u8>,
},
SendMessageToNode {
node_id: NodeId,
node_id: EndpointId,
protocol: String,
data: Vec<u8>,
},
@@ -81,11 +81,11 @@ pub struct NetworkingEventLoop {
/// Our network identity
identity: NetworkIdentity,
/// Active connections tracker (keyed by NodeId and ALPN)
active_connections: Arc<RwLock<std::collections::HashMap<(NodeId, Vec<u8>), Connection>>>,
/// Active connections tracker (keyed by EndpointId and ALPN)
active_connections: Arc<RwLock<std::collections::HashMap<(EndpointId, Vec<u8>), Connection>>>,
/// Nodes that already have connection watchers spawned (to prevent duplicates)
watched_nodes: Arc<RwLock<std::collections::HashSet<NodeId>>>,
watched_nodes: Arc<RwLock<std::collections::HashSet<EndpointId>>>,
/// Logger for event loop operations
logger: Arc<dyn NetworkLogger>,
@@ -99,7 +99,9 @@ impl NetworkingEventLoop {
device_registry: Arc<RwLock<DeviceRegistry>>,
event_sender: broadcast::Sender<NetworkEvent>,
identity: NetworkIdentity,
active_connections: Arc<RwLock<std::collections::HashMap<(NodeId, Vec<u8>), Connection>>>,
active_connections: Arc<
RwLock<std::collections::HashMap<(EndpointId, Vec<u8>), Connection>>,
>,
logger: Arc<dyn NetworkLogger>,
) -> Self {
let (command_tx, command_rx) = mpsc::unbounded_channel();
@@ -200,20 +202,12 @@ impl NetworkingEventLoop {
/// Handle an incoming connection
async fn handle_connection(&self, conn: Connection) {
// Extract the remote node ID from the connection
let remote_node_id = match conn.remote_node_id() {
Ok(key) => key,
Err(e) => {
self.logger
.error(&format!("Failed to get remote node ID: {}", e))
.await;
return;
}
};
// Extract the remote node ID from the connection (now infallible in v0.95+)
let remote_node_id = conn.remote_id();
// Track the connection (keyed by node_id and alpn)
{
let alpn_bytes = conn.alpn().unwrap_or_default();
let alpn_bytes = conn.alpn().to_vec();
let mut connections = self.active_connections.write().await;
connections.insert((remote_node_id, alpn_bytes), conn.clone());
}
@@ -292,7 +286,7 @@ impl NetworkingEventLoop {
// Only remove connection if it's actually closed
if conn.close_reason().is_some() {
let mut connections = active_connections.write().await;
let alpn_bytes = conn.alpn().unwrap_or_default();
let alpn_bytes = conn.alpn().to_vec();
connections.remove(&(remote_node_id, alpn_bytes));
logger
.info(&format!(
@@ -318,7 +312,7 @@ impl NetworkingEventLoop {
device_registry: Arc<RwLock<DeviceRegistry>>,
event_sender: broadcast::Sender<NetworkEvent>,
command_sender: mpsc::UnboundedSender<EventLoopCommand>,
remote_node_id: NodeId,
remote_node_id: EndpointId,
logger: Arc<dyn NetworkLogger>,
) {
loop {
@@ -356,7 +350,7 @@ impl NetworkingEventLoop {
}
// Route to handler based on ALPN
let alpn_bytes = conn.alpn().unwrap_or_default();
let alpn_bytes = conn.alpn().to_vec();
if alpn_bytes == MESSAGING_ALPN {
let registry = protocol_registry.read().await;
@@ -443,7 +437,7 @@ impl NetworkingEventLoop {
logger.debug(&format!("Accepted unidirectional stream from {}", remote_node_id)).await;
// Get ALPN to determine which protocol handler to use
let alpn_bytes = conn.alpn().unwrap_or_default();
let alpn_bytes = conn.alpn().to_vec();
let registry = protocol_registry.read().await;
// Route based on ALPN
@@ -646,7 +640,7 @@ impl NetworkingEventLoop {
EventLoopCommand::TrackOutboundConnection { node_id, conn } => {
// Add outbound connection to active connections map
let alpn_bytes = conn.alpn().unwrap_or_default();
let alpn_bytes = conn.alpn().to_vec();
{
let mut connections = self.active_connections.write().await;
connections.insert((node_id, alpn_bytes.clone()), conn.clone());
@@ -706,7 +700,7 @@ impl NetworkingEventLoop {
}
/// Send a message to a specific node
async fn send_to_node(&self, node_id: NodeId, protocol: &str, data: Vec<u8>) {
async fn send_to_node(&self, node_id: EndpointId, protocol: &str, data: Vec<u8>) {
self.logger
.debug(&format!(
"Sending {} message to {} ({} bytes)",
@@ -738,7 +732,7 @@ impl NetworkingEventLoop {
};
// Create node address (Iroh will use existing connection if available)
let node_addr = NodeAddr::new(node_id);
let node_addr = EndpointAddr::new(node_id);
// Connect with specific ALPN
self.logger
@@ -760,7 +754,7 @@ impl NetworkingEventLoop {
// Track the connection
{
let mut connections = self.active_connections.write().await;
let alpn_bytes = conn.alpn().unwrap_or_default();
let alpn_bytes = conn.alpn().to_vec();
connections.insert((node_id, alpn_bytes), conn.clone());
}
@@ -894,41 +888,35 @@ impl NetworkingEventLoop {
}
}
/// Update DeviceRegistry connection states based on Iroh's remote_info
/// Update DeviceRegistry connection states based on tracked connections and latency
///
/// This monitors Iroh connections and updates the DeviceRegistry state accordingly.
/// Devices transition to Connected when Iroh reports an active connection, and back
/// to Paired when the connection is lost. This is cosmetic only - sync routing uses
/// is_node_connected() which queries Iroh directly.
async fn update_connection_states(&self) {
// Get all remote info from Iroh
let remote_infos: Vec<_> = self.endpoint.remote_info_iter().collect();
// Lock registry for updates
let mut registry = self.device_registry.write().await;
// Track which node IDs Iroh reports as connected
let mut connected_node_ids = std::collections::HashSet::new();
// Get all tracked connections
let active_connections = self.active_connections.read().await;
let connected_node_ids: std::collections::HashSet<EndpointId> = active_connections
.keys()
.map(|(node_id, _alpn)| *node_id)
.collect();
// Update devices that Iroh reports as connected
for remote_info in remote_infos {
// Check if this is an active connection
let is_connected =
!matches!(remote_info.conn_type, iroh::endpoint::ConnectionType::None);
// Update devices that we have active connections to
for node_id in &connected_node_ids {
// Check if connection is still alive via latency
let latency = self.endpoint.latency(*node_id);
let is_connected = latency.is_some();
if is_connected {
connected_node_ids.insert(remote_info.node_id);
// Find device for this node
if let Some(device_id) = registry.get_device_by_node_id(remote_info.node_id) {
if let Some(device_id) = registry.get_device_by_node_id(*node_id) {
// Update to Connected state if not already
if let Err(e) = registry
.update_device_from_connection(
device_id,
remote_info.node_id,
remote_info.conn_type,
remote_info.latency,
)
.update_device_from_connection(device_id, *node_id, true, latency)
.await
{
self.logger
@@ -942,30 +930,28 @@ impl NetworkingEventLoop {
}
}
// Check devices that are marked as Connected in registry but NOT in Iroh's list
// Check devices that are marked as Connected in registry but no longer have active connections
// These devices have silently disconnected and need to be transitioned back to Paired
let all_devices = registry.get_all_devices();
for (device_id, state) in all_devices {
if let crate::service::network::device::DeviceState::Connected { info, .. } = state {
// Get the node_id for this device
if let Ok(node_id) = info.network_fingerprint.node_id.parse::<NodeId>() {
// If this node is NOT in Iroh's connected list, it's stale
if !connected_node_ids.contains(&node_id) {
if let Ok(node_id) = info.network_fingerprint.node_id.parse::<EndpointId>() {
// Check if this node still has an active connection
let has_active_connection = connected_node_ids.contains(&node_id)
&& self.endpoint.latency(node_id).is_some();
if !has_active_connection {
self.logger
.info(&format!(
"Device {} ({}) is marked Connected but not in Iroh's connection list - transitioning to Paired",
"Device {} ({}) is marked Connected but has no active connection - transitioning to Paired",
device_id, info.device_name
))
.await;
// Transition to Paired state via update_device_from_connection with None conn_type
// Transition to Paired state
if let Err(e) = registry
.update_device_from_connection(
device_id,
node_id,
iroh::endpoint::ConnectionType::None,
None,
)
.update_device_from_connection(device_id, node_id, false, None)
.await
{
self.logger
@@ -985,7 +971,7 @@ impl NetworkingEventLoop {
///
/// This provides instant reactivity when connections drop, instead of waiting
/// for the 10-second polling interval in update_connection_states().
async fn spawn_connection_watcher(&self, conn: Connection, node_id: NodeId) {
async fn spawn_connection_watcher(&self, conn: Connection, node_id: EndpointId) {
super::spawn_connection_watcher_task(
conn,
node_id,

View File

@@ -11,7 +11,7 @@ use crate::service::network::{
};
use iroh::discovery::{dns::DnsDiscovery, mdns::MdnsDiscovery, pkarr::PkarrPublisher, Discovery};
use iroh::endpoint::Connection;
use iroh::{Endpoint, NodeAddr, NodeId, RelayMode, RelayUrl, Watcher};
use iroh::{Endpoint, EndpointAddr, EndpointId, RelayMode, RelayUrl, Watcher};
use std::sync::Arc;
use tokio::sync::{broadcast, mpsc, RwLock};
use uuid::Uuid;
@@ -30,23 +30,23 @@ pub const JOB_ACTIVITY_ALPN: &[u8] = b"spacedrive/jobactivity/1";
pub enum NetworkEvent {
// Discovery events
PeerDiscovered {
node_id: NodeId,
node_addr: NodeAddr,
node_id: EndpointId,
node_addr: EndpointAddr,
},
PeerDisconnected {
node_id: NodeId,
node_id: EndpointId,
},
// Pairing events
PairingRequest {
session_id: Uuid,
device_info: DeviceInfo,
node_id: NodeId,
node_id: EndpointId,
},
PairingSessionDiscovered {
session_id: Uuid,
node_id: NodeId,
node_addr: NodeAddr,
node_id: EndpointId,
node_addr: EndpointAddr,
device_info: DeviceInfo,
},
PairingCompleted {
@@ -61,11 +61,11 @@ pub enum NetworkEvent {
// Connection events
ConnectionEstablished {
device_id: Uuid,
node_id: NodeId,
node_id: EndpointId,
},
ConnectionLost {
device_id: Uuid,
node_id: NodeId,
node_id: EndpointId,
},
MessageReceived {
from: Uuid,
@@ -83,7 +83,7 @@ pub struct NetworkingService {
identity: NetworkIdentity,
/// Our Iroh node ID
node_id: NodeId,
node_id: EndpointId,
/// Discovery service for finding peers
discovery: Option<Box<dyn Discovery>>,
@@ -103,12 +103,12 @@ pub struct NetworkingService {
/// Event sender for broadcasting network events (broadcast channel allows multiple subscribers)
event_sender: broadcast::Sender<NetworkEvent>,
/// Active connections tracker (keyed by NodeId and ALPN)
/// Active connections tracker (keyed by EndpointId and ALPN)
/// Each ALPN protocol requires its own connection since ALPN is negotiated at connection establishment
active_connections: Arc<RwLock<std::collections::HashMap<(NodeId, Vec<u8>), Connection>>>,
active_connections: Arc<RwLock<std::collections::HashMap<(EndpointId, Vec<u8>), Connection>>>,
/// Nodes that already have connection watchers spawned (to prevent duplicates)
watched_nodes: Arc<RwLock<std::collections::HashSet<NodeId>>>,
watched_nodes: Arc<RwLock<std::collections::HashSet<EndpointId>>>,
/// Sync multiplexer for routing sync messages to correct library
sync_multiplexer: Arc<SyncMultiplexer>,
@@ -181,7 +181,10 @@ impl NetworkingService {
///
/// This enables the device registry to emit complete device data with hardware_model
/// by querying the library database instead of just using network DeviceInfo.
pub async fn set_library_manager(&self, library_manager: std::sync::Weak<crate::library::LibraryManager>) {
pub async fn set_library_manager(
&self,
library_manager: std::sync::Weak<crate::library::LibraryManager>,
) {
let mut registry = self.device_registry.write().await;
registry.set_library_manager(library_manager);
}
@@ -220,9 +223,9 @@ impl NetworkingService {
JOB_ACTIVITY_ALPN.to_vec(),
])
.relay_mode(iroh::RelayMode::Default)
.add_discovery(MdnsDiscovery::builder())
.add_discovery(PkarrPublisher::n0_dns())
.add_discovery(DnsDiscovery::n0_dns())
.discovery(MdnsDiscovery::builder())
.discovery(PkarrPublisher::n0_dns())
.discovery(DnsDiscovery::n0_dns())
.bind_addr_v4(std::net::SocketAddrV4::new(
std::net::Ipv4Addr::UNSPECIFIED,
0,
@@ -341,7 +344,7 @@ impl NetworkingService {
endpoint: Option<Endpoint>,
logger: Arc<dyn NetworkLogger>,
) {
// Deterministic reconnection: only the device with the lower NodeId initiates
// Deterministic reconnection: only the device with the lower EndpointId initiates
// This prevents both sides from simultaneously trying to connect
let endpoint_ref = match &endpoint {
Some(ep) => ep,
@@ -351,12 +354,12 @@ impl NetworkingService {
}
};
let my_node_id = endpoint_ref.node_id();
let my_node_id = endpoint_ref.id();
let remote_node_id = match persisted_device
.device_info
.network_fingerprint
.node_id
.parse::<NodeId>()
.parse::<EndpointId>()
{
Ok(id) => id,
Err(e) => {
@@ -367,12 +370,12 @@ impl NetworkingService {
}
};
// Deterministic rule: only device with lower NodeId initiates outbound connections
// Deterministic rule: only device with lower EndpointId initiates outbound connections
// This prevents both sides from creating competing connections
if my_node_id > remote_node_id {
logger
.debug(&format!(
"Skipping outbound reconnection to {} - waiting for them to connect to us (NodeId rule: {} > {})",
"Skipping outbound reconnection to {} - waiting for them to connect to us (EndpointId rule: {} > {})",
persisted_device.device_info.device_name,
my_node_id,
remote_node_id
@@ -383,7 +386,7 @@ impl NetworkingService {
logger
.info(&format!(
"NodeId rule: {} < {} - we should initiate connection",
"EndpointId rule: {} < {} - we should initiate connection",
my_node_id, remote_node_id
))
.await;
@@ -401,10 +404,10 @@ impl NetworkingService {
.device_info
.network_fingerprint
.node_id
.parse::<NodeId>()
.parse::<EndpointId>()
{
// Build NodeAddr - Iroh will discover addresses automatically
let node_addr = NodeAddr::new(node_id);
// Build EndpointAddr - Iroh will discover addresses automatically
let node_addr = EndpointAddr::new(node_id);
// Attempt connection with retries to give discovery time to work
let mut retry_count = 0;
@@ -548,7 +551,7 @@ impl NetworkingService {
interval.tick().await;
// Get all connected devices
let connected_devices: Vec<(uuid::Uuid, iroh::NodeId)> = {
let connected_devices: Vec<(uuid::Uuid, iroh::EndpointId)> = {
let registry = device_registry.read().await;
registry
.get_all_devices()
@@ -560,7 +563,7 @@ impl NetworkingService {
} = state
{
if let Ok(node_id) =
info.network_fingerprint.node_id.parse::<iroh::NodeId>()
info.network_fingerprint.node_id.parse::<iroh::EndpointId>()
{
Some((device_id, node_id))
} else {
@@ -734,7 +737,7 @@ impl NetworkingService {
.info("Sending disconnect notifications to connected devices")
.await;
let connected_devices: Vec<(uuid::Uuid, iroh::NodeId)> = {
let connected_devices: Vec<(uuid::Uuid, iroh::EndpointId)> = {
let registry = self.device_registry.read().await;
registry
.get_all_devices()
@@ -745,7 +748,7 @@ impl NetworkingService {
} = state
{
if let Ok(node_id) =
info.network_fingerprint.node_id.parse::<iroh::NodeId>()
info.network_fingerprint.node_id.parse::<iroh::EndpointId>()
{
Some((device_id, node_id))
} else {
@@ -812,7 +815,7 @@ impl NetworkingService {
}
/// Get our node ID
pub fn node_id(&self) -> NodeId {
pub fn node_id(&self) -> EndpointId {
self.node_id
}
@@ -822,10 +825,10 @@ impl NetworkingService {
}
/// Get raw connected nodes directly from endpoint
pub async fn get_raw_connected_nodes(&self) -> Vec<NodeId> {
pub async fn get_raw_connected_nodes(&self) -> Vec<EndpointId> {
let connections = self.active_connections.read().await;
// Extract unique NodeIds from (NodeId, ALPN) keys
let mut node_ids: Vec<NodeId> = connections
// Extract unique EndpointIds from (EndpointId, ALPN) keys
let mut node_ids: Vec<EndpointId> = connections
.keys()
.map(|(node_id, _alpn)| *node_id)
.collect();
@@ -910,7 +913,7 @@ impl NetworkingService {
/// Get the active connections cache shared with the event loop
pub fn active_connections(
&self,
) -> Arc<RwLock<std::collections::HashMap<(NodeId, Vec<u8>), Connection>>> {
) -> Arc<RwLock<std::collections::HashMap<(EndpointId, Vec<u8>), Connection>>> {
self.active_connections.clone()
}
@@ -926,7 +929,7 @@ impl NetworkingService {
// This leverages Iroh's native mDNS capabilities without needing custom key-value storage
/// Get currently connected nodes for direct pairing attempts
pub async fn get_connected_nodes(&self) -> Vec<NodeId> {
pub async fn get_connected_nodes(&self) -> Vec<EndpointId> {
// Get connected nodes from device registry
let registry = self.device_registry.read().await;
registry.get_connected_nodes()
@@ -945,7 +948,7 @@ impl NetworkingService {
/// Send message to a specific node (bypassing device lookup)
pub async fn send_message_to_node(
&self,
node_id: NodeId,
node_id: EndpointId,
protocol: &str,
data: Vec<u8>,
) -> Result<()> {
@@ -968,14 +971,13 @@ impl NetworkingService {
}
}
/// Strip direct addresses from a NodeAddr to force relay-only connection
fn strip_direct_addresses(node_addr: NodeAddr) -> NodeAddr {
use std::collections::BTreeSet;
NodeAddr::from_parts(
node_addr.node_id,
node_addr.relay_url().cloned(),
BTreeSet::new(), // Empty direct addresses
)
/// Strip IP addresses from an EndpointAddr to force relay-only connection
/// Note: In v0.95+, EndpointAddr is immutable. This creates a minimal EndpointAddr
/// with just the ID - Iroh will use discovery to find relay URLs if needed.
fn strip_ip_addresses(endpoint_addr: EndpointAddr) -> EndpointAddr {
// Create a minimal EndpointAddr with just the ID
// Iroh's discovery system will handle finding relay URLs
EndpointAddr::new(endpoint_addr.id)
}
/// Spawn a background task to watch for connection closure
@@ -983,7 +985,7 @@ impl NetworkingService {
/// This provides instant reactivity when connections drop by waiting on
/// Iroh's Connection::closed() future, instead of relying on the 10-second
/// polling interval in update_connection_states().
async fn spawn_connection_watcher(&self, conn: Connection, node_id: NodeId) {
async fn spawn_connection_watcher(&self, conn: Connection, node_id: EndpointId) {
spawn_connection_watcher_task(
conn,
node_id,
@@ -1000,36 +1002,40 @@ impl NetworkingService {
/// # Parameters
/// * `node_addr` - The node address to connect to
/// * `force_relay` - If true, strip direct addresses and only use relay
pub async fn connect_to_node(&self, node_addr: NodeAddr, force_relay: bool) -> Result<()> {
let node_addr = if force_relay {
Self::strip_direct_addresses(node_addr)
pub async fn connect_to_node(
&self,
endpoint_addr: EndpointAddr,
force_relay: bool,
) -> Result<()> {
let endpoint_addr = if force_relay {
Self::strip_ip_addresses(endpoint_addr)
} else {
node_addr
endpoint_addr
};
if let Some(endpoint) = &self.endpoint {
// Use pairing ALPN for initial connection during pairing
let conn = endpoint
.connect(node_addr.clone(), PAIRING_ALPN)
.connect(endpoint_addr.clone(), PAIRING_ALPN)
.await
.map_err(|e| {
NetworkingError::ConnectionFailed(format!("Failed to connect: {}", e))
})?;
// Track the outbound connection (with PAIRING_ALPN)
let node_id = node_addr.node_id;
let remote_id = endpoint_addr.id;
{
let mut connections = self.active_connections.write().await;
connections.insert((node_id, PAIRING_ALPN.to_vec()), conn.clone());
connections.insert((remote_id, PAIRING_ALPN.to_vec()), conn.clone());
self.logger
.info(&format!(
"Tracked outbound pairing connection to {}",
node_id
remote_id
))
.await;
}
// Spawn a task to watch for connection closure for instant reactivity
self.spawn_connection_watcher(conn, node_id).await;
self.spawn_connection_watcher(conn, remote_id).await;
Ok(())
} else {
@@ -1040,9 +1046,9 @@ impl NetworkingService {
}
/// Get our node address for advertising
pub fn get_node_addr(&self) -> Result<Option<NodeAddr>> {
pub fn get_node_addr(&self) -> Result<Option<EndpointAddr>> {
if let Some(endpoint) = &self.endpoint {
Ok(endpoint.node_addr().get())
Ok(Some(endpoint.addr()))
} else {
Err(NetworkingError::ConnectionFailed(
"Networking not started".to_string(),
@@ -1053,8 +1059,12 @@ impl NetworkingService {
/// Get the configured relay URL
pub async fn get_relay_url(&self) -> Option<String> {
if let Some(endpoint) = &self.endpoint {
let relay = endpoint.home_relay().initialized().await;
Some(relay.to_string())
// In v0.95+, get relay URL from the endpoint address
endpoint
.addr()
.relay_urls()
.next()
.map(|url| url.to_string())
} else {
None
}
@@ -1071,7 +1081,13 @@ impl NetworkingService {
"Networking not started".to_string(),
))?;
let mut discovery_stream = endpoint.discovery_stream();
// Create mDNS discovery service to subscribe to events
// Note: In v0.95+, we need to get discovery services individually and subscribe
let endpoint_id = endpoint.id();
let mdns_discovery = MdnsDiscovery::builder().build(endpoint_id).map_err(|e| {
NetworkingError::ConnectionFailed(format!("Failed to create mDNS discovery: {}", e))
})?;
let mut discovery_stream = mdns_discovery.subscribe().await;
let session_id_str = session_id.to_string();
let timeout = tokio::time::Duration::from_secs(5); // Shorter timeout for mDNS
let start = tokio::time::Instant::now();
@@ -1085,26 +1101,23 @@ impl NetworkingService {
while start.elapsed() < timeout {
tokio::select! {
Some(result) = discovery_stream.next() => {
match result {
Ok(iroh::discovery::DiscoveryEvent::Discovered(item)) => {
Some(event) = discovery_stream.next() => {
match event {
iroh::discovery::mdns::DiscoveryEvent::Discovered { endpoint_info, .. } => {
// Check if this node is broadcasting our session_id
if let Some(user_data) = item.node_info().data.user_data() {
if let Some(user_data) = endpoint_info.data.user_data() {
if user_data.as_ref() == session_id_str {
let endpoint_id = endpoint_info.endpoint_id;
self.logger
.info(&format!(
"[mDNS] Found pairing initiator: {} with {} direct addresses",
item.node_id().fmt_short(),
item.node_info().data.direct_addresses().len()
"[mDNS] Found pairing initiator: {} with {} IP addresses",
endpoint_id.fmt_short(),
endpoint_info.data.ip_addrs().count()
))
.await;
// Build NodeAddr from discovery info
let node_addr = iroh::NodeAddr::from_parts(
item.node_id(),
item.node_info().data.relay_url().cloned(),
item.node_info().data.direct_addresses().clone()
);
// Build EndpointAddr from discovery info
let node_addr = endpoint_info.into_endpoint_addr();
// Try to connect to the initiator
if let Err(e) = self.connect_to_node(node_addr.clone(), force_relay).await {
@@ -1118,14 +1131,9 @@ impl NetworkingService {
}
}
}
Ok(iroh::discovery::DiscoveryEvent::Expired(_)) => {
iroh::discovery::mdns::DiscoveryEvent::Expired { .. } => {
// Node expired, continue searching
}
Err(e) => {
self.logger
.warn(&format!("[mDNS] Discovery stream error: {}", e))
.await;
}
}
}
_ = tokio::time::sleep(tokio::time::Duration::from_millis(100)) => {
@@ -1146,10 +1154,10 @@ impl NetworkingService {
&self,
pairing_code: &crate::service::network::protocol::pairing::PairingCode,
) -> Result<()> {
// Get the NodeId from the pairing code
// Get the EndpointId from the pairing code
let node_id = pairing_code.node_id().ok_or_else(|| {
NetworkingError::ConnectionFailed(
"Pairing code missing NodeId - cannot use pkarr discovery for remote pairing"
"Pairing code missing EndpointId - cannot use pkarr discovery for remote pairing"
.to_string(),
)
})?;
@@ -1172,7 +1180,7 @@ impl NetworkingService {
// 1. Query dns.iroh.link/pkarr for the node's published address info
// 2. Get the relay_url and any direct addresses
// 3. Try to connect via the best available path
let node_addr = NodeAddr::new(node_id);
let node_addr = EndpointAddr::new(node_id);
self.logger
.debug("[Pkarr] Querying dns.iroh.link for node address...")
@@ -1276,7 +1284,7 @@ impl NetworkingService {
let initiator_device_id = self.device_id();
let node_addr = self
.get_node_addr()?
.unwrap_or(NodeAddr::new(initiator_node_id));
.unwrap_or(EndpointAddr::new(initiator_node_id));
let device_registry = self.device_registry();
{
let mut registry = device_registry.write().await;
@@ -1294,7 +1302,7 @@ impl NetworkingService {
"Networking not started".to_string(),
))?;
let user_data = iroh::node_info::UserData::try_from(session_id.to_string())
let user_data = iroh::endpoint_info::UserData::try_from(session_id.to_string())
.map_err(|e| NetworkingError::Protocol(format!("Failed to create user data: {}", e)))?;
endpoint.set_user_data_for_discovery(Some(user_data));
@@ -1310,12 +1318,19 @@ impl NetworkingService {
tokio::time::sleep(tokio::time::Duration::from_millis(500)).await;
// Ensure relay connection is established before pkarr publishing
// In v0.95+, we wait for the endpoint to be online (has relay + direct addresses)
self.logger
.info("Waiting for relay connection to be established...")
.info("Waiting for endpoint to come online...")
.await;
let relay_url = endpoint.home_relay().initialized().await;
endpoint.online().await;
let relay_url = endpoint
.addr()
.relay_urls()
.next()
.map(|u| u.to_string())
.unwrap_or_else(|| "unknown".to_string());
self.logger
.info(&format!("Relay connection established: {}", relay_url))
.info(&format!("Endpoint online, relay: {}", relay_url))
.await;
// Give pkarr sufficient time to publish our node address to dns.iroh.link
@@ -1512,7 +1527,7 @@ impl NetworkingService {
// We need to try connecting to all discovered nodes since we don't know which one is the initiator
// Get our own node address to broadcast it
let our_node_addr = endpoint.node_addr().get();
let our_node_addr = endpoint.addr();
self.logger
.info(&format!(
@@ -1811,10 +1826,10 @@ impl NetworkingService {
/// polling interval in update_connection_states().
async fn spawn_connection_watcher_task(
conn: Connection,
node_id: NodeId,
watched_nodes: Arc<RwLock<std::collections::HashSet<NodeId>>>,
node_id: EndpointId,
watched_nodes: Arc<RwLock<std::collections::HashSet<EndpointId>>>,
device_registry: Arc<RwLock<DeviceRegistry>>,
active_connections: Arc<RwLock<std::collections::HashMap<(NodeId, Vec<u8>), Connection>>>,
active_connections: Arc<RwLock<std::collections::HashMap<(EndpointId, Vec<u8>), Connection>>>,
logger: Arc<dyn NetworkLogger>,
) {
// Check if we already have a watcher for this node
@@ -1832,7 +1847,7 @@ async fn spawn_connection_watcher_task(
let close_reason = conn.closed().await;
// Get the ALPN for this specific connection
let alpn_bytes = conn.alpn().unwrap_or_default();
let alpn_bytes = conn.alpn().to_vec();
logger
.info(&format!(
@@ -1864,13 +1879,11 @@ async fn spawn_connection_watcher_task(
// Find the device ID for this node and update state
let mut registry = device_registry.write().await;
if let Some(device_id) = registry.get_device_by_node_id(node_id) {
// Use update_device_from_connection with ConnectionType::None
// Use update_device_from_connection with is_connected=false (all connections closed)
if let Err(e) = registry
.update_device_from_connection(
device_id,
node_id,
iroh::endpoint::ConnectionType::None,
None,
device_id, node_id, false, // is_connected
None, // latency
)
.await
{

View File

@@ -3,7 +3,7 @@
use super::{DeviceInfo, SessionKeys};
use crate::service::network::{NetworkingError, Result};
use chrono::{DateTime, Utc};
use iroh::NodeId;
use iroh::EndpointId;
use std::sync::Arc;
use tokio::sync::{mpsc, RwLock};
use uuid::Uuid;
@@ -12,7 +12,7 @@ use uuid::Uuid;
#[derive(Debug, Clone)]
pub struct DeviceConnection {
/// The node ID of the remote device
pub node_id: NodeId,
pub node_id: EndpointId,
/// Device information
pub device_info: DeviceInfo,
@@ -63,7 +63,7 @@ pub struct OutgoingMessage {
impl DeviceConnection {
/// Create a new device connection
pub fn new(
node_id: NodeId,
node_id: EndpointId,
device_info: DeviceInfo,
session_keys: SessionKeys,
) -> (Self, mpsc::UnboundedReceiver<OutgoingMessage>) {

View File

@@ -5,7 +5,7 @@ pub mod persistence;
pub mod registry;
use chrono::{DateTime, Utc};
use iroh::{NodeAddr, NodeId};
use iroh::{EndpointAddr, EndpointId};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use uuid::Uuid;
@@ -55,15 +55,15 @@ impl Default for DeviceType {
pub enum DeviceState {
/// Device discovered via Iroh discovery but not yet connected
Discovered {
node_id: NodeId,
node_addr: NodeAddr,
node_id: EndpointId,
node_addr: EndpointAddr,
discovered_at: DateTime<Utc>,
},
/// Device currently in pairing process
Pairing {
node_id: NodeId,
node_id: EndpointId,
session_id: Uuid,
node_addr: NodeAddr,
node_addr: EndpointAddr,
started_at: DateTime<Utc>,
},
/// Device successfully paired but not currently connected

View File

@@ -9,7 +9,7 @@ use crate::device::DeviceManager;
use crate::infra::event::EventBus;
use crate::service::network::{utils::logging::NetworkLogger, NetworkingError, Result};
use chrono::{DateTime, Utc};
use iroh::{NodeAddr, NodeId};
use iroh::{EndpointAddr, EndpointId};
use std::collections::HashMap;
use std::sync::Arc;
use uuid::Uuid;
@@ -23,7 +23,7 @@ pub struct DeviceRegistry {
devices: HashMap<Uuid, DeviceState>,
/// Map of node ID to device ID for quick lookup
node_to_device: HashMap<NodeId, Uuid>,
node_to_device: HashMap<EndpointId, Uuid>,
/// Map of session ID to device ID for pairing lookup
session_to_device: HashMap<Uuid, Uuid>,
@@ -225,7 +225,7 @@ impl DeviceRegistry {
.device_info
.network_fingerprint
.node_id
.parse::<NodeId>()
.parse::<EndpointId>()
{
self.node_to_device.insert(node_id, device_id);
self.logger
@@ -266,7 +266,12 @@ impl DeviceRegistry {
}
/// Add a discovered node
pub fn add_discovered_node(&mut self, device_id: Uuid, node_id: NodeId, node_addr: NodeAddr) {
pub fn add_discovered_node(
&mut self,
device_id: Uuid,
node_id: EndpointId,
node_addr: EndpointAddr,
) {
let state = DeviceState::Discovered {
node_id,
node_addr,
@@ -281,9 +286,9 @@ impl DeviceRegistry {
pub fn start_pairing(
&mut self,
device_id: Uuid,
node_id: NodeId,
node_id: EndpointId,
session_id: Uuid,
node_addr: NodeAddr,
node_addr: EndpointAddr,
) -> Result<()> {
let state = DeviceState::Pairing {
node_id,
@@ -321,7 +326,7 @@ impl DeviceRegistry {
let node_id = info
.network_fingerprint
.node_id
.parse::<NodeId>()
.parse::<EndpointId>()
.map_err(|e| {
NetworkingError::Protocol(format!("Invalid node ID in network fingerprint: {}", e))
})?;
@@ -534,7 +539,7 @@ impl DeviceRegistry {
}
/// Get device ID by peer ID
pub fn get_device_by_node(&self, node_id: NodeId) -> Option<Uuid> {
pub fn get_device_by_node(&self, node_id: EndpointId) -> Option<Uuid> {
self.node_to_device.get(&node_id).copied()
}
@@ -587,7 +592,9 @@ impl DeviceRegistry {
| DeviceState::Connected { info, .. }
| DeviceState::Disconnected { info, .. } => {
// Extract node ID from network fingerprint and clean up mapping
if let Ok(node_id) = info.network_fingerprint.node_id.parse::<iroh::NodeId>() {
if let Ok(node_id) =
info.network_fingerprint.node_id.parse::<iroh::EndpointId>()
{
self.node_to_device.remove(&node_id);
}
}
@@ -616,7 +623,7 @@ impl DeviceRegistry {
}
/// Get peer ID for a device
pub fn get_node_by_device(&self, device_id: Uuid) -> Option<NodeId> {
pub fn get_node_by_device(&self, device_id: Uuid) -> Option<EndpointId> {
// Look through node_to_device map in reverse
for (node_id, &dev_id) in &self.node_to_device {
if dev_id == device_id {
@@ -629,7 +636,7 @@ impl DeviceRegistry {
}
/// Get node ID for a device (alias for get_node_by_device)
pub fn get_node_id_for_device(&self, device_id: Uuid) -> Option<NodeId> {
pub fn get_node_id_for_device(&self, device_id: Uuid) -> Option<EndpointId> {
self.get_node_by_device(device_id)
}
@@ -639,32 +646,26 @@ impl DeviceRegistry {
/// directly to get real-time connection state, rather than relying on cached state.
///
/// Returns true if:
/// - Device UUID is mapped to a NodeId
/// - Iroh reports an active connection (Direct, Relay, or Mixed)
/// - Connection type is not None
/// - Device UUID is mapped to an EndpointId
/// - Iroh reports latency for the connection (indicating active connection)
pub fn is_node_connected(&self, endpoint: &iroh::Endpoint, device_id: Uuid) -> bool {
// Get NodeId for this device
// Get EndpointId for this device
let node_id = match self.get_node_id_for_device(device_id) {
Some(id) => id,
None => return false,
};
// Query Iroh for current connection state
match endpoint.remote_info(node_id) {
Some(remote_info) => {
// Check if connection type indicates an active connection
!matches!(remote_info.conn_type, iroh::endpoint::ConnectionType::None)
}
None => false,
}
// Query Iroh for current connection state via latency
// latency() returns Some if there's an active connection
endpoint.latency(node_id).is_some()
}
/// Get device UUID from node ID
pub fn get_device_by_node_id(&self, node_id: NodeId) -> Option<Uuid> {
pub fn get_device_by_node_id(&self, node_id: EndpointId) -> Option<Uuid> {
self.node_to_device.get(&node_id).copied()
}
/// Update device connection state from Iroh RemoteInfo
/// Update device connection state based on connection status
///
/// This is called by the connection monitor to update DeviceRegistry state
/// based on Iroh's actual connection state. This is cosmetic only - sync
@@ -672,8 +673,8 @@ impl DeviceRegistry {
pub async fn update_device_from_connection(
&mut self,
device_id: Uuid,
node_id: NodeId,
conn_type: iroh::endpoint::ConnectionType,
node_id: EndpointId,
is_connected: bool,
latency: Option<std::time::Duration>,
) -> Result<()> {
// Update node-to-device mapping
@@ -686,7 +687,7 @@ impl DeviceRegistry {
};
// Determine if we should be in Connected state
let should_be_connected = !matches!(conn_type, iroh::endpoint::ConnectionType::None);
let should_be_connected = is_connected;
match current_state {
DeviceState::Paired {
@@ -788,7 +789,7 @@ impl DeviceRegistry {
}
/// Get all currently connected peer IDs
pub fn get_connected_nodes(&self) -> Vec<NodeId> {
pub fn get_connected_nodes(&self) -> Vec<EndpointId> {
self.node_to_device.keys().cloned().collect()
}
@@ -883,7 +884,11 @@ impl DeviceRegistry {
}
/// Set a device as connected with its node ID
pub async fn set_device_connected(&mut self, device_id: Uuid, node_id: NodeId) -> Result<()> {
pub async fn set_device_connected(
&mut self,
device_id: Uuid,
node_id: EndpointId,
) -> Result<()> {
// Update the node_to_device mapping
self.node_to_device.insert(node_id, device_id);

View File

@@ -8,7 +8,7 @@ use crate::service::network::{
utils::{get_or_create_connection, SilentLogger},
NetworkingError, Result,
};
use iroh::{endpoint::Connection, Endpoint, NodeId};
use iroh::{endpoint::Connection, Endpoint, EndpointId};
use std::collections::HashMap;
use std::sync::Arc;
use tokio::io::{AsyncReadExt, AsyncWriteExt};
@@ -19,7 +19,7 @@ use uuid::Uuid;
/// Client for subscribing to job activity from remote devices
pub struct JobActivityClient {
endpoint: Endpoint,
connections: Arc<RwLock<HashMap<(NodeId, Vec<u8>), Connection>>>,
connections: Arc<RwLock<HashMap<(EndpointId, Vec<u8>), Connection>>>,
remote_cache: Arc<RemoteJobCache>,
device_registry: Arc<RwLock<DeviceRegistry>>,
}
@@ -27,7 +27,7 @@ pub struct JobActivityClient {
impl JobActivityClient {
pub fn new(
endpoint: Endpoint,
connections: Arc<RwLock<HashMap<(NodeId, Vec<u8>), Connection>>>,
connections: Arc<RwLock<HashMap<(EndpointId, Vec<u8>), Connection>>>,
remote_cache: Arc<RemoteJobCache>,
device_registry: Arc<RwLock<DeviceRegistry>>,
) -> Self {

View File

@@ -8,7 +8,7 @@ use crate::{
service::network::{NetworkingError, Result},
};
use async_trait::async_trait;
use iroh::NodeId;
use iroh::EndpointId;
use std::sync::Arc;
use uuid::Uuid;
@@ -245,7 +245,7 @@ impl super::ProtocolHandler for FileDeleteProtocolHandler {
&self,
mut send: Box<dyn tokio::io::AsyncWrite + Send + Unpin>,
mut recv: Box<dyn tokio::io::AsyncRead + Send + Unpin>,
_remote_node_id: NodeId,
_remote_node_id: EndpointId,
) {
use tokio::io::{AsyncReadExt, AsyncWriteExt};
@@ -321,7 +321,7 @@ impl super::ProtocolHandler for FileDeleteProtocolHandler {
async fn handle_response(
&self,
_from_device: Uuid,
_from_node: NodeId,
_from_node: EndpointId,
_response_data: Vec<u8>,
) -> Result<()> {
// File delete responses are handled by RemoteDeleteStrategy

View File

@@ -3,7 +3,7 @@
use crate::service::network::utils::logging::NetworkLogger;
use crate::service::network::{NetworkingError, Result};
use async_trait::async_trait;
use iroh::NodeId;
use iroh::EndpointId;
use serde::{Deserialize, Serialize};
use std::{
collections::HashMap,
@@ -1429,7 +1429,7 @@ impl super::ProtocolHandler for FileTransferProtocolHandler {
&self,
mut send: Box<dyn tokio::io::AsyncWrite + Send + Unpin>,
mut recv: Box<dyn tokio::io::AsyncRead + Send + Unpin>,
remote_node_id: NodeId,
remote_node_id: EndpointId,
) {
use tokio::io::{AsyncReadExt, AsyncWriteExt};
@@ -1723,7 +1723,7 @@ impl super::ProtocolHandler for FileTransferProtocolHandler {
async fn handle_response(
&self,
from_device: Uuid,
_from_node: NodeId,
_from_node: EndpointId,
response_data: Vec<u8>,
) -> Result<()> {
// Deserialize the response

View File

@@ -14,7 +14,7 @@ use crate::{
};
use async_trait::async_trait;
use chrono::{DateTime, Utc};
use iroh::{endpoint::Connection, Endpoint, NodeId};
use iroh::{endpoint::Connection, Endpoint, EndpointId};
use serde::{Deserialize, Serialize};
use std::{
collections::HashMap,
@@ -105,7 +105,7 @@ pub enum RemoteJobEvent {
/// Subscription information for a remote device
struct Subscription {
node_id: NodeId,
node_id: EndpointId,
event_tx: tokio::sync::mpsc::UnboundedSender<JobActivityMessage>,
library_filter: Option<Uuid>,
last_activity: DateTime<Utc>,
@@ -158,7 +158,7 @@ pub struct JobActivityProtocolHandler {
subscriptions: Arc<RwLock<HashMap<Uuid, Subscription>>>,
/// Cached connections (shared with NetworkingService)
connections: Arc<RwLock<HashMap<(NodeId, Vec<u8>), Connection>>>,
connections: Arc<RwLock<HashMap<(EndpointId, Vec<u8>), Connection>>>,
/// Local device ID
device_id: Uuid,
@@ -176,7 +176,7 @@ impl JobActivityProtocolHandler {
event_bus: Arc<EventBus>,
device_registry: Arc<RwLock<DeviceRegistry>>,
endpoint: Option<Endpoint>,
connections: Arc<RwLock<HashMap<(NodeId, Vec<u8>), Connection>>>,
connections: Arc<RwLock<HashMap<(EndpointId, Vec<u8>), Connection>>>,
device_id: Uuid,
library_id: Option<Uuid>,
) -> Self {
@@ -402,7 +402,7 @@ impl ProtocolHandler for JobActivityProtocolHandler {
&self,
mut send: Box<dyn tokio::io::AsyncWrite + Send + Unpin>,
mut recv: Box<dyn tokio::io::AsyncRead + Send + Unpin>,
remote_node_id: NodeId,
remote_node_id: EndpointId,
) {
// Create channel for receiving events to send
let (event_tx, mut event_rx) = tokio::sync::mpsc::unbounded_channel();
@@ -499,7 +499,7 @@ impl ProtocolHandler for JobActivityProtocolHandler {
Ok(Vec::new())
}
async fn handle_response(&self, _: Uuid, _: NodeId, _: Vec<u8>) -> Result<()> {
async fn handle_response(&self, _: Uuid, _: EndpointId, _: Vec<u8>) -> Result<()> {
Ok(())
}

View File

@@ -3,7 +3,7 @@
use super::{library_messages::LibraryMessage, ProtocolEvent, ProtocolHandler};
use crate::service::network::{utils, NetworkingError, Result};
use async_trait::async_trait;
use iroh::{endpoint::Connection, Endpoint, NodeAddr, NodeId};
use iroh::{endpoint::Connection, Endpoint, EndpointAddr, EndpointId};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::sync::Arc;
@@ -21,8 +21,8 @@ pub struct MessagingProtocolHandler {
/// Endpoint for creating and managing connections
endpoint: Option<Endpoint>,
/// Cached connections to remote nodes (keyed by NodeId and ALPN)
connections: Arc<RwLock<HashMap<(NodeId, Vec<u8>), Connection>>>,
/// Cached connections to remote nodes (keyed by EndpointId and ALPN)
connections: Arc<RwLock<HashMap<(EndpointId, Vec<u8>), Connection>>>,
}
/// Basic message types
@@ -67,7 +67,7 @@ impl MessagingProtocolHandler {
pub fn new(
device_registry: Arc<RwLock<crate::service::network::device::DeviceRegistry>>,
endpoint: Option<Endpoint>,
active_connections: Arc<RwLock<HashMap<(NodeId, Vec<u8>), Connection>>>,
active_connections: Arc<RwLock<HashMap<(EndpointId, Vec<u8>), Connection>>>,
) -> Self {
Self {
context: None,
@@ -283,7 +283,8 @@ impl MessagingProtocolHandler {
match existing {
Ok(Some(existing_device)) => {
// Device exists (from pre-registration) - update with full hardware
let mut device_model: entities::device::ActiveModel = existing_device.into();
let mut device_model: entities::device::ActiveModel =
existing_device.into();
// Update all fields with data from message
device_model.name = Set(device_name.clone());
@@ -299,7 +300,8 @@ impl MessagingProtocolHandler {
device_model.memory_total_bytes = Set(memory_total_bytes);
device_model.form_factor = Set(form_factor.clone());
device_model.manufacturer = Set(manufacturer.clone());
device_model.gpu_models = Set(gpu_models.clone().map(|g| serde_json::json!(g)));
device_model.gpu_models =
Set(gpu_models.clone().map(|g| serde_json::json!(g)));
device_model.boot_disk_type = Set(boot_disk_type.clone());
device_model.boot_disk_capacity_bytes = Set(boot_disk_capacity_bytes);
device_model.swap_total_bytes = Set(swap_total_bytes);
@@ -400,35 +402,52 @@ impl MessagingProtocolHandler {
if let Ok(our_device) = context_clone.device_manager.to_device() {
// Get our slug for this library
if let Some(lib_id) = library_id {
if let Ok(our_slug) = context_clone.device_manager.slug_for_library(lib_id) {
if let Ok(our_slug) =
context_clone.device_manager.slug_for_library(lib_id)
{
// Get networking
if let Some(networking) = context_clone.get_networking().await {
let our_register_request = LibraryMessage::RegisterDeviceRequest {
request_id: Uuid::new_v4(),
library_id,
device_id: our_device.id,
device_name: our_device.name,
device_slug: our_slug,
os_name: our_device.os.to_string(),
os_version: our_device.os_version,
hardware_model: our_device.hardware_model,
cpu_model: our_device.cpu_model,
cpu_architecture: our_device.cpu_architecture,
cpu_cores_physical: our_device.cpu_cores_physical,
cpu_cores_logical: our_device.cpu_cores_logical,
cpu_frequency_mhz: our_device.cpu_frequency_mhz,
memory_total_bytes: our_device.memory_total_bytes,
form_factor: our_device.form_factor.map(|f| f.to_string()),
manufacturer: our_device.manufacturer,
gpu_models: our_device.gpu_models,
boot_disk_type: our_device.boot_disk_type,
boot_disk_capacity_bytes: our_device.boot_disk_capacity_bytes,
swap_total_bytes: our_device.swap_total_bytes,
};
if let Some(networking) =
context_clone.get_networking().await
{
let our_register_request =
LibraryMessage::RegisterDeviceRequest {
request_id: Uuid::new_v4(),
library_id,
device_id: our_device.id,
device_name: our_device.name,
device_slug: our_slug,
os_name: our_device.os.to_string(),
os_version: our_device.os_version,
hardware_model: our_device.hardware_model,
cpu_model: our_device.cpu_model,
cpu_architecture: our_device
.cpu_architecture,
cpu_cores_physical: our_device
.cpu_cores_physical,
cpu_cores_logical: our_device
.cpu_cores_logical,
cpu_frequency_mhz: our_device
.cpu_frequency_mhz,
memory_total_bytes: our_device
.memory_total_bytes,
form_factor: our_device
.form_factor
.map(|f| f.to_string()),
manufacturer: our_device.manufacturer,
gpu_models: our_device.gpu_models,
boot_disk_type: our_device.boot_disk_type,
boot_disk_capacity_bytes: our_device
.boot_disk_capacity_bytes,
swap_total_bytes: our_device
.swap_total_bytes,
};
// Send to the device that just registered with us
if let Err(e) = networking
.send_library_request(sender_device_id, our_register_request)
.send_library_request(
sender_device_id,
our_register_request,
)
.await
{
tracing::warn!(
@@ -649,7 +668,7 @@ impl MessagingProtocolHandler {
/// Uses cached connections and creates new streams (Iroh best practice)
pub async fn send_library_message(
&self,
node_id: NodeId,
node_id: EndpointId,
message: LibraryMessage,
) -> Result<LibraryMessage> {
use tokio::io::{AsyncReadExt, AsyncWriteExt};
@@ -755,7 +774,7 @@ impl ProtocolHandler for MessagingProtocolHandler {
&self,
mut send: Box<dyn tokio::io::AsyncWrite + Send + Unpin>,
mut recv: Box<dyn tokio::io::AsyncRead + Send + Unpin>,
remote_node_id: NodeId,
remote_node_id: EndpointId,
) {
use tokio::io::{AsyncReadExt, AsyncWriteExt};
@@ -906,7 +925,7 @@ impl ProtocolHandler for MessagingProtocolHandler {
async fn handle_response(
&self,
_from_device: Uuid,
_from_node: NodeId,
_from_node: EndpointId,
_response_data: Vec<u8>,
) -> Result<()> {
// Messaging protocol handles responses in handle_request

View File

@@ -11,7 +11,7 @@ pub mod sync;
use crate::service::network::{NetworkingError, Result};
use async_trait::async_trait;
use iroh::NodeId;
use iroh::EndpointId;
use std::collections::HashMap;
use uuid::Uuid;
@@ -38,7 +38,7 @@ pub trait ProtocolHandler: Send + Sync {
&self,
send: Box<dyn tokio::io::AsyncWrite + Send + Unpin>,
recv: Box<dyn tokio::io::AsyncRead + Send + Unpin>,
remote_node_id: NodeId,
remote_node_id: EndpointId,
);
/// Allow downcasting to concrete type for specialized methods
@@ -51,7 +51,7 @@ pub trait ProtocolHandler: Send + Sync {
async fn handle_response(
&self,
from_device: Uuid,
from_node: NodeId,
from_node: EndpointId,
response_data: Vec<u8>,
) -> Result<()>;

View File

@@ -10,7 +10,7 @@ use crate::service::network::{
device::{DeviceInfo, SessionKeys},
NetworkingError, Result,
};
use iroh::{NodeId, Watcher};
use iroh::{EndpointId, Watcher};
use uuid::Uuid;
impl PairingProtocolHandler {
@@ -192,44 +192,48 @@ impl PairingProtocolHandler {
.map_err(|e| NetworkingError::Serialization(e));
}
self.log_info(&format!(
"Signature verified successfully for session {} from device {}",
session_id, from_device
))
.await;
self.log_info(&format!(
"Signature verified successfully for session {} from device {}",
session_id, from_device
))
.await;
// Update session with the final device_info from Response (has correct node_id)
// This ensures vouching uses the joiner's authoritative device info
{
let mut sessions = self.active_sessions.write().await;
if let Some(session) = sessions.get_mut(&session_id) {
session.remote_device_info = Some(device_info.clone());
self.log_debug(&format!(
"Updated session {} with joiner's device info (node_id: {})",
session_id, device_info.network_fingerprint.node_id
))
.await;
}
}
// Signature is valid - complete pairing on Initiator's side
let shared_secret = self.generate_shared_secret(session_id).await?;
let session_keys = SessionKeys::from_shared_secret(shared_secret.clone());
let actual_device_id = device_info.device_id;
let node_id = match device_info.network_fingerprint.node_id.parse::<NodeId>() {
Ok(id) => id,
Err(_) => {
self.log_warn("Failed to parse node ID from device info, using fallback")
// Update session with the final device_info from Response (has correct node_id)
// This ensures vouching uses the joiner's authoritative device info
{
let mut sessions = self.active_sessions.write().await;
if let Some(session) = sessions.get_mut(&session_id) {
session.remote_device_info = Some(device_info.clone());
self.log_debug(&format!(
"Updated session {} with joiner's device info (node_id: {})",
session_id, device_info.network_fingerprint.node_id
))
.await;
NodeId::from_bytes(&[0u8; 32]).unwrap()
}
}
};
// Signature is valid - complete pairing on Initiator's side
let shared_secret = self.generate_shared_secret(session_id).await?;
let session_keys = SessionKeys::from_shared_secret(shared_secret.clone());
let actual_device_id = device_info.device_id;
let node_id = match device_info
.network_fingerprint
.node_id
.parse::<EndpointId>()
{
Ok(id) => id,
Err(_) => {
self.log_warn("Failed to parse node ID from device info, using fallback")
.await;
EndpointId::from_bytes(&[0u8; 32]).unwrap()
}
};
// Register joiner's device in Pairing state
{
let mut registry = self.device_registry.write().await;
let node_addr = iroh::NodeAddr::new(node_id);
let node_addr = iroh::EndpointAddr::new(node_id);
registry
.start_pairing(actual_device_id, node_id, session_id, node_addr)
@@ -247,8 +251,7 @@ impl PairingProtocolHandler {
let relay_url = self
.endpoint
.as_ref()
.and_then(|ep| ep.home_relay().get().into_iter().next())
.map(|r| r.to_string());
.and_then(|ep| ep.addr().relay_urls().next().map(|r| r.to_string()));
// Complete pairing in device registry
{

View File

@@ -9,7 +9,7 @@ use crate::service::network::{
device::{DeviceInfo, SessionKeys},
NetworkingError, Result,
};
use iroh::{NodeId, Watcher};
use iroh::{EndpointId, Watcher};
use uuid::Uuid;
impl PairingProtocolHandler {
@@ -117,7 +117,7 @@ impl PairingProtocolHandler {
success: bool,
reason: Option<String>,
from_device: Uuid,
from_node: NodeId,
from_node: EndpointId,
) -> Result<()> {
self.log_info(&format!(
"Received completion message for session {} - success: {}",
@@ -152,7 +152,7 @@ impl PairingProtocolHandler {
let node_id = match initiator_device_info
.network_fingerprint
.node_id
.parse::<NodeId>()
.parse::<EndpointId>()
{
Ok(id) => id,
Err(_) => {
@@ -167,7 +167,7 @@ impl PairingProtocolHandler {
// Register the initiator device in Pairing state
{
let mut registry = self.device_registry.write().await;
let node_addr = iroh::NodeAddr::new(node_id);
let node_addr = iroh::EndpointAddr::new(node_id);
registry
.start_pairing(device_id, node_id, session_id, node_addr)
@@ -185,8 +185,7 @@ impl PairingProtocolHandler {
let relay_url = self
.endpoint
.as_ref()
.and_then(|ep| ep.home_relay().get().into_iter().next())
.map(|r| r.to_string());
.and_then(|ep| ep.addr().relay_urls().next().map(|r| r.to_string()));
// Complete pairing in device registry
{

View File

@@ -27,7 +27,7 @@ use std::sync::Arc;
use async_trait::async_trait;
use blake3;
use iroh::{endpoint::Connection, Endpoint, NodeAddr, NodeId, Watcher};
use iroh::{endpoint::Connection, Endpoint, EndpointAddr, EndpointId, Watcher};
use tokio::sync::RwLock;
use uuid::Uuid;
@@ -78,8 +78,8 @@ pub struct PairingProtocolHandler {
/// Endpoint for creating and managing connections
endpoint: Option<Endpoint>,
/// Cached connections to remote nodes (keyed by NodeId and ALPN)
connections: Arc<RwLock<HashMap<(NodeId, Vec<u8>), Connection>>>,
/// Cached connections to remote nodes (keyed by EndpointId and ALPN)
connections: Arc<RwLock<HashMap<(EndpointId, Vec<u8>), Connection>>>,
/// Event bus for emitting pairing events
event_bus: Arc<RwLock<Option<Arc<EventBus>>>>,
@@ -121,7 +121,7 @@ impl PairingProtocolHandler {
crate::service::network::core::event_loop::EventLoopCommand,
>,
endpoint: Option<Endpoint>,
active_connections: Arc<RwLock<HashMap<(NodeId, Vec<u8>), Connection>>>,
active_connections: Arc<RwLock<HashMap<(EndpointId, Vec<u8>), Connection>>>,
) -> Self {
Self {
identity,
@@ -153,7 +153,7 @@ impl PairingProtocolHandler {
>,
data_dir: PathBuf,
endpoint: Option<Endpoint>,
active_connections: Arc<RwLock<HashMap<(NodeId, Vec<u8>), Connection>>>,
active_connections: Arc<RwLock<HashMap<(EndpointId, Vec<u8>), Connection>>>,
) -> Self {
let persistence = Arc::new(PairingPersistence::new(data_dir));
Self {
@@ -1009,13 +1009,13 @@ impl PairingProtocolHandler {
NetworkingError::Protocol("Missing vouchee public key".to_string())
})?;
let secret = session.shared_secret.clone();
self.log_debug(&format!(
"Vouching device {} with node_id: '{}'",
device_info.device_id, device_info.network_fingerprint.node_id
))
.await;
(device_info, public_key, secret)
};
@@ -1302,7 +1302,7 @@ impl PairingProtocolHandler {
voucher_signature: Vec<u8>,
timestamp: chrono::DateTime<chrono::Utc>,
proxied_session_keys: SessionKeys,
remote_node_id: NodeId,
remote_node_id: EndpointId,
) -> Result<()> {
let proxy_config: ProxyPairingConfig = { self.proxy_config.read().await.clone() };
@@ -1435,27 +1435,27 @@ impl PairingProtocolHandler {
return Ok(());
}
if proxy_config.auto_accept_vouched && voucher_is_trusted {
{
self.log_info(&format!(
"Auto-accepting proxy pairing for device {} with node_id: '{}'",
vouchee_device_info.device_id, vouchee_device_info.network_fingerprint.node_id
))
.await;
if proxy_config.auto_accept_vouched && voucher_is_trusted {
{
self.log_info(&format!(
"Auto-accepting proxy pairing for device {} with node_id: '{}'",
vouchee_device_info.device_id, vouchee_device_info.network_fingerprint.node_id
))
.await;
let mut registry = self.device_registry.write().await;
registry
.complete_pairing(
vouchee_device_info.device_id,
vouchee_device_info.clone(),
proxied_session_keys.clone(),
None,
crate::service::network::device::PairingType::Proxied,
Some(voucher_device_id),
Some(chrono::Utc::now()),
)
.await?;
}
let mut registry = self.device_registry.write().await;
registry
.complete_pairing(
vouchee_device_info.device_id,
vouchee_device_info.clone(),
proxied_session_keys.clone(),
None,
crate::service::network::device::PairingType::Proxied,
Some(voucher_device_id),
Some(chrono::Utc::now()),
)
.await?;
}
let accepting_device_id = self.get_device_info().await?.device_id;
let response = PairingMessage::ProxyPairingResponse {
@@ -1541,7 +1541,7 @@ impl PairingProtocolHandler {
async fn send_proxy_pairing_rejection(
&self,
remote_node_id: NodeId,
remote_node_id: EndpointId,
session_id: Uuid,
reason: String,
) -> Result<()> {
@@ -1790,7 +1790,7 @@ impl PairingProtocolHandler {
async fn handle_pairing_message(
&self,
message: PairingMessage,
remote_node_id: NodeId,
remote_node_id: EndpointId,
) -> Result<Option<Vec<u8>>> {
match message {
PairingMessage::PairingRequest {
@@ -1892,7 +1892,7 @@ impl PairingProtocolHandler {
}
/// Get or create a device ID for a node
async fn get_device_id_for_node(&self, node_id: NodeId) -> Uuid {
async fn get_device_id_for_node(&self, node_id: EndpointId) -> Uuid {
let registry = self.device_registry.read().await;
registry.get_device_by_node(node_id).unwrap_or_else(|| {
// Generate a deterministic UUID from the node ID
@@ -1914,7 +1914,7 @@ impl PairingProtocolHandler {
pub async fn send_pairing_message_to_node(
&self,
endpoint: &Endpoint,
node_id: NodeId,
node_id: EndpointId,
message: &PairingMessage,
) -> Result<Option<PairingMessage>> {
use tokio::io::{AsyncReadExt, AsyncWriteExt};
@@ -1985,7 +1985,7 @@ impl PairingProtocolHandler {
&self,
mut send: impl tokio::io::AsyncWrite + Unpin,
mut recv: impl tokio::io::AsyncRead + Unpin,
initiator_node_id: NodeId,
initiator_node_id: EndpointId,
) -> Result<Option<PairingMessage>> {
use tokio::io::{AsyncReadExt, AsyncWriteExt};
@@ -2115,7 +2115,7 @@ impl PairingProtocolHandler {
pub async fn send_pairing_message_fire_and_forget(
&self,
node_id: NodeId,
node_id: EndpointId,
message: &PairingMessage,
) -> Result<()> {
let data = serde_json::to_vec(message).map_err(NetworkingError::Serialization)?;
@@ -2146,7 +2146,7 @@ impl ProtocolHandler for PairingProtocolHandler {
&self,
mut send: Box<dyn tokio::io::AsyncWrite + Send + Unpin>,
mut recv: Box<dyn tokio::io::AsyncRead + Send + Unpin>,
remote_node_id: NodeId,
remote_node_id: EndpointId,
) {
use tokio::io::{AsyncReadExt, AsyncWriteExt};
@@ -2359,7 +2359,7 @@ impl ProtocolHandler for PairingProtocolHandler {
async fn handle_response(
&self,
from_device: Uuid,
from_node: NodeId,
from_node: EndpointId,
response_data: Vec<u8>,
) -> Result<()> {
self.log_debug(&format!(

View File

@@ -5,7 +5,7 @@ use crate::service::network::{
utils::identity::NetworkFingerprint,
};
use chrono::{DateTime, Utc};
use iroh::{NodeAddr, NodeId};
use iroh::{EndpointAddr, EndpointId};
use serde::{Deserialize, Serialize};
use uuid::Uuid;
@@ -24,8 +24,8 @@ pub struct PairingCode {
/// Expiration timestamp
expires_at: DateTime<Utc>,
/// Initiator's NodeId for remote discovery via pkarr (optional - enables relay path)
node_id: Option<NodeId>,
/// Initiator's EndpointId for remote discovery via pkarr (optional - enables relay path)
node_id: Option<EndpointId>,
}
impl PairingCode {
@@ -65,7 +65,7 @@ impl PairingCode {
}
/// Add node_id for remote pairing via pkarr discovery
pub fn with_node_id(mut self, node_id: NodeId) -> Self {
pub fn with_node_id(mut self, node_id: EndpointId) -> Self {
self.node_id = Some(node_id);
self
}
@@ -124,7 +124,7 @@ impl PairingCode {
// Extract node_id (optional - enables remote pairing via pkarr)
if let Some(node_id_str) = data.get("node_id").and_then(|v| v.as_str()) {
let node_id = node_id_str.parse::<NodeId>().map_err(|e| {
let node_id = node_id_str.parse::<EndpointId>().map_err(|e| {
crate::service::network::NetworkingError::Protocol(format!(
"Invalid node_id in QR code: {}",
e
@@ -164,8 +164,8 @@ impl PairingCode {
&self.secret
}
/// Get the initiator's NodeId for pkarr discovery
pub fn node_id(&self) -> Option<NodeId> {
/// Get the initiator's EndpointId for pkarr discovery.
///
/// `None` means the pairing code carries no endpoint id (the optional
/// relay path is unavailable); `Some` lets the joiner locate the
/// initiator remotely via pkarr/DNS discovery.
pub fn node_id(&self) -> Option<EndpointId> {
self.node_id
}
@@ -340,7 +340,7 @@ pub enum PairingState {
ResponsePending {
challenge: Vec<u8>,
response_data: Vec<u8>,
remote_node_id: Option<NodeId>,
remote_node_id: Option<EndpointId>,
},
ResponseSent,
Completed,
@@ -413,7 +413,7 @@ pub struct PairingAdvertisement {
/// The node ID of the initiator (as string for serialization)
pub node_id: String,
/// The node address components for reconstruction
pub node_addr_info: NodeAddrInfo,
pub node_addr_info: EndpointAddrInfo,
/// Device information of the initiator
pub device_info: DeviceInfo,
/// When this advertisement expires
@@ -422,9 +422,9 @@ pub struct PairingAdvertisement {
pub created_at: DateTime<Utc>,
}
/// Serializable representation of NodeAddr
/// Serializable representation of EndpointAddr
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NodeAddrInfo {
pub struct EndpointAddrInfo {
/// Node ID as string
pub node_id: String,
/// Direct socket addresses
@@ -434,43 +434,34 @@ pub struct NodeAddrInfo {
}
impl PairingAdvertisement {
/// Convert node ID string back to NodeId
pub fn node_id(&self) -> crate::service::network::Result<NodeId> {
/// Convert the advertised node-id string back into an `EndpointId`.
///
/// Fails with a `Protocol` error when the stored string is not a
/// valid endpoint id.
pub fn node_id(&self) -> crate::service::network::Result<EndpointId> {
    match self.node_id.parse::<EndpointId>() {
        Ok(id) => Ok(id),
        Err(e) => Err(crate::service::network::NetworkingError::Protocol(
            format!("Invalid node ID: {}", e),
        )),
    }
}
/// Convert node address info back to NodeAddr
pub fn node_addr(&self) -> crate::service::network::Result<NodeAddr> {
/// Convert node address info back to EndpointAddr
pub fn node_addr(&self) -> crate::service::network::Result<EndpointAddr> {
// Parse node ID
let node_id = self.node_addr_info.node_id.parse::<NodeId>().map_err(|e| {
crate::service::network::NetworkingError::Protocol(format!(
"Invalid node ID in advertisement: {}",
e
))
})?;
let node_id = self
.node_addr_info
.node_id
.parse::<EndpointId>()
.map_err(|e| {
crate::service::network::NetworkingError::Protocol(format!(
"Invalid node ID in advertisement: {}",
e
))
})?;
// Start with base NodeAddr
let mut node_addr = NodeAddr::new(node_id);
// In v0.95+, EndpointAddr is immutable and builder methods were removed.
// Create a minimal EndpointAddr with just the ID - Iroh's discovery system
// will automatically resolve addresses via pkarr/DNS if configured.
let node_addr = EndpointAddr::new(node_id);
// Add direct addresses
let mut direct_addrs = Vec::new();
for addr_str in &self.node_addr_info.direct_addresses {
if let Ok(addr) = addr_str.parse() {
direct_addrs.push(addr);
}
}
if !direct_addrs.is_empty() {
node_addr = node_addr.with_direct_addresses(direct_addrs);
}
// Add relay URL if present
if let Some(relay_url) = &self.node_addr_info.relay_url {
if let Ok(url) = relay_url.parse() {
node_addr = node_addr.with_relay_url(url);
}
}
// Note: Direct addresses and relay URLs from pairing code are now handled
// by Iroh's discovery system (pkarr/DNS) rather than being manually set.
Ok(node_addr)
}

View File

@@ -57,14 +57,16 @@ impl VouchingQueue {
pub async fn open(data_dir: impl AsRef<Path>) -> Result<Self> {
let networking_dir = data_dir.as_ref().join("networking");
// Ensure networking directory exists
tokio::fs::create_dir_all(&networking_dir).await.map_err(|e| {
NetworkingError::Protocol(format!("Failed to create networking directory: {}", e))
})?;
tokio::fs::create_dir_all(&networking_dir)
.await
.map_err(|e| {
NetworkingError::Protocol(format!("Failed to create networking directory: {}", e))
})?;
// Ensure networking directory exists
std::fs::create_dir_all(&networking_dir).map_err(|e| {
NetworkingError::Protocol(format!("Failed to create networking directory: {}", e))
})?;
let db_path = networking_dir.join("vouching_queue.db");
let database_url = format!("sqlite://{}?mode=rwc", db_path.display());
let conn = Database::connect(&database_url).await.map_err(|e| {

View File

@@ -2,7 +2,7 @@
use super::{ProtocolEvent, ProtocolHandler};
use crate::service::network::{NetworkingError, Result};
use iroh::NodeId;
use iroh::EndpointId;
use std::collections::HashMap;
use std::sync::Arc;
use uuid::Uuid;
@@ -68,7 +68,7 @@ impl ProtocolRegistry {
&self,
protocol_name: &str,
from_device: Uuid,
from_node: NodeId,
from_node: EndpointId,
response_data: Vec<u8>,
) -> Result<()> {
let handler = self.get_handler(protocol_name).ok_or_else(|| {

View File

@@ -595,7 +595,7 @@ impl crate::service::network::protocol::ProtocolHandler for SyncProtocolHandler
&self,
mut send: Box<dyn tokio::io::AsyncWrite + Send + Unpin>,
mut recv: Box<dyn tokio::io::AsyncRead + Send + Unpin>,
remote_node_id: iroh::NodeId,
remote_node_id: iroh::EndpointId,
) {
use tokio::io::{AsyncReadExt, AsyncWriteExt};
@@ -719,7 +719,7 @@ impl crate::service::network::protocol::ProtocolHandler for SyncProtocolHandler
async fn handle_response(
&self,
from_device: Uuid,
_from_node: iroh::NodeId,
_from_node: iroh::EndpointId,
response: Vec<u8>,
) -> Result<()> {
if response.is_empty() {

View File

@@ -94,7 +94,7 @@ impl crate::service::network::protocol::ProtocolHandler for SyncMultiplexer {
&self,
mut send: Box<dyn tokio::io::AsyncWrite + Send + Unpin>,
mut recv: Box<dyn tokio::io::AsyncRead + Send + Unpin>,
remote_node_id: iroh::NodeId,
remote_node_id: iroh::EndpointId,
) {
use tokio::io::{AsyncReadExt, AsyncWriteExt};
@@ -195,7 +195,7 @@ impl crate::service::network::protocol::ProtocolHandler for SyncMultiplexer {
async fn handle_response(
&self,
from_device: Uuid,
_from_node: iroh::NodeId,
_from_node: iroh::EndpointId,
response: Vec<u8>,
) -> Result<()> {
if response.is_empty() {

View File

@@ -6,7 +6,7 @@
//! - Automatic connection reuse across all protocols
use crate::service::network::{NetworkingError, Result};
use iroh::{endpoint::Connection, Endpoint, NodeAddr, NodeId};
use iroh::{endpoint::Connection, Endpoint, EndpointAddr, EndpointId};
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::RwLock;
@@ -29,9 +29,9 @@ use super::logging::NetworkLogger;
/// * `Ok(Connection)` - Either cached or newly created connection
/// * `Err(NetworkingError)` - If connection fails
pub async fn get_or_create_connection(
connections: Arc<RwLock<HashMap<(NodeId, Vec<u8>), Connection>>>,
connections: Arc<RwLock<HashMap<(EndpointId, Vec<u8>), Connection>>>,
endpoint: &Endpoint,
node_id: NodeId,
node_id: EndpointId,
alpn: &'static [u8],
logger: &Arc<dyn NetworkLogger>,
) -> Result<Connection> {
@@ -64,7 +64,7 @@ pub async fn get_or_create_connection(
}
// Create new connection with specified ALPN
let node_addr = NodeAddr::new(node_id);
let node_addr = EndpointAddr::new(node_id);
logger
.info(&format!(
"Creating new {} connection to node {}",

View File

@@ -1,7 +1,7 @@
//! Network identity management - node ID and key generation
use crate::service::network::{NetworkingError, Result};
use iroh::{NodeId, SecretKey};
use iroh::{EndpointId, SecretKey};
use serde::{Deserialize, Serialize};
use uuid::Uuid;
@@ -9,7 +9,7 @@ use uuid::Uuid;
#[derive(Clone)]
pub struct NetworkIdentity {
secret_key: SecretKey,
node_id: NodeId,
node_id: EndpointId,
// Keep Ed25519 keypair for backward compatibility
ed25519_seed: [u8; 32],
}
@@ -17,11 +17,14 @@ pub struct NetworkIdentity {
impl NetworkIdentity {
/// Create a new random network identity
pub async fn new() -> Result<Self> {
let secret_key = SecretKey::generate(&mut rand::thread_rng());
let node_id = secret_key.public();
// Generate random bytes for the secret key
use rand::RngCore;
let mut ed25519_seed = [0u8; 32];
rand::thread_rng().fill_bytes(&mut ed25519_seed);
// Generate Ed25519 seed for backward compatibility
let ed25519_seed = rand::random();
// Create Iroh secret key from random bytes
let secret_key = SecretKey::from_bytes(&ed25519_seed);
let node_id = secret_key.public();
Ok(Self {
secret_key,
@@ -60,7 +63,7 @@ impl NetworkIdentity {
}
/// Get the node ID
pub fn node_id(&self) -> NodeId {
pub fn node_id(&self) -> EndpointId {
self.node_id
}

View File

@@ -1026,7 +1026,10 @@ impl BackfillManager {
);
if let Err(e) = resource_manager
.emit_batch_resource_events(&model_type, applied_snapshot_uuids)
.emit_batch_resource_events(
&model_type,
applied_snapshot_uuids,
)
.await
{
warn!(
@@ -1082,20 +1085,26 @@ impl BackfillManager {
} else {
// FK error but can't extract UUID (raw SQLite error)
// Extract diagnostic information for troubleshooting
let fk_mappings = crate::infra::sync::registry::get_fk_mappings(&entry.model_type);
let fk_mappings = crate::infra::sync::registry::get_fk_mappings(
&entry.model_type,
);
// Extract UUID fields from entry data to show which FKs are present
let uuid_fields: Vec<String> = if let Some(obj) = entry.data.as_object() {
obj.keys()
.filter(|k| k.ends_with("_uuid"))
.map(|k| {
let value = obj.get(k).and_then(|v| v.as_str()).unwrap_or("null");
format!("{}={}", k, value)
})
.collect()
} else {
vec![]
};
let uuid_fields: Vec<String> =
if let Some(obj) = entry.data.as_object() {
obj.keys()
.filter(|k| k.ends_with("_uuid"))
.map(|k| {
let value = obj
.get(k)
.and_then(|v| v.as_str())
.unwrap_or("null");
format!("{}={}", k, value)
})
.collect()
} else {
vec![]
};
// Log comprehensive diagnostic information
tracing::info!(

View File

@@ -71,10 +71,8 @@ impl LogSyncHandler {
// Emit resource event for UI reactivity (for insert/update changes)
if matches!(change_type, ChangeType::Insert | ChangeType::Update) {
let resource_manager = crate::domain::ResourceManager::new(
db.clone(),
self.peer_sync.event_bus().clone(),
);
let resource_manager =
crate::domain::ResourceManager::new(db.clone(), self.peer_sync.event_bus().clone());
if let Err(e) = resource_manager
.emit_resource_events(&model_type, vec![record_uuid])

View File

@@ -87,7 +87,9 @@ pub struct TestEnvironment {
impl TestEnvironment {
/// Create a new test environment with the given name
pub fn new(test_name: impl Into<String>) -> Result<Self, Box<dyn std::error::Error + Send + Sync>> {
pub fn new(
test_name: impl Into<String>,
) -> Result<Self, Box<dyn std::error::Error + Send + Sync>> {
let test_name = test_name.into();
let test_root = PathBuf::from("test_data");
let test_data_dir = test_root.join(&test_name);
@@ -225,7 +227,9 @@ impl TestConfigBuilder {
}
/// Build and save the AppConfig to the data directory
pub async fn build_and_save(self) -> Result<AppConfig, Box<dyn std::error::Error + Send + Sync>> {
pub async fn build_and_save(
self,
) -> Result<AppConfig, Box<dyn std::error::Error + Send + Sync>> {
let config = self.build();
// Ensure the data directory exists
@@ -318,7 +322,9 @@ pub struct IntegrationTestSetup {
impl IntegrationTestSetup {
/// Create a new integration test setup with default configuration
pub async fn new(test_name: impl Into<String>) -> Result<Self, Box<dyn std::error::Error + Send + Sync>> {
pub async fn new(
test_name: impl Into<String>,
) -> Result<Self, Box<dyn std::error::Error + Send + Sync>> {
let environment = TestEnvironment::new(test_name)?;
// Clean any existing data
@@ -411,7 +417,9 @@ impl IntegrationTestSetup {
///
/// This method ensures that the custom AppConfig settings from the test setup
/// are properly applied when initializing the Core.
pub async fn create_core(&self) -> Result<crate::Core, Box<dyn std::error::Error + Send + Sync>> {
pub async fn create_core(
&self,
) -> Result<crate::Core, Box<dyn std::error::Error + Send + Sync>> {
info!(
"Creating Core with test configuration from: {}",
self.data_dir().display()

View File

@@ -337,8 +337,10 @@ pub fn containers_to_volumes(
// Create stable volume fingerprint for APFS volumes
// APFS volumes are always local system/primary volumes, use mount_point + device_id
let fingerprint =
crate::volume::types::VolumeFingerprint::from_primary_volume(mount_point, device_id);
let fingerprint = crate::volume::types::VolumeFingerprint::from_primary_volume(
mount_point,
device_id,
);
debug!(
"APFS_CONVERT: Generated fingerprint {} for volume '{}' (consumed: {} bytes)",
@@ -357,8 +359,7 @@ pub fn containers_to_volumes(
// Auto-track eligibility: Only Primary volume (Data volume on modern macOS)
let auto_track_eligible =
matches!(volume_type, crate::volume::types::VolumeType::Primary)
&& is_user_visible;
matches!(volume_type, crate::volume::types::VolumeType::Primary) && is_user_visible;
debug!(
"APFS_CONVERT: Volume '{}' classified as Type={:?}, user_visible={}, auto_track_eligible={}",

View File

@@ -119,10 +119,7 @@ fn parse_df_line(
}
crate::volume::types::VolumeType::Network => {
// Use filesystem device as backend identifier for network volumes
VolumeFingerprint::from_network_volume(
filesystem_device,
&mount_path.to_string_lossy(),
)
VolumeFingerprint::from_network_volume(filesystem_device, &mount_path.to_string_lossy())
}
_ => {
// Primary, UserData, Secondary, System, Virtual, Unknown

View File

@@ -254,7 +254,10 @@ pub fn create_volume_from_windows_info(
}
crate::volume::types::VolumeType::Network => {
// Use mount path as backend identifier for network volumes
let backend_id = info.volume_guid.as_deref().unwrap_or(&mount_path.to_string_lossy());
let backend_id = info
.volume_guid
.as_deref()
.unwrap_or(&mount_path.to_string_lossy());
VolumeFingerprint::from_network_volume(backend_id, &mount_path.to_string_lossy())
}
_ => {

View File

@@ -189,7 +189,8 @@ async fn test_copy_progress_with_metadata_tracking() {
let mut event_subscriber = core.events.subscribe();
// Start monitoring task BEFORE dispatching to avoid missing events
let (job_id_tx, job_id_rx) = tokio::sync::oneshot::channel::<sd_core::infra::job::types::JobId>();
let (job_id_tx, job_id_rx) =
tokio::sync::oneshot::channel::<sd_core::infra::job::types::JobId>();
let monitor_handle = tokio::spawn(async move {
// Wait for job ID to be sent
@@ -292,9 +293,10 @@ async fn test_copy_progress_with_metadata_tracking() {
// Limit queries
use sd_core::infra::query::LibraryQuery;
let query_input = sd_core::ops::jobs::copy_metadata::query::CopyMetadataQueryInput {
job_id: job_id.into(),
};
let query_input =
sd_core::ops::jobs::copy_metadata::query::CopyMetadataQueryInput {
job_id: job_id.into(),
};
let query =
sd_core::ops::jobs::copy_metadata::query::CopyMetadataQuery::from_input(
@@ -357,7 +359,9 @@ async fn test_copy_progress_with_metadata_tracking() {
let job_id = job_handle.id;
// Send job ID to monitoring task
job_id_tx.send(job_id).expect("Monitor task should be running");
job_id_tx
.send(job_id)
.expect("Monitor task should be running");
// Wait for job completion with timeout
let (event_count, metadata_query_count) =
@@ -374,8 +378,9 @@ async fn test_copy_progress_with_metadata_tracking() {
// Query final metadata state
println!("\nQuerying final job metadata...");
use sd_core::infra::query::LibraryQuery;
let query_input =
sd_core::ops::jobs::copy_metadata::query::CopyMetadataQueryInput { job_id: job_id.into() };
let query_input = sd_core::ops::jobs::copy_metadata::query::CopyMetadataQueryInput {
job_id: job_id.into(),
};
let query =
sd_core::ops::jobs::copy_metadata::query::CopyMetadataQuery::from_input(query_input)
.unwrap();
@@ -412,9 +417,8 @@ async fn test_copy_progress_with_metadata_tracking() {
0
};
let test_passed = files_completed_at_end == file_count
&& max_percentage >= 0.99
&& max_jump < 50.0;
let test_passed =
files_completed_at_end == file_count && max_percentage >= 0.99 && max_jump < 50.0;
let failure_reason = if !test_passed {
if files_completed_at_end != file_count {
@@ -478,9 +482,7 @@ async fn test_copy_progress_with_metadata_tracking() {
// Always write to temp dir for local inspection
let temp_snapshot_path = test_root.join("test_snapshot.json");
let snapshot_json = serde_json::to_string_pretty(&snapshot).unwrap();
fs::write(&temp_snapshot_path, snapshot_json)
.await
.unwrap();
fs::write(&temp_snapshot_path, snapshot_json).await.unwrap();
println!(
"\n📄 Snapshot written to temp: {}",
temp_snapshot_path.display()
@@ -518,4 +520,3 @@ async fn test_copy_progress_with_metadata_tracking() {
);
}
}

View File

@@ -587,4 +587,4 @@ async fn test_cross_device_copy() {
panic!("Cross-device copy test failed");
}
}
}
}

View File

@@ -606,4 +606,4 @@ async fn test_file_copy_pull() {
panic!("PULL transfer test failed");
}
}
}
}

View File

@@ -303,7 +303,8 @@ async fn test_ephemeral_file_move_via_reindex() -> anyhow::Result<()> {
// Index in ephemeral mode
let test_root_sd = SdPath::local(test_root.clone());
let indexer_config = IndexerJobConfig::ephemeral_browse(test_root_sd, IndexScope::Recursive, false);
let indexer_config =
IndexerJobConfig::ephemeral_browse(test_root_sd, IndexScope::Recursive, false);
let indexer_job = IndexerJob::new(indexer_config);
tracing::info!("Initial ephemeral indexing");
@@ -327,8 +328,11 @@ async fn test_ephemeral_file_move_via_reindex() -> anyhow::Result<()> {
.await?;
// Manual reindex to detect the change
let reindex_config =
IndexerJobConfig::ephemeral_browse(SdPath::local(test_root.clone()), IndexScope::Recursive, false);
let reindex_config = IndexerJobConfig::ephemeral_browse(
SdPath::local(test_root.clone()),
IndexScope::Recursive,
false,
);
let reindex_job = IndexerJob::new(reindex_config);
let reindex_handle = harness.library.jobs().dispatch(reindex_job).await?;
reindex_handle.wait().await?;
@@ -381,7 +385,8 @@ async fn test_ephemeral_file_move_via_watcher() -> anyhow::Result<()> {
// Index in ephemeral mode
let test_root_sd = SdPath::local(test_root.clone());
let indexer_config = IndexerJobConfig::ephemeral_browse(test_root_sd, IndexScope::Recursive, false);
let indexer_config =
IndexerJobConfig::ephemeral_browse(test_root_sd, IndexScope::Recursive, false);
let indexer_job = IndexerJob::new(indexer_config);
tracing::info!("Initial ephemeral indexing");

View File

@@ -50,22 +50,22 @@ impl FileSyncTestSetup {
let temp_dir = TempDir::new()?;
let config = sd_core::config::AppConfig {
version: 3,
data_dir: temp_dir.path().to_path_buf(),
log_level: "info".to_string(),
telemetry_enabled: false,
preferences: sd_core::config::Preferences::default(),
job_logging: sd_core::config::JobLoggingConfig::default(),
services: sd_core::config::ServiceConfig {
networking_enabled: false,
volume_monitoring_enabled: false,
fs_watcher_enabled: false,
statistics_listener_enabled: false,
},
logging: sd_core::config::LoggingConfig::default(),
proxy_pairing: sd_core::config::app_config::ProxyPairingConfig::default(),
};
let config = sd_core::config::AppConfig {
version: 3,
data_dir: temp_dir.path().to_path_buf(),
log_level: "info".to_string(),
telemetry_enabled: false,
preferences: sd_core::config::Preferences::default(),
job_logging: sd_core::config::JobLoggingConfig::default(),
services: sd_core::config::ServiceConfig {
networking_enabled: false,
volume_monitoring_enabled: false,
fs_watcher_enabled: false,
statistics_listener_enabled: false,
},
logging: sd_core::config::LoggingConfig::default(),
proxy_pairing: sd_core::config::app_config::ProxyPairingConfig::default(),
};
config.save()?;
let core = Core::new(temp_dir.path().to_path_buf())

View File

@@ -317,7 +317,8 @@ async fn test_ephemeral_folder_rename_via_reindex() -> anyhow::Result<()> {
// Index in ephemeral mode
let test_root_sd = SdPath::local(test_root.clone());
let indexer_config = IndexerJobConfig::ephemeral_browse(test_root_sd, IndexScope::Recursive, false);
let indexer_config =
IndexerJobConfig::ephemeral_browse(test_root_sd, IndexScope::Recursive, false);
let indexer_job = IndexerJob::new(indexer_config);
tracing::info!("Initial ephemeral indexing");
@@ -338,8 +339,11 @@ async fn test_ephemeral_folder_rename_via_reindex() -> anyhow::Result<()> {
tokio::fs::rename(&original_folder, &renamed_folder).await?;
// Manual reindex to detect the change
let reindex_config =
IndexerJobConfig::ephemeral_browse(SdPath::local(test_root.clone()), IndexScope::Recursive, false);
let reindex_config = IndexerJobConfig::ephemeral_browse(
SdPath::local(test_root.clone()),
IndexScope::Recursive,
false,
);
let reindex_job = IndexerJob::new(reindex_config);
let reindex_handle = harness.library.jobs().dispatch(reindex_job).await?;
reindex_handle.wait().await?;
@@ -386,7 +390,8 @@ async fn test_ephemeral_folder_rename_via_watcher() -> anyhow::Result<()> {
// Index in ephemeral mode
let test_root_sd = SdPath::local(test_root.clone());
let indexer_config = IndexerJobConfig::ephemeral_browse(test_root_sd, IndexScope::Recursive, false);
let indexer_config =
IndexerJobConfig::ephemeral_browse(test_root_sd, IndexScope::Recursive, false);
let indexer_job = IndexerJob::new(indexer_config);
tracing::info!("Initial ephemeral indexing");

View File

@@ -372,7 +372,8 @@ async fn test_location_export_import() -> Result<(), Box<dyn std::error::Error +
}
#[tokio::test]
async fn test_export_nonexistent_location() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
async fn test_export_nonexistent_location() -> Result<(), Box<dyn std::error::Error + Send + Sync>>
{
let temp_dir = TempDir::new()?;
let core_dir = temp_dir.path().join("core");
let export_file = temp_dir.path().join("export.sql");
@@ -475,7 +476,8 @@ async fn test_import_invalid_file() -> Result<(), Box<dyn std::error::Error + Se
}
#[tokio::test]
async fn test_import_links_existing_content_identities() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
async fn test_import_links_existing_content_identities(
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
// This test verifies that when importing a location that has content matching
// existing content_identities in the destination library, the entries link to
// the existing content_identities rather than creating duplicates.

View File

@@ -163,10 +163,7 @@ fn test_vouching_session_with_multiple_vouches() {
assert!(all_terminal);
session.state = VouchingSessionState::Completed;
assert!(matches!(
session.state,
VouchingSessionState::Completed
));
assert!(matches!(session.state, VouchingSessionState::Completed));
}
#[test]
@@ -234,24 +231,15 @@ fn test_vouching_session_state_transitions() {
};
// Start as Pending
assert!(matches!(
session.state,
VouchingSessionState::Pending
));
assert!(matches!(session.state, VouchingSessionState::Pending));
// Transition to InProgress when vouching starts
session.state = VouchingSessionState::InProgress;
assert!(matches!(
session.state,
VouchingSessionState::InProgress
));
assert!(matches!(session.state, VouchingSessionState::InProgress));
// Transition to Completed when all vouches are processed
session.state = VouchingSessionState::Completed;
assert!(matches!(
session.state,
VouchingSessionState::Completed
));
assert!(matches!(session.state, VouchingSessionState::Completed));
}
#[test]

View File

@@ -93,7 +93,11 @@ async fn alice_proxy_pairing_scenario() {
loop {
tokio::time::sleep(Duration::from_secs(1)).await;
let paired_devices = if let Some(networking) = core.networking() {
networking.device_registry().read().await.get_paired_devices()
networking
.device_registry()
.read()
.await
.get_paired_devices()
} else {
vec![]
};
@@ -146,7 +150,11 @@ async fn alice_proxy_pairing_scenario() {
loop {
tokio::time::sleep(Duration::from_secs(1)).await;
let paired_devices = if let Some(networking) = core.networking() {
networking.device_registry().read().await.get_paired_devices()
networking
.device_registry()
.read()
.await
.get_paired_devices()
} else {
vec![]
};
@@ -181,7 +189,11 @@ async fn alice_proxy_pairing_scenario() {
// Get Carol's device ID
let paired_devices = if let Some(networking) = core.networking() {
networking.device_registry().read().await.get_paired_devices()
networking
.device_registry()
.read()
.await
.get_paired_devices()
} else {
vec![]
};
@@ -281,7 +293,11 @@ async fn carol_proxy_pairing_scenario() {
loop {
tokio::time::sleep(Duration::from_secs(1)).await;
let paired_devices = if let Some(networking) = core.networking() {
networking.device_registry().read().await.get_paired_devices()
networking
.device_registry()
.read()
.await
.get_paired_devices()
} else {
vec![]
};
@@ -327,7 +343,11 @@ async fn carol_proxy_pairing_scenario() {
loop {
tokio::time::sleep(Duration::from_secs(1)).await;
let paired_devices = if let Some(networking) = core.networking() {
networking.device_registry().read().await.get_paired_devices()
networking
.device_registry()
.read()
.await
.get_paired_devices()
} else {
vec![]
};
@@ -447,7 +467,11 @@ async fn bob_proxy_pairing_scenario() {
loop {
tokio::time::sleep(Duration::from_secs(1)).await;
let paired_devices = if let Some(networking) = core.networking() {
networking.device_registry().read().await.get_paired_devices()
networking
.device_registry()
.read()
.await
.get_paired_devices()
} else {
vec![]
};
@@ -485,7 +509,11 @@ async fn bob_proxy_pairing_scenario() {
loop {
tokio::time::sleep(Duration::from_secs(1)).await;
let paired_devices = if let Some(networking) = core.networking() {
networking.device_registry().read().await.get_paired_devices()
networking
.device_registry()
.read()
.await
.get_paired_devices()
} else {
vec![]
};

View File

@@ -479,8 +479,20 @@ async fn test_sequential_backfill_control() -> anyhow::Result<()> {
tracing::info!("Indexing both locations on Alice first");
add_and_index_location(&harness.library_alice, &harness.core_alice.volumes, core_path.to_str().unwrap(), "core").await?;
add_and_index_location(&harness.library_alice, &harness.core_alice.volumes, apps_path.to_str().unwrap(), "apps").await?;
add_and_index_location(
&harness.library_alice,
&harness.core_alice.volumes,
core_path.to_str().unwrap(),
"core",
)
.await?;
add_and_index_location(
&harness.library_alice,
&harness.core_alice.volumes,
apps_path.to_str().unwrap(),
"apps",
)
.await?;
let alice_entries = entities::entry::Entity::find()
.count(harness.library_alice.db().conn())

View File

@@ -53,7 +53,12 @@ async fn test_initial_backfill_alice_indexes_first() -> anyhow::Result<()> {
let device_alice_id = core_alice.device.device_id()?;
let library_alice = core_alice
.libraries
.create_library_with_id(library_id, "Backfill Test Library", None, core_alice.context.clone())
.create_library_with_id(
library_id,
"Backfill Test Library",
None,
core_alice.context.clone(),
)
.await?;
let device_record = entities::device::Entity::find()
@@ -179,7 +184,12 @@ async fn test_initial_backfill_alice_indexes_first() -> anyhow::Result<()> {
let device_bob_id = core_bob.device.device_id()?;
let library_bob = core_bob
.libraries
.create_library_with_id(library_id, "Backfill Test Library", None, core_bob.context.clone())
.create_library_with_id(
library_id,
"Backfill Test Library",
None,
core_bob.context.clone(),
)
.await?;
register_device(&library_alice, device_bob_id, "Bob").await?;
@@ -414,7 +424,12 @@ async fn test_bidirectional_volume_sync() -> anyhow::Result<()> {
let device_alice_id = core_alice.device.device_id()?;
let library_alice = core_alice
.libraries
.create_library_with_id(library_id, "Volume Sync Test", None, core_alice.context.clone())
.create_library_with_id(
library_id,
"Volume Sync Test",
None,
core_alice.context.clone(),
)
.await?;
let core_bob = Core::new(temp_dir_bob.clone())
@@ -423,7 +438,12 @@ async fn test_bidirectional_volume_sync() -> anyhow::Result<()> {
let device_bob_id = core_bob.device.device_id()?;
let library_bob = core_bob
.libraries
.create_library_with_id(library_id, "Volume Sync Test", None, core_bob.context.clone())
.create_library_with_id(
library_id,
"Volume Sync Test",
None,
core_bob.context.clone(),
)
.await?;
register_device(&library_alice, device_bob_id, "Bob").await?;
@@ -634,7 +654,12 @@ async fn test_volume_resource_events_on_sync() -> anyhow::Result<()> {
let device_alice_id = core_alice.device.device_id()?;
let library_alice = core_alice
.libraries
.create_library_with_id(library_id, "Volume Event Test", None, core_alice.context.clone())
.create_library_with_id(
library_id,
"Volume Event Test",
None,
core_alice.context.clone(),
)
.await?;
let core_bob = Core::new(temp_dir_bob.clone())
@@ -643,7 +668,12 @@ async fn test_volume_resource_events_on_sync() -> anyhow::Result<()> {
let device_bob_id = core_bob.device.device_id()?;
let library_bob = core_bob
.libraries
.create_library_with_id(library_id, "Volume Event Test", None, core_bob.context.clone())
.create_library_with_id(
library_id,
"Volume Event Test",
None,
core_bob.context.clone(),
)
.await?;
register_device(&library_alice, device_bob_id, "Bob").await?;
@@ -683,17 +713,27 @@ async fn test_volume_resource_events_on_sync() -> anyhow::Result<()> {
tracing::debug!("Bob received event: {:?}", event);
match event {
Event::ResourceChangedBatch { resource_type, resources, .. } => {
Event::ResourceChangedBatch {
resource_type,
resources,
..
} => {
if resource_type == "volume" {
tracing::info!(
resource_count = if let serde_json::Value::Array(arr) = &resources { arr.len() } else { 0 },
resource_count = if let serde_json::Value::Array(arr) = &resources {
arr.len()
} else {
0
},
"Bob received ResourceChangedBatch for volumes"
);
// Check if Alice's volume is in the batch
if let serde_json::Value::Array(volume_array) = resources {
for volume_json in volume_array {
if let Some(uuid_str) = volume_json.get("id").and_then(|v| v.as_str()) {
if let Some(uuid_str) =
volume_json.get("id").and_then(|v| v.as_str())
{
if let Ok(volume_id) = Uuid::parse_str(uuid_str) {
if volume_id == alice_volume_uuid_clone {
tracing::info!(
@@ -709,7 +749,11 @@ async fn test_volume_resource_events_on_sync() -> anyhow::Result<()> {
}
}
}
Event::ResourceChanged { resource_type, resource, .. } => {
Event::ResourceChanged {
resource_type,
resource,
..
} => {
if resource_type == "volume" {
tracing::info!("Bob received single ResourceChanged for volume");
@@ -803,17 +847,16 @@ async fn test_volume_resource_events_on_sync() -> anyhow::Result<()> {
// Abort the listener task
event_listener.abort();
tracing::info!(
event_received = event_was_received,
"=== Test Result ==="
);
tracing::info!(event_received = event_was_received, "=== Test Result ===");
assert!(
event_was_received,
"Bob should have received a ResourceChanged event for Alice's volume during sync, but didn't"
);
tracing::info!("✅ Volume ResourceChanged event was emitted on the receiving device during sync");
tracing::info!(
"✅ Volume ResourceChanged event was emitted on the receiving device during sync"
);
Ok(())
}

View File

@@ -315,8 +315,7 @@ async fn carol_three_device_scenario() {
// Wait for Alice's library ID
println!("Carol: Waiting for Alice's library ID...");
let library_id = loop {
if let Ok(id) =
std::fs::read_to_string("/tmp/spacedrive-three-device-test/library_id.txt")
if let Ok(id) = std::fs::read_to_string("/tmp/spacedrive-three-device-test/library_id.txt")
{
break id.trim().to_string();
}

View File

@@ -688,7 +688,10 @@ async fn carol_transitive_sync_scenario() {
let tolerance = (alice_expected_count as f64 * 0.1) as i64;
if diff <= tolerance && carol_final_count > 10 {
println!("Carol: Sync complete! Received {} entries", carol_final_count);
println!(
"Carol: Sync complete! Received {} entries",
carol_final_count
);
break;
}
@@ -796,9 +799,12 @@ async fn test_transitive_sync_backfill() {
match result {
Ok(_) => {
println!("\n✅ TRANSITIVE SYNC BACKFILL TEST PASSED!");
println!(" ✅ Alice indexed {} entries",
println!(
" ✅ Alice indexed {} entries",
std::fs::read_to_string(format!("{}/alice_entry_count.txt", TEST_DIR))
.unwrap_or_default().trim());
.unwrap_or_default()
.trim()
);
println!(" ✅ Alice paired with Bob (direct)");
println!(" ✅ Bob synced Alice's data");
println!(" ✅ Bob paired with Carol (direct)");

View File

@@ -118,7 +118,11 @@ async fn test_macos_volume_detection() {
" {} -> {} ({})",
path_str,
volume.name,
if volume.name == expected_volume { "" } else { "✗ WRONG" }
if volume.name == expected_volume {
""
} else {
"✗ WRONG"
}
);
assert_eq!(
volume.name, expected_volume,
@@ -501,4 +505,4 @@ async fn test_full_copy_workflow_simulation() {
);
}
}
}
}

View File

@@ -1301,4 +1301,4 @@ async fn test_volume_monitor_service() {
// Cleanup: shutdown core to release file descriptors
core.shutdown().await.expect("Failed to shutdown core");
}
}

View File

@@ -425,7 +425,10 @@ fn build_ios() -> Result<()> {
let xcframework_path = ios_core_dir.join(format!("{}.xcframework", framework_name));
if xcframework_path.exists() {
println!("Updating existing XCFramework at: {}", xcframework_path.display());
println!(
"Updating existing XCFramework at: {}",
xcframework_path.display()
);
} else {
println!("Creating XCFramework at: {}", xcframework_path.display());
std::fs::create_dir_all(&xcframework_path)
@@ -439,8 +442,7 @@ fn build_ios() -> Result<()> {
// Create/update device framework directory
let device_target = xcframework_path.join("ios-arm64");
std::fs::create_dir_all(&device_target)
.context("Failed to create device target directory")?;
std::fs::create_dir_all(&device_target).context("Failed to create device target directory")?;
std::fs::copy(
device_framework_dir.join(framework_name),
device_target.join(format!("lib{}.a", framework_name)),
@@ -449,8 +451,7 @@ fn build_ios() -> Result<()> {
// Create/update simulator framework directory
let sim_target = xcframework_path.join("ios-arm64-simulator");
std::fs::create_dir_all(&sim_target)
.context("Failed to create simulator target directory")?;
std::fs::create_dir_all(&sim_target).context("Failed to create simulator target directory")?;
std::fs::copy(
sim_framework_dir.join(framework_name),
sim_target.join(format!("lib{}.a", framework_name)),