From d4d912bc3f68ad995bf79f3056e9556203ac9114 Mon Sep 17 00:00:00 2001
From: "tembo[bot]" <208362400+tembo[bot]@users.noreply.github.com>
Date: Sat, 24 Jan 2026 21:58:43 +0000
Subject: [PATCH 1/4] chore(iroh): upgrade to v0.95.1 with breaking changes and migration steps

---
 Cargo.lock                                     | Bin 330109 -> 331444 bytes
 Cargo.toml                                     |   2 +-
 core/Cargo.toml                                |   8 +-
 core/src/service/network/core/event_loop.rs    |  84 +++++-----
 core/src/service/network/core/mod.rs           | 158 +++++++++---------
 core/src/service/network/device/mod.rs         |  10 +-
 core/src/service/network/device/registry.rs    |  54 +++---
 .../service/network/protocol/file_delete.rs    |   6 +-
 .../service/network/protocol/file_transfer.rs  |   6 +-
 .../service/network/protocol/job_activity.rs   |  12 +-
 .../src/service/network/protocol/messaging.rs  |  14 +-
 core/src/service/network/protocol/mod.rs       |   6 +-
 .../network/protocol/pairing/initiator.rs      |  11 +-
 .../network/protocol/pairing/joiner.rs         |  11 +-
 .../service/network/protocol/pairing/mod.rs    |  28 ++--
 .../service/network/protocol/pairing/types.rs  |  36 ++--
 core/src/service/network/protocol/registry.rs  |   4 +-
 .../service/network/protocol/sync/handler.rs   |   4 +-
 .../network/protocol/sync/multiplexer.rs       |   4 +-
 core/src/service/network/utils/identity.rs     |   6 +-
 20 files changed, 228 insertions(+), 236 deletions(-)
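
A minimal sketch of the call-site migration these hunks apply, assuming the iroh 0.95.1 signatures shown in the hunks themselves (EXAMPLE_ALPN below is a placeholder): NodeId and NodeAddr become EndpointId and EndpointAddr, Connection::remote_node_id() becomes the infallible remote_id(), and connection liveness is read from Endpoint::latency() instead of remote_info().

    // Illustrative sketch, not part of the patch; calls mirror the hunks below.
    use iroh::{Endpoint, EndpointAddr, EndpointId};

    // Placeholder ALPN for the example only; the real constants live in core/mod.rs.
    const EXAMPLE_ALPN: &[u8] = b"example/proto/1";

    async fn dial(endpoint: &Endpoint, remote: EndpointId) -> Result<(), Box<dyn std::error::Error>> {
        // v0.29: NodeAddr::new(node_id) -> v0.95: EndpointAddr::new(endpoint_id);
        // discovery (mDNS/pkarr) fills in relay URLs and IP addresses.
        let addr = EndpointAddr::new(remote);
        let conn = endpoint
            .connect(addr, EXAMPLE_ALPN)
            .await
            .map_err(|e| format!("failed to connect: {e}"))?;

        // v0.29: conn.remote_node_id()? was fallible; v0.95: remote_id() is infallible.
        let who: EndpointId = conn.remote_id();

        // v0.29 checked remote_info().conn_type; v0.95 infers liveness from latency().
        let alive = endpoint.latency(who).is_some();
        println!("connected to {who}, alive: {alive}");
        Ok(())
    }
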
diff --git a/Cargo.lock b/Cargo.lock
index 94e357ab3a4592c70ad132436b3c91b37da4fd54..c9d50a4e328044e1a0011a264faf0c511cfbeaf1 100644
GIT binary patch
[base85-encoded binary delta omitted; Cargo.lock is regenerated by the iroh 0.95.1 bump]

diff --git a/Cargo.toml b/Cargo.toml
index a7cd67b11..e6a2b2018 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -59,7 +59,7 @@ globset = "0.4.15"
 http = "1.2.0"
 hyper = "1.5.2"
 image = "0.25.5"
-iroh = "0.29.0"
+iroh = "0.95.1"
 itertools = "0.13.0"
 lending-stream = "1.0"
 libc = "0.2.169"
diff --git a/core/Cargo.toml
b/core/Cargo.toml index 47284786b..56cf191f7 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -130,10 +130,8 @@ hound = "3.5" # WAV file reading rubato = "0.16" # Audio resampling to 16kHz # Networking -# Iroh P2P networking with iOS support (using Oscar's patches) -iroh = { git = "https://github.com/n0-computer/iroh", rev = "e0c5091008d42f4c577f72b1085dfb26c28bd56f", features = [ - "discovery-local-network" -] } +# Iroh P2P networking +iroh = { version = "0.95.1", features = ["discovery-local-network"] } # Serialization for protocols serde_cbor = "0.11" @@ -249,7 +247,5 @@ tempfile = "3.14" # Patches for iOS compatibility [patch.crates-io] -# https://github.com/n0-computer/iroh/pull/3409 -iroh = { git = "https://github.com/n0-computer/iroh", rev = "e0c5091008d42f4c577f72b1085dfb26c28bd56f" } # https://github.com/shellrow/netdev/pull/125 netdev = { git = "https://github.com/shellrow/netdev", rev = "b6ef275d2a72143b3c7d5845ee2f5a70b0e97771" } diff --git a/core/src/service/network/core/event_loop.rs b/core/src/service/network/core/event_loop.rs index c678c1e37..b4fca25f8 100644 --- a/core/src/service/network/core/event_loop.rs +++ b/core/src/service/network/core/event_loop.rs @@ -11,8 +11,8 @@ use crate::service::network::{ NetworkingError, Result, }; use iroh::endpoint::Connection; -use iroh::NodeId; -use iroh::{Endpoint, NodeAddr}; +use iroh::EndpointId; +use iroh::{Endpoint, EndpointAddr}; use std::sync::Arc; use tokio::io::AsyncWriteExt; use tokio::sync::{broadcast, mpsc, RwLock}; @@ -24,15 +24,15 @@ pub enum EventLoopCommand { // Connection management ConnectionEstablished { device_id: Uuid, - node_id: NodeId, + node_id: EndpointId, }, ConnectionLost { device_id: Uuid, - node_id: NodeId, + node_id: EndpointId, reason: String, }, TrackOutboundConnection { - node_id: NodeId, + node_id: EndpointId, conn: Connection, }, @@ -43,7 +43,7 @@ pub enum EventLoopCommand { data: Vec, }, SendMessageToNode { - node_id: NodeId, + node_id: EndpointId, protocol: String, data: Vec, }, @@ -81,11 +81,11 @@ pub struct NetworkingEventLoop { /// Our network identity identity: NetworkIdentity, - /// Active connections tracker (keyed by NodeId and ALPN) - active_connections: Arc), Connection>>>, + /// Active connections tracker (keyed by EndpointId and ALPN) + active_connections: Arc), Connection>>>, /// Nodes that already have connection watchers spawned (to prevent duplicates) - watched_nodes: Arc>>, + watched_nodes: Arc>>, /// Logger for event loop operations logger: Arc, @@ -99,7 +99,7 @@ impl NetworkingEventLoop { device_registry: Arc>, event_sender: broadcast::Sender, identity: NetworkIdentity, - active_connections: Arc), Connection>>>, + active_connections: Arc), Connection>>>, logger: Arc, ) -> Self { let (command_tx, command_rx) = mpsc::unbounded_channel(); @@ -201,7 +201,7 @@ impl NetworkingEventLoop { /// Handle an incoming connection async fn handle_connection(&self, conn: Connection) { // Extract the remote node ID from the connection - let remote_node_id = match conn.remote_node_id() { + let remote_node_id = match conn.remote_id() { Ok(key) => key, Err(e) => { self.logger @@ -318,7 +318,7 @@ impl NetworkingEventLoop { device_registry: Arc>, event_sender: broadcast::Sender, command_sender: mpsc::UnboundedSender, - remote_node_id: NodeId, + remote_node_id: EndpointId, logger: Arc, ) { loop { @@ -706,7 +706,7 @@ impl NetworkingEventLoop { } /// Send a message to a specific node - async fn send_to_node(&self, node_id: NodeId, protocol: &str, data: Vec) { + async fn send_to_node(&self, node_id: 
EndpointId, protocol: &str, data: Vec) { self.logger .debug(&format!( "Sending {} message to {} ({} bytes)", @@ -894,41 +894,35 @@ impl NetworkingEventLoop { } } - /// Update DeviceRegistry connection states based on Iroh's remote_info + /// Update DeviceRegistry connection states based on tracked connections and latency /// /// This monitors Iroh connections and updates the DeviceRegistry state accordingly. /// Devices transition to Connected when Iroh reports an active connection, and back /// to Paired when the connection is lost. This is cosmetic only - sync routing uses /// is_node_connected() which queries Iroh directly. async fn update_connection_states(&self) { - // Get all remote info from Iroh - let remote_infos: Vec<_> = self.endpoint.remote_info_iter().collect(); - // Lock registry for updates let mut registry = self.device_registry.write().await; - // Track which node IDs Iroh reports as connected - let mut connected_node_ids = std::collections::HashSet::new(); + // Get all tracked connections + let active_connections = self.active_connections.read().await; + let connected_node_ids: std::collections::HashSet = active_connections + .keys() + .map(|(node_id, _alpn)| *node_id) + .collect(); - // Update devices that Iroh reports as connected - for remote_info in remote_infos { - // Check if this is an active connection - let is_connected = - !matches!(remote_info.conn_type, iroh::endpoint::ConnectionType::None); + // Update devices that we have active connections to + for node_id in &connected_node_ids { + // Check if connection is still alive via latency + let latency = self.endpoint.latency(*node_id); + let is_connected = latency.is_some(); if is_connected { - connected_node_ids.insert(remote_info.node_id); - // Find device for this node - if let Some(device_id) = registry.get_device_by_node_id(remote_info.node_id) { + if let Some(device_id) = registry.get_device_by_node_id(*node_id) { // Update to Connected state if not already if let Err(e) = registry - .update_device_from_connection( - device_id, - remote_info.node_id, - remote_info.conn_type, - remote_info.latency, - ) + .update_device_from_connection(device_id, *node_id, true, latency) .await { self.logger @@ -942,30 +936,28 @@ impl NetworkingEventLoop { } } - // Check devices that are marked as Connected in registry but NOT in Iroh's list + // Check devices that are marked as Connected in registry but no longer have active connections // These devices have silently disconnected and need to be transitioned back to Paired let all_devices = registry.get_all_devices(); for (device_id, state) in all_devices { if let crate::service::network::device::DeviceState::Connected { info, .. 
} = state { // Get the node_id for this device - if let Ok(node_id) = info.network_fingerprint.node_id.parse::() { - // If this node is NOT in Iroh's connected list, it's stale - if !connected_node_ids.contains(&node_id) { + if let Ok(node_id) = info.network_fingerprint.node_id.parse::() { + // Check if this node still has an active connection + let has_active_connection = connected_node_ids.contains(&node_id) + && self.endpoint.latency(node_id).is_some(); + + if !has_active_connection { self.logger .info(&format!( - "Device {} ({}) is marked Connected but not in Iroh's connection list - transitioning to Paired", + "Device {} ({}) is marked Connected but has no active connection - transitioning to Paired", device_id, info.device_name )) .await; - // Transition to Paired state via update_device_from_connection with None conn_type + // Transition to Paired state if let Err(e) = registry - .update_device_from_connection( - device_id, - node_id, - iroh::endpoint::ConnectionType::None, - None, - ) + .update_device_from_connection(device_id, node_id, false, None) .await { self.logger @@ -985,7 +977,7 @@ impl NetworkingEventLoop { /// /// This provides instant reactivity when connections drop, instead of waiting /// for the 10-second polling interval in update_connection_states(). - async fn spawn_connection_watcher(&self, conn: Connection, node_id: NodeId) { + async fn spawn_connection_watcher(&self, conn: Connection, node_id: EndpointId) { super::spawn_connection_watcher_task( conn, node_id, diff --git a/core/src/service/network/core/mod.rs b/core/src/service/network/core/mod.rs index 345693f92..3f22a5c14 100644 --- a/core/src/service/network/core/mod.rs +++ b/core/src/service/network/core/mod.rs @@ -11,7 +11,7 @@ use crate::service::network::{ }; use iroh::discovery::{dns::DnsDiscovery, mdns::MdnsDiscovery, pkarr::PkarrPublisher, Discovery}; use iroh::endpoint::Connection; -use iroh::{Endpoint, NodeAddr, NodeId, RelayMode, RelayUrl, Watcher}; +use iroh::{Endpoint, EndpointAddr, EndpointId, RelayMode, RelayUrl, Watcher}; use std::sync::Arc; use tokio::sync::{broadcast, mpsc, RwLock}; use uuid::Uuid; @@ -30,23 +30,23 @@ pub const JOB_ACTIVITY_ALPN: &[u8] = b"spacedrive/jobactivity/1"; pub enum NetworkEvent { // Discovery events PeerDiscovered { - node_id: NodeId, - node_addr: NodeAddr, + node_id: EndpointId, + node_addr: EndpointAddr, }, PeerDisconnected { - node_id: NodeId, + node_id: EndpointId, }, // Pairing events PairingRequest { session_id: Uuid, device_info: DeviceInfo, - node_id: NodeId, + node_id: EndpointId, }, PairingSessionDiscovered { session_id: Uuid, - node_id: NodeId, - node_addr: NodeAddr, + node_id: EndpointId, + node_addr: EndpointAddr, device_info: DeviceInfo, }, PairingCompleted { @@ -61,11 +61,11 @@ pub enum NetworkEvent { // Connection events ConnectionEstablished { device_id: Uuid, - node_id: NodeId, + node_id: EndpointId, }, ConnectionLost { device_id: Uuid, - node_id: NodeId, + node_id: EndpointId, }, MessageReceived { from: Uuid, @@ -83,7 +83,7 @@ pub struct NetworkingService { identity: NetworkIdentity, /// Our Iroh node ID - node_id: NodeId, + node_id: EndpointId, /// Discovery service for finding peers discovery: Option>, @@ -103,12 +103,12 @@ pub struct NetworkingService { /// Event sender for broadcasting network events (broadcast channel allows multiple subscribers) event_sender: broadcast::Sender, - /// Active connections tracker (keyed by NodeId and ALPN) + /// Active connections tracker (keyed by EndpointId and ALPN) /// Each ALPN protocol requires its own 
connection since ALPN is negotiated at connection establishment - active_connections: Arc), Connection>>>, + active_connections: Arc), Connection>>>, /// Nodes that already have connection watchers spawned (to prevent duplicates) - watched_nodes: Arc>>, + watched_nodes: Arc>>, /// Sync multiplexer for routing sync messages to correct library sync_multiplexer: Arc, @@ -341,7 +341,7 @@ impl NetworkingService { endpoint: Option, logger: Arc, ) { - // Deterministic reconnection: only the device with the lower NodeId initiates + // Deterministic reconnection: only the device with the lower EndpointId initiates // This prevents both sides from simultaneously trying to connect let endpoint_ref = match &endpoint { Some(ep) => ep, @@ -351,12 +351,12 @@ impl NetworkingService { } }; - let my_node_id = endpoint_ref.node_id(); + let my_node_id = endpoint_ref.id(); let remote_node_id = match persisted_device .device_info .network_fingerprint .node_id - .parse::() + .parse::() { Ok(id) => id, Err(e) => { @@ -367,12 +367,12 @@ impl NetworkingService { } }; - // Deterministic rule: only device with lower NodeId initiates outbound connections + // Deterministic rule: only device with lower EndpointId initiates outbound connections // This prevents both sides from creating competing connections if my_node_id > remote_node_id { logger .debug(&format!( - "Skipping outbound reconnection to {} - waiting for them to connect to us (NodeId rule: {} > {})", + "Skipping outbound reconnection to {} - waiting for them to connect to us (EndpointId rule: {} > {})", persisted_device.device_info.device_name, my_node_id, remote_node_id @@ -383,7 +383,7 @@ impl NetworkingService { logger .info(&format!( - "NodeId rule: {} < {} - we should initiate connection", + "EndpointId rule: {} < {} - we should initiate connection", my_node_id, remote_node_id )) .await; @@ -401,10 +401,10 @@ impl NetworkingService { .device_info .network_fingerprint .node_id - .parse::() + .parse::() { - // Build NodeAddr - Iroh will discover addresses automatically - let node_addr = NodeAddr::new(node_id); + // Build EndpointAddr - Iroh will discover addresses automatically + let node_addr = EndpointAddr::new(node_id); // Attempt connection with retries to give discovery time to work let mut retry_count = 0; @@ -548,7 +548,7 @@ impl NetworkingService { interval.tick().await; // Get all connected devices - let connected_devices: Vec<(uuid::Uuid, iroh::NodeId)> = { + let connected_devices: Vec<(uuid::Uuid, iroh::EndpointId)> = { let registry = device_registry.read().await; registry .get_all_devices() @@ -560,7 +560,7 @@ impl NetworkingService { } = state { if let Ok(node_id) = - info.network_fingerprint.node_id.parse::() + info.network_fingerprint.node_id.parse::() { Some((device_id, node_id)) } else { @@ -734,7 +734,7 @@ impl NetworkingService { .info("Sending disconnect notifications to connected devices") .await; - let connected_devices: Vec<(uuid::Uuid, iroh::NodeId)> = { + let connected_devices: Vec<(uuid::Uuid, iroh::EndpointId)> = { let registry = self.device_registry.read().await; registry .get_all_devices() @@ -745,7 +745,7 @@ impl NetworkingService { } = state { if let Ok(node_id) = - info.network_fingerprint.node_id.parse::() + info.network_fingerprint.node_id.parse::() { Some((device_id, node_id)) } else { @@ -812,7 +812,7 @@ impl NetworkingService { } /// Get our node ID - pub fn node_id(&self) -> NodeId { + pub fn node_id(&self) -> EndpointId { self.node_id } @@ -822,10 +822,10 @@ impl NetworkingService { } /// Get raw connected nodes 
directly from endpoint - pub async fn get_raw_connected_nodes(&self) -> Vec { + pub async fn get_raw_connected_nodes(&self) -> Vec { let connections = self.active_connections.read().await; - // Extract unique NodeIds from (NodeId, ALPN) keys - let mut node_ids: Vec = connections + // Extract unique EndpointIds from (EndpointId, ALPN) keys + let mut node_ids: Vec = connections .keys() .map(|(node_id, _alpn)| *node_id) .collect(); @@ -910,7 +910,7 @@ impl NetworkingService { /// Get the active connections cache shared with the event loop pub fn active_connections( &self, - ) -> Arc), Connection>>> { + ) -> Arc), Connection>>> { self.active_connections.clone() } @@ -926,7 +926,7 @@ impl NetworkingService { // This leverages Iroh's native mDNS capabilities without needing custom key-value storage /// Get currently connected nodes for direct pairing attempts - pub async fn get_connected_nodes(&self) -> Vec { + pub async fn get_connected_nodes(&self) -> Vec { // Get connected nodes from device registry let registry = self.device_registry.read().await; registry.get_connected_nodes() @@ -945,7 +945,7 @@ impl NetworkingService { /// Send message to a specific node (bypassing device lookup) pub async fn send_message_to_node( &self, - node_id: NodeId, + node_id: EndpointId, protocol: &str, data: Vec, ) -> Result<()> { @@ -968,14 +968,16 @@ impl NetworkingService { } } - /// Strip direct addresses from a NodeAddr to force relay-only connection - fn strip_direct_addresses(node_addr: NodeAddr) -> NodeAddr { - use std::collections::BTreeSet; - NodeAddr::from_parts( - node_addr.node_id, - node_addr.relay_url().cloned(), - BTreeSet::new(), // Empty direct addresses - ) + /// Strip IP addresses from an EndpointAddr to force relay-only connection + fn strip_ip_addresses(endpoint_addr: EndpointAddr) -> EndpointAddr { + // In v0.95+, create a new EndpointAddr with only relay URLs (no IP addrs) + let id = endpoint_addr.id; + let mut new_addr = EndpointAddr::new(id); + // Add relay URLs but not IP addresses + for relay_url in endpoint_addr.relay_urls() { + new_addr = new_addr.with_relay(relay_url.clone()); + } + new_addr } /// Spawn a background task to watch for connection closure @@ -983,7 +985,7 @@ impl NetworkingService { /// This provides instant reactivity when connections drop by waiting on /// Iroh's Connection::closed() future, instead of relying on the 10-second /// polling interval in update_connection_states(). 
- async fn spawn_connection_watcher(&self, conn: Connection, node_id: NodeId) { + async fn spawn_connection_watcher(&self, conn: Connection, node_id: EndpointId) { spawn_connection_watcher_task( conn, node_id, @@ -1000,36 +1002,36 @@ impl NetworkingService { /// # Parameters /// * `node_addr` - The node address to connect to /// * `force_relay` - If true, strip direct addresses and only use relay - pub async fn connect_to_node(&self, node_addr: NodeAddr, force_relay: bool) -> Result<()> { - let node_addr = if force_relay { - Self::strip_direct_addresses(node_addr) + pub async fn connect_to_node(&self, endpoint_addr: EndpointAddr, force_relay: bool) -> Result<()> { + let endpoint_addr = if force_relay { + Self::strip_ip_addresses(endpoint_addr) } else { - node_addr + endpoint_addr }; if let Some(endpoint) = &self.endpoint { // Use pairing ALPN for initial connection during pairing let conn = endpoint - .connect(node_addr.clone(), PAIRING_ALPN) + .connect(endpoint_addr.clone(), PAIRING_ALPN) .await .map_err(|e| { NetworkingError::ConnectionFailed(format!("Failed to connect: {}", e)) })?; // Track the outbound connection (with PAIRING_ALPN) - let node_id = node_addr.node_id; + let remote_id = endpoint_addr.id; { let mut connections = self.active_connections.write().await; - connections.insert((node_id, PAIRING_ALPN.to_vec()), conn.clone()); + connections.insert((remote_id, PAIRING_ALPN.to_vec()), conn.clone()); self.logger .info(&format!( "Tracked outbound pairing connection to {}", - node_id + remote_id )) .await; } // Spawn a task to watch for connection closure for instant reactivity - self.spawn_connection_watcher(conn, node_id).await; + self.spawn_connection_watcher(conn, remote_id).await; Ok(()) } else { @@ -1040,9 +1042,9 @@ impl NetworkingService { } /// Get our node address for advertising - pub fn get_node_addr(&self) -> Result> { + pub fn get_node_addr(&self) -> Result> { if let Some(endpoint) = &self.endpoint { - Ok(endpoint.node_addr().get()) + Ok(endpoint.addr().get()) } else { Err(NetworkingError::ConnectionFailed( "Networking not started".to_string(), @@ -1053,8 +1055,12 @@ impl NetworkingService { /// Get the configured relay URL pub async fn get_relay_url(&self) -> Option { if let Some(endpoint) = &self.endpoint { - let relay = endpoint.home_relay().initialized().await; - Some(relay.to_string()) + // In v0.95+, get relay URL from the endpoint address + if let Some(addr) = endpoint.addr().get() { + addr.relay_urls().next().map(|url| url.to_string()) + } else { + None + } } else { None } @@ -1093,18 +1099,14 @@ impl NetworkingService { if user_data.as_ref() == session_id_str { self.logger .info(&format!( - "[mDNS] Found pairing initiator: {} with {} direct addresses", - item.node_id().fmt_short(), - item.node_info().data.direct_addresses().len() + "[mDNS] Found pairing initiator: {} with {} IP addresses", + item.endpoint_id().fmt_short(), + item.node_info().data.ip_addrs().count() )) .await; - // Build NodeAddr from discovery info - let node_addr = iroh::NodeAddr::from_parts( - item.node_id(), - item.node_info().data.relay_url().cloned(), - item.node_info().data.direct_addresses().clone() - ); + // Build EndpointAddr from discovery info + let node_addr = item.node_info().into_endpoint_addr(item.endpoint_id()); // Try to connect to the initiator if let Err(e) = self.connect_to_node(node_addr.clone(), force_relay).await { @@ -1146,10 +1148,10 @@ impl NetworkingService { &self, pairing_code: &crate::service::network::protocol::pairing::PairingCode, ) -> Result<()> { - // Get the 
NodeId from the pairing code + // Get the EndpointId from the pairing code let node_id = pairing_code.node_id().ok_or_else(|| { NetworkingError::ConnectionFailed( - "Pairing code missing NodeId - cannot use pkarr discovery for remote pairing" + "Pairing code missing EndpointId - cannot use pkarr discovery for remote pairing" .to_string(), ) })?; @@ -1172,7 +1174,7 @@ impl NetworkingService { // 1. Query dns.iroh.link/pkarr for the node's published address info // 2. Get the relay_url and any direct addresses // 3. Try to connect via the best available path - let node_addr = NodeAddr::new(node_id); + let node_addr = EndpointAddr::new(node_id); self.logger .debug("[Pkarr] Querying dns.iroh.link for node address...") @@ -1276,7 +1278,7 @@ impl NetworkingService { let initiator_device_id = self.device_id(); let node_addr = self .get_node_addr()? - .unwrap_or(NodeAddr::new(initiator_node_id)); + .unwrap_or(EndpointAddr::new(initiator_node_id)); let device_registry = self.device_registry(); { let mut registry = device_registry.write().await; @@ -1310,12 +1312,18 @@ impl NetworkingService { tokio::time::sleep(tokio::time::Duration::from_millis(500)).await; // Ensure relay connection is established before pkarr publishing + // In v0.95+, we wait for the endpoint to be online (has relay + direct addresses) self.logger - .info("Waiting for relay connection to be established...") + .info("Waiting for endpoint to come online...") .await; - let relay_url = endpoint.home_relay().initialized().await; + endpoint.online().await; + let relay_url = endpoint + .addr() + .get() + .and_then(|a| a.relay_urls().next().map(|u| u.to_string())) + .unwrap_or_else(|| "unknown".to_string()); self.logger - .info(&format!("Relay connection established: {}", relay_url)) + .info(&format!("Endpoint online, relay: {}", relay_url)) .await; // Give pkarr sufficient time to publish our node address to dns.iroh.link @@ -1512,7 +1520,7 @@ impl NetworkingService { // We need to try connecting to all discovered nodes since we don't know which one is the initiator // Get our own node address to broadcast it - let our_node_addr = endpoint.node_addr().get(); + let our_node_addr = endpoint.addr().get(); self.logger .info(&format!( @@ -1811,10 +1819,10 @@ impl NetworkingService { /// polling interval in update_connection_states(). 
async fn spawn_connection_watcher_task( conn: Connection, - node_id: NodeId, - watched_nodes: Arc>>, + node_id: EndpointId, + watched_nodes: Arc>>, device_registry: Arc>, - active_connections: Arc), Connection>>>, + active_connections: Arc), Connection>>>, logger: Arc, ) { // Check if we already have a watcher for this node diff --git a/core/src/service/network/device/mod.rs b/core/src/service/network/device/mod.rs index 5336377d1..2d1259154 100644 --- a/core/src/service/network/device/mod.rs +++ b/core/src/service/network/device/mod.rs @@ -5,7 +5,7 @@ pub mod persistence; pub mod registry; use chrono::{DateTime, Utc}; -use iroh::{NodeAddr, NodeId}; +use iroh::{EndpointAddr, EndpointId}; use serde::{Deserialize, Serialize}; use std::collections::HashMap; use uuid::Uuid; @@ -55,15 +55,15 @@ impl Default for DeviceType { pub enum DeviceState { /// Device discovered via Iroh discovery but not yet connected Discovered { - node_id: NodeId, - node_addr: NodeAddr, + node_id: EndpointId, + node_addr: EndpointAddr, discovered_at: DateTime, }, /// Device currently in pairing process Pairing { - node_id: NodeId, + node_id: EndpointId, session_id: Uuid, - node_addr: NodeAddr, + node_addr: EndpointAddr, started_at: DateTime, }, /// Device successfully paired but not currently connected diff --git a/core/src/service/network/device/registry.rs b/core/src/service/network/device/registry.rs index 62190a8bb..8402139df 100644 --- a/core/src/service/network/device/registry.rs +++ b/core/src/service/network/device/registry.rs @@ -9,7 +9,7 @@ use crate::device::DeviceManager; use crate::infra::event::EventBus; use crate::service::network::{utils::logging::NetworkLogger, NetworkingError, Result}; use chrono::{DateTime, Utc}; -use iroh::{NodeAddr, NodeId}; +use iroh::{EndpointAddr, EndpointId}; use std::collections::HashMap; use std::sync::Arc; use uuid::Uuid; @@ -23,7 +23,7 @@ pub struct DeviceRegistry { devices: HashMap, /// Map of node ID to device ID for quick lookup - node_to_device: HashMap, + node_to_device: HashMap, /// Map of session ID to device ID for pairing lookup session_to_device: HashMap, @@ -225,7 +225,7 @@ impl DeviceRegistry { .device_info .network_fingerprint .node_id - .parse::() + .parse::() { self.node_to_device.insert(node_id, device_id); self.logger @@ -266,7 +266,7 @@ impl DeviceRegistry { } /// Add a discovered node - pub fn add_discovered_node(&mut self, device_id: Uuid, node_id: NodeId, node_addr: NodeAddr) { + pub fn add_discovered_node(&mut self, device_id: Uuid, node_id: EndpointId, node_addr: EndpointAddr) { let state = DeviceState::Discovered { node_id, node_addr, @@ -281,9 +281,9 @@ impl DeviceRegistry { pub fn start_pairing( &mut self, device_id: Uuid, - node_id: NodeId, + node_id: EndpointId, session_id: Uuid, - node_addr: NodeAddr, + node_addr: EndpointAddr, ) -> Result<()> { let state = DeviceState::Pairing { node_id, @@ -321,7 +321,7 @@ impl DeviceRegistry { let node_id = info .network_fingerprint .node_id - .parse::() + .parse::() .map_err(|e| { NetworkingError::Protocol(format!("Invalid node ID in network fingerprint: {}", e)) })?; @@ -534,7 +534,7 @@ impl DeviceRegistry { } /// Get device ID by peer ID - pub fn get_device_by_node(&self, node_id: NodeId) -> Option { + pub fn get_device_by_node(&self, node_id: EndpointId) -> Option { self.node_to_device.get(&node_id).copied() } @@ -587,7 +587,7 @@ impl DeviceRegistry { | DeviceState::Connected { info, .. } | DeviceState::Disconnected { info, .. 
} => { // Extract node ID from network fingerprint and clean up mapping - if let Ok(node_id) = info.network_fingerprint.node_id.parse::() { + if let Ok(node_id) = info.network_fingerprint.node_id.parse::() { self.node_to_device.remove(&node_id); } } @@ -616,7 +616,7 @@ impl DeviceRegistry { } /// Get peer ID for a device - pub fn get_node_by_device(&self, device_id: Uuid) -> Option { + pub fn get_node_by_device(&self, device_id: Uuid) -> Option { // Look through node_to_device map in reverse for (node_id, &dev_id) in &self.node_to_device { if dev_id == device_id { @@ -629,7 +629,7 @@ impl DeviceRegistry { } /// Get node ID for a device (alias for get_node_by_device) - pub fn get_node_id_for_device(&self, device_id: Uuid) -> Option { + pub fn get_node_id_for_device(&self, device_id: Uuid) -> Option { self.get_node_by_device(device_id) } @@ -639,32 +639,26 @@ impl DeviceRegistry { /// directly to get real-time connection state, rather than relying on cached state. /// /// Returns true if: - /// - Device UUID is mapped to a NodeId - /// - Iroh reports an active connection (Direct, Relay, or Mixed) - /// - Connection type is not None + /// - Device UUID is mapped to an EndpointId + /// - Iroh reports latency for the connection (indicating active connection) pub fn is_node_connected(&self, endpoint: &iroh::Endpoint, device_id: Uuid) -> bool { - // Get NodeId for this device + // Get EndpointId for this device let node_id = match self.get_node_id_for_device(device_id) { Some(id) => id, None => return false, }; - // Query Iroh for current connection state - match endpoint.remote_info(node_id) { - Some(remote_info) => { - // Check if connection type indicates an active connection - !matches!(remote_info.conn_type, iroh::endpoint::ConnectionType::None) - } - None => false, - } + // Query Iroh for current connection state via latency + // latency() returns Some if there's an active connection + endpoint.latency(node_id).is_some() } /// Get device UUID from node ID - pub fn get_device_by_node_id(&self, node_id: NodeId) -> Option { + pub fn get_device_by_node_id(&self, node_id: EndpointId) -> Option { self.node_to_device.get(&node_id).copied() } - /// Update device connection state from Iroh RemoteInfo + /// Update device connection state based on connection status /// /// This is called by the connection monitor to update DeviceRegistry state /// based on Iroh's actual connection state. 
This is cosmetic only - sync @@ -672,8 +666,8 @@ impl DeviceRegistry { pub async fn update_device_from_connection( &mut self, device_id: Uuid, - node_id: NodeId, - conn_type: iroh::endpoint::ConnectionType, + node_id: EndpointId, + is_connected: bool, latency: Option, ) -> Result<()> { // Update node-to-device mapping @@ -686,7 +680,7 @@ impl DeviceRegistry { }; // Determine if we should be in Connected state - let should_be_connected = !matches!(conn_type, iroh::endpoint::ConnectionType::None); + let should_be_connected = is_connected; match current_state { DeviceState::Paired { @@ -788,7 +782,7 @@ impl DeviceRegistry { } /// Get all currently connected peer IDs - pub fn get_connected_nodes(&self) -> Vec { + pub fn get_connected_nodes(&self) -> Vec { self.node_to_device.keys().cloned().collect() } @@ -883,7 +877,7 @@ impl DeviceRegistry { } /// Set a device as connected with its node ID - pub async fn set_device_connected(&mut self, device_id: Uuid, node_id: NodeId) -> Result<()> { + pub async fn set_device_connected(&mut self, device_id: Uuid, node_id: EndpointId) -> Result<()> { // Update the node_to_device mapping self.node_to_device.insert(node_id, device_id); diff --git a/core/src/service/network/protocol/file_delete.rs b/core/src/service/network/protocol/file_delete.rs index 65cb6485b..4a4f9b1c8 100644 --- a/core/src/service/network/protocol/file_delete.rs +++ b/core/src/service/network/protocol/file_delete.rs @@ -8,7 +8,7 @@ use crate::{ service::network::{NetworkingError, Result}, }; use async_trait::async_trait; -use iroh::NodeId; +use iroh::EndpointId; use std::sync::Arc; use uuid::Uuid; @@ -245,7 +245,7 @@ impl super::ProtocolHandler for FileDeleteProtocolHandler { &self, mut send: Box, mut recv: Box, - _remote_node_id: NodeId, + _remote_node_id: EndpointId, ) { use tokio::io::{AsyncReadExt, AsyncWriteExt}; @@ -321,7 +321,7 @@ impl super::ProtocolHandler for FileDeleteProtocolHandler { async fn handle_response( &self, _from_device: Uuid, - _from_node: NodeId, + _from_node: EndpointId, _response_data: Vec, ) -> Result<()> { // File delete responses are handled by RemoteDeleteStrategy diff --git a/core/src/service/network/protocol/file_transfer.rs b/core/src/service/network/protocol/file_transfer.rs index a43837458..f4173d2ce 100644 --- a/core/src/service/network/protocol/file_transfer.rs +++ b/core/src/service/network/protocol/file_transfer.rs @@ -3,7 +3,7 @@ use crate::service::network::utils::logging::NetworkLogger; use crate::service::network::{NetworkingError, Result}; use async_trait::async_trait; -use iroh::NodeId; +use iroh::EndpointId; use serde::{Deserialize, Serialize}; use std::{ collections::HashMap, @@ -1429,7 +1429,7 @@ impl super::ProtocolHandler for FileTransferProtocolHandler { &self, mut send: Box, mut recv: Box, - remote_node_id: NodeId, + remote_node_id: EndpointId, ) { use tokio::io::{AsyncReadExt, AsyncWriteExt}; @@ -1723,7 +1723,7 @@ impl super::ProtocolHandler for FileTransferProtocolHandler { async fn handle_response( &self, from_device: Uuid, - _from_node: NodeId, + _from_node: EndpointId, response_data: Vec, ) -> Result<()> { // Deserialize the response diff --git a/core/src/service/network/protocol/job_activity.rs b/core/src/service/network/protocol/job_activity.rs index eece63e7b..a25e9278a 100644 --- a/core/src/service/network/protocol/job_activity.rs +++ b/core/src/service/network/protocol/job_activity.rs @@ -14,7 +14,7 @@ use crate::{ }; use async_trait::async_trait; use chrono::{DateTime, Utc}; -use iroh::{endpoint::Connection, Endpoint, NodeId}; 
+use iroh::{endpoint::Connection, Endpoint, EndpointId}; use serde::{Deserialize, Serialize}; use std::{ collections::HashMap, @@ -105,7 +105,7 @@ pub enum RemoteJobEvent { /// Subscription information for a remote device struct Subscription { - node_id: NodeId, + node_id: EndpointId, event_tx: tokio::sync::mpsc::UnboundedSender, library_filter: Option, last_activity: DateTime, @@ -158,7 +158,7 @@ pub struct JobActivityProtocolHandler { subscriptions: Arc>>, /// Cached connections (shared with NetworkingService) - connections: Arc), Connection>>>, + connections: Arc), Connection>>>, /// Local device ID device_id: Uuid, @@ -176,7 +176,7 @@ impl JobActivityProtocolHandler { event_bus: Arc, device_registry: Arc>, endpoint: Option, - connections: Arc), Connection>>>, + connections: Arc), Connection>>>, device_id: Uuid, library_id: Option, ) -> Self { @@ -402,7 +402,7 @@ impl ProtocolHandler for JobActivityProtocolHandler { &self, mut send: Box, mut recv: Box, - remote_node_id: NodeId, + remote_node_id: EndpointId, ) { // Create channel for receiving events to send let (event_tx, mut event_rx) = tokio::sync::mpsc::unbounded_channel(); @@ -499,7 +499,7 @@ impl ProtocolHandler for JobActivityProtocolHandler { Ok(Vec::new()) } - async fn handle_response(&self, _: Uuid, _: NodeId, _: Vec) -> Result<()> { + async fn handle_response(&self, _: Uuid, _: EndpointId, _: Vec) -> Result<()> { Ok(()) } diff --git a/core/src/service/network/protocol/messaging.rs b/core/src/service/network/protocol/messaging.rs index d8059ae62..7d0fd505e 100644 --- a/core/src/service/network/protocol/messaging.rs +++ b/core/src/service/network/protocol/messaging.rs @@ -3,7 +3,7 @@ use super::{library_messages::LibraryMessage, ProtocolEvent, ProtocolHandler}; use crate::service::network::{utils, NetworkingError, Result}; use async_trait::async_trait; -use iroh::{endpoint::Connection, Endpoint, NodeAddr, NodeId}; +use iroh::{endpoint::Connection, Endpoint, EndpointAddr, EndpointId}; use serde::{Deserialize, Serialize}; use std::collections::HashMap; use std::sync::Arc; @@ -21,8 +21,8 @@ pub struct MessagingProtocolHandler { /// Endpoint for creating and managing connections endpoint: Option, - /// Cached connections to remote nodes (keyed by NodeId and ALPN) - connections: Arc), Connection>>>, + /// Cached connections to remote nodes (keyed by EndpointId and ALPN) + connections: Arc), Connection>>>, } /// Basic message types @@ -67,7 +67,7 @@ impl MessagingProtocolHandler { pub fn new( device_registry: Arc>, endpoint: Option, - active_connections: Arc), Connection>>>, + active_connections: Arc), Connection>>>, ) -> Self { Self { context: None, @@ -649,7 +649,7 @@ impl MessagingProtocolHandler { /// Uses cached connections and creates new streams (Iroh best practice) pub async fn send_library_message( &self, - node_id: NodeId, + node_id: EndpointId, message: LibraryMessage, ) -> Result { use tokio::io::{AsyncReadExt, AsyncWriteExt}; @@ -755,7 +755,7 @@ impl ProtocolHandler for MessagingProtocolHandler { &self, mut send: Box, mut recv: Box, - remote_node_id: NodeId, + remote_node_id: EndpointId, ) { use tokio::io::{AsyncReadExt, AsyncWriteExt}; @@ -906,7 +906,7 @@ impl ProtocolHandler for MessagingProtocolHandler { async fn handle_response( &self, _from_device: Uuid, - _from_node: NodeId, + _from_node: EndpointId, _response_data: Vec, ) -> Result<()> { // Messaging protocol handles responses in handle_request diff --git a/core/src/service/network/protocol/mod.rs b/core/src/service/network/protocol/mod.rs index 
8fb6e7689..e9869e77e 100644 --- a/core/src/service/network/protocol/mod.rs +++ b/core/src/service/network/protocol/mod.rs @@ -11,7 +11,7 @@ pub mod sync; use crate::service::network::{NetworkingError, Result}; use async_trait::async_trait; -use iroh::NodeId; +use iroh::EndpointId; use std::collections::HashMap; use uuid::Uuid; @@ -38,7 +38,7 @@ pub trait ProtocolHandler: Send + Sync { &self, send: Box, recv: Box, - remote_node_id: NodeId, + remote_node_id: EndpointId, ); /// Allow downcasting to concrete type for specialized methods @@ -51,7 +51,7 @@ pub trait ProtocolHandler: Send + Sync { async fn handle_response( &self, from_device: Uuid, - from_node: NodeId, + from_node: EndpointId, response_data: Vec, ) -> Result<()>; diff --git a/core/src/service/network/protocol/pairing/initiator.rs b/core/src/service/network/protocol/pairing/initiator.rs index 09349d3c3..588016514 100644 --- a/core/src/service/network/protocol/pairing/initiator.rs +++ b/core/src/service/network/protocol/pairing/initiator.rs @@ -10,7 +10,7 @@ use crate::service::network::{ device::{DeviceInfo, SessionKeys}, NetworkingError, Result, }; -use iroh::{NodeId, Watcher}; +use iroh::{EndpointId, Watcher}; use uuid::Uuid; impl PairingProtocolHandler { @@ -217,19 +217,19 @@ impl PairingProtocolHandler { let session_keys = SessionKeys::from_shared_secret(shared_secret.clone()); let actual_device_id = device_info.device_id; - let node_id = match device_info.network_fingerprint.node_id.parse::() { + let node_id = match device_info.network_fingerprint.node_id.parse::() { Ok(id) => id, Err(_) => { self.log_warn("Failed to parse node ID from device info, using fallback") .await; - NodeId::from_bytes(&[0u8; 32]).unwrap() + EndpointId::from_bytes(&[0u8; 32]).unwrap() } }; // Register joiner's device in Pairing state { let mut registry = self.device_registry.write().await; - let node_addr = iroh::NodeAddr::new(node_id); + let node_addr = iroh::EndpointAddr::new(node_id); registry .start_pairing(actual_device_id, node_id, session_id, node_addr) @@ -247,7 +247,8 @@ impl PairingProtocolHandler { let relay_url = self .endpoint .as_ref() - .and_then(|ep| ep.home_relay().get().into_iter().next()) + .and_then(|ep| ep.addr().get()) + .and_then(|addr| addr.relay_urls().next()) .map(|r| r.to_string()); // Complete pairing in device registry diff --git a/core/src/service/network/protocol/pairing/joiner.rs b/core/src/service/network/protocol/pairing/joiner.rs index 5019ac569..ef69af5d3 100644 --- a/core/src/service/network/protocol/pairing/joiner.rs +++ b/core/src/service/network/protocol/pairing/joiner.rs @@ -9,7 +9,7 @@ use crate::service::network::{ device::{DeviceInfo, SessionKeys}, NetworkingError, Result, }; -use iroh::{NodeId, Watcher}; +use iroh::{EndpointId, Watcher}; use uuid::Uuid; impl PairingProtocolHandler { @@ -117,7 +117,7 @@ impl PairingProtocolHandler { success: bool, reason: Option, from_device: Uuid, - from_node: NodeId, + from_node: EndpointId, ) -> Result<()> { self.log_info(&format!( "Received completion message for session {} - success: {}", @@ -152,7 +152,7 @@ impl PairingProtocolHandler { let node_id = match initiator_device_info .network_fingerprint .node_id - .parse::() + .parse::() { Ok(id) => id, Err(_) => { @@ -167,7 +167,7 @@ impl PairingProtocolHandler { // Register the initiator device in Pairing state { let mut registry = self.device_registry.write().await; - let node_addr = iroh::NodeAddr::new(node_id); + let node_addr = iroh::EndpointAddr::new(node_id); registry .start_pairing(device_id, node_id, session_id, 
node_addr) @@ -185,7 +185,8 @@ impl PairingProtocolHandler { let relay_url = self .endpoint .as_ref() - .and_then(|ep| ep.home_relay().get().into_iter().next()) + .and_then(|ep| ep.addr().get()) + .and_then(|addr| addr.relay_urls().next()) .map(|r| r.to_string()); // Complete pairing in device registry diff --git a/core/src/service/network/protocol/pairing/mod.rs b/core/src/service/network/protocol/pairing/mod.rs index 234ec4f93..78fc01516 100644 --- a/core/src/service/network/protocol/pairing/mod.rs +++ b/core/src/service/network/protocol/pairing/mod.rs @@ -27,7 +27,7 @@ use std::sync::Arc; use async_trait::async_trait; use blake3; -use iroh::{endpoint::Connection, Endpoint, NodeAddr, NodeId, Watcher}; +use iroh::{endpoint::Connection, Endpoint, EndpointAddr, EndpointId, Watcher}; use tokio::sync::RwLock; use uuid::Uuid; @@ -78,8 +78,8 @@ pub struct PairingProtocolHandler { /// Endpoint for creating and managing connections endpoint: Option, - /// Cached connections to remote nodes (keyed by NodeId and ALPN) - connections: Arc), Connection>>>, + /// Cached connections to remote nodes (keyed by EndpointId and ALPN) + connections: Arc), Connection>>>, /// Event bus for emitting pairing events event_bus: Arc>>>, @@ -121,7 +121,7 @@ impl PairingProtocolHandler { crate::service::network::core::event_loop::EventLoopCommand, >, endpoint: Option, - active_connections: Arc), Connection>>>, + active_connections: Arc), Connection>>>, ) -> Self { Self { identity, @@ -153,7 +153,7 @@ impl PairingProtocolHandler { >, data_dir: PathBuf, endpoint: Option, - active_connections: Arc), Connection>>>, + active_connections: Arc), Connection>>>, ) -> Self { let persistence = Arc::new(PairingPersistence::new(data_dir)); Self { @@ -1302,7 +1302,7 @@ impl PairingProtocolHandler { voucher_signature: Vec, timestamp: chrono::DateTime, proxied_session_keys: SessionKeys, - remote_node_id: NodeId, + remote_node_id: EndpointId, ) -> Result<()> { let proxy_config: ProxyPairingConfig = { self.proxy_config.read().await.clone() }; @@ -1541,7 +1541,7 @@ impl PairingProtocolHandler { async fn send_proxy_pairing_rejection( &self, - remote_node_id: NodeId, + remote_node_id: EndpointId, session_id: Uuid, reason: String, ) -> Result<()> { @@ -1790,7 +1790,7 @@ impl PairingProtocolHandler { async fn handle_pairing_message( &self, message: PairingMessage, - remote_node_id: NodeId, + remote_node_id: EndpointId, ) -> Result>> { match message { PairingMessage::PairingRequest { @@ -1892,7 +1892,7 @@ impl PairingProtocolHandler { } /// Get or create a device ID for a node - async fn get_device_id_for_node(&self, node_id: NodeId) -> Uuid { + async fn get_device_id_for_node(&self, node_id: EndpointId) -> Uuid { let registry = self.device_registry.read().await; registry.get_device_by_node(node_id).unwrap_or_else(|| { // Generate a deterministic UUID from the node ID @@ -1914,7 +1914,7 @@ impl PairingProtocolHandler { pub async fn send_pairing_message_to_node( &self, endpoint: &Endpoint, - node_id: NodeId, + node_id: EndpointId, message: &PairingMessage, ) -> Result> { use tokio::io::{AsyncReadExt, AsyncWriteExt}; @@ -1985,7 +1985,7 @@ impl PairingProtocolHandler { &self, mut send: impl tokio::io::AsyncWrite + Unpin, mut recv: impl tokio::io::AsyncRead + Unpin, - initiator_node_id: NodeId, + initiator_node_id: EndpointId, ) -> Result> { use tokio::io::{AsyncReadExt, AsyncWriteExt}; @@ -2115,7 +2115,7 @@ impl PairingProtocolHandler { pub async fn send_pairing_message_fire_and_forget( &self, - node_id: NodeId, + node_id: EndpointId, message: 
&PairingMessage, ) -> Result<()> { let data = serde_json::to_vec(message).map_err(NetworkingError::Serialization)?; @@ -2146,7 +2146,7 @@ impl ProtocolHandler for PairingProtocolHandler { &self, mut send: Box, mut recv: Box, - remote_node_id: NodeId, + remote_node_id: EndpointId, ) { use tokio::io::{AsyncReadExt, AsyncWriteExt}; @@ -2359,7 +2359,7 @@ impl ProtocolHandler for PairingProtocolHandler { async fn handle_response( &self, from_device: Uuid, - from_node: NodeId, + from_node: EndpointId, response_data: Vec, ) -> Result<()> { self.log_debug(&format!( diff --git a/core/src/service/network/protocol/pairing/types.rs b/core/src/service/network/protocol/pairing/types.rs index cfa4102c6..30dac2e8a 100644 --- a/core/src/service/network/protocol/pairing/types.rs +++ b/core/src/service/network/protocol/pairing/types.rs @@ -5,7 +5,7 @@ use crate::service::network::{ utils::identity::NetworkFingerprint, }; use chrono::{DateTime, Utc}; -use iroh::{NodeAddr, NodeId}; +use iroh::{EndpointAddr, EndpointId}; use serde::{Deserialize, Serialize}; use uuid::Uuid; @@ -24,8 +24,8 @@ pub struct PairingCode { /// Expiration timestamp expires_at: DateTime, - /// Initiator's NodeId for remote discovery via pkarr (optional - enables relay path) - node_id: Option, + /// Initiator's EndpointId for remote discovery via pkarr (optional - enables relay path) + node_id: Option, } impl PairingCode { @@ -65,7 +65,7 @@ impl PairingCode { } /// Add node_id for remote pairing via pkarr discovery - pub fn with_node_id(mut self, node_id: NodeId) -> Self { + pub fn with_node_id(mut self, node_id: EndpointId) -> Self { self.node_id = Some(node_id); self } @@ -124,7 +124,7 @@ impl PairingCode { // Extract node_id (optional - enables remote pairing via pkarr) if let Some(node_id_str) = data.get("node_id").and_then(|v| v.as_str()) { - let node_id = node_id_str.parse::().map_err(|e| { + let node_id = node_id_str.parse::().map_err(|e| { crate::service::network::NetworkingError::Protocol(format!( "Invalid node_id in QR code: {}", e @@ -164,8 +164,8 @@ impl PairingCode { &self.secret } - /// Get the initiator's NodeId for pkarr discovery - pub fn node_id(&self) -> Option { + /// Get the initiator's EndpointId for pkarr discovery + pub fn node_id(&self) -> Option { self.node_id } @@ -340,7 +340,7 @@ pub enum PairingState { ResponsePending { challenge: Vec, response_data: Vec, - remote_node_id: Option, + remote_node_id: Option, }, ResponseSent, Completed, @@ -413,7 +413,7 @@ pub struct PairingAdvertisement { /// The node ID of the initiator (as string for serialization) pub node_id: String, /// The node address components for reconstruction - pub node_addr_info: NodeAddrInfo, + pub node_addr_info: EndpointAddrInfo, /// Device information of the initiator pub device_info: DeviceInfo, /// When this advertisement expires @@ -422,9 +422,9 @@ pub struct PairingAdvertisement { pub created_at: DateTime, } -/// Serializable representation of NodeAddr +/// Serializable representation of EndpointAddr #[derive(Debug, Clone, Serialize, Deserialize)] -pub struct NodeAddrInfo { +pub struct EndpointAddrInfo { /// Node ID as string pub node_id: String, /// Direct socket addresses @@ -434,25 +434,25 @@ pub struct NodeAddrInfo { } impl PairingAdvertisement { - /// Convert node ID string back to NodeId - pub fn node_id(&self) -> crate::service::network::Result { + /// Convert node ID string back to EndpointId + pub fn node_id(&self) -> crate::service::network::Result { self.node_id.parse().map_err(|e| { 
crate::service::network::NetworkingError::Protocol(format!("Invalid node ID: {}", e)) }) } - /// Convert node address info back to NodeAddr - pub fn node_addr(&self) -> crate::service::network::Result { + /// Convert node address info back to EndpointAddr + pub fn node_addr(&self) -> crate::service::network::Result { // Parse node ID - let node_id = self.node_addr_info.node_id.parse::().map_err(|e| { + let node_id = self.node_addr_info.node_id.parse::().map_err(|e| { crate::service::network::NetworkingError::Protocol(format!( "Invalid node ID in advertisement: {}", e )) })?; - // Start with base NodeAddr - let mut node_addr = NodeAddr::new(node_id); + // Start with base EndpointAddr + let mut node_addr = EndpointAddr::new(node_id); // Add direct addresses let mut direct_addrs = Vec::new(); diff --git a/core/src/service/network/protocol/registry.rs b/core/src/service/network/protocol/registry.rs index da0a643fa..fa52cf4da 100644 --- a/core/src/service/network/protocol/registry.rs +++ b/core/src/service/network/protocol/registry.rs @@ -2,7 +2,7 @@ use super::{ProtocolEvent, ProtocolHandler}; use crate::service::network::{NetworkingError, Result}; -use iroh::NodeId; +use iroh::EndpointId; use std::collections::HashMap; use std::sync::Arc; use uuid::Uuid; @@ -68,7 +68,7 @@ impl ProtocolRegistry { &self, protocol_name: &str, from_device: Uuid, - from_node: NodeId, + from_node: EndpointId, response_data: Vec, ) -> Result<()> { let handler = self.get_handler(protocol_name).ok_or_else(|| { diff --git a/core/src/service/network/protocol/sync/handler.rs b/core/src/service/network/protocol/sync/handler.rs index f63cb37d4..ef8300df3 100644 --- a/core/src/service/network/protocol/sync/handler.rs +++ b/core/src/service/network/protocol/sync/handler.rs @@ -595,7 +595,7 @@ impl crate::service::network::protocol::ProtocolHandler for SyncProtocolHandler &self, mut send: Box, mut recv: Box, - remote_node_id: iroh::NodeId, + remote_node_id: iroh::EndpointId, ) { use tokio::io::{AsyncReadExt, AsyncWriteExt}; @@ -719,7 +719,7 @@ impl crate::service::network::protocol::ProtocolHandler for SyncProtocolHandler async fn handle_response( &self, from_device: Uuid, - _from_node: iroh::NodeId, + _from_node: iroh::EndpointId, response: Vec, ) -> Result<()> { if response.is_empty() { diff --git a/core/src/service/network/protocol/sync/multiplexer.rs b/core/src/service/network/protocol/sync/multiplexer.rs index d6e4420e2..d76c01194 100644 --- a/core/src/service/network/protocol/sync/multiplexer.rs +++ b/core/src/service/network/protocol/sync/multiplexer.rs @@ -94,7 +94,7 @@ impl crate::service::network::protocol::ProtocolHandler for SyncMultiplexer { &self, mut send: Box, mut recv: Box, - remote_node_id: iroh::NodeId, + remote_node_id: iroh::EndpointId, ) { use tokio::io::{AsyncReadExt, AsyncWriteExt}; @@ -195,7 +195,7 @@ impl crate::service::network::protocol::ProtocolHandler for SyncMultiplexer { async fn handle_response( &self, from_device: Uuid, - _from_node: iroh::NodeId, + _from_node: iroh::EndpointId, response: Vec, ) -> Result<()> { if response.is_empty() { diff --git a/core/src/service/network/utils/identity.rs b/core/src/service/network/utils/identity.rs index 65773db47..21779a32a 100644 --- a/core/src/service/network/utils/identity.rs +++ b/core/src/service/network/utils/identity.rs @@ -1,7 +1,7 @@ //! 
Network identity management - node ID and key generation use crate::service::network::{NetworkingError, Result}; -use iroh::{NodeId, SecretKey}; +use iroh::{EndpointId, SecretKey}; use serde::{Deserialize, Serialize}; use uuid::Uuid; @@ -9,7 +9,7 @@ use uuid::Uuid; #[derive(Clone)] pub struct NetworkIdentity { secret_key: SecretKey, - node_id: NodeId, + node_id: EndpointId, // Keep Ed25519 keypair for backward compatibility ed25519_seed: [u8; 32], } @@ -60,7 +60,7 @@ impl NetworkIdentity { } /// Get the node ID - pub fn node_id(&self) -> NodeId { + pub fn node_id(&self) -> EndpointId { self.node_id } From c09cc5942da46110bc5dc18e75f26f2e009cc601 Mon Sep 17 00:00:00 2001 From: Jamie Pine Date: Sat, 24 Jan 2026 15:41:10 -0800 Subject: [PATCH 2/4] refactor(network): update to use EndpointId and EndpointAddr - Replaced NodeId with EndpointId in various components to align with the new Iroh v0.95+ API. - Updated connection handling to utilize EndpointAddr instead of NodeAddr, reflecting changes in the underlying library. - Adjusted discovery and connection logic to accommodate the immutability of EndpointAddr, enhancing overall network functionality. --- core/src/ops/devices/list/query.rs | 14 ++-- core/src/ops/files/copy/strategy.rs | 4 +- core/src/ops/network/pair/join/action.rs | 2 +- core/src/ops/network/status/query.rs | 2 +- core/src/service/network/core/event_loop.rs | 26 +++---- core/src/service/network/core/mod.rs | 74 +++++++++---------- core/src/service/network/device/connection.rs | 6 +- .../service/network/job_activity_client.rs | 6 +- .../network/protocol/pairing/initiator.rs | 4 +- .../network/protocol/pairing/joiner.rs | 4 +- .../service/network/protocol/pairing/types.rs | 25 ++----- core/src/service/network/utils/connection.rs | 8 +- core/src/service/network/utils/identity.rs | 11 ++- 13 files changed, 81 insertions(+), 105 deletions(-) diff --git a/core/src/ops/devices/list/query.rs b/core/src/ops/devices/list/query.rs index 2050184d2..af3326939 100644 --- a/core/src/ops/devices/list/query.rs +++ b/core/src/ops/devices/list/query.rs @@ -154,14 +154,16 @@ impl LibraryQuery for ListLibraryDevicesQuery { // Query Iroh directly for actual connection status and method let (is_actually_connected, connection_method) = if let Some(ep) = endpoint { // Get node ID for this device - let node_id = registry.get_node_id_for_device(device_id); - if let Some(node_id) = node_id { - // Query Iroh for connection info - if let Some(remote_info) = ep.remote_info(node_id) { - let conn_method = crate::domain::device::ConnectionMethod::from_iroh_connection_type(remote_info.conn_type); - let is_connected = conn_method.is_some(); + if let Some(node_id) = registry.get_node_id_for_device(device_id) { + // Use conn_type() API (replaces remote_info() removed in v0.93+) + if let Some(conn_type_watcher) = ep.conn_type(node_id) { + // Get current connection type from watcher (implements Deref) + let conn_type = *conn_type_watcher; + let conn_method = crate::domain::device::ConnectionMethod::from_iroh_connection_type(conn_type); + let is_connected = !matches!(conn_type, iroh::endpoint::ConnectionType::None); (is_connected, conn_method) } else { + // No address information exists for this endpoint (never connected) (false, None) } } else { diff --git a/core/src/ops/files/copy/strategy.rs b/core/src/ops/files/copy/strategy.rs index f071d9b4f..6b6e4bfd9 100644 --- a/core/src/ops/files/copy/strategy.rs +++ b/core/src/ops/files/copy/strategy.rs @@ -491,7 +491,7 @@ impl RemoteTransferStrategy { )); // Connect to 
remote device - let node_addr = iroh::NodeAddr::new(node_id); + let node_addr = iroh::EndpointAddr::new(node_id); let connection = endpoint .connect(node_addr, b"spacedrive/filetransfer/1") .await @@ -1125,7 +1125,7 @@ async fn stream_file_data<'a>( node_id, destination_device_id )); - let node_addr = iroh::NodeAddr::new(node_id); + let node_addr = iroh::EndpointAddr::new(node_id); let connection = endpoint .connect(node_addr, b"spacedrive/filetransfer/1") .await diff --git a/core/src/ops/network/pair/join/action.rs b/core/src/ops/network/pair/join/action.rs index f0e9ec313..bfdb10a1e 100644 --- a/core/src/ops/network/pair/join/action.rs +++ b/core/src/ops/network/pair/join/action.rs @@ -40,7 +40,7 @@ impl CoreAction for PairJoinAction { // If node_id provided separately, add it to enable relay fallback if let Some(node_id_str) = &self.node_id { - let node_id: iroh::NodeId = node_id_str + let node_id: iroh::EndpointId = node_id_str .parse() .map_err(|e| ActionError::Internal(format!("Invalid node ID: {}", e)))?; pairing_code = pairing_code.with_node_id(node_id); diff --git a/core/src/ops/network/status/query.rs b/core/src/ops/network/status/query.rs index 917b809f5..135b09749 100644 --- a/core/src/ops/network/status/query.rs +++ b/core/src/ops/network/status/query.rs @@ -30,7 +30,7 @@ impl CoreQuery for NetworkStatusQuery { if let Some(net) = networking { let node_id = net.node_id().to_string(); let addresses = if let Ok(Some(addr)) = net.get_node_addr() { - addr.direct_addresses() + addr.ip_addrs() .map(|a| a.to_string()) .collect::>() } else { diff --git a/core/src/service/network/core/event_loop.rs b/core/src/service/network/core/event_loop.rs index b4fca25f8..ec83cf458 100644 --- a/core/src/service/network/core/event_loop.rs +++ b/core/src/service/network/core/event_loop.rs @@ -200,20 +200,12 @@ impl NetworkingEventLoop { /// Handle an incoming connection async fn handle_connection(&self, conn: Connection) { - // Extract the remote node ID from the connection - let remote_node_id = match conn.remote_id() { - Ok(key) => key, - Err(e) => { - self.logger - .error(&format!("Failed to get remote node ID: {}", e)) - .await; - return; - } - }; + // Extract the remote node ID from the connection (now infallible in v0.95+) + let remote_node_id = conn.remote_id(); // Track the connection (keyed by node_id and alpn) { - let alpn_bytes = conn.alpn().unwrap_or_default(); + let alpn_bytes = conn.alpn().to_vec(); let mut connections = self.active_connections.write().await; connections.insert((remote_node_id, alpn_bytes), conn.clone()); } @@ -292,7 +284,7 @@ impl NetworkingEventLoop { // Only remove connection if it's actually closed if conn.close_reason().is_some() { let mut connections = active_connections.write().await; - let alpn_bytes = conn.alpn().unwrap_or_default(); + let alpn_bytes = conn.alpn().to_vec(); connections.remove(&(remote_node_id, alpn_bytes)); logger .info(&format!( @@ -356,7 +348,7 @@ impl NetworkingEventLoop { } // Route to handler based on ALPN - let alpn_bytes = conn.alpn().unwrap_or_default(); + let alpn_bytes = conn.alpn().to_vec(); if alpn_bytes == MESSAGING_ALPN { let registry = protocol_registry.read().await; @@ -443,7 +435,7 @@ impl NetworkingEventLoop { logger.debug(&format!("Accepted unidirectional stream from {}", remote_node_id)).await; // Get ALPN to determine which protocol handler to use - let alpn_bytes = conn.alpn().unwrap_or_default(); + let alpn_bytes = conn.alpn().to_vec(); let registry = protocol_registry.read().await; // Route based on ALPN @@ -646,7 +638,7 
@@ impl NetworkingEventLoop { EventLoopCommand::TrackOutboundConnection { node_id, conn } => { // Add outbound connection to active connections map - let alpn_bytes = conn.alpn().unwrap_or_default(); + let alpn_bytes = conn.alpn().to_vec(); { let mut connections = self.active_connections.write().await; connections.insert((node_id, alpn_bytes.clone()), conn.clone()); @@ -738,7 +730,7 @@ impl NetworkingEventLoop { }; // Create node address (Iroh will use existing connection if available) - let node_addr = NodeAddr::new(node_id); + let node_addr = EndpointAddr::new(node_id); // Connect with specific ALPN self.logger @@ -760,7 +752,7 @@ impl NetworkingEventLoop { // Track the connection { let mut connections = self.active_connections.write().await; - let alpn_bytes = conn.alpn().unwrap_or_default(); + let alpn_bytes = conn.alpn().to_vec(); connections.insert((node_id, alpn_bytes), conn.clone()); } diff --git a/core/src/service/network/core/mod.rs b/core/src/service/network/core/mod.rs index 3f22a5c14..e24624ffc 100644 --- a/core/src/service/network/core/mod.rs +++ b/core/src/service/network/core/mod.rs @@ -220,9 +220,9 @@ impl NetworkingService { JOB_ACTIVITY_ALPN.to_vec(), ]) .relay_mode(iroh::RelayMode::Default) - .add_discovery(MdnsDiscovery::builder()) - .add_discovery(PkarrPublisher::n0_dns()) - .add_discovery(DnsDiscovery::n0_dns()) + .discovery(MdnsDiscovery::builder()) + .discovery(PkarrPublisher::n0_dns()) + .discovery(DnsDiscovery::n0_dns()) .bind_addr_v4(std::net::SocketAddrV4::new( std::net::Ipv4Addr::UNSPECIFIED, 0, @@ -969,15 +969,12 @@ impl NetworkingService { } /// Strip IP addresses from an EndpointAddr to force relay-only connection + /// Note: In v0.95+, EndpointAddr is immutable. This creates a minimal EndpointAddr + /// with just the ID - Iroh will use discovery to find relay URLs if needed. 
fn strip_ip_addresses(endpoint_addr: EndpointAddr) -> EndpointAddr { - // In v0.95+, create a new EndpointAddr with only relay URLs (no IP addrs) - let id = endpoint_addr.id; - let mut new_addr = EndpointAddr::new(id); - // Add relay URLs but not IP addresses - for relay_url in endpoint_addr.relay_urls() { - new_addr = new_addr.with_relay(relay_url.clone()); - } - new_addr + // Create a minimal EndpointAddr with just the ID + // Iroh's discovery system will handle finding relay URLs + EndpointAddr::new(endpoint_addr.id) } /// Spawn a background task to watch for connection closure @@ -1044,7 +1041,7 @@ impl NetworkingService { /// Get our node address for advertising pub fn get_node_addr(&self) -> Result> { if let Some(endpoint) = &self.endpoint { - Ok(endpoint.addr().get()) + Ok(Some(endpoint.addr())) } else { Err(NetworkingError::ConnectionFailed( "Networking not started".to_string(), @@ -1056,11 +1053,7 @@ impl NetworkingService { pub async fn get_relay_url(&self) -> Option { if let Some(endpoint) = &self.endpoint { // In v0.95+, get relay URL from the endpoint address - if let Some(addr) = endpoint.addr().get() { - addr.relay_urls().next().map(|url| url.to_string()) - } else { - None - } + endpoint.addr().relay_urls().next().map(|url| url.to_string()) } else { None } @@ -1077,7 +1070,13 @@ impl NetworkingService { "Networking not started".to_string(), ))?; - let mut discovery_stream = endpoint.discovery_stream(); + // Create mDNS discovery service to subscribe to events + // Note: In v0.95+, we need to get discovery services individually and subscribe + let endpoint_id = endpoint.id(); + let mdns_discovery = MdnsDiscovery::builder() + .build(endpoint_id) + .map_err(|e| NetworkingError::ConnectionFailed(format!("Failed to create mDNS discovery: {}", e)))?; + let mut discovery_stream = mdns_discovery.subscribe().await; let session_id_str = session_id.to_string(); let timeout = tokio::time::Duration::from_secs(5); // Shorter timeout for mDNS let start = tokio::time::Instant::now(); @@ -1091,22 +1090,23 @@ impl NetworkingService { while start.elapsed() < timeout { tokio::select! { - Some(result) = discovery_stream.next() => { - match result { - Ok(iroh::discovery::DiscoveryEvent::Discovered(item)) => { + Some(event) = discovery_stream.next() => { + match event { + iroh::discovery::mdns::DiscoveryEvent::Discovered { endpoint_info, .. } => { // Check if this node is broadcasting our session_id - if let Some(user_data) = item.node_info().data.user_data() { + if let Some(user_data) = endpoint_info.data.user_data() { if user_data.as_ref() == session_id_str { + let endpoint_id = endpoint_info.endpoint_id; self.logger .info(&format!( "[mDNS] Found pairing initiator: {} with {} IP addresses", - item.endpoint_id().fmt_short(), - item.node_info().data.ip_addrs().count() + endpoint_id.fmt_short(), + endpoint_info.data.ip_addrs().count() )) .await; // Build EndpointAddr from discovery info - let node_addr = item.node_info().into_endpoint_addr(item.endpoint_id()); + let node_addr = endpoint_info.into_endpoint_addr(); // Try to connect to the initiator if let Err(e) = self.connect_to_node(node_addr.clone(), force_relay).await { @@ -1120,14 +1120,9 @@ impl NetworkingService { } } } - Ok(iroh::discovery::DiscoveryEvent::Expired(_)) => { + iroh::discovery::mdns::DiscoveryEvent::Expired { .. 
} => { // Node expired, continue searching } - Err(e) => { - self.logger - .warn(&format!("[mDNS] Discovery stream error: {}", e)) - .await; - } } } _ = tokio::time::sleep(tokio::time::Duration::from_millis(100)) => { @@ -1296,7 +1291,7 @@ impl NetworkingService { "Networking not started".to_string(), ))?; - let user_data = iroh::node_info::UserData::try_from(session_id.to_string()) + let user_data = iroh::endpoint_info::UserData::try_from(session_id.to_string()) .map_err(|e| NetworkingError::Protocol(format!("Failed to create user data: {}", e)))?; endpoint.set_user_data_for_discovery(Some(user_data)); @@ -1319,8 +1314,9 @@ impl NetworkingService { endpoint.online().await; let relay_url = endpoint .addr() - .get() - .and_then(|a| a.relay_urls().next().map(|u| u.to_string())) + .relay_urls() + .next() + .map(|u| u.to_string()) .unwrap_or_else(|| "unknown".to_string()); self.logger .info(&format!("Endpoint online, relay: {}", relay_url)) @@ -1520,7 +1516,7 @@ impl NetworkingService { // We need to try connecting to all discovered nodes since we don't know which one is the initiator // Get our own node address to broadcast it - let our_node_addr = endpoint.addr().get(); + let our_node_addr = endpoint.addr(); self.logger .info(&format!( @@ -1840,7 +1836,7 @@ async fn spawn_connection_watcher_task( let close_reason = conn.closed().await; // Get the ALPN for this specific connection - let alpn_bytes = conn.alpn().unwrap_or_default(); + let alpn_bytes = conn.alpn().to_vec(); logger .info(&format!( @@ -1872,13 +1868,13 @@ async fn spawn_connection_watcher_task( // Find the device ID for this node and update state let mut registry = device_registry.write().await; if let Some(device_id) = registry.get_device_by_node_id(node_id) { - // Use update_device_from_connection with ConnectionType::None + // Use update_device_from_connection with is_connected=false (all connections closed) if let Err(e) = registry .update_device_from_connection( device_id, node_id, - iroh::endpoint::ConnectionType::None, - None, + false, // is_connected + None, // latency ) .await { diff --git a/core/src/service/network/device/connection.rs b/core/src/service/network/device/connection.rs index 208cef60e..65ad61f05 100644 --- a/core/src/service/network/device/connection.rs +++ b/core/src/service/network/device/connection.rs @@ -3,7 +3,7 @@ use super::{DeviceInfo, SessionKeys}; use crate::service::network::{NetworkingError, Result}; use chrono::{DateTime, Utc}; -use iroh::NodeId; +use iroh::EndpointId; use std::sync::Arc; use tokio::sync::{mpsc, RwLock}; use uuid::Uuid; @@ -12,7 +12,7 @@ use uuid::Uuid; #[derive(Debug, Clone)] pub struct DeviceConnection { /// The node ID of the remote device - pub node_id: NodeId, + pub node_id: EndpointId, /// Device information pub device_info: DeviceInfo, @@ -63,7 +63,7 @@ pub struct OutgoingMessage { impl DeviceConnection { /// Create a new device connection pub fn new( - node_id: NodeId, + node_id: EndpointId, device_info: DeviceInfo, session_keys: SessionKeys, ) -> (Self, mpsc::UnboundedReceiver) { diff --git a/core/src/service/network/job_activity_client.rs b/core/src/service/network/job_activity_client.rs index 5de40d991..078712d32 100644 --- a/core/src/service/network/job_activity_client.rs +++ b/core/src/service/network/job_activity_client.rs @@ -8,7 +8,7 @@ use crate::service::network::{ utils::{get_or_create_connection, SilentLogger}, NetworkingError, Result, }; -use iroh::{endpoint::Connection, Endpoint, NodeId}; +use iroh::{endpoint::Connection, Endpoint, EndpointId}; use 
std::collections::HashMap; use std::sync::Arc; use tokio::io::{AsyncReadExt, AsyncWriteExt}; @@ -19,7 +19,7 @@ use uuid::Uuid; /// Client for subscribing to job activity from remote devices pub struct JobActivityClient { endpoint: Endpoint, - connections: Arc), Connection>>>, + connections: Arc), Connection>>>, remote_cache: Arc, device_registry: Arc>, } @@ -27,7 +27,7 @@ pub struct JobActivityClient { impl JobActivityClient { pub fn new( endpoint: Endpoint, - connections: Arc), Connection>>>, + connections: Arc), Connection>>>, remote_cache: Arc, device_registry: Arc>, ) -> Self { diff --git a/core/src/service/network/protocol/pairing/initiator.rs b/core/src/service/network/protocol/pairing/initiator.rs index 588016514..d91906ded 100644 --- a/core/src/service/network/protocol/pairing/initiator.rs +++ b/core/src/service/network/protocol/pairing/initiator.rs @@ -247,9 +247,7 @@ impl PairingProtocolHandler { let relay_url = self .endpoint .as_ref() - .and_then(|ep| ep.addr().get()) - .and_then(|addr| addr.relay_urls().next()) - .map(|r| r.to_string()); + .and_then(|ep| ep.addr().relay_urls().next().map(|r| r.to_string())); // Complete pairing in device registry { diff --git a/core/src/service/network/protocol/pairing/joiner.rs b/core/src/service/network/protocol/pairing/joiner.rs index ef69af5d3..e87755f3c 100644 --- a/core/src/service/network/protocol/pairing/joiner.rs +++ b/core/src/service/network/protocol/pairing/joiner.rs @@ -185,9 +185,7 @@ impl PairingProtocolHandler { let relay_url = self .endpoint .as_ref() - .and_then(|ep| ep.addr().get()) - .and_then(|addr| addr.relay_urls().next()) - .map(|r| r.to_string()); + .and_then(|ep| ep.addr().relay_urls().next().map(|r| r.to_string())); // Complete pairing in device registry { diff --git a/core/src/service/network/protocol/pairing/types.rs b/core/src/service/network/protocol/pairing/types.rs index 30dac2e8a..db43367e4 100644 --- a/core/src/service/network/protocol/pairing/types.rs +++ b/core/src/service/network/protocol/pairing/types.rs @@ -451,26 +451,13 @@ impl PairingAdvertisement { )) })?; - // Start with base EndpointAddr - let mut node_addr = EndpointAddr::new(node_id); + // In v0.95+, EndpointAddr is immutable and builder methods were removed. + // Create a minimal EndpointAddr with just the ID - Iroh's discovery system + // will automatically resolve addresses via pkarr/DNS if configured. + let node_addr = EndpointAddr::new(node_id); - // Add direct addresses - let mut direct_addrs = Vec::new(); - for addr_str in &self.node_addr_info.direct_addresses { - if let Ok(addr) = addr_str.parse() { - direct_addrs.push(addr); - } - } - if !direct_addrs.is_empty() { - node_addr = node_addr.with_direct_addresses(direct_addrs); - } - - // Add relay URL if present - if let Some(relay_url) = &self.node_addr_info.relay_url { - if let Ok(url) = relay_url.parse() { - node_addr = node_addr.with_relay_url(url); - } - } + // Note: Direct addresses and relay URLs from pairing code are now handled + // by Iroh's discovery system (pkarr/DNS) rather than being manually set. Ok(node_addr) } diff --git a/core/src/service/network/utils/connection.rs b/core/src/service/network/utils/connection.rs index 40d4aa85d..7bbc81abd 100644 --- a/core/src/service/network/utils/connection.rs +++ b/core/src/service/network/utils/connection.rs @@ -6,7 +6,7 @@ //! 
- Automatic connection reuse across all protocols use crate::service::network::{NetworkingError, Result}; -use iroh::{endpoint::Connection, Endpoint, NodeAddr, NodeId}; +use iroh::{endpoint::Connection, Endpoint, EndpointAddr, EndpointId}; use std::collections::HashMap; use std::sync::Arc; use tokio::sync::RwLock; @@ -29,9 +29,9 @@ use super::logging::NetworkLogger; /// * `Ok(Connection)` - Either cached or newly created connection /// * `Err(NetworkingError)` - If connection fails pub async fn get_or_create_connection( - connections: Arc), Connection>>>, + connections: Arc), Connection>>>, endpoint: &Endpoint, - node_id: NodeId, + node_id: EndpointId, alpn: &'static [u8], logger: &Arc, ) -> Result { @@ -64,7 +64,7 @@ pub async fn get_or_create_connection( } // Create new connection with specified ALPN - let node_addr = NodeAddr::new(node_id); + let node_addr = EndpointAddr::new(node_id); logger .info(&format!( "Creating new {} connection to node {}", diff --git a/core/src/service/network/utils/identity.rs b/core/src/service/network/utils/identity.rs index 21779a32a..0d5947526 100644 --- a/core/src/service/network/utils/identity.rs +++ b/core/src/service/network/utils/identity.rs @@ -17,11 +17,14 @@ pub struct NetworkIdentity { impl NetworkIdentity { /// Create a new random network identity pub async fn new() -> Result { - let secret_key = SecretKey::generate(&mut rand::thread_rng()); - let node_id = secret_key.public(); + // Generate random bytes for the secret key + use rand::RngCore; + let mut ed25519_seed = [0u8; 32]; + rand::thread_rng().fill_bytes(&mut ed25519_seed); - // Generate Ed25519 seed for backward compatibility - let ed25519_seed = rand::random(); + // Create Iroh secret key from random bytes + let secret_key = SecretKey::from_bytes(&ed25519_seed); + let node_id = secret_key.public(); Ok(Self { secret_key, From 0bee40bcce0bc16152162f923a8804929f69f80c Mon Sep 17 00:00:00 2001 From: Jamie Pine Date: Sat, 24 Jan 2026 16:08:48 -0800 Subject: [PATCH 3/4] refactor(network): update connection handling in ListLibraryDevicesQuery - Modified the connection type retrieval to use the .borrow() method for improved compatibility with the updated Iroh API. - Enhanced the logic for determining device connection status, ensuring accurate online/offline state updates based on current network conditions. - Added a TODO comment regarding the potential removal of the is_online column for better clarity in future refactoring. 
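
For reviewers updating other call sites to the v0.95 API, a minimal sketch of the dial-by-id pattern this series converges on. It uses only the calls exercised in these diffs (EndpointAddr::new, Endpoint::connect); the ALPN constant and the function name are illustrative, not identifiers from the codebase, and discovery (mDNS/pkarr/DNS) is assumed to be configured on the endpoint as in core/src/service/network/core/mod.rs.

    // Sketch only: assumes iroh v0.95 as used in this patch series.
    use iroh::{endpoint::Connection, Endpoint, EndpointAddr, EndpointId};

    // Illustrative ALPN; the real protocol ALPNs are defined elsewhere in core.
    const EXAMPLE_ALPN: &[u8] = b"example/proto/1";

    /// Dial a peer knowing only its EndpointId. EndpointAddr is immutable in
    /// v0.95+, so we build a bare address and let the endpoint's discovery
    /// services resolve direct addresses and relay URLs.
    async fn dial_by_id(endpoint: &Endpoint, id: EndpointId) -> anyhow::Result<Connection> {
        let addr = EndpointAddr::new(id);
        let conn = endpoint.connect(addr, EXAMPLE_ALPN).await?;
        Ok(conn)
    }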
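Likewise, a sketch of the connection-status check this commit settles on, using the Watcher trait's get() method as in the diff below. The helper name is illustrative, and the device-registry lookup that maps a device to its EndpointId is elided.

    // Sketch only: assumes the iroh v0.95 Watcher API used in this commit.
    use iroh::endpoint::ConnectionType;
    use iroh::{Endpoint, EndpointId, Watcher};

    /// Illustrative helper: true if iroh currently reports any path (direct,
    /// relay, or mixed) to the peer. conn_type() returns None when the endpoint
    /// has no address information for the peer at all (never connected).
    fn is_peer_connected(endpoint: &Endpoint, id: EndpointId) -> bool {
        match endpoint.conn_type(id) {
            Some(mut watcher) => !matches!(watcher.get(), ConnectionType::None),
            None => false,
        }
    }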
--- core/src/ops/devices/list/query.rs | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/core/src/ops/devices/list/query.rs b/core/src/ops/devices/list/query.rs index af3326939..d7b7b355a 100644 --- a/core/src/ops/devices/list/query.rs +++ b/core/src/ops/devices/list/query.rs @@ -6,6 +6,7 @@ use crate::{ domain::Device, infra::query::{LibraryQuery, QueryError, QueryResult}, }; +use iroh::Watcher; use sea_orm::{ColumnTrait, EntityTrait, QueryFilter, QueryOrder}; use serde::{Deserialize, Serialize}; use specta::Type; @@ -156,11 +157,16 @@ impl LibraryQuery for ListLibraryDevicesQuery { // Get node ID for this device if let Some(node_id) = registry.get_node_id_for_device(device_id) { // Use conn_type() API (replaces remote_info() removed in v0.93+) - if let Some(conn_type_watcher) = ep.conn_type(node_id) { - // Get current connection type from watcher (implements Deref) - let conn_type = *conn_type_watcher; - let conn_method = crate::domain::device::ConnectionMethod::from_iroh_connection_type(conn_type); - let is_connected = !matches!(conn_type, iroh::endpoint::ConnectionType::None); + if let Some(mut conn_type_watcher) = ep.conn_type(node_id) { + // Get current connection type from watcher using the Watcher trait's get() method + let conn_type = conn_type_watcher.get(); + // Check connection status first (before conn_type is moved) + let is_connected = + !matches!(conn_type, iroh::endpoint::ConnectionType::None); + let conn_method = + crate::domain::device::ConnectionMethod::from_iroh_connection_type( + conn_type, + ); (is_connected, conn_method) } else { // No address information exists for this endpoint (never connected) @@ -189,6 +195,7 @@ impl LibraryQuery for ListLibraryDevicesQuery { // Always update online/connected status based on current network state // (database is_online column can be stale for remote devices) + // TODO: remove that column imo existing.is_connected = is_actually_connected; existing.is_online = is_actually_connected; existing.connection_method = connection_method; @@ -215,7 +222,8 @@ impl LibraryQuery for ListLibraryDevicesQuery { } // Convert network DeviceInfo to domain Device - let device = Device::from_network_info(&info, is_actually_connected, connection_method); + let device = + Device::from_network_info(&info, is_actually_connected, connection_method); result.push(device); } } From 388b4dde28aceae95c090b2a8547e5ca2360fe42 Mon Sep 17 00:00:00 2001 From: Jamie Pine Date: Sat, 24 Jan 2026 16:14:21 -0800 Subject: [PATCH 4/4] cargo fmt --- apps/cli/src/domains/events/mod.rs | 44 +-- apps/cli/src/domains/index/mod.rs | 11 +- apps/cli/src/domains/sync/mod.rs | 42 ++- apps/cli/src/domains/volume/mod.rs | 6 +- apps/tauri/sd-tauri-core/src/lib.rs | 1 - core/examples/debug_volumes.rs | 22 +- core/examples/fingerprint_test.rs | 16 +- core/src/infra/daemon/rpc.rs | 4 +- core/src/infra/db/entities/device.rs | 275 ++++++++---------- core/src/infra/db/entities/mime_type.rs | 12 +- .../m20240101_000001_initial_schema.rs | 7 +- ...60123_000001_remove_legacy_sync_columns.rs | 6 +- core/src/infra/job/manager.rs | 72 +++-- core/src/infra/sync/registry.rs | 6 +- core/src/lib.rs | 49 ++-- core/src/library/mod.rs | 10 +- core/src/location/manager.rs | 19 +- core/src/ops/devices/update.rs | 14 +- core/src/ops/files/copy/action.rs | 23 +- core/src/ops/files/copy/database.rs | 9 +- core/src/ops/files/copy/job.rs | 33 ++- core/src/ops/files/query/file_by_path.rs | 4 +- core/src/ops/indexing/action.rs | 3 +- 
core/src/ops/indexing/database_storage.rs | 11 +- core/src/ops/indexing/ephemeral/cache.rs | 6 +- core/src/ops/indexing/ephemeral/index.rs | 8 +- core/src/ops/indexing/path_resolver.rs | 4 +- core/src/ops/indexing/phases/processing.rs | 25 +- core/src/ops/indexing/processor.rs | 9 +- core/src/ops/jobs/copy_metadata/query.rs | 12 +- core/src/ops/locations/validate/query.rs | 16 +- core/src/ops/media/ocr/job.rs | 12 +- core/src/ops/media/speech/job.rs | 12 +- core/src/ops/media/thumbnail/action.rs | 5 +- core/src/ops/media/thumbstrip/action.rs | 7 +- core/src/ops/network/status/query.rs | 4 +- core/src/ops/search/input.rs | 4 +- core/src/ops/search/query.rs | 23 +- core/src/ops/sync/get_sync_partners/action.rs | 4 +- core/src/ops/tags/create/action.rs | 2 +- core/src/ops/volumes/eject/action.rs | 10 +- core/src/ops/volumes/list/query.rs | 1 - core/src/ops/volumes/speed_test/action.rs | 4 +- core/src/service/network/core/event_loop.rs | 4 +- core/src/service/network/core/mod.rs | 27 +- core/src/service/network/device/registry.rs | 17 +- .../src/service/network/protocol/messaging.rs | 73 +++-- .../network/protocol/pairing/initiator.rs | 64 ++-- .../service/network/protocol/pairing/mod.rs | 44 +-- .../service/network/protocol/pairing/types.rs | 16 +- .../protocol/pairing/vouching_queue.rs | 10 +- core/src/service/sync/backfill.rs | 35 ++- core/src/service/sync/protocol_handler.rs | 6 +- core/src/testing/integration_utils.rs | 16 +- core/src/volume/fs/apfs.rs | 9 +- core/src/volume/platform/linux.rs | 5 +- core/src/volume/platform/windows.rs | 5 +- core/tests/copy_progress_test.rs | 29 +- core/tests/cross_device_copy_test.rs | 2 +- core/tests/file_copy_pull_test.rs | 2 +- core/tests/file_move_test.rs | 13 +- core/tests/file_sync_test.rs | 32 +- core/tests/folder_rename_test.rs | 13 +- core/tests/location_export_import_test.rs | 6 +- core/tests/proxy_pairing_protocol_test.rs | 20 +- core/tests/proxy_pairing_test.rs | 42 ++- core/tests/sync_backfill_race_test.rs | 16 +- core/tests/sync_backfill_test.rs | 73 ++++- core/tests/sync_setup_test.rs | 3 +- core/tests/transitive_sync_backfill_test.rs | 12 +- core/tests/volume_detection_test.rs | 8 +- core/tests/volume_tracking_test.rs | 2 +- xtask/src/main.rs | 11 +- 73 files changed, 876 insertions(+), 606 deletions(-) diff --git a/apps/cli/src/domains/events/mod.rs b/apps/cli/src/domains/events/mod.rs index 5fc699f96..e944fca0a 100644 --- a/apps/cli/src/domains/events/mod.rs +++ b/apps/cli/src/domains/events/mod.rs @@ -99,9 +99,7 @@ fn display_event(event: &Event, args: &EventsMonitorArgs) { // Determine output format (new format flag takes precedence over legacy flags) let use_json_pretty = matches!(args.format, OutputFormat::JsonPretty) || args.pretty; - let use_json = matches!(args.format, OutputFormat::Json) - || use_json_pretty - || args.verbose; + let use_json = matches!(args.format, OutputFormat::Json) || use_json_pretty || args.verbose; if use_json { // JSON mode: show full JSON @@ -441,25 +439,28 @@ fn summarize_event(event: &Event) -> String { format!("Sync error: {}", message) } - // Proxy pairing events - Event::ProxyPairingConfirmationRequired { - vouchee_device_name, - voucher_device_name, - .. - } => { - format!( - "Proxy pairing confirmation required: {} vouched by {}", - vouchee_device_name, voucher_device_name - ) - } - Event::ProxyPairingVouchingReady { - vouchee_device_id, .. 
- } => { - format!("Proxy pairing vouching ready for device {}", vouchee_device_id) - } + // Proxy pairing events + Event::ProxyPairingConfirmationRequired { + vouchee_device_name, + voucher_device_name, + .. + } => { + format!( + "Proxy pairing confirmation required: {} vouched by {}", + vouchee_device_name, voucher_device_name + ) + } + Event::ProxyPairingVouchingReady { + vouchee_device_id, .. + } => { + format!( + "Proxy pairing vouching ready for device {}", + vouchee_device_id + ) + } - // Config events - Event::ConfigChanged { .. } => "Configuration changed".to_string(), + // Config events + Event::ConfigChanged { .. } => "Configuration changed".to_string(), // Custom events Event::Custom { event_type, data } => { @@ -472,4 +473,3 @@ fn summarize_event(event: &Event) -> String { } } } -} diff --git a/apps/cli/src/domains/index/mod.rs b/apps/cli/src/domains/index/mod.rs index dc6380394..16f288109 100644 --- a/apps/cli/src/domains/index/mod.rs +++ b/apps/cli/src/domains/index/mod.rs @@ -332,7 +332,8 @@ pub async fn run(ctx: &Context, cmd: IndexCmd) -> Result<()> { &format_bytes(breakdown.path_index_overhead as u64), &format_bytes(breakdown.path_index_entries as u64), &format_bytes( - (breakdown.path_index_overhead + breakdown.path_index_entries) as u64 + (breakdown.path_index_overhead + breakdown.path_index_entries) + as u64, ), ]); breakdown_table.add_row(vec![ @@ -340,7 +341,8 @@ pub async fn run(ctx: &Context, cmd: IndexCmd) -> Result<()> { &format_bytes(breakdown.entry_uuids_overhead as u64), &format_bytes(breakdown.entry_uuids_entries as u64), &format_bytes( - (breakdown.entry_uuids_overhead + breakdown.entry_uuids_entries) as u64 + (breakdown.entry_uuids_overhead + breakdown.entry_uuids_entries) + as u64, ), ]); breakdown_table.add_row(vec![ @@ -349,13 +351,12 @@ pub async fn run(ctx: &Context, cmd: IndexCmd) -> Result<()> { &format_bytes(breakdown.content_kinds_entries as u64), &format_bytes( (breakdown.content_kinds_overhead + breakdown.content_kinds_entries) - as u64 + as u64, ), ]); let total = breakdown.arena - + breakdown.cache - + breakdown.registry + + breakdown.cache + breakdown.registry + breakdown.path_index_overhead + breakdown.path_index_entries + breakdown.entry_uuids_overhead diff --git a/apps/cli/src/domains/sync/mod.rs b/apps/cli/src/domains/sync/mod.rs index 2a0748e2e..0367296f0 100644 --- a/apps/cli/src/domains/sync/mod.rs +++ b/apps/cli/src/domains/sync/mod.rs @@ -620,7 +620,11 @@ async fn show_partners(ctx: &Context) -> Result<()> { println!(" - Paired devices do not have sync_enabled=true"); println!(); } else { - println!(" {} {} sync partner(s) available", "ā—".green(), output.partners.len()); + println!( + " {} {} sync partner(s) available", + "ā—".green(), + output.partners.len() + ); println!(); let mut table = Table::new(); @@ -653,10 +657,22 @@ async fn show_partners(ctx: &Context) -> Result<()> { println!("{}", "Library Membership Debug".dark_grey().bold()); println!("{}", "─".repeat(60).dark_grey()); println!(); - println!(" Total devices in library: {}", output.debug_info.total_devices); - println!(" Devices with sync_enabled: {}", output.debug_info.sync_enabled_devices); - println!(" Devices with NodeId mapping: {}", output.debug_info.paired_devices); - println!(" Final sync partners: {}", output.debug_info.final_sync_partners); + println!( + " Total devices in library: {}", + output.debug_info.total_devices + ); + println!( + " Devices with sync_enabled: {}", + output.debug_info.sync_enabled_devices + ); + println!( + " Devices with NodeId 
mapping: {}", + output.debug_info.paired_devices + ); + println!( + " Final sync partners: {}", + output.debug_info.final_sync_partners + ); println!(); if !output.debug_info.device_details.is_empty() { @@ -665,10 +681,18 @@ async fn show_partners(ctx: &Context) -> Result<()> { .load_preset(UTF8_FULL) .set_content_arrangement(ContentArrangement::Dynamic) .set_header(Row::from(vec![ - Cell::new("Device").add_attribute(Attribute::Bold).fg(Color::DarkGrey), - Cell::new("Sync Enabled").add_attribute(Attribute::Bold).fg(Color::DarkGrey), - Cell::new("Has NodeId").add_attribute(Attribute::Bold).fg(Color::DarkGrey), - Cell::new("NodeId").add_attribute(Attribute::Bold).fg(Color::DarkGrey), + Cell::new("Device") + .add_attribute(Attribute::Bold) + .fg(Color::DarkGrey), + Cell::new("Sync Enabled") + .add_attribute(Attribute::Bold) + .fg(Color::DarkGrey), + Cell::new("Has NodeId") + .add_attribute(Attribute::Bold) + .fg(Color::DarkGrey), + Cell::new("NodeId") + .add_attribute(Attribute::Bold) + .fg(Color::DarkGrey), ])); for device in &output.debug_info.device_details { diff --git a/apps/cli/src/domains/volume/mod.rs b/apps/cli/src/domains/volume/mod.rs index c58a2ca6b..62db9b373 100644 --- a/apps/cli/src/domains/volume/mod.rs +++ b/apps/cli/src/domains/volume/mod.rs @@ -86,9 +86,9 @@ pub async fn run(ctx: &Context, cmd: VolumeCmd) -> Result<()> { println!(" ID: {}", volume.id); println!(" Fingerprint: {}", volume.fingerprint); println!(" Type: {:?}", volume.volume_type); -println!(" Mount: {}", volume.mount_point.display()); -println!(" Mounted: {}", volume.is_mounted); -println!(" Tracked: {}", volume.is_tracked); + println!(" Mount: {}", volume.mount_point.display()); + println!(" Mounted: {}", volume.is_mounted); + println!(" Tracked: {}", volume.is_tracked); println!(); } } diff --git a/apps/tauri/sd-tauri-core/src/lib.rs b/apps/tauri/sd-tauri-core/src/lib.rs index 2f3f32f73..b56428d7c 100644 --- a/apps/tauri/sd-tauri-core/src/lib.rs +++ b/apps/tauri/sd-tauri-core/src/lib.rs @@ -43,7 +43,6 @@ pub mod commands { /// Platform-specific data directory resolution pub fn default_data_dir() -> anyhow::Result { - #[cfg(target_os = "macos")] let dir = dirs::data_dir() .ok_or_else(|| anyhow::anyhow!("Could not determine data directory"))? 
diff --git a/core/examples/debug_volumes.rs b/core/examples/debug_volumes.rs index f00ce0f20..0059ab661 100644 --- a/core/examples/debug_volumes.rs +++ b/core/examples/debug_volumes.rs @@ -32,11 +32,18 @@ async fn main() { for vol in &volumes { println!("Volume: {}", vol.name); - println!(" Display name: {}", vol.display_name.as_ref().unwrap_or(&"None".to_string())); + println!( + " Display name: {}", + vol.display_name.as_ref().unwrap_or(&"None".to_string()) + ); println!(" Mount point: {}", vol.mount_point.display()); println!(" Type: {:?}", vol.volume_type); println!(" Filesystem: {}", vol.file_system); - println!(" Fingerprint: {} ({})", vol.fingerprint.short_id(), vol.fingerprint.0); + println!( + " Fingerprint: {} ({})", + vol.fingerprint.short_id(), + vol.fingerprint.0 + ); println!(" Is user visible: {}", vol.is_user_visible); println!(" Auto-track eligible: {}", vol.auto_track_eligible); println!(" Is tracked: {}", vol.is_tracked); @@ -44,14 +51,15 @@ async fn main() { } // Show specifically which volumes are auto-track eligible - let auto_track: Vec<_> = volumes - .iter() - .filter(|v| v.auto_track_eligible) - .collect(); + let auto_track: Vec<_> = volumes.iter().filter(|v| v.auto_track_eligible).collect(); println!("=== Auto-Track Eligible Volumes ({}) ===", auto_track.len()); for vol in auto_track { - println!(" - {} ({})", vol.display_name.as_ref().unwrap_or(&vol.name), vol.mount_point.display()); + println!( + " - {} ({})", + vol.display_name.as_ref().unwrap_or(&vol.name), + vol.mount_point.display() + ); } // Show Primary volumes specifically diff --git a/core/examples/fingerprint_test.rs b/core/examples/fingerprint_test.rs index a2fb4d66f..b374641a6 100644 --- a/core/examples/fingerprint_test.rs +++ b/core/examples/fingerprint_test.rs @@ -28,7 +28,11 @@ fn main() { let fp_ext1 = VolumeFingerprint::from_external_volume(spacedrive_id, device_id); let fp_ext2 = VolumeFingerprint::from_external_volume(spacedrive_id, device_id); - println!(" With same dotfile UUID: {} == {}", fp_ext1.short_id(), fp_ext2.short_id()); + println!( + " With same dotfile UUID: {} == {}", + fp_ext1.short_id(), + fp_ext2.short_id() + ); println!(" Match: {}\n", fp_ext1 == fp_ext2); // Test 3: Network volume stability @@ -53,7 +57,10 @@ fn main() { println!(" Mount at /Volumes/MyDrive: {}", fp_mount1.short_id()); println!(" Mount at /Volumes/MyDrive1: {}", fp_mount2.short_id()); - println!(" Different: {} (expected for primary volumes)\n", fp_mount1 != fp_mount2); + println!( + " Different: {} (expected for primary volumes)\n", + fp_mount1 != fp_mount2 + ); // Test 5: External volume - Same dotfile UUID, different mount points println!("Test 5: External volume - Dotfile UUID stable across remounts"); @@ -63,7 +70,10 @@ fn main() { println!(" Mounted at /Volumes/USB: {}", fp_at_mount1.short_id()); println!(" Mounted at /Volumes/USB1: {}", fp_at_mount2.short_id()); - println!(" Match: {} (dotfile UUID is stable!)\n", fp_at_mount1 == fp_at_mount2); + println!( + " Match: {} (dotfile UUID is stable!)\n", + fp_at_mount1 == fp_at_mount2 + ); // Summary println!("=== Summary ==="); diff --git a/core/src/infra/daemon/rpc.rs b/core/src/infra/daemon/rpc.rs index 8e3aa781b..655d53058 100644 --- a/core/src/infra/daemon/rpc.rs +++ b/core/src/infra/daemon/rpc.rs @@ -132,7 +132,9 @@ impl RpcServer { } /// Start the event broadcaster that forwards core events to subscribed connections - async fn start_event_broadcaster(&self) -> Result<(), Box> { + async fn start_event_broadcaster( + &self, + ) -> Result<(), Box> { let 
core = self.core.clone(); // Make the core's LogBus globally available to the LogEventLayer diff --git a/core/src/infra/db/entities/device.rs b/core/src/infra/db/entities/device.rs index 0d7fde916..ef348c972 100644 --- a/core/src/infra/db/entities/device.rs +++ b/core/src/infra/db/entities/device.rs @@ -251,156 +251,143 @@ impl crate::infra::sync::Syncable for Model { .unwrap_or(serde_json::Value::String("Unknown".to_string())), ) .unwrap_or_else(|_| "Unknown".to_string())), - os_version: Set( - data.get("os_version") - .filter(|v| !v.is_null()) - .map(|v| { - serde_json::from_value::(v.clone()).map_err(|e| { - sea_orm::DbErr::Custom(format!("Invalid os_version: {}", e)) - }) + os_version: Set(data + .get("os_version") + .filter(|v| !v.is_null()) + .map(|v| { + serde_json::from_value::(v.clone()).map_err(|e| { + sea_orm::DbErr::Custom(format!("Invalid os_version: {}", e)) }) - .transpose()?, - ), - hardware_model: Set( - data.get("hardware_model") - .filter(|v| !v.is_null()) - .map(|v| { - serde_json::from_value::(v.clone()).map_err(|e| { - sea_orm::DbErr::Custom(format!("Invalid hardware_model: {}", e)) - }) + }) + .transpose()?), + hardware_model: Set(data + .get("hardware_model") + .filter(|v| !v.is_null()) + .map(|v| { + serde_json::from_value::(v.clone()).map_err(|e| { + sea_orm::DbErr::Custom(format!("Invalid hardware_model: {}", e)) }) - .transpose()?, - ), - cpu_model: Set( - data.get("cpu_model") - .filter(|v| !v.is_null()) - .map(|v| { - serde_json::from_value::(v.clone()).map_err(|e| { - sea_orm::DbErr::Custom(format!("Invalid cpu_model: {}", e)) - }) + }) + .transpose()?), + cpu_model: Set(data + .get("cpu_model") + .filter(|v| !v.is_null()) + .map(|v| { + serde_json::from_value::(v.clone()).map_err(|e| { + sea_orm::DbErr::Custom(format!("Invalid cpu_model: {}", e)) }) - .transpose()?, - ), - cpu_architecture: Set( - data.get("cpu_architecture") - .filter(|v| !v.is_null()) - .map(|v| { - serde_json::from_value::(v.clone()).map_err(|e| { - sea_orm::DbErr::Custom(format!("Invalid cpu_architecture: {}", e)) - }) + }) + .transpose()?), + cpu_architecture: Set(data + .get("cpu_architecture") + .filter(|v| !v.is_null()) + .map(|v| { + serde_json::from_value::(v.clone()).map_err(|e| { + sea_orm::DbErr::Custom(format!("Invalid cpu_architecture: {}", e)) }) - .transpose()?, - ), - cpu_cores_physical: Set( - data.get("cpu_cores_physical") - .filter(|v| !v.is_null()) - .map(|v| { - serde_json::from_value::(v.clone()).map_err(|e| { - sea_orm::DbErr::Custom(format!("Invalid cpu_cores_physical: {}", e)) - }) + }) + .transpose()?), + cpu_cores_physical: Set(data + .get("cpu_cores_physical") + .filter(|v| !v.is_null()) + .map(|v| { + serde_json::from_value::(v.clone()).map_err(|e| { + sea_orm::DbErr::Custom(format!("Invalid cpu_cores_physical: {}", e)) }) - .transpose()?, - ), - cpu_cores_logical: Set( - data.get("cpu_cores_logical") - .filter(|v| !v.is_null()) - .map(|v| { - serde_json::from_value::(v.clone()).map_err(|e| { - sea_orm::DbErr::Custom(format!("Invalid cpu_cores_logical: {}", e)) - }) + }) + .transpose()?), + cpu_cores_logical: Set(data + .get("cpu_cores_logical") + .filter(|v| !v.is_null()) + .map(|v| { + serde_json::from_value::(v.clone()).map_err(|e| { + sea_orm::DbErr::Custom(format!("Invalid cpu_cores_logical: {}", e)) }) - .transpose()?, - ), - cpu_frequency_mhz: Set( - data.get("cpu_frequency_mhz") - .filter(|v| !v.is_null()) - .map(|v| { - serde_json::from_value::(v.clone()).map_err(|e| { - sea_orm::DbErr::Custom(format!("Invalid cpu_frequency_mhz: {}", e)) - }) + }) + 
.transpose()?), + cpu_frequency_mhz: Set(data + .get("cpu_frequency_mhz") + .filter(|v| !v.is_null()) + .map(|v| { + serde_json::from_value::(v.clone()).map_err(|e| { + sea_orm::DbErr::Custom(format!("Invalid cpu_frequency_mhz: {}", e)) }) - .transpose()?, - ), - memory_total_bytes: Set( - data.get("memory_total_bytes") - .filter(|v| !v.is_null()) - .map(|v| { - serde_json::from_value::(v.clone()).map_err(|e| { - sea_orm::DbErr::Custom(format!("Invalid memory_total_bytes: {}", e)) - }) + }) + .transpose()?), + memory_total_bytes: Set(data + .get("memory_total_bytes") + .filter(|v| !v.is_null()) + .map(|v| { + serde_json::from_value::(v.clone()).map_err(|e| { + sea_orm::DbErr::Custom(format!("Invalid memory_total_bytes: {}", e)) }) - .transpose()?, - ), - form_factor: Set( - data.get("form_factor") - .filter(|v| !v.is_null()) - .map(|v| { - serde_json::from_value::(v.clone()).map_err(|e| { - sea_orm::DbErr::Custom(format!("Invalid form_factor: {}", e)) - }) + }) + .transpose()?), + form_factor: Set(data + .get("form_factor") + .filter(|v| !v.is_null()) + .map(|v| { + serde_json::from_value::(v.clone()).map_err(|e| { + sea_orm::DbErr::Custom(format!("Invalid form_factor: {}", e)) }) - .transpose()?, - ), - manufacturer: Set( - data.get("manufacturer") - .filter(|v| !v.is_null()) - .map(|v| { - serde_json::from_value::(v.clone()).map_err(|e| { - sea_orm::DbErr::Custom(format!("Invalid manufacturer: {}", e)) - }) + }) + .transpose()?), + manufacturer: Set(data + .get("manufacturer") + .filter(|v| !v.is_null()) + .map(|v| { + serde_json::from_value::(v.clone()).map_err(|e| { + sea_orm::DbErr::Custom(format!("Invalid manufacturer: {}", e)) }) - .transpose()?, - ), - gpu_models: Set( - data.get("gpu_models") - .filter(|v| !v.is_null()) - .map(|v| { - serde_json::from_value::(v.clone()).map_err(|e| { - sea_orm::DbErr::Custom(format!("Invalid gpu_models: {}", e)) - }) + }) + .transpose()?), + gpu_models: Set(data + .get("gpu_models") + .filter(|v| !v.is_null()) + .map(|v| { + serde_json::from_value::(v.clone()).map_err(|e| { + sea_orm::DbErr::Custom(format!("Invalid gpu_models: {}", e)) }) - .transpose()?, - ), - boot_disk_type: Set( - data.get("boot_disk_type") - .filter(|v| !v.is_null()) - .map(|v| { - serde_json::from_value::(v.clone()).map_err(|e| { - sea_orm::DbErr::Custom(format!("Invalid boot_disk_type: {}", e)) - }) + }) + .transpose()?), + boot_disk_type: Set(data + .get("boot_disk_type") + .filter(|v| !v.is_null()) + .map(|v| { + serde_json::from_value::(v.clone()).map_err(|e| { + sea_orm::DbErr::Custom(format!("Invalid boot_disk_type: {}", e)) }) - .transpose()?, - ), - boot_disk_capacity_bytes: Set( - data.get("boot_disk_capacity_bytes") - .filter(|v| !v.is_null()) - .map(|v| { - serde_json::from_value::(v.clone()).map_err(|e| { - sea_orm::DbErr::Custom(format!("Invalid boot_disk_capacity_bytes: {}", e)) - }) + }) + .transpose()?), + boot_disk_capacity_bytes: Set(data + .get("boot_disk_capacity_bytes") + .filter(|v| !v.is_null()) + .map(|v| { + serde_json::from_value::(v.clone()).map_err(|e| { + sea_orm::DbErr::Custom(format!( + "Invalid boot_disk_capacity_bytes: {}", + e + )) }) - .transpose()?, - ), - swap_total_bytes: Set( - data.get("swap_total_bytes") - .filter(|v| !v.is_null()) - .map(|v| { - serde_json::from_value::(v.clone()).map_err(|e| { - sea_orm::DbErr::Custom(format!("Invalid swap_total_bytes: {}", e)) - }) + }) + .transpose()?), + swap_total_bytes: Set(data + .get("swap_total_bytes") + .filter(|v| !v.is_null()) + .map(|v| { + serde_json::from_value::(v.clone()).map_err(|e| 
{ + sea_orm::DbErr::Custom(format!("Invalid swap_total_bytes: {}", e)) }) - .transpose()?, - ), - network_addresses: Set( - serde_json::from_value( - data.get("network_addresses") - .cloned() - .unwrap_or(serde_json::json!([])), - ) - .map_err(|e| { - sea_orm::DbErr::Custom(format!("Invalid network_addresses: {}", e)) - })?, - ), + }) + .transpose()?), + network_addresses: Set(serde_json::from_value( + data.get("network_addresses") + .cloned() + .unwrap_or(serde_json::json!([])), + ) + .map_err(|e| { + sea_orm::DbErr::Custom(format!("Invalid network_addresses: {}", e)) + })?), is_online: Set(serde_json::from_value( data.get("is_online") .cloned() @@ -413,16 +400,12 @@ impl crate::infra::sync::Syncable for Model { .unwrap_or_else(|| serde_json::json!(chrono::Utc::now())), ) .unwrap_or_else(|_| chrono::Utc::now().into())), - capabilities: Set( - serde_json::from_value( - data.get("capabilities") - .cloned() - .unwrap_or(serde_json::json!({})), - ) - .map_err(|e| { - sea_orm::DbErr::Custom(format!("Invalid capabilities: {}", e)) - })?, - ), + capabilities: Set(serde_json::from_value( + data.get("capabilities") + .cloned() + .unwrap_or(serde_json::json!({})), + ) + .map_err(|e| sea_orm::DbErr::Custom(format!("Invalid capabilities: {}", e)))?), created_at: Set(chrono::Utc::now().into()), updated_at: Set(chrono::Utc::now().into()), sync_enabled: Set(serde_json::from_value( diff --git a/core/src/infra/db/entities/mime_type.rs b/core/src/infra/db/entities/mime_type.rs index bc40a3825..f546fd5a4 100644 --- a/core/src/infra/db/entities/mime_type.rs +++ b/core/src/infra/db/entities/mime_type.rs @@ -117,13 +117,11 @@ impl Syncable for Model { // Cursor-based pagination with tie-breaker if let Some((cursor_ts, cursor_uuid)) = cursor { query = query.filter( - Condition::any() - .add(Column::CreatedAt.gt(cursor_ts)) - .add( - Condition::all() - .add(Column::CreatedAt.eq(cursor_ts)) - .add(Column::Uuid.gt(cursor_uuid)), - ), + Condition::any().add(Column::CreatedAt.gt(cursor_ts)).add( + Condition::all() + .add(Column::CreatedAt.eq(cursor_ts)) + .add(Column::Uuid.gt(cursor_uuid)), + ), ); } diff --git a/core/src/infra/db/migration/m20240101_000001_initial_schema.rs b/core/src/infra/db/migration/m20240101_000001_initial_schema.rs index 5cf2f21a9..10b35a987 100644 --- a/core/src/infra/db/migration/m20240101_000001_initial_schema.rs +++ b/core/src/infra/db/migration/m20240101_000001_initial_schema.rs @@ -113,7 +113,12 @@ impl MigrationTrait for Migration { .auto_increment() .primary_key(), ) - .col(ColumnDef::new(MimeTypes::Uuid).uuid().not_null().unique_key()) + .col( + ColumnDef::new(MimeTypes::Uuid) + .uuid() + .not_null() + .unique_key(), + ) .col( ColumnDef::new(MimeTypes::MimeType) .string() diff --git a/core/src/infra/db/migration/m20260123_000001_remove_legacy_sync_columns.rs b/core/src/infra/db/migration/m20260123_000001_remove_legacy_sync_columns.rs index 3c0691c0a..06fb92d8d 100644 --- a/core/src/infra/db/migration/m20260123_000001_remove_legacy_sync_columns.rs +++ b/core/src/infra/db/migration/m20260123_000001_remove_legacy_sync_columns.rs @@ -40,10 +40,8 @@ impl MigrationTrait for Migration { // Restore columns for rollback let db = manager.get_connection(); - db.execute_unprepared( - "ALTER TABLE devices ADD COLUMN last_sync_at TEXT DEFAULT NULL", - ) - .await?; + db.execute_unprepared("ALTER TABLE devices ADD COLUMN last_sync_at TEXT DEFAULT NULL") + .await?; db.execute_unprepared( "ALTER TABLE devices ADD COLUMN last_state_watermark TEXT DEFAULT NULL", diff --git 
a/core/src/infra/job/manager.rs b/core/src/infra/job/manager.rs index 1344961b3..70aff9be1 100644 --- a/core/src/infra/job/manager.rs +++ b/core/src/infra/job/manager.rs @@ -419,13 +419,16 @@ impl JobManager { }; // Emit final progress event if one exists (may have been throttled) - if let Some(final_progress) = latest_progress_for_monitor.lock().await.as_ref() { + if let Some(final_progress) = + latest_progress_for_monitor.lock().await.as_ref() + { let generic_progress = match final_progress { Progress::Structured(value) => { // Try to deserialize CopyProgress and convert to GenericProgress - if let Ok(copy_progress) = serde_json::from_value::< - crate::ops::files::copy::CopyProgress, - >(value.clone()) + if let Ok(copy_progress) = + serde_json::from_value::< + crate::ops::files::copy::CopyProgress, + >(value.clone()) { use crate::infra::job::generic_progress::ToGenericProgress; Some(copy_progress.to_generic_progress()) @@ -441,7 +444,8 @@ impl JobManager { job_id: job_id_clone.to_string(), job_type: job_type_str.to_string(), device_id, - progress: final_progress.as_percentage().unwrap_or(0.0) as f64, + progress: final_progress.as_percentage().unwrap_or(0.0) + as f64, message: Some(final_progress.to_string()), generic_progress, }); @@ -646,7 +650,8 @@ impl JobManager { // Database persistence (only for non-ephemeral jobs) if should_persist { if last_db_update.elapsed() >= DB_UPDATE_INTERVAL { - if let Err(e) = job_db_clone.update_progress(job_id_clone, &progress).await { + if let Err(e) = job_db_clone.update_progress(job_id_clone, &progress).await + { debug!("Failed to persist job progress to database: {}", e); } last_db_update = std::time::Instant::now(); @@ -845,13 +850,16 @@ impl JobManager { }; // Emit final progress event if one exists (may have been throttled) - if let Some(final_progress) = latest_progress_for_monitor.lock().await.as_ref() { + if let Some(final_progress) = + latest_progress_for_monitor.lock().await.as_ref() + { let generic_progress = match final_progress { Progress::Structured(value) => { // Try to deserialize CopyProgress and convert to GenericProgress - if let Ok(copy_progress) = serde_json::from_value::< - crate::ops::files::copy::CopyProgress, - >(value.clone()) + if let Ok(copy_progress) = + serde_json::from_value::< + crate::ops::files::copy::CopyProgress, + >(value.clone()) { use crate::infra::job::generic_progress::ToGenericProgress; Some(copy_progress.to_generic_progress()) @@ -867,7 +875,8 @@ impl JobManager { job_id: job_id_clone.to_string(), job_type: job_type_str.to_string(), device_id, - progress: final_progress.as_percentage().unwrap_or(0.0) as f64, + progress: final_progress.as_percentage().unwrap_or(0.0) + as f64, message: Some(final_progress.to_string()), generic_progress, }); @@ -1071,7 +1080,9 @@ impl JobManager { }; // Get job data from in-memory struct (for non-persisted jobs) or database - let (job_name, action_type, action_context) = if let Some(ctx) = &running_job.action_context { + let (job_name, action_type, action_context) = if let Some(ctx) = + &running_job.action_context + { // Use in-memory action_context (for ephemeral volume jobs) let action_context_info = ActionContextInfo { action_type: ctx.action_type.clone(), @@ -1508,11 +1519,15 @@ impl JobManager { let latest_progress_for_monitor = latest_progress.clone(); // Deserialize action context from database if available - let action_context = if let Some(context_data) = &job_record.action_context { - rmp_serde::from_slice::(context_data).ok() - } else { - None - }; + let 
action_context = + if let Some(context_data) = &job_record.action_context { + rmp_serde::from_slice::< + crate::infra::action::context::ActionContext, + >(context_data) + .ok() + } else { + None + }; self.running_jobs.write().await.insert( job_id, @@ -1807,7 +1822,10 @@ impl JobManager { .await?; if result.rows_affected == 0 { - return Err(JobError::NotFound(format!("Job {} not found in database", job_id))); + return Err(JobError::NotFound(format!( + "Job {} not found in database", + job_id + ))); } } @@ -1816,7 +1834,12 @@ impl JobManager { return Err(JobError::NotFound(format!("Job {} not found", job_id))); } - info!("Job {} cancelled (in memory: {}, in db: {})", job_id, is_in_memory, db_job.is_some()); + info!( + "Job {} cancelled (in memory: {}, in db: {})", + job_id, + is_in_memory, + db_job.is_some() + ); Ok(()) } @@ -1855,12 +1878,19 @@ impl JobManager { // Deserialize action context from database if available let action_context = if let Some(context_data) = &job_record.action_context { - rmp_serde::from_slice::(context_data).ok() + rmp_serde::from_slice::( + context_data, + ) + .ok() } else { None }; - Some((job_record.name.clone(), job_record.state.clone(), action_context)) + Some(( + job_record.name.clone(), + job_record.state.clone(), + action_context, + )) } }; diff --git a/core/src/infra/sync/registry.rs b/core/src/infra/sync/registry.rs index 54c3a1e8c..775de365e 100644 --- a/core/src/infra/sync/registry.rs +++ b/core/src/infra/sync/registry.rs @@ -923,7 +923,11 @@ mod tests { println!("Registered syncable models ({}):", models.len()); for model in &models { let reg = registry.get(model).unwrap(); - let sync_type = if reg.is_device_owned { "device-owned" } else { "shared" }; + let sync_type = if reg.is_device_owned { + "device-owned" + } else { + "shared" + }; println!(" - {} ({})", model, sync_type); } diff --git a/core/src/lib.rs b/core/src/lib.rs index fed5f4071..7d79a0fda 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -481,7 +481,9 @@ impl Core { } /// Initialize networking using master key - pub async fn init_networking(&mut self) -> Result<(), Box> { + pub async fn init_networking( + &mut self, + ) -> Result<(), Box> { self.init_networking_with_logger(Arc::new(service::network::SilentLogger)) .await } @@ -519,27 +521,30 @@ impl Core { if let Some(networking_service) = self.services.networking() { // Register default protocol handlers only if networking was just initialized // (if networking was already initialized during Core::new(), protocols are already registered) - if !already_initialized { - logger.info("Registering protocol handlers...").await; - self.register_default_protocols(&networking_service).await?; - } else { - logger - .info("Protocol handlers already registered during initialization") - .await; - - // Reload protocol configs even when networking is already initialized - // This allows tests and runtime config changes to take effect - logger.info("Reloading protocol configs from disk...").await; - if let Err(e) = reload_protocol_configs(&networking_service, &self.config.read().await.data_dir).await { - logger - .warn(&format!("Failed to reload some protocol configs: {}", e)) - .await; + if !already_initialized { + logger.info("Registering protocol handlers...").await; + self.register_default_protocols(&networking_service).await?; } else { - logger.info("Protocol configs reloaded successfully").await; - } - } + logger + .info("Protocol handlers already registered during initialization") + .await; - // Set up event bridge to integrate with core event 
system (only if not already done) + // Reload protocol configs even when networking is already initialized + // This allows tests and runtime config changes to take effect + logger.info("Reloading protocol configs from disk...").await; + if let Err(e) = + reload_protocol_configs(&networking_service, &self.config.read().await.data_dir) + .await + { + logger + .warn(&format!("Failed to reload some protocol configs: {}", e)) + .await; + } else { + logger.info("Protocol configs reloaded successfully").await; + } + } + + // Set up event bridge to integrate with core event system (only if not already done) if !already_initialized { let event_bridge = NetworkEventBridge::new( networking_service.subscribe_events(), @@ -759,8 +764,8 @@ async fn reload_protocol_configs( if let Some(handler) = guard.get_handler("pairing") { if let Some(pairing_handler) = handler .as_any() - .downcast_ref::() - { + .downcast_ref::( + ) { pairing_handler .set_proxy_config(app_config.proxy_pairing) .await; diff --git a/core/src/library/mod.rs b/core/src/library/mod.rs index 1221f8050..e34ad06d6 100644 --- a/core/src/library/mod.rs +++ b/core/src/library/mod.rs @@ -1409,7 +1409,9 @@ impl Library { // Count and size files in thumbnails directory (legacy structure) if thumbnails_dir.exists() { - let (count, size) = self.count_and_size_recursive(thumbnails_dir.clone()).await?; + let (count, size) = self + .count_and_size_recursive(thumbnails_dir.clone()) + .await?; total_count += count; total_size += size; debug!( @@ -1441,7 +1443,8 @@ impl Library { while let Some(entry) = entries.next_entry().await? { let file_type = entry.file_type().await?; if file_type.is_dir() { - let (sub_count, sub_size) = Box::pin(self.count_and_size_recursive_impl(entry.path())).await?; + let (sub_count, sub_size) = + Box::pin(self.count_and_size_recursive_impl(entry.path())).await?; count += sub_count; size += sub_size; } else if file_type.is_file() { @@ -1796,7 +1799,8 @@ impl Library { while let Some(entry) = entries.next_entry().await? { let file_type = entry.file_type().await?; if file_type.is_dir() { - let (sub_count, sub_size) = Box::pin(Self::count_and_size_recursive_static_impl(entry.path())).await?; + let (sub_count, sub_size) = + Box::pin(Self::count_and_size_recursive_static_impl(entry.path())).await?; count += sub_count; size += sub_size; } else if file_type.is_file() { diff --git a/core/src/location/manager.rs b/core/src/location/manager.rs index a9c5cea9f..aa2cc8d3c 100644 --- a/core/src/location/manager.rs +++ b/core/src/location/manager.rs @@ -103,7 +103,11 @@ impl LocationManager { } } Err(e) => { - warn!("Failed to get metadata for location root {}: {}", path.display(), e); + warn!( + "Failed to get metadata for location root {}: {}", + path.display(), + e + ); None } } @@ -132,14 +136,19 @@ impl LocationManager { // Resolve volume for this location path BEFORE creating the entry // Volume detection is required - all locations must have a volume - let volume_id = match volume_manager.resolve_volume_for_sdpath(&sd_path, &library).await { + let volume_id = match volume_manager + .resolve_volume_for_sdpath(&sd_path, &library) + .await + { Ok(Some(volume)) => { info!("Resolved volume '{}' for location path", volume.name); // Ensure volume is in database and get its ID volume_manager .ensure_volume_in_db(&volume, &library) .await - .map_err(|e| LocationError::Other(format!("Failed to register volume: {}", e)))? + .map_err(|e| { + LocationError::Other(format!("Failed to register volume: {}", e)) + })? 
} Ok(None) => { return Err(LocationError::Other(format!( @@ -173,8 +182,8 @@ impl LocationManager { indexed_at: Set(Some(now)), // Record when location root was created permissions: Set(None), inode: Set(inode.map(|i| i as i64)), // Use extracted inode - parent_id: Set(None), // Location root has no parent - volume_id: Set(Some(volume_id)), // Volume is required for all locations + parent_id: Set(None), // Location root has no parent + volume_id: Set(Some(volume_id)), // Volume is required for all locations ..Default::default() }; diff --git a/core/src/ops/devices/update.rs b/core/src/ops/devices/update.rs index 7663cd730..a06bcc324 100644 --- a/core/src/ops/devices/update.rs +++ b/core/src/ops/devices/update.rs @@ -66,17 +66,16 @@ impl CoreAction for UpdateDeviceAction { } // Validate slug format (alphanumeric + hyphens only) if !slug.chars().all(|c| c.is_alphanumeric() || c == '-') { - return Err("Device slug can only contain letters, numbers, and hyphens".to_string()); + return Err( + "Device slug can only contain letters, numbers, and hyphens".to_string() + ); } } Ok(Self { input }) } - async fn execute( - self, - context: Arc, - ) -> Result { + async fn execute(self, context: Arc) -> Result { // Load current device config let mut device_config = DeviceConfig::load_from(&context.data_dir) .map_err(|e| ActionError::Internal(format!("Failed to load device config: {}", e)))?; @@ -105,10 +104,7 @@ impl CoreAction for UpdateDeviceAction { }) } - async fn validate( - &self, - _context: Arc, - ) -> Result { + async fn validate(&self, _context: Arc) -> Result { // Basic validation is done in from_input Ok(ValidationResult::Success { metadata: None }) } diff --git a/core/src/ops/files/copy/action.rs b/core/src/ops/files/copy/action.rs index a7f683788..78d86a412 100644 --- a/core/src/ops/files/copy/action.rs +++ b/core/src/ops/files/copy/action.rs @@ -299,14 +299,15 @@ impl LibraryAction for FileCopyAction { // Get strategy metadata for rich UI display let first_source = &self.sources.paths[0]; - let (_, strategy_metadata) = super::routing::CopyStrategyRouter::select_strategy_with_metadata( - first_source, - &self.destination, - self.options.delete_after_copy, - &self.options.copy_method, - Some(&*context.volume_manager), - ) - .await; + let (_, strategy_metadata) = + super::routing::CopyStrategyRouter::select_strategy_with_metadata( + first_source, + &self.destination, + self.options.delete_after_copy, + &self.options.copy_method, + Some(&*context.volume_manager), + ) + .await; // Calculate file counts and total bytes let (file_count, total_bytes) = self.calculate_totals().await?; @@ -453,9 +454,9 @@ impl FileCopyAction { let mut stack = vec![path.to_path_buf()]; while let Some(current) = stack.pop() { - let metadata = tokio::fs::metadata(¤t).await.map_err(|e| { - ActionError::Internal(format!("Failed to read metadata: {}", e)) - })?; + let metadata = tokio::fs::metadata(¤t) + .await + .map_err(|e| ActionError::Internal(format!("Failed to read metadata: {}", e)))?; if metadata.is_file() { count += 1; diff --git a/core/src/ops/files/copy/database.rs b/core/src/ops/files/copy/database.rs index 4631b68e0..eb78c8268 100644 --- a/core/src/ops/files/copy/database.rs +++ b/core/src/ops/files/copy/database.rs @@ -35,7 +35,7 @@ impl CopyDatabaseQuery { match PathResolver::resolve_to_entry(&self.db, source).await { Ok(Some(entry)) => { let (file_count, total_size) = match entry.kind { - 0 => (1u64, entry.size as u64), // File + 0 => (1u64, entry.size as u64), // File 1 => (entry.file_count as u64, 
entry.aggregate_size as u64), // Directory _ => (0, 0), }; @@ -316,7 +316,8 @@ impl CopyDatabaseQuery { for component in components { if let Some(parent_id) = current_parent_id { // Remove extension for entry lookup (extensions stored separately) - let component_without_ext = if let Some(dot_pos) = component.rfind('.') { + let component_without_ext = if let Some(dot_pos) = component.rfind('.') + { &component[..dot_pos] } else { component @@ -325,8 +326,8 @@ impl CopyDatabaseQuery { // Normalize Unicode spaces (macOS uses special space characters) // Replace narrow no-break space (\u{202f}) and other space variants with regular space let normalized_name = component_without_ext - .replace('\u{202f}', " ") // Narrow no-break space - .replace('\u{00a0}', " ") // Non-breaking space + .replace('\u{202f}', " ") // Narrow no-break space + .replace('\u{00a0}', " ") // Non-breaking space .replace('\u{2009}', " "); // Thin space let child = entry::Entity::find() diff --git a/core/src/ops/files/copy/job.rs b/core/src/ops/files/copy/job.rs index f377154e5..ae718f155 100644 --- a/core/src/ops/files/copy/job.rs +++ b/core/src/ops/files/copy/job.rs @@ -375,7 +375,8 @@ impl JobHandler for FileCopyJob { copied_count += files_in_source; // Count actual files as copied for progress tracking // Mark as completed in metadata (already done during previous run) - self.job_metadata.update_status(&resolved_source, super::metadata::CopyFileStatus::Completed); + self.job_metadata + .update_status(&resolved_source, super::metadata::CopyFileStatus::Completed); continue; } @@ -422,7 +423,8 @@ impl JobHandler for FileCopyJob { }; // Mark file as currently copying in metadata - self.job_metadata.update_status(&resolved_source, super::metadata::CopyFileStatus::Copying); + self.job_metadata + .update_status(&resolved_source, super::metadata::CopyFileStatus::Copying); // Persist immediately so UI can show "copying" status in real-time self.persist_job_state_to_db(&ctx).await?; @@ -506,7 +508,10 @@ impl JobHandler for FileCopyJob { ctx.log(format!("Skipping existing file: {}", dest_path.display())); // Mark as skipped in metadata - self.job_metadata.update_status(&resolved_source, super::metadata::CopyFileStatus::Skipped); + self.job_metadata.update_status( + &resolved_source, + super::metadata::CopyFileStatus::Skipped, + ); // Skip this file progress_aggregator.complete_source(); @@ -572,7 +577,10 @@ impl JobHandler for FileCopyJob { self.completed_indices.push(index); // Mark as completed in metadata - self.job_metadata.update_status(&resolved_source, super::metadata::CopyFileStatus::Completed); + self.job_metadata.update_status( + &resolved_source, + super::metadata::CopyFileStatus::Completed, + ); // If this is a move operation and the strategy didn't handle deletion, // we need to delete the source after successful copy @@ -1058,8 +1066,8 @@ impl FileCopyJob { match PathResolver::resolve_to_entry(ctx.library_db(), source).await { Ok(Some(entry)) => { let size = match entry.kind { - 0 => entry.size as u64, // File - 1 => entry.aggregate_size as u64, // Directory + 0 => entry.size as u64, // File + 1 => entry.aggregate_size as u64, // Directory _ => 0, }; total += size; @@ -1094,8 +1102,9 @@ impl FileCopyJob { use sea_orm::{ActiveModelTrait, ActiveValue::Set}; // Serialize current job state - let job_state = rmp_serde::to_vec(self) - .map_err(|e| JobError::serialization(format!("Failed to serialize job state: {}", e)))?; + let job_state = rmp_serde::to_vec(self).map_err(|e| { + JobError::serialization(format!("Failed to 
serialize job state: {}", e)) + })?; // Update the jobs.state field in the database let job_db = ctx.library.jobs().database(); @@ -1105,7 +1114,9 @@ impl FileCopyJob { ..Default::default() }; - job_model.update(job_db.conn()).await + job_model + .update(job_db.conn()) + .await .map_err(|e| JobError::execution(format!("Failed to persist job state: {}", e)))?; ctx.log(format!( @@ -1129,7 +1140,9 @@ impl FileCopyJob { JobError::execution(format!("Failed to resolve source path: {}", e)) })?; - let (size_bytes, is_directory, entry_id) = if let Some(local_path) = resolved_source.as_local_path() { + let (size_bytes, is_directory, entry_id) = if let Some(local_path) = + resolved_source.as_local_path() + { // Local path - get from filesystem let metadata = tokio::fs::metadata(local_path) .await diff --git a/core/src/ops/files/query/file_by_path.rs b/core/src/ops/files/query/file_by_path.rs index d5f1ba2cc..044ff464b 100644 --- a/core/src/ops/files/query/file_by_path.rs +++ b/core/src/ops/files/query/file_by_path.rs @@ -316,7 +316,9 @@ impl FileByPathQuery { PathResolver::resolve_to_entry(db, sd_path) .await .map_err(|e| QueryError::Internal(format!("Database error: {}", e)))? - .ok_or_else(|| QueryError::Internal(format!("Entry not found for path: {}", sd_path.display()))) + .ok_or_else(|| { + QueryError::Internal(format!("Entry not found for path: {}", sd_path.display())) + }) } } diff --git a/core/src/ops/indexing/action.rs b/core/src/ops/indexing/action.rs index 8f5b9c820..2b6ae0f7e 100644 --- a/core/src/ops/indexing/action.rs +++ b/core/src/ops/indexing/action.rs @@ -95,7 +95,8 @@ impl LibraryAction for IndexingAction { } IndexPersistence::Persistent => { // Persistent mode stores entries in the database but doesn't require a location binding yet. - let mut c = IndexerJobConfig::ephemeral_browse(sd_path, self.input.scope, false); + let mut c = + IndexerJobConfig::ephemeral_browse(sd_path, self.input.scope, false); c.persistence = IndexPersistence::Persistent; c } diff --git a/core/src/ops/indexing/database_storage.rs b/core/src/ops/indexing/database_storage.rs index 483c8da96..c3ffc781a 100644 --- a/core/src/ops/indexing/database_storage.rs +++ b/core/src/ops/indexing/database_storage.rs @@ -830,7 +830,10 @@ impl DatabaseStorage { .await .map_err(|e| JobError::execution(format!("Failed to query content identity: {}", e)))?; - let (content_model, is_new_content, mime_type_model, is_new_mime_type) = if let Some(existing) = existing { + let (content_model, is_new_content, mime_type_model, is_new_mime_type) = if let Some( + existing, + ) = existing + { let mut existing_active: entities::content_identity::ActiveModel = existing.into(); existing_active.entry_count = Set(existing_active.entry_count.unwrap() + 1); existing_active.last_verified_at = Set(chrono::Utc::now()); @@ -855,11 +858,13 @@ impl DatabaseStorage { let file_type_result = registry.identify(path).await; - let (kind_id, mime_type_id, mime_type_model, is_new_mime_type) = match file_type_result { + let (kind_id, mime_type_id, mime_type_model, is_new_mime_type) = match file_type_result + { Ok(result) => { let kind_id = result.file_type.category as i32; - let (mime_type_id, mime_type_model, is_new_mime_type) = if let Some(mime_str) = result.file_type.primary_mime_type() + let (mime_type_id, mime_type_model, is_new_mime_type) = if let Some(mime_str) = + result.file_type.primary_mime_type() { let existing = entities::mime_type::Entity::find() .filter(entities::mime_type::Column::MimeType.eq(mime_str)) diff --git 
a/core/src/ops/indexing/ephemeral/cache.rs b/core/src/ops/indexing/ephemeral/cache.rs index 0e0c7dd01..5c10cd777 100644 --- a/core/src/ops/indexing/ephemeral/cache.rs +++ b/core/src/ops/indexing/ephemeral/cache.rs @@ -138,7 +138,8 @@ impl EphemeralIndexCache { // Try to load from snapshot if let Ok(snapshot_cache_dir) = super::snapshot::get_snapshot_cache_dir() { - if let Ok(snapshot_path) = super::snapshot::snapshot_path_for(path, &snapshot_cache_dir) { + if let Ok(snapshot_path) = super::snapshot::snapshot_path_for(path, &snapshot_cache_dir) + { if let Ok(Some(loaded_index)) = EphemeralIndex::load_snapshot(&snapshot_path) { // Replace the global index with the loaded one let mut index = self.index.write().await; @@ -161,7 +162,8 @@ impl EphemeralIndexCache { /// Save the current index to a snapshot file pub async fn save_snapshot(&self, path: &Path) -> anyhow::Result<()> { if let Ok(snapshot_cache_dir) = super::snapshot::get_snapshot_cache_dir() { - if let Ok(snapshot_path) = super::snapshot::snapshot_path_for(path, &snapshot_cache_dir) { + if let Ok(snapshot_path) = super::snapshot::snapshot_path_for(path, &snapshot_cache_dir) + { let index = self.index.read().await; index.save_snapshot(&snapshot_path)?; tracing::info!("Saved snapshot for path: {}", path.display()); diff --git a/core/src/ops/indexing/ephemeral/index.rs b/core/src/ops/indexing/ephemeral/index.rs index bb42fb9b4..e9284cd66 100644 --- a/core/src/ops/indexing/ephemeral/index.rs +++ b/core/src/ops/indexing/ephemeral/index.rs @@ -635,7 +635,7 @@ impl EphemeralIndex { self.entry_uuids.remove(&id); self.content_kinds.remove(&id); - // Also remove from parent's children list in arena + // Also remove from parent's children list in arena // Get the parent's entry ID if let Some(parent_path) = path.parent() { if let Some(&parent_id) = self.path_index.get(parent_path) { @@ -734,7 +734,11 @@ impl EphemeralIndex { /// /// The root path is stored in the snapshot so it can be restored to /// indexed_paths when loaded, making the cached data queryable. 
- pub fn save_snapshot_with_root(&self, snapshot_path: &Path, _root_path: &Path) -> anyhow::Result<()> { + pub fn save_snapshot_with_root( + &self, + snapshot_path: &Path, + _root_path: &Path, + ) -> anyhow::Result<()> { super::snapshot::save_snapshot_impl(self, snapshot_path) } diff --git a/core/src/ops/indexing/path_resolver.rs b/core/src/ops/indexing/path_resolver.rs index 3571db1c1..f5691b7f7 100644 --- a/core/src/ops/indexing/path_resolver.rs +++ b/core/src/ops/indexing/path_resolver.rs @@ -11,7 +11,9 @@ use sea_orm::{prelude::*, ConnectionTrait, QuerySelect, Statement}; use crate::{ domain::addressing::SdPath, - infra::db::entities::{device, directory_paths, entry, location, volume, DirectoryPaths, Entry}, + infra::db::entities::{ + device, directory_paths, entry, location, volume, DirectoryPaths, Entry, + }, }; pub struct PathResolver; diff --git a/core/src/ops/indexing/phases/processing.rs b/core/src/ops/indexing/phases/processing.rs index a053b3408..46ee20946 100644 --- a/core/src/ops/indexing/phases/processing.rs +++ b/core/src/ops/indexing/phases/processing.rs @@ -529,15 +529,16 @@ pub async fn run_processing_phase( // Check if the root entry needs updating let needs_update = root_entry.inode.is_none() - || inode.map(|i| i != root_entry.inode.unwrap_or(-1) as u64).unwrap_or(false) + || inode + .map(|i| i != root_entry.inode.unwrap_or(-1) as u64) + .unwrap_or(false) || root_entry.size != metadata.len() as i64 || { if let Ok(modified) = metadata.modified() { if let Ok(duration) = modified.duration_since(std::time::UNIX_EPOCH) { - if let Some(timestamp) = chrono::DateTime::from_timestamp( - duration.as_secs() as i64, - 0, - ) { + if let Some(timestamp) = + chrono::DateTime::from_timestamp(duration.as_secs() as i64, 0) + { root_entry.modified_at != timestamp } else { false @@ -576,16 +577,16 @@ pub async fn run_processing_phase( JobError::execution(format!("Failed to begin root update transaction: {}", e)) })?; - if let Err(e) = DatabaseStorage::update_entry_in_conn( - location_entry_id, - &root_dir_entry, - &txn, - ) - .await + if let Err(e) = + DatabaseStorage::update_entry_in_conn(location_entry_id, &root_dir_entry, &txn) + .await { ctx.add_non_critical_error(format!("Failed to update root entry: {}", e)); if let Err(rollback_err) = txn.rollback().await { - warn!("Failed to rollback root update transaction: {}", rollback_err); + warn!( + "Failed to rollback root update transaction: {}", + rollback_err + ); } } else { txn.commit().await.map_err(|e| { diff --git a/core/src/ops/indexing/processor.rs b/core/src/ops/indexing/processor.rs index 0960ebd8e..bc563e569 100644 --- a/core/src/ops/indexing/processor.rs +++ b/core/src/ops/indexing/processor.rs @@ -151,7 +151,14 @@ impl ContentHashProcessor { let content_hash = ContentHashGenerator::generate_content_hash(&entry.path).await?; debug!("āœ“ Generated content hash: {}", content_hash); - DatabaseStorage::link_to_content_identity(db, entry.id, &entry.path, content_hash, registry).await?; + DatabaseStorage::link_to_content_identity( + db, + entry.id, + &entry.path, + content_hash, + registry, + ) + .await?; debug!("āœ“ Linked content identity for entry {}", entry.id); diff --git a/core/src/ops/jobs/copy_metadata/query.rs b/core/src/ops/jobs/copy_metadata/query.rs index fa2e280ff..f904c0138 100644 --- a/core/src/ops/jobs/copy_metadata/query.rs +++ b/core/src/ops/jobs/copy_metadata/query.rs @@ -75,9 +75,8 @@ impl LibraryQuery for CopyMetadataQuery { } // Deserialize the job state - let copy_job: FileCopyJob = 
rmp_serde::from_slice(&job_record.state).map_err(|e| { - QueryError::Internal(format!("Failed to deserialize job state: {}", e)) - })?; + let copy_job: FileCopyJob = rmp_serde::from_slice(&job_record.state) + .map_err(|e| QueryError::Internal(format!("Failed to deserialize job state: {}", e)))?; // Build File domain objects from entry UUIDs let mut metadata = copy_job.job_metadata; @@ -91,11 +90,8 @@ impl LibraryQuery for CopyMetadataQuery { // Batch load File objects if !entry_uuids.is_empty() { - match crate::domain::file::File::from_entry_uuids( - library.db().conn(), - &entry_uuids, - ) - .await + match crate::domain::file::File::from_entry_uuids(library.db().conn(), &entry_uuids) + .await { Ok(files) => { metadata.file_objects = files; diff --git a/core/src/ops/locations/validate/query.rs b/core/src/ops/locations/validate/query.rs index f2c09a9d4..34641f00f 100644 --- a/core/src/ops/locations/validate/query.rs +++ b/core/src/ops/locations/validate/query.rs @@ -63,7 +63,15 @@ impl LibraryQuery for ValidateLocationPathQuery { let volume_manager = &context.volume_manager; let volume_opt = volume_manager.volume_for_path(path).await; - tracing::info!("Volume lookup for path {}: {:?}", path.display(), volume_opt.as_ref().map(|v| (v.name.as_str(), &v.volume_type, v.fingerprint.0.as_str()))); + tracing::info!( + "Volume lookup for path {}: {:?}", + path.display(), + volume_opt.as_ref().map(|v| ( + v.name.as_str(), + &v.volume_type, + v.fingerprint.0.as_str() + )) + ); let is_primary = volume_opt .as_ref() @@ -87,7 +95,11 @@ impl LibraryQuery for ValidateLocationPathQuery { } matches }); - tracing::info!("is_system_dir: {}, is_primary: {}", is_system_dir, is_primary); + tracing::info!( + "is_system_dir: {}, is_primary: {}", + is_system_dir, + is_primary + ); // Determine risk level using hybrid approach (depth + system directory check) let risk_level = if is_system_dir || depth <= 1 { diff --git a/core/src/ops/media/ocr/job.rs b/core/src/ops/media/ocr/job.rs index 22fe6d00b..962f0e23b 100644 --- a/core/src/ops/media/ocr/job.rs +++ b/core/src/ops/media/ocr/job.rs @@ -243,9 +243,9 @@ impl OcrJob { if let Ok(Some(mime)) = mime_type::Entity::find_by_id(mime_id).one(db).await { if super::is_ocr_supported( - &mime.mime_type, - ctx.library().core_context().file_type_registry(), - ) { + &mime.mime_type, + ctx.library().core_context().file_type_registry(), + ) { if let Ok(path) = crate::ops::indexing::PathResolver::get_full_path( db, entry_model.id, @@ -305,9 +305,9 @@ impl OcrJob { mime_type::Entity::find_by_id(mime_id).one(db).await { if super::is_ocr_supported( - &mime.mime_type, - ctx.library().core_context().file_type_registry(), - ) { + &mime.mime_type, + ctx.library().core_context().file_type_registry(), + ) { // Get full path if let Ok(path) = crate::ops::indexing::PathResolver::get_full_path( diff --git a/core/src/ops/media/speech/job.rs b/core/src/ops/media/speech/job.rs index a2e720e90..b6a0208a4 100644 --- a/core/src/ops/media/speech/job.rs +++ b/core/src/ops/media/speech/job.rs @@ -264,9 +264,9 @@ impl SpeechToTextJob { if let Ok(Some(mime)) = mime_type::Entity::find_by_id(mime_id).one(db).await { if super::is_speech_supported( - &mime.mime_type, - ctx.library().core_context().file_type_registry(), - ) { + &mime.mime_type, + ctx.library().core_context().file_type_registry(), + ) { if let Ok(path) = crate::ops::indexing::PathResolver::get_full_path( db, entry_model.id, @@ -310,9 +310,9 @@ impl SpeechToTextJob { if let Ok(Some(mime)) = mime_type::Entity::find_by_id(mime_id).one(db).await { 
if super::is_speech_supported( - &mime.mime_type, - ctx.library().core_context().file_type_registry(), - ) { + &mime.mime_type, + ctx.library().core_context().file_type_registry(), + ) { if let Ok(path) = crate::ops::indexing::PathResolver::get_full_path( db, entry_model.id, diff --git a/core/src/ops/media/thumbnail/action.rs b/core/src/ops/media/thumbnail/action.rs index c0258b65f..748ab7a90 100644 --- a/core/src/ops/media/thumbnail/action.rs +++ b/core/src/ops/media/thumbnail/action.rs @@ -1,6 +1,9 @@ //! Thumbnail generation action handlers -use super::{job::{ThumbnailJob, ThumbnailJobConfig}, processor::ThumbnailProcessor}; +use super::{ + job::{ThumbnailJob, ThumbnailJobConfig}, + processor::ThumbnailProcessor, +}; use crate::{ context::CoreContext, infra::action::{error::ActionError, LibraryAction}, diff --git a/core/src/ops/media/thumbstrip/action.rs b/core/src/ops/media/thumbstrip/action.rs index 181103023..1f836237b 100644 --- a/core/src/ops/media/thumbstrip/action.rs +++ b/core/src/ops/media/thumbstrip/action.rs @@ -144,10 +144,9 @@ impl LibraryAction for GenerateThumbstripAction { } // Process the file - let result = processor - .process(db, &proc_entry) - .await - .map_err(|e| ActionError::Internal(format!("Thumbstrip generation failed: {}", e)))?; + let result = processor.process(db, &proc_entry).await.map_err(|e| { + ActionError::Internal(format!("Thumbstrip generation failed: {}", e)) + })?; if !result.success { return Err(ActionError::Internal( diff --git a/core/src/ops/network/status/query.rs b/core/src/ops/network/status/query.rs index 135b09749..ec42250e0 100644 --- a/core/src/ops/network/status/query.rs +++ b/core/src/ops/network/status/query.rs @@ -30,9 +30,7 @@ impl CoreQuery for NetworkStatusQuery { if let Some(net) = networking { let node_id = net.node_id().to_string(); let addresses = if let Ok(Some(addr)) = net.get_node_addr() { - addr.ip_addrs() - .map(|a| a.to_string()) - .collect::>() + addr.ip_addrs().map(|a| a.to_string()).collect::>() } else { Vec::new() }; diff --git a/core/src/ops/search/input.rs b/core/src/ops/search/input.rs index 5c0b1f166..8c2c45e3c 100644 --- a/core/src/ops/search/input.rs +++ b/core/src/ops/search/input.rs @@ -187,8 +187,8 @@ impl FileSearchInput { /// Validate the search input pub fn validate(&self) -> Result<(), String> { // Allow empty queries when sorting by IndexedAt (for recents view) - let is_recents_query = self.query.trim().is_empty() - && matches!(self.sort.field, SortField::IndexedAt); + let is_recents_query = + self.query.trim().is_empty() && matches!(self.sort.field, SortField::IndexedAt); if self.query.trim().is_empty() && !is_recents_query { return Err("Query cannot be empty".to_string()); diff --git a/core/src/ops/search/query.rs b/core/src/ops/search/query.rs index 07a358dac..f1fb04bb8 100644 --- a/core/src/ops/search/query.rs +++ b/core/src/ops/search/query.rs @@ -115,9 +115,9 @@ impl LibraryQuery for FileSearchQuery { // Get actual total count for pagination let total_count = self - .get_total_count(db.conn(), context.file_type_registry()) - .await - .unwrap_or(0); + .get_total_count(db.conn(), context.file_type_registry()) + .await + .unwrap_or(0); // Create output with persistent index type let output = FileSearchOutput::new_persistent( @@ -867,7 +867,8 @@ impl FileSearchQuery { // Get volume to find device info let Some(volume) = crate::infra::db::entities::volume::Entity::find_by_id(volume_id) .one(db) - .await? else { + .await? 
+ else { return Ok(None); }; @@ -875,7 +876,8 @@ impl FileSearchQuery { let Some(device) = crate::infra::db::entities::device::Entity::find() .filter(crate::infra::db::entities::device::Column::Uuid.eq(volume.device_id)) .one(db) - .await? else { + .await? + else { return Ok(None); }; @@ -895,11 +897,7 @@ impl FileSearchQuery { file, score, score_breakdown: crate::ops::search::output::ScoreBreakdown::new( - score, - None, - 0.0, - 0.0, - 0.0, + score, None, 0.0, 0.0, 0.0, ), highlights: Vec::new(), matched_content: None, @@ -951,10 +949,7 @@ impl FileSearchQuery { // Execute query let entries = query.all(db).await?; - tracing::info!( - "Fast search without FTS returned {} entries", - entries.len() - ); + tracing::info!("Fast search without FTS returned {} entries", entries.len()); // Convert entries to FileSearchResult using helper let mut results = Vec::new(); diff --git a/core/src/ops/sync/get_sync_partners/action.rs b/core/src/ops/sync/get_sync_partners/action.rs index f565f8dd7..cf83d7364 100644 --- a/core/src/ops/sync/get_sync_partners/action.rs +++ b/core/src/ops/sync/get_sync_partners/action.rs @@ -5,8 +5,8 @@ use crate::infra::query::{LibraryQuery, QueryError, QueryResult}; use crate::infra::sync::NetworkTransport; use std::sync::Arc; -use super::{GetSyncPartnersInput, GetSyncPartnersOutput}; use super::output::{DeviceDebugInfo, SyncPartnerInfo, SyncPartnersDebugInfo}; +use super::{GetSyncPartnersInput, GetSyncPartnersOutput}; /// Get computed sync partners for the current library pub struct GetSyncPartners { @@ -27,7 +27,7 @@ impl LibraryQuery for GetSyncPartners { session: crate::infra::api::SessionContext, ) -> QueryResult { use crate::infra::db::entities; - use sea_orm::{EntityTrait}; + use sea_orm::EntityTrait; // Get library from session let library_id = session diff --git a/core/src/ops/tags/create/action.rs b/core/src/ops/tags/create/action.rs index 546b75631..2cfbf8975 100644 --- a/core/src/ops/tags/create/action.rs +++ b/core/src/ops/tags/create/action.rs @@ -246,4 +246,4 @@ async fn lookup_entry_uuid( entry_model .uuid .ok_or_else(|| format!("Entry {} has no UUID assigned", entry_id)) -} \ No newline at end of file +} diff --git a/core/src/ops/volumes/eject/action.rs b/core/src/ops/volumes/eject/action.rs index 9f663149a..9586ad07b 100644 --- a/core/src/ops/volumes/eject/action.rs +++ b/core/src/ops/volumes/eject/action.rs @@ -1,11 +1,7 @@ //! 
Volume eject action use super::{VolumeEjectInput, VolumeEjectOutput}; -use crate::{ - context::CoreContext, - infra::action::error::ActionError, - volume::VolumeFingerprint, -}; +use crate::{context::CoreContext, infra::action::error::ActionError, volume::VolumeFingerprint}; use serde::{Deserialize, Serialize}; use std::sync::Arc; use tracing::{error, info}; @@ -45,9 +41,7 @@ impl crate::infra::action::LibraryAction for VolumeEjectAction { .volume_manager .get_volume(&fingerprint) .await - .ok_or_else(|| { - ActionError::Internal(format!("Volume not found: {}", fingerprint)) - })?; + .ok_or_else(|| ActionError::Internal(format!("Volume not found: {}", fingerprint)))?; // Check if volume is mounted if !volume.is_mounted { diff --git a/core/src/ops/volumes/list/query.rs b/core/src/ops/volumes/list/query.rs index f59b7612d..0105a2f8f 100644 --- a/core/src/ops/volumes/list/query.rs +++ b/core/src/ops/volumes/list/query.rs @@ -128,7 +128,6 @@ impl VolumeListQuery { _ => Ok(None), } } - } impl LibraryQuery for VolumeListQuery { diff --git a/core/src/ops/volumes/speed_test/action.rs b/core/src/ops/volumes/speed_test/action.rs index 4524bdf96..1d4ec82ef 100644 --- a/core/src/ops/volumes/speed_test/action.rs +++ b/core/src/ops/volumes/speed_test/action.rs @@ -94,9 +94,7 @@ impl LibraryAction for VolumeSpeedTestAction { use crate::domain::resource::EventEmitter; volume .emit_changed(&context.events) - .map_err(|e| { - ActionError::Internal(format!("Failed to emit volume event: {}", e)) - })?; + .map_err(|e| ActionError::Internal(format!("Failed to emit volume event: {}", e)))?; // Return native output directly Ok(VolumeSpeedTestOutput::new( diff --git a/core/src/service/network/core/event_loop.rs b/core/src/service/network/core/event_loop.rs index ec83cf458..215f469d6 100644 --- a/core/src/service/network/core/event_loop.rs +++ b/core/src/service/network/core/event_loop.rs @@ -99,7 +99,9 @@ impl NetworkingEventLoop { device_registry: Arc>, event_sender: broadcast::Sender, identity: NetworkIdentity, - active_connections: Arc), Connection>>>, + active_connections: Arc< + RwLock), Connection>>, + >, logger: Arc, ) -> Self { let (command_tx, command_rx) = mpsc::unbounded_channel(); diff --git a/core/src/service/network/core/mod.rs b/core/src/service/network/core/mod.rs index e24624ffc..5edae2bc2 100644 --- a/core/src/service/network/core/mod.rs +++ b/core/src/service/network/core/mod.rs @@ -181,7 +181,10 @@ impl NetworkingService { /// /// This enables the device registry to emit complete device data with hardware_model /// by querying the library database instead of just using network DeviceInfo. 
- pub async fn set_library_manager(&self, library_manager: std::sync::Weak) { + pub async fn set_library_manager( + &self, + library_manager: std::sync::Weak, + ) { let mut registry = self.device_registry.write().await; registry.set_library_manager(library_manager); } @@ -999,7 +1002,11 @@ impl NetworkingService { /// # Parameters /// * `node_addr` - The node address to connect to /// * `force_relay` - If true, strip direct addresses and only use relay - pub async fn connect_to_node(&self, endpoint_addr: EndpointAddr, force_relay: bool) -> Result<()> { + pub async fn connect_to_node( + &self, + endpoint_addr: EndpointAddr, + force_relay: bool, + ) -> Result<()> { let endpoint_addr = if force_relay { Self::strip_ip_addresses(endpoint_addr) } else { @@ -1053,7 +1060,11 @@ impl NetworkingService { pub async fn get_relay_url(&self) -> Option { if let Some(endpoint) = &self.endpoint { // In v0.95+, get relay URL from the endpoint address - endpoint.addr().relay_urls().next().map(|url| url.to_string()) + endpoint + .addr() + .relay_urls() + .next() + .map(|url| url.to_string()) } else { None } @@ -1073,9 +1084,9 @@ impl NetworkingService { // Create mDNS discovery service to subscribe to events // Note: In v0.95+, we need to get discovery services individually and subscribe let endpoint_id = endpoint.id(); - let mdns_discovery = MdnsDiscovery::builder() - .build(endpoint_id) - .map_err(|e| NetworkingError::ConnectionFailed(format!("Failed to create mDNS discovery: {}", e)))?; + let mdns_discovery = MdnsDiscovery::builder().build(endpoint_id).map_err(|e| { + NetworkingError::ConnectionFailed(format!("Failed to create mDNS discovery: {}", e)) + })?; let mut discovery_stream = mdns_discovery.subscribe().await; let session_id_str = session_id.to_string(); let timeout = tokio::time::Duration::from_secs(5); // Shorter timeout for mDNS @@ -1871,9 +1882,7 @@ async fn spawn_connection_watcher_task( // Use update_device_from_connection with is_connected=false (all connections closed) if let Err(e) = registry .update_device_from_connection( - device_id, - node_id, - false, // is_connected + device_id, node_id, false, // is_connected None, // latency ) .await diff --git a/core/src/service/network/device/registry.rs b/core/src/service/network/device/registry.rs index 8402139df..eee1f1396 100644 --- a/core/src/service/network/device/registry.rs +++ b/core/src/service/network/device/registry.rs @@ -266,7 +266,12 @@ impl DeviceRegistry { } /// Add a discovered node - pub fn add_discovered_node(&mut self, device_id: Uuid, node_id: EndpointId, node_addr: EndpointAddr) { + pub fn add_discovered_node( + &mut self, + device_id: Uuid, + node_id: EndpointId, + node_addr: EndpointAddr, + ) { let state = DeviceState::Discovered { node_id, node_addr, @@ -587,7 +592,9 @@ impl DeviceRegistry { | DeviceState::Connected { info, .. } | DeviceState::Disconnected { info, .. 
} => { // Extract node ID from network fingerprint and clean up mapping - if let Ok(node_id) = info.network_fingerprint.node_id.parse::() { + if let Ok(node_id) = + info.network_fingerprint.node_id.parse::() + { self.node_to_device.remove(&node_id); } } @@ -877,7 +884,11 @@ impl DeviceRegistry { } /// Set a device as connected with its node ID - pub async fn set_device_connected(&mut self, device_id: Uuid, node_id: EndpointId) -> Result<()> { + pub async fn set_device_connected( + &mut self, + device_id: Uuid, + node_id: EndpointId, + ) -> Result<()> { // Update the node_to_device mapping self.node_to_device.insert(node_id, device_id); diff --git a/core/src/service/network/protocol/messaging.rs b/core/src/service/network/protocol/messaging.rs index 7d0fd505e..0b2581cea 100644 --- a/core/src/service/network/protocol/messaging.rs +++ b/core/src/service/network/protocol/messaging.rs @@ -283,7 +283,8 @@ impl MessagingProtocolHandler { match existing { Ok(Some(existing_device)) => { // Device exists (from pre-registration) - update with full hardware - let mut device_model: entities::device::ActiveModel = existing_device.into(); + let mut device_model: entities::device::ActiveModel = + existing_device.into(); // Update all fields with data from message device_model.name = Set(device_name.clone()); @@ -299,7 +300,8 @@ impl MessagingProtocolHandler { device_model.memory_total_bytes = Set(memory_total_bytes); device_model.form_factor = Set(form_factor.clone()); device_model.manufacturer = Set(manufacturer.clone()); - device_model.gpu_models = Set(gpu_models.clone().map(|g| serde_json::json!(g))); + device_model.gpu_models = + Set(gpu_models.clone().map(|g| serde_json::json!(g))); device_model.boot_disk_type = Set(boot_disk_type.clone()); device_model.boot_disk_capacity_bytes = Set(boot_disk_capacity_bytes); device_model.swap_total_bytes = Set(swap_total_bytes); @@ -400,35 +402,52 @@ impl MessagingProtocolHandler { if let Ok(our_device) = context_clone.device_manager.to_device() { // Get our slug for this library if let Some(lib_id) = library_id { - if let Ok(our_slug) = context_clone.device_manager.slug_for_library(lib_id) { + if let Ok(our_slug) = + context_clone.device_manager.slug_for_library(lib_id) + { // Get networking - if let Some(networking) = context_clone.get_networking().await { - let our_register_request = LibraryMessage::RegisterDeviceRequest { - request_id: Uuid::new_v4(), - library_id, - device_id: our_device.id, - device_name: our_device.name, - device_slug: our_slug, - os_name: our_device.os.to_string(), - os_version: our_device.os_version, - hardware_model: our_device.hardware_model, - cpu_model: our_device.cpu_model, - cpu_architecture: our_device.cpu_architecture, - cpu_cores_physical: our_device.cpu_cores_physical, - cpu_cores_logical: our_device.cpu_cores_logical, - cpu_frequency_mhz: our_device.cpu_frequency_mhz, - memory_total_bytes: our_device.memory_total_bytes, - form_factor: our_device.form_factor.map(|f| f.to_string()), - manufacturer: our_device.manufacturer, - gpu_models: our_device.gpu_models, - boot_disk_type: our_device.boot_disk_type, - boot_disk_capacity_bytes: our_device.boot_disk_capacity_bytes, - swap_total_bytes: our_device.swap_total_bytes, - }; + if let Some(networking) = + context_clone.get_networking().await + { + let our_register_request = + LibraryMessage::RegisterDeviceRequest { + request_id: Uuid::new_v4(), + library_id, + device_id: our_device.id, + device_name: our_device.name, + device_slug: our_slug, + os_name: our_device.os.to_string(), + 
os_version: our_device.os_version, + hardware_model: our_device.hardware_model, + cpu_model: our_device.cpu_model, + cpu_architecture: our_device + .cpu_architecture, + cpu_cores_physical: our_device + .cpu_cores_physical, + cpu_cores_logical: our_device + .cpu_cores_logical, + cpu_frequency_mhz: our_device + .cpu_frequency_mhz, + memory_total_bytes: our_device + .memory_total_bytes, + form_factor: our_device + .form_factor + .map(|f| f.to_string()), + manufacturer: our_device.manufacturer, + gpu_models: our_device.gpu_models, + boot_disk_type: our_device.boot_disk_type, + boot_disk_capacity_bytes: our_device + .boot_disk_capacity_bytes, + swap_total_bytes: our_device + .swap_total_bytes, + }; // Send to the device that just registered with us if let Err(e) = networking - .send_library_request(sender_device_id, our_register_request) + .send_library_request( + sender_device_id, + our_register_request, + ) .await { tracing::warn!( diff --git a/core/src/service/network/protocol/pairing/initiator.rs b/core/src/service/network/protocol/pairing/initiator.rs index d91906ded..89b44e58c 100644 --- a/core/src/service/network/protocol/pairing/initiator.rs +++ b/core/src/service/network/protocol/pairing/initiator.rs @@ -192,39 +192,43 @@ impl PairingProtocolHandler { .map_err(|e| NetworkingError::Serialization(e)); } - self.log_info(&format!( - "Signature verified successfully for session {} from device {}", - session_id, from_device - )) - .await; + self.log_info(&format!( + "Signature verified successfully for session {} from device {}", + session_id, from_device + )) + .await; - // Update session with the final device_info from Response (has correct node_id) - // This ensures vouching uses the joiner's authoritative device info - { - let mut sessions = self.active_sessions.write().await; - if let Some(session) = sessions.get_mut(&session_id) { - session.remote_device_info = Some(device_info.clone()); - self.log_debug(&format!( - "Updated session {} with joiner's device info (node_id: {})", - session_id, device_info.network_fingerprint.node_id - )) - .await; - } - } - - // Signature is valid - complete pairing on Initiator's side - let shared_secret = self.generate_shared_secret(session_id).await?; - let session_keys = SessionKeys::from_shared_secret(shared_secret.clone()); - - let actual_device_id = device_info.device_id; - let node_id = match device_info.network_fingerprint.node_id.parse::() { - Ok(id) => id, - Err(_) => { - self.log_warn("Failed to parse node ID from device info, using fallback") + // Update session with the final device_info from Response (has correct node_id) + // This ensures vouching uses the joiner's authoritative device info + { + let mut sessions = self.active_sessions.write().await; + if let Some(session) = sessions.get_mut(&session_id) { + session.remote_device_info = Some(device_info.clone()); + self.log_debug(&format!( + "Updated session {} with joiner's device info (node_id: {})", + session_id, device_info.network_fingerprint.node_id + )) .await; - EndpointId::from_bytes(&[0u8; 32]).unwrap() + } } - }; + + // Signature is valid - complete pairing on Initiator's side + let shared_secret = self.generate_shared_secret(session_id).await?; + let session_keys = SessionKeys::from_shared_secret(shared_secret.clone()); + + let actual_device_id = device_info.device_id; + let node_id = match device_info + .network_fingerprint + .node_id + .parse::() + { + Ok(id) => id, + Err(_) => { + self.log_warn("Failed to parse node ID from device info, using fallback") + .await; + 
EndpointId::from_bytes(&[0u8; 32]).unwrap() + } + }; // Register joiner's device in Pairing state { diff --git a/core/src/service/network/protocol/pairing/mod.rs b/core/src/service/network/protocol/pairing/mod.rs index 78fc01516..2aadfc9ce 100644 --- a/core/src/service/network/protocol/pairing/mod.rs +++ b/core/src/service/network/protocol/pairing/mod.rs @@ -1009,13 +1009,13 @@ impl PairingProtocolHandler { NetworkingError::Protocol("Missing vouchee public key".to_string()) })?; let secret = session.shared_secret.clone(); - + self.log_debug(&format!( "Vouching device {} with node_id: '{}'", device_info.device_id, device_info.network_fingerprint.node_id )) .await; - + (device_info, public_key, secret) }; @@ -1435,27 +1435,27 @@ impl PairingProtocolHandler { return Ok(()); } - if proxy_config.auto_accept_vouched && voucher_is_trusted { - { - self.log_info(&format!( - "Auto-accepting proxy pairing for device {} with node_id: '{}'", - vouchee_device_info.device_id, vouchee_device_info.network_fingerprint.node_id - )) - .await; + if proxy_config.auto_accept_vouched && voucher_is_trusted { + { + self.log_info(&format!( + "Auto-accepting proxy pairing for device {} with node_id: '{}'", + vouchee_device_info.device_id, vouchee_device_info.network_fingerprint.node_id + )) + .await; - let mut registry = self.device_registry.write().await; - registry - .complete_pairing( - vouchee_device_info.device_id, - vouchee_device_info.clone(), - proxied_session_keys.clone(), - None, - crate::service::network::device::PairingType::Proxied, - Some(voucher_device_id), - Some(chrono::Utc::now()), - ) - .await?; - } + let mut registry = self.device_registry.write().await; + registry + .complete_pairing( + vouchee_device_info.device_id, + vouchee_device_info.clone(), + proxied_session_keys.clone(), + None, + crate::service::network::device::PairingType::Proxied, + Some(voucher_device_id), + Some(chrono::Utc::now()), + ) + .await?; + } let accepting_device_id = self.get_device_info().await?.device_id; let response = PairingMessage::ProxyPairingResponse { diff --git a/core/src/service/network/protocol/pairing/types.rs b/core/src/service/network/protocol/pairing/types.rs index db43367e4..d8b0b071b 100644 --- a/core/src/service/network/protocol/pairing/types.rs +++ b/core/src/service/network/protocol/pairing/types.rs @@ -444,12 +444,16 @@ impl PairingAdvertisement { /// Convert node address info back to EndpointAddr pub fn node_addr(&self) -> crate::service::network::Result { // Parse node ID - let node_id = self.node_addr_info.node_id.parse::().map_err(|e| { - crate::service::network::NetworkingError::Protocol(format!( - "Invalid node ID in advertisement: {}", - e - )) - })?; + let node_id = self + .node_addr_info + .node_id + .parse::() + .map_err(|e| { + crate::service::network::NetworkingError::Protocol(format!( + "Invalid node ID in advertisement: {}", + e + )) + })?; // In v0.95+, EndpointAddr is immutable and builder methods were removed. 
// Create a minimal EndpointAddr with just the ID - Iroh's discovery system diff --git a/core/src/service/network/protocol/pairing/vouching_queue.rs b/core/src/service/network/protocol/pairing/vouching_queue.rs index e58d70be1..52ddfcc93 100644 --- a/core/src/service/network/protocol/pairing/vouching_queue.rs +++ b/core/src/service/network/protocol/pairing/vouching_queue.rs @@ -57,14 +57,16 @@ impl VouchingQueue { pub async fn open(data_dir: impl AsRef) -> Result { let networking_dir = data_dir.as_ref().join("networking"); // Ensure networking directory exists - tokio::fs::create_dir_all(&networking_dir).await.map_err(|e| { - NetworkingError::Protocol(format!("Failed to create networking directory: {}", e)) - })?; + tokio::fs::create_dir_all(&networking_dir) + .await + .map_err(|e| { + NetworkingError::Protocol(format!("Failed to create networking directory: {}", e)) + })?; // Ensure networking directory exists std::fs::create_dir_all(&networking_dir).map_err(|e| { NetworkingError::Protocol(format!("Failed to create networking directory: {}", e)) })?; - + let db_path = networking_dir.join("vouching_queue.db"); let database_url = format!("sqlite://{}?mode=rwc", db_path.display()); let conn = Database::connect(&database_url).await.map_err(|e| { diff --git a/core/src/service/sync/backfill.rs b/core/src/service/sync/backfill.rs index d5cb3c2e0..803c8d3b6 100644 --- a/core/src/service/sync/backfill.rs +++ b/core/src/service/sync/backfill.rs @@ -1026,7 +1026,10 @@ impl BackfillManager { ); if let Err(e) = resource_manager - .emit_batch_resource_events(&model_type, applied_snapshot_uuids) + .emit_batch_resource_events( + &model_type, + applied_snapshot_uuids, + ) .await { warn!( @@ -1082,20 +1085,26 @@ impl BackfillManager { } else { // FK error but can't extract UUID (raw SQLite error) // Extract diagnostic information for troubleshooting - let fk_mappings = crate::infra::sync::registry::get_fk_mappings(&entry.model_type); + let fk_mappings = crate::infra::sync::registry::get_fk_mappings( + &entry.model_type, + ); // Extract UUID fields from entry data to show which FKs are present - let uuid_fields: Vec = if let Some(obj) = entry.data.as_object() { - obj.keys() - .filter(|k| k.ends_with("_uuid")) - .map(|k| { - let value = obj.get(k).and_then(|v| v.as_str()).unwrap_or("null"); - format!("{}={}", k, value) - }) - .collect() - } else { - vec![] - }; + let uuid_fields: Vec = + if let Some(obj) = entry.data.as_object() { + obj.keys() + .filter(|k| k.ends_with("_uuid")) + .map(|k| { + let value = obj + .get(k) + .and_then(|v| v.as_str()) + .unwrap_or("null"); + format!("{}={}", k, value) + }) + .collect() + } else { + vec![] + }; // Log comprehensive diagnostic information tracing::info!( diff --git a/core/src/service/sync/protocol_handler.rs b/core/src/service/sync/protocol_handler.rs index 4fd7070d4..2e04fbf37 100644 --- a/core/src/service/sync/protocol_handler.rs +++ b/core/src/service/sync/protocol_handler.rs @@ -71,10 +71,8 @@ impl LogSyncHandler { // Emit resource event for UI reactivity (for insert/update changes) if matches!(change_type, ChangeType::Insert | ChangeType::Update) { - let resource_manager = crate::domain::ResourceManager::new( - db.clone(), - self.peer_sync.event_bus().clone(), - ); + let resource_manager = + crate::domain::ResourceManager::new(db.clone(), self.peer_sync.event_bus().clone()); if let Err(e) = resource_manager .emit_resource_events(&model_type, vec![record_uuid]) diff --git a/core/src/testing/integration_utils.rs b/core/src/testing/integration_utils.rs index 
c6ec0244a..2678e802d 100644 --- a/core/src/testing/integration_utils.rs +++ b/core/src/testing/integration_utils.rs @@ -87,7 +87,9 @@ pub struct TestEnvironment { impl TestEnvironment { /// Create a new test environment with the given name - pub fn new(test_name: impl Into) -> Result> { + pub fn new( + test_name: impl Into, + ) -> Result> { let test_name = test_name.into(); let test_root = PathBuf::from("test_data"); let test_data_dir = test_root.join(&test_name); @@ -225,7 +227,9 @@ impl TestConfigBuilder { } /// Build and save the AppConfig to the data directory - pub async fn build_and_save(self) -> Result> { + pub async fn build_and_save( + self, + ) -> Result> { let config = self.build(); // Ensure the data directory exists @@ -318,7 +322,9 @@ pub struct IntegrationTestSetup { impl IntegrationTestSetup { /// Create a new integration test setup with default configuration - pub async fn new(test_name: impl Into) -> Result> { + pub async fn new( + test_name: impl Into, + ) -> Result> { let environment = TestEnvironment::new(test_name)?; // Clean any existing data @@ -411,7 +417,9 @@ impl IntegrationTestSetup { /// /// This method ensures that the custom AppConfig settings from the test setup /// are properly applied when initializing the Core. - pub async fn create_core(&self) -> Result> { + pub async fn create_core( + &self, + ) -> Result> { info!( "Creating Core with test configuration from: {}", self.data_dir().display() diff --git a/core/src/volume/fs/apfs.rs b/core/src/volume/fs/apfs.rs index 9a1a77a6b..a688095fa 100644 --- a/core/src/volume/fs/apfs.rs +++ b/core/src/volume/fs/apfs.rs @@ -337,8 +337,10 @@ pub fn containers_to_volumes( // Create stable volume fingerprint for APFS volumes // APFS volumes are always local system/primary volumes, use mount_point + device_id - let fingerprint = - crate::volume::types::VolumeFingerprint::from_primary_volume(mount_point, device_id); + let fingerprint = crate::volume::types::VolumeFingerprint::from_primary_volume( + mount_point, + device_id, + ); debug!( "APFS_CONVERT: Generated fingerprint {} for volume '{}' (consumed: {} bytes)", @@ -357,8 +359,7 @@ pub fn containers_to_volumes( // Auto-track eligibility: Only Primary volume (Data volume on modern macOS) let auto_track_eligible = - matches!(volume_type, crate::volume::types::VolumeType::Primary) - && is_user_visible; + matches!(volume_type, crate::volume::types::VolumeType::Primary) && is_user_visible; debug!( "APFS_CONVERT: Volume '{}' classified as Type={:?}, user_visible={}, auto_track_eligible={}", diff --git a/core/src/volume/platform/linux.rs b/core/src/volume/platform/linux.rs index 98d611d33..12e8fc523 100644 --- a/core/src/volume/platform/linux.rs +++ b/core/src/volume/platform/linux.rs @@ -119,10 +119,7 @@ fn parse_df_line( } crate::volume::types::VolumeType::Network => { // Use filesystem device as backend identifier for network volumes - VolumeFingerprint::from_network_volume( - filesystem_device, - &mount_path.to_string_lossy(), - ) + VolumeFingerprint::from_network_volume(filesystem_device, &mount_path.to_string_lossy()) } _ => { // Primary, UserData, Secondary, System, Virtual, Unknown diff --git a/core/src/volume/platform/windows.rs b/core/src/volume/platform/windows.rs index 703469588..d04e82e5a 100644 --- a/core/src/volume/platform/windows.rs +++ b/core/src/volume/platform/windows.rs @@ -254,7 +254,10 @@ pub fn create_volume_from_windows_info( } crate::volume::types::VolumeType::Network => { // Use mount path as backend identifier for network volumes - let backend_id = 
info.volume_guid.as_deref().unwrap_or(&mount_path.to_string_lossy()); + let backend_id = info + .volume_guid + .as_deref() + .unwrap_or(&mount_path.to_string_lossy()); VolumeFingerprint::from_network_volume(backend_id, &mount_path.to_string_lossy()) } _ => { diff --git a/core/tests/copy_progress_test.rs b/core/tests/copy_progress_test.rs index 44918b8c2..7213d7854 100644 --- a/core/tests/copy_progress_test.rs +++ b/core/tests/copy_progress_test.rs @@ -189,7 +189,8 @@ async fn test_copy_progress_with_metadata_tracking() { let mut event_subscriber = core.events.subscribe(); // Start monitoring task BEFORE dispatching to avoid missing events - let (job_id_tx, job_id_rx) = tokio::sync::oneshot::channel::(); + let (job_id_tx, job_id_rx) = + tokio::sync::oneshot::channel::(); let monitor_handle = tokio::spawn(async move { // Wait for job ID to be sent @@ -292,9 +293,10 @@ async fn test_copy_progress_with_metadata_tracking() { // Limit queries use sd_core::infra::query::LibraryQuery; - let query_input = sd_core::ops::jobs::copy_metadata::query::CopyMetadataQueryInput { - job_id: job_id.into(), - }; + let query_input = + sd_core::ops::jobs::copy_metadata::query::CopyMetadataQueryInput { + job_id: job_id.into(), + }; let query = sd_core::ops::jobs::copy_metadata::query::CopyMetadataQuery::from_input( @@ -357,7 +359,9 @@ async fn test_copy_progress_with_metadata_tracking() { let job_id = job_handle.id; // Send job ID to monitoring task - job_id_tx.send(job_id).expect("Monitor task should be running"); + job_id_tx + .send(job_id) + .expect("Monitor task should be running"); // Wait for job completion with timeout let (event_count, metadata_query_count) = @@ -374,8 +378,9 @@ async fn test_copy_progress_with_metadata_tracking() { // Query final metadata state println!("\nQuerying final job metadata..."); use sd_core::infra::query::LibraryQuery; - let query_input = - sd_core::ops::jobs::copy_metadata::query::CopyMetadataQueryInput { job_id: job_id.into() }; + let query_input = sd_core::ops::jobs::copy_metadata::query::CopyMetadataQueryInput { + job_id: job_id.into(), + }; let query = sd_core::ops::jobs::copy_metadata::query::CopyMetadataQuery::from_input(query_input) .unwrap(); @@ -412,9 +417,8 @@ async fn test_copy_progress_with_metadata_tracking() { 0 }; - let test_passed = files_completed_at_end == file_count - && max_percentage >= 0.99 - && max_jump < 50.0; + let test_passed = + files_completed_at_end == file_count && max_percentage >= 0.99 && max_jump < 50.0; let failure_reason = if !test_passed { if files_completed_at_end != file_count { @@ -478,9 +482,7 @@ async fn test_copy_progress_with_metadata_tracking() { // Always write to temp dir for local inspection let temp_snapshot_path = test_root.join("test_snapshot.json"); let snapshot_json = serde_json::to_string_pretty(&snapshot).unwrap(); - fs::write(&temp_snapshot_path, snapshot_json) - .await - .unwrap(); + fs::write(&temp_snapshot_path, snapshot_json).await.unwrap(); println!( "\nšŸ“„ Snapshot written to temp: {}", temp_snapshot_path.display() @@ -518,4 +520,3 @@ async fn test_copy_progress_with_metadata_tracking() { ); } } - diff --git a/core/tests/cross_device_copy_test.rs b/core/tests/cross_device_copy_test.rs index a2b6d6f9a..f87fd9458 100644 --- a/core/tests/cross_device_copy_test.rs +++ b/core/tests/cross_device_copy_test.rs @@ -587,4 +587,4 @@ async fn test_cross_device_copy() { panic!("Cross-device copy test failed"); } } -} \ No newline at end of file +} diff --git a/core/tests/file_copy_pull_test.rs 
b/core/tests/file_copy_pull_test.rs index e1e729a88..7334b38d0 100644 --- a/core/tests/file_copy_pull_test.rs +++ b/core/tests/file_copy_pull_test.rs @@ -606,4 +606,4 @@ async fn test_file_copy_pull() { panic!("PULL transfer test failed"); } } -} \ No newline at end of file +} diff --git a/core/tests/file_move_test.rs b/core/tests/file_move_test.rs index a33de2199..504fa045b 100644 --- a/core/tests/file_move_test.rs +++ b/core/tests/file_move_test.rs @@ -303,7 +303,8 @@ async fn test_ephemeral_file_move_via_reindex() -> anyhow::Result<()> { // Index in ephemeral mode let test_root_sd = SdPath::local(test_root.clone()); - let indexer_config = IndexerJobConfig::ephemeral_browse(test_root_sd, IndexScope::Recursive, false); + let indexer_config = + IndexerJobConfig::ephemeral_browse(test_root_sd, IndexScope::Recursive, false); let indexer_job = IndexerJob::new(indexer_config); tracing::info!("Initial ephemeral indexing"); @@ -327,8 +328,11 @@ async fn test_ephemeral_file_move_via_reindex() -> anyhow::Result<()> { .await?; // Manual reindex to detect the change - let reindex_config = - IndexerJobConfig::ephemeral_browse(SdPath::local(test_root.clone()), IndexScope::Recursive, false); + let reindex_config = IndexerJobConfig::ephemeral_browse( + SdPath::local(test_root.clone()), + IndexScope::Recursive, + false, + ); let reindex_job = IndexerJob::new(reindex_config); let reindex_handle = harness.library.jobs().dispatch(reindex_job).await?; reindex_handle.wait().await?; @@ -381,7 +385,8 @@ async fn test_ephemeral_file_move_via_watcher() -> anyhow::Result<()> { // Index in ephemeral mode let test_root_sd = SdPath::local(test_root.clone()); - let indexer_config = IndexerJobConfig::ephemeral_browse(test_root_sd, IndexScope::Recursive, false); + let indexer_config = + IndexerJobConfig::ephemeral_browse(test_root_sd, IndexScope::Recursive, false); let indexer_job = IndexerJob::new(indexer_config); tracing::info!("Initial ephemeral indexing"); diff --git a/core/tests/file_sync_test.rs b/core/tests/file_sync_test.rs index 2a5c06922..65e82aebc 100644 --- a/core/tests/file_sync_test.rs +++ b/core/tests/file_sync_test.rs @@ -50,22 +50,22 @@ impl FileSyncTestSetup { let temp_dir = TempDir::new()?; - let config = sd_core::config::AppConfig { - version: 3, - data_dir: temp_dir.path().to_path_buf(), - log_level: "info".to_string(), - telemetry_enabled: false, - preferences: sd_core::config::Preferences::default(), - job_logging: sd_core::config::JobLoggingConfig::default(), - services: sd_core::config::ServiceConfig { - networking_enabled: false, - volume_monitoring_enabled: false, - fs_watcher_enabled: false, - statistics_listener_enabled: false, - }, - logging: sd_core::config::LoggingConfig::default(), - proxy_pairing: sd_core::config::app_config::ProxyPairingConfig::default(), - }; + let config = sd_core::config::AppConfig { + version: 3, + data_dir: temp_dir.path().to_path_buf(), + log_level: "info".to_string(), + telemetry_enabled: false, + preferences: sd_core::config::Preferences::default(), + job_logging: sd_core::config::JobLoggingConfig::default(), + services: sd_core::config::ServiceConfig { + networking_enabled: false, + volume_monitoring_enabled: false, + fs_watcher_enabled: false, + statistics_listener_enabled: false, + }, + logging: sd_core::config::LoggingConfig::default(), + proxy_pairing: sd_core::config::app_config::ProxyPairingConfig::default(), + }; config.save()?; let core = Core::new(temp_dir.path().to_path_buf()) diff --git a/core/tests/folder_rename_test.rs 
b/core/tests/folder_rename_test.rs index 48f933b6d..db29e42f8 100644 --- a/core/tests/folder_rename_test.rs +++ b/core/tests/folder_rename_test.rs @@ -317,7 +317,8 @@ async fn test_ephemeral_folder_rename_via_reindex() -> anyhow::Result<()> { // Index in ephemeral mode let test_root_sd = SdPath::local(test_root.clone()); - let indexer_config = IndexerJobConfig::ephemeral_browse(test_root_sd, IndexScope::Recursive, false); + let indexer_config = + IndexerJobConfig::ephemeral_browse(test_root_sd, IndexScope::Recursive, false); let indexer_job = IndexerJob::new(indexer_config); tracing::info!("Initial ephemeral indexing"); @@ -338,8 +339,11 @@ async fn test_ephemeral_folder_rename_via_reindex() -> anyhow::Result<()> { tokio::fs::rename(&original_folder, &renamed_folder).await?; // Manual reindex to detect the change - let reindex_config = - IndexerJobConfig::ephemeral_browse(SdPath::local(test_root.clone()), IndexScope::Recursive, false); + let reindex_config = IndexerJobConfig::ephemeral_browse( + SdPath::local(test_root.clone()), + IndexScope::Recursive, + false, + ); let reindex_job = IndexerJob::new(reindex_config); let reindex_handle = harness.library.jobs().dispatch(reindex_job).await?; reindex_handle.wait().await?; @@ -386,7 +390,8 @@ async fn test_ephemeral_folder_rename_via_watcher() -> anyhow::Result<()> { // Index in ephemeral mode let test_root_sd = SdPath::local(test_root.clone()); - let indexer_config = IndexerJobConfig::ephemeral_browse(test_root_sd, IndexScope::Recursive, false); + let indexer_config = + IndexerJobConfig::ephemeral_browse(test_root_sd, IndexScope::Recursive, false); let indexer_job = IndexerJob::new(indexer_config); tracing::info!("Initial ephemeral indexing"); diff --git a/core/tests/location_export_import_test.rs b/core/tests/location_export_import_test.rs index cae6f6085..bdd80f445 100644 --- a/core/tests/location_export_import_test.rs +++ b/core/tests/location_export_import_test.rs @@ -372,7 +372,8 @@ async fn test_location_export_import() -> Result<(), Box Result<(), Box> { +async fn test_export_nonexistent_location() -> Result<(), Box> +{ let temp_dir = TempDir::new()?; let core_dir = temp_dir.path().join("core"); let export_file = temp_dir.path().join("export.sql"); @@ -475,7 +476,8 @@ async fn test_import_invalid_file() -> Result<(), Box Result<(), Box> { +async fn test_import_links_existing_content_identities( +) -> Result<(), Box> { // This test verifies that when importing a location that has content matching // existing content_identities in the destination library, the entries link to // the existing content_identities rather than creating duplicates. 
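The networking hunks earlier in this patch repeatedly apply the same iroh v0.95 access patterns called out in their inline comments: relay URLs are now read from the endpoint address, discovery services are built and subscribed individually, and node ID strings are parsed as EndpointId with a zeroed fallback. The sketch below condenses those patterns in one place for reference. It is illustrative only: it is assembled from the calls visible in this patch rather than from the iroh 0.95 documentation, and the import paths are assumptions.

use iroh::{Endpoint, EndpointId};           // import paths assumed, not verified
use iroh::discovery::mdns::MdnsDiscovery;   // module path assumed, not verified

// Read the current relay URL from the endpoint address
// (same call chain as the get_relay_url hunk above).
fn relay_url(endpoint: &Endpoint) -> Option<String> {
    endpoint
        .addr()
        .relay_urls()
        .next()
        .map(|url| url.to_string())
}

// Parse a node ID string into an EndpointId, falling back to an all-zero ID
// (mirrors the fallback used in the pairing initiator hunk above).
fn parse_endpoint_id(s: &str) -> EndpointId {
    s.parse::<EndpointId>()
        .unwrap_or_else(|_| EndpointId::from_bytes(&[0u8; 32]).unwrap())
}

// Build an mDNS discovery service for this endpoint and subscribe to its events
// (mirrors the session-discovery hunk above; error type assumed to convert via anyhow).
async fn subscribe_mdns(endpoint: &Endpoint) -> anyhow::Result<()> {
    let mdns = MdnsDiscovery::builder().build(endpoint.id())?;
    let mut events = mdns.subscribe().await;
    // consume `events` here, e.g. filter for the expected session identifier
    let _ = &mut events;
    Ok(())
}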
diff --git a/core/tests/proxy_pairing_protocol_test.rs b/core/tests/proxy_pairing_protocol_test.rs
index 8e172e6e3..ccbfdcf12 100644
--- a/core/tests/proxy_pairing_protocol_test.rs
+++ b/core/tests/proxy_pairing_protocol_test.rs
@@ -163,10 +163,7 @@ fn test_vouching_session_with_multiple_vouches() {
 	assert!(all_terminal);
 
 	session.state = VouchingSessionState::Completed;
-	assert!(matches!(
-		session.state,
-		VouchingSessionState::Completed
-	));
+	assert!(matches!(session.state, VouchingSessionState::Completed));
 }
 
 #[test]
@@ -234,24 +231,15 @@ fn test_vouching_session_state_transitions() {
 	};
 
 	// Start as Pending
-	assert!(matches!(
-		session.state,
-		VouchingSessionState::Pending
-	));
+	assert!(matches!(session.state, VouchingSessionState::Pending));
 
 	// Transition to InProgress when vouching starts
 	session.state = VouchingSessionState::InProgress;
-	assert!(matches!(
-		session.state,
-		VouchingSessionState::InProgress
-	));
+	assert!(matches!(session.state, VouchingSessionState::InProgress));
 
 	// Transition to Completed when all vouches are processed
 	session.state = VouchingSessionState::Completed;
-	assert!(matches!(
-		session.state,
-		VouchingSessionState::Completed
-	));
+	assert!(matches!(session.state, VouchingSessionState::Completed));
 }
 
 #[test]
diff --git a/core/tests/proxy_pairing_test.rs b/core/tests/proxy_pairing_test.rs
index 33f9c6df9..8aed11619 100644
--- a/core/tests/proxy_pairing_test.rs
+++ b/core/tests/proxy_pairing_test.rs
@@ -93,7 +93,11 @@ async fn alice_proxy_pairing_scenario() {
 	loop {
 		tokio::time::sleep(Duration::from_secs(1)).await;
 		let paired_devices = if let Some(networking) = core.networking() {
-			networking.device_registry().read().await.get_paired_devices()
+			networking
+				.device_registry()
+				.read()
+				.await
+				.get_paired_devices()
 		} else {
 			vec![]
 		};
@@ -146,7 +150,11 @@ async fn alice_proxy_pairing_scenario() {
 	loop {
 		tokio::time::sleep(Duration::from_secs(1)).await;
 		let paired_devices = if let Some(networking) = core.networking() {
-			networking.device_registry().read().await.get_paired_devices()
+			networking
+				.device_registry()
+				.read()
+				.await
+				.get_paired_devices()
 		} else {
 			vec![]
 		};
@@ -181,7 +189,11 @@ async fn alice_proxy_pairing_scenario() {
 
 	// Get Carol's device ID
 	let paired_devices = if let Some(networking) = core.networking() {
-		networking.device_registry().read().await.get_paired_devices()
+		networking
+			.device_registry()
+			.read()
+			.await
+			.get_paired_devices()
 	} else {
 		vec![]
 	};
@@ -281,7 +293,11 @@ async fn carol_proxy_pairing_scenario() {
 	loop {
 		tokio::time::sleep(Duration::from_secs(1)).await;
 		let paired_devices = if let Some(networking) = core.networking() {
-			networking.device_registry().read().await.get_paired_devices()
+			networking
+				.device_registry()
+				.read()
+				.await
+				.get_paired_devices()
 		} else {
 			vec![]
 		};
@@ -327,7 +343,11 @@ async fn carol_proxy_pairing_scenario() {
 	loop {
 		tokio::time::sleep(Duration::from_secs(1)).await;
 		let paired_devices = if let Some(networking) = core.networking() {
-			networking.device_registry().read().await.get_paired_devices()
+			networking
+				.device_registry()
+				.read()
+				.await
+				.get_paired_devices()
 		} else {
 			vec![]
 		};
@@ -447,7 +467,11 @@ async fn bob_proxy_pairing_scenario() {
 	loop {
 		tokio::time::sleep(Duration::from_secs(1)).await;
 		let paired_devices = if let Some(networking) = core.networking() {
-			networking.device_registry().read().await.get_paired_devices()
+			networking
+				.device_registry()
+				.read()
+				.await
+				.get_paired_devices()
 		} else {
 			vec![]
 		};
@@ -485,7 +509,11 @@ async fn bob_proxy_pairing_scenario() {
 	loop {
 		tokio::time::sleep(Duration::from_secs(1)).await;
 		let paired_devices = if let Some(networking) = core.networking() {
-			networking.device_registry().read().await.get_paired_devices()
+			networking
+				.device_registry()
+				.read()
+				.await
+				.get_paired_devices()
 		} else {
 			vec![]
 		};
diff --git a/core/tests/sync_backfill_race_test.rs b/core/tests/sync_backfill_race_test.rs
index 62c6d006a..31c0af5b5 100644
--- a/core/tests/sync_backfill_race_test.rs
+++ b/core/tests/sync_backfill_race_test.rs
@@ -479,8 +479,20 @@ async fn test_sequential_backfill_control() -> anyhow::Result<()> {
 
 	tracing::info!("Indexing both locations on Alice first");
 
-	add_and_index_location(&harness.library_alice, &harness.core_alice.volumes, core_path.to_str().unwrap(), "core").await?;
-	add_and_index_location(&harness.library_alice, &harness.core_alice.volumes, apps_path.to_str().unwrap(), "apps").await?;
+	add_and_index_location(
+		&harness.library_alice,
+		&harness.core_alice.volumes,
+		core_path.to_str().unwrap(),
+		"core",
+	)
+	.await?;
+	add_and_index_location(
+		&harness.library_alice,
+		&harness.core_alice.volumes,
+		apps_path.to_str().unwrap(),
+		"apps",
+	)
+	.await?;
 
 	let alice_entries = entities::entry::Entity::find()
 		.count(harness.library_alice.db().conn())
diff --git a/core/tests/sync_backfill_test.rs b/core/tests/sync_backfill_test.rs
index 6be343700..9ec382706 100644
--- a/core/tests/sync_backfill_test.rs
+++ b/core/tests/sync_backfill_test.rs
@@ -53,7 +53,12 @@ async fn test_initial_backfill_alice_indexes_first() -> anyhow::Result<()> {
 	let device_alice_id = core_alice.device.device_id()?;
 	let library_alice = core_alice
 		.libraries
-		.create_library_with_id(library_id, "Backfill Test Library", None, core_alice.context.clone())
+		.create_library_with_id(
+			library_id,
+			"Backfill Test Library",
+			None,
+			core_alice.context.clone(),
+		)
 		.await?;
 
 	let device_record = entities::device::Entity::find()
@@ -179,7 +184,12 @@ async fn test_initial_backfill_alice_indexes_first() -> anyhow::Result<()> {
 	let device_bob_id = core_bob.device.device_id()?;
 	let library_bob = core_bob
 		.libraries
-		.create_library_with_id(library_id, "Backfill Test Library", None, core_bob.context.clone())
+		.create_library_with_id(
+			library_id,
+			"Backfill Test Library",
+			None,
+			core_bob.context.clone(),
+		)
 		.await?;
 
 	register_device(&library_alice, device_bob_id, "Bob").await?;
@@ -414,7 +424,12 @@ async fn test_bidirectional_volume_sync() -> anyhow::Result<()> {
 	let device_alice_id = core_alice.device.device_id()?;
 	let library_alice = core_alice
 		.libraries
-		.create_library_with_id(library_id, "Volume Sync Test", None, core_alice.context.clone())
+		.create_library_with_id(
+			library_id,
+			"Volume Sync Test",
+			None,
+			core_alice.context.clone(),
+		)
 		.await?;
 
 	let core_bob = Core::new(temp_dir_bob.clone())
@@ -423,7 +438,12 @@ async fn test_bidirectional_volume_sync() -> anyhow::Result<()> {
 	let device_bob_id = core_bob.device.device_id()?;
 	let library_bob = core_bob
 		.libraries
-		.create_library_with_id(library_id, "Volume Sync Test", None, core_bob.context.clone())
+		.create_library_with_id(
+			library_id,
+			"Volume Sync Test",
+			None,
+			core_bob.context.clone(),
+		)
 		.await?;
 
 	register_device(&library_alice, device_bob_id, "Bob").await?;
@@ -634,7 +654,12 @@ async fn test_volume_resource_events_on_sync() -> anyhow::Result<()> {
 	let device_alice_id = core_alice.device.device_id()?;
 	let library_alice = core_alice
 		.libraries
-		.create_library_with_id(library_id, "Volume Event Test", None, core_alice.context.clone())
+		.create_library_with_id(
+			library_id,
+			"Volume Event Test",
+			None,
+			core_alice.context.clone(),
+		)
 		.await?;
 
 	let core_bob = Core::new(temp_dir_bob.clone())
@@ -643,7 +668,12 @@ async fn test_volume_resource_events_on_sync() -> anyhow::Result<()> {
 	let device_bob_id = core_bob.device.device_id()?;
 	let library_bob = core_bob
 		.libraries
-		.create_library_with_id(library_id, "Volume Event Test", None, core_bob.context.clone())
+		.create_library_with_id(
+			library_id,
+			"Volume Event Test",
+			None,
+			core_bob.context.clone(),
+		)
 		.await?;
 
 	register_device(&library_alice, device_bob_id, "Bob").await?;
@@ -683,17 +713,27 @@ async fn test_volume_resource_events_on_sync() -> anyhow::Result<()> {
 			tracing::debug!("Bob received event: {:?}", event);
 
 			match event {
-				Event::ResourceChangedBatch { resource_type, resources, .. } => {
+				Event::ResourceChangedBatch {
+					resource_type,
+					resources,
+					..
+				} => {
 					if resource_type == "volume" {
 						tracing::info!(
-							resource_count = if let serde_json::Value::Array(arr) = &resources { arr.len() } else { 0 },
+							resource_count = if let serde_json::Value::Array(arr) = &resources {
+								arr.len()
+							} else {
+								0
+							},
 							"Bob received ResourceChangedBatch for volumes"
 						);
 
 						// Check if Alice's volume is in the batch
 						if let serde_json::Value::Array(volume_array) = resources {
 							for volume_json in volume_array {
-								if let Some(uuid_str) = volume_json.get("id").and_then(|v| v.as_str()) {
+								if let Some(uuid_str) =
+									volume_json.get("id").and_then(|v| v.as_str())
+								{
 									if let Ok(volume_id) = Uuid::parse_str(uuid_str) {
 										if volume_id == alice_volume_uuid_clone {
 											tracing::info!(
@@ -709,7 +749,11 @@ async fn test_volume_resource_events_on_sync() -> anyhow::Result<()> {
 						}
 					}
 				}
-				Event::ResourceChanged { resource_type, resource, .. } => {
+				Event::ResourceChanged {
+					resource_type,
+					resource,
+					..
+				} => {
 					if resource_type == "volume" {
 						tracing::info!("Bob received single ResourceChanged for volume");
 
@@ -803,17 +847,16 @@ async fn test_volume_resource_events_on_sync() -> anyhow::Result<()> {
 	// Abort the listener task
 	event_listener.abort();
 
-	tracing::info!(
-		event_received = event_was_received,
-		"=== Test Result ==="
-	);
+	tracing::info!(event_received = event_was_received, "=== Test Result ===");
 
 	assert!(
 		event_was_received,
 		"Bob should have received a ResourceChanged event for Alice's volume during sync, but didn't"
 	);
 
-	tracing::info!("✅ Volume ResourceChanged event was emitted on the receiving device during sync");
+	tracing::info!(
+		"✅ Volume ResourceChanged event was emitted on the receiving device during sync"
+	);
 
 	Ok(())
 }
diff --git a/core/tests/sync_setup_test.rs b/core/tests/sync_setup_test.rs
index 788c6b028..406278642 100644
--- a/core/tests/sync_setup_test.rs
+++ b/core/tests/sync_setup_test.rs
@@ -315,8 +315,7 @@ async fn carol_three_device_scenario() {
 	// Wait for Alice's library ID
 	println!("Carol: Waiting for Alice's library ID...");
 	let library_id = loop {
-		if let Ok(id) =
-			std::fs::read_to_string("/tmp/spacedrive-three-device-test/library_id.txt")
+		if let Ok(id) = std::fs::read_to_string("/tmp/spacedrive-three-device-test/library_id.txt")
 		{
 			break id.trim().to_string();
 		}
diff --git a/core/tests/transitive_sync_backfill_test.rs b/core/tests/transitive_sync_backfill_test.rs
index 1db44bff4..5398a159a 100644
--- a/core/tests/transitive_sync_backfill_test.rs
+++ b/core/tests/transitive_sync_backfill_test.rs
@@ -688,7 +688,10 @@ async fn carol_transitive_sync_scenario() {
 		let tolerance = (alice_expected_count as f64 * 0.1) as i64;
 
 		if diff <= tolerance && carol_final_count > 10 {
-			println!("Carol: Sync complete! Received {} entries", carol_final_count);
+			println!(
+				"Carol: Sync complete! Received {} entries",
+				carol_final_count
+			);
 			break;
 		}
 
@@ -796,9 +799,12 @@ async fn test_transitive_sync_backfill() {
 	match result {
 		Ok(_) => {
 			println!("\n✅ TRANSITIVE SYNC BACKFILL TEST PASSED!");
-			println!(" ✅ Alice indexed {} entries",
+			println!(
+				" ✅ Alice indexed {} entries",
 				std::fs::read_to_string(format!("{}/alice_entry_count.txt", TEST_DIR))
-					.unwrap_or_default().trim());
+					.unwrap_or_default()
+					.trim()
+			);
 			println!(" ✅ Alice paired with Bob (direct)");
 			println!(" ✅ Bob synced Alice's data");
 			println!(" ✅ Bob paired with Carol (direct)");
diff --git a/core/tests/volume_detection_test.rs b/core/tests/volume_detection_test.rs
index 3079d578e..98f2aa444 100644
--- a/core/tests/volume_detection_test.rs
+++ b/core/tests/volume_detection_test.rs
@@ -118,7 +118,11 @@ async fn test_macos_volume_detection() {
 			" {} -> {} ({})",
 			path_str,
 			volume.name,
-			if volume.name == expected_volume { "✓" } else { "✗ WRONG" }
+			if volume.name == expected_volume {
+				"✓"
+			} else {
+				"✗ WRONG"
+			}
 		);
 		assert_eq!(
 			volume.name, expected_volume,
@@ -501,4 +505,4 @@ async fn test_full_copy_workflow_simulation() {
 		);
 	}
 }
-}
\ No newline at end of file
+}
diff --git a/core/tests/volume_tracking_test.rs b/core/tests/volume_tracking_test.rs
index 6bd90f74d..25d38920a 100644
--- a/core/tests/volume_tracking_test.rs
+++ b/core/tests/volume_tracking_test.rs
@@ -1301,4 +1301,4 @@ async fn test_volume_monitor_service() {
 
 	// Cleanup: shutdown core to release file descriptors
 	core.shutdown().await.expect("Failed to shutdown core");
-}
\ No newline at end of file
+}
diff --git a/xtask/src/main.rs b/xtask/src/main.rs
index a1867c18f..bc0d1edc8 100644
--- a/xtask/src/main.rs
+++ b/xtask/src/main.rs
@@ -425,7 +425,10 @@ fn build_ios() -> Result<()> {
 	let xcframework_path = ios_core_dir.join(format!("{}.xcframework", framework_name));
 
 	if xcframework_path.exists() {
-		println!("Updating existing XCFramework at: {}", xcframework_path.display());
+		println!(
+			"Updating existing XCFramework at: {}",
+			xcframework_path.display()
+		);
 	} else {
 		println!("Creating XCFramework at: {}", xcframework_path.display());
 		std::fs::create_dir_all(&xcframework_path)
@@ -439,8 +442,7 @@ fn build_ios() -> Result<()> {
 
 	// Create/update device framework directory
 	let device_target = xcframework_path.join("ios-arm64");
-	std::fs::create_dir_all(&device_target)
-		.context("Failed to create device target directory")?;
+	std::fs::create_dir_all(&device_target).context("Failed to create device target directory")?;
 	std::fs::copy(
 		device_framework_dir.join(framework_name),
 		device_target.join(format!("lib{}.a", framework_name)),
@@ -449,8 +451,7 @@ fn build_ios() -> Result<()> {
 
 	// Create/update simulator framework directory
 	let sim_target = xcframework_path.join("ios-arm64-simulator");
-	std::fs::create_dir_all(&sim_target)
-		.context("Failed to create simulator target directory")?;
+	std::fs::create_dir_all(&sim_target).context("Failed to create simulator target directory")?;
 	std::fs::copy(
 		sim_framework_dir.join(framework_name),
 		sim_target.join(format!("lib{}.a", framework_name)),