diff --git a/src/arch/arm64/boot/mod.rs b/src/arch/arm64/boot/mod.rs
index 5693882..65818b2 100644
--- a/src/arch/arm64/boot/mod.rs
+++ b/src/arch/arm64/boot/mod.rs
@@ -143,6 +143,9 @@ fn arch_init_secondary(ctx_frame: *mut ExceptionState) -> *mut ExceptionState {
     TCR_EL1.modify(TCR_EL1::EPD0::DisableTTBR0Walks);
     barrier::isb(barrier::SY);
 
+    // Don't trap WFI/WFE executed at EL0 on secondaries.
+    SCTLR_EL1.modify(SCTLR_EL1::NTWE::DontTrap + SCTLR_EL1::NTWI::DontTrap);
+
     // Enable interrupts and exceptions.
     secondary_exceptions_init();
 
diff --git a/src/arch/arm64/boot/secondary.rs b/src/arch/arm64/boot/secondary.rs
index dbe4327..e8658a8 100644
--- a/src/arch/arm64/boot/secondary.rs
+++ b/src/arch/arm64/boot/secondary.rs
@@ -6,6 +6,7 @@ use crate::{
             arch_init_secondary,
             memory::{KERNEL_STACK_PG_ORDER, allocate_kstack_region},
         },
+        memory::flush_to_ram,
         psci::{PSCIEntry, PSCIMethod, boot_secondary_psci},
     },
 },
@@ -122,7 +123,9 @@ fn prepare_for_secondary_entry() -> Result<(PA, PA)> {
     )?;
 
     unsafe {
-        (&raw mut SECONDARY_BOOT_CTX as *mut SecondaryBootInfo).write(SecondaryBootInfo {
+        let boot_ctx = &raw mut SECONDARY_BOOT_CTX as *mut SecondaryBootInfo;
+
+        boot_ctx.write(SecondaryBootInfo {
             boot_stack_addr: boot_stack,
             kstack_addr: kstack_vaddr.end_address(),
             kmem_ttbr: ArchImpl::kern_address_space().lock_save_irq().table_pa(),
@@ -132,6 +135,11 @@ fn prepare_for_secondary_entry() -> Result<(PA, PA)> {
         start_fn: VA::from_value(arch_init_secondary as *const () as usize),
         exception_ret: VA::from_value(&exception_return as *const _ as usize),
         });
+
+        // Flush the boot context to RAM. The secondary will start with the
+        // MMU (and therefore its caches) disabled, so we can't rely on the
+        // CCI; manually clean the boot context out to RAM instead.
+        flush_to_ram(boot_ctx);
     };
 
     Ok((entry_fn, ctx))
diff --git a/src/arch/arm64/memory/mod.rs b/src/arch/arm64/memory/mod.rs
index eb5773f..afed1fd 100644
--- a/src/arch/arm64/memory/mod.rs
+++ b/src/arch/arm64/memory/mod.rs
@@ -1,5 +1,6 @@
 use core::{
     alloc::{GlobalAlloc, Layout},
+    arch::asm,
     ptr::NonNull,
 };
@@ -90,3 +91,37 @@
 
     PA::from_value(v + get_kimage_start().value())
 }
+
+/// Clean the data-cache lines covering `*x` out to RAM (point of
+/// coherency) so the object is visible to an observer whose caches are
+/// disabled (e.g. a secondary core before its MMU is enabled).
+pub fn flush_to_ram<T>(mut x: *const T) {
+    let mut ctr: u64 = 0;
+
+    // CTR_EL0.DminLine (bits [19:16]) is log2 of the smallest data cache
+    // line size, in 4-byte words.
+    unsafe { asm!("mrs {0}, ctr_el0", out(reg) ctr, options(nostack, nomem)) };
+    let stride = (1u64 << ((ctr >> 16) & 0xf)) * 4;
+
+    let end = unsafe { x.byte_add(size_of::<T>()) };
+
+    // Round down to the start of the line containing `x`; otherwise an
+    // unaligned object could leave its final cache line unflushed.
+    x = ((x as usize) & !(stride as usize - 1)) as *const T;
+
+    while x < end {
+        // Clean (without invalidating) the line containing `x` to the
+        // point of coherency.
+        //
+        // No `nomem` here: the compiler must not be allowed to sink the
+        // stores that populated `*x` below this maintenance op.
+        unsafe {
+            asm!("dc cvac, {0}", in(reg) x, options(nostack));
+
+            x = x.byte_add(stride as _);
+        }
+    }
+
+    // Ensure all cache maintenance ops have completed before returning.
+    unsafe { asm!("dsb ish", options(nostack)) };
+}