From: Michalis Pappas
Date: Thu, 13 Jul 2023 11:06:08 +0000 (+0200)
Subject: plat/kvm: Do not unconditionally clean and invalidate the cache
X-Git-Tag: RELEASE-0.14.0~18
X-Git-Url: http://xenbits.xensource.com/gitweb?a=commitdiff_plain;h=a3c74da53303ff1eab828d2c753a8624fddec7bf;p=unikraft%2Funikraft.git

plat/kvm: Do not unconditionally clean and invalidate the cache

The arm64 Linux boot protocol specifies the required state of the
system before jumping into the kernel [1]. Among other things, it
requires that upon entry:

- The MMU is off.
- The D-cache for the region corresponding to the loaded image must
  be cleaned to the Point of Coherency (PoC).

Skip the expensive cache clean & invalidate operations if the MMU is
found to be disabled at boot. Although this heuristic is not strictly
required, it provides some additional confidence that the bootloader
behaves as expected.

With a clean cache, additionally optimize cache invalidations by
limiting them to the regions accessed before the MMU is enabled.

[1] https://www.kernel.org/doc/Documentation/arm64/booting.txt

Signed-off-by: Michalis Pappas
Reviewed-by: Sergiu Moga
Approved-by: Razvan Deaconescu
Tested-by: Unikraft CI
GitHub-Closes: #1049
---
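[Editor's note] For context on the second requirement above: "cleaned to
the Point of Coherency" means the loader must write back every D-cache
line covering the loaded image before branching to the kernel entry
point. Below is a minimal, hypothetical loader-side sketch of such a
clean pass; it is not code from this patch or from Unikraft, and the
register assignments (x0 = image start, x1 = image end, exclusive) are
assumptions for illustration only:

	/* Hypothetical sketch: clean [x0, x1) to the PoC before
	 * jumping to the kernel.
	 */
	mrs	x2, ctr_el0		/* cache type register */
	ubfx	x2, x2, #16, #4		/* DminLine: log2(words per line) */
	mov	x3, #4
	lsl	x3, x3, x2		/* smallest D-cache line, in bytes */
	sub	x4, x3, #1
	bic	x0, x0, x4		/* align start down to a line boundary */
0:	dc	cvac, x0		/* write this line back to the PoC */
	add	x0, x0, x3
	cmp	x0, x1
	b.lo	0b
	dsb	sy			/* complete maintenance before the jump */

With that contract honoured by the loader, the patch below only pays
for a full clean & invalidate pass over the image when it detects that
the contract was apparently not honoured (MMU still enabled).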
diff --git a/plat/kvm/arm/entry64.S b/plat/kvm/arm/entry64.S
index ea554bad5..f9bb7a3d7 100644
--- a/plat/kvm/arm/entry64.S
+++ b/plat/kvm/arm/entry64.S
@@ -36,11 +36,13 @@
 #include
 #include
 
+#define BOOTSTACK_SIZE 4096
+
 /* Prefer using in-image stack, to avoid conflicts when unmapping is done by
  * paging_init (it invalidates our stack)
  */
 .section .bss
-.space 4096
+.space BOOTSTACK_SIZE
 lcpu_bootstack:
 
 .text
@@ -57,31 +59,58 @@ ENTRY(_libkvmplat_entry)
 	isb
 #endif /* CONFIG_FPSIMD */
 
-	/*
-	 * We will disable MMU and cache before the pagetables are ready.
-	 * This means we will change memory with cache disabled, so we need to
-	 * invalidate the cache to ensure there is no stale data in it.
-	 * But we don't know the size of the RAM either. And it would be
-	 * expensive to invalidate the whole cache. In this case, just
-	 * just need to invalidate what we are going to use:
-	 * DTB, TEXT, DATA, BSS, and bootstack.
+	/* If we boot via the linux boot protocol we expect that the MMU is
+	 * disabled and the cache for the region of the image is clean.
+	 * If we find that the MMU is enabled, we consider the cache state
+	 * unpredictable. In that case we clean-invalidate the cache for
+	 * the entire image and disable the MMU.
+	 *
+	 * Notice: A clean cache would suffice as long as we invalidate any
+	 *         memory we touch while the MMU is disabled. But since PIE
+	 *         can write anywhere we also invalidate the entire image.
 	 */
+	mrs	x19, sctlr_el1
+	and	x19, x19, #SCTLR_EL1_M_BIT
+#if CONFIG_LIBUKRELOC
+	/* If PIE is enabled and the MMU is disabled we proceed to
+	 * invalidate the entire image region anyway, as libukreloc
+	 * right now does not invalidate the individual lines it touches.
+	 */
+	cbz	x19, 0f
+#else /* !CONFIG_LIBUKRELOC */
+	cbz	x19, 1f
+#endif /* !CONFIG_LIBUKRELOC */
+
 	ur_ldr	x0, _start_ram_addr
 	ur_ldr	x1, _end
-	bl	clean_and_invalidate_dcache_range
+	bl	clean_and_invalidate_dcache_range
 
 	/* Disable the MMU and D-Cache.
 	 */
-	dsb	sy
-	mrs	x2, sctlr_el1
-	mov	x3, #SCTLR_EL1_M_BIT|SCTLR_EL1_C_BIT
-	bic	x2, x2, x3
-	msr	sctlr_el1, x2
+	dsb	sy
+	mrs	x2, sctlr_el1
+	mov	x3, #SCTLR_EL1_M_BIT|SCTLR_EL1_C_BIT
+	bic	x2, x2, x3
+	msr	sctlr_el1, x2
 	isb
-
-	/* Set the boot stack */
+#if CONFIG_LIBUKRELOC
+	b	1f
+0:
+	ur_ldr	x0, _start_ram_addr
+	ur_ldr	x1, _end
+	bl	invalidate_dcache_range
+#endif /* CONFIG_LIBUKRELOC */
+1:
+	/* Set the boot stack. Invalidate the corresponding cache lines to
+	 * avoid stale cache contents shadowing our changes once the MMU
+	 * and D-cache are enabled.
+	 */
 	ur_ldr	x26, lcpu_bootstack
 	and	x26, x26, ~(__STACK_ALIGN_SIZE - 1)
 	mov	sp, x26
+	mov	x0, x26
+	mov	x1, #BOOTSTACK_SIZE
+	dmb	sy
+	bl	invalidate_dcache_range
 
 	/* Set the context id */
 	msr	contextidr_el1, xzr
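[Editor's note] The invalidate_dcache_range and
clean_and_invalidate_dcache_range helpers called above are defined
elsewhere in the tree, so they do not appear in this diff. For
reference, a conventional arm64 invalidate-by-range loop looks roughly
like the sketch below; the name and the (x0 = start, x1 = end,
exclusive) register convention are assumptions for illustration and may
not match Unikraft's actual helpers:

	/* Hypothetical sketch: discard cached copies of [x0, x1) so that
	 * later accesses, once the D-cache is re-enabled, refetch the
	 * data written while the MMU and cache were off.
	 */
invalidate_dcache_range_sketch:
	mrs	x3, ctr_el0
	ubfx	x3, x3, #16, #4		/* DminLine field of CTR_EL0 */
	mov	x2, #4
	lsl	x2, x2, x3		/* D-cache line size in bytes */
	sub	x3, x2, #1
	bic	x0, x0, x3		/* align start down to a line boundary */
0:	dc	ivac, x0		/* invalidate this line to the PoC */
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	0b
	dsb	sy			/* make the invalidation visible */
	ret

A production implementation (compare dcache_inval_poc in Linux)
additionally cleans, rather than discards, partially covered lines at
the head and tail of the range, so that unrelated data sharing those
cache lines is not lost.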