From: Oleksii Kurochko
Date: Wed, 23 Apr 2025 07:41:42 +0000 (+0200)
Subject: xen/riscv: Increase XEN_VIRT_SIZE
X-Git-Url: http://xenbits.xensource.com/gitweb?a=commitdiff_plain;h=16d69b2870ce7dacf47565673992131eafd03644;p=xen.git

xen/riscv: Increase XEN_VIRT_SIZE

A randconfig job failed with the following issue:

    riscv64-linux-gnu-ld: Xen too large for early-boot assumptions

The reason is that enabling the UBSAN config increased the size of the
Xen binary.

Increase XEN_VIRT_SIZE to reserve enough space, allowing both UBSAN and
GCOV to be enabled together, with some slack for future growth.

Additionally, add checks to verify that XEN_VIRT_START is 1GB-aligned
and XEN_VIRT_SIZE is 2MB-aligned to reduce the number of page tables
needed for the initial mapping. In the future, when 2MB mappings are
used for .text (rx), .rodata (r), and .data (rw), this will also help
reduce TLB pressure.

Reported-by: Andrew Cooper
Signed-off-by: Oleksii Kurochko
Acked-by: Jan Beulich
---

diff --git a/xen/arch/riscv/include/asm/config.h b/xen/arch/riscv/include/asm/config.h
index 314c97c20a..e150f28f53 100644
--- a/xen/arch/riscv/include/asm/config.h
+++ b/xen/arch/riscv/include/asm/config.h
@@ -41,11 +41,11 @@
  * Start addr          | End addr            | Slot       | area description
  * ============================================================================
  *                                     ..... L2 511          Unused
- * 0xffffffffc0a00000    0xffffffffc0bfffff    L2 511          Fixmap
+ * 0xffffffffc1800000    0xffffffffc19fffff    L2 511          Fixmap
  *                                     ..... ( 2 MB gap )
- * 0xffffffffc0400000    0xffffffffc07fffff    L2 511          FDT
+ * 0xffffffffc1200000    0xffffffffc15fffff    L2 511          FDT
  *                                     ..... ( 2 MB gap )
- * 0xffffffffc0000000    0xffffffffc01fffff    L2 511          Xen
+ * 0xffffffffc0000000    0xffffffffc0ffffff    L2 511          Xen
  *                                     ..... L2 510          Unused
  * 0x3200000000          0x7f7fffffff          L2 200-509      Direct map
  *                                     ..... L2 199          Unused
@@ -78,7 +78,7 @@
 
 #define GAP_SIZE                MB(2)
 
-#define XEN_VIRT_SIZE           MB(2)
+#define XEN_VIRT_SIZE           MB(16)
 
 #define BOOT_FDT_VIRT_START     (XEN_VIRT_START + XEN_VIRT_SIZE + GAP_SIZE)
 #define BOOT_FDT_VIRT_SIZE      MB(4)
diff --git a/xen/arch/riscv/include/asm/mm.h b/xen/arch/riscv/include/asm/mm.h
index 4035cd400a..ef8b35d7c2 100644
--- a/xen/arch/riscv/include/asm/mm.h
+++ b/xen/arch/riscv/include/asm/mm.h
@@ -9,6 +9,7 @@
 #include
 #include
 #include
+#include <xen/sections.h>
 #include
 
 #include
@@ -43,13 +44,21 @@ static inline void *maddr_to_virt(paddr_t ma)
  */
 static inline unsigned long virt_to_maddr(unsigned long va)
 {
+    const unsigned long xen_size = (unsigned long)(_end - _start);
+    const unsigned long xen_virt_start = _AC(XEN_VIRT_START, UL);
+    const unsigned long xen_virt_end = xen_virt_start + xen_size - 1;
+
     if ((va >= DIRECTMAP_VIRT_START) &&
         (va <= DIRECTMAP_VIRT_END))
         return directmapoff_to_maddr(va - directmap_virt_start);
 
-    BUILD_BUG_ON(XEN_VIRT_SIZE != MB(2));
-    ASSERT((va >> (PAGETABLE_ORDER + PAGE_SHIFT)) ==
-           (_AC(XEN_VIRT_START, UL) >> (PAGETABLE_ORDER + PAGE_SHIFT)));
+    ASSERT((va >= xen_virt_start) && (va <= xen_virt_end));
+
+    /*
+     * The .init* sections will be freed when Xen completes booting,
+     * so the [__init_begin, __init_end) range must be excluded.
+     */
+    ASSERT((system_state < SYS_STATE_active) || !is_init_section(va));
 
     /* phys_offset = load_start - XEN_VIRT_START */
     return phys_offset + va;
diff --git a/xen/arch/riscv/mm.c b/xen/arch/riscv/mm.c
index f2bf279bac..d3ece9f132 100644
--- a/xen/arch/riscv/mm.c
+++ b/xen/arch/riscv/mm.c
@@ -31,20 +31,39 @@ unsigned long __ro_after_init phys_offset; /* = load_start - XEN_VIRT_START */
 #define LOAD_TO_LINK(addr) ((unsigned long)(addr) - phys_offset)
 
 /*
- * It is expected that Xen won't be more then 2 MB.
+ * It is expected that Xen won't be more then XEN_VIRT_SIZE.
  * The check in xen.lds.S guarantees that.
- * At least 3 page tables (in case of Sv39 ) are needed to cover 2 MB.
- * One for each page level table with PAGE_SIZE = 4 Kb.
  *
- * One L0 page table can cover 2 MB(512 entries of one page table * PAGE_SIZE).
+ * Root page table is shared with the initial mapping and is declared
+ * separately (look at stage1_pgtbl_root), so it isn't taken into account
+ * in PGTBL_INITIAL_COUNT.
  *
- * It might be needed one more page table in case when Xen load address
- * isn't 2 MB aligned.
+ * An amount of page tables between root page table and L0 page table
+ * (in the case of Sv39 it covers L1 table):
+ *   (CONFIG_PAGING_LEVELS - 2) are needed for an identity mapping and
+ *   the same amount are needed for Xen.
  *
- * CONFIG_PAGING_LEVELS page tables are needed for the identity mapping,
- * except that the root page table is shared with the initial mapping
+ * An amount of L0 page tables:
+ *   (512 entries of one L0 page table covers 2MB == 1<<XEN_PT_LEVEL_SHIFT(1))
+ *   XEN_VIRT_SIZE >> XEN_PT_LEVEL_SHIFT(1) are needed for Xen and
+ *   one L0 is needed for identity mapping.
  */
-#define PGTBL_INITIAL_COUNT ((CONFIG_PAGING_LEVELS - 1) * 2 + 1)
+#define PGTBL_INITIAL_COUNT ((CONFIG_PAGING_LEVELS - 2) * 2 + \
+                             (XEN_VIRT_SIZE >> XEN_PT_LEVEL_SHIFT(1)) + 1)
+
+/*
+ * Modifying these checks may require updating PGTBL_INITIAL_COUNT.
+ *
+ * If XEN_VIRT_{START,SIZE} are not properly aligned and XEN_VIRT_SIZE > GB(1),
+ * additional L1 and L0 page tables are required.
+ */
+static void __init __maybe_unused build_assertions(void)
+{
+    BUILD_BUG_ON(!IS_ALIGNED(XEN_VIRT_START, GB(1)));
+    BUILD_BUG_ON(!IS_ALIGNED(XEN_VIRT_SIZE, MB(2)));
+
+    BUILD_BUG_ON(XEN_VIRT_SIZE > GB(1));
+}
 
 pte_t __section(".bss.page_aligned") __aligned(PAGE_SIZE)
     stage1_pgtbl_root[PAGETABLE_ENTRIES];
diff --git a/xen/include/xen/sections.h b/xen/include/xen/sections.h
index f2fac8d7fa..fe49d7d0e6 100644
--- a/xen/include/xen/sections.h
+++ b/xen/include/xen/sections.h
@@ -7,6 +7,10 @@
 
 /* SAF-0-safe */
 extern char __init_begin[], __init_end[];
+#define is_init_section(p) ({                           \
+    const char *p_ = (const char *)(unsigned long)(p);  \
+    (p_ >= __init_begin) && (p_ < __init_end);          \
+})
 
 /*
  * Some data is expected to be written rarely (if at all).
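
Editor's note: for readers checking the arithmetic behind the new PGTBL_INITIAL_COUNT, the following standalone C sketch (not part of the patch) evaluates the expression for Sv39 with XEN_VIRT_SIZE = MB(16). The PAGE_SHIFT, PAGETABLE_ORDER and XEN_PT_LEVEL_SHIFT() definitions below are assumptions that mirror Xen's RISC-V paging constants, not code copied from the tree.

    /*
     * Standalone sketch: evaluate the PGTBL_INITIAL_COUNT formula introduced
     * by this patch for Sv39 (CONFIG_PAGING_LEVELS == 3).  Constants below
     * are assumed values mirroring Xen's RISC-V headers.
     */
    #include <stdio.h>

    #define MB(x)                  ((x) * 1024UL * 1024UL)
    #define PAGE_SHIFT             12
    #define PAGETABLE_ORDER        9
    #define CONFIG_PAGING_LEVELS   3                                   /* Sv39 */
    #define XEN_PT_LEVEL_SHIFT(lv) (PAGE_SHIFT + (lv) * PAGETABLE_ORDER)
    #define XEN_VIRT_SIZE          MB(16)

    /* Same shape as the patch: intermediate tables counted twice (identity +
     * Xen), one L0 table per 2 MB of Xen, plus one L0 for the identity map.
     * The root table is excluded (stage1_pgtbl_root is declared separately). */
    #define PGTBL_INITIAL_COUNT ((CONFIG_PAGING_LEVELS - 2) * 2 + \
                                 (XEN_VIRT_SIZE >> XEN_PT_LEVEL_SHIFT(1)) + 1)

    int main(void)
    {
        /* (3 - 2) * 2 = 2 L1 tables, 16 MB / 2 MB = 8 L0 tables, + 1 identity L0 */
        printf("PGTBL_INITIAL_COUNT = %lu\n", (unsigned long)PGTBL_INITIAL_COUNT);
        return 0;
    }

With these assumed values it prints PGTBL_INITIAL_COUNT = 11, matching the comment in the patch: two intermediate (L1) tables, eight L0 tables covering the 16 MB Xen region, and one L0 table for the identity mapping.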