From 73b47eea21045556dc5334e4f17d0c05c23f3c16 Mon Sep 17 00:00:00 2001
From: =?utf8?q?Roger=20Pau=20Monn=C3=A9?=
Date: Thu, 5 Mar 2020 10:43:15 +0100
Subject: [PATCH] x86/dom0: improve PVH initrd and metadata placement
MIME-Version: 1.0
Content-Type: text/plain; charset=utf8
Content-Transfer-Encoding: 8bit

Don't assume there's going to be enough space at the tail of the loaded
kernel and instead try to find a suitable memory area where the initrd
and metadata can be loaded.

Reported-by: Andrew Cooper
Signed-off-by: Roger Pau Monné
Reviewed-by: Jan Beulich
---
 xen/arch/x86/hvm/dom0_build.c | 58 ++++++++++++++++++++++++++++++++++-
 1 file changed, 57 insertions(+), 1 deletion(-)

diff --git a/xen/arch/x86/hvm/dom0_build.c b/xen/arch/x86/hvm/dom0_build.c
index eded87eaf5..c41504f22d 100644
--- a/xen/arch/x86/hvm/dom0_build.c
+++ b/xen/arch/x86/hvm/dom0_build.c
@@ -490,6 +490,45 @@ static int __init pvh_populate_p2m(struct domain *d)
 #undef MB1_PAGES
 }
 
+static paddr_t find_memory(const struct domain *d, const struct elf_binary *elf,
+                           size_t size)
+{
+    paddr_t kernel_start = (paddr_t)elf->dest_base & PAGE_MASK;
+    paddr_t kernel_end = ROUNDUP((paddr_t)elf->dest_base + elf->dest_size,
+                                 PAGE_SIZE);
+    unsigned int i;
+
+    /*
+     * The memory map is sorted and all RAM regions starts and sizes are
+     * aligned to page boundaries.
+     */
+    for ( i = 0; i < d->arch.nr_e820; i++ )
+    {
+        paddr_t start, end = d->arch.e820[i].addr + d->arch.e820[i].size;
+
+        /* Don't use memory below 1MB, as it could overwrite BDA/EBDA/IBFT. */
+        if ( end <= MB(1) || d->arch.e820[i].type != E820_RAM )
+            continue;
+
+        start = MAX(ROUNDUP(d->arch.e820[i].addr, PAGE_SIZE), MB(1));
+
+        ASSERT(IS_ALIGNED(start, PAGE_SIZE) && IS_ALIGNED(end, PAGE_SIZE));
+
+        if ( end <= kernel_start || start >= kernel_end )
+            ; /* No overlap, nothing to do. */
+        /* Deal with the kernel already being loaded in the region. */
+        else if ( kernel_start - start > end - kernel_end )
+            end = kernel_start;
+        else
+            start = kernel_end;
+
+        if ( end - start >= size )
+            return start;
+    }
+
+    return INVALID_PADDR;
+}
+
 static int __init pvh_load_kernel(struct domain *d, const module_t *image,
                                   unsigned long image_headroom,
                                   module_t *initrd, void *image_base,
@@ -546,7 +585,24 @@ static int __init pvh_load_kernel(struct domain *d, const module_t *image,
         return rc;
     }
 
-    last_addr = ROUNDUP(parms.virt_kend - parms.virt_base, PAGE_SIZE);
+    /*
+     * Find a RAM region big enough (and that doesn't overlap with the loaded
+     * kernel) in order to load the initrd and the metadata. Note it could be
+     * split into smaller allocations, done as a single region in order to
+     * simplify it.
+     */
+    last_addr = find_memory(d, &elf, sizeof(start_info) +
+                            (initrd ? ROUNDUP(initrd->mod_end, PAGE_SIZE) +
+                                      sizeof(mod)
+                                    : 0) +
+                            (cmdline ? ROUNDUP(strlen(cmdline) + 1,
+                                               elf_64bit(&elf) ? 8 : 4)
+                                     : 0));
+    if ( last_addr == INVALID_PADDR )
+    {
+        printk("Unable to find a memory region to load initrd and metadata\n");
+        return -ENOMEM;
+    }
 
     if ( initrd != NULL )
     {
-- 
2.39.5
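
Editor's note, for illustration only and not part of the patch: the stand-alone
C sketch below mimics the placement strategy find_memory() implements -- walk a
sorted, page-aligned memory map, skip non-RAM regions and anything below 1MiB,
carve the already-loaded kernel out of a candidate region by keeping the larger
of the two remaining gaps, and return the first region with enough room. The
struct region type, the RAM/INVALID constants, the find_region() name and the
toy map in main() are simplified stand-ins for Xen's e820 entries, E820_RAM and
INVALID_PADDR; like the patch, the sketch assumes a kernel that overlaps a
region is fully contained in it.

/*
 * Illustrative stand-alone sketch, NOT part of the patch: mimics the
 * placement logic of find_memory() with simplified stand-in types.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 0x1000ULL
#define MB(x)     ((uint64_t)(x) << 20)
#define RAM       1            /* stand-in for E820_RAM */
#define INVALID   UINT64_MAX   /* stand-in for INVALID_PADDR */

struct region { uint64_t addr, size; int type; };

static uint64_t roundup(uint64_t v, uint64_t align)
{
    return (v + align - 1) & ~(align - 1);
}

/* Assumes a sorted map and a kernel fully contained in one RAM region. */
static uint64_t find_region(const struct region *map, unsigned int nr,
                            uint64_t kstart, uint64_t kend, uint64_t size)
{
    for ( unsigned int i = 0; i < nr; i++ )
    {
        uint64_t start, end = map[i].addr + map[i].size;

        /* Skip non-RAM regions and anything that ends below 1MiB. */
        if ( end <= MB(1) || map[i].type != RAM )
            continue;

        start = roundup(map[i].addr, PAGE_SIZE);
        if ( start < MB(1) )
            start = MB(1);

        if ( end <= kstart || start >= kend )
            ; /* Kernel not here: the whole region is usable. */
        else if ( kstart - start > end - kend )
            end = kstart;   /* keep the larger gap, below the kernel */
        else
            start = kend;   /* keep the larger gap, above the kernel */

        if ( end - start >= size )
            return start;
    }

    return INVALID;
}

int main(void)
{
    /* Toy map: low memory, a hole, then 512MiB of RAM holding the kernel. */
    const struct region map[] = {
        { 0,      MB(1) - 0x1000, RAM },
        { MB(16), MB(512),        RAM },
    };
    /* Kernel loaded at [32MiB, 48MiB); ask for 64MiB of extra space. */
    uint64_t start = find_region(map, 2, MB(32), MB(48), MB(64));

    if ( start == INVALID )
        printf("no suitable region found\n");
    else
        printf("initrd/metadata can go at %#llx\n",
               (unsigned long long)start);

    return 0;
}

Keeping the larger of the two gaps around the kernel is what lets the new code
cope with a kernel sitting in the middle of the only sizeable RAM region, the
case the old "place everything at the tail of the kernel" logic could not
handle.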