xenbits.xensource.com Git - people/dwmw2/xen.git/commitdiff
x86/setup: Don't skip 2MiB underneath relocated Xen image
Author:    David Woodhouse <dwmw@amazon.co.uk>
           Mon, 2 Dec 2019 16:39:00 +0000 (16:39 +0000)
Committer: David Woodhouse <dwmw@amazon.co.uk>
           Thu, 30 Jan 2020 22:15:48 +0000 (22:15 +0000)
Set 'e' correctly to reflect the location that Xen is actually relocated
to from its default 2MiB location. Not 2MiB below that.

This is only vaguely a bug fix. The "missing" 2MiB would have been used
in the end, and fed to the allocator. It's just that other things don't
get to sit right up *next* to the Xen image, and it isn't very tidy.

For live update, I'd quite like a single contiguous region for the
reserved bootmem and Xen, allowing the 'slack' in the former to be used
when Xen itself grows larger. Let's not allow 2MiB of random heap pages
to get in the way...

Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
xen/arch/x86/setup.c

index a353d76f9a9b9a9559c5102025df70f96c036a8b..89bc8a182983e9a9eb052860a448a8a7b0469a35 100644 (file)
@@ -1034,9 +1034,9 @@ void __init noreturn __start_xen(unsigned long mbi_p)
             int i, j, k;
 
             /* Select relocation address. */
-            e = end - reloc_size;
-            xen_phys_start = e;
-            bootsym(trampoline_xen_phys_start) = e;
+            xen_phys_start = end - reloc_size;
+            e = xen_phys_start + XEN_IMG_OFFSET;
+            bootsym(trampoline_xen_phys_start) = xen_phys_start;
 
             /*
              * Perform relocation to new physical address.
@@ -1045,7 +1045,7 @@ void __init noreturn __start_xen(unsigned long mbi_p)
              * data until after we have switched to the relocated pagetables!
              */
             barrier();
-            move_memory(e + XEN_IMG_OFFSET, XEN_IMG_OFFSET, _end - _start, 1);
+            move_memory(e, XEN_IMG_OFFSET, _end - _start, 1);
 
             /* Walk initial pagetables, relocating page directory entries. */
             pl4e = __va(__pa(idle_pg_table));