}
destroy_xen_mappings(start, end);
- init_xenheap_pages(__pa(start), __pa(end));
+ init_lu_reserved_pages(__pa(start), __pa(end));
printk("Freed %lukB init memory\n", (end - start) >> 10);
startup_cpu_idle_loop();
unsigned long limit = virt_to_mfn(HYPERVISOR_VIRT_END - 1);
uint64_t mask = PAGE_SIZE - 1;
+ /*
+ * Pages in the reserved LU region must not be used for anything which
+ * will need to persist across a live update. There is ongoing work to
+ * eliminate or limit the use of share_xen_page_with_guest() and get
+ * to a point where we can actually honour that promise, but for now
+ * just *don't* add those pages to the heap. Clear the boot allocator
+ * out completely, before adding the non-reserved ranges.
+ */
+ clear_boot_allocator();
+
for ( i = 0; i < boot_e820.nr_map; i++ )
{
uint64_t s, e;
printk("\n");
}
+/*
+ * Called when live update is supported. The memory ranges currently
+ * still free in the boot allocator must be added to the reserved
+ * heap, distinct from the xenheap in that pages from it MUST NOT be
+ * used for anything which will be mapped to a domain or otherwise
+ * need to survive a live update.
+ *
+ * On return the boot allocator is empty (nr_bootmem_regions == 0).
+ */
+void __init clear_boot_allocator(void)
+{
+    unsigned int i;
+
+    /*
+     * Add at least one range on node zero first, if we can, so that the
+     * reserved heap starts with node-0 memory available for early
+     * allocations. NOTE(review): assumes phys_to_nid() is already valid
+     * at this point in boot — confirm against the call site.
+     */
+    for ( i = 0; i < nr_bootmem_regions; i++ )
+    {
+        struct bootmem_region *r = &bootmem_region_list[i];
+        if ( (r->s < r->e) &&
+             (phys_to_nid(pfn_to_paddr(r->s)) == cpu_to_node(0)) )
+        {
+            init_lu_reserved_pages(r->s << PAGE_SHIFT, r->e << PAGE_SHIFT);
+            /* Mark this region empty so the loop below skips it. */
+            r->e = r->s;
+            break;
+        }
+    }
+    /* Hand over all remaining non-empty regions, last region first. */
+    for ( i = nr_bootmem_regions; i-- > 0; )
+    {
+        struct bootmem_region *r = &bootmem_region_list[i];
+        if ( r->s < r->e )
+            init_lu_reserved_pages(r->s << PAGE_SHIFT, r->e << PAGE_SHIFT);
+    }
+    nr_bootmem_regions = 0;
+}
+
+/*
+ * Add the range [ps, pe) to the live update reserved heap, or fall
+ * back to the normal xenheap when no LU bootmem region was reserved.
+ *
+ * Pages in the reserved region must not be used for anything which
+ * will be mapped to a guest or otherwise needs to persist across a
+ * live update.
+ */
+void init_lu_reserved_pages(paddr_t ps, paddr_t pe)
+{
+    if ( !lu_bootmem_start )
+        init_xenheap_pages(ps, pe);
+
+    /*
+     * There is ongoing work for other reasons to eliminate the use of
+     * share_xen_page_with_guest() and get to a point where the normal
+     * xenheap actually meets the requirement we need for live update
+     * reserved memory, that nothing allocated from it will be mapped
+     * to a guest and/or need to be preserved over a live update.
+     * Until then, we simply don't use these pages after boot.
+     */
+}
+
static void __init smp_scrub_heap_pages(void *data)
{
unsigned long mfn, start, end;
void init_boot_pages(paddr_t ps, paddr_t pe);
mfn_t alloc_boot_pages(unsigned long nr_pfns, unsigned long pfn_align);
void end_boot_allocator(void);
+void clear_boot_allocator(void);
/* Xen suballocator. These functions are interrupt-safe. */
+void init_lu_reserved_pages(paddr_t ps, paddr_t pe);
void init_xenheap_pages(paddr_t ps, paddr_t pe);
void xenheap_max_mfn(unsigned long mfn);
void *alloc_xenheap_pages(unsigned int order, unsigned int memflags);