#include <xen/nodemask.h>
#include <xen/virtual_region.h>
#include <xen/watchdog.h>
+#include <xen/lu.h>
#include <public/version.h>
#include <compat/platform.h>
#include <compat/xen.h>
#define PREBUILT_MAP_LIMIT (1 << L2_PAGETABLE_SHIFT)
unsigned long lu_bootmem_start, lu_bootmem_size;
+static unsigned long lu_breadcrumb_phys;
#ifdef CONFIG_LIVE_UPDATE
static int __init parse_liveupdate(const char *str)
printk(XENLOG_INFO "Live update area 0x%lx-0x%lx (0x%lx)\n", lu_bootmem_start,
lu_bootmem_start + lu_bootmem_size, lu_bootmem_size);
+ /*
+ * If present, the breadcrumb leading to the migration data stream is
+ * in the very beginning of the reserved bootmem region.
+ */
+ lu_breadcrumb_phys = lu_bootmem_start;
return 0;
}
custom_param("liveupdate", parse_liveupdate);
.stop_bits = 1
};
const char *hypervisor_name;
+ uint64_t lu_mfnlist_phys = 0, lu_nr_pages = 0;
+ struct lu_stream lu_stream;
/* Critical region without IDT or TSS. Any fault is deadly! */
printk(" Found %d EDD information structures\n",
bootsym(boot_edd_info_nr));
- /* Check that we have at least one Multiboot module. */
if ( !(mbi->flags & MBI_MODULES) || (mbi->mods_count == 0) )
+ {
+ /* No boot modules: only tolerable when resuming via live update. */
+ if ( !lu_breadcrumb_phys )
+ panic("dom0 kernel not specified. Check bootloader configuration\n");
+ }
+ else
+ {
+ /* If modules are provided, don't even look for live update data. */
+ lu_breadcrumb_phys = 0;
+ }
/* Check that we don't have a silly number of modules. */
if ( mbi->mods_count > sizeof(module_map) * 8 )
if ( !xen_phys_start )
panic("Not enough memory to relocate Xen\n");
+ /* Check for the state breadcrumb before giving it to the boot allocator */
+ /* NOTE(review): assumes lu_bootmem_start is already covered by the directmap here — confirm. */
+ if ( IS_ENABLED(CONFIG_LIVE_UPDATE) && lu_breadcrumb_phys )
+ {
+ uint64_t *breadcrumb = maddr_to_virt(lu_breadcrumb_phys);
+
+ /* Breadcrumb layout: [0] magic, [1] phys addr of the MFN list, [2] data size in bytes. */
+ lu_mfnlist_phys = breadcrumb[1];
+ lu_nr_pages = breadcrumb[2] >> PAGE_SHIFT;
+
+ /* NOTE(review): %ld/%lx on uint64_t only matches on LP64 builds; prefer PRIu64/PRIx64. */
+ if ( breadcrumb[0] == LIVE_UPDATE_MAGIC && lu_nr_pages) {
+ printk("%ld pages of live update data at 0x%lx\n", lu_nr_pages, lu_mfnlist_phys);
+ } else {
+ panic("Live update breadcrumb not found: %lx %lx %lx at %lx\n",
+ breadcrumb[0], breadcrumb[1], breadcrumb[2], lu_breadcrumb_phys);
+ }
+ }
+
if ( lu_bootmem_start )
{
if ( !lu_reserved )
numa_initmem_init(0, raw_max_page);
+ /*
+ * Map the migration data stream before the heap can hand out its pages.
+ * NOTE(review): lu_nr_pages is uint64_t but lu_stream_map() takes int —
+ * silently truncates for very large streams; confirm acceptable.
+ */
+ if ( IS_ENABLED(CONFIG_LIVE_UPDATE) && lu_nr_pages )
+ {
+ lu_stream_map(&lu_stream, lu_mfnlist_phys, lu_nr_pages);
+ }
+
if ( lu_bootmem_start )
{
unsigned long limit = virt_to_mfn(HYPERVISOR_VIRT_END - 1);
void lu_stream_free(struct lu_stream *stream)
{
+ /* Pagelist was sized for nr_pages MFNs plus one extra entry (see lu_stream_map()). */
unsigned int order = get_order_from_bytes((stream->nr_pages + 1) * sizeof(mfn_t));
+ struct page_info *pg;
unsigned int i;
+ /* NOTE(review): diff context appears elided here (brace count is off by one);
+ presumably the data mapping is torn down under this condition — confirm. */
if ( stream->data )
for ( i = 0; i < stream->nr_pages; i++ )
{
if (mfn_valid(stream->pagelist[i]))
- free_domheap_page(mfn_to_page(stream->pagelist[i]));
+ {
+ /* Drop the PGC_allocated reservation set by lu_stream_map() before freeing. */
+ pg = mfn_to_page(stream->pagelist[i]);
+ pg->count_info &= ~PGC_allocated;
+ free_domheap_page(pg);
+ }
}
+ /* Likewise un-reserve the pages backing the pagelist itself. */
+ pg = virt_to_page(stream->pagelist);
+ for ( i = 0; i < 1<<order; i++)
+ pg[i].count_info &= ~PGC_allocated;
+
free_xenheap_pages(stream->pagelist, order);
}
}
+/*
+ * Map the live update data stream described by the breadcrumb.
+ *
+ * @stream:    stream descriptor to initialise (fully overwritten).
+ * @mfns_phys: physical address of the MFN list of the data pages.
+ * @nr_pages:  number of data pages in the stream.
+ *
+ * Marks both the pagelist pages and the data pages PGC_allocated so the
+ * boot allocator will skip them, then maps the data pages contiguously
+ * with vmap().  Panics on any inconsistency: this runs during early boot,
+ * before there is anything to fall back to.
+ */
+void lu_stream_map(struct lu_stream *stream, unsigned long mfns_phys, int nr_pages)
+{
+    /* The pagelist holds nr_pages MFNs plus one extra entry. */
+    unsigned int order = get_order_from_bytes((nr_pages + 1) * sizeof(mfn_t));
+    unsigned int i, count = nr_pages;
+
+    memset(stream, 0, sizeof(*stream));
+
+    /* Widen before shifting: an int shift would overflow for large streams. */
+    stream->len = (unsigned long)nr_pages << PAGE_SHIFT;
+    stream->nr_pages = nr_pages;
+    stream->pagelist = __va(mfns_phys);
+
+    map_pages_to_xen((unsigned long)stream->pagelist, maddr_to_mfn(mfns_phys),
+                     1 << order, PAGE_HYPERVISOR);
+
+    /* Reserve the pages used for the pagelist itself. */
+    for ( i = 0; i < (1 << order); i++ )
+        maddr_to_page(mfns_phys + (i << PAGE_SHIFT))->count_info |= PGC_allocated;
+
+    /* Validate and reserve the data pages. */
+    for ( i = 0; i < count; i++ )
+    {
+        if ( !mfn_valid(stream->pagelist[i]) )
+            panic("Invalid MFN %lx in live update stream\n",
+                  mfn_x(stream->pagelist[i]));
+
+        mfn_to_page(stream->pagelist[i])->count_info |= PGC_allocated;
+    }
+
+    stream->data = vmap(stream->pagelist, nr_pages);
+    if ( !stream->data )
+        panic("Failed to map live update data\n");
+}
+
/*
* local variables:
* mode: c
for ( i = 0; i < max_pages; i++)
{
- if ( page_state_is(pg + i, broken) )
+ /* PGC_allocated here marks live update data reserved before heap init. */
+ if ( page_state_is(pg + i, broken) || pg[i].count_info & PGC_allocated )
break;
}
{
unsigned int nid = phys_to_nid(page_to_maddr(pg+i));
- /* If the (first) page is already marked broken, don't add it. */
- if ( page_state_is(pg + i, broken) )
+ /*
+ * If the (first) page is already marked broken or allocated,
+ * don't add it.
+ */
+ if ( page_state_is(pg + i, broken) || pg[i].count_info & PGC_allocated )
continue;
if ( unlikely(!avail[nid]) )