enum xenmap_operation {
INSERT,
- REMOVE
+ REMOVE,
+ RESERVE /* Allocate intermediate page tables only; no leaf PTE is written. */
};
static int create_xen_entries(enum xenmap_operation op,
switch ( op ) {
case INSERT:
+ case RESERVE: /* RESERVE shares INSERT's "slot must be empty" check. */
if ( third[third_table_offset(addr)].pt.valid )
{
printk("create_xen_entries: trying to replace an existing mapping addr=%lx mfn=%lx\n",
addr, mfn);
return -EINVAL;
}
+ if ( op == RESERVE ) /* Stop before writing a leaf PTE: tables only. */
+ break;
pte = mfn_to_xen_entry(mfn, ai);
pte.pt.table = 1;
write_pte(&third[third_table_offset(addr)], pte);
{
return create_xen_entries(INSERT, virt, mfn, nr_mfns, flags);
}
+
+/*
+ * Pre-allocate the intermediate page tables covering nr_mfns pages at
+ * virt, without installing leaf mappings: RESERVE makes
+ * create_xen_entries() stop right after its empty-slot check, so no
+ * leaf PTE is written and the flags argument can simply be 0.
+ */
+int populate_pt_range(unsigned long virt, unsigned long mfn,
+ unsigned long nr_mfns)
+{
+ return create_xen_entries(RESERVE, virt, mfn, nr_mfns, 0);
+}
+
void destroy_xen_mappings(unsigned long v, unsigned long e)
{
create_xen_entries(REMOVE, v, 0, (e - v) >> PAGE_SHIFT, 0);
return 0; /* NOTE(review): a value is returned from a void function -- likely a patch-chunking artifact; confirm the real return type against the tree. */
}
+/*
+ * Populate page tables for nr_mfns pages at virt by delegating to
+ * map_pages_to_xen() with MAP_SMALL_PAGES.
+ * NOTE(review): presumably MAP_SMALL_PAGES forces per-page (non-super)
+ * table population so this matches the "non-leaf entries only" contract
+ * of populate_pt_range -- confirm against this arch's map_pages_to_xen().
+ */
+int populate_pt_range(unsigned long virt, unsigned long mfn,
+ unsigned long nr_mfns)
+{
+ return map_pages_to_xen(virt, mfn, nr_mfns, MAP_SMALL_PAGES);
+}
+
void destroy_xen_mappings(unsigned long s, unsigned long e)
{
bool_t locking = system_state > SYS_STATE_boot;
bitmap_fill(vm_bitmap, vm_low);
/* Populate page tables for the bitmap if necessary. */
- map_pages_to_xen(va, 0, vm_low - nr, MAP_SMALL_PAGES);
+ populate_pt_range(va, 0, vm_low - nr);
}
void *vm_alloc(unsigned int nr, unsigned int align)
#define PAGE_HYPERVISOR (WRITEALLOC)
#define PAGE_HYPERVISOR_NOCACHE (DEV_SHARED)
#define PAGE_HYPERVISOR_WC (DEV_WC)
-#define MAP_SMALL_PAGES PAGE_HYPERVISOR
/*
* Stage 2 Memory Type.
unsigned long nr_mfns,
unsigned int flags);
void destroy_xen_mappings(unsigned long v, unsigned long e);
-
+/*
+ * Create only non-leaf page table entries for the
+ * page range in Xen virtual address space.
+ */
+int populate_pt_range(unsigned long virt, unsigned long mfn,
+ unsigned long nr_mfns);
/* Claim handling */
unsigned long domain_adjust_tot_pages(struct domain *d, long pages);
int domain_set_outstanding_pages(struct domain *d, unsigned long pages);