ASSERT(cur_pages == nr_pages);
}
-static int __init pvh_setup_p2m(struct domain *d)
+static void __init pvh_init_p2m(struct domain *d)
{
- struct vcpu *v = d->vcpu[0];
unsigned long nr_pages = dom0_compute_nr_pages(d, NULL, 0);
- unsigned int i;
- int rc;
bool preempted;
-#define MB1_PAGES PFN_DOWN(MB(1))
pvh_setup_e820(d, nr_pages);
do {
&preempted);
process_pending_softirqs();
} while ( preempted );
+}
+
+static int __init pvh_populate_p2m(struct domain *d)
+{
+ struct vcpu *v = d->vcpu[0];
+ unsigned int i;
+ int rc;
+#define MB1_PAGES PFN_DOWN(MB(1))
/*
* Memory below 1MB is identity mapped initially. RAM regions are
printk(XENLOG_INFO "*** Building a PVH Dom%d ***\n", d->domain_id);
- rc = pvh_setup_p2m(d);
- if ( rc )
- {
- printk("Failed to setup Dom0 physical memory map\n");
- return rc;
- }
-
/*
* NB: MMCFG initialization needs to be performed before iommu
* initialization so the iommu code can fetch the MMCFG regions used by the
*/
pvh_setup_mmcfg(d);
+ /*
+ * Craft dom0 physical memory map and set the paging allocation. This must
+ * be done before the iommu initialization, since iommu initialization code
+ * will likely add mappings required by devices to the p2m (ie: RMRRs).
+ */
+ pvh_init_p2m(d);
+
iommu_hwdom_init(d);
+ rc = pvh_populate_p2m(d);
+ if ( rc )
+ {
+ printk("Failed to setup Dom0 physical memory map\n");
+ return rc;
+ }
+
rc = pvh_load_kernel(d, image, image_headroom, initrd, bootstrap_map(image),
cmdline, &entry, &start_info);
if ( rc )
* inclusive mapping additionally maps in every pfn up to 4GB except those
* that fall in unusable ranges for PV Dom0.
*/
- if ( (pfn > max_pfn && !mfn_valid(mfn)) || xen_in_range(pfn) ||
- /*
- * Ignore any address below 1MB, that's already identity mapped by the
- * Dom0 builder for HVM.
- */
- (!d->domain_id && is_hvm_domain(d) && pfn < PFN_DOWN(MB(1))) )
+ if ( (pfn > max_pfn && !mfn_valid(mfn)) || xen_in_range(pfn) )
return false;
switch ( type = page_get_ram_type(mfn) )