4GB when running with AMD NPT on PAE host.
From: Wei Huang <Wei.Huang2@amd.com>
Signed-off-by: Keir Fraser <keir@xensource.com>
/*
* Xen command-line option to allow/disallow hardware-assisted paging.
* Since the phys-to-machine table of AMD NPT is in host format, 32-bit Xen
- * could only support guests using NPT with up to a 4GB memory map. Therefore
- * we only allow HAP by default on 64-bit Xen.
+ * can only support guests using NPT with up to a 4GB memory map. Therefore
+ * we disallow HAP by default on PAE Xen (the default there is to support
+ * an 8GB pseudophysical memory map for HVM guests).
*/
-static int opt_hap_permitted = (BITS_PER_LONG == 8);
+static int opt_hap_permitted = (CONFIG_PAGING_LEVELS != 3);
boolean_param("hap", opt_hap_permitted);
int hvm_enabled __read_mostly;
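Usage note (not part of the patch): "hap" becomes a boolean Xen command-line
option via boolean_param(), so the default above can be overridden at boot.
For example, with an assumed GRUB-legacy style entry:

    kernel /boot/xen.gz hap=1

HAP is permitted even on a PAE hypervisor (subject to the 4GB guest limit
enforced below), while hap=0 disallows it unconditionally.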
goto out;
#endif
#if CONFIG_PAGING_LEVELS >= 3
- // When using PAE Xen, we only allow 33 bits of pseudo-physical
- // address in translated guests (i.e. 8 GBytes). This restriction
- // comes from wanting to map the P2M table into the 16MB RO_MPT hole
- // in Xen's address space for translated PV guests.
- //
+ /*
+ * When using PAE Xen, we only allow 33 bits of pseudo-physical
+ * address in translated guests (i.e. 8 GBytes). This restriction
+ * comes from wanting to map the P2M table into the 16MB RO_MPT hole
+ * in Xen's address space for translated PV guests.
+ * When using AMD's NPT on PAE Xen, we are restricted to 4GB.
+ */
if ( !p2m_next_level(d, &table_mfn, &table, &gfn_remainder, gfn,
L3_PAGETABLE_SHIFT - PAGE_SHIFT,
- (CONFIG_PAGING_LEVELS == 3
- ? 8
+ ((CONFIG_PAGING_LEVELS == 3)
+ ? (hvm_funcs.hap_supported ? 4 : 8)
: L3_PAGETABLE_ENTRIES),
PGT_l2_page_table) )
goto out;
p2m_unlock(d);
}
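For reference, a small standalone sketch (plain C, not part of the patch) of
why the entry counts above correspond to the limits named in the comment: each
PAE L3 entry maps 512 L2 entries x 512 L1 entries x 4KiB = 1GiB of
guest-physical space, so 8 entries give the 8GB (33-bit) limit and 4 entries
give the 4GB limit used when NPT is in play.

#include <stdio.h>

int main(void)
{
    /* Guest-physical space covered by one PAE L3 entry: 512 L2 slots,
     * each mapping 512 4KiB pages. */
    unsigned long long per_l3 = 512ULL * 512ULL * 4096ULL;

    printf("one L3 entry : %llu GiB\n", per_l3 >> 30);           /* 1 GiB */
    printf("8 L3 entries : %llu GiB (PAE Xen default)\n",
           (8 * per_l3) >> 30);                                  /* 8 GiB */
    printf("4 L3 entries : %llu GiB (AMD NPT on PAE)\n",
           (4 * per_l3) >> 30);                                  /* 4 GiB */
    return 0;
}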
-void
+int
guest_physmap_add_entry(struct domain *d, unsigned long gfn,
unsigned long mfn, p2m_type_t t)
{
unsigned long ogfn;
p2m_type_t ot;
mfn_t omfn;
+ int rc = 0;
if ( !paging_mode_translate(d) )
- return;
+ return -EINVAL;
+
+#if CONFIG_PAGING_LEVELS == 3
+ /*
+ * 32-bit PAE nested paging cannot support a guest with more than 4GB of
+ * pseudophysical memory, due to the hardware translation limit. Enforce
+ * this by rejecting any gfn above 0xfffffUL (the last frame below 4GB).
+ */
+ if ( paging_mode_hap(d) && (gfn > 0xfffffUL) )
+ return -EINVAL;
+#endif
p2m_lock(d);
audit_p2m(d);
if ( mfn_valid(_mfn(mfn)) )
{
- set_p2m_entry(d, gfn, _mfn(mfn), t);
+ if ( !set_p2m_entry(d, gfn, _mfn(mfn), t) )
+ rc = -EINVAL;
set_gpfn_from_mfn(mfn, gfn);
}
else
{
gdprintk(XENLOG_WARNING, "Adding bad mfn to p2m map (%#lx -> %#lx)\n",
gfn, mfn);
- set_p2m_entry(d, gfn, _mfn(INVALID_MFN), p2m_invalid);
+ if ( !set_p2m_entry(d, gfn, _mfn(INVALID_MFN), p2m_invalid) )
+ rc = -EINVAL;
}
audit_p2m(d);
p2m_unlock(d);
+
+ return rc;
}
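A standalone sketch (not part of the patch) confirming the 0xfffffUL bound
used above: gfn 0xfffff is the last 4KiB frame below 4GB, so any gfn greater
than that refers to guest-physical memory at or above the 4GB NPT-on-PAE
limit.

#include <stdio.h>

int main(void)
{
    unsigned long long limit_gfn = 0xfffffULL;

    /* First byte above the highest allowed frame: (0xfffff + 1) << 12. */
    unsigned long long boundary = (limit_gfn + 1) << 12;

    printf("boundary = %#llx bytes = %llu GiB\n", boundary, boundary >> 30);
    return 0;
}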
/* Walk the whole p2m table, changing any entries of the old type
if ( unlikely(paging_mode_translate(d)) )
{
for ( j = 0; j < (1 << a->extent_order); j++ )
- guest_physmap_add_page(d, gpfn + j, mfn + j);
+ if ( guest_physmap_add_page(d, gpfn + j, mfn + j) )
+ goto out;
}
else
{
mfn = page_to_mfn(page);
if ( unlikely(paging_mode_translate(d)) )
{
+ /* Ignore failure here. There's nothing we can do. */
for ( k = 0; k < (1UL << exch.out.extent_order); k++ )
- guest_physmap_add_page(d, gpfn + k, mfn + k);
+ (void)guest_physmap_add_page(d, gpfn + k, mfn + k);
}
else
{
void p2m_teardown(struct domain *d);
/* Add a page to a domain's p2m table */
-void guest_physmap_add_entry(struct domain *d, unsigned long gfn,
+int guest_physmap_add_entry(struct domain *d, unsigned long gfn,
unsigned long mfn, p2m_type_t t);
-/* Untyped version for RAM only, for compatibility */
-static inline void guest_physmap_add_page(struct domain *d, unsigned long gfn,
- unsigned long mfn)
+/* Untyped version for RAM only, for compatibility.
+ *
+ * Returns 0 on success, or a negative error code on failure.
+ */
+static inline int guest_physmap_add_page(struct domain *d, unsigned long gfn,
+ unsigned long mfn)
{
- guest_physmap_add_entry(d, gfn, mfn, p2m_ram_rw);
+ return guest_physmap_add_entry(d, gfn, mfn, p2m_ram_rw);
}
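A minimal caller sketch (hypothetical, not part of the patch; assumes the
usual Xen types and a made-up helper name) showing how the new non-void
return value can be propagated instead of being silently dropped, in the same
way populate_physmap now does above:

static int add_contiguous_pages(struct domain *d, unsigned long gpfn,
                                unsigned long mfn, unsigned int nr)
{
    unsigned int i;

    for ( i = 0; i < nr; i++ )
        if ( guest_physmap_add_page(d, gpfn + i, mfn + i) )
            return -EINVAL; /* e.g. gfn beyond the 4GB NPT-on-PAE limit */

    return 0;
}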
/* Remove a page from a domain's p2m table */