    {
        /* Guest l1e maps emulated MMIO space */
        *sp = sh_l1e_mmio(target_gfn, gflags);
-        d->arch.paging.shadow.has_fast_mmio_entries = true;
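+        /*
+         * sh_l1e_mmio() can now return an empty (non-magic) entry; only
+         * record fast MMIO entries when a magic one was actually installed.
+         */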
+        if ( sh_l1e_is_magic(*sp) )
+            d->arch.paging.shadow.has_fast_mmio_entries = true;
        goto done;
    }
 * pagetables.
 *
 * This is only feasible for PAE and 64bit Xen: 32-bit non-PAE PTEs don't
- * have reserved bits that we can use for this.
+ * have reserved bits that we can use for this. And even there it can only
+ * be used if the processor doesn't use all 52 address bits.
 */
#define SH_L1E_MAGIC 0xffffffff00000001ULL
}
/* Guest not present: a single magic value */
-static inline shadow_l1e_t sh_l1e_gnp(void)
+static inline shadow_l1e_t sh_l1e_gnp_raw(void)
{
    return (shadow_l1e_t){ -1ULL };
}
+static inline shadow_l1e_t sh_l1e_gnp(void)
+{
+    /*
+     * On systems with no reserved physical address bits we can't engage the
+     * fast fault path.
+     */
+    return paddr_bits < PADDR_BITS ? sh_l1e_gnp_raw()
+                                   : shadow_l1e_empty();
+}
+
static inline bool sh_l1e_is_gnp(shadow_l1e_t sl1e)
{
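+    /*
+     * Compare against the raw form: sh_l1e_gnp() may now return an empty
+     * entry, which must not be mistaken for a guest-not-present marker.
+     */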
-    return sl1e.l1 == sh_l1e_gnp().l1;
+    return sl1e.l1 == sh_l1e_gnp_raw().l1;
}
/*
static inline shadow_l1e_t sh_l1e_mmio(gfn_t gfn, u32 gflags)
{
-    return (shadow_l1e_t) { (SH_L1E_MMIO_MAGIC
-                             | MASK_INSR(gfn_x(gfn), SH_L1E_MMIO_GFN_MASK)
-                             | (gflags & (_PAGE_USER|_PAGE_RW))) };
+    unsigned long gfn_val = MASK_INSR(gfn_x(gfn), SH_L1E_MMIO_GFN_MASK);
+
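+    /*
+     * If the processor implements all PADDR_BITS address bits, or the GFN
+     * doesn't fit in the entry's GFN field, a magic entry can't be encoded;
+     * fall back to an empty (not-present) entry.
+     */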
+    if ( paddr_bits >= PADDR_BITS ||
+         gfn_x(gfn) != MASK_EXTR(gfn_val, SH_L1E_MMIO_GFN_MASK) )
+        return shadow_l1e_empty();
+
+    return (shadow_l1e_t) { (SH_L1E_MMIO_MAGIC | gfn_val |
+                             (gflags & (_PAGE_USER | _PAGE_RW))) };
}
static inline bool sh_l1e_is_mmio(shadow_l1e_t sl1e)