*
* This is only feasible for PAE and 64bit Xen: 32-bit non-PAE PTEs don't
* have reserved bits that we can use for this. And even there it can only
- * be used if the processor doesn't use all 52 address bits.
+ * be used if we can be certain the processor doesn't use all 52 address bits.
*/
#define SH_L1E_MAGIC 0xffffffff00000001ULL
+
+/*
+ * True when we can rely on PTEs having reserved physical-address bits
+ * (needed for the SH_L1E_MAGIC fast fault path): the CPU must implement
+ * fewer than PADDR_BITS address bits, and we must not be running
+ * virtualized.  NOTE(review): presumably the hypervisor check is because
+ * under virtualization paddr_bits may not reflect the bits the underlying
+ * hardware actually decodes (e.g. after migration), so reserved-bit
+ * faults cannot be guaranteed — confirm against the commit rationale.
+ */
+static inline bool sh_have_pte_rsvd_bits(void)
+{
+ return paddr_bits < PADDR_BITS && !cpu_has_hypervisor;
+}
+
static inline bool sh_l1e_is_magic(shadow_l1e_t sl1e)
{
return (sl1e.l1 & SH_L1E_MAGIC) == SH_L1E_MAGIC;
* On systems with no reserved physical address bits we can't engage the
* fast fault path.
*/
- return paddr_bits < PADDR_BITS ? sh_l1e_gnp_raw()
+ return sh_have_pte_rsvd_bits() ? sh_l1e_gnp_raw()
: shadow_l1e_empty();
}
{
unsigned long gfn_val = MASK_INSR(gfn_x(gfn), SH_L1E_MMIO_GFN_MASK);
- if ( paddr_bits >= PADDR_BITS ||
+ if ( !sh_have_pte_rsvd_bits() ||
gfn_x(gfn) != MASK_EXTR(gfn_val, SH_L1E_MMIO_GFN_MASK) )
return shadow_l1e_empty();