is_pv_domain(d)) ? \
L1_DISALLOW_MASK : (L1_DISALLOW_MASK & ~PAGE_CACHE_ATTRS))
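+/*
+ * Cachability enforcement for PV guest mappings of direct MMIO pages:
+ *  default / "mmio-relax=0": enforce for all domains,
+ *  "mmio-relax" / "mmio-relax=1": relax for the hardware domain only,
+ *  "mmio-relax=all": relax for all domains (stored as -1).
+ */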
+static s8 __read_mostly opt_mmio_relax;
+static void __init parse_mmio_relax(const char *s)
+{
+ if ( !*s )
+ opt_mmio_relax = 1;
+ else
+ opt_mmio_relax = parse_bool(s);
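+    /* parse_bool() yields -1 for unrecognized input; keep that only for "all". */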
+ if ( opt_mmio_relax < 0 && strcmp(s, "all") )
+ opt_mmio_relax = 0;
+}
+custom_param("mmio-relax", parse_mmio_relax);
+
static void __init init_frametable_chunk(void *start, void *end)
{
unsigned long s = (unsigned long)start;
if ( !mfn_valid(mfn) ||
(real_pg_owner = page_get_owner_and_reference(page)) == dom_io )
{
-#ifndef NDEBUG
- const unsigned long *ro_map;
- unsigned int seg, bdf;
-#endif
+ int flip = 0;
/* Only needed the reference to confirm dom_io ownership. */
if ( mfn_valid(mfn) )
return -EINVAL;
}
- if ( !(l1f & _PAGE_RW) ||
- !rangeset_contains_singleton(mmio_ro_ranges, mfn) )
- return 0;
+ if ( !rangeset_contains_singleton(mmio_ro_ranges, mfn) )
+ {
+        /* MMIO pages must not be mapped cachable unless explicitly requested. */
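+        /*
+         * opt_mmio_relax == 1 relaxes this for the hardware domain only,
+         * -1 ("mmio-relax=all") for every domain; the nested case label
+         * shares the return with the hardware domain check.
+         */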
+ switch ( opt_mmio_relax )
+ {
+ case 0:
+ break;
+ case 1:
+ if ( is_hardware_domain(l1e_owner) )
+ case -1:
+                return 0;
+ default:
+ ASSERT_UNREACHABLE();
+ }
+ }
+ else if ( l1f & _PAGE_RW )
+ {
#ifndef NDEBUG
- if ( !pci_mmcfg_decode(mfn, &seg, &bdf) ||
- ((ro_map = pci_get_ro_map(seg)) != NULL &&
- test_bit(bdf, ro_map)) )
- printk(XENLOG_G_WARNING
- "d%d: Forcing read-only access to MFN %lx\n",
- l1e_owner->domain_id, mfn);
- else
- rangeset_report_ranges(mmio_ro_ranges, 0, ~0UL,
- print_mmio_emul_range,
- &(struct mmio_emul_range_ctxt){
- .d = l1e_owner,
- .mfn = mfn });
+ const unsigned long *ro_map;
+ unsigned int seg, bdf;
+
+ if ( !pci_mmcfg_decode(mfn, &seg, &bdf) ||
+ ((ro_map = pci_get_ro_map(seg)) != NULL &&
+ test_bit(bdf, ro_map)) )
+ printk(XENLOG_G_WARNING
+ "d%d: Forcing read-only access to MFN %lx\n",
+ l1e_owner->domain_id, mfn);
+ else
+ rangeset_report_ranges(mmio_ro_ranges, 0, ~0UL,
+ print_mmio_emul_range,
+ &(struct mmio_emul_range_ctxt){
+ .d = l1e_owner,
+ .mfn = mfn });
#endif
- return 1;
+ flip = _PAGE_RW;
+ }
+
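+    /*
+     * Cachable requests (WB, WT, WP) get their attribute bits flipped to an
+     * uncachable type; UC, UC- and WC requests are left alone.
+     */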
+ switch ( l1f & PAGE_CACHE_ATTRS )
+ {
+ case 0: /* WB */
+ flip |= _PAGE_PWT | _PAGE_PCD;
+ break;
+ case _PAGE_PWT: /* WT */
+ case _PAGE_PWT | _PAGE_PAT: /* WP */
+ flip |= _PAGE_PCD | (l1f & _PAGE_PAT);
+ break;
+ }
+
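+    /* A positive return value is the set of flags to flip in the caller. */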
+ return flip;
}
if ( unlikely( (real_pg_owner != pg_owner) &&
goto fail;
case 0:
break;
- case 1:
- l1e_remove_flags(pl1e[i], _PAGE_RW);
+ case _PAGE_RW ... _PAGE_RW | PAGE_CACHE_ATTRS:
+ ASSERT(!(ret & ~(_PAGE_RW | PAGE_CACHE_ATTRS)));
+ l1e_flip_flags(pl1e[i], ret);
break;
}
return -EINVAL;
}
- /* Fast path for identical mapping, r/w and presence. */
- if ( !l1e_has_changed(ol1e, nl1e, _PAGE_RW | _PAGE_PRESENT) )
+ /* Fast path for identical mapping, r/w, presence, and cachability. */
+ if ( !l1e_has_changed(ol1e, nl1e,
+ PAGE_CACHE_ATTRS | _PAGE_RW | _PAGE_PRESENT) )
{
adjust_guest_l1e(nl1e, pt_dom);
if ( UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, pt_vcpu,
return rc;
case 0:
break;
- case 1:
- l1e_remove_flags(nl1e, _PAGE_RW);
+ case _PAGE_RW ... _PAGE_RW | PAGE_CACHE_ATTRS:
+ ASSERT(!(rc & ~(_PAGE_RW | PAGE_CACHE_ATTRS)));
+ l1e_flip_flags(nl1e, rc);
rc = 0;
break;
}
l1_pgentry_t pte, ol1e, nl1e, *pl1e;
struct vcpu *v = current;
struct domain *d = v->domain;
+ int ret;
/* Only allow naturally-aligned stores within the original %cr2 page. */
if ( unlikely(((addr^ptwr_ctxt->cr2) & PAGE_MASK) || (addr & (bytes-1))) )
/* Check the new PTE. */
nl1e = l1e_from_intpte(val);
- switch ( get_page_from_l1e(nl1e, d, d) )
+ switch ( ret = get_page_from_l1e(nl1e, d, d) )
{
default:
if ( is_pv_32bit_domain(d) && (bytes == 4) && (unaligned_addr & 4) &&
break;
case 0:
break;
- case 1:
- l1e_remove_flags(nl1e, _PAGE_RW);
+ case _PAGE_RW ... _PAGE_RW | PAGE_CACHE_ATTRS:
+ ASSERT(!(ret & ~(_PAGE_RW | PAGE_CACHE_ATTRS)));
+ l1e_flip_flags(nl1e, ret);
break;
}
gfn_t target_gfn = guest_l1e_get_gfn(guest_entry);
u32 pass_thru_flags;
u32 gflags, sflags;
+ bool_t mmio_mfn;
/* We don't shadow PAE l3s */
ASSERT(GUEST_PAGING_LEVELS > 3 || level != 3);
// mfn means that we can not usefully shadow anything, and so we
// return early.
//
- if ( !mfn_valid(target_mfn)
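+    /* An invalid MFN, or (at L1) a dom_io-owned page, denotes direct MMIO. */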
+ mmio_mfn = !mfn_valid(target_mfn)
+ || (level == 1
+ && page_get_owner(mfn_to_page(target_mfn)) == dom_io);
+ if ( mmio_mfn
&& !(level == 1 && (!shadow_mode_refcounts(d)
|| p2mt == p2m_mmio_direct)) )
{
_PAGE_RW | _PAGE_PRESENT);
if ( guest_supports_nx(v) )
pass_thru_flags |= _PAGE_NX_BIT;
- if ( !shadow_mode_refcounts(d) && !mfn_valid(target_mfn) )
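+    /*
+     * For L1 MMIO mappings without refcounting, let the guest's cache
+     * attribute bits through into the shadow entry.
+     */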
+ if ( level == 1 && !shadow_mode_refcounts(d) && mmio_mfn )
pass_thru_flags |= _PAGE_PAT | _PAGE_PCD | _PAGE_PWT;
sflags = gflags & pass_thru_flags;
}
/* Read-only memory */
- if ( p2m_is_readonly(p2mt) ||
- (p2mt == p2m_mmio_direct &&
- rangeset_contains_singleton(mmio_ro_ranges, mfn_x(target_mfn))) )
+ if ( p2m_is_readonly(p2mt) )
sflags &= ~_PAGE_RW;
+ else if ( p2mt == p2m_mmio_direct &&
+ rangeset_contains_singleton(mmio_ro_ranges, mfn_x(target_mfn)) )
+ {
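+        /* Read-only MMIO: strip write access and force an uncachable type. */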
+ sflags &= ~(_PAGE_RW | _PAGE_PAT);
+ sflags |= _PAGE_PCD | _PAGE_PWT;
+ }
// protect guest page tables
//
&& !sh_l1e_is_magic(new_sl1e) )
{
/* About to install a new reference */
- if ( shadow_mode_refcounts(d) ) {
+ if ( shadow_mode_refcounts(d) )
+ {
+#define PAGE_FLIPPABLE (_PAGE_RW | _PAGE_PWT | _PAGE_PCD | _PAGE_PAT)
+ int rc;
+
TRACE_SHADOW_PATH_FLAG(TRCE_SFLAG_SHADOW_L1_GET_REF);
- switch ( shadow_get_page_from_l1e(new_sl1e, d, new_type) )
+ switch ( rc = shadow_get_page_from_l1e(new_sl1e, d, new_type) )
{
default:
/* Doesn't look like a pagetable. */
flags |= SHADOW_SET_ERROR;
new_sl1e = shadow_l1e_empty();
break;
- case 1:
- shadow_l1e_remove_flags(new_sl1e, _PAGE_RW);
+ case PAGE_FLIPPABLE & -PAGE_FLIPPABLE ... PAGE_FLIPPABLE:
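+            /*
+             * The range starts at the lowest flippable bit; the ASSERT
+             * below catches any non-flippable bits.
+             */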
+ ASSERT(!(rc & ~PAGE_FLIPPABLE));
+ new_sl1e = shadow_l1e_flip_flags(new_sl1e, rc);
/* fall through */
case 0:
shadow_vram_get_l1e(new_sl1e, sl1e, sl1mfn, d);
break;
}
+#undef PAGE_FLIPPABLE
}
}
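
Not part of the patch: a minimal, self-contained user-space sketch of the
cache-attribute flip computed above, assuming the standard 4K-PTE bit
positions (_PAGE_RW bit 1, _PAGE_PWT bit 3, _PAGE_PCD bit 4, _PAGE_PAT bit 7);
the cache_flip() helper and the test harness are illustrative names only.
It shows that cachable requests (WB, WT, WP) all end up with PWT|PCD set once
the returned mask is XORed into the flags (the effect l1e_flip_flags() has on
the PTE), while UC, UC- and WC requests come back unchanged.

/* Illustrative sketch only; bit positions per the x86 4K PTE layout. */
#include <stdio.h>

#define _PAGE_RW   (1u << 1)
#define _PAGE_PWT  (1u << 3)
#define _PAGE_PCD  (1u << 4)
#define _PAGE_PAT  (1u << 7)
#define PAGE_CACHE_ATTRS (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)

/* Mirror of the cache-attribute part of the flip computation in the patch. */
static unsigned int cache_flip(unsigned int l1f)
{
    unsigned int flip = 0;

    switch ( l1f & PAGE_CACHE_ATTRS )
    {
    case 0:                        /* WB */
        flip |= _PAGE_PWT | _PAGE_PCD;
        break;
    case _PAGE_PWT:                /* WT */
    case _PAGE_PWT | _PAGE_PAT:    /* WP */
        flip |= _PAGE_PCD | (l1f & _PAGE_PAT);
        break;
    }

    return flip;
}

int main(void)
{
    unsigned int attrs[] = { 0, _PAGE_PWT, _PAGE_PWT | _PAGE_PAT,
                             _PAGE_PCD, _PAGE_PCD | _PAGE_PWT, _PAGE_PAT };
    unsigned int i;

    for ( i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++ )
    {
        unsigned int f = attrs[i];

        /* XOR of the flip mask models what l1e_flip_flags() does. */
        printf("attrs %#04x -> %#04x\n", f, f ^ cache_flip(f));
    }

    return 0;
}

Building this with any C compiler and running it prints the before/after
attribute bits for each request type.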