#define _PGT_partial PG_shift(8)
#define PGT_partial PG_mask(1, 8)
+/* Has this page been mapped writeable with a non-coherent memory type? */
+#define _PGT_non_coherent PG_shift(9)
+#define PGT_non_coherent PG_mask(1, 9)
+
/* Count of uses of this frame as its current type. */
-#define PGT_count_width PG_shift(8)
+#define PGT_count_width PG_shift(9)
#define PGT_count_mask ((1UL<<PGT_count_width)-1)
/* Are the 'type mask' bits identical? */
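For reference: PG_shift()/PG_mask() (from xen/include/asm-x86/mm.h) pack these flags into the top bits of type_info, which is why adding a flag at index 9 forces PGT_count_width down from PG_shift(8) to PG_shift(9). A minimal standalone sketch of the resulting layout on a 64-bit build (the #include, main() and printf() scaffolding are illustrative only):

    #include <stdio.h>

    #define BITS_PER_LONG    64
    #define PG_shift(idx)    (BITS_PER_LONG - (idx))
    #define PG_mask(x, idx)  (x ## UL << PG_shift(idx))

    #define PGT_partial      PG_mask(1, 8)   /* bit 56 */
    #define PGT_non_coherent PG_mask(1, 9)   /* bit 55, the new flag */
    #define PGT_count_width  PG_shift(9)     /* count shrinks to bits 0-54 */
    #define PGT_count_mask   ((1UL << PGT_count_width) - 1)

    int main(void)
    {
        printf("PGT_non_coherent = 0x%016lx\n", PGT_non_coherent);
        printf("PGT_count_mask   = 0x%016lx\n", PGT_count_mask);
        return 0;
    }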
return -EACCES;
}
+ /*
+ * Track writeable non-coherent mappings to RAM pages, to trigger a cache
+ * flush later if the target is used as anything but a PGT_writable page.
+ * We care about all writeable mappings, including foreign mappings.
+ */
+ if ( !boot_cpu_has(X86_FEATURE_XEN_SELFSNOOP) &&
+ (l1f & (PAGE_CACHE_ATTRS | _PAGE_RW)) == (_PAGE_WC | _PAGE_RW) )
+ set_bit(_PGT_non_coherent, &page->u.inuse.type_info);
+
return 0;
could_not_pin:
}
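Unpacking the predicate above: PAGE_CACHE_ATTRS is _PAGE_PAT | _PAGE_PCD | _PAGE_PWT, so the equality fires only for mappings that are both writeable (_PAGE_RW) and exactly Write-Combining (_PAGE_WC), the one attribute combination this hunk needs to track. Restated as a standalone helper (the helper name is hypothetical; the flag constants are Xen's):

    /*
     * Hypothetical helper, extracted for illustration: matches exactly the
     * writeable Write-Combining mappings that get_page_from_l1e() tracks.
     */
    static bool pte_is_noncoherent_writeable(unsigned int l1f)
    {
        return (l1f & (PAGE_CACHE_ATTRS | _PAGE_RW)) == (_PAGE_WC | _PAGE_RW);
    }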
}
+ /*
+ * Flush the cache if there were previously non-coherent writeable
+ * mappings of this page. This forces the page to be coherent before it
+ * is freed back to the heap.
+ */
+ if ( __test_and_clear_bit(_PGT_non_coherent, &page->u.inuse.type_info) )
+ {
+ void *addr = __map_domain_page(page);
+
+ cache_flush(addr, PAGE_SIZE);
+ unmap_domain_page(addr);
+ }
+
return rc;
}
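For a sense of what cache_flush() does here: it must write back and invalidate every cache line covering the page before the page is released. A toy sketch under simplifying assumptions (fixed 64-byte lines, plain CLFLUSH; Xen's real cache_flush() derives the stride from CPUID and can use CLFLUSHOPT where available):

    /* Toy flush (illustration only): write back and invalidate each line,
     * then fence so the flushes complete before the page is reused. */
    static void toy_cache_flush(const void *addr, unsigned int size)
    {
        const char *p = addr;
        unsigned int i;

        for ( i = 0; i < size; i += 64 )
            asm volatile ( "clflush %0" :: "m" (p[i]) : "memory" );

        asm volatile ( "mfence" ::: "memory" );
    }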
if ( unlikely(!(nx & PGT_validated)) )
{
+ /*
+ * Flush the cache if there were previously non-coherent mappings of
+ * this page, and we're trying to use it as anything other than a
+ * writeable page. This forces the page to be coherent before we
+ * validate its contents for safety.
+ */
+ if ( (nx & PGT_non_coherent) && type != PGT_writable_page )
+ {
+ void *addr = __map_domain_page(page);
+
+ cache_flush(addr, PAGE_SIZE);
+ unmap_domain_page(addr);
+
+ page->u.inuse.type_info &= ~PGT_non_coherent;
+ }
+
/*
* No special validation needed for writable or shared pages. Page
* tables and GDT/LDT need to have their contents audited.
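Together with the previous two hunks this gives PGT_non_coherent a simple lifecycle: set in get_page_from_l1e() when a writeable WC mapping of RAM is created, then flushed-and-cleared either here (when the page is validated as anything but PGT_writable_page) or in the final-put path above. The guard on this flush, restated as a hypothetical standalone predicate:

    /* Hypothetical helper: true when the pre-validation flush must run. */
    static bool needs_prevalidation_flush(const struct page_info *page,
                                          unsigned long type)
    {
        return (page->u.inuse.type_info & PGT_non_coherent) &&
               type != PGT_writable_page;
    }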
ol1e = *pl1e;
if ( UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, curr, 0) )
+ {
+ /*
+ * We always create mappings in this path. However, our caller,
+ * map_grant_ref(), only passes potentially non-zero cache_flags for
+ * MMIO frames, so this path doesn't create non-coherent mappings of
+ * RAM frames and there's no need to set PGT_non_coherent.
+ */
+ ASSERT(!cache_flags || is_iomem_page(frame));
+
rc = GNTST_okay;
+ }
out_unlock:
page_unlock(page);
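The ASSERT() above encodes the contract with map_grant_ref(): non-zero cache_flags are only ever passed for MMIO frames, so no writeable non-coherent mapping of RAM can be created on this path and PGT_non_coherent never needs setting here. The same invariant as a hypothetical predicate (is_iomem_page() is Xen's):

    /* Hypothetical restatement of the invariant asserted above. */
    static bool grant_cache_flags_ok(unsigned int cache_flags, mfn_t frame)
    {
        return !cache_flags || is_iomem_page(frame);
    }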
l1e_get_flags(ol1e), addr, grant_pte_flags);
if ( UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, curr, 0) )
+ {
+ /*
+ * Generally, replace_grant_pv_mapping() is used to destroy mappings
+ * (nl1e = l1e_empty()), but nl1e can be a present mapping on the
+ * GNTTABOP_unmap_and_replace path.
+ *
+ * In such cases, the PTE is fully transplanted from its old location
+ * via steal_linear_address(), so we need not perform PGT_non_coherent
+ * checking here.
+ */
rc = GNTST_okay;
+ }
out_unlock:
page_unlock(page);
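For context on the transplant mentioned above: on the GNTTABOP_unmap_and_replace path the old PTE is captured and holed at its original location, then written verbatim at the new one, so no new combination of writeability and cacheability ever comes into existence. A conceptual sketch (transplant_pte() is hypothetical; l1e_write()/l1e_empty() are Xen's):

    /* Hypothetical sketch: the PTE moves verbatim, so any PGT_non_coherent
     * bookkeeping was already done when the mapping was first created. */
    static void transplant_pte(l1_pgentry_t *dst, l1_pgentry_t *src)
    {
        l1_pgentry_t pte = *src;      /* capture the old mapping, flags and all */

        l1e_write(src, l1e_empty());  /* hole the old location */
        l1e_write(dst, pte);          /* reinstate at the new location */
    }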