guest mapping requests.
Based on a patch by Jan Beulich <jbeulich@novell.com>
Signed-off-by: Keir Fraser <keir@xensource.com>
return 0;
}
-int
-iomem_page_test(unsigned long mfn, struct page_info *page)
+int is_iomem_page(unsigned long mfn)
{
- return unlikely(!mfn_valid(mfn)) ||
- unlikely(page_get_owner(page) == dom_io);
+ return (!mfn_valid(mfn) || (page_get_owner(mfn_to_page(mfn)) == dom_io));
}
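The new predicate takes only the MFN and looks up the struct page_info itself, so call sites no longer need to pass both. A minimal call-site sketch, modelled on the mm.c hunk below (d, dom_io and iomem_access_permitted as used there):

    /* Sketch: refuse guest mappings of I/O memory the caller may not touch. */
    if ( is_iomem_page(mfn) )
    {
        /* DOMID_IO reverts to caller for privilege checks. */
        if ( d == dom_io )
            d = current->domain;
        if ( !iomem_access_permitted(d, mfn, mfn) )
            return 0;
    }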
/*
}
-int iomem_page_test(unsigned long mfn, struct page_info *page)
+int is_iomem_page(unsigned long mfn)
{
- return unlikely(!mfn_valid(mfn)) ||
- unlikely(page_get_owner(page) == dom_io);
+ return (!mfn_valid(mfn) || (page_get_owner(mfn_to_page(mfn)) == dom_io));
}
{
unsigned long mfn = l1e_get_pfn(l1e);
struct page_info *page = mfn_to_page(mfn);
+ uint32_t l1f = l1e_get_flags(l1e);
int okay;
- if ( !(l1e_get_flags(l1e) & _PAGE_PRESENT) )
+ if ( !(l1f & _PAGE_PRESENT) )
return 1;
- if ( unlikely(l1e_get_flags(l1e) & l1_disallow_mask(d)) )
+ if ( unlikely(l1f & l1_disallow_mask(d)) )
{
- MEM_LOG("Bad L1 flags %x",
- l1e_get_flags(l1e) & l1_disallow_mask(d));
+ MEM_LOG("Bad L1 flags %x", l1f & l1_disallow_mask(d));
return 0;
}
- if ( iomem_page_test(mfn, page) )
+ if ( is_iomem_page(mfn) )
{
/* DOMID_IO reverts to caller for privilege checks. */
if ( d == dom_io )
* contribute to writeable mapping refcounts. (This allows the
* qemu-dm helper process in dom0 to map the domain's memory without
* messing up the count of "real" writable mappings.) */
- okay = (((l1e_get_flags(l1e) & _PAGE_RW) &&
+ okay = (((l1f & _PAGE_RW) &&
!(unlikely(paging_mode_external(d) && (d != current->domain))))
? get_page_and_type(page, d, PGT_writable_page)
: get_page(page, d));
mfn, get_gpfn_from_mfn(mfn),
l1e_get_intpte(l1e), d->domain_id);
}
+ else if ( (pte_flags_to_cacheattr(l1f) !=
+ ((page->count_info >> PGC_cacheattr_base) & 7)) &&
+ !is_iomem_page(mfn) )
+ {
+ uint32_t x, nx, y = page->count_info;
+ uint32_t cacheattr = pte_flags_to_cacheattr(l1f);
+
+ if ( is_xen_heap_frame(page) )
+ {
+ if ( (l1f & _PAGE_RW) &&
+ !(unlikely(paging_mode_external(d) &&
+ (d != current->domain))) )
+ put_page_type(page);
+ put_page(page);
+ MEM_LOG("Attempt to change cache attributes of Xen heap page");
+ return 0;
+ }
+
+ while ( ((y >> PGC_cacheattr_base) & 7) != cacheattr )
+ {
+ x = y;
+ nx = (x & ~PGC_cacheattr_mask) | (cacheattr << PGC_cacheattr_base);
+ y = cmpxchg(&page->count_info, x, nx);
+ }
+
+#ifdef __x86_64__
+ map_pages_to_xen((unsigned long)mfn_to_virt(mfn), mfn, 1,
+ PAGE_HYPERVISOR | cacheattr_to_pte_flags(cacheattr));
+#endif
+ }
return okay;
}
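The cacheattr update just above is a standard lock-free read-modify-write: snapshot count_info, splice the new 3-bit hint into bits 28-26, and cmpxchg until no other CPU races the word. The same pattern in isolation, as a hypothetical helper (set_page_cacheattr is not part of this patch):

    /* Hypothetical helper isolating the cmpxchg pattern used above. */
    static void set_page_cacheattr(struct page_info *page, uint32_t cacheattr)
    {
        uint32_t x, nx, y = page->count_info;

        while ( ((y >> PGC_cacheattr_base) & 7) != cacheattr )
        {
            x  = y;                                  /* snapshot           */
            nx = (x & ~PGC_cacheattr_mask) |
                 (cacheattr << PGC_cacheattr_base);  /* splice in new hint */
            y  = cmpxchg(&page->count_info, x, nx);  /* retry if raced     */
        }
    }

The map_pages_to_xen() call that follows the loop is compiled on x86_64 only, presumably because only 64-bit builds keep a full 1:1 mapping of machine memory whose attributes have to stay coherent with the guest's view.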
}
+void cleanup_page_cacheattr(struct page_info *page)
+{
+ uint32_t cacheattr = (page->count_info >> PGC_cacheattr_base) & 7;
+
+ if ( likely(cacheattr == 0) )
+ return;
+
+ page->count_info &= ~PGC_cacheattr_mask;
+
+ BUG_ON(is_xen_heap_frame(page));
+
+#ifdef __x86_64__
+ map_pages_to_xen((unsigned long)page_to_virt(page), page_to_mfn(page),
+ 1, PAGE_HYPERVISOR);
+#endif
+}
+
+
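cleanup_page_cacheattr() undoes the hint at end of life: the put_page() hunk below calls it once the reference count hits zero, just before free_domheap_page(), restoring the default write-back attribute on the x86_64 1:1 mapping. A hypothetical post-condition sketch:

    /* Hypothetical check: after the final put_page(), the hint is clear. */
    static void assert_frame_cleaned(struct page_info *page)
    {
        BUG_ON(((page->count_info >> PGC_cacheattr_base) & 7) != 0);
    }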
int new_guest_cr3(unsigned long mfn)
{
struct vcpu *v = current;
{
unsigned long _p = (unsigned long)p;
unsigned long _l = (unsigned long)l;
- unsigned long flags = __PAGE_HYPERVISOR | MAP_SMALL_PAGES;
+ unsigned int flags = __PAGE_HYPERVISOR | MAP_SMALL_PAGES;
/* Ensure we are dealing with a page-aligned whole number of pages. */
ASSERT((_p&~PAGE_MASK) == 0);
if ( op->flags & GNTMAP_host_map )
{
/* Could be an iomem page, mapped here to set up access permissions */
- if ( iomem_page_test(frame, mfn_to_page(frame)) )
+ if ( is_iomem_page(frame) )
{
is_iomem = 1;
if ( iomem_permit_access(ld, frame, frame) )
op->flags)) < 0 )
goto unmap_out;
}
- else if ( iomem_page_test(op->frame, mfn_to_page(op->frame)) &&
+ else if ( is_iomem_page(op->frame) &&
iomem_access_permitted(ld, op->frame, op->frame) )
{
if ( (rc = iomem_deny_access(ld, op->frame, op->frame)) < 0 )
BUG_ON(!(act->pin & GNTPIN_hstw_mask));
act->pin -= GNTPIN_hstw_inc;
- if ( iomem_page_test(act->frame, mfn_to_page(act->frame)) &&
+ if ( is_iomem_page(act->frame) &&
iomem_access_permitted(rd, act->frame, act->frame) )
rc = iomem_deny_access(rd, act->frame, act->frame);
else
return 1;
}
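The grant-table changes are call-site renames, but the pairing is worth seeing in one place: mapping a granted iomem frame permits access for the mapping domain, and both unmap paths revoke it again. A condensed, hypothetical sketch (ld, frame and rc as in the hunks above; the error handling is illustrative):

    /* Map path: grant the mapping domain access to the iomem frame. */
    if ( is_iomem_page(frame) )
    {
        if ( iomem_permit_access(ld, frame, frame) )
            rc = GNTST_general_error;            /* hypothetical handling */
    }

    /* Unmap paths: revoke the grant if it was ever made. */
    if ( is_iomem_page(frame) &&
         iomem_access_permitted(ld, frame, frame) )
        rc = iomem_deny_access(ld, frame, frame);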
-/* Decide whether this page looks like iomem or real memory */
-int iomem_page_test(unsigned long mfn, struct page_info *page);
+int is_iomem_page(unsigned long mfn);
extern void put_page_type(struct page_info *page);
extern int get_page_type(struct page_info *page, u32 type);
#define _PGT_pae_xen_l2 26
#define PGT_pae_xen_l2 (1U<<_PGT_pae_xen_l2)
- /* 16-bit count of uses of this frame as its current type. */
-#define PGT_count_mask ((1U<<16)-1)
+ /* 26-bit count of uses of this frame as its current type. */
+#define PGT_count_mask ((1U<<26)-1)
/* Cleared when the owning guest 'frees' this page. */
#define _PGC_allocated 31
#define PGC_allocated (1U<<_PGC_allocated)
/* Set on a *guest* page to mark it out-of-sync with its shadow */
-#define _PGC_out_of_sync 30
+#define _PGC_out_of_sync 30
#define PGC_out_of_sync (1U<<_PGC_out_of_sync)
 /* Set when a page is in use as a page table */
-#define _PGC_page_table 29
+#define _PGC_page_table 29
#define PGC_page_table (1U<<_PGC_page_table)
- /* 29-bit count of references to this frame. */
-#define PGC_count_mask ((1U<<29)-1)
-
-/* We trust the slab allocator in slab.c, and our use of it. */
-#define PageSlab(page) (1)
-#define PageSetSlab(page) ((void)0)
-#define PageClearSlab(page) ((void)0)
+ /* 3-bit PAT/PCD/PWT cache-attribute hint. */
+#define PGC_cacheattr_base 26
+#define PGC_cacheattr_mask (7U<<PGC_cacheattr_base)
+ /* 26-bit count of references to this frame. */
+#define PGC_count_mask ((1U<<26)-1)
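With the three flag bits at the top and the new hint below them, count_info now packs: allocated (bit 31), out-of-sync (bit 30), page-table (bit 29), the PAT/PCD/PWT hint (bits 28-26) and a 26-bit reference count (bits 25-0). A hypothetical compile-time check of that layout, assuming BUILD_BUG_ON from xen/lib.h:

    /* Hypothetical sanity check: the fields tile the 32-bit word exactly. */
    static inline void pgc_layout_checks(void)
    {
        BUILD_BUG_ON(PGC_cacheattr_mask & PGC_count_mask);
        BUILD_BUG_ON((PGC_allocated | PGC_out_of_sync | PGC_page_table |
                      PGC_cacheattr_mask | PGC_count_mask) != 0xffffffffU);
    }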
#define is_xen_heap_frame(pfn) ({ \
paddr_t maddr = page_to_maddr(pfn); \
void free_page_type(struct page_info *page, unsigned long type);
int _shadow_mode_refcounts(struct domain *d);
+void cleanup_page_cacheattr(struct page_info *page);
+
static inline void put_page(struct page_info *page)
{
u32 nx, x, y = page->count_info;
while ( unlikely((y = cmpxchg(&page->count_info, x, nx)) != x) );
if ( unlikely((nx & PGC_count_mask) == 0) )
+ {
+ cleanup_page_cacheattr(page);
free_domheap_page(page);
+ }
}
return 1;
}
-/* Decide whether this page looks like iomem or real memory */
-int iomem_page_test(unsigned long mfn, struct page_info *page);
+int is_iomem_page(unsigned long mfn);
void put_page_type(struct page_info *page);
int get_page_type(struct page_info *page, unsigned long type);
unsigned int flags);
void destroy_xen_mappings(unsigned long v, unsigned long e);
+/* Convert between PAT/PCD/PWT embedded in PTE flags and 3-bit cacheattr. */
+static inline uint32_t pte_flags_to_cacheattr(uint32_t flags)
+{
+ return ((flags >> 5) & 4) | ((flags >> 3) & 3);
+}
+static inline uint32_t cacheattr_to_pte_flags(uint32_t cacheattr)
+{
+ return ((cacheattr & 4) << 5) | ((cacheattr & 3) << 3);
+}
+
#endif /* !__ASSEMBLY__ */
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
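The two conversion helpers above only shuffle bits: a PTE carries PWT at bit 3, PCD at bit 4 and PAT at bit 7, and the hint packs them as PAT:PCD:PWT in bits 2:1:0. A worked round trip under those standard x86 flag positions:

    /* Worked example: PCD|PWT set in a PTE maps to cacheattr 3 (UC). */
    uint32_t flags = (1u << 4) | (1u << 3);          /* PCD | PWT = 0x18 */
    uint32_t attr  = pte_flags_to_cacheattr(flags);  /* ((0x18>>5)&4) | ((0x18>>3)&3) == 3 */
    /* cacheattr_to_pte_flags(3) == ((3&4)<<5) | ((3&3)<<3) == 0x18,
     * so the conversion is lossless for these three bits. */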