ia64/xen-unstable
changeset 16344:ff2edb1fd9f2
x86: Change cache attributes of Xen 1:1 page mappings in response to
guest mapping requests.
Based on a patch by Jan Beulich <jbeulich@novell.com>
Signed-off-by: Keir Fraser <keir@xensource.com>
author       Keir Fraser <keir@xensource.com>
date         Wed Nov 07 11:44:05 2007 +0000 (2007-11-07)
parents      fbe7ed173314
children     c982fe8a9f91
files        xen/arch/ia64/xen/mm.c xen/arch/x86/mm.c xen/common/grant_table.c xen/include/asm-ia64/mm.h xen/include/asm-x86/mm.h xen/include/asm-x86/page.h
line diff
--- a/xen/arch/ia64/xen/mm.c	Wed Nov 07 09:22:31 2007 +0000
+++ b/xen/arch/ia64/xen/mm.c	Wed Nov 07 11:44:05 2007 +0000
@@ -2894,11 +2894,9 @@ arch_memory_op(int op, XEN_GUEST_HANDLE(
         return 0;
     }
 
-int
-iomem_page_test(unsigned long mfn, struct page_info *page)
+int is_iomem_page(unsigned long mfn)
 {
-    return unlikely(!mfn_valid(mfn)) ||
-        unlikely(page_get_owner(page) == dom_io);
+    return (!mfn_valid(mfn) || (page_get_owner(mfn_to_page(mfn)) == dom_io));
 }
 
 /*
--- a/xen/arch/x86/mm.c	Wed Nov 07 09:22:31 2007 +0000
+++ b/xen/arch/x86/mm.c	Wed Nov 07 11:44:05 2007 +0000
@@ -607,10 +607,9 @@ get_##level##_linear_pagetable(
 }
 
 
-int iomem_page_test(unsigned long mfn, struct page_info *page)
+int is_iomem_page(unsigned long mfn)
 {
-    return unlikely(!mfn_valid(mfn)) ||
-        unlikely(page_get_owner(page) == dom_io);
+    return (!mfn_valid(mfn) || (page_get_owner(mfn_to_page(mfn)) == dom_io));
 }
 
 
@@ -620,19 +619,19 @@ get_page_from_l1e(
 {
     unsigned long mfn = l1e_get_pfn(l1e);
     struct page_info *page = mfn_to_page(mfn);
+    uint32_t l1f = l1e_get_flags(l1e);
     int okay;
 
-    if ( !(l1e_get_flags(l1e) & _PAGE_PRESENT) )
+    if ( !(l1f & _PAGE_PRESENT) )
         return 1;
 
-    if ( unlikely(l1e_get_flags(l1e) & l1_disallow_mask(d)) )
+    if ( unlikely(l1f & l1_disallow_mask(d)) )
     {
-        MEM_LOG("Bad L1 flags %x",
-                l1e_get_flags(l1e) & l1_disallow_mask(d));
+        MEM_LOG("Bad L1 flags %x", l1f & l1_disallow_mask(d));
         return 0;
     }
 
-    if ( iomem_page_test(mfn, page) )
+    if ( is_iomem_page(mfn) )
     {
         /* DOMID_IO reverts to caller for privilege checks. */
         if ( d == dom_io )
@@ -657,7 +656,7 @@ get_page_from_l1e(
      * contribute to writeable mapping refcounts.  (This allows the
      * qemu-dm helper process in dom0 to map the domain's memory without
      * messing up the count of "real" writable mappings.) */
-    okay = (((l1e_get_flags(l1e) & _PAGE_RW) &&
+    okay = (((l1f & _PAGE_RW) &&
              !(unlikely(paging_mode_external(d) && (d != current->domain))))
             ? get_page_and_type(page, d, PGT_writable_page)
             : get_page(page, d));
@@ -668,6 +667,36 @@ get_page_from_l1e(
                 mfn, get_gpfn_from_mfn(mfn),
                 l1e_get_intpte(l1e), d->domain_id);
     }
+    else if ( (pte_flags_to_cacheattr(l1f) !=
+               ((page->count_info >> PGC_cacheattr_base) & 7)) &&
+              !is_iomem_page(mfn) )
+    {
+        uint32_t x, nx, y = page->count_info;
+        uint32_t cacheattr = pte_flags_to_cacheattr(l1f);
+
+        if ( is_xen_heap_frame(page) )
+        {
+            if ( (l1f & _PAGE_RW) &&
+                 !(unlikely(paging_mode_external(d) &&
+                            (d != current->domain))) )
+                put_page_type(page);
+            put_page(page);
+            MEM_LOG("Attempt to change cache attributes of Xen heap page");
+            return 0;
+        }
+
+        while ( ((y >> PGC_cacheattr_base) & 7) != cacheattr )
+        {
+            x  = y;
+            nx = (x & ~PGC_cacheattr_mask) | (cacheattr << PGC_cacheattr_base);
+            y  = cmpxchg(&page->count_info, x, nx);
+        }
+
+#ifdef __x86_64__
+        map_pages_to_xen((unsigned long)mfn_to_virt(mfn), mfn, 1,
+                         PAGE_HYPERVISOR | cacheattr_to_pte_flags(cacheattr));
+#endif
+    }
 
     return okay;
 }
@@ -1828,6 +1857,24 @@ int get_page_type(struct page_info *page
 }
 
 
+void cleanup_page_cacheattr(struct page_info *page)
+{
+    uint32_t cacheattr = (page->count_info >> PGC_cacheattr_base) & 7;
+
+    if ( likely(cacheattr == 0) )
+        return;
+
+    page->count_info &= ~PGC_cacheattr_mask;
+
+    BUG_ON(is_xen_heap_frame(page));
+
+#ifdef __x86_64__
+    map_pages_to_xen((unsigned long)page_to_virt(page), page_to_mfn(page),
+                     1, PAGE_HYPERVISOR);
+#endif
+}
+
+
 int new_guest_cr3(unsigned long mfn)
 {
     struct vcpu *v = current;
@@ -3803,7 +3850,7 @@ static void __memguard_change_range(void
 {
     unsigned long _p = (unsigned long)p;
     unsigned long _l = (unsigned long)l;
-    unsigned long flags = __PAGE_HYPERVISOR | MAP_SMALL_PAGES;
+    unsigned int  flags = __PAGE_HYPERVISOR | MAP_SMALL_PAGES;
 
     /* Ensure we are dealing with a page-aligned whole number of pages. */
     ASSERT((_p&~PAGE_MASK) == 0);
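The interesting part of the x86/mm.c hunk is the cmpxchg loop in get_page_from_l1e(): it publishes the new 3-bit cache attribute into page->count_info without taking a lock, so it cannot interfere with concurrent reference-count updates on the same word. Below is a minimal, stand-alone sketch of that pattern only (not Xen code): it uses C11 atomics in place of Xen's cmpxchg() and simply assumes the same bits-26-28 field position.

#include <stdint.h>
#include <stdio.h>
#include <stdatomic.h>

#define ATTR_BASE 26                      /* assumed field position */
#define ATTR_MASK (7u << ATTR_BASE)

/* Lock-free update of a 3-bit attribute packed into a shared count word. */
static void set_cacheattr(_Atomic uint32_t *count_info, uint32_t attr)
{
    uint32_t x = atomic_load(count_info);
    uint32_t nx;

    do {
        if ( ((x >> ATTR_BASE) & 7) == attr )
            return;                       /* already published */
        nx = (x & ~ATTR_MASK) | (attr << ATTR_BASE);
        /* On failure, x is refreshed with the current value and we retry. */
    } while ( !atomic_compare_exchange_weak(count_info, &x, nx) );
}

int main(void)
{
    _Atomic uint32_t count_info = 2;      /* refcount 2, attribute 0 */
    set_cacheattr(&count_info, 5);        /* some non-default attribute */
    printf("count_info = %#x\n", (unsigned)atomic_load(&count_info));
    return 0;
}

The same field is consulted again in cleanup_page_cacheattr(): once put_page() drops the last reference, the attribute bits are cleared and the Xen 1:1 mapping of the frame is restored to plain PAGE_HYPERVISOR.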
--- a/xen/common/grant_table.c	Wed Nov 07 09:22:31 2007 +0000
+++ b/xen/common/grant_table.c	Wed Nov 07 11:44:05 2007 +0000
@@ -332,7 +332,7 @@ static void
     if ( op->flags & GNTMAP_host_map )
     {
         /* Could be an iomem page for setting up permission */
-        if ( iomem_page_test(frame, mfn_to_page(frame)) )
+        if ( is_iomem_page(frame) )
         {
             is_iomem = 1;
             if ( iomem_permit_access(ld, frame, frame) )
@@ -527,7 +527,7 @@ static void
                                               op->flags)) < 0 )
             goto unmap_out;
     }
-    else if ( iomem_page_test(op->frame, mfn_to_page(op->frame)) &&
+    else if ( is_iomem_page(op->frame) &&
               iomem_access_permitted(ld, op->frame, op->frame) )
     {
         if ( (rc = iomem_deny_access(ld, op->frame, op->frame)) < 0 )
@@ -1651,7 +1651,7 @@ gnttab_release_mappings(
             BUG_ON(!(act->pin & GNTPIN_hstw_mask));
             act->pin -= GNTPIN_hstw_inc;
 
-            if ( iomem_page_test(act->frame, mfn_to_page(act->frame)) &&
+            if ( is_iomem_page(act->frame) &&
                  iomem_access_permitted(rd, act->frame, act->frame) )
                 rc = iomem_deny_access(rd, act->frame, act->frame);
             else
--- a/xen/include/asm-ia64/mm.h	Wed Nov 07 09:22:31 2007 +0000
+++ b/xen/include/asm-ia64/mm.h	Wed Nov 07 11:44:05 2007 +0000
@@ -185,8 +185,7 @@ static inline int get_page(struct page_i
     return 1;
 }
 
-/* Decide whether this page looks like iomem or real memory */
-int iomem_page_test(unsigned long mfn, struct page_info *page);
+int is_iomem_page(unsigned long mfn);
 
 extern void put_page_type(struct page_info *page);
 extern int get_page_type(struct page_info *page, u32 type);
--- a/xen/include/asm-x86/mm.h	Wed Nov 07 09:22:31 2007 +0000
+++ b/xen/include/asm-x86/mm.h	Wed Nov 07 11:44:05 2007 +0000
@@ -84,25 +84,23 @@ struct page_info
 #define _PGT_pae_xen_l2     26
 #define PGT_pae_xen_l2      (1U<<_PGT_pae_xen_l2)
 
- /* 16-bit count of uses of this frame as its current type. */
-#define PGT_count_mask      ((1U<<16)-1)
+ /* 26-bit count of uses of this frame as its current type. */
+#define PGT_count_mask      ((1U<<26)-1)
 
  /* Cleared when the owning guest 'frees' this page. */
 #define _PGC_allocated      31
 #define PGC_allocated       (1U<<_PGC_allocated)
  /* Set on a *guest* page to mark it out-of-sync with its shadow */
-#define _PGC_out_of_sync     30
+#define _PGC_out_of_sync    30
 #define PGC_out_of_sync     (1U<<_PGC_out_of_sync)
  /* Set when is using a page as a page table */
-#define _PGC_page_table      29
+#define _PGC_page_table     29
 #define PGC_page_table      (1U<<_PGC_page_table)
- /* 29-bit count of references to this frame. */
-#define PGC_count_mask      ((1U<<29)-1)
-
-/* We trust the slab allocator in slab.c, and our use of it. */
-#define PageSlab(page)      (1)
-#define PageSetSlab(page)   ((void)0)
-#define PageClearSlab(page) ((void)0)
+ /* 3-bit PAT/PCD/PWT cache-attribute hint. */
+#define PGC_cacheattr_base  26
+#define PGC_cacheattr_mask  (7U<<PGC_cacheattr_base)
+ /* 26-bit count of references to this frame. */
+#define PGC_count_mask      ((1U<<26)-1)
 
 #define is_xen_heap_frame(pfn) ({                                       \
     paddr_t maddr = page_to_maddr(pfn);                                 \
@@ -147,6 +145,8 @@ void init_frametable(void);
 void free_page_type(struct page_info *page, unsigned long type);
 int _shadow_mode_refcounts(struct domain *d);
 
+void cleanup_page_cacheattr(struct page_info *page);
+
 static inline void put_page(struct page_info *page)
 {
     u32 nx, x, y = page->count_info;
@@ -158,7 +158,10 @@ static inline void put_page(struct page_
     while ( unlikely((y = cmpxchg(&page->count_info, x, nx)) != x) );
 
     if ( unlikely((nx & PGC_count_mask) == 0) )
+    {
+        cleanup_page_cacheattr(page);
         free_domheap_page(page);
+    }
 }
 
 
@@ -196,8 +199,7 @@ static inline int get_page(struct page_i
     return 1;
 }
 
-/* Decide whether this page looks like iomem or real memory */
-int iomem_page_test(unsigned long mfn, struct page_info *page);
+int is_iomem_page(unsigned long mfn);
 
 void put_page_type(struct page_info *page);
 int get_page_type(struct page_info *page, unsigned long type);
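With the asm-x86/mm.h change, count_info on x86 packs three flag bits (bits 29-31), the new 3-bit cache attribute (bits 26-28) and a 26-bit reference count (bits 0-25) into one 32-bit word, which is why PGC_count_mask shrinks from 29 to 26 bits. A small stand-alone sketch (the PGC_* constants are copied from the hunk above; the rest is illustrative only) showing that the fields stay disjoint and how they are extracted:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PGC_allocated       (1u << 31)
#define PGC_out_of_sync     (1u << 30)
#define PGC_page_table      (1u << 29)
#define PGC_cacheattr_base  26
#define PGC_cacheattr_mask  (7u << PGC_cacheattr_base)
#define PGC_count_mask      ((1u << 26) - 1)

int main(void)
{
    /* Flag bits, cache attribute and refcount occupy disjoint bit ranges. */
    assert(((PGC_allocated | PGC_out_of_sync | PGC_page_table) &
            (PGC_cacheattr_mask | PGC_count_mask)) == 0);
    assert((PGC_cacheattr_mask & PGC_count_mask) == 0);

    /* An allocated page with cache attribute 5 and three references. */
    uint32_t count_info = PGC_allocated | (5u << PGC_cacheattr_base) | 3u;
    printf("refcount  = %u\n", count_info & PGC_count_mask);
    printf("cacheattr = %u\n", (count_info >> PGC_cacheattr_base) & 7);
    return 0;
}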
--- a/xen/include/asm-x86/page.h	Wed Nov 07 09:22:31 2007 +0000
+++ b/xen/include/asm-x86/page.h	Wed Nov 07 11:44:05 2007 +0000
@@ -360,6 +360,16 @@ int map_pages_to_xen(
     unsigned int flags);
 void destroy_xen_mappings(unsigned long v, unsigned long e);
 
+/* Convert between PAT/PCD/PWT embedded in PTE flags and 3-bit cacheattr. */
+static inline uint32_t pte_flags_to_cacheattr(uint32_t flags)
+{
+    return ((flags >> 5) & 4) | ((flags >> 3) & 3);
+}
+static inline uint32_t cacheattr_to_pte_flags(uint32_t cacheattr)
+{
+    return ((cacheattr & 4) << 5) | ((cacheattr & 3) << 3);
+}
+
 #endif /* !__ASSEMBLY__ */
 
 #define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
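The two new inline helpers in page.h shuffle the PAT (bit 7), PCD (bit 4) and PWT (bit 3) bits of a PTE into a compact 3-bit attribute and back; this is the value kept in the PGC_cacheattr field above. A stand-alone check (not part of the patch; the helpers are copied verbatim from the hunk) that the shuffling really is a round trip:

#include <assert.h>
#include <stdint.h>

/* Copies of the two helpers above, so this compiles on its own. */
static uint32_t pte_flags_to_cacheattr(uint32_t flags)
{
    return ((flags >> 5) & 4) | ((flags >> 3) & 3);
}
static uint32_t cacheattr_to_pte_flags(uint32_t cacheattr)
{
    return ((cacheattr & 4) << 5) | ((cacheattr & 3) << 3);
}

/* x86 PTE bits involved: PWT = bit 3, PCD = bit 4, PAT = bit 7. */
#define PTE_PWT (1u << 3)
#define PTE_PCD (1u << 4)
#define PTE_PAT (1u << 7)

int main(void)
{
    /* The 3-bit attribute is PAT:PCD:PWT, most to least significant. */
    assert(pte_flags_to_cacheattr(PTE_PWT) == 1);
    assert(pte_flags_to_cacheattr(PTE_PCD) == 2);
    assert(pte_flags_to_cacheattr(PTE_PAT) == 4);

    /* The helpers are inverses for every 3-bit value. */
    for ( uint32_t attr = 0; attr < 8; attr++ )
        assert(pte_flags_to_cacheattr(cacheattr_to_pte_flags(attr)) == attr);
    return 0;
}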