ia64/xen-unstable

view xen/include/asm-x86/x86_64/page.h @ 18722:3ff349c7aeb7

x86: relax restrictions on reserved bits in L3 for 32on64 x86 guests

A 32on64 guest cannot copy an existing pinned L3 entry to use as a new
L3 because COMPAT_L3_DISALLOW_MASK contains bits which are added to L3
entries by adjust_guest_l3e (U/S & R/W) or by the hardware (A & D).

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Mon Oct 27 10:08:48 2008 +0000 (2008-10-27)
parents 7750906b06b3
children 4ec25db9326a
line source
#ifndef __X86_64_PAGE_H__
#define __X86_64_PAGE_H__

/*
 * 4-level x86-64 paging with 4kB pages: 9 index bits per level, so each
 * level's shift is 9 more than the level below it.
 */
#define L1_PAGETABLE_SHIFT      12
#define L2_PAGETABLE_SHIFT      21
#define L3_PAGETABLE_SHIFT      30
#define L4_PAGETABLE_SHIFT      39
#define PAGE_SHIFT              L1_PAGETABLE_SHIFT
#define ROOT_PAGETABLE_SHIFT    L4_PAGETABLE_SHIFT

/* 2^9 = 512 entries per pagetable page, at every level. */
#define PAGETABLE_ORDER         9
#define L1_PAGETABLE_ENTRIES    (1<<PAGETABLE_ORDER)
#define L2_PAGETABLE_ENTRIES    (1<<PAGETABLE_ORDER)
#define L3_PAGETABLE_ENTRIES    (1<<PAGETABLE_ORDER)
#define L4_PAGETABLE_ENTRIES    (1<<PAGETABLE_ORDER)
#define ROOT_PAGETABLE_ENTRIES  L4_PAGETABLE_ENTRIES

/*
 * Fixed points in Xen's virtual layout.
 * NOTE(review): DIRECTMAP_VIRT_START used below is presumably derived from
 * __PAGE_OFFSET in config.h — confirm there.
 */
#define __PAGE_OFFSET           (0xFFFF830000000000)
#define __XEN_VIRT_START        (0xFFFF828C80000000)

/* These are architectural limits. Current CPUs support only 40-bit phys. */
#define PADDR_BITS              52
#define VADDR_BITS              48
#define PADDR_MASK              ((1UL << PADDR_BITS)-1)
#define VADDR_MASK              ((1UL << VADDR_BITS)-1)

/* Canonical iff bits 63:47 all equal bit 47 (sign-extension of the VA). */
#define is_canonical_address(x) (((long)(x) >> 47) == ((long)(x) >> 63))
#ifndef __ASSEMBLY__

#include <xen/config.h>
#include <asm/types.h>

/* Physical address where Xen was relocated to. */
extern unsigned long xen_phys_start;
38 static inline unsigned long __virt_to_maddr(unsigned long va)
39 {
40 ASSERT(va >= XEN_VIRT_START);
41 ASSERT(va < DIRECTMAP_VIRT_END);
42 ASSERT((va < XEN_VIRT_END) || (va >= DIRECTMAP_VIRT_START));
43 if ( va > DIRECTMAP_VIRT_START )
44 return va - DIRECTMAP_VIRT_START;
45 return va - XEN_VIRT_START + xen_phys_start;
46 }
/* Convert a Xen VA (direct map or Xen image) to a machine address. */
#define virt_to_maddr(va)   \
    (__virt_to_maddr((unsigned long)(va)))
/* Reverse mapping: only meaningful for machine addresses covered by the
 * direct map. */
#define maddr_to_virt(ma)   \
    ((void *)((unsigned long)(ma)+DIRECTMAP_VIRT_START))

/* read access (should only be used for debug printk's) */
typedef u64 intpte_t;
#define PRIpte "016lx"

/* One wrapper struct per level gives compile-time type checking between
 * pagetable levels. */
typedef struct { intpte_t l1; } l1_pgentry_t;
typedef struct { intpte_t l2; } l2_pgentry_t;
typedef struct { intpte_t l3; } l3_pgentry_t;
typedef struct { intpte_t l4; } l4_pgentry_t;
typedef l4_pgentry_t root_pgentry_t;

#endif /* !__ASSEMBLY__ */
/*
 * Whole-PTE reads/writes.  NOTE(review): plain assignment is presumably
 * sufficient because a 64-bit access is a single load/store here (unlike
 * 32-bit PAE) — confirm against the 32-bit variant of this header.
 */
#define pte_read_atomic(ptep)       (*(ptep))
#define pte_write_atomic(ptep, pte) (*(ptep) = (pte))
#define pte_write(ptep, pte)        (*(ptep) = (pte))

/* Given a virtual address, get an entry offset into a linear page table. */
#define l1_linear_offset(_a) (((_a) & VADDR_MASK) >> L1_PAGETABLE_SHIFT)
#define l2_linear_offset(_a) (((_a) & VADDR_MASK) >> L2_PAGETABLE_SHIFT)
#define l3_linear_offset(_a) (((_a) & VADDR_MASK) >> L3_PAGETABLE_SHIFT)
#define l4_linear_offset(_a) (((_a) & VADDR_MASK) >> L4_PAGETABLE_SHIFT)

/*
 * May a guest modify slot _s of a pagetable at the given level?
 * All L1 and L3 slots are guest-controllable.  L2 slots are restricted only
 * for 32-bit PV guests' PAE-Xen L2s (slots at/above the first Xen slot are
 * off limits).  For L4, a 32-bit PV guest sees only slot 0; 64-bit guests
 * may touch any slot outside Xen's reserved range.
 */
#define is_guest_l1_slot(_s) (1)
#define is_guest_l2_slot(_d, _t, _s)                   \
    ( !is_pv_32bit_domain(_d) ||                       \
      !((_t) & PGT_pae_xen_l2) ||                      \
      ((_s) < COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT(_d)) )
#define is_guest_l3_slot(_s) (1)
#define is_guest_l4_slot(_d, _s)                    \
    ( is_pv_32bit_domain(_d)                        \
      ? ((_s) == 0)                                 \
      : (((_s) < ROOT_PAGETABLE_FIRST_XEN_SLOT) ||  \
         ((_s) > ROOT_PAGETABLE_LAST_XEN_SLOT)))

/* On x86-64 the root pagetable is the L4. */
#define root_get_pfn        l4e_get_pfn
#define root_get_flags      l4e_get_flags
#define root_get_intpte     l4e_get_intpte
#define root_empty          l4e_empty
#define root_from_paddr     l4e_from_paddr
#define PGT_root_page_table PGT_l4_page_table
/*
 * PTE pfn and flags:
 *     40-bit pfn   = (pte[51:12])
 *     24-bit flags = (pte[63:52],pte[11:0])
 */

/* Extract flags into 24-bit integer, or turn 24-bit flags into a pte mask. */
/* Low 12 flag bits stay in place; pte[63:52] become flag bits [23:12]. */
#define get_pte_flags(x) (((int)((x) >> 40) & ~0xFFF) | ((int)(x) & 0xFFF))
#define put_pte_flags(x) (((intpte_t)((x) & ~0xFFF) << 40) | ((x) & 0xFFF))

/* Bit 23 of a 24-bit flag mask. This corresponds to bit 63 of a pte.*/
#define _PAGE_NX_BIT (1U<<23)
/* Evaluates to 0 on hardware without NX support, so masks degrade cleanly. */
#define _PAGE_NX     (cpu_has_nx ? _PAGE_NX_BIT : 0U)

/* Bit 22 of a 24-bit flag mask. This corresponds to bit 62 of a pte.*/
#define _PAGE_GNTTAB (1U<<22)

/*
 * Disallow unused flag bits plus PAT/PSE, PCD, PWT and GLOBAL.
 * Permit the NX bit if the hardware supports it.
 * Note that range [62:52] is available for software use on x86/64.
 */
#define BASE_DISALLOW_MASK (0xFF800198U & ~_PAGE_NX)

#define L1_DISALLOW_MASK (BASE_DISALLOW_MASK | _PAGE_GNTTAB)
#define L2_DISALLOW_MASK (BASE_DISALLOW_MASK)
#define L3_DISALLOW_MASK (BASE_DISALLOW_MASK)
#define L4_DISALLOW_MASK (BASE_DISALLOW_MASK)

/*
 * 32on64 guest L3 entries: disallow all software flag bits [23:12] plus
 * PWT/PCD/PSE/GLOBAL (0x198), but permit R/W and U/S (added by
 * adjust_guest_l3e) and A/D (set by hardware) so an existing pinned L3
 * entry can be copied into a new L3 — see this changeset's description.
 */
#define COMPAT_L3_DISALLOW_MASK 0xFFFFF198U

/* Xen's own mappings are GLOBAL so they survive guest CR3 switches. */
#define PAGE_HYPERVISOR         (__PAGE_HYPERVISOR | _PAGE_GLOBAL)
#define PAGE_HYPERVISOR_NOCACHE (__PAGE_HYPERVISOR_NOCACHE | _PAGE_GLOBAL)

/* Flags for PTEs mapping grant-table frames into a guest. */
#define GRANT_PTE_FLAGS \
    (_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_GNTTAB|_PAGE_USER)
#define USER_MAPPINGS_ARE_GLOBAL
#ifdef USER_MAPPINGS_ARE_GLOBAL
/*
 * Bit 12 of a 24-bit flag mask. This corresponds to bit 52 of a pte.
 * This is needed to distinguish between user and kernel PTEs since _PAGE_USER
 * is asserted for both.
 */
#define _PAGE_GUEST_KERNEL (1U<<12)
/* Global bit is allowed to be set on L1 PTEs. Intended for user mappings. */
#undef L1_DISALLOW_MASK
#define L1_DISALLOW_MASK ((BASE_DISALLOW_MASK | _PAGE_GNTTAB) & ~_PAGE_GLOBAL)
#else
/* Without global user mappings the distinguishing bit is unused. */
#define _PAGE_GUEST_KERNEL 0
#endif

#endif /* __X86_64_PAGE_H__ */
147 /*
148 * Local variables:
149 * mode: C
150 * c-set-style: "BSD"
151 * c-basic-offset: 4
152 * tab-width: 4
153 * indent-tabs-mode: nil
154 * End:
155 */