
xen/include/asm-x86/x86_64/page.h @ 15812:86a154e1ef5d

[HVM] Shadow: don't shadow the p2m table.
For HVM vcpus with paging disabled, we used to shadow the p2m table,
and skip the p2m lookup to go from gfn to mfn. Instead, we now
provide a simple pagetable that gives a one-to-one mapping of 4GB, and
shadow that, making the translations from gfn to mfn via the p2m.
This removes the paging-disabled special-case code from the shadow
fault handler, and allows us to expand the p2m interface, since all HVM
translations now go through the same p2m lookups.
Signed-off-by: Tim Deegan <Tim.Deegan@xensource.com>
author Tim Deegan <Tim.Deegan@xensource.com>
date Fri Aug 31 11:06:22 2007 +0100 (2007-08-31)
parents 23c4790512db
children ca2984b17fcf

#ifndef __X86_64_PAGE_H__
#define __X86_64_PAGE_H__

#define L1_PAGETABLE_SHIFT      12
#define L2_PAGETABLE_SHIFT      21
#define L3_PAGETABLE_SHIFT      30
#define L4_PAGETABLE_SHIFT      39
#define PAGE_SHIFT              L1_PAGETABLE_SHIFT
#define ROOT_PAGETABLE_SHIFT    L4_PAGETABLE_SHIFT

#define PAGETABLE_ORDER         9
#define L1_PAGETABLE_ENTRIES    (1<<PAGETABLE_ORDER)
#define L2_PAGETABLE_ENTRIES    (1<<PAGETABLE_ORDER)
#define L3_PAGETABLE_ENTRIES    (1<<PAGETABLE_ORDER)
#define L4_PAGETABLE_ENTRIES    (1<<PAGETABLE_ORDER)
#define ROOT_PAGETABLE_ENTRIES  L4_PAGETABLE_ENTRIES
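
/*
 * Illustrative note (not part of the original header): each level resolves
 * PAGETABLE_ORDER == 9 bits of the virtual address, so four levels plus the
 * 12-bit page offset cover 4*9 + 12 = 48 bits, matching VADDR_BITS below.
 */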

#define __PAGE_OFFSET           (0xFFFF830000000000)
#define __XEN_VIRT_START        (0xFFFF828C80000000)

/* These are architectural limits. Current CPUs support only 40-bit phys. */
#define PADDR_BITS              52
#define VADDR_BITS              48
#define PADDR_MASK              ((1UL << PADDR_BITS)-1)
#define VADDR_MASK              ((1UL << VADDR_BITS)-1)

#define is_canonical_address(x) (((long)(x) >> 47) == ((long)(x) >> 63))
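
/*
 * Worked example (illustrative, not in the original source): an address is
 * canonical when bits 63:47 are all copies of bit 47, so the two arithmetic
 * shifts agree. For 0xFFFF830000000000 both shifts give -1 (canonical);
 * for 0x0000830000000000 the >>47 result is 1 but the >>63 result is 0
 * (non-canonical).
 */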

#ifndef __ASSEMBLY__

#include <xen/config.h>
#include <asm/types.h>

/* Physical address where Xen was relocated to. */
extern unsigned long xen_phys_start;

static inline unsigned long __virt_to_maddr(unsigned long va)
{
    ASSERT(va >= XEN_VIRT_START);
    ASSERT(va < DIRECTMAP_VIRT_END);
    ASSERT((va < XEN_VIRT_END) || (va >= DIRECTMAP_VIRT_START));
    if ( va > DIRECTMAP_VIRT_START )
        return va - DIRECTMAP_VIRT_START;
    return va - XEN_VIRT_START + xen_phys_start;
}
#define virt_to_maddr(va)   \
    (__virt_to_maddr((unsigned long)(va)))
#define maddr_to_virt(ma)   \
    ((void *)((unsigned long)(ma)+DIRECTMAP_VIRT_START))
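
/*
 * Minimal usage sketch (illustrative, not part of the original header; the
 * function name is hypothetical): round-tripping a directmap address through
 * the helpers above. Only addresses inside the directmap translate back to
 * the same virtual address, since maddr_to_virt() always adds
 * DIRECTMAP_VIRT_START.
 */
#if 0
static inline void demo_directmap_round_trip(void *p)
{
    unsigned long ma = virt_to_maddr(p);   /* virtual -> machine address */
    void *q = maddr_to_virt(ma);           /* machine -> directmap virtual */
    ASSERT(q == p);                        /* holds only for directmap VAs */
}
#endif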

/* read access (should only be used for debug printk's) */
typedef u64 intpte_t;
#define PRIpte "016lx"

typedef struct { intpte_t l1; } l1_pgentry_t;
typedef struct { intpte_t l2; } l2_pgentry_t;
typedef struct { intpte_t l3; } l3_pgentry_t;
typedef struct { intpte_t l4; } l4_pgentry_t;
typedef l4_pgentry_t root_pgentry_t;

#endif /* !__ASSEMBLY__ */

#define pte_read_atomic(ptep)       (*(ptep))
#define pte_write_atomic(ptep, pte) (*(ptep) = (pte))
#define pte_write(ptep, pte)        (*(ptep) = (pte))

/* Given a virtual address, get an entry offset into a linear page table. */
#define l1_linear_offset(_a) (((_a) & VADDR_MASK) >> L1_PAGETABLE_SHIFT)
#define l2_linear_offset(_a) (((_a) & VADDR_MASK) >> L2_PAGETABLE_SHIFT)
#define l3_linear_offset(_a) (((_a) & VADDR_MASK) >> L3_PAGETABLE_SHIFT)
#define l4_linear_offset(_a) (((_a) & VADDR_MASK) >> L4_PAGETABLE_SHIFT)
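
/*
 * Illustrative sketch (assumption, not in the original source): the offsets
 * above index the full linear (recursive) mapping of each table level, so
 * l2_linear_offset(va) == l1_linear_offset(va) >> PAGETABLE_ORDER, and so on
 * up the levels. The slot within a single table at a given level is just the
 * low PAGETABLE_ORDER bits of the corresponding linear offset; the helper
 * name below is hypothetical.
 */
#if 0
#define l1_table_slot_demo(va) \
    (l1_linear_offset(va) & (L1_PAGETABLE_ENTRIES - 1))
#endif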

#define is_guest_l1_slot(_s) (1)
#define is_guest_l2_slot(_d, _t, _s)                   \
    ( !is_pv_32bit_domain(_d) ||                       \
      !((_t) & PGT_pae_xen_l2) ||                      \
      ((_s) < COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT(_d)) )
#define is_guest_l3_slot(_s) (1)
#define is_guest_l4_slot(_d, _s)                      \
    ( is_pv_32bit_domain(_d)                          \
      ? ((_s) == 0)                                   \
      : (((_s) < ROOT_PAGETABLE_FIRST_XEN_SLOT) ||    \
        ((_s) > ROOT_PAGETABLE_LAST_XEN_SLOT)))

#define root_get_pfn          l4e_get_pfn
#define root_get_flags        l4e_get_flags
#define root_get_intpte       l4e_get_intpte
#define root_empty            l4e_empty
#define root_from_paddr       l4e_from_paddr
#define PGT_root_page_table   PGT_l4_page_table

/*
 * PTE pfn and flags:
 *  40-bit pfn   = (pte[51:12])
 *  24-bit flags = (pte[63:52],pte[11:0])
 */

/* Extract flags into 24-bit integer, or turn 24-bit flags into a pte mask. */
#define get_pte_flags(x) (((int)((x) >> 40) & ~0xFFF) | ((int)(x) & 0xFFF))
#define put_pte_flags(x) (((intpte_t)((x) & ~0xFFF) << 40) | ((x) & 0xFFF))
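
/*
 * Worked example (illustrative, not in the original source): for a pte of
 * 0x8000000000000067 (NX plus Present/RW/User/Accessed/Dirty),
 * get_pte_flags() yields 0x800067, i.e. _PAGE_NX_BIT | 0x067, and
 * put_pte_flags(0x800067) rebuilds the same bit-63 and bits-11:0 layout,
 * with the 40-bit pfn field left as zero.
 */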

/* Bit 23 of a 24-bit flag mask. This corresponds to bit 63 of a pte. */
#define _PAGE_NX_BIT (1U<<23)
#define _PAGE_NX     (cpu_has_nx ? _PAGE_NX_BIT : 0U)

/*
 * Disallow unused flag bits plus PAT, PSE and GLOBAL.
 * Permit the NX bit if the hardware supports it.
 * Note that range [62:52] is available for software use on x86/64.
 */
#define BASE_DISALLOW_MASK (0xFF800180U & ~_PAGE_NX)
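
/*
 * Decoding the constant above (illustrative note, not in the original
 * source): in the 24-bit flag encoding, 0xFF800180 covers flag bit 23
 * (pte bit 63, NX), flag bits 8 and 7 (pte bits 8 and 7: GLOBAL and
 * PSE/PAT), plus bits 31:24, which get_pte_flags() can never produce.
 * Clearing _PAGE_NX from the mask permits NX where the CPU supports it.
 */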

#define L1_DISALLOW_MASK (BASE_DISALLOW_MASK | _PAGE_GNTTAB)
#define L2_DISALLOW_MASK (BASE_DISALLOW_MASK)
#define L3_DISALLOW_MASK (BASE_DISALLOW_MASK)
#define L4_DISALLOW_MASK (BASE_DISALLOW_MASK)

#define COMPAT_L3_DISALLOW_MASK 0xFFFFF1E6U

#define PAGE_HYPERVISOR         (__PAGE_HYPERVISOR | _PAGE_GLOBAL)
#define PAGE_HYPERVISOR_NOCACHE (__PAGE_HYPERVISOR_NOCACHE | _PAGE_GLOBAL)

#define GRANT_PTE_FLAGS \
    (_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_GNTTAB|_PAGE_USER)

#define USER_MAPPINGS_ARE_GLOBAL
#ifdef USER_MAPPINGS_ARE_GLOBAL
/*
 * Bit 12 of a 24-bit flag mask. This corresponds to bit 52 of a pte.
 * This is needed to distinguish between user and kernel PTEs since _PAGE_USER
 * is asserted for both.
 */
#define _PAGE_GUEST_KERNEL (1U<<12)
/* Global bit is allowed to be set on L1 PTEs. Intended for user mappings. */
#undef L1_DISALLOW_MASK
#define L1_DISALLOW_MASK ((BASE_DISALLOW_MASK | _PAGE_GNTTAB) & ~_PAGE_GLOBAL)
#else
#define _PAGE_GUEST_KERNEL 0
#endif

#endif /* __X86_64_PAGE_H__ */

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */