
view xen/include/asm-x86/guest_pt.h @ 19848:5839491bbf20

[IA64] replace MAX_VCPUS with d->max_vcpus where necessary.

Don't use MAX_VCPUS; use d->max_vcpus instead.
Changeset 2f9e1348aa98 introduced max_vcpus to allow more vcpus
per guest. This patch is the ia64 counterpart.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Isaku Yamahata <yamahata@valinux.co.jp>
date Mon Jun 29 11:26:05 2009 +0900 (2009-06-29)
/******************************************************************************
 * xen/asm-x86/guest_pt.h
 *
 * Types and accessors for guest pagetable entries, as distinct from
 * Xen's pagetable types.
 *
 * Users must #define GUEST_PAGING_LEVELS to 2, 3 or 4 before including
 * this file.
 *
 * Parts of this code are Copyright (c) 2006 by XenSource Inc.
 * Parts of this code are Copyright (c) 2006 by Michael A Fetterman
 * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#ifndef _XEN_ASM_GUEST_PT_H
#define _XEN_ASM_GUEST_PT_H
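
/* Illustrative usage sketch (not part of this header): a consumer such as
 * the shadow or HVM paging code is expected to pick the walk flavour by
 * defining GUEST_PAGING_LEVELS before the #include, e.g.
 *
 *   #define GUEST_PAGING_LEVELS 3
 *   #include <asm/guest_pt.h>
 *
 * The include path and the exact consumer file are assumptions here; the
 * requirement to define GUEST_PAGING_LEVELS first is stated above. */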
/* Type of the guest's frame numbers */
TYPE_SAFE(unsigned long,gfn)
#define PRI_gfn "05lx"

#define VALID_GFN(m) (m != INVALID_GFN)

static inline int
valid_gfn(gfn_t m)
{
    return VALID_GFN(gfn_x(m));
}

static inline paddr_t
gfn_to_paddr(gfn_t gfn)
{
    return ((paddr_t)gfn_x(gfn)) << PAGE_SHIFT;
}
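
/* A minimal sketch of how the type-safe gfn_t is meant to be handled,
 * assuming a caller that already has a raw frame number in hand: raw
 * values are wrapped with _gfn(), unwrapped with gfn_x(), and printed
 * with PRI_gfn.
 *
 *   gfn_t gfn = _gfn(raw_gfn);
 *   if ( valid_gfn(gfn) )
 *       gdprintk(XENLOG_INFO, "gfn %" PRI_gfn " -> paddr %" PRIpaddr "\n",
 *                gfn_x(gfn), gfn_to_paddr(gfn));
 *
 * 'raw_gfn' is a placeholder; PRIpaddr is assumed to be the usual Xen
 * format macro for paddr_t. */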
/* Override gfn_to_mfn to work with gfn_t */
#undef gfn_to_mfn
#define gfn_to_mfn(d, g, t) _gfn_to_mfn_type((d), gfn_x(g), (t), p2m_alloc)
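
/* Sketch of a typical call through this wrapper, assuming a caller with a
 * struct domain *d and a gfn_t in hand (the variable names are illustrative):
 *
 *   p2m_type_t p2mt;
 *   mfn_t mfn = gfn_to_mfn(d, gfn, &p2mt);
 *   if ( !p2m_is_ram(p2mt) )
 *       return;   // treat non-RAM as unusable, as the walker below does
 *
 * The macro strips the gfn_t wrapper with gfn_x() and forwards to
 * _gfn_to_mfn_type() with the p2m_alloc query type. */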
/* Types of the guest's page tables and access functions for them */

#if GUEST_PAGING_LEVELS == 2

#define GUEST_L1_PAGETABLE_ENTRIES     1024
#define GUEST_L2_PAGETABLE_ENTRIES     1024
#define GUEST_L1_PAGETABLE_SHIFT         12
#define GUEST_L2_PAGETABLE_SHIFT         22

typedef uint32_t guest_intpte_t;
typedef struct { guest_intpte_t l1; } guest_l1e_t;
typedef struct { guest_intpte_t l2; } guest_l2e_t;

#define PRI_gpte "08x"

static inline paddr_t guest_l1e_get_paddr(guest_l1e_t gl1e)
{ return ((paddr_t) gl1e.l1) & (PADDR_MASK & PAGE_MASK); }
static inline paddr_t guest_l2e_get_paddr(guest_l2e_t gl2e)
{ return ((paddr_t) gl2e.l2) & (PADDR_MASK & PAGE_MASK); }

static inline gfn_t guest_l1e_get_gfn(guest_l1e_t gl1e)
{ return _gfn(guest_l1e_get_paddr(gl1e) >> PAGE_SHIFT); }
static inline gfn_t guest_l2e_get_gfn(guest_l2e_t gl2e)
{ return _gfn(guest_l2e_get_paddr(gl2e) >> PAGE_SHIFT); }

static inline u32 guest_l1e_get_flags(guest_l1e_t gl1e)
{ return gl1e.l1 & 0xfff; }
static inline u32 guest_l2e_get_flags(guest_l2e_t gl2e)
{ return gl2e.l2 & 0xfff; }

static inline guest_l1e_t guest_l1e_from_gfn(gfn_t gfn, u32 flags)
{ return (guest_l1e_t) { (gfn_x(gfn) << PAGE_SHIFT) | flags }; }
static inline guest_l2e_t guest_l2e_from_gfn(gfn_t gfn, u32 flags)
{ return (guest_l2e_t) { (gfn_x(gfn) << PAGE_SHIFT) | flags }; }

#define guest_l1_table_offset(_va)                                          \
    (((_va) >> GUEST_L1_PAGETABLE_SHIFT) & (GUEST_L1_PAGETABLE_ENTRIES - 1))
#define guest_l2_table_offset(_va)                                          \
    (((_va) >> GUEST_L2_PAGETABLE_SHIFT) & (GUEST_L2_PAGETABLE_ENTRIES - 1))
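
/* Worked example of the 2-level split (illustrative numbers): with 22/12-bit
 * shifts and 1024-entry tables, a 32-bit address such as 0x1234567
 * decomposes as
 *
 *   guest_l2_table_offset(0x1234567) = (0x1234567 >> 22) & 1023 = 4
 *   guest_l1_table_offset(0x1234567) = (0x1234567 >> 12) & 1023 = 564
 *
 * i.e. l2 entry 4, l1 entry 564, and page offset 0x567. */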
#else /* GUEST_PAGING_LEVELS != 2 */

#if GUEST_PAGING_LEVELS == 3
#define GUEST_L1_PAGETABLE_ENTRIES      512
#define GUEST_L2_PAGETABLE_ENTRIES      512
#define GUEST_L3_PAGETABLE_ENTRIES        4
#define GUEST_L1_PAGETABLE_SHIFT         12
#define GUEST_L2_PAGETABLE_SHIFT         21
#define GUEST_L3_PAGETABLE_SHIFT         30
#else /* GUEST_PAGING_LEVELS == 4 */
#define GUEST_L1_PAGETABLE_ENTRIES      512
#define GUEST_L2_PAGETABLE_ENTRIES      512
#define GUEST_L3_PAGETABLE_ENTRIES      512
#define GUEST_L4_PAGETABLE_ENTRIES      512
#define GUEST_L1_PAGETABLE_SHIFT         12
#define GUEST_L2_PAGETABLE_SHIFT         21
#define GUEST_L3_PAGETABLE_SHIFT         30
#define GUEST_L4_PAGETABLE_SHIFT         39
#endif
typedef l1_pgentry_t guest_l1e_t;
typedef l2_pgentry_t guest_l2e_t;
typedef l3_pgentry_t guest_l3e_t;
#if GUEST_PAGING_LEVELS >= 4
typedef l4_pgentry_t guest_l4e_t;
#endif
typedef intpte_t guest_intpte_t;

#define PRI_gpte "016"PRIx64

static inline paddr_t guest_l1e_get_paddr(guest_l1e_t gl1e)
{ return l1e_get_paddr(gl1e); }
static inline paddr_t guest_l2e_get_paddr(guest_l2e_t gl2e)
{ return l2e_get_paddr(gl2e); }
static inline paddr_t guest_l3e_get_paddr(guest_l3e_t gl3e)
{ return l3e_get_paddr(gl3e); }
#if GUEST_PAGING_LEVELS >= 4
static inline paddr_t guest_l4e_get_paddr(guest_l4e_t gl4e)
{ return l4e_get_paddr(gl4e); }
#endif

static inline gfn_t guest_l1e_get_gfn(guest_l1e_t gl1e)
{ return _gfn(l1e_get_paddr(gl1e) >> PAGE_SHIFT); }
static inline gfn_t guest_l2e_get_gfn(guest_l2e_t gl2e)
{ return _gfn(l2e_get_paddr(gl2e) >> PAGE_SHIFT); }
static inline gfn_t guest_l3e_get_gfn(guest_l3e_t gl3e)
{ return _gfn(l3e_get_paddr(gl3e) >> PAGE_SHIFT); }
#if GUEST_PAGING_LEVELS >= 4
static inline gfn_t guest_l4e_get_gfn(guest_l4e_t gl4e)
{ return _gfn(l4e_get_paddr(gl4e) >> PAGE_SHIFT); }
#endif

static inline u32 guest_l1e_get_flags(guest_l1e_t gl1e)
{ return l1e_get_flags(gl1e); }
static inline u32 guest_l2e_get_flags(guest_l2e_t gl2e)
{ return l2e_get_flags(gl2e); }
static inline u32 guest_l3e_get_flags(guest_l3e_t gl3e)
{ return l3e_get_flags(gl3e); }
#if GUEST_PAGING_LEVELS >= 4
static inline u32 guest_l4e_get_flags(guest_l4e_t gl4e)
{ return l4e_get_flags(gl4e); }
#endif

static inline guest_l1e_t guest_l1e_from_gfn(gfn_t gfn, u32 flags)
{ return l1e_from_pfn(gfn_x(gfn), flags); }
static inline guest_l2e_t guest_l2e_from_gfn(gfn_t gfn, u32 flags)
{ return l2e_from_pfn(gfn_x(gfn), flags); }
static inline guest_l3e_t guest_l3e_from_gfn(gfn_t gfn, u32 flags)
{ return l3e_from_pfn(gfn_x(gfn), flags); }
#if GUEST_PAGING_LEVELS >= 4
static inline guest_l4e_t guest_l4e_from_gfn(gfn_t gfn, u32 flags)
{ return l4e_from_pfn(gfn_x(gfn), flags); }
#endif

#define guest_l1_table_offset(a) l1_table_offset(a)
#define guest_l2_table_offset(a) l2_table_offset(a)
#define guest_l3_table_offset(a) l3_table_offset(a)
#define guest_l4_table_offset(a) l4_table_offset(a)
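
/* Worked example for the 4-level case (illustrative numbers): with
 * 512-entry tables and shifts of 39/30/21/12, an address such as
 * 0xffff800000042000 uses
 *
 *   l4 index = (va >> 39) & 511 = 256
 *   l3 index = (va >> 30) & 511 = 0
 *   l2 index = (va >> 21) & 511 = 0
 *   l1 index = (va >> 12) & 511 = 66
 *
 * so each level selects one of 512 entries, and a 2MB superpage at l2
 * would cover the low 21 bits of the address directly. */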
#endif /* GUEST_PAGING_LEVELS != 2 */


/* Which pagetable features are supported on this vcpu? */

static inline int
guest_supports_superpages(struct vcpu *v)
{
    /* The _PAGE_PSE bit must be honoured in HVM guests, whenever
     * CR4.PSE is set or the guest is in PAE or long mode.
     * It's also used in the dummy PT for vcpus with CR0.PG cleared. */
    return (is_hvm_vcpu(v) &&
            (GUEST_PAGING_LEVELS != 2
             || !hvm_paging_enabled(v)
             || (v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PSE)));
}
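
/* A hedged sketch of how a walker consumes this predicate, assuming a
 * partially filled walk_t named 'gw' (defined further below) and the
 * standard _PAGE_PSE flag; the variable names are illustrative:
 *
 *   if ( guest_supports_superpages(v) &&
 *        (guest_l2e_get_flags(gw.l2e) & _PAGE_PSE) )
 *   {
 *       // The l2e maps a superpage: there is no l1 table to read, so
 *       // the walker fabricates an l1e instead (see walk_t below).
 *   }
 */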
static inline int
guest_supports_nx(struct vcpu *v)
{
    if ( GUEST_PAGING_LEVELS == 2 || !cpu_has_nx )
        return 0;
    if ( !is_hvm_vcpu(v) )
        return cpu_has_nx;
    return hvm_nx_enabled(v);
}
/* Type used for recording a walk through guest pagetables.  It is
 * filled in by the pagetable walk function, and also used as a cache
 * for later walks.  When we encounter a superpage l2e, we fabricate an
 * l1e for propagation to the shadow (for splintering guest superpages
 * into many shadow l1 entries). */
typedef struct guest_pagetable_walk walk_t;
struct guest_pagetable_walk
{
    unsigned long va;           /* Address we were looking for */
#if GUEST_PAGING_LEVELS >= 3
#if GUEST_PAGING_LEVELS >= 4
    guest_l4e_t l4e;            /* Guest's level 4 entry */
#endif
    guest_l3e_t l3e;            /* Guest's level 3 entry */
#endif
    guest_l2e_t l2e;            /* Guest's level 2 entry */
    guest_l1e_t l1e;            /* Guest's level 1 entry (or fabrication) */
#if GUEST_PAGING_LEVELS >= 4
    mfn_t l4mfn;                /* MFN that the level 4 entry was in */
    mfn_t l3mfn;                /* MFN that the level 3 entry was in */
#endif
    mfn_t l2mfn;                /* MFN that the level 2 entry was in */
    mfn_t l1mfn;                /* MFN that the level 1 entry was in */
};
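
/* A minimal sketch of the "fabricated l1e" mentioned above, roughly what
 * a walker does when it finds a superpage l2e (the arithmetic here is an
 * assumption about the walker implementation, not a quote of it):
 *
 *   gfn_t start = guest_l2e_get_gfn(gw.l2e);
 *   unsigned long offset = guest_l1_table_offset(gw.va);
 *   gw.l1e = guest_l1e_from_gfn(_gfn(gfn_x(start) + offset),
 *                               guest_l2e_get_flags(gw.l2e));
 *
 * i.e. the superpage's base frame plus the va's l1 index, carrying over
 * the l2e's flags, so the shadow code can treat it like a normal l1e. */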
/* Given a walk_t, translate the gw->va into the guest's notion of the
 * corresponding frame number. */
static inline gfn_t
guest_walk_to_gfn(walk_t *gw)
{
    if ( !(guest_l1e_get_flags(gw->l1e) & _PAGE_PRESENT) )
        return _gfn(INVALID_GFN);
    return guest_l1e_get_gfn(gw->l1e);
}

/* Given a walk_t, translate the gw->va into the guest's notion of the
 * corresponding physical address. */
static inline paddr_t
guest_walk_to_gpa(walk_t *gw)
{
    if ( !(guest_l1e_get_flags(gw->l1e) & _PAGE_PRESENT) )
        return 0;
    return guest_l1e_get_paddr(gw->l1e) + (gw->va & ~PAGE_MASK);
}
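
/* Worked example (illustrative numbers): if a completed walk left
 * guest_l1e_get_paddr(gw->l1e) == 0x7f431000 and gw->va == 0x080487a4,
 * then
 *
 *   guest_walk_to_gpa(gw) = 0x7f431000 + (0x080487a4 & 0xfff)
 *                         = 0x7f4317a4
 *
 * i.e. the mapped frame's base plus the page offset of the original va. */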
/* Walk the guest pagetables, after the manner of a hardware walker.
 *
 * Inputs: a vcpu, a virtual address, a walk_t to fill, a
 *         pagefault code, the MFN of the guest's
 *         top-level pagetable, and a mapping of the
 *         guest's top-level pagetable.
 *
 * We walk the vcpu's guest pagetables, filling the walk_t with what we
 * see and adding any Accessed and Dirty bits that are needed in the
 * guest entries.  Using the pagefault code, we check the permissions as
 * we go.  For the purposes of reading pagetables we treat all non-RAM
 * memory as containing zeroes.
 *
 * Returns 0 for success, or the set of permission bits that we failed on
 * if the walk did not complete. */
/* Macro-fu so you can call guest_walk_tables() and get the right one. */
#define GPT_RENAME2(_n, _l) _n ## _ ## _l ## _levels
#define GPT_RENAME(_n, _l) GPT_RENAME2(_n, _l)
#define guest_walk_tables GPT_RENAME(guest_walk_tables, GUEST_PAGING_LEVELS)
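
/* Expansion sketch: with GUEST_PAGING_LEVELS defined to 3, the two-step
 * rename expands the macro argument first and then pastes the tokens, so
 *
 *   guest_walk_tables  ->  GPT_RENAME(guest_walk_tables, 3)
 *                      ->  guest_walk_tables_3_levels
 *
 * The intermediate GPT_RENAME2 is what forces GUEST_PAGING_LEVELS to be
 * expanded before the ## pasting happens. */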
extern uint32_t
guest_walk_tables(struct vcpu *v, unsigned long va, walk_t *gw,
                  uint32_t pfec, mfn_t top_mfn, void *top_map);
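
/* A hedged sketch of a caller, e.g. a shadow-mode page-fault path,
 * assuming the fault's error code has already been translated into
 * PFEC_* bits and that the top-level table's MFN and mapping are in
 * hand (names such as 'inject_fault' are hypothetical):
 *
 *   walk_t gw;
 *   uint32_t missing = guest_walk_tables(v, va, &gw, pfec,
 *                                        top_mfn, top_map);
 *   if ( missing == 0 )
 *       gfn = guest_walk_to_gfn(&gw);   // walk completed
 *   else
 *       inject_fault(missing);          // reflect the failed bits
 *
 * A non-zero return is the set of permission bits the walk failed on,
 * as described above. */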
/* Pretty-print the contents of a guest-walk */
static inline void print_gw(walk_t *gw)
{
    gdprintk(XENLOG_INFO, "GUEST WALK TO %#lx:\n", gw->va);
#if GUEST_PAGING_LEVELS >= 3 /* PAE or 64... */
#if GUEST_PAGING_LEVELS >= 4 /* 64-bit only... */
    gdprintk(XENLOG_INFO, " l4mfn=%" PRI_mfn "\n", mfn_x(gw->l4mfn));
    gdprintk(XENLOG_INFO, " l4e=%" PRI_gpte "\n", gw->l4e.l4);
    gdprintk(XENLOG_INFO, " l3mfn=%" PRI_mfn "\n", mfn_x(gw->l3mfn));
#endif /* PAE or 64... */
    gdprintk(XENLOG_INFO, " l3e=%" PRI_gpte "\n", gw->l3e.l3);
#endif /* All levels... */
    gdprintk(XENLOG_INFO, " l2mfn=%" PRI_mfn "\n", mfn_x(gw->l2mfn));
    gdprintk(XENLOG_INFO, " l2e=%" PRI_gpte "\n", gw->l2e.l2);
    gdprintk(XENLOG_INFO, " l1mfn=%" PRI_mfn "\n", mfn_x(gw->l1mfn));
    gdprintk(XENLOG_INFO, " l1e=%" PRI_gpte "\n", gw->l1e.l1);
}


#endif /* _XEN_ASM_GUEST_PT_H */