
view xen/arch/x86/mm/shadow/types.h @ 15812:86a154e1ef5d

[HVM] Shadow: don't shadow the p2m table.
For HVM vcpus with paging disabled, we used to shadow the p2m table,
and skip the p2m lookup to go from gfn to mfn. Instead, we now
provide a simple pagetable that gives a one-to-one mapping of 4GB, and
shadow that, making the translations from gfn to mfn via the p2m.
This removes the paging-disabled special-case code from the shadow
fault handler, and allows us to expand the p2m interface, since all HVM
translations now go through the same p2m lookups.
Signed-off-by: Tim Deegan <Tim.Deegan@xensource.com>
author Tim Deegan <Tim.Deegan@xensource.com>
date Fri Aug 31 11:06:22 2007 +0100 (2007-08-31)
parents bd3d6b4c52ec
children 4633e9604da9

/******************************************************************************
 * arch/x86/mm/shadow/types.h
 *
 * Parts of this code are Copyright (c) 2006 by XenSource Inc.
 * Parts of this code are Copyright (c) 2006 by Michael A Fetterman
 * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#ifndef _XEN_SHADOW_TYPES_H
#define _XEN_SHADOW_TYPES_H

// Map a shadow page
static inline void *
map_shadow_page(mfn_t smfn)
{
    // XXX -- Possible optimization/measurement question for 32-bit and PAE
    //        hypervisors:
    //        How often is this smfn already available in the shadow linear
    //        table?  Might it be worth checking that table first,
    //        presumably using the reverse map hint in the page_info of this
    //        smfn, rather than calling map_domain_page()?
    //
    return sh_map_domain_page(smfn);
}

// matching unmap for map_shadow_page()
static inline void
unmap_shadow_page(void *p)
{
    sh_unmap_domain_page(p);
}

/*
 * Define various types for handling pagetables, based on these options:
 * SHADOW_PAGING_LEVELS : Number of levels of shadow pagetables
 * GUEST_PAGING_LEVELS  : Number of levels of guest pagetables
 */

#if (CONFIG_PAGING_LEVELS < SHADOW_PAGING_LEVELS)
#error Cannot have more levels of shadow pagetables than host pagetables
#endif

#if (SHADOW_PAGING_LEVELS < GUEST_PAGING_LEVELS)
#error Cannot have more levels of guest pagetables than shadow pagetables
#endif

#if SHADOW_PAGING_LEVELS == 2
#define SHADOW_L1_PAGETABLE_ENTRIES    1024
#define SHADOW_L2_PAGETABLE_ENTRIES    1024
#define SHADOW_L1_PAGETABLE_SHIFT        12
#define SHADOW_L2_PAGETABLE_SHIFT        22
#endif

#if SHADOW_PAGING_LEVELS == 3
#define SHADOW_L1_PAGETABLE_ENTRIES     512
#define SHADOW_L2_PAGETABLE_ENTRIES     512
#define SHADOW_L3_PAGETABLE_ENTRIES       4
#define SHADOW_L1_PAGETABLE_SHIFT        12
#define SHADOW_L2_PAGETABLE_SHIFT        21
#define SHADOW_L3_PAGETABLE_SHIFT        30
#endif

#if SHADOW_PAGING_LEVELS == 4
#define SHADOW_L1_PAGETABLE_ENTRIES     512
#define SHADOW_L2_PAGETABLE_ENTRIES     512
#define SHADOW_L3_PAGETABLE_ENTRIES     512
#define SHADOW_L4_PAGETABLE_ENTRIES     512
#define SHADOW_L1_PAGETABLE_SHIFT        12
#define SHADOW_L2_PAGETABLE_SHIFT        21
#define SHADOW_L3_PAGETABLE_SHIFT        30
#define SHADOW_L4_PAGETABLE_SHIFT        39
#endif

/* Types of the shadow page tables */
typedef l1_pgentry_t shadow_l1e_t;
typedef l2_pgentry_t shadow_l2e_t;
#if SHADOW_PAGING_LEVELS >= 3
typedef l3_pgentry_t shadow_l3e_t;
#if SHADOW_PAGING_LEVELS >= 4
typedef l4_pgentry_t shadow_l4e_t;
#endif
#endif

/* Access functions for them */
static inline paddr_t shadow_l1e_get_paddr(shadow_l1e_t sl1e)
{ return l1e_get_paddr(sl1e); }
static inline paddr_t shadow_l2e_get_paddr(shadow_l2e_t sl2e)
{ return l2e_get_paddr(sl2e); }
#if SHADOW_PAGING_LEVELS >= 3
static inline paddr_t shadow_l3e_get_paddr(shadow_l3e_t sl3e)
{ return l3e_get_paddr(sl3e); }
#if SHADOW_PAGING_LEVELS >= 4
static inline paddr_t shadow_l4e_get_paddr(shadow_l4e_t sl4e)
{ return l4e_get_paddr(sl4e); }
#endif
#endif

static inline mfn_t shadow_l1e_get_mfn(shadow_l1e_t sl1e)
{ return _mfn(l1e_get_pfn(sl1e)); }
static inline mfn_t shadow_l2e_get_mfn(shadow_l2e_t sl2e)
{ return _mfn(l2e_get_pfn(sl2e)); }
#if SHADOW_PAGING_LEVELS >= 3
static inline mfn_t shadow_l3e_get_mfn(shadow_l3e_t sl3e)
{ return _mfn(l3e_get_pfn(sl3e)); }
#if SHADOW_PAGING_LEVELS >= 4
static inline mfn_t shadow_l4e_get_mfn(shadow_l4e_t sl4e)
{ return _mfn(l4e_get_pfn(sl4e)); }
#endif
#endif

static inline u32 shadow_l1e_get_flags(shadow_l1e_t sl1e)
{ return l1e_get_flags(sl1e); }
static inline u32 shadow_l2e_get_flags(shadow_l2e_t sl2e)
{ return l2e_get_flags(sl2e); }
#if SHADOW_PAGING_LEVELS >= 3
static inline u32 shadow_l3e_get_flags(shadow_l3e_t sl3e)
{ return l3e_get_flags(sl3e); }
#if SHADOW_PAGING_LEVELS >= 4
static inline u32 shadow_l4e_get_flags(shadow_l4e_t sl4e)
{ return l4e_get_flags(sl4e); }
#endif
#endif

static inline shadow_l1e_t
shadow_l1e_remove_flags(shadow_l1e_t sl1e, u32 flags)
{ l1e_remove_flags(sl1e, flags); return sl1e; }

static inline shadow_l1e_t shadow_l1e_empty(void)
{ return l1e_empty(); }
static inline shadow_l2e_t shadow_l2e_empty(void)
{ return l2e_empty(); }
#if SHADOW_PAGING_LEVELS >= 3
static inline shadow_l3e_t shadow_l3e_empty(void)
{ return l3e_empty(); }
#if SHADOW_PAGING_LEVELS >= 4
static inline shadow_l4e_t shadow_l4e_empty(void)
{ return l4e_empty(); }
#endif
#endif

static inline shadow_l1e_t shadow_l1e_from_mfn(mfn_t mfn, u32 flags)
{ return l1e_from_pfn(mfn_x(mfn), flags); }
static inline shadow_l2e_t shadow_l2e_from_mfn(mfn_t mfn, u32 flags)
{ return l2e_from_pfn(mfn_x(mfn), flags); }
#if SHADOW_PAGING_LEVELS >= 3
static inline shadow_l3e_t shadow_l3e_from_mfn(mfn_t mfn, u32 flags)
{ return l3e_from_pfn(mfn_x(mfn), flags); }
#if SHADOW_PAGING_LEVELS >= 4
static inline shadow_l4e_t shadow_l4e_from_mfn(mfn_t mfn, u32 flags)
{ return l4e_from_pfn(mfn_x(mfn), flags); }
#endif
#endif

#define shadow_l1_table_offset(a) l1_table_offset(a)
#define shadow_l2_table_offset(a) l2_table_offset(a)
#define shadow_l3_table_offset(a) l3_table_offset(a)
#define shadow_l4_table_offset(a) l4_table_offset(a)

/**************************************************************************/
/* Access to the linear mapping of shadow page tables. */

/* Offsets into each level of the linear mapping for a virtual address. */
#define shadow_l1_linear_offset(_a) \
        (((_a) & VADDR_MASK) >> SHADOW_L1_PAGETABLE_SHIFT)
#define shadow_l2_linear_offset(_a) \
        (((_a) & VADDR_MASK) >> SHADOW_L2_PAGETABLE_SHIFT)
#define shadow_l3_linear_offset(_a) \
        (((_a) & VADDR_MASK) >> SHADOW_L3_PAGETABLE_SHIFT)
#define shadow_l4_linear_offset(_a) \
        (((_a) & VADDR_MASK) >> SHADOW_L4_PAGETABLE_SHIFT)

/* Where to find each level of the linear mapping.  For PV guests, we use
 * the shadow linear-map self-entry as many times as we need.  For HVM
 * guests, the shadow doesn't have a linear-map self-entry so we must use
 * the monitor-table's linear-map entry N-1 times and then the shadow-map
 * entry once. */
#define __sh_linear_l1_table ((shadow_l1e_t *)(SH_LINEAR_PT_VIRT_START))
#define __sh_linear_l2_table ((shadow_l2e_t *) \
    (__sh_linear_l1_table + shadow_l1_linear_offset(SH_LINEAR_PT_VIRT_START)))

// shadow linear L3 and L4 tables only exist in 4 level paging...
#if SHADOW_PAGING_LEVELS == 4
#define __sh_linear_l3_table ((shadow_l3e_t *) \
    (__sh_linear_l2_table + shadow_l2_linear_offset(SH_LINEAR_PT_VIRT_START)))
#define __sh_linear_l4_table ((shadow_l4e_t *) \
    (__sh_linear_l3_table + shadow_l3_linear_offset(SH_LINEAR_PT_VIRT_START)))
#endif

#define sh_linear_l1_table(v) ({ \
    ASSERT(current == (v)); \
    __sh_linear_l1_table; \
})

// XXX -- these should not be conditional on is_hvm_vcpu(v), but rather on
//        shadow_mode_external(d)...
//
#define sh_linear_l2_table(v) ({ \
    ASSERT(current == (v)); \
    ((shadow_l2e_t *) \
     (is_hvm_vcpu(v) ? __linear_l1_table : __sh_linear_l1_table) + \
     shadow_l1_linear_offset(SH_LINEAR_PT_VIRT_START)); \
})

#if SHADOW_PAGING_LEVELS >= 4
#define sh_linear_l3_table(v) ({ \
    ASSERT(current == (v)); \
    ((shadow_l3e_t *) \
     (is_hvm_vcpu(v) ? __linear_l2_table : __sh_linear_l2_table) + \
     shadow_l2_linear_offset(SH_LINEAR_PT_VIRT_START)); \
})

// we use l4_pgentry_t instead of shadow_l4e_t below because shadow_l4e_t is
// not defined for when xen_levels==4 & shadow_levels==3...
#define sh_linear_l4_table(v) ({ \
    ASSERT(current == (v)); \
    ((l4_pgentry_t *) \
     (is_hvm_vcpu(v) ? __linear_l3_table : __sh_linear_l3_table) + \
     shadow_l3_linear_offset(SH_LINEAR_PT_VIRT_START)); \
})
#endif
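
/* Illustrative sketch ('v' is the current vcpu and 'va' a placeholder linear
 * address): provided the upper levels of the shadow linear mapping are
 * populated, the shadow l1e that maps 'va' can be read straight out of the
 * linear mapping as
 *
 *     shadow_l1e_t sl1e = sh_linear_l1_table(v)[shadow_l1_linear_offset(va)];
 */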

#if GUEST_PAGING_LEVELS == 2

#include "../page-guest32.h"

#define GUEST_L1_PAGETABLE_ENTRIES     1024
#define GUEST_L2_PAGETABLE_ENTRIES     1024
#define GUEST_L1_PAGETABLE_SHIFT         12
#define GUEST_L2_PAGETABLE_SHIFT         22

/* Type of the guest's frame numbers */
TYPE_SAFE(u32,gfn)
#undef INVALID_GFN
#define INVALID_GFN ((u32)(-1u))
#define SH_PRI_gfn "05x"

/* Types of the guest's page tables */
typedef l1_pgentry_32_t guest_l1e_t;
typedef l2_pgentry_32_t guest_l2e_t;

/* Access functions for them */
static inline paddr_t guest_l1e_get_paddr(guest_l1e_t gl1e)
{ return l1e_get_paddr_32(gl1e); }
static inline paddr_t guest_l2e_get_paddr(guest_l2e_t gl2e)
{ return l2e_get_paddr_32(gl2e); }

static inline gfn_t guest_l1e_get_gfn(guest_l1e_t gl1e)
{ return _gfn(l1e_get_paddr_32(gl1e) >> PAGE_SHIFT); }
static inline gfn_t guest_l2e_get_gfn(guest_l2e_t gl2e)
{ return _gfn(l2e_get_paddr_32(gl2e) >> PAGE_SHIFT); }

static inline u32 guest_l1e_get_flags(guest_l1e_t gl1e)
{ return l1e_get_flags_32(gl1e); }
static inline u32 guest_l2e_get_flags(guest_l2e_t gl2e)
{ return l2e_get_flags_32(gl2e); }

static inline guest_l1e_t guest_l1e_add_flags(guest_l1e_t gl1e, u32 flags)
{ l1e_add_flags_32(gl1e, flags); return gl1e; }
static inline guest_l2e_t guest_l2e_add_flags(guest_l2e_t gl2e, u32 flags)
{ l2e_add_flags_32(gl2e, flags); return gl2e; }

static inline guest_l1e_t guest_l1e_from_gfn(gfn_t gfn, u32 flags)
{ return l1e_from_pfn_32(gfn_x(gfn), flags); }
static inline guest_l2e_t guest_l2e_from_gfn(gfn_t gfn, u32 flags)
{ return l2e_from_pfn_32(gfn_x(gfn), flags); }

#define guest_l1_table_offset(a) l1_table_offset_32(a)
#define guest_l2_table_offset(a) l2_table_offset_32(a)

/* The shadow types needed for the various levels. */
#define SH_type_l1_shadow  SH_type_l1_32_shadow
#define SH_type_l2_shadow  SH_type_l2_32_shadow
#define SH_type_fl1_shadow SH_type_fl1_32_shadow

#else /* GUEST_PAGING_LEVELS != 2 */

#if GUEST_PAGING_LEVELS == 3
#define GUEST_L1_PAGETABLE_ENTRIES      512
#define GUEST_L2_PAGETABLE_ENTRIES      512
#define GUEST_L3_PAGETABLE_ENTRIES        4
#define GUEST_L1_PAGETABLE_SHIFT         12
#define GUEST_L2_PAGETABLE_SHIFT         21
#define GUEST_L3_PAGETABLE_SHIFT         30
#else /* GUEST_PAGING_LEVELS == 4 */
#define GUEST_L1_PAGETABLE_ENTRIES      512
#define GUEST_L2_PAGETABLE_ENTRIES      512
#define GUEST_L3_PAGETABLE_ENTRIES      512
#define GUEST_L4_PAGETABLE_ENTRIES      512
#define GUEST_L1_PAGETABLE_SHIFT         12
#define GUEST_L2_PAGETABLE_SHIFT         21
#define GUEST_L3_PAGETABLE_SHIFT         30
#define GUEST_L4_PAGETABLE_SHIFT         39
#endif

/* Type of the guest's frame numbers */
TYPE_SAFE(unsigned long,gfn)
#undef INVALID_GFN
#define INVALID_GFN ((unsigned long)(-1ul))
#define SH_PRI_gfn "05lx"

/* Types of the guest's page tables */
typedef l1_pgentry_t guest_l1e_t;
typedef l2_pgentry_t guest_l2e_t;
typedef l3_pgentry_t guest_l3e_t;
#if GUEST_PAGING_LEVELS >= 4
typedef l4_pgentry_t guest_l4e_t;
#endif

/* Access functions for them */
static inline paddr_t guest_l1e_get_paddr(guest_l1e_t gl1e)
{ return l1e_get_paddr(gl1e); }
static inline paddr_t guest_l2e_get_paddr(guest_l2e_t gl2e)
{ return l2e_get_paddr(gl2e); }
static inline paddr_t guest_l3e_get_paddr(guest_l3e_t gl3e)
{ return l3e_get_paddr(gl3e); }
#if GUEST_PAGING_LEVELS >= 4
static inline paddr_t guest_l4e_get_paddr(guest_l4e_t gl4e)
{ return l4e_get_paddr(gl4e); }
#endif

static inline gfn_t guest_l1e_get_gfn(guest_l1e_t gl1e)
{ return _gfn(l1e_get_paddr(gl1e) >> PAGE_SHIFT); }
static inline gfn_t guest_l2e_get_gfn(guest_l2e_t gl2e)
{ return _gfn(l2e_get_paddr(gl2e) >> PAGE_SHIFT); }
static inline gfn_t guest_l3e_get_gfn(guest_l3e_t gl3e)
{ return _gfn(l3e_get_paddr(gl3e) >> PAGE_SHIFT); }
#if GUEST_PAGING_LEVELS >= 4
static inline gfn_t guest_l4e_get_gfn(guest_l4e_t gl4e)
{ return _gfn(l4e_get_paddr(gl4e) >> PAGE_SHIFT); }
#endif

static inline u32 guest_l1e_get_flags(guest_l1e_t gl1e)
{ return l1e_get_flags(gl1e); }
static inline u32 guest_l2e_get_flags(guest_l2e_t gl2e)
{ return l2e_get_flags(gl2e); }
static inline u32 guest_l3e_get_flags(guest_l3e_t gl3e)
{ return l3e_get_flags(gl3e); }
#if GUEST_PAGING_LEVELS >= 4
static inline u32 guest_l4e_get_flags(guest_l4e_t gl4e)
{ return l4e_get_flags(gl4e); }
#endif

static inline guest_l1e_t guest_l1e_add_flags(guest_l1e_t gl1e, u32 flags)
{ l1e_add_flags(gl1e, flags); return gl1e; }
static inline guest_l2e_t guest_l2e_add_flags(guest_l2e_t gl2e, u32 flags)
{ l2e_add_flags(gl2e, flags); return gl2e; }
static inline guest_l3e_t guest_l3e_add_flags(guest_l3e_t gl3e, u32 flags)
{ l3e_add_flags(gl3e, flags); return gl3e; }
#if GUEST_PAGING_LEVELS >= 4
static inline guest_l4e_t guest_l4e_add_flags(guest_l4e_t gl4e, u32 flags)
{ l4e_add_flags(gl4e, flags); return gl4e; }
#endif

static inline guest_l1e_t guest_l1e_from_gfn(gfn_t gfn, u32 flags)
{ return l1e_from_pfn(gfn_x(gfn), flags); }
static inline guest_l2e_t guest_l2e_from_gfn(gfn_t gfn, u32 flags)
{ return l2e_from_pfn(gfn_x(gfn), flags); }
static inline guest_l3e_t guest_l3e_from_gfn(gfn_t gfn, u32 flags)
{ return l3e_from_pfn(gfn_x(gfn), flags); }
#if GUEST_PAGING_LEVELS >= 4
static inline guest_l4e_t guest_l4e_from_gfn(gfn_t gfn, u32 flags)
{ return l4e_from_pfn(gfn_x(gfn), flags); }
#endif

#define guest_l1_table_offset(a) l1_table_offset(a)
#define guest_l2_table_offset(a) l2_table_offset(a)
#define guest_l3_table_offset(a) l3_table_offset(a)
#define guest_l4_table_offset(a) l4_table_offset(a)

/* The shadow types needed for the various levels. */
#if GUEST_PAGING_LEVELS == 3
#define SH_type_l1_shadow  SH_type_l1_pae_shadow
#define SH_type_fl1_shadow SH_type_fl1_pae_shadow
#define SH_type_l2_shadow  SH_type_l2_pae_shadow
#define SH_type_l2h_shadow SH_type_l2h_pae_shadow
#else
#define SH_type_l1_shadow  SH_type_l1_64_shadow
#define SH_type_fl1_shadow SH_type_fl1_64_shadow
#define SH_type_l2_shadow  SH_type_l2_64_shadow
#define SH_type_l2h_shadow SH_type_l2h_64_shadow
#define SH_type_l3_shadow  SH_type_l3_64_shadow
#define SH_type_l4_shadow  SH_type_l4_64_shadow
#endif

#endif /* GUEST_PAGING_LEVELS != 2 */

#define VALID_GFN(m) (m != INVALID_GFN)

static inline int
valid_gfn(gfn_t m)
{
    return VALID_GFN(gfn_x(m));
}

static inline paddr_t
gfn_to_paddr(gfn_t gfn)
{
    return ((paddr_t)gfn_x(gfn)) << PAGE_SHIFT;
}
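
/* For example, with 4kB pages (PAGE_SHIFT == 12), gfn 0x1234 becomes
 * paddr 0x1234000; the cast to paddr_t is what keeps large gfns from
 * overflowing the shift on 32-bit builds. */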

/* Override gfn_to_mfn to work with gfn_t */
#undef gfn_to_mfn
#define gfn_to_mfn(d, g) _gfn_to_mfn((d), gfn_x(g))

/* Type used for recording a walk through guest pagetables.  It is
 * filled in by the pagetable walk function, and also used as a cache
 * for later walks.
 * Any non-null pointer in this structure represents a mapping of guest
 * memory.  We must always call walk_init() before using a walk_t, and
 * call walk_unmap() when we're done.
 * The "Effective l1e" field is used when there isn't an l1e to point to,
 * but we have fabricated an l1e for propagation to the shadow (e.g.,
 * for splintering guest superpages into many shadow l1 entries). */
typedef struct shadow_walk_t walk_t;
struct shadow_walk_t
{
    unsigned long va;           /* Address we were looking for */
#if GUEST_PAGING_LEVELS >= 3
#if GUEST_PAGING_LEVELS >= 4
    guest_l4e_t *l4e;           /* Pointer to guest's level 4 entry */
#endif
    guest_l3e_t *l3e;           /* Pointer to guest's level 3 entry */
#endif
    guest_l2e_t *l2e;           /* Pointer to guest's level 2 entry */
    guest_l1e_t *l1e;           /* Pointer to guest's level 1 entry */
    guest_l1e_t eff_l1e;        /* Effective level 1 entry */
#if GUEST_PAGING_LEVELS >= 4
    mfn_t l4mfn;                /* MFN that the level 4 entry is in */
    mfn_t l3mfn;                /* MFN that the level 3 entry is in */
#endif
    mfn_t l2mfn;                /* MFN that the level 2 entry is in */
    mfn_t l1mfn;                /* MFN that the level 1 entry is in */
};
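
/* Illustrative sketch of how a filled-in walk_t is consumed ('v', 'd' and
 * 'gw' are placeholders for the vcpu, domain and walk that the real callers
 * pass around): once the walk function has populated 'gw', the shadow
 * propagation code works from the effective l1e, e.g.
 *
 *     gfn_t gfn   = guest_l1e_get_gfn(gw.eff_l1e);
 *     u32   flags = accumulate_guest_flags(v, &gw);
 *     mfn_t mfn   = gfn_to_mfn(d, gfn);
 *
 * and then builds a shadow entry with shadow_l1e_from_mfn(mfn, ...).
 */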

/* Macros for generating the internal names of the shadow code's external
 * entry points.
 */
#define INTERNAL_NAME(name) \
        SHADOW_INTERNAL_NAME(name, SHADOW_PAGING_LEVELS, GUEST_PAGING_LEVELS)
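
/* For example, in a 3-level-shadow, 3-level-guest build,
 * INTERNAL_NAME(sh_page_fault) pastes the level numbers into the symbol via
 * SHADOW_INTERNAL_NAME (defined elsewhere in the shadow code), giving a
 * per-flavour name along the lines of sh_page_fault__shadow_3_guest_3, so
 * each (shadow, guest) combination compiles to its own set of functions. */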

/* Macros for renaming the primary entry points, so that they are more
 * easily distinguished in a debugger.
 */
#define sh_page_fault              INTERNAL_NAME(sh_page_fault)
#define sh_invlpg                  INTERNAL_NAME(sh_invlpg)
#define sh_gva_to_gfn              INTERNAL_NAME(sh_gva_to_gfn)
#define sh_update_cr3              INTERNAL_NAME(sh_update_cr3)
#define sh_rm_write_access_from_l1 INTERNAL_NAME(sh_rm_write_access_from_l1)
#define sh_rm_mappings_from_l1     INTERNAL_NAME(sh_rm_mappings_from_l1)
#define sh_remove_l1_shadow        INTERNAL_NAME(sh_remove_l1_shadow)
#define sh_remove_l2_shadow        INTERNAL_NAME(sh_remove_l2_shadow)
#define sh_remove_l3_shadow        INTERNAL_NAME(sh_remove_l3_shadow)
#define sh_map_and_validate_gl4e   INTERNAL_NAME(sh_map_and_validate_gl4e)
#define sh_map_and_validate_gl3e   INTERNAL_NAME(sh_map_and_validate_gl3e)
#define sh_map_and_validate_gl2e   INTERNAL_NAME(sh_map_and_validate_gl2e)
#define sh_map_and_validate_gl2he  INTERNAL_NAME(sh_map_and_validate_gl2he)
#define sh_map_and_validate_gl1e   INTERNAL_NAME(sh_map_and_validate_gl1e)
#define sh_destroy_l4_shadow       INTERNAL_NAME(sh_destroy_l4_shadow)
#define sh_destroy_l3_shadow       INTERNAL_NAME(sh_destroy_l3_shadow)
#define sh_destroy_l2_shadow       INTERNAL_NAME(sh_destroy_l2_shadow)
#define sh_destroy_l1_shadow       INTERNAL_NAME(sh_destroy_l1_shadow)
#define sh_unhook_32b_mappings     INTERNAL_NAME(sh_unhook_32b_mappings)
#define sh_unhook_pae_mappings     INTERNAL_NAME(sh_unhook_pae_mappings)
#define sh_unhook_64b_mappings     INTERNAL_NAME(sh_unhook_64b_mappings)
#define sh_paging_mode             INTERNAL_NAME(sh_paging_mode)
#define sh_detach_old_tables       INTERNAL_NAME(sh_detach_old_tables)
#define sh_x86_emulate_write       INTERNAL_NAME(sh_x86_emulate_write)
#define sh_x86_emulate_cmpxchg     INTERNAL_NAME(sh_x86_emulate_cmpxchg)
#define sh_x86_emulate_cmpxchg8b   INTERNAL_NAME(sh_x86_emulate_cmpxchg8b)
#define sh_audit_l1_table          INTERNAL_NAME(sh_audit_l1_table)
#define sh_audit_fl1_table         INTERNAL_NAME(sh_audit_fl1_table)
#define sh_audit_l2_table          INTERNAL_NAME(sh_audit_l2_table)
#define sh_audit_l3_table          INTERNAL_NAME(sh_audit_l3_table)
#define sh_audit_l4_table          INTERNAL_NAME(sh_audit_l4_table)
#define sh_guess_wrmap             INTERNAL_NAME(sh_guess_wrmap)
#define sh_clear_shadow_entry      INTERNAL_NAME(sh_clear_shadow_entry)

/* The sh_guest_(map|get)_* functions only depend on the number of config
 * levels.
 */
#define sh_guest_map_l1e \
        SHADOW_INTERNAL_NAME(sh_guest_map_l1e, \
                             CONFIG_PAGING_LEVELS, \
                             CONFIG_PAGING_LEVELS)
#define sh_guest_get_eff_l1e \
        SHADOW_INTERNAL_NAME(sh_guest_get_eff_l1e, \
                             CONFIG_PAGING_LEVELS, \
                             CONFIG_PAGING_LEVELS)

/* sh_make_monitor_table only depends on the number of shadow levels */
#define sh_make_monitor_table \
        SHADOW_INTERNAL_NAME(sh_make_monitor_table, \
                             SHADOW_PAGING_LEVELS, \
                             SHADOW_PAGING_LEVELS)
#define sh_destroy_monitor_table \
        SHADOW_INTERNAL_NAME(sh_destroy_monitor_table, \
                             SHADOW_PAGING_LEVELS, \
                             SHADOW_PAGING_LEVELS)

#if SHADOW_PAGING_LEVELS == 3
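/* PAE paging requires CR3 to hold a 32-bit physical address, so a top-level
 * shadow handed to an HVM guest's CR3 has to live below 4GB: its MFN must
 * fit in 32 - PAGE_SHIFT = 20 bits, which is what this test checks. */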
#define MFN_FITS_IN_HVM_CR3(_MFN) !(mfn_x(_MFN) >> 20)
#endif

#if SHADOW_PAGING_LEVELS == 2
#define SH_PRI_pte "08x"
#else /* SHADOW_PAGING_LEVELS >= 3 */
#ifndef __x86_64__
#define SH_PRI_pte "016llx"
#else
#define SH_PRI_pte "016lx"
#endif
#endif /* SHADOW_PAGING_LEVELS >= 3 */

#if GUEST_PAGING_LEVELS == 2
#define SH_PRI_gpte "08x"
#else /* GUEST_PAGING_LEVELS >= 3 */
#ifndef __x86_64__
#define SH_PRI_gpte "016llx"
#else
#define SH_PRI_gpte "016lx"
#endif
#endif /* GUEST_PAGING_LEVELS >= 3 */
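
/* Illustrative use of the format strings above ('sl1e' and 'gl1e' are
 * placeholder entries of type shadow_l1e_t and guest_l1e_t):
 *
 *     printk("sl1e=%" SH_PRI_pte " gl1e=%" SH_PRI_gpte "\n",
 *            sl1e.l1, gl1e.l1);
 */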

static inline u32
accumulate_guest_flags(struct vcpu *v, walk_t *gw)
{
    u32 accumulated_flags;

    if ( unlikely(!(guest_l1e_get_flags(gw->eff_l1e) & _PAGE_PRESENT)) )
        return 0;

    // We accumulate the permission flags with bitwise ANDing.
    // This works for the PRESENT bit, RW bit, and USER bit.
    // For the NX bit, however, the polarity is wrong, so we accumulate the
    // inverse of the NX bit.
    //
    accumulated_flags =  guest_l1e_get_flags(gw->eff_l1e) ^ _PAGE_NX_BIT;
    accumulated_flags &= guest_l2e_get_flags(*gw->l2e) ^ _PAGE_NX_BIT;

    // Note that PAE guests do not have USER or RW or NX bits in their L3s.
    //
#if GUEST_PAGING_LEVELS == 3
    accumulated_flags &=
        ~_PAGE_PRESENT | (guest_l3e_get_flags(*gw->l3e) & _PAGE_PRESENT);
#elif GUEST_PAGING_LEVELS >= 4
    accumulated_flags &= guest_l3e_get_flags(*gw->l3e) ^ _PAGE_NX_BIT;
    accumulated_flags &= guest_l4e_get_flags(*gw->l4e) ^ _PAGE_NX_BIT;
#endif

    // Revert the NX bit back to its original polarity
    accumulated_flags ^= _PAGE_NX_BIT;

    // In 64-bit PV guests, the _PAGE_USER bit is implied in all guest
    // entries (since even the guest kernel runs in ring 3).
    //
    if ( (GUEST_PAGING_LEVELS == 4) && !is_hvm_vcpu(v) )
        accumulated_flags |= _PAGE_USER;

    return accumulated_flags;
}
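
/* Worked example of the NX handling above: suppose the guest's l1e has NX
 * clear and its l2e has NX set.  After XORing each flags word with
 * _PAGE_NX_BIT, the l1e contributes a 1 in the NX position and the l2e a 0;
 * ANDing gives 0, and the final XOR flips it back to 1.  So the accumulated
 * entry is no-execute if *any* level marked it no-execute, just as PRESENT,
 * RW and USER are only set if *every* level set them. */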

#if (SHADOW_OPTIMIZATIONS & SHOPT_FAST_FAULT_PATH) && SHADOW_PAGING_LEVELS > 2
/******************************************************************************
 * We implement a "fast path" for two special cases: faults that require
 * MMIO emulation, and faults where the guest PTE is not present.  We
 * record these as shadow l1 entries that have reserved bits set in
 * them, so we can spot them immediately in the fault handler and handle
 * them without needing to hold the shadow lock or walk the guest
 * pagetables.
 *
 * This is only feasible for PAE and 64bit Xen: 32-bit non-PAE PTEs don't
 * have reserved bits that we can use for this.
 */

#define SH_L1E_MAGIC 0xffffffff00000001ULL
static inline int sh_l1e_is_magic(shadow_l1e_t sl1e)
{
    return ((sl1e.l1 & SH_L1E_MAGIC) == SH_L1E_MAGIC);
}

/* Guest not present: a single magic value */
static inline shadow_l1e_t sh_l1e_gnp(void)
{
    return (shadow_l1e_t){ -1ULL };
}

static inline int sh_l1e_is_gnp(shadow_l1e_t sl1e)
{
    return (sl1e.l1 == sh_l1e_gnp().l1);
}

/* MMIO: an invalid PTE that contains the GFN of the equivalent guest l1e.
 * We store 28 bits of GFN in bits 4:31 of the entry.
 * The present bit is set, and the U/S and R/W bits are taken from the guest.
 * Bit 3 is always 0, to differentiate from gnp above. */
#define SH_L1E_MMIO_MAGIC       0xffffffff00000001ULL
#define SH_L1E_MMIO_MAGIC_MASK  0xffffffff00000009ULL
#define SH_L1E_MMIO_GFN_MASK    0x00000000fffffff0ULL
#define SH_L1E_MMIO_GFN_SHIFT   4

static inline shadow_l1e_t sh_l1e_mmio(gfn_t gfn, u32 gflags)
{
    return (shadow_l1e_t) { (SH_L1E_MMIO_MAGIC
                             | (gfn_x(gfn) << SH_L1E_MMIO_GFN_SHIFT)
                             | (gflags & (_PAGE_USER|_PAGE_RW))) };
}

static inline int sh_l1e_is_mmio(shadow_l1e_t sl1e)
{
    return ((sl1e.l1 & SH_L1E_MMIO_MAGIC_MASK) == SH_L1E_MMIO_MAGIC);
}

static inline gfn_t sh_l1e_mmio_get_gfn(shadow_l1e_t sl1e)
{
    return _gfn((sl1e.l1 & SH_L1E_MMIO_GFN_MASK) >> SH_L1E_MMIO_GFN_SHIFT);
}

static inline u32 sh_l1e_mmio_get_flags(shadow_l1e_t sl1e)
{
    return (u32)((sl1e.l1 & (_PAGE_USER|_PAGE_RW)));
}
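
/* Illustrative round trip through the encoding above ('g' is a placeholder
 * gfn whose value fits in the 28-bit field): sh_l1e_mmio(g, _PAGE_USER)
 * yields an entry for which sh_l1e_is_magic() and sh_l1e_is_mmio() are both
 * true, sh_l1e_mmio_get_gfn() gives back g, and sh_l1e_mmio_get_flags()
 * gives back _PAGE_USER. */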

#else

#define sh_l1e_gnp() shadow_l1e_empty()
#define sh_l1e_mmio(_gfn, _flags) shadow_l1e_empty()
#define sh_l1e_is_magic(_e) (0)

#endif /* SHOPT_FAST_FAULT_PATH */


#endif /* _XEN_SHADOW_TYPES_H */

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */