ia64/xen-unstable: xen/arch/x86/mm/shadow/types.h @ 14498:e9a5ba552808

[XEN] Shadow: check the _PAGE_PRESENT bit in fast-path MMIO shadow ptes
otherwise the fast-path code is not safe against some kinds of
concurrent shadow updates.

Signed-off-by: Tim Deegan <Tim.Deegan@xensource.com>
Author:   Tim Deegan <Tim.Deegan@xensource.com>
Date:     Wed Mar 21 17:17:08 2007 +0000
Parents:  a7f6392ea850
Children: bd3d6b4c52ec
/******************************************************************************
 * arch/x86/mm/shadow/types.h
 *
 * Parts of this code are Copyright (c) 2006 by XenSource Inc.
 * Parts of this code are Copyright (c) 2006 by Michael A Fetterman
 * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#ifndef _XEN_SHADOW_TYPES_H
#define _XEN_SHADOW_TYPES_H

// Map a shadow page
static inline void *
map_shadow_page(mfn_t smfn)
{
    // XXX -- Possible optimization/measurement question for 32-bit and PAE
    //        hypervisors:
    //        How often is this smfn already available in the shadow linear
    //        table?  Might it be worth checking that table first,
    //        presumably using the reverse map hint in the page_info of this
    //        smfn, rather than calling map_domain_page()?
    //
    return sh_map_domain_page(smfn);
}

// matching unmap for map_shadow_page()
static inline void
unmap_shadow_page(void *p)
{
    sh_unmap_domain_page(p);
}

/*
 * Define various types for handling pagetables, based on these options:
 * SHADOW_PAGING_LEVELS : Number of levels of shadow pagetables
 * GUEST_PAGING_LEVELS  : Number of levels of guest pagetables
 */

#if (CONFIG_PAGING_LEVELS < SHADOW_PAGING_LEVELS)
#error Cannot have more levels of shadow pagetables than host pagetables
#endif

#if (SHADOW_PAGING_LEVELS < GUEST_PAGING_LEVELS)
#error Cannot have more levels of guest pagetables than shadow pagetables
#endif

#if SHADOW_PAGING_LEVELS == 2
#define SHADOW_L1_PAGETABLE_ENTRIES    1024
#define SHADOW_L2_PAGETABLE_ENTRIES    1024
#define SHADOW_L1_PAGETABLE_SHIFT        12
#define SHADOW_L2_PAGETABLE_SHIFT        22
#endif

#if SHADOW_PAGING_LEVELS == 3
#define SHADOW_L1_PAGETABLE_ENTRIES     512
#define SHADOW_L2_PAGETABLE_ENTRIES     512
#define SHADOW_L3_PAGETABLE_ENTRIES       4
#define SHADOW_L1_PAGETABLE_SHIFT        12
#define SHADOW_L2_PAGETABLE_SHIFT        21
#define SHADOW_L3_PAGETABLE_SHIFT        30
#endif

#if SHADOW_PAGING_LEVELS == 4
#define SHADOW_L1_PAGETABLE_ENTRIES     512
#define SHADOW_L2_PAGETABLE_ENTRIES     512
#define SHADOW_L3_PAGETABLE_ENTRIES     512
#define SHADOW_L4_PAGETABLE_ENTRIES     512
#define SHADOW_L1_PAGETABLE_SHIFT        12
#define SHADOW_L2_PAGETABLE_SHIFT        21
#define SHADOW_L3_PAGETABLE_SHIFT        30
#define SHADOW_L4_PAGETABLE_SHIFT        39
#endif
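
/*
 * For reference: with 4-level shadows these shifts split a virtual address
 * into bits 39-47 (L4 index), 30-38 (L3), 21-29 (L2), 12-20 (L1) and
 * 0-11 (offset within the 4k page), i.e. 512 entries (9 bits) per level;
 * the 3-level (PAE) split is 2/9/9/12 bits.
 */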

/* Types of the shadow page tables */
typedef l1_pgentry_t shadow_l1e_t;
typedef l2_pgentry_t shadow_l2e_t;
#if SHADOW_PAGING_LEVELS >= 3
typedef l3_pgentry_t shadow_l3e_t;
#if SHADOW_PAGING_LEVELS >= 4
typedef l4_pgentry_t shadow_l4e_t;
#endif
#endif

/* Access functions for them */
static inline paddr_t shadow_l1e_get_paddr(shadow_l1e_t sl1e)
{ return l1e_get_paddr(sl1e); }
static inline paddr_t shadow_l2e_get_paddr(shadow_l2e_t sl2e)
{ return l2e_get_paddr(sl2e); }
#if SHADOW_PAGING_LEVELS >= 3
static inline paddr_t shadow_l3e_get_paddr(shadow_l3e_t sl3e)
{ return l3e_get_paddr(sl3e); }
#if SHADOW_PAGING_LEVELS >= 4
static inline paddr_t shadow_l4e_get_paddr(shadow_l4e_t sl4e)
{ return l4e_get_paddr(sl4e); }
#endif
#endif

static inline mfn_t shadow_l1e_get_mfn(shadow_l1e_t sl1e)
{ return _mfn(l1e_get_pfn(sl1e)); }
static inline mfn_t shadow_l2e_get_mfn(shadow_l2e_t sl2e)
{ return _mfn(l2e_get_pfn(sl2e)); }
#if SHADOW_PAGING_LEVELS >= 3
static inline mfn_t shadow_l3e_get_mfn(shadow_l3e_t sl3e)
{ return _mfn(l3e_get_pfn(sl3e)); }
#if SHADOW_PAGING_LEVELS >= 4
static inline mfn_t shadow_l4e_get_mfn(shadow_l4e_t sl4e)
{ return _mfn(l4e_get_pfn(sl4e)); }
#endif
#endif

static inline u32 shadow_l1e_get_flags(shadow_l1e_t sl1e)
{ return l1e_get_flags(sl1e); }
static inline u32 shadow_l2e_get_flags(shadow_l2e_t sl2e)
{ return l2e_get_flags(sl2e); }
#if SHADOW_PAGING_LEVELS >= 3
static inline u32 shadow_l3e_get_flags(shadow_l3e_t sl3e)
{ return l3e_get_flags(sl3e); }
#if SHADOW_PAGING_LEVELS >= 4
static inline u32 shadow_l4e_get_flags(shadow_l4e_t sl4e)
{ return l4e_get_flags(sl4e); }
#endif
#endif

static inline shadow_l1e_t
shadow_l1e_remove_flags(shadow_l1e_t sl1e, u32 flags)
{ l1e_remove_flags(sl1e, flags); return sl1e; }

static inline shadow_l1e_t shadow_l1e_empty(void)
{ return l1e_empty(); }
static inline shadow_l2e_t shadow_l2e_empty(void)
{ return l2e_empty(); }
#if SHADOW_PAGING_LEVELS >= 3
static inline shadow_l3e_t shadow_l3e_empty(void)
{ return l3e_empty(); }
#if SHADOW_PAGING_LEVELS >= 4
static inline shadow_l4e_t shadow_l4e_empty(void)
{ return l4e_empty(); }
#endif
#endif

static inline shadow_l1e_t shadow_l1e_from_mfn(mfn_t mfn, u32 flags)
{ return l1e_from_pfn(mfn_x(mfn), flags); }
static inline shadow_l2e_t shadow_l2e_from_mfn(mfn_t mfn, u32 flags)
{ return l2e_from_pfn(mfn_x(mfn), flags); }
#if SHADOW_PAGING_LEVELS >= 3
static inline shadow_l3e_t shadow_l3e_from_mfn(mfn_t mfn, u32 flags)
{ return l3e_from_pfn(mfn_x(mfn), flags); }
#if SHADOW_PAGING_LEVELS >= 4
static inline shadow_l4e_t shadow_l4e_from_mfn(mfn_t mfn, u32 flags)
{ return l4e_from_pfn(mfn_x(mfn), flags); }
#endif
#endif

#define shadow_l1_table_offset(a) l1_table_offset(a)
#define shadow_l2_table_offset(a) l2_table_offset(a)
#define shadow_l3_table_offset(a) l3_table_offset(a)
#define shadow_l4_table_offset(a) l4_table_offset(a)

/**************************************************************************/
/* Access to the linear mapping of shadow page tables. */

/* Offsets into each level of the linear mapping for a virtual address. */
#define shadow_l1_linear_offset(_a) \
        (((_a) & VADDR_MASK) >> SHADOW_L1_PAGETABLE_SHIFT)
#define shadow_l2_linear_offset(_a) \
        (((_a) & VADDR_MASK) >> SHADOW_L2_PAGETABLE_SHIFT)
#define shadow_l3_linear_offset(_a) \
        (((_a) & VADDR_MASK) >> SHADOW_L3_PAGETABLE_SHIFT)
#define shadow_l4_linear_offset(_a) \
        (((_a) & VADDR_MASK) >> SHADOW_L4_PAGETABLE_SHIFT)

/* Where to find each level of the linear mapping.  For PV guests, we use
 * the shadow linear-map self-entry as many times as we need.  For HVM
 * guests, the shadow doesn't have a linear-map self-entry so we must use
 * the monitor-table's linear-map entry N-1 times and then the shadow-map
 * entry once. */
#define __sh_linear_l1_table ((shadow_l1e_t *)(SH_LINEAR_PT_VIRT_START))
#define __sh_linear_l2_table ((shadow_l2e_t *) \
    (__sh_linear_l1_table + shadow_l1_linear_offset(SH_LINEAR_PT_VIRT_START)))

// shadow linear L3 and L4 tables only exist in 4 level paging...
#if SHADOW_PAGING_LEVELS == 4
#define __sh_linear_l3_table ((shadow_l3e_t *) \
    (__sh_linear_l2_table + shadow_l2_linear_offset(SH_LINEAR_PT_VIRT_START)))
#define __sh_linear_l4_table ((shadow_l4e_t *) \
    (__sh_linear_l3_table + shadow_l3_linear_offset(SH_LINEAR_PT_VIRT_START)))
#endif

#define sh_linear_l1_table(v) ({ \
    ASSERT(current == (v)); \
    __sh_linear_l1_table; \
})

// XXX -- these should not be conditional on is_hvm_vcpu(v), but rather on
//        shadow_mode_external(d)...
//
#define sh_linear_l2_table(v) ({ \
    ASSERT(current == (v)); \
    ((shadow_l2e_t *) \
     (is_hvm_vcpu(v) ? __linear_l1_table : __sh_linear_l1_table) + \
     shadow_l1_linear_offset(SH_LINEAR_PT_VIRT_START)); \
})

#if SHADOW_PAGING_LEVELS >= 4
#define sh_linear_l3_table(v) ({ \
    ASSERT(current == (v)); \
    ((shadow_l3e_t *) \
     (is_hvm_vcpu(v) ? __linear_l2_table : __sh_linear_l2_table) + \
     shadow_l2_linear_offset(SH_LINEAR_PT_VIRT_START)); \
})

// we use l4_pgentry_t instead of shadow_l4e_t below because shadow_l4e_t is
// not defined for when xen_levels==4 & shadow_levels==3...
#define sh_linear_l4_table(v) ({ \
    ASSERT(current == (v)); \
    ((l4_pgentry_t *) \
     (is_hvm_vcpu(v) ? __linear_l3_table : __sh_linear_l3_table) + \
     shadow_l3_linear_offset(SH_LINEAR_PT_VIRT_START)); \
})
#endif
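
/*
 * Usage sketch (illustrative only, assuming v == current and that the
 * shadow tables covering 'va' are already in place): the shadow l2e that
 * maps a virtual address can be read through the linear mapping as
 *
 *     shadow_l2e_t sl2e = sh_linear_l2_table(v)[shadow_l2_linear_offset(va)];
 *
 * instead of explicitly mapping and unmapping the shadow l2 page with
 * map_shadow_page()/unmap_shadow_page().
 */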

#if GUEST_PAGING_LEVELS == 2

#include "../page-guest32.h"

#define GUEST_L1_PAGETABLE_ENTRIES     1024
#define GUEST_L2_PAGETABLE_ENTRIES     1024
#define GUEST_L1_PAGETABLE_SHIFT         12
#define GUEST_L2_PAGETABLE_SHIFT         22

/* Type of the guest's frame numbers */
TYPE_SAFE(u32,gfn)
#undef INVALID_GFN
#define INVALID_GFN ((u32)(-1u))
#define SH_PRI_gfn "05x"

/* Types of the guest's page tables */
typedef l1_pgentry_32_t guest_l1e_t;
typedef l2_pgentry_32_t guest_l2e_t;

/* Access functions for them */
static inline paddr_t guest_l1e_get_paddr(guest_l1e_t gl1e)
{ return l1e_get_paddr_32(gl1e); }
static inline paddr_t guest_l2e_get_paddr(guest_l2e_t gl2e)
{ return l2e_get_paddr_32(gl2e); }

static inline gfn_t guest_l1e_get_gfn(guest_l1e_t gl1e)
{ return _gfn(l1e_get_paddr_32(gl1e) >> PAGE_SHIFT); }
static inline gfn_t guest_l2e_get_gfn(guest_l2e_t gl2e)
{ return _gfn(l2e_get_paddr_32(gl2e) >> PAGE_SHIFT); }

static inline u32 guest_l1e_get_flags(guest_l1e_t gl1e)
{ return l1e_get_flags_32(gl1e); }
static inline u32 guest_l2e_get_flags(guest_l2e_t gl2e)
{ return l2e_get_flags_32(gl2e); }

static inline guest_l1e_t guest_l1e_add_flags(guest_l1e_t gl1e, u32 flags)
{ l1e_add_flags_32(gl1e, flags); return gl1e; }
static inline guest_l2e_t guest_l2e_add_flags(guest_l2e_t gl2e, u32 flags)
{ l2e_add_flags_32(gl2e, flags); return gl2e; }

static inline guest_l1e_t guest_l1e_from_gfn(gfn_t gfn, u32 flags)
{ return l1e_from_pfn_32(gfn_x(gfn), flags); }
static inline guest_l2e_t guest_l2e_from_gfn(gfn_t gfn, u32 flags)
{ return l2e_from_pfn_32(gfn_x(gfn), flags); }

#define guest_l1_table_offset(a) l1_table_offset_32(a)
#define guest_l2_table_offset(a) l2_table_offset_32(a)

/* The shadow types needed for the various levels. */
#define SH_type_l1_shadow  SH_type_l1_32_shadow
#define SH_type_l2_shadow  SH_type_l2_32_shadow
#define SH_type_fl1_shadow SH_type_fl1_32_shadow

#else /* GUEST_PAGING_LEVELS != 2 */

#if GUEST_PAGING_LEVELS == 3
#define GUEST_L1_PAGETABLE_ENTRIES      512
#define GUEST_L2_PAGETABLE_ENTRIES      512
#define GUEST_L3_PAGETABLE_ENTRIES        4
#define GUEST_L1_PAGETABLE_SHIFT         12
#define GUEST_L2_PAGETABLE_SHIFT         21
#define GUEST_L3_PAGETABLE_SHIFT         30
#else /* GUEST_PAGING_LEVELS == 4 */
#define GUEST_L1_PAGETABLE_ENTRIES      512
#define GUEST_L2_PAGETABLE_ENTRIES      512
#define GUEST_L3_PAGETABLE_ENTRIES      512
#define GUEST_L4_PAGETABLE_ENTRIES      512
#define GUEST_L1_PAGETABLE_SHIFT         12
#define GUEST_L2_PAGETABLE_SHIFT         21
#define GUEST_L3_PAGETABLE_SHIFT         30
#define GUEST_L4_PAGETABLE_SHIFT         39
#endif

/* Type of the guest's frame numbers */
TYPE_SAFE(unsigned long,gfn)
#undef INVALID_GFN
#define INVALID_GFN ((unsigned long)(-1ul))
#define SH_PRI_gfn "05lx"

/* Types of the guest's page tables */
typedef l1_pgentry_t guest_l1e_t;
typedef l2_pgentry_t guest_l2e_t;
typedef l3_pgentry_t guest_l3e_t;
#if GUEST_PAGING_LEVELS >= 4
typedef l4_pgentry_t guest_l4e_t;
#endif

/* Access functions for them */
static inline paddr_t guest_l1e_get_paddr(guest_l1e_t gl1e)
{ return l1e_get_paddr(gl1e); }
static inline paddr_t guest_l2e_get_paddr(guest_l2e_t gl2e)
{ return l2e_get_paddr(gl2e); }
static inline paddr_t guest_l3e_get_paddr(guest_l3e_t gl3e)
{ return l3e_get_paddr(gl3e); }
#if GUEST_PAGING_LEVELS >= 4
static inline paddr_t guest_l4e_get_paddr(guest_l4e_t gl4e)
{ return l4e_get_paddr(gl4e); }
#endif

static inline gfn_t guest_l1e_get_gfn(guest_l1e_t gl1e)
{ return _gfn(l1e_get_paddr(gl1e) >> PAGE_SHIFT); }
static inline gfn_t guest_l2e_get_gfn(guest_l2e_t gl2e)
{ return _gfn(l2e_get_paddr(gl2e) >> PAGE_SHIFT); }
static inline gfn_t guest_l3e_get_gfn(guest_l3e_t gl3e)
{ return _gfn(l3e_get_paddr(gl3e) >> PAGE_SHIFT); }
#if GUEST_PAGING_LEVELS >= 4
static inline gfn_t guest_l4e_get_gfn(guest_l4e_t gl4e)
{ return _gfn(l4e_get_paddr(gl4e) >> PAGE_SHIFT); }
#endif

static inline u32 guest_l1e_get_flags(guest_l1e_t gl1e)
{ return l1e_get_flags(gl1e); }
static inline u32 guest_l2e_get_flags(guest_l2e_t gl2e)
{ return l2e_get_flags(gl2e); }
static inline u32 guest_l3e_get_flags(guest_l3e_t gl3e)
{ return l3e_get_flags(gl3e); }
#if GUEST_PAGING_LEVELS >= 4
static inline u32 guest_l4e_get_flags(guest_l4e_t gl4e)
{ return l4e_get_flags(gl4e); }
#endif

static inline guest_l1e_t guest_l1e_add_flags(guest_l1e_t gl1e, u32 flags)
{ l1e_add_flags(gl1e, flags); return gl1e; }
static inline guest_l2e_t guest_l2e_add_flags(guest_l2e_t gl2e, u32 flags)
{ l2e_add_flags(gl2e, flags); return gl2e; }
static inline guest_l3e_t guest_l3e_add_flags(guest_l3e_t gl3e, u32 flags)
{ l3e_add_flags(gl3e, flags); return gl3e; }
#if GUEST_PAGING_LEVELS >= 4
static inline guest_l4e_t guest_l4e_add_flags(guest_l4e_t gl4e, u32 flags)
{ l4e_add_flags(gl4e, flags); return gl4e; }
#endif

static inline guest_l1e_t guest_l1e_from_gfn(gfn_t gfn, u32 flags)
{ return l1e_from_pfn(gfn_x(gfn), flags); }
static inline guest_l2e_t guest_l2e_from_gfn(gfn_t gfn, u32 flags)
{ return l2e_from_pfn(gfn_x(gfn), flags); }
static inline guest_l3e_t guest_l3e_from_gfn(gfn_t gfn, u32 flags)
{ return l3e_from_pfn(gfn_x(gfn), flags); }
#if GUEST_PAGING_LEVELS >= 4
static inline guest_l4e_t guest_l4e_from_gfn(gfn_t gfn, u32 flags)
{ return l4e_from_pfn(gfn_x(gfn), flags); }
#endif

#define guest_l1_table_offset(a) l1_table_offset(a)
#define guest_l2_table_offset(a) l2_table_offset(a)
#define guest_l3_table_offset(a) l3_table_offset(a)
#define guest_l4_table_offset(a) l4_table_offset(a)

/* The shadow types needed for the various levels. */
#if GUEST_PAGING_LEVELS == 3
#define SH_type_l1_shadow  SH_type_l1_pae_shadow
#define SH_type_fl1_shadow SH_type_fl1_pae_shadow
#define SH_type_l2_shadow  SH_type_l2_pae_shadow
#define SH_type_l2h_shadow SH_type_l2h_pae_shadow
#else
#define SH_type_l1_shadow  SH_type_l1_64_shadow
#define SH_type_fl1_shadow SH_type_fl1_64_shadow
#define SH_type_l2_shadow  SH_type_l2_64_shadow
#define SH_type_l2h_shadow SH_type_l2h_64_shadow
#define SH_type_l3_shadow  SH_type_l3_64_shadow
#define SH_type_l4_shadow  SH_type_l4_64_shadow
#endif

#endif /* GUEST_PAGING_LEVELS != 2 */

#define VALID_GFN(m) (m != INVALID_GFN)

static inline int
valid_gfn(gfn_t m)
{
    return VALID_GFN(gfn_x(m));
}

/* Translation between mfns and gfns */

// vcpu-specific version of gfn_to_mfn().  This is where we hide the dirty
// little secret that, for hvm guests with paging disabled, nearly all of the
// shadow code actually thinks that the guest is running on *untranslated*
// page tables (which is actually domain->phys_table).
//
static inline mfn_t
vcpu_gfn_to_mfn(struct vcpu *v, gfn_t gfn)
{
    if ( !paging_vcpu_mode_translate(v) )
        return _mfn(gfn_x(gfn));
    return gfn_to_mfn(v->domain, gfn_x(gfn));
}

static inline paddr_t
gfn_to_paddr(gfn_t gfn)
{
    return ((paddr_t)gfn_x(gfn)) << PAGE_SHIFT;
}

/* Type used for recording a walk through guest pagetables.  It is
 * filled in by the pagetable walk function, and also used as a cache
 * for later walks.
 * Any non-null pointer in this structure represents a mapping of guest
 * memory.  We must always call walk_init() before using a walk_t, and
 * call walk_unmap() when we're done.
 * The "Effective l1e" field is used when there isn't an l1e to point to,
 * but we have fabricated an l1e for propagation to the shadow (e.g.,
 * for splintering guest superpages into many shadow l1 entries). */
typedef struct shadow_walk_t walk_t;
struct shadow_walk_t
{
    unsigned long va;           /* Address we were looking for */
#if GUEST_PAGING_LEVELS >= 3
#if GUEST_PAGING_LEVELS >= 4
    guest_l4e_t *l4e;           /* Pointer to guest's level 4 entry */
#endif
    guest_l3e_t *l3e;           /* Pointer to guest's level 3 entry */
#endif
    guest_l2e_t *l2e;           /* Pointer to guest's level 2 entry */
    guest_l1e_t *l1e;           /* Pointer to guest's level 1 entry */
    guest_l1e_t eff_l1e;        /* Effective level 1 entry */
#if GUEST_PAGING_LEVELS >= 4
    mfn_t l4mfn;                /* MFN that the level 4 entry is in */
    mfn_t l3mfn;                /* MFN that the level 3 entry is in */
#endif
    mfn_t l2mfn;                /* MFN that the level 2 entry is in */
    mfn_t l1mfn;                /* MFN that the level 1 entry is in */
};

/* macros for dealing with the naming of the internal function names of the
 * shadow code's external entry points.
 */
#define INTERNAL_NAME(name) \
    SHADOW_INTERNAL_NAME(name, SHADOW_PAGING_LEVELS, GUEST_PAGING_LEVELS)

/* macros for renaming the primary entry points, so that they are more
 * easily distinguished from one another in a debugger.
 */
#define sh_page_fault              INTERNAL_NAME(sh_page_fault)
#define sh_invlpg                  INTERNAL_NAME(sh_invlpg)
#define sh_gva_to_gfn              INTERNAL_NAME(sh_gva_to_gfn)
#define sh_update_cr3              INTERNAL_NAME(sh_update_cr3)
#define sh_rm_write_access_from_l1 INTERNAL_NAME(sh_rm_write_access_from_l1)
#define sh_rm_mappings_from_l1     INTERNAL_NAME(sh_rm_mappings_from_l1)
#define sh_remove_l1_shadow        INTERNAL_NAME(sh_remove_l1_shadow)
#define sh_remove_l2_shadow        INTERNAL_NAME(sh_remove_l2_shadow)
#define sh_remove_l3_shadow        INTERNAL_NAME(sh_remove_l3_shadow)
#define sh_map_and_validate_gl4e   INTERNAL_NAME(sh_map_and_validate_gl4e)
#define sh_map_and_validate_gl3e   INTERNAL_NAME(sh_map_and_validate_gl3e)
#define sh_map_and_validate_gl2e   INTERNAL_NAME(sh_map_and_validate_gl2e)
#define sh_map_and_validate_gl2he  INTERNAL_NAME(sh_map_and_validate_gl2he)
#define sh_map_and_validate_gl1e   INTERNAL_NAME(sh_map_and_validate_gl1e)
#define sh_destroy_l4_shadow       INTERNAL_NAME(sh_destroy_l4_shadow)
#define sh_destroy_l3_shadow       INTERNAL_NAME(sh_destroy_l3_shadow)
#define sh_destroy_l2_shadow       INTERNAL_NAME(sh_destroy_l2_shadow)
#define sh_destroy_l1_shadow       INTERNAL_NAME(sh_destroy_l1_shadow)
#define sh_unhook_32b_mappings     INTERNAL_NAME(sh_unhook_32b_mappings)
#define sh_unhook_pae_mappings     INTERNAL_NAME(sh_unhook_pae_mappings)
#define sh_unhook_64b_mappings     INTERNAL_NAME(sh_unhook_64b_mappings)
#define sh_paging_mode             INTERNAL_NAME(sh_paging_mode)
#define sh_detach_old_tables       INTERNAL_NAME(sh_detach_old_tables)
#define sh_x86_emulate_write       INTERNAL_NAME(sh_x86_emulate_write)
#define sh_x86_emulate_cmpxchg     INTERNAL_NAME(sh_x86_emulate_cmpxchg)
#define sh_x86_emulate_cmpxchg8b   INTERNAL_NAME(sh_x86_emulate_cmpxchg8b)
#define sh_audit_l1_table          INTERNAL_NAME(sh_audit_l1_table)
#define sh_audit_fl1_table         INTERNAL_NAME(sh_audit_fl1_table)
#define sh_audit_l2_table          INTERNAL_NAME(sh_audit_l2_table)
#define sh_audit_l3_table          INTERNAL_NAME(sh_audit_l3_table)
#define sh_audit_l4_table          INTERNAL_NAME(sh_audit_l4_table)
#define sh_guess_wrmap             INTERNAL_NAME(sh_guess_wrmap)
#define sh_clear_shadow_entry      INTERNAL_NAME(sh_clear_shadow_entry)

/* The sh_guest_(map|get)_* functions only depend on the number of config
 * levels
 */
#define sh_guest_map_l1e \
        SHADOW_INTERNAL_NAME(sh_guest_map_l1e, \
                             CONFIG_PAGING_LEVELS, \
                             CONFIG_PAGING_LEVELS)
#define sh_guest_get_eff_l1e \
        SHADOW_INTERNAL_NAME(sh_guest_get_eff_l1e, \
                             CONFIG_PAGING_LEVELS, \
                             CONFIG_PAGING_LEVELS)

/* sh_make_monitor_table only depends on the number of shadow levels */
#define sh_make_monitor_table \
        SHADOW_INTERNAL_NAME(sh_make_monitor_table, \
                             SHADOW_PAGING_LEVELS, \
                             SHADOW_PAGING_LEVELS)
#define sh_destroy_monitor_table \
        SHADOW_INTERNAL_NAME(sh_destroy_monitor_table, \
                             SHADOW_PAGING_LEVELS, \
                             SHADOW_PAGING_LEVELS)
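
/* Illustrative note on the macro below: with 3-level (PAE) shadows, the
 * top-level shadow page presumably has to fit in a 32-bit CR3 value, i.e.
 * lie below 4GB, so an MFN "fits" iff its frame number is below 2^20
 * (2^20 frames of 4k == 4GB). */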
#if SHADOW_PAGING_LEVELS == 3
#define MFN_FITS_IN_HVM_CR3(_MFN) !(mfn_x(_MFN) >> 20)
#endif

#if SHADOW_PAGING_LEVELS == 2
#define SH_PRI_pte "08x"
#else /* SHADOW_PAGING_LEVELS >= 3 */
#ifndef __x86_64__
#define SH_PRI_pte "016llx"
#else
#define SH_PRI_pte "016lx"
#endif
#endif /* SHADOW_PAGING_LEVELS >= 3 */

#if GUEST_PAGING_LEVELS == 2
#define SH_PRI_gpte "08x"
#else /* GUEST_PAGING_LEVELS >= 3 */
#ifndef __x86_64__
#define SH_PRI_gpte "016llx"
#else
#define SH_PRI_gpte "016lx"
#endif
#endif /* GUEST_PAGING_LEVELS >= 3 */

static inline u32
accumulate_guest_flags(struct vcpu *v, walk_t *gw)
{
    u32 accumulated_flags;

    // We accumulate the permission flags with bitwise ANDing.
    // This works for the PRESENT bit, RW bit, and USER bit.
    // For the NX bit, however, the polarity is wrong, so we accumulate the
    // inverse of the NX bit.
    //
    accumulated_flags =  guest_l1e_get_flags(gw->eff_l1e) ^ _PAGE_NX_BIT;
    accumulated_flags &= guest_l2e_get_flags(*gw->l2e) ^ _PAGE_NX_BIT;

    // Note that PAE guests do not have USER or RW or NX bits in their L3s.
    //
#if GUEST_PAGING_LEVELS == 3
    accumulated_flags &=
        ~_PAGE_PRESENT | (guest_l3e_get_flags(*gw->l3e) & _PAGE_PRESENT);
#elif GUEST_PAGING_LEVELS >= 4
    accumulated_flags &= guest_l3e_get_flags(*gw->l3e) ^ _PAGE_NX_BIT;
    accumulated_flags &= guest_l4e_get_flags(*gw->l4e) ^ _PAGE_NX_BIT;
#endif

    // Revert the NX bit back to its original polarity
    accumulated_flags ^= _PAGE_NX_BIT;

    // In 64-bit PV guests, the _PAGE_USER bit is implied in all guest
    // entries (since even the guest kernel runs in ring 3).
    //
    if ( (GUEST_PAGING_LEVELS == 4) && !is_hvm_vcpu(v) )
        accumulated_flags |= _PAGE_USER;

    return accumulated_flags;
}
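
/*
 * Worked example of the NX trick above (illustrative): if the guest l1e has
 * NX set and the l2e has NX clear, the XORs flip those bits to 0 and 1, the
 * AND yields 0, and the final XOR flips it back to 1 -- so NX at *any* level
 * leaves NX set in the accumulated flags, matching the AND semantics used
 * for the other permission bits.
 */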

#if (SHADOW_OPTIMIZATIONS & SHOPT_FAST_FAULT_PATH) && SHADOW_PAGING_LEVELS > 2
/******************************************************************************
 * We implement a "fast path" for two special cases: faults that require
 * MMIO emulation, and faults where the guest PTE is not present.  We
 * record these as shadow l1 entries that have reserved bits set in
 * them, so we can spot them immediately in the fault handler and handle
 * them without needing to hold the shadow lock or walk the guest
 * pagetables.
 *
 * This is only feasible for PAE and 64bit Xen: 32-bit non-PAE PTEs don't
 * have reserved bits that we can use for this.
 */

#define SH_L1E_MAGIC 0xffffffff00000001ULL
static inline int sh_l1e_is_magic(shadow_l1e_t sl1e)
{
    return ((sl1e.l1 & SH_L1E_MAGIC) == SH_L1E_MAGIC);
}

/* Guest not present: a single magic value */
static inline shadow_l1e_t sh_l1e_gnp(void)
{
    return (shadow_l1e_t){ -1ULL };
}

static inline int sh_l1e_is_gnp(shadow_l1e_t sl1e)
{
    return (sl1e.l1 == sh_l1e_gnp().l1);
}

/* MMIO: an invalid PTE that contains the GFN of the equivalent guest l1e.
 * We store 28 bits of GFN in bits 4-31 of the entry.
 * The present bit is set, and the U/S and R/W bits are taken from the guest.
 * Bit 3 is always 0, to differentiate from gnp above. */
#define SH_L1E_MMIO_MAGIC       0xffffffff00000001ULL
#define SH_L1E_MMIO_MAGIC_MASK  0xffffffff00000009ULL
#define SH_L1E_MMIO_GFN_MASK    0x00000000fffffff0ULL
#define SH_L1E_MMIO_GFN_SHIFT   4

static inline shadow_l1e_t sh_l1e_mmio(gfn_t gfn, u32 gflags)
{
    return (shadow_l1e_t) { (SH_L1E_MMIO_MAGIC
                             | (gfn_x(gfn) << SH_L1E_MMIO_GFN_SHIFT)
                             | (gflags & (_PAGE_USER|_PAGE_RW))) };
}

static inline int sh_l1e_is_mmio(shadow_l1e_t sl1e)
{
    return ((sl1e.l1 & SH_L1E_MMIO_MAGIC_MASK) == SH_L1E_MMIO_MAGIC);
}

static inline gfn_t sh_l1e_mmio_get_gfn(shadow_l1e_t sl1e)
{
    return _gfn((sl1e.l1 & SH_L1E_MMIO_GFN_MASK) >> SH_L1E_MMIO_GFN_SHIFT);
}

static inline u32 sh_l1e_mmio_get_flags(shadow_l1e_t sl1e)
{
    return (u32)((sl1e.l1 & (_PAGE_USER|_PAGE_RW)));
}
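
/*
 * Round-trip sketch (illustrative): for guest frame 0x1234 with a writable,
 * user-accessible guest l1e,
 *
 *     shadow_l1e_t e = sh_l1e_mmio(_gfn(0x1234), _PAGE_USER|_PAGE_RW);
 *
 * gives e.l1 == 0xffffffff00012347ULL (the magic bits plus the present bit);
 * sh_l1e_is_magic(e) and sh_l1e_is_mmio(e) are true, sh_l1e_mmio_get_gfn(e)
 * recovers 0x1234, and sh_l1e_mmio_get_flags(e) recovers _PAGE_USER|_PAGE_RW.
 */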

#else

#define sh_l1e_gnp() shadow_l1e_empty()
#define sh_l1e_mmio(_gfn, _flags) shadow_l1e_empty()
#define sh_l1e_is_magic(_e) (0)

#endif /* SHOPT_FAST_FAULT_PATH */


#endif /* _XEN_SHADOW_TYPES_H */

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */