xen/arch/x86/mm/shadow/types.h @ 13915:a00b8d3800a8

[XEN] Snapshot PAE l3es when they are shadowed.
We don't update the shadows, so we mustn't look at the guest l3es
or we'll be confused by them if they change.

Signed-off-by: Tim Deegan <Tim.Deegan@xensource.com>
Author:   Tim Deegan <Tim.Deegan@xensource.com>
Date:     Wed Feb 14 14:46:18 2007 +0000
Parents:  6daa91dc9247
Children: 9c2e6f8f3aa7

/******************************************************************************
 * arch/x86/mm/shadow/types.h
 *
 * Parts of this code are Copyright (c) 2006 by XenSource Inc.
 * Parts of this code are Copyright (c) 2006 by Michael A Fetterman
 * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#ifndef _XEN_SHADOW_TYPES_H
#define _XEN_SHADOW_TYPES_H

// Map a shadow page
static inline void *
map_shadow_page(mfn_t smfn)
{
    // XXX -- Possible optimization/measurement question for 32-bit and PAE
    //        hypervisors:
    //        How often is this smfn already available in the shadow linear
    //        table?  Might it be worth checking that table first,
    //        presumably using the reverse map hint in the page_info of this
    //        smfn, rather than calling map_domain_page()?
    //
    return sh_map_domain_page(smfn);
}

// matching unmap for map_shadow_page()
static inline void
unmap_shadow_page(void *p)
{
    sh_unmap_domain_page(p);
}
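
/* A minimal usage sketch (illustrative, not part of the original header):
 * every map_shadow_page() is paired with an unmap_shadow_page() on the
 * same mapping, e.g. to read one entry out of a shadow page:
 *
 *     shadow_l1e_t *sl1p = map_shadow_page(smfn);
 *     shadow_l1e_t sl1e = sl1p[shadow_l1_table_offset(va)];
 *     unmap_shadow_page(sl1p);
 *
 * sl1p, sl1e and va are hypothetical local names; shadow_l1_table_offset()
 * is defined further down in this header.
 */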

/*
 * Define various types for handling pagetables, based on these options:
 * SHADOW_PAGING_LEVELS : Number of levels of shadow pagetables
 * GUEST_PAGING_LEVELS  : Number of levels of guest pagetables
 */

#if (CONFIG_PAGING_LEVELS < SHADOW_PAGING_LEVELS)
#error Cannot have more levels of shadow pagetables than host pagetables
#endif

#if (SHADOW_PAGING_LEVELS < GUEST_PAGING_LEVELS)
#error Cannot have more levels of guest pagetables than shadow pagetables
#endif

#if SHADOW_PAGING_LEVELS == 2
#define SHADOW_L1_PAGETABLE_ENTRIES    1024
#define SHADOW_L2_PAGETABLE_ENTRIES    1024
#define SHADOW_L1_PAGETABLE_SHIFT        12
#define SHADOW_L2_PAGETABLE_SHIFT        22
#endif

#if SHADOW_PAGING_LEVELS == 3
#define SHADOW_L1_PAGETABLE_ENTRIES     512
#define SHADOW_L2_PAGETABLE_ENTRIES     512
#define SHADOW_L3_PAGETABLE_ENTRIES       4
#define SHADOW_L1_PAGETABLE_SHIFT        12
#define SHADOW_L2_PAGETABLE_SHIFT        21
#define SHADOW_L3_PAGETABLE_SHIFT        30
#endif

#if SHADOW_PAGING_LEVELS == 4
#define SHADOW_L1_PAGETABLE_ENTRIES     512
#define SHADOW_L2_PAGETABLE_ENTRIES     512
#define SHADOW_L3_PAGETABLE_ENTRIES     512
#define SHADOW_L4_PAGETABLE_ENTRIES     512
#define SHADOW_L1_PAGETABLE_SHIFT        12
#define SHADOW_L2_PAGETABLE_SHIFT        21
#define SHADOW_L3_PAGETABLE_SHIFT        30
#define SHADOW_L4_PAGETABLE_SHIFT        39
#endif
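
/* Worked example (illustrative, not part of the original header): with
 * SHADOW_PAGING_LEVELS == 3 a virtual address splits into a 2-bit L3 index,
 * a 9-bit L2 index, a 9-bit L1 index and a 12-bit page offset.  For
 * va = 0xb7712345:
 *
 *     l3 index = va >> 30           = 2
 *     l2 index = (va >> 21) & 0x1ff = 0x1bb (443)
 *     l1 index = (va >> 12) & 0x1ff = 0x112 (274)
 *     offset   = va & 0xfff         = 0x345
 *
 * These are the quantities the shadow_lN_table_offset() helpers defined
 * below compute.
 */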

/* Types of the shadow page tables */
typedef l1_pgentry_t shadow_l1e_t;
typedef l2_pgentry_t shadow_l2e_t;
#if SHADOW_PAGING_LEVELS >= 3
typedef l3_pgentry_t shadow_l3e_t;
#if SHADOW_PAGING_LEVELS >= 4
typedef l4_pgentry_t shadow_l4e_t;
#endif
#endif

/* Access functions for them */
static inline paddr_t shadow_l1e_get_paddr(shadow_l1e_t sl1e)
{ return l1e_get_paddr(sl1e); }
static inline paddr_t shadow_l2e_get_paddr(shadow_l2e_t sl2e)
{ return l2e_get_paddr(sl2e); }
#if SHADOW_PAGING_LEVELS >= 3
static inline paddr_t shadow_l3e_get_paddr(shadow_l3e_t sl3e)
{ return l3e_get_paddr(sl3e); }
#if SHADOW_PAGING_LEVELS >= 4
static inline paddr_t shadow_l4e_get_paddr(shadow_l4e_t sl4e)
{ return l4e_get_paddr(sl4e); }
#endif
#endif

static inline mfn_t shadow_l1e_get_mfn(shadow_l1e_t sl1e)
{ return _mfn(l1e_get_pfn(sl1e)); }
static inline mfn_t shadow_l2e_get_mfn(shadow_l2e_t sl2e)
{ return _mfn(l2e_get_pfn(sl2e)); }
#if SHADOW_PAGING_LEVELS >= 3
static inline mfn_t shadow_l3e_get_mfn(shadow_l3e_t sl3e)
{ return _mfn(l3e_get_pfn(sl3e)); }
#if SHADOW_PAGING_LEVELS >= 4
static inline mfn_t shadow_l4e_get_mfn(shadow_l4e_t sl4e)
{ return _mfn(l4e_get_pfn(sl4e)); }
#endif
#endif

static inline u32 shadow_l1e_get_flags(shadow_l1e_t sl1e)
{ return l1e_get_flags(sl1e); }
static inline u32 shadow_l2e_get_flags(shadow_l2e_t sl2e)
{ return l2e_get_flags(sl2e); }
#if SHADOW_PAGING_LEVELS >= 3
static inline u32 shadow_l3e_get_flags(shadow_l3e_t sl3e)
{ return l3e_get_flags(sl3e); }
#if SHADOW_PAGING_LEVELS >= 4
static inline u32 shadow_l4e_get_flags(shadow_l4e_t sl4e)
{ return l4e_get_flags(sl4e); }
#endif
#endif

static inline shadow_l1e_t
shadow_l1e_remove_flags(shadow_l1e_t sl1e, u32 flags)
{ l1e_remove_flags(sl1e, flags); return sl1e; }

static inline shadow_l1e_t shadow_l1e_empty(void)
{ return l1e_empty(); }
static inline shadow_l2e_t shadow_l2e_empty(void)
{ return l2e_empty(); }
#if SHADOW_PAGING_LEVELS >= 3
static inline shadow_l3e_t shadow_l3e_empty(void)
{ return l3e_empty(); }
#if SHADOW_PAGING_LEVELS >= 4
static inline shadow_l4e_t shadow_l4e_empty(void)
{ return l4e_empty(); }
#endif
#endif

static inline shadow_l1e_t shadow_l1e_from_mfn(mfn_t mfn, u32 flags)
{ return l1e_from_pfn(mfn_x(mfn), flags); }
static inline shadow_l2e_t shadow_l2e_from_mfn(mfn_t mfn, u32 flags)
{ return l2e_from_pfn(mfn_x(mfn), flags); }
#if SHADOW_PAGING_LEVELS >= 3
static inline shadow_l3e_t shadow_l3e_from_mfn(mfn_t mfn, u32 flags)
{ return l3e_from_pfn(mfn_x(mfn), flags); }
#if SHADOW_PAGING_LEVELS >= 4
static inline shadow_l4e_t shadow_l4e_from_mfn(mfn_t mfn, u32 flags)
{ return l4e_from_pfn(mfn_x(mfn), flags); }
#endif
#endif

#define shadow_l1_table_offset(a) l1_table_offset(a)
#define shadow_l2_table_offset(a) l2_table_offset(a)
#define shadow_l3_table_offset(a) l3_table_offset(a)
#define shadow_l4_table_offset(a) l4_table_offset(a)

/**************************************************************************/
/* Access to the linear mapping of shadow page tables. */

/* Offsets into each level of the linear mapping for a virtual address. */
#define shadow_l1_linear_offset(_a)                                          \
        (((_a) & VADDR_MASK) >> SHADOW_L1_PAGETABLE_SHIFT)
#define shadow_l2_linear_offset(_a)                                          \
        (((_a) & VADDR_MASK) >> SHADOW_L2_PAGETABLE_SHIFT)
#define shadow_l3_linear_offset(_a)                                          \
        (((_a) & VADDR_MASK) >> SHADOW_L3_PAGETABLE_SHIFT)
#define shadow_l4_linear_offset(_a)                                          \
        (((_a) & VADDR_MASK) >> SHADOW_L4_PAGETABLE_SHIFT)

/* Where to find each level of the linear mapping.  For PV guests, we use
 * the shadow linear-map self-entry as many times as we need.  For HVM
 * guests, the shadow doesn't have a linear-map self-entry so we must use
 * the monitor-table's linear-map entry N-1 times and then the shadow-map
 * entry once. */
#define __sh_linear_l1_table ((shadow_l1e_t *)(SH_LINEAR_PT_VIRT_START))
#define __sh_linear_l2_table ((shadow_l2e_t *)                               \
    (__sh_linear_l1_table + shadow_l1_linear_offset(SH_LINEAR_PT_VIRT_START)))

// shadow linear L3 and L4 tables only exist in 4-level paging...
#if SHADOW_PAGING_LEVELS == 4
#define __sh_linear_l3_table ((shadow_l3e_t *)                               \
    (__sh_linear_l2_table + shadow_l2_linear_offset(SH_LINEAR_PT_VIRT_START)))
#define __sh_linear_l4_table ((shadow_l4e_t *)                               \
    (__sh_linear_l3_table + shadow_l3_linear_offset(SH_LINEAR_PT_VIRT_START)))
#endif
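
/* Illustrative note (an addition, not original text): each __sh_linear_lN
 * table is found by indexing the previous level's linear view with the
 * offset of SH_LINEAR_PT_VIRT_START itself, i.e. by following the
 * self-referencing entry one more time.  Expanding the pointer arithmetic
 * for the l2 case:
 *
 *     (unsigned long)__sh_linear_l2_table
 *         == SH_LINEAR_PT_VIRT_START
 *            + ((SH_LINEAR_PT_VIRT_START & VADDR_MASK) >> 12)
 *              * sizeof(shadow_l1e_t)
 *
 * so the l2 view lives inside the region covered by the l1 view, one
 * recursion step further up.
 */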

#define sh_linear_l1_table(v) ({                                             \
    ASSERT(current == (v));                                                  \
    __sh_linear_l1_table;                                                    \
})

// XXX -- these should not be conditional on is_hvm_vcpu(v), but rather on
//        shadow_mode_external(d)...
//
#define sh_linear_l2_table(v) ({                                             \
    ASSERT(current == (v));                                                  \
    ((shadow_l2e_t *)                                                        \
     (is_hvm_vcpu(v) ? __linear_l1_table : __sh_linear_l1_table) +           \
     shadow_l1_linear_offset(SH_LINEAR_PT_VIRT_START));                      \
})

#if SHADOW_PAGING_LEVELS >= 4
#define sh_linear_l3_table(v) ({                                             \
    ASSERT(current == (v));                                                  \
    ((shadow_l3e_t *)                                                        \
     (is_hvm_vcpu(v) ? __linear_l2_table : __sh_linear_l2_table) +           \
     shadow_l2_linear_offset(SH_LINEAR_PT_VIRT_START));                      \
})

// we use l4_pgentry_t instead of shadow_l4e_t below because shadow_l4e_t is
// not defined for when xen_levels==4 & shadow_levels==3...
#define sh_linear_l4_table(v) ({                                             \
    ASSERT(current == (v));                                                  \
    ((l4_pgentry_t *)                                                        \
     (is_hvm_vcpu(v) ? __linear_l3_table : __sh_linear_l3_table) +           \
     shadow_l3_linear_offset(SH_LINEAR_PT_VIRT_START));                      \
})
#endif
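
/* Minimal usage sketch (illustrative only): assuming the shadow linear map
 * is already in sync for 'va', the shadow l1e mapping a virtual address can
 * be read straight through the linear mapping, with no explicit walk of the
 * shadow tables:
 *
 *     shadow_l1e_t sl1e = sh_linear_l1_table(v)[shadow_l1_linear_offset(va)];
 *
 * Here 'v' must be the currently-running vcpu (hence the ASSERTs above) and
 * 'va' is a hypothetical guest virtual address.
 */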

#if GUEST_PAGING_LEVELS == 2

#include "page-guest32.h"

#define GUEST_L1_PAGETABLE_ENTRIES     1024
#define GUEST_L2_PAGETABLE_ENTRIES     1024
#define GUEST_L1_PAGETABLE_SHIFT         12
#define GUEST_L2_PAGETABLE_SHIFT         22

/* Type of the guest's frame numbers */
TYPE_SAFE(u32,gfn)
#define INVALID_GFN ((u32)(-1u))
#define SH_PRI_gfn "05x"

/* Types of the guest's page tables */
typedef l1_pgentry_32_t guest_l1e_t;
typedef l2_pgentry_32_t guest_l2e_t;

/* Access functions for them */
static inline paddr_t guest_l1e_get_paddr(guest_l1e_t gl1e)
{ return l1e_get_paddr_32(gl1e); }
static inline paddr_t guest_l2e_get_paddr(guest_l2e_t gl2e)
{ return l2e_get_paddr_32(gl2e); }

static inline gfn_t guest_l1e_get_gfn(guest_l1e_t gl1e)
{ return _gfn(l1e_get_paddr_32(gl1e) >> PAGE_SHIFT); }
static inline gfn_t guest_l2e_get_gfn(guest_l2e_t gl2e)
{ return _gfn(l2e_get_paddr_32(gl2e) >> PAGE_SHIFT); }

static inline u32 guest_l1e_get_flags(guest_l1e_t gl1e)
{ return l1e_get_flags_32(gl1e); }
static inline u32 guest_l2e_get_flags(guest_l2e_t gl2e)
{ return l2e_get_flags_32(gl2e); }

static inline guest_l1e_t guest_l1e_add_flags(guest_l1e_t gl1e, u32 flags)
{ l1e_add_flags_32(gl1e, flags); return gl1e; }
static inline guest_l2e_t guest_l2e_add_flags(guest_l2e_t gl2e, u32 flags)
{ l2e_add_flags_32(gl2e, flags); return gl2e; }

static inline guest_l1e_t guest_l1e_from_gfn(gfn_t gfn, u32 flags)
{ return l1e_from_pfn_32(gfn_x(gfn), flags); }
static inline guest_l2e_t guest_l2e_from_gfn(gfn_t gfn, u32 flags)
{ return l2e_from_pfn_32(gfn_x(gfn), flags); }

#define guest_l1_table_offset(a) l1_table_offset_32(a)
#define guest_l2_table_offset(a) l2_table_offset_32(a)

/* The shadow types needed for the various levels. */
#define SH_type_l1_shadow  SH_type_l1_32_shadow
#define SH_type_l2_shadow  SH_type_l2_32_shadow
#define SH_type_fl1_shadow SH_type_fl1_32_shadow

#else /* GUEST_PAGING_LEVELS != 2 */

#if GUEST_PAGING_LEVELS == 3
#define GUEST_L1_PAGETABLE_ENTRIES      512
#define GUEST_L2_PAGETABLE_ENTRIES      512
#define GUEST_L3_PAGETABLE_ENTRIES        4
#define GUEST_L1_PAGETABLE_SHIFT         12
#define GUEST_L2_PAGETABLE_SHIFT         21
#define GUEST_L3_PAGETABLE_SHIFT         30
#else /* GUEST_PAGING_LEVELS == 4 */
#define GUEST_L1_PAGETABLE_ENTRIES      512
#define GUEST_L2_PAGETABLE_ENTRIES      512
#define GUEST_L3_PAGETABLE_ENTRIES      512
#define GUEST_L4_PAGETABLE_ENTRIES      512
#define GUEST_L1_PAGETABLE_SHIFT         12
#define GUEST_L2_PAGETABLE_SHIFT         21
#define GUEST_L3_PAGETABLE_SHIFT         30
#define GUEST_L4_PAGETABLE_SHIFT         39
#endif

/* Type of the guest's frame numbers */
TYPE_SAFE(unsigned long,gfn)
#define INVALID_GFN ((unsigned long)(-1ul))
#define SH_PRI_gfn "05lx"

/* Types of the guest's page tables */
typedef l1_pgentry_t guest_l1e_t;
typedef l2_pgentry_t guest_l2e_t;
typedef l3_pgentry_t guest_l3e_t;
#if GUEST_PAGING_LEVELS >= 4
typedef l4_pgentry_t guest_l4e_t;
#endif

/* Access functions for them */
static inline paddr_t guest_l1e_get_paddr(guest_l1e_t gl1e)
{ return l1e_get_paddr(gl1e); }
static inline paddr_t guest_l2e_get_paddr(guest_l2e_t gl2e)
{ return l2e_get_paddr(gl2e); }
static inline paddr_t guest_l3e_get_paddr(guest_l3e_t gl3e)
{ return l3e_get_paddr(gl3e); }
#if GUEST_PAGING_LEVELS >= 4
static inline paddr_t guest_l4e_get_paddr(guest_l4e_t gl4e)
{ return l4e_get_paddr(gl4e); }
#endif

static inline gfn_t guest_l1e_get_gfn(guest_l1e_t gl1e)
{ return _gfn(l1e_get_paddr(gl1e) >> PAGE_SHIFT); }
static inline gfn_t guest_l2e_get_gfn(guest_l2e_t gl2e)
{ return _gfn(l2e_get_paddr(gl2e) >> PAGE_SHIFT); }
static inline gfn_t guest_l3e_get_gfn(guest_l3e_t gl3e)
{ return _gfn(l3e_get_paddr(gl3e) >> PAGE_SHIFT); }
#if GUEST_PAGING_LEVELS >= 4
static inline gfn_t guest_l4e_get_gfn(guest_l4e_t gl4e)
{ return _gfn(l4e_get_paddr(gl4e) >> PAGE_SHIFT); }
#endif

static inline u32 guest_l1e_get_flags(guest_l1e_t gl1e)
{ return l1e_get_flags(gl1e); }
static inline u32 guest_l2e_get_flags(guest_l2e_t gl2e)
{ return l2e_get_flags(gl2e); }
static inline u32 guest_l3e_get_flags(guest_l3e_t gl3e)
{ return l3e_get_flags(gl3e); }
#if GUEST_PAGING_LEVELS >= 4
static inline u32 guest_l4e_get_flags(guest_l4e_t gl4e)
{ return l4e_get_flags(gl4e); }
#endif

static inline guest_l1e_t guest_l1e_add_flags(guest_l1e_t gl1e, u32 flags)
{ l1e_add_flags(gl1e, flags); return gl1e; }
static inline guest_l2e_t guest_l2e_add_flags(guest_l2e_t gl2e, u32 flags)
{ l2e_add_flags(gl2e, flags); return gl2e; }
static inline guest_l3e_t guest_l3e_add_flags(guest_l3e_t gl3e, u32 flags)
{ l3e_add_flags(gl3e, flags); return gl3e; }
#if GUEST_PAGING_LEVELS >= 4
static inline guest_l4e_t guest_l4e_add_flags(guest_l4e_t gl4e, u32 flags)
{ l4e_add_flags(gl4e, flags); return gl4e; }
#endif

static inline guest_l1e_t guest_l1e_from_gfn(gfn_t gfn, u32 flags)
{ return l1e_from_pfn(gfn_x(gfn), flags); }
static inline guest_l2e_t guest_l2e_from_gfn(gfn_t gfn, u32 flags)
{ return l2e_from_pfn(gfn_x(gfn), flags); }
static inline guest_l3e_t guest_l3e_from_gfn(gfn_t gfn, u32 flags)
{ return l3e_from_pfn(gfn_x(gfn), flags); }
#if GUEST_PAGING_LEVELS >= 4
static inline guest_l4e_t guest_l4e_from_gfn(gfn_t gfn, u32 flags)
{ return l4e_from_pfn(gfn_x(gfn), flags); }
#endif

#define guest_l1_table_offset(a) l1_table_offset(a)
#define guest_l2_table_offset(a) l2_table_offset(a)
#define guest_l3_table_offset(a) l3_table_offset(a)
#define guest_l4_table_offset(a) l4_table_offset(a)

/* The shadow types needed for the various levels. */
#if GUEST_PAGING_LEVELS == 3
#define SH_type_l1_shadow  SH_type_l1_pae_shadow
#define SH_type_fl1_shadow SH_type_fl1_pae_shadow
#define SH_type_l2_shadow  SH_type_l2_pae_shadow
#define SH_type_l2h_shadow SH_type_l2h_pae_shadow
#else
#define SH_type_l1_shadow  SH_type_l1_64_shadow
#define SH_type_fl1_shadow SH_type_fl1_64_shadow
#define SH_type_l2_shadow  SH_type_l2_64_shadow
#define SH_type_l3_shadow  SH_type_l3_64_shadow
#define SH_type_l4_shadow  SH_type_l4_64_shadow
#endif

#endif /* GUEST_PAGING_LEVELS != 2 */

#define VALID_GFN(m) (m != INVALID_GFN)

static inline int
valid_gfn(gfn_t m)
{
    return VALID_GFN(gfn_x(m));
}

/* Translation between mfns and gfns */

// vcpu-specific version of gfn_to_mfn().  This is where we hide the dirty
// little secret that, for hvm guests with paging disabled, nearly all of the
// shadow code actually thinks that the guest is running on *untranslated*
// page tables (which is actually domain->phys_table).
//

static inline mfn_t
vcpu_gfn_to_mfn(struct vcpu *v, gfn_t gfn)
{
    if ( !paging_vcpu_mode_translate(v) )
        return _mfn(gfn_x(gfn));
    return gfn_to_mfn(v->domain, gfn_x(gfn));
}

static inline paddr_t
gfn_to_paddr(gfn_t gfn)
{
    return ((paddr_t)gfn_x(gfn)) << PAGE_SHIFT;
}
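
/* Illustrative sketch (hypothetical local names, not original code):
 * turning a gfn read out of a guest pagetable entry into a machine frame
 * and a guest-physical address:
 *
 *     gfn_t   gfn = guest_l1e_get_gfn(gl1e);
 *     mfn_t   mfn = vcpu_gfn_to_mfn(v, gfn);   // honours paging-disabled HVM
 *     paddr_t gpa = gfn_to_paddr(gfn) + (va & ~PAGE_MASK);
 *
 * gl1e, v and va stand in for values the walk code would already have.
 */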

/* Type used for recording a walk through guest pagetables.  It is
 * filled in by the pagetable walk function, and also used as a cache
 * for later walks.
 * Any non-null pointer in this structure represents a mapping of guest
 * memory.  We must always call walk_init() before using a walk_t, and
 * call walk_unmap() when we're done.
 * The "Effective l1e" field is used when there isn't an l1e to point to,
 * but we have fabricated an l1e for propagation to the shadow (e.g.,
 * for splintering guest superpages into many shadow l1 entries). */
typedef struct shadow_walk_t walk_t;
struct shadow_walk_t
{
    unsigned long va;           /* Address we were looking for */
#if GUEST_PAGING_LEVELS >= 3
#if GUEST_PAGING_LEVELS >= 4
    guest_l4e_t *l4e;           /* Pointer to guest's level 4 entry */
#endif
    guest_l3e_t *l3e;           /* Pointer to guest's level 3 entry */
#endif
    guest_l2e_t *l2e;           /* Pointer to guest's level 2 entry */
    guest_l1e_t *l1e;           /* Pointer to guest's level 1 entry */
    guest_l1e_t eff_l1e;        /* Effective level 1 entry */
#if GUEST_PAGING_LEVELS >= 4
    mfn_t l4mfn;                /* MFN that the level 4 entry is in */
    mfn_t l3mfn;                /* MFN that the level 3 entry is in */
#endif
    mfn_t l2mfn;                /* MFN that the level 2 entry is in */
    mfn_t l1mfn;                /* MFN that the level 1 entry is in */
};

/* macros for dealing with the naming of the internal function names of the
 * shadow code's external entry points.
 */
#define INTERNAL_NAME(name) \
    SHADOW_INTERNAL_NAME(name, SHADOW_PAGING_LEVELS, GUEST_PAGING_LEVELS)

/* macros for renaming the primary entry points, so that they are more
 * easily distinguished in a debugger
 */
#define sh_page_fault              INTERNAL_NAME(sh_page_fault)
#define sh_invlpg                  INTERNAL_NAME(sh_invlpg)
#define sh_gva_to_gpa              INTERNAL_NAME(sh_gva_to_gpa)
#define sh_gva_to_gfn              INTERNAL_NAME(sh_gva_to_gfn)
#define sh_update_cr3              INTERNAL_NAME(sh_update_cr3)
#define sh_rm_write_access_from_l1 INTERNAL_NAME(sh_rm_write_access_from_l1)
#define sh_rm_mappings_from_l1     INTERNAL_NAME(sh_rm_mappings_from_l1)
#define sh_remove_l1_shadow        INTERNAL_NAME(sh_remove_l1_shadow)
#define sh_remove_l2_shadow        INTERNAL_NAME(sh_remove_l2_shadow)
#define sh_remove_l3_shadow        INTERNAL_NAME(sh_remove_l3_shadow)
#define sh_map_and_validate_gl4e   INTERNAL_NAME(sh_map_and_validate_gl4e)
#define sh_map_and_validate_gl3e   INTERNAL_NAME(sh_map_and_validate_gl3e)
#define sh_map_and_validate_gl2e   INTERNAL_NAME(sh_map_and_validate_gl2e)
#define sh_map_and_validate_gl2he  INTERNAL_NAME(sh_map_and_validate_gl2he)
#define sh_map_and_validate_gl1e   INTERNAL_NAME(sh_map_and_validate_gl1e)
#define sh_destroy_l4_shadow       INTERNAL_NAME(sh_destroy_l4_shadow)
#define sh_destroy_l3_shadow       INTERNAL_NAME(sh_destroy_l3_shadow)
#define sh_destroy_l2_shadow       INTERNAL_NAME(sh_destroy_l2_shadow)
#define sh_destroy_l1_shadow       INTERNAL_NAME(sh_destroy_l1_shadow)
#define sh_unhook_32b_mappings     INTERNAL_NAME(sh_unhook_32b_mappings)
#define sh_unhook_pae_mappings     INTERNAL_NAME(sh_unhook_pae_mappings)
#define sh_unhook_64b_mappings     INTERNAL_NAME(sh_unhook_64b_mappings)
#define sh_paging_mode             INTERNAL_NAME(sh_paging_mode)
#define sh_detach_old_tables       INTERNAL_NAME(sh_detach_old_tables)
#define sh_x86_emulate_write       INTERNAL_NAME(sh_x86_emulate_write)
#define sh_x86_emulate_cmpxchg     INTERNAL_NAME(sh_x86_emulate_cmpxchg)
#define sh_x86_emulate_cmpxchg8b   INTERNAL_NAME(sh_x86_emulate_cmpxchg8b)
#define sh_audit_l1_table          INTERNAL_NAME(sh_audit_l1_table)
#define sh_audit_fl1_table         INTERNAL_NAME(sh_audit_fl1_table)
#define sh_audit_l2_table          INTERNAL_NAME(sh_audit_l2_table)
#define sh_audit_l3_table          INTERNAL_NAME(sh_audit_l3_table)
#define sh_audit_l4_table          INTERNAL_NAME(sh_audit_l4_table)
#define sh_guess_wrmap             INTERNAL_NAME(sh_guess_wrmap)
#define sh_clear_shadow_entry      INTERNAL_NAME(sh_clear_shadow_entry)

/* The sh_guest_(map|get)_* functions only depend on the number of config
 * levels
 */
#define sh_guest_map_l1e \
        SHADOW_INTERNAL_NAME(sh_guest_map_l1e, \
                             CONFIG_PAGING_LEVELS, \
                             CONFIG_PAGING_LEVELS)
#define sh_guest_get_eff_l1e \
        SHADOW_INTERNAL_NAME(sh_guest_get_eff_l1e, \
                             CONFIG_PAGING_LEVELS, \
                             CONFIG_PAGING_LEVELS)

/* sh_make_monitor_table only depends on the number of shadow levels */
#define sh_make_monitor_table \
        SHADOW_INTERNAL_NAME(sh_make_monitor_table, \
                             SHADOW_PAGING_LEVELS, \
                             SHADOW_PAGING_LEVELS)
#define sh_destroy_monitor_table \
        SHADOW_INTERNAL_NAME(sh_destroy_monitor_table, \
                             SHADOW_PAGING_LEVELS, \
                             SHADOW_PAGING_LEVELS)

#if SHADOW_PAGING_LEVELS == 3
#define MFN_FITS_IN_HVM_CR3(_MFN) !(mfn_x(_MFN) >> 20)
#endif
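
/* Illustrative note (an addition, not original text): a PAE HVM guest's CR3
 * only holds a 32-bit physical address, and the top-level table lives at
 * (mfn << PAGE_SHIFT) with PAGE_SHIFT == 12, so the mfn itself must fit in
 * 32 - 12 = 20 bits.  Shifting right by 20 and testing for zero checks
 * exactly that.
 */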

#if SHADOW_PAGING_LEVELS == 2
#define SH_PRI_pte "08x"
#else  /* SHADOW_PAGING_LEVELS >= 3 */
#ifndef __x86_64__
#define SH_PRI_pte "016llx"
#else
#define SH_PRI_pte "016lx"
#endif
#endif /* SHADOW_PAGING_LEVELS >= 3 */

#if GUEST_PAGING_LEVELS == 2
#define SH_PRI_gpte "08x"
#else  /* GUEST_PAGING_LEVELS >= 3 */
#ifndef __x86_64__
#define SH_PRI_gpte "016llx"
#else
#define SH_PRI_gpte "016lx"
#endif
#endif /* GUEST_PAGING_LEVELS >= 3 */

static inline u32
accumulate_guest_flags(struct vcpu *v, walk_t *gw)
{
    u32 accumulated_flags;

    // We accumulate the permission flags with bitwise ANDing.
    // This works for the PRESENT bit, RW bit, and USER bit.
    // For the NX bit, however, the polarity is wrong, so we accumulate the
    // inverse of the NX bit.
    //
    accumulated_flags =  guest_l1e_get_flags(gw->eff_l1e) ^ _PAGE_NX_BIT;
    accumulated_flags &= guest_l2e_get_flags(*gw->l2e) ^ _PAGE_NX_BIT;

    // Note that PAE guests do not have USER or RW or NX bits in their L3s.
    //
#if GUEST_PAGING_LEVELS == 3
    accumulated_flags &=
        ~_PAGE_PRESENT | (guest_l3e_get_flags(*gw->l3e) & _PAGE_PRESENT);
#elif GUEST_PAGING_LEVELS >= 4
    accumulated_flags &= guest_l3e_get_flags(*gw->l3e) ^ _PAGE_NX_BIT;
    accumulated_flags &= guest_l4e_get_flags(*gw->l4e) ^ _PAGE_NX_BIT;
#endif

    // Revert the NX bit back to its original polarity
    accumulated_flags ^= _PAGE_NX_BIT;

    // In 64-bit PV guests, the _PAGE_USER bit is implied in all guest
    // entries (since even the guest kernel runs in ring 3).
    //
    if ( (GUEST_PAGING_LEVELS == 4) && !is_hvm_vcpu(v) )
        accumulated_flags |= _PAGE_USER;

    return accumulated_flags;
}
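
/* Worked example of the NX trick above (illustrative): suppose the l1e has
 * NX clear (executable) but the l2e has NX set.  XORing each flags word
 * with _PAGE_NX_BIT makes the bit read as "execute allowed": the l1e
 * contributes 1, the l2e contributes 0, so the bitwise AND leaves 0.  The
 * final XOR flips that back to "NX set", i.e. the accumulated entry is
 * non-executable as soon as any level forbids execution, matching the
 * AND-accumulation used for the PRESENT, RW and USER bits.
 */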

#if (SHADOW_OPTIMIZATIONS & SHOPT_FAST_FAULT_PATH) && SHADOW_PAGING_LEVELS > 2
/******************************************************************************
 * We implement a "fast path" for two special cases: faults that require
 * MMIO emulation, and faults where the guest PTE is not present.  We
 * record these as shadow l1 entries that have reserved bits set in
 * them, so we can spot them immediately in the fault handler and handle
 * them without needing to hold the shadow lock or walk the guest
 * pagetables.
 *
 * This is only feasible for PAE and 64-bit Xen: 32-bit non-PAE PTEs don't
 * have reserved bits that we can use for this.
 */

#define SH_L1E_MAGIC 0xffffffff00000000ULL
static inline int sh_l1e_is_magic(shadow_l1e_t sl1e)
{
    return ((sl1e.l1 & SH_L1E_MAGIC) == SH_L1E_MAGIC);
}

/* Guest not present: a single magic value */
static inline shadow_l1e_t sh_l1e_gnp(void)
{
    return (shadow_l1e_t){ -1ULL };
}

static inline int sh_l1e_is_gnp(shadow_l1e_t sl1e)
{
    return (sl1e.l1 == sh_l1e_gnp().l1);
}

/* MMIO: an invalid PTE that contains the GFN of the equivalent guest l1e.
 * We store 28 bits of GFN in bits 4:32 of the entry.
 * The present bit is set, and the U/S and R/W bits are taken from the guest.
 * Bit 3 is always 0, to differentiate from gnp above. */
#define SH_L1E_MMIO_MAGIC       0xffffffff00000001ULL
#define SH_L1E_MMIO_MAGIC_MASK  0xffffffff00000009ULL
#define SH_L1E_MMIO_GFN_MASK    0x00000000fffffff0ULL
#define SH_L1E_MMIO_GFN_SHIFT   4

static inline shadow_l1e_t sh_l1e_mmio(gfn_t gfn, u32 gflags)
{
    return (shadow_l1e_t) { (SH_L1E_MMIO_MAGIC
                             | (gfn_x(gfn) << SH_L1E_MMIO_GFN_SHIFT)
                             | (gflags & (_PAGE_USER|_PAGE_RW))) };
}

static inline int sh_l1e_is_mmio(shadow_l1e_t sl1e)
{
    return ((sl1e.l1 & SH_L1E_MMIO_MAGIC_MASK) == SH_L1E_MMIO_MAGIC);
}

static inline gfn_t sh_l1e_mmio_get_gfn(shadow_l1e_t sl1e)
{
    return _gfn((sl1e.l1 & SH_L1E_MMIO_GFN_MASK) >> SH_L1E_MMIO_GFN_SHIFT);
}

static inline u32 sh_l1e_mmio_get_flags(shadow_l1e_t sl1e)
{
    return (u32)((sl1e.l1 & (_PAGE_USER|_PAGE_RW)));
}
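
/* Minimal dispatch sketch (illustrative only; the real logic lives in
 * sh_page_fault()).  Once the faulting shadow l1e has been read, e.g.
 * through the linear map, the magic encodings can be decoded without
 * taking the shadow lock or walking the guest tables:
 *
 *     if ( sh_l1e_is_magic(sl1e) )
 *     {
 *         if ( sh_l1e_is_gnp(sl1e) )
 *             // guest-not-present: reflect the fault back to the guest
 *             ...;
 *         else if ( sh_l1e_is_mmio(sl1e) )
 *             // MMIO: recover the guest frame and emulate the access
 *             handle_mmio_access(sh_l1e_mmio_get_gfn(sl1e),
 *                                sh_l1e_mmio_get_flags(sl1e));
 *     }
 *
 * handle_mmio_access() is a hypothetical stand-in for the emulation path;
 * it is not part of this header.
 */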

#else

#define sh_l1e_gnp() shadow_l1e_empty()
#define sh_l1e_mmio(_gfn, _flags) shadow_l1e_empty()
#define sh_l1e_is_magic(_e) (0)

#endif /* SHOPT_FAST_FAULT_PATH */

#endif /* _XEN_SHADOW_TYPES_H */

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */