xen/arch/x86/mm/shadow/types.h @ 16635:9d447ba0c99a

Shadow: tidy the virtual-TLB translation cache.
Signed-off-by: Tim Deegan <Tim.Deegan@citrix.com>
Author: Keir Fraser <keir.fraser@citrix.com>
Date:   Wed Dec 19 10:11:54 2007 +0000

/******************************************************************************
 * arch/x86/mm/shadow/types.h
 *
 * Parts of this code are Copyright (c) 2006 by XenSource Inc.
 * Parts of this code are Copyright (c) 2006 by Michael A Fetterman
 * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#ifndef _XEN_SHADOW_TYPES_H
#define _XEN_SHADOW_TYPES_H

// Map a shadow page
static inline void *
map_shadow_page(mfn_t smfn)
{
    // XXX -- Possible optimization/measurement question for 32-bit and PAE
    //        hypervisors:
    //        How often is this smfn already available in the shadow linear
    //        table?  Might it be worth checking that table first,
    //        presumably using the reverse map hint in the page_info of this
    //        smfn, rather than calling map_domain_page()?
    //
    return sh_map_domain_page(smfn);
}

// matching unmap for map_shadow_page()
static inline void
unmap_shadow_page(void *p)
{
    sh_unmap_domain_page(p);
}

/*
 * Define various types for handling pagetables, based on these options:
 * SHADOW_PAGING_LEVELS : Number of levels of shadow pagetables
 * GUEST_PAGING_LEVELS  : Number of levels of guest pagetables
 */

#if (CONFIG_PAGING_LEVELS < SHADOW_PAGING_LEVELS)
#error Cannot have more levels of shadow pagetables than host pagetables
#endif

#if (SHADOW_PAGING_LEVELS < GUEST_PAGING_LEVELS)
#error Cannot have more levels of guest pagetables than shadow pagetables
#endif

#if SHADOW_PAGING_LEVELS == 2
#define SHADOW_L1_PAGETABLE_ENTRIES  1024
#define SHADOW_L2_PAGETABLE_ENTRIES  1024
#define SHADOW_L1_PAGETABLE_SHIFT      12
#define SHADOW_L2_PAGETABLE_SHIFT      22
#endif

#if SHADOW_PAGING_LEVELS == 3
#define SHADOW_L1_PAGETABLE_ENTRIES   512
#define SHADOW_L2_PAGETABLE_ENTRIES   512
#define SHADOW_L3_PAGETABLE_ENTRIES     4
#define SHADOW_L1_PAGETABLE_SHIFT      12
#define SHADOW_L2_PAGETABLE_SHIFT      21
#define SHADOW_L3_PAGETABLE_SHIFT      30
#endif

#if SHADOW_PAGING_LEVELS == 4
#define SHADOW_L1_PAGETABLE_ENTRIES   512
#define SHADOW_L2_PAGETABLE_ENTRIES   512
#define SHADOW_L3_PAGETABLE_ENTRIES   512
#define SHADOW_L4_PAGETABLE_ENTRIES   512
#define SHADOW_L1_PAGETABLE_SHIFT      12
#define SHADOW_L2_PAGETABLE_SHIFT      21
#define SHADOW_L3_PAGETABLE_SHIFT      30
#define SHADOW_L4_PAGETABLE_SHIFT      39
#endif

/* Types of the shadow page tables */
typedef l1_pgentry_t shadow_l1e_t;
typedef l2_pgentry_t shadow_l2e_t;
#if SHADOW_PAGING_LEVELS >= 3
typedef l3_pgentry_t shadow_l3e_t;
#if SHADOW_PAGING_LEVELS >= 4
typedef l4_pgentry_t shadow_l4e_t;
#endif
#endif

/* Access functions for them */
static inline paddr_t shadow_l1e_get_paddr(shadow_l1e_t sl1e)
{ return l1e_get_paddr(sl1e); }
static inline paddr_t shadow_l2e_get_paddr(shadow_l2e_t sl2e)
{ return l2e_get_paddr(sl2e); }
#if SHADOW_PAGING_LEVELS >= 3
static inline paddr_t shadow_l3e_get_paddr(shadow_l3e_t sl3e)
{ return l3e_get_paddr(sl3e); }
#if SHADOW_PAGING_LEVELS >= 4
static inline paddr_t shadow_l4e_get_paddr(shadow_l4e_t sl4e)
{ return l4e_get_paddr(sl4e); }
#endif
#endif

static inline mfn_t shadow_l1e_get_mfn(shadow_l1e_t sl1e)
{ return _mfn(l1e_get_pfn(sl1e)); }
static inline mfn_t shadow_l2e_get_mfn(shadow_l2e_t sl2e)
{ return _mfn(l2e_get_pfn(sl2e)); }
#if SHADOW_PAGING_LEVELS >= 3
static inline mfn_t shadow_l3e_get_mfn(shadow_l3e_t sl3e)
{ return _mfn(l3e_get_pfn(sl3e)); }
#if SHADOW_PAGING_LEVELS >= 4
static inline mfn_t shadow_l4e_get_mfn(shadow_l4e_t sl4e)
{ return _mfn(l4e_get_pfn(sl4e)); }
#endif
#endif

static inline u32 shadow_l1e_get_flags(shadow_l1e_t sl1e)
{ return l1e_get_flags(sl1e); }
static inline u32 shadow_l2e_get_flags(shadow_l2e_t sl2e)
{ return l2e_get_flags(sl2e); }
#if SHADOW_PAGING_LEVELS >= 3
static inline u32 shadow_l3e_get_flags(shadow_l3e_t sl3e)
{ return l3e_get_flags(sl3e); }
#if SHADOW_PAGING_LEVELS >= 4
static inline u32 shadow_l4e_get_flags(shadow_l4e_t sl4e)
{ return l4e_get_flags(sl4e); }
#endif
#endif

static inline shadow_l1e_t
shadow_l1e_remove_flags(shadow_l1e_t sl1e, u32 flags)
{ l1e_remove_flags(sl1e, flags); return sl1e; }

static inline shadow_l1e_t shadow_l1e_empty(void)
{ return l1e_empty(); }
static inline shadow_l2e_t shadow_l2e_empty(void)
{ return l2e_empty(); }
#if SHADOW_PAGING_LEVELS >= 3
static inline shadow_l3e_t shadow_l3e_empty(void)
{ return l3e_empty(); }
#if SHADOW_PAGING_LEVELS >= 4
static inline shadow_l4e_t shadow_l4e_empty(void)
{ return l4e_empty(); }
#endif
#endif

static inline shadow_l1e_t shadow_l1e_from_mfn(mfn_t mfn, u32 flags)
{ return l1e_from_pfn(mfn_x(mfn), flags); }
static inline shadow_l2e_t shadow_l2e_from_mfn(mfn_t mfn, u32 flags)
{ return l2e_from_pfn(mfn_x(mfn), flags); }
#if SHADOW_PAGING_LEVELS >= 3
static inline shadow_l3e_t shadow_l3e_from_mfn(mfn_t mfn, u32 flags)
{ return l3e_from_pfn(mfn_x(mfn), flags); }
#if SHADOW_PAGING_LEVELS >= 4
static inline shadow_l4e_t shadow_l4e_from_mfn(mfn_t mfn, u32 flags)
{ return l4e_from_pfn(mfn_x(mfn), flags); }
#endif
#endif

#define shadow_l1_table_offset(a) l1_table_offset(a)
#define shadow_l2_table_offset(a) l2_table_offset(a)
#define shadow_l3_table_offset(a) l3_table_offset(a)
#define shadow_l4_table_offset(a) l4_table_offset(a)

/**************************************************************************/
/* Access to the linear mapping of shadow page tables. */

/* Offsets into each level of the linear mapping for a virtual address. */
#define shadow_l1_linear_offset(_a)                                          \
        (((_a) & VADDR_MASK) >> SHADOW_L1_PAGETABLE_SHIFT)
#define shadow_l2_linear_offset(_a)                                          \
        (((_a) & VADDR_MASK) >> SHADOW_L2_PAGETABLE_SHIFT)
#define shadow_l3_linear_offset(_a)                                          \
        (((_a) & VADDR_MASK) >> SHADOW_L3_PAGETABLE_SHIFT)
#define shadow_l4_linear_offset(_a)                                          \
        (((_a) & VADDR_MASK) >> SHADOW_L4_PAGETABLE_SHIFT)

/* Where to find each level of the linear mapping.  For PV guests, we use
 * the shadow linear-map self-entry as many times as we need.  For HVM
 * guests, the shadow doesn't have a linear-map self-entry so we must use
 * the monitor-table's linear-map entry N-1 times and then the shadow-map
 * entry once. */
#define __sh_linear_l1_table ((shadow_l1e_t *)(SH_LINEAR_PT_VIRT_START))
#define __sh_linear_l2_table ((shadow_l2e_t *)                               \
    (__sh_linear_l1_table + shadow_l1_linear_offset(SH_LINEAR_PT_VIRT_START)))

// shadow linear L3 and L4 tables only exist in 4 level paging...
#if SHADOW_PAGING_LEVELS == 4
#define __sh_linear_l3_table ((shadow_l3e_t *)                               \
    (__sh_linear_l2_table + shadow_l2_linear_offset(SH_LINEAR_PT_VIRT_START)))
#define __sh_linear_l4_table ((shadow_l4e_t *)                               \
    (__sh_linear_l3_table + shadow_l3_linear_offset(SH_LINEAR_PT_VIRT_START)))
#endif

#define sh_linear_l1_table(v) ({                                             \
    ASSERT(current == (v));                                                  \
    __sh_linear_l1_table;                                                    \
})

// XXX -- these should not be conditional on is_hvm_vcpu(v), but rather on
//        shadow_mode_external(d)...
//
#define sh_linear_l2_table(v) ({                                             \
    ASSERT(current == (v));                                                  \
    ((shadow_l2e_t *)                                                        \
     (is_hvm_vcpu(v) ? __linear_l1_table : __sh_linear_l1_table) +           \
     shadow_l1_linear_offset(SH_LINEAR_PT_VIRT_START));                      \
})

#if SHADOW_PAGING_LEVELS >= 4
#define sh_linear_l3_table(v) ({                                             \
    ASSERT(current == (v));                                                  \
    ((shadow_l3e_t *)                                                        \
     (is_hvm_vcpu(v) ? __linear_l2_table : __sh_linear_l2_table) +           \
     shadow_l2_linear_offset(SH_LINEAR_PT_VIRT_START));                      \
})

// we use l4_pgentry_t instead of shadow_l4e_t below because shadow_l4e_t is
// not defined for when xen_levels==4 & shadow_levels==3...
#define sh_linear_l4_table(v) ({                                             \
    ASSERT(current == (v));                                                  \
    ((l4_pgentry_t *)                                                        \
     (is_hvm_vcpu(v) ? __linear_l3_table : __sh_linear_l3_table) +           \
     shadow_l3_linear_offset(SH_LINEAR_PT_VIRT_START));                      \
})
#endif
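
/*
 * Illustrative sketch (editor's addition, not in the original file):
 * given the macros above, fetching the shadow l1e that maps a virtual
 * address is a single index into the linear mapping.  This assumes the
 * vcpu's shadow tables are currently installed and all intermediate
 * entries along the walk are present.
 */
static inline shadow_l1e_t
sh_linear_read_l1e_example(struct vcpu *v, unsigned long va)
{
    /* Index the linear L1 table by the VA's L1-linear offset. */
    return sh_linear_l1_table(v)[shadow_l1_linear_offset(va)];
}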


/* Type of the guest's frame numbers */
TYPE_SAFE(unsigned long,gfn)

#define SH_PRI_gfn "05lx"

#define VALID_GFN(m) ((m) != INVALID_GFN)

static inline int
valid_gfn(gfn_t m)
{
    return VALID_GFN(gfn_x(m));
}

static inline paddr_t
gfn_to_paddr(gfn_t gfn)
{
    return ((paddr_t)gfn_x(gfn)) << PAGE_SHIFT;
}

/* Override gfn_to_mfn to work with gfn_t */
#undef gfn_to_mfn
#define gfn_to_mfn(d, g, t) _gfn_to_mfn((d), gfn_x(g), (t))

#if GUEST_PAGING_LEVELS == 2

#include "../page-guest32.h"

#define GUEST_L1_PAGETABLE_ENTRIES  1024
#define GUEST_L2_PAGETABLE_ENTRIES  1024
#define GUEST_L1_PAGETABLE_SHIFT      12
#define GUEST_L2_PAGETABLE_SHIFT      22

/* Types of the guest's page tables */
typedef l1_pgentry_32_t guest_l1e_t;
typedef l2_pgentry_32_t guest_l2e_t;
typedef intpte_32_t guest_intpte_t;

/* Access functions for them */
static inline paddr_t guest_l1e_get_paddr(guest_l1e_t gl1e)
{ return l1e_get_paddr_32(gl1e); }
static inline paddr_t guest_l2e_get_paddr(guest_l2e_t gl2e)
{ return l2e_get_paddr_32(gl2e); }

static inline gfn_t guest_l1e_get_gfn(guest_l1e_t gl1e)
{ return _gfn(l1e_get_paddr_32(gl1e) >> PAGE_SHIFT); }
static inline gfn_t guest_l2e_get_gfn(guest_l2e_t gl2e)
{ return _gfn(l2e_get_paddr_32(gl2e) >> PAGE_SHIFT); }

static inline u32 guest_l1e_get_flags(guest_l1e_t gl1e)
{ return l1e_get_flags_32(gl1e); }
static inline u32 guest_l2e_get_flags(guest_l2e_t gl2e)
{ return l2e_get_flags_32(gl2e); }

static inline guest_l1e_t guest_l1e_add_flags(guest_l1e_t gl1e, u32 flags)
{ l1e_add_flags_32(gl1e, flags); return gl1e; }
static inline guest_l2e_t guest_l2e_add_flags(guest_l2e_t gl2e, u32 flags)
{ l2e_add_flags_32(gl2e, flags); return gl2e; }

static inline guest_l1e_t guest_l1e_from_gfn(gfn_t gfn, u32 flags)
{ return l1e_from_pfn_32(gfn_x(gfn), flags); }
static inline guest_l2e_t guest_l2e_from_gfn(gfn_t gfn, u32 flags)
{ return l2e_from_pfn_32(gfn_x(gfn), flags); }

#define guest_l1_table_offset(a) l1_table_offset_32(a)
#define guest_l2_table_offset(a) l2_table_offset_32(a)

/* The shadow types needed for the various levels. */
#define SH_type_l1_shadow  SH_type_l1_32_shadow
#define SH_type_l2_shadow  SH_type_l2_32_shadow
#define SH_type_fl1_shadow SH_type_fl1_32_shadow

#else /* GUEST_PAGING_LEVELS != 2 */

#if GUEST_PAGING_LEVELS == 3
#define GUEST_L1_PAGETABLE_ENTRIES   512
#define GUEST_L2_PAGETABLE_ENTRIES   512
#define GUEST_L3_PAGETABLE_ENTRIES     4
#define GUEST_L1_PAGETABLE_SHIFT      12
#define GUEST_L2_PAGETABLE_SHIFT      21
#define GUEST_L3_PAGETABLE_SHIFT      30
#else /* GUEST_PAGING_LEVELS == 4 */
#define GUEST_L1_PAGETABLE_ENTRIES   512
#define GUEST_L2_PAGETABLE_ENTRIES   512
#define GUEST_L3_PAGETABLE_ENTRIES   512
#define GUEST_L4_PAGETABLE_ENTRIES   512
#define GUEST_L1_PAGETABLE_SHIFT      12
#define GUEST_L2_PAGETABLE_SHIFT      21
#define GUEST_L3_PAGETABLE_SHIFT      30
#define GUEST_L4_PAGETABLE_SHIFT      39
#endif

/* Types of the guest's page tables */
typedef l1_pgentry_t guest_l1e_t;
typedef l2_pgentry_t guest_l2e_t;
typedef l3_pgentry_t guest_l3e_t;
#if GUEST_PAGING_LEVELS >= 4
typedef l4_pgentry_t guest_l4e_t;
#endif
typedef intpte_t guest_intpte_t;

/* Access functions for them */
static inline paddr_t guest_l1e_get_paddr(guest_l1e_t gl1e)
{ return l1e_get_paddr(gl1e); }
static inline paddr_t guest_l2e_get_paddr(guest_l2e_t gl2e)
{ return l2e_get_paddr(gl2e); }
static inline paddr_t guest_l3e_get_paddr(guest_l3e_t gl3e)
{ return l3e_get_paddr(gl3e); }
#if GUEST_PAGING_LEVELS >= 4
static inline paddr_t guest_l4e_get_paddr(guest_l4e_t gl4e)
{ return l4e_get_paddr(gl4e); }
#endif

static inline gfn_t guest_l1e_get_gfn(guest_l1e_t gl1e)
{ return _gfn(l1e_get_paddr(gl1e) >> PAGE_SHIFT); }
static inline gfn_t guest_l2e_get_gfn(guest_l2e_t gl2e)
{ return _gfn(l2e_get_paddr(gl2e) >> PAGE_SHIFT); }
static inline gfn_t guest_l3e_get_gfn(guest_l3e_t gl3e)
{ return _gfn(l3e_get_paddr(gl3e) >> PAGE_SHIFT); }
#if GUEST_PAGING_LEVELS >= 4
static inline gfn_t guest_l4e_get_gfn(guest_l4e_t gl4e)
{ return _gfn(l4e_get_paddr(gl4e) >> PAGE_SHIFT); }
#endif

static inline u32 guest_l1e_get_flags(guest_l1e_t gl1e)
{ return l1e_get_flags(gl1e); }
static inline u32 guest_l2e_get_flags(guest_l2e_t gl2e)
{ return l2e_get_flags(gl2e); }
static inline u32 guest_l3e_get_flags(guest_l3e_t gl3e)
{ return l3e_get_flags(gl3e); }
#if GUEST_PAGING_LEVELS >= 4
static inline u32 guest_l4e_get_flags(guest_l4e_t gl4e)
{ return l4e_get_flags(gl4e); }
#endif

static inline guest_l1e_t guest_l1e_add_flags(guest_l1e_t gl1e, u32 flags)
{ l1e_add_flags(gl1e, flags); return gl1e; }
static inline guest_l2e_t guest_l2e_add_flags(guest_l2e_t gl2e, u32 flags)
{ l2e_add_flags(gl2e, flags); return gl2e; }
static inline guest_l3e_t guest_l3e_add_flags(guest_l3e_t gl3e, u32 flags)
{ l3e_add_flags(gl3e, flags); return gl3e; }
#if GUEST_PAGING_LEVELS >= 4
static inline guest_l4e_t guest_l4e_add_flags(guest_l4e_t gl4e, u32 flags)
{ l4e_add_flags(gl4e, flags); return gl4e; }
#endif

static inline guest_l1e_t guest_l1e_from_gfn(gfn_t gfn, u32 flags)
{ return l1e_from_pfn(gfn_x(gfn), flags); }
static inline guest_l2e_t guest_l2e_from_gfn(gfn_t gfn, u32 flags)
{ return l2e_from_pfn(gfn_x(gfn), flags); }
static inline guest_l3e_t guest_l3e_from_gfn(gfn_t gfn, u32 flags)
{ return l3e_from_pfn(gfn_x(gfn), flags); }
#if GUEST_PAGING_LEVELS >= 4
static inline guest_l4e_t guest_l4e_from_gfn(gfn_t gfn, u32 flags)
{ return l4e_from_pfn(gfn_x(gfn), flags); }
#endif

#define guest_l1_table_offset(a) l1_table_offset(a)
#define guest_l2_table_offset(a) l2_table_offset(a)
#define guest_l3_table_offset(a) l3_table_offset(a)
#define guest_l4_table_offset(a) l4_table_offset(a)

/* The shadow types needed for the various levels. */
#if GUEST_PAGING_LEVELS == 3
#define SH_type_l1_shadow  SH_type_l1_pae_shadow
#define SH_type_fl1_shadow SH_type_fl1_pae_shadow
#define SH_type_l2_shadow  SH_type_l2_pae_shadow
#define SH_type_l2h_shadow SH_type_l2h_pae_shadow
#else
#define SH_type_l1_shadow  SH_type_l1_64_shadow
#define SH_type_fl1_shadow SH_type_fl1_64_shadow
#define SH_type_l2_shadow  SH_type_l2_64_shadow
#define SH_type_l2h_shadow SH_type_l2h_64_shadow
#define SH_type_l3_shadow  SH_type_l3_64_shadow
#define SH_type_l4_shadow  SH_type_l4_64_shadow
#endif

#endif /* GUEST_PAGING_LEVELS != 2 */


/* Type used for recording a walk through guest pagetables.  It is
 * filled in by the pagetable walk function, and also used as a cache
 * for later walks.  When we encounter a superpage l2e, we fabricate an
 * l1e for propagation to the shadow (for splintering guest superpages
 * into many shadow l1 entries).  */
typedef struct shadow_walk_t walk_t;
struct shadow_walk_t
{
    unsigned long va;    /* Address we were looking for */
#if GUEST_PAGING_LEVELS >= 3
#if GUEST_PAGING_LEVELS >= 4
    guest_l4e_t l4e;     /* Guest's level 4 entry */
#endif
    guest_l3e_t l3e;     /* Guest's level 3 entry */
#endif
    guest_l2e_t l2e;     /* Guest's level 2 entry */
    guest_l1e_t l1e;     /* Guest's level 1 entry (or fabrication) */
#if GUEST_PAGING_LEVELS >= 4
    mfn_t l4mfn;         /* MFN that the level 4 entry was in */
    mfn_t l3mfn;         /* MFN that the level 3 entry was in */
#endif
    mfn_t l2mfn;         /* MFN that the level 2 entry was in */
    mfn_t l1mfn;         /* MFN that the level 1 entry was in */
};
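
/*
 * Illustrative helper (editor's addition, not in the original file):
 * once a walk has completed, the frame the guest meant to map comes
 * from the l1e -- which for a superpage mapping is the fabricated
 * entry described above.
 */
static inline gfn_t
walk_target_gfn_example(const walk_t *gw)
{
    return guest_l1e_get_gfn(gw->l1e);
}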

/* macros for dealing with the internal names of the shadow code's
 * external entry points.
 */
#define INTERNAL_NAME(name) \
    SHADOW_INTERNAL_NAME(name, SHADOW_PAGING_LEVELS, GUEST_PAGING_LEVELS)

/* macros for renaming the primary entry points, so that they are more
 * easily distinguished in a debugger.
 */
#define sh_page_fault              INTERNAL_NAME(sh_page_fault)
#define sh_invlpg                  INTERNAL_NAME(sh_invlpg)
#define sh_gva_to_gfn              INTERNAL_NAME(sh_gva_to_gfn)
#define sh_update_cr3              INTERNAL_NAME(sh_update_cr3)
#define sh_rm_write_access_from_l1 INTERNAL_NAME(sh_rm_write_access_from_l1)
#define sh_rm_mappings_from_l1     INTERNAL_NAME(sh_rm_mappings_from_l1)
#define sh_remove_l1_shadow        INTERNAL_NAME(sh_remove_l1_shadow)
#define sh_remove_l2_shadow        INTERNAL_NAME(sh_remove_l2_shadow)
#define sh_remove_l3_shadow        INTERNAL_NAME(sh_remove_l3_shadow)
#define sh_map_and_validate_gl4e   INTERNAL_NAME(sh_map_and_validate_gl4e)
#define sh_map_and_validate_gl3e   INTERNAL_NAME(sh_map_and_validate_gl3e)
#define sh_map_and_validate_gl2e   INTERNAL_NAME(sh_map_and_validate_gl2e)
#define sh_map_and_validate_gl2he  INTERNAL_NAME(sh_map_and_validate_gl2he)
#define sh_map_and_validate_gl1e   INTERNAL_NAME(sh_map_and_validate_gl1e)
#define sh_destroy_l4_shadow       INTERNAL_NAME(sh_destroy_l4_shadow)
#define sh_destroy_l3_shadow       INTERNAL_NAME(sh_destroy_l3_shadow)
#define sh_destroy_l2_shadow       INTERNAL_NAME(sh_destroy_l2_shadow)
#define sh_destroy_l1_shadow       INTERNAL_NAME(sh_destroy_l1_shadow)
#define sh_unhook_32b_mappings     INTERNAL_NAME(sh_unhook_32b_mappings)
#define sh_unhook_pae_mappings     INTERNAL_NAME(sh_unhook_pae_mappings)
#define sh_unhook_64b_mappings     INTERNAL_NAME(sh_unhook_64b_mappings)
#define sh_paging_mode             INTERNAL_NAME(sh_paging_mode)
#define sh_detach_old_tables       INTERNAL_NAME(sh_detach_old_tables)
#define sh_x86_emulate_write       INTERNAL_NAME(sh_x86_emulate_write)
#define sh_x86_emulate_cmpxchg     INTERNAL_NAME(sh_x86_emulate_cmpxchg)
#define sh_x86_emulate_cmpxchg8b   INTERNAL_NAME(sh_x86_emulate_cmpxchg8b)
#define sh_audit_l1_table          INTERNAL_NAME(sh_audit_l1_table)
#define sh_audit_fl1_table         INTERNAL_NAME(sh_audit_fl1_table)
#define sh_audit_l2_table          INTERNAL_NAME(sh_audit_l2_table)
#define sh_audit_l3_table          INTERNAL_NAME(sh_audit_l3_table)
#define sh_audit_l4_table          INTERNAL_NAME(sh_audit_l4_table)
#define sh_guess_wrmap             INTERNAL_NAME(sh_guess_wrmap)
#define sh_clear_shadow_entry      INTERNAL_NAME(sh_clear_shadow_entry)

/* The sh_guest_(map|get)_* functions only depend on the number of config
 * levels.
 */
#define sh_guest_map_l1e \
        SHADOW_INTERNAL_NAME(sh_guest_map_l1e, \
                             CONFIG_PAGING_LEVELS, \
                             CONFIG_PAGING_LEVELS)
#define sh_guest_get_eff_l1e \
        SHADOW_INTERNAL_NAME(sh_guest_get_eff_l1e, \
                             CONFIG_PAGING_LEVELS, \
                             CONFIG_PAGING_LEVELS)

/* sh_make_monitor_table only depends on the number of shadow levels */
#define sh_make_monitor_table \
        SHADOW_INTERNAL_NAME(sh_make_monitor_table, \
                             SHADOW_PAGING_LEVELS, \
                             SHADOW_PAGING_LEVELS)
#define sh_destroy_monitor_table \
        SHADOW_INTERNAL_NAME(sh_destroy_monitor_table, \
                             SHADOW_PAGING_LEVELS, \
                             SHADOW_PAGING_LEVELS)


#if SHADOW_PAGING_LEVELS == 3
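/* Editor's note (assumption, not in the original file): a PAE HVM
 * guest's CR3 is a 32-bit value, so the top-level shadow must live
 * below 4GB -- i.e. its frame number must fit in 20 bits. */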
#define MFN_FITS_IN_HVM_CR3(_MFN) (!(mfn_x(_MFN) >> 20))
#endif

#if SHADOW_PAGING_LEVELS == 2
#define SH_PRI_pte "08x"
#else /* SHADOW_PAGING_LEVELS >= 3 */
#ifndef __x86_64__
#define SH_PRI_pte "016llx"
#else
#define SH_PRI_pte "016lx"
#endif
#endif /* SHADOW_PAGING_LEVELS >= 3 */

#if GUEST_PAGING_LEVELS == 2
#define SH_PRI_gpte "08x"
#else /* GUEST_PAGING_LEVELS >= 3 */
#ifndef __x86_64__
#define SH_PRI_gpte "016llx"
#else
#define SH_PRI_gpte "016lx"
#endif
#endif /* GUEST_PAGING_LEVELS >= 3 */
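
/*
 * Usage sketch (editor's addition): these macros supply the width and
 * length part of a printf format, so a raw PTE prints as, e.g.:
 *     printk("sl1e=%" SH_PRI_pte "\n", sl1e.l1);
 */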


#if (SHADOW_OPTIMIZATIONS & SHOPT_FAST_FAULT_PATH) && SHADOW_PAGING_LEVELS > 2
/******************************************************************************
 * We implement a "fast path" for two special cases: faults that require
 * MMIO emulation, and faults where the guest PTE is not present.  We
 * record these as shadow l1 entries that have reserved bits set in
 * them, so we can spot them immediately in the fault handler and handle
 * them without needing to hold the shadow lock or walk the guest
 * pagetables.
 *
 * This is only feasible for PAE and 64-bit Xen: 32-bit non-PAE PTEs don't
 * have reserved bits that we can use for this.
 */

#define SH_L1E_MAGIC 0xffffffff00000001ULL
static inline int sh_l1e_is_magic(shadow_l1e_t sl1e)
{
    return ((sl1e.l1 & SH_L1E_MAGIC) == SH_L1E_MAGIC);
}

/* Guest not present: a single magic value */
static inline shadow_l1e_t sh_l1e_gnp(void)
{
    return (shadow_l1e_t){ -1ULL };
}

static inline int sh_l1e_is_gnp(shadow_l1e_t sl1e)
{
    return (sl1e.l1 == sh_l1e_gnp().l1);
}

/* MMIO: an invalid PTE that contains the GFN of the equivalent guest l1e.
 * We store 28 bits of GFN in bits 31:4 of the entry.
 * The present bit is set, and the U/S and R/W bits are taken from the guest.
 * Bit 3 is always 0, to differentiate from gnp above. */
#define SH_L1E_MMIO_MAGIC       0xffffffff00000001ULL
#define SH_L1E_MMIO_MAGIC_MASK  0xffffffff00000009ULL
#define SH_L1E_MMIO_GFN_MASK    0x00000000fffffff0ULL
#define SH_L1E_MMIO_GFN_SHIFT   4

static inline shadow_l1e_t sh_l1e_mmio(gfn_t gfn, u32 gflags)
{
    return (shadow_l1e_t) { (SH_L1E_MMIO_MAGIC
                             | (gfn_x(gfn) << SH_L1E_MMIO_GFN_SHIFT)
                             | (gflags & (_PAGE_USER|_PAGE_RW))) };
}

static inline int sh_l1e_is_mmio(shadow_l1e_t sl1e)
{
    return ((sl1e.l1 & SH_L1E_MMIO_MAGIC_MASK) == SH_L1E_MMIO_MAGIC);
}

static inline gfn_t sh_l1e_mmio_get_gfn(shadow_l1e_t sl1e)
{
    return _gfn((sl1e.l1 & SH_L1E_MMIO_GFN_MASK) >> SH_L1E_MMIO_GFN_SHIFT);
}

static inline u32 sh_l1e_mmio_get_flags(shadow_l1e_t sl1e)
{
    return (u32)((sl1e.l1 & (_PAGE_USER|_PAGE_RW)));
}
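
/*
 * Illustrative self-check (editor's addition, not in the original
 * file): encoding an MMIO gfn into a magic sl1e and decoding it again
 * should round-trip, provided the gfn fits in the 28 bits available.
 */
static inline int sh_l1e_mmio_roundtrip_example(gfn_t gfn, u32 gflags)
{
    shadow_l1e_t e = sh_l1e_mmio(gfn, gflags);
    return sh_l1e_is_magic(e) && sh_l1e_is_mmio(e)
        && gfn_x(sh_l1e_mmio_get_gfn(e)) == gfn_x(gfn)
        && sh_l1e_mmio_get_flags(e) == (gflags & (_PAGE_USER|_PAGE_RW));
}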

#else

#define sh_l1e_gnp() shadow_l1e_empty()
#define sh_l1e_mmio(_gfn, _flags) shadow_l1e_empty()
#define sh_l1e_is_magic(_e) (0)

#endif /* SHOPT_FAST_FAULT_PATH */


#endif /* _XEN_SHADOW_TYPES_H */

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */