direct-io.hg: view xen/include/asm-x86/shadow_64.h @ 11135:88e6bd5e2b54

Whitespace clean-ups.

Signed-off-by: Steven Hand <steven@xensource.com>
author: shand@kneesaa.uk.xensource.com
date:   Wed Aug 16 11:36:13 2006 +0100
parent: 1507021dccdf
/******************************************************************************
 * include/asm-x86/shadow_64.h
 *
 * Copyright (c) 2005 Michael A Fetterman
 * Based on an earlier implementation by Ian Pratt et al
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
/*
 * Jun Nakajima <jun.nakajima@intel.com>
 * Chengyuan Li <chengyuan.li@intel.com>
 *
 * Extended to support 64-bit guests.
 */
#ifndef _XEN_SHADOW_64_H
#define _XEN_SHADOW_64_H
#include <asm/shadow.h>
#include <asm/shadow_ops.h>
#include <asm/hvm/hvm.h>

/*
 * The naming convention of the shadow_ops:
 * MODE_<pgentry size>_<guest paging levels>_HANDLER
 */
extern struct shadow_ops MODE_64_2_HANDLER;
extern struct shadow_ops MODE_64_3_HANDLER;
extern struct shadow_ops MODE_64_PAE_HANDLER;
#if CONFIG_PAGING_LEVELS == 4
extern struct shadow_ops MODE_64_4_HANDLER;
#endif

#if CONFIG_PAGING_LEVELS == 3
#define L4_PAGETABLE_SHIFT      39
#define L4_PAGETABLE_ENTRIES    (1<<PAGETABLE_ORDER)
typedef struct { intpte_t l4; } l4_pgentry_t;
#define is_guest_l4_slot(_s)    (1)
#endif

#define READ_FAULT  0
#define WRITE_FAULT 1

#define ERROR_P     1
#define ERROR_W     2
#define ERROR_U     4
#define ERROR_I     (1 << 4)

#define X86_64_SHADOW_DEBUG 0

#if X86_64_SHADOW_DEBUG
#define ESH_LOG(_f, _a...)      \
    printk(_f, ##_a)
#else
#define ESH_LOG(_f, _a...) ((void)0)
#endif

#define L_MASK  0xff

#define PAE_PAGING_LEVELS   3

#define ROOT_LEVEL_64   PAGING_L4
#define ROOT_LEVEL_32   PAGING_L2

#define DIRECT_ENTRY    (4UL << 16)
#define SHADOW_ENTRY    (2UL << 16)
#define GUEST_ENTRY     (1UL << 16)

#define GET_ENTRY   (2UL << 8)
#define SET_ENTRY   (1UL << 8)

#define PAGETABLE_ENTRIES   (1<<PAGETABLE_ORDER)

/* For 32-bit VMX guest to allocate shadow L1 & L2 */
#define SL1_ORDER   1
#define SL2_ORDER   2
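
/*
 * pgentry_64_t is a level-independent view of a 64-bit page-table entry;
 * the entry_*() helpers below read and update its address and flag bits
 * without caring which paging level the entry belongs to.
 */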
typedef struct { intpte_t lo; } pgentry_64_t;
#define shadow_level_to_type(l)    (l << 29)
#define shadow_type_to_level(t)    (t >> 29)

#define entry_get_value(_x)         ((_x).lo)
#define entry_get_pfn(_x)           \
    (((_x).lo & (PADDR_MASK&PAGE_MASK)) >> PAGE_SHIFT)
#define entry_get_paddr(_x)         (((_x).lo & (PADDR_MASK&PAGE_MASK)))
#define entry_get_flags(_x)         (get_pte_flags((_x).lo))

#define entry_empty()               ((pgentry_64_t) { 0 })
#define entry_from_pfn(pfn, flags)  \
    ((pgentry_64_t) { ((intpte_t)(pfn) << PAGE_SHIFT) | put_pte_flags(flags) })
#define entry_from_page(page, flags) (entry_from_pfn(page_to_mfn(page),(flags)))
#define entry_add_flags(x, flags)    ((x).lo |= put_pte_flags(flags))
#define entry_remove_flags(x, flags) ((x).lo &= ~put_pte_flags(flags))
#define entry_has_changed(x,y,flags) \
    ( !!(((x).lo ^ (y).lo) & ((PADDR_MASK&PAGE_MASK)|put_pte_flags(flags))) )

/******************************************************************************/
/*
 * The macros and inlines below are for 32-bit PAE guests.
 */
#define PAE_PDPT_RESERVED       0x1e6 /* [8:5], [2,1] */

#define PAE_SHADOW_SELF_ENTRY   259
#define PAE_L3_PAGETABLE_ENTRIES    4

/******************************************************************************/
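/*
 * Return the page-table index of 'va' at the given level (1-4).
 * Configurations that do not walk a real L4 table (3-level builds,
 * GUEST_PGENTRY_32, GUEST_32PAE) redirect the level-4 lookup to the PAE
 * self-mapping slot instead.  Returns -1 for any other level.
 */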
static inline int table_offset_64(unsigned long va, int level)
{
    switch(level) {
        case 1:
            return  (((va) >> L1_PAGETABLE_SHIFT) & (L1_PAGETABLE_ENTRIES - 1));
        case 2:
            return  (((va) >> L2_PAGETABLE_SHIFT) & (L2_PAGETABLE_ENTRIES - 1));
        case 3:
            return  (((va) >> L3_PAGETABLE_SHIFT) & (L3_PAGETABLE_ENTRIES - 1));
#if CONFIG_PAGING_LEVELS == 3
        case 4:
            return PAE_SHADOW_SELF_ENTRY;
#endif

#if CONFIG_PAGING_LEVELS >= 4
#ifndef GUEST_PGENTRY_32
#ifndef GUEST_32PAE
        case 4:
            return  (((va) >> L4_PAGETABLE_SHIFT) & (L4_PAGETABLE_ENTRIES - 1));
#else
        case 4:
            return PAE_SHADOW_SELF_ENTRY;
#endif
#else
        case 4:
            return PAE_SHADOW_SELF_ENTRY;
#endif
#endif
        default:
            return -1;
    }
}

/*****************************************************************************/
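/*
 * For a 32-bit PAE guest the four guest PDPTEs occupy a quarter of one
 * page; 'index' (the guest's CR3 index value, cf. get_cr3_idxval()) selects
 * which quarter, so the level-3 offset is index * 4 plus the PDPT slot of
 * 'va'.  Other levels are indexed exactly as in table_offset_64().
 */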
#if defined( GUEST_32PAE )
static inline int guest_table_offset_64(unsigned long va, int level, unsigned int index)
{
    switch(level) {
        case 1:
            return  (((va) >> L1_PAGETABLE_SHIFT) & (L1_PAGETABLE_ENTRIES - 1));
        case 2:
            return  (((va) >> L2_PAGETABLE_SHIFT) & (L2_PAGETABLE_ENTRIES - 1));
        case 3:
            return  (index * 4 + ((va) >> L3_PAGETABLE_SHIFT));
#if CONFIG_PAGING_LEVELS == 3
        case 4:
            return PAE_SHADOW_SELF_ENTRY;
#endif

#if CONFIG_PAGING_LEVELS >= 4
#ifndef GUEST_PGENTRY_32
        case 4:
            return  (((va) >> L4_PAGETABLE_SHIFT) & (L4_PAGETABLE_ENTRIES - 1));
#else
        case 4:
            return PAE_SHADOW_SELF_ENTRY;
#endif
#endif
        default:
            return -1;
    }
}

#define SH_GUEST_32PAE 1
#else
#define guest_table_offset_64(va, level, index) \
    table_offset_64((va),(level))
#define SH_GUEST_32PAE 0
#endif

/********************************************************************************/
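/*
 * Drain the domain's out-of-sync list: release each entry and return it
 * to the free list.
 */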
static inline void free_out_of_sync_state(struct domain *d)
{
    struct out_of_sync_entry *entry;

    // NB: Be careful not to call something that manipulates this list
    //     while walking it.  Remove one item at a time, and always
    //     restart from start of list.
    //
    while ( (entry = d->arch.out_of_sync) )
    {
        d->arch.out_of_sync = entry->next;
        release_out_of_sync_entry(d, entry);

        entry->next = d->arch.out_of_sync_free;
        d->arch.out_of_sync_free = entry;
    }
}
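
/*
 * Core page-table walker.  'flag' packs together the table to walk
 * (SHADOW_ENTRY, GUEST_ENTRY, or DIRECT_ENTRY for the domain's phys_table),
 * the operation (GET_ENTRY or SET_ENTRY), and the target level (low byte,
 * masked by L_MASK).  The walk starts at the appropriate root and descends
 * to the target level, mapping each intermediate table; it returns 0 if a
 * non-present entry is met on the way, otherwise it reads or writes *e_p
 * and returns 1.
 */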
static inline int __entry(
    struct vcpu *v, unsigned long va, pgentry_64_t *e_p, u32 flag)
{
    int i;
    pgentry_64_t *le_e;
    pgentry_64_t *le_p = NULL;
    pgentry_64_t *phys_vtable = NULL;
    unsigned long mfn;
    int index;
    u32 level = flag & L_MASK;
    struct domain *d = v->domain;
    int root_level;
    unsigned int base_idx;

    base_idx = get_cr3_idxval(v);

    if ( flag & SHADOW_ENTRY )
    {
        root_level = ROOT_LEVEL_64;
        index = table_offset_64(va, root_level);
        le_e = (pgentry_64_t *)&v->arch.shadow_vtable[index];
    }
    else if ( flag & GUEST_ENTRY )
    {
        root_level = v->domain->arch.ops->guest_paging_levels;
        if ( root_level == PAGING_L3 )
            index = guest_table_offset_64(va, PAGING_L3, base_idx);
        else
            index = guest_table_offset_64(va, root_level, base_idx);
        le_e = (pgentry_64_t *)&v->arch.guest_vtable[index];
    }
    else /* direct mode */
    {
        root_level = PAE_PAGING_LEVELS;
        index = table_offset_64(va, root_level);
        phys_vtable = (pgentry_64_t *)map_domain_page(
            pagetable_get_pfn(v->domain->arch.phys_table));
        le_e = &phys_vtable[index];
    }

    /*
     * If it's not external mode, then mfn should be machine physical.
     */
    for ( i = root_level - level; i > 0; i-- )
    {
        if ( unlikely(!(entry_get_flags(*le_e) & _PAGE_PRESENT)) )
        {
            if ( le_p )
                unmap_domain_page(le_p);

            if ( phys_vtable )
                unmap_domain_page(phys_vtable);

            return 0;
        }

        mfn = entry_get_pfn(*le_e);
        if ( (flag & GUEST_ENTRY) && shadow_mode_translate(d) )
            mfn = get_mfn_from_gpfn(mfn);

        if ( le_p )
            unmap_domain_page(le_p);
        le_p = (pgentry_64_t *)map_domain_page(mfn);

        if ( flag & SHADOW_ENTRY )
            index = table_offset_64(va, (level + i - 1));
        else
            index = guest_table_offset_64(va, (level + i - 1), base_idx);
        le_e = &le_p[index];
    }

    if ( flag & SET_ENTRY )
        *le_e = *e_p;
    else
        *e_p = *le_e;

    if ( le_p )
        unmap_domain_page(le_p);

    if ( phys_vtable )
        unmap_domain_page(phys_vtable);

    return 1;
}
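
/*
 * NULL-checked wrapper around __entry().  The accessor macros below simply
 * OR together the table selector, the operation, and the paging level to
 * form the flag word.
 */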
static inline int __rw_entry(
    struct vcpu *v, unsigned long va, void *e_p, u32 flag)
{
    pgentry_64_t *e = (pgentry_64_t *)e_p;

    if (e) {
        return __entry(v, va, e, flag);
    }

    return 0;
}

#define __shadow_set_l4e(v, va, value) \
    __rw_entry(v, va, value, SHADOW_ENTRY | SET_ENTRY | PAGING_L4)
#define __shadow_get_l4e(v, va, sl4e) \
    __rw_entry(v, va, sl4e, SHADOW_ENTRY | GET_ENTRY | PAGING_L4)
#define __shadow_set_l3e(v, va, value) \
    __rw_entry(v, va, value, SHADOW_ENTRY | SET_ENTRY | PAGING_L3)
#define __shadow_get_l3e(v, va, sl3e) \
    __rw_entry(v, va, sl3e, SHADOW_ENTRY | GET_ENTRY | PAGING_L3)
#define __shadow_set_l2e(v, va, value) \
    __rw_entry(v, va, value, SHADOW_ENTRY | SET_ENTRY | PAGING_L2)
#define __shadow_get_l2e(v, va, sl2e) \
    __rw_entry(v, va, sl2e, SHADOW_ENTRY | GET_ENTRY | PAGING_L2)
#define __shadow_set_l1e(v, va, value) \
    __rw_entry(v, va, value, SHADOW_ENTRY | SET_ENTRY | PAGING_L1)
#define __shadow_get_l1e(v, va, sl1e) \
    __rw_entry(v, va, sl1e, SHADOW_ENTRY | GET_ENTRY | PAGING_L1)

#define __guest_set_l4e(v, va, value) \
    __rw_entry(v, va, value, GUEST_ENTRY | SET_ENTRY | PAGING_L4)
#define __guest_get_l4e(v, va, gl4e) \
    __rw_entry(v, va, gl4e, GUEST_ENTRY | GET_ENTRY | PAGING_L4)
#define __guest_set_l3e(v, va, value) \
    __rw_entry(v, va, value, GUEST_ENTRY | SET_ENTRY | PAGING_L3)
#define __guest_get_l3e(v, va, gl3e) \
    __rw_entry(v, va, gl3e, GUEST_ENTRY | GET_ENTRY | PAGING_L3)

#define __direct_set_l3e(v, va, value) \
    __rw_entry(v, va, value, DIRECT_ENTRY | SET_ENTRY | PAGING_L3)
#define __direct_get_l3e(v, va, sl3e) \
    __rw_entry(v, va, sl3e, DIRECT_ENTRY | GET_ENTRY | PAGING_L3)
#define __direct_set_l2e(v, va, value) \
    __rw_entry(v, va, value, DIRECT_ENTRY | SET_ENTRY | PAGING_L2)
#define __direct_get_l2e(v, va, sl2e) \
    __rw_entry(v, va, sl2e, DIRECT_ENTRY | GET_ENTRY | PAGING_L2)
#define __direct_set_l1e(v, va, value) \
    __rw_entry(v, va, value, DIRECT_ENTRY | SET_ENTRY | PAGING_L1)
#define __direct_get_l1e(v, va, sl1e) \
    __rw_entry(v, va, sl1e, DIRECT_ENTRY | GET_ENTRY | PAGING_L1)
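
/*
 * The L2/L1 guest accessors below also handle 2-level 32-bit guests: the
 * wrapper macros pass sizeof(*entry), so a 4-byte entry is accessed
 * directly through guest_vtable (and, for L1, via a mapped guest L1 page),
 * while an 8-byte entry goes through the generic __rw_entry() walker.
 */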
static inline int __guest_set_l2e(
    struct vcpu *v, unsigned long va, void *value, int size)
{
    switch(size) {
        case 4:
            // 32-bit guest
            {
                l2_pgentry_32_t *l2va;

                l2va = (l2_pgentry_32_t *)v->arch.guest_vtable;
                if (value)
                    l2va[l2_table_offset_32(va)] = *(l2_pgentry_32_t *)value;
                return 1;
            }
        case 8:
            return __rw_entry(v, va, value, GUEST_ENTRY | SET_ENTRY | PAGING_L2);
        default:
            BUG();
            return 0;
    }
    return 0;
}

#define __guest_set_l2e(v, va, value) \
    __guest_set_l2e(v, (unsigned long)va, value, sizeof(*value))

static inline int __guest_get_l2e(
    struct vcpu *v, unsigned long va, void *gl2e, int size)
{
    switch(size) {
        case 4:
            // 32-bit guest
            {
                l2_pgentry_32_t *l2va;
                l2va = (l2_pgentry_32_t *)v->arch.guest_vtable;
                if (gl2e)
                    *(l2_pgentry_32_t *)gl2e = l2va[l2_table_offset_32(va)];
                return 1;
            }
        case 8:
            return __rw_entry(v, va, gl2e, GUEST_ENTRY | GET_ENTRY | PAGING_L2);
        default:
            BUG();
            return 0;
    }
    return 0;
}

#define __guest_get_l2e(v, va, gl2e) \
    __guest_get_l2e(v, (unsigned long)va, gl2e, sizeof(*gl2e))

static inline int __guest_set_l1e(
    struct vcpu *v, unsigned long va, void *value, int size)
{
    switch(size) {
        case 4:
            // 32-bit guest
            {
                l2_pgentry_32_t gl2e;
                l1_pgentry_32_t *l1va;
                unsigned long l1mfn;

                if (!__guest_get_l2e(v, va, &gl2e))
                    return 0;
                if (unlikely(!(l2e_get_flags_32(gl2e) & _PAGE_PRESENT)))
                    return 0;

                l1mfn = get_mfn_from_gpfn(
                    l2e_get_pfn(gl2e));

                l1va = (l1_pgentry_32_t *)map_domain_page(l1mfn);
                if (value)
                    l1va[l1_table_offset_32(va)] = *(l1_pgentry_32_t *)value;
                unmap_domain_page(l1va);

                return 1;
            }

        case 8:
            return __rw_entry(v, va, value, GUEST_ENTRY | SET_ENTRY | PAGING_L1);
        default:
            BUG();
            return 0;
    }
    return 0;
}

#define __guest_set_l1e(v, va, value) \
    __guest_set_l1e(v, (unsigned long)va, value, sizeof(*value))

static inline int __guest_get_l1e(
    struct vcpu *v, unsigned long va, void *gl1e, int size)
{
    switch(size) {
        case 4:
            // 32-bit guest
            {
                l2_pgentry_32_t gl2e;
                l1_pgentry_32_t *l1va;
                unsigned long l1mfn;

                if (!(__guest_get_l2e(v, va, &gl2e)))
                    return 0;

                if (unlikely(!(l2e_get_flags_32(gl2e) & _PAGE_PRESENT)))
                    return 0;

                l1mfn = get_mfn_from_gpfn(
                    l2e_get_pfn(gl2e));
                l1va = (l1_pgentry_32_t *)map_domain_page(l1mfn);
                if (gl1e)
                    *(l1_pgentry_32_t *)gl1e = l1va[l1_table_offset_32(va)];
                unmap_domain_page(l1va);
                return 1;
            }
        case 8:
            // 64-bit guest
            return __rw_entry(v, va, gl1e, GUEST_ENTRY | GET_ENTRY | PAGING_L1);
        default:
            BUG();
            return 0;
    }
    return 0;
}

#define __guest_get_l1e(v, va, gl1e) \
    __guest_get_l1e(v, (unsigned long)va, gl1e, sizeof(*gl1e))
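
/*
 * Construct the shadow entry for guest entry *gle_p, given the mfn of the
 * shadow table it should reference.  PSE L2 superpage entries lose
 * _PAGE_PSE (they reference a splintered fl1 shadow), and any of their L1
 * entries that map guest page tables are write-protected when log-dirty
 * mode is on or the guest entry is still clean.  PAE-guest L3 entries are
 * copied as-is; everything else gains _PAGE_RW|_PAGE_ACCESSED (with the
 * _PAGE_AVAIL bits cleared) and the guest entry is marked accessed.
 */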
static inline void entry_general(
    struct domain *d,
    pgentry_64_t *gle_p,
    pgentry_64_t *sle_p,
    unsigned long smfn, u32 level)
{
    pgentry_64_t gle = *gle_p;
    pgentry_64_t sle;

    sle = entry_empty();
    if ( (entry_get_flags(gle) & _PAGE_PRESENT) && (smfn != 0) )
    {
        if ((entry_get_flags(gle) & _PAGE_PSE) && level == PAGING_L2) {
            sle = entry_from_pfn(smfn, entry_get_flags(gle));
            entry_remove_flags(sle, _PAGE_PSE);

            if ( shadow_mode_log_dirty(d) ||
                 !(entry_get_flags(gle) & _PAGE_DIRTY) )
            {
                pgentry_64_t *l1_p;
                int i;

                l1_p = (pgentry_64_t *)map_domain_page(smfn);
                for (i = 0; i < L1_PAGETABLE_ENTRIES; i++)
                {
                    if ( mfn_is_page_table(entry_get_pfn(l1_p[i])) )
                        entry_remove_flags(l1_p[i], _PAGE_RW);
                }

                unmap_domain_page(l1_p);
            }
        } else {
            if (d->arch.ops->guest_paging_levels <= PAGING_L3
                && level == PAGING_L3) {
                sle = entry_from_pfn(smfn, entry_get_flags(gle));
            } else {
                sle = entry_from_pfn(
                    smfn,
                    (entry_get_flags(gle) | _PAGE_RW | _PAGE_ACCESSED) & ~_PAGE_AVAIL);
                entry_add_flags(gle, _PAGE_ACCESSED);
            }
        }
        // XXX mafetter: Hmm...
        //     Shouldn't the dirty log be checked/updated here?
        //     Actually, it needs to be done in this function's callers.
        //
        *gle_p = gle;
    }

    if ( entry_get_value(sle) || entry_get_value(gle) )
        SH_VVLOG("%s: gpde=%lx, new spde=%lx", __func__,
                 entry_get_value(gle), entry_get_value(sle));

    *sle_p = sle;
}
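
/*
 * Look up the shadow mfn for the guest entry's target (the fl1 shadow for
 * a PSE L2 superpage, otherwise the shadow of the next-lower level) and
 * fold it into the shadow entry via entry_general().
 */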
static inline void entry_propagate_from_guest(
    struct domain *d, pgentry_64_t *gle_p, pgentry_64_t *sle_p, u32 level)
{
    pgentry_64_t gle = *gle_p;
    unsigned long smfn = 0;

    if ( entry_get_flags(gle) & _PAGE_PRESENT ) {
        if ((entry_get_flags(gle) & _PAGE_PSE) && level == PAGING_L2) {
            smfn = __shadow_status(d, entry_get_pfn(gle), PGT_fl1_shadow);
        } else {
            smfn = __shadow_status(d, entry_get_pfn(gle),
                                   shadow_level_to_type((level - 1)));
        }
    }
    entry_general(d, gle_p, sle_p, smfn, level);
}
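
/*
 * Recompute the shadow entry from a changed guest entry and install it.
 * If the _PAGE_PRESENT bit of the shadow entry changes, the shadow page
 * reference counts are adjusted accordingly.  Always returns 1.
 */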
static inline int
validate_entry_change(
    struct domain *d,
    pgentry_64_t *new_gle_p,
    pgentry_64_t *shadow_le_p,
    u32 level)
{
    pgentry_64_t old_sle, new_sle;
    pgentry_64_t new_gle = *new_gle_p;

    old_sle = *shadow_le_p;
    entry_propagate_from_guest(d, &new_gle, &new_sle, level);

    ESH_LOG("old_sle: %lx, new_gle: %lx, new_sle: %lx\n",
            entry_get_value(old_sle), entry_get_value(new_gle),
            entry_get_value(new_sle));

    if ( ((entry_get_value(old_sle) | entry_get_value(new_sle)) & _PAGE_PRESENT) &&
         entry_has_changed(old_sle, new_sle, _PAGE_PRESENT) )
    {
        perfc_incrc(validate_entry_changes);

        if ( (entry_get_flags(new_sle) & _PAGE_PRESENT) &&
             !get_shadow_ref(entry_get_pfn(new_sle)) )
            BUG();
        if ( entry_get_flags(old_sle) & _PAGE_PRESENT )
            put_shadow_ref(entry_get_pfn(old_sle));
    }

    *shadow_le_p = new_sle;

    return 1;
}

#endif