ia64/xen-unstable

view xen/include/asm-x86/shadow.h @ 3872:5950d2ac5f17

bitkeeper revision 1.1205.1.10 (421796afi6cb40DRRlGAd0wByIfSjg)

Bug fix: stop leaking shadow L2 pages like a sieve.
We were looking up shadow_status with a mfn rather than a gpfn.
author maf46@burn.cl.cam.ac.uk
date Sat Feb 19 19:42:39 2005 +0000 (2005-02-19)
parents aca72468d4fe
children 523423e2510b
line source
1 /* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
3 #ifndef _XEN_SHADOW_H
4 #define _XEN_SHADOW_H
6 #include <xen/config.h>
7 #include <xen/types.h>
8 #include <xen/perfc.h>
9 #include <asm/processor.h>
11 #ifdef CONFIG_VMX
12 #include <asm/domain_page.h>
13 #endif
15 /* Shadow PT flag bits in pfn_info */
16 #define PSH_shadowed (1<<31) /* page has a shadow. PFN points to shadow */
17 #define PSH_pfn_mask ((1<<21)-1)
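/*
 * Illustrative sketch of how these bits are combined elsewhere in this
 * header: a shadow-status value packs the shadow mfn into the low bits
 * together with PSH_shadowed, and callers mask the flags back off, e.g.
 *
 *     set_shadow_status(d, gpfn, smfn | PSH_shadowed);
 *     smfn = __shadow_status(d, gpfn) & PSH_pfn_mask;
 */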
19 /* Shadow PT operation mode: shadow-mode variable in arch_domain. */
21 #define SHM_enable (1<<0) /* we're in one of the shadow modes */
22 #define SHM_log_dirty (1<<1) /* enable log dirty mode */
23 #define SHM_translate (1<<2) /* do p2m translation on guest tables */
24 #define SHM_external (1<<3) /* external page table, not used by Xen */
26 #define shadow_mode_enabled(_d) ((_d)->arch.shadow_mode)
27 #define shadow_mode_log_dirty(_d) ((_d)->arch.shadow_mode & SHM_log_dirty)
28 #define shadow_mode_translate(_d) ((_d)->arch.shadow_mode & SHM_translate)
29 #define shadow_mode_external(_d) ((_d)->arch.shadow_mode & SHM_external)
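/*
 * Typical guard pattern (as used by get_shadow_status() later in this
 * file): test the mode bits before doing any shadow-specific work, e.g.
 *
 *     if ( shadow_mode_log_dirty(d) )
 *         __mark_dirty(d, gpfn);
 */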
31 #define shadow_linear_pg_table ((l1_pgentry_t *)SH_LINEAR_PT_VIRT_START)
32 #define shadow_linear_l2_table ((l2_pgentry_t *)(SH_LINEAR_PT_VIRT_START + \
33 (SH_LINEAR_PT_VIRT_START >> (L2_PAGETABLE_SHIFT - L1_PAGETABLE_SHIFT))))
35 #define shadow_lock_init(_d) spin_lock_init(&(_d)->arch.shadow_lock)
36 #define shadow_lock(_d) spin_lock(&(_d)->arch.shadow_lock)
37 #define shadow_unlock(_d) spin_unlock(&(_d)->arch.shadow_lock)
39 extern void shadow_mode_init(void);
40 extern int shadow_mode_control(struct domain *p, dom0_shadow_control_t *sc);
41 extern int shadow_fault(unsigned long va, long error_code);
42 extern void shadow_l1_normal_pt_update(
43 unsigned long pa, unsigned long gpte,
44 unsigned long *prev_spfn_ptr, l1_pgentry_t **prev_spl1e_ptr);
45 extern void shadow_l2_normal_pt_update(unsigned long pa, unsigned long gpde);
46 extern void unshadow_table(unsigned long gpfn, unsigned int type);
47 extern int shadow_mode_enable(struct domain *p, unsigned int mode);
48 extern void free_shadow_state(struct domain *d);
49 extern void shadow_invlpg(struct exec_domain *, unsigned long);
51 #ifdef CONFIG_VMX
52 extern void vmx_shadow_clear_state(struct domain *);
53 #endif
55 #define __mfn_to_gpfn(_d, mfn) \
56 ( (shadow_mode_translate(_d)) \
57 ? machine_to_phys_mapping[(mfn)] \
58 : (mfn) )
60 #define __gpfn_to_mfn(_d, gpfn) \
61 ( (shadow_mode_translate(_d)) \
62 ? phys_to_machine_mapping(gpfn) \
63 : (gpfn) )
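/*
 * Illustrative use, following l1pte_propagate_from_guest() below: the frame
 * referenced by a guest PTE is translated to a machine frame before the
 * shadow PTE is built.
 *
 *     unsigned long pfn = gpte >> PAGE_SHIFT;
 *     unsigned long mfn = __gpfn_to_mfn(d, pfn);
 *     spte = (mfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK);
 */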
65 extern void __shadow_mode_disable(struct domain *d);
66 static inline void shadow_mode_disable(struct domain *d)
67 {
68 if ( shadow_mode_enabled(d) )
69 __shadow_mode_disable(d);
70 }
72 extern unsigned long shadow_l2_table(
73 struct domain *d, unsigned long gmfn);
75 static inline void shadow_invalidate(struct exec_domain *ed) {
76 if ( !shadow_mode_translate(ed->domain) )
77 BUG();
78 memset(ed->arch.shadow_vtable, 0, PAGE_SIZE);
79 }
81 #define SHADOW_DEBUG 0
82 #define SHADOW_VERBOSE_DEBUG 0
83 #define SHADOW_HASH_DEBUG 0
85 #if SHADOW_DEBUG
86 extern int shadow_status_noswap;
87 #endif
89 struct shadow_status {
90 unsigned long pfn; /* Guest pfn. */
91 unsigned long smfn_and_flags; /* Shadow mfn plus flags. */
92 struct shadow_status *next; /* Pull-to-front list. */
93 };
95 #define shadow_ht_extra_size 128
96 #define shadow_ht_buckets 256
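/*
 * Layout sketch (derived from the routines below): each domain has
 * shadow_ht_buckets embedded head nodes in d->arch.shadow_ht[]; overflow
 * entries are chained through 'next' and drawn from d->arch.shadow_ht_free,
 * which set_shadow_status() refills in blocks of shadow_ht_extra_size.
 */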
98 #ifdef VERBOSE
99 #define SH_LOG(_f, _a...) \
100 printk("DOM%uP%u: SH_LOG(%d): " _f "\n", \
101 current->domain->id , current->processor, __LINE__ , ## _a )
102 #else
103 #define SH_LOG(_f, _a...)
104 #endif
106 #if SHADOW_DEBUG
107 #define SH_VLOG(_f, _a...) \
108 printk("DOM%uP%u: SH_VLOG(%d): " _f "\n", \
109 current->domain->id, current->processor, __LINE__ , ## _a )
110 #else
111 #define SH_VLOG(_f, _a...)
112 #endif
114 #if SHADOW_VERBOSE_DEBUG
115 #define SH_VVLOG(_f, _a...) \
116 printk("DOM%uP%u: SH_VVLOG(%d): " _f "\n", \
117 current->domain->id, current->processor, __LINE__ , ## _a )
118 #else
119 #define SH_VVLOG(_f, _a...)
120 #endif
122 // BUG: mafetter: this assumes ed == current, so why pass ed?
123 static inline void __shadow_get_l2e(
124 struct exec_domain *ed, unsigned long va, unsigned long *sl2e)
125 {
126 if ( likely(shadow_mode_enabled(ed->domain)) ) {
127 if ( shadow_mode_translate(ed->domain) )
128 *sl2e = l2_pgentry_val(
129 ed->arch.shadow_vtable[l2_table_offset(va)]);
130 else
131 *sl2e = l2_pgentry_val(
132 shadow_linear_l2_table[l2_table_offset(va)]);
133 }
134 else
135 BUG();
136 }
138 static inline void __shadow_set_l2e(
139 struct exec_domain *ed, unsigned long va, unsigned long value)
140 {
141 if ( likely(shadow_mode_enabled(ed->domain)) ) {
142 if ( shadow_mode_translate(ed->domain) )
143 ed->arch.shadow_vtable[l2_table_offset(va)] = mk_l2_pgentry(value);
144 else
145 shadow_linear_l2_table[l2_table_offset(va)] = mk_l2_pgentry(value);
146 }
147 else
148 BUG();
149 }
151 static inline void __guest_get_l2e(
152 struct exec_domain *ed, unsigned long va, unsigned long *l2e)
153 {
154 *l2e = ( shadow_mode_translate(ed->domain) ) ?
155 l2_pgentry_val(ed->arch.guest_vtable[l2_table_offset(va)]) :
156 l2_pgentry_val(linear_l2_table[l2_table_offset(va)]);
157 }
159 static inline void __guest_set_l2e(
160 struct exec_domain *ed, unsigned long va, unsigned long value)
161 {
162 if ( shadow_mode_translate(ed->domain) )
163 {
164 unsigned long pfn;
166 pfn = phys_to_machine_mapping(value >> PAGE_SHIFT);
167 ed->arch.hl2_vtable[l2_table_offset(va)] =
168 mk_l2_pgentry((pfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
170 ed->arch.guest_vtable[l2_table_offset(va)] = mk_l2_pgentry(value);
171 }
172 else
173 {
174 linear_l2_table[l2_table_offset(va)] = mk_l2_pgentry(value);
175 }
176 }
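/*
 * Example caller (see gva_to_gpte() below): fetch the guest's L2 entry for
 * a virtual address before walking any further.
 *
 *     unsigned long gpde;
 *     __guest_get_l2e(ed, gva, &gpde);
 *     if ( !(gpde & _PAGE_PRESENT) )
 *         return 0;
 */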
178 /************************************************************************/
180 static inline int __mark_dirty(struct domain *d, unsigned int mfn)
181 {
182 unsigned long pfn;
183 int rc = 0;
185 ASSERT(spin_is_locked(&d->arch.shadow_lock));
186 ASSERT(d->arch.shadow_dirty_bitmap != NULL);
188 pfn = machine_to_phys_mapping[mfn];
190 /*
191 * Values with the MSB set denote MFNs that aren't really part of the
192 * domain's pseudo-physical memory map (e.g., the shared info frame).
193 * Nothing to do here...
194 */
195 if ( unlikely(IS_INVALID_M2P_ENTRY(pfn)) )
196 return rc;
198 if ( likely(pfn < d->arch.shadow_dirty_bitmap_size) )
199 {
200 /* N.B. Can use non-atomic TAS because protected by shadow_lock. */
201 if ( !__test_and_set_bit(pfn, d->arch.shadow_dirty_bitmap) )
202 {
203 d->arch.shadow_dirty_count++;
204 rc = 1;
205 }
206 }
207 #ifndef NDEBUG
208 else if ( mfn < max_page )
209 {
210 SH_LOG("mark_dirty OOR! mfn=%x pfn=%lx max=%x (dom %p)",
211 mfn, pfn, d->arch.shadow_dirty_bitmap_size, d);
212 SH_LOG("dom=%p caf=%08x taf=%08x\n",
213 page_get_owner(&frame_table[mfn]),
214 frame_table[mfn].count_info,
215 frame_table[mfn].u.inuse.type_info );
216 }
217 #endif
219 return rc;
220 }
223 static inline int mark_dirty(struct domain *d, unsigned int mfn)
224 {
225 int rc;
226 shadow_lock(d);
227 rc = __mark_dirty(d, mfn);
228 shadow_unlock(d);
229 return rc;
230 }
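/*
 * Usage sketch: callers that already hold the shadow lock (e.g.
 * get_shadow_status() below) call __mark_dirty() directly; everything else
 * should use mark_dirty(), which takes and drops the lock itself, e.g.
 *
 *     if ( shadow_mode_log_dirty(d) )
 *         mark_dirty(d, mfn);
 */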
233 /************************************************************************/
235 static inline void l1pte_write_fault(
236 struct domain *d, unsigned long *gpte_p, unsigned long *spte_p)
237 {
238 unsigned long gpte = *gpte_p;
239 unsigned long spte = *spte_p;
240 unsigned long pfn = gpte >> PAGE_SHIFT;
241 unsigned long mfn = __gpfn_to_mfn(d, pfn);
243 ASSERT(gpte & _PAGE_RW);
244 gpte |= _PAGE_DIRTY | _PAGE_ACCESSED;
246 if ( shadow_mode_log_dirty(d) )
247 __mark_dirty(d, pfn);
249 spte = (mfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK);
251 SH_VVLOG("l1pte_write_fault: updating spte=0x%p gpte=0x%p", spte, gpte);
252 *gpte_p = gpte;
253 *spte_p = spte;
254 }
256 static inline void l1pte_read_fault(
257 struct domain *d, unsigned long *gpte_p, unsigned long *spte_p)
258 {
259 unsigned long gpte = *gpte_p;
260 unsigned long spte = *spte_p;
261 unsigned long pfn = gpte >> PAGE_SHIFT;
262 unsigned long mfn = __gpfn_to_mfn(d, pfn);
264 gpte |= _PAGE_ACCESSED;
265 spte = (mfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK);
267 if ( shadow_mode_log_dirty(d) || !(gpte & _PAGE_DIRTY) )
268 spte &= ~_PAGE_RW;
270 SH_VVLOG("l1pte_read_fault: updating spte=0x%p gpte=0x%p", spte, gpte);
271 *gpte_p = gpte;
272 *spte_p = spte;
273 }
275 static inline void l1pte_propagate_from_guest(
276 struct domain *d, unsigned long *gpte_p, unsigned long *spte_p)
277 {
278 unsigned long gpte = *gpte_p;
279 unsigned long spte = *spte_p;
280 unsigned long pfn = gpte >> PAGE_SHIFT;
281 unsigned long mfn = __gpfn_to_mfn(d, pfn);
283 #if SHADOW_VERBOSE_DEBUG
284 unsigned long old_spte = spte;
285 #endif
287 if ( shadow_mode_external(d) && mmio_space(gpte & 0xFFFFF000) ) {
288 *spte_p = 0;
289 return;
290 }
292 spte = 0;
293 if ( (gpte & (_PAGE_PRESENT|_PAGE_ACCESSED) ) ==
294 (_PAGE_PRESENT|_PAGE_ACCESSED) ) {
296 spte = (mfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK);
298 if ( shadow_mode_log_dirty(d) || !(gpte & _PAGE_DIRTY) )
299 spte &= ~_PAGE_RW;
300 }
302 #if SHADOW_VERBOSE_DEBUG
303 if ( old_spte || spte || gpte )
304 SH_VVLOG("l1pte_propagate_from_guest: gpte=0x%p, old spte=0x%p, new spte=0x%p ", gpte, old_spte, spte);
305 #endif
307 *gpte_p = gpte;
308 *spte_p = spte;
309 }
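/*
 * Worked example (illustrative): a guest PTE that is PRESENT|ACCESSED but
 * not DIRTY propagates to a read-only shadow PTE, so the guest's first
 * write faults into l1pte_write_fault(), which sets _PAGE_DIRTY and
 * restores write access. A PRESENT|ACCESSED|DIRTY guest PTE (outside
 * log-dirty mode) keeps _PAGE_RW in the shadow straight away.
 */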
313 static inline void l2pde_general(
314 struct domain *d,
315 unsigned long *gpde_p,
316 unsigned long *spde_p,
317 unsigned long sl1mfn)
318 {
319 unsigned long gpde = *gpde_p;
320 unsigned long spde = *spde_p;
322 spde = 0;
324 if ( sl1mfn != 0 )
325 {
326 spde = (gpde & ~PAGE_MASK) | (sl1mfn << PAGE_SHIFT) |
327 _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY;
328 gpde |= _PAGE_ACCESSED; /* N.B. PDEs do not have a dirty bit. */
330 /* Detect linear p.t. mappings and write-protect them. */
331 if ( (frame_table[sl1mfn].u.inuse.type_info & PGT_type_mask) ==
332 PGT_l2_page_table )
333 {
334 if ( !shadow_mode_translate(d) )
335 spde = gpde & ~_PAGE_RW;
337 }
338 }
340 *gpde_p = gpde;
341 *spde_p = spde;
342 }
344 /*********************************************************************/
346 #if SHADOW_HASH_DEBUG
347 static void shadow_audit(struct domain *d, int print)
348 {
349 int live = 0, free = 0, j = 0, abs;
350 struct shadow_status *a;
352 for ( j = 0; j < shadow_ht_buckets; j++ )
353 {
354 a = &d->arch.shadow_ht[j];
355 if ( a->pfn ) { live++; ASSERT(a->smfn_and_flags & PSH_pfn_mask); }
356 ASSERT(a->pfn < 0x00100000UL);
357 a = a->next;
358 while ( a && (live < 9999) )
359 {
360 live++;
361 if ( (a->pfn == 0) || (a->smfn_and_flags == 0) )
362 {
363 printk("XXX live=%d pfn=%p sp=%p next=%p\n",
364 live, a->pfn, a->smfn_and_flags, a->next);
365 BUG();
366 }
367 ASSERT(a->pfn < 0x00100000UL);
368 ASSERT(a->smfn_and_flags & PSH_pfn_mask);
369 a = a->next;
370 }
371 ASSERT(live < 9999);
372 }
374 for ( a = d->arch.shadow_ht_free; a != NULL; a = a->next )
375 free++;
377 if ( print )
378 printk("Xlive=%d free=%d\n",live,free);
380 abs = (perfc_value(shadow_l1_pages) + perfc_value(shadow_l2_pages)) - live;
381 #ifdef PERF_COUNTERS
382 if ( (abs < -1) || (abs > 1) )
383 {
384 printk("live=%d free=%d l1=%d l2=%d\n",live,free,
385 perfc_value(shadow_l1_pages), perfc_value(shadow_l2_pages) );
386 BUG();
387 }
388 #endif
389 }
390 #else
391 #define shadow_audit(p, print) ((void)0)
392 #endif
395 static inline struct shadow_status *hash_bucket(
396 struct domain *d, unsigned int gpfn)
397 {
398 return &d->arch.shadow_ht[gpfn % shadow_ht_buckets];
399 }
402 /*
403 * N.B. This takes a guest pfn (i.e. a pfn in the guest's namespace,
404 * which, depending on whether the domain is in translate mode, may or
405 * may not equal its mfn).
406 * The shadow status it returns is an smfn plus flags (smfn_and_flags).
407 */
408 static inline unsigned long __shadow_status(
409 struct domain *d, unsigned int gpfn)
410 {
411 struct shadow_status *p, *x, *head;
413 x = head = hash_bucket(d, gpfn);
414 p = NULL;
416 //SH_VVLOG("lookup gpfn=%08x bucket=%p", gpfn, x);
417 shadow_audit(d, 0);
419 do
420 {
421 ASSERT(x->pfn || ((x == head) && (x->next == NULL)));
423 if ( x->pfn == gpfn )
424 {
425 #if SHADOW_DEBUG
426 if ( unlikely(shadow_status_noswap) )
427 return x->smfn_and_flags;
428 #endif
429 /* Pull-to-front if 'x' isn't already the head item. */
430 if ( unlikely(x != head) )
431 {
432 /* Delete 'x' from list and reinsert immediately after head. */
433 p->next = x->next;
434 x->next = head->next;
435 head->next = x;
437 /* Swap 'x' contents with head contents. */
438 SWAP(head->pfn, x->pfn);
439 SWAP(head->smfn_and_flags, x->smfn_and_flags);
440 }
442 SH_VVLOG("lookup gpfn=%p => status=%p",
443 gpfn, head->smfn_and_flags);
444 return head->smfn_and_flags;
445 }
447 p = x;
448 x = x->next;
449 }
450 while ( x != NULL );
452 SH_VVLOG("lookup gpfn=%p => status=0", gpfn);
453 return 0;
454 }
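/*
 * Example (cf. __update_pagetables() below, and the bug fixed in this
 * changeset): the hash must always be keyed on a guest pfn, so an mfn is
 * translated first.
 *
 *     unsigned long gpfn = __mfn_to_gpfn(d, gmfn);
 *     unsigned long smfn = __shadow_status(d, gpfn) & PSH_pfn_mask;
 */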
456 /*
457 * N.B. We can make this locking more fine grained (e.g., per shadow page) if
458 * it ever becomes a problem, but since we need a spin lock on the hash table
459 * anyway it's probably not worth being too clever.
460 */
461 static inline unsigned long get_shadow_status(
462 struct domain *d, unsigned int gpfn )
463 {
464 unsigned long res;
466 ASSERT(shadow_mode_enabled(d));
468 /*
469 * If we get here we know that some sort of update has happened to the
470 * underlying page table page: either a PTE has been updated, or the page
471 * has changed type. If we're in log dirty mode, we should set the
472 * appropriate bit in the dirty bitmap.
473 * N.B. The VA update path doesn't use this and is handled independently.
475 * XXX need to think this through for vmx guests, but probably OK
476 */
478 shadow_lock(d);
480 if ( shadow_mode_log_dirty(d) )
481 __mark_dirty(d, gpfn);
483 if ( !(res = __shadow_status(d, gpfn)) )
484 shadow_unlock(d);
486 return res;
487 }
490 static inline void put_shadow_status(
491 struct domain *d)
492 {
493 shadow_unlock(d);
494 }
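/*
 * Locking sketch: get_shadow_status() returns with the shadow lock still
 * held whenever it finds a shadow (non-zero result); the caller is then
 * responsible for dropping it with put_shadow_status(), e.g.
 *
 *     if ( (s = get_shadow_status(d, gpfn)) != 0 )
 *     {
 *         ... use s ...
 *         put_shadow_status(d);
 *     }
 *
 * ('s' here is just an illustrative local variable.)
 */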
497 static inline void delete_shadow_status(
498 struct domain *d, unsigned int gpfn)
499 {
500 struct shadow_status *p, *x, *n, *head;
502 ASSERT(spin_is_locked(&d->arch.shadow_lock));
503 ASSERT(gpfn != 0);
505 head = hash_bucket(d, gpfn);
507 SH_VVLOG("delete gpfn=%08x bucket=%p", gpfn, head);
508 shadow_audit(d, 0);
510 /* Match on head item? */
511 if ( head->pfn == gpfn )
512 {
513 if ( (n = head->next) != NULL )
514 {
515 /* Overwrite head with contents of following node. */
516 head->pfn = n->pfn;
517 head->smfn_and_flags = n->smfn_and_flags;
519 /* Delete following node. */
520 head->next = n->next;
522 /* Add deleted node to the free list. */
523 n->pfn = 0;
524 n->smfn_and_flags = 0;
525 n->next = d->arch.shadow_ht_free;
526 d->arch.shadow_ht_free = n;
527 }
528 else
529 {
530 /* This bucket is now empty. Initialise the head node. */
531 head->pfn = 0;
532 head->smfn_and_flags = 0;
533 }
535 goto found;
536 }
538 p = head;
539 x = head->next;
541 do
542 {
543 if ( x->pfn == gpfn )
544 {
545 /* Delete matching node. */
546 p->next = x->next;
548 /* Add deleted node to the free list. */
549 x->pfn = 0;
550 x->smfn_and_flags = 0;
551 x->next = d->arch.shadow_ht_free;
552 d->arch.shadow_ht_free = x;
554 goto found;
555 }
557 p = x;
558 x = x->next;
559 }
560 while ( x != NULL );
562 /* If we got here, it wasn't in the list! */
563 BUG();
565 found:
566 shadow_audit(d, 0);
567 }
570 static inline void set_shadow_status(
571 struct domain *d, unsigned int gpfn, unsigned long s)
572 {
573 struct shadow_status *x, *head, *extra;
574 int i;
576 ASSERT(spin_is_locked(&d->arch.shadow_lock));
577 ASSERT(gpfn != 0);
578 ASSERT(s & PSH_shadowed);
580 x = head = hash_bucket(d, gpfn);
582 SH_VVLOG("set gpfn=%08x s=%p bucket=%p(%p)", gpfn, s, x, x->next);
583 shadow_audit(d, 0);
585 /*
586 * STEP 1. If page is already in the table, update it in place.
587 */
589 do
590 {
591 if ( x->pfn == gpfn )
592 {
593 x->smfn_and_flags = s;
594 goto done;
595 }
597 x = x->next;
598 }
599 while ( x != NULL );
601 /*
602 * STEP 2. The page must be inserted into the table.
603 */
605 /* If the bucket is empty then insert the new page as the head item. */
606 if ( head->pfn == 0 )
607 {
608 head->pfn = gpfn;
609 head->smfn_and_flags = s;
610 ASSERT(head->next == NULL);
611 goto done;
612 }
614 /* We need to allocate a new node. Ensure the quicklist is non-empty. */
615 if ( unlikely(d->arch.shadow_ht_free == NULL) )
616 {
617 SH_LOG("Allocate more shadow hashtable blocks.");
619 extra = xmalloc_bytes(
620 sizeof(void *) + (shadow_ht_extra_size * sizeof(*x)));
622 /* XXX Should be more graceful here. */
623 if ( extra == NULL )
624 BUG();
626 memset(extra, 0, sizeof(void *) + (shadow_ht_extra_size * sizeof(*x)));
628 /* Record the allocation block so it can be correctly freed later. */
629 d->arch.shadow_extras_count++;
630 *((struct shadow_status **)&extra[shadow_ht_extra_size]) =
631 d->arch.shadow_ht_extras;
632 d->arch.shadow_ht_extras = &extra[0];
634 /* Thread a free chain through the newly-allocated nodes. */
635 for ( i = 0; i < (shadow_ht_extra_size - 1); i++ )
636 extra[i].next = &extra[i+1];
637 extra[i].next = NULL;
639 /* Add the new nodes to the free list. */
640 d->arch.shadow_ht_free = &extra[0];
641 }
643 /* Allocate a new node from the quicklist. */
644 x = d->arch.shadow_ht_free;
645 d->arch.shadow_ht_free = x->next;
647 /* Initialise the new node and insert directly after the head item. */
648 x->pfn = gpfn;
649 x->smfn_and_flags = s;
650 x->next = head->next;
651 head->next = x;
653 done:
654 shadow_audit(d, 0);
655 }
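/*
 * Lifecycle sketch (illustrative): a new shadow is published with
 *
 *     set_shadow_status(d, gpfn, smfn | PSH_shadowed);
 *
 * and withdrawn with delete_shadow_status(d, gpfn) when it is torn down;
 * both expect the shadow lock to be held (see the ASSERTs above).
 */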
657 #ifdef CONFIG_VMX
659 static inline void vmx_update_shadow_state(
660 struct exec_domain *ed, unsigned long gmfn, unsigned long smfn)
661 {
663 l2_pgentry_t *mpl2e = 0;
664 l2_pgentry_t *gpl2e, *spl2e;
666 /* unmap the old mappings */
667 if ( ed->arch.shadow_vtable )
668 unmap_domain_mem(ed->arch.shadow_vtable);
669 if ( ed->arch.guest_vtable )
670 unmap_domain_mem(ed->arch.guest_vtable);
672 /* new mapping */
673 mpl2e = (l2_pgentry_t *)
674 map_domain_mem(pagetable_val(ed->arch.monitor_table));
676 // mafetter: why do we need to keep setting up shadow_linear_pg_table for
677 // this monitor page table? Seems unnecessary...
678 //
679 mpl2e[l2_table_offset(SH_LINEAR_PT_VIRT_START)] =
680 mk_l2_pgentry((smfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
681 __flush_tlb_one(SH_LINEAR_PT_VIRT_START);
683 spl2e = (l2_pgentry_t *)map_domain_mem(smfn << PAGE_SHIFT);
684 gpl2e = (l2_pgentry_t *)map_domain_mem(gmfn << PAGE_SHIFT);
685 memset(spl2e, 0, L2_PAGETABLE_ENTRIES * sizeof(l2_pgentry_t));
687 ed->arch.shadow_vtable = spl2e;
688 ed->arch.guest_vtable = gpl2e; /* expect that the guest has cleaned this up */
689 unmap_domain_mem(mpl2e);
690 }
692 static inline unsigned long gva_to_gpte(unsigned long gva)
693 {
694 unsigned long gpde, gpte, pfn, index;
695 struct exec_domain *ed = current;
697 __guest_get_l2e(ed, gva, &gpde);
698 if (!(gpde & _PAGE_PRESENT))
699 return 0;
701 index = (gva >> L2_PAGETABLE_SHIFT);
703 if (!l2_pgentry_val(ed->arch.hl2_vtable[index])) {
704 pfn = phys_to_machine_mapping(gpde >> PAGE_SHIFT);
705 ed->arch.hl2_vtable[index] =
706 mk_l2_pgentry((pfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
707 }
709 if ( unlikely(__get_user(gpte, (unsigned long *)
710 &linear_pg_table[gva >> PAGE_SHIFT])) )
711 return 0;
713 return gpte;
714 }
716 static inline unsigned long gva_to_gpa(unsigned long gva)
717 {
718 unsigned long gpte;
720 gpte = gva_to_gpte(gva);
721 if ( !(gpte & _PAGE_PRESENT) )
722 return 0;
724 return (gpte & PAGE_MASK) + (gva & ~PAGE_MASK);
725 }
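/*
 * Illustrative follow-on (this combination does not appear in this file):
 * the guest-physical address returned by gva_to_gpa() can be converted to
 * a machine address with the translation macros defined earlier, e.g.
 *
 *     unsigned long gpa = gva_to_gpa(gva);
 *     unsigned long ma  = (__gpfn_to_mfn(ed->domain, gpa >> PAGE_SHIFT)
 *                          << PAGE_SHIFT) | (gpa & ~PAGE_MASK);
 */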
727 #endif /* CONFIG_VMX */
729 static inline void __update_pagetables(struct exec_domain *ed)
730 {
731 struct domain *d = ed->domain;
732 unsigned long gmfn = pagetable_val(ed->arch.guest_table) >> PAGE_SHIFT;
733 unsigned long gpfn = __mfn_to_gpfn(d, gmfn);
734 unsigned long smfn = __shadow_status(d, gpfn) & PSH_pfn_mask;
736 SH_VVLOG("0: __update_pagetables(gmfn=%p, smfn=%p)", gmfn, smfn);
738 if ( unlikely(smfn == 0) )
739 smfn = shadow_l2_table(d, gmfn);
740 #ifdef CONFIG_VMX
741 else if ( shadow_mode_translate(ed->domain) )
742 vmx_update_shadow_state(ed, gmfn, smfn);
743 #endif
745 ed->arch.shadow_table = mk_pagetable(smfn<<PAGE_SHIFT);
747 if ( !shadow_mode_external(ed->domain) )
748 // mafetter: why do we need to keep overwriting
749 // ed->arch.monitor_table? Seems unnecessary...
750 //
751 ed->arch.monitor_table = ed->arch.shadow_table;
752 }
754 static inline void update_pagetables(struct exec_domain *ed)
755 {
756 if ( unlikely(shadow_mode_enabled(ed->domain)) )
757 {
758 shadow_lock(ed->domain);
759 __update_pagetables(ed);
760 shadow_unlock(ed->domain);
761 }
762 #ifdef __x86_64__
763 else if ( !(ed->arch.flags & TF_kernel_mode) )
764 // mafetter: why do we need to keep overwriting
765 // ed->arch.monitor_table? Seems unnecessary...
766 //
767 ed->arch.monitor_table = ed->arch.guest_table_user;
768 #endif
769 else
770 // mafetter: why do we need to keep overwriting
771 // ed->arch.monitor_table? Seems unnecessary...
772 //
773 ed->arch.monitor_table = ed->arch.guest_table;
774 }
776 #if SHADOW_DEBUG
777 extern int _check_pagetable(struct domain *d, pagetable_t pt, char *s);
778 extern int _check_all_pagetables(struct domain *d, char *s);
780 #define check_pagetable(_d, _pt, _s) _check_pagetable(_d, _pt, _s)
781 //#define check_pagetable(_d, _pt, _s) _check_all_pagetables(_d, _s)
783 #else
784 #define check_pagetable(_d, _pt, _s) ((void)0)
785 #endif
787 #endif /* XEN_SHADOW_H */