ia64/xen-unstable: xen/include/asm-x86/shadow.h @ 3629:07d5c9548534

bitkeeper revision 1.1159.212.76 (4201eac5AlEp4jSQYKA8-oSf0N15pQ)

Add xmalloc_bytes() to the allocator API.
Signed-off-by: keir.fraser@cl.cam.ac.uk

author   kaf24@scramble.cl.cam.ac.uk
date     Thu Feb 03 09:11:33 2005 +0000 (2005-02-03)
parents  8472fafee3cf
children 677cb76cff18

/* -*- Mode:C; c-basic-offset:4; tab-width:4 -*- */

#ifndef _XEN_SHADOW_H
#define _XEN_SHADOW_H

#include <xen/config.h>
#include <xen/types.h>
#include <xen/perfc.h>
#include <asm/processor.h>

/* Shadow PT flag bits in pfn_info */
#define PSH_shadowed    (1<<31) /* page has a shadow. PFN points to shadow */
#define PSH_pfn_mask    ((1<<21)-1)

/* Shadow PT operation mode: shadow_mode variable in mm_struct */
#define SHM_test      (1) /* just run domain on shadow PTs */
#define SHM_logdirty  (2) /* log pages that are dirtied */
#define SHM_translate (3) /* lookup machine pages in translation table */
#define SHM_cow       (4) /* copy on write all dirtied pages */
#define SHM_full_32   (8) /* full virtualization for 32-bit */

#define shadow_linear_pg_table ((l1_pgentry_t *)SH_LINEAR_PT_VIRT_START)
#define shadow_linear_l2_table ((l2_pgentry_t *)(SH_LINEAR_PT_VIRT_START + \
     (SH_LINEAR_PT_VIRT_START >> (L2_PAGETABLE_SHIFT - L1_PAGETABLE_SHIFT))))

#define shadow_mode(_d)      ((_d)->mm.shadow_mode)
#define shadow_lock_init(_d) spin_lock_init(&(_d)->mm.shadow_lock)
#define shadow_lock(_m)      spin_lock(&(_m)->shadow_lock)
#define shadow_unlock(_m)    spin_unlock(&(_m)->shadow_lock)

extern void shadow_mode_init(void);
extern int shadow_mode_control(struct domain *p, dom0_shadow_control_t *sc);
extern int shadow_fault(unsigned long va, long error_code);
extern void shadow_l1_normal_pt_update(
    unsigned long pa, unsigned long gpte,
    unsigned long *prev_spfn_ptr, l1_pgentry_t **prev_spl1e_ptr);
extern void shadow_l2_normal_pt_update(unsigned long pa, unsigned long gpte);
extern void unshadow_table(unsigned long gpfn, unsigned int type);
extern int shadow_mode_enable(struct domain *p, unsigned int mode);

#ifdef CONFIG_VMX
extern void vmx_shadow_clear_state(struct mm_struct *);
extern void vmx_shadow_invlpg(struct mm_struct *, unsigned long);
#endif

#define __get_machine_to_phys(m, guest_gpfn, gpfn)          \
    if ((m)->shadow_mode == SHM_full_32)                    \
        (guest_gpfn) = machine_to_phys_mapping[(gpfn)];     \
    else                                                    \
        (guest_gpfn) = (gpfn);

#define __get_phys_to_machine(m, host_gpfn, gpfn)           \
    if ((m)->shadow_mode == SHM_full_32)                    \
        (host_gpfn) = phys_to_machine_mapping[(gpfn)];      \
    else                                                    \
        (host_gpfn) = (gpfn);
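
/*
 * Illustrative sketch of how a caller might use the two translation macros
 * above: for SHM_full_32 guests they consult the machine<->pseudo-physical
 * tables, otherwise they are identity mappings. The helper name
 * 'example_m2p_round_trip' is hypothetical; only the macros, SHM_full_32 and
 * the mapping arrays come from this header or its includes.
 */
#if 0
static inline unsigned long example_m2p_round_trip(
    struct mm_struct *m, unsigned long mfn)
{
    unsigned long pfn, mfn2;

    /* Machine frame -> guest pseudo-physical frame. */
    __get_machine_to_phys(m, pfn, mfn);

    /* ... and back again; identity unless the domain is SHM_full_32. */
    __get_phys_to_machine(m, mfn2, pfn);

    return mfn2; /* equals 'mfn' whenever the two tables are consistent */
}
#endif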
extern void __shadow_mode_disable(struct domain *d);
static inline void shadow_mode_disable(struct domain *d)
{
    if ( shadow_mode(d->exec_domain[0]) )
        __shadow_mode_disable(d);
}

extern unsigned long shadow_l2_table(
    struct mm_struct *m, unsigned long gpfn);

static inline void shadow_invalidate(struct mm_struct *m) {
    if (m->shadow_mode != SHM_full_32)
        BUG();
    memset(m->shadow_vtable, 0, PAGE_SIZE);
}

#define SHADOW_DEBUG 0
#define SHADOW_HASH_DEBUG 0

struct shadow_status {
    unsigned long pfn;            /* Guest pfn.             */
    unsigned long spfn_and_flags; /* Shadow pfn plus flags. */
    struct shadow_status *next;   /* Pull-to-front list.    */
};

#define shadow_ht_extra_size 128
#define shadow_ht_buckets 256

#ifdef VERBOSE
#define SH_LOG(_f, _a...)                              \
    printk("DOM%u: (file=shadow.c, line=%d) " _f "\n", \
           current->domain->id , __LINE__ , ## _a )
#else
#define SH_LOG(_f, _a...)
#endif

#if SHADOW_DEBUG
#define SH_VLOG(_f, _a...)                             \
    printk("DOM%u: (file=shadow.c, line=%d) " _f "\n", \
           current->domain->id , __LINE__ , ## _a )
#else
#define SH_VLOG(_f, _a...)
#endif

#if 0
#define SH_VVLOG(_f, _a...)                            \
    printk("DOM%u: (file=shadow.c, line=%d) " _f "\n", \
           current->domain->id , __LINE__ , ## _a )
#else
#define SH_VVLOG(_f, _a...)
#endif

static inline void __shadow_get_pl2e(struct mm_struct *m,
                                     unsigned long va, unsigned long *sl2e)
{
    if (m->shadow_mode == SHM_full_32) {
        *sl2e = l2_pgentry_val(m->shadow_vtable[va >> L2_PAGETABLE_SHIFT]);
    }
    else
        *sl2e = l2_pgentry_val(linear_l2_table[va >> L2_PAGETABLE_SHIFT]);
}

static inline void __shadow_set_pl2e(struct mm_struct *m,
                                     unsigned long va, unsigned long value)
{
    if (m->shadow_mode == SHM_full_32) {
        m->shadow_vtable[va >> L2_PAGETABLE_SHIFT] = mk_l2_pgentry(value);
    }
    else
        linear_l2_table[va >> L2_PAGETABLE_SHIFT] = mk_l2_pgentry(value);
}

static inline void __guest_get_pl2e(struct mm_struct *m,
                                    unsigned long va, unsigned long *l2e)
{
    if (m->shadow_mode == SHM_full_32) {
        *l2e = l2_pgentry_val(m->vpagetable[va >> L2_PAGETABLE_SHIFT]);
    }
    else
        *l2e = l2_pgentry_val(linear_l2_table[va >> L2_PAGETABLE_SHIFT]);
}

static inline void __guest_set_pl2e(struct mm_struct *m,
                                    unsigned long va, unsigned long value)
{
    if (m->shadow_mode == SHM_full_32) {
        unsigned long pfn;

        pfn = phys_to_machine_mapping[value >> PAGE_SHIFT];
        m->guest_pl2e_cache[va >> L2_PAGETABLE_SHIFT] =
            mk_l2_pgentry((pfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);

        m->vpagetable[va >> L2_PAGETABLE_SHIFT] = mk_l2_pgentry(value);
    }
    else
        linear_l2_table[va >> L2_PAGETABLE_SHIFT] = mk_l2_pgentry(value);
}

/************************************************************************/

static inline int __mark_dirty( struct mm_struct *m, unsigned int mfn)
{
    unsigned long pfn;
    int rc = 0;

    ASSERT(spin_is_locked(&m->shadow_lock));
    ASSERT(m->shadow_dirty_bitmap != NULL);

    pfn = machine_to_phys_mapping[mfn];

    /*
     * Values with the MSB set denote MFNs that aren't really part of the
     * domain's pseudo-physical memory map (e.g., the shared info frame).
     * Nothing to do here...
     */
    if ( unlikely(pfn & 0x80000000UL) )
        return rc;

    if ( likely(pfn < m->shadow_dirty_bitmap_size) )
    {
        /* N.B. Can use non-atomic TAS because protected by shadow_lock. */
        if ( !__test_and_set_bit(pfn, m->shadow_dirty_bitmap) )
        {
            m->shadow_dirty_count++;
            rc = 1;
        }
    }
#ifndef NDEBUG
    else if ( mfn < max_page )
    {
        SH_LOG("mark_dirty OOR! mfn=%x pfn=%lx max=%x (mm %p)",
               mfn, pfn, m->shadow_dirty_bitmap_size, m );
        SH_LOG("dom=%p caf=%08x taf=%08x\n",
               frame_table[mfn].u.inuse.domain,
               frame_table[mfn].count_info,
               frame_table[mfn].u.inuse.type_info );
    }
#endif

    return rc;
}

static inline int mark_dirty(struct mm_struct *m, unsigned int mfn)
{
    int rc;
    shadow_lock(m);
    rc = __mark_dirty(m, mfn);
    shadow_unlock(m);
    return rc;
}
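
/*
 * Illustrative sketch of the intended calling pattern: a hypothetical
 * write-tracking hook that records a dirtied machine frame when the domain
 * is in log-dirty mode. Only mark_dirty() and SHM_logdirty come from this
 * header; the wrapper name is made up for illustration.
 */
#if 0
static inline void example_note_page_write(struct mm_struct *m,
                                           unsigned int mfn)
{
    if ( m->shadow_mode == SHM_logdirty )
        mark_dirty(m, mfn);   /* takes and drops m->shadow_lock internally */
}
#endif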

/************************************************************************/

static inline void l1pte_write_fault(
    struct mm_struct *m, unsigned long *gpte_p, unsigned long *spte_p)
{
    unsigned long gpte = *gpte_p;
    unsigned long spte = *spte_p;

    ASSERT(gpte & _PAGE_RW);
    gpte |= _PAGE_DIRTY | _PAGE_ACCESSED;

    switch ( m->shadow_mode )
    {
    case SHM_test:
        spte = gpte | _PAGE_RW;
        break;

    case SHM_logdirty:
        spte = gpte | _PAGE_RW;
        __mark_dirty(m, gpte >> PAGE_SHIFT);
        break;

    case SHM_full_32:
    {
        unsigned long host_pfn, host_gpte;

        host_pfn = phys_to_machine_mapping[gpte >> PAGE_SHIFT];
        host_gpte = (host_pfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK);
        spte = host_gpte | _PAGE_RW;
    }
        break;
    }

    SH_VVLOG("updating spte=%lx gpte=%lx", spte, gpte);
    *gpte_p = gpte;
    *spte_p = spte;
}

static inline void l1pte_read_fault(
    struct mm_struct *m, unsigned long *gpte_p, unsigned long *spte_p)
{
    unsigned long gpte = *gpte_p;
    unsigned long spte = *spte_p;

    gpte |= _PAGE_ACCESSED;

    switch ( m->shadow_mode )
    {
    case SHM_test:
        spte = (gpte & _PAGE_DIRTY) ? gpte : (gpte & ~_PAGE_RW);
        break;

    case SHM_logdirty:
        spte = gpte & ~_PAGE_RW;
        break;

    case SHM_full_32:
    {
        unsigned long host_pfn, host_gpte;

        host_pfn = phys_to_machine_mapping[gpte >> PAGE_SHIFT];
        host_gpte = (host_pfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK);
        spte = (host_gpte & _PAGE_DIRTY) ? host_gpte : (host_gpte & ~_PAGE_RW);
    }
        break;
    }

    *gpte_p = gpte;
    *spte_p = spte;
}

static inline void l1pte_propagate_from_guest(
    struct mm_struct *m, unsigned long *gpte_p, unsigned long *spte_p)
{
    unsigned long gpte = *gpte_p;
    unsigned long spte = *spte_p;

    switch ( m->shadow_mode )
    {
    case SHM_test:
        spte = 0;
        if ( (gpte & (_PAGE_PRESENT|_PAGE_ACCESSED) ) ==
             (_PAGE_PRESENT|_PAGE_ACCESSED) )
            spte = (gpte & _PAGE_DIRTY) ? gpte : (gpte & ~_PAGE_RW);
        break;

    case SHM_logdirty:
        spte = 0;
        if ( (gpte & (_PAGE_PRESENT|_PAGE_ACCESSED) ) ==
             (_PAGE_PRESENT|_PAGE_ACCESSED) )
            spte = gpte & ~_PAGE_RW;
        break;

    case SHM_full_32:
    {
        unsigned long host_pfn, host_gpte;
        spte = 0;

        if (mmio_space(gpte & 0xFFFFF000)) {
            *spte_p = spte;
            return;
        }

        host_pfn = phys_to_machine_mapping[gpte >> PAGE_SHIFT];
        host_gpte = (host_pfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK);

        if ( (host_gpte & (_PAGE_PRESENT|_PAGE_ACCESSED) ) ==
             (_PAGE_PRESENT|_PAGE_ACCESSED) )
            spte = (host_gpte & _PAGE_DIRTY) ? host_gpte : (host_gpte & ~_PAGE_RW);
    }
        break;
    }

    *gpte_p = gpte;
    *spte_p = spte;
}
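
/*
 * Illustrative sketch of how the propagation helper above is typically used:
 * derive a shadow PTE from a guest PTE and install it through the shadow
 * linear mapping. This assumes the shadow tables being updated are the ones
 * currently installed (otherwise shadow_linear_pg_table is not mapped); the
 * wrapper name is hypothetical.
 */
#if 0
static inline void example_refresh_spte(
    struct mm_struct *m, unsigned long va, unsigned long gpte)
{
    unsigned long spte = 0;

    /* In SHM_test, a present+accessed but clean guest PTE yields a
     * read-only shadow PTE; a subsequent write fault sets _PAGE_DIRTY via
     * l1pte_write_fault(). In SHM_logdirty the shadow PTE is always
     * read-only so that writes are caught and logged. */
    l1pte_propagate_from_guest(m, &gpte, &spte);

    shadow_linear_pg_table[va >> PAGE_SHIFT] = mk_l1_pgentry(spte);
}
#endif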

static inline void l2pde_general(
    struct mm_struct *m,
    unsigned long *gpde_p,
    unsigned long *spde_p,
    unsigned long sl1pfn)
{
    unsigned long gpde = *gpde_p;
    unsigned long spde = *spde_p;

    spde = 0;

    if ( sl1pfn != 0 )
    {
        spde = (gpde & ~PAGE_MASK) | (sl1pfn << PAGE_SHIFT) |
            _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY;
        gpde |= _PAGE_ACCESSED | _PAGE_DIRTY;

        /* Detect linear p.t. mappings and write-protect them. */
        if ( (frame_table[sl1pfn].u.inuse.type_info & PGT_type_mask) ==
             PGT_l2_page_table )
        {
            if (m->shadow_mode != SHM_full_32)
                spde = gpde & ~_PAGE_RW;
        }
    }

    *gpde_p = gpde;
    *spde_p = spde;
}

/*********************************************************************/

#if SHADOW_HASH_DEBUG
static void shadow_audit(struct mm_struct *m, int print)
{
    int live = 0, free = 0, j = 0, abs;
    struct shadow_status *a;

    for ( j = 0; j < shadow_ht_buckets; j++ )
    {
        a = &m->shadow_ht[j];
        if ( a->pfn ) { live++; ASSERT(a->spfn_and_flags & PSH_pfn_mask); }
        ASSERT(a->pfn < 0x00100000UL);
        a = a->next;
        while ( a && (live < 9999) )
        {
            live++;
            if ( (a->pfn == 0) || (a->spfn_and_flags == 0) )
            {
                printk("XXX live=%d pfn=%08lx sp=%08lx next=%p\n",
                       live, a->pfn, a->spfn_and_flags, a->next);
                BUG();
            }
            ASSERT(a->pfn < 0x00100000UL);
            ASSERT(a->spfn_and_flags & PSH_pfn_mask);
            a = a->next;
        }
        ASSERT(live < 9999);
    }

    for ( a = m->shadow_ht_free; a != NULL; a = a->next )
        free++;

    if ( print )
        printk("Xlive=%d free=%d\n", live, free);

    abs = (perfc_value(shadow_l1_pages) + perfc_value(shadow_l2_pages)) - live;
    if ( (abs < -1) || (abs > 1) )
    {
        printk("live=%d free=%d l1=%d l2=%d\n", live, free,
               perfc_value(shadow_l1_pages), perfc_value(shadow_l2_pages) );
        BUG();
    }
}
#else
#define shadow_audit(p, print) ((void)0)
#endif


static inline struct shadow_status *hash_bucket(
    struct mm_struct *m, unsigned int gpfn)
{
    return &m->shadow_ht[gpfn % shadow_ht_buckets];
}


static inline unsigned long __shadow_status(
    struct mm_struct *m, unsigned int gpfn)
{
    struct shadow_status *p, *x, *head;

    x = head = hash_bucket(m, gpfn);
    p = NULL;

    SH_VVLOG("lookup gpfn=%08x bucket=%p", gpfn, x);
    shadow_audit(m, 0);

    do
    {
        ASSERT(x->pfn || ((x == head) && (x->next == NULL)));

        if ( x->pfn == gpfn )
        {
            /* Pull-to-front if 'x' isn't already the head item. */
            if ( unlikely(x != head) )
            {
                /* Delete 'x' from list and reinsert immediately after head. */
                p->next = x->next;
                x->next = head->next;
                head->next = x;

                /* Swap 'x' contents with head contents. */
                SWAP(head->pfn, x->pfn);
                SWAP(head->spfn_and_flags, x->spfn_and_flags);
            }

            return head->spfn_and_flags;
        }

        p = x;
        x = x->next;
    }
    while ( x != NULL );

    return 0;
}

/*
 * N.B. We can make this locking more fine grained (e.g., per shadow page) if
 * it ever becomes a problem, but since we need a spin lock on the hash table
 * anyway it's probably not worth being too clever.
 */
static inline unsigned long get_shadow_status(
    struct mm_struct *m, unsigned int gpfn )
{
    unsigned long res;

    ASSERT(m->shadow_mode);

    /*
     * If we get here we know that some sort of update has happened to the
     * underlying page table page: either a PTE has been updated, or the page
     * has changed type. If we're in log dirty mode, we should set the
     * appropriate bit in the dirty bitmap.
     * N.B. The VA update path doesn't use this and is handled independently.
     */

    shadow_lock(m);

    if ( m->shadow_mode == SHM_logdirty )
        __mark_dirty( m, gpfn );

    if ( !(res = __shadow_status(m, gpfn)) )
        shadow_unlock(m);

    return res;
}


static inline void put_shadow_status(
    struct mm_struct *m)
{
    shadow_unlock(m);
}
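
/*
 * Illustrative sketch of the locking protocol implied above:
 * get_shadow_status() returns with m->shadow_lock held only when it finds
 * an entry, so put_shadow_status() is called on the success path alone.
 * The caller name is hypothetical.
 */
#if 0
static inline int example_query_shadow(struct mm_struct *m, unsigned int gpfn)
{
    unsigned long s;

    if ( (s = get_shadow_status(m, gpfn)) != 0 )
    {
        /* ... use the shadow frame (s & PSH_pfn_mask) under the lock ... */
        put_shadow_status(m);
        return 1;
    }

    return 0; /* lock already dropped by get_shadow_status() */
}
#endif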

static inline void delete_shadow_status(
    struct mm_struct *m, unsigned int gpfn)
{
    struct shadow_status *p, *x, *n, *head;

    ASSERT(spin_is_locked(&m->shadow_lock));
    ASSERT(gpfn != 0);

    head = hash_bucket(m, gpfn);

    SH_VVLOG("delete gpfn=%08x bucket=%p", gpfn, head);
    shadow_audit(m, 0);

    /* Match on head item? */
    if ( head->pfn == gpfn )
    {
        if ( (n = head->next) != NULL )
        {
            /* Overwrite head with contents of following node. */
            head->pfn = n->pfn;
            head->spfn_and_flags = n->spfn_and_flags;

            /* Delete following node. */
            head->next = n->next;

            /* Add deleted node to the free list. */
            n->pfn = 0;
            n->spfn_and_flags = 0;
            n->next = m->shadow_ht_free;
            m->shadow_ht_free = n;
        }
        else
        {
            /* This bucket is now empty. Initialise the head node. */
            head->pfn = 0;
            head->spfn_and_flags = 0;
        }

        goto found;
    }

    p = head;
    x = head->next;

    do
    {
        if ( x->pfn == gpfn )
        {
            /* Delete matching node. */
            p->next = x->next;

            /* Add deleted node to the free list. */
            x->pfn = 0;
            x->spfn_and_flags = 0;
            x->next = m->shadow_ht_free;
            m->shadow_ht_free = x;

            goto found;
        }

        p = x;
        x = x->next;
    }
    while ( x != NULL );

    /* If we got here, it wasn't in the list! */
    BUG();

 found:
    shadow_audit(m, 0);
}


static inline void set_shadow_status(
    struct mm_struct *m, unsigned int gpfn, unsigned long s)
{
    struct shadow_status *x, *head, *extra;
    int i;

    ASSERT(spin_is_locked(&m->shadow_lock));
    ASSERT(gpfn != 0);
    ASSERT(s & PSH_shadowed);

    x = head = hash_bucket(m, gpfn);

    SH_VVLOG("set gpfn=%08x s=%08lx bucket=%p(%p)", gpfn, s, x, x->next);
    shadow_audit(m, 0);

    /*
     * STEP 1. If page is already in the table, update it in place.
     */

    do
    {
        if ( x->pfn == gpfn )
        {
            x->spfn_and_flags = s;
            goto done;
        }

        x = x->next;
    }
    while ( x != NULL );

    /*
     * STEP 2. The page must be inserted into the table.
     */

    /* If the bucket is empty then insert the new page as the head item. */
    if ( head->pfn == 0 )
    {
        head->pfn = gpfn;
        head->spfn_and_flags = s;
        ASSERT(head->next == NULL);
        goto done;
    }

    /* We need to allocate a new node. Ensure the quicklist is non-empty. */
    if ( unlikely(m->shadow_ht_free == NULL) )
    {
        SH_LOG("Allocate more shadow hashtable blocks.");

        extra = xmalloc_bytes(
            sizeof(void *) + (shadow_ht_extra_size * sizeof(*x)));

        /* XXX Should be more graceful here. */
        if ( extra == NULL )
            BUG();

        memset(extra, 0, sizeof(void *) + (shadow_ht_extra_size * sizeof(*x)));

        /* Record the allocation block so it can be correctly freed later. */
        m->shadow_extras_count++;
        *((struct shadow_status **)&extra[shadow_ht_extra_size]) =
            m->shadow_ht_extras;
        m->shadow_ht_extras = &extra[0];

        /* Thread a free chain through the newly-allocated nodes. */
        for ( i = 0; i < (shadow_ht_extra_size - 1); i++ )
            extra[i].next = &extra[i+1];
        extra[i].next = NULL;

        /* Add the new nodes to the free list. */
        m->shadow_ht_free = &extra[0];
    }

    /* Allocate a new node from the quicklist. */
    x = m->shadow_ht_free;
    m->shadow_ht_free = x->next;

    /* Initialise the new node and insert directly after the head item. */
    x->pfn = gpfn;
    x->spfn_and_flags = s;
    x->next = head->next;
    head->next = x;

 done:
    shadow_audit(m, 0);
}
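
/*
 * Illustrative sketch of the insertion side: once a new shadow page has been
 * built, its pfn is recorded with the PSH_shadowed flag set, as the ASSERT in
 * set_shadow_status() requires, and the shadow lock must already be held.
 * The helper name and 'new_spfn' are hypothetical.
 */
#if 0
static inline void example_record_new_shadow(
    struct mm_struct *m, unsigned int gpfn, unsigned long new_spfn)
{
    ASSERT(spin_is_locked(&m->shadow_lock));

    set_shadow_status(m, gpfn, new_spfn | PSH_shadowed);

    /* Later lookups return (PSH_shadowed | new_spfn); the frame number is
     * recovered with (value & PSH_pfn_mask). */
}
#endif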

#ifdef CONFIG_VMX
#include <asm/domain_page.h>

static inline void vmx_update_shadow_state(
    struct mm_struct *mm, unsigned long gpfn, unsigned long spfn)
{
    l2_pgentry_t *mpl2e = 0;
    l2_pgentry_t *gpl2e, *spl2e;

    /* unmap the old mappings */
    if (mm->shadow_vtable)
        unmap_domain_mem(mm->shadow_vtable);
    if (mm->vpagetable)
        unmap_domain_mem(mm->vpagetable);

    /* new mapping */
    mpl2e = (l2_pgentry_t *)
        map_domain_mem(pagetable_val(mm->monitor_table));

    mpl2e[SH_LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT] =
        mk_l2_pgentry((spfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
    __flush_tlb_one(SH_LINEAR_PT_VIRT_START);

    spl2e = (l2_pgentry_t *) map_domain_mem(spfn << PAGE_SHIFT);
    gpl2e = (l2_pgentry_t *) map_domain_mem(gpfn << PAGE_SHIFT);
    memset(spl2e, 0, ENTRIES_PER_L2_PAGETABLE * sizeof(l2_pgentry_t));

    mm->shadow_table = mk_pagetable(spfn<<PAGE_SHIFT);
    mm->shadow_vtable = spl2e;
    mm->vpagetable = gpl2e; /* expect the guest did clean this up */
    unmap_domain_mem(mpl2e);
}

static inline void __shadow_mk_pagetable( struct mm_struct *mm )
{
    unsigned long gpfn = pagetable_val(mm->pagetable) >> PAGE_SHIFT;
    unsigned long spfn;
    SH_VLOG("0: __shadow_mk_pagetable(gpfn=%08lx)", gpfn);

    if (mm->shadow_mode == SHM_full_32)
    {
        unsigned long guest_gpfn;
        guest_gpfn = machine_to_phys_mapping[gpfn];

        SH_VVLOG("__shadow_mk_pagetable(guest_gpfn=%08lx, gpfn=%08lx)",
                 guest_gpfn, gpfn);

        spfn = __shadow_status(mm, guest_gpfn) & PSH_pfn_mask;
        if ( unlikely(spfn == 0) ) {
            spfn = shadow_l2_table(mm, gpfn);
            mm->shadow_table = mk_pagetable(spfn<<PAGE_SHIFT);
        } else {
            vmx_update_shadow_state(mm, gpfn, spfn);
        }
    } else {
        spfn = __shadow_status(mm, gpfn) & PSH_pfn_mask;

        if ( unlikely(spfn == 0) ) {
            spfn = shadow_l2_table(mm, gpfn);
        }
        mm->shadow_table = mk_pagetable(spfn<<PAGE_SHIFT);
    }
}
#else
static inline void __shadow_mk_pagetable(struct mm_struct *mm)
{
    unsigned long gpfn = pagetable_val(mm->pagetable) >> PAGE_SHIFT;
    unsigned long spfn = __shadow_status(mm, gpfn);

    if ( unlikely(spfn == 0) )
        spfn = shadow_l2_table(mm, gpfn);

    mm->shadow_table = mk_pagetable(spfn << PAGE_SHIFT);
}
#endif /* CONFIG_VMX */

static inline void shadow_mk_pagetable(struct mm_struct *mm)
{
    if ( unlikely(mm->shadow_mode) )
    {
        SH_VVLOG("shadow_mk_pagetable( gptbase=%08lx, mode=%d )",
                 pagetable_val(mm->pagetable), mm->shadow_mode );

        shadow_lock(mm);
        __shadow_mk_pagetable(mm);
        shadow_unlock(mm);

        SH_VVLOG("leaving shadow_mk_pagetable:");

        SH_VVLOG("( gptbase=%08lx, mode=%d ) sh=%08lx",
                 pagetable_val(mm->pagetable), mm->shadow_mode,
                 pagetable_val(mm->shadow_table) );
    }
}

#if SHADOW_DEBUG
extern int check_pagetable(struct mm_struct *m, pagetable_t pt, char *s);
#else
#define check_pagetable(m, pt, s) ((void)0)
#endif

#endif /* _XEN_SHADOW_H */