ia64/xen-unstable

view xen/arch/x86/mm/hap/hap.c @ 15369:1feb91894e11

[HVM] HAP tidying: tighten up locking discipline; remove dead/useless code
and unnecessary VMEXITs.
Signed-off-by: Tim Deegan <Tim.Deegan@xensource.com>

author   Tim Deegan <Tim.Deegan@xensource.com>
date     Fri Jun 15 16:51:08 2007 +0100
parents  2c8c6ca1296b
children b8e8061c5a98
/******************************************************************************
 * arch/x86/mm/hap/hap.c
 *
 * hardware assisted paging
 * Copyright (c) 2007 Advanced Micro Devices (Wei Huang)
 * Parts of this code are Copyright (c) 2007 by XenSource Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/mm.h>
#include <xen/trace.h>
#include <xen/sched.h>
#include <xen/perfc.h>
#include <xen/irq.h>
#include <xen/domain_page.h>
#include <xen/guest_access.h>
#include <xen/keyhandler.h>
#include <asm/event.h>
#include <asm/page.h>
#include <asm/current.h>
#include <asm/flushtlb.h>
#include <asm/shared.h>
#include <asm/hap.h>
#include <asm/paging.h>
#include <asm/domain.h>

#include "private.h"

/* Override macros from asm/page.h to make them work with mfn_t */
#undef mfn_to_page
#define mfn_to_page(_m) (frame_table + mfn_x(_m))
#undef mfn_valid
#define mfn_valid(_mfn) (mfn_x(_mfn) < max_page)
#undef page_to_mfn
#define page_to_mfn(_pg) (_mfn((_pg) - frame_table))
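
/* For illustration only (a sketch, not part of the original file): in
 * debug builds mfn_t is Xen's typesafe wrapper around a raw machine frame
 * number, roughly
 *
 *     typedef struct { unsigned long mfn; } mfn_t;
 *     #define _mfn(m)  ((mfn_t) { (m) })
 *     #define mfn_x(m) ((m).mfn)
 *
 * so the overrides above simply unwrap/rewrap the raw frame number around
 * the same frame_table arithmetic that asm/page.h does on plain integers. */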

/************************************************/
/*            HAP LOG DIRTY SUPPORT             */
/************************************************/
/* hap code to call when log_dirty is enabled. Returns 0 if no problem found. */
int hap_enable_log_dirty(struct domain *d)
{
    /* turn on PG_log_dirty bit in paging mode */
    hap_lock(d);
    d->arch.paging.mode |= PG_log_dirty;
    hap_unlock(d);

    /* set l1e entries of P2M table to NOT_WRITABLE. */
    p2m_set_flags_global(d, (_PAGE_PRESENT|_PAGE_USER));
    flush_tlb_mask(d->domain_dirty_cpumask);
    return 0;
}

int hap_disable_log_dirty(struct domain *d)
{
    hap_lock(d);
    d->arch.paging.mode &= ~PG_log_dirty;
    hap_unlock(d);

    /* set l1e entries of P2M table back to normal mode */
    p2m_set_flags_global(d, __PAGE_HYPERVISOR|_PAGE_USER);
    return 0;
}

void hap_clean_dirty_bitmap(struct domain *d)
{
    /* mark physical memory as NOT_WRITABLE and flush the TLB */
    p2m_set_flags_global(d, (_PAGE_PRESENT|_PAGE_USER));
    flush_tlb_mask(d->domain_dirty_cpumask);
}
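
/* How these hooks fit the wider log-dirty machinery (a summary based on
 * the common paging code's behaviour, not code from this file):
 * hap_domain_init() below registers them via paging_log_dirty_init(), and
 * the common code invokes them from the XEN_DOMCTL_SHADOW_OP_ENABLE_LOGDIRTY
 * / _OFF / _CLEAN operations. With the P2M entries made read-only, a guest
 * write takes a nested-paging fault into Xen, which marks the gfn in the
 * dirty bitmap and re-grants write access. */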

/************************************************/
/*             HAP SUPPORT FUNCTIONS            */
/************************************************/
mfn_t hap_alloc(struct domain *d)
{
    struct page_info *sp = NULL;
    void *p;

    ASSERT(hap_locked_by_me(d));

    sp = list_entry(d->arch.paging.hap.freelists.next, struct page_info, list);
    list_del(&sp->list);
    d->arch.paging.hap.free_pages -= 1;

    /* Now safe to clear the page for reuse */
    p = hap_map_domain_page(page_to_mfn(sp));
    ASSERT(p != NULL);
    clear_page(p);
    hap_unmap_domain_page(p);

    return page_to_mfn(sp);
}

void hap_free(struct domain *d, mfn_t smfn)
{
    struct page_info *sp = mfn_to_page(smfn);

    ASSERT(hap_locked_by_me(d));

    d->arch.paging.hap.free_pages += 1;
    list_add_tail(&sp->list, &d->arch.paging.hap.freelists);
}
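
/* Editorial note: hap_alloc() takes the head of the freelist without
 * checking that the list is non-empty, so callers must guarantee the pool
 * has free pages -- e.g. hap_enable() below seeds it with 256 pages via
 * hap_set_allocation() before any table is built. */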

struct page_info * hap_alloc_p2m_page(struct domain *d)
{
    struct page_info *pg;
    mfn_t mfn;
    void *p;

    hap_lock(d);

#if CONFIG_PAGING_LEVELS == 3
    /* Under PAE mode, the top-level P2M table must be allocated below the
     * 4GB boundary because h_cr3 is only 32 bits wide. We use
     * alloc_domheap_pages() to enforce this requirement. This page will be
     * de-allocated in hap_free_p2m_page(), like other P2M pages.
     */
    if ( d->arch.paging.hap.p2m_pages == 0 )
    {
        pg = alloc_domheap_pages(NULL, 0, MEMF_bits(32));
        d->arch.paging.hap.p2m_pages += 1;
    }
    else
#endif
    {
        pg = mfn_to_page(hap_alloc(d));

        d->arch.paging.hap.p2m_pages += 1;
        d->arch.paging.hap.total_pages -= 1;
    }

    if ( pg == NULL ) {
        hap_unlock(d);
        return NULL;
    }

    hap_unlock(d);

    page_set_owner(pg, d);
    pg->count_info = 1;
    mfn = page_to_mfn(pg);
    p = hap_map_domain_page(mfn);
    clear_page(p);
    hap_unmap_domain_page(p);

    return pg;
}

void hap_free_p2m_page(struct domain *d, struct page_info *pg)
{
    ASSERT(page_get_owner(pg) == d);
    /* Should have just the one ref we gave it in alloc_p2m_page() */
    if ( (pg->count_info & PGC_count_mask) != 1 ) {
        HAP_ERROR("Odd p2m page count c=%#x t=%"PRtype_info"\n",
                  pg->count_info, pg->u.inuse.type_info);
    }
    pg->count_info = 0;
    /* Free should not decrement domain's total allocation, since
     * these pages were allocated without an owner. */
    page_set_owner(pg, NULL);
    free_domheap_pages(pg, 0);
    d->arch.paging.hap.p2m_pages--;
    ASSERT(d->arch.paging.hap.p2m_pages >= 0);
}

/* Return the size of the pool, rounded up to the nearest MB */
static unsigned int
hap_get_allocation(struct domain *d)
{
    unsigned int pg = d->arch.paging.hap.total_pages;

    HERE_I_AM;
    return ((pg >> (20 - PAGE_SHIFT))
            + ((pg & ((1 << (20 - PAGE_SHIFT)) - 1)) ? 1 : 0));
}
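
/* Worked example of the rounding above (assuming 4kB pages, so
 * PAGE_SHIFT == 12 and 20 - PAGE_SHIFT == 8): a pool of 300 pages gives
 * 300 >> 8 == 1 whole MB, and the 300 & 0xff == 44 leftover pages round
 * up to a second, so 2MB is reported; exactly 256 pages reports 1MB. */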

/* Set the pool of pages to the required number of pages.
 * Returns 0 for success, non-zero for failure. */
static unsigned int
hap_set_allocation(struct domain *d, unsigned int pages, int *preempted)
{
    struct page_info *sp;

    ASSERT(hap_locked_by_me(d));

    while ( d->arch.paging.hap.total_pages != pages ) {
        if ( d->arch.paging.hap.total_pages < pages ) {
            /* Need to allocate more memory from domheap */
            sp = alloc_domheap_pages(NULL, 0, 0);
            if ( sp == NULL ) {
                HAP_PRINTK("failed to allocate hap pages.\n");
                return -ENOMEM;
            }
            d->arch.paging.hap.free_pages += 1;
            d->arch.paging.hap.total_pages += 1;
            list_add_tail(&sp->list, &d->arch.paging.hap.freelists);
        }
        else if ( d->arch.paging.hap.total_pages > pages ) {
            /* Need to return memory to domheap */
            ASSERT(!list_empty(&d->arch.paging.hap.freelists));
            sp = list_entry(d->arch.paging.hap.freelists.next,
                            struct page_info, list);
            list_del(&sp->list);
            d->arch.paging.hap.free_pages -= 1;
            d->arch.paging.hap.total_pages -= 1;
            sp->count_info = 0;
            free_domheap_pages(sp, 0);
        }

        /* Check to see if we need to yield and try again */
        if ( preempted && hypercall_preempt_check() ) {
            *preempted = 1;
            return 0;
        }
    }

    return 0;
}

#if CONFIG_PAGING_LEVELS == 4
void hap_install_xen_entries_in_l4(struct vcpu *v, mfn_t gl4mfn, mfn_t sl4mfn)
{
    struct domain *d = v->domain;
    l4_pgentry_t *sl4e;

    sl4e = hap_map_domain_page(sl4mfn);
    ASSERT(sl4e != NULL);

    /* Copy the common Xen mappings from the idle domain */
    memcpy(&sl4e[ROOT_PAGETABLE_FIRST_XEN_SLOT],
           &idle_pg_table[ROOT_PAGETABLE_FIRST_XEN_SLOT],
           ROOT_PAGETABLE_XEN_SLOTS * sizeof(l4_pgentry_t));

    /* Install the per-domain mappings for this domain */
    sl4e[l4_table_offset(PERDOMAIN_VIRT_START)] =
        l4e_from_pfn(mfn_x(page_to_mfn(virt_to_page(d->arch.mm_perdomain_l3))),
                     __PAGE_HYPERVISOR);

    sl4e[l4_table_offset(LINEAR_PT_VIRT_START)] =
        l4e_from_pfn(mfn_x(gl4mfn), __PAGE_HYPERVISOR);

    /* install domain-specific P2M table */
    sl4e[l4_table_offset(RO_MPT_VIRT_START)] =
        l4e_from_pfn(mfn_x(pagetable_get_mfn(d->arch.phys_table)),
                     __PAGE_HYPERVISOR);

    hap_unmap_domain_page(sl4e);
}
#endif /* CONFIG_PAGING_LEVELS == 4 */
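
/* Summary of the slots populated above (drawn from the code, 4-level case):
 *   - ROOT_PAGETABLE_*_XEN_SLOTS : Xen's own mappings, copied from the
 *                                  idle domain's top level;
 *   - PERDOMAIN_VIRT_START       : this domain's per-domain mappings;
 *   - LINEAR_PT_VIRT_START       : linear self-map, pointing at gl4mfn;
 *   - RO_MPT_VIRT_START          : the domain's P2M root.
 * The 3- and 2-level installers below build the same kinds of entry with
 * level-appropriate plumbing. */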

#if CONFIG_PAGING_LEVELS == 3
void hap_install_xen_entries_in_l2h(struct vcpu *v, mfn_t sl2hmfn)
{
    struct domain *d = v->domain;
    l2_pgentry_t *sl2e;
    l3_pgentry_t *p2m;

    int i;

    sl2e = hap_map_domain_page(sl2hmfn);
    ASSERT(sl2e != NULL);

    /* Copy the common Xen mappings from the idle domain */
    memcpy(&sl2e[L2_PAGETABLE_FIRST_XEN_SLOT & (L2_PAGETABLE_ENTRIES-1)],
           &idle_pg_table_l2[L2_PAGETABLE_FIRST_XEN_SLOT],
           L2_PAGETABLE_XEN_SLOTS * sizeof(l2_pgentry_t));

    /* Install the per-domain mappings for this domain */
    for ( i = 0; i < PDPT_L2_ENTRIES; i++ )
        sl2e[l2_table_offset(PERDOMAIN_VIRT_START) + i] =
            l2e_from_pfn(
                mfn_x(page_to_mfn(virt_to_page(d->arch.mm_perdomain_pt) + i)),
                __PAGE_HYPERVISOR);

    for ( i = 0; i < HAP_L3_PAGETABLE_ENTRIES; i++ )
        sl2e[l2_table_offset(LINEAR_PT_VIRT_START) + i] =
            l2e_empty();

    /* Install the domain-specific p2m table */
    ASSERT(pagetable_get_pfn(d->arch.phys_table) != 0);
    p2m = hap_map_domain_page(pagetable_get_mfn(d->arch.phys_table));
    for ( i = 0; i < MACHPHYS_MBYTES>>1; i++ )
    {
        sl2e[l2_table_offset(RO_MPT_VIRT_START) + i] =
            (l3e_get_flags(p2m[i]) & _PAGE_PRESENT)
            ? l2e_from_pfn(mfn_x(_mfn(l3e_get_pfn(p2m[i]))),
                           __PAGE_HYPERVISOR)
            : l2e_empty();
    }
    hap_unmap_domain_page(p2m);
    hap_unmap_domain_page(sl2e);
}
#endif

#if CONFIG_PAGING_LEVELS == 2
void hap_install_xen_entries_in_l2(struct vcpu *v, mfn_t gl2mfn, mfn_t sl2mfn)
{
    struct domain *d = v->domain;
    l2_pgentry_t *sl2e;
    int i;

    sl2e = hap_map_domain_page(sl2mfn);
    ASSERT(sl2e != NULL);

    /* Copy the common Xen mappings from the idle domain */
    memcpy(&sl2e[L2_PAGETABLE_FIRST_XEN_SLOT],
           &idle_pg_table[L2_PAGETABLE_FIRST_XEN_SLOT],
           L2_PAGETABLE_XEN_SLOTS * sizeof(l2_pgentry_t));

    /* Install the per-domain mappings for this domain */
    for ( i = 0; i < PDPT_L2_ENTRIES; i++ )
        sl2e[l2_table_offset(PERDOMAIN_VIRT_START) + i] =
            l2e_from_pfn(
                mfn_x(page_to_mfn(virt_to_page(d->arch.mm_perdomain_pt) + i)),
                __PAGE_HYPERVISOR);

    sl2e[l2_table_offset(LINEAR_PT_VIRT_START)] =
        l2e_from_pfn(mfn_x(gl2mfn), __PAGE_HYPERVISOR);

    /* install domain-specific P2M table */
    sl2e[l2_table_offset(RO_MPT_VIRT_START)] =
        l2e_from_pfn(mfn_x(pagetable_get_mfn(d->arch.phys_table)),
                     __PAGE_HYPERVISOR);

    hap_unmap_domain_page(sl2e);
}
#endif

mfn_t hap_make_monitor_table(struct vcpu *v)
{
    struct domain *d = v->domain;

    ASSERT(pagetable_get_pfn(v->arch.monitor_table) == 0);

#if CONFIG_PAGING_LEVELS == 4
    {
        mfn_t m4mfn;
        m4mfn = hap_alloc(d);
        hap_install_xen_entries_in_l4(v, m4mfn, m4mfn);
        return m4mfn;
    }
#elif CONFIG_PAGING_LEVELS == 3
    {
        mfn_t m3mfn, m2mfn;
        l3_pgentry_t *l3e;
        l2_pgentry_t *l2e;
        int i;

        m3mfn = hap_alloc(d);

        /* Install a monitor l2 table in slot 3 of the l3 table.
         * This is used for all Xen entries, including linear maps.
         */
        m2mfn = hap_alloc(d);
        l3e = hap_map_domain_page(m3mfn);
        l3e[3] = l3e_from_pfn(mfn_x(m2mfn), _PAGE_PRESENT);
        hap_install_xen_entries_in_l2h(v, m2mfn);
        /* Install the monitor's own linear map */
        l2e = hap_map_domain_page(m2mfn);
        for ( i = 0; i < L3_PAGETABLE_ENTRIES; i++ )
            l2e[l2_table_offset(LINEAR_PT_VIRT_START) + i] =
                (l3e_get_flags(l3e[i]) & _PAGE_PRESENT)
                ? l2e_from_pfn(l3e_get_pfn(l3e[i]), __PAGE_HYPERVISOR)
                : l2e_empty();
        hap_unmap_domain_page(l2e);
        hap_unmap_domain_page(l3e);

        HAP_PRINTK("new monitor table: %#lx\n", mfn_x(m3mfn));
        return m3mfn;
    }
#else
    {
        mfn_t m2mfn;

        m2mfn = hap_alloc(d);
        hap_install_xen_entries_in_l2(v, m2mfn, m2mfn);

        return m2mfn;
    }
#endif
}

void hap_destroy_monitor_table(struct vcpu* v, mfn_t mmfn)
{
    struct domain *d = v->domain;

#if CONFIG_PAGING_LEVELS == 3
    /* Need to destroy the l2 monitor page in slot 3 too */
    {
        l3_pgentry_t *l3e = hap_map_domain_page(mmfn);
        ASSERT(l3e_get_flags(l3e[3]) & _PAGE_PRESENT);
        hap_free(d, _mfn(l3e_get_pfn(l3e[3])));
        hap_unmap_domain_page(l3e);
    }
#endif

    /* Put the memory back in the pool */
    hap_free(d, mmfn);
}

/************************************************/
/*          HAP DOMAIN LEVEL FUNCTIONS          */
/************************************************/
void hap_domain_init(struct domain *d)
{
    hap_lock_init(d);
    INIT_LIST_HEAD(&d->arch.paging.hap.freelists);

    /* This domain will use HAP for log-dirty mode */
    paging_log_dirty_init(d, hap_enable_log_dirty, hap_disable_log_dirty,
                          hap_clean_dirty_bitmap);
}

/* return 0 for success, -errno for failure */
int hap_enable(struct domain *d, u32 mode)
{
    unsigned int old_pages;
    int rv = 0;

    HERE_I_AM;

    domain_pause(d);
    /* error check */
    if ( (d == current->domain) ) {
        rv = -EINVAL;
        goto out;
    }

    old_pages = d->arch.paging.hap.total_pages;
    if ( old_pages == 0 ) {
        unsigned int r;
        hap_lock(d);
        r = hap_set_allocation(d, 256, NULL);
        hap_unlock(d);
        if ( r != 0 ) {
            hap_set_allocation(d, 0, NULL);
            rv = -ENOMEM;
            goto out;
        }
    }

    /* allocate P2M table */
    if ( mode & PG_translate ) {
        rv = p2m_alloc_table(d, hap_alloc_p2m_page, hap_free_p2m_page);
        if ( rv != 0 )
            goto out;
    }

    d->arch.paging.mode = mode | PG_SH_enable;

 out:
    domain_unpause(d);
    return rv;
}
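
/* Editorial note on the initial pool: 256 pages at 4kB each is 1MB, the
 * smallest non-zero size hap_get_allocation() can report; the toolstack
 * can grow it later through XEN_DOMCTL_SHADOW_OP_SET_ALLOCATION (see
 * hap_domctl() below). */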

void hap_final_teardown(struct domain *d)
{
    HERE_I_AM;

    if ( d->arch.paging.hap.total_pages != 0 )
        hap_teardown(d);

    p2m_teardown(d);
    ASSERT(d->arch.paging.hap.p2m_pages == 0);
}

void hap_teardown(struct domain *d)
{
    struct vcpu *v;
    mfn_t mfn;
    HERE_I_AM;

    ASSERT(d->is_dying);
    ASSERT(d != current->domain);

    if ( !hap_locked_by_me(d) )
        hap_lock(d); /* Keep various asserts happy */

    if ( paging_mode_enabled(d) ) {
        /* release the monitor table held by each vcpu */
        for_each_vcpu(d, v) {
            if ( v->arch.paging.mode && paging_mode_external(d) ) {
                mfn = pagetable_get_mfn(v->arch.monitor_table);
                if ( mfn_valid(mfn) && (mfn_x(mfn) != 0) )
                    hap_destroy_monitor_table(v, mfn);
                v->arch.monitor_table = pagetable_null();
            }
        }
    }

    if ( d->arch.paging.hap.total_pages != 0 ) {
        HAP_PRINTK("teardown of domain %u starts."
                   " pages total = %u, free = %u, p2m=%u\n",
                   d->domain_id,
                   d->arch.paging.hap.total_pages,
                   d->arch.paging.hap.free_pages,
                   d->arch.paging.hap.p2m_pages);
        hap_set_allocation(d, 0, NULL);
        HAP_PRINTK("teardown done."
                   " pages total = %u, free = %u, p2m=%u\n",
                   d->arch.paging.hap.total_pages,
                   d->arch.paging.hap.free_pages,
                   d->arch.paging.hap.p2m_pages);
        ASSERT(d->arch.paging.hap.total_pages == 0);
    }

    d->arch.paging.mode &= ~PG_log_dirty;

    hap_unlock(d);
}

int hap_domctl(struct domain *d, xen_domctl_shadow_op_t *sc,
               XEN_GUEST_HANDLE(void) u_domctl)
{
    int rc, preempted = 0;

    HERE_I_AM;

    switch ( sc->op ) {
    case XEN_DOMCTL_SHADOW_OP_SET_ALLOCATION:
        hap_lock(d);
        rc = hap_set_allocation(d, sc->mb << (20 - PAGE_SHIFT), &preempted);
        hap_unlock(d);
        if ( preempted )
            /* Not finished.  Set up to re-run the call. */
            rc = hypercall_create_continuation(__HYPERVISOR_domctl, "h",
                                               u_domctl);
        else
            /* Finished.  Return the new allocation */
            sc->mb = hap_get_allocation(d);
        return rc;
    case XEN_DOMCTL_SHADOW_OP_GET_ALLOCATION:
        sc->mb = hap_get_allocation(d);
        return 0;
    default:
        HAP_ERROR("Bad hap domctl op %u\n", sc->op);
        return -EINVAL;
    }
}
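
/* A minimal caller-side sketch (an assumption for illustration, not code
 * from this file): a toolstack would reach the SET_ALLOCATION case above
 * with something like
 *
 *     struct xen_domctl domctl;
 *     memset(&domctl, 0, sizeof(domctl));
 *     domctl.cmd = XEN_DOMCTL_shadow_op;
 *     domctl.domain = domid;
 *     domctl.u.shadow_op.op = XEN_DOMCTL_SHADOW_OP_SET_ALLOCATION;
 *     domctl.u.shadow_op.mb = megabytes;
 *     // ...then issue the hypercall via the usual libxc plumbing
 *
 * If hap_set_allocation() is preempted, the continuation re-enters
 * hap_domctl() with the same guest handle until the target is reached. */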

void hap_vcpu_init(struct vcpu *v)
{
    v->arch.paging.mode = &hap_paging_real_mode;
}

/************************************************/
/*          HAP PAGING MODE FUNCTIONS           */
/************************************************/
/*
 * HAP guests can handle page faults (in the guest page tables) without
 * needing any action from Xen, so we should not be intercepting them.
 */
int hap_page_fault(struct vcpu *v, unsigned long va,
                   struct cpu_user_regs *regs)
{
    HAP_ERROR("Intercepted a guest #PF (%u:%u) with HAP enabled.\n",
              v->domain->domain_id, v->vcpu_id);
    domain_crash(v->domain);
    return 0;
}

/*
 * HAP guests can handle invlpg without needing any action from Xen, so
 * we should not be intercepting it.
 */
int hap_invlpg(struct vcpu *v, unsigned long va)
{
    HAP_ERROR("Intercepted a guest INVLPG (%u:%u) with HAP enabled.\n",
              v->domain->domain_id, v->vcpu_id);
    domain_crash(v->domain);
    return 0;
}

/*
 * HAP guests do not need to take any action on CR3 writes (they are still
 * intercepted, so that Xen's copy of the guest's CR3 can be kept in sync.)
 */
void hap_update_cr3(struct vcpu *v, int do_locking)
{
}

void hap_update_paging_modes(struct vcpu *v)
{
    struct domain *d;

    HERE_I_AM;

    d = v->domain;
    hap_lock(d);

    /* Update the guest paging mode. Note that we rely on hvm functions to
     * detect the guest's paging mode, so make sure the shadow registers
     * (CR0, CR4, EFER) reflect the guest's status correctly.
     */
    if ( hvm_paging_enabled(v) ) {
        if ( hvm_long_mode_enabled(v) )
            v->arch.paging.mode = &hap_paging_long_mode;
        else if ( hvm_pae_enabled(v) )
            v->arch.paging.mode = &hap_paging_pae_mode;
        else
            v->arch.paging.mode = &hap_paging_protected_mode;
    }
    else {
        v->arch.paging.mode = &hap_paging_real_mode;
    }

    v->arch.paging.translate_enabled = !!hvm_paging_enabled(v);

    if ( pagetable_is_null(v->arch.monitor_table) ) {
        mfn_t mmfn = hap_make_monitor_table(v);
        v->arch.monitor_table = pagetable_from_mfn(mmfn);
        make_cr3(v, mfn_x(mmfn));
    }

    hap_unlock(d);
}
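
/* Summary of the selection above (from the guest state checks):
 *   paging disabled     -> hap_paging_real_mode       (guest_levels 1)
 *   protected, no PAE   -> hap_paging_protected_mode  (guest_levels 2)
 *   PAE enabled         -> hap_paging_pae_mode        (guest_levels 3)
 *   long mode enabled   -> hap_paging_long_mode       (guest_levels 4)
 * matching the paging_mode structures at the bottom of this file. */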

#if CONFIG_PAGING_LEVELS == 3
static void p2m_install_entry_in_monitors(struct domain *d, l3_pgentry_t *l3e)
/* Special case, only used for external-mode domains on PAE hosts:
 * update the mapping of the p2m table.  Once again, this is trivial in
 * other paging modes (one top-level entry points to the top-level p2m,
 * no maintenance needed), but PAE makes life difficult by needing copies
 * of the p2m table's l3es in eight l2h slots of the monitor table.  This
 * function makes fresh copies when a p2m l3e changes. */
{
    l2_pgentry_t *ml2e;
    struct vcpu *v;
    unsigned int index;

    index = ((unsigned long)l3e & ~PAGE_MASK) / sizeof(l3_pgentry_t);
    ASSERT(index < MACHPHYS_MBYTES>>1);

    for_each_vcpu(d, v) {
        if ( pagetable_get_pfn(v->arch.monitor_table) == 0 )
            continue;

        ASSERT(paging_mode_external(v->domain));

        if ( v == current ) /* OK to use linear map of monitor_table */
            ml2e = __linear_l2_table + l2_linear_offset(RO_MPT_VIRT_START);
        else {
            l3_pgentry_t *ml3e;
            ml3e = hap_map_domain_page(pagetable_get_mfn(v->arch.monitor_table));
            ASSERT(l3e_get_flags(ml3e[3]) & _PAGE_PRESENT);
            ml2e = hap_map_domain_page(_mfn(l3e_get_pfn(ml3e[3])));
            ml2e += l2_table_offset(RO_MPT_VIRT_START);
            hap_unmap_domain_page(ml3e);
        }
        ml2e[index] = l2e_from_pfn(l3e_get_pfn(*l3e), __PAGE_HYPERVISOR);
        if ( v != current )
            hap_unmap_domain_page(ml2e);
    }
}
#endif
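
/* Worked example for the index computation above (assuming an 8-byte
 * l3_pgentry_t): an l3e at byte offset 0x18 within its page gives
 * index == 0x18 / 8 == 3, so slot 3 of each vcpu's l2h copy at
 * RO_MPT_VIRT_START is the one refreshed. */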

void
hap_write_p2m_entry(struct vcpu *v, unsigned long gfn, l1_pgentry_t *p,
                    l1_pgentry_t new, unsigned int level)
{
    hap_lock(v->domain);

    safe_write_pte(p, new);
#if CONFIG_PAGING_LEVELS == 3
    /* install P2M in monitor table for PAE Xen */
    if ( level == 3 )
        /* We have written to the p2m l3: need to sync the per-vcpu
         * copies of it in the monitor tables */
        p2m_install_entry_in_monitors(v->domain, (l3_pgentry_t *)p);
#endif

    hap_unlock(v->domain);
}

/* Entry points into this mode of the hap code. */
struct paging_mode hap_paging_real_mode = {
    .page_fault          = hap_page_fault,
    .invlpg              = hap_invlpg,
    .gva_to_gfn          = hap_gva_to_gfn_real_mode,
    .update_cr3          = hap_update_cr3,
    .update_paging_modes = hap_update_paging_modes,
    .write_p2m_entry     = hap_write_p2m_entry,
    .guest_levels        = 1
};

struct paging_mode hap_paging_protected_mode = {
    .page_fault          = hap_page_fault,
    .invlpg              = hap_invlpg,
    .gva_to_gfn          = hap_gva_to_gfn_protected_mode,
    .update_cr3          = hap_update_cr3,
    .update_paging_modes = hap_update_paging_modes,
    .write_p2m_entry     = hap_write_p2m_entry,
    .guest_levels        = 2
};

struct paging_mode hap_paging_pae_mode = {
    .page_fault          = hap_page_fault,
    .invlpg              = hap_invlpg,
    .gva_to_gfn          = hap_gva_to_gfn_pae_mode,
    .update_cr3          = hap_update_cr3,
    .update_paging_modes = hap_update_paging_modes,
    .write_p2m_entry     = hap_write_p2m_entry,
    .guest_levels        = 3
};

struct paging_mode hap_paging_long_mode = {
    .page_fault          = hap_page_fault,
    .invlpg              = hap_invlpg,
    .gva_to_gfn          = hap_gva_to_gfn_long_mode,
    .update_cr3          = hap_update_cr3,
    .update_paging_modes = hap_update_paging_modes,
    .write_p2m_entry     = hap_write_p2m_entry,
    .guest_levels        = 4
};

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */