ia64/xen-unstable

xen/arch/x86/mm/hap/hap.c @ 15863:4633e9604da9

[HVM] Add type information to the p2m map.

This is a base for memory tricks like page sharing, copy-on-write, lazy
allocation etc. It should also make pass-through MMIO easier to
implement in the p2m.

Signed-off-by: Tim Deegan <Tim.Deegan@xensource.com>

author   Tim Deegan <Tim.Deegan@xensource.com>
date     Mon Sep 10 14:42:30 2007 +0100
parents  86a154e1ef5d
children db9f62d8f7f4
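
As a quick illustration of what the typed p2m buys, the sketch below condenses the log-dirty enable/disable pair from this file into a single hypothetical helper; the function name is invented for illustration, but the types (p2m_ram_rw, p2m_ram_logdirty) and calls are exactly the ones used by the code that follows.

    /* Illustrative sketch only, not part of hap.c: flip all guest RAM
     * between the read-write and log-dirty p2m types added by this patch. */
    static void example_toggle_logdirty(struct domain *d, int enable)
    {
        if ( enable )
        {
            /* Make RAM read-only in the p2m so the next guest write faults. */
            p2m_change_type_global(d, p2m_ram_rw, p2m_ram_logdirty);
            flush_tlb_mask(d->domain_dirty_cpumask);
        }
        else
        {
            /* Restore normal read-write mappings. */
            p2m_change_type_global(d, p2m_ram_logdirty, p2m_ram_rw);
        }
    }
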
/******************************************************************************
 * arch/x86/mm/hap/hap.c
 *
 * hardware assisted paging
 * Copyright (c) 2007 Advanced Micro Devices (Wei Huang)
 * Parts of this code are Copyright (c) 2007 by XenSource Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/mm.h>
#include <xen/trace.h>
#include <xen/sched.h>
#include <xen/perfc.h>
#include <xen/irq.h>
#include <xen/domain_page.h>
#include <xen/guest_access.h>
#include <xen/keyhandler.h>
#include <asm/event.h>
#include <asm/page.h>
#include <asm/current.h>
#include <asm/flushtlb.h>
#include <asm/shared.h>
#include <asm/hap.h>
#include <asm/paging.h>
#include <asm/domain.h>

#include "private.h"

/* Override macros from asm/page.h to make them work with mfn_t */
#undef mfn_to_page
#define mfn_to_page(_m) (frame_table + mfn_x(_m))
#undef mfn_valid
#define mfn_valid(_mfn) (mfn_x(_mfn) < max_page)
#undef page_to_mfn
#define page_to_mfn(_pg) (_mfn((_pg) - frame_table))

/************************************************/
/* HAP LOG DIRTY SUPPORT */
/************************************************/
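
/* In outline: log-dirty under HAP works by retyping guest RAM in the p2m
 * rather than by write-protecting shadow page tables.  Marking a page
 * p2m_ram_logdirty removes write access from its p2m entry, so the next
 * guest write takes a HAP fault into Xen; the fault handler (outside this
 * file) records the page in the dirty bitmap and retypes it p2m_ram_rw. */
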
/* hap code to call when log_dirty is enabled.  Returns 0 if no problem found. */
int hap_enable_log_dirty(struct domain *d)
{
    /* turn on PG_log_dirty bit in paging mode */
    hap_lock(d);
    d->arch.paging.mode |= PG_log_dirty;
    hap_unlock(d);

    /* set l1e entries of P2M table to be read-only. */
    p2m_change_type_global(d, p2m_ram_rw, p2m_ram_logdirty);
    flush_tlb_mask(d->domain_dirty_cpumask);
    return 0;
}

int hap_disable_log_dirty(struct domain *d)
{
    hap_lock(d);
    d->arch.paging.mode &= ~PG_log_dirty;
    hap_unlock(d);

    /* set l1e entries of P2M table back to normal (read-write) mode. */
    p2m_change_type_global(d, p2m_ram_logdirty, p2m_ram_rw);
    return 0;
}

void hap_clean_dirty_bitmap(struct domain *d)
{
    /* set l1e entries of P2M table to be read-only. */
    p2m_change_type_global(d, p2m_ram_rw, p2m_ram_logdirty);
    flush_tlb_mask(d->domain_dirty_cpumask);
}

/************************************************/
/* HAP SUPPORT FUNCTIONS */
/************************************************/
static struct page_info *hap_alloc(struct domain *d)
{
    struct page_info *pg = NULL;
    void *p;

    ASSERT(hap_locked_by_me(d));

    if ( unlikely(list_empty(&d->arch.paging.hap.freelist)) )
        return NULL;

    pg = list_entry(d->arch.paging.hap.freelist.next, struct page_info, list);
    list_del(&pg->list);
    d->arch.paging.hap.free_pages--;

    p = hap_map_domain_page(page_to_mfn(pg));
    ASSERT(p != NULL);
    clear_page(p);
    hap_unmap_domain_page(p);

    return pg;
}

static void hap_free(struct domain *d, mfn_t mfn)
{
    struct page_info *pg = mfn_to_page(mfn);

    ASSERT(hap_locked_by_me(d));

    d->arch.paging.hap.free_pages++;
    list_add_tail(&pg->list, &d->arch.paging.hap.freelist);
}

static struct page_info *hap_alloc_p2m_page(struct domain *d)
{
    struct page_info *pg;

    hap_lock(d);
    pg = hap_alloc(d);

#if CONFIG_PAGING_LEVELS == 3
    /* Under PAE, the top-level P2M table must be allocated below 4GB,
     * because h_cr3 is only 32 bits wide.  We use alloc_domheap_pages()
     * with MEMF_bits(32) to enforce this, exchanging the page we just
     * hap_alloc()ed for a guaranteed 32-bit-clean one. */
    if ( d->arch.paging.hap.p2m_pages == 0
         && mfn_x(page_to_mfn(pg)) >= (1UL << (32 - PAGE_SHIFT)) )
    {
        free_domheap_page(pg);
        pg = alloc_domheap_pages(NULL, 0, MEMF_bits(32));
        if ( likely(pg != NULL) )
        {
            void *p = hap_map_domain_page(page_to_mfn(pg));
            clear_page(p);
            hap_unmap_domain_page(p);
        }
    }
#endif

    if ( likely(pg != NULL) )
    {
        d->arch.paging.hap.total_pages--;
        d->arch.paging.hap.p2m_pages++;
        page_set_owner(pg, d);
        pg->count_info = 1;
    }

    hap_unlock(d);
    return pg;
}

void hap_free_p2m_page(struct domain *d, struct page_info *pg)
{
    hap_lock(d);
    ASSERT(page_get_owner(pg) == d);
    /* Should have just the one ref we gave it in alloc_p2m_page() */
    if ( (pg->count_info & PGC_count_mask) != 1 )
        HAP_ERROR("Odd p2m page count c=%#x t=%"PRtype_info"\n",
                  pg->count_info, pg->u.inuse.type_info);
    pg->count_info = 0;
    /* Free should not decrement domain's total allocation, since
     * these pages were allocated without an owner. */
    page_set_owner(pg, NULL);
    free_domheap_page(pg);
    d->arch.paging.hap.p2m_pages--;
    ASSERT(d->arch.paging.hap.p2m_pages >= 0);
    hap_unlock(d);
}

/* Return the size of the pool, rounded up to the nearest MB */
static unsigned int
hap_get_allocation(struct domain *d)
{
    unsigned int pg = d->arch.paging.hap.total_pages;

    return ((pg >> (20 - PAGE_SHIFT))
            + ((pg & ((1 << (20 - PAGE_SHIFT)) - 1)) ? 1 : 0));
}
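
/* Worked example of the rounding above: x86 has 4kB pages, so PAGE_SHIFT is
 * 12 and (20 - PAGE_SHIFT) is 8, i.e. 256 pages per MB.  A pool of 256 pages
 * reports 1MB; a pool of 300 pages reports (300 >> 8) + 1 = 2MB.  hap_domctl()
 * below performs the inverse conversion, sc->mb << (20 - PAGE_SHIFT), when
 * setting the allocation. */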

/* Set the pool of pages to the required number of pages.
 * Returns 0 for success, non-zero for failure. */
static unsigned int
hap_set_allocation(struct domain *d, unsigned int pages, int *preempted)
{
    struct page_info *pg;

    ASSERT(hap_locked_by_me(d));

    while ( d->arch.paging.hap.total_pages != pages )
    {
        if ( d->arch.paging.hap.total_pages < pages )
        {
            /* Need to allocate more memory from domheap */
            pg = alloc_domheap_page(NULL);
            if ( pg == NULL )
            {
                HAP_PRINTK("failed to allocate hap pages.\n");
                return -ENOMEM;
            }
            d->arch.paging.hap.free_pages++;
            d->arch.paging.hap.total_pages++;
            list_add_tail(&pg->list, &d->arch.paging.hap.freelist);
        }
        else if ( d->arch.paging.hap.total_pages > pages )
        {
            /* Need to return memory to domheap */
            ASSERT(!list_empty(&d->arch.paging.hap.freelist));
            pg = list_entry(d->arch.paging.hap.freelist.next,
                            struct page_info, list);
            list_del(&pg->list);
            d->arch.paging.hap.free_pages--;
            d->arch.paging.hap.total_pages--;
            pg->count_info = 0;
            free_domheap_page(pg);
        }

        /* Check to see if we need to yield and try again */
        if ( preempted && hypercall_preempt_check() )
        {
            *preempted = 1;
            return 0;
        }
    }

    return 0;
}
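
/* Callers that pass a non-NULL 'preempted' pointer must be prepared to call
 * hap_set_allocation() again if *preempted comes back set, since the pool may
 * not yet have reached the requested size; hap_domctl() below does this by
 * creating a hypercall continuation. */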

#if CONFIG_PAGING_LEVELS == 4
static void hap_install_xen_entries_in_l4(struct vcpu *v, mfn_t l4mfn)
{
    struct domain *d = v->domain;
    l4_pgentry_t *l4e;

    l4e = hap_map_domain_page(l4mfn);
    ASSERT(l4e != NULL);

    /* Copy the common Xen mappings from the idle domain */
    memcpy(&l4e[ROOT_PAGETABLE_FIRST_XEN_SLOT],
           &idle_pg_table[ROOT_PAGETABLE_FIRST_XEN_SLOT],
           ROOT_PAGETABLE_XEN_SLOTS * sizeof(l4_pgentry_t));

    /* Install the per-domain mappings for this domain */
    l4e[l4_table_offset(PERDOMAIN_VIRT_START)] =
        l4e_from_pfn(mfn_x(page_to_mfn(virt_to_page(d->arch.mm_perdomain_l3))),
                     __PAGE_HYPERVISOR);

    /* Install a linear mapping */
    l4e[l4_table_offset(LINEAR_PT_VIRT_START)] =
        l4e_from_pfn(mfn_x(l4mfn), __PAGE_HYPERVISOR);

    /* Install the domain-specific P2M table */
    l4e[l4_table_offset(RO_MPT_VIRT_START)] =
        l4e_from_pfn(mfn_x(pagetable_get_mfn(d->arch.phys_table)),
                     __PAGE_HYPERVISOR);

    hap_unmap_domain_page(l4e);
}
#endif /* CONFIG_PAGING_LEVELS == 4 */

#if CONFIG_PAGING_LEVELS == 3
static void hap_install_xen_entries_in_l2h(struct vcpu *v, mfn_t l2hmfn)
{
    struct domain *d = v->domain;
    l2_pgentry_t *l2e;
    l3_pgentry_t *p2m;
    int i;

    l2e = hap_map_domain_page(l2hmfn);
    ASSERT(l2e != NULL);

    /* Copy the common Xen mappings from the idle domain */
    memcpy(&l2e[L2_PAGETABLE_FIRST_XEN_SLOT & (L2_PAGETABLE_ENTRIES-1)],
           &idle_pg_table_l2[L2_PAGETABLE_FIRST_XEN_SLOT],
           L2_PAGETABLE_XEN_SLOTS * sizeof(l2_pgentry_t));

    /* Install the per-domain mappings for this domain */
    for ( i = 0; i < PDPT_L2_ENTRIES; i++ )
        l2e[l2_table_offset(PERDOMAIN_VIRT_START) + i] =
            l2e_from_pfn(
                mfn_x(page_to_mfn(virt_to_page(d->arch.mm_perdomain_pt) + i)),
                __PAGE_HYPERVISOR);

    /* No linear mapping; will be set up by the monitor-table constructor. */
    for ( i = 0; i < 4; i++ )
        l2e[l2_table_offset(LINEAR_PT_VIRT_START) + i] =
            l2e_empty();

    /* Install the domain-specific p2m table */
    ASSERT(pagetable_get_pfn(d->arch.phys_table) != 0);
    p2m = hap_map_domain_page(pagetable_get_mfn(d->arch.phys_table));
    for ( i = 0; i < MACHPHYS_MBYTES>>1; i++ )
    {
        l2e[l2_table_offset(RO_MPT_VIRT_START) + i] =
            (l3e_get_flags(p2m[i]) & _PAGE_PRESENT)
            ? l2e_from_pfn(mfn_x(_mfn(l3e_get_pfn(p2m[i]))),
                           __PAGE_HYPERVISOR)
            : l2e_empty();
    }
    hap_unmap_domain_page(p2m);
    hap_unmap_domain_page(l2e);
}
#endif

#if CONFIG_PAGING_LEVELS == 2
static void hap_install_xen_entries_in_l2(struct vcpu *v, mfn_t l2mfn)
{
    struct domain *d = v->domain;
    l2_pgentry_t *l2e;
    int i;

    l2e = hap_map_domain_page(l2mfn);
    ASSERT(l2e != NULL);

    /* Copy the common Xen mappings from the idle domain */
    memcpy(&l2e[L2_PAGETABLE_FIRST_XEN_SLOT],
           &idle_pg_table[L2_PAGETABLE_FIRST_XEN_SLOT],
           L2_PAGETABLE_XEN_SLOTS * sizeof(l2_pgentry_t));

    /* Install the per-domain mappings for this domain */
    for ( i = 0; i < PDPT_L2_ENTRIES; i++ )
        l2e[l2_table_offset(PERDOMAIN_VIRT_START) + i] =
            l2e_from_pfn(
                mfn_x(page_to_mfn(virt_to_page(d->arch.mm_perdomain_pt) + i)),
                __PAGE_HYPERVISOR);

    /* Install the linear mapping */
    l2e[l2_table_offset(LINEAR_PT_VIRT_START)] =
        l2e_from_pfn(mfn_x(l2mfn), __PAGE_HYPERVISOR);

    /* Install the domain-specific P2M table */
    l2e[l2_table_offset(RO_MPT_VIRT_START)] =
        l2e_from_pfn(mfn_x(pagetable_get_mfn(d->arch.phys_table)),
                     __PAGE_HYPERVISOR);

    hap_unmap_domain_page(l2e);
}
#endif

static mfn_t hap_make_monitor_table(struct vcpu *v)
{
    struct domain *d = v->domain;
    struct page_info *pg;

    ASSERT(pagetable_get_pfn(v->arch.monitor_table) == 0);

#if CONFIG_PAGING_LEVELS == 4
    {
        mfn_t m4mfn;
        if ( (pg = hap_alloc(d)) == NULL )
            goto oom;
        m4mfn = page_to_mfn(pg);
        hap_install_xen_entries_in_l4(v, m4mfn);
        return m4mfn;
    }
#elif CONFIG_PAGING_LEVELS == 3
    {
        mfn_t m3mfn, m2mfn;
        l3_pgentry_t *l3e;
        l2_pgentry_t *l2e;
        int i;

        if ( (pg = hap_alloc(d)) == NULL )
            goto oom;
        m3mfn = page_to_mfn(pg);

        /* Install a monitor l2 table in slot 3 of the l3 table.
         * This is used for all Xen entries, including linear maps
         */
        if ( (pg = hap_alloc(d)) == NULL )
            goto oom;
        m2mfn = page_to_mfn(pg);
        l3e = hap_map_domain_page(m3mfn);
        l3e[3] = l3e_from_pfn(mfn_x(m2mfn), _PAGE_PRESENT);
        hap_install_xen_entries_in_l2h(v, m2mfn);
        /* Install the monitor's own linear map */
        l2e = hap_map_domain_page(m2mfn);
        for ( i = 0; i < L3_PAGETABLE_ENTRIES; i++ )
            l2e[l2_table_offset(LINEAR_PT_VIRT_START) + i] =
                (l3e_get_flags(l3e[i]) & _PAGE_PRESENT)
                ? l2e_from_pfn(l3e_get_pfn(l3e[i]), __PAGE_HYPERVISOR)
                : l2e_empty();
        hap_unmap_domain_page(l2e);
        hap_unmap_domain_page(l3e);

        HAP_PRINTK("new monitor table: %#lx\n", mfn_x(m3mfn));
        return m3mfn;
    }
#else
    {
        mfn_t m2mfn;
        if ( (pg = hap_alloc(d)) == NULL )
            goto oom;
        m2mfn = page_to_mfn(pg);
        hap_install_xen_entries_in_l2(v, m2mfn);
        return m2mfn;
    }
#endif

 oom:
    HAP_ERROR("out of memory building monitor pagetable\n");
    domain_crash(d);
    return _mfn(INVALID_MFN);
}

static void hap_destroy_monitor_table(struct vcpu* v, mfn_t mmfn)
{
    struct domain *d = v->domain;

#if CONFIG_PAGING_LEVELS == 3
    /* Need to destroy the l2 monitor page in slot 3 too */
    {
        l3_pgentry_t *l3e = hap_map_domain_page(mmfn);
        ASSERT(l3e_get_flags(l3e[3]) & _PAGE_PRESENT);
        hap_free(d, _mfn(l3e_get_pfn(l3e[3])));
        hap_unmap_domain_page(l3e);
    }
#endif

    /* Put the memory back in the pool */
    hap_free(d, mmfn);
}

/************************************************/
/* HAP DOMAIN LEVEL FUNCTIONS */
/************************************************/
void hap_domain_init(struct domain *d)
{
    hap_lock_init(d);
    INIT_LIST_HEAD(&d->arch.paging.hap.freelist);

    /* This domain will use HAP for log-dirty mode */
    paging_log_dirty_init(d, hap_enable_log_dirty, hap_disable_log_dirty,
                          hap_clean_dirty_bitmap);
}

/* return 0 for success, -errno for failure */
int hap_enable(struct domain *d, u32 mode)
{
    unsigned int old_pages;
    int rv = 0;

    domain_pause(d);
    /* error check */
    if ( (d == current->domain) )
    {
        rv = -EINVAL;
        goto out;
    }

    old_pages = d->arch.paging.hap.total_pages;
    if ( old_pages == 0 )
    {
        unsigned int r;
        hap_lock(d);
        r = hap_set_allocation(d, 256, NULL);
        hap_unlock(d);
        if ( r != 0 )
        {
            hap_set_allocation(d, 0, NULL);
            rv = -ENOMEM;
            goto out;
        }
    }

    /* allocate P2m table */
    if ( mode & PG_translate )
    {
        rv = p2m_alloc_table(d, hap_alloc_p2m_page, hap_free_p2m_page);
        if ( rv != 0 )
            goto out;
    }

    d->arch.paging.mode = mode | PG_HAP_enable;

 out:
    domain_unpause(d);
    return rv;
}
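
/* The initial allocation of 256 pages above is 1MB with 4kB pages; it can be
 * grown or shrunk later via the XEN_DOMCTL_SHADOW_OP_SET_ALLOCATION operation
 * handled by hap_domctl() below. */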

void hap_final_teardown(struct domain *d)
{
    if ( d->arch.paging.hap.total_pages != 0 )
        hap_teardown(d);

    p2m_teardown(d);
    ASSERT(d->arch.paging.hap.p2m_pages == 0);
}

void hap_teardown(struct domain *d)
{
    struct vcpu *v;
    mfn_t mfn;

    ASSERT(d->is_dying);
    ASSERT(d != current->domain);

    if ( !hap_locked_by_me(d) )
        hap_lock(d); /* Keep various asserts happy */

    if ( paging_mode_enabled(d) )
    {
        /* release the monitor table held by each vcpu */
        for_each_vcpu ( d, v )
        {
            if ( v->arch.paging.mode && paging_mode_external(d) )
            {
                mfn = pagetable_get_mfn(v->arch.monitor_table);
                if ( mfn_valid(mfn) && (mfn_x(mfn) != 0) )
                    hap_destroy_monitor_table(v, mfn);
                v->arch.monitor_table = pagetable_null();
            }
        }
    }

    if ( d->arch.paging.hap.total_pages != 0 )
    {
        HAP_PRINTK("teardown of domain %u starts."
                   " pages total = %u, free = %u, p2m=%u\n",
                   d->domain_id,
                   d->arch.paging.hap.total_pages,
                   d->arch.paging.hap.free_pages,
                   d->arch.paging.hap.p2m_pages);
        hap_set_allocation(d, 0, NULL);
        HAP_PRINTK("teardown done."
                   " pages total = %u, free = %u, p2m=%u\n",
                   d->arch.paging.hap.total_pages,
                   d->arch.paging.hap.free_pages,
                   d->arch.paging.hap.p2m_pages);
        ASSERT(d->arch.paging.hap.total_pages == 0);
    }

    d->arch.paging.mode &= ~PG_log_dirty;

    hap_unlock(d);
}

int hap_domctl(struct domain *d, xen_domctl_shadow_op_t *sc,
               XEN_GUEST_HANDLE(void) u_domctl)
{
    int rc, preempted = 0;

    switch ( sc->op )
    {
    case XEN_DOMCTL_SHADOW_OP_SET_ALLOCATION:
        hap_lock(d);
        rc = hap_set_allocation(d, sc->mb << (20 - PAGE_SHIFT), &preempted);
        hap_unlock(d);
        if ( preempted )
            /* Not finished. Set up to re-run the call. */
            rc = hypercall_create_continuation(__HYPERVISOR_domctl, "h",
                                               u_domctl);
        else
            /* Finished. Return the new allocation */
            sc->mb = hap_get_allocation(d);
        return rc;
    case XEN_DOMCTL_SHADOW_OP_GET_ALLOCATION:
        sc->mb = hap_get_allocation(d);
        return 0;
    default:
        HAP_ERROR("Bad hap domctl op %u\n", sc->op);
        return -EINVAL;
    }
}

void hap_vcpu_init(struct vcpu *v)
{
    v->arch.paging.mode = &hap_paging_real_mode;
}

/************************************************/
/* HAP PAGING MODE FUNCTIONS */
/************************************************/
/*
 * HAP guests can handle page faults (in the guest page tables) without
 * needing any action from Xen, so we should not be intercepting them.
 */
static int hap_page_fault(struct vcpu *v, unsigned long va,
                          struct cpu_user_regs *regs)
{
    HAP_ERROR("Intercepted a guest #PF (%u:%u) with HAP enabled.\n",
              v->domain->domain_id, v->vcpu_id);
    domain_crash(v->domain);
    return 0;
}

/*
 * HAP guests can handle invlpg without needing any action from Xen, so we
 * should not be intercepting it.
 */
static int hap_invlpg(struct vcpu *v, unsigned long va)
{
    HAP_ERROR("Intercepted a guest INVLPG (%u:%u) with HAP enabled.\n",
              v->domain->domain_id, v->vcpu_id);
    domain_crash(v->domain);
    return 0;
}

static void hap_update_cr3(struct vcpu *v, int do_locking)
{
    v->arch.hvm_vcpu.hw_cr[3] = v->arch.hvm_vcpu.guest_cr[3];
    hvm_update_guest_cr(v, 3);
}
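
/* Select the set of paging entry points (one of the hap_paging_*_mode tables
 * at the bottom of this file) matching the guest's current paging state: the
 * 1-level table until the guest enables paging, then the 2-level, 3-level
 * (PAE) or 4-level (long mode) table as appropriate.  Under HAP the hardware
 * walks the guest's own page tables, so hap_update_cr3() above just mirrors
 * guest_cr[3] into hw_cr[3]. */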
static void hap_update_paging_modes(struct vcpu *v)
{
    struct domain *d = v->domain;

    hap_lock(d);

    v->arch.paging.mode =
        !hvm_paging_enabled(v)   ? &hap_paging_real_mode :
        hvm_long_mode_enabled(v) ? &hap_paging_long_mode :
        hvm_pae_enabled(v)       ? &hap_paging_pae_mode :
                                   &hap_paging_protected_mode;

    if ( pagetable_is_null(v->arch.monitor_table) )
    {
        mfn_t mmfn = hap_make_monitor_table(v);
        v->arch.monitor_table = pagetable_from_mfn(mmfn);
        make_cr3(v, mfn_x(mmfn));
        hvm_update_host_cr3(v);
    }

    /* CR3 is effectively updated by a mode change. Flush ASIDs, etc. */
    hap_update_cr3(v, 0);

    hap_unlock(d);
}

#if CONFIG_PAGING_LEVELS == 3
static void p2m_install_entry_in_monitors(struct domain *d, l3_pgentry_t *l3e)
/* Special case, only used for PAE hosts: update the mapping of the p2m
 * table. This is trivial in other paging modes (one top-level entry
 * points to the top-level p2m, no maintenance needed), but PAE makes
 * life difficult by needing a copy of the p2m table in eight l2h slots
 * in the monitor table. This function makes fresh copies when a p2m
 * l3e changes. */
{
    l2_pgentry_t *ml2e;
    struct vcpu *v;
    unsigned int index;

    index = ((unsigned long)l3e & ~PAGE_MASK) / sizeof(l3_pgentry_t);
    ASSERT(index < MACHPHYS_MBYTES>>1);

    for_each_vcpu ( d, v )
    {
        if ( pagetable_get_pfn(v->arch.monitor_table) == 0 )
            continue;

        ASSERT(paging_mode_external(v->domain));

        if ( v == current ) /* OK to use linear map of monitor_table */
            ml2e = __linear_l2_table + l2_linear_offset(RO_MPT_VIRT_START);
        else {
            l3_pgentry_t *ml3e;
            ml3e = hap_map_domain_page(
                pagetable_get_mfn(v->arch.monitor_table));
            ASSERT(l3e_get_flags(ml3e[3]) & _PAGE_PRESENT);
            ml2e = hap_map_domain_page(_mfn(l3e_get_pfn(ml3e[3])));
            ml2e += l2_table_offset(RO_MPT_VIRT_START);
            hap_unmap_domain_page(ml3e);
        }
        ml2e[index] = l2e_from_pfn(l3e_get_pfn(*l3e), __PAGE_HYPERVISOR);
        if ( v != current )
            hap_unmap_domain_page(ml2e);
    }
}
#endif

static void
hap_write_p2m_entry(struct vcpu *v, unsigned long gfn, l1_pgentry_t *p,
                    mfn_t table_mfn, l1_pgentry_t new, unsigned int level)
{
    hap_lock(v->domain);

    safe_write_pte(p, new);
#if CONFIG_PAGING_LEVELS == 3
    /* install P2M in monitor table for PAE Xen */
    if ( level == 3 )
        /* We have written to the p2m l3: need to sync the per-vcpu
         * copies of it in the monitor tables */
        p2m_install_entry_in_monitors(v->domain, (l3_pgentry_t *)p);
#endif

    hap_unlock(v->domain);
}

static unsigned long hap_gva_to_gfn_real_mode(
    struct vcpu *v, unsigned long gva)
{
    return ((paddr_t)gva >> PAGE_SHIFT);
}

/* Entry points into this mode of the hap code. */
struct paging_mode hap_paging_real_mode = {
    .page_fault = hap_page_fault,
    .invlpg = hap_invlpg,
    .gva_to_gfn = hap_gva_to_gfn_real_mode,
    .update_cr3 = hap_update_cr3,
    .update_paging_modes = hap_update_paging_modes,
    .write_p2m_entry = hap_write_p2m_entry,
    .guest_levels = 1
};

struct paging_mode hap_paging_protected_mode = {
    .page_fault = hap_page_fault,
    .invlpg = hap_invlpg,
    .gva_to_gfn = hap_gva_to_gfn_2level,
    .update_cr3 = hap_update_cr3,
    .update_paging_modes = hap_update_paging_modes,
    .write_p2m_entry = hap_write_p2m_entry,
    .guest_levels = 2
};

struct paging_mode hap_paging_pae_mode = {
    .page_fault = hap_page_fault,
    .invlpg = hap_invlpg,
    .gva_to_gfn = hap_gva_to_gfn_3level,
    .update_cr3 = hap_update_cr3,
    .update_paging_modes = hap_update_paging_modes,
    .write_p2m_entry = hap_write_p2m_entry,
    .guest_levels = 3
};

struct paging_mode hap_paging_long_mode = {
    .page_fault = hap_page_fault,
    .invlpg = hap_invlpg,
    .gva_to_gfn = hap_gva_to_gfn_4level,
    .update_cr3 = hap_update_cr3,
    .update_paging_modes = hap_update_paging_modes,
    .write_p2m_entry = hap_write_p2m_entry,
    .guest_levels = 4
};

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */