ia64/xen-unstable: xen/arch/x86/mm/hap/hap.c @ 18667:f4dab783b58b

x86: Add TLB flushing to HAP p2m changes

Removing an MFN from the p2m requires it to be flushed from the
guest's TLBs on HAP, like we do when we're using shadows.

Signed-off-by: Tim Deegan <Tim.Deegan@citrix.com>
Author:   Keir Fraser <keir.fraser@citrix.com>
Date:     Mon Oct 20 16:49:25 2008 +0100 (2008-10-20)
Parents:  1e98ea5c8604
Children: 39517e863cc8
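The substance of this change is in hap_write_p2m_entry() in the listing
below. As orientation, here is the rule the patch adds, excerpted from
that function with an explanatory comment added:

    old_flags = l1e_get_flags(*p);
    safe_write_pte(p, new);
    /* Only a present leaf entry (an L1 entry, or an L2 superpage entry
     * with _PAGE_PSE set) can be cached in a guest TLB, so only its
     * replacement forces a flush on every CPU the domain has run on. */
    if ( (old_flags & _PAGE_PRESENT)
         && (level == 1 || (level == 2 && (old_flags & _PAGE_PSE))) )
        flush_tlb_mask(v->domain->domain_dirty_cpumask);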
/******************************************************************************
 * arch/x86/mm/hap/hap.c
 *
 * hardware assisted paging
 * Copyright (c) 2007 Advanced Micro Devices (Wei Huang)
 * Parts of this code are Copyright (c) 2007 by XenSource Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <xen/config.h>
#include <xen/types.h>
#include <xen/mm.h>
#include <xen/trace.h>
#include <xen/sched.h>
#include <xen/perfc.h>
#include <xen/irq.h>
#include <xen/domain_page.h>
#include <xen/guest_access.h>
#include <xen/keyhandler.h>
#include <asm/event.h>
#include <asm/page.h>
#include <asm/current.h>
#include <asm/flushtlb.h>
#include <asm/shared.h>
#include <asm/hap.h>
#include <asm/paging.h>
#include <asm/p2m.h>
#include <asm/domain.h>
#include <xen/numa.h>

#include "private.h"

/* Override macros from asm/page.h to make them work with mfn_t */
#undef mfn_to_page
#define mfn_to_page(_m) (frame_table + mfn_x(_m))
#undef mfn_valid
#define mfn_valid(_mfn) (mfn_x(_mfn) < max_page)
#undef page_to_mfn
#define page_to_mfn(_pg) (_mfn((_pg) - frame_table))
/************************************************/
/*            HAP LOG DIRTY SUPPORT             */
/************************************************/
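/* Log-dirty tracking works by write-protecting guest RAM in the p2m:
 * enabling it retypes all p2m_ram_rw entries to p2m_ram_logdirty, so the
 * next guest write to each page faults into Xen (handled in the HVM
 * code), which records the page as dirty and makes it writable again.
 * Cleaning the bitmap re-protects everything for the next round;
 * disabling restores p2m_ram_rw. */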
/* HAP code to call when log_dirty is enabled. Returns 0 if no problem found. */
int hap_enable_log_dirty(struct domain *d)
{
    /* turn on PG_log_dirty bit in paging mode */
    hap_lock(d);
    d->arch.paging.mode |= PG_log_dirty;
    hap_unlock(d);

    /* set l1e entries of P2M table to be read-only. */
    p2m_change_entry_type_global(d, p2m_ram_rw, p2m_ram_logdirty);
    flush_tlb_mask(d->domain_dirty_cpumask);
    return 0;
}

int hap_disable_log_dirty(struct domain *d)
{
    hap_lock(d);
    d->arch.paging.mode &= ~PG_log_dirty;
    hap_unlock(d);

    /* reset l1e entries of P2M table to normal (read/write) mode */
    p2m_change_entry_type_global(d, p2m_ram_logdirty, p2m_ram_rw);
    return 0;
}

void hap_clean_dirty_bitmap(struct domain *d)
{
    /* set l1e entries of P2M table to be read-only. */
    p2m_change_entry_type_global(d, p2m_ram_rw, p2m_ram_logdirty);
    flush_tlb_mask(d->domain_dirty_cpumask);
}
/************************************************/
/*            HAP SUPPORT FUNCTIONS             */
/************************************************/
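/* Pages for HAP tables come from a per-domain pool kept on
 * d->arch.paging.hap.freelist and resized by hap_set_allocation();
 * callers of hap_alloc()/hap_free() must hold the HAP lock. */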
static struct page_info *hap_alloc(struct domain *d)
{
    struct page_info *pg = NULL;
    void *p;

    ASSERT(hap_locked_by_me(d));

    if ( unlikely(list_empty(&d->arch.paging.hap.freelist)) )
        return NULL;

    pg = list_entry(d->arch.paging.hap.freelist.next, struct page_info, list);
    list_del(&pg->list);
    d->arch.paging.hap.free_pages--;

    p = hap_map_domain_page(page_to_mfn(pg));
    ASSERT(p != NULL);
    clear_page(p);
    hap_unmap_domain_page(p);

    return pg;
}

static void hap_free(struct domain *d, mfn_t mfn)
{
    struct page_info *pg = mfn_to_page(mfn);

    ASSERT(hap_locked_by_me(d));

    d->arch.paging.hap.free_pages++;
    list_add_tail(&pg->list, &d->arch.paging.hap.freelist);
}

static struct page_info *hap_alloc_p2m_page(struct domain *d)
{
    struct page_info *pg;

    hap_lock(d);
    pg = hap_alloc(d);

#if CONFIG_PAGING_LEVELS == 3
    /* Under PAE mode, the top-level P2M table must be allocated below the
     * 4GB boundary because h_cr3 is only 32 bits wide.  We use
     * alloc_domheap_page() to force this requirement, and exchange the
     * guaranteed 32-bit-clean page for the one we just hap_alloc()ed. */
    if ( d->arch.paging.hap.p2m_pages == 0
         && mfn_x(page_to_mfn(pg)) >= (1UL << (32 - PAGE_SHIFT)) )
    {
        free_domheap_page(pg);
        pg = alloc_domheap_page(
            NULL, MEMF_bits(32) | MEMF_node(domain_to_node(d)));
        if ( likely(pg != NULL) )
        {
            void *p = hap_map_domain_page(page_to_mfn(pg));
            clear_page(p);
            hap_unmap_domain_page(p);
        }
    }
#endif

    if ( likely(pg != NULL) )
    {
        d->arch.paging.hap.total_pages--;
        d->arch.paging.hap.p2m_pages++;
        page_set_owner(pg, d);
        pg->count_info = 1;
    }

    hap_unlock(d);
    return pg;
}

void hap_free_p2m_page(struct domain *d, struct page_info *pg)
{
    hap_lock(d);
    ASSERT(page_get_owner(pg) == d);
    /* Should have just the one ref we gave it in hap_alloc_p2m_page() */
    if ( (pg->count_info & PGC_count_mask) != 1 )
        HAP_ERROR("Odd p2m page count c=%#x t=%"PRtype_info"\n",
                  pg->count_info, pg->u.inuse.type_info);
    pg->count_info = 0;
    /* Free should not decrement domain's total allocation, since
     * these pages were allocated without an owner. */
    page_set_owner(pg, NULL);
    free_domheap_page(pg);
    d->arch.paging.hap.p2m_pages--;
    ASSERT(d->arch.paging.hap.p2m_pages >= 0);
    hap_unlock(d);
}
/* Return the size of the pool, rounded up to the nearest MB */
static unsigned int
hap_get_allocation(struct domain *d)
{
    unsigned int pg = d->arch.paging.hap.total_pages;

    return ((pg >> (20 - PAGE_SHIFT))
            + ((pg & ((1 << (20 - PAGE_SHIFT)) - 1)) ? 1 : 0));
}
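/* With 4kB pages (PAGE_SHIFT == 12) there are 256 pages per MB, so
 * hap_get_allocation() above reports, e.g., a 300-page pool as 2MB. */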
/* Set the pool of pages to the required number of pages.
 * Returns 0 for success, non-zero for failure. */
static unsigned int
hap_set_allocation(struct domain *d, unsigned int pages, int *preempted)
{
    struct page_info *pg;

    ASSERT(hap_locked_by_me(d));

    while ( d->arch.paging.hap.total_pages != pages )
    {
        if ( d->arch.paging.hap.total_pages < pages )
        {
            /* Need to allocate more memory from domheap */
            pg = alloc_domheap_page(NULL, MEMF_node(domain_to_node(d)));
            if ( pg == NULL )
            {
                HAP_PRINTK("failed to allocate hap pages.\n");
                return -ENOMEM;
            }
            d->arch.paging.hap.free_pages++;
            d->arch.paging.hap.total_pages++;
            list_add_tail(&pg->list, &d->arch.paging.hap.freelist);
        }
        else if ( d->arch.paging.hap.total_pages > pages )
        {
            /* Need to return memory to domheap */
            ASSERT(!list_empty(&d->arch.paging.hap.freelist));
            pg = list_entry(d->arch.paging.hap.freelist.next,
                            struct page_info, list);
            list_del(&pg->list);
            d->arch.paging.hap.free_pages--;
            d->arch.paging.hap.total_pages--;
            pg->count_info = 0;
            free_domheap_page(pg);
        }

        /* Check to see if we need to yield and try again */
        if ( preempted && hypercall_preempt_check() )
        {
            *preempted = 1;
            return 0;
        }
    }

    return 0;
}
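/* The monitor table is the top-level pagetable Xen itself runs on while
 * this domain's vcpus execute.  The constructors below give it the
 * common Xen mappings, the per-domain mappings, a linear self-map, and
 * a read-only view of the P2M at RO_MPT_VIRT_START. */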
#if CONFIG_PAGING_LEVELS == 4
static void hap_install_xen_entries_in_l4(struct vcpu *v, mfn_t l4mfn)
{
    struct domain *d = v->domain;
    l4_pgentry_t *l4e;

    l4e = hap_map_domain_page(l4mfn);
    ASSERT(l4e != NULL);

    /* Copy the common Xen mappings from the idle domain */
    memcpy(&l4e[ROOT_PAGETABLE_FIRST_XEN_SLOT],
           &idle_pg_table[ROOT_PAGETABLE_FIRST_XEN_SLOT],
           ROOT_PAGETABLE_XEN_SLOTS * sizeof(l4_pgentry_t));

    /* Install the per-domain mappings for this domain */
    l4e[l4_table_offset(PERDOMAIN_VIRT_START)] =
        l4e_from_pfn(mfn_x(page_to_mfn(virt_to_page(d->arch.mm_perdomain_l3))),
                     __PAGE_HYPERVISOR);

    /* Install a linear mapping */
    l4e[l4_table_offset(LINEAR_PT_VIRT_START)] =
        l4e_from_pfn(mfn_x(l4mfn), __PAGE_HYPERVISOR);

    /* Install the domain-specific P2M table */
    l4e[l4_table_offset(RO_MPT_VIRT_START)] =
        l4e_from_pfn(mfn_x(pagetable_get_mfn(d->arch.phys_table)),
                     __PAGE_HYPERVISOR);

    hap_unmap_domain_page(l4e);
}
#endif /* CONFIG_PAGING_LEVELS == 4 */
#if CONFIG_PAGING_LEVELS == 3
static void hap_install_xen_entries_in_l2h(struct vcpu *v, mfn_t l2hmfn)
{
    struct domain *d = v->domain;
    l2_pgentry_t *l2e;
    l3_pgentry_t *p2m;
    int i;

    l2e = hap_map_domain_page(l2hmfn);
    ASSERT(l2e != NULL);

    /* Copy the common Xen mappings from the idle domain */
    memcpy(&l2e[L2_PAGETABLE_FIRST_XEN_SLOT & (L2_PAGETABLE_ENTRIES-1)],
           &idle_pg_table_l2[L2_PAGETABLE_FIRST_XEN_SLOT],
           L2_PAGETABLE_XEN_SLOTS * sizeof(l2_pgentry_t));

    /* Install the per-domain mappings for this domain */
    for ( i = 0; i < PDPT_L2_ENTRIES; i++ )
        l2e[l2_table_offset(PERDOMAIN_VIRT_START) + i] =
            l2e_from_pfn(
                mfn_x(page_to_mfn(virt_to_page(d->arch.mm_perdomain_pt) + i)),
                __PAGE_HYPERVISOR);

    /* No linear mapping; will be set up by monitor-table constructor. */
    for ( i = 0; i < 4; i++ )
        l2e[l2_table_offset(LINEAR_PT_VIRT_START) + i] =
            l2e_empty();

    /* Install the domain-specific p2m table */
    ASSERT(pagetable_get_pfn(d->arch.phys_table) != 0);
    p2m = hap_map_domain_page(pagetable_get_mfn(d->arch.phys_table));
    for ( i = 0; i < MACHPHYS_MBYTES>>1; i++ )
    {
        l2e[l2_table_offset(RO_MPT_VIRT_START) + i] =
            (l3e_get_flags(p2m[i]) & _PAGE_PRESENT)
            ? l2e_from_pfn(mfn_x(_mfn(l3e_get_pfn(p2m[i]))),
                           __PAGE_HYPERVISOR)
            : l2e_empty();
    }
    hap_unmap_domain_page(p2m);
    hap_unmap_domain_page(l2e);
}
#endif
static mfn_t hap_make_monitor_table(struct vcpu *v)
{
    struct domain *d = v->domain;
    struct page_info *pg;

    ASSERT(pagetable_get_pfn(v->arch.monitor_table) == 0);

#if CONFIG_PAGING_LEVELS == 4
    {
        mfn_t m4mfn;
        if ( (pg = hap_alloc(d)) == NULL )
            goto oom;
        m4mfn = page_to_mfn(pg);
        hap_install_xen_entries_in_l4(v, m4mfn);
        return m4mfn;
    }
#elif CONFIG_PAGING_LEVELS == 3
    {
        mfn_t m3mfn, m2mfn;
        l3_pgentry_t *l3e;
        l2_pgentry_t *l2e;
        int i;

        if ( (pg = hap_alloc(d)) == NULL )
            goto oom;
        m3mfn = page_to_mfn(pg);

        /* Install a monitor l2 table in slot 3 of the l3 table.
         * This is used for all Xen entries, including linear maps. */
        if ( (pg = hap_alloc(d)) == NULL )
            goto oom;
        m2mfn = page_to_mfn(pg);
        l3e = hap_map_domain_page(m3mfn);
        l3e[3] = l3e_from_pfn(mfn_x(m2mfn), _PAGE_PRESENT);
        hap_install_xen_entries_in_l2h(v, m2mfn);
        /* Install the monitor's own linear map */
        l2e = hap_map_domain_page(m2mfn);
        for ( i = 0; i < L3_PAGETABLE_ENTRIES; i++ )
            l2e[l2_table_offset(LINEAR_PT_VIRT_START) + i] =
                (l3e_get_flags(l3e[i]) & _PAGE_PRESENT)
                ? l2e_from_pfn(l3e_get_pfn(l3e[i]), __PAGE_HYPERVISOR)
                : l2e_empty();
        hap_unmap_domain_page(l2e);
        hap_unmap_domain_page(l3e);

        HAP_PRINTK("new monitor table: %#lx\n", mfn_x(m3mfn));
        return m3mfn;
    }
#endif

 oom:
    HAP_ERROR("out of memory building monitor pagetable\n");
    domain_crash(d);
    return _mfn(INVALID_MFN);
}
static void hap_destroy_monitor_table(struct vcpu* v, mfn_t mmfn)
{
    struct domain *d = v->domain;

#if CONFIG_PAGING_LEVELS == 3
    /* Need to destroy the l2 monitor page in slot 3 too */
    {
        l3_pgentry_t *l3e = hap_map_domain_page(mmfn);
        ASSERT(l3e_get_flags(l3e[3]) & _PAGE_PRESENT);
        hap_free(d, _mfn(l3e_get_pfn(l3e[3])));
        hap_unmap_domain_page(l3e);
    }
#endif

    /* Put the memory back in the pool */
    hap_free(d, mmfn);
}
/************************************************/
/*          HAP DOMAIN LEVEL FUNCTIONS          */
/************************************************/
void hap_domain_init(struct domain *d)
{
    hap_lock_init(d);
    INIT_LIST_HEAD(&d->arch.paging.hap.freelist);

    /* This domain will use HAP for log-dirty mode */
    paging_log_dirty_init(d, hap_enable_log_dirty, hap_disable_log_dirty,
                          hap_clean_dirty_bitmap);
}
/* return 0 for success, -errno for failure */
int hap_enable(struct domain *d, u32 mode)
{
    unsigned int old_pages;
    int rv = 0;

    domain_pause(d);
    /* error check */
    if ( d == current->domain )
    {
        rv = -EINVAL;
        goto out;
    }

    old_pages = d->arch.paging.hap.total_pages;
    if ( old_pages == 0 )
    {
        unsigned int r;
        hap_lock(d);
        r = hap_set_allocation(d, 256, NULL);
        if ( r != 0 )
        {
            /* Roll back any partial allocation; hap_set_allocation()
             * must be called with the HAP lock held. */
            hap_set_allocation(d, 0, NULL);
            hap_unlock(d);
            rv = -ENOMEM;
            goto out;
        }
        hap_unlock(d);
    }

    /* allocate P2M table */
    if ( mode & PG_translate )
    {
        rv = p2m_alloc_table(d, hap_alloc_p2m_page, hap_free_p2m_page);
        if ( rv != 0 )
            goto out;
    }

    d->arch.paging.mode = mode | PG_HAP_enable;

 out:
    domain_unpause(d);
    return rv;
}
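/* Note: the 256-page pool that hap_enable() starts with is 1MB with 4kB
 * pages; the toolstack can resize it later via
 * XEN_DOMCTL_SHADOW_OP_SET_ALLOCATION (see hap_domctl() below). */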
void hap_final_teardown(struct domain *d)
{
    if ( d->arch.paging.hap.total_pages != 0 )
        hap_teardown(d);

    p2m_teardown(d);
    ASSERT(d->arch.paging.hap.p2m_pages == 0);
}

void hap_teardown(struct domain *d)
{
    struct vcpu *v;
    mfn_t mfn;

    ASSERT(d->is_dying);
    ASSERT(d != current->domain);

    if ( !hap_locked_by_me(d) )
        hap_lock(d); /* Keep various asserts happy */

    if ( paging_mode_enabled(d) )
    {
        /* release the monitor table held by each vcpu */
        for_each_vcpu ( d, v )
        {
            if ( v->arch.paging.mode && paging_mode_external(d) )
            {
                mfn = pagetable_get_mfn(v->arch.monitor_table);
                if ( mfn_valid(mfn) && (mfn_x(mfn) != 0) )
                    hap_destroy_monitor_table(v, mfn);
                v->arch.monitor_table = pagetable_null();
            }
        }
    }

    if ( d->arch.paging.hap.total_pages != 0 )
    {
        HAP_PRINTK("teardown of domain %u starts."
                   " pages total = %u, free = %u, p2m = %u\n",
                   d->domain_id,
                   d->arch.paging.hap.total_pages,
                   d->arch.paging.hap.free_pages,
                   d->arch.paging.hap.p2m_pages);
        hap_set_allocation(d, 0, NULL);
        HAP_PRINTK("teardown done."
                   " pages total = %u, free = %u, p2m = %u\n",
                   d->arch.paging.hap.total_pages,
                   d->arch.paging.hap.free_pages,
                   d->arch.paging.hap.p2m_pages);
        ASSERT(d->arch.paging.hap.total_pages == 0);
    }

    d->arch.paging.mode &= ~PG_log_dirty;

    hap_unlock(d);
}
int hap_domctl(struct domain *d, xen_domctl_shadow_op_t *sc,
               XEN_GUEST_HANDLE(void) u_domctl)
{
    int rc, preempted = 0;

    switch ( sc->op )
    {
    case XEN_DOMCTL_SHADOW_OP_SET_ALLOCATION:
        hap_lock(d);
        rc = hap_set_allocation(d, sc->mb << (20 - PAGE_SHIFT), &preempted);
        hap_unlock(d);
        if ( preempted )
            /* Not finished.  Set up to re-run the call. */
            rc = hypercall_create_continuation(__HYPERVISOR_domctl, "h",
                                               u_domctl);
        else
            /* Finished.  Return the new allocation */
            sc->mb = hap_get_allocation(d);
        return rc;
    case XEN_DOMCTL_SHADOW_OP_GET_ALLOCATION:
        sc->mb = hap_get_allocation(d);
        return 0;
    default:
        HAP_ERROR("Bad hap domctl op %u\n", sc->op);
        return -EINVAL;
    }
}
void hap_vcpu_init(struct vcpu *v)
{
    v->arch.paging.mode = &hap_paging_real_mode;
}

/************************************************/
/*          HAP PAGING MODE FUNCTIONS           */
/************************************************/
/*
 * HAP guests can handle page faults (in the guest page tables) without
 * needing any action from Xen, so we should not be intercepting them.
 */
static int hap_page_fault(struct vcpu *v, unsigned long va,
                          struct cpu_user_regs *regs)
{
    HAP_ERROR("Intercepted a guest #PF (%u:%u) with HAP enabled.\n",
              v->domain->domain_id, v->vcpu_id);
    domain_crash(v->domain);
    return 0;
}

/*
 * HAP guests can handle invlpg without needing any action from Xen, so
 * we should not be intercepting it.
 */
static int hap_invlpg(struct vcpu *v, unsigned long va)
{
    HAP_ERROR("Intercepted a guest INVLPG (%u:%u) with HAP enabled.\n",
              v->domain->domain_id, v->vcpu_id);
    domain_crash(v->domain);
    return 0;
}
static void hap_update_cr3(struct vcpu *v, int do_locking)
{
    v->arch.hvm_vcpu.hw_cr[3] = v->arch.hvm_vcpu.guest_cr[3];
    hvm_update_guest_cr(v, 3);
}

static void hap_update_paging_modes(struct vcpu *v)
{
    struct domain *d = v->domain;

    hap_lock(d);

    v->arch.paging.mode =
        !hvm_paging_enabled(v)   ? &hap_paging_real_mode :
        hvm_long_mode_enabled(v) ? &hap_paging_long_mode :
        hvm_pae_enabled(v)       ? &hap_paging_pae_mode  :
                                   &hap_paging_protected_mode;

    if ( pagetable_is_null(v->arch.monitor_table) )
    {
        mfn_t mmfn = hap_make_monitor_table(v);
        v->arch.monitor_table = pagetable_from_mfn(mmfn);
        make_cr3(v, mfn_x(mmfn));
        hvm_update_host_cr3(v);
    }

    /* CR3 is effectively updated by a mode change. Flush ASIDs, etc. */
    hap_update_cr3(v, 0);

    hap_unlock(d);
}
#if CONFIG_PAGING_LEVELS == 3
/* Special case, only used for PAE hosts: update the mapping of the p2m
 * table.  This is trivial in other paging modes (one top-level entry
 * points to the top-level p2m, no maintenance needed), but PAE makes
 * life difficult by needing a copy of the p2m table in eight l2h slots
 * in the monitor table.  This function makes fresh copies when a p2m
 * l3e changes. */
static void p2m_install_entry_in_monitors(struct domain *d, l3_pgentry_t *l3e)
{
    l2_pgentry_t *ml2e;
    struct vcpu *v;
    unsigned int index;

    index = ((unsigned long)l3e & ~PAGE_MASK) / sizeof(l3_pgentry_t);
    ASSERT(index < MACHPHYS_MBYTES>>1);

    for_each_vcpu ( d, v )
    {
        if ( pagetable_get_pfn(v->arch.monitor_table) == 0 )
            continue;

        ASSERT(paging_mode_external(v->domain));

        if ( v == current ) /* OK to use linear map of monitor_table */
            ml2e = __linear_l2_table + l2_linear_offset(RO_MPT_VIRT_START);
        else
        {
            l3_pgentry_t *ml3e;
            ml3e = hap_map_domain_page(
                pagetable_get_mfn(v->arch.monitor_table));
            ASSERT(l3e_get_flags(ml3e[3]) & _PAGE_PRESENT);
            ml2e = hap_map_domain_page(_mfn(l3e_get_pfn(ml3e[3])));
            ml2e += l2_table_offset(RO_MPT_VIRT_START);
            hap_unmap_domain_page(ml3e);
        }
        ml2e[index] = l2e_from_pfn(l3e_get_pfn(*l3e), __PAGE_HYPERVISOR);
        if ( v != current )
            hap_unmap_domain_page(ml2e);
    }
}
#endif
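/* hap_write_p2m_entry() below carries the flush added by this changeset:
 * once a present leaf mapping (an L1 entry, or a PSE L2 superpage) has
 * been replaced, any CPU the domain has run on may still hold the old
 * translation, so the whole domain_dirty_cpumask is flushed. */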
static void
hap_write_p2m_entry(struct vcpu *v, unsigned long gfn, l1_pgentry_t *p,
                    mfn_t table_mfn, l1_pgentry_t new, unsigned int level)
{
    uint32_t old_flags;

    hap_lock(v->domain);

    old_flags = l1e_get_flags(*p);
    safe_write_pte(p, new);
    if ( (old_flags & _PAGE_PRESENT)
         && (level == 1 || (level == 2 && (old_flags & _PAGE_PSE))) )
        flush_tlb_mask(v->domain->domain_dirty_cpumask);

#if CONFIG_PAGING_LEVELS == 3
    /* install P2M in monitor table for PAE Xen */
    if ( level == 3 )
        /* We have written to the p2m l3: need to sync the per-vcpu
         * copies of it in the monitor tables */
        p2m_install_entry_in_monitors(v->domain, (l3_pgentry_t *)p);
#endif

    hap_unlock(v->domain);
}
static unsigned long hap_gva_to_gfn_real_mode(
    struct vcpu *v, unsigned long gva, uint32_t *pfec)
{
    return ((paddr_t)gva >> PAGE_SHIFT);
}
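/* With guest paging disabled, linear addresses equal guest-physical
 * addresses, so hap_gva_to_gfn_real_mode() above is just a page shift. */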
/* Entry points into this mode of the hap code. */
struct paging_mode hap_paging_real_mode = {
    .page_fault          = hap_page_fault,
    .invlpg              = hap_invlpg,
    .gva_to_gfn          = hap_gva_to_gfn_real_mode,
    .update_cr3          = hap_update_cr3,
    .update_paging_modes = hap_update_paging_modes,
    .write_p2m_entry     = hap_write_p2m_entry,
    .guest_levels        = 1
};

struct paging_mode hap_paging_protected_mode = {
    .page_fault          = hap_page_fault,
    .invlpg              = hap_invlpg,
    .gva_to_gfn          = hap_gva_to_gfn_2level,
    .update_cr3          = hap_update_cr3,
    .update_paging_modes = hap_update_paging_modes,
    .write_p2m_entry     = hap_write_p2m_entry,
    .guest_levels        = 2
};

struct paging_mode hap_paging_pae_mode = {
    .page_fault          = hap_page_fault,
    .invlpg              = hap_invlpg,
    .gva_to_gfn          = hap_gva_to_gfn_3level,
    .update_cr3          = hap_update_cr3,
    .update_paging_modes = hap_update_paging_modes,
    .write_p2m_entry     = hap_write_p2m_entry,
    .guest_levels        = 3
};

struct paging_mode hap_paging_long_mode = {
    .page_fault          = hap_page_fault,
    .invlpg              = hap_invlpg,
    .gva_to_gfn          = hap_gva_to_gfn_4level,
    .update_cr3          = hap_update_cr3,
    .update_paging_modes = hap_update_paging_modes,
    .write_p2m_entry     = hap_write_p2m_entry,
    .guest_levels        = 4
};
/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */