ia64/xen-unstable

view xen/arch/x86/mm.c @ 6427:3428d58a85e1

merge?
author cl349@firebug.cl.cam.ac.uk
date Thu Aug 25 14:41:52 2005 +0000 (2005-08-25)
parents 4abd299ef2f6 c42a9e2f6c5b
children b54144915ae6
1 /******************************************************************************
2 * arch/x86/mm.c
3 *
4 * Copyright (c) 2002-2005 K A Fraser
5 * Copyright (c) 2004 Christian Limpach
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
22 /*
23 * A description of the x86 page table API:
24 *
25 * Domains trap to do_mmu_update with a list of update requests.
26 * This is a list of (ptr, val) pairs, where the requested operation
27 * is *ptr = val.
28 *
29 * Reference counting of pages:
30 * ----------------------------
31 * Each page has two refcounts: tot_count and type_count.
32 *
33 * TOT_COUNT is the obvious reference count. It counts all uses of a
34 * physical page frame by a domain, including uses as a page directory,
35 * a page table, or simple mappings via a PTE. This count prevents a
36 * domain from releasing a frame back to the free pool when it still holds
37 * a reference to it.
38 *
39 * TYPE_COUNT is more subtle. A frame can be put to one of three
40 * mutually-exclusive uses: it might be used as a page directory, or a
41 * page table, or it may be mapped writable by the domain [of course, a
42 * frame may equally be used in none of these three ways!].
43 * So, type_count is a count of the number of times a frame is being
44 * referred to in its current incarnation. Therefore, a page can only
45 * change its type when its type count is zero.
46 *
47 * Pinning the page type:
48 * ----------------------
49 * The type of a page can be pinned/unpinned with the commands
50 * MMUEXT_[UN]PIN_L?_TABLE. Each page can be pinned exactly once (that is,
51 * pinning is not reference counted, so it can't be nested).
52 * This is useful to prevent a page's type count falling to zero, at which
53 * point safety checks would need to be carried out next time the count
54 * is increased again.
55 *
56 * A further note on writable page mappings:
57 * -----------------------------------------
58 * For simplicity, the count of writable mappings for a page may not
59 * correspond to reality. The 'writable count' is incremented for every
60 * PTE which maps the page with the _PAGE_RW flag set. However, for
61 * write access to be possible the page directory entry must also have
62 * its _PAGE_RW bit set. We do not check this as it complicates the
63 * reference counting considerably [consider the case of multiple
64 * directory entries referencing a single page table, some with the RW
65 * bit set, others not -- it starts getting a bit messy].
66 * In normal use, this simplification shouldn't be a problem.
67 * However, the logic can be added if required.
68 *
69 * One more note on read-only page mappings:
70 * -----------------------------------------
71 * We want domains to be able to map pages for read-only access. The
72 * main reason is that page tables and directories should be readable
73 * by a domain, but it would not be safe for them to be writable.
74 * However, domains have free access to rings 1 & 2 of the Intel
75 * privilege model. In terms of page protection, these are considered
76 * to be part of 'supervisor mode'. The WP bit in CR0 controls whether
77 * read-only restrictions are respected in supervisor mode -- if the
78 * bit is clear then any mapped page is writable.
79 *
80 * We get round this by always setting the WP bit and disallowing
81 * updates to it. This is very unlikely to cause a problem for guest
82 * OSes, which will generally use the WP bit to simplify copy-on-write
83 * implementation (in that case, the OS wants a fault when it writes to
84 * an application-supplied buffer).
85 */
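/*
 * Illustrative guest-side sketch of the (ptr, val) interface described
 * above. This is only a sketch: it assumes the public mmu_update_t layout
 * and the HYPERVISOR_mmu_update wrapper that guests build against, neither
 * of which is defined in this file.
 */
#if 0
static void example_update_pte(unsigned long pte_machine_addr, intpte_t new_val)
{
    mmu_update_t req;

    req.ptr = pte_machine_addr;  /* machine address of the PTE to update */
    req.val = new_val;           /* requested new contents; Xen validates it */

    /* Single-entry batch; DOMID_SELF means no foreign subject domain. */
    (void)HYPERVISOR_mmu_update(&req, 1, NULL, DOMID_SELF);
}
#endif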
87 #include <xen/config.h>
88 #include <xen/init.h>
89 #include <xen/kernel.h>
90 #include <xen/lib.h>
91 #include <xen/mm.h>
92 #include <xen/sched.h>
93 #include <xen/errno.h>
94 #include <xen/perfc.h>
95 #include <xen/irq.h>
96 #include <xen/softirq.h>
97 #include <xen/domain_page.h>
98 #include <xen/event.h>
99 #include <asm/shadow.h>
100 #include <asm/page.h>
101 #include <asm/flushtlb.h>
102 #include <asm/io.h>
103 #include <asm/uaccess.h>
104 #include <asm/ldt.h>
105 #include <asm/x86_emulate.h>
107 #ifdef VERBOSE
108 #define MEM_LOG(_f, _a...) \
109 printk("DOM%u: (file=mm.c, line=%d) " _f "\n", \
110 current->domain->domain_id , __LINE__ , ## _a )
111 #else
112 #define MEM_LOG(_f, _a...) ((void)0)
113 #endif
115 /*
116 * Both do_mmuext_op() and do_mmu_update():
117 * We steal the m.s.b. of the @count parameter to indicate whether this
118 * invocation is resuming a previously preempted call.
119 */
120 #define MMU_UPDATE_PREEMPTED (~(~0U>>1))
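/*
 * For illustration, the continuation encoding described above amounts to:
 *   count_arg = (count - i) | MMU_UPDATE_PREEMPTED;   on preemption, and
 *   count    &= ~MMU_UPDATE_PREEMPTED;                on re-entry,
 * so the stolen top bit can never collide with a genuine operation count.
 */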
122 static void free_l2_table(struct pfn_info *page);
123 static void free_l1_table(struct pfn_info *page);
125 static int mod_l2_entry(l2_pgentry_t *, l2_pgentry_t, unsigned long,
126 unsigned long type);
127 static int mod_l1_entry(l1_pgentry_t *, l1_pgentry_t);
129 /* Used to defer flushing of memory structures. */
130 static struct {
131 #define DOP_FLUSH_TLB (1<<0) /* Flush the TLB. */
132 #define DOP_RELOAD_LDT (1<<1) /* Reload the LDT shadow mapping. */
133 unsigned int deferred_ops;
134 /* If non-NULL, specifies a foreign subject domain for some operations. */
135 struct domain *foreign;
136 } __cacheline_aligned percpu_info[NR_CPUS];
138 /*
139 * Returns the current foreign domain; defaults to the currently-executing
140 * domain if a foreign override hasn't been specified.
141 */
142 #define FOREIGNDOM (percpu_info[smp_processor_id()].foreign ?: current->domain)
144 /* Private domain structs for DOMID_XEN and DOMID_IO. */
145 static struct domain *dom_xen, *dom_io;
147 /* Frame table and its size in pages. */
148 struct pfn_info *frame_table;
149 unsigned long max_page;
151 void __init init_frametable(void)
152 {
153 unsigned long nr_pages, page_step, i, pfn;
155 frame_table = (struct pfn_info *)FRAMETABLE_VIRT_START;
157 nr_pages = PFN_UP(max_page * sizeof(*frame_table));
158 page_step = (1 << L2_PAGETABLE_SHIFT) >> PAGE_SHIFT;
160 for ( i = 0; i < nr_pages; i += page_step )
161 {
162 pfn = alloc_boot_pages(min(nr_pages - i, page_step), page_step);
163 if ( pfn == 0 )
164 panic("Not enough memory for frame table\n");
165 map_pages_to_xen(
166 FRAMETABLE_VIRT_START + (i << PAGE_SHIFT),
167 pfn, page_step, PAGE_HYPERVISOR);
168 }
170 memset(frame_table, 0, nr_pages << PAGE_SHIFT);
171 }
173 void arch_init_memory(void)
174 {
175 extern void subarch_init_memory(struct domain *);
177 unsigned long i, pfn, rstart_pfn, rend_pfn;
178 struct pfn_info *page;
180 memset(percpu_info, 0, sizeof(percpu_info));
182 /*
183 * Initialise our DOMID_XEN domain.
184 * Any Xen-heap pages that we will allow to be mapped will have
185 * their domain field set to dom_xen.
186 */
187 dom_xen = alloc_domain_struct();
188 atomic_set(&dom_xen->refcnt, 1);
189 dom_xen->domain_id = DOMID_XEN;
191 /*
192 * Initialise our DOMID_IO domain.
193 * This domain owns I/O pages that are within the range of the pfn_info
194 * array. Mappings occur at the privilege level of the caller.
195 */
196 dom_io = alloc_domain_struct();
197 atomic_set(&dom_io->refcnt, 1);
198 dom_io->domain_id = DOMID_IO;
200 /* First 1MB of RAM is historically marked as I/O. */
201 for ( i = 0; i < 0x100; i++ )
202 {
203 page = &frame_table[i];
204 page->count_info = PGC_allocated | 1;
205 page->u.inuse.type_info = PGT_writable_page | PGT_validated | 1;
206 page_set_owner(page, dom_io);
207 }
209 /* Any areas not specified as RAM by the e820 map are considered I/O. */
210 for ( i = 0, pfn = 0; i < e820.nr_map; i++ )
211 {
212 if ( e820.map[i].type != E820_RAM )
213 continue;
214 /* Every page from cursor to start of next RAM region is I/O. */
215 rstart_pfn = PFN_UP(e820.map[i].addr);
216 rend_pfn = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
217 while ( pfn < rstart_pfn )
218 {
219 BUG_ON(!pfn_valid(pfn));
220 page = &frame_table[pfn++];
221 page->count_info = PGC_allocated | 1;
222 page->u.inuse.type_info = PGT_writable_page | PGT_validated | 1;
223 page_set_owner(page, dom_io);
224 }
225 /* Skip the RAM region. */
226 pfn = rend_pfn;
227 }
228 BUG_ON(pfn != max_page);
230 subarch_init_memory(dom_xen);
231 }
233 void write_ptbase(struct vcpu *v)
234 {
235 write_cr3(pagetable_get_paddr(v->arch.monitor_table));
236 }
238 void invalidate_shadow_ldt(struct vcpu *v)
239 {
240 int i;
241 unsigned long pfn;
242 struct pfn_info *page;
244 if ( v->arch.shadow_ldt_mapcnt == 0 )
245 return;
247 v->arch.shadow_ldt_mapcnt = 0;
249 for ( i = 16; i < 32; i++ )
250 {
251 pfn = l1e_get_pfn(v->arch.perdomain_ptes[i]);
252 if ( pfn == 0 ) continue;
253 v->arch.perdomain_ptes[i] = l1e_empty();
254 page = &frame_table[pfn];
255 ASSERT_PAGE_IS_TYPE(page, PGT_ldt_page);
256 ASSERT_PAGE_IS_DOMAIN(page, v->domain);
257 put_page_and_type(page);
258 }
260 /* Dispose of the (now possibly invalid) mappings from the TLB. */
261 percpu_info[v->processor].deferred_ops |= DOP_FLUSH_TLB | DOP_RELOAD_LDT;
262 }
265 static int alloc_segdesc_page(struct pfn_info *page)
266 {
267 struct desc_struct *descs;
268 int i;
270 descs = map_domain_page(page_to_pfn(page));
272 for ( i = 0; i < 512; i++ )
273 if ( unlikely(!check_descriptor(&descs[i])) )
274 goto fail;
276 unmap_domain_page(descs);
277 return 1;
279 fail:
280 unmap_domain_page(descs);
281 return 0;
282 }
285 /* Map shadow page at offset @off. */
286 int map_ldt_shadow_page(unsigned int off)
287 {
288 struct vcpu *v = current;
289 struct domain *d = v->domain;
290 unsigned long gpfn, gmfn;
291 l1_pgentry_t l1e, nl1e;
292 unsigned long gva = v->arch.guest_context.ldt_base + (off << PAGE_SHIFT);
293 int res;
295 #if defined(__x86_64__)
296 /* If in user mode, switch to kernel mode just to read LDT mapping. */
297 extern void toggle_guest_mode(struct vcpu *);
298 int user_mode = !(v->arch.flags & TF_kernel_mode);
299 #define TOGGLE_MODE() if ( user_mode ) toggle_guest_mode(v)
300 #elif defined(__i386__)
301 #define TOGGLE_MODE() ((void)0)
302 #endif
304 BUG_ON(unlikely(in_irq()));
306 shadow_sync_va(v, gva);
308 TOGGLE_MODE();
309 __copy_from_user(&l1e, &linear_pg_table[l1_linear_offset(gva)],
310 sizeof(l1e));
311 TOGGLE_MODE();
313 if ( unlikely(!(l1e_get_flags(l1e) & _PAGE_PRESENT)) )
314 return 0;
316 gpfn = l1e_get_pfn(l1e);
317 gmfn = __gpfn_to_mfn(d, gpfn);
318 if ( unlikely(!VALID_MFN(gmfn)) )
319 return 0;
321 res = get_page_and_type(&frame_table[gmfn], d, PGT_ldt_page);
323 if ( !res && unlikely(shadow_mode_refcounts(d)) )
324 {
325 shadow_lock(d);
326 shadow_remove_all_write_access(d, gpfn, gmfn);
327 res = get_page_and_type(&frame_table[gmfn], d, PGT_ldt_page);
328 shadow_unlock(d);
329 }
331 if ( unlikely(!res) )
332 return 0;
334 nl1e = l1e_from_pfn(gmfn, l1e_get_flags(l1e) | _PAGE_RW);
336 v->arch.perdomain_ptes[off + 16] = nl1e;
337 v->arch.shadow_ldt_mapcnt++;
339 return 1;
340 }
343 static int get_page_from_pagenr(unsigned long page_nr, struct domain *d)
344 {
345 struct pfn_info *page = &frame_table[page_nr];
347 if ( unlikely(!pfn_valid(page_nr)) || unlikely(!get_page(page, d)) )
348 {
349 MEM_LOG("Could not get page ref for pfn %lx", page_nr);
350 return 0;
351 }
353 return 1;
354 }
357 static int get_page_and_type_from_pagenr(unsigned long page_nr,
358 unsigned long type,
359 struct domain *d)
360 {
361 struct pfn_info *page = &frame_table[page_nr];
363 if ( unlikely(!get_page_from_pagenr(page_nr, d)) )
364 return 0;
366 if ( unlikely(!get_page_type(page, type)) )
367 {
368 if ( (type & PGT_type_mask) != PGT_l1_page_table )
369 MEM_LOG("Bad page type for pfn %lx (%" PRtype_info ")",
370 page_nr, page->u.inuse.type_info);
371 put_page(page);
372 return 0;
373 }
375 return 1;
376 }
378 /*
379 * We allow root tables to map each other (a.k.a. linear page tables). It
380 * needs some special care with reference counts and access permissions:
381 * 1. The mapping entry must be read-only, or the guest may get write access
382 * to its own PTEs.
383 * 2. We must only bump the reference counts for an *already validated*
384 * L2 table, or we can end up in a deadlock in get_page_type() by waiting
385 * on a validation that itself depends on the validation we are performing.
386 * 3. We only need to increment the reference counts for the mapped page
387 * frame if it is mapped by a different root table. This is sufficient and
388 * also necessary to allow validation of a root table mapping itself.
389 */
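/*
 * A concrete illustration of rule 3 above: if the entry in the root table at
 * mfn M points back at M itself (and is read-only), get_linear_pagetable()
 * accepts it without taking any extra reference; if it points at a different
 * root table M', a general reference is taken on M' and its type count is
 * bumped, which only succeeds if M' is an already-validated root table.
 */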
390 static int
391 get_linear_pagetable(
392 root_pgentry_t re, unsigned long re_pfn, struct domain *d)
393 {
394 unsigned long x, y;
395 struct pfn_info *page;
396 unsigned long pfn;
398 ASSERT( !shadow_mode_refcounts(d) );
400 if ( (root_get_flags(re) & _PAGE_RW) )
401 {
402 MEM_LOG("Attempt to create linear p.t. with write perms");
403 return 0;
404 }
406 if ( (pfn = root_get_pfn(re)) != re_pfn )
407 {
408 /* Make sure the mapped frame belongs to the correct domain. */
409 if ( unlikely(!get_page_from_pagenr(pfn, d)) )
410 return 0;
412 /*
413 * Make sure that the mapped frame is an already-validated L2 table.
414 * If so, atomically increment the count (checking for overflow).
415 */
416 page = &frame_table[pfn];
417 y = page->u.inuse.type_info;
418 do {
419 x = y;
420 if ( unlikely((x & PGT_count_mask) == PGT_count_mask) ||
421 unlikely((x & (PGT_type_mask|PGT_validated)) !=
422 (PGT_root_page_table|PGT_validated)) )
423 {
424 put_page(page);
425 return 0;
426 }
427 }
428 while ( (y = cmpxchg(&page->u.inuse.type_info, x, x + 1)) != x );
429 }
431 return 1;
432 }
434 int
435 get_page_from_l1e(
436 l1_pgentry_t l1e, struct domain *d)
437 {
438 unsigned long mfn = l1e_get_pfn(l1e);
439 struct pfn_info *page = &frame_table[mfn];
440 extern int domain_iomem_in_pfn(struct domain *d, unsigned long pfn);
442 if ( !(l1e_get_flags(l1e) & _PAGE_PRESENT) )
443 return 1;
445 if ( unlikely(l1e_get_flags(l1e) & L1_DISALLOW_MASK) )
446 {
447 MEM_LOG("Bad L1 flags %x", l1e_get_flags(l1e) & L1_DISALLOW_MASK);
448 return 0;
449 }
451 if ( unlikely(!pfn_valid(mfn)) ||
452 unlikely(page_get_owner(page) == dom_io) )
453 {
454 /* DOMID_IO reverts to caller for privilege checks. */
455 if ( d == dom_io )
456 d = current->domain;
458 if ( (!IS_PRIV(d)) &&
459 (!IS_CAPABLE_PHYSDEV(d) || !domain_iomem_in_pfn(d, mfn)) )
460 {
461 MEM_LOG("Non-privileged attempt to map I/O space %08lx", mfn);
462 return 0;
463 }
465 /* No reference counting for out-of-range I/O pages. */
466 if ( !pfn_valid(mfn) )
467 return 1;
469 d = dom_io;
470 }
472 return ((l1e_get_flags(l1e) & _PAGE_RW) ?
473 get_page_and_type(page, d, PGT_writable_page) :
474 get_page(page, d));
475 }
478 /* NB. Virtual address 'l2e' maps to a machine address within frame 'pfn'. */
479 static int
480 get_page_from_l2e(
481 l2_pgentry_t l2e, unsigned long pfn,
482 struct domain *d, unsigned long vaddr)
483 {
484 int rc;
486 ASSERT(!shadow_mode_refcounts(d));
488 if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) )
489 return 1;
491 if ( unlikely((l2e_get_flags(l2e) & L2_DISALLOW_MASK)) )
492 {
493 MEM_LOG("Bad L2 flags %x", l2e_get_flags(l2e) & L2_DISALLOW_MASK);
494 return 0;
495 }
497 vaddr >>= L2_PAGETABLE_SHIFT;
498 vaddr <<= PGT_va_shift;
499 rc = get_page_and_type_from_pagenr(
500 l2e_get_pfn(l2e), PGT_l1_page_table | vaddr, d);
502 #if CONFIG_PAGING_LEVELS == 2
503 if (!rc)
504 rc = get_linear_pagetable(l2e, pfn, d);
505 #endif
506 return rc;
507 }
510 #if CONFIG_PAGING_LEVELS >= 3
512 static int
513 get_page_from_l3e(
514 l3_pgentry_t l3e, unsigned long pfn,
515 struct domain *d, unsigned long vaddr)
516 {
517 ASSERT( !shadow_mode_refcounts(d) );
519 int rc;
521 if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) )
522 return 1;
524 if ( unlikely((l3e_get_flags(l3e) & L3_DISALLOW_MASK)) )
525 {
526 MEM_LOG("Bad L3 flags %x", l3e_get_flags(l3e) & L3_DISALLOW_MASK);
527 return 0;
528 }
530 vaddr >>= L3_PAGETABLE_SHIFT;
531 vaddr <<= PGT_va_shift;
532 rc = get_page_and_type_from_pagenr(
533 l3e_get_pfn(l3e),
534 PGT_l2_page_table | vaddr, d);
535 #if CONFIG_PAGING_LEVELS == 3
536 if (!rc)
537 rc = get_linear_pagetable(l3e, pfn, d);
538 #endif
539 return rc;
540 }
542 #endif /* 3 level */
544 #if CONFIG_PAGING_LEVELS >= 4
546 static int
547 get_page_from_l4e(
548 l4_pgentry_t l4e, unsigned long pfn,
549 struct domain *d, unsigned long vaddr)
550 {
551 int rc;
553 ASSERT( !shadow_mode_refcounts(d) );
555 if ( !(l4e_get_flags(l4e) & _PAGE_PRESENT) )
556 return 1;
558 if ( unlikely((l4e_get_flags(l4e) & L4_DISALLOW_MASK)) )
559 {
560 MEM_LOG("Bad L4 flags %x", l4e_get_flags(l4e) & L4_DISALLOW_MASK);
561 return 0;
562 }
564 vaddr >>= L4_PAGETABLE_SHIFT;
565 vaddr <<= PGT_va_shift;
566 rc = get_page_and_type_from_pagenr(
567 l4e_get_pfn(l4e),
568 PGT_l3_page_table | vaddr, d);
570 if ( unlikely(!rc) )
571 return get_linear_pagetable(l4e, pfn, d);
573 return 1;
574 }
576 #endif /* 4 level */
579 void put_page_from_l1e(l1_pgentry_t l1e, struct domain *d)
580 {
581 unsigned long pfn = l1e_get_pfn(l1e);
582 struct pfn_info *page = &frame_table[pfn];
583 struct domain *e;
585 if ( !(l1e_get_flags(l1e) & _PAGE_PRESENT) || !pfn_valid(pfn) )
586 return;
588 e = page_get_owner(page);
589 if ( unlikely(e != d) )
590 {
591 /*
592 * Unmap a foreign page that may have been mapped via a grant table.
593 * Note that this can fail for a privileged domain that can map foreign
594 * pages via MMUEXT_SET_FOREIGNDOM. Such domains can have some mappings
595 * counted via a grant entry and some counted directly in the page
596 * structure's reference count. Note that reference counts won't get
597 * dangerously confused as long as we always try to decrement the
598 * grant entry first. We may end up with a mismatch between which
599 * mappings and which unmappings are counted via the grant entry, but
600 * really it doesn't matter as privileged domains have carte blanche.
601 */
602 if (likely(gnttab_check_unmap(e, d, pfn,
603 !(l1e_get_flags(l1e) & _PAGE_RW))))
604 return;
605 /* Assume this mapping was made via MMUEXT_SET_FOREIGNDOM... */
606 }
608 if ( l1e_get_flags(l1e) & _PAGE_RW )
609 {
610 put_page_and_type(page);
611 }
612 else
613 {
614 /* We expect this to be rare, so we blow away the entire shadow LDT. */
615 if ( unlikely(((page->u.inuse.type_info & PGT_type_mask) ==
616 PGT_ldt_page)) &&
617 unlikely(((page->u.inuse.type_info & PGT_count_mask) != 0)) )
619 // XXX SMP BUG?
620 invalidate_shadow_ldt(e->vcpu[0]);
621 put_page(page);
622 }
623 }
626 /*
627 * NB. Virtual address 'l2e' maps to a machine address within frame 'pfn'.
628 * Note also that this automatically deals correctly with linear p.t.'s.
629 */
630 static void put_page_from_l2e(l2_pgentry_t l2e, unsigned long pfn)
631 {
632 if ( (l2e_get_flags(l2e) & _PAGE_PRESENT) &&
633 (l2e_get_pfn(l2e) != pfn) )
634 put_page_and_type(&frame_table[l2e_get_pfn(l2e)]);
635 }
638 #if CONFIG_PAGING_LEVELS >= 3
640 static void put_page_from_l3e(l3_pgentry_t l3e, unsigned long pfn)
641 {
642 if ( (l3e_get_flags(l3e) & _PAGE_PRESENT) &&
643 (l3e_get_pfn(l3e) != pfn) )
644 put_page_and_type(&frame_table[l3e_get_pfn(l3e)]);
645 }
647 #endif
649 #if CONFIG_PAGING_LEVELS >= 4
651 static void put_page_from_l4e(l4_pgentry_t l4e, unsigned long pfn)
652 {
653 if ( (l4e_get_flags(l4e) & _PAGE_PRESENT) &&
654 (l4e_get_pfn(l4e) != pfn) )
655 put_page_and_type(&frame_table[l4e_get_pfn(l4e)]);
656 }
658 #endif
661 static int alloc_l1_table(struct pfn_info *page)
662 {
663 struct domain *d = page_get_owner(page);
664 unsigned long pfn = page_to_pfn(page);
665 l1_pgentry_t *pl1e;
666 int i;
668 ASSERT(!shadow_mode_refcounts(d));
670 pl1e = map_domain_page(pfn);
672 for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
673 if ( is_guest_l1_slot(i) &&
674 unlikely(!get_page_from_l1e(pl1e[i], d)) )
675 goto fail;
677 unmap_domain_page(pl1e);
678 return 1;
680 fail:
681 while ( i-- > 0 )
682 if ( is_guest_l1_slot(i) )
683 put_page_from_l1e(pl1e[i], d);
685 unmap_domain_page(pl1e);
686 return 0;
687 }
689 #ifdef CONFIG_X86_PAE
690 static int create_pae_xen_mappings(l3_pgentry_t *pl3e)
691 {
692 struct pfn_info *page;
693 l2_pgentry_t *pl2e;
694 l3_pgentry_t l3e3;
695 int i;
697 pl3e = (l3_pgentry_t *)((unsigned long)pl3e & PAGE_MASK);
699 /* 3rd L3 slot contains L2 with Xen-private mappings. It *must* exist. */
700 l3e3 = pl3e[3];
701 if ( !(l3e_get_flags(l3e3) & _PAGE_PRESENT) )
702 {
703 MEM_LOG("PAE L3 3rd slot is empty");
704 return 0;
705 }
707 /*
708 * The Xen-private mappings include linear mappings. The L2 thus cannot
709 * be shared by multiple L3 tables. The test here is adequate because:
710 * 1. Cannot appear in slots != 3 because the page would then have an
711 * unknown va backpointer, which get_page_type() explicitly disallows.
712 * 2. Cannot appear in another page table's L3:
713 * a. alloc_l3_table() calls this function and this check will fail
714 * b. mod_l3_entry() disallows updates to slot 3 in an existing table
715 */
716 page = l3e_get_page(l3e3);
717 BUG_ON(page->u.inuse.type_info & PGT_pinned);
718 BUG_ON((page->u.inuse.type_info & PGT_count_mask) == 0);
719 if ( (page->u.inuse.type_info & PGT_count_mask) != 1 )
720 {
721 MEM_LOG("PAE L3 3rd slot is shared");
722 return 0;
723 }
725 /* Xen private mappings. */
726 pl2e = map_domain_page(l3e_get_pfn(l3e3));
727 memcpy(&pl2e[L2_PAGETABLE_FIRST_XEN_SLOT & (L2_PAGETABLE_ENTRIES-1)],
728 &idle_pg_table_l2[L2_PAGETABLE_FIRST_XEN_SLOT],
729 L2_PAGETABLE_XEN_SLOTS * sizeof(l2_pgentry_t));
730 for ( i = 0; i < (PERDOMAIN_MBYTES >> (L2_PAGETABLE_SHIFT - 20)); i++ )
731 pl2e[l2_table_offset(PERDOMAIN_VIRT_START) + i] =
732 l2e_from_page(
733 virt_to_page(page_get_owner(page)->arch.mm_perdomain_pt) + i,
734 __PAGE_HYPERVISOR);
735 for ( i = 0; i < (LINEARPT_MBYTES >> (L2_PAGETABLE_SHIFT - 20)); i++ )
736 pl2e[l2_table_offset(LINEAR_PT_VIRT_START) + i] =
737 (l3e_get_flags(pl3e[i]) & _PAGE_PRESENT) ?
738 l2e_from_pfn(l3e_get_pfn(pl3e[i]), __PAGE_HYPERVISOR) :
739 l2e_empty();
740 unmap_domain_page(pl2e);
742 return 1;
743 }
745 static inline int l1_backptr(
746 unsigned long *backptr, unsigned long offset_in_l2, unsigned long l2_type)
747 {
748 unsigned long l2_backptr = l2_type & PGT_va_mask;
749 BUG_ON(l2_backptr == PGT_va_unknown);
750 if ( l2_backptr == PGT_va_mutable )
751 return 0;
752 *backptr =
753 ((l2_backptr >> PGT_va_shift) << L3_PAGETABLE_SHIFT) |
754 (offset_in_l2 << L2_PAGETABLE_SHIFT);
755 return 1;
756 }
758 #elif CONFIG_X86_64
759 # define create_pae_xen_mappings(pl3e) (1)
761 static inline int l1_backptr(
762 unsigned long *backptr, unsigned long offset_in_l2, unsigned long l2_type)
763 {
764 unsigned long l2_backptr = l2_type & PGT_va_mask;
765 BUG_ON(l2_backptr == PGT_va_unknown);
767 *backptr = ((l2_backptr >> PGT_va_shift) << L3_PAGETABLE_SHIFT) |
768 (offset_in_l2 << L2_PAGETABLE_SHIFT);
769 return 1;
770 }
772 static inline int l2_backptr(
773 unsigned long *backptr, unsigned long offset_in_l3, unsigned long l3_type)
774 {
775 unsigned long l3_backptr = l3_type & PGT_va_mask;
776 BUG_ON(l3_backptr == PGT_va_unknown);
778 *backptr = ((l3_backptr >> PGT_va_shift) << L4_PAGETABLE_SHIFT) |
779 (offset_in_l3 << L3_PAGETABLE_SHIFT);
780 return 1;
781 }
783 static inline int l3_backptr(
784 unsigned long *backptr, unsigned long offset_in_l4, unsigned long l4_type)
785 {
786 unsigned long l4_backptr = l4_type & PGT_va_mask;
787 BUG_ON(l4_backptr == PGT_va_unknown);
789 *backptr = (offset_in_l4 << L4_PAGETABLE_SHIFT);
790 return 1;
791 }
792 #else
793 # define create_pae_xen_mappings(pl3e) (1)
794 # define l1_backptr(bp,l2o,l2t) \
795 ({ *(bp) = (unsigned long)(l2o) << L2_PAGETABLE_SHIFT; 1; })
796 #endif
798 static int alloc_l2_table(struct pfn_info *page, unsigned long type)
799 {
800 struct domain *d = page_get_owner(page);
801 unsigned long pfn = page_to_pfn(page);
802 unsigned long vaddr;
803 l2_pgentry_t *pl2e;
804 int i;
806 /* See the code in shadow_promote() to understand why this is here. */
807 if ( (PGT_base_page_table == PGT_l2_page_table) &&
808 unlikely(shadow_mode_refcounts(d)) )
809 return 1;
810 ASSERT(!shadow_mode_refcounts(d));
812 pl2e = map_domain_page(pfn);
814 for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
815 {
816 if ( !l1_backptr(&vaddr, i, type) )
817 goto fail;
818 if ( is_guest_l2_slot(type, i) &&
819 unlikely(!get_page_from_l2e(pl2e[i], pfn, d, vaddr)) )
820 goto fail;
821 }
823 #if CONFIG_PAGING_LEVELS == 2
824 /* Xen private mappings. */
825 memcpy(&pl2e[L2_PAGETABLE_FIRST_XEN_SLOT],
826 &idle_pg_table[L2_PAGETABLE_FIRST_XEN_SLOT],
827 L2_PAGETABLE_XEN_SLOTS * sizeof(l2_pgentry_t));
828 pl2e[l2_table_offset(LINEAR_PT_VIRT_START)] =
829 l2e_from_pfn(pfn, __PAGE_HYPERVISOR);
830 pl2e[l2_table_offset(PERDOMAIN_VIRT_START)] =
831 l2e_from_page(
832 virt_to_page(page_get_owner(page)->arch.mm_perdomain_pt),
833 __PAGE_HYPERVISOR);
834 #endif
836 unmap_domain_page(pl2e);
837 return 1;
839 fail:
840 while ( i-- > 0 )
841 if ( is_guest_l2_slot(type, i) )
842 put_page_from_l2e(pl2e[i], pfn);
844 unmap_domain_page(pl2e);
845 return 0;
846 }
849 #if CONFIG_PAGING_LEVELS >= 3
850 static int alloc_l3_table(struct pfn_info *page, unsigned long type)
851 {
852 struct domain *d = page_get_owner(page);
853 unsigned long pfn = page_to_pfn(page);
854 unsigned long vaddr;
855 l3_pgentry_t *pl3e;
856 int i;
858 ASSERT(!shadow_mode_refcounts(d));
860 #ifdef CONFIG_X86_PAE
861 if ( pfn >= 0x100000 )
862 {
863 MEM_LOG("PAE pgd must be below 4GB (0x%lx >= 0x100000)", pfn);
864 return 0;
865 }
866 #endif
868 pl3e = map_domain_page(pfn);
869 for ( i = 0; i < L3_PAGETABLE_ENTRIES; i++ )
870 {
871 #if CONFIG_PAGING_LEVELS >= 4
872 if ( !l2_backptr(&vaddr, i, type) )
873 goto fail;
874 #else
875 vaddr = (unsigned long)i << L3_PAGETABLE_SHIFT;
876 #endif
877 if ( is_guest_l3_slot(i) &&
878 unlikely(!get_page_from_l3e(pl3e[i], pfn, d, vaddr)) )
879 goto fail;
880 }
882 if ( !create_pae_xen_mappings(pl3e) )
883 goto fail;
885 unmap_domain_page(pl3e);
886 return 1;
888 fail:
889 while ( i-- > 0 )
890 if ( is_guest_l3_slot(i) )
891 put_page_from_l3e(pl3e[i], pfn);
893 unmap_domain_page(pl3e);
894 return 0;
895 }
896 #else
897 #define alloc_l3_table(page, type) (0)
898 #endif
900 #if CONFIG_PAGING_LEVELS >= 4
901 static int alloc_l4_table(struct pfn_info *page, unsigned long type)
902 {
903 struct domain *d = page_get_owner(page);
904 unsigned long pfn = page_to_pfn(page);
905 l4_pgentry_t *pl4e = page_to_virt(page);
906 unsigned long vaddr;
907 int i;
909 /* See the code in shadow_promote() to understand why this is here. */
910 if ( (PGT_base_page_table == PGT_l4_page_table) &&
911 shadow_mode_refcounts(d) )
912 return 1;
913 ASSERT(!shadow_mode_refcounts(d));
915 for ( i = 0; i < L4_PAGETABLE_ENTRIES; i++ )
916 {
917 if ( !l3_backptr(&vaddr, i, type) )
918 goto fail;
920 if ( is_guest_l4_slot(i) &&
921 unlikely(!get_page_from_l4e(pl4e[i], pfn, d, vaddr)) )
922 goto fail;
923 }
925 /* Xen private mappings. */
926 memcpy(&pl4e[ROOT_PAGETABLE_FIRST_XEN_SLOT],
927 &idle_pg_table[ROOT_PAGETABLE_FIRST_XEN_SLOT],
928 ROOT_PAGETABLE_XEN_SLOTS * sizeof(l4_pgentry_t));
929 pl4e[l4_table_offset(LINEAR_PT_VIRT_START)] =
930 l4e_from_pfn(pfn, __PAGE_HYPERVISOR);
931 pl4e[l4_table_offset(PERDOMAIN_VIRT_START)] =
932 l4e_from_page(
933 virt_to_page(page_get_owner(page)->arch.mm_perdomain_l3),
934 __PAGE_HYPERVISOR);
936 return 1;
938 fail:
939 while ( i-- > 0 )
940 if ( is_guest_l4_slot(i) )
941 put_page_from_l4e(pl4e[i], pfn);
943 return 0;
944 }
945 #else
946 #define alloc_l4_table(page, type) (0)
947 #endif
950 static void free_l1_table(struct pfn_info *page)
951 {
952 struct domain *d = page_get_owner(page);
953 unsigned long pfn = page_to_pfn(page);
954 l1_pgentry_t *pl1e;
955 int i;
957 pl1e = map_domain_page(pfn);
959 for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
960 if ( is_guest_l1_slot(i) )
961 put_page_from_l1e(pl1e[i], d);
963 unmap_domain_page(pl1e);
964 }
967 static void free_l2_table(struct pfn_info *page)
968 {
969 unsigned long pfn = page_to_pfn(page);
970 l2_pgentry_t *pl2e;
971 int i;
973 pl2e = map_domain_page(pfn);
975 for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
976 if ( is_guest_l2_slot(page->u.inuse.type_info, i) )
977 put_page_from_l2e(pl2e[i], pfn);
979 unmap_domain_page(pl2e);
980 }
983 #if CONFIG_PAGING_LEVELS >= 3
985 static void free_l3_table(struct pfn_info *page)
986 {
987 unsigned long pfn = page_to_pfn(page);
988 l3_pgentry_t *pl3e;
989 int i;
991 pl3e = map_domain_page(pfn);
993 for ( i = 0; i < L3_PAGETABLE_ENTRIES; i++ )
994 if ( is_guest_l3_slot(i) )
995 put_page_from_l3e(pl3e[i], pfn);
997 unmap_domain_page(pl3e);
998 }
1000 #endif
1002 #if CONFIG_PAGING_LEVELS >= 4
1004 static void free_l4_table(struct pfn_info *page)
1005 {
1006 unsigned long pfn = page_to_pfn(page);
1007 l4_pgentry_t *pl4e = page_to_virt(page);
1008 int i;
1010 for ( i = 0; i < L4_PAGETABLE_ENTRIES; i++ )
1011 if ( is_guest_l4_slot(i) )
1012 put_page_from_l4e(pl4e[i], pfn);
1013 }
1015 #endif
1017 static inline int update_l1e(l1_pgentry_t *pl1e,
1018 l1_pgentry_t ol1e,
1019 l1_pgentry_t nl1e)
1020 {
1021 intpte_t o = l1e_get_intpte(ol1e);
1022 intpte_t n = l1e_get_intpte(nl1e);
1024 if ( unlikely(cmpxchg_user(pl1e, o, n) != 0) ||
1025 unlikely(o != l1e_get_intpte(ol1e)) )
1026 {
1027 MEM_LOG("Failed to update %" PRIpte " -> %" PRIpte
1028 ": saw %" PRIpte,
1029 l1e_get_intpte(ol1e),
1030 l1e_get_intpte(nl1e),
1031 o);
1032 return 0;
1033 }
1034 return 1;
1035 }
1038 /* Update the L1 entry at pl1e to new value nl1e. */
1039 static int mod_l1_entry(l1_pgentry_t *pl1e, l1_pgentry_t nl1e)
1040 {
1041 l1_pgentry_t ol1e;
1042 struct domain *d = current->domain;
1044 if ( unlikely(__copy_from_user(&ol1e, pl1e, sizeof(ol1e)) != 0) )
1045 return 0;
1047 if ( unlikely(shadow_mode_refcounts(d)) )
1048 return update_l1e(pl1e, ol1e, nl1e);
1050 if ( l1e_get_flags(nl1e) & _PAGE_PRESENT )
1051 {
1052 if ( unlikely(l1e_get_flags(nl1e) & L1_DISALLOW_MASK) )
1053 {
1054 MEM_LOG("Bad L1 flags %x",
1055 l1e_get_flags(nl1e) & L1_DISALLOW_MASK);
1056 return 0;
1057 }
1059 /* Fast path for identical mapping, r/w and presence. */
1060 if ( !l1e_has_changed(ol1e, nl1e, _PAGE_RW | _PAGE_PRESENT))
1061 return update_l1e(pl1e, ol1e, nl1e);
1063 if ( unlikely(!get_page_from_l1e(nl1e, FOREIGNDOM)) )
1064 return 0;
1066 if ( unlikely(!update_l1e(pl1e, ol1e, nl1e)) )
1067 {
1068 put_page_from_l1e(nl1e, d);
1069 return 0;
1070 }
1071 }
1072 else
1073 {
1074 if ( unlikely(!update_l1e(pl1e, ol1e, nl1e)) )
1075 return 0;
1076 }
1078 put_page_from_l1e(ol1e, d);
1079 return 1;
1080 }
1082 #define UPDATE_ENTRY(_t,_p,_o,_n) ({ \
1083 intpte_t __o = cmpxchg((intpte_t *)(_p), \
1084 _t ## e_get_intpte(_o), \
1085 _t ## e_get_intpte(_n)); \
1086 if ( __o != _t ## e_get_intpte(_o) ) \
1087 MEM_LOG("Failed to update %" PRIpte " -> %" PRIpte \
1088 ": saw %" PRIpte "", \
1089 (_t ## e_get_intpte(_o)), \
1090 (_t ## e_get_intpte(_n)), \
1091 (__o)); \
1092 (__o == _t ## e_get_intpte(_o)); })
1094 /* Update the L2 entry at pl2e to new value nl2e. pl2e is within frame pfn. */
1095 static int mod_l2_entry(l2_pgentry_t *pl2e,
1096 l2_pgentry_t nl2e,
1097 unsigned long pfn,
1098 unsigned long type)
1100 l2_pgentry_t ol2e;
1101 unsigned long vaddr = 0;
1103 if ( unlikely(!is_guest_l2_slot(type,pgentry_ptr_to_slot(pl2e))) )
1105 MEM_LOG("Illegal L2 update attempt in Xen-private area %p", pl2e);
1106 return 0;
1109 if ( unlikely(__copy_from_user(&ol2e, pl2e, sizeof(ol2e)) != 0) )
1110 return 0;
1112 if ( l2e_get_flags(nl2e) & _PAGE_PRESENT )
1114 if ( unlikely(l2e_get_flags(nl2e) & L2_DISALLOW_MASK) )
1116 MEM_LOG("Bad L2 flags %x",
1117 l2e_get_flags(nl2e) & L2_DISALLOW_MASK);
1118 return 0;
1121 /* Fast path for identical mapping and presence. */
1122 if ( !l2e_has_changed(ol2e, nl2e, _PAGE_PRESENT))
1123 return UPDATE_ENTRY(l2, pl2e, ol2e, nl2e);
1125 if ( unlikely(!l1_backptr(&vaddr, pgentry_ptr_to_slot(pl2e), type)) ||
1126 unlikely(!get_page_from_l2e(nl2e, pfn, current->domain, vaddr)) )
1127 return 0;
1129 if ( unlikely(!UPDATE_ENTRY(l2, pl2e, ol2e, nl2e)) )
1131 put_page_from_l2e(nl2e, pfn);
1132 return 0;
1135 else if ( unlikely(!UPDATE_ENTRY(l2, pl2e, ol2e, nl2e)) )
1137 return 0;
1140 put_page_from_l2e(ol2e, pfn);
1141 return 1;
1145 #if CONFIG_PAGING_LEVELS >= 3
1147 /* Update the L3 entry at pl3e to new value nl3e. pl3e is within frame pfn. */
1148 static int mod_l3_entry(l3_pgentry_t *pl3e,
1149 l3_pgentry_t nl3e,
1150 unsigned long pfn,
1151 unsigned long type)
1153 l3_pgentry_t ol3e;
1154 unsigned long vaddr;
1156 if ( unlikely(!is_guest_l3_slot(pgentry_ptr_to_slot(pl3e))) )
1158 MEM_LOG("Illegal L3 update attempt in Xen-private area %p", pl3e);
1159 return 0;
1162 #ifdef CONFIG_X86_PAE
1163 /*
1164 * Disallow updates to final L3 slot. It contains Xen mappings, and it
1165 * would be a pain to ensure they remain continuously valid throughout.
1166 */
1167 if ( pgentry_ptr_to_slot(pl3e) >= 3 )
1168 return 0;
1169 #endif
1171 if ( unlikely(__copy_from_user(&ol3e, pl3e, sizeof(ol3e)) != 0) )
1172 return 0;
1174 if ( l3e_get_flags(nl3e) & _PAGE_PRESENT )
1176 if ( unlikely(l3e_get_flags(nl3e) & L3_DISALLOW_MASK) )
1178 MEM_LOG("Bad L3 flags %x",
1179 l3e_get_flags(nl3e) & L3_DISALLOW_MASK);
1180 return 0;
1183 /* Fast path for identical mapping and presence. */
1184 if (!l3e_has_changed(ol3e, nl3e, _PAGE_PRESENT))
1185 return UPDATE_ENTRY(l3, pl3e, ol3e, nl3e);
1187 #if CONFIG_PAGING_LEVELS >= 4
1188 if ( unlikely(!l2_backptr(&vaddr, pgentry_ptr_to_slot(pl3e), type)) ||
1189 unlikely(!get_page_from_l3e(nl3e, pfn, current->domain, vaddr)) )
1190 return 0;
1191 #else
1192 vaddr = (((unsigned long)pl3e & ~PAGE_MASK) / sizeof(l3_pgentry_t))
1193 << L3_PAGETABLE_SHIFT;
1194 if ( unlikely(!get_page_from_l3e(nl3e, pfn, current->domain, vaddr)) )
1195 return 0;
1196 #endif
1198 if ( unlikely(!UPDATE_ENTRY(l3, pl3e, ol3e, nl3e)) )
1200 put_page_from_l3e(nl3e, pfn);
1201 return 0;
1204 else if ( unlikely(!UPDATE_ENTRY(l3, pl3e, ol3e, nl3e)) )
1206 return 0;
1209 BUG_ON(!create_pae_xen_mappings(pl3e));
1210 put_page_from_l3e(ol3e, pfn);
1211 return 1;
1214 #endif
1216 #if CONFIG_PAGING_LEVELS >= 4
1218 /* Update the L4 entry at pl4e to new value nl4e. pl4e is within frame pfn. */
1219 static int mod_l4_entry(l4_pgentry_t *pl4e,
1220 l4_pgentry_t nl4e,
1221 unsigned long pfn,
1222 unsigned long type)
1224 l4_pgentry_t ol4e;
1225 unsigned long vaddr;
1227 if ( unlikely(!is_guest_l4_slot(pgentry_ptr_to_slot(pl4e))) )
1229 MEM_LOG("Illegal L4 update attempt in Xen-private area %p", pl4e);
1230 return 0;
1233 if ( unlikely(__copy_from_user(&ol4e, pl4e, sizeof(ol4e)) != 0) )
1234 return 0;
1236 if ( l4e_get_flags(nl4e) & _PAGE_PRESENT )
1238 if ( unlikely(l4e_get_flags(nl4e) & L4_DISALLOW_MASK) )
1240 MEM_LOG("Bad L4 flags %x",
1241 l4e_get_flags(nl4e) & L4_DISALLOW_MASK);
1242 return 0;
1245 /* Fast path for identical mapping and presence. */
1246 if (!l4e_has_changed(ol4e, nl4e, _PAGE_PRESENT))
1247 return UPDATE_ENTRY(l4, pl4e, ol4e, nl4e);
1249 if ( unlikely(!l3_backptr(&vaddr, pgentry_ptr_to_slot(pl4e), type)) ||
1250 unlikely(!get_page_from_l4e(nl4e, pfn, current->domain, vaddr)) )
1251 return 0;
1253 if ( unlikely(!UPDATE_ENTRY(l4, pl4e, ol4e, nl4e)) )
1255 put_page_from_l4e(nl4e, pfn);
1256 return 0;
1259 else if ( unlikely(!UPDATE_ENTRY(l4, pl4e, ol4e, nl4e)) )
1261 return 0;
1264 put_page_from_l4e(ol4e, pfn);
1265 return 1;
1268 #endif
1270 int alloc_page_type(struct pfn_info *page, unsigned long type)
1272 switch ( type & PGT_type_mask )
1274 case PGT_l1_page_table:
1275 return alloc_l1_table(page);
1276 case PGT_l2_page_table:
1277 return alloc_l2_table(page, type);
1278 case PGT_l3_page_table:
1279 return alloc_l3_table(page, type);
1280 case PGT_l4_page_table:
1281 return alloc_l4_table(page, type);
1282 case PGT_gdt_page:
1283 case PGT_ldt_page:
1284 return alloc_segdesc_page(page);
1285 default:
1286 printk("Bad type in alloc_page_type %lx t=%" PRtype_info " c=%x\n",
1287 type, page->u.inuse.type_info,
1288 page->count_info);
1289 BUG();
1292 return 0;
1296 void free_page_type(struct pfn_info *page, unsigned long type)
1298 struct domain *owner = page_get_owner(page);
1299 unsigned long gpfn;
1301 if ( owner != NULL )
1303 if ( unlikely(shadow_mode_refcounts(owner)) )
1304 return;
1305 if ( unlikely(shadow_mode_enabled(owner)) )
1307 gpfn = __mfn_to_gpfn(owner, page_to_pfn(page));
1308 ASSERT(VALID_M2P(gpfn));
1309 remove_shadow(owner, gpfn, type & PGT_type_mask);
1313 switch (type & PGT_type_mask)
1315 case PGT_l1_page_table:
1316 free_l1_table(page);
1317 break;
1319 case PGT_l2_page_table:
1320 free_l2_table(page);
1321 break;
1323 #if CONFIG_PAGING_LEVELS >= 3
1324 case PGT_l3_page_table:
1325 free_l3_table(page);
1326 break;
1327 #endif
1329 #if CONFIG_PAGING_LEVELS >= 4
1330 case PGT_l4_page_table:
1331 free_l4_table(page);
1332 break;
1333 #endif
1335 default:
1336 printk("%s: type %lx pfn %lx\n",__FUNCTION__,
1337 type, page_to_pfn(page));
1338 BUG();
1343 void put_page_type(struct pfn_info *page)
1345 unsigned long nx, x, y = page->u.inuse.type_info;
1347 again:
1348 do {
1349 x = y;
1350 nx = x - 1;
1352 ASSERT((x & PGT_count_mask) != 0);
1354 /*
1355 * The page should always be validated while a reference is held. The
1356 * exception is during domain destruction, when we forcibly invalidate
1357 * page-table pages if we detect a referential loop.
1358 * See domain.c:relinquish_list().
1359 */
1360 ASSERT((x & PGT_validated) ||
1361 test_bit(_DOMF_dying, &page_get_owner(page)->domain_flags));
1363 if ( unlikely((nx & PGT_count_mask) == 0) )
1365 /* Record TLB information for flush later. Races are harmless. */
1366 page->tlbflush_timestamp = tlbflush_current_time();
1368 if ( unlikely((nx & PGT_type_mask) <= PGT_l4_page_table) &&
1369 likely(nx & PGT_validated) )
1371 /*
1372 * Page-table pages must be unvalidated when count is zero. The
1373 * 'free' is safe because the refcnt is non-zero and validated
1374 * bit is clear => other ops will spin or fail.
1375 */
1376 if ( unlikely((y = cmpxchg(&page->u.inuse.type_info, x,
1377 x & ~PGT_validated)) != x) )
1378 goto again;
1379 /* We cleared the 'valid bit' so we do the clean up. */
1380 free_page_type(page, x);
1381 /* Carry on, but with the 'valid bit' now clear. */
1382 x &= ~PGT_validated;
1383 nx &= ~PGT_validated;
1386 else if ( unlikely(((nx & (PGT_pinned | PGT_count_mask)) ==
1387 (PGT_pinned | 1)) &&
1388 ((nx & PGT_type_mask) != PGT_writable_page)) )
1390 /* Page is now only pinned. Make the back pointer mutable again. */
1391 nx |= PGT_va_mutable;
1394 while ( unlikely((y = cmpxchg(&page->u.inuse.type_info, x, nx)) != x) );
1398 int get_page_type(struct pfn_info *page, unsigned long type)
1400 unsigned long nx, x, y = page->u.inuse.type_info;
1402 again:
1403 do {
1404 x = y;
1405 nx = x + 1;
1406 if ( unlikely((nx & PGT_count_mask) == 0) )
1408 MEM_LOG("Type count overflow on pfn %lx", page_to_pfn(page));
1409 return 0;
1411 else if ( unlikely((x & PGT_count_mask) == 0) )
1413 if ( (x & (PGT_type_mask|PGT_va_mask)) != type )
1415 if ( (x & PGT_type_mask) != (type & PGT_type_mask) )
1417 /*
1418 * On a type change we check whether stale TLB
1419 * entries need flushing. This may be unnecessary (e.g., the page
1420 * was GDT/LDT) but those circumstances should be
1421 * very rare.
1422 */
1423 cpumask_t mask = page_get_owner(page)->cpumask;
1424 tlbflush_filter(mask, page->tlbflush_timestamp);
1426 if ( unlikely(!cpus_empty(mask)) )
1428 perfc_incrc(need_flush_tlb_flush);
1429 flush_tlb_mask(mask);
1433 /* We lose existing type, back pointer, and validity. */
1434 nx &= ~(PGT_type_mask | PGT_va_mask | PGT_validated);
1435 nx |= type;
1437 /* No special validation needed for writable pages. */
1438 /* Page tables and GDT/LDT need to be scanned for validity. */
1439 if ( type == PGT_writable_page )
1440 nx |= PGT_validated;
1443 else
1445 if ( unlikely((x & (PGT_type_mask|PGT_va_mask)) != type) )
1447 if ( unlikely((x & PGT_type_mask) != (type & PGT_type_mask) ) )
1449 if ( ((x & PGT_type_mask) != PGT_l2_page_table) ||
1450 ((type & PGT_type_mask) != PGT_l1_page_table) )
1451 MEM_LOG("Bad type (saw %" PRtype_info
1452 "!= exp %" PRtype_info ") "
1453 "for mfn %lx (pfn %x)",
1454 x, type, page_to_pfn(page),
1455 machine_to_phys_mapping[page_to_pfn(page)]);
1456 return 0;
1458 else if ( (x & PGT_va_mask) == PGT_va_mutable )
1460 /* The va backpointer is mutable, hence we update it. */
1461 nx &= ~PGT_va_mask;
1462 nx |= type; /* we know the actual type is correct */
1464 else if ( ((type & PGT_va_mask) != PGT_va_mutable) &&
1465 ((type & PGT_va_mask) != (x & PGT_va_mask)) )
1467 #ifdef CONFIG_X86_PAE
1468 /* We use backptr as extra typing. Cannot be unknown. */
1469 if ( (type & PGT_type_mask) == PGT_l2_page_table )
1470 return 0;
1471 #endif
1472 /* This table is possibly mapped at multiple locations. */
1473 nx &= ~PGT_va_mask;
1474 nx |= PGT_va_unknown;
1477 if ( unlikely(!(x & PGT_validated)) )
1479 /* Someone else is updating validation of this page. Wait... */
1480 while ( (y = page->u.inuse.type_info) == x )
1481 cpu_relax();
1482 goto again;
1486 while ( unlikely((y = cmpxchg(&page->u.inuse.type_info, x, nx)) != x) );
1488 if ( unlikely(!(nx & PGT_validated)) )
1490 /* Try to validate page type; drop the new reference on failure. */
1491 if ( unlikely(!alloc_page_type(page, type)) )
1493 MEM_LOG("Error while validating pfn %lx for type %" PRtype_info "."
1494 " caf=%08x taf=%" PRtype_info,
1495 page_to_pfn(page), type,
1496 page->count_info,
1497 page->u.inuse.type_info);
1498 /* No one else can get a reference. We hold the only ref. */
1499 page->u.inuse.type_info = 0;
1500 return 0;
1503 /* No one else is updating simultaneously. */
1504 __set_bit(_PGT_validated, &page->u.inuse.type_info);
1507 return 1;
1511 int new_guest_cr3(unsigned long mfn)
1513 struct vcpu *v = current;
1514 struct domain *d = v->domain;
1515 int okay;
1516 unsigned long old_base_mfn;
1518 if ( shadow_mode_refcounts(d) )
1519 okay = get_page_from_pagenr(mfn, d);
1520 else
1521 okay = get_page_and_type_from_pagenr(mfn, PGT_root_page_table, d);
1523 if ( likely(okay) )
1525 invalidate_shadow_ldt(v);
1527 old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
1528 v->arch.guest_table = mk_pagetable(mfn << PAGE_SHIFT);
1529 update_pagetables(v); /* update shadow_table and monitor_table */
1531 write_ptbase(v);
1533 if ( shadow_mode_refcounts(d) )
1534 put_page(&frame_table[old_base_mfn]);
1535 else
1536 put_page_and_type(&frame_table[old_base_mfn]);
1538 /* CR3 also holds a ref to its shadow... */
1539 if ( shadow_mode_enabled(d) )
1541 if ( v->arch.monitor_shadow_ref )
1542 put_shadow_ref(v->arch.monitor_shadow_ref);
1543 v->arch.monitor_shadow_ref =
1544 pagetable_get_pfn(v->arch.monitor_table);
1545 ASSERT(!page_get_owner(&frame_table[v->arch.monitor_shadow_ref]));
1546 get_shadow_ref(v->arch.monitor_shadow_ref);
1549 else
1551 MEM_LOG("Error while installing new baseptr %lx", mfn);
1554 return okay;
1557 static void process_deferred_ops(unsigned int cpu)
1559 unsigned int deferred_ops;
1560 struct domain *d = current->domain;
1562 deferred_ops = percpu_info[cpu].deferred_ops;
1563 percpu_info[cpu].deferred_ops = 0;
1565 if ( deferred_ops & DOP_FLUSH_TLB )
1567 if ( shadow_mode_enabled(d) )
1568 shadow_sync_all(d);
1569 local_flush_tlb();
1572 if ( deferred_ops & DOP_RELOAD_LDT )
1573 (void)map_ldt_shadow_page(0);
1575 if ( unlikely(percpu_info[cpu].foreign != NULL) )
1577 put_domain(percpu_info[cpu].foreign);
1578 percpu_info[cpu].foreign = NULL;
1582 static int set_foreigndom(unsigned int cpu, domid_t domid)
1584 struct domain *e, *d = current->domain;
1585 int okay = 1;
1587 if ( (e = percpu_info[cpu].foreign) != NULL )
1588 put_domain(e);
1589 percpu_info[cpu].foreign = NULL;
1591 if ( domid == DOMID_SELF )
1592 goto out;
1594 if ( !IS_PRIV(d) )
1596 switch ( domid )
1598 case DOMID_IO:
1599 get_knownalive_domain(dom_io);
1600 percpu_info[cpu].foreign = dom_io;
1601 break;
1602 default:
1603 MEM_LOG("Dom %u cannot set foreign dom", d->domain_id);
1604 okay = 0;
1605 break;
1608 else
1610 percpu_info[cpu].foreign = e = find_domain_by_id(domid);
1611 if ( e == NULL )
1613 switch ( domid )
1615 case DOMID_XEN:
1616 get_knownalive_domain(dom_xen);
1617 percpu_info[cpu].foreign = dom_xen;
1618 break;
1619 case DOMID_IO:
1620 get_knownalive_domain(dom_io);
1621 percpu_info[cpu].foreign = dom_io;
1622 break;
1623 default:
1624 MEM_LOG("Unknown domain '%u'", domid);
1625 okay = 0;
1626 break;
1631 out:
1632 return okay;
1635 static inline cpumask_t vcpumask_to_pcpumask(
1636 struct domain *d, unsigned long vmask)
1638 unsigned int vcpu_id;
1639 cpumask_t pmask;
1640 struct vcpu *v;
1642 while ( vmask != 0 )
1644 vcpu_id = find_first_set_bit(vmask);
1645 vmask &= ~(1UL << vcpu_id);
1646 if ( (vcpu_id < MAX_VIRT_CPUS) &&
1647 ((v = d->vcpu[vcpu_id]) != NULL) )
1648 cpu_set(v->processor, pmask);
1651 return pmask;
1654 int do_mmuext_op(
1655 struct mmuext_op *uops,
1656 unsigned int count,
1657 unsigned int *pdone,
1658 unsigned int foreigndom)
1660 struct mmuext_op op;
1661 int rc = 0, i = 0, okay, cpu = smp_processor_id();
1662 unsigned long type, done = 0;
1663 struct pfn_info *page;
1664 struct vcpu *v = current;
1665 struct domain *d = v->domain, *e;
1666 u32 x, y, _d, _nd;
1668 LOCK_BIGLOCK(d);
1670 cleanup_writable_pagetable(d);
1672 if ( unlikely(count & MMU_UPDATE_PREEMPTED) )
1674 count &= ~MMU_UPDATE_PREEMPTED;
1675 if ( unlikely(pdone != NULL) )
1676 (void)get_user(done, pdone);
1679 if ( !set_foreigndom(cpu, foreigndom) )
1681 rc = -EINVAL;
1682 goto out;
1685 if ( unlikely(!array_access_ok(uops, count, sizeof(op))) )
1687 rc = -EFAULT;
1688 goto out;
1691 for ( i = 0; i < count; i++ )
1693 if ( hypercall_preempt_check() )
1695 rc = hypercall4_create_continuation(
1696 __HYPERVISOR_mmuext_op, uops,
1697 (count - i) | MMU_UPDATE_PREEMPTED, pdone, foreigndom);
1698 break;
1701 if ( unlikely(__copy_from_user(&op, uops, sizeof(op)) != 0) )
1703 MEM_LOG("Bad __copy_from_user");
1704 rc = -EFAULT;
1705 break;
1708 okay = 1;
1709 page = &frame_table[op.mfn];
1711 switch ( op.cmd )
1713 case MMUEXT_PIN_L1_TABLE:
1714 type = PGT_l1_page_table | PGT_va_mutable;
1716 pin_page:
1717 if ( shadow_mode_refcounts(FOREIGNDOM) )
1718 type = PGT_writable_page;
1720 okay = get_page_and_type_from_pagenr(op.mfn, type, FOREIGNDOM);
1721 if ( unlikely(!okay) )
1723 MEM_LOG("Error while pinning mfn %lx", op.mfn);
1724 break;
1727 if ( unlikely(test_and_set_bit(_PGT_pinned,
1728 &page->u.inuse.type_info)) )
1730 MEM_LOG("Mfn %lx already pinned", op.mfn);
1731 put_page_and_type(page);
1732 okay = 0;
1733 break;
1736 break;
1738 #ifndef CONFIG_X86_PAE /* Unsafe on PAE because of Xen-private mappings. */
1739 case MMUEXT_PIN_L2_TABLE:
1740 type = PGT_l2_page_table | PGT_va_mutable;
1741 goto pin_page;
1742 #endif
1744 case MMUEXT_PIN_L3_TABLE:
1745 type = PGT_l3_page_table | PGT_va_mutable;
1746 goto pin_page;
1748 case MMUEXT_PIN_L4_TABLE:
1749 type = PGT_l4_page_table | PGT_va_mutable;
1750 goto pin_page;
1752 case MMUEXT_UNPIN_TABLE:
1753 if ( unlikely(!(okay = get_page_from_pagenr(op.mfn, FOREIGNDOM))) )
1755 MEM_LOG("Mfn %lx bad domain (dom=%p)",
1756 op.mfn, page_get_owner(page));
1758 else if ( likely(test_and_clear_bit(_PGT_pinned,
1759 &page->u.inuse.type_info)) )
1761 put_page_and_type(page);
1762 put_page(page);
1764 else
1766 okay = 0;
1767 put_page(page);
1768 MEM_LOG("Mfn %lx not pinned", op.mfn);
1770 break;
1772 case MMUEXT_NEW_BASEPTR:
1773 okay = new_guest_cr3(op.mfn);
1774 percpu_info[cpu].deferred_ops &= ~DOP_FLUSH_TLB;
1775 break;
1777 #ifdef __x86_64__
1778 case MMUEXT_NEW_USER_BASEPTR:
1779 okay = get_page_and_type_from_pagenr(
1780 op.mfn, PGT_root_page_table, d);
1781 if ( unlikely(!okay) )
1783 MEM_LOG("Error while installing new mfn %lx", op.mfn);
1785 else
1787 unsigned long old_mfn =
1788 pagetable_get_pfn(v->arch.guest_table_user);
1789 v->arch.guest_table_user = mk_pagetable(op.mfn << PAGE_SHIFT);
1790 if ( old_mfn != 0 )
1791 put_page_and_type(&frame_table[old_mfn]);
1793 break;
1794 #endif
1796 case MMUEXT_TLB_FLUSH_LOCAL:
1797 percpu_info[cpu].deferred_ops |= DOP_FLUSH_TLB;
1798 break;
1800 case MMUEXT_INVLPG_LOCAL:
1801 if ( shadow_mode_enabled(d) )
1802 shadow_invlpg(v, op.linear_addr);
1803 local_flush_tlb_one(op.linear_addr);
1804 break;
1806 case MMUEXT_TLB_FLUSH_MULTI:
1807 case MMUEXT_INVLPG_MULTI:
1809 unsigned long vmask;
1810 cpumask_t pmask;
1811 if ( unlikely(get_user(vmask, (unsigned long *)op.vcpumask)) )
1813 okay = 0;
1814 break;
1816 pmask = vcpumask_to_pcpumask(d, vmask);
1817 cpus_and(pmask, pmask, d->cpumask);
1818 if ( op.cmd == MMUEXT_TLB_FLUSH_MULTI )
1819 flush_tlb_mask(pmask);
1820 else
1821 flush_tlb_one_mask(pmask, op.linear_addr);
1822 break;
1825 case MMUEXT_TLB_FLUSH_ALL:
1826 flush_tlb_mask(d->cpumask);
1827 break;
1829 case MMUEXT_INVLPG_ALL:
1830 flush_tlb_one_mask(d->cpumask, op.linear_addr);
1831 break;
1833 case MMUEXT_FLUSH_CACHE:
1834 if ( unlikely(!IS_CAPABLE_PHYSDEV(d)) )
1836 MEM_LOG("Non-physdev domain tried to FLUSH_CACHE.");
1837 okay = 0;
1839 else
1841 wbinvd();
1843 break;
1845 case MMUEXT_SET_LDT:
1847 if ( shadow_mode_external(d) )
1849 MEM_LOG("ignoring SET_LDT hypercall from external "
1850 "domain %u", d->domain_id);
1851 okay = 0;
1852 break;
1855 unsigned long ptr = op.linear_addr;
1856 unsigned long ents = op.nr_ents;
1857 if ( ((ptr & (PAGE_SIZE-1)) != 0) ||
1858 (ents > 8192) ||
1859 !array_access_ok(ptr, ents, LDT_ENTRY_SIZE) )
1861 okay = 0;
1862 MEM_LOG("Bad args to SET_LDT: ptr=%lx, ents=%lx", ptr, ents);
1864 else if ( (v->arch.guest_context.ldt_ents != ents) ||
1865 (v->arch.guest_context.ldt_base != ptr) )
1867 invalidate_shadow_ldt(v);
1868 v->arch.guest_context.ldt_base = ptr;
1869 v->arch.guest_context.ldt_ents = ents;
1870 load_LDT(v);
1871 percpu_info[cpu].deferred_ops &= ~DOP_RELOAD_LDT;
1872 if ( ents != 0 )
1873 percpu_info[cpu].deferred_ops |= DOP_RELOAD_LDT;
1875 break;
1878 case MMUEXT_REASSIGN_PAGE:
1879 if ( unlikely(!IS_PRIV(d)) )
1881 MEM_LOG("Dom %u has no reassignment priv", d->domain_id);
1882 okay = 0;
1883 break;
1886 e = percpu_info[cpu].foreign;
1887 if ( unlikely(e == NULL) )
1889 MEM_LOG("No FOREIGNDOM to reassign mfn %lx to", op.mfn);
1890 okay = 0;
1891 break;
1894 /*
1895 * Grab both page_list locks, in order. This prevents the page from
1896 * disappearing elsewhere while we modify the owner, and we'll need
1897 * both locks if we're successful so that we can change lists.
1898 */
1899 if ( d < e )
1901 spin_lock(&d->page_alloc_lock);
1902 spin_lock(&e->page_alloc_lock);
1904 else
1906 spin_lock(&e->page_alloc_lock);
1907 spin_lock(&d->page_alloc_lock);
1910 /*
1911 * Check that 'e' will accept the page and has reservation
1912 * headroom. Also, a domain mustn't have PGC_allocated pages when
1913 * it is dying.
1914 */
1915 ASSERT(e->tot_pages <= e->max_pages);
1916 if ( unlikely(test_bit(_DOMF_dying, &e->domain_flags)) ||
1917 unlikely(e->tot_pages == e->max_pages) ||
1918 unlikely(IS_XEN_HEAP_FRAME(page)) )
1920 MEM_LOG("Transferee has no reservation headroom (%d,%d), or "
1921 "page is in Xen heap (%lx), or dom is dying (%ld).",
1922 e->tot_pages, e->max_pages, op.mfn, e->domain_flags);
1923 okay = 0;
1924 goto reassign_fail;
1927 /*
1928 * The tricky bit: atomically change owner while there is just one
1929 * benign reference to the page (PGC_allocated). If that reference
1930 * disappears then the deallocation routine will safely spin.
1931 */
1932 _d = pickle_domptr(d);
1933 _nd = page->u.inuse._domain;
1934 y = page->count_info;
1935 do {
1936 x = y;
1937 if ( unlikely((x & (PGC_count_mask|PGC_allocated)) !=
1938 (1|PGC_allocated)) ||
1939 unlikely(_nd != _d) )
1941 MEM_LOG("Bad page values %lx: ed=%p(%u), sd=%p,"
1942 " caf=%08x, taf=%" PRtype_info,
1943 page_to_pfn(page), d, d->domain_id,
1944 unpickle_domptr(_nd), x, page->u.inuse.type_info);
1945 okay = 0;
1946 goto reassign_fail;
1948 __asm__ __volatile__(
1949 LOCK_PREFIX "cmpxchg8b %3"
1950 : "=d" (_nd), "=a" (y), "=c" (e),
1951 "=m" (*(volatile u64 *)(&page->count_info))
1952 : "0" (_d), "1" (x), "c" (e), "b" (x) );
1954 while ( unlikely(_nd != _d) || unlikely(y != x) );
1956 /*
1957 * Unlink from 'd'. We transferred at least one reference to 'e',
1958 * so no one else is spinning to try to delete this page from 'd'.
1959 */
1960 d->tot_pages--;
1961 list_del(&page->list);
1963 /*
1964 * Add the page to 'e'. Someone may already have removed the last
1965 * reference and want to remove the page from 'e'. However, we have
1966 * the lock so they'll spin waiting for us.
1967 */
1968 if ( unlikely(e->tot_pages++ == 0) )
1969 get_knownalive_domain(e);
1970 list_add_tail(&page->list, &e->page_list);
1972 reassign_fail:
1973 spin_unlock(&d->page_alloc_lock);
1974 spin_unlock(&e->page_alloc_lock);
1975 break;
1977 default:
1978 MEM_LOG("Invalid extended pt command 0x%x", op.cmd);
1979 okay = 0;
1980 break;
1983 if ( unlikely(!okay) )
1985 rc = -EINVAL;
1986 break;
1989 uops++;
1992 out:
1993 process_deferred_ops(cpu);
1995 /* Add incremental work we have done to the @done output parameter. */
1996 if ( unlikely(pdone != NULL) )
1997 __put_user(done + i, pdone);
1999 UNLOCK_BIGLOCK(d);
2000 return rc;
2003 int do_mmu_update(
2004 mmu_update_t *ureqs,
2005 unsigned int count,
2006 unsigned int *pdone,
2007 unsigned int foreigndom)
2009 mmu_update_t req;
2010 void *va;
2011 unsigned long gpfn, mfn;
2012 struct pfn_info *page;
2013 int rc = 0, okay = 1, i = 0, cpu = smp_processor_id();
2014 unsigned int cmd, done = 0;
2015 struct vcpu *v = current;
2016 struct domain *d = v->domain;
2017 unsigned long type_info;
2018 struct domain_mmap_cache mapcache, sh_mapcache;
2020 LOCK_BIGLOCK(d);
2022 cleanup_writable_pagetable(d);
2024 if ( unlikely(shadow_mode_enabled(d)) )
2025 check_pagetable(v, "pre-mmu"); /* debug */
2027 if ( unlikely(count & MMU_UPDATE_PREEMPTED) )
2029 count &= ~MMU_UPDATE_PREEMPTED;
2030 if ( unlikely(pdone != NULL) )
2031 (void)get_user(done, pdone);
2034 domain_mmap_cache_init(&mapcache);
2035 domain_mmap_cache_init(&sh_mapcache);
2037 if ( !set_foreigndom(cpu, foreigndom) )
2039 rc = -EINVAL;
2040 goto out;
2043 perfc_incrc(calls_to_mmu_update);
2044 perfc_addc(num_page_updates, count);
2045 perfc_incr_histo(bpt_updates, count, PT_UPDATES);
2047 if ( unlikely(!array_access_ok(ureqs, count, sizeof(req))) )
2049 rc = -EFAULT;
2050 goto out;
2053 for ( i = 0; i < count; i++ )
2055 if ( hypercall_preempt_check() )
2057 rc = hypercall4_create_continuation(
2058 __HYPERVISOR_mmu_update, ureqs,
2059 (count - i) | MMU_UPDATE_PREEMPTED, pdone, foreigndom);
2060 break;
2063 if ( unlikely(__copy_from_user(&req, ureqs, sizeof(req)) != 0) )
2065 MEM_LOG("Bad __copy_from_user");
2066 rc = -EFAULT;
2067 break;
2070 cmd = req.ptr & (sizeof(l1_pgentry_t)-1);
2071 okay = 0;
2073 switch ( cmd )
2075 /*
2076 * MMU_NORMAL_PT_UPDATE: Normal update to any level of page table.
2077 */
2078 case MMU_NORMAL_PT_UPDATE:
2080 gpfn = req.ptr >> PAGE_SHIFT;
2081 mfn = __gpfn_to_mfn(d, gpfn);
2083 if ( unlikely(!get_page_from_pagenr(mfn, current->domain)) )
2085 MEM_LOG("Could not get page for normal update");
2086 break;
2089 va = map_domain_page_with_cache(mfn, &mapcache);
2090 va = (void *)((unsigned long)va +
2091 (unsigned long)(req.ptr & ~PAGE_MASK));
2092 page = &frame_table[mfn];
2094 switch ( (type_info = page->u.inuse.type_info) & PGT_type_mask )
2096 case PGT_l1_page_table:
2097 ASSERT( !shadow_mode_refcounts(d) );
2098 if ( likely(get_page_type(
2099 page, type_info & (PGT_type_mask|PGT_va_mask))) )
2101 l1_pgentry_t l1e;
2103 /* FIXME: doesn't work with PAE */
2104 l1e = l1e_from_intpte(req.val);
2105 okay = mod_l1_entry(va, l1e);
2106 if ( okay && unlikely(shadow_mode_enabled(d)) )
2107 shadow_l1_normal_pt_update(
2108 d, req.ptr, l1e, &sh_mapcache);
2109 put_page_type(page);
2111 break;
2112 case PGT_l2_page_table:
2113 ASSERT( !shadow_mode_refcounts(d) );
2114 if ( likely(get_page_type(
2115 page, type_info & (PGT_type_mask|PGT_va_mask))) )
2117 l2_pgentry_t l2e;
2119 /* FIXME: doesn't work with PAE */
2120 l2e = l2e_from_intpte(req.val);
2121 okay = mod_l2_entry(
2122 (l2_pgentry_t *)va, l2e, mfn, type_info);
2123 if ( okay && unlikely(shadow_mode_enabled(d)) )
2124 shadow_l2_normal_pt_update(
2125 d, req.ptr, l2e, &sh_mapcache);
2126 put_page_type(page);
2128 break;
2129 #if CONFIG_PAGING_LEVELS >= 3
2130 case PGT_l3_page_table:
2131 ASSERT( !shadow_mode_refcounts(d) );
2132 if ( likely(get_page_type(
2133 page, type_info & (PGT_type_mask|PGT_va_mask))) )
2135 l3_pgentry_t l3e;
2137 /* FIXME: doesn't work with PAE */
2138 l3e = l3e_from_intpte(req.val);
2139 okay = mod_l3_entry(va, l3e, mfn, type_info);
2140 if ( okay && unlikely(shadow_mode_enabled(d)) )
2141 shadow_l3_normal_pt_update(
2142 d, req.ptr, l3e, &sh_mapcache);
2143 put_page_type(page);
2145 break;
2146 #endif
2147 #if CONFIG_PAGING_LEVELS >= 4
2148 case PGT_l4_page_table:
2149 ASSERT( !shadow_mode_refcounts(d) );
2150 if ( likely(get_page_type(
2151 page, type_info & (PGT_type_mask|PGT_va_mask))) )
2153 l4_pgentry_t l4e;
2155 l4e = l4e_from_intpte(req.val);
2156 okay = mod_l4_entry(va, l4e, mfn, type_info);
2157 if ( okay && unlikely(shadow_mode_enabled(d)) )
2158 shadow_l4_normal_pt_update(
2159 d, req.ptr, l4e, &sh_mapcache);
2160 put_page_type(page);
2162 break;
2163 #endif
2164 default:
2165 if ( likely(get_page_type(page, PGT_writable_page)) )
2167 if ( shadow_mode_enabled(d) )
2169 shadow_lock(d);
2171 if ( shadow_mode_log_dirty(d) )
2172 __mark_dirty(d, mfn);
2174 if ( page_is_page_table(page) &&
2175 !page_out_of_sync(page) )
2177 shadow_mark_mfn_out_of_sync(v, gpfn, mfn);
2181 *(intpte_t *)va = req.val;
2182 okay = 1;
2184 if ( shadow_mode_enabled(d) )
2185 shadow_unlock(d);
2187 put_page_type(page);
2189 break;
2192 unmap_domain_page_with_cache(va, &mapcache);
2194 put_page(page);
2195 break;
2197 case MMU_MACHPHYS_UPDATE:
2199 mfn = req.ptr >> PAGE_SHIFT;
2200 gpfn = req.val;
2202 /* HACK ALERT... Need to think about this some more... */
2203 if ( unlikely(shadow_mode_translate(FOREIGNDOM) && IS_PRIV(d)) )
2205 shadow_lock(FOREIGNDOM);
2206 printk("privileged guest dom%d requests pfn=%lx to "
2207 "map mfn=%lx for dom%d\n",
2208 d->domain_id, gpfn, mfn, FOREIGNDOM->domain_id);
2209 set_machinetophys(mfn, gpfn);
2210 set_p2m_entry(FOREIGNDOM, gpfn, mfn, &sh_mapcache, &mapcache);
2211 okay = 1;
2212 shadow_unlock(FOREIGNDOM);
2213 break;
2216 if ( unlikely(!get_page_from_pagenr(mfn, FOREIGNDOM)) )
2218 MEM_LOG("Could not get page for mach->phys update");
2219 break;
2222 if ( unlikely(shadow_mode_translate(FOREIGNDOM) && !IS_PRIV(d)) )
2224 MEM_LOG("can't mutate the m2p of translated guests");
2225 break;
2228 set_machinetophys(mfn, gpfn);
2229 okay = 1;
2231 /*
2232 * If in log-dirty mode, mark the corresponding
2233 * page as dirty.
2234 */
2235 if ( unlikely(shadow_mode_log_dirty(FOREIGNDOM)) &&
2236 mark_dirty(FOREIGNDOM, mfn) )
2237 FOREIGNDOM->arch.shadow_dirty_block_count++;
2239 put_page(&frame_table[mfn]);
2240 break;
2242 default:
2243 MEM_LOG("Invalid page update command %x", cmd);
2244 break;
2247 if ( unlikely(!okay) )
2249 rc = -EINVAL;
2250 break;
2253 ureqs++;
2256 out:
2257 domain_mmap_cache_destroy(&mapcache);
2258 domain_mmap_cache_destroy(&sh_mapcache);
2260 process_deferred_ops(cpu);
2262 /* Add incremental work we have done to the @done output parameter. */
2263 if ( unlikely(pdone != NULL) )
2264 __put_user(done + i, pdone);
2266 if ( unlikely(shadow_mode_enabled(d)) )
2267 check_pagetable(v, "post-mmu"); /* debug */
2269 UNLOCK_BIGLOCK(d);
2270 return rc;
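/*
 * Illustrative sketch (not part of this file): a paravirtualised guest
 * normally batches its page-table writes and submits them through the
 * mmu_update hypercall handled above. Assuming the guest-side wrapper
 * HYPERVISOR_mmu_update() from the public interface headers, one update
 * looks roughly like:
 *
 *     mmu_update_t req;
 *     req.ptr = pte_machine_addr | MMU_NORMAL_PT_UPDATE;
 *     req.val = new_pte_value;
 *     HYPERVISOR_mmu_update(&req, 1, NULL, DOMID_SELF);
 *
 * The command lives in the low bits of req.ptr, which is why
 * do_mmu_update() extracts it with req.ptr & (sizeof(l1_pgentry_t)-1).
 */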
2274 int update_grant_pte_mapping(
2275 unsigned long pte_addr, l1_pgentry_t _nl1e,
2276 struct domain *d, struct vcpu *v)
2278 int rc = GNTST_okay;
2279 void *va;
2280 unsigned long gpfn, mfn;
2281 struct pfn_info *page;
2282 u32 type_info;
2283 l1_pgentry_t ol1e;
2285 ASSERT(spin_is_locked(&d->big_lock));
2286 ASSERT(!shadow_mode_refcounts(d));
2287 ASSERT((l1e_get_flags(_nl1e) & L1_DISALLOW_MASK) == 0);
2289 gpfn = pte_addr >> PAGE_SHIFT;
2290 mfn = __gpfn_to_mfn(d, gpfn);
2292 if ( unlikely(!get_page_from_pagenr(mfn, current->domain)) )
2294 MEM_LOG("Could not get page for normal update");
2295 return GNTST_general_error;
2298 va = map_domain_page(mfn);
2299 va = (void *)((unsigned long)va + (pte_addr & ~PAGE_MASK));
2300 page = pfn_to_page(mfn);
2302 type_info = page->u.inuse.type_info;
2303 if ( ((type_info & PGT_type_mask) != PGT_l1_page_table) ||
2304 !get_page_type(page, type_info & (PGT_type_mask|PGT_va_mask)) )
2306 MEM_LOG("Grant map attempted to update a non-L1 page");
2307 rc = GNTST_general_error;
2308 goto failed;
2311 if ( __copy_from_user(&ol1e, (l1_pgentry_t *)va, sizeof(ol1e)) ||
2312 !update_l1e(va, ol1e, _nl1e) )
2314 put_page_type(page);
2315 rc = GNTST_general_error;
2316 goto failed;
2319 put_page_from_l1e(ol1e, d);
2321 rc = (l1e_get_flags(ol1e) & _PAGE_PRESENT) ? GNTST_flush_all : GNTST_okay;
2323 if ( unlikely(shadow_mode_enabled(d)) )
2325 struct domain_mmap_cache sh_mapcache;
2326 domain_mmap_cache_init(&sh_mapcache);
2327 shadow_l1_normal_pt_update(d, pte_addr, _nl1e, &sh_mapcache);
2328 domain_mmap_cache_destroy(&sh_mapcache);
2331 put_page_type(page);
2333 failed:
2334 unmap_domain_page(va);
2335 put_page(page);
2336 return rc;
2339 int clear_grant_pte_mapping(
2340 unsigned long addr, unsigned long frame, struct domain *d)
2342 int rc = GNTST_okay;
2343 void *va;
2344 unsigned long gpfn, mfn;
2345 struct pfn_info *page;
2346 u32 type_info;
2347 l1_pgentry_t ol1e;
2349 ASSERT(!shadow_mode_refcounts(d));
2351 gpfn = addr >> PAGE_SHIFT;
2352 mfn = __gpfn_to_mfn(d, gpfn);
2354 if ( unlikely(!get_page_from_pagenr(mfn, current->domain)) )
2356 MEM_LOG("Could not get page for normal update");
2357 return GNTST_general_error;
2360 va = map_domain_page(mfn);
2361 va = (void *)((unsigned long)va + (addr & ~PAGE_MASK));
2362 page = pfn_to_page(mfn);
2364 type_info = page->u.inuse.type_info;
2365 if ( ((type_info & PGT_type_mask) != PGT_l1_page_table) ||
2366 !get_page_type(page, type_info & (PGT_type_mask|PGT_va_mask)) )
2368 MEM_LOG("Grant map attempted to update a non-L1 page");
2369 rc = GNTST_general_error;
2370 goto failed;
2373 if ( __copy_from_user(&ol1e, (l1_pgentry_t *)va, sizeof(ol1e)) )
2375 put_page_type(page);
2376 rc = GNTST_general_error;
2377 goto failed;
2380 /* Check that the virtual address supplied is actually mapped to frame. */
2381 if ( unlikely((l1e_get_intpte(ol1e) >> PAGE_SHIFT) != frame) )
2383 MEM_LOG("PTE entry %lx for address %lx doesn't match frame %lx",
2384 (unsigned long)l1e_get_intpte(ol1e), addr, frame);
2385 put_page_type(page);
2386 rc = GNTST_general_error;
2387 goto failed;
2390 /* Delete pagetable entry. */
2391 if ( unlikely(__put_user(0, (intpte_t *)va)))
2393 MEM_LOG("Cannot delete PTE entry at %p", va);
2394 put_page_type(page);
2395 rc = GNTST_general_error;
2396 goto failed;
2399 if ( unlikely(shadow_mode_enabled(d)) )
2401 struct domain_mmap_cache sh_mapcache;
2402 domain_mmap_cache_init(&sh_mapcache);
2403 shadow_l1_normal_pt_update(d, addr, l1e_empty(), &sh_mapcache);
2404 domain_mmap_cache_destroy(&sh_mapcache);
2407 put_page_type(page);
2409 failed:
2410 unmap_domain_page(va);
2411 put_page(page);
2412 return rc;
2416 int update_grant_va_mapping(
2417 unsigned long va, l1_pgentry_t _nl1e, struct domain *d, struct vcpu *v)
2419 int rc = GNTST_okay;
2420 l1_pgentry_t *pl1e, ol1e;
2422 ASSERT(spin_is_locked(&d->big_lock));
2423 ASSERT(!shadow_mode_refcounts(d));
2424 ASSERT((l1e_get_flags(_nl1e) & L1_DISALLOW_MASK) == 0);
2426 /*
2427 * This is actually overkill - we don't need to sync the L1 itself,
2428 * just everything involved in getting to this L1 (i.e. we need
2429 * linear_pg_table[l1_linear_offset(va)] to be in sync)...
2430 */
2431 __shadow_sync_va(v, va);
2433 pl1e = &linear_pg_table[l1_linear_offset(va)];
2435 if ( unlikely(__copy_from_user(&ol1e, pl1e, sizeof(ol1e)) != 0) ||
2436 !update_l1e(pl1e, ol1e, _nl1e) )
2437 return GNTST_general_error;
2439 put_page_from_l1e(ol1e, d);
2441 rc = (l1e_get_flags(ol1e) & _PAGE_PRESENT) ? GNTST_flush_one : GNTST_okay;
2443 if ( unlikely(shadow_mode_enabled(d)) )
2444 shadow_do_update_va_mapping(va, _nl1e, v);
2446 return rc;
2449 int clear_grant_va_mapping(unsigned long addr, unsigned long frame)
2451 l1_pgentry_t *pl1e, ol1e;
2453 pl1e = &linear_pg_table[l1_linear_offset(addr)];
2455 if ( unlikely(__get_user(ol1e.l1, &pl1e->l1) != 0) )
2457 MEM_LOG("Could not find PTE entry for address %lx", addr);
2458 return GNTST_general_error;
2461 /*
2462 * Check that the virtual address supplied is actually mapped to
2463 * frame.
2464 */
2465 if ( unlikely(l1e_get_pfn(ol1e) != frame) )
2467 MEM_LOG("PTE entry %lx for address %lx doesn't match frame %lx",
2468 l1e_get_pfn(ol1e), addr, frame);
2469 return GNTST_general_error;
2472 /* Delete pagetable entry. */
2473 if ( unlikely(__put_user(0, &pl1e->l1)) )
2475 MEM_LOG("Cannot delete PTE entry at %p", (unsigned long *)pl1e);
2476 return GNTST_general_error;
2479 return GNTST_okay;
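/*
 * Note on the four grant-mapping helpers above: the *_grant_pte_*
 * variants are given the guest-physical address of the PTE and map the
 * containing frame explicitly, whereas the *_grant_va_* variants reach
 * the PTE through linear_pg_table in the current address space. When a
 * present mapping is replaced, the GNTST_flush_all / GNTST_flush_one
 * return values presumably let the grant-table caller decide how much
 * TLB flushing is required; GNTST_okay means none is needed.
 */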
2483 int do_update_va_mapping(unsigned long va, u64 val64,
2484 unsigned long flags)
2486 l1_pgentry_t val = l1e_from_intpte(val64);
2487 struct vcpu *v = current;
2488 struct domain *d = v->domain;
2489 unsigned int cpu = v->processor;
2490 unsigned long vmask, bmap_ptr;
2491 cpumask_t pmask;
2492 int rc = 0;
2494 perfc_incrc(calls_to_update_va);
2496 if ( unlikely(!__addr_ok(va) && !shadow_mode_external(d)) )
2497 return -EINVAL;
2499 LOCK_BIGLOCK(d);
2501 cleanup_writable_pagetable(d);
2503 if ( unlikely(shadow_mode_enabled(d)) )
2504 check_pagetable(v, "pre-va"); /* debug */
2506 if ( unlikely(!mod_l1_entry(&linear_pg_table[l1_linear_offset(va)],
2507 val)) )
2508 rc = -EINVAL;
2510 if ( likely(rc == 0) && unlikely(shadow_mode_enabled(d)) )
2512 if ( unlikely(percpu_info[cpu].foreign &&
2513 (shadow_mode_translate(d) ||
2514 shadow_mode_translate(percpu_info[cpu].foreign))) )
2516 /*
2517 * The foreign domain's pfns are in a different namespace. There's
2518 * not enough information in just a gpte to figure out how to
2519 * (re-)shadow this entry.
2520 */
2521 domain_crash();
2524 rc = shadow_do_update_va_mapping(va, val, v);
2526 check_pagetable(v, "post-va"); /* debug */
2529 switch ( flags & UVMF_FLUSHTYPE_MASK )
2531 case UVMF_TLB_FLUSH:
2532 switch ( (bmap_ptr = flags & ~UVMF_FLUSHTYPE_MASK) )
2534 case UVMF_LOCAL:
2535 if ( unlikely(shadow_mode_enabled(d)) )
2536 shadow_sync_all(d);
2537 local_flush_tlb();
2538 break;
2539 case UVMF_ALL:
2540 flush_tlb_mask(d->cpumask);
2541 break;
2542 default:
2543 if ( unlikely(get_user(vmask, (unsigned long *)bmap_ptr)) )
2544 rc = -EFAULT;
2545 pmask = vcpumask_to_pcpumask(d, vmask);
2546 cpus_and(pmask, pmask, d->cpumask);
2547 flush_tlb_mask(pmask);
2548 break;
2550 break;
2552 case UVMF_INVLPG:
2553 switch ( (bmap_ptr = flags & ~UVMF_FLUSHTYPE_MASK) )
2555 case UVMF_LOCAL:
2556 if ( unlikely(shadow_mode_enabled(d)) )
2557 shadow_invlpg(current, va);
2558 local_flush_tlb_one(va);
2559 break;
2560 case UVMF_ALL:
2561 flush_tlb_one_mask(d->cpumask, va);
2562 break;
2563 default:
2564 if ( unlikely(get_user(vmask, (unsigned long *)bmap_ptr)) )
2565 rc = -EFAULT;
2566 pmask = vcpumask_to_pcpumask(d, vmask);
2567 cpus_and(pmask, pmask, d->cpumask);
2568 flush_tlb_one_mask(pmask, va);
2569 break;
2571 break;
2574 process_deferred_ops(cpu);
2576 UNLOCK_BIGLOCK(d);
2578 return rc;
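/*
 * Illustrative sketch (not part of this file): a guest updates a single
 * PTE by virtual address via the update_va_mapping hypercall handled
 * above, optionally combining it with a TLB flush. Assuming the usual
 * guest-side wrapper from the public headers:
 *
 *     // Map 'mfn' writable at 'va' and invalidate just that entry.
 *     HYPERVISOR_update_va_mapping(
 *         va, (mfn << PAGE_SHIFT) | _PAGE_PRESENT | _PAGE_RW,
 *         UVMF_INVLPG | UVMF_LOCAL);
 *
 * The flags argument is decoded by the switch on UVMF_FLUSHTYPE_MASK in
 * do_update_va_mapping() above.
 */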
2581 int do_update_va_mapping_otherdomain(unsigned long va, u64 val64,
2582 unsigned long flags,
2583 domid_t domid)
2585 unsigned int cpu = smp_processor_id();
2586 struct domain *d;
2587 int rc;
2589 if ( unlikely(!IS_PRIV(current->domain)) )
2590 return -EPERM;
2592 percpu_info[cpu].foreign = d = find_domain_by_id(domid);
2593 if ( unlikely(d == NULL) )
2595 MEM_LOG("Unknown domain '%u'", domid);
2596 return -ESRCH;
2599 rc = do_update_va_mapping(va, val64, flags);
2601 return rc;
2606 /*************************
2607 * Descriptor Tables
2608 */
2610 void destroy_gdt(struct vcpu *v)
2612 int i;
2613 unsigned long pfn;
2615 v->arch.guest_context.gdt_ents = 0;
2616 for ( i = 0; i < FIRST_RESERVED_GDT_PAGE; i++ )
2618 if ( (pfn = l1e_get_pfn(v->arch.perdomain_ptes[i])) != 0 )
2619 put_page_and_type(&frame_table[pfn]);
2620 v->arch.perdomain_ptes[i] = l1e_empty();
2621 v->arch.guest_context.gdt_frames[i] = 0;
2626 long set_gdt(struct vcpu *v,
2627 unsigned long *frames,
2628 unsigned int entries)
2630 struct domain *d = v->domain;
2631 /* NB. There are 512 8-byte entries per GDT page. */
2632 int i, nr_pages = (entries + 511) / 512;
2633 unsigned long pfn;
2635 if ( entries > FIRST_RESERVED_GDT_ENTRY )
2636 return -EINVAL;
2638 shadow_sync_all(d);
2640 /* Check the pages in the new GDT. */
2641 for ( i = 0; i < nr_pages; i++ ) {
2642 pfn = frames[i];
2643 if ((pfn >= max_page) ||
2644 !get_page_and_type(&frame_table[pfn], d, PGT_gdt_page) )
2645 goto fail;
2648 /* Tear down the old GDT. */
2649 destroy_gdt(v);
2651 /* Install the new GDT. */
2652 v->arch.guest_context.gdt_ents = entries;
2653 for ( i = 0; i < nr_pages; i++ )
2655 v->arch.guest_context.gdt_frames[i] = frames[i];
2656 v->arch.perdomain_ptes[i] =
2657 l1e_from_pfn(frames[i], __PAGE_HYPERVISOR);
2660 return 0;
2662 fail:
2663 while ( i-- > 0 )
2664 put_page_and_type(&frame_table[frames[i]]);
2665 return -EINVAL;
2669 long do_set_gdt(unsigned long *frame_list, unsigned int entries)
2671 int nr_pages = (entries + 511) / 512;
2672 unsigned long frames[16];
2673 long ret;
2675 /* Rechecked in set_gdt, but ensures a sane limit for copy_from_user(). */
2676 if ( entries > FIRST_RESERVED_GDT_ENTRY )
2677 return -EINVAL;
2679 if ( copy_from_user(frames, frame_list, nr_pages * sizeof(unsigned long)) )
2680 return -EFAULT;
2682 LOCK_BIGLOCK(current->domain);
2684 if ( (ret = set_gdt(current, frames, entries)) == 0 )
2685 local_flush_tlb();
2687 UNLOCK_BIGLOCK(current->domain);
2689 return ret;
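/*
 * Illustrative sketch (not part of this file): a guest installs a new
 * GDT by handing Xen the machine frames backing it instead of executing
 * LGDT itself. With the usual guest-side wrapper (names assumed from
 * the public headers):
 *
 *     unsigned long frames[1];
 *     frames[0] = gdt_mfn;              // frame holding the guest GDT
 *     HYPERVISOR_set_gdt(frames, 512);  // 512 8-byte entries per page
 *
 * set_gdt() above re-types each frame as PGT_gdt_page and maps it via
 * perdomain_ptes, so the frames cannot simultaneously remain writable
 * mappings in the guest.
 */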
2693 long do_update_descriptor(u64 pa, u64 desc)
2695 struct domain *dom = current->domain;
2696 unsigned long gpfn = pa >> PAGE_SHIFT;
2697 unsigned long mfn;
2698 unsigned int offset;
2699 struct desc_struct *gdt_pent, d;
2700 struct pfn_info *page;
2701 long ret = -EINVAL;
2703 offset = ((unsigned int)pa & ~PAGE_MASK) / sizeof(struct desc_struct);
2705 *(u64 *)&d = desc;
2707 LOCK_BIGLOCK(dom);
2709 if ( !VALID_MFN(mfn = __gpfn_to_mfn(dom, gpfn)) ||
2710 (((unsigned int)pa % sizeof(struct desc_struct)) != 0) ||
2711 (mfn >= max_page) ||
2712 !check_descriptor(&d) )
2714 UNLOCK_BIGLOCK(dom);
2715 return -EINVAL;
2718 page = &frame_table[mfn];
2719 if ( unlikely(!get_page(page, dom)) )
2721 UNLOCK_BIGLOCK(dom);
2722 return -EINVAL;
2725 /* Check if the given frame is in use in an unsafe context. */
2726 switch ( page->u.inuse.type_info & PGT_type_mask )
2728 case PGT_gdt_page:
2729 if ( unlikely(!get_page_type(page, PGT_gdt_page)) )
2730 goto out;
2731 break;
2732 case PGT_ldt_page:
2733 if ( unlikely(!get_page_type(page, PGT_ldt_page)) )
2734 goto out;
2735 break;
2736 default:
2737 if ( unlikely(!get_page_type(page, PGT_writable_page)) )
2738 goto out;
2739 break;
2742 if ( shadow_mode_enabled(dom) )
2744 shadow_lock(dom);
2746 if ( shadow_mode_log_dirty(dom) )
2747 __mark_dirty(dom, mfn);
2749 if ( page_is_page_table(page) && !page_out_of_sync(page) )
2750 shadow_mark_mfn_out_of_sync(current, gpfn, mfn);
2753 /* All is good so make the update. */
2754 gdt_pent = map_domain_page(mfn);
2755 memcpy(&gdt_pent[offset], &d, 8);
2756 unmap_domain_page(gdt_pent);
2758 if ( shadow_mode_enabled(dom) )
2759 shadow_unlock(dom);
2761 put_page_type(page);
2763 ret = 0; /* success */
2765 out:
2766 put_page(page);
2768 UNLOCK_BIGLOCK(dom);
2770 return ret;
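/*
 * Illustrative sketch (not part of this file): individual GDT/LDT
 * entries are updated through this hypercall rather than written
 * directly. Assuming the usual guest-side wrapper, and a hypothetical
 * make_descriptor() helper:
 *
 *     u64 pa   = ((u64)gdt_frame << PAGE_SHIFT) | (idx * 8);
 *     u64 desc = make_descriptor(base, limit, attr);
 *     HYPERVISOR_update_descriptor(pa, desc);
 *
 * The address is put through __gpfn_to_mfn() above, so for an ordinary
 * non-translated PV domain it is effectively the machine address of the
 * 8-byte entry; check_descriptor() vets the new contents before the
 * memcpy() into the frame.
 */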
2775 /*************************
2776 * Writable Pagetables
2777 */
2779 #ifdef VVERBOSE
2780 int ptwr_debug = 0x0;
2781 #define PTWR_PRINTK(_f, _a...) \
2782 do { if ( unlikely(ptwr_debug) ) printk( _f , ## _a ); } while ( 0 )
2783 #define PTWR_PRINT_WHICH (which ? 'I' : 'A')
2784 #else
2785 #define PTWR_PRINTK(_f, _a...) ((void)0)
2786 #endif
2789 #ifdef PERF_ARRAYS
2791 /***************** writable pagetables profiling functions *****************/
2793 #define ptwr_eip_buckets 256
2795 int ptwr_eip_stat_threshold[] = {1, 10, 50, 100, L1_PAGETABLE_ENTRIES};
2797 #define ptwr_eip_stat_thresholdN (sizeof(ptwr_eip_stat_threshold)/sizeof(int))
2799 typedef struct {
2800 unsigned long eip;
2801 domid_t id;
2802 u32 val[ptwr_eip_stat_thresholdN];
2803 } ptwr_eip_stat_t;
2805 ptwr_eip_stat_t ptwr_eip_stats[ptwr_eip_buckets];
2807 static inline unsigned int ptwr_eip_stat_hash( unsigned long eip, domid_t id )
2809 return (((unsigned long) id) ^ eip ^ (eip>>8) ^ (eip>>16) ^ (eip>>24)) %
2810 ptwr_eip_buckets;
2813 static void ptwr_eip_stat_inc(u32 *n)
2815 int i, j;
2817 if ( ++(*n) != 0 )
2818 return;
2820 *n = ~0;
2822 /* Re-scale all buckets. */
2823 for ( i = 0; i < ptwr_eip_buckets; i++ )
2824 for ( j = 0; j < ptwr_eip_stat_thresholdN; j++ )
2825 ptwr_eip_stats[i].val[j] >>= 1;
2828 static void ptwr_eip_stat_update(unsigned long eip, domid_t id, int modified)
2830 int i, j, b;
2832 i = b = ptwr_eip_stat_hash(eip, id);
2834 do
2836 if ( !ptwr_eip_stats[i].eip )
2838 /* doesn't exist */
2839 ptwr_eip_stats[i].eip = eip;
2840 ptwr_eip_stats[i].id = id;
2841 memset(ptwr_eip_stats[i].val, 0, sizeof(ptwr_eip_stats[i].val));
2844 if ( ptwr_eip_stats[i].eip == eip )
2846 for ( j = 0; j < ptwr_eip_stat_thresholdN; j++ )
2847 if ( modified <= ptwr_eip_stat_threshold[j] )
2848 break;
2849 BUG_ON(j >= ptwr_eip_stat_thresholdN);
2850 ptwr_eip_stat_inc(&ptwr_eip_stats[i].val[j]);
2851 return;
2854 i = (i+1) % ptwr_eip_buckets;
2856 while ( i != b );
2858 printk("ptwr_eip_stat: too many EIPs in use!\n");
2860 ptwr_eip_stat_print();
2861 ptwr_eip_stat_reset();
2864 void ptwr_eip_stat_reset(void)
2866 memset(ptwr_eip_stats, 0, sizeof(ptwr_eip_stats));
2869 void ptwr_eip_stat_print(void)
2871 struct domain *e;
2872 domid_t d;
2873 int i, j;
2875 for_each_domain( e )
2877 d = e->domain_id;
2879 for ( i = 0; i < ptwr_eip_buckets; i++ )
2881 if ( ptwr_eip_stats[i].eip && ptwr_eip_stats[i].id != d )
2882 continue;
2884 printk("D %d eip %08lx ",
2885 ptwr_eip_stats[i].id, ptwr_eip_stats[i].eip);
2887 for ( j = 0; j < ptwr_eip_stat_thresholdN; j++ )
2888 printk("<=%u %4u \t",
2889 ptwr_eip_stat_threshold[j],
2890 ptwr_eip_stats[i].val[j]);
2891 printk("\n");
2896 #else /* PERF_ARRAYS */
2898 #define ptwr_eip_stat_update(eip, id, modified) ((void)0)
2900 #endif
2902 /*******************************************************************/
2904 /* Re-validate a given p.t. page, given its prior snapshot */
2905 int revalidate_l1(
2906 struct domain *d, l1_pgentry_t *l1page, l1_pgentry_t *snapshot)
2908 l1_pgentry_t ol1e, nl1e;
2909 int modified = 0, i;
2911 for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
2913 ol1e = snapshot[i];
2914 nl1e = l1page[i];
2916 if ( likely(l1e_get_intpte(ol1e) == l1e_get_intpte(nl1e)) )
2917 continue;
2919 /* Update number of entries modified. */
2920 modified++;
2922 /*
2923 * Fast path for PTEs that have merely been write-protected
2924 * (e.g., during a Unix fork()). A strict reduction in privilege.
2925 */
2926 if ( likely(l1e_get_intpte(ol1e) == (l1e_get_intpte(nl1e)|_PAGE_RW)) )
2928 if ( likely(l1e_get_flags(nl1e) & _PAGE_PRESENT) )
2929 put_page_type(&frame_table[l1e_get_pfn(nl1e)]);
2930 continue;
2933 if ( unlikely(!get_page_from_l1e(nl1e, d)) )
2935 MEM_LOG("ptwr: Could not re-validate l1 page");
2936 /*
2937 * Make the remaining p.t's consistent before crashing, so the
2938 * reference counts are correct.
2939 */
2940 memcpy(&l1page[i], &snapshot[i],
2941 (L1_PAGETABLE_ENTRIES - i) * sizeof(l1_pgentry_t));
2942 domain_crash();
2943 break;
2946 put_page_from_l1e(ol1e, d);
2949 return modified;
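/*
 * In short, revalidate_l1() diffs the live L1 page against the snapshot
 * taken when it was unhooked, and only the entries that actually changed
 * pay the full get_page_from_l1e()/put_page_from_l1e() cost. If, say, a
 * guest fork() merely cleared _PAGE_RW on a handful of PTEs, each of
 * those hits the fast path above and just drops a writable type
 * reference.
 */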
2953 /* Flush the given writable p.t. page and write-protect it again. */
2954 void ptwr_flush(struct domain *d, const int which)
2956 unsigned long l1va;
2957 l1_pgentry_t *pl1e, pte, *ptep;
2958 l2_pgentry_t *pl2e;
2959 unsigned int modified;
2961 #ifdef CONFIG_X86_64
2962 struct vcpu *v = current;
2963 extern void toggle_guest_mode(struct vcpu *);
2964 int user_mode = !(v->arch.flags & TF_kernel_mode);
2965 #endif
2967 ASSERT(!shadow_mode_enabled(d));
2969 if ( unlikely(d->arch.ptwr[which].vcpu != current) )
2970 /* Don't use write_ptbase: it may switch to guest_user on x86/64! */
2971 write_cr3(pagetable_get_paddr(
2972 d->arch.ptwr[which].vcpu->arch.guest_table));
2973 else
2974 TOGGLE_MODE();
2976 l1va = d->arch.ptwr[which].l1va;
2977 ptep = (l1_pgentry_t *)&linear_pg_table[l1_linear_offset(l1va)];
2979 /*
2980 * STEP 1. Write-protect the p.t. page so no more updates can occur.
2981 */
2983 if ( unlikely(__get_user(pte.l1, &ptep->l1)) )
2985 MEM_LOG("ptwr: Could not read pte at %p", ptep);
2986 /*
2987 * Really a bug. We could read this PTE during the initial fault,
2988 * and pagetables can't have changed in the meantime.
2989 */
2990 BUG();
2992 PTWR_PRINTK("[%c] disconnected_l1va at %p is %"PRIpte"\n",
2993 PTWR_PRINT_WHICH, ptep, pte.l1);
2994 l1e_remove_flags(pte, _PAGE_RW);
2996 /* Write-protect the p.t. page in the guest page table. */
2997 if ( unlikely(__put_user(pte, ptep)) )
2999 MEM_LOG("ptwr: Could not update pte at %p", ptep);
3000 /*
3001 * Really a bug. We could write this PTE during the initial fault,
3002 * and pagetables can't have changed in the meantime.
3003 */
3004 BUG();
3007 /* Ensure that there are no stale writable mappings in any TLB. */
3008 /* NB. INVLPG is a serialising instruction: flushes pending updates. */
3009 flush_tlb_one_mask(d->cpumask, l1va);
3010 PTWR_PRINTK("[%c] disconnected_l1va at %p now %"PRIpte"\n",
3011 PTWR_PRINT_WHICH, ptep, pte.l1);
3013 /*
3014 * STEP 2. Validate any modified PTEs.
3015 */
3017 pl1e = d->arch.ptwr[which].pl1e;
3018 modified = revalidate_l1(d, pl1e, d->arch.ptwr[which].page);
3019 unmap_domain_page(pl1e);
3020 perfc_incr_histo(wpt_updates, modified, PT_UPDATES);
3021 ptwr_eip_stat_update( d->arch.ptwr[which].eip, d->domain_id, modified);
3022 d->arch.ptwr[which].prev_nr_updates = modified;
3024 /*
3025 * STEP 3. Reattach the L1 p.t. page into the current address space.
3026 */
3028 if ( which == PTWR_PT_ACTIVE )
3030 pl2e = &__linear_l2_table[d->arch.ptwr[which].l2_idx];
3031 l2e_add_flags(*pl2e, _PAGE_PRESENT);
3034 /*
3035 * STEP 4. Final tidy-up.
3036 */
3038 d->arch.ptwr[which].l1va = 0;
3040 if ( unlikely(d->arch.ptwr[which].vcpu != current) )
3041 write_ptbase(current);
3042 else
3043 TOGGLE_MODE();
3046 static int ptwr_emulated_update(
3047 unsigned long addr,
3048 physaddr_t old,
3049 physaddr_t val,
3050 unsigned int bytes,
3051 unsigned int do_cmpxchg)
3053 unsigned long pfn;
3054 struct pfn_info *page;
3055 l1_pgentry_t pte, ol1e, nl1e, *pl1e;
3056 struct domain *d = current->domain;
3058 /* Aligned access only, thank you. */
3059 if ( !access_ok(addr, bytes) || ((addr & (bytes-1)) != 0) )
3061 MEM_LOG("ptwr_emulate: Unaligned or bad size ptwr access (%d, %lx)",
3062 bytes, addr);
3063 return X86EMUL_UNHANDLEABLE;
3066 /* Turn a sub-word access into a full-word access. */
3067 if ( bytes != sizeof(physaddr_t) )
3069 int rc;
3070 physaddr_t full;
3071 unsigned int offset = addr & (sizeof(physaddr_t)-1);
3073 /* Align address; read full word. */
3074 addr &= ~(sizeof(physaddr_t)-1);
3075 if ( (rc = x86_emulate_read_std(addr, (unsigned long *)&full,
3076 sizeof(physaddr_t))) )
3077 return rc;
3078 /* Mask out bits provided by caller. */
3079 full &= ~((((physaddr_t)1 << (bytes*8)) - 1) << (offset*8));
3080 /* Shift the caller value and OR in the missing bits. */
3081 val &= (((physaddr_t)1 << (bytes*8)) - 1);
3082 val <<= (offset)*8;
3083 val |= full;
3084 /* Also fill in missing parts of the cmpxchg old value. */
3085 old &= (((physaddr_t)1 << (bytes*8)) - 1);
3086 old <<= (offset)*8;
3087 old |= full;
3090 /* Read the PTE that maps the page being updated. */
3091 if (__copy_from_user(&pte, &linear_pg_table[l1_linear_offset(addr)],
3092 sizeof(pte)))
3094 MEM_LOG("ptwr_emulate: Cannot read thru linear_pg_table");
3095 return X86EMUL_UNHANDLEABLE;
3098 pfn = l1e_get_pfn(pte);
3099 page = &frame_table[pfn];
3101 /* We are looking only for read-only mappings of p.t. pages. */
3102 if ( ((l1e_get_flags(pte) & (_PAGE_RW|_PAGE_PRESENT)) != _PAGE_PRESENT) ||
3103 ((page->u.inuse.type_info & PGT_type_mask) != PGT_l1_page_table) ||
3104 (page_get_owner(page) != d) )
3106 MEM_LOG("ptwr_emulate: Page is mistyped or bad pte "
3107 "(%lx, %" PRtype_info ")",
3108 l1e_get_pfn(pte), page->u.inuse.type_info);
3109 return X86EMUL_UNHANDLEABLE;
3112 /* Check the new PTE. */
3113 nl1e = l1e_from_intpte(val);
3114 if ( unlikely(!get_page_from_l1e(nl1e, d)) )
3115 return X86EMUL_UNHANDLEABLE;
3117 /* Checked successfully: do the update (write or cmpxchg). */
3118 pl1e = map_domain_page(page_to_pfn(page));
3119 pl1e = (l1_pgentry_t *)((unsigned long)pl1e + (addr & ~PAGE_MASK));
3120 if ( do_cmpxchg )
3122 ol1e = l1e_from_intpte(old);
3123 if ( cmpxchg((intpte_t *)pl1e, old, val) != old )
3125 unmap_domain_page(pl1e);
3126 put_page_from_l1e(nl1e, d);
3127 return X86EMUL_CMPXCHG_FAILED;
3130 else
3132 ol1e = *pl1e;
3133 *pl1e = nl1e;
3135 unmap_domain_page(pl1e);
3137 /* Finally, drop the old PTE. */
3138 put_page_from_l1e(ol1e, d);
3140 return X86EMUL_CONTINUE;
3143 static int ptwr_emulated_write(
3144 unsigned long addr,
3145 unsigned long val,
3146 unsigned int bytes)
3148 return ptwr_emulated_update(addr, 0, val, bytes, 0);
3151 static int ptwr_emulated_cmpxchg(
3152 unsigned long addr,
3153 unsigned long old,
3154 unsigned long new,
3155 unsigned int bytes)
3157 return ptwr_emulated_update(addr, old, new, bytes, 1);
3160 static int ptwr_emulated_cmpxchg8b(
3161 unsigned long addr,
3162 unsigned long old,
3163 unsigned long old_hi,
3164 unsigned long new,
3165 unsigned long new_hi)
3167 return ptwr_emulated_update(
3168 addr, ((u64)old_hi << 32) | old, ((u64)new_hi << 32) | new, 8, 1);
3171 static struct x86_mem_emulator ptwr_mem_emulator = {
3172 .read_std = x86_emulate_read_std,
3173 .write_std = x86_emulate_write_std,
3174 .read_emulated = x86_emulate_read_std,
3175 .write_emulated = ptwr_emulated_write,
3176 .cmpxchg_emulated = ptwr_emulated_cmpxchg,
3177 .cmpxchg8b_emulated = ptwr_emulated_cmpxchg8b
3178 };
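/*
 * These hooks plug into x86_emulate_memop(): reads are satisfied
 * directly from guest memory, while writes and cmpxchg-style updates to
 * the faulting PTE are funnelled through ptwr_emulated_update(), so they
 * get the same validation as an explicit mmu_update request. See the
 * 'emulate:' path at the end of ptwr_do_page_fault() below.
 */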
3180 /* Write page fault handler: check if guest is trying to modify a PTE. */
3181 int ptwr_do_page_fault(struct domain *d, unsigned long addr,
3182 struct cpu_user_regs *regs)
3184 unsigned long pfn;
3185 struct pfn_info *page;
3186 l1_pgentry_t pte;
3187 l2_pgentry_t *pl2e, l2e;
3188 int which;
3189 unsigned long l2_idx;
3191 if ( unlikely(shadow_mode_enabled(d)) )
3192 return 0;
3194 /*
3195 * Attempt to read the PTE that maps the VA being accessed. By checking for
3196 * PDE validity in the L2 we avoid many expensive fixups in __get_user().
3197 */
3198 if ( !(l2e_get_flags(__linear_l2_table[l2_linear_offset(addr)]) &
3199 _PAGE_PRESENT) ||
3200 __copy_from_user(&pte,&linear_pg_table[l1_linear_offset(addr)],
3201 sizeof(pte)) )
3203 return 0;
3206 pfn = l1e_get_pfn(pte);
3207 page = &frame_table[pfn];
3209 /* We are looking only for read-only mappings of p.t. pages. */
3210 if ( ((l1e_get_flags(pte) & (_PAGE_RW|_PAGE_PRESENT)) != _PAGE_PRESENT) ||
3211 ((page->u.inuse.type_info & PGT_type_mask) != PGT_l1_page_table) ||
3212 ((page->u.inuse.type_info & PGT_count_mask) == 0) ||
3213 (page_get_owner(page) != d) )
3215 return 0;
3218 #if 0 /* Leave this in as useful for debugging */
3219 goto emulate;
3220 #endif
3222 /* Get the L2 index at which this L1 p.t. is always mapped. */
3223 l2_idx = page->u.inuse.type_info & PGT_va_mask;
3224 if ( unlikely(l2_idx >= PGT_va_unknown) )
3225 goto emulate; /* Urk! This L1 is mapped in multiple L2 slots! */
3226 l2_idx >>= PGT_va_shift;
3228 if ( unlikely(l2_idx == l2_linear_offset(addr)) )
3229 goto emulate; /* Urk! Pagetable maps itself! */
3231 /*
3232 * Is the L1 p.t. mapped into the current address space? If so we call it
3233 * an ACTIVE p.t., otherwise it is INACTIVE.
3234 */
3235 pl2e = &__linear_l2_table[l2_idx];
3236 which = PTWR_PT_INACTIVE;
3238 if ( (__get_user(l2e.l2, &pl2e->l2) == 0) && (l2e_get_pfn(l2e) == pfn) )
3240 /*
3241 * Check the PRESENT bit to set ACTIVE mode.
3242 * If the PRESENT bit is clear, we may be conflicting with the current
3243 * ACTIVE p.t. (it may be the same p.t. mapped at another virt addr).
3244 * The ptwr_flush call below will restore the PRESENT bit.
3245 */
3246 if ( likely(l2e_get_flags(l2e) & _PAGE_PRESENT) ||
3247 (d->arch.ptwr[PTWR_PT_ACTIVE].l1va &&
3248 (l2_idx == d->arch.ptwr[PTWR_PT_ACTIVE].l2_idx)) )
3249 which = PTWR_PT_ACTIVE;
3252 /*
3253 * If this is a multi-processor guest then ensure that the page is hooked
3254 * into at most one L2 table, which must be the one running on this VCPU.
3255 */
3256 if ( (d->vcpu[0]->next_in_list != NULL) &&
3257 ((page->u.inuse.type_info & PGT_count_mask) !=
3258 (!!(page->u.inuse.type_info & PGT_pinned) +
3259 (which == PTWR_PT_ACTIVE))) )
3261 /* Could be conflicting writable mappings from other VCPUs. */
3262 cleanup_writable_pagetable(d);
3263 goto emulate;
3266 PTWR_PRINTK("[%c] page_fault on l1 pt at va %lx, pt for %08lx, "
3267 "pfn %lx\n", PTWR_PRINT_WHICH,
3268 addr, l2_idx << L2_PAGETABLE_SHIFT, pfn);
3270 /*
3271 * We only allow one ACTIVE and one INACTIVE p.t. to be updated at a
3272 * time. If there is already one, we must flush it out.
3273 */
3274 if ( d->arch.ptwr[which].l1va )
3275 ptwr_flush(d, which);
3277 /*
3278 * If the last batch made no updates then we are probably stuck. Emulate this
3279 * update to ensure we make progress.
3280 */
3281 if ( d->arch.ptwr[which].prev_nr_updates == 0 )
3283 /* Ensure that we don't get stuck in an emulation-only rut. */
3284 d->arch.ptwr[which].prev_nr_updates = 1;
3285 goto emulate;
3288 d->arch.ptwr[which].l1va = addr | 1;
3289 d->arch.ptwr[which].l2_idx = l2_idx;
3290 d->arch.ptwr[which].vcpu = current;
3292 #ifdef PERF_ARRAYS
3293 d->arch.ptwr[which].eip = regs->eip;
3294 #endif
3296 /* For safety, disconnect the L1 p.t. page from current space. */
3297 if ( which == PTWR_PT_ACTIVE )
3299 l2e_remove_flags(*pl2e, _PAGE_PRESENT);
3300 flush_tlb_mask(d->cpumask);
3303 /* Temporarily map the L1 page, and make a copy of it. */
3304 d->arch.ptwr[which].pl1e = map_domain_page(pfn);
3305 memcpy(d->arch.ptwr[which].page,
3306 d->arch.ptwr[which].pl1e,
3307 L1_PAGETABLE_ENTRIES * sizeof(l1_pgentry_t));
3309 /* Finally, make the p.t. page writable by the guest OS. */
3310 l1e_add_flags(pte, _PAGE_RW);
3311 if ( unlikely(__put_user(pte.l1,
3312 &linear_pg_table[l1_linear_offset(addr)].l1)) )
3314 MEM_LOG("ptwr: Could not update pte at %p", (unsigned long *)
3315 &linear_pg_table[l1_linear_offset(addr)]);
3316 /* Toss the writable pagetable state and crash. */
3317 unmap_domain_page(d->arch.ptwr[which].pl1e);
3318 d->arch.ptwr[which].l1va = 0;
3319 domain_crash();
3320 return 0;
3323 return EXCRET_fault_fixed;
3325 emulate:
3326 if ( x86_emulate_memop(guest_cpu_user_regs(), addr,
3327 &ptwr_mem_emulator, BITS_PER_LONG/8) )
3328 return 0;
3329 perfc_incrc(ptwr_emulations);
3330 return EXCRET_fault_fixed;
3333 int ptwr_init(struct domain *d)
3335 void *x = alloc_xenheap_page();
3336 void *y = alloc_xenheap_page();
3338 if ( (x == NULL) || (y == NULL) )
3340 if ( x != NULL )
3341 free_xenheap_page(x);
3342 if ( y != NULL )
3343 free_xenheap_page(y);
3344 return -ENOMEM;
3347 d->arch.ptwr[PTWR_PT_ACTIVE].page = x;
3348 d->arch.ptwr[PTWR_PT_INACTIVE].page = y;
3350 return 0;
3353 void ptwr_destroy(struct domain *d)
3355 cleanup_writable_pagetable(d);
3356 free_xenheap_page(d->arch.ptwr[PTWR_PT_ACTIVE].page);
3357 free_xenheap_page(d->arch.ptwr[PTWR_PT_INACTIVE].page);
3360 void cleanup_writable_pagetable(struct domain *d)
3362 if ( unlikely(!VM_ASSIST(d, VMASST_TYPE_writable_pagetables)) )
3363 return;
3365 if ( unlikely(shadow_mode_enabled(d)) )
3367 shadow_sync_all(d);
3369 else
3371 if ( d->arch.ptwr[PTWR_PT_ACTIVE].l1va )
3372 ptwr_flush(d, PTWR_PT_ACTIVE);
3373 if ( d->arch.ptwr[PTWR_PT_INACTIVE].l1va )
3374 ptwr_flush(d, PTWR_PT_INACTIVE);
3378 int map_pages_to_xen(
3379 unsigned long virt,
3380 unsigned long pfn,
3381 unsigned long nr_pfns,
3382 unsigned long flags)
3384 l2_pgentry_t *pl2e, ol2e;
3385 l1_pgentry_t *pl1e, ol1e;
3386 unsigned int i;
3388 unsigned int map_small_pages = !!(flags & MAP_SMALL_PAGES);
3389 flags &= ~MAP_SMALL_PAGES;
3391 while ( nr_pfns != 0 )
3393 pl2e = virt_to_xen_l2e(virt);
3395 if ( ((((virt>>PAGE_SHIFT) | pfn) & ((1<<PAGETABLE_ORDER)-1)) == 0) &&
3396 (nr_pfns >= (1<<PAGETABLE_ORDER)) &&
3397 !map_small_pages )
3399 /* Super-page mapping. */
3400 ol2e = *pl2e;
3401 *pl2e = l2e_from_pfn(pfn, flags|_PAGE_PSE);
3403 if ( (l2e_get_flags(ol2e) & _PAGE_PRESENT) )
3405 local_flush_tlb_pge();
3406 if ( !(l2e_get_flags(ol2e) & _PAGE_PSE) )
3407 free_xen_pagetable(l2e_get_page(ol2e));
3410 virt += 1UL << L2_PAGETABLE_SHIFT;
3411 pfn += 1UL << PAGETABLE_ORDER;
3412 nr_pfns -= 1UL << PAGETABLE_ORDER;
3414 else
3416 /* Normal page mapping. */
3417 if ( !(l2e_get_flags(*pl2e) & _PAGE_PRESENT) )
3419 pl1e = page_to_virt(alloc_xen_pagetable());
3420 clear_page(pl1e);
3421 *pl2e = l2e_from_page(virt_to_page(pl1e), __PAGE_HYPERVISOR);
3423 else if ( l2e_get_flags(*pl2e) & _PAGE_PSE )
3425 pl1e = page_to_virt(alloc_xen_pagetable());
3426 for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
3427 pl1e[i] = l1e_from_pfn(
3428 l2e_get_pfn(*pl2e) + i,
3429 l2e_get_flags(*pl2e) & ~_PAGE_PSE);
3430 *pl2e = l2e_from_page(virt_to_page(pl1e), __PAGE_HYPERVISOR);
3431 local_flush_tlb_pge();
3434 pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(virt);
3435 ol1e = *pl1e;
3436 *pl1e = l1e_from_pfn(pfn, flags);
3437 if ( (l1e_get_flags(ol1e) & _PAGE_PRESENT) )
3438 local_flush_tlb_one(virt);
3440 virt += 1UL << L1_PAGETABLE_SHIFT;
3441 pfn += 1UL;
3442 nr_pfns -= 1UL;
3446 return 0;
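/*
 * Usage note: map_pages_to_xen() is the low-level mapper for Xen's own
 * address space. Callers that may later need to change protections on
 * individual pages pass MAP_SMALL_PAGES to suppress the 2/4MB super-page
 * optimisation; see the memguard code below, which remaps ranges with or
 * without _PAGE_PRESENT one small page at a time.
 */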
3449 void __set_fixmap(
3450 enum fixed_addresses idx, unsigned long p, unsigned long flags)
3452 if ( unlikely(idx >= __end_of_fixed_addresses) )
3453 BUG();
3454 map_pages_to_xen(fix_to_virt(idx), p >> PAGE_SHIFT, 1, flags);
3457 #ifdef MEMORY_GUARD
3459 void memguard_init(void)
3461 map_pages_to_xen(
3462 PAGE_OFFSET, 0, xenheap_phys_end >> PAGE_SHIFT,
3463 __PAGE_HYPERVISOR|MAP_SMALL_PAGES);
3466 static void __memguard_change_range(void *p, unsigned long l, int guard)
3468 unsigned long _p = (unsigned long)p;
3469 unsigned long _l = (unsigned long)l;
3470 unsigned long flags = __PAGE_HYPERVISOR | MAP_SMALL_PAGES;
3472 /* Ensure we are dealing with a page-aligned whole number of pages. */
3473 ASSERT((_p&PAGE_MASK) != 0);
3474 ASSERT((_l&PAGE_MASK) != 0);
3475 ASSERT((_p&~PAGE_MASK) == 0);
3476 ASSERT((_l&~PAGE_MASK) == 0);
3478 if ( guard )
3479 flags &= ~_PAGE_PRESENT;
3481 map_pages_to_xen(
3482 _p, virt_to_phys(p) >> PAGE_SHIFT, _l >> PAGE_SHIFT, flags);
3485 void memguard_guard_range(void *p, unsigned long l)
3487 __memguard_change_range(p, l, 1);
3490 void memguard_unguard_range(void *p, unsigned long l)
3492 __memguard_change_range(p, l, 0);
3495 #endif
3497 /*
3498 * Local variables:
3499 * mode: C
3500 * c-set-style: "BSD"
3501 * c-basic-offset: 4
3502 * tab-width: 4
3503 * indent-tabs-mode: nil
3504 * End:
3505 */