ia64/xen-unstable

view xen/arch/ia64/xen/mm.c @ 15882:923795831f9a

[IA64] tak emulation, minor 4k-page correction

Signed-off-by: Juergen Gross <juergen.gross@fujitsu-siemens.com>
author Alex Williamson <alex.williamson@hp.com>
date Tue Sep 11 15:12:39 2007 -0600 (2007-09-11)
parents d956779d8d47
children fdd298b75fb5
line source
1 /*
2 * Copyright (C) 2005 Intel Co
3 * Kun Tian (Kevin Tian) <kevin.tian@intel.com>
4 *
5 * 05/04/29 Kun Tian (Kevin Tian) <kevin.tian@intel.com> Add VTI domain support
6 *
7 * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
8 * VA Linux Systems Japan K.K.
9 * dom0 vp model support
10 */
12 /*
13 * NOTES on SMP
14 *
15 * * shared structures
16 * There are some structures which are accessed by CPUs concurrently.
17 * Here is the list of shared structures and operations on them which
18 * read/write the structures.
19 *
20 * - struct page_info
21 * This is a xen global resource. This structure is accessed by
22 * any CPUs.
23 *
24 * operations on this structure:
25 * - get_page() and its variant
26 * - put_page() and its variant
27 *
28 * - vTLB
29 * vcpu->arch.{d, i}tlb: Software tlb cache. These are per VCPU data.
30 * DEFINE_PER_CPU (unsigned long, vhpt_paddr): VHPT table per physical CPU.
31 *
32 * domain_flush_vtlb_range() and domain_flush_vtlb_all()
33 * write vcpu->arch.{d, i}tlb and the VHPT table of vcpus which aren't current.
34 * So there are potential races when reading/writing the VHPT and vcpu->arch.{d, i}tlb.
35 * Please note that reading the VHPT is done by the hardware page table walker.
36 *
37 * operations on this structure:
38 * - global tlb purge
39 * vcpu_ptc_g(), vcpu_ptc_ga() and domain_page_flush_and_put()
40 * I.e. callers of domain_flush_vtlb_range() and domain_flush_vtlb_all()
41 * These functions invalidate VHPT entry and vcpu->arch.{i, d}tlb
42 *
43 * - tlb insert and fc
44 * vcpu_itc_i()
45 * vcpu_itc_d()
46 * ia64_do_page_fault()
47 * vcpu_fc()
48 * These functions set VHPT entry and vcpu->arch.{i, d}tlb.
49 * Actually vcpu_itc_no_srlz() does.
50 *
51 * - the P2M table
52 * domain->mm and pgd, pud, pmd, pte table page.
53 * This structure is used to convert domain pseudo physical address
54 * to machine address. This is a per-domain resource.
55 *
56 * operations on this structure:
57 * - populate the P2M table tree
58 * lookup_alloc_domain_pte() and its variants.
59 * - set p2m entry
60 * assign_new_domain_page() and its variants.
61 * assign_domain_page() and its variants.
62 * - xchg p2m entry
63 * assign_domain_page_replace()
64 * - cmpxchg p2m entry
65 * assign_domain_page_cmpxchg_rel()
66 * replace_grant_host_mapping()
67 * steal_page()
68 * zap_domain_page_one()
69 * - read p2m entry
70 * lookup_alloc_domain_pte() and its variants.
71 *
72 * - the M2P table
73 * mpt_table (or machine_to_phys_mapping)
74 * This is a table which converts from machine address to pseudo physical
75 * address. This is a global structure.
76 *
77 * operations on this structure:
78 * - set m2p entry
79 * set_gpfn_from_mfn()
80 * - zap m2p entry
81 * set_gpfn_from_mfn(INVALID_P2M_ENTRY)
82 * - get m2p entry
83 * get_gpfn_from_mfn()
84 *
85 *
86 * * avoiding races
87 * The resources which are shared by CPUs must be accessed carefully
88 * to avoid races.
89 * IA64 has weak memory ordering, so attention must be paid
90 * when accessing shared structures. [SDM vol2 PartII chap. 2]
91 *
92 * - struct page_info memory ordering
93 * get_page() has acquire semantics.
94 * put_page() has release semantics.
95 *
96 * - populating the p2m table
97 * pgd, pud, pmd are append only.
98 *
99 * - races when updating the P2M tables and the M2P table
100 * P2M entries are shared by more than one vcpu,
101 * so they are accessed with atomic operations.
102 * I.e. xchg or cmpxchg must be used to update a p2m entry.
103 * NOTE: When creating/destroying a domain, we don't need to take care of
104 * this race.
105 *
106 * The M2P table is the inverse of the P2M table.
107 * I.e. P2M(M2P(p)) = p and M2P(P2M(m)) = m
108 * The M2P table and P2M table must be updated consistently.
109 * Here is the update sequence
110 *
111 * xchg or cmpxchg case
112 * - set_gpfn_from_mfn(new_mfn, gpfn)
113 * - memory barrier
114 * - atomic update of the p2m entry (xchg or cmpxchg the p2m entry)
115 * get old_mfn entry as a result.
116 * - memory barrier
117 * - set_gpfn_from_mfn(old_mfn, INVALID_P2M_ENTRY)
118 *
119 * Here the memory barrier can be achieved with release semantics (see the sketch after this comment block).
120 *
121 * - races between global tlb purge and tlb insert
122 * This is a race between reading/writing vcpu->arch.{d, i}tlb or VHPT entry.
123 * When a vcpu is about to insert a tlb entry, another vcpu may purge the tlb
124 * cache globally. Neither a tlb insert (vcpu_itc_no_srlz()) nor a global tlb
125 * purge (domain_flush_vtlb_range() and domain_flush_vtlb_all()) can update
126 * vcpu->arch.{d, i}tlb, the VHPT and the machine TLB atomically, so there is a race here.
127 *
128 * The fix is to check the vcpu->arch.{d, i}tlb.p bit:
129 * after inserting a tlb entry, check the p bit and retry the insert if it was cleared.
130 * This means that when a global tlb purge and a tlb insert are issued
131 * simultaneously, the global tlb purge effectively happens after the tlb insert.
132 *
133 * - races between p2m entry update and tlb insert
134 * This is a race between reading/writing the p2m entry.
135 * reader: vcpu_itc_i(), vcpu_itc_d(), ia64_do_page_fault(), vcpu_fc()
136 * writer: assign_domain_page_cmpxchg_rel(), replace_grant_host_mapping(),
137 * steal_page(), zap_domain_page_one()
138 *
139 * For example, vcpu_itc_i() is about to insert tlb by calling
140 * vcpu_itc_no_srlz() after reading the p2m entry.
141 * At the same time, the p2m entry is replaced by xchg or cmpxchg and the
142 * tlb cache of the page is flushed.
143 * There is a window in which the p2m entry no longer points to the
144 * old page, but the tlb cache still points to the old page.
145 * This can be detected, much like a sequence lock, using the p2m entry itself:
146 * the reader remembers the value of the p2m entry it read and inserts the tlb entry.
147 * Then it reads the p2m entry again. If the new p2m entry value is different
148 * from the value that was used, it retries (see the sketch after this comment block).
149 *
150 * - races between referencing page and p2m entry update
151 * This is a race between reading/writing the p2m entry.
152 * reader: vcpu_get_domain_bundle(), vmx_get_domain_bundle(),
153 * efi_emulate_get_time()
154 * writer: assign_domain_page_cmpxchg_rel(), replace_grant_host_mapping(),
155 * steal_page(), zap_domain_page_one()
156 *
157 * A page which is assigned to a domain can be de-assigned by another vcpu,
158 * so before reading/writing a domain page, the page's reference count
159 * must be incremented.
160 * vcpu_get_domain_bundle(), vmx_get_domain_bundle() and
161 * efi_emulate_get_time()
162 *
163 */
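/*
 * Editor's note: the block below is an illustrative, self-contained model of
 * the two protocols described in the comment above -- the xchg-based
 * P2M/M2P update sequence and the sequence-lock-like reader retry -- added
 * for exposition only.  It is not part of the original file; every name
 * (model_p2m, model_m2p, model_assign, model_translate, ...) is hypothetical,
 * the tables are plain arrays and the "tlb insert" is reduced to keeping a
 * local copy.  It compiles as a standalone C11 program, which is why it is
 * kept inside "#if 0".
 */
#if 0
#include <stdatomic.h>
#include <stdio.h>

#define MODEL_NR_FRAMES 16
#define MODEL_INVALID   (~0UL)

static _Atomic unsigned long model_p2m[MODEL_NR_FRAMES];  /* gpfn -> mfn */
static _Atomic unsigned long model_m2p[MODEL_NR_FRAMES];  /* mfn  -> gpfn */

/* Writer side: the update sequence "set m2p for the new mfn; barrier;
 * xchg the p2m entry; barrier; invalidate m2p for the old mfn". */
static unsigned long
model_assign(unsigned long gpfn, unsigned long new_mfn)
{
    unsigned long old_mfn;

    atomic_store_explicit(&model_m2p[new_mfn], gpfn, memory_order_relaxed);
    /* The release half of the exchange orders the m2p store before the
     * p2m update becomes visible. */
    old_mfn = atomic_exchange_explicit(&model_p2m[gpfn], new_mfn,
                                       memory_order_acq_rel);
    if (old_mfn != MODEL_INVALID)
        atomic_store_explicit(&model_m2p[old_mfn], MODEL_INVALID,
                              memory_order_release);
    return old_mfn;
}

/* Reader side: remember the p2m value, "insert" it, re-read and retry if
 * the entry changed underneath us (the stale-tlb detection described
 * above). */
static unsigned long
model_translate(unsigned long gpfn)
{
    unsigned long mfn, again;

    do {
        mfn = atomic_load_explicit(&model_p2m[gpfn], memory_order_acquire);
        /* ... a real reader would insert a tlb entry for mfn here ... */
        again = atomic_load_explicit(&model_p2m[gpfn], memory_order_acquire);
    } while (mfn != again);
    return mfn;
}

int main(void)
{
    for (int i = 0; i < MODEL_NR_FRAMES; i++) {
        atomic_init(&model_p2m[i], MODEL_INVALID);
        atomic_init(&model_m2p[i], MODEL_INVALID);
    }

    model_assign(3, 7);                            /* map gpfn 3 -> mfn 7 */
    printf("p2m(3) = 0x%lx\n", model_translate(3));
    model_assign(3, 9);                            /* replace with mfn 9  */
    printf("p2m(3) = 0x%lx\n", model_translate(3));
    return 0;
}
#endif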
165 #include <xen/config.h>
166 #include <xen/sched.h>
167 #include <xen/domain.h>
168 #include <asm/xentypes.h>
169 #include <xen/mm.h>
170 #include <xen/errno.h>
171 #include <asm/pgalloc.h>
172 #include <asm/vhpt.h>
173 #include <asm/vcpu.h>
174 #include <asm/shadow.h>
175 #include <asm/p2m_entry.h>
176 #include <asm/tlb_track.h>
177 #include <linux/efi.h>
178 #include <linux/sort.h>
179 #include <xen/guest_access.h>
180 #include <asm/page.h>
181 #include <asm/dom_fw_common.h>
182 #include <public/memory.h>
183 #include <asm/event.h>
185 static void domain_page_flush_and_put(struct domain* d, unsigned long mpaddr,
186 volatile pte_t* ptep, pte_t old_pte,
187 struct page_info* page);
189 extern unsigned long ia64_iobase;
191 static struct domain *dom_xen, *dom_io;
193 /*
194 * This number is bigger than DOMID_SELF, DOMID_XEN and DOMID_IO.
195 * If more reserved domain ids are introduced, this might be increased.
196 */
197 #define DOMID_P2M (0x7FF8U)
198 static struct domain *dom_p2m;
200 // the following is stolen from arch_init_memory() @ xen/arch/x86/mm.c
201 void
202 alloc_dom_xen_and_dom_io(void)
203 {
204 /*
205 * Initialise our DOMID_XEN domain.
206 * Any Xen-heap pages that we will allow to be mapped will have
207 * their domain field set to dom_xen.
208 */
209 dom_xen = alloc_domain(DOMID_XEN);
210 BUG_ON(dom_xen == NULL);
212 /*
213 * Initialise our DOMID_IO domain.
214 * This domain owns I/O pages that are within the range of the page_info
215 * array. Mappings occur at the priv of the caller.
216 */
217 dom_io = alloc_domain(DOMID_IO);
218 BUG_ON(dom_io == NULL);
219 }
221 static int
222 mm_teardown_can_skip(struct domain* d, unsigned long offset)
223 {
224 return d->arch.mm_teardown_offset > offset;
225 }
227 static void
228 mm_teardown_update_offset(struct domain* d, unsigned long offset)
229 {
230 d->arch.mm_teardown_offset = offset;
231 }
233 static void
234 mm_teardown_pte(struct domain* d, volatile pte_t* pte, unsigned long offset)
235 {
236 pte_t old_pte;
237 unsigned long mfn;
238 struct page_info* page;
240 old_pte = ptep_get_and_clear(&d->arch.mm, offset, pte);// acquire semantics
242 // vmx domains use bits [58:56] to distinguish an io region from memory.
243 // see vmx_build_physmap_table() in vmx_init.c
244 if (!pte_mem(old_pte))
245 return;
247 // domain might map IO space or acpi table pages. check it.
248 mfn = pte_pfn(old_pte);
249 if (!mfn_valid(mfn))
250 return;
251 page = mfn_to_page(mfn);
252 BUG_ON(page_get_owner(page) == NULL);
254 // The struct page_info corresponding to the mfn may or may not exist,
255 // depending on CONFIG_VIRTUAL_FRAME_TABLE.
256 // The above check is too simplistic.
257 // The right way is to check whether this page belongs to an io area or to acpi pages.
259 if (pte_pgc_allocated(old_pte)) {
260 BUG_ON(page_get_owner(page) != d);
261 BUG_ON(get_gpfn_from_mfn(mfn) == INVALID_M2P_ENTRY);
262 set_gpfn_from_mfn(mfn, INVALID_M2P_ENTRY);
263 if (test_and_clear_bit(_PGC_allocated, &page->count_info))
264 put_page(page);
265 } else {
266 put_page(page);
267 }
268 }
270 static int
271 mm_teardown_pmd(struct domain* d, volatile pmd_t* pmd, unsigned long offset)
272 {
273 unsigned long i;
274 volatile pte_t* pte = pte_offset_map(pmd, offset);
276 for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
277 unsigned long cur_offset = offset + (i << PAGE_SHIFT);
278 if (mm_teardown_can_skip(d, cur_offset + PAGE_SIZE))
279 continue;
280 if (!pte_present(*pte)) { // acquire semantics
281 mm_teardown_update_offset(d, cur_offset);
282 continue;
283 }
284 mm_teardown_update_offset(d, cur_offset);
285 mm_teardown_pte(d, pte, cur_offset);
286 if (hypercall_preempt_check())
287 return -EAGAIN;
288 }
289 return 0;
290 }
292 static int
293 mm_teardown_pud(struct domain* d, volatile pud_t *pud, unsigned long offset)
294 {
295 unsigned long i;
296 volatile pmd_t *pmd = pmd_offset(pud, offset);
298 for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
299 unsigned long cur_offset = offset + (i << PMD_SHIFT);
300 if (mm_teardown_can_skip(d, cur_offset + PMD_SIZE))
301 continue;
302 if (!pmd_present(*pmd)) { // acquire semantics
303 mm_teardown_update_offset(d, cur_offset);
304 continue;
305 }
306 if (mm_teardown_pmd(d, pmd, cur_offset))
307 return -EAGAIN;
308 }
309 return 0;
310 }
312 static int
313 mm_teardown_pgd(struct domain* d, volatile pgd_t *pgd, unsigned long offset)
314 {
315 unsigned long i;
316 volatile pud_t *pud = pud_offset(pgd, offset);
318 for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
319 unsigned long cur_offset = offset + (i << PUD_SHIFT);
320 #ifndef __PAGETABLE_PUD_FOLDED
321 if (mm_teardown_can_skip(d, cur_offset + PUD_SIZE))
322 continue;
323 #endif
324 if (!pud_present(*pud)) { // acquire semantics
325 #ifndef __PAGETABLE_PUD_FOLDED
326 mm_teardown_update_offset(d, cur_offset);
327 #endif
328 continue;
329 }
330 if (mm_teardown_pud(d, pud, cur_offset))
331 return -EAGAIN;
332 }
333 return 0;
334 }
336 int
337 mm_teardown(struct domain* d)
338 {
339 struct mm_struct* mm = &d->arch.mm;
340 unsigned long i;
341 volatile pgd_t* pgd;
343 if (mm->pgd == NULL)
344 return 0;
346 pgd = pgd_offset(mm, 0);
347 for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
348 unsigned long cur_offset = i << PGDIR_SHIFT;
350 if (mm_teardown_can_skip(d, cur_offset + PGDIR_SIZE))
351 continue;
352 if (!pgd_present(*pgd)) { // acquire semantics
353 mm_teardown_update_offset(d, cur_offset);
354 continue;
355 }
356 if (mm_teardown_pgd(d, pgd, cur_offset))
357 return -EAGAIN;
358 }
360 foreign_p2m_destroy(d);
361 return 0;
362 }
364 static void
365 mm_p2m_teardown_pmd(struct domain* d, volatile pmd_t* pmd,
366 unsigned long offset)
367 {
368 pte_free_kernel(pte_offset_map(pmd, offset));
369 }
371 static void
372 mm_p2m_teardown_pud(struct domain* d, volatile pud_t *pud,
373 unsigned long offset)
374 {
375 unsigned long i;
376 volatile pmd_t *pmd = pmd_offset(pud, offset);
378 for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
379 if (!pmd_present(*pmd))
380 continue;
381 mm_p2m_teardown_pmd(d, pmd, offset + (i << PMD_SHIFT));
382 }
383 pmd_free(pmd_offset(pud, offset));
384 }
386 static void
387 mm_p2m_teardown_pgd(struct domain* d, volatile pgd_t *pgd,
388 unsigned long offset)
389 {
390 unsigned long i;
391 volatile pud_t *pud = pud_offset(pgd, offset);
393 for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
394 if (!pud_present(*pud))
395 continue;
396 mm_p2m_teardown_pud(d, pud, offset + (i << PUD_SHIFT));
397 }
398 pud_free(pud_offset(pgd, offset));
399 }
401 static void
402 mm_p2m_teardown(struct domain* d)
403 {
404 struct mm_struct* mm = &d->arch.mm;
405 unsigned long i;
406 volatile pgd_t* pgd;
408 BUG_ON(mm->pgd == NULL);
409 pgd = pgd_offset(mm, 0);
410 for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
411 if (!pgd_present(*pgd))
412 continue;
413 mm_p2m_teardown_pgd(d, pgd, i << PGDIR_SHIFT);
414 }
415 pgd_free(mm->pgd);
416 mm->pgd = NULL;
417 }
419 void
420 mm_final_teardown(struct domain* d)
421 {
422 if (d->arch.shadow_bitmap != NULL) {
423 xfree(d->arch.shadow_bitmap);
424 d->arch.shadow_bitmap = NULL;
425 }
426 mm_p2m_teardown(d);
427 }
429 unsigned long
430 domain_get_maximum_gpfn(struct domain *d)
431 {
432 return (d->arch.convmem_end + PAGE_SIZE - 1) >> PAGE_SHIFT;
433 }
435 // stolen from share_xen_page_with_guest() in xen/arch/x86/mm.c
436 void
437 share_xen_page_with_guest(struct page_info *page,
438 struct domain *d, int readonly)
439 {
440 if ( page_get_owner(page) == d )
441 return;
443 #if 1
444 if (readonly) {
445 printk("%s:%d readonly is not supported yet\n", __func__, __LINE__);
446 }
447 #endif
449 // alloc_xenheap_pages() doesn't initialize page owner.
450 //BUG_ON(page_get_owner(page) != NULL);
452 spin_lock(&d->page_alloc_lock);
454 #ifndef __ia64__
455 /* The incremented type count pins as writable or read-only. */
456 page->u.inuse.type_info = (readonly ? PGT_none : PGT_writable_page);
457 page->u.inuse.type_info |= PGT_validated | 1;
458 #endif
460 page_set_owner(page, d);
461 wmb(); /* install valid domain ptr before updating refcnt. */
462 ASSERT(page->count_info == 0);
464 /* Only add to the allocation list if the domain isn't dying. */
465 if ( !d->is_dying )
466 {
467 page->count_info |= PGC_allocated | 1;
468 if ( unlikely(d->xenheap_pages++ == 0) )
469 get_knownalive_domain(d);
470 list_add_tail(&page->list, &d->xenpage_list);
471 }
473 // grant_table_destroy() releases these pages.
474 // but it doesn't clear their m2p entries, so stale entries may remain.
475 // Such a stale entry is cleared here.
476 set_gpfn_from_mfn(page_to_mfn(page), INVALID_M2P_ENTRY);
478 spin_unlock(&d->page_alloc_lock);
479 }
481 void
482 share_xen_page_with_privileged_guests(struct page_info *page, int readonly)
483 {
484 share_xen_page_with_guest(page, dom_xen, readonly);
485 }
487 unsigned long
488 gmfn_to_mfn_foreign(struct domain *d, unsigned long gpfn)
489 {
490 unsigned long pte;
492 pte = lookup_domain_mpa(d,gpfn << PAGE_SHIFT, NULL);
493 if (!pte) {
494 panic("gmfn_to_mfn_foreign: bad gpfn. spinning...\n");
495 }
496 return ((pte & _PFN_MASK) >> PAGE_SHIFT);
497 }
499 // given a domain virtual address, pte and pagesize, extract the metaphysical
500 // address, convert the pte to a physical address for the (possibly different)
501 // Xen PAGE_SIZE and return the modified pte (an illustrative sketch of this
502 // address arithmetic follows the function). NOTE: TLB insert should use current->arch.vhpt_pg_shift!
503 u64 translate_domain_pte(u64 pteval, u64 address, u64 itir__, u64* itir,
504 struct p2m_entry* entry)
505 {
506 struct domain *d = current->domain;
507 ia64_itir_t _itir = {.itir = itir__};
508 u64 mask, mpaddr, pteval2;
509 u64 arflags;
510 u64 arflags2;
511 u64 maflags2;
512 u64 ps;
514 pteval &= ((1UL << 53) - 1);// ignore [63:53] bits
516 // FIXME address had better be pre-validated on insert
517 mask = ~itir_mask(_itir.itir);
518 mpaddr = ((pteval & _PAGE_PPN_MASK) & ~mask) | (address & mask);
519 ps = current->arch.vhpt_pg_shift ? current->arch.vhpt_pg_shift :
520 PAGE_SHIFT;
522 if (_itir.ps > ps)
523 _itir.ps = ps;
525 ((ia64_itir_t*)itir)->itir = _itir.itir;/* Copy the whole register. */
526 ((ia64_itir_t*)itir)->ps = _itir.ps; /* Overwrite ps part! */
528 pteval2 = lookup_domain_mpa(d, mpaddr, entry);
529 if (ps < PAGE_SHIFT)
530 pteval2 |= mpaddr & (PAGE_SIZE - 1) & ~((1L << ps) - 1);
532 /* Check access rights. */
533 arflags = pteval & _PAGE_AR_MASK;
534 arflags2 = pteval2 & _PAGE_AR_MASK;
535 if (arflags != _PAGE_AR_R && arflags2 == _PAGE_AR_R) {
536 #if 0
537 dprintk(XENLOG_WARNING,
538 "%s:%d "
539 "pteval 0x%lx arflag 0x%lx address 0x%lx itir 0x%lx "
540 "pteval2 0x%lx arflags2 0x%lx mpaddr 0x%lx\n",
541 __func__, __LINE__,
542 pteval, arflags, address, itir__,
543 pteval2, arflags2, mpaddr);
544 #endif
545 pteval = (pteval & ~_PAGE_AR_MASK) | _PAGE_AR_R;
546 }
548 /* Check memory attribute. The switch is on the *requested* memory
549 attribute. */
550 maflags2 = pteval2 & _PAGE_MA_MASK;
551 switch (pteval & _PAGE_MA_MASK) {
552 case _PAGE_MA_NAT:
553 /* NaT pages are always accepted! */
554 break;
555 case _PAGE_MA_UC:
556 case _PAGE_MA_UCE:
557 case _PAGE_MA_WC:
558 if (maflags2 == _PAGE_MA_WB) {
559 /* Don't let domains WB-map uncached addresses.
560 This can happen when domU tries to touch i/o
561 port space. Also prevents possible address
562 aliasing issues. */
563 if (!(mpaddr - IO_PORTS_PADDR < IO_PORTS_SIZE)) {
564 u64 ucwb;
566 /*
567 * If dom0 page has both UC & WB attributes
568 * don't warn about attempted UC access.
569 */
570 ucwb = efi_mem_attribute(mpaddr, PAGE_SIZE);
571 ucwb &= EFI_MEMORY_UC | EFI_MEMORY_WB;
572 ucwb ^= EFI_MEMORY_UC | EFI_MEMORY_WB;
574 if (d != dom0 || ucwb != 0)
575 gdprintk(XENLOG_WARNING, "Warning: UC"
576 " to WB for mpaddr=%lx\n",
577 mpaddr);
578 }
579 pteval = (pteval & ~_PAGE_MA_MASK) | _PAGE_MA_WB;
580 }
581 break;
582 case _PAGE_MA_WB:
583 if (maflags2 != _PAGE_MA_WB) {
584 /* Forbid non-coherent access to coherent memory. */
585 panic_domain(NULL, "try to use WB mem attr on "
586 "UC page, mpaddr=%lx\n", mpaddr);
587 }
588 break;
589 default:
590 panic_domain(NULL, "try to use unknown mem attribute\n");
591 }
593 /* If shadow mode is enabled, virtualize dirty bit. */
594 if (shadow_mode_enabled(d) && (pteval & _PAGE_D)) {
595 u64 mp_page = mpaddr >> PAGE_SHIFT;
596 pteval |= _PAGE_VIRT_D;
598 /* If the page is not already dirty, don't set the dirty bit! */
599 if (mp_page < d->arch.shadow_bitmap_size * 8
600 && !test_bit(mp_page, d->arch.shadow_bitmap))
601 pteval &= ~_PAGE_D;
602 }
604 /* Ignore non-addr bits of pteval2 and force PL0->1
605 (PL3 is unaffected) */
606 return (pteval & ~(_PAGE_PPN_MASK | _PAGE_PL_MASK)) |
607 (pteval2 & _PAGE_PPN_MASK) |
608 (vcpu_pl_adjust(pteval, 7) & _PAGE_PL_MASK);
609 }
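/*
 * Editor's note: an illustrative, self-contained model of the address
 * arithmetic performed by translate_domain_pte() above, added for
 * exposition only; it is not part of the original file.  The model_* names
 * and the constants are hypothetical, and the p2m lookup itself is omitted.
 */
#if 0
#include <stdio.h>

#define MODEL_XEN_PAGE_SHIFT 14UL      /* Xen page size assumed to be 16KB */

/*
 * The guest inserts a translation whose page size (guest_ps) may be larger
 * than Xen's.  The low (1 << guest_ps) - 1 bits of the faulting address are
 * the offset inside the guest's page; the rest comes from the guest pte.
 * The resulting metaphysical address is what gets looked up in the p2m, and
 * the page size of the inserted translation is clamped to Xen's page size.
 */
static unsigned long
model_guest_mpaddr(unsigned long guest_pte_paddr, unsigned long address,
                   unsigned long guest_ps)
{
    unsigned long mask = (1UL << guest_ps) - 1;

    return (guest_pte_paddr & ~mask) | (address & mask);
}

int main(void)
{
    unsigned long guest_ps = 24;                   /* 16MB guest mapping  */
    unsigned long guest_pte_paddr = 0x04000000UL;  /* guest page base     */
    unsigned long address = 0xa0123456UL;          /* faulting address    */
    unsigned long ps = guest_ps > MODEL_XEN_PAGE_SHIFT ?
                       MODEL_XEN_PAGE_SHIFT : guest_ps;

    printf("mpaddr = 0x%lx, inserted page size = 2^%lu\n",
           model_guest_mpaddr(guest_pte_paddr, address, guest_ps), ps);
    return 0;
}
#endif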
611 // given a current domain metaphysical address, return the physical address
612 unsigned long translate_domain_mpaddr(unsigned long mpaddr,
613 struct p2m_entry* entry)
614 {
615 unsigned long pteval;
617 pteval = lookup_domain_mpa(current->domain, mpaddr, entry);
618 return ((pteval & _PAGE_PPN_MASK) | (mpaddr & ~PAGE_MASK));
619 }
621 //XXX !xxx_present() should be used instead of !xxx_none()?
622 // pud, pmd and pte pages are zero-cleared when they are allocated.
623 // Their contents must be visible before population, so the
624 // cmpxchg must have release semantics.
625 static volatile pte_t*
626 lookup_alloc_domain_pte(struct domain* d, unsigned long mpaddr)
627 {
628 struct mm_struct *mm = &d->arch.mm;
629 volatile pgd_t *pgd;
630 volatile pud_t *pud;
631 volatile pmd_t *pmd;
633 BUG_ON(mm->pgd == NULL);
635 pgd = pgd_offset(mm, mpaddr);
636 again_pgd:
637 if (unlikely(pgd_none(*pgd))) { // acquire semantics
638 pud_t *old_pud = NULL;
639 pud = pud_alloc_one(mm, mpaddr);
640 if (unlikely(!pgd_cmpxchg_rel(mm, pgd, old_pud, pud))) {
641 pud_free(pud);
642 goto again_pgd;
643 }
644 }
646 pud = pud_offset(pgd, mpaddr);
647 again_pud:
648 if (unlikely(pud_none(*pud))) { // acquire semantics
649 pmd_t* old_pmd = NULL;
650 pmd = pmd_alloc_one(mm, mpaddr);
651 if (unlikely(!pud_cmpxchg_rel(mm, pud, old_pmd, pmd))) {
652 pmd_free(pmd);
653 goto again_pud;
654 }
655 }
657 pmd = pmd_offset(pud, mpaddr);
658 again_pmd:
659 if (unlikely(pmd_none(*pmd))) { // acquire semantics
660 pte_t* old_pte = NULL;
661 pte_t* pte = pte_alloc_one_kernel(mm, mpaddr);
662 if (unlikely(!pmd_cmpxchg_kernel_rel(mm, pmd, old_pte, pte))) {
663 pte_free_kernel(pte);
664 goto again_pmd;
665 }
666 }
668 return pte_offset_map(pmd, mpaddr);
669 }
671 //XXX xxx_none() should be used instead of !xxx_present()?
672 volatile pte_t*
673 lookup_noalloc_domain_pte(struct domain* d, unsigned long mpaddr)
674 {
675 struct mm_struct *mm = &d->arch.mm;
676 volatile pgd_t *pgd;
677 volatile pud_t *pud;
678 volatile pmd_t *pmd;
680 BUG_ON(mm->pgd == NULL);
681 pgd = pgd_offset(mm, mpaddr);
682 if (unlikely(!pgd_present(*pgd))) // acquire semantics
683 return NULL;
685 pud = pud_offset(pgd, mpaddr);
686 if (unlikely(!pud_present(*pud))) // acquire semantics
687 return NULL;
689 pmd = pmd_offset(pud, mpaddr);
690 if (unlikely(!pmd_present(*pmd))) // acquire semantics
691 return NULL;
693 return pte_offset_map(pmd, mpaddr);
694 }
696 static volatile pte_t*
697 lookup_noalloc_domain_pte_none(struct domain* d, unsigned long mpaddr)
698 {
699 struct mm_struct *mm = &d->arch.mm;
700 volatile pgd_t *pgd;
701 volatile pud_t *pud;
702 volatile pmd_t *pmd;
704 BUG_ON(mm->pgd == NULL);
705 pgd = pgd_offset(mm, mpaddr);
706 if (unlikely(pgd_none(*pgd))) // acquire semantics
707 return NULL;
709 pud = pud_offset(pgd, mpaddr);
710 if (unlikely(pud_none(*pud))) // acquire semantics
711 return NULL;
713 pmd = pmd_offset(pud, mpaddr);
714 if (unlikely(pmd_none(*pmd))) // acquire semantics
715 return NULL;
717 return pte_offset_map(pmd, mpaddr);
718 }
720 unsigned long
721 ____lookup_domain_mpa(struct domain *d, unsigned long mpaddr)
722 {
723 volatile pte_t *pte;
725 pte = lookup_noalloc_domain_pte(d, mpaddr);
726 if (pte == NULL)
727 return INVALID_MFN;
729 if (pte_present(*pte))
730 return (pte->pte & _PFN_MASK);
731 else if (VMX_DOMAIN(d->vcpu[0]))
732 return GPFN_INV_MASK;
733 return INVALID_MFN;
734 }
736 unsigned long lookup_domain_mpa(struct domain *d, unsigned long mpaddr,
737 struct p2m_entry* entry)
738 {
739 volatile pte_t *pte = lookup_noalloc_domain_pte(d, mpaddr);
741 if (pte != NULL) {
742 pte_t tmp_pte = *pte;// pte is volatile. copy the value.
743 if (pte_present(tmp_pte)) {
744 if (entry != NULL)
745 p2m_entry_set(entry, pte, tmp_pte);
746 return pte_val(tmp_pte);
747 } else if (VMX_DOMAIN(d->vcpu[0]))
748 return GPFN_INV_MASK;
749 }
751 if (mpaddr < d->arch.convmem_end && !d->is_dying) {
752 gdprintk(XENLOG_WARNING, "vcpu %d iip 0x%016lx: non-allocated mpa "
753 "d %"PRId16" 0x%lx (< 0x%lx)\n",
754 current->vcpu_id, PSCB(current, iip),
755 d->domain_id, mpaddr, d->arch.convmem_end);
756 } else if (mpaddr - IO_PORTS_PADDR < IO_PORTS_SIZE) {
757 /* Log I/O port probing, but complain less loudly about it */
758 gdprintk(XENLOG_INFO, "vcpu %d iip 0x%016lx: bad I/O port access "
759 "d %"PRId16" 0x%lx\n",
760 current->vcpu_id, PSCB(current, iip), d->domain_id,
761 IO_SPACE_SPARSE_DECODING(mpaddr - IO_PORTS_PADDR));
762 } else {
763 gdprintk(XENLOG_WARNING, "vcpu %d iip 0x%016lx: bad mpa "
764 "d %"PRId16" 0x%lx (=> 0x%lx)\n",
765 current->vcpu_id, PSCB(current, iip),
766 d->domain_id, mpaddr, d->arch.convmem_end);
767 }
769 if (entry != NULL)
770 p2m_entry_set(entry, NULL, __pte(0));
771 //XXX This is a workaround until emulation of memory accesses to a region
772 // where memory or a device is attached is implemented.
773 return pte_val(pfn_pte(0, __pgprot(__DIRTY_BITS | _PAGE_PL_PRIV |
774 _PAGE_AR_RWX)));
775 }
777 // FIXME: ONLY USE FOR DOMAIN PAGE_SIZE == PAGE_SIZE
778 #if 1
779 void *domain_mpa_to_imva(struct domain *d, unsigned long mpaddr)
780 {
781 unsigned long pte = lookup_domain_mpa(d, mpaddr, NULL);
782 unsigned long imva;
784 pte &= _PAGE_PPN_MASK;
785 imva = (unsigned long) __va(pte);
786 imva |= mpaddr & ~PAGE_MASK;
787 return (void*)imva;
788 }
789 #else
790 void *domain_mpa_to_imva(struct domain *d, unsigned long mpaddr)
791 {
792 unsigned long imva = __gpa_to_mpa(d, mpaddr);
794 return (void *)__va(imva);
795 }
796 #endif
798 unsigned long
799 paddr_to_maddr(unsigned long paddr)
800 {
801 struct vcpu *v = current;
802 struct domain *d = v->domain;
803 u64 pa;
805 pa = ____lookup_domain_mpa(d, paddr);
806 if (pa == INVALID_MFN) {
807 printk("%s: called with bad memory address: 0x%lx - iip=%lx\n",
808 __func__, paddr, vcpu_regs(v)->cr_iip);
809 return 0;
810 }
811 return (pa & _PFN_MASK) | (paddr & ~PAGE_MASK);
812 }
814 /* Allocate a new page for domain and map it to the specified metaphysical
815 address. */
816 static struct page_info *
817 __assign_new_domain_page(struct domain *d, unsigned long mpaddr,
818 volatile pte_t* pte)
819 {
820 struct page_info *p;
821 unsigned long maddr;
823 BUG_ON(!pte_none(*pte));
825 p = alloc_domheap_page(d);
826 if (unlikely(!p)) {
827 printk("assign_new_domain_page: Can't alloc!!!! Aaaargh!\n");
828 return(p);
829 }
831 // zero out pages for security reasons
832 clear_page(page_to_virt(p));
833 maddr = page_to_maddr (p);
834 if (unlikely(maddr > __get_cpu_var(vhpt_paddr)
835 && maddr < __get_cpu_var(vhpt_pend))) {
836 /* FIXME: how can this happen ?
837 vhpt is allocated by alloc_domheap_page. */
838 printk("assign_new_domain_page: reassigned vhpt page %lx!!\n",
839 maddr);
840 }
842 set_gpfn_from_mfn(page_to_mfn(p), mpaddr >> PAGE_SHIFT);
843 // clear_page() and set_gpfn_from_mfn() become visible before set_pte_rel()
844 // because set_pte_rel() has release semantics
845 set_pte_rel(pte,
846 pfn_pte(maddr >> PAGE_SHIFT,
847 __pgprot(_PAGE_PGC_ALLOCATED | __DIRTY_BITS |
848 _PAGE_PL_PRIV | _PAGE_AR_RWX)));
850 smp_mb();
851 return p;
852 }
854 struct page_info *
855 assign_new_domain_page(struct domain *d, unsigned long mpaddr)
856 {
857 volatile pte_t *pte = lookup_alloc_domain_pte(d, mpaddr);
859 if (!pte_none(*pte))
860 return NULL;
862 return __assign_new_domain_page(d, mpaddr, pte);
863 }
865 void __init
866 assign_new_domain0_page(struct domain *d, unsigned long mpaddr)
867 {
868 volatile pte_t *pte;
870 BUG_ON(d != dom0);
871 pte = lookup_alloc_domain_pte(d, mpaddr);
872 if (pte_none(*pte)) {
873 struct page_info *p = __assign_new_domain_page(d, mpaddr, pte);
874 if (p == NULL) {
875 panic("%s: can't allocate page for dom0\n", __func__);
876 }
877 }
878 }
880 static unsigned long
881 flags_to_prot (unsigned long flags)
882 {
883 unsigned long res = _PAGE_PL_PRIV | __DIRTY_BITS;
885 res |= flags & ASSIGN_readonly ? _PAGE_AR_R: _PAGE_AR_RWX;
886 res |= flags & ASSIGN_nocache ? _PAGE_MA_UC: _PAGE_MA_WB;
887 #ifdef CONFIG_XEN_IA64_TLB_TRACK
888 res |= flags & ASSIGN_tlb_track ? _PAGE_TLB_TRACKING: 0;
889 #endif
890 res |= flags & ASSIGN_pgc_allocated ? _PAGE_PGC_ALLOCATED: 0;
892 return res;
893 }
895 /* map a physical address to the specified metaphysical addr */
896 // flags: currently only ASSIGN_readonly, ASSIGN_nocache, ASSIGN_tlb_track
897 // This is called by assign_domain_mmio_page().
898 // So access to the pte is racy.
899 int
900 __assign_domain_page(struct domain *d,
901 unsigned long mpaddr, unsigned long physaddr,
902 unsigned long flags)
903 {
904 volatile pte_t *pte;
905 pte_t old_pte;
906 pte_t new_pte;
907 pte_t ret_pte;
908 unsigned long prot = flags_to_prot(flags);
910 pte = lookup_alloc_domain_pte(d, mpaddr);
912 old_pte = __pte(0);
913 new_pte = pfn_pte(physaddr >> PAGE_SHIFT, __pgprot(prot));
914 ret_pte = ptep_cmpxchg_rel(&d->arch.mm, mpaddr, pte, old_pte, new_pte);
915 if (pte_val(ret_pte) == pte_val(old_pte)) {
916 smp_mb();
917 return 0;
918 }
920 // dom0 tried to map the real machine's I/O region, but failed.
921 // It is very likely that dom0 won't boot correctly because
922 // it can't access I/O, so complain here.
923 if (flags & ASSIGN_nocache) {
924 int warn = 0;
926 if (pte_pfn(ret_pte) != (physaddr >> PAGE_SHIFT))
927 warn = 1;
928 else if (!(pte_val(ret_pte) & _PAGE_MA_UC)) {
929 u32 type;
930 u64 attr;
932 warn = 1;
934 /*
935 * See
936 * complete_dom0_memmap()
937 * case EFI_RUNTIME_SERVICES_CODE:
938 * case EFI_RUNTIME_SERVICES_DATA:
939 * case EFI_ACPI_RECLAIM_MEMORY:
940 * case EFI_ACPI_MEMORY_NVS:
941 * case EFI_RESERVED_TYPE:
942 *
943 * Currently only EFI_RUNTIME_SERVICES_CODE is found
944 * so we suppress the warning only in the EFI_RUNTIME_SERVICES_CODE case.
945 */
946 type = efi_mem_type(physaddr);
947 attr = efi_mem_attributes(physaddr);
948 if (type == EFI_RUNTIME_SERVICES_CODE &&
949 (attr & EFI_MEMORY_UC) && (attr & EFI_MEMORY_WB))
950 warn = 0;
951 }
952 if (warn)
953 printk("%s:%d WARNING can't assign page domain 0x%p id %d\n"
954 "\talready assigned pte_val 0x%016lx\n"
955 "\tmpaddr 0x%016lx physaddr 0x%016lx flags 0x%lx\n",
956 __func__, __LINE__,
957 d, d->domain_id, pte_val(ret_pte),
958 mpaddr, physaddr, flags);
959 }
961 return -EAGAIN;
962 }
964 /* get_page() and map a physical address to the specified metaphysical addr */
965 void
966 assign_domain_page(struct domain *d,
967 unsigned long mpaddr, unsigned long physaddr)
968 {
969 struct page_info* page = mfn_to_page(physaddr >> PAGE_SHIFT);
971 BUG_ON((physaddr & GPFN_IO_MASK) != GPFN_MEM);
972 BUG_ON(page->count_info != (PGC_allocated | 1));
973 set_gpfn_from_mfn(physaddr >> PAGE_SHIFT, mpaddr >> PAGE_SHIFT);
974 // because __assign_domain_page() uses set_pte_rel() which has
975 // release semantics, smp_mb() isn't needed.
976 (void)__assign_domain_page(d, mpaddr, physaddr,
977 ASSIGN_writable | ASSIGN_pgc_allocated);
978 }
980 int
981 ioports_permit_access(struct domain *d, unsigned int fp, unsigned int lp)
982 {
983 struct io_space *space;
984 unsigned long mmio_start, mmio_end, mach_start;
985 int ret;
987 if (IO_SPACE_NR(fp) >= num_io_spaces) {
988 dprintk(XENLOG_WARNING, "Unknown I/O Port range 0x%x - 0x%x\n", fp, lp);
989 return -EFAULT;
990 }
992 /*
993 * The ioport_cap rangeset tracks the I/O port address including
994 * the port space ID. This means port space IDs need to match
995 * between Xen and dom0. This is also a requirement because
996 * the hypercall to pass these port ranges only uses a u32.
997 *
998 * NB - non-dom0 driver domains may only have a subset of the
999 * I/O port spaces and thus will number port spaces differently.
1000 * This is ok, they don't make use of this interface.
1001 */
1002 ret = rangeset_add_range(d->arch.ioport_caps, fp, lp);
1003 if (ret != 0)
1004 return ret;
1006 space = &io_space[IO_SPACE_NR(fp)];
1008 /* Legacy I/O on dom0 is already set up */
1009 if (d == dom0 && space == &io_space[0])
1010 return 0;
1012 fp = IO_SPACE_PORT(fp);
1013 lp = IO_SPACE_PORT(lp);
1015 if (space->sparse) {
1016 mmio_start = IO_SPACE_SPARSE_ENCODING(fp) & ~PAGE_MASK;
1017 mmio_end = PAGE_ALIGN(IO_SPACE_SPARSE_ENCODING(lp));
1018 } else {
1019 mmio_start = fp & ~PAGE_MASK;
1020 mmio_end = PAGE_ALIGN(lp);
1023 /*
1024 * The "machine first port" is not necessarily identity mapped
1025 * to the guest first port. At least for the legacy range.
1026 */
1027 mach_start = mmio_start | __pa(space->mmio_base);
1029 if (space == &io_space[0]) {
1030 mmio_start |= IO_PORTS_PADDR;
1031 mmio_end |= IO_PORTS_PADDR;
1032 } else {
1033 mmio_start |= __pa(space->mmio_base);
1034 mmio_end |= __pa(space->mmio_base);
1037 while (mmio_start <= mmio_end) {
1038 (void)__assign_domain_page(d, mmio_start, mach_start, ASSIGN_nocache);
1039 mmio_start += PAGE_SIZE;
1040 mach_start += PAGE_SIZE;
1043 return 0;
1046 static int
1047 ioports_has_allowed(struct domain *d, unsigned int fp, unsigned int lp)
1049 for (; fp < lp; fp++)
1050 if (rangeset_contains_singleton(d->arch.ioport_caps, fp))
1051 return 1;
1053 return 0;
1056 int
1057 ioports_deny_access(struct domain *d, unsigned int fp, unsigned int lp)
1059 int ret;
1060 struct mm_struct *mm = &d->arch.mm;
1061 unsigned long mmio_start, mmio_end, mmio_base;
1062 unsigned int fp_base, lp_base;
1063 struct io_space *space;
1065 if (IO_SPACE_NR(fp) >= num_io_spaces) {
1066 dprintk(XENLOG_WARNING, "Unknown I/O Port range 0x%x - 0x%x\n", fp, lp);
1067 return -EFAULT;
1070 ret = rangeset_remove_range(d->arch.ioport_caps, fp, lp);
1071 if (ret != 0)
1072 return ret;
1074 space = &io_space[IO_SPACE_NR(fp)];
1075 fp_base = IO_SPACE_PORT(fp);
1076 lp_base = IO_SPACE_PORT(lp);
1078 if (space->sparse) {
1079 mmio_start = IO_SPACE_SPARSE_ENCODING(fp_base) & ~PAGE_MASK;
1080 mmio_end = PAGE_ALIGN(IO_SPACE_SPARSE_ENCODING(lp_base));
1081 } else {
1082 mmio_start = fp_base & ~PAGE_MASK;
1083 mmio_end = PAGE_ALIGN(lp_base);
1086 if (space == &io_space[0] && d != dom0)
1087 mmio_base = IO_PORTS_PADDR;
1088 else
1089 mmio_base = __pa(space->mmio_base);
1091 for (; mmio_start < mmio_end; mmio_start += PAGE_SIZE) {
1092 unsigned int port, range;
1093 unsigned long mpaddr;
1094 volatile pte_t *pte;
1095 pte_t old_pte;
1097 if (space->sparse) {
1098 port = IO_SPACE_SPARSE_DECODING(mmio_start);
1099 range = IO_SPACE_SPARSE_PORTS_PER_PAGE - 1;
1100 } else {
1101 port = mmio_start;
1102 range = PAGE_SIZE - 1;
1105 port |= IO_SPACE_BASE(IO_SPACE_NR(fp));
1107 if (port < fp || port + range > lp) {
1108 /* Maybe this covers an allowed port. */
1109 if (ioports_has_allowed(d, port, port + range))
1110 continue;
1113 mpaddr = mmio_start | mmio_base;
1114 pte = lookup_noalloc_domain_pte_none(d, mpaddr);
1115 BUG_ON(pte == NULL);
1116 BUG_ON(pte_none(*pte));
1118 /* clear pte */
1119 old_pte = ptep_get_and_clear(mm, mpaddr, pte);
1121 domain_flush_vtlb_all(d);
1122 return 0;
1125 static void
1126 assign_domain_same_page(struct domain *d,
1127 unsigned long mpaddr, unsigned long size,
1128 unsigned long flags)
1130 //XXX optimization
1131 unsigned long end = PAGE_ALIGN(mpaddr + size);
1132 for (mpaddr &= PAGE_MASK; mpaddr < end; mpaddr += PAGE_SIZE) {
1133 (void)__assign_domain_page(d, mpaddr, mpaddr, flags);
1137 int
1138 efi_mmio(unsigned long physaddr, unsigned long size)
1140 void *efi_map_start, *efi_map_end;
1141 u64 efi_desc_size;
1142 void* p;
1144 efi_map_start = __va(ia64_boot_param->efi_memmap);
1145 efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
1146 efi_desc_size = ia64_boot_param->efi_memdesc_size;
1148 for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
1149 efi_memory_desc_t* md = (efi_memory_desc_t *)p;
1150 unsigned long start = md->phys_addr;
1151 unsigned long end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
1153 if (start <= physaddr && physaddr < end) {
1154 if ((physaddr + size) > end) {
1155 gdprintk(XENLOG_INFO, "%s: physaddr 0x%lx size = 0x%lx\n",
1156 __func__, physaddr, size);
1157 return 0;
1160 // for io space
1161 if (md->type == EFI_MEMORY_MAPPED_IO ||
1162 md->type == EFI_MEMORY_MAPPED_IO_PORT_SPACE) {
1163 return 1;
1166 // for runtime
1167 // see efi_enter_virtual_mode(void)
1168 // in linux/arch/ia64/kernel/efi.c
1169 if ((md->attribute & EFI_MEMORY_RUNTIME) &&
1170 !(md->attribute & EFI_MEMORY_WB)) {
1171 return 1;
1174 return 0;
1177 if (physaddr < start) {
1178 break;
1182 return 1;
1185 unsigned long
1186 assign_domain_mmio_page(struct domain *d, unsigned long mpaddr,
1187 unsigned long phys_addr, unsigned long size,
1188 unsigned long flags)
1190 unsigned long addr = mpaddr & PAGE_MASK;
1191 unsigned long end = PAGE_ALIGN(mpaddr + size);
1193 if (size == 0) {
1194 gdprintk(XENLOG_INFO, "%s: domain %p mpaddr 0x%lx size = 0x%lx\n",
1195 __func__, d, mpaddr, size);
1197 if (!efi_mmio(mpaddr, size)) {
1198 #ifndef NDEBUG
1199 gdprintk(XENLOG_INFO, "%s: domain %p mpaddr 0x%lx size = 0x%lx\n",
1200 __func__, d, mpaddr, size);
1201 #endif
1202 return -EINVAL;
1205 for (phys_addr &= PAGE_MASK; addr < end;
1206 addr += PAGE_SIZE, phys_addr += PAGE_SIZE) {
1207 __assign_domain_page(d, addr, phys_addr, flags);
1210 return mpaddr;
1213 unsigned long
1214 assign_domain_mach_page(struct domain *d,
1215 unsigned long mpaddr, unsigned long size,
1216 unsigned long flags)
1218 BUG_ON(flags & ASSIGN_pgc_allocated);
1219 assign_domain_same_page(d, mpaddr, size, flags);
1220 return mpaddr;
1223 static void
1224 adjust_page_count_info(struct page_info* page)
1226 struct domain* d = page_get_owner(page);
1227 BUG_ON((page->count_info & PGC_count_mask) != 1);
1228 if (d != NULL) {
1229 int ret = get_page(page, d);
1230 BUG_ON(ret == 0);
1231 } else {
1232 u64 x, nx, y;
1234 y = *((u64*)&page->count_info);
1235 do {
1236 x = y;
1237 nx = x + 1;
1239 BUG_ON((x >> 32) != 0);
1240 BUG_ON((nx & PGC_count_mask) != 2);
1241 y = cmpxchg((u64*)&page->count_info, x, nx);
1242 } while (unlikely(y != x));
1246 static void
1247 domain_put_page(struct domain* d, unsigned long mpaddr,
1248 volatile pte_t* ptep, pte_t old_pte, int clear_PGC_allocate)
1250 unsigned long mfn = pte_pfn(old_pte);
1251 struct page_info* page = mfn_to_page(mfn);
1253 if (pte_pgc_allocated(old_pte)) {
1254 if (page_get_owner(page) == d || page_get_owner(page) == NULL) {
1255 BUG_ON(get_gpfn_from_mfn(mfn) != (mpaddr >> PAGE_SHIFT));
1256 set_gpfn_from_mfn(mfn, INVALID_M2P_ENTRY);
1257 } else {
1258 BUG();
1261 if (likely(clear_PGC_allocate)) {
1262 if (!test_and_clear_bit(_PGC_allocated, &page->count_info))
1263 BUG();
1264 /* put_page() is done by domain_page_flush_and_put() */
1265 } else {
1266 // In this case, the page reference count mustn't be touched.
1267 // domain_page_flush_and_put() decrements it, so we increment
1268 // it in advance. This is the slow path.
1269 //
1270 // guest_remove_page(): owner = d, count_info = 1
1271 // memory_exchange(): owner = NULL, count_info = 1
1272 adjust_page_count_info(page);
1275 domain_page_flush_and_put(d, mpaddr, ptep, old_pte, page);
1278 // The caller must get_page(mfn_to_page(mfn)) before calling.
1279 // The caller must call set_gpfn_from_mfn() beforehand if necessary,
1280 // and because the set_gpfn_from_mfn() result must be visible before the pte xchg,
1281 // the caller must use a memory barrier. NOTE: xchg has acquire semantics.
1282 // flags: ASSIGN_xxx
1283 static void
1284 assign_domain_page_replace(struct domain *d, unsigned long mpaddr,
1285 unsigned long mfn, unsigned long flags)
1287 struct mm_struct *mm = &d->arch.mm;
1288 volatile pte_t* pte;
1289 pte_t old_pte;
1290 pte_t npte;
1291 unsigned long prot = flags_to_prot(flags);
1293 pte = lookup_alloc_domain_pte(d, mpaddr);
1295 // update pte
1296 npte = pfn_pte(mfn, __pgprot(prot));
1297 old_pte = ptep_xchg(mm, mpaddr, pte, npte);
1298 if (pte_mem(old_pte)) {
1299 unsigned long old_mfn = pte_pfn(old_pte);
1301 // The mfn == old_mfn case can happen when a domain maps a granted page
1302 // twice with the same pseudo physical address.
1303 // It's nonsense, but allowed.
1304 // __gnttab_map_grant_ref()
1305 // => create_host_mapping()
1306 // => assign_domain_page_replace()
1307 if (mfn != old_mfn) {
1308 domain_put_page(d, mpaddr, pte, old_pte, 1);
1311 perfc_incr(assign_domain_page_replace);
1314 // The caller must get_page(new_page) beforehand.
1315 // Only steal_page() calls this function.
1316 static int
1317 assign_domain_page_cmpxchg_rel(struct domain* d, unsigned long mpaddr,
1318 struct page_info* old_page,
1319 struct page_info* new_page,
1320 unsigned long flags, int clear_PGC_allocate)
1322 struct mm_struct *mm = &d->arch.mm;
1323 volatile pte_t* pte;
1324 unsigned long old_mfn;
1325 unsigned long old_prot;
1326 pte_t old_pte;
1327 unsigned long new_mfn;
1328 unsigned long new_prot;
1329 pte_t new_pte;
1330 pte_t ret_pte;
1332 BUG_ON((flags & ASSIGN_pgc_allocated) == 0);
1333 pte = lookup_alloc_domain_pte(d, mpaddr);
1335 again:
1336 old_prot = pte_val(*pte) & ~_PAGE_PPN_MASK;
1337 old_mfn = page_to_mfn(old_page);
1338 old_pte = pfn_pte(old_mfn, __pgprot(old_prot));
1339 if (!pte_present(old_pte)) {
1340 gdprintk(XENLOG_INFO,
1341 "%s: old_pte 0x%lx old_prot 0x%lx old_mfn 0x%lx\n",
1342 __func__, pte_val(old_pte), old_prot, old_mfn);
1343 return -EINVAL;
1346 new_prot = flags_to_prot(flags);
1347 new_mfn = page_to_mfn(new_page);
1348 new_pte = pfn_pte(new_mfn, __pgprot(new_prot));
1350 // update pte
1351 ret_pte = ptep_cmpxchg_rel(mm, mpaddr, pte, old_pte, new_pte);
1352 if (unlikely(pte_val(old_pte) != pte_val(ret_pte))) {
1353 if (pte_pfn(old_pte) == pte_pfn(ret_pte)) {
1354 goto again;
1357 gdprintk(XENLOG_INFO,
1358 "%s: old_pte 0x%lx old_prot 0x%lx old_mfn 0x%lx "
1359 "ret_pte 0x%lx ret_mfn 0x%lx\n",
1360 __func__,
1361 pte_val(old_pte), old_prot, old_mfn,
1362 pte_val(ret_pte), pte_pfn(ret_pte));
1363 return -EINVAL;
1366 BUG_ON(!pte_mem(old_pte));
1367 BUG_ON(!pte_pgc_allocated(old_pte));
1368 BUG_ON(page_get_owner(old_page) != d);
1369 BUG_ON(get_gpfn_from_mfn(old_mfn) != (mpaddr >> PAGE_SHIFT));
1370 BUG_ON(old_mfn == new_mfn);
1372 set_gpfn_from_mfn(old_mfn, INVALID_M2P_ENTRY);
1373 if (likely(clear_PGC_allocate)) {
1374 if (!test_and_clear_bit(_PGC_allocated, &old_page->count_info))
1375 BUG();
1376 } else {
1377 int ret;
1378 // adjust count_info for domain_page_flush_and_put()
1379 // This is the slow path.
1380 BUG_ON(!test_bit(_PGC_allocated, &old_page->count_info));
1381 BUG_ON(d == NULL);
1382 ret = get_page(old_page, d);
1383 BUG_ON(ret == 0);
1386 domain_page_flush_and_put(d, mpaddr, pte, old_pte, old_page);
1387 perfc_incr(assign_domain_pge_cmpxchg_rel);
1388 return 0;
1391 static void
1392 zap_domain_page_one(struct domain *d, unsigned long mpaddr,
1393 int clear_PGC_allocate, unsigned long mfn)
1395 struct mm_struct *mm = &d->arch.mm;
1396 volatile pte_t *pte;
1397 pte_t old_pte;
1398 struct page_info *page;
1400 pte = lookup_noalloc_domain_pte_none(d, mpaddr);
1401 if (pte == NULL)
1402 return;
1403 if (pte_none(*pte))
1404 return;
1406 if (mfn == INVALID_MFN) {
1407 // clear pte
1408 old_pte = ptep_get_and_clear(mm, mpaddr, pte);
1409 mfn = pte_pfn(old_pte);
1410 } else {
1411 unsigned long old_arflags;
1412 pte_t new_pte;
1413 pte_t ret_pte;
1415 again:
1416 // memory_exchange() calls guest_physmap_remove_page() with
1417 // a stolen page, i.e. page owner == NULL.
1418 BUG_ON(page_get_owner(mfn_to_page(mfn)) != d &&
1419 page_get_owner(mfn_to_page(mfn)) != NULL);
1420 old_arflags = pte_val(*pte) & ~_PAGE_PPN_MASK;
1421 old_pte = pfn_pte(mfn, __pgprot(old_arflags));
1422 new_pte = __pte(0);
1424 // update pte
1425 ret_pte = ptep_cmpxchg_rel(mm, mpaddr, pte, old_pte, new_pte);
1426 if (unlikely(pte_val(old_pte) != pte_val(ret_pte))) {
1427 if (pte_pfn(old_pte) == pte_pfn(ret_pte)) {
1428 goto again;
1431 gdprintk(XENLOG_INFO, "%s: old_pte 0x%lx old_arflags 0x%lx mfn 0x%lx "
1432 "ret_pte 0x%lx ret_mfn 0x%lx\n",
1433 __func__,
1434 pte_val(old_pte), old_arflags, mfn,
1435 pte_val(ret_pte), pte_pfn(ret_pte));
1436 return;
1438 BUG_ON(mfn != pte_pfn(ret_pte));
1441 page = mfn_to_page(mfn);
1442 BUG_ON((page->count_info & PGC_count_mask) == 0);
1444 BUG_ON(clear_PGC_allocate && (page_get_owner(page) == NULL));
1445 domain_put_page(d, mpaddr, pte, old_pte, clear_PGC_allocate);
1446 perfc_incr(zap_dcomain_page_one);
1449 unsigned long
1450 dom0vp_zap_physmap(struct domain *d, unsigned long gpfn,
1451 unsigned int extent_order)
1453 if (extent_order != 0) {
1454 //XXX
1455 return -ENOSYS;
1458 zap_domain_page_one(d, gpfn << PAGE_SHIFT, 1, INVALID_MFN);
1459 perfc_incr(dom0vp_zap_physmap);
1460 return 0;
1463 static unsigned long
1464 __dom0vp_add_physmap(struct domain* d, unsigned long gpfn,
1465 unsigned long mfn_or_gmfn,
1466 unsigned long flags, domid_t domid, int is_gmfn)
1468 int error = -EINVAL;
1469 struct domain* rd;
1470 unsigned long mfn;
1472 /* Not allowed by a domain. */
1473 if (flags & (ASSIGN_nocache | ASSIGN_pgc_allocated))
1474 return -EINVAL;
1476 rd = get_domain_by_id(domid);
1477 if (unlikely(rd == NULL)) {
1478 switch (domid) {
1479 case DOMID_XEN:
1480 rd = dom_xen;
1481 break;
1482 case DOMID_IO:
1483 rd = dom_io;
1484 break;
1485 default:
1486 gdprintk(XENLOG_INFO, "d 0x%p domid %d "
1487 "gpfn 0x%lx mfn_or_gmfn 0x%lx flags 0x%lx domid %d\n",
1488 d, d->domain_id, gpfn, mfn_or_gmfn, flags, domid);
1489 return -ESRCH;
1491 BUG_ON(rd == NULL);
1492 get_knownalive_domain(rd);
1495 if (unlikely(rd == d))
1496 goto out1;
1497 /*
1498 * DOMID_XEN and DOMID_IO don't have their own p2m table.
1499 * It can be considered that their p2m conversion is p==m.
1500 */
1501 if (likely(is_gmfn && domid != DOMID_XEN && domid != DOMID_IO))
1502 mfn = gmfn_to_mfn(rd, mfn_or_gmfn);
1503 else
1504 mfn = mfn_or_gmfn;
1505 if (unlikely(!mfn_valid(mfn) || get_page(mfn_to_page(mfn), rd) == 0))
1506 goto out1;
1508 error = 0;
1509 BUG_ON(page_get_owner(mfn_to_page(mfn)) == d &&
1510 get_gpfn_from_mfn(mfn) != INVALID_M2P_ENTRY);
1511 assign_domain_page_replace(d, gpfn << PAGE_SHIFT, mfn, flags);
1512 //don't update p2m table because this page belongs to rd, not d.
1513 perfc_incr(dom0vp_add_physmap);
1514 out1:
1515 put_domain(rd);
1516 return error;
1519 unsigned long
1520 dom0vp_add_physmap(struct domain* d, unsigned long gpfn, unsigned long mfn,
1521 unsigned long flags, domid_t domid)
1523 return __dom0vp_add_physmap(d, gpfn, mfn, flags, domid, 0);
1526 unsigned long
1527 dom0vp_add_physmap_with_gmfn(struct domain* d, unsigned long gpfn,
1528 unsigned long gmfn, unsigned long flags,
1529 domid_t domid)
1531 return __dom0vp_add_physmap(d, gpfn, gmfn, flags, domid, 1);
1534 #ifdef CONFIG_XEN_IA64_EXPOSE_P2M
1535 #define P2M_PFN_ROUNDUP(x) (((x) + PTRS_PER_PTE - 1) & \
1536 ~(PTRS_PER_PTE - 1))
1537 #define P2M_PFN_ROUNDDOWN(x) ((x) & ~(PTRS_PER_PTE - 1))
1538 #define P2M_NUM_PFN(x) (((x) + PTRS_PER_PTE - 1) / PTRS_PER_PTE)
1539 #define MD_END(md) ((md)->phys_addr + \
1540 ((md)->num_pages << EFI_PAGE_SHIFT))
1541 static struct page_info* p2m_pte_zero_page = NULL;
1543 /* This must be called before dom0 p2m table allocation */
1544 void __init
1545 expose_p2m_init(void)
1547 pte_t* pte;
1549 /*
1550 * Initialise our DOMID_P2M domain.
1551 * This domain owns p2m table pages.
1552 */
1553 dom_p2m = alloc_domain(DOMID_P2M);
1554 BUG_ON(dom_p2m == NULL);
1555 dom_p2m->max_pages = ~0U;
1557 pte = pte_alloc_one_kernel(NULL, 0);
1558 BUG_ON(pte == NULL);
1559 smp_mb();// make contents of the page visible.
1560 p2m_pte_zero_page = virt_to_page(pte);
1563 // allocate pgd, pmd of dest_dom if necessary
1564 static int
1565 allocate_pgd_pmd(struct domain* dest_dom, unsigned long dest_gpfn,
1566 struct domain* src_dom,
1567 unsigned long src_gpfn, unsigned long num_src_gpfn)
1569 unsigned long i = 0;
1571 BUG_ON((src_gpfn % PTRS_PER_PTE) != 0);
1572 BUG_ON((num_src_gpfn % PTRS_PER_PTE) != 0);
1574 while (i < num_src_gpfn) {
1575 volatile pte_t* src_pte;
1576 volatile pte_t* dest_pte;
1578 src_pte = lookup_noalloc_domain_pte(src_dom,
1579 (src_gpfn + i) << PAGE_SHIFT);
1580 if (src_pte == NULL) {
1581 i++;
1582 continue;
1585 dest_pte = lookup_alloc_domain_pte(dest_dom,
1586 (dest_gpfn << PAGE_SHIFT) +
1587 i * sizeof(pte_t));
1588 if (dest_pte == NULL) {
1589 gdprintk(XENLOG_INFO, "%s failed to allocate pte page\n",
1590 __func__);
1591 return -ENOMEM;
1594 // skip to next pte page
1595 i = P2M_PFN_ROUNDDOWN(i + PTRS_PER_PTE);
1597 return 0;
1600 static int
1601 expose_p2m_page(struct domain* d, unsigned long mpaddr, struct page_info* page)
1603 int ret = get_page(page, dom_p2m);
1604 BUG_ON(ret != 1);
1605 return __assign_domain_page(d, mpaddr, page_to_maddr(page),
1606 ASSIGN_readonly);
1609 // expose pte page
1610 static int
1611 expose_p2m_range(struct domain* dest_dom, unsigned long dest_gpfn,
1612 struct domain* src_dom,
1613 unsigned long src_gpfn, unsigned long num_src_gpfn)
1615 unsigned long i = 0;
1617 BUG_ON((src_gpfn % PTRS_PER_PTE) != 0);
1618 BUG_ON((num_src_gpfn % PTRS_PER_PTE) != 0);
1620 while (i < num_src_gpfn) {
1621 volatile pte_t* pte;
1623 pte = lookup_noalloc_domain_pte(src_dom, (src_gpfn + i) << PAGE_SHIFT);
1624 if (pte == NULL) {
1625 i++;
1626 continue;
1629 if (expose_p2m_page(dest_dom,
1630 (dest_gpfn << PAGE_SHIFT) + i * sizeof(pte_t),
1631 virt_to_page(pte)) < 0) {
1632 gdprintk(XENLOG_INFO, "%s failed to assign page\n", __func__);
1633 return -EAGAIN;
1636 // skip to next pte page
1637 i = P2M_PFN_ROUNDDOWN(i + PTRS_PER_PTE);
1639 return 0;
1642 // expose p2m_pte_zero_page
1643 static int
1644 expose_zero_page(struct domain* dest_dom, unsigned long dest_gpfn,
1645 unsigned long num_src_gpfn)
1647 unsigned long i;
1649 for (i = 0; i < P2M_NUM_PFN(num_src_gpfn); i++) {
1650 volatile pte_t* pte;
1651 pte = lookup_noalloc_domain_pte(dest_dom,
1652 (dest_gpfn + i) << PAGE_SHIFT);
1653 if (pte == NULL || pte_present(*pte))
1654 continue;
1656 if (expose_p2m_page(dest_dom, (dest_gpfn + i) << PAGE_SHIFT,
1657 p2m_pte_zero_page) < 0) {
1658 gdprintk(XENLOG_INFO, "%s failed to assign zero-pte page\n",
1659 __func__);
1660 return -EAGAIN;
1663 return 0;
1666 static int
1667 expose_p2m(struct domain* dest_dom, unsigned long dest_gpfn,
1668 struct domain* src_dom,
1669 unsigned long src_gpfn, unsigned long num_src_gpfn)
1671 if (allocate_pgd_pmd(dest_dom, dest_gpfn,
1672 src_dom, src_gpfn, num_src_gpfn))
1673 return -ENOMEM;
1675 if (expose_p2m_range(dest_dom, dest_gpfn,
1676 src_dom, src_gpfn, num_src_gpfn))
1677 return -EAGAIN;
1679 if (expose_zero_page(dest_dom, dest_gpfn, num_src_gpfn))
1680 return -EAGAIN;
1682 return 0;
1685 static void
1686 unexpose_p2m(struct domain* dest_dom,
1687 unsigned long dest_gpfn, unsigned long num_dest_gpfn)
1689 unsigned long i;
1691 for (i = 0; i < num_dest_gpfn; i++) {
1692 zap_domain_page_one(dest_dom, (dest_gpfn + i) << PAGE_SHIFT,
1693 0, INVALID_MFN);
1697 // It is possible to optimize the loop, but this isn't performance critical.
1698 unsigned long
1699 dom0vp_expose_p2m(struct domain* d,
1700 unsigned long conv_start_gpfn,
1701 unsigned long assign_start_gpfn,
1702 unsigned long expose_size, unsigned long granule_pfn)
1704 unsigned long ret;
1705 unsigned long expose_num_pfn = expose_size >> PAGE_SHIFT;
1707 if ((expose_size % PAGE_SIZE) != 0 ||
1708 (granule_pfn % PTRS_PER_PTE) != 0 ||
1709 (expose_num_pfn % PTRS_PER_PTE) != 0 ||
1710 (conv_start_gpfn % granule_pfn) != 0 ||
1711 (assign_start_gpfn % granule_pfn) != 0 ||
1712 (expose_num_pfn % granule_pfn) != 0) {
1713 gdprintk(XENLOG_INFO,
1714 "%s conv_start_gpfn 0x%016lx assign_start_gpfn 0x%016lx "
1715 "expose_size 0x%016lx granulte_pfn 0x%016lx\n", __func__,
1716 conv_start_gpfn, assign_start_gpfn, expose_size, granule_pfn);
1717 return -EINVAL;
1720 if (granule_pfn != PTRS_PER_PTE) {
1721 gdprintk(XENLOG_INFO,
1722 "%s granule_pfn 0x%016lx PTRS_PER_PTE 0x%016lx\n",
1723 __func__, granule_pfn, PTRS_PER_PTE);
1724 return -ENOSYS;
1726 ret = expose_p2m(d, assign_start_gpfn,
1727 d, conv_start_gpfn, expose_num_pfn);
1728 return ret;
1731 static int
1732 memmap_info_copy_from_guest(struct xen_ia64_memmap_info* memmap_info,
1733 char** memmap_p,
1734 XEN_GUEST_HANDLE(char) buffer)
1736 char *memmap;
1737 char *p;
1738 char *memmap_end;
1739 efi_memory_desc_t *md;
1740 unsigned long start;
1741 unsigned long end;
1742 efi_memory_desc_t *prev_md;
1744 if (copy_from_guest((char*)memmap_info, buffer, sizeof(*memmap_info)))
1745 return -EFAULT;
1746 if (memmap_info->efi_memdesc_size < sizeof(efi_memory_desc_t) ||
1747 memmap_info->efi_memmap_size < memmap_info->efi_memdesc_size ||
1748 (memmap_info->efi_memmap_size % memmap_info->efi_memdesc_size) != 0)
1749 return -EINVAL;
1751 memmap = _xmalloc(memmap_info->efi_memmap_size,
1752 __alignof__(efi_memory_desc_t));
1753 if (memmap == NULL)
1754 return -ENOMEM;
1755 if (copy_from_guest_offset(memmap, buffer, sizeof(*memmap_info),
1756 memmap_info->efi_memmap_size)) {
1757 xfree(memmap);
1758 return -EFAULT;
1761 /* integrity check & simplify */
1762 sort(memmap, memmap_info->efi_memmap_size / memmap_info->efi_memdesc_size,
1763 memmap_info->efi_memdesc_size, efi_mdt_cmp, NULL);
1765 /* alignment & overlap check */
1766 prev_md = NULL;
1767 p = memmap;
1768 memmap_end = memmap + memmap_info->efi_memmap_size;
1769 for (p = memmap; p < memmap_end; p += memmap_info->efi_memdesc_size) {
1770 md = (efi_memory_desc_t*)p;
1771 start = md->phys_addr;
1773 if (start & ((1UL << EFI_PAGE_SHIFT) - 1) || md->num_pages == 0) {
1774 xfree(memmap);
1775 return -EINVAL;
1778 if (prev_md != NULL) {
1779 unsigned long prev_end = MD_END(prev_md);
1780 if (prev_end > start) {
1781 xfree(memmap);
1782 return -EINVAL;
1786 prev_md = (efi_memory_desc_t *)p;
1789 /* coalesce */
1790 prev_md = NULL;
1791 p = memmap;
1792 while (p < memmap_end) {
1793 md = (efi_memory_desc_t*)p;
1794 start = md->phys_addr;
1795 end = MD_END(md);
1797 start = P2M_PFN_ROUNDDOWN(start >> PAGE_SHIFT) << PAGE_SHIFT;
1798 end = P2M_PFN_ROUNDUP(end >> PAGE_SHIFT) << PAGE_SHIFT;
1799 md->phys_addr = start;
1800 md->num_pages = (end - start) >> EFI_PAGE_SHIFT;
1802 if (prev_md != NULL) {
1803 unsigned long prev_end = MD_END(prev_md);
1804 if (prev_end >= start) {
1805 size_t left;
1806 end = max(prev_end, end);
1807 prev_md->num_pages = (end - prev_md->phys_addr) >> EFI_PAGE_SHIFT;
1809 left = memmap_end - p;
1810 if (left > memmap_info->efi_memdesc_size) {
1811 left -= memmap_info->efi_memdesc_size;
1812 memmove(p, p + memmap_info->efi_memdesc_size, left);
1815 memmap_info->efi_memmap_size -= memmap_info->efi_memdesc_size;
1816 memmap_end -= memmap_info->efi_memdesc_size;
1817 continue;
1821 prev_md = md;
1822 p += memmap_info->efi_memdesc_size;
1825 if (copy_to_guest(buffer, (char*)memmap_info, sizeof(*memmap_info)) ||
1826 copy_to_guest_offset(buffer, sizeof(*memmap_info),
1827 (char*)memmap, memmap_info->efi_memmap_size)) {
1828 xfree(memmap);
1829 return -EFAULT;
1832 *memmap_p = memmap;
1833 return 0;
1836 static int
1837 foreign_p2m_allocate_pte(struct domain* d,
1838 const struct xen_ia64_memmap_info* memmap_info,
1839 const void* memmap)
1841 const void* memmap_end = memmap + memmap_info->efi_memmap_size;
1842 const void* p;
1844 for (p = memmap; p < memmap_end; p += memmap_info->efi_memdesc_size) {
1845 const efi_memory_desc_t* md = p;
1846 unsigned long start = md->phys_addr;
1847 unsigned long end = MD_END(md);
1848 unsigned long gpaddr;
1850 for (gpaddr = start; gpaddr < end; gpaddr += PAGE_SIZE) {
1851 if (lookup_alloc_domain_pte(d, gpaddr) == NULL) {
1852 return -ENOMEM;
1857 return 0;
1860 struct foreign_p2m_region {
1861 unsigned long gpfn;
1862 unsigned long num_gpfn;
1863 };
1865 struct foreign_p2m_entry {
1866 struct list_head list;
1867 int busy;
1869 /* src domain */
1870 struct domain* src_dom;
1872 /* region into which foreign p2m table is mapped */
1873 unsigned long gpfn;
1874 unsigned long num_gpfn;
1875 unsigned int num_region;
1876 struct foreign_p2m_region region[0];
1877 };
1879 /* caller must increment the reference count of src_dom */
1880 static int
1881 foreign_p2m_alloc(struct foreign_p2m* foreign_p2m,
1882 unsigned long dest_gpfn, struct domain* src_dom,
1883 struct xen_ia64_memmap_info* memmap_info, void* memmap,
1884 struct foreign_p2m_entry** entryp)
1886 void* memmap_end = memmap + memmap_info->efi_memmap_size;
1887 efi_memory_desc_t* md;
1888 unsigned long dest_gpfn_end;
1889 unsigned long src_gpfn;
1890 unsigned long src_gpfn_end;
1892 unsigned int num_region;
1893 struct foreign_p2m_entry* entry;
1894 struct foreign_p2m_entry* prev;
1895 struct foreign_p2m_entry* pos;
1897 num_region = (memmap_end - memmap) / memmap_info->efi_memdesc_size;
1899 md = memmap;
1900 src_gpfn = P2M_PFN_ROUNDDOWN(md->phys_addr >> PAGE_SHIFT);
1902 md = memmap + (num_region - 1) * memmap_info->efi_memdesc_size;
1903 src_gpfn_end = MD_END(md) >> PAGE_SHIFT;
1904 if (src_gpfn_end >
1905 P2M_PFN_ROUNDUP(src_dom->arch.convmem_end >> PAGE_SHIFT))
1906 return -EINVAL;
1908 src_gpfn_end = P2M_PFN_ROUNDUP(src_gpfn_end);
1909 dest_gpfn_end = dest_gpfn + P2M_NUM_PFN(src_gpfn_end - src_gpfn);
1910 entry = _xmalloc(sizeof(*entry) + num_region * sizeof(entry->region[0]),
1911 __alignof__(*entry));
1912 if (entry == NULL)
1913 return -ENOMEM;
1915 entry->busy = 1;
1916 entry->gpfn = dest_gpfn;
1917 entry->num_gpfn = dest_gpfn_end - dest_gpfn;
1918 entry->src_dom = src_dom;
1919 entry->num_region = 0;
1920 memset(entry->region, 0, sizeof(entry->region[0]) * num_region);
1921 prev = NULL;
1923 spin_lock(&foreign_p2m->lock);
1924 if (list_empty(&foreign_p2m->head))
1925 prev = (struct foreign_p2m_entry*)&foreign_p2m->head;
1927 list_for_each_entry(pos, &foreign_p2m->head, list) {
1928 if (pos->gpfn + pos->num_gpfn < dest_gpfn) {
1929 prev = pos;
1930 continue;
1933 if (dest_gpfn_end < pos->gpfn) {
1934 if (prev != NULL && prev->gpfn + prev->num_gpfn > dest_gpfn)
1935 prev = NULL;/* overlap */
1936 break;
1939 /* overlap */
1940 prev = NULL;
1941 break;
1943 if (prev != NULL) {
1944 list_add(&entry->list, &prev->list);
1945 spin_unlock(&foreign_p2m->lock);
1946 *entryp = entry;
1947 return 0;
1949 spin_unlock(&foreign_p2m->lock);
1950 xfree(entry);
1951 return -EBUSY;
1954 static void
1955 foreign_p2m_unexpose(struct domain* dest_dom, struct foreign_p2m_entry* entry)
1957 unsigned int i;
1959 BUG_ON(!entry->busy);
1960 for (i = 0; i < entry->num_region; i++)
1961 unexpose_p2m(dest_dom,
1962 entry->region[i].gpfn, entry->region[i].num_gpfn);
1965 static void
1966 foreign_p2m_unbusy(struct foreign_p2m* foreign_p2m,
1967 struct foreign_p2m_entry* entry)
1969 spin_lock(&foreign_p2m->lock);
1970 BUG_ON(!entry->busy);
1971 entry->busy = 0;
1972 spin_unlock(&foreign_p2m->lock);
1975 static void
1976 foreign_p2m_free(struct foreign_p2m* foreign_p2m,
1977 struct foreign_p2m_entry* entry)
1979 spin_lock(&foreign_p2m->lock);
1980 BUG_ON(!entry->busy);
1981 list_del(&entry->list);
1982 spin_unlock(&foreign_p2m->lock);
1984 put_domain(entry->src_dom);
1985 xfree(entry);
1988 void
1989 foreign_p2m_init(struct domain* d)
1991 struct foreign_p2m* foreign_p2m = &d->arch.foreign_p2m;
1992 INIT_LIST_HEAD(&foreign_p2m->head);
1993 spin_lock_init(&foreign_p2m->lock);
1996 void
1997 foreign_p2m_destroy(struct domain* d)
1999 struct foreign_p2m* foreign_p2m = &d->arch.foreign_p2m;
2000 struct foreign_p2m_entry* entry;
2001 struct foreign_p2m_entry* n;
2003 spin_lock(&foreign_p2m->lock);
2004 list_for_each_entry_safe(entry, n, &foreign_p2m->head, list) {
2005 /* mm_teardown() has already cleared the p2m table */
2006 /* foreign_p2m_unexpose(d, entry);*/
2007 list_del(&entry->list);
2008 put_domain(entry->src_dom);
2009 xfree(entry);
2011 spin_unlock(&foreign_p2m->lock);
2014 unsigned long
2015 dom0vp_expose_foreign_p2m(struct domain* dest_dom,
2016 unsigned long dest_gpfn,
2017 domid_t domid,
2018 XEN_GUEST_HANDLE(char) buffer,
2019 unsigned long flags)
2021 unsigned long ret = 0;
2022 struct domain* src_dom;
2023 struct xen_ia64_memmap_info memmap_info;
2024 char* memmap;
2025 void* memmap_end;
2026 void* p;
2028 struct foreign_p2m_entry* entry;
2030 ret = memmap_info_copy_from_guest(&memmap_info, &memmap, buffer);
2031 if (ret != 0)
2032 return ret;
2034 dest_dom = rcu_lock_domain(dest_dom);
2035 if (dest_dom == NULL) {
2036 ret = -EINVAL;
2037 goto out;
2039 #if 1
2040 // Exposing a domain's own p2m to itself isn't allowed.
2041 // Otherwise the domain could never be destroyed, because
2042 // nothing would decrement its reference count.
2043 if (domid == dest_dom->domain_id) {
2044 ret = -EINVAL;
2045 goto out;
2047 #endif
2049 src_dom = get_domain_by_id(domid);
2050 if (src_dom == NULL) {
2051 ret = -EINVAL;
2052 goto out_unlock;
2055 if (flags & IA64_DOM0VP_EFP_ALLOC_PTE) {
2056 ret = foreign_p2m_allocate_pte(src_dom, &memmap_info, memmap);
2057 if (ret != 0)
2058 goto out_unlock;
2061 ret = foreign_p2m_alloc(&dest_dom->arch.foreign_p2m, dest_gpfn,
2062 src_dom, &memmap_info, memmap, &entry);
2063 if (ret != 0)
2064 goto out_unlock;
2066 memmap_end = memmap + memmap_info.efi_memmap_size;
2067 for (p = memmap; p < memmap_end; p += memmap_info.efi_memdesc_size) {
2068 efi_memory_desc_t* md = p;
2069 unsigned long src_gpfn =
2070 P2M_PFN_ROUNDDOWN(md->phys_addr >> PAGE_SHIFT);
2071 unsigned long src_gpfn_end =
2072 P2M_PFN_ROUNDUP(MD_END(md) >> PAGE_SHIFT);
2073 unsigned long num_src_gpfn = src_gpfn_end - src_gpfn;
2075 ret = expose_p2m(dest_dom, dest_gpfn + src_gpfn / PTRS_PER_PTE,
2076 src_dom, src_gpfn, num_src_gpfn);
2077 if (ret != 0)
2078 break;
2080 entry->region[entry->num_region].gpfn =
2081 dest_gpfn + src_gpfn / PTRS_PER_PTE;
2082 entry->region[entry->num_region].num_gpfn = P2M_NUM_PFN(num_src_gpfn);
2083 entry->num_region++;
2086 if (ret == 0) {
2087 foreign_p2m_unbusy(&dest_dom->arch.foreign_p2m, entry);
2088 } else {
2089 foreign_p2m_unexpose(dest_dom, entry);
2090 foreign_p2m_free(&dest_dom->arch.foreign_p2m, entry);
2093 out_unlock:
2094 rcu_unlock_domain(dest_dom);
2095 out:
2096 xfree(memmap);
2097 return ret;
2100 unsigned long
2101 dom0vp_unexpose_foreign_p2m(struct domain* dest_dom,
2102 unsigned long dest_gpfn,
2103 domid_t domid)
2105 int ret = -ENOENT;
2106 struct foreign_p2m* foreign_p2m = &dest_dom->arch.foreign_p2m;
2107 struct foreign_p2m_entry* entry;
2109 dest_dom = rcu_lock_domain(dest_dom);
2110 if (dest_dom == NULL)
2111 return ret;
2112 spin_lock(&foreign_p2m->lock);
2113 list_for_each_entry(entry, &foreign_p2m->head, list) {
2114 if (entry->gpfn < dest_gpfn)
2115 continue;
2116 if (dest_gpfn < entry->gpfn)
2117 break;
2119 if (domid == entry->src_dom->domain_id)
2120 ret = 0;
2121 else
2122 ret = -EINVAL;
2123 break;
2125 if (ret == 0) {
2126 if (entry->busy == 0)
2127 entry->busy = 1;
2128 else
2129 ret = -EBUSY;
2131 spin_unlock(&foreign_p2m->lock);
2133 if (ret == 0) {
2134 foreign_p2m_unexpose(dest_dom, entry);
2135 foreign_p2m_free(&dest_dom->arch.foreign_p2m, entry);
2137 rcu_unlock_domain(dest_dom);
2138 return ret;
2140 #endif
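/*
 * Illustrative sketch only (not hypervisor code): with the layout used
 * by dom0vp_expose_foreign_p2m() above, the p2m page holding the pte
 * for src_gpfn is exposed at dest_gpfn + src_gpfn / PTRS_PER_PTE.
 * Assuming a tool has mapped the exposed range contiguously at 'base'
 * (with 'base' corresponding to dest_gpfn), it could locate an
 * individual pte as below; the helper name is hypothetical, only the
 * arithmetic mirrors the expose loop.
 */
#if 0
static volatile pte_t*
foreign_p2m_pte_ptr(char* base, unsigned long src_gpfn)
{
    unsigned long page_idx = src_gpfn / PTRS_PER_PTE;   /* which exposed page */
    unsigned long offset = (src_gpfn % PTRS_PER_PTE) * sizeof(pte_t); /* pte within it */

    return (volatile pte_t*)(base + page_idx * PAGE_SIZE + offset);
}
#endif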
2142 // grant table host mapping
2143 // mpaddr (host_addr): pseudo physical address
2144 // mfn (frame): machine page frame
2145 // flags: GNTMAP_readonly | GNTMAP_application_map | GNTMAP_contains_pte
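// create_grant_host_mapping() rejects GNTMAP_device_map,
// GNTMAP_application_map and GNTMAP_contains_pte, takes a general
// reference on the granted frame and installs it at gpaddr, read-only
// or writable according to GNTMAP_readonly (with TLB tracking when
// CONFIG_XEN_IA64_TLB_TRACK is enabled).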
2146 int
2147 create_grant_host_mapping(unsigned long gpaddr,
2148 unsigned long mfn, unsigned int flags)
2150 struct domain* d = current->domain;
2151 struct page_info* page;
2152 int ret;
2154 if (flags & (GNTMAP_device_map |
2155 GNTMAP_application_map | GNTMAP_contains_pte)) {
2156 gdprintk(XENLOG_INFO, "%s: flags 0x%x\n", __func__, flags);
2157 return GNTST_general_error;
2160 BUG_ON(!mfn_valid(mfn));
2161 page = mfn_to_page(mfn);
2162 ret = get_page(page, page_get_owner(page));
2163 BUG_ON(ret == 0);
2164 assign_domain_page_replace(d, gpaddr, mfn,
2165 #ifdef CONFIG_XEN_IA64_TLB_TRACK
2166 ASSIGN_tlb_track |
2167 #endif
2168 ((flags & GNTMAP_readonly) ?
2169 ASSIGN_readonly : ASSIGN_writable));
2170 perfc_incr(create_grant_host_mapping);
2171 return GNTST_okay;
2174 // grant table host unmapping
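// When new_gpaddr is non-zero, the page currently mapped at new_gpaddr
// becomes the replacement: its pte is cleared there and installed at
// gpaddr in place of the grant mapping.  When new_gpaddr is zero the
// grant mapping is simply torn down (the replacement pte stays __pte(0)).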
2175 int
2176 replace_grant_host_mapping(unsigned long gpaddr,
2177 unsigned long mfn, unsigned long new_gpaddr, unsigned int flags)
2179 struct domain* d = current->domain;
2180 unsigned long gpfn = gpaddr >> PAGE_SHIFT;
2181 volatile pte_t* pte;
2182 unsigned long cur_arflags;
2183 pte_t cur_pte;
2184 pte_t new_pte = __pte(0);
2185 pte_t old_pte;
2186 struct page_info* page = mfn_to_page(mfn);
2187 struct page_info* new_page = NULL;
2188 volatile pte_t* new_page_pte = NULL;
2190 if (new_gpaddr) {
2191 new_page_pte = lookup_noalloc_domain_pte_none(d, new_gpaddr);
2192 if (likely(new_page_pte != NULL)) {
2193 new_pte = ptep_get_and_clear(&d->arch.mm,
2194 new_gpaddr, new_page_pte);
2195 if (likely(pte_present(new_pte))) {
2196 unsigned long new_page_mfn;
2197 struct domain* page_owner;
2199 new_page_mfn = pte_pfn(new_pte);
2200 new_page = mfn_to_page(new_page_mfn);
2201 page_owner = page_get_owner(new_page);
2202 if (unlikely(page_owner == NULL)) {
2203 gdprintk(XENLOG_INFO,
2204 "%s: page_owner == NULL "
2205 "gpaddr 0x%lx mfn 0x%lx "
2206 "new_gpaddr 0x%lx mfn 0x%lx\n",
2207 __func__, gpaddr, mfn, new_gpaddr, new_page_mfn);
2208 new_page = NULL; /* prevent domain_put_page() */
2209 goto out;
2212 /*
2213 * domain_put_page(clear_PGC_allocated = 0)
2214 * doesn't decrement the refcount of a page with
2215 * pte_pgc_allocated() = 1. Be careful.
2216 */
2217 if (unlikely(!pte_pgc_allocated(new_pte))) {
2218 /* domain_put_page() decrements page refcount. adjust it. */
2219 if (get_page(new_page, page_owner)) {
2220 gdprintk(XENLOG_INFO,
2221 "%s: get_page() failed. "
2222 "gpaddr 0x%lx mfn 0x%lx "
2223 "new_gpaddr 0x%lx mfn 0x%lx\n",
2224 __func__, gpaddr, mfn,
2225 new_gpaddr, new_page_mfn);
2226 goto out;
2229 domain_put_page(d, new_gpaddr, new_page_pte, new_pte, 0);
2230 } else
2231 new_pte = __pte(0);
2235 if (flags & (GNTMAP_application_map | GNTMAP_contains_pte)) {
2236 gdprintk(XENLOG_INFO, "%s: flags 0x%x\n", __func__, flags);
2237 return GNTST_general_error;
2240 pte = lookup_noalloc_domain_pte(d, gpaddr);
2241 if (pte == NULL) {
2242 gdprintk(XENLOG_INFO, "%s: gpaddr 0x%lx mfn 0x%lx\n",
2243 __func__, gpaddr, mfn);
2244 goto out;
2247 again:
2248 cur_arflags = pte_val(*pte) & ~_PAGE_PPN_MASK;
2249 cur_pte = pfn_pte(mfn, __pgprot(cur_arflags));
2250 if (!pte_present(cur_pte) ||
2251 (page_get_owner(page) == d && get_gpfn_from_mfn(mfn) == gpfn)) {
2252 gdprintk(XENLOG_INFO, "%s: gpaddr 0x%lx mfn 0x%lx cur_pte 0x%lx\n",
2253 __func__, gpaddr, mfn, pte_val(cur_pte));
2254 goto out;
2257 old_pte = ptep_cmpxchg_rel(&d->arch.mm, gpaddr, pte, cur_pte, new_pte);
2258 if (unlikely(!pte_present(old_pte))) {
2259 gdprintk(XENLOG_INFO, "%s: gpaddr 0x%lx mfn 0x%lx"
2260 " cur_pte 0x%lx old_pte 0x%lx\n",
2261 __func__, gpaddr, mfn, pte_val(cur_pte), pte_val(old_pte));
2262 goto out;
2264 if (unlikely(pte_val(cur_pte) != pte_val(old_pte))) {
2265 if (pte_pfn(old_pte) == mfn) {
2266 goto again;
2268 gdprintk(XENLOG_INFO, "%s gpaddr 0x%lx mfn 0x%lx cur_pte "
2269 "0x%lx old_pte 0x%lx\n",
2270 __func__, gpaddr, mfn, pte_val(cur_pte), pte_val(old_pte));
2271 goto out;
2273 BUG_ON(pte_pfn(old_pte) != mfn);
2275 /* try_to_clear_PGC_allocate(d, page) is not needed. */
2276 BUG_ON(page_get_owner(page) == d &&
2277 get_gpfn_from_mfn(mfn) == gpfn);
2278 BUG_ON(pte_pgc_allocated(old_pte));
2279 domain_page_flush_and_put(d, gpaddr, pte, old_pte, page);
2281 perfc_incr(replace_grant_host_mapping);
2282 return GNTST_okay;
2284 out:
2285 if (new_page)
2286 domain_put_page(d, new_gpaddr, new_page_pte, new_pte, 1);
2287 return GNTST_general_error;
2290 // This heavily depends on the struct page layout.
2291 // gnttab_transfer() calls steal_page() with memflags = 0:
2292 // for a grant table transfer we must refill the gpfn with a fresh page.
2293 // memory_exchange() calls steal_page() with memflags = MEMF_no_refcount:
2294 // for a memory exchange we don't have to refill the gpfn because
2295 // memory_exchange() does that itself.
2296 int
2297 steal_page(struct domain *d, struct page_info *page, unsigned int memflags)
2299 #if 0 /* if big endian */
2300 # error "implement big endian version of steal_page()"
2301 #endif
2302 u32 _d, _nd;
2303 u64 x, nx, y;
2305 if (page_get_owner(page) != d) {
2306 gdprintk(XENLOG_INFO, "%s d 0x%p owner 0x%p\n",
2307 __func__, d, page_get_owner(page));
2308 return -1;
2311 if (!(memflags & MEMF_no_refcount)) {
2312 unsigned long gpfn;
2313 struct page_info *new;
2314 unsigned long new_mfn;
2315 int ret;
2317 new = alloc_domheap_page(d);
2318 if (new == NULL) {
2319 gdprintk(XENLOG_INFO, "alloc_domheap_page() failed\n");
2320 return -1;
2322 // zero out pages for security reasons
2323 clear_page(page_to_virt(new));
2324 // assign_domain_page_cmpxchg_rel() has release semantics
2325 // so smp_mb() isn't needed.
2327 gpfn = get_gpfn_from_mfn(page_to_mfn(page));
2328 if (gpfn == INVALID_M2P_ENTRY) {
2329 free_domheap_page(new);
2330 return -1;
2332 new_mfn = page_to_mfn(new);
2333 set_gpfn_from_mfn(new_mfn, gpfn);
2334 // smp_mb() isn't needed because assign_domain_page_cmpxchg_rel()
2335 // has release semantics.
2337 ret = assign_domain_page_cmpxchg_rel(d, gpfn << PAGE_SHIFT, page, new,
2338 ASSIGN_writable |
2339 ASSIGN_pgc_allocated, 0);
2340 if (ret < 0) {
2341 gdprintk(XENLOG_INFO, "assign_domain_page_cmpxchg_rel failed %d\n",
2342 ret);
2343 set_gpfn_from_mfn(new_mfn, INVALID_M2P_ENTRY);
2344 free_domheap_page(new);
2345 return -1;
2347 perfc_incr(steal_page_refcount);
2350 spin_lock(&d->page_alloc_lock);
2352 /*
2353 * The tricky bit: atomically release ownership while there is just one
2354 * benign reference to the page (PGC_allocated). If that reference
2355 * disappears then the deallocation routine will safely spin.
2356 */
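/*
 * Layout assumed by the cmpxchg below (little endian; see the
 * big-endian #error check at the top of this function):
 *   bits  0..31: page->count_info (PGC_* flags and reference count)
 *   bits 32..63: page->u.inuse._domain (pickled owner pointer)
 * Keeping only the low 32 bits therefore clears the owner atomically
 * while leaving the count untouched.
 */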
2357 _d = pickle_domptr(d);
2358 y = *((u64*)&page->count_info);
2359 do {
2360 x = y;
2361 nx = x & 0xffffffff;
2362 // page->count_info: untouched
2363 // page->u.inuse._domain = 0;
2364 _nd = x >> 32;
2366 if (unlikely(((x & (PGC_count_mask | PGC_allocated)) !=
2367 (1 | PGC_allocated))) ||
2368 unlikely(_nd != _d)) {
2369 struct domain* nd = unpickle_domptr(_nd);
2370 if (nd == NULL) {
2371 gdprintk(XENLOG_INFO, "gnttab_transfer: "
2372 "Bad page %p: ed=%p(%u) 0x%x, "
2373 "sd=%p 0x%x,"
2374 " caf=%016lx, taf=%" PRtype_info
2375 " memflags 0x%x\n",
2376 (void *) page_to_mfn(page),
2377 d, d->domain_id, _d,
2378 nd, _nd,
2379 x,
2380 page->u.inuse.type_info,
2381 memflags);
2382 } else {
2383 gdprintk(XENLOG_WARNING, "gnttab_transfer: "
2384 "Bad page %p: ed=%p(%u) 0x%x, "
2385 "sd=%p(%u) 0x%x,"
2386 " caf=%016lx, taf=%" PRtype_info
2387 " memflags 0x%x\n",
2388 (void *) page_to_mfn(page),
2389 d, d->domain_id, _d,
2390 nd, nd->domain_id, _nd,
2391 x,
2392 page->u.inuse.type_info,
2393 memflags);
2395 spin_unlock(&d->page_alloc_lock);
2396 return -1;
2399 y = cmpxchg((u64*)&page->count_info, x, nx);
2400 } while (unlikely(y != x));
2402 /*
2403 * Unlink from 'd'. At least one reference remains (now anonymous), so
2404 * no one else is spinning to try to delete this page from 'd'.
2405 */
2406 if ( !(memflags & MEMF_no_refcount) )
2407 d->tot_pages--;
2408 list_del(&page->list);
2410 spin_unlock(&d->page_alloc_lock);
2411 perfc_incr(steal_page);
2412 return 0;
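/*
 * guest_physmap_add_page() wires mfn into the domain at gpfn: the m2p
 * entry is set first and smp_mb() orders it before the p2m update in
 * assign_domain_page_replace(), so the reverse mapping is visible no
 * later than the p2m entry itself.
 */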
2415 void
2416 guest_physmap_add_page(struct domain *d, unsigned long gpfn,
2417 unsigned long mfn)
2419 BUG_ON(!mfn_valid(mfn));
2420 BUG_ON(mfn_to_page(mfn)->count_info != (PGC_allocated | 1));
2421 set_gpfn_from_mfn(mfn, gpfn);
2422 smp_mb();
2423 assign_domain_page_replace(d, gpfn << PAGE_SHIFT, mfn,
2424 ASSIGN_writable | ASSIGN_pgc_allocated);
2426 //BUG_ON(mfn != ((lookup_domain_mpa(d, gpfn << PAGE_SHIFT) & _PFN_MASK) >> PAGE_SHIFT));
2428 perfc_incr(guest_physmap_add_page);
2431 void
2432 guest_physmap_remove_page(struct domain *d, unsigned long gpfn,
2433 unsigned long mfn)
2435 BUG_ON(mfn == 0);//XXX
2436 zap_domain_page_one(d, gpfn << PAGE_SHIFT, 0, mfn);
2437 perfc_incr(guest_physmap_remove_page);
2440 static void
2441 domain_page_flush_and_put(struct domain* d, unsigned long mpaddr,
2442 volatile pte_t* ptep, pte_t old_pte,
2443 struct page_info* page)
2445 #ifdef CONFIG_XEN_IA64_TLB_TRACK
2446 struct tlb_track_entry* entry;
2447 #endif
2449 if (shadow_mode_enabled(d))
2450 shadow_mark_page_dirty(d, mpaddr >> PAGE_SHIFT);
2452 #ifndef CONFIG_XEN_IA64_TLB_TRACK
2453 // XXX sledgehammer.
2454 // Ideally we would flush a finer range instead.
2455 domain_flush_vtlb_all(d);
2456 put_page(page);
2457 #else
2458 switch (tlb_track_search_and_remove(d->arch.tlb_track,
2459 ptep, old_pte, &entry)) {
2460 case TLB_TRACK_NOT_TRACKED:
2461 // dprintk(XENLOG_WARNING, "%s TLB_TRACK_NOT_TRACKED\n", __func__);
2462 /* This page was zapped from this domain
2463 * by a memory decrease, an exchange, or dom0vp_zap_physmap.
2464 * I.e. the page was zapped either to return it to xen
2465 * (balloon driver or DMA page allocation) or because a
2466 * page mapped from a foreign domain was unmapped from this domain.
2467 * In the former case the page is to be freed, so that
2468 * freeing can be deferred and batched.
2469 * In the latter case the page was unmapped, so it
2470 * needs to be flushed; to optimize this we
2471 * queue the page and flush the vTLB only once.
2472 * I.e. the caller must call dfree_flush() explicitly.
2473 */
2474 domain_flush_vtlb_all(d);
2475 put_page(page);
2476 break;
2477 case TLB_TRACK_NOT_FOUND:
2478 // dprintk(XENLOG_WARNING, "%s TLB_TRACK_NOT_FOUND\n", __func__);
2479 /* This page is zapped from this domain
2480 * by grant table page unmap.
2481 * Luckily the domain that mapped this page never
2482 * accessed it, so we don't have to flush the vTLB.
2483 * Probably the domain only did DMA.
2484 */
2485 /* do nothing */
2486 put_page(page);
2487 break;
2488 case TLB_TRACK_FOUND:
2489 // dprintk(XENLOG_WARNING, "%s TLB_TRACK_FOUND\n", __func__);
2490 /* This page is zapped from this domain
2491 * by grant table page unmap.
2492 * Fortunately this page is accessed via only one virtual
2493 * memory address, so it is easy to flush.
2494 */
2495 domain_flush_vtlb_track_entry(d, entry);
2496 tlb_track_free_entry(d->arch.tlb_track, entry);
2497 put_page(page);
2498 break;
2499 case TLB_TRACK_MANY:
2500 gdprintk(XENLOG_INFO, "%s TLB_TRACK_MANY\n", __func__);
2501 /* This page is zapped from this domain
2502 * by grant table page unmap.
2503 * Unfortunately this page is accessed via many virtual
2504 * memory addresses (or too many times via a single virtual address),
2505 * so we gave up tracking the virtual addresses.
2506 * A full vTLB flush is necessary.
2507 */
2508 domain_flush_vtlb_all(d);
2509 put_page(page);
2510 break;
2511 case TLB_TRACK_AGAIN:
2512 gdprintk(XENLOG_ERR, "%s TLB_TRACK_AGAIN\n", __func__);
2513 BUG();
2514 break;
2516 #endif
2517 perfc_incr(domain_page_flush_and_put);
2520 int
2521 domain_page_mapped(struct domain* d, unsigned long mpaddr)
2523 volatile pte_t * pte;
2525 pte = lookup_noalloc_domain_pte(d, mpaddr);
2526 if(pte != NULL && !pte_none(*pte))
2527 return 1;
2528 return 0;
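/*
 * domain_cache_flush() walks the whole p2m tree (pgd/pud/pmd/pte) and
 * flushes the machine page behind every present pte, using
 * flush_icache_range() when sync_only is set and flush_dcache_range()
 * otherwise.
 */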
2531 /* Flush cache of domain d. */
2532 void domain_cache_flush (struct domain *d, int sync_only)
2534 struct mm_struct *mm = &d->arch.mm;
2535 volatile pgd_t *pgd = mm->pgd;
2536 unsigned long maddr;
2537 int i,j,k, l;
2538 int nbr_page = 0;
2539 void (*flush_func)(unsigned long start, unsigned long end);
2540 extern void flush_dcache_range (unsigned long, unsigned long);
2542 if (sync_only)
2543 flush_func = &flush_icache_range;
2544 else
2545 flush_func = &flush_dcache_range;
2547 for (i = 0; i < PTRS_PER_PGD; pgd++, i++) {
2548 volatile pud_t *pud;
2549 if (!pgd_present(*pgd)) // acquire semantics
2550 continue;
2551 pud = pud_offset(pgd, 0);
2552 for (j = 0; j < PTRS_PER_PUD; pud++, j++) {
2553 volatile pmd_t *pmd;
2554 if (!pud_present(*pud)) // acquire semantics
2555 continue;
2556 pmd = pmd_offset(pud, 0);
2557 for (k = 0; k < PTRS_PER_PMD; pmd++, k++) {
2558 volatile pte_t *pte;
2559 if (!pmd_present(*pmd)) // acquire semantics
2560 continue;
2561 pte = pte_offset_map(pmd, 0);
2562 for (l = 0; l < PTRS_PER_PTE; pte++, l++) {
2563 if (!pte_present(*pte)) // acquire semantics
2564 continue;
2565 /* Convert PTE to maddr. */
2566 maddr = __va_ul (pte_val(*pte)
2567 & _PAGE_PPN_MASK);
2568 (*flush_func)(maddr, maddr+ PAGE_SIZE);
2569 nbr_page++;
2574 //printk ("domain_cache_flush: %d %d pages\n", d->domain_id, nbr_page);
2577 #ifdef VERBOSE
2578 #define MEM_LOG(_f, _a...) \
2579 printk("DOM%u: (file=mm.c, line=%d) " _f "\n", \
2580 current->domain->domain_id , __LINE__ , ## _a )
2581 #else
2582 #define MEM_LOG(_f, _a...) ((void)0)
2583 #endif
2585 static void free_page_type(struct page_info *page, u32 type)
2589 static int alloc_page_type(struct page_info *page, u32 type)
2591 return 1;
2594 static int opt_p2m_xenheap;
2595 boolean_param("p2m_xenheap", opt_p2m_xenheap);
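/*
 * p2m page-table pages are normally allocated from the domheap with
 * dom_p2m as their owner; booting with "p2m_xenheap" makes
 * pgtable_quicklist_alloc() take them from the xenheap instead and
 * share them with dom_p2m via share_xen_page_with_guest().
 */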
2597 void *pgtable_quicklist_alloc(void)
2599 void *p;
2601 BUG_ON(dom_p2m == NULL);
2602 if (!opt_p2m_xenheap) {
2603 struct page_info *page = alloc_domheap_page(dom_p2m);
2604 if (page == NULL)
2605 return NULL;
2606 p = page_to_virt(page);
2607 clear_page(p);
2608 return p;
2610 p = alloc_xenheap_pages(0);
2611 if (p) {
2612 clear_page(p);
2613 /*
2614 * This page should be read-only, but read-only sharing isn't
2615 * supported yet, so the third argument is 0; it should be 1 once supported.
2616 */
2617 share_xen_page_with_guest(virt_to_page(p), dom_p2m, 0);
2619 return p;
2622 void pgtable_quicklist_free(void *pgtable_entry)
2624 struct page_info* page = virt_to_page(pgtable_entry);
2626 BUG_ON(page_get_owner(page) != dom_p2m);
2627 BUG_ON(page->count_info != (1 | PGC_allocated));
2629 put_page(page);
2630 if (opt_p2m_xenheap)
2631 free_xenheap_page(pgtable_entry);
2634 void put_page_type(struct page_info *page)
2636 u64 nx, x, y = page->u.inuse.type_info;
2638 again:
2639 do {
2640 x = y;
2641 nx = x - 1;
2643 ASSERT((x & PGT_count_mask) != 0);
2645 /*
2646 * The page should always be validated while a reference is held. The
2647 * exception is during domain destruction, when we forcibly invalidate
2648 * page-table pages if we detect a referential loop.
2649 * See domain.c:relinquish_list().
2650 */
2651 ASSERT((x & PGT_validated) || page_get_owner(page)->is_dying);
2653 if ( unlikely((nx & PGT_count_mask) == 0) )
2655 /* Record TLB information for flush later. Races are harmless. */
2656 page->tlbflush_timestamp = tlbflush_current_time();
2658 if ( unlikely((nx & PGT_type_mask) <= PGT_l4_page_table) &&
2659 likely(nx & PGT_validated) )
2661 /*
2662 * Page-table pages must be unvalidated when count is zero. The
2663 * 'free' is safe because the refcnt is non-zero and validated
2664 * bit is clear => other ops will spin or fail.
2665 */
2666 if ( unlikely((y = cmpxchg(&page->u.inuse.type_info, x,
2667 x & ~PGT_validated)) != x) )
2668 goto again;
2669 /* We cleared the 'valid bit', so we do the cleanup. */
2670 free_page_type(page, x);
2671 /* Carry on, but with the 'valid bit' now clear. */
2672 x &= ~PGT_validated;
2673 nx &= ~PGT_validated;
2677 while ( unlikely((y = cmpxchg_rel(&page->u.inuse.type_info, x, nx)) != x) );
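/*
 * Note the pairing of IA64 memory ordering here: get_page_type() below
 * publishes a new type reference with cmpxchg_acq(), while
 * put_page_type() above drops it with cmpxchg_rel().
 */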
2681 int get_page_type(struct page_info *page, u32 type)
2683 u64 nx, x, y = page->u.inuse.type_info;
2685 ASSERT(!(type & ~PGT_type_mask));
2687 again:
2688 do {
2689 x = y;
2690 nx = x + 1;
2691 if ( unlikely((nx & PGT_count_mask) == 0) )
2693 MEM_LOG("Type count overflow on pfn %lx", page_to_mfn(page));
2694 return 0;
2696 else if ( unlikely((x & PGT_count_mask) == 0) )
2698 if ( (x & PGT_type_mask) != type )
2700 /*
2701 * On a type change we check whether stale TLB entries need to be
2702 * flushed. This may be unnecessary (e.g., the page was a GDT/LDT)
2703 * but such circumstances should be very rare.
2704 */
2705 cpumask_t mask =
2706 page_get_owner(page)->domain_dirty_cpumask;
2707 tlbflush_filter(mask, page->tlbflush_timestamp);
2709 if ( unlikely(!cpus_empty(mask)) )
2711 perfc_incr(need_flush_tlb_flush);
2712 flush_tlb_mask(mask);
2715 /* We lose existing type, back pointer, and validity. */
2716 nx &= ~(PGT_type_mask | PGT_validated);
2717 nx |= type;
2719 /* No special validation needed for writable pages. */
2720 /* Page tables and GDT/LDT need to be scanned for validity. */
2721 if ( type == PGT_writable_page )
2722 nx |= PGT_validated;
2725 else if ( unlikely((x & PGT_type_mask) != type) )
2727 if ( ((x & PGT_type_mask) != PGT_l2_page_table) ||
2728 (type != PGT_l1_page_table) )
2729 MEM_LOG("Bad type (saw %08lx != exp %08x) "
2730 "for mfn %016lx (pfn %016lx)",
2731 x, type, page_to_mfn(page),
2732 get_gpfn_from_mfn(page_to_mfn(page)));
2733 return 0;
2735 else if ( unlikely(!(x & PGT_validated)) )
2737 /* Someone else is updating validation of this page. Wait... */
2738 while ( (y = page->u.inuse.type_info) == x )
2739 cpu_relax();
2740 goto again;
2743 while ( unlikely((y = cmpxchg_acq(&page->u.inuse.type_info, x, nx)) != x) );
2745 if ( unlikely(!(nx & PGT_validated)) )
2747 /* Try to validate page type; drop the new reference on failure. */
2748 if ( unlikely(!alloc_page_type(page, type)) )
2750 MEM_LOG("Error while validating mfn %lx (pfn %lx) for type %08x"
2751 ": caf=%08x taf=%" PRtype_info,
2752 page_to_mfn(page), get_gpfn_from_mfn(page_to_mfn(page)),
2753 type, page->count_info, page->u.inuse.type_info);
2754 /* No one else can get a reference. We hold the only ref. */
2755 page->u.inuse.type_info = 0;
2756 return 0;
2759 /* No one else is updating simultaneously. */
2760 __set_bit(_PGT_validated, &page->u.inuse.type_info);
2763 return 1;
2766 int memory_is_conventional_ram(paddr_t p)
2768 return (efi_mem_type(p) == EFI_CONVENTIONAL_MEMORY);
2772 long
2773 arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
2775 switch (op) {
2776 case XENMEM_add_to_physmap:
2778 struct xen_add_to_physmap xatp;
2779 unsigned long prev_mfn, mfn = 0, gpfn;
2780 struct domain *d;
2782 if (copy_from_guest(&xatp, arg, 1))
2783 return -EFAULT;
2785 if (xatp.domid == DOMID_SELF) {
2786 d = get_current_domain();
2788 else if (!IS_PRIV(current->domain))
2789 return -EPERM;
2790 else if ((d = get_domain_by_id(xatp.domid)) == NULL)
2791 return -ESRCH;
2793 /* This hypercall is used for VT-i domain only */
2794 if (!VMX_DOMAIN(d->vcpu[0])) {
2795 put_domain(d);
2796 return -ENOSYS;
2799 switch (xatp.space) {
2800 case XENMAPSPACE_shared_info:
2801 if (xatp.idx == 0)
2802 mfn = virt_to_mfn(d->shared_info);
2803 break;
2804 case XENMAPSPACE_grant_table:
2805 spin_lock(&d->grant_table->lock);
2807 if ((xatp.idx >= nr_grant_frames(d->grant_table)) &&
2808 (xatp.idx < max_nr_grant_frames))
2809 gnttab_grow_table(d, xatp.idx + 1);
2811 if (xatp.idx < nr_grant_frames(d->grant_table))
2812 mfn = virt_to_mfn(d->grant_table->shared[xatp.idx]);
2814 spin_unlock(&d->grant_table->lock);
2815 break;
2816 default:
2817 break;
2820 if (mfn == 0) {
2821 put_domain(d);
2822 return -EINVAL;
2825 LOCK_BIGLOCK(d);
2827 /* Check remapping necessity */
2828 prev_mfn = gmfn_to_mfn(d, xatp.gpfn);
2829 if (mfn == prev_mfn)
2830 goto out;
2832 /* Remove previously mapped page if it was present. */
2833 if (prev_mfn && mfn_valid(prev_mfn)) {
2834 if (is_xen_heap_frame(mfn_to_page(prev_mfn)))
2835 /* Xen heap frames are simply unhooked from this phys slot. */
2836 guest_physmap_remove_page(d, xatp.gpfn, prev_mfn);
2837 else
2838 /* Normal domain memory is freed, to avoid leaking memory. */
2839 guest_remove_page(d, xatp.gpfn);
2842 /* Unmap from old location, if any. */
2843 gpfn = get_gpfn_from_mfn(mfn);
2844 if (gpfn != INVALID_M2P_ENTRY)
2845 guest_physmap_remove_page(d, gpfn, mfn);
2847 /* Map at new location. */
2848 guest_physmap_add_page(d, xatp.gpfn, mfn);
2850 out:
2851 UNLOCK_BIGLOCK(d);
2853 put_domain(d);
2855 break;
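/*
 * XENMEM_machine_memory_map returns the machine memory map to a
 * privileged guest: a xen_ia64_memmap_info header followed by the raw
 * EFI memory descriptors, copied into the guest-supplied buffer.
 */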
2858 case XENMEM_machine_memory_map:
2860 struct xen_memory_map memmap;
2861 struct xen_ia64_memmap_info memmap_info;
2862 XEN_GUEST_HANDLE(char) buffer;
2864 if (!IS_PRIV(current->domain))
2865 return -EINVAL;
2866 if (copy_from_guest(&memmap, arg, 1))
2867 return -EFAULT;
2868 if (memmap.nr_entries <
2869 sizeof(memmap_info) + ia64_boot_param->efi_memmap_size)
2870 return -EINVAL;
2872 memmap.nr_entries =
2873 sizeof(memmap_info) + ia64_boot_param->efi_memmap_size;
2874 memset(&memmap_info, 0, sizeof(memmap_info));
2875 memmap_info.efi_memmap_size = ia64_boot_param->efi_memmap_size;
2876 memmap_info.efi_memdesc_size = ia64_boot_param->efi_memdesc_size;
2877 memmap_info.efi_memdesc_version = ia64_boot_param->efi_memdesc_version;
2879 buffer = guest_handle_cast(memmap.buffer, char);
2880 if (copy_to_guest(buffer, (char*)&memmap_info, sizeof(memmap_info)) ||
2881 copy_to_guest_offset(buffer, sizeof(memmap_info),
2882 (char*)__va(ia64_boot_param->efi_memmap),
2883 ia64_boot_param->efi_memmap_size) ||
2884 copy_to_guest(arg, &memmap, 1))
2885 return -EFAULT;
2886 return 0;
2889 default:
2890 return -ENOSYS;
2893 return 0;
2896 /*
2897 * Local variables:
2898 * mode: C
2899 * c-set-style: "BSD"
2900 * c-basic-offset: 4
2901 * tab-width: 4
2902 * indent-tabs-mode: nil
2903 * End:
2904 */