xen/arch/powerpc/mm.c @ 13943:4ce0b332b572 (ia64/xen-unstable)

[POWERPC][XEN] Move shared page location to public header to share with libxc.
Signed-off-by: Ryan Harper <ryanh@us.ibm.com>
Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>
Author: Hollis Blanchard <hollisb@us.ibm.com>
Date:   Tue Feb 06 13:42:19 2007 -0600
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright (C) IBM Corp. 2005, 2006
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Jimi Xenidis <jimix@watson.ibm.com>
 */

#include <xen/config.h>
#include <xen/mm.h>
#include <xen/shadow.h>
#include <xen/kernel.h>
#include <xen/sched.h>
#include <xen/perfc.h>
#include <asm/init.h>
#include <asm/page.h>
#include <asm/string.h>
#include <public/arch-powerpc.h>

#ifdef VERBOSE
#define MEM_LOG(_f, _a...)                                  \
    printk("DOM%u: (file=mm.c, line=%d) " _f "\n",          \
           current->domain->domain_id , __LINE__ , ## _a )
#else
#define MEM_LOG(_f, _a...) ((void)0)
#endif

/* Frame table and its size in pages. */
struct page_info *frame_table;
unsigned long max_page;
unsigned long total_pages;

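/*
 * Allocate the frame table from the boot allocator and zero it.  The
 * table holds one struct page_info per machine frame up to max_page.
 */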
void __init init_frametable(void)
{
    unsigned long p;
    unsigned long nr_pages;
    int i;

    nr_pages = PFN_UP(max_page * sizeof(struct page_info));

    p = alloc_boot_pages(nr_pages, 1);
    if (p == 0)
        panic("Not enough memory for frame table\n");

    frame_table = (struct page_info *)(p << PAGE_SHIFT);
    for (i = 0; i < nr_pages; i += 1)
        clear_page((void *)((p + i) << PAGE_SHIFT));
}

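/*
 * Share a Xen-heap page with a guest: pin it writable or read-only via
 * the type count, mark it allocated, set the owning domain, and add it
 * to the domain's xenpage_list under page_alloc_lock.
 */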
void share_xen_page_with_guest(
    struct page_info *page, struct domain *d, int readonly)
{
    if ( page_get_owner(page) == d )
        return;

    /* This causes us to leak pages in the domain and results in
     * zombie domains; I think we are missing a piece.  Until we
     * find it we disable the following code. */
    set_gpfn_from_mfn(page_to_mfn(page), INVALID_M2P_ENTRY);

    spin_lock(&d->page_alloc_lock);

    /* The incremented type count pins as writable or read-only. */
    page->u.inuse.type_info = (readonly ? PGT_none : PGT_writable_page);
    page->u.inuse.type_info |= PGT_validated | 1;

    page_set_owner(page, d);
    wmb(); /* install valid domain ptr before updating refcnt. */
    ASSERT(page->count_info == 0);
    page->count_info |= PGC_allocated | 1;

    if ( unlikely(d->xenheap_pages++ == 0) )
        get_knownalive_domain(d);
    list_add_tail(&page->list, &d->xenpage_list);

    spin_unlock(&d->page_alloc_lock);
}

void share_xen_page_with_privileged_guests(
    struct page_info *page, int readonly)
{
    unimplemented();
}

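/*
 * Foreign map window: a guest PFN with the 1 << cpu_foreign_map_order()
 * bit set indexes d->arch.foreign_mfns[] (after subtracting that
 * offset), which records the machine frame mapped at that slot; an
 * empty slot holds ~0UL.
 */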
static ulong foreign_to_mfn(struct domain *d, ulong pfn)
{
    pfn -= 1UL << cpu_foreign_map_order();

    BUG_ON(pfn >= d->arch.foreign_mfn_count);

    return d->arch.foreign_mfns[pfn];
}

static int set_foreign(struct domain *d, ulong pfn, ulong mfn)
{
    pfn -= 1UL << cpu_foreign_map_order();

    BUG_ON(pfn >= d->arch.foreign_mfn_count);
    d->arch.foreign_mfns[pfn] = mfn;

    return 0;
}

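/*
 * Grant mappings are only supported for Dom0 here and are recorded in
 * the foreign-map table rather than through page-table entries:
 * mapping stores the granted frame in the slot for the requested
 * virtual address, and unmapping resets that slot to ~0UL.
 */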
static int create_grant_va_mapping(
    unsigned long va, unsigned long frame, struct vcpu *v)
{
    if (v->domain->domain_id != 0) {
        printk("only Dom0 can map a grant entry\n");
        BUG();
        return GNTST_permission_denied;
    }
    set_foreign(v->domain, va >> PAGE_SHIFT, frame);
    return GNTST_okay;
}

static int destroy_grant_va_mapping(
    unsigned long addr, unsigned long frame, struct domain *d)
{
    if (d->domain_id != 0) {
        printk("only Dom0 can unmap a grant entry\n");
        BUG();
        return GNTST_permission_denied;
    }
    set_foreign(d, addr >> PAGE_SHIFT, ~0UL);
    return GNTST_okay;
}

int create_grant_host_mapping(
    unsigned long addr, unsigned long frame, unsigned int flags)
{
    if (flags & GNTMAP_application_map) {
        printk("%s: GNTMAP_application_map not supported\n", __func__);
        BUG();
        return GNTST_general_error;
    }
    if (flags & GNTMAP_contains_pte) {
        printk("%s: GNTMAP_contains_pte not supported\n", __func__);
        BUG();
        return GNTST_general_error;
    }
    return create_grant_va_mapping(addr, frame, current);
}

int destroy_grant_host_mapping(
    unsigned long addr, unsigned long frame, unsigned int flags)
{
    if (flags & GNTMAP_contains_pte) {
        printk("%s: GNTMAP_contains_pte not supported\n", __func__);
        BUG();
        return GNTST_general_error;
    }

    /* may have to force the removal here */
    return destroy_grant_va_mapping(addr, frame, current->domain);
}

int steal_page(struct domain *d, struct page_info *page, unsigned int memflags)
{
    panic("%s called\n", __func__);
    return 1;
}

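/*
 * Drop one type reference.  When the count falls to zero, record the
 * current TLB timestamp so stale translations can be flushed before
 * the page is used with a different type.
 */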
void put_page_type(struct page_info *page)
{
    unsigned long nx, x, y = page->u.inuse.type_info;

    do {
        x = y;
        nx = x - 1;

        ASSERT((x & PGT_count_mask) != 0);

        /*
         * The page should always be validated while a reference is held. The
         * exception is during domain destruction, when we forcibly invalidate
         * page-table pages if we detect a referential loop.
         * See domain.c:relinquish_list().
         */
        ASSERT((x & PGT_validated) ||
               test_bit(_DOMF_dying, &page_get_owner(page)->domain_flags));

        if ( unlikely((nx & PGT_count_mask) == 0) )
        {
            /* Record TLB information for flush later. */
            page->tlbflush_timestamp = tlbflush_current_time();
        }
    }
    while ( unlikely((y = cmpxchg(&page->u.inuse.type_info, x, nx)) != x) );
}

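/*
 * Take a type reference using a cmpxchg loop.  Taking the first
 * reference with a new type may require flushing stale TLB entries
 * left over from the previous type; a request that conflicts with the
 * page's current type fails and returns 0.
 */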
int get_page_type(struct page_info *page, unsigned long type)
{
    unsigned long nx, x, y = page->u.inuse.type_info;

    ASSERT(!(type & ~PGT_type_mask));

 again:
    do {
        x = y;
        nx = x + 1;
        if ( unlikely((nx & PGT_count_mask) == 0) )
        {
            MEM_LOG("Type count overflow on pfn %lx", page_to_mfn(page));
            return 0;
        }
        else if ( unlikely((x & PGT_count_mask) == 0) )
        {
            if ( (x & PGT_type_mask) != type )
            {
                /*
                 * On type change we check whether stale TLB entries must be
                 * flushed.  This may be unnecessary (e.g., page was GDT/LDT)
                 * but those circumstances should be very rare.
                 */
                cpumask_t mask =
                    page_get_owner(page)->domain_dirty_cpumask;
                tlbflush_filter(mask, page->tlbflush_timestamp);

                if ( unlikely(!cpus_empty(mask)) )
                {
                    perfc_incrc(need_flush_tlb_flush);
                    flush_tlb_mask(mask);
                }

                /* We lose existing type, back pointer, and validity. */
                nx &= ~(PGT_type_mask | PGT_validated);
                nx |= type;

                /* No special validation needed for writable pages. */
                /* Page tables and GDT/LDT need to be scanned for validity. */
                if ( type == PGT_writable_page )
                    nx |= PGT_validated;
            }
        }
        else if ( unlikely((x & PGT_type_mask) != type) )
        {
            return 0;
        }
        else if ( unlikely(!(x & PGT_validated)) )
        {
            /* Someone else is updating validation of this page. Wait... */
            while ( (y = page->u.inuse.type_info) == x )
                cpu_relax();
            goto again;
        }
    }
    while ( unlikely((y = cmpxchg(&page->u.inuse.type_info, x, nx)) != x) );

    if ( unlikely(!(nx & PGT_validated)) )
    {
        /* No one else is updating simultaneously. */
        __set_bit(_PGT_validated, &page->u.inuse.type_info);
    }

    return 1;
}

long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
{
    printk("%s: no PPC specific memory ops\n", __func__);
    return -ENOSYS;
}

extern void copy_page(void *dp, void *sp)
{
    if (on_systemsim()) {
        systemsim_memcpy(dp, sp, PAGE_SIZE);
    } else {
        memcpy(dp, sp, PAGE_SIZE);
    }
}

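/*
 * Memory above the RMA is given to a domain in fixed-size "extents" of
 * 1 << cpu_extent_order() pages; each extent is recorded as a
 * struct page_extents entry on d->arch.extent_list.
 */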
/* XXX should probably replace with faster data structure */
static int add_extent(struct domain *d, struct page_info *pg, uint order)
{
    struct page_extents *pe;

    pe = xmalloc(struct page_extents);
    if (pe == NULL)
        return -ENOMEM;

    pe->pg = pg;
    pe->order = order;

    list_add_tail(&pe->pe_list, &d->arch.extent_list);

    return 0;
}

void free_extents(struct domain *d)
{
    /* we just need to free the memory behind the list */
    struct list_head *list;
    struct list_head *ent;
    struct list_head *next;

    list = &d->arch.extent_list;
    ent = list->next;

    while (ent != list) {
        next = ent->next;
        xfree(ent);
        ent = next;
    }
}

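/*
 * Grow a domain to at least nrpages total pages (rma_nrpages of which
 * are already present in the RMA) by allocating whole extents.  Returns
 * the total page count actually reached, which may fall short on
 * allocation failure or overshoot because allocation is done in
 * extent-sized chunks.
 */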
uint allocate_extents(struct domain *d, uint nrpages, uint rma_nrpages)
{
    uint ext_order;
    uint ext_nrpages;
    uint total_nrpages;
    struct page_info *pg;

    ext_order = cpu_extent_order();
    ext_nrpages = 1 << ext_order;

    total_nrpages = rma_nrpages;

    /* We only allocate in ext_nrpages chunks, so if nrpages is not a
     * multiple of the extent size you get more than you asked for. */
    while (total_nrpages < nrpages) {
        pg = alloc_domheap_pages(d, ext_order, 0);
        if (pg == NULL)
            return total_nrpages;

        if (add_extent(d, pg, ext_order) < 0) {
            free_domheap_pages(pg, ext_order);
            return total_nrpages;
        }
        total_nrpages += ext_nrpages;
    }

    return total_nrpages;
}

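/*
 * Allocate the domain's Real Mode Area (RMA): a naturally aligned,
 * physically contiguous block of 1 << order pages that backs guest
 * physical address 0 and holds the shared_info page.
 */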
int allocate_rma(struct domain *d, unsigned int order)
{
    struct vcpu *v;
    ulong rma_base;
    ulong rma_sz;
    int i;

    if (d->arch.rma_page)
        return -EINVAL;

    d->arch.rma_page = alloc_domheap_pages(d, order, 0);
    if (d->arch.rma_page == NULL) {
        gdprintk(XENLOG_INFO, "Could not allocate order=%d RMA for domain %u\n",
                 order, d->domain_id);
        return -ENOMEM;
    }
    d->arch.rma_order = order;

    rma_base = page_to_maddr(d->arch.rma_page);
    rma_sz = rma_size(d->arch.rma_order);

    BUG_ON(rma_base & (rma_sz - 1)); /* check alignment */

    printk("allocated RMA for Dom[%d]: 0x%lx[0x%lx]\n",
           d->domain_id, rma_base, rma_sz);

    for (i = 0; i < (1 << d->arch.rma_order); i++ ) {
        /* Mark each page as part of the RMA and zero it. */
        d->arch.rma_page[i].count_info |= PGC_page_RMA;
        clear_page((void *)page_to_maddr(&d->arch.rma_page[i]));
    }

    d->shared_info = (shared_info_t *)
        (rma_addr(&d->arch, RMA_SHARED_INFO) + rma_base);

    /* if there are already running vcpus, adjust v->vcpu_info */
    /* XXX untested */
    for_each_vcpu(d, v) {
        v->vcpu_info = &d->shared_info->vcpu_info[v->vcpu_id];
    }

    return 0;
}

void free_rma_check(struct page_info *page)
{
    if (test_bit(_PGC_page_RMA, &page->count_info) &&
        !test_bit(_DOMF_dying, &page_get_owner(page)->domain_flags))
        panic("Attempt to free an RMA page: 0x%lx\n", page_to_mfn(page));
}

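/*
 * Translate a guest PFN to a machine frame and classify it.  The guest
 * physical space is laid out as: the RMA at PFN 0, allocated extents
 * immediately above it, grant-table frames at max_page, the foreign-map
 * window at PFNs with the 1 << cpu_foreign_map_order() bit set, and
 * (for dom0 only) direct I/O frames.  For example, with a 64MB RMA
 * (0x4000 4KB pages), guest PFN 0x4000 is the first page of the first
 * extent.
 */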
ulong pfn2mfn(struct domain *d, ulong pfn, int *type)
{
    ulong rma_base_mfn = page_to_mfn(d->arch.rma_page);
    ulong rma_size_mfn = 1UL << d->arch.rma_order;
    struct page_extents *pe;
    ulong mfn = INVALID_MFN;
    int t = PFN_TYPE_NONE;
    ulong foreign_map_pfn = 1UL << cpu_foreign_map_order();

    /* quick tests first */
    if (pfn & foreign_map_pfn) {
        t = PFN_TYPE_FOREIGN;
        mfn = foreign_to_mfn(d, pfn);
    } else if (pfn >= max_page && pfn < (max_page + NR_GRANT_FRAMES)) {
        /* It's a grant table access */
        t = PFN_TYPE_GNTTAB;
        mfn = gnttab_shared_mfn(d, d->grant_table, (pfn - max_page));
    } else if (d->is_privileged && cpu_io_mfn(pfn)) {
        t = PFN_TYPE_IO;
        mfn = pfn;
    } else {
        if (pfn < rma_size_mfn) {
            t = PFN_TYPE_RMA;
            mfn = pfn + rma_base_mfn;
        } else {
            ulong cur_pfn = rma_size_mfn;

            list_for_each_entry (pe, &d->arch.extent_list, pe_list) {
                uint pe_pages = 1UL << pe->order;
                uint end_pfn = cur_pfn + pe_pages;

                if (pfn >= cur_pfn && pfn < end_pfn) {
                    t = PFN_TYPE_LOGICAL;
                    mfn = page_to_mfn(pe->pg) + (pfn - cur_pfn);
                    break;
                }
                cur_pfn += pe_pages;
            }
        }
#ifdef DEBUG
        if (t != PFN_TYPE_NONE &&
            (d->domain_flags & DOMF_dying) &&
            page_get_owner(mfn_to_page(mfn)) != d) {
            printk("%s: page type: %d owner Dom[%d]:%p expected Dom[%d]:%p\n",
                   __func__, t,
                   page_get_owner(mfn_to_page(mfn))->domain_id,
                   page_get_owner(mfn_to_page(mfn)),
                   d->domain_id, d);
            BUG();
        }
#endif
    }

    if (t == PFN_TYPE_NONE) {
        /* This hack allows dom0 to map all memory, necessary to
         * initialize domU state. */
        if (d->is_privileged && mfn_valid(pfn)) {
            struct page_info *pg;

            /* The page had better be allocated to some domain,
             * but not to the caller. */
            pg = mfn_to_page(pfn);
            if (!(pg->count_info & PGC_allocated))
                panic("Foreign page: 0x%lx is not owned by any domain\n",
                      pfn);
            if (page_get_owner(pg) == d)
                panic("Foreign page: 0x%lx is owned by this domain\n",
                      pfn);

            t = PFN_TYPE_FOREIGN;
            mfn = pfn;
        }
    }

    if (mfn == INVALID_MFN) {
        printk("%s: Dom[%d] pfn 0x%lx is not a valid page\n",
               __func__, d->domain_id, pfn);
    }

    if (type)
        *type = t;

    return mfn;
}

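/*
 * Inverse of pfn2mfn: map a machine frame back to the guest PFN under
 * which this domain sees it (grant frames, I/O, RMA, or extents), or
 * INVALID_M2P_ENTRY if the frame is not mapped into the domain.
 */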
unsigned long mfn_to_gmfn(struct domain *d, unsigned long mfn)
{
    struct page_extents *pe;
    ulong cur_pfn;
    ulong gnttab_mfn;
    ulong rma_mfn;

    /* grant? */
    gnttab_mfn = gnttab_shared_mfn(d, d->grant_table, 0);
    if (mfn >= gnttab_mfn && mfn < (gnttab_mfn + NR_GRANT_FRAMES))
        return max_page + (mfn - gnttab_mfn);

    /* IO? */
    if (d->is_privileged && cpu_io_mfn(mfn))
        return mfn;

    rma_mfn = page_to_mfn(d->arch.rma_page);
    if (mfn >= rma_mfn &&
        mfn < (rma_mfn + (1 << d->arch.rma_order)))
        return mfn - rma_mfn;

    /* Extent? */
    cur_pfn = 1UL << d->arch.rma_order;
    list_for_each_entry (pe, &d->arch.extent_list, pe_list) {
        uint pe_pages = 1UL << pe->order;
        uint b_mfn = page_to_mfn(pe->pg);
        uint e_mfn = b_mfn + pe_pages;

        if (mfn >= b_mfn && mfn < e_mfn) {
            return cur_pfn + (mfn - b_mfn);
        }
        cur_pfn += pe_pages;
    }
    return INVALID_M2P_ENTRY;
}

void guest_physmap_add_page(
    struct domain *d, unsigned long gpfn, unsigned long mfn)
{
    printk("%s(%d, 0x%lx, 0x%lx)\n", __func__, d->domain_id, gpfn, mfn);
}

void guest_physmap_remove_page(
    struct domain *d, unsigned long gpfn, unsigned long mfn)
{
    panic("%s\n", __func__);
}

void shadow_drop_references(
    struct domain *d, struct page_info *page)
{
}

int arch_domain_add_extent(struct domain *d, struct page_info *page, int order)
{
    if (add_extent(d, page, order) < 0)
        return -ENOMEM;
    return 0;
}