xen/arch/powerpc/mm.c @ 12922:a69f935eda6c (ia64/xen-unstable)

[XEN][POWERPC] Remove limit on the page frame table
Dunno where the limit came from, but it is the reason we were
asserting in the page allocator.
Signed-off-by: Jimi Xenidis <jimix@watson.ibm.com>
Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>

author    Jimi Xenidis <jimix@watson.ibm.com>
date      Thu Sep 28 22:31:23 2006 -0400
parents   4da585fb62f9
children  79bb96e0ba73

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright (C) IBM Corp. 2005, 2006
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Jimi Xenidis <jimix@watson.ibm.com>
 */

#include <xen/config.h>
#include <xen/mm.h>
#include <xen/shadow.h>
#include <xen/kernel.h>
#include <xen/sched.h>
#include <xen/perfc.h>
#include <asm/init.h>
#include <asm/page.h>

#ifdef VERBOSE
#define MEM_LOG(_f, _a...)                                  \
    printk("DOM%u: (file=mm.c, line=%d) " _f "\n",          \
           current->domain->domain_id , __LINE__ , ## _a )
#else
#define MEM_LOG(_f, _a...) ((void)0)
#endif

/* The frame table, plus the machine page counts used to size it. */
struct page_info *frame_table;
unsigned long max_page;
unsigned long total_pages;
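
/*
 * Grant-table host mappings and page stealing are not implemented for
 * PowerPC; these stubs panic if they are ever reached.
 */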
int create_grant_host_mapping(
    unsigned long addr, unsigned long frame, unsigned int flags)
{
    panic("%s called\n", __func__);
    return 1;
}

int destroy_grant_host_mapping(
    unsigned long addr, unsigned long frame, unsigned int flags)
{
    panic("%s called\n", __func__);
    return 1;
}

int steal_page(struct domain *d, struct page_info *page, unsigned int memflags)
{
    panic("%s called\n", __func__);
    return 1;
}
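
/*
 * Drop one type reference on a page.  When the type count reaches zero,
 * the current TLB timestamp is recorded so stale translations can be
 * flushed before the page is reused.
 */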
void put_page_type(struct page_info *page)
{
    unsigned long nx, x, y = page->u.inuse.type_info;

    do {
        x  = y;
        nx = x - 1;

        ASSERT((x & PGT_count_mask) != 0);

        /*
         * The page should always be validated while a reference is held. The
         * exception is during domain destruction, when we forcibly invalidate
         * page-table pages if we detect a referential loop.
         * See domain.c:relinquish_list().
         */
        ASSERT((x & PGT_validated) ||
               test_bit(_DOMF_dying, &page_get_owner(page)->domain_flags));

        if ( unlikely((nx & PGT_count_mask) == 0) )
        {
            /* Record TLB information for flush later. */
            page->tlbflush_timestamp = tlbflush_current_time();
        }
    }
    while ( unlikely((y = cmpxchg(&page->u.inuse.type_info, x, nx)) != x) );
}
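
/*
 * Take one type reference on a page, changing its type if this is the
 * first type reference.  A type change flushes any CPUs that may still
 * hold stale TLB entries for the page.  Returns 1 on success, 0 on failure.
 */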
int get_page_type(struct page_info *page, unsigned long type)
{
    unsigned long nx, x, y = page->u.inuse.type_info;

    ASSERT(!(type & ~PGT_type_mask));

 again:
    do {
        x  = y;
        nx = x + 1;
        if ( unlikely((nx & PGT_count_mask) == 0) )
        {
            MEM_LOG("Type count overflow on pfn %lx", page_to_mfn(page));
            return 0;
        }
        else if ( unlikely((x & PGT_count_mask) == 0) )
        {
            if ( (x & PGT_type_mask) != type )
            {
                /*
                 * On type change we check to flush stale TLB entries. This
                 * may be unnecessary (e.g., page was GDT/LDT) but those
                 * circumstances should be very rare.
                 */
                cpumask_t mask =
                    page_get_owner(page)->domain_dirty_cpumask;
                tlbflush_filter(mask, page->tlbflush_timestamp);

                if ( unlikely(!cpus_empty(mask)) )
                {
                    perfc_incrc(need_flush_tlb_flush);
                    flush_tlb_mask(mask);
                }

                /* We lose existing type, back pointer, and validity. */
                nx &= ~(PGT_type_mask | PGT_validated);
                nx |= type;

                /* No special validation needed for writable pages. */
                /* Page tables and GDT/LDT need to be scanned for validity. */
                if ( type == PGT_writable_page )
                    nx |= PGT_validated;
            }
        }
        else if ( unlikely((x & PGT_type_mask) != type) )
        {
            return 0;
        }
        if ( unlikely(!(x & PGT_validated)) )
        {
            /* Someone else is updating validation of this page. Wait... */
            while ( (y = page->u.inuse.type_info) == x )
                cpu_relax();
            goto again;
        }
    }
    while ( unlikely((y = cmpxchg(&page->u.inuse.type_info, x, nx)) != x) );

    if ( unlikely(!(nx & PGT_validated)) )
    {
        /* No one else is updating simultaneously. */
        __set_bit(_PGT_validated, &page->u.inuse.type_info);
    }

    return 1;
}
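
/*
 * Allocate and zero the frame table at boot.  Its size is derived solely
 * from max_page: one struct page_info per machine page frame.
 */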
void __init init_frametable(void)
{
    unsigned long p;
    unsigned long nr_pages;
    int i;

    nr_pages = PFN_UP(max_page * sizeof(struct page_info));

    p = alloc_boot_pages(nr_pages, 1);
    if (p == 0)
        panic("Not enough memory for frame table\n");

    frame_table = (struct page_info *)(p << PAGE_SHIFT);
    for (i = 0; i < nr_pages; i += 1)
        clear_page((void *)((p + i) << PAGE_SHIFT));
}

long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
{
    printk("%s: no PPC specific memory ops\n", __func__);
    return -ENOSYS;
}
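
/*
 * Copy a full page.  On the Mambo simulator a simulator-accelerated
 * memcpy is used; otherwise fall back to an ordinary memcpy.
 */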
extern void copy_page(void *dp, void *sp)
{
    if (on_mambo()) {
        extern void *mambo_memcpy(void *, const void *, __kernel_size_t);

        mambo_memcpy(dp, sp, PAGE_SIZE);
    } else {
        memcpy(dp, sp, PAGE_SIZE);
    }
}
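
/*
 * Record a newly allocated extent on the domain's extent list.  Returns
 * the starting machine frame number of the extent, or 0 on failure.
 */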
static uint add_extent(struct domain *d, struct page_info *pg, uint order)
{
    struct page_extents *pe;

    pe = xmalloc(struct page_extents);
    if (pe == NULL)
        return 0;

    pe->pg = pg;
    pe->order = order;
    pe->pfn = page_to_mfn(pg);

    list_add_tail(&pe->pe_list, &d->arch.extent_list);

    return pe->pfn;
}

void free_extents(struct domain *d)
{
    /* We only need to free the page_extents structures that make up the list. */
    struct list_head *list;
    struct list_head *ent;
    struct list_head *next;

    list = &d->arch.extent_list;
    ent = list->next;

    while (ent != list) {
        next = ent->next;
        xfree(ent);
        ent = next;
    }
}
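
/*
 * Grow a domain's memory in fixed-size extents until at least nrpages
 * pages (counting the rma_nrpages already covered by the RMA) have been
 * allocated.  Returns the total number of pages the domain now has.
 */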
uint allocate_extents(struct domain *d, uint nrpages, uint rma_nrpages)
{
    uint ext_order;
    uint ext_nrpages;
    uint total_nrpages;
    struct page_info *pg;

    ext_order = cpu_extent_order();
    ext_nrpages = 1 << ext_order;

    total_nrpages = rma_nrpages;

    /* We only allocate in chunks of ext_nrpages pages, so if nrpages is
     * not a multiple of the chunk size the domain gets more than it
     * asked for. */
    while (total_nrpages < nrpages) {
        pg = alloc_domheap_pages(d, ext_order, 0);
        if (pg == NULL)
            return total_nrpages;

        if (add_extent(d, pg, ext_order) == 0) {
            free_domheap_pages(pg, ext_order);
            return total_nrpages;
        }
        total_nrpages += ext_nrpages;
    }

    return total_nrpages;
}
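
/*
 * Allocate the domain's Real Mode Area (RMA): a naturally aligned,
 * physically contiguous block that also holds the shared_info page.
 */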
int allocate_rma(struct domain *d, unsigned int order)
{
    struct vcpu *v;
    ulong rma_base;
    ulong rma_sz;
    int i;

    if (d->arch.rma_page)
        return -EINVAL;

    d->arch.rma_page = alloc_domheap_pages(d, order, 0);
    if (d->arch.rma_page == NULL) {
        gdprintk(XENLOG_INFO, "Could not allocate order=%d RMA for domain %u\n",
                 order, d->domain_id);
        return -ENOMEM;
    }
    d->arch.rma_order = order;

    rma_base = page_to_maddr(d->arch.rma_page);
    rma_sz = rma_size(d->arch.rma_order);

    BUG_ON(rma_base & (rma_sz - 1)); /* check alignment */

    printk("allocated RMA for Dom[%d]: 0x%lx[0x%lx]\n",
           d->domain_id, rma_base, rma_sz);

    for (i = 0; i < (1 << d->arch.rma_order); i++) {
        /* Mark each page as part of the RMA and clear its contents. */
        d->arch.rma_page[i].count_info |= PGC_page_RMA;
        clear_page((void *)page_to_maddr(&d->arch.rma_page[i]));
    }

    d->shared_info = (shared_info_t *)
        (rma_addr(&d->arch, RMA_SHARED_INFO) + rma_base);

    /* If there are already running vcpus, adjust v->vcpu_info. */
    /* XXX untested */
    for_each_vcpu(d, v) {
        v->vcpu_info = &d->shared_info->vcpu_info[v->vcpu_id];
    }

    return 0;
}
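
/*
 * RMA pages must never be freed while the owning domain is alive; trap
 * any attempt to do so.
 */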
void free_rma_check(struct page_info *page)
{
    if (test_bit(_PGC_page_RMA, &page->count_info) &&
        !test_bit(_DOMF_dying, &page_get_owner(page)->domain_flags))
        panic("Attempt to free an RMA page: 0x%lx\n", page_to_mfn(page));
}
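
/*
 * Translate a domain "pseudo-physical" frame number to a machine frame
 * number, classifying it as I/O, RMA, logical (extent), or foreign.
 * Returns INVALID_MFN if the pfn does not map to anything.
 */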
ulong pfn2mfn(struct domain *d, ulong pfn, int *type)
{
    ulong rma_base_mfn = page_to_mfn(d->arch.rma_page);
    ulong rma_size_mfn = 1UL << d->arch.rma_order;
    struct page_extents *pe;
    ulong mfn = INVALID_MFN;
    int t = PFN_TYPE_NONE;

    /* quick tests first */
    if (d->is_privileged && cpu_io_mfn(pfn)) {
        t = PFN_TYPE_IO;
        mfn = pfn;
    } else {
        if (pfn < rma_size_mfn) {
            t = PFN_TYPE_RMA;
            mfn = pfn + rma_base_mfn;
        } else {
            list_for_each_entry (pe, &d->arch.extent_list, pe_list) {
                uint end_pfn = pe->pfn + (1 << pe->order);

                if (pfn >= pe->pfn && pfn < end_pfn) {
                    t = PFN_TYPE_LOGICAL;
                    mfn = page_to_mfn(pe->pg) + (pfn - pe->pfn);
                    break;
                }
            }
        }
        BUG_ON(t != PFN_TYPE_NONE && page_get_owner(mfn_to_page(mfn)) != d);
    }

    if (t == PFN_TYPE_NONE) {
        /* This hack allows dom0 to map all memory, necessary to
         * initialize domU state. */
        if (d->is_privileged && mfn_valid(pfn)) {
            struct page_info *pg;

            /* The page had better be allocated to some domain, but not the caller. */
            pg = mfn_to_page(pfn);
            if (!(pg->count_info & PGC_allocated))
                panic("Foreign page: 0x%lx is not owned by any domain\n",
                      pfn);
            if (page_get_owner(pg) == d)
                panic("Foreign page: 0x%lx is owned by this domain\n",
                      pfn);

            t = PFN_TYPE_FOREIGN;
            mfn = pfn;
        }
    }

    if (mfn == INVALID_MFN) {
        printk("%s: Dom[%d] pfn 0x%lx is not a valid page\n",
               __func__, d->domain_id, pfn);
    }

    if (type)
        *type = t;

    return mfn;
}
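
/* Physmap manipulation is not implemented for PowerPC yet: adding a page
 * only logs the request, and removing one panics. */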
void guest_physmap_add_page(
    struct domain *d, unsigned long gpfn, unsigned long mfn)
{
    printk("%s(%d, 0x%lx, 0x%lx)\n", __func__, d->domain_id, gpfn, mfn);
}

void guest_physmap_remove_page(
    struct domain *d, unsigned long gpfn, unsigned long mfn)
{
    panic("%s\n", __func__);
}

void shadow_drop_references(
    struct domain *d, struct page_info *page)
{
}