ia64/xen-unstable: xen/arch/powerpc/mm.c @ 12034:b07487d91f93

[XEN] Logging parameters for powerpc code.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>

author   kaf24@localhost.localdomain
date     Fri Oct 27 18:56:44 2006 +0100 (2006-10-27)
parents  9061e1246906
children 36679b74e24a
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright (C) IBM Corp. 2005, 2006
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Jimi Xenidis <jimix@watson.ibm.com>
 */

#include <xen/config.h>
#include <xen/mm.h>
#include <xen/shadow.h>
#include <xen/kernel.h>
#include <xen/sched.h>
#include <xen/perfc.h>
#include <asm/misc.h>
#include <asm/init.h>
#include <asm/page.h>

#ifdef VERBOSE
#define MEM_LOG(_f, _a...)                                  \
    printk("DOM%u: (file=mm.c, line=%d) " _f "\n",          \
           current->domain->domain_id , __LINE__ , ## _a )
#else
#define MEM_LOG(_f, _a...) ((void)0)
#endif

/* Frame table and its size in pages. */
struct page_info *frame_table;
unsigned long max_page;
unsigned long total_pages;
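
/*
 * Grant-table host mappings and page stealing are not implemented for
 * PowerPC yet: the stubs below panic if they are ever reached, so their
 * nominal failure return values are never seen by a caller.
 */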
int create_grant_host_mapping(
    unsigned long addr, unsigned long frame, unsigned int flags)
{
    panic("%s called\n", __func__);
    return 1;
}

int destroy_grant_host_mapping(
    unsigned long addr, unsigned long frame, unsigned int flags)
{
    panic("%s called\n", __func__);
    return 1;
}

int steal_page(struct domain *d, struct page_info *page, unsigned int memflags)
{
    panic("%s called\n", __func__);
    return 1;
}
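
/*
 * Drop one typed reference to a page.  When the type count reaches zero
 * the current TLB flush timestamp is recorded so that stale translations
 * can be filtered out before the frame is reused.
 */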
void put_page_type(struct page_info *page)
{
    unsigned long nx, x, y = page->u.inuse.type_info;

    do {
        x  = y;
        nx = x - 1;

        ASSERT((x & PGT_count_mask) != 0);

        /*
         * The page should always be validated while a reference is held. The
         * exception is during domain destruction, when we forcibly invalidate
         * page-table pages if we detect a referential loop.
         * See domain.c:relinquish_list().
         */
        ASSERT((x & PGT_validated) ||
               test_bit(_DOMF_dying, &page_get_owner(page)->domain_flags));

        if ( unlikely((nx & PGT_count_mask) == 0) )
        {
            /* Record TLB information for flush later. */
            page->tlbflush_timestamp = tlbflush_current_time();
        }
    }
    while ( unlikely((y = cmpxchg(&page->u.inuse.type_info, x, nx)) != x) );
}
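
/*
 * Acquire a typed reference to a page.  The first reference of a given
 * type may require flushing stale TLB entries (on a type change) and
 * leaves the page unvalidated unless it is a writable page; concurrent
 * callers spin until the PGT_validated bit appears.  Returns 1 on
 * success, 0 on a type conflict or reference-count overflow.
 */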
int get_page_type(struct page_info *page, unsigned long type)
{
    unsigned long nx, x, y = page->u.inuse.type_info;

    ASSERT(!(type & ~PGT_type_mask));

 again:
    do {
        x  = y;
        nx = x + 1;
        if ( unlikely((nx & PGT_count_mask) == 0) )
        {
            MEM_LOG("Type count overflow on pfn %lx", page_to_mfn(page));
            return 0;
        }
        else if ( unlikely((x & PGT_count_mask) == 0) )
        {
            if ( (x & PGT_type_mask) != type )
            {
                /*
                 * On type change we check to flush stale TLB entries. This
                 * may be unnecessary (e.g., page was GDT/LDT) but those
                 * circumstances should be very rare.
                 */
                cpumask_t mask =
                    page_get_owner(page)->domain_dirty_cpumask;
                tlbflush_filter(mask, page->tlbflush_timestamp);

                if ( unlikely(!cpus_empty(mask)) )
                {
                    perfc_incrc(need_flush_tlb_flush);
                    flush_tlb_mask(mask);
                }

                /* We lose existing type, back pointer, and validity. */
                nx &= ~(PGT_type_mask | PGT_validated);
                nx |= type;

                /* No special validation needed for writable pages. */
                /* Page tables and GDT/LDT need to be scanned for validity. */
                if ( type == PGT_writable_page )
                    nx |= PGT_validated;
            }
        }
        else if ( unlikely((x & PGT_type_mask) != type) )
        {
            return 0;
        }
        if ( unlikely(!(x & PGT_validated)) )
        {
            /* Someone else is updating validation of this page. Wait... */
            while ( (y = page->u.inuse.type_info) == x )
                cpu_relax();
            goto again;
        }
    }
    while ( unlikely((y = cmpxchg(&page->u.inuse.type_info, x, nx)) != x) );

    if ( unlikely(!(nx & PGT_validated)) )
    {
        /* No one else is updating simultaneously. */
        __set_bit(_PGT_validated, &page->u.inuse.type_info);
    }

    return 1;
}
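
/*
 * Allocate and zero the frame table from boot memory.  It is sized to
 * cover max_page entries but capped at 4MB worth of pages
 * (4UL << (20 - PAGE_SHIFT)) here.
 */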
void __init init_frametable(void)
{
    unsigned long p;
    unsigned long nr_pages;
    int i;

    nr_pages = PFN_UP(max_page * sizeof(struct page_info));
    nr_pages = min(nr_pages, (4UL << (20 - PAGE_SHIFT)));

    p = alloc_boot_pages(nr_pages, 1);
    if (p == 0)
        panic("Not enough memory for frame table\n");

    frame_table = (struct page_info *)(p << PAGE_SHIFT);
    for (i = 0; i < nr_pages; i += 1)
        clear_page((void *)((p + i) << PAGE_SHIFT));
}

long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
{
    printk("%s: no PPC specific memory ops\n", __func__);
    return -ENOSYS;
}
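
/*
 * Copy one page of data.  Under the Mambo full-system simulator this uses
 * a simulator-assisted memcpy (presumably cheaper than a fully simulated
 * copy); on real hardware it falls back to a plain memcpy().
 */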
extern void copy_page(void *dp, void *sp)
{
    if (on_mambo()) {
        extern void *mambo_memcpy(void *, const void *, __kernel_size_t);
        mambo_memcpy(dp, sp, PAGE_SIZE);
    } else {
        memcpy(dp, sp, PAGE_SIZE);
    }
}
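
/*
 * A page extent describes a block of 2^order contiguous domain-heap pages
 * given to a domain beyond its RMA.  Extents live on the per-domain
 * arch.extent_list and are consulted by pfn2mfn() when translating guest
 * frame numbers above the RMA.
 */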
static uint add_extent(struct domain *d, struct page_info *pg, uint order)
{
    struct page_extents *pe;

    pe = xmalloc(struct page_extents);
    if (pe == NULL)
        return 0;

    pe->pg = pg;
    pe->order = order;
    pe->pfn = page_to_mfn(pg);

    list_add_tail(&pe->pe_list, &d->arch.extent_list);

    return pe->pfn;
}

void free_extents(struct domain *d)
{
    /* we just need to free the memory behind the list */
    struct list_head *list;
    struct list_head *ent;
    struct list_head *next;

    list = &d->arch.extent_list;
    ent = list->next;

    while (ent != list) {
        next = ent->next;
        xfree(ent);
        ent = next;
    }
}
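
/*
 * Grow a domain's memory to at least nrpages pages, rma_nrpages of which
 * are already provided by the RMA, by allocating fixed-size extents from
 * the domain heap.  Returns the total number of pages the domain ends up
 * with: less than requested if an allocation fails, possibly more, since
 * memory is only handed out in whole extents.
 */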
uint allocate_extents(struct domain *d, uint nrpages, uint rma_nrpages)
{
    uint ext_order;
    uint ext_nrpages;
    uint total_nrpages;
    struct page_info *pg;

    ext_order = cpu_extent_order();
    ext_nrpages = 1 << ext_order;

    total_nrpages = rma_nrpages;

    /* We only allocate in ext_nrpages-sized chunks, so if the request is
     * not a multiple of that you get more than you asked for. */
    while (total_nrpages < nrpages) {
        pg = alloc_domheap_pages(d, ext_order, 0);
        if (pg == NULL)
            return total_nrpages;

        if (add_extent(d, pg, ext_order) == 0) {
            free_domheap_pages(pg, ext_order);
            return total_nrpages;
        }
        total_nrpages += ext_nrpages;
    }

    return total_nrpages;
}
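
/*
 * Allocate the domain's RMA (Real Mode Area): the naturally aligned,
 * physically contiguous block that a PowerPC guest accesses while address
 * translation is disabled.  Each page is tagged PGC_page_RMA so that
 * free_rma_check() can refuse to free it while the domain is alive, and
 * the shared_info page is placed at a fixed offset within the area.
 */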
int allocate_rma(struct domain *d, unsigned int order)
{
    struct vcpu *v;
    ulong rma_base;
    ulong rma_sz;
    int i;

    if (d->arch.rma_page)
        return -EINVAL;

    d->arch.rma_page = alloc_domheap_pages(d, order, 0);
    if (d->arch.rma_page == NULL) {
        DPRINTK(XENLOG_G_INFO "Could not allocate order=%d RMA for domain %u\n",
                order, d->domain_id);
        return -ENOMEM;
    }
    d->arch.rma_order = order;

    rma_base = page_to_maddr(d->arch.rma_page);
    rma_sz = rma_size(d->arch.rma_order);

    BUG_ON(rma_base & (rma_sz - 1)); /* check alignment */

    printk("allocated RMA for Dom[%d]: 0x%lx[0x%lx]\n",
           d->domain_id, rma_base, rma_sz);

    for (i = 0; i < (1 << d->arch.rma_order); i++) {
        /* Add in any extra CPUs that need flushing because of this page. */
        d->arch.rma_page[i].count_info |= PGC_page_RMA;
        clear_page((void *)page_to_maddr(&d->arch.rma_page[i]));
    }

    d->shared_info = (shared_info_t *)
        (rma_addr(&d->arch, RMA_SHARED_INFO) + rma_base);

    /* if there are already running vcpus, adjust v->vcpu_info */
    /* XXX untested */
    for_each_vcpu(d, v) {
        v->vcpu_info = &d->shared_info->vcpu_info[v->vcpu_id];
    }

    return 0;
}

void free_rma_check(struct page_info *page)
{
    if (test_bit(_PGC_page_RMA, &page->count_info) &&
        !test_bit(_DOMF_dying, &page_get_owner(page)->domain_flags))
        panic("Attempt to free an RMA page: 0x%lx\n", page_to_mfn(page));
}
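
/*
 * Translate a guest page frame number to a machine frame number and
 * classify it: I/O space (privileged domains only), the RMA, an allocated
 * extent, or, as a dom0-only fallback, a foreign page owned by another
 * domain.  Returns INVALID_MFN if the pfn maps to nothing.  Typical use:
 *   mfn = pfn2mfn(d, pfn, &type);
 */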
ulong pfn2mfn(struct domain *d, ulong pfn, int *type)
{
    ulong rma_base_mfn = page_to_mfn(d->arch.rma_page);
    ulong rma_size_mfn = 1UL << d->arch.rma_order;
    struct page_extents *pe;
    ulong mfn = INVALID_MFN;
    int t = PFN_TYPE_NONE;

    /* quick tests first */
    if (test_bit(_DOMF_privileged, &d->domain_flags) &&
        cpu_io_mfn(pfn)) {
        t = PFN_TYPE_IO;
        mfn = pfn;
    } else {
        if (pfn < rma_size_mfn) {
            t = PFN_TYPE_RMA;
            mfn = pfn + rma_base_mfn;
        } else {
            list_for_each_entry (pe, &d->arch.extent_list, pe_list) {
                uint end_pfn = pe->pfn + (1 << pe->order);

                if (pfn >= pe->pfn && pfn < end_pfn) {
                    t = PFN_TYPE_LOGICAL;
                    mfn = page_to_mfn(pe->pg) + (pfn - pe->pfn);
                    break;
                }
            }
        }
        BUG_ON(t != PFN_TYPE_NONE && page_get_owner(mfn_to_page(mfn)) != d);
    }

    if (t == PFN_TYPE_NONE) {
        /* This hack allows dom0 to map all memory, necessary to
         * initialize domU state. */
        if (test_bit(_DOMF_privileged, &d->domain_flags) &&
            mfn_valid(pfn)) {
            struct page_info *pg;

            /* page better be allocated to some domain but not the caller */
            pg = mfn_to_page(pfn);
            if (!(pg->count_info & PGC_allocated))
                panic("Foreign page: 0x%lx is not owned by any domain\n",
                      mfn);
            if (page_get_owner(pg) == d)
                panic("Foreign page: 0x%lx is owned by this domain\n",
                      mfn);

            t = PFN_TYPE_FOREIGN;
            mfn = pfn;
        }
    }

    if (mfn == INVALID_MFN) {
        printk("%s: Dom[%d] pfn 0x%lx is not a valid page\n",
               __func__, d->domain_id, pfn);
    }

    if (type)
        *type = t;

    return mfn;
}
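
/*
 * The physmap hooks and shadow support below are placeholders on PowerPC:
 * adding a page is only logged, removing one panics, and
 * shadow_drop_references() is a no-op.
 */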
void guest_physmap_add_page(
    struct domain *d, unsigned long gpfn, unsigned long mfn)
{
    printk("%s(%d, 0x%lx, 0x%lx)\n", __func__, d->domain_id, gpfn, mfn);
}

void guest_physmap_remove_page(
    struct domain *d, unsigned long gpfn, unsigned long mfn)
{
    panic("%s\n", __func__);
}

void shadow_drop_references(
    struct domain *d, struct page_info *page)
{
}