ia64/xen-unstable: xen/arch/powerpc/domain.c @ 14239:b75609e1fa81

[POWERPC][XEN] Implement guest_physmap_{add,remove}_page().
 - Use p2m array in pfn2mfn() and DOMCTL_getmemlist.
 - Remove domain extent list.
 - Create and use an m2p array for mfn_to_gmfn().

Signed-off-by: Ryan Harper <ryanh@us.ibm.com>
Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>

author:   Hollis Blanchard <hollisb@us.ibm.com>
date:     Fri Mar 02 17:07:59 2007 -0600
parents:  f56981f78d73
children: 7db52e0ed133

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2005, 2006, 2007
 *
 * Authors: Jimi Xenidis <jimix@watson.ibm.com>
 *          Ryan Harper <ryanh@us.ibm.com>
 *          Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <stdarg.h>
#include <xen/config.h>
#include <xen/lib.h>
#include <xen/sched.h>
#include <xen/mm.h>
#include <xen/serial.h>
#include <xen/domain.h>
#include <xen/console.h>
#include <xen/shutdown.h>
#include <xen/shadow.h>
#include <xen/softirq.h>
#include <asm/htab.h>
#include <asm/current.h>
#include <asm/hcalls.h>
#include "rtas.h"
#include "exceptions.h"

#define next_arg(fmt, args) ({                                              \
    unsigned long __arg;                                                    \
    switch ( *(fmt)++ )                                                     \
    {                                                                       \
    case 'i': __arg = (unsigned long)va_arg(args, unsigned int);  break;    \
    case 'l': __arg = (unsigned long)va_arg(args, unsigned long); break;    \
    case 'p': __arg = (unsigned long)va_arg(args, void *);        break;    \
    case 'h': __arg = (unsigned long)va_arg(args, void *);        break;    \
    default:  __arg = 0; BUG();                                             \
    }                                                                       \
    __arg;                                                                  \
})

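/*
 * next_arg() consumes one format character per call and pulls the matching
 * vararg: 'i' = unsigned int, 'l' = unsigned long, 'p' = a pointer, and
 * 'h' = a guest handle (passed here as a pointer-sized value). Any other
 * character is a caller bug. hypercall_create_continuation() below uses it
 * to reload the guest's hypercall argument registers.
 */
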
unsigned long hypercall_create_continuation(unsigned int op,
                                            const char *format, ...)
{
    struct cpu_user_regs *regs = guest_cpu_user_regs();
    const char *p = format;
    va_list args;
    int gprnum = 4;
    int i;

    va_start(args, format);

    regs->pc -= 4; /* re-execute 'sc' */

    for (i = 0; *p != '\0'; i++) {
        regs->gprs[gprnum++] = next_arg(p, args);
    }

    va_end(args);

    /* As luck would have it, we use the same register for hcall opcodes and
     * for hcall return values. The return value from this function is placed
     * in r3 on return, so modifying regs->gprs[3] would have no effect. */
    return XEN_MARK(op);
}

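/*
 * Usage sketch for hypercall_create_continuation() above (illustrative
 * only; the opcode, handler, and process_one() helper are hypothetical,
 * not defined in this tree). A long-running hypercall bails out when
 * preemption is due and arranges to be re-entered with updated arguments,
 * one format character per GPR starting at r4:
 *
 *   static long do_example_op(unsigned long start, unsigned long count)
 *   {
 *       while ( count-- )
 *       {
 *           process_one(start++);
 *           if ( count && hypercall_preempt_check() )
 *               return hypercall_create_continuation(
 *                   __HYPERVISOR_example_op, "ll", start, count);
 *       }
 *       return 0;
 *   }
 */
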
int arch_domain_create(struct domain *d)
{
    if (d->domain_id == IDLE_DOMAIN_ID) {
        d->shared_info = (void *)alloc_xenheap_page();
        clear_page(d->shared_info);

        return 0;
    }

    d->arch.large_page_sizes = cpu_large_page_orders(
        d->arch.large_page_order, ARRAY_SIZE(d->arch.large_page_order));

    d->arch.foreign_mfn_count = 1024;
    d->arch.foreign_mfns = xmalloc_array(uint, d->arch.foreign_mfn_count);
    BUG_ON(d->arch.foreign_mfns == NULL);

    memset(d->arch.foreign_mfns, -1, d->arch.foreign_mfn_count * sizeof(uint));

    return 0;
}

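/*
 * Note on arch_domain_create() above: memset()ing foreign_mfns with -1
 * fills every byte with 0xff, so each uint entry reads back as (uint)-1,
 * serving as a "no mapping" sentinel. A lookup under that assumption
 * might be sketched like this (the helper is hypothetical):
 *
 *   static inline int foreign_mfn_valid(struct domain *d, uint idx)
 *   {
 *       return idx < d->arch.foreign_mfn_count &&
 *              d->arch.foreign_mfns[idx] != (uint)-1;
 *   }
 */
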
void arch_domain_destroy(struct domain *d)
{
    shadow_teardown(d);
    /* shared_info is part of the RMA so no need to release it */
}

static void machine_fail(const char *s)
{
    printk("%s failed, manual powercycle required!\n"
           "  spinning....\n", s);
    for (;;)
        sleep();
}

void machine_halt(void)
{
    console_start_sync();
    printk("%s called\n", __func__);
    rtas_halt();

    machine_fail(__func__);
}

void machine_restart(char * __unused)
{
    console_start_sync();
    printk("%s called\n", __func__);
    rtas_reboot();
    machine_fail(__func__);
}

struct vcpu *alloc_vcpu_struct(void)
{
    struct vcpu *v;
    if ( (v = xmalloc(struct vcpu)) != NULL )
        memset(v, 0, sizeof(*v));
    return v;
}

void free_vcpu_struct(struct vcpu *v)
{
    xfree(v);
}

int vcpu_initialise(struct vcpu *v)
{
    return 0;
}

void vcpu_destroy(struct vcpu *v)
{
}

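/*
 * arch_set_info_guest() below is reached from the common code that loads
 * an initial register state into a vcpu (VCPUOP_initialise and the
 * XEN_DOMCTL_setvcpucontext domctl); by that point the domain's hash
 * table and RMA must already have been allocated, which is what the two
 * panic() checks enforce.
 */
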
int arch_set_info_guest(struct vcpu *v, vcpu_guest_context_u c)
{
    struct domain *d = v->domain;

    memcpy(&v->arch.ctxt, &c.nat->user_regs, sizeof(c.nat->user_regs));

    printk("Domain[%d].%d: initializing\n", d->domain_id, v->vcpu_id);

    if (d->arch.htab.order == 0)
        panic("Page table never allocated for Domain: %d\n", d->domain_id);
    if (d->arch.rma_order == 0)
        panic("RMA never allocated for Domain: %d\n", d->domain_id);

    d->shared_info->wc_sec = dom0->shared_info->wc_sec;
    d->shared_info->wc_nsec = dom0->shared_info->wc_nsec;
    d->shared_info->arch.boot_timebase = dom0->shared_info->arch.boot_timebase;

    set_bit(_VCPUF_initialised, &v->vcpu_flags);

    cpu_init_vcpu(v);

    return 0;
}

int arch_vcpu_reset(struct vcpu *v)
{
    panic("%s: called for Dom%d[%d]\n",
          __func__, v->domain->domain_id, v->vcpu_id);
    return 0;
}

void dump_pageframe_info(struct domain *d)
{
    struct page_info *page;

    printk("Memory pages belonging to domain %u:\n", d->domain_id);

    if ( d->tot_pages >= 10 )
    {
        printk("    DomPage list too long to display\n");
    }
    else
    {
        list_for_each_entry ( page, &d->page_list, list )
        {
            printk("    DomPage %p: mfn=%p, caf=%016lx, taf=%" PRtype_info "\n",
                   _p(page_to_maddr(page)), _p(page_to_mfn(page)),
                   page->count_info, page->u.inuse.type_info);
        }
    }

    list_for_each_entry ( page, &d->xenpage_list, list )
    {
        printk("    XenPage %p: mfn=%p, caf=%016lx, taf=%" PRtype_info "\n",
               _p(page_to_maddr(page)), _p(page_to_mfn(page)),
               page->count_info, page->u.inuse.type_info);
    }
}

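/*
 * context_switch(): the guest register frame lives in the per-CPU stack
 * area returned by guest_cpu_user_regs(), so switching vcpus amounts to
 * copying prev's frame out of, and next's frame into, that area before
 * abandoning the current stack via reset_stack_and_jump().
 */
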
void context_switch(struct vcpu *prev, struct vcpu *next)
{
    struct cpu_user_regs *stack_regs = guest_cpu_user_regs();
    cpumask_t dirty_mask = next->vcpu_dirty_cpumask;
    unsigned int cpu = smp_processor_id();

#if 0
    printk("%s: dom %x to dom %x\n", __func__, prev->domain->domain_id,
           next->domain->domain_id);
#endif

    /* Allow at most one CPU at a time to be dirty. */
    ASSERT(cpus_weight(dirty_mask) <= 1);
    if (unlikely(!cpu_isset(cpu, dirty_mask) && !cpus_empty(dirty_mask)))
    {
        /* Other cpus call __sync_lazy_execstate from flush ipi handler. */
        if (!cpus_empty(next->vcpu_dirty_cpumask))
            flush_tlb_mask(next->vcpu_dirty_cpumask);
    }

    /* copy prev guest state off the stack into its vcpu */
    memcpy(&prev->arch.ctxt, stack_regs, sizeof(struct cpu_user_regs));

    set_current(next);

    /* copy next guest state onto the stack */
    memcpy(stack_regs, &next->arch.ctxt, sizeof(struct cpu_user_regs));

    /* save old domain state */
    save_sprs(prev);
    save_float(prev);
    save_segments(prev);

    context_saved(prev);

    /* load up new domain */
    load_sprs(next);
    load_float(next);
    load_segments(next);

    mtsdr1(next->domain->arch.htab.sdr1);
    local_flush_tlb(); /* XXX maybe flush_tlb_mask? */
    cpu_flush_icache();

    if (is_idle_vcpu(next)) {
        reset_stack_and_jump(idle_loop);
    }

    reset_stack_and_jump(full_resume);
    /* not reached */
}

void continue_running(struct vcpu *same)
{
    /* nothing to do */
    return;
}

void sync_vcpu_execstate(struct vcpu *v)
{
    /* do nothing */
    return;
}

static void relinquish_memory(struct domain *d, struct list_head *list)
{
    struct list_head *ent;
    struct page_info *page;

    /* Use a recursive lock, as we may enter 'free_domheap_page'. */
    spin_lock_recursive(&d->page_alloc_lock);

    ent = list->next;
    while ( ent != list )
    {
        page = list_entry(ent, struct page_info, list);

        /* Grab a reference to the page so it won't disappear from under us. */
        if ( unlikely(!get_page(page, d)) )
        {
            /* Couldn't get a reference -- someone is freeing this page. */
            ent = ent->next;
            continue;
        }

        if ( test_and_clear_bit(_PGT_pinned, &page->u.inuse.type_info) )
            put_page_and_type(page);

        if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
            put_page(page);

        /* Follow the list chain and /then/ potentially free the page. */
        ent = ent->next;
        put_page(page);
    }

    spin_unlock_recursive(&d->page_alloc_lock);
}

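/*
 * domain_relinquish_resources() below also frees d->arch.p2m, the
 * physical-to-machine array that this changeset makes the backing store
 * for pfn2mfn() and DOMCTL_getmemlist (see the commit description at the
 * top of this file), along with the foreign mfn map allocated in
 * arch_domain_create().
 */
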
void domain_relinquish_resources(struct domain *d)
{
    relinquish_memory(d, &d->xenpage_list);
    relinquish_memory(d, &d->page_list);
    xfree(d->arch.foreign_mfns);
    xfree(d->arch.p2m);
    return;
}

void arch_dump_domain_info(struct domain *d)
{
}

void arch_dump_vcpu_info(struct vcpu *v)
{
}

static void safe_halt(void)
{
    int cpu = smp_processor_id();

    while (!softirq_pending(cpu))
        sleep();
}

static void default_idle(void)
{
    local_irq_disable();
    if ( !softirq_pending(smp_processor_id()) )
        safe_halt();
    else
        local_irq_enable();
}

void idle_loop(void)
{
    for ( ; ; ) {
        page_scrub_schedule_work();
        default_idle();
        do_softirq();
    }
}

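/*
 * Each pass of idle_loop() above queues any pending page scrubbing, waits
 * in default_idle()/safe_halt() until a softirq is raised, and services it
 * in do_softirq(); a SCHEDULE_SOFTIRQ raised by a timer or a remote CPU is
 * what hands control back to the scheduler.
 */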