
xen/arch/powerpc/domain.c @ 12933:a0b47a11b52d

[XEN][POWERPC] An OK spot to flush the I-Cache
It's not the best, but it will do for now.
Signed-off-by: Jimi Xenidis <jimix@watson.ibm.com>
Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>

author   Jimi Xenidis <jimix@watson.ibm.com>
date     Mon Oct 02 21:43:09 2006 -0400
parents  5b0e0c93a5bf
children 0f4c73a1e92a
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright (C) IBM Corp. 2005, 2006
 *
 * Authors: Jimi Xenidis <jimix@watson.ibm.com>
 */

#include <stdarg.h>
#include <xen/config.h>
#include <xen/lib.h>
#include <xen/sched.h>
#include <xen/mm.h>
#include <xen/serial.h>
#include <xen/domain.h>
#include <xen/console.h>
#include <xen/shutdown.h>
#include <xen/shadow.h>
#include <xen/softirq.h>
#include <asm/htab.h>
#include <asm/current.h>
#include <asm/hcalls.h>
#include "rtas.h"
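
/*
 * next_arg() pulls the next hypercall argument off a va_list according to
 * a one-character format code: 'i' for unsigned int, 'l' for unsigned
 * long, and 'p' or 'h' for pointer/handle arguments (both passed through
 * as void *).  Any other code is a bug.
 */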
#define next_arg(fmt, args) ({                                              \
    unsigned long __arg;                                                    \
    switch ( *(fmt)++ )                                                     \
    {                                                                       \
    case 'i': __arg = (unsigned long)va_arg(args, unsigned int);  break;    \
    case 'l': __arg = (unsigned long)va_arg(args, unsigned long); break;    \
    case 'p': __arg = (unsigned long)va_arg(args, void *);        break;    \
    case 'h': __arg = (unsigned long)va_arg(args, void *);        break;    \
    default:  __arg = 0; BUG();                                             \
    }                                                                       \
    __arg;                                                                  \
})
extern void idle_loop(void);

unsigned long hypercall_create_continuation(unsigned int op,
                                            const char *format, ...)
{
    struct cpu_user_regs *regs = guest_cpu_user_regs();
    const char *p = format;
    va_list args;
    int gprnum = 4;
    int i;

    va_start(args, format);

    regs->pc -= 4; /* re-execute 'sc' */

    for (i = 0; *p != '\0'; i++) {
        regs->gprs[gprnum++] = next_arg(p, args);
    }

    va_end(args);

    /* As luck would have it, we use the same register for hcall opcodes and
     * for hcall return values.  The return value from this function is placed
     * in r3 on return, so modifying regs->gprs[3] would have no effect. */
    return XEN_MARK(op);
}
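
/* Illustrative sketch, not part of the original file: a preemptible
 * hypercall handler would typically bail out and re-queue itself like
 * this (do_example_op and __HYPERVISOR_example_op are hypothetical
 * names):
 *
 *     static long do_example_op(unsigned int cmd, unsigned long addr)
 *     {
 *         if ( hypercall_preempt_check() )
 *             return hypercall_create_continuation(
 *                 __HYPERVISOR_example_op, "il", cmd, addr);
 *         ...
 *     }
 *
 * The "il" format replays cmd as an unsigned int and addr as an unsigned
 * long in r4 and r5; the hypercall number itself travels in r3, which is
 * why this function's return value doubles as the replayed opcode. */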

/* Architecture-specific domain setup.  The idle domain needs nothing but
 * a zeroed shared_info page; real domains also record which large page
 * sizes the CPU supports and start with an empty memory-extent list. */
int arch_domain_create(struct domain *d)
{
    if (d->domain_id == IDLE_DOMAIN_ID) {
        d->shared_info = (void *)alloc_xenheap_page();
        clear_page(d->shared_info);

        return 0;
    }

    d->arch.large_page_sizes = cpu_large_page_orders(
        d->arch.large_page_order, ARRAY_SIZE(d->arch.large_page_order));

    INIT_LIST_HEAD(&d->arch.extent_list);

    return 0;
}

void arch_domain_destroy(struct domain *d)
{
    shadow_teardown(d);
}

static void machine_fail(const char *s)
{
    printf("%s failed, manual powercycle required!\n", s);
    while (1);
}

void machine_halt(void)
{
    console_start_sync();
    printf("%s called\n", __func__);
    rtas_halt();

    machine_fail(__func__);
}

void machine_restart(char * __unused)
{
    console_start_sync();
    printf("%s called\n", __func__);
    rtas_reboot();
    machine_fail(__func__);
}

struct vcpu *alloc_vcpu_struct(void)
{
    struct vcpu *v;
    if ( (v = xmalloc(struct vcpu)) != NULL )
        memset(v, 0, sizeof(*v));
    return v;
}

void free_vcpu_struct(struct vcpu *v)
{
    xfree(v);
}

int vcpu_initialise(struct vcpu *v)
{
    return 0;
}

void vcpu_destroy(struct vcpu *v)
{
}

int arch_set_info_guest(struct vcpu *v, vcpu_guest_context_t *c)
{
    memcpy(&v->arch.ctxt, &c->user_regs, sizeof(c->user_regs));

    printk("Domain[%d].%d: initializing\n",
           v->domain->domain_id, v->vcpu_id);

    if (v->domain->arch.htab.order == 0)
        panic("Page table never allocated for Domain: %d\n",
              v->domain->domain_id);
    if (v->domain->arch.rma_order == 0)
        panic("RMA never allocated for Domain: %d\n",
              v->domain->domain_id);

    set_bit(_VCPUF_initialised, &v->vcpu_flags);

    cpu_init_vcpu(v);

    return 0;
}

void dump_pageframe_info(struct domain *d)
{
    struct page_info *page;

    printk("Memory pages belonging to domain %u:\n", d->domain_id);

    if ( d->tot_pages >= 10 )
    {
        printk("    DomPage list too long to display\n");
    }
    else
    {
        list_for_each_entry ( page, &d->page_list, list )
        {
            printk("    DomPage %p: mfn=%p, caf=%016lx, taf=%" PRtype_info "\n",
                   _p(page_to_maddr(page)), _p(page_to_mfn(page)),
                   page->count_info, page->u.inuse.type_info);
        }
    }

    list_for_each_entry ( page, &d->xenpage_list, list )
    {
        printk("    XenPage %p: mfn=%p, caf=%016lx, taf=%" PRtype_info "\n",
               _p(page_to_maddr(page)), _p(page_to_mfn(page)),
               page->count_info, page->u.inuse.type_info);
    }
}

void context_switch(struct vcpu *prev, struct vcpu *next)
{
    struct cpu_user_regs *stack_regs = guest_cpu_user_regs();
    cpumask_t dirty_mask = next->vcpu_dirty_cpumask;
    unsigned int cpu = smp_processor_id();

#if 0
    printk("%s: dom %x to dom %x\n", __func__, prev->domain->domain_id,
           next->domain->domain_id);
#endif

    /* Allow at most one CPU at a time to be dirty. */
    ASSERT(cpus_weight(dirty_mask) <= 1);
    if (unlikely(!cpu_isset(cpu, dirty_mask) && !cpus_empty(dirty_mask)))
    {
        /* Other cpus call __sync_lazy_execstate from flush ipi handler. */
        if (!cpus_empty(next->vcpu_dirty_cpumask))
            flush_tlb_mask(next->vcpu_dirty_cpumask);
    }

    /* copy prev guest state off the stack into its vcpu */
    memcpy(&prev->arch.ctxt, stack_regs, sizeof(struct cpu_user_regs));

    set_current(next);

    /* copy next guest state onto the stack */
    memcpy(stack_regs, &next->arch.ctxt, sizeof(struct cpu_user_regs));

    /* save old domain state */
    save_sprs(prev);
    save_float(prev);
    save_segments(prev);

    context_saved(prev);

    /* load up new domain */
    load_sprs(next);
    load_float(next);
    load_segments(next);

    mtsdr1(next->domain->arch.htab.sdr1);
    local_flush_tlb(); /* XXX maybe flush_tlb_mask? */
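    /* Per the changeset comment, this is "an OK spot" to flush the
     * I-cache: the new domain's hash table is installed in SDR1 and the
     * TLB has just been flushed, so any stale instruction-cache lines
     * from the previous domain are discarded before the next vcpu runs. */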
    cpu_flush_icache();

    if (is_idle_vcpu(next)) {
        reset_stack_and_jump(idle_loop);
    }

    reset_stack_and_jump(full_resume);
    /* not reached */
}

void continue_running(struct vcpu *same)
{
    /* nothing to do */
    return;
}

void sync_vcpu_execstate(struct vcpu *v)
{
    /* do nothing */
    return;
}

static void relinquish_memory(struct domain *d, struct list_head *list)
{
    struct list_head *ent;
    struct page_info *page;

    /* Use a recursive lock, as we may enter 'free_domheap_page'. */
    spin_lock_recursive(&d->page_alloc_lock);

    ent = list->next;
    while ( ent != list )
    {
        page = list_entry(ent, struct page_info, list);

        /* Grab a reference to the page so it won't disappear from under us. */
        if ( unlikely(!get_page(page, d)) )
        {
            /* Couldn't get a reference -- someone is freeing this page. */
            ent = ent->next;
            continue;
        }
        if ( test_and_clear_bit(_PGT_pinned, &page->u.inuse.type_info) )
            put_page_and_type(page);

        if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
            put_page(page);

        /* Follow the list chain and /then/ potentially free the page. */
        ent = ent->next;
        put_page(page);
    }
    spin_unlock_recursive(&d->page_alloc_lock);
}

void domain_relinquish_resources(struct domain *d)
{
    relinquish_memory(d, &d->page_list);
    free_extents(d);
    return;
}

void arch_dump_domain_info(struct domain *d)
{
}

void arch_dump_vcpu_info(struct vcpu *v)
{
}

extern void sleep(void);
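
/* A note not in the original: sleep(), defined elsewhere in the PowerPC
 * port, is presumably a low-power wait that returns on the next
 * interrupt; safe_halt() below spins in it until this CPU has a softirq
 * pending. */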
static void safe_halt(void)
{
    int cpu = smp_processor_id();

    while (!softirq_pending(cpu))
        sleep();
}

static void default_idle(void)
{
    local_irq_disable();
    if ( !softirq_pending(smp_processor_id()) )
        safe_halt();
    else
        local_irq_enable();
}

void idle_loop(void)
{
    for ( ; ; ) {
        page_scrub_schedule_work();
        default_idle();
        do_softirq();
    }
}