direct-io.hg

view xen/arch/powerpc/domain.c @ 14350:f3f5f2756d75

x86: Add VGCF_online flag to vcpu_guest_context.
Change common Xen code to start all VCPUs (except idle ones)
offline. Change arch code to deal with this.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Mon Mar 12 13:53:43 2007 +0000 (2007-03-12)
parents 7db52e0ed133
children b7ae31726aa6

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2005, 2006, 2007
 *
 * Authors: Jimi Xenidis <jimix@watson.ibm.com>
 *          Ryan Harper <ryanh@us.ibm.com>
 *          Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <stdarg.h>
#include <xen/config.h>
#include <xen/lib.h>
#include <xen/sched.h>
#include <xen/mm.h>
#include <xen/serial.h>
#include <xen/domain.h>
#include <xen/console.h>
#include <xen/shutdown.h>
#include <xen/shadow.h>
#include <xen/softirq.h>
#include <asm/htab.h>
#include <asm/current.h>
#include <asm/hcalls.h>
#include "rtas.h"
#include "exceptions.h"

#define next_arg(fmt, args) ({                                              \
    unsigned long __arg;                                                    \
    switch ( *(fmt)++ )                                                     \
    {                                                                       \
    case 'i': __arg = (unsigned long)va_arg(args, unsigned int);  break;    \
    case 'l': __arg = (unsigned long)va_arg(args, unsigned long); break;    \
    case 'p': __arg = (unsigned long)va_arg(args, void *);        break;    \
    case 'h': __arg = (unsigned long)va_arg(args, void *);        break;    \
    default:  __arg = 0; BUG();                                             \
    }                                                                       \
    __arg;                                                                  \
})
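
/*
 * Each character in the format string consumes one variadic argument via
 * next_arg() above: 'i' an unsigned int, 'l' an unsigned long, and 'p'/'h'
 * a pointer or guest handle, all widened to unsigned long before being
 * written back into the guest's GPRs (r4 upwards).
 */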

unsigned long hypercall_create_continuation(unsigned int op,
                                            const char *format, ...)
{
    struct cpu_user_regs *regs = guest_cpu_user_regs();
    const char *p = format;
    va_list args;
    int gprnum = 4;
    int i;

    va_start(args, format);

    regs->pc -= 4; /* re-execute 'sc' */

    for (i = 0; *p != '\0'; i++) {
        regs->gprs[gprnum++] = next_arg(p, args);
    }

    va_end(args);

    /* As luck would have it, we use the same register for hcall opcodes and
     * for hcall return values.  The return value from this function is placed
     * in r3 on return, so modifying regs->gprs[3] would have no effect. */
    return XEN_MARK(op);
}
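
/*
 * A minimal usage sketch (hypothetical caller, not from this file): a
 * long-running hypercall preempts itself so the guest re-issues it from
 * where it left off; opcode and arguments here are illustrative only:
 *
 *     if ( hypercall_preempt_check() )
 *         return hypercall_create_continuation(__HYPERVISOR_memory_op,
 *                                              "lh", cmd, arg);
 */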

int arch_domain_create(struct domain *d)
{
    if (d->domain_id == IDLE_DOMAIN_ID) {
        d->shared_info = (void *)alloc_xenheap_page();
        clear_page(d->shared_info);

        return 0;
    }

    d->arch.large_page_sizes = cpu_large_page_orders(
        d->arch.large_page_order, ARRAY_SIZE(d->arch.large_page_order));

    d->arch.foreign_mfn_count = 1024;
    d->arch.foreign_mfns = xmalloc_array(uint, d->arch.foreign_mfn_count);
    BUG_ON(d->arch.foreign_mfns == NULL);

    memset(d->arch.foreign_mfns, -1, d->arch.foreign_mfn_count * sizeof(uint));

    return 0;
}

void arch_domain_destroy(struct domain *d)
{
    shadow_teardown(d);
    /* shared_info is part of the RMA so no need to release it */
}

static void machine_fail(const char *s)
{
    printk("%s failed, manual powercycle required!\n"
           "  spinning....\n", s);
    for (;;)
        sleep();
}

void machine_halt(void)
{
    console_start_sync();
    printk("%s called\n", __func__);
    rtas_halt();

    machine_fail(__func__);
}

void machine_restart(char * __unused)
{
    console_start_sync();
    printk("%s called\n", __func__);
    rtas_reboot();
    machine_fail(__func__);
}

struct vcpu *alloc_vcpu_struct(void)
{
    struct vcpu *v;
    if ( (v = xmalloc(struct vcpu)) != NULL )
        memset(v, 0, sizeof(*v));
    return v;
}

void free_vcpu_struct(struct vcpu *v)
{
    xfree(v);
}

int vcpu_initialise(struct vcpu *v)
{
    /* Guests by default have a 100Hz ticker. */
    v->periodic_period = MILLISECS(10);
    return 0;
}

void vcpu_destroy(struct vcpu *v)
{
}

int arch_set_info_guest(struct vcpu *v, vcpu_guest_context_u c)
{
    struct domain *d = v->domain;

    memcpy(&v->arch.ctxt, &c.nat->user_regs, sizeof(c.nat->user_regs));

    printk("Domain[%d].%d: initializing\n", d->domain_id, v->vcpu_id);

    if (d->arch.htab.order == 0)
        panic("Page table never allocated for Domain: %d\n", d->domain_id);
    if (d->arch.rma_order == 0)
        panic("RMA never allocated for Domain: %d\n", d->domain_id);

    d->shared_info->wc_sec = dom0->shared_info->wc_sec;
    d->shared_info->wc_nsec = dom0->shared_info->wc_nsec;
    d->shared_info->arch.boot_timebase = dom0->shared_info->arch.boot_timebase;

    /* Auto-online VCPU0 when it is initialised. */
    if ( !test_and_set_bit(_VCPUF_initialised, &v->vcpu_flags) &&
         (v->vcpu_id == 0) )
        clear_bit(_VCPUF_down, &v->vcpu_flags);

    cpu_init_vcpu(v);

    return 0;
}

int arch_vcpu_reset(struct vcpu *v)
{
    panic("%s: called for Dom%d[%d]\n",
          __func__, v->domain->domain_id, v->vcpu_id);
    return 0;
}

void dump_pageframe_info(struct domain *d)
{
    struct page_info *page;

    printk("Memory pages belonging to domain %u:\n", d->domain_id);

    if ( d->tot_pages >= 10 )
    {
        printk("    DomPage list too long to display\n");
    }
    else
    {
        list_for_each_entry ( page, &d->page_list, list )
        {
            printk("    DomPage %p: mfn=%p, caf=%016lx, taf=%" PRtype_info "\n",
                   _p(page_to_maddr(page)), _p(page_to_mfn(page)),
                   page->count_info, page->u.inuse.type_info);
        }
    }

    list_for_each_entry ( page, &d->xenpage_list, list )
    {
        printk("    XenPage %p: mfn=%p, caf=%016lx, taf=%" PRtype_info "\n",
               _p(page_to_maddr(page)), _p(page_to_mfn(page)),
               page->count_info, page->u.inuse.type_info);
    }
}

void context_switch(struct vcpu *prev, struct vcpu *next)
{
    struct cpu_user_regs *stack_regs = guest_cpu_user_regs();
    cpumask_t dirty_mask = next->vcpu_dirty_cpumask;
    unsigned int cpu = smp_processor_id();

#if 0
    printk("%s: dom %x to dom %x\n", __func__, prev->domain->domain_id,
           next->domain->domain_id);
#endif

    /* Allow at most one CPU at a time to be dirty. */
    ASSERT(cpus_weight(dirty_mask) <= 1);
    if (unlikely(!cpu_isset(cpu, dirty_mask) && !cpus_empty(dirty_mask)))
    {
        /* Other cpus call __sync_lazy_execstate from flush ipi handler. */
        if (!cpus_empty(next->vcpu_dirty_cpumask))
            flush_tlb_mask(next->vcpu_dirty_cpumask);
    }

    /* copy prev guest state off the stack into its vcpu */
    memcpy(&prev->arch.ctxt, stack_regs, sizeof(struct cpu_user_regs));

    set_current(next);

    /* copy next guest state onto the stack */
    memcpy(stack_regs, &next->arch.ctxt, sizeof(struct cpu_user_regs));

    /* save old domain state */
    save_sprs(prev);
    save_float(prev);
    save_segments(prev);

    context_saved(prev);

    /* load up new domain */
    load_sprs(next);
    load_float(next);
    load_segments(next);

    mtsdr1(next->domain->arch.htab.sdr1);
    local_flush_tlb(); /* XXX maybe flush_tlb_mask? */
    cpu_flush_icache();

    if (is_idle_vcpu(next)) {
        reset_stack_and_jump(idle_loop);
    }

    reset_stack_and_jump(full_resume);
    /* not reached */
}

void continue_running(struct vcpu *same)
{
    /* nothing to do */
    return;
}

void sync_vcpu_execstate(struct vcpu *v)
{
    /* do nothing */
    return;
}

static void relinquish_memory(struct domain *d, struct list_head *list)
{
    struct list_head *ent;
    struct page_info *page;

    /* Use a recursive lock, as we may enter 'free_domheap_page'. */
    spin_lock_recursive(&d->page_alloc_lock);

    ent = list->next;
    while ( ent != list )
    {
        page = list_entry(ent, struct page_info, list);

        /* Grab a reference to the page so it won't disappear from under us. */
        if ( unlikely(!get_page(page, d)) )
        {
            /* Couldn't get a reference -- someone is freeing this page. */
            ent = ent->next;
            continue;
        }

        if ( test_and_clear_bit(_PGT_pinned, &page->u.inuse.type_info) )
            put_page_and_type(page);

        if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
            put_page(page);

        /* Follow the list chain and /then/ potentially free the page. */
        ent = ent->next;
        put_page(page);
    }

    spin_unlock_recursive(&d->page_alloc_lock);
}

void domain_relinquish_resources(struct domain *d)
{
    relinquish_memory(d, &d->xenpage_list);
    relinquish_memory(d, &d->page_list);
    xfree(d->arch.foreign_mfns);
    xfree(d->arch.p2m);
    return;
}

void arch_dump_domain_info(struct domain *d)
{
}

void arch_dump_vcpu_info(struct vcpu *v)
{
}

static void safe_halt(void)
{
    int cpu = smp_processor_id();

    while (!softirq_pending(cpu))
        sleep();
}

static void default_idle(void)
{
    local_irq_disable();
    if ( !softirq_pending(smp_processor_id()) )
        safe_halt();
    else
        local_irq_enable();
}

void idle_loop(void)
{
    for ( ; ; ) {
        page_scrub_schedule_work();
        default_idle();
        do_softirq();
    }
}