ia64/xen-unstable: view of xen/common/domain.c @ 11128:f2f584093379

changeset: [POWERPC] Update .hgignore
           Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>
author:    kfraser@localhost.localdomain
date:      Tue Aug 15 10:38:59 2006 +0100
parents:   f328519053f5
children:  88e6bd5e2b54

/******************************************************************************
 * domain.c
 *
 * Generic domain-handling functions.
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/domain.h>
#include <xen/mm.h>
#include <xen/event.h>
#include <xen/time.h>
#include <xen/console.h>
#include <xen/softirq.h>
#include <xen/domain_page.h>
#include <xen/rangeset.h>
#include <xen/guest_access.h>
#include <xen/hypercall.h>
#include <xen/delay.h>
#include <xen/shutdown.h>
#include <xen/percpu.h>
#include <asm/debugger.h>
#include <public/dom0_ops.h>
#include <public/sched.h>
#include <public/vcpu.h>

/* Both these structures are protected by the domlist_lock. */
rwlock_t domlist_lock = RW_LOCK_UNLOCKED;
struct domain *domain_hash[DOMAIN_HASH_SIZE];
struct domain *domain_list;

struct domain *dom0;

struct vcpu *idle_vcpu[NR_CPUS] __read_mostly;
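
/*
 * Allocate and minimally initialise a domain structure: zeroed, holding a
 * single reference for the caller, with its locks and page lists ready for
 * use. Returns NULL on allocation failure.
 */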
struct domain *alloc_domain(domid_t domid)
{
    struct domain *d;

    if ( (d = xmalloc(struct domain)) == NULL )
        return NULL;

    memset(d, 0, sizeof(*d));
    d->domain_id = domid;
    atomic_set(&d->refcnt, 1);
    spin_lock_init(&d->big_lock);
    spin_lock_init(&d->page_alloc_lock);
    spin_lock_init(&d->pause_lock);
    INIT_LIST_HEAD(&d->page_list);
    INIT_LIST_HEAD(&d->xenpage_list);

    return d;
}
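
/*
 * Tear down the scheduler state of @d, free every VCPU structure it still
 * owns (highest ID first), then free the domain structure itself.
 */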
void free_domain(struct domain *d)
{
    struct vcpu *v;
    int i;

    sched_destroy_domain(d);

    for ( i = MAX_VIRT_CPUS-1; i >= 0; i-- )
        if ( (v = d->vcpu[i]) != NULL )
            free_vcpu_struct(v);

    xfree(d);
}
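
/*
 * Create VCPU @vcpu_id for domain @d, initially bound to physical CPU
 * @cpu_id. Idle VCPUs are pinned to their CPU and start runnable; other
 * secondary VCPUs start offline (_VCPUF_down) until brought up with
 * VCPUOP_up. VCPUs must be allocated in ascending ID order, as each new
 * VCPU is linked onto its predecessor's next_in_list pointer.
 */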
struct vcpu *alloc_vcpu(
    struct domain *d, unsigned int vcpu_id, unsigned int cpu_id)
{
    struct vcpu *v;

    BUG_ON(d->vcpu[vcpu_id] != NULL);

    if ( (v = alloc_vcpu_struct(d, vcpu_id)) == NULL )
        return NULL;

    v->domain = d;
    v->vcpu_id = vcpu_id;
    v->processor = cpu_id;
    v->vcpu_info = &d->shared_info->vcpu_info[vcpu_id];
    spin_lock_init(&v->pause_lock);

    v->cpu_affinity = is_idle_domain(d) ?
        cpumask_of_cpu(cpu_id) : CPU_MASK_ALL;

    v->runstate.state = is_idle_vcpu(v) ? RUNSTATE_running : RUNSTATE_offline;
    v->runstate.state_entry_time = NOW();

    if ( (vcpu_id != 0) && !is_idle_domain(d) )
        set_bit(_VCPUF_down, &v->vcpu_flags);

    if ( sched_init_vcpu(v) < 0 )
    {
        free_vcpu_struct(v);
        return NULL;
    }

    d->vcpu[vcpu_id] = v;
    if ( vcpu_id != 0 )
        d->vcpu[v->vcpu_id-1]->next_in_list = v;

    return v;
}
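
/*
 * Create the idle VCPU for physical CPU @cpu_id. A new idle domain is
 * created for every group of MAX_VIRT_CPUS physical CPUs; CPUs within a
 * group share the idle domain created for the group's first CPU.
 */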
struct vcpu *alloc_idle_vcpu(unsigned int cpu_id)
{
    struct domain *d;
    struct vcpu *v;
    unsigned int vcpu_id = cpu_id % MAX_VIRT_CPUS;

    d = (vcpu_id == 0) ?
        domain_create(IDLE_DOMAIN_ID) :
        idle_vcpu[cpu_id - vcpu_id]->domain;
    BUG_ON(d == NULL);

    v = alloc_vcpu(d, vcpu_id, cpu_id);
    idle_vcpu[cpu_id] = v;

    return v;
}
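
/*
 * Create and fully initialise a new domain. Non-idle domains start in the
 * ctrl_paused state and are entered into the global domain list (kept
 * sorted by domid) and the domain hash table. On any failure, all
 * partially constructed state is unwound and NULL is returned.
 */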
struct domain *domain_create(domid_t domid)
{
    struct domain *d, **pd;

    if ( (d = alloc_domain(domid)) == NULL )
        return NULL;

    rangeset_domain_initialise(d);

    if ( !is_idle_domain(d) )
    {
        set_bit(_DOMF_ctrl_pause, &d->domain_flags);
        if ( evtchn_init(d) != 0 )
            goto fail1;
        if ( grant_table_create(d) != 0 )
            goto fail2;
    }

    if ( arch_domain_create(d) != 0 )
        goto fail3;

    d->iomem_caps = rangeset_new(d, "I/O Memory", RANGESETF_prettyprint_hex);
    d->irq_caps   = rangeset_new(d, "Interrupts", 0);
    if ( (d->iomem_caps == NULL) || (d->irq_caps == NULL) )
        goto fail4;

    if ( !is_idle_domain(d) )
    {
        write_lock(&domlist_lock);
        /* NB. domain_list is maintained in order of domid. */
        for ( pd = &domain_list; *pd != NULL; pd = &(*pd)->next_in_list )
            if ( (*pd)->domain_id > d->domain_id )
                break;
        d->next_in_list = *pd;
        *pd = d;
        d->next_in_hashbucket = domain_hash[DOMAIN_HASH(domid)];
        domain_hash[DOMAIN_HASH(domid)] = d;
        write_unlock(&domlist_lock);
    }

    return d;

 fail4:
    arch_domain_destroy(d);
 fail3:
    if ( !is_idle_domain(d) )
        grant_table_destroy(d);
 fail2:
    if ( !is_idle_domain(d) )
        evtchn_destroy(d);
 fail1:
    rangeset_domain_destroy(d);
    free_domain(d);
    return NULL;
}
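
/*
 * Look up a domain by ID in the hash table. On success, a reference is
 * taken on the domain, which the caller must release with put_domain().
 * Returns NULL if no such domain exists or if it is already being
 * destroyed.
 */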
struct domain *find_domain_by_id(domid_t dom)
{
    struct domain *d;

    read_lock(&domlist_lock);
    d = domain_hash[DOMAIN_HASH(dom)];
    while ( d != NULL )
    {
        if ( d->domain_id == dom )
        {
            if ( unlikely(!get_domain(d)) )
                d = NULL;
            break;
        }
        d = d->next_in_hashbucket;
    }
    read_unlock(&domlist_lock);

    return d;
}
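
/*
 * Begin destroying domain @d: pause it, mark it dying (idempotently),
 * release its grant-table mappings and other resources, and drop a
 * reference so that domain_destroy() runs once the final reference goes.
 */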
void domain_kill(struct domain *d)
{
    domain_pause(d);

    if ( test_and_set_bit(_DOMF_dying, &d->domain_flags) )
        return;

    gnttab_release_mappings(d);
    domain_relinquish_resources(d);
    put_domain(d);

    send_guest_global_virq(dom0, VIRQ_DOM_EXC);
}
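
/*
 * Mark domain @d as crashed, logging diagnostics (including the guest's
 * execution state if it crashed on the current CPU), and shut it down
 * with reason SHUTDOWN_crash.
 */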
void __domain_crash(struct domain *d)
{
    if ( d == current->domain )
    {
        printk("Domain %d (vcpu#%d) crashed on cpu#%d:\n",
               d->domain_id, current->vcpu_id, smp_processor_id());
        show_execution_state(guest_cpu_user_regs());
    }
    else
    {
        printk("Domain %d reported crashed by domain %d on cpu#%d:\n",
               d->domain_id, current->domain->domain_id, smp_processor_id());
    }

    domain_shutdown(d, SHUTDOWN_crash);
}
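
/*
 * Crash the current domain and never return to the caller: spin servicing
 * softirqs until the shutdown deschedules this VCPU.
 */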
void __domain_crash_synchronous(void)
{
    __domain_crash(current->domain);
    for ( ; ; )
        do_softirq();
}
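
/*
 * Shutdown finalisation runs in softirq context after domain_shutdown()
 * has put the domain's VCPUs to sleep: it waits for every VCPU to be
 * fully descheduled, then sets _DOMF_shutdown and notifies dom0.
 */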
static DEFINE_PER_CPU(struct domain *, domain_shuttingdown);

static void domain_shutdown_finalise(void)
{
    struct domain *d;
    struct vcpu *v;

    d = this_cpu(domain_shuttingdown);
    this_cpu(domain_shuttingdown) = NULL;

    BUG_ON(d == NULL);
    BUG_ON(d == current->domain);

    LOCK_BIGLOCK(d);

    /* Make sure that every vcpu is descheduled before we finalise. */
    for_each_vcpu ( d, v )
        vcpu_sleep_sync(v);
    BUG_ON(!cpus_empty(d->domain_dirty_cpumask));

    /* Don't set DOMF_shutdown until execution contexts are sync'ed. */
    if ( !test_and_set_bit(_DOMF_shutdown, &d->domain_flags) )
        send_guest_global_virq(dom0, VIRQ_DOM_EXC);

    UNLOCK_BIGLOCK(d);

    put_domain(d);
}

static __init int domain_shutdown_finaliser_init(void)
{
    open_softirq(DOMAIN_SHUTDOWN_FINALISE_SOFTIRQ, domain_shutdown_finalise);
    return 0;
}
__initcall(domain_shutdown_finaliser_init);
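
/*
 * Shut down domain @d for @reason (poweroff, reboot, crash, ...).
 * Domain 0 is handled specially by dom0_shutdown(). For other domains the
 * VCPUs are put to sleep without waiting, and finalisation is deferred to
 * the DOMAIN_SHUTDOWN_FINALISE softirq; a reference is held until the
 * finaliser drops it.
 */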
void domain_shutdown(struct domain *d, u8 reason)
{
    struct vcpu *v;

    if ( d->domain_id == 0 )
        dom0_shutdown(reason);

    /* Mark the domain as shutting down. */
    d->shutdown_code = reason;

    /* Put every vcpu to sleep, but don't wait (avoids inter-vcpu deadlock). */
    spin_lock(&d->pause_lock);
    d->pause_count++;
    set_bit(_DOMF_paused, &d->domain_flags);
    spin_unlock(&d->pause_lock);
    for_each_vcpu ( d, v )
        vcpu_sleep_nosync(v);

    get_knownalive_domain(d);
    this_cpu(domain_shuttingdown) = d;
    raise_softirq(DOMAIN_SHUTDOWN_FINALISE_SOFTIRQ);
}
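
/*
 * Pause the current domain on behalf of an external debugger and raise
 * VIRQ_DEBUGGER so the debugger knows to attach.
 */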
void domain_pause_for_debugger(void)
{
    struct domain *d = current->domain;
    struct vcpu *v;

    /*
     * NOTE: This does not synchronously pause the domain. The debugger
     * must issue a PAUSEDOMAIN command to ensure that all execution
     * has ceased and guest state is committed to memory.
     */
    set_bit(_DOMF_ctrl_pause, &d->domain_flags);
    for_each_vcpu ( d, v )
        vcpu_sleep_nosync(v);

    send_guest_global_virq(dom0, VIRQ_DEBUGGER);
}

/* Release resources belonging to domain @d. */
void domain_destroy(struct domain *d)
{
    struct domain **pd;
    atomic_t old, new;

    BUG_ON(!test_bit(_DOMF_dying, &d->domain_flags));

    /* May be already destroyed, or get_domain() can race us. */
    _atomic_set(old, 0);
    _atomic_set(new, DOMAIN_DESTROYED);
    old = atomic_compareandswap(old, new, &d->refcnt);
    if ( _atomic_read(old) != 0 )
        return;

    /* Delete from task list and task hashtable. */
    write_lock(&domlist_lock);
    pd = &domain_list;
    while ( *pd != d )
        pd = &(*pd)->next_in_list;
    *pd = d->next_in_list;
    pd = &domain_hash[DOMAIN_HASH(d->domain_id)];
    while ( *pd != d )
        pd = &(*pd)->next_in_hashbucket;
    *pd = d->next_in_hashbucket;
    write_unlock(&domlist_lock);

    rangeset_domain_destroy(d);

    evtchn_destroy(d);
    grant_table_destroy(d);

    arch_domain_destroy(d);

    free_domain(d);

    send_guest_global_virq(dom0, VIRQ_DOM_EXC);
}
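
/*
 * Pause operations are reference-counted: a VCPU or domain becomes
 * runnable again only once every pause has been matched by an unpause.
 * The _sync sleep used here does not return until the target is fully
 * descheduled, which is why a VCPU may never pause itself.
 */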
void vcpu_pause(struct vcpu *v)
{
    ASSERT(v != current);

    spin_lock(&v->pause_lock);
    if ( v->pause_count++ == 0 )
        set_bit(_VCPUF_paused, &v->vcpu_flags);
    spin_unlock(&v->pause_lock);

    vcpu_sleep_sync(v);
}

void vcpu_unpause(struct vcpu *v)
{
    int wake;

    ASSERT(v != current);

    spin_lock(&v->pause_lock);
    wake = (--v->pause_count == 0);
    if ( wake )
        clear_bit(_VCPUF_paused, &v->vcpu_flags);
    spin_unlock(&v->pause_lock);

    if ( wake )
        vcpu_wake(v);
}

void domain_pause(struct domain *d)
{
    struct vcpu *v;

    ASSERT(d != current->domain);

    spin_lock(&d->pause_lock);
    if ( d->pause_count++ == 0 )
        set_bit(_DOMF_paused, &d->domain_flags);
    spin_unlock(&d->pause_lock);

    for_each_vcpu( d, v )
        vcpu_sleep_sync(v);
}

void domain_unpause(struct domain *d)
{
    struct vcpu *v;
    int wake;

    ASSERT(d != current->domain);

    spin_lock(&d->pause_lock);
    wake = (--d->pause_count == 0);
    if ( wake )
        clear_bit(_DOMF_paused, &d->domain_flags);
    spin_unlock(&d->pause_lock);

    if ( wake )
        for_each_vcpu( d, v )
            vcpu_wake(v);
}
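
/*
 * Control-tool pause is a single flag (_DOMF_ctrl_pause) rather than a
 * count: repeated pause requests from the system controller collapse into
 * one, and a single unpause request releases the domain.
 */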
void domain_pause_by_systemcontroller(struct domain *d)
{
    struct vcpu *v;

    BUG_ON(current->domain == d);

    if ( !test_and_set_bit(_DOMF_ctrl_pause, &d->domain_flags) )
    {
        for_each_vcpu ( d, v )
            vcpu_sleep_sync(v);
    }
}

void domain_unpause_by_systemcontroller(struct domain *d)
{
    struct vcpu *v;

    if ( test_and_clear_bit(_DOMF_ctrl_pause, &d->domain_flags) )
    {
        for_each_vcpu ( d, v )
            vcpu_wake(v);
    }
}

/*
 * set_info_guest is used for final setup, launching, and state modification
 * of domains other than domain 0, i.e. the domains that are being built by
 * the userspace dom0 domain builder.
 */
int set_info_guest(struct domain *d, dom0_setvcpucontext_t *setvcpucontext)
{
    int rc = 0;
    struct vcpu_guest_context *c = NULL;
    unsigned long vcpu = setvcpucontext->vcpu;
    struct vcpu *v;

    if ( (vcpu >= MAX_VIRT_CPUS) || ((v = d->vcpu[vcpu]) == NULL) )
        return -EINVAL;

    if ( (c = xmalloc(struct vcpu_guest_context)) == NULL )
        return -ENOMEM;

    domain_pause(d);

    rc = -EFAULT;
    if ( copy_from_guest(c, setvcpucontext->ctxt, 1) == 0 )
        rc = arch_set_info_guest(v, c);

    domain_unpause(d);

    xfree(c);
    return rc;
}
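
/*
 * Initialise VCPU @vcpuid of domain @d from the supplied guest context.
 * The VCPU must not already be initialised; callers serialise on the
 * domain's big lock, as do_vcpu_op() does.
 */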
int boot_vcpu(struct domain *d, int vcpuid, struct vcpu_guest_context *ctxt)
{
    struct vcpu *v = d->vcpu[vcpuid];

    BUG_ON(test_bit(_VCPUF_initialised, &v->vcpu_flags));

    return arch_set_info_guest(v, ctxt);
}
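
/*
 * VCPUOP hypercall entry point: a guest manages its own VCPUs by
 * initialising them with a guest context, bringing them up or down,
 * querying whether they are up, or reading their runstate information.
 * Unrecognised commands are forwarded to arch_do_vcpu_op().
 */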
long do_vcpu_op(int cmd, int vcpuid, XEN_GUEST_HANDLE(void) arg)
{
    struct domain *d = current->domain;
    struct vcpu *v;
    struct vcpu_guest_context *ctxt;
    long rc = 0;

    if ( (vcpuid < 0) || (vcpuid >= MAX_VIRT_CPUS) )
        return -EINVAL;

    if ( (v = d->vcpu[vcpuid]) == NULL )
        return -ENOENT;

    switch ( cmd )
    {
    case VCPUOP_initialise:
        if ( (ctxt = xmalloc(struct vcpu_guest_context)) == NULL )
        {
            rc = -ENOMEM;
            break;
        }

        if ( copy_from_guest(ctxt, arg, 1) )
        {
            xfree(ctxt);
            rc = -EFAULT;
            break;
        }

        LOCK_BIGLOCK(d);
        rc = -EEXIST;
        if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
            rc = boot_vcpu(d, vcpuid, ctxt);
        UNLOCK_BIGLOCK(d);

        xfree(ctxt);
        break;

    case VCPUOP_up:
        if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
            rc = -EINVAL;
        else if ( test_and_clear_bit(_VCPUF_down, &v->vcpu_flags) )
            vcpu_wake(v);
        break;

    case VCPUOP_down:
        if ( !test_and_set_bit(_VCPUF_down, &v->vcpu_flags) )
            vcpu_sleep_nosync(v);
        break;

    case VCPUOP_is_up:
        rc = !test_bit(_VCPUF_down, &v->vcpu_flags);
        break;

    case VCPUOP_get_runstate_info:
    {
        struct vcpu_runstate_info runstate;
        vcpu_runstate_get(v, &runstate);
        if ( copy_to_guest(arg, &runstate, 1) )
            rc = -EFAULT;
        break;
    }

    default:
        rc = arch_do_vcpu_op(cmd, v, arg);
        break;
    }

    return rc;
}
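
/* Enable or disable one of the vm_assist modes for domain @p. */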
long vm_assist(struct domain *p, unsigned int cmd, unsigned int type)
{
    if ( type > MAX_VMASST_TYPE )
        return -EINVAL;

    switch ( cmd )
    {
    case VMASST_CMD_enable:
        set_bit(type, &p->vm_assist);
        return 0;
    case VMASST_CMD_disable:
        clear_bit(type, &p->vm_assist);
        return 0;
    }

    return -ENOSYS;
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */