ia64/xen-unstable

xen/common/domain.c @ 12902:4ae4bdee00e6

[XEN] Only allow each domain to output crash debug info once.
Signed-off-by: Keir Fraser <keir@xensource.com>

author    kfraser@localhost.localdomain
date      Mon Dec 11 16:32:25 2006 +0000 (2006-12-11)
parents   107f74d363ea
children  360eb996fa38
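
In this revision, __domain_crash() prints the crash banner and guest register dump only if the domain is not already marked _DOMF_shutdown, so each domain emits crash debug output at most once; repeated crash reports for a domain that is already shutting down are silent.
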
/******************************************************************************
 * domain.c
 *
 * Generic domain-handling functions.
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/domain.h>
#include <xen/mm.h>
#include <xen/event.h>
#include <xen/time.h>
#include <xen/console.h>
#include <xen/softirq.h>
#include <xen/domain_page.h>
#include <xen/rangeset.h>
#include <xen/guest_access.h>
#include <xen/hypercall.h>
#include <xen/delay.h>
#include <xen/shutdown.h>
#include <xen/percpu.h>
#include <xen/multicall.h>
#include <asm/debugger.h>
#include <public/sched.h>
#include <public/vcpu.h>

/* Both these structures are protected by the domlist_lock. */
DEFINE_RWLOCK(domlist_lock);
struct domain *domain_hash[DOMAIN_HASH_SIZE];
struct domain *domain_list;

struct domain *dom0;

struct vcpu *idle_vcpu[NR_CPUS] __read_mostly;

int current_domain_id(void)
{
    return current->domain->domain_id;
}

struct domain *alloc_domain(domid_t domid)
{
    struct domain *d;

    if ( (d = xmalloc(struct domain)) == NULL )
        return NULL;

    memset(d, 0, sizeof(*d));
    d->domain_id = domid;
    atomic_set(&d->refcnt, 1);
    spin_lock_init(&d->big_lock);
    spin_lock_init(&d->page_alloc_lock);
    spin_lock_init(&d->pause_lock);
    INIT_LIST_HEAD(&d->page_list);
    INIT_LIST_HEAD(&d->xenpage_list);

    return d;
}
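
/*
 * Tear down and free a domain allocated by alloc_domain(). Any vcpus that
 * were allocated are destroyed in reverse order before the scheduler state
 * and the domain structure itself are freed.
 */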
void free_domain(struct domain *d)
{
    struct vcpu *v;
    int i;

    for ( i = MAX_VIRT_CPUS-1; i >= 0; i-- )
    {
        if ( (v = d->vcpu[i]) == NULL )
            continue;
        vcpu_destroy(v);
        sched_destroy_vcpu(v);
        free_vcpu_struct(v);
    }

    sched_destroy_domain(d);
    xfree(d);
}
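
/*
 * Allocate and initialise vcpu @vcpu_id of domain @d, to run on @cpu_id.
 * Every vcpu other than vcpu0 of a non-idle domain starts offline
 * (_VCPUF_down set) and must be brought up explicitly, e.g. via VCPUOP_up.
 * On success the new vcpu is linked after vcpu @vcpu_id-1 in the domain's
 * vcpu list.
 */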
struct vcpu *alloc_vcpu(
    struct domain *d, unsigned int vcpu_id, unsigned int cpu_id)
{
    struct vcpu *v;

    BUG_ON(d->vcpu[vcpu_id] != NULL);

    if ( (v = alloc_vcpu_struct()) == NULL )
        return NULL;

    v->domain = d;
    v->vcpu_id = vcpu_id;
    v->vcpu_info = &d->shared_info->vcpu_info[vcpu_id];
    spin_lock_init(&v->pause_lock);

    v->runstate.state = is_idle_vcpu(v) ? RUNSTATE_running : RUNSTATE_offline;
    v->runstate.state_entry_time = NOW();

    if ( (vcpu_id != 0) && !is_idle_domain(d) )
        set_bit(_VCPUF_down, &v->vcpu_flags);

    if ( sched_init_vcpu(v, cpu_id) != 0 )
    {
        free_vcpu_struct(v);
        return NULL;
    }

    if ( vcpu_initialise(v) != 0 )
    {
        sched_destroy_vcpu(v);
        free_vcpu_struct(v);
        return NULL;
    }

    d->vcpu[vcpu_id] = v;
    if ( vcpu_id != 0 )
        d->vcpu[v->vcpu_id-1]->next_in_list = v;

    return v;
}

struct vcpu *alloc_idle_vcpu(unsigned int cpu_id)
{
    struct domain *d;
    struct vcpu *v;
    unsigned int vcpu_id = cpu_id % MAX_VIRT_CPUS;

    d = (vcpu_id == 0) ?
        domain_create(IDLE_DOMAIN_ID, 0) :
        idle_vcpu[cpu_id - vcpu_id]->domain;
    BUG_ON(d == NULL);

    v = alloc_vcpu(d, vcpu_id, cpu_id);
    idle_vcpu[cpu_id] = v;

    return v;
}
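
/*
 * Create and initialise a new domain. A non-idle domain starts paused by
 * the controller (_DOMF_ctrl_pause) and, on success, is inserted into both
 * the global domain list (kept sorted by domain_id) and the domid hash.
 * Returns NULL on any failure, after unwinding whatever was set up.
 */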
struct domain *domain_create(domid_t domid, unsigned int domcr_flags)
{
    struct domain *d, **pd;

    if ( (d = alloc_domain(domid)) == NULL )
        return NULL;

    if ( domcr_flags & DOMCRF_hvm )
        d->is_hvm = 1;

    rangeset_domain_initialise(d);

    if ( !is_idle_domain(d) )
    {
        set_bit(_DOMF_ctrl_pause, &d->domain_flags);
        if ( evtchn_init(d) != 0 )
            goto fail1;
        if ( grant_table_create(d) != 0 )
            goto fail2;
    }

    if ( arch_domain_create(d) != 0 )
        goto fail3;

    d->iomem_caps = rangeset_new(d, "I/O Memory", RANGESETF_prettyprint_hex);
    d->irq_caps   = rangeset_new(d, "Interrupts", 0);
    if ( (d->iomem_caps == NULL) || (d->irq_caps == NULL) )
        goto fail4;

    if ( sched_init_domain(d) != 0 )
        goto fail4;

    if ( !is_idle_domain(d) )
    {
        write_lock(&domlist_lock);
        /* NB. domain_list is maintained in order of domid. */
        for ( pd = &domain_list; *pd != NULL; pd = &(*pd)->next_in_list )
            if ( (*pd)->domain_id > d->domain_id )
                break;
        d->next_in_list = *pd;
        *pd = d;
        d->next_in_hashbucket = domain_hash[DOMAIN_HASH(domid)];
        domain_hash[DOMAIN_HASH(domid)] = d;
        write_unlock(&domlist_lock);
    }

    return d;

 fail4:
    arch_domain_destroy(d);
 fail3:
    if ( !is_idle_domain(d) )
        grant_table_destroy(d);
 fail2:
    if ( !is_idle_domain(d) )
        evtchn_destroy(d);
 fail1:
    rangeset_domain_destroy(d);
    free_domain(d);
    return NULL;
}
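
/*
 * Look up a domain by id and take a reference to it via get_domain().
 * Returns NULL if no such domain exists or if it is already being
 * destroyed. The caller must drop the reference with put_domain().
 */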
struct domain *find_domain_by_id(domid_t dom)
{
    struct domain *d;

    read_lock(&domlist_lock);
    d = domain_hash[DOMAIN_HASH(dom)];
    while ( d != NULL )
    {
        if ( d->domain_id == dom )
        {
            if ( unlikely(!get_domain(d)) )
                d = NULL;
            break;
        }
        d = d->next_in_hashbucket;
    }
    read_unlock(&domlist_lock);

    return d;
}
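
/*
 * Begin destruction of domain @d: mark it dying, release grant-table
 * mappings and other resources, and drop the initial reference taken in
 * alloc_domain(). Final teardown happens in domain_destroy() once the
 * last remaining reference is dropped.
 */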
void domain_kill(struct domain *d)
{
    domain_pause(d);

    if ( test_and_set_bit(_DOMF_dying, &d->domain_flags) )
        return;

    gnttab_release_mappings(d);
    domain_relinquish_resources(d);
    put_domain(d);

    send_guest_global_virq(dom0, VIRQ_DOM_EXC);
}
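
/*
 * Mark a domain as crashed and shut it down. The crash banner and guest
 * register dump are printed at most once per domain: once _DOMF_shutdown
 * is set, further crash reports print nothing.
 */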
void __domain_crash(struct domain *d)
{
    if ( test_bit(_DOMF_shutdown, &d->domain_flags) )
    {
        /* Print nothing: the domain is already shutting down. */
    }
    else if ( d == current->domain )
    {
        printk("Domain %d (vcpu#%d) crashed on cpu#%d:\n",
               d->domain_id, current->vcpu_id, smp_processor_id());
        show_execution_state(guest_cpu_user_regs());
    }
    else
    {
        printk("Domain %d reported crashed by domain %d on cpu#%d:\n",
               d->domain_id, current->domain->domain_id, smp_processor_id());
    }

    domain_shutdown(d, SHUTDOWN_crash);
}
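
/*
 * Crash the currently executing domain. This never returns to the caller:
 * after flushing any in-progress multicall state, the vcpu spins in
 * do_softirq() until the scheduler deschedules it for good.
 */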
void __domain_crash_synchronous(void)
{
    __domain_crash(current->domain);

    /*
     * Flush multicall state before dying if a multicall is in progress.
     * This shouldn't be necessary, but some architectures are calling
     * domain_crash_synchronous() when they really shouldn't (i.e., from
     * within hypercall context).
     */
    if ( this_cpu(mc_state).flags != 0 )
    {
        dprintk(XENLOG_ERR,
                "FIXME: synchronous domain crash during a multicall!\n");
        this_cpu(mc_state).flags = 0;
    }

    for ( ; ; )
        do_softirq();
}
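
/*
 * Put domain @d into the shutdown state with the given reason code and
 * put all of its vcpus to sleep. The shutdown reason is latched only on
 * the first shutdown request. A shutdown of domain 0 is forwarded to
 * dom0_shutdown() for special handling.
 */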
void domain_shutdown(struct domain *d, u8 reason)
{
    struct vcpu *v;

    if ( d->domain_id == 0 )
        dom0_shutdown(reason);

    if ( !test_and_set_bit(_DOMF_shutdown, &d->domain_flags) )
        d->shutdown_code = reason;

    for_each_vcpu ( d, v )
        vcpu_sleep_nosync(v);

    send_guest_global_virq(dom0, VIRQ_DOM_EXC);
}

void domain_pause_for_debugger(void)
{
    struct domain *d = current->domain;
    struct vcpu *v;

    set_bit(_DOMF_ctrl_pause, &d->domain_flags);

    for_each_vcpu ( d, v )
        vcpu_sleep_nosync(v);

    send_guest_global_virq(dom0, VIRQ_DEBUGGER);
}

/* Release resources belonging to domain @d. */
void domain_destroy(struct domain *d)
{
    struct domain **pd;
    atomic_t old, new;

    BUG_ON(!test_bit(_DOMF_dying, &d->domain_flags));

    /* May be already destroyed, or get_domain() can race us. */
    _atomic_set(old, 0);
    _atomic_set(new, DOMAIN_DESTROYED);
    old = atomic_compareandswap(old, new, &d->refcnt);
    if ( _atomic_read(old) != 0 )
        return;

    /* Delete from domain list and domain hashtable. */
    write_lock(&domlist_lock);
    pd = &domain_list;
    while ( *pd != d )
        pd = &(*pd)->next_in_list;
    *pd = d->next_in_list;
    pd = &domain_hash[DOMAIN_HASH(d->domain_id)];
    while ( *pd != d )
        pd = &(*pd)->next_in_hashbucket;
    *pd = d->next_in_hashbucket;
    write_unlock(&domlist_lock);

    rangeset_domain_destroy(d);

    evtchn_destroy(d);
    grant_table_destroy(d);

    arch_domain_destroy(d);

    free_domain(d);

    send_guest_global_virq(dom0, VIRQ_DOM_EXC);
}
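
/*
 * vcpu_pause()/vcpu_unpause() maintain a per-vcpu pause count under
 * pause_lock; only the 0 -> 1 and 1 -> 0 transitions change _VCPUF_paused
 * and actually sleep or wake the vcpu, so pauses nest safely. Neither may
 * be applied to the currently executing vcpu.
 */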
void vcpu_pause(struct vcpu *v)
{
    ASSERT(v != current);

    spin_lock(&v->pause_lock);
    if ( v->pause_count++ == 0 )
        set_bit(_VCPUF_paused, &v->vcpu_flags);
    spin_unlock(&v->pause_lock);

    vcpu_sleep_sync(v);
}

void vcpu_unpause(struct vcpu *v)
{
    int wake;

    ASSERT(v != current);

    spin_lock(&v->pause_lock);
    wake = (--v->pause_count == 0);
    if ( wake )
        clear_bit(_VCPUF_paused, &v->vcpu_flags);
    spin_unlock(&v->pause_lock);

    if ( wake )
        vcpu_wake(v);
}

void domain_pause(struct domain *d)
{
    struct vcpu *v;

    ASSERT(d != current->domain);

    spin_lock(&d->pause_lock);
    if ( d->pause_count++ == 0 )
        set_bit(_DOMF_paused, &d->domain_flags);
    spin_unlock(&d->pause_lock);

    for_each_vcpu ( d, v )
        vcpu_sleep_sync(v);
}

void domain_unpause(struct domain *d)
{
    struct vcpu *v;
    int wake;

    ASSERT(d != current->domain);

    spin_lock(&d->pause_lock);
    wake = (--d->pause_count == 0);
    if ( wake )
        clear_bit(_DOMF_paused, &d->domain_flags);
    spin_unlock(&d->pause_lock);

    if ( wake )
        for_each_vcpu ( d, v )
            vcpu_wake(v);
}
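
/*
 * Controller-initiated pause is a single flag (_DOMF_ctrl_pause) rather
 * than a count: pause/unpause requests from the tools do not nest, and
 * only the first pause (or the matching unpause) has any effect.
 */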
void domain_pause_by_systemcontroller(struct domain *d)
{
    struct vcpu *v;

    BUG_ON(current->domain == d);

    if ( !test_and_set_bit(_DOMF_ctrl_pause, &d->domain_flags) )
    {
        for_each_vcpu ( d, v )
            vcpu_sleep_sync(v);
    }
}

void domain_unpause_by_systemcontroller(struct domain *d)
{
    struct vcpu *v;

    if ( test_and_clear_bit(_DOMF_ctrl_pause, &d->domain_flags) )
    {
        for_each_vcpu ( d, v )
            vcpu_wake(v);
    }
}

/*
 * set_info_guest() is used for final setup, launching, and state
 * modification of domains other than domain 0, i.e. the domains being
 * built by the userspace domain builder running in dom0.
 */
int set_info_guest(struct domain *d,
                   xen_domctl_vcpucontext_t *vcpucontext)
{
    int rc = 0;
    struct vcpu_guest_context *c = NULL;
    unsigned long vcpu = vcpucontext->vcpu;
    struct vcpu *v;

    if ( (vcpu >= MAX_VIRT_CPUS) || ((v = d->vcpu[vcpu]) == NULL) )
        return -EINVAL;

    if ( (c = xmalloc(struct vcpu_guest_context)) == NULL )
        return -ENOMEM;

    domain_pause(d);

    rc = -EFAULT;
    if ( copy_from_guest(c, vcpucontext->ctxt, 1) == 0 )
        rc = arch_set_info_guest(v, c);

    domain_unpause(d);

    xfree(c);
    return rc;
}

int boot_vcpu(struct domain *d, int vcpuid, struct vcpu_guest_context *ctxt)
{
    struct vcpu *v = d->vcpu[vcpuid];

    BUG_ON(test_bit(_VCPUF_initialised, &v->vcpu_flags));

    return arch_set_info_guest(v, ctxt);
}
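
/*
 * VCPUOP_* hypercall dispatcher. A guest may initialise, bring up, take
 * down, and query its own vcpus; any command not handled here is
 * forwarded to arch_do_vcpu_op().
 */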
long do_vcpu_op(int cmd, int vcpuid, XEN_GUEST_HANDLE(void) arg)
{
    struct domain *d = current->domain;
    struct vcpu *v;
    struct vcpu_guest_context *ctxt;
    long rc = 0;

    if ( (vcpuid < 0) || (vcpuid >= MAX_VIRT_CPUS) )
        return -EINVAL;

    if ( (v = d->vcpu[vcpuid]) == NULL )
        return -ENOENT;

    switch ( cmd )
    {
    case VCPUOP_initialise:
        if ( (ctxt = xmalloc(struct vcpu_guest_context)) == NULL )
        {
            rc = -ENOMEM;
            break;
        }

        if ( copy_from_guest(ctxt, arg, 1) )
        {
            xfree(ctxt);
            rc = -EFAULT;
            break;
        }

        LOCK_BIGLOCK(d);
        rc = -EEXIST;
        if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
            rc = boot_vcpu(d, vcpuid, ctxt);
        UNLOCK_BIGLOCK(d);

        xfree(ctxt);
        break;

    case VCPUOP_up:
        if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
            rc = -EINVAL;
        else if ( test_and_clear_bit(_VCPUF_down, &v->vcpu_flags) )
            vcpu_wake(v);
        break;

    case VCPUOP_down:
        if ( !test_and_set_bit(_VCPUF_down, &v->vcpu_flags) )
            vcpu_sleep_nosync(v);
        break;

    case VCPUOP_is_up:
        rc = !test_bit(_VCPUF_down, &v->vcpu_flags);
        break;

    case VCPUOP_get_runstate_info:
    {
        struct vcpu_runstate_info runstate;
        vcpu_runstate_get(v, &runstate);
        if ( copy_to_guest(arg, &runstate, 1) )
            rc = -EFAULT;
        break;
    }

    default:
        rc = arch_do_vcpu_op(cmd, v, arg);
        break;
    }

    return rc;
}
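
/*
 * Enable or disable one of the VMASST_TYPE_* assistance modes for domain
 * @p by setting or clearing the corresponding bit in the vm_assist bitmap.
 */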
long vm_assist(struct domain *p, unsigned int cmd, unsigned int type)
{
    if ( type > MAX_VMASST_TYPE )
        return -EINVAL;

    switch ( cmd )
    {
    case VMASST_CMD_enable:
        set_bit(type, &p->vm_assist);
        return 0;
    case VMASST_CMD_disable:
        clear_bit(type, &p->vm_assist);
        return 0;
    }

    return -ENOSYS;
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */