direct-io.hg

view xen/common/domain.c @ 7357:d6e99066959a

Refactor domain/vcpu allocation to be more separated.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Wed Oct 12 17:01:38 2005 +0100 (2005-10-12)
parents 52b9aca1916a
children 475e2a8493b8
line source
1 /******************************************************************************
2 * domain.c
3 *
4 * Generic domain-handling functions.
5 */
7 #include <xen/config.h>
8 #include <xen/init.h>
9 #include <xen/lib.h>
10 #include <xen/errno.h>
11 #include <xen/sched.h>
12 #include <xen/domain.h>
13 #include <xen/mm.h>
14 #include <xen/event.h>
15 #include <xen/time.h>
16 #include <xen/console.h>
17 #include <xen/softirq.h>
18 #include <xen/domain_page.h>
19 #include <asm/debugger.h>
20 #include <public/dom0_ops.h>
21 #include <public/sched.h>
22 #include <public/vcpu.h>
/* Both these structures are protected by the domlist_lock. */
rwlock_t domlist_lock = RW_LOCK_UNLOCKED;
struct domain *domain_hash[DOMAIN_HASH_SIZE];
struct domain *domain_list;

/* Domain 0; read in this file to deliver VIRQs. NOTE(review): presumably
 * assigned once during boot and immutable thereafter -- confirm at caller. */
struct domain *dom0;
/*
 * Allocate and minimally initialise a new domain with identity @dom_id,
 * creating its first vcpu (vcpu0) on physical cpu @cpu.  Non-idle domains
 * are inserted into the global domain list (kept sorted by domain id) and
 * the domain hash table.  Returns the new domain, or NULL on failure.
 */
struct domain *do_createdomain(domid_t dom_id, unsigned int cpu)
{
    struct domain *d, **pd;
    struct vcpu *v;

    if ( (d = alloc_domain()) == NULL )
        return NULL;

    d->domain_id = dom_id;

    /* Initial reference is held by the caller. */
    atomic_set(&d->refcnt, 1);

    spin_lock_init(&d->big_lock);
    spin_lock_init(&d->page_alloc_lock);
    INIT_LIST_HEAD(&d->page_list);
    INIT_LIST_HEAD(&d->xenpage_list);

    /* Non-idle domains start controller-paused; the builder unpauses them. */
    if ( d->domain_id == IDLE_DOMAIN_ID )
        set_bit(_DOMF_idle_domain, &d->domain_flags);
    else
        set_bit(_DOMF_ctrl_pause, &d->domain_flags);

    /*
     * NOTE(review): if evtchn_init() itself fails we still invoke
     * evtchn_destroy() below -- presumably safe on a part-initialised
     * structure; confirm against evtchn_destroy()'s contract.
     */
    if ( !is_idle_task(d) &&
         ((evtchn_init(d) != 0) || (grant_table_create(d) != 0)) )
    {
        evtchn_destroy(d);
        free_domain(d);
        return NULL;
    }

    if ( (v = alloc_vcpu(d, 0, cpu)) == NULL )
    {
        grant_table_destroy(d);
        evtchn_destroy(d);
        free_domain(d);
        return NULL;
    }

    arch_do_createdomain(v);

    /* Idle domains are never entered into the list or hash table. */
    if ( !is_idle_task(d) )
    {
        write_lock(&domlist_lock);
        pd = &domain_list; /* NB. domain_list maintained in order of dom_id. */
        for ( pd = &domain_list; *pd != NULL; pd = &(*pd)->next_in_list )
            if ( (*pd)->domain_id > d->domain_id )
                break;
        d->next_in_list = *pd;
        *pd = d;
        d->next_in_hashbucket = domain_hash[DOMAIN_HASH(dom_id)];
        domain_hash[DOMAIN_HASH(dom_id)] = d;
        write_unlock(&domlist_lock);
    }

    return d;
}
89 struct domain *find_domain_by_id(domid_t dom)
90 {
91 struct domain *d;
93 read_lock(&domlist_lock);
94 d = domain_hash[DOMAIN_HASH(dom)];
95 while ( d != NULL )
96 {
97 if ( d->domain_id == dom )
98 {
99 if ( unlikely(!get_domain(d)) )
100 d = NULL;
101 break;
102 }
103 d = d->next_in_hashbucket;
104 }
105 read_unlock(&domlist_lock);
107 return d;
108 }
/*
 * Mark @d as dying: pause it, deschedule all its vcpus, relinquish its
 * resources and drop one domain reference.  The _DOMF_dying test-and-set
 * makes the teardown idempotent -- it runs at most once per domain.
 */
void domain_kill(struct domain *d)
{
    struct vcpu *v;

    domain_pause(d);
    if ( !test_and_set_bit(_DOMF_dying, &d->domain_flags) )
    {
        for_each_vcpu(d, v)
            sched_rem_domain(v);
        domain_relinquish_resources(d);
        put_domain(d);

        /* Tell dom0 about the state change. */
        send_guest_virq(dom0->vcpu[0], VIRQ_DOM_EXC);
    }
}
/*
 * Crash the currently-executing domain: log the event, dump its guest
 * registers, and shut it down with reason SHUTDOWN_crash.  This does not
 * synchronously stop the current vcpu; see domain_crash_synchronous().
 */
void domain_crash(void)
{
    printk("Domain %d (vcpu#%d) crashed on cpu#%d:\n",
           current->domain->domain_id, current->vcpu_id, smp_processor_id());
    show_registers(guest_cpu_user_regs());
    domain_shutdown(SHUTDOWN_crash);
}
/*
 * Crash the current domain and never return: spin servicing softirqs
 * until the scheduler takes this vcpu off the CPU.
 */
void domain_crash_synchronous(void)
{
    domain_crash();
    while ( 1 )
        do_softirq();
}
/* Per-CPU slot: the domain whose shutdown this CPU's softirq must finalise. */
static struct domain *domain_shuttingdown[NR_CPUS];
/*
 * Softirq handler completing a shutdown begun by domain_shutdown().  Runs
 * on the CPU whose vcpu won the race to set _DOMF_shuttingdown: waits for
 * every vcpu of the domain to be fully descheduled, syncs pagetable state,
 * flips the flags from 'shutting down' to 'shut down', and signals dom0.
 */
static void domain_shutdown_finalise(void)
{
    struct domain *d;
    struct vcpu *v;

    /* Claim (and clear) this CPU's pending-shutdown slot. */
    d = domain_shuttingdown[smp_processor_id()];
    domain_shuttingdown[smp_processor_id()] = NULL;

    BUG_ON(d == NULL);
    BUG_ON(d == current->domain);
    BUG_ON(!test_bit(_DOMF_shuttingdown, &d->domain_flags));
    BUG_ON(test_bit(_DOMF_shutdown, &d->domain_flags));

    /* Make sure that every vcpu is descheduled before we finalise. */
    for_each_vcpu ( d, v )
        vcpu_sleep_sync(v);
    BUG_ON(!cpus_empty(d->cpumask));

    sync_pagetable_state(d);

    set_bit(_DOMF_shutdown, &d->domain_flags);
    clear_bit(_DOMF_shuttingdown, &d->domain_flags);

    /* Tell dom0 the domain has finished shutting down. */
    send_guest_virq(dom0->vcpu[0], VIRQ_DOM_EXC);
}
/* Register the shutdown-finalise softirq handler at boot. */
static __init int domain_shutdown_finaliser_init(void)
{
    open_softirq(DOMAIN_SHUTDOWN_FINALISE_SOFTIRQ, domain_shutdown_finalise);
    return 0;
}
__initcall(domain_shutdown_finaliser_init);
181 void domain_shutdown(u8 reason)
182 {
183 struct domain *d = current->domain;
184 struct vcpu *v;
186 if ( d->domain_id == 0 )
187 {
188 extern void machine_restart(char *);
189 extern void machine_halt(void);
191 debugger_trap_immediate();
193 if ( reason == SHUTDOWN_poweroff )
194 {
195 printk("Domain 0 halted: halting machine.\n");
196 machine_halt();
197 }
198 else
199 {
200 printk("Domain 0 shutdown: rebooting machine.\n");
201 machine_restart(0);
202 }
203 }
205 /* Mark the domain as shutting down. */
206 d->shutdown_code = reason;
207 if ( !test_and_set_bit(_DOMF_shuttingdown, &d->domain_flags) )
208 {
209 /* This vcpu won the race to finalise the shutdown. */
210 domain_shuttingdown[smp_processor_id()] = d;
211 raise_softirq(DOMAIN_SHUTDOWN_FINALISE_SOFTIRQ);
212 }
214 /* Put every vcpu to sleep, but don't wait (avoids inter-vcpu deadlock). */
215 for_each_vcpu ( d, v )
216 vcpu_sleep_nosync(v);
217 }
/*
 * Pause the current vcpu's domain on behalf of an attached debugger and
 * notify dom0 via VIRQ_DEBUGGER.
 */
void domain_pause_for_debugger(void)
{
    struct domain *d = current->domain;
    struct vcpu *v;

    /*
     * NOTE: This does not synchronously pause the domain. The debugger
     * must issue a PAUSEDOMAIN command to ensure that all execution
     * has ceased and guest state is committed to memory.
     */
    set_bit(_DOMF_ctrl_pause, &d->domain_flags);
    for_each_vcpu ( d, v )
        vcpu_sleep_nosync(v);

    send_guest_virq(dom0->vcpu[0], VIRQ_DEBUGGER);
}
/*
 * Release all resources belonging to domain @d, called once its reference
 * count has dropped to zero.  The compare-and-swap on refcnt
 * (0 -> DOMAIN_DESTRUCTED) ensures destruction runs exactly once even if
 * get_domain() races with us or we are re-entered.
 */
void domain_destruct(struct domain *d)
{
    struct domain **pd;
    atomic_t old, new;

    BUG_ON(!test_bit(_DOMF_dying, &d->domain_flags));

    /* May be already destructed, or get_domain() can race us. */
    _atomic_set(old, 0);
    _atomic_set(new, DOMAIN_DESTRUCTED);
    old = atomic_compareandswap(old, new, &d->refcnt);
    if ( _atomic_read(old) != 0 )
        return;  /* Lost the race: still referenced or already destructed. */

    /* Delete from task list and task hashtable. */
    write_lock(&domlist_lock);
    pd = &domain_list;
    while ( *pd != d )
        pd = &(*pd)->next_in_list;
    *pd = d->next_in_list;
    pd = &domain_hash[DOMAIN_HASH(d->domain_id)];
    while ( *pd != d )
        pd = &(*pd)->next_in_hashbucket;
    *pd = d->next_in_hashbucket;
    write_unlock(&domlist_lock);

    evtchn_destroy(d);
    grant_table_destroy(d);

    free_perdomain_pt(d);
    free_xenheap_page(d->shared_info);

    free_domain(d);

    /* Tell dom0 the domain has gone away. */
    send_guest_virq(dom0->vcpu[0], VIRQ_DOM_EXC);
}
/*
 * Synchronously pause @v: bump its pause count and wait until it is fully
 * descheduled.  Must not be called on the current vcpu.  Pair with
 * vcpu_unpause().
 */
void vcpu_pause(struct vcpu *v)
{
    BUG_ON(v == current);
    atomic_inc(&v->pausecnt);
    vcpu_sleep_sync(v);
}
283 void domain_pause(struct domain *d)
284 {
285 struct vcpu *v;
287 for_each_vcpu( d, v )
288 {
289 BUG_ON(v == current);
290 atomic_inc(&v->pausecnt);
291 vcpu_sleep_sync(v);
292 }
293 }
/*
 * Drop one pause reference on @v, waking it when the count reaches zero.
 * Must not be called on the current vcpu.
 */
void vcpu_unpause(struct vcpu *v)
{
    BUG_ON(v == current);
    if ( atomic_dec_and_test(&v->pausecnt) )
        vcpu_wake(v);
}
/* Undo one domain_pause(): drop one pause reference on every vcpu of @d. */
void domain_unpause(struct domain *d)
{
    struct vcpu *v;

    for_each_vcpu( d, v )
        vcpu_unpause(v);
}
/*
 * Pause @d on behalf of the control tools.  Unlike the per-vcpu pause
 * counts this is a single flag (_DOMF_ctrl_pause), so repeated calls are
 * idempotent.  Waits synchronously for every vcpu to deschedule.
 */
void domain_pause_by_systemcontroller(struct domain *d)
{
    struct vcpu *v;

    BUG_ON(current->domain == d);

    if ( !test_and_set_bit(_DOMF_ctrl_pause, &d->domain_flags) )
    {
        for_each_vcpu ( d, v )
            vcpu_sleep_sync(v);
    }
}
/*
 * Undo domain_pause_by_systemcontroller(): clear the controller-pause
 * flag and wake all vcpus.  A no-op if the flag was not set.
 */
void domain_unpause_by_systemcontroller(struct domain *d)
{
    struct vcpu *v;

    if ( test_and_clear_bit(_DOMF_ctrl_pause, &d->domain_flags) )
    {
        for_each_vcpu ( d, v )
            vcpu_wake(v);
    }
}
335 /*
336 * set_info_guest is used for final setup, launching, and state modification
337 * of domains other than domain 0. ie. the domains that are being built by
338 * the userspace dom0 domain builder.
339 */
/*
 * Load a guest context into one vcpu of domain @d, as directed by the
 * dom0 control interface.  The target vcpu index and user-space context
 * pointer come from @setdomaininfo.  Returns 0 on success, -EINVAL for a
 * bad vcpu or an unpaused domain, -ENOMEM or -EFAULT on copy failure.
 */
int set_info_guest(struct domain *d, dom0_setdomaininfo_t *setdomaininfo)
{
    int rc = 0;
    struct vcpu_guest_context *c = NULL;
    unsigned long vcpu = setdomaininfo->vcpu;
    struct vcpu *v;

    if ( (vcpu >= MAX_VIRT_CPUS) || ((v = d->vcpu[vcpu]) == NULL) )
        return -EINVAL;

    /* The domain must be controller-paused before we modify vcpu state. */
    if ( !test_bit(_DOMF_ctrl_pause, &d->domain_flags) )
        return -EINVAL;

    if ( (c = xmalloc(struct vcpu_guest_context)) == NULL )
        return -ENOMEM;

    rc = -EFAULT;
    if ( copy_from_user(c, setdomaininfo->ctxt, sizeof(*c)) == 0 )
        rc = arch_set_info_guest(v, c);

    xfree(c);
    return rc;
}
364 int boot_vcpu(struct domain *d, int vcpuid, struct vcpu_guest_context *ctxt)
365 {
366 struct vcpu *v = d->vcpu[vcpuid];
367 int rc;
369 BUG_ON(test_bit(_VCPUF_initialised, &v->vcpu_flags));
371 if ( (rc = arch_set_info_guest(v, ctxt)) != 0 )
372 return rc;
374 return rc;
375 }
377 long do_vcpu_op(int cmd, int vcpuid, void *arg)
378 {
379 struct domain *d = current->domain;
380 struct vcpu *v;
381 struct vcpu_guest_context *ctxt;
382 long rc = 0;
384 if ( (vcpuid < 0) || (vcpuid >= MAX_VIRT_CPUS) )
385 return -EINVAL;
387 if ( (v = d->vcpu[vcpuid]) == NULL )
388 return -ENOENT;
390 switch ( cmd )
391 {
392 case VCPUOP_initialise:
393 if ( (ctxt = xmalloc(struct vcpu_guest_context)) == NULL )
394 {
395 rc = -ENOMEM;
396 break;
397 }
399 if ( copy_from_user(ctxt, arg, sizeof(*ctxt)) )
400 {
401 xfree(ctxt);
402 rc = -EFAULT;
403 break;
404 }
406 LOCK_BIGLOCK(d);
407 rc = -EEXIST;
408 if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
409 rc = boot_vcpu(d, vcpuid, ctxt);
410 UNLOCK_BIGLOCK(d);
412 xfree(ctxt);
413 break;
415 case VCPUOP_up:
416 if ( test_and_clear_bit(_VCPUF_down, &v->vcpu_flags) )
417 vcpu_wake(v);
418 break;
420 case VCPUOP_down:
421 if ( !test_and_set_bit(_VCPUF_down, &v->vcpu_flags) )
422 vcpu_sleep_nosync(v);
423 break;
425 case VCPUOP_is_up:
426 rc = !test_bit(_VCPUF_down, &v->vcpu_flags);
427 break;
428 }
430 return rc;
431 }
433 long vm_assist(struct domain *p, unsigned int cmd, unsigned int type)
434 {
435 if ( type > MAX_VMASST_TYPE )
436 return -EINVAL;
438 switch ( cmd )
439 {
440 case VMASST_CMD_enable:
441 set_bit(type, &p->vm_assist);
442 return 0;
443 case VMASST_CMD_disable:
444 clear_bit(type, &p->vm_assist);
445 return 0;
446 }
448 return -ENOSYS;
449 }
451 /*
452 * Local variables:
453 * mode: C
454 * c-set-style: "BSD"
455 * c-basic-offset: 4
456 * tab-width: 4
457 * indent-tabs-mode: nil
458 * End:
459 */