direct-io.hg

view xen/common/domain.c @ 5517:10e9028c8e3d

bitkeeper revision 1.1718.1.10 (42b7b19aqOS_1M8I4pIOFjiTPYWV-g)

Merge bk://xenbits.xensource.com/xen-unstable.bk
into spot.cl.cam.ac.uk:C:/Documents and Settings/iap10/xen-unstable.bk
author iap10@spot.cl.cam.ac.uk
date Tue Jun 21 06:20:10 2005 +0000 (2005-06-21)
parents 849b58da37b7
children 43564304cf94 b53a65034532
line source
1 /******************************************************************************
2 * domain.c
3 *
4 * Generic domain-handling functions.
5 */
7 #include <xen/config.h>
8 #include <xen/init.h>
9 #include <xen/lib.h>
10 #include <xen/errno.h>
11 #include <xen/sched.h>
12 #include <xen/domain.h>
13 #include <xen/mm.h>
14 #include <xen/event.h>
15 #include <xen/time.h>
16 #include <xen/console.h>
17 #include <xen/softirq.h>
18 #include <xen/domain_page.h>
19 #include <asm/debugger.h>
20 #include <public/dom0_ops.h>
/* Both these structures are protected by the domlist_lock. */
rwlock_t domlist_lock = RW_LOCK_UNLOCKED;
struct domain *domain_hash[DOMAIN_HASH_SIZE]; /* id -> domain lookup chains */
struct domain *domain_list;                   /* kept sorted by domain_id */

struct domain *dom0; /* domain 0's descriptor; initialised elsewhere */
29 struct domain *do_createdomain(domid_t dom_id, unsigned int cpu)
30 {
31 struct domain *d, **pd;
32 struct vcpu *v;
34 if ( (d = alloc_domain_struct()) == NULL )
35 return NULL;
37 v = d->vcpu[0];
39 atomic_set(&d->refcnt, 1);
40 atomic_set(&v->pausecnt, 0);
42 d->domain_id = dom_id;
43 v->processor = cpu;
45 spin_lock_init(&d->time_lock);
47 spin_lock_init(&d->big_lock);
49 spin_lock_init(&d->page_alloc_lock);
50 INIT_LIST_HEAD(&d->page_list);
51 INIT_LIST_HEAD(&d->xenpage_list);
53 if ( d->domain_id == IDLE_DOMAIN_ID )
54 set_bit(_DOMF_idle_domain, &d->domain_flags);
56 if ( !is_idle_task(d) &&
57 ((evtchn_init(d) != 0) || (grant_table_create(d) != 0)) )
58 {
59 evtchn_destroy(d);
60 free_domain_struct(d);
61 return NULL;
62 }
64 arch_do_createdomain(v);
66 sched_add_domain(v);
68 if ( !is_idle_task(d) )
69 {
70 write_lock(&domlist_lock);
71 pd = &domain_list; /* NB. domain_list maintained in order of dom_id. */
72 for ( pd = &domain_list; *pd != NULL; pd = &(*pd)->next_in_list )
73 if ( (*pd)->domain_id > d->domain_id )
74 break;
75 d->next_in_list = *pd;
76 *pd = d;
77 d->next_in_hashbucket = domain_hash[DOMAIN_HASH(dom_id)];
78 domain_hash[DOMAIN_HASH(dom_id)] = d;
79 write_unlock(&domlist_lock);
80 }
82 return d;
83 }
86 struct domain *find_domain_by_id(domid_t dom)
87 {
88 struct domain *d;
90 read_lock(&domlist_lock);
91 d = domain_hash[DOMAIN_HASH(dom)];
92 while ( d != NULL )
93 {
94 if ( d->domain_id == dom )
95 {
96 if ( unlikely(!get_domain(d)) )
97 d = NULL;
98 break;
99 }
100 d = d->next_in_hashbucket;
101 }
102 read_unlock(&domlist_lock);
104 return d;
105 }
108 void domain_kill(struct domain *d)
109 {
110 struct vcpu *v;
112 domain_pause(d);
113 if ( !test_and_set_bit(_DOMF_dying, &d->domain_flags) )
114 {
115 for_each_vcpu(d, v)
116 sched_rem_domain(v);
117 domain_relinquish_resources(d);
118 put_domain(d);
119 }
120 }
123 void domain_crash(void)
124 {
125 printk("Domain %d (vcpu#%d) crashed on cpu#%d:\n",
126 current->domain->domain_id, current->vcpu_id, smp_processor_id());
127 show_registers(guest_cpu_user_regs());
128 domain_shutdown(SHUTDOWN_crash);
129 }
/*
 * Crash the current domain and never return: spin servicing softirqs so
 * the shutdown-finalise work (and rescheduling) can proceed.
 */
void domain_crash_synchronous(void)
{
    domain_crash();
    while ( 1 )
        do_softirq();
}
/* Per-cpu slot: the domain whose shutdown this cpu must finalise. */
static struct domain *domain_shuttingdown[NR_CPUS];

/*
 * Softirq handler that completes a shutdown begun by domain_shutdown().
 * Runs on the cpu whose vcpu won the race in domain_shutdown(); waits for
 * every vcpu of the domain to be descheduled, then moves the domain from
 * the 'shuttingdown' to the 'shutdown' state and notifies domain 0.
 */
static void domain_shutdown_finalise(void)
{
    struct domain *d;
    struct vcpu *v;

    /* Claim (and clear) this cpu's pending-shutdown slot. */
    d = domain_shuttingdown[smp_processor_id()];
    domain_shuttingdown[smp_processor_id()] = NULL;

    BUG_ON(d == NULL);
    BUG_ON(d == current->domain);
    BUG_ON(!test_bit(_DOMF_shuttingdown, &d->domain_flags));
    BUG_ON(test_bit(_DOMF_shutdown, &d->domain_flags));

    /* Make sure that every vcpu is descheduled before we finalise. */
    for_each_vcpu ( d, v )
        while ( test_bit(_VCPUF_running, &v->vcpu_flags) )
            cpu_relax();

    /* NOTE(review): presumably flushes lazily-held execution state off the
     * cpus in d->cpumask, leaving the mask empty — confirm semantics. */
    sync_lazy_execstate_mask(d->cpumask);
    BUG_ON(!cpus_empty(d->cpumask));

    sync_pagetable_state(d);

    /* Order matters: publish 'shutdown' before clearing 'shuttingdown'. */
    set_bit(_DOMF_shutdown, &d->domain_flags);
    clear_bit(_DOMF_shuttingdown, &d->domain_flags);

    /* Tell the domain-0 toolstack that the domain's state has changed. */
    send_guest_virq(dom0->vcpu[0], VIRQ_DOM_EXC);
}
/* Register domain_shutdown_finalise() on its dedicated softirq at boot. */
static __init int domain_shutdown_finaliser_init(void)
{
    open_softirq(DOMAIN_SHUTDOWN_FINALISE_SOFTIRQ, domain_shutdown_finalise);
    return 0;
}
__initcall(domain_shutdown_finaliser_init);
179 void domain_shutdown(u8 reason)
180 {
181 struct domain *d = current->domain;
182 struct vcpu *v;
184 if ( d->domain_id == 0 )
185 {
186 extern void machine_restart(char *);
187 extern void machine_halt(void);
189 debugger_trap_immediate();
191 if ( reason == SHUTDOWN_poweroff )
192 {
193 printk("Domain 0 halted: halting machine.\n");
194 machine_halt();
195 }
196 else
197 {
198 printk("Domain 0 shutdown: rebooting machine.\n");
199 machine_restart(0);
200 }
201 }
203 /* Mark the domain as shutting down. */
204 d->shutdown_code = reason;
205 if ( !test_and_set_bit(_DOMF_shuttingdown, &d->domain_flags) )
206 {
207 /* This vcpu won the race to finalise the shutdown. */
208 domain_shuttingdown[smp_processor_id()] = d;
209 raise_softirq(DOMAIN_SHUTDOWN_FINALISE_SOFTIRQ);
210 }
212 /* Put every vcpu to sleep, but don't wait (avoids inter-vcpu deadlock). */
213 for_each_vcpu ( d, v )
214 domain_sleep_nosync(v);
215 }
/*
 * Put every vcpu of the current domain into the controller-pause state
 * and notify the debugger (via domain 0's VIRQ_DEBUGGER).
 */
void domain_pause_for_debugger(void)
{
    struct domain *d = current->domain;
    struct vcpu *v;

    /*
     * NOTE: This does not synchronously pause the domain. The debugger
     * must issue a PAUSEDOMAIN command to ensure that all execution
     * has ceased and guest state is committed to memory.
     */
    for_each_vcpu ( d, v )
    {
        /* Set the pause flag before sleeping so wakeups are suppressed. */
        set_bit(_VCPUF_ctrl_pause, &v->vcpu_flags);
        domain_sleep_nosync(v);
    }

    send_guest_virq(dom0->vcpu[0], VIRQ_DEBUGGER);
}
/* Release resources belonging to task @p. */
/*
 * Final teardown of a dying domain.  Safe against concurrent callers and
 * against racing get_domain(): only the caller whose compare-and-swap moves
 * the refcount from 0 to DOMAIN_DESTRUCTED proceeds; everyone else returns.
 */
void domain_destruct(struct domain *d)
{
    struct domain **pd;
    atomic_t old, new;

    BUG_ON(!test_bit(_DOMF_dying, &d->domain_flags));

    /* May be already destructed, or get_domain() can race us. */
    _atomic_set(old, 0);
    _atomic_set(new, DOMAIN_DESTRUCTED);
    old = atomic_compareandswap(old, new, &d->refcnt);
    if ( _atomic_read(old) != 0 )
        return;

    /* Delete from task list and task hashtable. */
    write_lock(&domlist_lock);
    pd = &domain_list;
    while ( *pd != d )
        pd = &(*pd)->next_in_list;
    *pd = d->next_in_list;
    pd = &domain_hash[DOMAIN_HASH(d->domain_id)];
    while ( *pd != d )
        pd = &(*pd)->next_in_hashbucket;
    *pd = d->next_in_hashbucket;
    write_unlock(&domlist_lock);

    evtchn_destroy(d);
    grant_table_destroy(d);

    free_perdomain_pt(d);
    free_xenheap_page(d->shared_info);

    free_domain_struct(d);

    /* Notify the domain-0 toolstack that the domain is gone. */
    send_guest_virq(dom0->vcpu[0], VIRQ_DOM_EXC);
}
/*
 * Synchronously pause @v: raise its pause count, then wait until it is
 * descheduled.  Must not be called on the currently-executing vcpu.
 * Paired with vcpu_unpause().
 */
void vcpu_pause(struct vcpu *v)
{
    BUG_ON(v == current);
    atomic_inc(&v->pausecnt);
    domain_sleep_sync(v);
}
283 void domain_pause(struct domain *d)
284 {
285 struct vcpu *v;
287 for_each_vcpu( d, v )
288 {
289 BUG_ON(v == current);
290 atomic_inc(&v->pausecnt);
291 domain_sleep_sync(v);
292 }
293 }
/*
 * Drop one pause reference on @v; wake it when the count reaches zero.
 * Must not be called on the currently-executing vcpu.
 */
void vcpu_unpause(struct vcpu *v)
{
    BUG_ON(v == current);
    if ( atomic_dec_and_test(&v->pausecnt) )
        domain_wake(v);
}
/* Drop one pause reference on every vcpu of @d (see vcpu_unpause()). */
void domain_unpause(struct domain *d)
{
    struct vcpu *v;

    for_each_vcpu( d, v )
        vcpu_unpause(v);
}
/*
 * Pause @d on behalf of the control tools, using the dedicated
 * _VCPUF_ctrl_pause flag (idempotent per vcpu — a vcpu already flagged is
 * not slept again).  Undone by domain_unpause_by_systemcontroller().
 */
void domain_pause_by_systemcontroller(struct domain *d)
{
    struct vcpu *v;

    for_each_vcpu ( d, v )
    {
        BUG_ON(v == current);
        if ( !test_and_set_bit(_VCPUF_ctrl_pause, &v->vcpu_flags) )
            domain_sleep_sync(v);
    }
}
322 void domain_unpause_by_systemcontroller(struct domain *d)
323 {
324 struct vcpu *v;
326 for_each_vcpu ( d, v )
327 {
328 if ( test_and_clear_bit(_VCPUF_ctrl_pause, &v->vcpu_flags) )
329 domain_wake(v);
330 }
331 }
334 /*
335 * set_info_guest is used for final setup, launching, and state modification
336 * of domains other than domain 0. ie. the domains that are being built by
337 * the userspace dom0 domain builder.
338 */
339 int set_info_guest(struct domain *d, dom0_setdomaininfo_t *setdomaininfo)
340 {
341 int rc = 0;
342 struct vcpu_guest_context *c = NULL;
343 unsigned long vcpu = setdomaininfo->vcpu;
344 struct vcpu *v;
346 if ( (vcpu >= MAX_VIRT_CPUS) || ((v = d->vcpu[vcpu]) == NULL) )
347 return -EINVAL;
349 if (test_bit(_DOMF_constructed, &d->domain_flags) &&
350 !test_bit(_VCPUF_ctrl_pause, &v->vcpu_flags))
351 return -EINVAL;
353 if ( (c = xmalloc(struct vcpu_guest_context)) == NULL )
354 return -ENOMEM;
356 if ( copy_from_user(c, setdomaininfo->ctxt, sizeof(*c)) )
357 {
358 rc = -EFAULT;
359 goto out;
360 }
362 if ( (rc = arch_set_info_guest(v, c)) != 0 )
363 goto out;
365 set_bit(_DOMF_constructed, &d->domain_flags);
367 out:
368 xfree(c);
369 return rc;
370 }
/*
 * do_boot_vcpu creates and starts an additional vcpu in the *current*
 * domain.  Slot @vcpu must be within range and currently empty; the
 * initial register state is copied from the guest-supplied @ctxt.
 * On any failure the freshly-allocated vcpu slot is torn down again.
 */
long do_boot_vcpu(unsigned long vcpu, struct vcpu_guest_context *ctxt)
{
    struct domain *d = current->domain;
    struct vcpu *v;
    int rc = 0;
    struct vcpu_guest_context *c;

    if ( (vcpu >= MAX_VIRT_CPUS) || (d->vcpu[vcpu] != NULL) )
        return -EINVAL;

    if ( alloc_vcpu_struct(d, vcpu) == NULL )
        return -ENOMEM;

    if ( (c = xmalloc(struct vcpu_guest_context)) == NULL )
    {
        rc = -ENOMEM;
        goto out;
    }

    if ( copy_from_user(c, ctxt, sizeof(*c)) )
    {
        rc = -EFAULT;
        goto out;
    }

    v = d->vcpu[vcpu];

    atomic_set(&v->pausecnt, 0);
    v->cpumap = CPUMAP_RUNANYWHERE; /* no cpu-affinity restriction */

    /* Start from the idle vcpu's architectural state as a template. */
    memcpy(&v->arch, &idle0_vcpu.arch, sizeof(v->arch));

    arch_do_boot_vcpu(v);

    if ( (rc = arch_set_info_guest(v, c)) != 0 )
        goto out;

    sched_add_domain(v);

    /* domain_unpause_by_systemcontroller */
    if ( test_and_clear_bit(_VCPUF_ctrl_pause, &v->vcpu_flags) )
        domain_wake(v);

    xfree(c);
    return 0;

 out:
    /* NOTE(review): c is NULL here if xmalloc failed — assumes xfree(NULL)
     * is a no-op; confirm against xmalloc implementation. */
    xfree(c);
    arch_free_vcpu_struct(d->vcpu[vcpu]);
    d->vcpu[vcpu] = NULL;
    return rc;
}
430 long vm_assist(struct domain *p, unsigned int cmd, unsigned int type)
431 {
432 if ( type > MAX_VMASST_TYPE )
433 return -EINVAL;
435 switch ( cmd )
436 {
437 case VMASST_CMD_enable:
438 set_bit(type, &p->vm_assist);
439 return 0;
440 case VMASST_CMD_disable:
441 clear_bit(type, &p->vm_assist);
442 return 0;
443 }
445 return -ENOSYS;
446 }
448 /*
449 * Local variables:
450 * mode: C
451 * c-set-style: "BSD"
452 * c-basic-offset: 4
453 * tab-width: 4
454 * indent-tabs-mode: nil
455 * End:
456 */