ia64/xen-unstable

view xen/common/domain.c @ 4335:9c0a556b33b3

bitkeeper revision 1.1236.43.17 (4244977dF3r2NTA36BFdUWC_2TNwzg)

Header include fix.
author kaf24@firebug.cl.cam.ac.uk
date Fri Mar 25 22:58:05 2005 +0000 (2005-03-25)
parents f71b1114a5c3
children a18e1426d4c8
line source
1 /******************************************************************************
2 * domain.c
3 *
4 * Generic domain-handling functions.
5 */
7 #include <xen/config.h>
8 #include <xen/init.h>
9 #include <xen/lib.h>
10 #include <xen/sched.h>
11 #include <xen/errno.h>
12 #include <xen/sched.h>
13 #include <xen/mm.h>
14 #include <xen/event.h>
15 #include <xen/time.h>
16 #include <xen/console.h>
17 #include <xen/softirq.h>
18 #include <asm/shadow.h>
19 #include <public/dom0_ops.h>
20 #include <asm/domain_page.h>
21 #include <asm/debugger.h>
23 /* Both these structures are protected by the domlist_lock. */
24 rwlock_t domlist_lock = RW_LOCK_UNLOCKED;
25 struct domain *domain_hash[DOMAIN_HASH_SIZE];
26 struct domain *domain_list;
28 struct domain *dom0;
30 struct domain *do_createdomain(domid_t dom_id, unsigned int cpu)
31 {
32 struct domain *d, **pd;
33 struct exec_domain *ed;
35 if ( (d = alloc_domain_struct()) == NULL )
36 return NULL;
38 ed = d->exec_domain[0];
40 atomic_set(&d->refcnt, 1);
41 atomic_set(&ed->pausecnt, 0);
43 d->id = dom_id;
44 ed->processor = cpu;
46 spin_lock_init(&d->time_lock);
48 spin_lock_init(&d->big_lock);
50 spin_lock_init(&d->page_alloc_lock);
51 INIT_LIST_HEAD(&d->page_list);
52 INIT_LIST_HEAD(&d->xenpage_list);
54 /* Per-domain PCI-device list. */
55 spin_lock_init(&d->pcidev_lock);
56 INIT_LIST_HEAD(&d->pcidev_list);
58 if ( (d->id != IDLE_DOMAIN_ID) &&
59 ((init_event_channels(d) != 0) || (grant_table_create(d) != 0)) )
60 {
61 destroy_event_channels(d);
62 free_domain_struct(d);
63 return NULL;
64 }
66 arch_do_createdomain(ed);
68 sched_add_domain(ed);
70 if ( d->id != IDLE_DOMAIN_ID )
71 {
72 write_lock(&domlist_lock);
73 pd = &domain_list; /* NB. domain_list maintained in order of dom_id. */
74 for ( pd = &domain_list; *pd != NULL; pd = &(*pd)->next_list )
75 if ( (*pd)->id > d->id )
76 break;
77 d->next_list = *pd;
78 *pd = d;
79 d->next_hash = domain_hash[DOMAIN_HASH(dom_id)];
80 domain_hash[DOMAIN_HASH(dom_id)] = d;
81 write_unlock(&domlist_lock);
82 }
84 return d;
85 }
88 struct domain *find_domain_by_id(domid_t dom)
89 {
90 struct domain *d;
92 read_lock(&domlist_lock);
93 d = domain_hash[DOMAIN_HASH(dom)];
94 while ( d != NULL )
95 {
96 if ( d->id == dom )
97 {
98 if ( unlikely(!get_domain(d)) )
99 d = NULL;
100 break;
101 }
102 d = d->next_hash;
103 }
104 read_unlock(&domlist_lock);
106 return d;
107 }
/*
 * physdev teardown: a real implementation is provided elsewhere for
 * non-ia64 builds; on ia64 it is stubbed out as a no-op.
 */
#ifdef CONFIG_IA64
#define physdev_destroy_state(_d) ((void)0)
#else
extern void physdev_destroy_state(struct domain *d);
#endif
116 void domain_kill(struct domain *d)
117 {
118 struct exec_domain *ed;
120 domain_pause(d);
121 if ( !test_and_set_bit(DF_DYING, &d->d_flags) )
122 {
123 for_each_exec_domain(d, ed)
124 sched_rem_domain(ed);
125 domain_relinquish_memory(d);
126 physdev_destroy_state(d);
127 put_domain(d);
128 }
129 }
132 void domain_crash(void)
133 {
134 struct domain *d = current->domain;
136 if ( d->id == 0 )
137 BUG();
139 set_bit(DF_CRASHED, &d->d_flags);
141 send_guest_virq(dom0->exec_domain[0], VIRQ_DOM_EXC);
143 raise_softirq(SCHEDULE_SOFTIRQ);
144 }
/*
 * As domain_crash(), but never returns to the caller: spin servicing
 * softirqs until the scheduler softirq deschedules this domain.
 */
void domain_crash_synchronous(void)
{
    domain_crash();
    while ( 1 )
        do_softirq();
}
155 void domain_shutdown(u8 reason)
156 {
157 struct domain *d = current->domain;
159 if ( d->id == 0 )
160 {
161 extern void machine_restart(char *);
162 extern void machine_halt(void);
164 debugger_trap_immediate();
166 if ( reason == SHUTDOWN_poweroff )
167 {
168 printk("Domain 0 halted: halting machine.\n");
169 machine_halt();
170 }
171 else
172 {
173 printk("Domain 0 shutdown: rebooting machine.\n");
174 machine_restart(0);
175 }
176 }
178 if ( (d->shutdown_code = reason) == SHUTDOWN_crash )
179 set_bit(DF_CRASHED, &d->d_flags);
180 else
181 set_bit(DF_SHUTDOWN, &d->d_flags);
183 send_guest_virq(dom0->exec_domain[0], VIRQ_DOM_EXC);
185 raise_softirq(SCHEDULE_SOFTIRQ);
186 }
189 unsigned int alloc_new_dom_mem(struct domain *d, unsigned int kbytes)
190 {
191 unsigned int alloc_pfns, nr_pages;
192 struct pfn_info *page;
194 nr_pages = (kbytes + ((PAGE_SIZE-1)>>10)) >> (PAGE_SHIFT - 10);
195 d->max_pages = nr_pages; /* this can now be controlled independently */
197 /* Grow the allocation if necessary. */
198 for ( alloc_pfns = d->tot_pages; alloc_pfns < nr_pages; alloc_pfns++ )
199 {
200 if ( unlikely((page = alloc_domheap_page(d)) == NULL) )
201 {
202 domain_relinquish_memory(d);
203 return list_empty(&page_scrub_list) ? -ENOMEM : -EAGAIN;
204 }
206 /* Initialise the machine-to-phys mapping for this page. */
207 set_machinetophys(page_to_pfn(page), alloc_pfns);
208 }
210 return 0;
211 }
214 /* Release resources belonging to task @p. */
215 void domain_destruct(struct domain *d)
216 {
217 struct domain **pd;
218 atomic_t old, new;
220 if ( !test_bit(DF_DYING, &d->d_flags) )
221 BUG();
223 /* May be already destructed, or get_domain() can race us. */
224 _atomic_set(old, 0);
225 _atomic_set(new, DOMAIN_DESTRUCTED);
226 old = atomic_compareandswap(old, new, &d->refcnt);
227 if ( _atomic_read(old) != 0 )
228 return;
230 /* Delete from task list and task hashtable. */
231 write_lock(&domlist_lock);
232 pd = &domain_list;
233 while ( *pd != d )
234 pd = &(*pd)->next_list;
235 *pd = d->next_list;
236 pd = &domain_hash[DOMAIN_HASH(d->id)];
237 while ( *pd != d )
238 pd = &(*pd)->next_hash;
239 *pd = d->next_hash;
240 write_unlock(&domlist_lock);
242 destroy_event_channels(d);
243 grant_table_destroy(d);
245 free_perdomain_pt(d);
246 free_xenheap_page((unsigned long)d->shared_info);
248 free_domain_struct(d);
249 }
252 /*
253 * set_info_guest is used for final setup, launching, and state modification
254 * of domains other than domain 0. ie. the domains that are being built by
255 * the userspace dom0 domain builder.
256 */
257 int set_info_guest(struct domain *p, dom0_setdomaininfo_t *setdomaininfo)
258 {
259 int rc = 0;
260 full_execution_context_t *c = NULL;
261 unsigned long vcpu = setdomaininfo->exec_domain;
262 struct exec_domain *ed;
264 if ( (vcpu >= MAX_VIRT_CPUS) || ((ed = p->exec_domain[vcpu]) == NULL) )
265 return -EINVAL;
267 if (test_bit(DF_CONSTRUCTED, &p->d_flags) &&
268 !test_bit(EDF_CTRLPAUSE, &ed->ed_flags))
269 return -EINVAL;
271 if ( (c = xmalloc(full_execution_context_t)) == NULL )
272 return -ENOMEM;
274 if ( copy_from_user(c, setdomaininfo->ctxt, sizeof(*c)) )
275 {
276 rc = -EFAULT;
277 goto out;
278 }
280 if ( (rc = arch_set_info_guest(ed, c)) != 0 )
281 goto out;
283 set_bit(DF_CONSTRUCTED, &p->d_flags);
285 out:
286 if ( c != NULL )
287 xfree(c);
288 return rc;
289 }
291 /*
292 * final_setup_guest is used for final setup and launching of domains other
293 * than domain 0. ie. the domains that are being built by the userspace dom0
294 * domain builder.
295 */
296 long do_boot_vcpu(unsigned long vcpu, full_execution_context_t *ctxt)
297 {
298 struct domain *d = current->domain;
299 struct exec_domain *ed;
300 int rc = 0;
301 full_execution_context_t *c;
303 if ( (vcpu >= MAX_VIRT_CPUS) || (d->exec_domain[vcpu] != NULL) )
304 return -EINVAL;
306 if ( alloc_exec_domain_struct(d, vcpu) == NULL )
307 return -ENOMEM;
309 if ( (c = xmalloc(full_execution_context_t)) == NULL )
310 {
311 rc = -ENOMEM;
312 goto out;
313 }
315 if ( copy_from_user(c, ctxt, sizeof(*c)) )
316 {
317 rc = -EFAULT;
318 goto out;
319 }
321 ed = d->exec_domain[vcpu];
323 atomic_set(&ed->pausecnt, 0);
325 memcpy(&ed->arch, &idle0_exec_domain.arch, sizeof(ed->arch));
327 arch_do_boot_vcpu(ed);
329 sched_add_domain(ed);
331 if ( (rc = arch_set_info_guest(ed, c)) != 0 )
332 {
333 sched_rem_domain(ed);
334 goto out;
335 }
337 /* domain_unpause_by_systemcontroller */
338 if ( test_and_clear_bit(EDF_CTRLPAUSE, &ed->ed_flags) )
339 domain_wake(ed);
341 xfree(c);
342 return 0;
344 out:
345 if ( c != NULL )
346 xfree(c);
347 arch_free_exec_domain_struct(d->exec_domain[vcpu]);
348 d->exec_domain[vcpu] = NULL;
349 return rc;
350 }
352 long vm_assist(struct domain *p, unsigned int cmd, unsigned int type)
353 {
354 if ( type > MAX_VMASST_TYPE )
355 return -EINVAL;
357 switch ( cmd )
358 {
359 case VMASST_CMD_enable:
360 set_bit(type, &p->vm_assist);
361 if (vm_assist_info[type].enable)
362 (*vm_assist_info[type].enable)(p);
363 return 0;
364 case VMASST_CMD_disable:
365 clear_bit(type, &p->vm_assist);
366 if (vm_assist_info[type].disable)
367 (*vm_assist_info[type].disable)(p);
368 return 0;
369 }
371 return -ENOSYS;
372 }
374 /*
375 * Local variables:
376 * mode: C
377 * c-set-style: "BSD"
378 * c-basic-offset: 4
379 * tab-width: 4
380 * indent-tabs-mode: nil
381 * End:
382 */