ia64/xen-unstable

view xen/common/domain.c @ 3691:b8cbcd601e0e

bitkeeper revision 1.1159.247.1 (42077fc2VzXadqgxjViiGV6bG9CwbA)

Don't memcpy the arch-specific part of exec_domain from the idle task's.
Doing so caused the ctxt->pt_base != 0 error when starting new domains.
author iap10@labyrinth.cl.cam.ac.uk
date Mon Feb 07 14:48:34 2005 +0000 (2005-02-07)
parents d93748c50893
children f38875b9c89f
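
For context, a minimal sketch of the kind of change the message above describes, assuming the copy in question sat on the do_createdomain() path (the authoritative diff is the delta from parent d93748c50893); idle0_exec_domain is taken here to be the idle task's exec_domain, as used later in do_boot_vcpu():

/* Illustrative sketch only -- not the actual patch.
 *
 * Before (assumed): the new exec_domain's arch-specific state was copied
 * wholesale from the idle task, so a stale, non-zero page-table base leaked
 * into every new domain and produced the "ctxt->pt_base != 0" error:
 *
 *     memcpy(&ed->arch, &idle0_exec_domain.arch, sizeof(ed->arch));
 *
 * After: the freshly allocated arch state is left as allocated and
 * arch_do_createdomain(ed) fills in only what a new domain needs.
 * (Compare do_boot_vcpu() below, which still performs such a memcpy before
 * calling arch_do_boot_vcpu().)
 */

The full source of xen/common/domain.c at this revision follows.
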
/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/******************************************************************************
 * domain.c
 *
 * Generic domain-handling functions.
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/sched.h>
#include <xen/errno.h>
#include <xen/mm.h>
#include <xen/event.h>
#include <xen/time.h>
#include <xen/console.h>
#include <asm/shadow.h>
#include <public/dom0_ops.h>
#include <asm/domain_page.h>

/* Both these structures are protected by the domlist_lock. */
rwlock_t domlist_lock = RW_LOCK_UNLOCKED;
struct domain *domain_hash[DOMAIN_HASH_SIZE];
struct domain *domain_list;

struct domain *dom0;

struct domain *do_createdomain(domid_t dom_id, unsigned int cpu)
{
    struct domain *d, **pd;
    struct exec_domain *ed;

    if ( (d = alloc_domain_struct()) == NULL )
        return NULL;

    ed = d->exec_domain[0];

    atomic_set(&d->refcnt, 1);
    atomic_set(&ed->pausecnt, 0);

    shadow_lock_init(d);

    d->id          = dom_id;
    ed->processor  = cpu;
    d->create_time = NOW();

    spin_lock_init(&d->time_lock);

    spin_lock_init(&d->big_lock);

    spin_lock_init(&d->page_alloc_lock);
    INIT_LIST_HEAD(&d->page_list);
    INIT_LIST_HEAD(&d->xenpage_list);

    /* Per-domain PCI-device list. */
    spin_lock_init(&d->pcidev_lock);
    INIT_LIST_HEAD(&d->pcidev_list);

    if ( (d->id != IDLE_DOMAIN_ID) &&
         ((init_event_channels(d) != 0) || (grant_table_create(d) != 0)) )
    {
        destroy_event_channels(d);
        free_domain_struct(d);
        return NULL;
    }

    arch_do_createdomain(ed);

    sched_add_domain(ed);

    if ( d->id != IDLE_DOMAIN_ID )
    {
        write_lock(&domlist_lock);
        /* NB. domain_list maintained in order of dom_id. */
        for ( pd = &domain_list; *pd != NULL; pd = &(*pd)->next_list )
            if ( (*pd)->id > d->id )
                break;
        d->next_list = *pd;
        *pd = d;
        d->next_hash = domain_hash[DOMAIN_HASH(dom_id)];
        domain_hash[DOMAIN_HASH(dom_id)] = d;
        write_unlock(&domlist_lock);
    }

    return d;
}

struct domain *find_domain_by_id(domid_t dom)
{
    struct domain *d;

    read_lock(&domlist_lock);
    d = domain_hash[DOMAIN_HASH(dom)];
    while ( d != NULL )
    {
        if ( d->id == dom )
        {
            if ( unlikely(!get_domain(d)) )
                d = NULL;
            break;
        }
        d = d->next_hash;
    }
    read_unlock(&domlist_lock);

    return d;
}

/* Return the most recently created domain. */
struct domain *find_last_domain(void)
{
    struct domain *d, *dlast;

    read_lock(&domlist_lock);
    dlast = domain_list;
    d = dlast->next_list;
    while ( d != NULL )
    {
        if ( d->create_time > dlast->create_time )
            dlast = d;
        d = d->next_list;
    }
    if ( !get_domain(dlast) )
        dlast = NULL;
    read_unlock(&domlist_lock);

    return dlast;
}

void domain_kill(struct domain *d)
{
    struct exec_domain *ed;

    domain_pause(d);
    if ( !test_and_set_bit(DF_DYING, &d->d_flags) )
    {
        for_each_exec_domain(d, ed)
            sched_rem_domain(ed);
        domain_relinquish_memory(d);
        put_domain(d);
    }
}

void domain_crash(void)
{
    struct domain *d = current->domain;

    if ( d->id == 0 )
        BUG();

    set_bit(DF_CRASHED, &d->d_flags);

    send_guest_virq(dom0->exec_domain[0], VIRQ_DOM_EXC);

    __enter_scheduler();
    BUG();
}

void domain_shutdown(u8 reason)
{
    struct domain *d = current->domain;

    if ( d->id == 0 )
    {
        extern void machine_restart(char *);
        extern void machine_halt(void);

        if ( reason == 0 )
        {
            printk("Domain 0 halted: halting machine.\n");
            machine_halt();
        }
        else
        {
            printk("Domain 0 shutdown: rebooting machine.\n");
            machine_restart(0);
        }
    }

    d->shutdown_code = reason;
    set_bit(DF_SHUTDOWN, &d->d_flags);

    send_guest_virq(dom0->exec_domain[0], VIRQ_DOM_EXC);

    __enter_scheduler();
}

unsigned int alloc_new_dom_mem(struct domain *d, unsigned int kbytes)
{
    unsigned int alloc_pfns, nr_pages;
    struct pfn_info *page;

    nr_pages = (kbytes + ((PAGE_SIZE-1)>>10)) >> (PAGE_SHIFT - 10);
    d->max_pages = nr_pages; /* this can now be controlled independently */

    /* Grow the allocation if necessary. */
    for ( alloc_pfns = d->tot_pages; alloc_pfns < nr_pages; alloc_pfns++ )
    {
        if ( unlikely((page = alloc_domheap_page(d)) == NULL) )
        {
            domain_relinquish_memory(d);
            return -ENOMEM;
        }

        /* Initialise the machine-to-phys mapping for this page. */
        set_machinetophys(page_to_pfn(page), alloc_pfns);
    }

    return 0;
}

/* Release resources belonging to domain @d. */
void domain_destruct(struct domain *d)
{
    struct domain **pd;
    atomic_t old, new;

    if ( !test_bit(DF_DYING, &d->d_flags) )
        BUG();

    /* May already have been destructed, or get_domain() can race us. */
    _atomic_set(old, 0);
    _atomic_set(new, DOMAIN_DESTRUCTED);
    old = atomic_compareandswap(old, new, &d->refcnt);
    if ( _atomic_read(old) != 0 )
        return;

    /* Delete from task list and task hashtable. */
    write_lock(&domlist_lock);
    pd = &domain_list;
    while ( *pd != d )
        pd = &(*pd)->next_list;
    *pd = d->next_list;
    pd = &domain_hash[DOMAIN_HASH(d->id)];
    while ( *pd != d )
        pd = &(*pd)->next_hash;
    *pd = d->next_hash;
    write_unlock(&domlist_lock);

    destroy_event_channels(d);
    grant_table_destroy(d);

    free_perdomain_pt(d);
    free_xenheap_page((unsigned long)d->shared_info);

    free_domain_struct(d);
}

/*
 * final_setup_guestos is used for final setup and launching of domains other
 * than domain 0, i.e. the domains that are being built by the userspace dom0
 * domain builder.
 */
int final_setup_guestos(struct domain *p, dom0_builddomain_t *builddomain)
{
    int rc = 0;
    full_execution_context_t *c;

    if ( (c = xmalloc(full_execution_context_t)) == NULL )
        return -ENOMEM;

    if ( test_bit(DF_CONSTRUCTED, &p->d_flags) )
    {
        rc = -EINVAL;
        goto out;
    }

    if ( copy_from_user(c, builddomain->ctxt, sizeof(*c)) )
    {
        rc = -EFAULT;
        goto out;
    }

    if ( (rc = arch_final_setup_guestos(p->exec_domain[0], c)) != 0 )
        goto out;

    /* Set up the shared info structure. */
    update_dom_time(p);

    set_bit(DF_CONSTRUCTED, &p->d_flags);

 out:
    if ( c != NULL )
        xfree(c);
    return rc;
}

/*
 * do_boot_vcpu is used to bring up a further virtual CPU (exec_domain) of an
 * already-created domain, initialising it from the supplied execution
 * context.
 */
long do_boot_vcpu(unsigned long vcpu, full_execution_context_t *ctxt)
{
    struct domain *d = current->domain;
    struct exec_domain *ed;
    int rc = 0;
    full_execution_context_t *c;

    if ( (vcpu >= MAX_VIRT_CPUS) || (d->exec_domain[vcpu] != NULL) )
        return -EINVAL;

    if ( alloc_exec_domain_struct(d, vcpu) == NULL )
        return -ENOMEM;

    if ( (c = xmalloc(full_execution_context_t)) == NULL )
    {
        rc = -ENOMEM;
        goto out;
    }

    if ( copy_from_user(c, ctxt, sizeof(*c)) )
    {
        rc = -EFAULT;
        goto out;
    }

    ed = d->exec_domain[vcpu];

    atomic_set(&ed->pausecnt, 0);
    shadow_lock_init(d);

    memcpy(&ed->arch, &idle0_exec_domain.arch, sizeof(ed->arch));

    arch_do_boot_vcpu(ed);

    sched_add_domain(ed);

    if ( (rc = arch_final_setup_guestos(ed, c)) != 0 )
    {
        sched_rem_domain(ed);
        goto out;
    }

    /* Set up the shared info structure. */
    update_dom_time(d);

    /* domain_unpause_by_systemcontroller */
    if ( test_and_clear_bit(EDF_CTRLPAUSE, &ed->ed_flags) )
        domain_wake(ed);

    xfree(c);
    return 0;

 out:
    if ( c != NULL )
        xfree(c);
    arch_free_exec_domain_struct(d->exec_domain[vcpu]);
    d->exec_domain[vcpu] = NULL;
    return rc;
}

long vm_assist(struct domain *p, unsigned int cmd, unsigned int type)
{
    if ( type > MAX_VMASST_TYPE )
        return -EINVAL;

    switch ( cmd )
    {
    case VMASST_CMD_enable:
        set_bit(type, &p->vm_assist);
        if ( vm_assist_info[type].enable )
            (*vm_assist_info[type].enable)(p);
        return 0;
    case VMASST_CMD_disable:
        clear_bit(type, &p->vm_assist);
        if ( vm_assist_info[type].disable )
            (*vm_assist_info[type].disable)(p);
        return 0;
    }

    return -ENOSYS;
}