direct-io.hg

view xen/common/domain.c @ 2693:2584528df9e1

bitkeeper revision 1.1159.123.2 (4177d169N58TtQXn_XJO4xNBKbMQUw)

Merge freefall.cl.cam.ac.uk:/auto/groups/xeno/BK/xeno.bk
into freefall.cl.cam.ac.uk:/local/scratch/kaf24/xeno
author kaf24@freefall.cl.cam.ac.uk
date Thu Oct 21 15:10:33 2004 +0000 (2004-10-21)
parents 92fff25bf21e d8e27145f1eb
children 8aa9d487a8dd
/******************************************************************************
 * domain.c
 *
 * Generic domain-handling functions.
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/mm.h>
#include <xen/event.h>
#include <xen/time.h>
#include <xen/console.h>
#include <asm/shadow.h>
#include <hypervisor-ifs/dom0_ops.h>
#include <asm/domain_page.h>

/* Both these structures are protected by the tasklist_lock. */
rwlock_t tasklist_lock __cacheline_aligned = RW_LOCK_UNLOCKED;
struct domain *task_hash[TASK_HASH_SIZE];
struct domain *task_list;
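
/*
 * Create and initialise a new domain structure for @dom_id, initially
 * assigned to CPU @cpu. The returned domain holds an initial reference
 * (refcnt == 1); non-idle domains are also linked into the global task
 * list and hash table. Returns NULL on failure.
 */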
struct domain *do_createdomain(domid_t dom_id, unsigned int cpu)
{
    struct domain *d, **pd;
    unsigned long flags;

    if ( (d = alloc_domain_struct()) == NULL )
        return NULL;

    atomic_set(&d->refcnt, 1);
    atomic_set(&d->pausecnt, 0);

    shadow_lock_init(d);

    d->domain      = dom_id;
    d->processor   = cpu;
    d->create_time = NOW();

    memcpy(&d->thread, &idle0_task.thread, sizeof(d->thread));

    spin_lock_init(&d->page_alloc_lock);
    INIT_LIST_HEAD(&d->page_list);
    INIT_LIST_HEAD(&d->xenpage_list);

    /* Per-domain PCI-device list. */
    spin_lock_init(&d->pcidev_lock);
    INIT_LIST_HEAD(&d->pcidev_list);

    if ( d->domain != IDLE_DOMAIN_ID )
    {
        if ( (init_event_channels(d) != 0) || (grant_table_create(d) != 0) )
        {
            destroy_event_channels(d);
            free_domain_struct(d);
            return NULL;
        }

        arch_do_createdomain(d);

        sched_add_domain(d);

        write_lock_irqsave(&tasklist_lock, flags);

        /* NB. task_list is maintained in order of dom_id. */
        for ( pd = &task_list; *pd != NULL; pd = &(*pd)->next_list )
            if ( (*pd)->domain > d->domain )
                break;
        d->next_list = *pd;
        *pd = d;

        d->next_hash = task_hash[TASK_HASH(dom_id)];
        task_hash[TASK_HASH(dom_id)] = d;

        write_unlock_irqrestore(&tasklist_lock, flags);
    }
    else
    {
        sched_add_domain(d);
    }

    return d;
}
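
/*
 * Look up a domain by ID, taking a reference to it on success. The caller
 * must drop the reference with put_domain() when done. Returns NULL if no
 * such domain exists or if it is already being destructed.
 */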
struct domain *find_domain_by_id(domid_t dom)
{
    struct domain *d;
    unsigned long flags;

    read_lock_irqsave(&tasklist_lock, flags);
    d = task_hash[TASK_HASH(dom)];
    while ( d != NULL )
    {
        if ( d->domain == dom )
        {
            if ( unlikely(!get_domain(d)) )
                d = NULL;
            break;
        }
        d = d->next_hash;
    }
    read_unlock_irqrestore(&tasklist_lock, flags);

    return d;
}
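
/*
 * A minimal usage sketch of the lookup/refcount discipline (hypothetical
 * caller, mirroring what domain_crash() does below): every successful
 * lookup must be paired with a put_domain().
 *
 *     struct domain *d = find_domain_by_id(dom);
 *     if ( d != NULL )
 *     {
 *         send_guest_virq(d, VIRQ_DOM_EXC);
 *         put_domain(d);
 *     }
 */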

/* Return the most recently created domain. */
struct domain *find_last_domain(void)
{
    struct domain *d, *dlast;
    unsigned long flags;

    read_lock_irqsave(&tasklist_lock, flags);
    dlast = task_list;
    d = dlast->next_list;
    while ( d != NULL )
    {
        if ( d->create_time > dlast->create_time )
            dlast = d;
        d = d->next_list;
    }
    if ( !get_domain(dlast) )
        dlast = NULL;
    read_unlock_irqrestore(&tasklist_lock, flags);

    return dlast;
}
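
/*
 * Mark @d as dying: pause it, remove it from the scheduler and release
 * its memory. Final destruction happens in domain_destruct() once the
 * last reference is dropped.
 */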
void domain_kill(struct domain *d)
{
    domain_pause(d);
    if ( !test_and_set_bit(DF_DYING, &d->flags) )
    {
        sched_rem_domain(d);
        domain_relinquish_memory(d);
        put_domain(d);
    }
}
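
/*
 * Mark the currently executing domain as crashed, notify domain 0 via
 * VIRQ_DOM_EXC so it can clean up, and deschedule. Must never be called
 * for domain 0 itself.
 */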
void domain_crash(void)
{
    struct domain *d;

    if ( current->domain == 0 )
        BUG();

    set_bit(DF_CRASHED, &current->flags);

    d = find_domain_by_id(0);
    send_guest_virq(d, VIRQ_DOM_EXC);
    put_domain(d);

    __enter_scheduler();
    BUG();
}
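
/*
 * Shut down the currently executing domain with the given @reason code.
 * Domain 0 is special-cased: reason 0 halts the machine, any other reason
 * reboots it. Other domains are descheduled after notifying domain 0 via
 * VIRQ_DOM_EXC.
 */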
void domain_shutdown(u8 reason)
{
    struct domain *d;

    if ( current->domain == 0 )
    {
        extern void machine_restart(char *);
        extern void machine_halt(void);

        if ( reason == 0 )
        {
            printk("Domain 0 halted: Our work here is done.\n");
            machine_halt();
        }
        else
        {
            printk("Domain 0 shutdown: rebooting machine!\n");
            machine_restart(0);
        }
    }

    current->shutdown_code = reason;
    set_bit(DF_SHUTDOWN, &current->flags);

    d = find_domain_by_id(0);
    send_guest_virq(d, VIRQ_DOM_EXC);
    put_domain(d);

    __enter_scheduler();
}
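
/*
 * Grow the memory allocation of domain @d to @kbytes kilobytes, rounded
 * up to a whole number of pages: with 4kB pages, PAGE_SHIFT - 10 == 2, so
 * nr_pages == (kbytes + 3) >> 2. Returns 0 on success, or -ENOMEM if the
 * heap is exhausted (in which case the domain's memory is relinquished).
 */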
int alloc_new_dom_mem(struct domain *d, unsigned int kbytes)
{
    unsigned int alloc_pfns, nr_pages;
    struct pfn_info *page;

    nr_pages = (kbytes + ((PAGE_SIZE-1)>>10)) >> (PAGE_SHIFT - 10);
    d->max_pages = nr_pages; /* this can now be controlled independently */

    /* Grow the allocation if necessary. */
    for ( alloc_pfns = d->tot_pages; alloc_pfns < nr_pages; alloc_pfns++ )
    {
        if ( unlikely((page = alloc_domheap_page(d)) == NULL) )
        {
            domain_relinquish_memory(d);
            return -ENOMEM;
        }

        /* Initialise the machine_to_phys_mapping entry to a likely pfn. */
        machine_to_phys_mapping[page - frame_table] = alloc_pfns;

#ifndef NDEBUG
        {
            /* Initialise with magic marker if in DEBUG mode. */
            void *a = map_domain_mem((page - frame_table) << PAGE_SHIFT);
            memset(a, 0x80 | (char)d->domain, PAGE_SIZE);
            unmap_domain_mem(a);
        }
#endif
    }

    return 0;
}

/* Release resources belonging to domain @d. */
void domain_destruct(struct domain *d)
{
    struct domain **pd;
    unsigned long flags;
    atomic_t old, new;

    if ( !test_bit(DF_DYING, &d->flags) )
        BUG();

    /* May already be destructed, or get_domain() can race us. */
    _atomic_set(old, 0);
    _atomic_set(new, DOMAIN_DESTRUCTED);
    old = atomic_compareandswap(old, new, &d->refcnt);
    if ( _atomic_read(old) != 0 )
        return;

    DPRINTK("Releasing task %u\n", d->domain);

    /* Delete from task list and task hashtable. */
    write_lock_irqsave(&tasklist_lock, flags);
    pd = &task_list;
    while ( *pd != d )
        pd = &(*pd)->next_list;
    *pd = d->next_list;
    pd = &task_hash[TASK_HASH(d->domain)];
    while ( *pd != d )
        pd = &(*pd)->next_hash;
    *pd = d->next_hash;
    write_unlock_irqrestore(&tasklist_lock, flags);

    destroy_event_channels(d);
    grant_table_destroy(d);

    free_perdomain_pt(d);
    free_xenheap_page((unsigned long)d->shared_info);

    free_domain_struct(d);
}

/*
 * final_setup_guestos is used for the final setup and launching of domains
 * other than domain 0, i.e. the domains that are built by the userspace
 * domain builder running in domain 0.
 */
int final_setup_guestos(struct domain *p, dom0_builddomain_t *builddomain)
{
    int rc = 0;
    full_execution_context_t *c;

    if ( (c = xmalloc(sizeof(*c))) == NULL )
        return -ENOMEM;

    if ( test_bit(DF_CONSTRUCTED, &p->flags) )
    {
        rc = -EINVAL;
        goto out;
    }

    if ( copy_from_user(c, builddomain->ctxt, sizeof(*c)) )
    {
        rc = -EFAULT;
        goto out;
    }

    if ( (rc = arch_final_setup_guestos(p, c)) != 0 )
        goto out;

    /* Set up the shared info structure. */
    update_dom_time(p->shared_info);

    set_bit(DF_CONSTRUCTED, &p->flags);

 out:
    xfree(c);
    return rc;
}
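
/*
 * Enable or disable VM assistance of the given @type for domain @p,
 * invoking the registered enable/disable callback if one exists. Returns
 * -EINVAL for an out-of-range type and -ENOSYS for an unknown command.
 */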
long vm_assist(struct domain *p, unsigned int cmd, unsigned int type)
{
    if ( type > MAX_VMASST_TYPE )
        return -EINVAL;

    switch ( cmd )
    {
    case VMASST_CMD_enable:
        set_bit(type, &p->vm_assist);
        if ( vm_assist_info[type].enable != NULL )
            (*vm_assist_info[type].enable)(p);
        return 0;
    case VMASST_CMD_disable:
        clear_bit(type, &p->vm_assist);
        if ( vm_assist_info[type].disable != NULL )
            (*vm_assist_info[type].disable)(p);
        return 0;
    }

    return -ENOSYS;
}