ia64/xen-unstable

view xen/common/domain.c @ 12390:e28beea6d228

[IA64] Fix time services of EFI emulation

This patch serializes the execution of the following EFI runtime services:
- GetTime
- SetTime
- GetWakeTime
- SetWakeTime

Linux/ia64 uses similar spinlocks in the EFI RTC driver.

Signed-off-by: Masaki Kanno <kanno.masaki@jp.fujitsu.com>
author awilliam@xenbuild.aw
date Fri Nov 10 12:03:19 2006 -0700 (2006-11-10)
parents e793dad2114d
children b7ffbec0e307
line source
1 /******************************************************************************
2 * domain.c
3 *
4 * Generic domain-handling functions.
5 */
7 #include <xen/config.h>
8 #include <xen/init.h>
9 #include <xen/lib.h>
10 #include <xen/errno.h>
11 #include <xen/sched.h>
12 #include <xen/domain.h>
13 #include <xen/mm.h>
14 #include <xen/event.h>
15 #include <xen/time.h>
16 #include <xen/console.h>
17 #include <xen/softirq.h>
18 #include <xen/domain_page.h>
19 #include <xen/rangeset.h>
20 #include <xen/guest_access.h>
21 #include <xen/hypercall.h>
22 #include <xen/delay.h>
23 #include <xen/shutdown.h>
24 #include <xen/percpu.h>
25 #include <asm/debugger.h>
26 #include <public/sched.h>
27 #include <public/vcpu.h>
/* Both these structures are protected by the domlist_lock. */
DEFINE_RWLOCK(domlist_lock);
struct domain *domain_hash[DOMAIN_HASH_SIZE]; /* domid -> domain, hash chains via next_in_hashbucket */
struct domain *domain_list;                   /* all domains, ordered by domid via next_in_list */

struct domain *dom0;                          /* the privileged control domain */

/* One idle vcpu per physical CPU; populated by alloc_idle_vcpu(). */
struct vcpu *idle_vcpu[NR_CPUS] __read_mostly;
38 int current_domain_id(void)
39 {
40 return current->domain->domain_id;
41 }
43 struct domain *alloc_domain(domid_t domid)
44 {
45 struct domain *d;
47 if ( (d = xmalloc(struct domain)) == NULL )
48 return NULL;
50 memset(d, 0, sizeof(*d));
51 d->domain_id = domid;
52 atomic_set(&d->refcnt, 1);
53 spin_lock_init(&d->big_lock);
54 spin_lock_init(&d->page_alloc_lock);
55 spin_lock_init(&d->pause_lock);
56 INIT_LIST_HEAD(&d->page_list);
57 INIT_LIST_HEAD(&d->xenpage_list);
59 return d;
60 }
62 void free_domain(struct domain *d)
63 {
64 struct vcpu *v;
65 int i;
67 for ( i = MAX_VIRT_CPUS-1; i >= 0; i-- )
68 {
69 if ( (v = d->vcpu[i]) == NULL )
70 continue;
71 vcpu_destroy(v);
72 sched_destroy_vcpu(v);
73 free_vcpu_struct(v);
74 }
76 sched_destroy_domain(d);
77 xfree(d);
78 }
/*
 * Allocate and initialise vcpu @vcpu_id of domain @d, initially placed on
 * physical cpu @cpu_id. Returns the new vcpu, or NULL on failure.
 * The slot d->vcpu[vcpu_id] must be empty on entry. For vcpu_id != 0 the
 * predecessor d->vcpu[vcpu_id-1] is dereferenced for list linking —
 * NOTE(review): its existence is not checked here; presumably the
 * caller's contract. Confirm against callers.
 */
struct vcpu *alloc_vcpu(
    struct domain *d, unsigned int vcpu_id, unsigned int cpu_id)
{
    struct vcpu *v;

    BUG_ON(d->vcpu[vcpu_id] != NULL);

    if ( (v = alloc_vcpu_struct()) == NULL )
        return NULL;

    v->domain = d;
    v->vcpu_id = vcpu_id;
    /* Guest-visible vcpu_info lives in the domain's shared_info page. */
    v->vcpu_info = &d->shared_info->vcpu_info[vcpu_id];
    spin_lock_init(&v->pause_lock);

    /* Idle vcpus start out running; guest vcpus start offline. */
    v->runstate.state = is_idle_vcpu(v) ? RUNSTATE_running : RUNSTATE_offline;
    v->runstate.state_entry_time = NOW();

    /* Secondary guest vcpus stay 'down' until brought up via VCPUOP_up. */
    if ( (vcpu_id != 0) && !is_idle_domain(d) )
        set_bit(_VCPUF_down, &v->vcpu_flags);

    if ( sched_init_vcpu(v, cpu_id) != 0 )
    {
        free_vcpu_struct(v);
        return NULL;
    }

    if ( vcpu_initialise(v) != 0 )
    {
        /* Unwind the scheduler state initialised just above. */
        sched_destroy_vcpu(v);
        free_vcpu_struct(v);
        return NULL;
    }

    /* Publish the vcpu and link it after its predecessor. */
    d->vcpu[vcpu_id] = v;
    if ( vcpu_id != 0 )
        d->vcpu[v->vcpu_id-1]->next_in_list = v;

    return v;
}
/*
 * Allocate the idle vcpu for physical cpu @cpu_id. Idle vcpus are grouped
 * MAX_VIRT_CPUS per idle domain: the first cpu of a group creates the idle
 * domain; subsequent cpus reuse the domain of the group's first cpu
 * (idle_vcpu[cpu_id - vcpu_id]).
 */
struct vcpu *alloc_idle_vcpu(unsigned int cpu_id)
{
    struct domain *d;
    struct vcpu *v;
    unsigned int vcpu_id = cpu_id % MAX_VIRT_CPUS;

    d = (vcpu_id == 0) ?
        domain_create(IDLE_DOMAIN_ID, 0) :
        idle_vcpu[cpu_id - vcpu_id]->domain;
    BUG_ON(d == NULL);

    /* NOTE(review): v may be NULL on allocation failure and is stored
     * regardless; presumably callers check the result — confirm. */
    v = alloc_vcpu(d, vcpu_id, cpu_id);
    idle_vcpu[cpu_id] = v;

    return v;
}
138 struct domain *domain_create(domid_t domid, unsigned int domcr_flags)
139 {
140 struct domain *d, **pd;
142 if ( (d = alloc_domain(domid)) == NULL )
143 return NULL;
145 if ( domcr_flags & DOMCRF_hvm )
146 d->is_hvm = 1;
148 rangeset_domain_initialise(d);
150 if ( !is_idle_domain(d) )
151 {
152 set_bit(_DOMF_ctrl_pause, &d->domain_flags);
153 if ( evtchn_init(d) != 0 )
154 goto fail1;
155 if ( grant_table_create(d) != 0 )
156 goto fail2;
157 }
159 if ( arch_domain_create(d) != 0 )
160 goto fail3;
162 d->iomem_caps = rangeset_new(d, "I/O Memory", RANGESETF_prettyprint_hex);
163 d->irq_caps = rangeset_new(d, "Interrupts", 0);
164 if ( (d->iomem_caps == NULL) || (d->irq_caps == NULL) )
165 goto fail4;
167 if ( sched_init_domain(d) != 0 )
168 goto fail4;
170 if ( !is_idle_domain(d) )
171 {
172 write_lock(&domlist_lock);
173 pd = &domain_list; /* NB. domain_list maintained in order of domid. */
174 for ( pd = &domain_list; *pd != NULL; pd = &(*pd)->next_in_list )
175 if ( (*pd)->domain_id > d->domain_id )
176 break;
177 d->next_in_list = *pd;
178 *pd = d;
179 d->next_in_hashbucket = domain_hash[DOMAIN_HASH(domid)];
180 domain_hash[DOMAIN_HASH(domid)] = d;
181 write_unlock(&domlist_lock);
182 }
184 return d;
186 fail4:
187 arch_domain_destroy(d);
188 fail3:
189 if ( !is_idle_domain(d) )
190 grant_table_destroy(d);
191 fail2:
192 if ( !is_idle_domain(d) )
193 evtchn_destroy(d);
194 fail1:
195 rangeset_domain_destroy(d);
196 free_domain(d);
197 return NULL;
198 }
201 struct domain *find_domain_by_id(domid_t dom)
202 {
203 struct domain *d;
205 read_lock(&domlist_lock);
206 d = domain_hash[DOMAIN_HASH(dom)];
207 while ( d != NULL )
208 {
209 if ( d->domain_id == dom )
210 {
211 if ( unlikely(!get_domain(d)) )
212 d = NULL;
213 break;
214 }
215 d = d->next_in_hashbucket;
216 }
217 read_unlock(&domlist_lock);
219 return d;
220 }
/*
 * Begin destruction of domain @d: pause it, mark it dying, release
 * grant-table mappings and relinquish resources, drop a domain reference
 * (pairing with the initial refcnt of 1 set in alloc_domain()), and
 * notify dom0 via VIRQ_DOM_EXC. Idempotent: only the caller that first
 * sets _DOMF_dying performs the teardown.
 */
void domain_kill(struct domain *d)
{
    domain_pause(d);

    /* Only the first killer proceeds past this point. */
    if ( test_and_set_bit(_DOMF_dying, &d->domain_flags) )
        return;

    gnttab_release_mappings(d);
    domain_relinquish_resources(d);
    put_domain(d);

    send_guest_global_virq(dom0, VIRQ_DOM_EXC);
}
238 void __domain_crash(struct domain *d)
239 {
240 if ( d == current->domain )
241 {
242 printk("Domain %d (vcpu#%d) crashed on cpu#%d:\n",
243 d->domain_id, current->vcpu_id, smp_processor_id());
244 show_execution_state(guest_cpu_user_regs());
245 }
246 else
247 {
248 printk("Domain %d reported crashed by domain %d on cpu#%d:\n",
249 d->domain_id, current->domain->domain_id, smp_processor_id());
250 }
252 domain_shutdown(d, SHUTDOWN_crash);
253 }
256 void __domain_crash_synchronous(void)
257 {
258 __domain_crash(current->domain);
259 for ( ; ; )
260 do_softirq();
261 }
/*
 * Shut down domain @d with SHUTDOWN_* code @reason: record the code, set
 * _DOMF_shutdown, put every vcpu to sleep (nosync: without waiting for
 * each to be descheduled), and notify dom0 via VIRQ_DOM_EXC.
 */
void domain_shutdown(struct domain *d, u8 reason)
{
    struct vcpu *v;

    /* A shutdown of dom0 itself is handled specially. */
    if ( d->domain_id == 0 )
        dom0_shutdown(reason);

    d->shutdown_code = reason;
    set_bit(_DOMF_shutdown, &d->domain_flags);

    for_each_vcpu ( d, v )
        vcpu_sleep_nosync(v);

    send_guest_global_virq(dom0, VIRQ_DOM_EXC);
}
281 void domain_pause_for_debugger(void)
282 {
283 struct domain *d = current->domain;
284 struct vcpu *v;
286 set_bit(_DOMF_ctrl_pause, &d->domain_flags);
288 for_each_vcpu ( d, v )
289 vcpu_sleep_nosync(v);
291 send_guest_global_virq(dom0, VIRQ_DEBUGGER);
292 }
/* Release resources belonging to task @p. */
void domain_destroy(struct domain *d)
{
    struct domain **pd;
    atomic_t old, new;

    BUG_ON(!test_bit(_DOMF_dying, &d->domain_flags));

    /* May be already destroyed, or get_domain() can race us. */
    _atomic_set(old, 0);
    _atomic_set(new, DOMAIN_DESTROYED);
    /* Claim destruction atomically: only the CAS that observes refcnt==0
     * and installs DOMAIN_DESTROYED proceeds; any other caller backs off. */
    old = atomic_compareandswap(old, new, &d->refcnt);
    if ( _atomic_read(old) != 0 )
        return;

    /* Delete from task list and task hashtable. */
    write_lock(&domlist_lock);
    pd = &domain_list;
    while ( *pd != d )
        pd = &(*pd)->next_in_list;
    *pd = d->next_in_list;
    pd = &domain_hash[DOMAIN_HASH(d->domain_id)];
    while ( *pd != d )
        pd = &(*pd)->next_in_hashbucket;
    *pd = d->next_in_hashbucket;
    write_unlock(&domlist_lock);

    /* Tear down in reverse order of construction (cf. domain_create()). */
    rangeset_domain_destroy(d);

    evtchn_destroy(d);
    grant_table_destroy(d);

    arch_domain_destroy(d);

    free_domain(d);

    send_guest_global_virq(dom0, VIRQ_DOM_EXC);
}
334 void vcpu_pause(struct vcpu *v)
335 {
336 ASSERT(v != current);
338 spin_lock(&v->pause_lock);
339 if ( v->pause_count++ == 0 )
340 set_bit(_VCPUF_paused, &v->vcpu_flags);
341 spin_unlock(&v->pause_lock);
343 vcpu_sleep_sync(v);
344 }
346 void vcpu_unpause(struct vcpu *v)
347 {
348 int wake;
350 ASSERT(v != current);
352 spin_lock(&v->pause_lock);
353 wake = (--v->pause_count == 0);
354 if ( wake )
355 clear_bit(_VCPUF_paused, &v->vcpu_flags);
356 spin_unlock(&v->pause_lock);
358 if ( wake )
359 vcpu_wake(v);
360 }
362 void domain_pause(struct domain *d)
363 {
364 struct vcpu *v;
366 ASSERT(d != current->domain);
368 spin_lock(&d->pause_lock);
369 if ( d->pause_count++ == 0 )
370 set_bit(_DOMF_paused, &d->domain_flags);
371 spin_unlock(&d->pause_lock);
373 for_each_vcpu( d, v )
374 vcpu_sleep_sync(v);
375 }
377 void domain_unpause(struct domain *d)
378 {
379 struct vcpu *v;
380 int wake;
382 ASSERT(d != current->domain);
384 spin_lock(&d->pause_lock);
385 wake = (--d->pause_count == 0);
386 if ( wake )
387 clear_bit(_DOMF_paused, &d->domain_flags);
388 spin_unlock(&d->pause_lock);
390 if ( wake )
391 for_each_vcpu( d, v )
392 vcpu_wake(v);
393 }
395 void domain_pause_by_systemcontroller(struct domain *d)
396 {
397 struct vcpu *v;
399 BUG_ON(current->domain == d);
401 if ( !test_and_set_bit(_DOMF_ctrl_pause, &d->domain_flags) )
402 {
403 for_each_vcpu ( d, v )
404 vcpu_sleep_sync(v);
405 }
406 }
408 void domain_unpause_by_systemcontroller(struct domain *d)
409 {
410 struct vcpu *v;
412 if ( test_and_clear_bit(_DOMF_ctrl_pause, &d->domain_flags) )
413 {
414 for_each_vcpu ( d, v )
415 vcpu_wake(v);
416 }
417 }
420 /*
421 * set_info_guest is used for final setup, launching, and state modification
422 * of domains other than domain 0. ie. the domains that are being built by
423 * the userspace dom0 domain builder.
424 */
425 int set_info_guest(struct domain *d,
426 xen_domctl_vcpucontext_t *vcpucontext)
427 {
428 int rc = 0;
429 struct vcpu_guest_context *c = NULL;
430 unsigned long vcpu = vcpucontext->vcpu;
431 struct vcpu *v;
433 if ( (vcpu >= MAX_VIRT_CPUS) || ((v = d->vcpu[vcpu]) == NULL) )
434 return -EINVAL;
436 if ( (c = xmalloc(struct vcpu_guest_context)) == NULL )
437 return -ENOMEM;
439 domain_pause(d);
441 rc = -EFAULT;
442 if ( copy_from_guest(c, vcpucontext->ctxt, 1) == 0 )
443 rc = arch_set_info_guest(v, c);
445 domain_unpause(d);
447 xfree(c);
448 return rc;
449 }
/*
 * Load the initial guest context @ctxt into vcpu @vcpuid of domain @d.
 * The vcpu must exist and must not already be initialised. Returns the
 * result of arch_set_info_guest().
 */
int boot_vcpu(struct domain *d, int vcpuid, struct vcpu_guest_context *ctxt)
{
    struct vcpu *v = d->vcpu[vcpuid];

    BUG_ON(test_bit(_VCPUF_initialised, &v->vcpu_flags));

    return arch_set_info_guest(v, ctxt);
}
/*
 * VCPUOP_* hypercall handler: operations a guest performs on one of its
 * own vcpus, identified by @vcpuid. @arg is a guest handle whose payload
 * type depends on @cmd. Commands not handled here are forwarded to
 * arch_do_vcpu_op(). Returns 0 or a negative errno (except VCPUOP_is_up,
 * which returns a boolean).
 */
long do_vcpu_op(int cmd, int vcpuid, XEN_GUEST_HANDLE(void) arg)
{
    struct domain *d = current->domain;
    struct vcpu *v;
    struct vcpu_guest_context *ctxt;
    long rc = 0;

    if ( (vcpuid < 0) || (vcpuid >= MAX_VIRT_CPUS) )
        return -EINVAL;

    if ( (v = d->vcpu[vcpuid]) == NULL )
        return -ENOENT;

    switch ( cmd )
    {
    case VCPUOP_initialise:
        /* Copy the initial context in, then boot the vcpu under the
         * domain's big lock; -EEXIST if it is already initialised. */
        if ( (ctxt = xmalloc(struct vcpu_guest_context)) == NULL )
        {
            rc = -ENOMEM;
            break;
        }

        if ( copy_from_guest(ctxt, arg, 1) )
        {
            xfree(ctxt);
            rc = -EFAULT;
            break;
        }

        LOCK_BIGLOCK(d);
        rc = -EEXIST;
        if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
            rc = boot_vcpu(d, vcpuid, ctxt);
        UNLOCK_BIGLOCK(d);

        xfree(ctxt);
        break;

    case VCPUOP_up:
        /* Only an initialised vcpu may be brought up; the wake happens
         * only if the vcpu was actually down. */
        if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
            rc = -EINVAL;
        else if ( test_and_clear_bit(_VCPUF_down, &v->vcpu_flags) )
            vcpu_wake(v);
        break;

    case VCPUOP_down:
        /* Sleep only on the transition into the down state. */
        if ( !test_and_set_bit(_VCPUF_down, &v->vcpu_flags) )
            vcpu_sleep_nosync(v);
        break;

    case VCPUOP_is_up:
        rc = !test_bit(_VCPUF_down, &v->vcpu_flags);
        break;

    case VCPUOP_get_runstate_info:
    {
        /* Snapshot the runstate and copy it out to the guest. */
        struct vcpu_runstate_info runstate;
        vcpu_runstate_get(v, &runstate);
        if ( copy_to_guest(arg, &runstate, 1) )
            rc = -EFAULT;
        break;
    }

    default:
        rc = arch_do_vcpu_op(cmd, v, arg);
        break;
    }

    return rc;
}
531 long vm_assist(struct domain *p, unsigned int cmd, unsigned int type)
532 {
533 if ( type > MAX_VMASST_TYPE )
534 return -EINVAL;
536 switch ( cmd )
537 {
538 case VMASST_CMD_enable:
539 set_bit(type, &p->vm_assist);
540 return 0;
541 case VMASST_CMD_disable:
542 clear_bit(type, &p->vm_assist);
543 return 0;
544 }
546 return -ENOSYS;
547 }
549 /*
550 * Local variables:
551 * mode: C
552 * c-set-style: "BSD"
553 * c-basic-offset: 4
554 * tab-width: 4
555 * indent-tabs-mode: nil
556 * End:
557 */