ia64/xen-unstable

xen/common/domctl.c @ 12390:e28beea6d228

[IA64] Fix time services of EFI emulation

This patch serializes execution of the following EFI runtime services:
- GetTime
- SetTime
- GetWakeupTime
- SetWakeupTime

Linux/ia64 uses similar spinlocks in the EFI RTC driver.
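
As a minimal sketch of that pattern (the lock and wrapper names below are
illustrative assumptions, not the identifiers the patch itself introduces):

/* Serialize the four EFI time services behind one lock, as the
 * Linux/ia64 EFI RTC driver does. All names here are hypothetical. */
static DEFINE_SPINLOCK(efi_time_lock);

/* Pointer to the underlying, non-reentrant firmware service. */
extern efi_status_t (*fw_get_time)(efi_time_t *, efi_time_cap_t *);

static efi_status_t efi_emulate_get_time(efi_time_t *tm, efi_time_cap_t *tc)
{
    efi_status_t status;

    spin_lock(&efi_time_lock);
    status = (*fw_get_time)(tm, tc);
    spin_unlock(&efi_time_lock);

    return status;
}

The same lock would also wrap SetTime, GetWakeupTime and SetWakeupTime, so
that no two of the four services can run concurrently.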

Signed-off-by: Masaki Kanno <kanno.masaki@jp.fujitsu.com>
author awilliam@xenbuild.aw
date Fri Nov 10 12:03:19 2006 -0700 (2006-11-10)
parents cf8e65797826
children 45765c7a5066
/******************************************************************************
 * domctl.c
 *
 * Domain management operations. For use by node control stack.
 *
 * Copyright (c) 2002-2006, K A Fraser
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/lib.h>
#include <xen/mm.h>
#include <xen/sched.h>
#include <xen/domain.h>
#include <xen/event.h>
#include <xen/domain_page.h>
#include <xen/trace.h>
#include <xen/console.h>
#include <xen/iocap.h>
#include <xen/guest_access.h>
#include <asm/current.h>
#include <public/domctl.h>
#include <acm/acm_hooks.h>

extern long arch_do_domctl(
    struct xen_domctl *op, XEN_GUEST_HANDLE(xen_domctl_t) u_domctl);
extern void arch_getdomaininfo_ctxt(
    struct vcpu *, struct vcpu_guest_context *);
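
/*
 * Convert between the hypervisor's cpumask_t and a guest-supplied
 * xenctl_cpumap bitmap: copies are truncated at NR_CPUS bits, and any
 * trailing guest bytes are zero-filled on the way out.
 */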
void cpumask_to_xenctl_cpumap(
    struct xenctl_cpumap *xenctl_cpumap, cpumask_t *cpumask)
{
    unsigned int guest_bytes, copy_bytes, i;
    uint8_t zero = 0;

    if ( guest_handle_is_null(xenctl_cpumap->bitmap) )
        return;

    guest_bytes = (xenctl_cpumap->nr_cpus + 7) / 8;
    copy_bytes  = min_t(unsigned int, guest_bytes, (NR_CPUS + 7) / 8);

    copy_to_guest(xenctl_cpumap->bitmap,
                  (uint8_t *)cpus_addr(*cpumask),
                  copy_bytes);

    for ( i = copy_bytes; i < guest_bytes; i++ )
        copy_to_guest_offset(xenctl_cpumap->bitmap, i, &zero, 1);
}

void xenctl_cpumap_to_cpumask(
    cpumask_t *cpumask, struct xenctl_cpumap *xenctl_cpumap)
{
    unsigned int guest_bytes, copy_bytes;

    guest_bytes = (xenctl_cpumap->nr_cpus + 7) / 8;
    copy_bytes  = min_t(unsigned int, guest_bytes, (NR_CPUS + 7) / 8);

    cpus_clear(*cpumask);

    if ( guest_handle_is_null(xenctl_cpumap->bitmap) )
        return;

    copy_from_guest((uint8_t *)cpus_addr(*cpumask),
                    xenctl_cpumap->bitmap,
                    copy_bytes);
}

static inline int is_free_domid(domid_t dom)
{
    struct domain *d;

    if ( dom >= DOMID_FIRST_RESERVED )
        return 0;

    if ( (d = find_domain_by_id(dom)) == NULL )
        return 1;

    put_domain(d);
    return 0;
}

void getdomaininfo(struct domain *d, struct xen_domctl_getdomaininfo *info)
{
    struct vcpu *v;
    u64 cpu_time = 0;
    int flags = XEN_DOMINF_blocked;
    struct vcpu_runstate_info runstate;

    info->domain = d->domain_id;
    info->nr_online_vcpus = 0;

    /*
     * - domain is marked as blocked only if all its vcpus are blocked
     * - domain is marked as running if any of its vcpus is running
     */
    for_each_vcpu ( d, v )
    {
        vcpu_runstate_get(v, &runstate);
        cpu_time += runstate.time[RUNSTATE_running];
        info->max_vcpu_id = v->vcpu_id;
        if ( !test_bit(_VCPUF_down, &v->vcpu_flags) )
        {
            if ( !(v->vcpu_flags & VCPUF_blocked) )
                flags &= ~XEN_DOMINF_blocked;
            if ( v->vcpu_flags & VCPUF_running )
                flags |= XEN_DOMINF_running;
            info->nr_online_vcpus++;
        }
    }

    info->cpu_time = cpu_time;

    info->flags = flags |
        ((d->domain_flags & DOMF_dying)      ? XEN_DOMINF_dying    : 0) |
        ((d->domain_flags & DOMF_shutdown)   ? XEN_DOMINF_shutdown : 0) |
        ((d->domain_flags & DOMF_ctrl_pause) ? XEN_DOMINF_paused   : 0) |
        d->shutdown_code << XEN_DOMINF_shutdownshift;

    if ( is_hvm_domain(d) )
        info->flags |= XEN_DOMINF_hvm_guest;

    if ( d->ssid != NULL )
        info->ssidref = ((struct acm_ssid_domain *)d->ssid)->ssidref;
    else
        info->ssidref = ACM_DEFAULT_SSID;

    info->tot_pages = d->tot_pages;
    info->max_pages = d->max_pages;
    info->shared_info_frame = __pa(d->shared_info) >> PAGE_SHIFT;

    memcpy(info->handle, d->handle, sizeof(xen_domain_handle_t));
}
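
/*
 * Used by XEN_DOMCTL_max_vcpus below to choose an initial pCPU for a
 * domain's first VCPU; subsequent VCPUs are placed round-robin.
 */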
static unsigned int default_vcpu0_location(void)
{
    struct domain *d;
    struct vcpu *v;
    unsigned int i, cpu, cnt[NR_CPUS] = { 0 };
    cpumask_t cpu_exclude_map;

    /* Do an initial CPU placement. Pick the least-populated CPU. */
    read_lock(&domlist_lock);
    for_each_domain ( d )
        for_each_vcpu ( d, v )
            if ( !test_bit(_VCPUF_down, &v->vcpu_flags) )
                cnt[v->processor]++;
    read_unlock(&domlist_lock);

    /*
     * If we're on a HT system, we only auto-allocate to a non-primary HT. We
     * favour high numbered CPUs in the event of a tie.
     */
    cpu = first_cpu(cpu_sibling_map[0]);
    if ( cpus_weight(cpu_sibling_map[0]) > 1 )
        cpu = next_cpu(cpu, cpu_sibling_map[0]);
    cpu_exclude_map = cpu_sibling_map[0];
    for_each_online_cpu ( i )
    {
        if ( cpu_isset(i, cpu_exclude_map) )
            continue;
        if ( (i == first_cpu(cpu_sibling_map[i])) &&
             (cpus_weight(cpu_sibling_map[i]) > 1) )
            continue;
        cpus_or(cpu_exclude_map, cpu_exclude_map, cpu_sibling_map[i]);
        if ( cnt[i] <= cnt[cpu] )
            cpu = i;
    }

    return cpu;
}
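
/*
 * Top-level handler for the domctl hypercall. Callable only by a
 * privileged (control) domain; individual operations are serialized
 * by the static domctl_lock below.
 */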
long do_domctl(XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
{
    long ret = 0;
    struct xen_domctl curop, *op = &curop;
    void *ssid = NULL; /* save security ptr between pre and post/fail hooks */
    static DEFINE_SPINLOCK(domctl_lock);

    if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( copy_from_guest(op, u_domctl, 1) )
        return -EFAULT;

    if ( op->interface_version != XEN_DOMCTL_INTERFACE_VERSION )
        return -EACCES;

    if ( acm_pre_domctl(op, &ssid) )
        return -EPERM;

    spin_lock(&domctl_lock);

    switch ( op->cmd )
    {

    case XEN_DOMCTL_setvcpucontext:
    {
        struct domain *d = find_domain_by_id(op->domain);
        ret = -ESRCH;
        if ( d != NULL )
        {
            ret = set_info_guest(d, &op->u.vcpucontext);
            put_domain(d);
        }
    }
    break;

    case XEN_DOMCTL_pausedomain:
    {
        struct domain *d = find_domain_by_id(op->domain);
        ret = -ESRCH;
        if ( d != NULL )
        {
            ret = -EINVAL;
            if ( d != current->domain )
            {
                domain_pause_by_systemcontroller(d);
                ret = 0;
            }
            put_domain(d);
        }
    }
    break;

    case XEN_DOMCTL_unpausedomain:
    {
        struct domain *d = find_domain_by_id(op->domain);
        ret = -ESRCH;
        if ( d != NULL )
        {
            ret = -EINVAL;
            if ( (d != current->domain) && (d->vcpu[0] != NULL) &&
                 test_bit(_VCPUF_initialised, &d->vcpu[0]->vcpu_flags) )
            {
                domain_unpause_by_systemcontroller(d);
                ret = 0;
            }
            put_domain(d);
        }
    }
    break;

    case XEN_DOMCTL_createdomain:
    {
        struct domain *d;
        domid_t dom;
        static domid_t rover = 0;
        unsigned int domcr_flags;

        ret = -EINVAL;
        if ( supervisor_mode_kernel ||
             (op->u.createdomain.flags & ~XEN_DOMCTL_CDF_hvm_guest) )
            break; /* must not return with domctl_lock held */

        dom = op->domain;
        if ( (dom > 0) && (dom < DOMID_FIRST_RESERVED) )
        {
            ret = -EINVAL;
            if ( !is_free_domid(dom) )
                break;
        }
        else
        {
            for ( dom = rover + 1; dom != rover; dom++ )
            {
                if ( dom == DOMID_FIRST_RESERVED )
                    dom = 0;
                if ( is_free_domid(dom) )
                    break;
            }

            ret = -ENOMEM;
            if ( dom == rover )
                break;

            rover = dom;
        }

        domcr_flags = 0;
        if ( op->u.createdomain.flags & XEN_DOMCTL_CDF_hvm_guest )
            domcr_flags |= DOMCRF_hvm;

        ret = -ENOMEM;
        if ( (d = domain_create(dom, domcr_flags)) == NULL )
            break;

        memcpy(d->handle, op->u.createdomain.handle,
               sizeof(xen_domain_handle_t));

        ret = 0;

        op->domain = d->domain_id;
        if ( copy_to_guest(u_domctl, op, 1) )
            ret = -EFAULT;
    }
    break;

    case XEN_DOMCTL_max_vcpus:
    {
        struct domain *d;
        unsigned int i, max = op->u.max_vcpus.max, cpu;

        ret = -EINVAL;
        if ( max > MAX_VIRT_CPUS )
            break;

        ret = -ESRCH;
        if ( (d = find_domain_by_id(op->domain)) == NULL )
            break;

        /* Needed, for example, to ensure writable p.t. state is synced. */
        domain_pause(d);

        /* We cannot reduce maximum VCPUs. */
        ret = -EINVAL;
        if ( (max != MAX_VIRT_CPUS) && (d->vcpu[max] != NULL) )
            goto maxvcpu_out;

        ret = -ENOMEM;
        for ( i = 0; i < max; i++ )
        {
            if ( d->vcpu[i] != NULL )
                continue;

            cpu = (i == 0) ?
                default_vcpu0_location() :
                (d->vcpu[i-1]->processor + 1) % num_online_cpus();

            if ( alloc_vcpu(d, i, cpu) == NULL )
                goto maxvcpu_out;
        }

        ret = 0;

    maxvcpu_out:
        domain_unpause(d);
        put_domain(d);
    }
    break;

    case XEN_DOMCTL_destroydomain:
    {
        struct domain *d = find_domain_by_id(op->domain);
        ret = -ESRCH;
        if ( d != NULL )
        {
            ret = -EINVAL;
            if ( d != current->domain )
            {
                domain_kill(d);
                ret = 0;
            }
            put_domain(d);
        }
    }
    break;

    case XEN_DOMCTL_setvcpuaffinity:
    case XEN_DOMCTL_getvcpuaffinity:
    {
        domid_t dom = op->domain;
        struct domain *d = find_domain_by_id(dom);
        struct vcpu *v;
        cpumask_t new_affinity;

        ret = -ESRCH;
        if ( d == NULL )
            break;

        ret = -EINVAL;
        if ( op->u.vcpuaffinity.vcpu >= MAX_VIRT_CPUS )
            goto vcpuaffinity_out;

        ret = -ESRCH;
        if ( (v = d->vcpu[op->u.vcpuaffinity.vcpu]) == NULL )
            goto vcpuaffinity_out;

        if ( op->cmd == XEN_DOMCTL_setvcpuaffinity )
        {
            xenctl_cpumap_to_cpumask(
                &new_affinity, &op->u.vcpuaffinity.cpumap);
            ret = vcpu_set_affinity(v, &new_affinity);
        }
        else
        {
            cpumask_to_xenctl_cpumap(
                &op->u.vcpuaffinity.cpumap, &v->cpu_affinity);
            ret = 0;
        }

    vcpuaffinity_out:
        put_domain(d);
    }
    break;

    case XEN_DOMCTL_scheduler_op:
    {
        struct domain *d;

        ret = -ESRCH;
        if ( (d = find_domain_by_id(op->domain)) == NULL )
            break;

        ret = sched_adjust(d, &op->u.scheduler_op);
        if ( copy_to_guest(u_domctl, op, 1) )
            ret = -EFAULT;

        put_domain(d);
    }
    break;

    case XEN_DOMCTL_getdomaininfo:
    {
        struct domain *d;
        domid_t dom;

        dom = op->domain;
        if ( dom == DOMID_SELF )
            dom = current->domain->domain_id;

        read_lock(&domlist_lock);

        for_each_domain ( d )
        {
            if ( d->domain_id >= dom )
                break;
        }

        if ( (d == NULL) || !get_domain(d) )
        {
            read_unlock(&domlist_lock);
            ret = -ESRCH;
            break;
        }

        read_unlock(&domlist_lock);

        getdomaininfo(d, &op->u.getdomaininfo);

        op->domain = op->u.getdomaininfo.domain;
        if ( copy_to_guest(u_domctl, op, 1) )
            ret = -EFAULT;

        put_domain(d);
    }
    break;

    case XEN_DOMCTL_getvcpucontext:
    {
        struct vcpu_guest_context *c;
        struct domain *d;
        struct vcpu *v;

        ret = -ESRCH;
        if ( (d = find_domain_by_id(op->domain)) == NULL )
            break;

        ret = -EINVAL;
        if ( op->u.vcpucontext.vcpu >= MAX_VIRT_CPUS )
            goto getvcpucontext_out;

        ret = -ESRCH;
        if ( (v = d->vcpu[op->u.vcpucontext.vcpu]) == NULL )
            goto getvcpucontext_out;

        ret = -ENODATA;
        if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
            goto getvcpucontext_out;

        ret = -ENOMEM;
        if ( (c = xmalloc(struct vcpu_guest_context)) == NULL )
            goto getvcpucontext_out;

        if ( v != current )
            vcpu_pause(v);

        arch_getdomaininfo_ctxt(v, c);
        ret = 0;

        if ( v != current )
            vcpu_unpause(v);

        if ( copy_to_guest(op->u.vcpucontext.ctxt, c, 1) )
            ret = -EFAULT;

        xfree(c);

        if ( copy_to_guest(u_domctl, op, 1) )
            ret = -EFAULT;

    getvcpucontext_out:
        put_domain(d);
    }
    break;

    case XEN_DOMCTL_getvcpuinfo:
    {
        struct domain *d;
        struct vcpu *v;
        struct vcpu_runstate_info runstate;

        ret = -ESRCH;
        if ( (d = find_domain_by_id(op->domain)) == NULL )
            break;

        ret = -EINVAL;
        if ( op->u.getvcpuinfo.vcpu >= MAX_VIRT_CPUS )
            goto getvcpuinfo_out;

        ret = -ESRCH;
        if ( (v = d->vcpu[op->u.getvcpuinfo.vcpu]) == NULL )
            goto getvcpuinfo_out;

        vcpu_runstate_get(v, &runstate);

        op->u.getvcpuinfo.online   = !test_bit(_VCPUF_down, &v->vcpu_flags);
        op->u.getvcpuinfo.blocked  = test_bit(_VCPUF_blocked, &v->vcpu_flags);
        op->u.getvcpuinfo.running  = test_bit(_VCPUF_running, &v->vcpu_flags);
        op->u.getvcpuinfo.cpu_time = runstate.time[RUNSTATE_running];
        op->u.getvcpuinfo.cpu      = v->processor;
        ret = 0;

        if ( copy_to_guest(u_domctl, op, 1) )
            ret = -EFAULT;

    getvcpuinfo_out:
        put_domain(d);
    }
    break;

    case XEN_DOMCTL_max_mem:
    {
        struct domain *d;
        unsigned long new_max;

        ret = -ESRCH;
        d = find_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = -EINVAL;
        new_max = op->u.max_mem.max_memkb >> (PAGE_SHIFT-10);

        spin_lock(&d->page_alloc_lock);
        if ( new_max >= d->tot_pages )
        {
            d->max_pages = new_max;
            ret = 0;
        }
        spin_unlock(&d->page_alloc_lock);

        put_domain(d);
    }
    break;

    case XEN_DOMCTL_setdomainhandle:
    {
        struct domain *d;
        ret = -ESRCH;
        d = find_domain_by_id(op->domain);
        if ( d != NULL )
        {
            memcpy(d->handle, op->u.setdomainhandle.handle,
                   sizeof(xen_domain_handle_t));
            put_domain(d);
            ret = 0;
        }
    }
    break;

    case XEN_DOMCTL_setdebugging:
    {
        struct domain *d;
        ret = -ESRCH;
        d = find_domain_by_id(op->domain);
        if ( d != NULL )
        {
            if ( op->u.setdebugging.enable )
                set_bit(_DOMF_debugging, &d->domain_flags);
            else
                clear_bit(_DOMF_debugging, &d->domain_flags);
            put_domain(d);
            ret = 0;
        }
    }
    break;

    case XEN_DOMCTL_irq_permission:
    {
        struct domain *d;
        unsigned int pirq = op->u.irq_permission.pirq;

        ret = -EINVAL;
        if ( pirq >= NR_IRQS )
            break;

        ret = -ESRCH;
        d = find_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        if ( op->u.irq_permission.allow_access )
            ret = irq_permit_access(d, pirq);
        else
            ret = irq_deny_access(d, pirq);

        put_domain(d);
    }
    break;

    case XEN_DOMCTL_iomem_permission:
    {
        struct domain *d;
        unsigned long mfn = op->u.iomem_permission.first_mfn;
        unsigned long nr_mfns = op->u.iomem_permission.nr_mfns;

        ret = -EINVAL;
        if ( (mfn + nr_mfns - 1) < mfn ) /* wrap? */
            break;

        ret = -ESRCH;
        d = find_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        if ( op->u.iomem_permission.allow_access )
            ret = iomem_permit_access(d, mfn, mfn + nr_mfns - 1);
        else
            ret = iomem_deny_access(d, mfn, mfn + nr_mfns - 1);

        put_domain(d);
    }
    break;

    case XEN_DOMCTL_settimeoffset:
    {
        struct domain *d;

        ret = -ESRCH;
        d = find_domain_by_id(op->domain);
        if ( d != NULL )
        {
            d->time_offset_seconds = op->u.settimeoffset.time_offset_seconds;
            put_domain(d);
            ret = 0;
        }
    }
    break;

    default:
        ret = arch_do_domctl(op, u_domctl);
        break;
    }

    spin_unlock(&domctl_lock);

    if ( ret == 0 )
        acm_post_domctl(op, &ssid);
    else
        acm_fail_domctl(op, &ssid);

    return ret;
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */