direct-io.hg

view xen/common/domctl.c @ 15399:45a44a9cbe8d

Enhance the guest memory accessor macros so that source operands can be
pointers to const or arrays.

Only build-tested on ia64, and untested on powerpc (which is almost
identical to ia64, except for an apparent bug in the original powerpc
version of __copy_field_{from,to}_guest: the field offset was multiplied
by the field size).

Signed-off-by: Jan Beulich <jbeulich@novell.com>
author kfraser@localhost.localdomain
date Wed Jun 20 15:29:53 2007 +0100 (2007-06-20)
parents a99093e602c6
children 05331a29f3cb
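
The accessor macros themselves live in each architecture's guest_access.h
and are not part of this file. For context, here is a minimal sketch of the
kind of defect the message describes; every name in it (struct ctxt,
ctxt_handle_t, the copy_field_* macros) is a hypothetical stand-in, not
Xen's real definition:

#include <stddef.h>
#include <string.h>

/* Hypothetical stand-ins, for illustration only. */
struct ctxt { int flags; long regs[16]; };
typedef struct { struct ctxt *p; } ctxt_handle_t;

/* Broken, per the commit message: the byte offset from offsetof() is
 * multiplied by the field size, so every field except one at offset 0
 * is copied from the wrong address. */
#define copy_field_broken(dst, hnd, field)                            \
    memcpy(&(dst)->field,                                             \
           (const char *)(hnd).p +                                    \
               offsetof(struct ctxt, field) * sizeof((dst)->field),   \
           sizeof((dst)->field))

/* Fixed: offsetof() already yields a byte offset; use it as-is. */
#define copy_field_fixed(dst, hnd, field)                             \
    memcpy(&(dst)->field,                                             \
           (const char *)(hnd).p + offsetof(struct ctxt, field),      \
           sizeof((dst)->field))

The full file at this revision follows.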
/******************************************************************************
 * domctl.c
 *
 * Domain management operations. For use by node control stack.
 *
 * Copyright (c) 2002-2006, K A Fraser
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/lib.h>
#include <xen/mm.h>
#include <xen/sched.h>
#include <xen/domain.h>
#include <xen/event.h>
#include <xen/domain_page.h>
#include <xen/trace.h>
#include <xen/console.h>
#include <xen/iocap.h>
#include <xen/rcupdate.h>
#include <xen/guest_access.h>
#include <xen/bitmap.h>
#include <xen/shadow.h>
#include <asm/current.h>
#include <public/domctl.h>
#include <acm/acm_hooks.h>

extern long arch_do_domctl(
    struct xen_domctl *op, XEN_GUEST_HANDLE(xen_domctl_t) u_domctl);
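
/* Export a hypervisor cpumask_t to a guest-supplied xenctl_cpumap;
 * any guest bytes beyond the hypervisor's bytemap are zero-filled. */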
void cpumask_to_xenctl_cpumap(
    struct xenctl_cpumap *xenctl_cpumap, cpumask_t *cpumask)
{
    unsigned int guest_bytes, copy_bytes, i;
    uint8_t zero = 0;
    uint8_t bytemap[(NR_CPUS + 7) / 8];

    if ( guest_handle_is_null(xenctl_cpumap->bitmap) )
        return;

    guest_bytes = (xenctl_cpumap->nr_cpus + 7) / 8;
    copy_bytes  = min_t(unsigned int, guest_bytes, sizeof(bytemap));

    bitmap_long_to_byte(bytemap, cpus_addr(*cpumask), NR_CPUS);

    copy_to_guest(xenctl_cpumap->bitmap, bytemap, copy_bytes);

    for ( i = copy_bytes; i < guest_bytes; i++ )
        copy_to_guest_offset(xenctl_cpumap->bitmap, i, &zero, 1);
}
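
/* Inverse of the above: read a byte-granularity CPU map from the guest
 * and expand it into a hypervisor cpumask_t (at most NR_CPUS bits). */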
void xenctl_cpumap_to_cpumask(
    cpumask_t *cpumask, struct xenctl_cpumap *xenctl_cpumap)
{
    unsigned int guest_bytes, copy_bytes;
    uint8_t bytemap[(NR_CPUS + 7) / 8];

    guest_bytes = (xenctl_cpumap->nr_cpus + 7) / 8;
    copy_bytes  = min_t(unsigned int, guest_bytes, sizeof(bytemap));

    cpus_clear(*cpumask);

    if ( guest_handle_is_null(xenctl_cpumap->bitmap) )
        return;

    copy_from_guest(bytemap, xenctl_cpumap->bitmap, copy_bytes);

    bitmap_byte_to_long(cpus_addr(*cpumask), bytemap, NR_CPUS);
}
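
/* A domid is free if it lies below the reserved range and no current
 * domain owns it. */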
static inline int is_free_domid(domid_t dom)
{
    struct domain *d;

    if ( dom >= DOMID_FIRST_RESERVED )
        return 0;

    if ( (d = rcu_lock_domain_by_id(dom)) == NULL )
        return 1;

    rcu_unlock_domain(d);
    return 0;
}
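
/* Snapshot a domain's state (vcpu counts, flags, cpu time, memory,
 * ssidref, handle) into a xen_domctl_getdomaininfo block. */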
void getdomaininfo(struct domain *d, struct xen_domctl_getdomaininfo *info)
{
    struct vcpu *v;
    u64 cpu_time = 0;
    int flags = XEN_DOMINF_blocked;
    struct vcpu_runstate_info runstate;

    info->domain = d->domain_id;
    info->nr_online_vcpus = 0;

    /*
     * - domain is marked as blocked only if all its vcpus are blocked
     * - domain is marked as running if any of its vcpus is running
     */
    for_each_vcpu ( d, v )
    {
        vcpu_runstate_get(v, &runstate);
        cpu_time += runstate.time[RUNSTATE_running];
        info->max_vcpu_id = v->vcpu_id;
        if ( !test_bit(_VPF_down, &v->pause_flags) )
        {
            if ( !(v->pause_flags & VPF_blocked) )
                flags &= ~XEN_DOMINF_blocked;
            if ( v->is_running )
                flags |= XEN_DOMINF_running;
            info->nr_online_vcpus++;
        }
    }

    info->cpu_time = cpu_time;

    info->flags = flags |
        (d->is_dying ? XEN_DOMINF_dying : 0) |
        (d->is_shut_down ? XEN_DOMINF_shutdown : 0) |
        (d->is_paused_by_controller ? XEN_DOMINF_paused : 0) |
        d->shutdown_code << XEN_DOMINF_shutdownshift;

    if ( is_hvm_domain(d) )
        info->flags |= XEN_DOMINF_hvm_guest;

    if ( d->ssid != NULL )
        info->ssidref = ((struct acm_ssid_domain *)d->ssid)->ssidref;
    else
        info->ssidref = ACM_DEFAULT_SSID;

    info->tot_pages = d->tot_pages;
    info->max_pages = d->max_pages;
    info->shared_info_frame = mfn_to_gmfn(d, __pa(d->shared_info)>>PAGE_SHIFT);

    memcpy(info->handle, d->handle, sizeof(xen_domain_handle_t));
}
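
/* Pick an initial physical CPU for a new domain's VCPU0: the least
 * populated online CPU, preferring non-primary hyperthreads. */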
static unsigned int default_vcpu0_location(void)
{
    struct domain *d;
    struct vcpu *v;
    unsigned int i, cpu, cnt[NR_CPUS] = { 0 };
    cpumask_t cpu_exclude_map;

    /* Do an initial CPU placement. Pick the least-populated CPU. */
    rcu_read_lock(&domlist_read_lock);
    for_each_domain ( d )
        for_each_vcpu ( d, v )
            if ( !test_bit(_VPF_down, &v->pause_flags) )
                cnt[v->processor]++;
    rcu_read_unlock(&domlist_read_lock);

    /*
     * If we're on a HT system, we only auto-allocate to a non-primary HT. We
     * favour high numbered CPUs in the event of a tie.
     */
    cpu = first_cpu(cpu_sibling_map[0]);
    if ( cpus_weight(cpu_sibling_map[0]) > 1 )
        cpu = next_cpu(cpu, cpu_sibling_map[0]);
    cpu_exclude_map = cpu_sibling_map[0];
    for_each_online_cpu ( i )
    {
        if ( cpu_isset(i, cpu_exclude_map) )
            continue;
        if ( (i == first_cpu(cpu_sibling_map[i])) &&
             (cpus_weight(cpu_sibling_map[i]) > 1) )
            continue;
        cpus_or(cpu_exclude_map, cpu_exclude_map, cpu_sibling_map[i]);
        if ( cnt[i] <= cnt[cpu] )
            cpu = i;
    }

    return cpu;
}
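
/* Top-level domctl hypercall handler. Only privileged (control) domains
 * may invoke it; all operations are serialised by the global domctl_lock. */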
long do_domctl(XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
{
    long ret = 0;
    struct xen_domctl curop, *op = &curop;
    static DEFINE_SPINLOCK(domctl_lock);

    if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( copy_from_guest(op, u_domctl, 1) )
        return -EFAULT;

    if ( op->interface_version != XEN_DOMCTL_INTERFACE_VERSION )
        return -EACCES;

    spin_lock(&domctl_lock);

    switch ( op->cmd )
    {

    case XEN_DOMCTL_setvcpucontext:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);
        vcpu_guest_context_u c = { .nat = NULL };
        unsigned int vcpu = op->u.vcpucontext.vcpu;
        struct vcpu *v;

        ret = -ESRCH;
        if ( d == NULL )
            break;

        ret = -EINVAL;
        if ( (vcpu >= MAX_VIRT_CPUS) || ((v = d->vcpu[vcpu]) == NULL) )
            goto svc_out;

        if ( guest_handle_is_null(op->u.vcpucontext.ctxt) )
        {
            ret = vcpu_reset(v);
            goto svc_out;
        }

#ifdef CONFIG_COMPAT
        BUILD_BUG_ON(sizeof(struct vcpu_guest_context)
                     < sizeof(struct compat_vcpu_guest_context));
#endif
        ret = -ENOMEM;
        if ( (c.nat = xmalloc(struct vcpu_guest_context)) == NULL )
            goto svc_out;

        if ( !IS_COMPAT(v->domain) )
            ret = copy_from_guest(c.nat, op->u.vcpucontext.ctxt, 1);
#ifdef CONFIG_COMPAT
        else
            ret = copy_from_guest(c.cmp,
                                  guest_handle_cast(op->u.vcpucontext.ctxt,
                                                    void), 1);
#endif
        ret = ret ? -EFAULT : 0;

        if ( ret == 0 )
        {
            domain_pause(d);
            ret = arch_set_info_guest(v, c);
            domain_unpause(d);
        }

    svc_out:
        xfree(c.nat);
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_pausedomain:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);
        ret = -ESRCH;
        if ( d != NULL )
        {
            ret = -EINVAL;
            if ( d != current->domain )
            {
                domain_pause_by_systemcontroller(d);
                ret = 0;
            }
            rcu_unlock_domain(d);
        }
    }
    break;

    case XEN_DOMCTL_unpausedomain:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);

        ret = -ESRCH;
        if ( d == NULL )
            break;

        domain_unpause_by_systemcontroller(d);
        rcu_unlock_domain(d);
        ret = 0;
    }
    break;

    case XEN_DOMCTL_resumedomain:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);

        ret = -ESRCH;
        if ( d == NULL )
            break;

        domain_resume(d);
        rcu_unlock_domain(d);
        ret = 0;
    }
    break;
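
    /* Allocate the caller-requested domid, or pick a free one by scanning
     * forward from a static 'rover' so recently freed domids are not
     * reused immediately. */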
    case XEN_DOMCTL_createdomain:
    {
        struct domain *d;
        domid_t dom;
        static domid_t rover = 0;
        unsigned int domcr_flags;

        ret = -EINVAL;
        if ( supervisor_mode_kernel ||
             (op->u.createdomain.flags & ~XEN_DOMCTL_CDF_hvm_guest) )
            break;

        dom = op->domain;
        if ( (dom > 0) && (dom < DOMID_FIRST_RESERVED) )
        {
            ret = -EINVAL;
            if ( !is_free_domid(dom) )
                break;
        }
        else
        {
            for ( dom = rover + 1; dom != rover; dom++ )
            {
                if ( dom == DOMID_FIRST_RESERVED )
                    dom = 0;
                if ( is_free_domid(dom) )
                    break;
            }

            ret = -ENOMEM;
            if ( dom == rover )
                break;

            rover = dom;
        }

        domcr_flags = 0;
        if ( op->u.createdomain.flags & XEN_DOMCTL_CDF_hvm_guest )
            domcr_flags |= DOMCRF_hvm;

        ret = -ENOMEM;
        d = domain_create(dom, domcr_flags, op->u.createdomain.ssidref);
        if ( d == NULL )
            break;

        ret = 0;

        memcpy(d->handle, op->u.createdomain.handle,
               sizeof(xen_domain_handle_t));

        op->domain = d->domain_id;
        if ( copy_to_guest(u_domctl, op, 1) )
            ret = -EFAULT;
    }
    break;

    case XEN_DOMCTL_max_vcpus:
    {
        struct domain *d;
        unsigned int i, max = op->u.max_vcpus.max, cpu;

        ret = -EINVAL;
        if ( max > MAX_VIRT_CPUS )
            break;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
            break;

        /* Needed, for example, to ensure writable p.t. state is synced. */
        domain_pause(d);

        /* We cannot reduce maximum VCPUs. */
        ret = -EINVAL;
        if ( (max != MAX_VIRT_CPUS) && (d->vcpu[max] != NULL) )
            goto maxvcpu_out;

        ret = -ENOMEM;
        for ( i = 0; i < max; i++ )
        {
            if ( d->vcpu[i] != NULL )
                continue;

            cpu = (i == 0) ?
                default_vcpu0_location() :
                (d->vcpu[i-1]->processor + 1) % num_online_cpus();

            if ( alloc_vcpu(d, i, cpu) == NULL )
                goto maxvcpu_out;
        }

        ret = 0;

    maxvcpu_out:
        domain_unpause(d);
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_destroydomain:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);
        ret = -ESRCH;
        if ( d != NULL )
        {
            ret = -EINVAL;
            if ( d != current->domain )
            {
                domain_kill(d);
                ret = 0;
            }
            rcu_unlock_domain(d);
        }
    }
    break;

    case XEN_DOMCTL_setvcpuaffinity:
    case XEN_DOMCTL_getvcpuaffinity:
    {
        domid_t dom = op->domain;
        struct domain *d = rcu_lock_domain_by_id(dom);
        struct vcpu *v;
        cpumask_t new_affinity;

        ret = -ESRCH;
        if ( d == NULL )
            break;

        ret = -EINVAL;
        if ( op->u.vcpuaffinity.vcpu >= MAX_VIRT_CPUS )
            goto vcpuaffinity_out;

        ret = -ESRCH;
        if ( (v = d->vcpu[op->u.vcpuaffinity.vcpu]) == NULL )
            goto vcpuaffinity_out;

        if ( op->cmd == XEN_DOMCTL_setvcpuaffinity )
        {
            xenctl_cpumap_to_cpumask(
                &new_affinity, &op->u.vcpuaffinity.cpumap);
            ret = vcpu_set_affinity(v, &new_affinity);
        }
        else
        {
            cpumask_to_xenctl_cpumap(
                &op->u.vcpuaffinity.cpumap, &v->cpu_affinity);
            ret = 0;
        }

    vcpuaffinity_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_scheduler_op:
    {
        struct domain *d;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
            break;

        ret = sched_adjust(d, &op->u.scheduler_op);
        if ( copy_to_guest(u_domctl, op, 1) )
            ret = -EFAULT;

        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_getdomaininfo:
    {
        struct domain *d;
        domid_t dom;

        dom = op->domain;
        if ( dom == DOMID_SELF )
            dom = current->domain->domain_id;

        rcu_read_lock(&domlist_read_lock);

        for_each_domain ( d )
        {
            if ( d->domain_id >= dom )
                break;
        }

        if ( d == NULL )
        {
            rcu_read_unlock(&domlist_read_lock);
            ret = -ESRCH;
            break;
        }

        getdomaininfo(d, &op->u.getdomaininfo);

        op->domain = op->u.getdomaininfo.domain;
        if ( copy_to_guest(u_domctl, op, 1) )
            ret = -EFAULT;

        rcu_read_unlock(&domlist_read_lock);
    }
    break;

    case XEN_DOMCTL_getvcpucontext:
    {
        vcpu_guest_context_u c = { .nat = NULL };
        struct domain *d;
        struct vcpu *v;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
            break;

        ret = -EINVAL;
        if ( op->u.vcpucontext.vcpu >= MAX_VIRT_CPUS )
            goto getvcpucontext_out;

        ret = -ESRCH;
        if ( (v = d->vcpu[op->u.vcpucontext.vcpu]) == NULL )
            goto getvcpucontext_out;

        ret = -ENODATA;
        if ( !v->is_initialised )
            goto getvcpucontext_out;

#ifdef CONFIG_COMPAT
        BUILD_BUG_ON(sizeof(struct vcpu_guest_context)
                     < sizeof(struct compat_vcpu_guest_context));
#endif
        ret = -ENOMEM;
        if ( (c.nat = xmalloc(struct vcpu_guest_context)) == NULL )
            goto getvcpucontext_out;

        if ( v != current )
            vcpu_pause(v);

        arch_get_info_guest(v, c);
        ret = 0;

        if ( v != current )
            vcpu_unpause(v);

        if ( !IS_COMPAT(v->domain) )
            ret = copy_to_guest(op->u.vcpucontext.ctxt, c.nat, 1);
#ifdef CONFIG_COMPAT
        else
            ret = copy_to_guest(guest_handle_cast(op->u.vcpucontext.ctxt,
                                                  void), c.cmp, 1);
#endif

        if ( copy_to_guest(u_domctl, op, 1) || ret )
            ret = -EFAULT;

    getvcpucontext_out:
        xfree(c.nat);
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_getvcpuinfo:
    {
        struct domain *d;
        struct vcpu *v;
        struct vcpu_runstate_info runstate;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
            break;

        ret = -EINVAL;
        if ( op->u.getvcpuinfo.vcpu >= MAX_VIRT_CPUS )
            goto getvcpuinfo_out;

        ret = -ESRCH;
        if ( (v = d->vcpu[op->u.getvcpuinfo.vcpu]) == NULL )
            goto getvcpuinfo_out;

        vcpu_runstate_get(v, &runstate);

        op->u.getvcpuinfo.online = !test_bit(_VPF_down, &v->pause_flags);
        op->u.getvcpuinfo.blocked = test_bit(_VPF_blocked, &v->pause_flags);
        op->u.getvcpuinfo.running = v->is_running;
        op->u.getvcpuinfo.cpu_time = runstate.time[RUNSTATE_running];
        op->u.getvcpuinfo.cpu = v->processor;
        ret = 0;

        if ( copy_to_guest(u_domctl, op, 1) )
            ret = -EFAULT;

    getvcpuinfo_out:
        rcu_unlock_domain(d);
    }
    break;
    case XEN_DOMCTL_max_mem:
    {
        struct domain *d;
        unsigned long new_max;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = -EINVAL;
        new_max = op->u.max_mem.max_memkb >> (PAGE_SHIFT-10);

        spin_lock(&d->page_alloc_lock);
        if ( new_max >= d->tot_pages )
        {
            /* NB: must not bail out of the switch from in here -- that
             * would leak both page_alloc_lock and the RCU domain lock. */
            ret = guest_physmap_max_mem_pages(d, new_max);
            if ( ret == 0 )
                d->max_pages = new_max;
        }
        spin_unlock(&d->page_alloc_lock);

        rcu_unlock_domain(d);
    }
    break;
    case XEN_DOMCTL_setdomainhandle:
    {
        struct domain *d;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        memcpy(d->handle, op->u.setdomainhandle.handle,
               sizeof(xen_domain_handle_t));
        rcu_unlock_domain(d);
        ret = 0;
    }
    break;

    case XEN_DOMCTL_setdebugging:
    {
        struct domain *d;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        domain_pause(d);
        d->debugger_attached = !!op->u.setdebugging.enable;
        domain_unpause(d); /* causes guest to latch new status */
        rcu_unlock_domain(d);
        ret = 0;
    }
    break;

    case XEN_DOMCTL_irq_permission:
    {
        struct domain *d;
        unsigned int pirq = op->u.irq_permission.pirq;

        ret = -EINVAL;
        if ( pirq >= NR_IRQS )
            break;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        if ( op->u.irq_permission.allow_access )
            ret = irq_permit_access(d, pirq);
        else
            ret = irq_deny_access(d, pirq);

        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_iomem_permission:
    {
        struct domain *d;
        unsigned long mfn = op->u.iomem_permission.first_mfn;
        unsigned long nr_mfns = op->u.iomem_permission.nr_mfns;

        ret = -EINVAL;
        if ( (mfn + nr_mfns - 1) < mfn ) /* wrap? */
            break;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        if ( op->u.iomem_permission.allow_access )
            ret = iomem_permit_access(d, mfn, mfn + nr_mfns - 1);
        else
            ret = iomem_deny_access(d, mfn, mfn + nr_mfns - 1);

        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_settimeoffset:
    {
        struct domain *d;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d != NULL )
        {
            d->time_offset_seconds = op->u.settimeoffset.time_offset_seconds;
            rcu_unlock_domain(d);
            ret = 0;
        }
    }
    break;

    default:
        ret = arch_do_domctl(op, u_domctl);
        break;
    }

    spin_unlock(&domctl_lock);

    return ret;
}
/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */
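
For orientation, the sketch below shows roughly what a caller on the
control-domain side of this hypercall looks like. It is an assumption-laden
illustration, not libxc's actual code: do_xen_hypercall() stands in for the
privcmd ioctl plumbing, and a real toolstack must also lock the argument
buffer in memory before issuing the hypercall.

#include <string.h>
#include <public/domctl.h>   /* struct xen_domctl, XEN_DOMCTL_* */

/* Assumed wrapper around the privcmd hypercall ioctl. */
extern long do_xen_hypercall(unsigned int nr, void *arg);

static long pause_domain(domid_t domid)
{
    struct xen_domctl domctl;

    memset(&domctl, 0, sizeof(domctl));
    domctl.cmd = XEN_DOMCTL_pausedomain;
    domctl.domain = domid;
    /* do_domctl() above rejects mismatched versions with -EACCES. */
    domctl.interface_version = XEN_DOMCTL_INTERFACE_VERSION;

    /* __HYPERVISOR_domctl comes from public/xen.h. */
    return do_xen_hypercall(__HYPERVISOR_domctl, &domctl);
}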