ia64/xen-unstable

view xen/common/domctl.c @ 16856:cff4c8a1aa28

New XEN_DOMCTL_set_target
Stub domains (and probably other domain-disaggregation components too)
need to be able to manage another domain. This adds IS_PRIV_FOR, which
extends IS_PRIV by allowing a domain to hold privileges over a given
"target" domain. XEN_DOMCTL_set_target sets this "target", and a new
'target' configuration option makes the domain builder use it.

Signed-off-by: Samuel Thibault <samuel.thibault@eu.citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Jan 23 13:21:44 2008 +0000 (2008-01-23)
parents a583f3a7eafc
children 98c2665056ea
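
The privilege check this changeset introduces is two-level: a fully
privileged domain (e.g. dom0) passes for any target, while an unprivileged
domain passes only for the single domain recorded as its target. A minimal
sketch of the macro pair, assuming it sits next to IS_PRIV in
xen/include/xen/sched.h (the exact location and the is_privileged field are
assumptions based on context; only d->target appears in the code below):

    /* Sketch only: privileged outright, or _t is _d's designated target. */
    #define IS_PRIV(_d) ((_d)->is_privileged)
    #define IS_PRIV_FOR(_d, _t) \
        (IS_PRIV(_d) || ((_d)->target != NULL && (_d)->target == (_t)))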
line source
/******************************************************************************
 * domctl.c
 *
 * Domain management operations. For use by node control stack.
 *
 * Copyright (c) 2002-2006, K A Fraser
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/lib.h>
#include <xen/mm.h>
#include <xen/sched.h>
#include <xen/domain.h>
#include <xen/event.h>
#include <xen/domain_page.h>
#include <xen/trace.h>
#include <xen/console.h>
#include <xen/iocap.h>
#include <xen/rcupdate.h>
#include <xen/guest_access.h>
#include <xen/bitmap.h>
#include <xen/paging.h>
#include <asm/current.h>
#include <public/domctl.h>
#include <xsm/xsm.h>

extern long arch_do_domctl(
    struct xen_domctl *op, XEN_GUEST_HANDLE(xen_domctl_t) u_domctl);

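/*
 * Conversion helpers between Xen's internal cpumask_t and the
 * variable-width byte map (struct xenctl_cpumap) exchanged with the
 * toolstack: guest bytes beyond the hypervisor's map are zero-filled
 * on the way out, and bits beyond nr_cpus are masked off on the way in.
 */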
void cpumask_to_xenctl_cpumap(
    struct xenctl_cpumap *xenctl_cpumap, cpumask_t *cpumask)
{
    unsigned int guest_bytes, copy_bytes, i;
    uint8_t zero = 0;
    uint8_t bytemap[(NR_CPUS + 7) / 8];

    if ( guest_handle_is_null(xenctl_cpumap->bitmap) )
        return;

    guest_bytes = (xenctl_cpumap->nr_cpus + 7) / 8;
    copy_bytes = min_t(unsigned int, guest_bytes, sizeof(bytemap));

    bitmap_long_to_byte(bytemap, cpus_addr(*cpumask), NR_CPUS);

    if ( copy_bytes != 0 )
        copy_to_guest(xenctl_cpumap->bitmap, bytemap, copy_bytes);

    for ( i = copy_bytes; i < guest_bytes; i++ )
        copy_to_guest_offset(xenctl_cpumap->bitmap, i, &zero, 1);
}

void xenctl_cpumap_to_cpumask(
    cpumask_t *cpumask, struct xenctl_cpumap *xenctl_cpumap)
{
    unsigned int guest_bytes, copy_bytes;
    uint8_t bytemap[(NR_CPUS + 7) / 8];

    if ( guest_handle_is_null(xenctl_cpumap->bitmap) )
        return;

    guest_bytes = (xenctl_cpumap->nr_cpus + 7) / 8;
    copy_bytes = min_t(unsigned int, guest_bytes, sizeof(bytemap));

    memset(bytemap, 0, sizeof(bytemap));

    if ( copy_bytes != 0 )
    {
        copy_from_guest(bytemap, xenctl_cpumap->bitmap, copy_bytes);
        if ( (xenctl_cpumap->nr_cpus & 7) && (guest_bytes <= sizeof(bytemap)) )
            bytemap[guest_bytes-1] &= ~(0xff << (xenctl_cpumap->nr_cpus & 7));
    }

    bitmap_byte_to_long(cpus_addr(*cpumask), bytemap, NR_CPUS);
}

static inline int is_free_domid(domid_t dom)
{
    struct domain *d;

    if ( dom >= DOMID_FIRST_RESERVED )
        return 0;

    if ( (d = rcu_lock_domain_by_id(dom)) == NULL )
        return 1;

    rcu_unlock_domain(d);
    return 0;
}

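/*
 * Snapshot @d's state into @info. The caller must keep @d alive, e.g.
 * by holding the domlist RCU read lock as the getdomaininfo domctl does.
 */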
void getdomaininfo(struct domain *d, struct xen_domctl_getdomaininfo *info)
{
    struct vcpu *v;
    u64 cpu_time = 0;
    int flags = XEN_DOMINF_blocked;
    struct vcpu_runstate_info runstate;

    info->domain = d->domain_id;
    info->nr_online_vcpus = 0;

    /*
     * - domain is marked as blocked only if all its vcpus are blocked
     * - domain is marked as running if any of its vcpus is running
     */
    for_each_vcpu ( d, v )
    {
        vcpu_runstate_get(v, &runstate);
        cpu_time += runstate.time[RUNSTATE_running];
        info->max_vcpu_id = v->vcpu_id;
        if ( !test_bit(_VPF_down, &v->pause_flags) )
        {
            if ( !(v->pause_flags & VPF_blocked) )
                flags &= ~XEN_DOMINF_blocked;
            if ( v->is_running )
                flags |= XEN_DOMINF_running;
            info->nr_online_vcpus++;
        }
    }

    info->cpu_time = cpu_time;

    info->flags = flags |
        ((d->is_dying == DOMDYING_dead) ? XEN_DOMINF_dying : 0) |
        (d->is_shut_down ? XEN_DOMINF_shutdown : 0) |
        (d->is_paused_by_controller ? XEN_DOMINF_paused : 0) |
        (d->debugger_attached ? XEN_DOMINF_debugged : 0) |
        d->shutdown_code << XEN_DOMINF_shutdownshift;

    if ( is_hvm_domain(d) )
        info->flags |= XEN_DOMINF_hvm_guest;

    xsm_security_domaininfo(d, info);

    info->tot_pages = d->tot_pages;
    info->max_pages = d->max_pages;
    info->shared_info_frame = mfn_to_gmfn(d, __pa(d->shared_info)>>PAGE_SHIFT);

    memcpy(info->handle, d->handle, sizeof(xen_domain_handle_t));
}

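/*
 * Pick a pCPU for a new domain's VCPU0: choose the least-populated
 * online CPU, preferring non-primary hyperthreads so that primary
 * threads are left free; ties favour higher-numbered CPUs.
 */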
static unsigned int default_vcpu0_location(void)
{
    struct domain *d;
    struct vcpu *v;
    unsigned int i, cpu, cnt[NR_CPUS] = { 0 };
    cpumask_t cpu_exclude_map;

    /* Do an initial CPU placement. Pick the least-populated CPU. */
    rcu_read_lock(&domlist_read_lock);
    for_each_domain ( d )
        for_each_vcpu ( d, v )
            if ( !test_bit(_VPF_down, &v->pause_flags) )
                cnt[v->processor]++;
    rcu_read_unlock(&domlist_read_lock);

    /*
     * If we're on a HT system, we only auto-allocate to a non-primary HT. We
     * favour high numbered CPUs in the event of a tie.
     */
    cpu = first_cpu(cpu_sibling_map[0]);
    if ( cpus_weight(cpu_sibling_map[0]) > 1 )
        cpu = next_cpu(cpu, cpu_sibling_map[0]);
    cpu_exclude_map = cpu_sibling_map[0];
    for_each_online_cpu ( i )
    {
        if ( cpu_isset(i, cpu_exclude_map) )
            continue;
        if ( (i == first_cpu(cpu_sibling_map[i])) &&
             (cpus_weight(cpu_sibling_map[i]) > 1) )
            continue;
        cpus_or(cpu_exclude_map, cpu_exclude_map, cpu_sibling_map[i]);
        if ( cnt[i] <= cnt[cpu] )
            cpu = i;
    }

    return cpu;
}

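/*
 * Top-level XEN_DOMCTL_* dispatcher. All subops are serialised by
 * domctl_lock; each subop checks the caller's privilege over the target
 * domain (IS_PRIV_FOR, or plain IS_PRIV for domain creation) before
 * doing any work.
 */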
long do_domctl(XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
{
    long ret = 0;
    struct xen_domctl curop, *op = &curop;
    static DEFINE_SPINLOCK(domctl_lock);

    if ( copy_from_guest(op, u_domctl, 1) )
        return -EFAULT;

    if ( op->interface_version != XEN_DOMCTL_INTERFACE_VERSION )
        return -EACCES;

    spin_lock(&domctl_lock);

    switch ( op->cmd )
    {

    case XEN_DOMCTL_setvcpucontext:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);
        vcpu_guest_context_u c = { .nat = NULL };
        unsigned int vcpu = op->u.vcpucontext.vcpu;
        struct vcpu *v;

        ret = -ESRCH;
        if ( d == NULL )
            break;

        ret = -EPERM;
        if ( !IS_PRIV_FOR(current->domain, d) )
            goto svc_out;

        ret = xsm_setvcpucontext(d);
        if ( ret )
            goto svc_out;

        ret = -EINVAL;
        if ( (vcpu >= MAX_VIRT_CPUS) || ((v = d->vcpu[vcpu]) == NULL) )
            goto svc_out;

        if ( guest_handle_is_null(op->u.vcpucontext.ctxt) )
        {
            ret = vcpu_reset(v);
            goto svc_out;
        }

#ifdef CONFIG_COMPAT
        BUILD_BUG_ON(sizeof(struct vcpu_guest_context)
                     < sizeof(struct compat_vcpu_guest_context));
#endif
        ret = -ENOMEM;
        if ( (c.nat = xmalloc(struct vcpu_guest_context)) == NULL )
            goto svc_out;

        if ( !IS_COMPAT(v->domain) )
            ret = copy_from_guest(c.nat, op->u.vcpucontext.ctxt, 1);
#ifdef CONFIG_COMPAT
        else
            ret = copy_from_guest(c.cmp,
                                  guest_handle_cast(op->u.vcpucontext.ctxt,
                                                    void), 1);
#endif
        ret = ret ? -EFAULT : 0;

        if ( ret == 0 )
        {
            domain_pause(d);
            ret = arch_set_info_guest(v, c);
            domain_unpause(d);
        }

    svc_out:
        xfree(c.nat);
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_pausedomain:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);
        ret = -ESRCH;
        if ( d != NULL )
        {
            ret = -EPERM;
            if ( !IS_PRIV_FOR(current->domain, d) )
                goto pausedomain_out;

            ret = xsm_pausedomain(d);
            if ( ret )
                goto pausedomain_out;

            ret = -EINVAL;
            if ( d != current->domain )
            {
                domain_pause_by_systemcontroller(d);
                ret = 0;
            }
        pausedomain_out:
            rcu_unlock_domain(d);
        }
    }
    break;

    case XEN_DOMCTL_unpausedomain:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);

        ret = -ESRCH;
        if ( d == NULL )
            break;

        ret = -EPERM;
        if ( !IS_PRIV_FOR(current->domain, d) )
            goto unpausedomain_out;

        ret = xsm_unpausedomain(d);
        if ( ret )
            goto unpausedomain_out;

        domain_unpause_by_systemcontroller(d);
        ret = 0;
    unpausedomain_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_resumedomain:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);

        ret = -ESRCH;
        if ( d == NULL )
            break;

        ret = -EPERM;
        if ( !IS_PRIV_FOR(current->domain, d) )
            goto resumedomain_out;

        ret = xsm_resumedomain(d);
        if ( ret )
            goto resumedomain_out;

        domain_resume(d);
        ret = 0;
    resumedomain_out:
        rcu_unlock_domain(d);
    }
    break;

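    /*
     * Note that domain creation remains a plain IS_PRIV check rather
     * than IS_PRIV_FOR: a domain granted control of a target may manage
     * that one domain but cannot create new ones.
     */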
    case XEN_DOMCTL_createdomain:
    {
        struct domain *d;
        domid_t dom;
        static domid_t rover = 0;
        unsigned int domcr_flags;

        ret = -EPERM;
        if ( !IS_PRIV(current->domain) )
            break;

        ret = -EINVAL;
        if ( supervisor_mode_kernel ||
             (op->u.createdomain.flags & ~XEN_DOMCTL_CDF_hvm_guest) )
            break;

        dom = op->domain;
        if ( (dom > 0) && (dom < DOMID_FIRST_RESERVED) )
        {
            ret = -EINVAL;
            if ( !is_free_domid(dom) )
                break;
        }
        else
        {
            for ( dom = rover + 1; dom != rover; dom++ )
            {
                if ( dom == DOMID_FIRST_RESERVED )
                    dom = 0;
                if ( is_free_domid(dom) )
                    break;
            }

            ret = -ENOMEM;
            if ( dom == rover )
                break;

            rover = dom;
        }

        domcr_flags = 0;
        if ( op->u.createdomain.flags & XEN_DOMCTL_CDF_hvm_guest )
            domcr_flags |= DOMCRF_hvm;

        ret = -ENOMEM;
        d = domain_create(dom, domcr_flags, op->u.createdomain.ssidref);
        if ( d == NULL )
            break;

        ret = 0;

        memcpy(d->handle, op->u.createdomain.handle,
               sizeof(xen_domain_handle_t));

        op->domain = d->domain_id;
        if ( copy_to_guest(u_domctl, op, 1) )
            ret = -EFAULT;
    }
    break;

    case XEN_DOMCTL_max_vcpus:
    {
        struct domain *d;
        unsigned int i, max = op->u.max_vcpus.max, cpu;

        ret = -EINVAL;
        if ( max > MAX_VIRT_CPUS )
            break;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
            break;

        ret = -EPERM;
        if ( !IS_PRIV_FOR(current->domain, d) )
            goto maxvcpu_out2;

        ret = xsm_max_vcpus(d);
        if ( ret )
            goto maxvcpu_out2;

        /* Needed, for example, to ensure writable p.t. state is synced. */
        domain_pause(d);

        /* We cannot reduce maximum VCPUs. */
        ret = -EINVAL;
        if ( (max != MAX_VIRT_CPUS) && (d->vcpu[max] != NULL) )
            goto maxvcpu_out;

        ret = -ENOMEM;
        for ( i = 0; i < max; i++ )
        {
            if ( d->vcpu[i] != NULL )
                continue;

            cpu = (i == 0) ?
                default_vcpu0_location() :
                (d->vcpu[i-1]->processor + 1) % num_online_cpus();

            if ( alloc_vcpu(d, i, cpu) == NULL )
                goto maxvcpu_out;
        }

        ret = 0;

    maxvcpu_out:
        domain_unpause(d);
    maxvcpu_out2:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_destroydomain:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);
        ret = -ESRCH;
        if ( d != NULL )
        {
            ret = -EPERM;
            if ( IS_PRIV_FOR(current->domain, d) )
                ret = xsm_destroydomain(d) ? : domain_kill(d);
            rcu_unlock_domain(d);
        }
    }
    break;

    case XEN_DOMCTL_setvcpuaffinity:
    case XEN_DOMCTL_getvcpuaffinity:
    {
        domid_t dom = op->domain;
        struct domain *d = rcu_lock_domain_by_id(dom);
        struct vcpu *v;
        cpumask_t new_affinity;

        ret = -ESRCH;
        if ( d == NULL )
            break;

        ret = -EPERM;
        if ( !IS_PRIV_FOR(current->domain, d) )
            goto vcpuaffinity_out;

        ret = xsm_vcpuaffinity(op->cmd, d);
        if ( ret )
            goto vcpuaffinity_out;

        ret = -EINVAL;
        if ( op->u.vcpuaffinity.vcpu >= MAX_VIRT_CPUS )
            goto vcpuaffinity_out;

        ret = -ESRCH;
        if ( (v = d->vcpu[op->u.vcpuaffinity.vcpu]) == NULL )
            goto vcpuaffinity_out;

        if ( op->cmd == XEN_DOMCTL_setvcpuaffinity )
        {
            xenctl_cpumap_to_cpumask(
                &new_affinity, &op->u.vcpuaffinity.cpumap);
            ret = vcpu_set_affinity(v, &new_affinity);
        }
        else
        {
            cpumask_to_xenctl_cpumap(
                &op->u.vcpuaffinity.cpumap, &v->cpu_affinity);
            ret = 0;
        }

    vcpuaffinity_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_scheduler_op:
    {
        struct domain *d;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
            break;

        ret = -EPERM;
        if ( !IS_PRIV_FOR(current->domain, d) )
            goto scheduler_op_out;

        ret = xsm_scheduler(d);
        if ( ret )
            goto scheduler_op_out;

        ret = sched_adjust(d, &op->u.scheduler_op);
        if ( copy_to_guest(u_domctl, op, 1) )
            ret = -EFAULT;

    scheduler_op_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_getdomaininfo:
    {
        struct domain *d;
        domid_t dom = op->domain;

        rcu_read_lock(&domlist_read_lock);

        for_each_domain ( d )
            if ( d->domain_id >= dom && IS_PRIV_FOR(current->domain, d) )
                break;

        if ( d == NULL )
        {
            rcu_read_unlock(&domlist_read_lock);
            ret = -ESRCH;
            break;
        }

        ret = xsm_getdomaininfo(d);
        if ( ret )
            goto getdomaininfo_out;

        getdomaininfo(d, &op->u.getdomaininfo);

        op->domain = op->u.getdomaininfo.domain;
        if ( copy_to_guest(u_domctl, op, 1) )
            ret = -EFAULT;

    getdomaininfo_out:
        rcu_read_unlock(&domlist_read_lock);
    }
    break;

    case XEN_DOMCTL_getvcpucontext:
    {
        vcpu_guest_context_u c = { .nat = NULL };
        struct domain *d;
        struct vcpu *v;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
            break;

        ret = -EPERM;
        if ( !IS_PRIV_FOR(current->domain, d) )
            goto getvcpucontext_out;

        ret = xsm_getvcpucontext(d);
        if ( ret )
            goto getvcpucontext_out;

        ret = -EINVAL;
        if ( op->u.vcpucontext.vcpu >= MAX_VIRT_CPUS )
            goto getvcpucontext_out;

        ret = -ESRCH;
        if ( (v = d->vcpu[op->u.vcpucontext.vcpu]) == NULL )
            goto getvcpucontext_out;

        ret = -ENODATA;
        if ( !v->is_initialised )
            goto getvcpucontext_out;

#ifdef CONFIG_COMPAT
        BUILD_BUG_ON(sizeof(struct vcpu_guest_context)
                     < sizeof(struct compat_vcpu_guest_context));
#endif
        ret = -ENOMEM;
        if ( (c.nat = xmalloc(struct vcpu_guest_context)) == NULL )
            goto getvcpucontext_out;

        if ( v != current )
            vcpu_pause(v);

        arch_get_info_guest(v, c);
        ret = 0;

        if ( v != current )
            vcpu_unpause(v);

        if ( !IS_COMPAT(v->domain) )
            ret = copy_to_guest(op->u.vcpucontext.ctxt, c.nat, 1);
#ifdef CONFIG_COMPAT
        else
            ret = copy_to_guest(guest_handle_cast(op->u.vcpucontext.ctxt,
                                                  void), c.cmp, 1);
#endif

        if ( copy_to_guest(u_domctl, op, 1) || ret )
            ret = -EFAULT;

    getvcpucontext_out:
        xfree(c.nat);
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_getvcpuinfo:
    {
        struct domain *d;
        struct vcpu *v;
        struct vcpu_runstate_info runstate;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
            break;

        ret = -EPERM;
        if ( !IS_PRIV_FOR(current->domain, d) )
            goto getvcpuinfo_out;

        ret = xsm_getvcpuinfo(d);
        if ( ret )
            goto getvcpuinfo_out;

        ret = -EINVAL;
        if ( op->u.getvcpuinfo.vcpu >= MAX_VIRT_CPUS )
            goto getvcpuinfo_out;

        ret = -ESRCH;
        if ( (v = d->vcpu[op->u.getvcpuinfo.vcpu]) == NULL )
            goto getvcpuinfo_out;

        vcpu_runstate_get(v, &runstate);

        op->u.getvcpuinfo.online   = !test_bit(_VPF_down, &v->pause_flags);
        op->u.getvcpuinfo.blocked  = test_bit(_VPF_blocked, &v->pause_flags);
        op->u.getvcpuinfo.running  = v->is_running;
        op->u.getvcpuinfo.cpu_time = runstate.time[RUNSTATE_running];
        op->u.getvcpuinfo.cpu      = v->processor;
        ret = 0;

        if ( copy_to_guest(u_domctl, op, 1) )
            ret = -EFAULT;

    getvcpuinfo_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_max_mem:
    {
        struct domain *d;
        unsigned long new_max;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = -EPERM;
        if ( !IS_PRIV_FOR(current->domain, d) )
            goto max_mem_out;

        ret = xsm_setdomainmaxmem(d);
        if ( ret )
            goto max_mem_out;

        ret = -EINVAL;
        new_max = op->u.max_mem.max_memkb >> (PAGE_SHIFT-10);

        spin_lock(&d->page_alloc_lock);
        if ( new_max >= d->tot_pages )
        {
            /*
             * Must not bail out of the switch from in here: that would
             * leak both the page_alloc lock and the RCU domain reference.
             */
            ret = guest_physmap_max_mem_pages(d, new_max);
            if ( ret == 0 )
                d->max_pages = new_max;
        }
        else
            printk("new max %ld, tot pages %d\n", new_max, d->tot_pages);
        spin_unlock(&d->page_alloc_lock);

    max_mem_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_setdomainhandle:
    {
        struct domain *d;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = -EPERM;
        if ( !IS_PRIV_FOR(current->domain, d) )
            goto setdomainhandle_out;

        ret = xsm_setdomainhandle(d);
        if ( ret )
            goto setdomainhandle_out;

        memcpy(d->handle, op->u.setdomainhandle.handle,
               sizeof(xen_domain_handle_t));
        ret = 0;
    setdomainhandle_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_setdebugging:
    {
        struct domain *d;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = -EPERM;
        if ( !IS_PRIV_FOR(current->domain, d) )
            goto setdebugging_out;

        ret = xsm_setdebugging(d);
        if ( ret )
            goto setdebugging_out;

        domain_pause(d);
        d->debugger_attached = !!op->u.setdebugging.enable;
        domain_unpause(d); /* causes guest to latch new status */
        ret = 0;
    setdebugging_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_irq_permission:
    {
        struct domain *d;
        unsigned int pirq = op->u.irq_permission.pirq;

        ret = -EINVAL;
        if ( pirq >= NR_IRQS )
            break;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = -EPERM;
        if ( !IS_PRIV_FOR(current->domain, d) )
            goto irq_permission_out;

        ret = xsm_irq_permission(d, pirq, op->u.irq_permission.allow_access);
        if ( ret )
            goto irq_permission_out;

        if ( op->u.irq_permission.allow_access )
            ret = irq_permit_access(d, pirq);
        else
            ret = irq_deny_access(d, pirq);

    irq_permission_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_iomem_permission:
    {
        struct domain *d;
        unsigned long mfn = op->u.iomem_permission.first_mfn;
        unsigned long nr_mfns = op->u.iomem_permission.nr_mfns;

        ret = -EINVAL;
        if ( (mfn + nr_mfns - 1) < mfn ) /* wrap? */
            break;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = -EPERM;
        if ( !IS_PRIV_FOR(current->domain, d) )
            goto iomem_permission_out;

        ret = xsm_iomem_permission(d, mfn, op->u.iomem_permission.allow_access);
        if ( ret )
            goto iomem_permission_out;

        if ( op->u.iomem_permission.allow_access )
            ret = iomem_permit_access(d, mfn, mfn + nr_mfns - 1);
        else
            ret = iomem_deny_access(d, mfn, mfn + nr_mfns - 1);

    iomem_permission_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_settimeoffset:
    {
        struct domain *d;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = -EPERM;
        if ( !IS_PRIV_FOR(current->domain, d) )
            goto settimeoffset_out;

        ret = xsm_domain_settime(d);
        if ( ret )
            goto settimeoffset_out;

        d->time_offset_seconds = op->u.settimeoffset.time_offset_seconds;

        ret = 0;
    settimeoffset_out:
        rcu_unlock_domain(d);
    }
    break;

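    /*
     * New with this changeset: record @e as @d's "target", giving @d
     * IS_PRIV_FOR privileges over @e. The caller must itself be
     * privileged for both domains; the reference taken on @e here is
     * only dropped when @d is destroyed.
     */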
    case XEN_DOMCTL_set_target:
    {
        struct domain *d, *e;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = -EPERM;
        if ( !IS_PRIV_FOR(current->domain, d) )
            goto set_target_out;

        ret = -ESRCH;
        e = get_domain_by_id(op->u.set_target.target);
        if ( e == NULL )
            goto set_target_out;

        if ( d == e )
        {
            ret = -EINVAL;
            put_domain(e);
            goto set_target_out;
        }

        if ( !IS_PRIV_FOR(current->domain, e) )
        {
            ret = -EPERM;
            put_domain(e);
            goto set_target_out;
        }

        d->target = e;
        /* and we keep the reference on e, released when destroying d */
        ret = 0;

    set_target_out:
        rcu_unlock_domain(d);
    }
    break;

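    /*
     * Toolstack usage sketch (not part of this file; identifiers such as
     * stubdom_id and target_id are illustrative only): a privileged
     * domain grants a stub domain control over its target by filling in
     * the domctl and issuing the hypercall, e.g.
     *
     *     struct xen_domctl domctl = { 0 };
     *     domctl.cmd = XEN_DOMCTL_set_target;
     *     domctl.interface_version = XEN_DOMCTL_INTERFACE_VERSION;
     *     domctl.domain = stubdom_id;              (domain gaining control)
     *     domctl.u.set_target.target = target_id;  (domain to be controlled)
     *     ... issue via the usual privcmd/do_domctl path ...
     */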
    default:
        ret = arch_do_domctl(op, u_domctl);
        break;
    }

    spin_unlock(&domctl_lock);

    return ret;
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */