ia64/xen-unstable

xen/common/domctl.c @ 17062:0769835cf50f

x86 shadow: Reduce scope of shadow lock.

emulate_map_dest() doesn't require holding the shadow lock: the only
shadow-related operation possibly involved is removing a shadow, which
is infrequent and can acquire the lock internally. The rest is either
guest table walks or per-vcpu monitor table manipulation.

Signed-off-by: Kevin Tian <kevin.tian@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Feb 14 10:33:12 2008 +0000 (2008-02-14)
parents 98c2665056ea
children 4e2e98c2098e
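
A minimal sketch of the locking pattern the message describes (illustrative
only: walk_guest_tables(), map_hits_shadowed_page(), gmfn_of() and
update_monitor_table() are hypothetical stand-ins, and the actual change is
in the x86 shadow code, not in the file shown below):

static void *emulate_map_dest_sketch(struct vcpu *v, unsigned long vaddr)
{
    /* Guest table walk: reads guest state only; no shadow lock needed. */
    void *map = walk_guest_tables(v, vaddr);

    /* The one shadow-related operation is infrequent and acquires the
     * shadow lock internally, so the caller no longer holds it. */
    if ( map_hits_shadowed_page(v, vaddr) )
        sh_remove_shadows(v, gmfn_of(vaddr), 0 /* fast */, 0 /* all */);

    /* Per-vcpu monitor table manipulation: private to this vcpu. */
    update_monitor_table(v, map);

    return map;
}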
/******************************************************************************
 * domctl.c
 *
 * Domain management operations. For use by node control stack.
 *
 * Copyright (c) 2002-2006, K A Fraser
 */
#include <xen/config.h>
#include <xen/types.h>
#include <xen/lib.h>
#include <xen/mm.h>
#include <xen/sched.h>
#include <xen/domain.h>
#include <xen/event.h>
#include <xen/domain_page.h>
#include <xen/trace.h>
#include <xen/console.h>
#include <xen/iocap.h>
#include <xen/rcupdate.h>
#include <xen/guest_access.h>
#include <xen/bitmap.h>
#include <xen/paging.h>
#include <asm/current.h>
#include <public/domctl.h>
#include <xsm/xsm.h>

extern long arch_do_domctl(
    struct xen_domctl *op, XEN_GUEST_HANDLE(xen_domctl_t) u_domctl);

void cpumask_to_xenctl_cpumap(
    struct xenctl_cpumap *xenctl_cpumap, cpumask_t *cpumask)
{
    unsigned int guest_bytes, copy_bytes, i;
    uint8_t zero = 0;
    uint8_t bytemap[(NR_CPUS + 7) / 8];

    if ( guest_handle_is_null(xenctl_cpumap->bitmap) )
        return;

    guest_bytes = (xenctl_cpumap->nr_cpus + 7) / 8;
    copy_bytes  = min_t(unsigned int, guest_bytes, sizeof(bytemap));

    bitmap_long_to_byte(bytemap, cpus_addr(*cpumask), NR_CPUS);

    if ( copy_bytes != 0 )
        copy_to_guest(xenctl_cpumap->bitmap, bytemap, copy_bytes);

    for ( i = copy_bytes; i < guest_bytes; i++ )
        copy_to_guest_offset(xenctl_cpumap->bitmap, i, &zero, 1);
}
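
/*
 * Worked example for the conversion above (illustrative numbers): with
 * nr_cpus = 12 and NR_CPUS = 32, guest_bytes = (12+7)/8 = 2 and
 * sizeof(bytemap) = 4, so copy_bytes = 2 and the zero-fill loop never
 * runs. With nr_cpus = 64 instead, guest_bytes = 8 exceeds the local
 * buffer, so copy_bytes = 4 and the loop explicitly zeroes the four
 * trailing guest bytes that Xen has no CPUs for.
 */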

void xenctl_cpumap_to_cpumask(
    cpumask_t *cpumask, struct xenctl_cpumap *xenctl_cpumap)
{
    unsigned int guest_bytes, copy_bytes;
    uint8_t bytemap[(NR_CPUS + 7) / 8];

    if ( guest_handle_is_null(xenctl_cpumap->bitmap) )
        return;

    guest_bytes = (xenctl_cpumap->nr_cpus + 7) / 8;
    copy_bytes  = min_t(unsigned int, guest_bytes, sizeof(bytemap));

    memset(bytemap, 0, sizeof(bytemap));

    if ( copy_bytes != 0 )
    {
        copy_from_guest(bytemap, xenctl_cpumap->bitmap, copy_bytes);
        if ( (xenctl_cpumap->nr_cpus & 7) && (guest_bytes <= sizeof(bytemap)) )
            bytemap[guest_bytes-1] &= ~(0xff << (xenctl_cpumap->nr_cpus & 7));
    }

    bitmap_byte_to_long(cpus_addr(*cpumask), bytemap, NR_CPUS);
}
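
/*
 * Worked example for the trailing-bit mask above (illustrative numbers):
 * with nr_cpus = 12, guest_bytes = 2 and nr_cpus & 7 = 4, so the last
 * copied byte is ANDed with ~(0xff << 4) = 0x0f, clearing the bits for
 * CPUs 12-15, which lie beyond what the guest declared.
 */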

static inline int is_free_domid(domid_t dom)
{
    struct domain *d;

    if ( dom >= DOMID_FIRST_RESERVED )
        return 0;

    if ( (d = rcu_lock_domain_by_id(dom)) == NULL )
        return 1;

    rcu_unlock_domain(d);
    return 0;
}

void getdomaininfo(struct domain *d, struct xen_domctl_getdomaininfo *info)
{
    struct vcpu *v;
    u64 cpu_time = 0;
    int flags = XEN_DOMINF_blocked;
    struct vcpu_runstate_info runstate;

    info->domain = d->domain_id;
    info->nr_online_vcpus = 0;

    /*
     * - domain is marked as blocked only if all its vcpus are blocked
     * - domain is marked as running if any of its vcpus is running
     */
    for_each_vcpu ( d, v )
    {
        vcpu_runstate_get(v, &runstate);
        cpu_time += runstate.time[RUNSTATE_running];
        info->max_vcpu_id = v->vcpu_id;
        if ( !test_bit(_VPF_down, &v->pause_flags) )
        {
            if ( !(v->pause_flags & VPF_blocked) )
                flags &= ~XEN_DOMINF_blocked;
            if ( v->is_running )
                flags |= XEN_DOMINF_running;
            info->nr_online_vcpus++;
        }
    }

    info->cpu_time = cpu_time;

    info->flags = flags |
        ((d->is_dying == DOMDYING_dead) ? XEN_DOMINF_dying    : 0) |
        (d->is_shut_down                ? XEN_DOMINF_shutdown : 0) |
        (d->is_paused_by_controller     ? XEN_DOMINF_paused   : 0) |
        (d->debugger_attached           ? XEN_DOMINF_debugged : 0) |
        d->shutdown_code << XEN_DOMINF_shutdownshift;

    if ( is_hvm_domain(d) )
        info->flags |= XEN_DOMINF_hvm_guest;

    xsm_security_domaininfo(d, info);

    info->tot_pages         = d->tot_pages;
    info->max_pages         = d->max_pages;
    info->shared_info_frame = mfn_to_gmfn(d, __pa(d->shared_info)>>PAGE_SHIFT);

    memcpy(info->handle, d->handle, sizeof(xen_domain_handle_t));
}

static unsigned int default_vcpu0_location(void)
{
    struct domain *d;
    struct vcpu   *v;
    unsigned int   i, cpu, cnt[NR_CPUS] = { 0 };
    cpumask_t      cpu_exclude_map;

    /* Do an initial CPU placement. Pick the least-populated CPU. */
    rcu_read_lock(&domlist_read_lock);
    for_each_domain ( d )
        for_each_vcpu ( d, v )
            if ( !test_bit(_VPF_down, &v->pause_flags) )
                cnt[v->processor]++;
    rcu_read_unlock(&domlist_read_lock);

    /*
     * If we're on a HT system, we only auto-allocate to a non-primary HT. We
     * favour high numbered CPUs in the event of a tie.
     */
    cpu = first_cpu(cpu_sibling_map[0]);
    if ( cpus_weight(cpu_sibling_map[0]) > 1 )
        cpu = next_cpu(cpu, cpu_sibling_map[0]);
    cpu_exclude_map = cpu_sibling_map[0];
    for_each_online_cpu ( i )
    {
        if ( cpu_isset(i, cpu_exclude_map) )
            continue;
        if ( (i == first_cpu(cpu_sibling_map[i])) &&
             (cpus_weight(cpu_sibling_map[i]) > 1) )
            continue;
        cpus_or(cpu_exclude_map, cpu_exclude_map, cpu_sibling_map[i]);
        if ( cnt[i] <= cnt[cpu] )
            cpu = i;
    }

    return cpu;
}
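
/*
 * Worked example for the placement above (illustrative topology): on a
 * 2-core/4-thread box with sibling pairs {0,1} and {2,3}, the primary
 * threads 0 and 2 are skipped, leaving candidates 1 and 3; with
 * cnt[1] == cnt[3], the "<=" comparison lets the higher-numbered CPU 3
 * win the tie.
 */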

long do_domctl(XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
{
    long ret = 0;
    struct xen_domctl curop, *op = &curop;
    static DEFINE_SPINLOCK(domctl_lock);

    if ( copy_from_guest(op, u_domctl, 1) )
        return -EFAULT;

    if ( op->interface_version != XEN_DOMCTL_INTERFACE_VERSION )
        return -EACCES;

    spin_lock(&domctl_lock);

    switch ( op->cmd )
    {

    case XEN_DOMCTL_setvcpucontext:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);
        vcpu_guest_context_u c = { .nat = NULL };
        unsigned int vcpu = op->u.vcpucontext.vcpu;
        struct vcpu *v;

        ret = -ESRCH;
        if ( d == NULL )
            break;

        ret = -EPERM;
        if ( !IS_PRIV_FOR(current->domain, d) )
            goto svc_out;

        ret = xsm_setvcpucontext(d);
        if ( ret )
            goto svc_out;

        ret = -EINVAL;
        if ( (vcpu >= MAX_VIRT_CPUS) || ((v = d->vcpu[vcpu]) == NULL) )
            goto svc_out;

        if ( guest_handle_is_null(op->u.vcpucontext.ctxt) )
        {
            ret = vcpu_reset(v);
            goto svc_out;
        }

#ifdef CONFIG_COMPAT
        BUILD_BUG_ON(sizeof(struct vcpu_guest_context)
                     < sizeof(struct compat_vcpu_guest_context));
#endif
        ret = -ENOMEM;
        if ( (c.nat = xmalloc(struct vcpu_guest_context)) == NULL )
            goto svc_out;

        if ( !IS_COMPAT(v->domain) )
            ret = copy_from_guest(c.nat, op->u.vcpucontext.ctxt, 1);
#ifdef CONFIG_COMPAT
        else
            ret = copy_from_guest(c.cmp,
                                  guest_handle_cast(op->u.vcpucontext.ctxt,
                                                    void), 1);
#endif
        ret = ret ? -EFAULT : 0;

        if ( ret == 0 )
        {
            domain_pause(d);
            ret = arch_set_info_guest(v, c);
            domain_unpause(d);
        }

    svc_out:
        xfree(c.nat);
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_pausedomain:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);
        ret = -ESRCH;
        if ( d != NULL )
        {
            ret = -EPERM;
            if ( !IS_PRIV_FOR(current->domain, d) )
                goto pausedomain_out;

            ret = xsm_pausedomain(d);
            if ( ret )
                goto pausedomain_out;

            ret = -EINVAL;
            if ( d != current->domain )
            {
                domain_pause_by_systemcontroller(d);
                ret = 0;
            }
        pausedomain_out:
            rcu_unlock_domain(d);
        }
    }
    break;

    case XEN_DOMCTL_unpausedomain:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);

        ret = -ESRCH;
        if ( d == NULL )
            break;

        ret = -EPERM;
        if ( !IS_PRIV_FOR(current->domain, d) )
            goto unpausedomain_out;

        ret = xsm_unpausedomain(d);
        if ( ret )
            goto unpausedomain_out;

        domain_unpause_by_systemcontroller(d);
        ret = 0;
    unpausedomain_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_resumedomain:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);

        ret = -ESRCH;
        if ( d == NULL )
            break;

        ret = -EPERM;
        if ( !IS_PRIV_FOR(current->domain, d) )
            goto resumedomain_out;

        ret = xsm_resumedomain(d);
        if ( ret )
            goto resumedomain_out;

        domain_resume(d);
        ret = 0;
    resumedomain_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_createdomain:
    {
        struct domain *d;
        domid_t        dom;
        static domid_t rover = 0;
        unsigned int   domcr_flags;

        ret = -EPERM;
        if ( !IS_PRIV(current->domain) )
            break;

        ret = -EINVAL;
        if ( supervisor_mode_kernel ||
             (op->u.createdomain.flags &
              ~(XEN_DOMCTL_CDF_hvm_guest | XEN_DOMCTL_CDF_hap)) )
            break;

        dom = op->domain;
        if ( (dom > 0) && (dom < DOMID_FIRST_RESERVED) )
        {
            ret = -EINVAL;
            if ( !is_free_domid(dom) )
                break;
        }
        else
        {
            for ( dom = rover + 1; dom != rover; dom++ )
            {
                if ( dom == DOMID_FIRST_RESERVED )
                    dom = 0;
                if ( is_free_domid(dom) )
                    break;
            }

            ret = -ENOMEM;
            if ( dom == rover )
                break;

            rover = dom;
        }

        domcr_flags = 0;
        if ( op->u.createdomain.flags & XEN_DOMCTL_CDF_hvm_guest )
            domcr_flags |= DOMCRF_hvm;
        if ( op->u.createdomain.flags & XEN_DOMCTL_CDF_hap )
            domcr_flags |= DOMCRF_hap;

        ret = -ENOMEM;
        d = domain_create(dom, domcr_flags, op->u.createdomain.ssidref);
        if ( d == NULL )
            break;

        ret = 0;

        memcpy(d->handle, op->u.createdomain.handle,
               sizeof(xen_domain_handle_t));

        op->domain = d->domain_id;
        if ( copy_to_guest(u_domctl, op, 1) )
            ret = -EFAULT;
    }
    break;
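
    /*
     * Caller-side sketch for the createdomain path above (a minimal,
     * hedged example -- real toolstacks go through libxc's do_domctl()
     * wrapper rather than issuing the hypercall by hand, and "uuid" is
     * a hypothetical 16-byte buffer):
     *
     *     struct xen_domctl op = { 0 };
     *     op.cmd                    = XEN_DOMCTL_createdomain;
     *     op.interface_version      = XEN_DOMCTL_INTERFACE_VERSION;
     *     op.domain                 = 0;   (0 => let Xen pick a free id)
     *     op.u.createdomain.ssidref = 0;
     *     op.u.createdomain.flags   = XEN_DOMCTL_CDF_hvm_guest;
     *     memcpy(op.u.createdomain.handle, uuid,
     *            sizeof(xen_domain_handle_t));
     *     (issue the hypercall; on success op.domain holds the new id)
     */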

    case XEN_DOMCTL_max_vcpus:
    {
        struct domain *d;
        unsigned int i, max = op->u.max_vcpus.max, cpu;

        ret = -EINVAL;
        if ( max > MAX_VIRT_CPUS )
            break;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
            break;

        ret = -EPERM;
        if ( !IS_PRIV_FOR(current->domain, d) )
            goto maxvcpu_out2;

        ret = xsm_max_vcpus(d);
        if ( ret )
            goto maxvcpu_out2;

        /* Needed, for example, to ensure writable p.t. state is synced. */
        domain_pause(d);

        /* We cannot reduce maximum VCPUs. */
        ret = -EINVAL;
        if ( (max != MAX_VIRT_CPUS) && (d->vcpu[max] != NULL) )
            goto maxvcpu_out;

        ret = -ENOMEM;
        for ( i = 0; i < max; i++ )
        {
            if ( d->vcpu[i] != NULL )
                continue;

            cpu = (i == 0) ?
                default_vcpu0_location() :
                (d->vcpu[i-1]->processor + 1) % num_online_cpus();

            if ( alloc_vcpu(d, i, cpu) == NULL )
                goto maxvcpu_out;
        }

        ret = 0;

    maxvcpu_out:
        domain_unpause(d);
    maxvcpu_out2:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_destroydomain:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);
        ret = -ESRCH;
        if ( d != NULL )
        {
            ret = -EPERM;
            if ( IS_PRIV_FOR(current->domain, d) )
                ret = xsm_destroydomain(d) ? : domain_kill(d);
            rcu_unlock_domain(d);
        }
    }
    break;

    case XEN_DOMCTL_setvcpuaffinity:
    case XEN_DOMCTL_getvcpuaffinity:
    {
        domid_t dom = op->domain;
        struct domain *d = rcu_lock_domain_by_id(dom);
        struct vcpu *v;
        cpumask_t new_affinity;

        ret = -ESRCH;
        if ( d == NULL )
            break;

        ret = -EPERM;
        if ( !IS_PRIV_FOR(current->domain, d) )
            goto vcpuaffinity_out;

        ret = xsm_vcpuaffinity(op->cmd, d);
        if ( ret )
            goto vcpuaffinity_out;

        ret = -EINVAL;
        if ( op->u.vcpuaffinity.vcpu >= MAX_VIRT_CPUS )
            goto vcpuaffinity_out;

        ret = -ESRCH;
        if ( (v = d->vcpu[op->u.vcpuaffinity.vcpu]) == NULL )
            goto vcpuaffinity_out;

        if ( op->cmd == XEN_DOMCTL_setvcpuaffinity )
        {
            xenctl_cpumap_to_cpumask(
                &new_affinity, &op->u.vcpuaffinity.cpumap);
            ret = vcpu_set_affinity(v, &new_affinity);
        }
        else
        {
            cpumask_to_xenctl_cpumap(
                &op->u.vcpuaffinity.cpumap, &v->cpu_affinity);
            ret = 0;
        }

    vcpuaffinity_out:
        rcu_unlock_domain(d);
    }
    break;
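
    /*
     * Caller-side sketch for setvcpuaffinity above (illustrative): the
     * wire format is a xenctl_cpumap, i.e. a guest handle to a byte
     * array plus a bit count, with bit N representing CPU N:
     *
     *     uint8_t map[(nr_cpus + 7) / 8];
     *     (set the desired bits in map)
     *     set_xen_guest_handle(op.u.vcpuaffinity.cpumap.bitmap, map);
     *     op.u.vcpuaffinity.cpumap.nr_cpus = nr_cpus;
     *     op.u.vcpuaffinity.vcpu = vcpu_id;
     *
     * xenctl_cpumap_to_cpumask() then clears any bits beyond nr_cpus
     * before the mask reaches vcpu_set_affinity().
     */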

    case XEN_DOMCTL_scheduler_op:
    {
        struct domain *d;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
            break;

        ret = -EPERM;
        if ( !IS_PRIV_FOR(current->domain, d) )
            goto scheduler_op_out;

        ret = xsm_scheduler(d);
        if ( ret )
            goto scheduler_op_out;

        ret = sched_adjust(d, &op->u.scheduler_op);
        if ( copy_to_guest(u_domctl, op, 1) )
            ret = -EFAULT;

    scheduler_op_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_getdomaininfo:
    {
        struct domain *d;
        domid_t dom = op->domain;

        rcu_read_lock(&domlist_read_lock);

        for_each_domain ( d )
            if ( (d->domain_id >= dom) && IS_PRIV_FOR(current->domain, d) )
                break;

        if ( d == NULL )
        {
            rcu_read_unlock(&domlist_read_lock);
            ret = -ESRCH;
            break;
        }

        ret = xsm_getdomaininfo(d);
        if ( ret )
            goto getdomaininfo_out;

        getdomaininfo(d, &op->u.getdomaininfo);

        op->domain = op->u.getdomaininfo.domain;
        if ( copy_to_guest(u_domctl, op, 1) )
            ret = -EFAULT;

    getdomaininfo_out:
        rcu_read_unlock(&domlist_read_lock);
    }
    break;

    case XEN_DOMCTL_getvcpucontext:
    {
        vcpu_guest_context_u c = { .nat = NULL };
        struct domain *d;
        struct vcpu *v;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
            break;

        ret = -EPERM;
        if ( !IS_PRIV_FOR(current->domain, d) )
            goto getvcpucontext_out;

        ret = xsm_getvcpucontext(d);
        if ( ret )
            goto getvcpucontext_out;

        ret = -EINVAL;
        if ( op->u.vcpucontext.vcpu >= MAX_VIRT_CPUS )
            goto getvcpucontext_out;

        ret = -ESRCH;
        if ( (v = d->vcpu[op->u.vcpucontext.vcpu]) == NULL )
            goto getvcpucontext_out;

        ret = -ENODATA;
        if ( !v->is_initialised )
            goto getvcpucontext_out;

#ifdef CONFIG_COMPAT
        BUILD_BUG_ON(sizeof(struct vcpu_guest_context)
                     < sizeof(struct compat_vcpu_guest_context));
#endif
        ret = -ENOMEM;
        if ( (c.nat = xmalloc(struct vcpu_guest_context)) == NULL )
            goto getvcpucontext_out;

        if ( v != current )
            vcpu_pause(v);

        arch_get_info_guest(v, c);
        ret = 0;

        if ( v != current )
            vcpu_unpause(v);

        if ( !IS_COMPAT(v->domain) )
            ret = copy_to_guest(op->u.vcpucontext.ctxt, c.nat, 1);
#ifdef CONFIG_COMPAT
        else
            ret = copy_to_guest(guest_handle_cast(op->u.vcpucontext.ctxt,
                                                  void), c.cmp, 1);
#endif

        if ( copy_to_guest(u_domctl, op, 1) || ret )
            ret = -EFAULT;

    getvcpucontext_out:
        xfree(c.nat);
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_getvcpuinfo:
    {
        struct domain *d;
        struct vcpu *v;
        struct vcpu_runstate_info runstate;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
            break;

        ret = -EPERM;
        if ( !IS_PRIV_FOR(current->domain, d) )
            goto getvcpuinfo_out;

        ret = xsm_getvcpuinfo(d);
        if ( ret )
            goto getvcpuinfo_out;

        ret = -EINVAL;
        if ( op->u.getvcpuinfo.vcpu >= MAX_VIRT_CPUS )
            goto getvcpuinfo_out;

        ret = -ESRCH;
        if ( (v = d->vcpu[op->u.getvcpuinfo.vcpu]) == NULL )
            goto getvcpuinfo_out;

        vcpu_runstate_get(v, &runstate);

        op->u.getvcpuinfo.online   = !test_bit(_VPF_down, &v->pause_flags);
        op->u.getvcpuinfo.blocked  = test_bit(_VPF_blocked, &v->pause_flags);
        op->u.getvcpuinfo.running  = v->is_running;
        op->u.getvcpuinfo.cpu_time = runstate.time[RUNSTATE_running];
        op->u.getvcpuinfo.cpu      = v->processor;
        ret = 0;

        if ( copy_to_guest(u_domctl, op, 1) )
            ret = -EFAULT;

    getvcpuinfo_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_max_mem:
    {
        struct domain *d;
        unsigned long new_max;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = -EPERM;
        if ( !IS_PRIV_FOR(current->domain, d) )
            goto max_mem_out;

        ret = xsm_setdomainmaxmem(d);
        if ( ret )
            goto max_mem_out;

        ret = -EINVAL;
        new_max = op->u.max_mem.max_memkb >> (PAGE_SHIFT-10);

        spin_lock(&d->page_alloc_lock);
        if ( new_max >= d->tot_pages )
        {
            /* Must not leave this block early: the lock and the domain
             * reference are dropped on the common exit path below. */
            ret = guest_physmap_max_mem_pages(d, new_max);
            if ( ret == 0 )
                d->max_pages = new_max;
        }
        else
            printk("new max %ld, tot pages %d\n", new_max, d->tot_pages);
        spin_unlock(&d->page_alloc_lock);

    max_mem_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_setdomainhandle:
    {
        struct domain *d;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = -EPERM;
        if ( !IS_PRIV_FOR(current->domain, d) )
            goto setdomainhandle_out;

        ret = xsm_setdomainhandle(d);
        if ( ret )
            goto setdomainhandle_out;

        memcpy(d->handle, op->u.setdomainhandle.handle,
               sizeof(xen_domain_handle_t));
        ret = 0;
    setdomainhandle_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_setdebugging:
    {
        struct domain *d;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = -EPERM;
        if ( !IS_PRIV_FOR(current->domain, d) )
            goto setdebugging_out;

        ret = xsm_setdebugging(d);
        if ( ret )
            goto setdebugging_out;

        domain_pause(d);
        d->debugger_attached = !!op->u.setdebugging.enable;
        domain_unpause(d); /* causes guest to latch new status */
        ret = 0;
    setdebugging_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_irq_permission:
    {
        struct domain *d;
        unsigned int pirq = op->u.irq_permission.pirq;

        ret = -EINVAL;
        if ( pirq >= NR_IRQS )
            break;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = -EPERM;
        if ( !IS_PRIV_FOR(current->domain, d) )
            goto irq_permission_out;

        ret = xsm_irq_permission(d, pirq, op->u.irq_permission.allow_access);
        if ( ret )
            goto irq_permission_out;

        if ( op->u.irq_permission.allow_access )
            ret = irq_permit_access(d, pirq);
        else
            ret = irq_deny_access(d, pirq);

    irq_permission_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_iomem_permission:
    {
        struct domain *d;
        unsigned long mfn = op->u.iomem_permission.first_mfn;
        unsigned long nr_mfns = op->u.iomem_permission.nr_mfns;

        ret = -EINVAL;
        if ( (mfn + nr_mfns - 1) < mfn ) /* wrap? */
            break;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = -EPERM;
        if ( !IS_PRIV_FOR(current->domain, d) )
            goto iomem_permission_out;

        ret = xsm_iomem_permission(d, mfn, op->u.iomem_permission.allow_access);
        if ( ret )
            goto iomem_permission_out;

        if ( op->u.iomem_permission.allow_access )
            ret = iomem_permit_access(d, mfn, mfn + nr_mfns - 1);
        else
            ret = iomem_deny_access(d, mfn, mfn + nr_mfns - 1);

    iomem_permission_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_settimeoffset:
    {
        struct domain *d;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = -EPERM;
        if ( !IS_PRIV_FOR(current->domain, d) )
            goto settimeoffset_out;

        ret = xsm_domain_settime(d);
        if ( ret )
            goto settimeoffset_out;

        d->time_offset_seconds = op->u.settimeoffset.time_offset_seconds;

        ret = 0;
    settimeoffset_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_set_target:
    {
        struct domain *d, *e;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = -EPERM;
        if ( !IS_PRIV_FOR(current->domain, d) )
            goto set_target_out;

        ret = -ESRCH;
        e = get_domain_by_id(op->u.set_target.target);
        if ( e == NULL )
            goto set_target_out;

        if ( d == e )
        {
            ret = -EINVAL;
            put_domain(e);
            goto set_target_out;
        }

        if ( !IS_PRIV_FOR(current->domain, e) )
        {
            ret = -EPERM;
            put_domain(e);
            goto set_target_out;
        }

        /* We keep the reference on e; it is released when d is destroyed. */
        d->target = e;
        ret = 0;

    set_target_out:
        rcu_unlock_domain(d);
    }
    break;

    default:
        ret = arch_do_domctl(op, u_domctl);
        break;
    }

    spin_unlock(&domctl_lock);

    return ret;
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */