ia64/xen-unstable

xen/common/domctl.c @ 19835:edfdeb150f27

Fix buildsystem to detect udev > version 124

udev removed the udevinfo symlink from versions higher than 123 and
xen's build-system could not detect if udev is in place and has the
required version.

Signed-off-by: Marc-A. Dahlhaus <mad@wol.de>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Jun 25 13:02:37 2009 +0100
parents 468561f3c8ee
/******************************************************************************
 * domctl.c
 *
 * Domain management operations. For use by node control stack.
 *
 * Copyright (c) 2002-2006, K A Fraser
 */
#include <xen/config.h>
#include <xen/types.h>
#include <xen/lib.h>
#include <xen/mm.h>
#include <xen/sched.h>
#include <xen/domain.h>
#include <xen/event.h>
#include <xen/domain_page.h>
#include <xen/trace.h>
#include <xen/console.h>
#include <xen/iocap.h>
#include <xen/rcupdate.h>
#include <xen/guest_access.h>
#include <xen/bitmap.h>
#include <xen/paging.h>
#include <asm/current.h>
#include <public/domctl.h>
#include <xsm/xsm.h>

static DEFINE_SPINLOCK(domctl_lock);

extern long arch_do_domctl(
    struct xen_domctl *op, XEN_GUEST_HANDLE(xen_domctl_t) u_domctl);
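
/*
 * Copy a hypervisor-side cpumask_t into a guest-supplied xenctl_cpumap.
 * Any guest bytes beyond the hypervisor's NR_CPUS-wide bytemap are
 * explicitly zeroed so the caller never sees uninitialised data.
 */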
void cpumask_to_xenctl_cpumap(
    struct xenctl_cpumap *xenctl_cpumap, cpumask_t *cpumask)
{
    unsigned int guest_bytes, copy_bytes, i;
    uint8_t zero = 0;
    uint8_t bytemap[(NR_CPUS + 7) / 8];

    if ( guest_handle_is_null(xenctl_cpumap->bitmap) )
        return;

    guest_bytes = (xenctl_cpumap->nr_cpus + 7) / 8;
    copy_bytes  = min_t(unsigned int, guest_bytes, sizeof(bytemap));

    bitmap_long_to_byte(bytemap, cpus_addr(*cpumask), NR_CPUS);

    if ( copy_bytes != 0 )
        copy_to_guest(xenctl_cpumap->bitmap, bytemap, copy_bytes);

    for ( i = copy_bytes; i < guest_bytes; i++ )
        copy_to_guest_offset(xenctl_cpumap->bitmap, i, &zero, 1);
}
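
/*
 * Convert a guest-supplied xenctl_cpumap into a hypervisor-side cpumask_t.
 * Bits beyond the guest's advertised nr_cpus in the final byte are masked
 * off before the bytemap is expanded into the cpumask.
 */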
void xenctl_cpumap_to_cpumask(
    cpumask_t *cpumask, struct xenctl_cpumap *xenctl_cpumap)
{
    unsigned int guest_bytes, copy_bytes;
    uint8_t bytemap[(NR_CPUS + 7) / 8];

    if ( guest_handle_is_null(xenctl_cpumap->bitmap) )
        return;

    guest_bytes = (xenctl_cpumap->nr_cpus + 7) / 8;
    copy_bytes  = min_t(unsigned int, guest_bytes, sizeof(bytemap));

    memset(bytemap, 0, sizeof(bytemap));

    if ( copy_bytes != 0 )
    {
        copy_from_guest(bytemap, xenctl_cpumap->bitmap, copy_bytes);
        if ( (xenctl_cpumap->nr_cpus & 7) && (guest_bytes <= sizeof(bytemap)) )
            bytemap[guest_bytes-1] &= ~(0xff << (xenctl_cpumap->nr_cpus & 7));
    }

    bitmap_byte_to_long(cpus_addr(*cpumask), bytemap, NR_CPUS);
}
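
/* A domain ID is free if it is below DOMID_FIRST_RESERVED and not in use. */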
static inline int is_free_domid(domid_t dom)
{
    struct domain *d;

    if ( dom >= DOMID_FIRST_RESERVED )
        return 0;

    if ( (d = rcu_lock_domain_by_id(dom)) == NULL )
        return 1;

    rcu_unlock_domain(d);
    return 0;
}
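
/* Fill a xen_domctl_getdomaininfo structure with a snapshot of @d's state. */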
void getdomaininfo(struct domain *d, struct xen_domctl_getdomaininfo *info)
{
    struct vcpu *v;
    u64 cpu_time = 0;
    int flags = XEN_DOMINF_blocked;
    struct vcpu_runstate_info runstate;

    info->domain = d->domain_id;
    info->nr_online_vcpus = 0;
    info->ssidref = 0;

    /*
     * - domain is marked as blocked only if all its vcpus are blocked
     * - domain is marked as running if any of its vcpus is running
     */
    for_each_vcpu ( d, v )
    {
        vcpu_runstate_get(v, &runstate);
        cpu_time += runstate.time[RUNSTATE_running];
        info->max_vcpu_id = v->vcpu_id;
        if ( !test_bit(_VPF_down, &v->pause_flags) )
        {
            if ( !(v->pause_flags & VPF_blocked) )
                flags &= ~XEN_DOMINF_blocked;
            if ( v->is_running )
                flags |= XEN_DOMINF_running;
            info->nr_online_vcpus++;
        }
    }

    info->cpu_time = cpu_time;

    info->flags = (info->nr_online_vcpus ? flags : 0) |
        ((d->is_dying == DOMDYING_dead) ? XEN_DOMINF_dying : 0) |
        (d->is_shut_down ? XEN_DOMINF_shutdown : 0) |
        (d->is_paused_by_controller ? XEN_DOMINF_paused : 0) |
        (d->debugger_attached ? XEN_DOMINF_debugged : 0) |
        d->shutdown_code << XEN_DOMINF_shutdownshift;

    if ( is_hvm_domain(d) )
        info->flags |= XEN_DOMINF_hvm_guest;

    xsm_security_domaininfo(d, info);

    info->tot_pages = d->tot_pages;
    info->max_pages = d->max_pages;
    info->shared_info_frame = mfn_to_gmfn(d, __pa(d->shared_info)>>PAGE_SHIFT);

    memcpy(info->handle, d->handle, sizeof(xen_domain_handle_t));
}
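
/*
 * Pick an initial CPU for a new domain's VCPU0: prefer the online CPU with
 * the fewest resident (non-offline) VCPUs, avoiding primary hyperthreads
 * where possible.
 */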
static unsigned int default_vcpu0_location(void)
{
    struct domain *d;
    struct vcpu *v;
    unsigned int i, cpu, nr_cpus, *cnt;
    cpumask_t cpu_exclude_map;

    /* Do an initial CPU placement. Pick the least-populated CPU. */
    nr_cpus = last_cpu(cpu_possible_map) + 1;
    cnt = xmalloc_array(unsigned int, nr_cpus);
    if ( cnt )
    {
        memset(cnt, 0, nr_cpus * sizeof(*cnt));

        rcu_read_lock(&domlist_read_lock);
        for_each_domain ( d )
            for_each_vcpu ( d, v )
                if ( !test_bit(_VPF_down, &v->pause_flags) )
                    cnt[v->processor]++;
        rcu_read_unlock(&domlist_read_lock);
    }

    /*
     * If we're on a HT system, we only auto-allocate to a non-primary HT. We
     * favour high numbered CPUs in the event of a tie.
     */
    cpu = first_cpu(cpu_sibling_map[0]);
    if ( cpus_weight(cpu_sibling_map[0]) > 1 )
        cpu = next_cpu(cpu, cpu_sibling_map[0]);
    cpu_exclude_map = cpu_sibling_map[0];
    for_each_online_cpu ( i )
    {
        if ( cpu_isset(i, cpu_exclude_map) )
            continue;
        if ( (i == first_cpu(cpu_sibling_map[i])) &&
             (cpus_weight(cpu_sibling_map[i]) > 1) )
            continue;
        cpus_or(cpu_exclude_map, cpu_exclude_map, cpu_sibling_map[i]);
        if ( !cnt || cnt[i] <= cnt[cpu] )
            cpu = i;
    }

    xfree(cnt);

    return cpu;
}
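
/*
 * Serialise domctl operations. Both the per-domain hypercall_deadlock_mutex
 * and the global domctl_lock are taken with trylock so that a failed attempt
 * can be retried via a hypercall continuation instead of deadlocking.
 */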
bool_t domctl_lock_acquire(void)
{
    /*
     * Caller may try to pause its own VCPUs. We must prevent deadlock
     * against other non-domctl routines which try to do the same.
     */
    if ( !spin_trylock(&current->domain->hypercall_deadlock_mutex) )
        return 0;

    /*
     * Trylock here is paranoia if we have multiple privileged domains. Then
     * we could have one domain trying to pause another which is spinning
     * on domctl_lock -- results in deadlock.
     */
    if ( spin_trylock(&domctl_lock) )
        return 1;

    spin_unlock(&current->domain->hypercall_deadlock_mutex);
    return 0;
}

void domctl_lock_release(void)
{
    spin_unlock(&domctl_lock);
    spin_unlock(&current->domain->hypercall_deadlock_mutex);
}
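
/* Top-level XEN_DOMCTL_* dispatcher, invoked via the domctl hypercall. */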
long do_domctl(XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
{
    long ret = 0;
    struct xen_domctl curop, *op = &curop;

    if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( copy_from_guest(op, u_domctl, 1) )
        return -EFAULT;

    if ( op->interface_version != XEN_DOMCTL_INTERFACE_VERSION )
        return -EACCES;

    if ( !domctl_lock_acquire() )
        return hypercall_create_continuation(
            __HYPERVISOR_domctl, "h", u_domctl);

    switch ( op->cmd )
    {

    case XEN_DOMCTL_setvcpucontext:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);
        vcpu_guest_context_u c = { .nat = NULL };
        unsigned int vcpu = op->u.vcpucontext.vcpu;
        struct vcpu *v;

        ret = -ESRCH;
        if ( d == NULL )
            break;

        ret = xsm_setvcpucontext(d);
        if ( ret )
            goto svc_out;

        ret = -EINVAL;
        if ( (d == current->domain) || /* no domain_pause() */
             (vcpu >= d->max_vcpus) || ((v = d->vcpu[vcpu]) == NULL) )
            goto svc_out;

        if ( guest_handle_is_null(op->u.vcpucontext.ctxt) )
        {
            vcpu_reset(v);
            ret = 0;
            goto svc_out;
        }

#ifdef CONFIG_COMPAT
        BUILD_BUG_ON(sizeof(struct vcpu_guest_context)
                     < sizeof(struct compat_vcpu_guest_context));
#endif
        ret = -ENOMEM;
        if ( (c.nat = xmalloc(struct vcpu_guest_context)) == NULL )
            goto svc_out;

#ifdef CONFIG_COMPAT
        if ( !is_pv_32on64_vcpu(v) )
            ret = copy_from_guest(c.nat, op->u.vcpucontext.ctxt, 1);
        else
            ret = copy_from_guest(c.cmp,
                                  guest_handle_cast(op->u.vcpucontext.ctxt,
                                                    void), 1);
#else
        ret = copy_from_guest(c.nat, op->u.vcpucontext.ctxt, 1);
#endif
        ret = ret ? -EFAULT : 0;

        if ( ret == 0 )
        {
            domain_pause(d);
            ret = arch_set_info_guest(v, c);
            domain_unpause(d);
        }

    svc_out:
        xfree(c.nat);
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_pausedomain:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);
        ret = -ESRCH;
        if ( d != NULL )
        {
            ret = xsm_pausedomain(d);
            if ( ret )
                goto pausedomain_out;

            ret = -EINVAL;
            if ( d != current->domain )
            {
                domain_pause_by_systemcontroller(d);
                ret = 0;
            }
        pausedomain_out:
            rcu_unlock_domain(d);
        }
    }
    break;

    case XEN_DOMCTL_unpausedomain:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);

        ret = -ESRCH;
        if ( d == NULL )
            break;

        ret = xsm_unpausedomain(d);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        domain_unpause_by_systemcontroller(d);
        rcu_unlock_domain(d);
        ret = 0;
    }
    break;

    case XEN_DOMCTL_resumedomain:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);

        ret = -ESRCH;
        if ( d == NULL )
            break;

        ret = xsm_resumedomain(d);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        domain_resume(d);
        rcu_unlock_domain(d);
        ret = 0;
    }
    break;
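
    /*
     * Create a new domain: either honour the explicitly requested domain ID
     * or scan for a free one starting after the last ID handed out.
     */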
    case XEN_DOMCTL_createdomain:
    {
        struct domain *d;
        domid_t dom;
        static domid_t rover = 0;
        unsigned int domcr_flags;

        ret = -EINVAL;
        if ( supervisor_mode_kernel ||
             (op->u.createdomain.flags &
              ~(XEN_DOMCTL_CDF_hvm_guest | XEN_DOMCTL_CDF_hap |
                XEN_DOMCTL_CDF_s3_integrity)) )
            break;

        dom = op->domain;
        if ( (dom > 0) && (dom < DOMID_FIRST_RESERVED) )
        {
            ret = -EINVAL;
            if ( !is_free_domid(dom) )
                break;
        }
        else
        {
            for ( dom = rover + 1; dom != rover; dom++ )
            {
                if ( dom == DOMID_FIRST_RESERVED )
                    dom = 0;
                if ( is_free_domid(dom) )
                    break;
            }

            ret = -ENOMEM;
            if ( dom == rover )
                break;

            rover = dom;
        }

        domcr_flags = 0;
        if ( op->u.createdomain.flags & XEN_DOMCTL_CDF_hvm_guest )
            domcr_flags |= DOMCRF_hvm;
        if ( op->u.createdomain.flags & XEN_DOMCTL_CDF_hap )
            domcr_flags |= DOMCRF_hap;
        if ( op->u.createdomain.flags & XEN_DOMCTL_CDF_s3_integrity )
            domcr_flags |= DOMCRF_s3_integrity;

        ret = -ENOMEM;
        d = domain_create(dom, domcr_flags, op->u.createdomain.ssidref);
        if ( d == NULL )
            break;

        ret = 0;

        memcpy(d->handle, op->u.createdomain.handle,
               sizeof(xen_domain_handle_t));

        op->domain = d->domain_id;
        if ( copy_to_guest(u_domctl, op, 1) )
            ret = -EFAULT;
    }
    break;
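
    /*
     * Raise a domain's VCPU count. The vcpu array is allocated the first
     * time a non-zero maximum is set; new VCPUs are placed starting from
     * default_vcpu0_location() and then round-robin over online CPUs.
     */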
    case XEN_DOMCTL_max_vcpus:
    {
        struct domain *d;
        unsigned int i, max = op->u.max_vcpus.max, cpu;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
            break;

        ret = -EINVAL;
        if ( (d == current->domain) || /* no domain_pause() */
             (max > MAX_VIRT_CPUS) ||
             (is_hvm_domain(d) && max > XEN_LEGACY_MAX_VCPUS) )
        {
            rcu_unlock_domain(d);
            break;
        }

        ret = xsm_max_vcpus(d);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        /* Until Xenoprof can dynamically grow its vcpu-s array... */
        if ( d->xenoprof )
        {
            rcu_unlock_domain(d);
            ret = -EAGAIN;
            break;
        }

        /* Needed, for example, to ensure writable p.t. state is synced. */
        domain_pause(d);

        /* We cannot reduce maximum VCPUs. */
        ret = -EINVAL;
        if ( (max < d->max_vcpus) && (d->vcpu[max] != NULL) )
            goto maxvcpu_out;

        /*
         * For now don't allow increasing the vcpu count from a non-zero
         * value: This code and all readers of d->vcpu would otherwise need
         * to be converted to use RCU, but at present there's no tools side
         * code path that would issue such a request.
         */
        ret = -EBUSY;
        if ( (d->max_vcpus > 0) && (max > d->max_vcpus) )
            goto maxvcpu_out;

        ret = -ENOMEM;
        if ( max > d->max_vcpus )
        {
            struct vcpu **vcpus;

            BUG_ON(d->vcpu != NULL);
            BUG_ON(d->max_vcpus != 0);

            if ( (vcpus = xmalloc_array(struct vcpu *, max)) == NULL )
                goto maxvcpu_out;
            memset(vcpus, 0, max * sizeof(*vcpus));

            /* Install vcpu array /then/ update max_vcpus. */
            d->vcpu = vcpus;
            wmb();
            d->max_vcpus = max;
        }

        for ( i = 0; i < max; i++ )
        {
            if ( d->vcpu[i] != NULL )
                continue;

            cpu = (i == 0) ?
                default_vcpu0_location() :
                cycle_cpu(d->vcpu[i-1]->processor, cpu_online_map);

            if ( alloc_vcpu(d, i, cpu) == NULL )
                goto maxvcpu_out;
        }

        ret = 0;

    maxvcpu_out:
        domain_unpause(d);
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_destroydomain:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);
        ret = -ESRCH;
        if ( d != NULL )
        {
            ret = xsm_destroydomain(d) ? : domain_kill(d);
            rcu_unlock_domain(d);
        }
    }
    break;

    case XEN_DOMCTL_setvcpuaffinity:
    case XEN_DOMCTL_getvcpuaffinity:
    {
        domid_t dom = op->domain;
        struct domain *d = rcu_lock_domain_by_id(dom);
        struct vcpu *v;
        cpumask_t new_affinity;

        ret = -ESRCH;
        if ( d == NULL )
            break;

        ret = xsm_vcpuaffinity(op->cmd, d);
        if ( ret )
            goto vcpuaffinity_out;

        ret = -EINVAL;
        if ( op->u.vcpuaffinity.vcpu >= d->max_vcpus )
            goto vcpuaffinity_out;

        ret = -ESRCH;
        if ( (v = d->vcpu[op->u.vcpuaffinity.vcpu]) == NULL )
            goto vcpuaffinity_out;

        if ( op->cmd == XEN_DOMCTL_setvcpuaffinity )
        {
            xenctl_cpumap_to_cpumask(
                &new_affinity, &op->u.vcpuaffinity.cpumap);
            ret = vcpu_set_affinity(v, &new_affinity);
        }
        else
        {
            cpumask_to_xenctl_cpumap(
                &op->u.vcpuaffinity.cpumap, &v->cpu_affinity);
            ret = 0;
        }

    vcpuaffinity_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_scheduler_op:
    {
        struct domain *d;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
            break;

        ret = xsm_scheduler(d);
        if ( ret )
            goto scheduler_op_out;

        ret = sched_adjust(d, &op->u.scheduler_op);
        if ( copy_to_guest(u_domctl, op, 1) )
            ret = -EFAULT;

    scheduler_op_out:
        rcu_unlock_domain(d);
    }
    break;
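
    /* Return information on the first domain with an ID >= op->domain. */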
    case XEN_DOMCTL_getdomaininfo:
    {
        struct domain *d;
        domid_t dom = op->domain;

        rcu_read_lock(&domlist_read_lock);

        for_each_domain ( d )
            if ( d->domain_id >= dom )
                break;

        if ( d == NULL )
        {
            rcu_read_unlock(&domlist_read_lock);
            ret = -ESRCH;
            break;
        }

        ret = xsm_getdomaininfo(d);
        if ( ret )
            goto getdomaininfo_out;

        getdomaininfo(d, &op->u.getdomaininfo);

        op->domain = op->u.getdomaininfo.domain;
        if ( copy_to_guest(u_domctl, op, 1) )
            ret = -EFAULT;

    getdomaininfo_out:
        rcu_read_unlock(&domlist_read_lock);
    }
    break;

    case XEN_DOMCTL_getvcpucontext:
    {
        vcpu_guest_context_u c = { .nat = NULL };
        struct domain *d;
        struct vcpu *v;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
            break;

        ret = xsm_getvcpucontext(d);
        if ( ret )
            goto getvcpucontext_out;

        ret = -EINVAL;
        if ( op->u.vcpucontext.vcpu >= d->max_vcpus )
            goto getvcpucontext_out;

        ret = -ESRCH;
        if ( (v = d->vcpu[op->u.vcpucontext.vcpu]) == NULL )
            goto getvcpucontext_out;

        ret = -ENODATA;
        if ( !v->is_initialised )
            goto getvcpucontext_out;

#ifdef CONFIG_COMPAT
        BUILD_BUG_ON(sizeof(struct vcpu_guest_context)
                     < sizeof(struct compat_vcpu_guest_context));
#endif
        ret = -ENOMEM;
        if ( (c.nat = xmalloc(struct vcpu_guest_context)) == NULL )
            goto getvcpucontext_out;

        if ( v != current )
            vcpu_pause(v);

        arch_get_info_guest(v, c);
        ret = 0;

        if ( v != current )
            vcpu_unpause(v);

#ifdef CONFIG_COMPAT
        if ( !is_pv_32on64_vcpu(v) )
            ret = copy_to_guest(op->u.vcpucontext.ctxt, c.nat, 1);
        else
            ret = copy_to_guest(guest_handle_cast(op->u.vcpucontext.ctxt,
                                                  void), c.cmp, 1);
#else
        ret = copy_to_guest(op->u.vcpucontext.ctxt, c.nat, 1);
#endif

        if ( copy_to_guest(u_domctl, op, 1) || ret )
            ret = -EFAULT;

    getvcpucontext_out:
        xfree(c.nat);
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_getvcpuinfo:
    {
        struct domain *d;
        struct vcpu *v;
        struct vcpu_runstate_info runstate;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
            break;

        ret = xsm_getvcpuinfo(d);
        if ( ret )
            goto getvcpuinfo_out;

        ret = -EINVAL;
        if ( op->u.getvcpuinfo.vcpu >= d->max_vcpus )
            goto getvcpuinfo_out;

        ret = -ESRCH;
        if ( (v = d->vcpu[op->u.getvcpuinfo.vcpu]) == NULL )
            goto getvcpuinfo_out;

        vcpu_runstate_get(v, &runstate);

        op->u.getvcpuinfo.online   = !test_bit(_VPF_down, &v->pause_flags);
        op->u.getvcpuinfo.blocked  = test_bit(_VPF_blocked, &v->pause_flags);
        op->u.getvcpuinfo.running  = v->is_running;
        op->u.getvcpuinfo.cpu_time = runstate.time[RUNSTATE_running];
        op->u.getvcpuinfo.cpu      = v->processor;
        ret = 0;

        if ( copy_to_guest(u_domctl, op, 1) )
            ret = -EFAULT;

    getvcpuinfo_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_max_mem:
    {
        struct domain *d;
        unsigned long new_max;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = xsm_setdomainmaxmem(d);
        if ( ret )
            goto max_mem_out;

        ret = -EINVAL;
        new_max = op->u.max_mem.max_memkb >> (PAGE_SHIFT-10);

        spin_lock(&d->page_alloc_lock);
        if ( new_max >= d->tot_pages )
        {
            d->max_pages = new_max;
            ret = 0;
        }
        spin_unlock(&d->page_alloc_lock);

    max_mem_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_setdomainhandle:
    {
        struct domain *d;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = xsm_setdomainhandle(d);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        memcpy(d->handle, op->u.setdomainhandle.handle,
               sizeof(xen_domain_handle_t));
        rcu_unlock_domain(d);
        ret = 0;
    }
    break;

    case XEN_DOMCTL_setdebugging:
    {
        struct domain *d;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = -EINVAL;
        if ( d == current->domain ) /* no domain_pause() */
        {
            rcu_unlock_domain(d);
            break;
        }

        ret = xsm_setdebugging(d);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        domain_pause(d);
        d->debugger_attached = !!op->u.setdebugging.enable;
        domain_unpause(d); /* causes guest to latch new status */
        rcu_unlock_domain(d);
        ret = 0;
    }
    break;

    case XEN_DOMCTL_irq_permission:
    {
        struct domain *d;
        unsigned int pirq = op->u.irq_permission.pirq;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        if ( pirq >= d->nr_pirqs )
            ret = -EINVAL;
        else if ( op->u.irq_permission.allow_access )
            ret = irq_permit_access(d, pirq);
        else
            ret = irq_deny_access(d, pirq);

        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_iomem_permission:
    {
        struct domain *d;
        unsigned long mfn = op->u.iomem_permission.first_mfn;
        unsigned long nr_mfns = op->u.iomem_permission.nr_mfns;

        ret = -EINVAL;
        if ( (mfn + nr_mfns - 1) < mfn ) /* wrap? */
            break;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        if ( op->u.iomem_permission.allow_access )
            ret = iomem_permit_access(d, mfn, mfn + nr_mfns - 1);
        else
            ret = iomem_deny_access(d, mfn, mfn + nr_mfns - 1);

        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_settimeoffset:
    {
        struct domain *d;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = xsm_domain_settime(d);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        domain_set_time_offset(d, op->u.settimeoffset.time_offset_seconds);
        rcu_unlock_domain(d);
        ret = 0;
    }
    break;

    case XEN_DOMCTL_set_target:
    {
        struct domain *d, *e;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = -ESRCH;
        e = get_domain_by_id(op->u.set_target.target);
        if ( e == NULL )
            goto set_target_out;

        ret = -EINVAL;
        if ( (d == e) || (d->target != NULL) )
        {
            put_domain(e);
            goto set_target_out;
        }

        ret = xsm_set_target(d, e);
        if ( ret )
        {
            put_domain(e);
            goto set_target_out;
        }

        /* Hold reference on @e until we destroy @d. */
        d->target = e;

        ret = 0;

    set_target_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_subscribe:
    {
        struct domain *d;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d != NULL )
        {
            d->suspend_evtchn = op->u.subscribe.port;
            rcu_unlock_domain(d);
            ret = 0;
        }
    }
    break;

    default:
        ret = arch_do_domctl(op, u_domctl);
        break;
    }

    domctl_lock_release();

    return ret;
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */