ia64/xen-unstable: xen/common/domctl.c @ 18594:5e4e234d58be

changeset: x86: Define __per_cpu_shift label to help kdump/crashdump.
           Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author:    Keir Fraser <keir.fraser@citrix.com>
date:      Wed Oct 08 13:11:06 2008 +0100
parents:   1c09b810f977
children:  43019597f85c
/******************************************************************************
 * domctl.c
 *
 * Domain management operations. For use by node control stack.
 *
 * Copyright (c) 2002-2006, K A Fraser
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/lib.h>
#include <xen/mm.h>
#include <xen/sched.h>
#include <xen/domain.h>
#include <xen/event.h>
#include <xen/domain_page.h>
#include <xen/trace.h>
#include <xen/console.h>
#include <xen/iocap.h>
#include <xen/rcupdate.h>
#include <xen/guest_access.h>
#include <xen/bitmap.h>
#include <xen/paging.h>
#include <asm/current.h>
#include <public/domctl.h>
#include <xsm/xsm.h>

DEFINE_SPINLOCK(domctl_lock);

extern long arch_do_domctl(
    struct xen_domctl *op, XEN_GUEST_HANDLE(xen_domctl_t) u_domctl);
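
/*
 * Copy a hypervisor cpumask_t into a guest-supplied xenctl_cpumap, zero-
 * filling any bytes the guest requested beyond the hypervisor's NR_CPUS.
 */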
void cpumask_to_xenctl_cpumap(
    struct xenctl_cpumap *xenctl_cpumap, cpumask_t *cpumask)
{
    unsigned int guest_bytes, copy_bytes, i;
    uint8_t zero = 0;
    uint8_t bytemap[(NR_CPUS + 7) / 8];

    if ( guest_handle_is_null(xenctl_cpumap->bitmap) )
        return;

    guest_bytes = (xenctl_cpumap->nr_cpus + 7) / 8;
    copy_bytes = min_t(unsigned int, guest_bytes, sizeof(bytemap));

    bitmap_long_to_byte(bytemap, cpus_addr(*cpumask), NR_CPUS);

    if ( copy_bytes != 0 )
        copy_to_guest(xenctl_cpumap->bitmap, bytemap, copy_bytes);

    for ( i = copy_bytes; i < guest_bytes; i++ )
        copy_to_guest_offset(xenctl_cpumap->bitmap, i, &zero, 1);
}
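
/*
 * Copy a guest-supplied xenctl_cpumap into a hypervisor cpumask_t, masking
 * off any bits beyond the guest's advertised nr_cpus.
 */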
void xenctl_cpumap_to_cpumask(
    cpumask_t *cpumask, struct xenctl_cpumap *xenctl_cpumap)
{
    unsigned int guest_bytes, copy_bytes;
    uint8_t bytemap[(NR_CPUS + 7) / 8];

    if ( guest_handle_is_null(xenctl_cpumap->bitmap) )
        return;

    guest_bytes = (xenctl_cpumap->nr_cpus + 7) / 8;
    copy_bytes = min_t(unsigned int, guest_bytes, sizeof(bytemap));

    memset(bytemap, 0, sizeof(bytemap));

    if ( copy_bytes != 0 )
    {
        copy_from_guest(bytemap, xenctl_cpumap->bitmap, copy_bytes);
        if ( (xenctl_cpumap->nr_cpus & 7) && (guest_bytes <= sizeof(bytemap)) )
            bytemap[guest_bytes-1] &= ~(0xff << (xenctl_cpumap->nr_cpus & 7));
    }

    bitmap_byte_to_long(cpus_addr(*cpumask), bytemap, NR_CPUS);
}
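
/* Return 1 if @dom is an ordinary domain ID that is not currently in use. */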
static inline int is_free_domid(domid_t dom)
{
    struct domain *d;

    if ( dom >= DOMID_FIRST_RESERVED )
        return 0;

    if ( (d = rcu_lock_domain_by_id(dom)) == NULL )
        return 1;

    rcu_unlock_domain(d);
    return 0;
}
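
/* Fill a XEN_DOMCTL_getdomaininfo structure from domain @d's current state. */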
void getdomaininfo(struct domain *d, struct xen_domctl_getdomaininfo *info)
{
    struct vcpu *v;
    u64 cpu_time = 0;
    int flags = XEN_DOMINF_blocked;
    struct vcpu_runstate_info runstate;

    info->domain = d->domain_id;
    info->nr_online_vcpus = 0;
    info->ssidref = 0;

    /*
     * - domain is marked as blocked only if all its vcpus are blocked
     * - domain is marked as running if any of its vcpus is running
     */
    for_each_vcpu ( d, v )
    {
        vcpu_runstate_get(v, &runstate);
        cpu_time += runstate.time[RUNSTATE_running];
        info->max_vcpu_id = v->vcpu_id;
        if ( !test_bit(_VPF_down, &v->pause_flags) )
        {
            if ( !(v->pause_flags & VPF_blocked) )
                flags &= ~XEN_DOMINF_blocked;
            if ( v->is_running )
                flags |= XEN_DOMINF_running;
            info->nr_online_vcpus++;
        }
    }

    info->cpu_time = cpu_time;

    info->flags = (info->nr_online_vcpus ? flags : 0) |
        ((d->is_dying == DOMDYING_dead) ? XEN_DOMINF_dying : 0) |
        (d->is_shut_down ? XEN_DOMINF_shutdown : 0) |
        (d->is_paused_by_controller ? XEN_DOMINF_paused : 0) |
        (d->debugger_attached ? XEN_DOMINF_debugged : 0) |
        d->shutdown_code << XEN_DOMINF_shutdownshift;

    if ( is_hvm_domain(d) )
        info->flags |= XEN_DOMINF_hvm_guest;

    xsm_security_domaininfo(d, info);

    info->tot_pages = d->tot_pages;
    info->max_pages = d->max_pages;
    info->shared_info_frame = mfn_to_gmfn(d, __pa(d->shared_info)>>PAGE_SHIFT);

    memcpy(info->handle, d->handle, sizeof(xen_domain_handle_t));
}
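
/*
 * Choose an initial CPU for a new domain's VCPU0: the least-populated online
 * CPU, preferring non-primary hyperthreads on HT systems.
 */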
static unsigned int default_vcpu0_location(void)
{
    struct domain *d;
    struct vcpu *v;
    unsigned int i, cpu, nr_cpus, *cnt;
    cpumask_t cpu_exclude_map;

    /* Do an initial CPU placement. Pick the least-populated CPU. */
    nr_cpus = last_cpu(cpu_possible_map) + 1;
    cnt = xmalloc_array(unsigned int, nr_cpus);
    if ( cnt )
    {
        memset(cnt, 0, nr_cpus * sizeof(*cnt));

        rcu_read_lock(&domlist_read_lock);
        for_each_domain ( d )
            for_each_vcpu ( d, v )
                if ( !test_bit(_VPF_down, &v->pause_flags) )
                    cnt[v->processor]++;
        rcu_read_unlock(&domlist_read_lock);
    }

    /*
     * If we're on a HT system, we only auto-allocate to a non-primary HT. We
     * favour high numbered CPUs in the event of a tie.
     */
    cpu = first_cpu(cpu_sibling_map[0]);
    if ( cpus_weight(cpu_sibling_map[0]) > 1 )
        cpu = next_cpu(cpu, cpu_sibling_map[0]);
    cpu_exclude_map = cpu_sibling_map[0];
    for_each_online_cpu ( i )
    {
        if ( cpu_isset(i, cpu_exclude_map) )
            continue;
        if ( (i == first_cpu(cpu_sibling_map[i])) &&
             (cpus_weight(cpu_sibling_map[i]) > 1) )
            continue;
        cpus_or(cpu_exclude_map, cpu_exclude_map, cpu_sibling_map[i]);
        if ( !cnt || cnt[i] <= cnt[cpu] )
            cpu = i;
    }

    xfree(cnt);

    return cpu;
}
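
/*
 * Top-level handler for the XEN_DOMCTL hypercall. Only a privileged
 * (control) domain may invoke it; operations are serialised by domctl_lock.
 */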
long do_domctl(XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
{
    long ret = 0;
    struct xen_domctl curop, *op = &curop;

    if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( copy_from_guest(op, u_domctl, 1) )
        return -EFAULT;

    if ( op->interface_version != XEN_DOMCTL_INTERFACE_VERSION )
        return -EACCES;

    spin_lock(&domctl_lock);

    switch ( op->cmd )
    {

    case XEN_DOMCTL_setvcpucontext:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);
        vcpu_guest_context_u c = { .nat = NULL };
        unsigned int vcpu = op->u.vcpucontext.vcpu;
        struct vcpu *v;

        ret = -ESRCH;
        if ( d == NULL )
            break;

        ret = xsm_setvcpucontext(d);
        if ( ret )
            goto svc_out;

        ret = -EINVAL;
        if ( (d == current->domain) || /* no domain_pause() */
             (vcpu >= MAX_VIRT_CPUS) || ((v = d->vcpu[vcpu]) == NULL) )
            goto svc_out;

        if ( guest_handle_is_null(op->u.vcpucontext.ctxt) )
        {
            vcpu_reset(v);
            ret = 0;
            goto svc_out;
        }

#ifdef CONFIG_COMPAT
        BUILD_BUG_ON(sizeof(struct vcpu_guest_context)
                     < sizeof(struct compat_vcpu_guest_context));
#endif
        ret = -ENOMEM;
        if ( (c.nat = xmalloc(struct vcpu_guest_context)) == NULL )
            goto svc_out;

        if ( !IS_COMPAT(v->domain) )
            ret = copy_from_guest(c.nat, op->u.vcpucontext.ctxt, 1);
#ifdef CONFIG_COMPAT
        else
            ret = copy_from_guest(c.cmp,
                                  guest_handle_cast(op->u.vcpucontext.ctxt,
                                                    void), 1);
#endif
        ret = ret ? -EFAULT : 0;

        if ( ret == 0 )
        {
            domain_pause(d);
            ret = arch_set_info_guest(v, c);
            domain_unpause(d);
        }

    svc_out:
        xfree(c.nat);
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_pausedomain:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);
        ret = -ESRCH;
        if ( d != NULL )
        {
            ret = xsm_pausedomain(d);
            if ( ret )
                goto pausedomain_out;

            ret = -EINVAL;
            if ( d != current->domain )
            {
                domain_pause_by_systemcontroller(d);
                ret = 0;
            }
        pausedomain_out:
            rcu_unlock_domain(d);
        }
    }
    break;

    case XEN_DOMCTL_unpausedomain:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);

        ret = -ESRCH;
        if ( d == NULL )
            break;

        ret = xsm_unpausedomain(d);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        domain_unpause_by_systemcontroller(d);
        rcu_unlock_domain(d);
        ret = 0;
    }
    break;

    case XEN_DOMCTL_resumedomain:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);

        ret = -ESRCH;
        if ( d == NULL )
            break;

        ret = xsm_resumedomain(d);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        domain_resume(d);
        rcu_unlock_domain(d);
        ret = 0;
    }
    break;
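
    /*
     * Create a new domain. A nonzero, non-reserved op->domain requests that
     * specific ID (which must be free); otherwise the next free ID after the
     * most recently allocated one ("rover") is used.
     */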
    case XEN_DOMCTL_createdomain:
    {
        struct domain *d;
        domid_t dom;
        static domid_t rover = 0;
        unsigned int domcr_flags;

        ret = -EINVAL;
        if ( supervisor_mode_kernel ||
             (op->u.createdomain.flags &
              ~(XEN_DOMCTL_CDF_hvm_guest | XEN_DOMCTL_CDF_hap)) )
            break;

        dom = op->domain;
        if ( (dom > 0) && (dom < DOMID_FIRST_RESERVED) )
        {
            ret = -EINVAL;
            if ( !is_free_domid(dom) )
                break;
        }
        else
        {
            for ( dom = rover + 1; dom != rover; dom++ )
            {
                if ( dom == DOMID_FIRST_RESERVED )
                    dom = 0;
                if ( is_free_domid(dom) )
                    break;
            }

            ret = -ENOMEM;
            if ( dom == rover )
                break;

            rover = dom;
        }

        domcr_flags = 0;
        if ( op->u.createdomain.flags & XEN_DOMCTL_CDF_hvm_guest )
            domcr_flags |= DOMCRF_hvm;
        if ( op->u.createdomain.flags & XEN_DOMCTL_CDF_hap )
            domcr_flags |= DOMCRF_hap;

        ret = -ENOMEM;
        d = domain_create(dom, domcr_flags, op->u.createdomain.ssidref);
        if ( d == NULL )
            break;

        ret = 0;

        memcpy(d->handle, op->u.createdomain.handle,
               sizeof(xen_domain_handle_t));

        op->domain = d->domain_id;
        if ( copy_to_guest(u_domctl, op, 1) )
            ret = -EFAULT;
    }
    break;
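
    /*
     * Raise a domain's maximum VCPU count, allocating any VCPUs that do not
     * exist yet. The maximum can never be reduced.
     */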
    case XEN_DOMCTL_max_vcpus:
    {
        struct domain *d;
        unsigned int i, max = op->u.max_vcpus.max, cpu;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
            break;

        ret = -EINVAL;
        if ( (d == current->domain) || /* no domain_pause() */
             (max > MAX_VIRT_CPUS) )
        {
            rcu_unlock_domain(d);
            break;
        }

        ret = xsm_max_vcpus(d);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        /* Needed, for example, to ensure writable p.t. state is synced. */
        domain_pause(d);

        /* We cannot reduce maximum VCPUs. */
        ret = -EINVAL;
        if ( (max != MAX_VIRT_CPUS) && (d->vcpu[max] != NULL) )
            goto maxvcpu_out;

        ret = -ENOMEM;
        for ( i = 0; i < max; i++ )
        {
            if ( d->vcpu[i] != NULL )
                continue;

            cpu = (i == 0) ?
                default_vcpu0_location() :
                (d->vcpu[i-1]->processor + 1) % num_online_cpus();

            if ( alloc_vcpu(d, i, cpu) == NULL )
                goto maxvcpu_out;
        }

        ret = 0;

    maxvcpu_out:
        domain_unpause(d);
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_destroydomain:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);
        ret = -ESRCH;
        if ( d != NULL )
        {
            ret = xsm_destroydomain(d) ? : domain_kill(d);
            rcu_unlock_domain(d);
        }
    }
    break;

    case XEN_DOMCTL_setvcpuaffinity:
    case XEN_DOMCTL_getvcpuaffinity:
    {
        domid_t dom = op->domain;
        struct domain *d = rcu_lock_domain_by_id(dom);
        struct vcpu *v;
        cpumask_t new_affinity;

        ret = -ESRCH;
        if ( d == NULL )
            break;

        ret = xsm_vcpuaffinity(op->cmd, d);
        if ( ret )
            goto vcpuaffinity_out;

        ret = -EINVAL;
        if ( op->u.vcpuaffinity.vcpu >= MAX_VIRT_CPUS )
            goto vcpuaffinity_out;

        ret = -ESRCH;
        if ( (v = d->vcpu[op->u.vcpuaffinity.vcpu]) == NULL )
            goto vcpuaffinity_out;

        if ( op->cmd == XEN_DOMCTL_setvcpuaffinity )
        {
            xenctl_cpumap_to_cpumask(
                &new_affinity, &op->u.vcpuaffinity.cpumap);
            ret = vcpu_set_affinity(v, &new_affinity);
        }
        else
        {
            cpumask_to_xenctl_cpumap(
                &op->u.vcpuaffinity.cpumap, &v->cpu_affinity);
            ret = 0;
        }

    vcpuaffinity_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_scheduler_op:
    {
        struct domain *d;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
            break;

        ret = xsm_scheduler(d);
        if ( ret )
            goto scheduler_op_out;

        ret = sched_adjust(d, &op->u.scheduler_op);
        if ( copy_to_guest(u_domctl, op, 1) )
            ret = -EFAULT;

    scheduler_op_out:
        rcu_unlock_domain(d);
    }
    break;
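
    /*
     * Return information about the first existing domain whose ID is greater
     * than or equal to op->domain, allowing callers to enumerate domains in
     * ID order.
     */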
    case XEN_DOMCTL_getdomaininfo:
    {
        struct domain *d;
        domid_t dom = op->domain;

        rcu_read_lock(&domlist_read_lock);

        for_each_domain ( d )
            if ( d->domain_id >= dom )
                break;

        if ( d == NULL )
        {
            rcu_read_unlock(&domlist_read_lock);
            ret = -ESRCH;
            break;
        }

        ret = xsm_getdomaininfo(d);
        if ( ret )
            goto getdomaininfo_out;

        getdomaininfo(d, &op->u.getdomaininfo);

        op->domain = op->u.getdomaininfo.domain;
        if ( copy_to_guest(u_domctl, op, 1) )
            ret = -EFAULT;

    getdomaininfo_out:
        rcu_read_unlock(&domlist_read_lock);
    }
    break;

    case XEN_DOMCTL_getvcpucontext:
    {
        vcpu_guest_context_u c = { .nat = NULL };
        struct domain *d;
        struct vcpu *v;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
            break;

        ret = xsm_getvcpucontext(d);
        if ( ret )
            goto getvcpucontext_out;

        ret = -EINVAL;
        if ( op->u.vcpucontext.vcpu >= MAX_VIRT_CPUS )
            goto getvcpucontext_out;

        ret = -ESRCH;
        if ( (v = d->vcpu[op->u.vcpucontext.vcpu]) == NULL )
            goto getvcpucontext_out;

        ret = -ENODATA;
        if ( !v->is_initialised )
            goto getvcpucontext_out;

#ifdef CONFIG_COMPAT
        BUILD_BUG_ON(sizeof(struct vcpu_guest_context)
                     < sizeof(struct compat_vcpu_guest_context));
#endif
        ret = -ENOMEM;
        if ( (c.nat = xmalloc(struct vcpu_guest_context)) == NULL )
            goto getvcpucontext_out;

        if ( v != current )
            vcpu_pause(v);

        arch_get_info_guest(v, c);
        ret = 0;

        if ( v != current )
            vcpu_unpause(v);

        if ( !IS_COMPAT(v->domain) )
            ret = copy_to_guest(op->u.vcpucontext.ctxt, c.nat, 1);
#ifdef CONFIG_COMPAT
        else
            ret = copy_to_guest(guest_handle_cast(op->u.vcpucontext.ctxt,
                                                  void), c.cmp, 1);
#endif

        if ( copy_to_guest(u_domctl, op, 1) || ret )
            ret = -EFAULT;

    getvcpucontext_out:
        xfree(c.nat);
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_getvcpuinfo:
    {
        struct domain *d;
        struct vcpu *v;
        struct vcpu_runstate_info runstate;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
            break;

        ret = xsm_getvcpuinfo(d);
        if ( ret )
            goto getvcpuinfo_out;

        ret = -EINVAL;
        if ( op->u.getvcpuinfo.vcpu >= MAX_VIRT_CPUS )
            goto getvcpuinfo_out;

        ret = -ESRCH;
        if ( (v = d->vcpu[op->u.getvcpuinfo.vcpu]) == NULL )
            goto getvcpuinfo_out;

        vcpu_runstate_get(v, &runstate);

        op->u.getvcpuinfo.online = !test_bit(_VPF_down, &v->pause_flags);
        op->u.getvcpuinfo.blocked = test_bit(_VPF_blocked, &v->pause_flags);
        op->u.getvcpuinfo.running = v->is_running;
        op->u.getvcpuinfo.cpu_time = runstate.time[RUNSTATE_running];
        op->u.getvcpuinfo.cpu = v->processor;
        ret = 0;

        if ( copy_to_guest(u_domctl, op, 1) )
            ret = -EFAULT;

    getvcpuinfo_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_max_mem:
    {
        struct domain *d;
        unsigned long new_max;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = xsm_setdomainmaxmem(d);
        if ( ret )
            goto max_mem_out;

        ret = -EINVAL;
        new_max = op->u.max_mem.max_memkb >> (PAGE_SHIFT-10);

        spin_lock(&d->page_alloc_lock);
        if ( new_max >= d->tot_pages )
        {
            d->max_pages = new_max;
            ret = 0;
        }
        spin_unlock(&d->page_alloc_lock);

    max_mem_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_setdomainhandle:
    {
        struct domain *d;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = xsm_setdomainhandle(d);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        memcpy(d->handle, op->u.setdomainhandle.handle,
               sizeof(xen_domain_handle_t));
        rcu_unlock_domain(d);
        ret = 0;
    }
    break;

    case XEN_DOMCTL_setdebugging:
    {
        struct domain *d;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = -EINVAL;
        if ( d == current->domain ) /* no domain_pause() */
        {
            rcu_unlock_domain(d);
            break;
        }

        ret = xsm_setdebugging(d);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        domain_pause(d);
        d->debugger_attached = !!op->u.setdebugging.enable;
        domain_unpause(d); /* causes guest to latch new status */
        rcu_unlock_domain(d);
        ret = 0;
    }
    break;

    case XEN_DOMCTL_irq_permission:
    {
        struct domain *d;
        unsigned int pirq = op->u.irq_permission.pirq;

        ret = -EINVAL;
        if ( pirq >= NR_IRQS )
            break;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        if ( op->u.irq_permission.allow_access )
            ret = irq_permit_access(d, pirq);
        else
            ret = irq_deny_access(d, pirq);

        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_iomem_permission:
    {
        struct domain *d;
        unsigned long mfn = op->u.iomem_permission.first_mfn;
        unsigned long nr_mfns = op->u.iomem_permission.nr_mfns;

        ret = -EINVAL;
        if ( (mfn + nr_mfns - 1) < mfn ) /* wrap? */
            break;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        if ( op->u.iomem_permission.allow_access )
            ret = iomem_permit_access(d, mfn, mfn + nr_mfns - 1);
        else
            ret = iomem_deny_access(d, mfn, mfn + nr_mfns - 1);

        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_settimeoffset:
    {
        struct domain *d;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = xsm_domain_settime(d);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        domain_set_time_offset(d, op->u.settimeoffset.time_offset_seconds);
        rcu_unlock_domain(d);
        ret = 0;
    }
    break;
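
    /*
     * Make domain op->u.set_target.target the target of domain op->domain
     * (e.g. for a device-model stub domain controlling an HVM guest). A
     * reference to the target is held until the controlling domain dies.
     */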
    case XEN_DOMCTL_set_target:
    {
        struct domain *d, *e;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = -ESRCH;
        e = get_domain_by_id(op->u.set_target.target);
        if ( e == NULL )
            goto set_target_out;

        ret = -EINVAL;
        if ( (d == e) || (d->target != NULL) )
        {
            put_domain(e);
            goto set_target_out;
        }

        ret = xsm_set_target(d, e);
        if ( ret )
        {
            put_domain(e);
            goto set_target_out;
        }

        /* Hold reference on @e until we destroy @d. */
        d->target = e;

        ret = 0;

    set_target_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_subscribe:
    {
        struct domain *d;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d != NULL )
        {
            d->suspend_evtchn = op->u.subscribe.port;
            rcu_unlock_domain(d);
            ret = 0;
        }
    }
    break;

    default:
        ret = arch_do_domctl(op, u_domctl);
        break;
    }

    spin_unlock(&domctl_lock);

    return ret;
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */