ia64/xen-unstable

xen/common/domctl.c @ 18346:56770e3eb76e

domctl: ssidref is not initialized if !XSM_ENABLE

The ssidref field in getdomaininfo() is not initialized when XSM_ENABLE
is not defined, so xentop may display a bogus ssid value.

Signed-off-by: Kazuhiro Suzuki <kaz@jp.fujitsu.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Aug 20 09:06:58 2008 +0100
parents 14fd83fe71c3
children c759a6cef79f
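
The fix amounts to the explicit "info->ssidref = 0;" in getdomaininfo()
below. Why one line suffices: xsm_security_domaininfo() only fills the field
when an XSM module is built in; otherwise it is a no-op stub. A minimal
sketch of the two builds, following the usual xsm/xsm.h stub convention (the
stub body shown here is an assumption, abbreviated for illustration):

    #ifdef XSM_ENABLE
    /* Real hook: dispatches to the loaded module (e.g. Flask), which
     * fills info->ssidref with the domain's security ID. */
    void xsm_security_domaininfo(struct domain *d,
                                 struct xen_domctl_getdomaininfo *info);
    #else
    static inline void xsm_security_domaininfo(
        struct domain *d, struct xen_domctl_getdomaininfo *info)
    { /* no-op: info->ssidref keeps whatever value it held before */ }
    #endif

Without the zeroing, the !XSM_ENABLE build handed an unspecified ssidref
back to the toolstack, which is what xentop then displayed.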
/******************************************************************************
 * domctl.c
 *
 * Domain management operations. For use by the node control stack.
 *
 * Copyright (c) 2002-2006, K A Fraser
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/lib.h>
#include <xen/mm.h>
#include <xen/sched.h>
#include <xen/domain.h>
#include <xen/event.h>
#include <xen/domain_page.h>
#include <xen/trace.h>
#include <xen/console.h>
#include <xen/iocap.h>
#include <xen/rcupdate.h>
#include <xen/guest_access.h>
#include <xen/bitmap.h>
#include <xen/paging.h>
#include <asm/current.h>
#include <public/domctl.h>
#include <xsm/xsm.h>

DEFINE_SPINLOCK(domctl_lock);

extern long arch_do_domctl(
    struct xen_domctl *op, XEN_GUEST_HANDLE(xen_domctl_t) u_domctl);

void cpumask_to_xenctl_cpumap(
    struct xenctl_cpumap *xenctl_cpumap, cpumask_t *cpumask)
{
    unsigned int guest_bytes, copy_bytes, i;
    uint8_t zero = 0;
    uint8_t bytemap[(NR_CPUS + 7) / 8];

    if ( guest_handle_is_null(xenctl_cpumap->bitmap) )
        return;

    guest_bytes = (xenctl_cpumap->nr_cpus + 7) / 8;
    copy_bytes  = min_t(unsigned int, guest_bytes, sizeof(bytemap));

    bitmap_long_to_byte(bytemap, cpus_addr(*cpumask), NR_CPUS);

    if ( copy_bytes != 0 )
        copy_to_guest(xenctl_cpumap->bitmap, bytemap, copy_bytes);

    for ( i = copy_bytes; i < guest_bytes; i++ )
        copy_to_guest_offset(xenctl_cpumap->bitmap, i, &zero, 1);
}

void xenctl_cpumap_to_cpumask(
    cpumask_t *cpumask, struct xenctl_cpumap *xenctl_cpumap)
{
    unsigned int guest_bytes, copy_bytes;
    uint8_t bytemap[(NR_CPUS + 7) / 8];

    if ( guest_handle_is_null(xenctl_cpumap->bitmap) )
        return;

    guest_bytes = (xenctl_cpumap->nr_cpus + 7) / 8;
    copy_bytes  = min_t(unsigned int, guest_bytes, sizeof(bytemap));

    memset(bytemap, 0, sizeof(bytemap));

    if ( copy_bytes != 0 )
    {
        copy_from_guest(bytemap, xenctl_cpumap->bitmap, copy_bytes);
        if ( (xenctl_cpumap->nr_cpus & 7) && (guest_bytes <= sizeof(bytemap)) )
            bytemap[guest_bytes-1] &= ~(0xff << (xenctl_cpumap->nr_cpus & 7));
    }

    bitmap_byte_to_long(cpus_addr(*cpumask), bytemap, NR_CPUS);
}
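
/*
 * Worked example of the converters above: for nr_cpus == 6, guest_bytes is
 * (6 + 7) / 8 == 1, so a single byte crosses the boundary.  On the inbound
 * path the tail byte is masked with ~(0xff << (6 & 7)) == 0x3f, clearing
 * bits 6 and 7 so a guest cannot name CPUs beyond nr_cpus; on the outbound
 * path any guest bytes beyond sizeof(bytemap) are explicitly zero-filled.
 */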
static inline int is_free_domid(domid_t dom)
{
    struct domain *d;

    if ( dom >= DOMID_FIRST_RESERVED )
        return 0;

    if ( (d = rcu_lock_domain_by_id(dom)) == NULL )
        return 1;

    rcu_unlock_domain(d);
    return 0;
}
void getdomaininfo(struct domain *d, struct xen_domctl_getdomaininfo *info)
{
    struct vcpu *v;
    u64 cpu_time = 0;
    int flags = XEN_DOMINF_blocked;
    struct vcpu_runstate_info runstate;

    info->domain = d->domain_id;
    info->nr_online_vcpus = 0;
    info->ssidref = 0;

    /*
     * - domain is marked as blocked only if all its vcpus are blocked
     * - domain is marked as running if any of its vcpus is running
     */
    for_each_vcpu ( d, v )
    {
        vcpu_runstate_get(v, &runstate);
        cpu_time += runstate.time[RUNSTATE_running];
        info->max_vcpu_id = v->vcpu_id;
        if ( !test_bit(_VPF_down, &v->pause_flags) )
        {
            if ( !(v->pause_flags & VPF_blocked) )
                flags &= ~XEN_DOMINF_blocked;
            if ( v->is_running )
                flags |= XEN_DOMINF_running;
            info->nr_online_vcpus++;
        }
    }

    info->cpu_time = cpu_time;

    info->flags = (info->nr_online_vcpus ? flags : 0) |
        ((d->is_dying == DOMDYING_dead) ? XEN_DOMINF_dying    : 0) |
        (d->is_shut_down                ? XEN_DOMINF_shutdown : 0) |
        (d->is_paused_by_controller     ? XEN_DOMINF_paused   : 0) |
        (d->debugger_attached           ? XEN_DOMINF_debugged : 0) |
        d->shutdown_code << XEN_DOMINF_shutdownshift;

    if ( is_hvm_domain(d) )
        info->flags |= XEN_DOMINF_hvm_guest;

    xsm_security_domaininfo(d, info);

    info->tot_pages = d->tot_pages;
    info->max_pages = d->max_pages;
    info->shared_info_frame = mfn_to_gmfn(d, __pa(d->shared_info)>>PAGE_SHIFT);

    memcpy(info->handle, d->handle, sizeof(xen_domain_handle_t));
}
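
/*
 * Note on the info->ssidref zeroing above (the fix in this changeset): when
 * XSM_ENABLE is not defined, xsm_security_domaininfo() is a no-op stub, so
 * without the explicit initialisation the field would be returned to the
 * toolstack unset -- which is how xentop came to show a bogus ssid.  With
 * XSM built in, the security module overwrites the zero with the domain's
 * real ssidref.
 */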
static unsigned int default_vcpu0_location(void)
{
    struct domain *d;
    struct vcpu *v;
    unsigned int i, cpu, cnt[NR_CPUS] = { 0 };
    cpumask_t cpu_exclude_map;

    /* Do an initial CPU placement. Pick the least-populated CPU. */
    rcu_read_lock(&domlist_read_lock);
    for_each_domain ( d )
        for_each_vcpu ( d, v )
            if ( !test_bit(_VPF_down, &v->pause_flags) )
                cnt[v->processor]++;
    rcu_read_unlock(&domlist_read_lock);

    /*
     * If we're on an HT system, we only auto-allocate to a non-primary HT.
     * We favour high-numbered CPUs in the event of a tie.
     */
    cpu = first_cpu(cpu_sibling_map[0]);
    if ( cpus_weight(cpu_sibling_map[0]) > 1 )
        cpu = next_cpu(cpu, cpu_sibling_map[0]);
    cpu_exclude_map = cpu_sibling_map[0];
    for_each_online_cpu ( i )
    {
        if ( cpu_isset(i, cpu_exclude_map) )
            continue;
        if ( (i == first_cpu(cpu_sibling_map[i])) &&
             (cpus_weight(cpu_sibling_map[i]) > 1) )
            continue;
        cpus_or(cpu_exclude_map, cpu_exclude_map, cpu_sibling_map[i]);
        if ( cnt[i] <= cnt[cpu] )
            cpu = i;
    }

    return cpu;
}
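
/*
 * Example of the placement above on a 4-core/8-thread box with sibling
 * pairs {0,1}, {2,3}, {4,5}, {6,7}: the search starts at CPU 1 (the
 * non-primary sibling of CPU 0) and then considers only the secondary
 * threads 3, 5 and 7; the "<=" comparison means that, among equally loaded
 * candidates, the highest-numbered CPU wins.
 */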
long do_domctl(XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
{
    long ret = 0;
    struct xen_domctl curop, *op = &curop;

    if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( copy_from_guest(op, u_domctl, 1) )
        return -EFAULT;

    if ( op->interface_version != XEN_DOMCTL_INTERFACE_VERSION )
        return -EACCES;

    spin_lock(&domctl_lock);

    switch ( op->cmd )
    {

    case XEN_DOMCTL_setvcpucontext:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);
        vcpu_guest_context_u c = { .nat = NULL };
        unsigned int vcpu = op->u.vcpucontext.vcpu;
        struct vcpu *v;

        ret = -ESRCH;
        if ( d == NULL )
            break;

        ret = xsm_setvcpucontext(d);
        if ( ret )
            goto svc_out;

        ret = -EINVAL;
        if ( (vcpu >= MAX_VIRT_CPUS) || ((v = d->vcpu[vcpu]) == NULL) )
            goto svc_out;

        if ( guest_handle_is_null(op->u.vcpucontext.ctxt) )
        {
            vcpu_reset(v);
            ret = 0;
            goto svc_out;
        }

#ifdef CONFIG_COMPAT
        BUILD_BUG_ON(sizeof(struct vcpu_guest_context)
                     < sizeof(struct compat_vcpu_guest_context));
#endif
        ret = -ENOMEM;
        if ( (c.nat = xmalloc(struct vcpu_guest_context)) == NULL )
            goto svc_out;

        if ( !IS_COMPAT(v->domain) )
            ret = copy_from_guest(c.nat, op->u.vcpucontext.ctxt, 1);
#ifdef CONFIG_COMPAT
        else
            ret = copy_from_guest(c.cmp,
                                  guest_handle_cast(op->u.vcpucontext.ctxt,
                                                    void), 1);
#endif
        ret = ret ? -EFAULT : 0;

        if ( ret == 0 )
        {
            domain_pause(d);
            ret = arch_set_info_guest(v, c);
            domain_unpause(d);
        }

    svc_out:
        xfree(c.nat);
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_pausedomain:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);
        ret = -ESRCH;
        if ( d != NULL )
        {
            ret = xsm_pausedomain(d);
            if ( ret )
                goto pausedomain_out;

            ret = -EINVAL;
            if ( d != current->domain )
            {
                domain_pause_by_systemcontroller(d);
                ret = 0;
            }
        pausedomain_out:
            rcu_unlock_domain(d);
        }
    }
    break;

    case XEN_DOMCTL_unpausedomain:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);

        ret = -ESRCH;
        if ( d == NULL )
            break;

        ret = xsm_unpausedomain(d);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        domain_unpause_by_systemcontroller(d);
        rcu_unlock_domain(d);
        ret = 0;
    }
    break;

    case XEN_DOMCTL_resumedomain:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);

        ret = -ESRCH;
        if ( d == NULL )
            break;

        ret = xsm_resumedomain(d);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        domain_resume(d);
        rcu_unlock_domain(d);
        ret = 0;
    }
    break;

    case XEN_DOMCTL_createdomain:
    {
        struct domain *d;
        domid_t dom;
        static domid_t rover = 0;
        unsigned int domcr_flags;

        ret = -EINVAL;
        if ( supervisor_mode_kernel ||
             (op->u.createdomain.flags &
              ~(XEN_DOMCTL_CDF_hvm_guest | XEN_DOMCTL_CDF_hap)) )
            break;

        dom = op->domain;
        if ( (dom > 0) && (dom < DOMID_FIRST_RESERVED) )
        {
            ret = -EINVAL;
            if ( !is_free_domid(dom) )
                break;
        }
        else
        {
            for ( dom = rover + 1; dom != rover; dom++ )
            {
                if ( dom == DOMID_FIRST_RESERVED )
                    dom = 0;
                if ( is_free_domid(dom) )
                    break;
            }

            ret = -ENOMEM;
            if ( dom == rover )
                break;

            rover = dom;
        }

        domcr_flags = 0;
        if ( op->u.createdomain.flags & XEN_DOMCTL_CDF_hvm_guest )
            domcr_flags |= DOMCRF_hvm;
        if ( op->u.createdomain.flags & XEN_DOMCTL_CDF_hap )
            domcr_flags |= DOMCRF_hap;

        ret = -ENOMEM;
        d = domain_create(dom, domcr_flags, op->u.createdomain.ssidref);
        if ( d == NULL )
            break;

        ret = 0;

        memcpy(d->handle, op->u.createdomain.handle,
               sizeof(xen_domain_handle_t));

        op->domain = d->domain_id;
        if ( copy_to_guest(u_domctl, op, 1) )
            ret = -EFAULT;
    }
    break;
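
    /*
     * Example of the rover search above: with rover == 5, the candidate
     * order is 6, 7, ..., DOMID_FIRST_RESERVED - 1, wrapping to 0, 1, ...,
     * 4 (0 is never free while dom0 exists), and -ENOMEM results only if
     * the scan comes full circle without finding a free ID.
     */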
    case XEN_DOMCTL_max_vcpus:
    {
        struct domain *d;
        unsigned int i, max = op->u.max_vcpus.max, cpu;

        ret = -EINVAL;
        if ( max > MAX_VIRT_CPUS )
            break;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
            break;

        ret = xsm_max_vcpus(d);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        /* Needed, for example, to ensure writable p.t. state is synced. */
        domain_pause(d);

        /* We cannot reduce maximum VCPUs. */
        ret = -EINVAL;
        if ( (max != MAX_VIRT_CPUS) && (d->vcpu[max] != NULL) )
            goto maxvcpu_out;

        ret = -ENOMEM;
        for ( i = 0; i < max; i++ )
        {
            if ( d->vcpu[i] != NULL )
                continue;

            cpu = (i == 0) ?
                default_vcpu0_location() :
                (d->vcpu[i-1]->processor + 1) % num_online_cpus();

            if ( alloc_vcpu(d, i, cpu) == NULL )
                goto maxvcpu_out;
        }

        ret = 0;

    maxvcpu_out:
        domain_unpause(d);
        rcu_unlock_domain(d);
    }
    break;
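
    /*
     * Placement note for the loop above: VCPU0 gets the least-loaded
     * non-primary thread from default_vcpu0_location(); every later VCPU
     * simply takes the next online CPU round-robin, e.g. with four CPUs
     * and VCPU0 on CPU 2 the sequence is 2, 3, 0, 1.
     */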
    case XEN_DOMCTL_destroydomain:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);
        ret = -ESRCH;
        if ( d != NULL )
        {
            ret = xsm_destroydomain(d) ? : domain_kill(d);
            rcu_unlock_domain(d);
        }
    }
    break;

    case XEN_DOMCTL_setvcpuaffinity:
    case XEN_DOMCTL_getvcpuaffinity:
    {
        domid_t dom = op->domain;
        struct domain *d = rcu_lock_domain_by_id(dom);
        struct vcpu *v;
        cpumask_t new_affinity;

        ret = -ESRCH;
        if ( d == NULL )
            break;

        ret = xsm_vcpuaffinity(op->cmd, d);
        if ( ret )
            goto vcpuaffinity_out;

        ret = -EINVAL;
        if ( op->u.vcpuaffinity.vcpu >= MAX_VIRT_CPUS )
            goto vcpuaffinity_out;

        ret = -ESRCH;
        if ( (v = d->vcpu[op->u.vcpuaffinity.vcpu]) == NULL )
            goto vcpuaffinity_out;

        if ( op->cmd == XEN_DOMCTL_setvcpuaffinity )
        {
            xenctl_cpumap_to_cpumask(
                &new_affinity, &op->u.vcpuaffinity.cpumap);
            ret = vcpu_set_affinity(v, &new_affinity);
        }
        else
        {
            cpumask_to_xenctl_cpumap(
                &op->u.vcpuaffinity.cpumap, &v->cpu_affinity);
            ret = 0;
        }

    vcpuaffinity_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_scheduler_op:
    {
        struct domain *d;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
            break;

        ret = xsm_scheduler(d);
        if ( ret )
            goto scheduler_op_out;

        ret = sched_adjust(d, &op->u.scheduler_op);
        if ( copy_to_guest(u_domctl, op, 1) )
            ret = -EFAULT;

    scheduler_op_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_getdomaininfo:
    {
        struct domain *d;
        domid_t dom = op->domain;

        rcu_read_lock(&domlist_read_lock);

        for_each_domain ( d )
            if ( d->domain_id >= dom )
                break;

        if ( d == NULL )
        {
            rcu_read_unlock(&domlist_read_lock);
            ret = -ESRCH;
            break;
        }

        ret = xsm_getdomaininfo(d);
        if ( ret )
            goto getdomaininfo_out;

        getdomaininfo(d, &op->u.getdomaininfo);

        op->domain = op->u.getdomaininfo.domain;
        if ( copy_to_guest(u_domctl, op, 1) )
            ret = -EFAULT;

    getdomaininfo_out:
        rcu_read_unlock(&domlist_read_lock);
    }
    break;
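
    /*
     * Note the lookup semantics above: the handler returns the first domain
     * whose ID is >= op->domain and rewrites op->domain to the ID actually
     * found.  A toolstack can therefore enumerate all domains by
     * resubmitting with the last returned ID plus one, which is how the
     * libxc getinfo loop walks the domain list.
     */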
    case XEN_DOMCTL_getvcpucontext:
    {
        vcpu_guest_context_u c = { .nat = NULL };
        struct domain *d;
        struct vcpu *v;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
            break;

        ret = xsm_getvcpucontext(d);
        if ( ret )
            goto getvcpucontext_out;

        ret = -EINVAL;
        if ( op->u.vcpucontext.vcpu >= MAX_VIRT_CPUS )
            goto getvcpucontext_out;

        ret = -ESRCH;
        if ( (v = d->vcpu[op->u.vcpucontext.vcpu]) == NULL )
            goto getvcpucontext_out;

        ret = -ENODATA;
        if ( !v->is_initialised )
            goto getvcpucontext_out;

#ifdef CONFIG_COMPAT
        BUILD_BUG_ON(sizeof(struct vcpu_guest_context)
                     < sizeof(struct compat_vcpu_guest_context));
#endif
        ret = -ENOMEM;
        if ( (c.nat = xmalloc(struct vcpu_guest_context)) == NULL )
            goto getvcpucontext_out;

        if ( v != current )
            vcpu_pause(v);

        arch_get_info_guest(v, c);
        ret = 0;

        if ( v != current )
            vcpu_unpause(v);

        if ( !IS_COMPAT(v->domain) )
            ret = copy_to_guest(op->u.vcpucontext.ctxt, c.nat, 1);
#ifdef CONFIG_COMPAT
        else
            ret = copy_to_guest(guest_handle_cast(op->u.vcpucontext.ctxt,
                                                  void), c.cmp, 1);
#endif

        if ( copy_to_guest(u_domctl, op, 1) || ret )
            ret = -EFAULT;

    getvcpucontext_out:
        xfree(c.nat);
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_getvcpuinfo:
    {
        struct domain *d;
        struct vcpu *v;
        struct vcpu_runstate_info runstate;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
            break;

        ret = xsm_getvcpuinfo(d);
        if ( ret )
            goto getvcpuinfo_out;

        ret = -EINVAL;
        if ( op->u.getvcpuinfo.vcpu >= MAX_VIRT_CPUS )
            goto getvcpuinfo_out;

        ret = -ESRCH;
        if ( (v = d->vcpu[op->u.getvcpuinfo.vcpu]) == NULL )
            goto getvcpuinfo_out;

        vcpu_runstate_get(v, &runstate);

        op->u.getvcpuinfo.online   = !test_bit(_VPF_down, &v->pause_flags);
        op->u.getvcpuinfo.blocked  = test_bit(_VPF_blocked, &v->pause_flags);
        op->u.getvcpuinfo.running  = v->is_running;
        op->u.getvcpuinfo.cpu_time = runstate.time[RUNSTATE_running];
        op->u.getvcpuinfo.cpu      = v->processor;
        ret = 0;

        if ( copy_to_guest(u_domctl, op, 1) )
            ret = -EFAULT;

    getvcpuinfo_out:
        rcu_unlock_domain(d);
    }
    break;
    case XEN_DOMCTL_max_mem:
    {
        struct domain *d;
        unsigned long new_max;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = xsm_setdomainmaxmem(d);
        if ( ret )
            goto max_mem_out;

        ret = -EINVAL;
        new_max = op->u.max_mem.max_memkb >> (PAGE_SHIFT-10);

        spin_lock(&d->page_alloc_lock);
        if ( new_max >= d->tot_pages )
        {
            /* Must not bail out of the switch here: the page_alloc lock and
             * the RCU domain reference are still held. */
            ret = guest_physmap_max_mem_pages(d, new_max);
            if ( ret == 0 )
                d->max_pages = new_max;
        }
        spin_unlock(&d->page_alloc_lock);

    max_mem_out:
        rcu_unlock_domain(d);
    }
    break;
    case XEN_DOMCTL_setdomainhandle:
    {
        struct domain *d;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = xsm_setdomainhandle(d);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        memcpy(d->handle, op->u.setdomainhandle.handle,
               sizeof(xen_domain_handle_t));
        rcu_unlock_domain(d);
        ret = 0;
    }
    break;

    case XEN_DOMCTL_setdebugging:
    {
        struct domain *d;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = xsm_setdebugging(d);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        domain_pause(d);
        d->debugger_attached = !!op->u.setdebugging.enable;
        domain_unpause(d); /* causes guest to latch new status */
        rcu_unlock_domain(d);
        ret = 0;
    }
    break;

    case XEN_DOMCTL_irq_permission:
    {
        struct domain *d;
        unsigned int pirq = op->u.irq_permission.pirq;

        ret = -EINVAL;
        if ( pirq >= NR_IRQS )
            break;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = xsm_irq_permission(d, pirq, op->u.irq_permission.allow_access);
        if ( ret )
            goto irq_permission_out;

        if ( op->u.irq_permission.allow_access )
            ret = irq_permit_access(d, pirq);
        else
            ret = irq_deny_access(d, pirq);

    irq_permission_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_iomem_permission:
    {
        struct domain *d;
        unsigned long mfn = op->u.iomem_permission.first_mfn;
        unsigned long nr_mfns = op->u.iomem_permission.nr_mfns;

        ret = -EINVAL;
        if ( (mfn + nr_mfns - 1) < mfn ) /* wrap? */
            break;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = xsm_iomem_permission(d, mfn, op->u.iomem_permission.allow_access);
        if ( ret )
            goto iomem_permission_out;

        if ( op->u.iomem_permission.allow_access )
            ret = iomem_permit_access(d, mfn, mfn + nr_mfns - 1);
        else
            ret = iomem_deny_access(d, mfn, mfn + nr_mfns - 1);

    iomem_permission_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_settimeoffset:
    {
        struct domain *d;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = xsm_domain_settime(d);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        domain_set_time_offset(d, op->u.settimeoffset.time_offset_seconds);
        rcu_unlock_domain(d);
        ret = 0;
    }
    break;

    case XEN_DOMCTL_set_target:
    {
        struct domain *d, *e;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = -ESRCH;
        e = get_domain_by_id(op->u.set_target.target);
        if ( e == NULL )
            goto set_target_out;

        ret = -EINVAL;
        if ( (d == e) || (d->target != NULL) )
        {
            put_domain(e);
            goto set_target_out;
        }

        /* Hold reference on @e until we destroy @d. */
        d->target = e;

        ret = 0;

    set_target_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_subscribe:
    {
        struct domain *d;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d != NULL )
        {
            d->suspend_evtchn = op->u.subscribe.port;
            rcu_unlock_domain(d);
            ret = 0;
        }
    }
    break;

    default:
        ret = arch_do_domctl(op, u_domctl);
        break;
    }

    spin_unlock(&domctl_lock);

    return ret;
}
/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */
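
For reference, a privileged guest reaches do_domctl() through the
__HYPERVISOR_domctl hypercall; userspace tools normally go through libxc and
the privcmd driver rather than issuing it directly. A minimal sketch of a
getdomaininfo request from kernel context (the HYPERVISOR_domctl wrapper
name is an assumption; only the struct layout and error codes come from the
code above):

    #include <public/domctl.h>

    /* Hypothetical helper: query one domain's info via the domctl hypercall. */
    static int query_domain(domid_t dom, struct xen_domctl_getdomaininfo *out)
    {
        struct xen_domctl op = {
            .cmd               = XEN_DOMCTL_getdomaininfo,
            .interface_version = XEN_DOMCTL_INTERFACE_VERSION,
            .domain            = dom,
        };
        int rc = HYPERVISOR_domctl(&op);  /* assumed hypercall wrapper */

        if ( rc == 0 )
            *out = op.u.getdomaininfo;    /* op.domain now holds the ID found */
        return rc;  /* -EPERM unless IS_PRIV; -EACCES on version mismatch */
    }

Note that interface_version must match XEN_DOMCTL_INTERFACE_VERSION exactly,
or do_domctl() fails with -EACCES before dispatching.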