direct-io.hg: xen/common/domctl.c @ 15478:05331a29f3cb

Export debugger-attached state to the xc API.

Signed-off-by: Tristan Gingold <tgingold@free.fr>
author: kfraser@localhost.localdomain
date:   Fri Jul 06 14:42:55 2007 +0100
/******************************************************************************
 * domctl.c
 *
 * Domain management operations. For use by node control stack.
 *
 * Copyright (c) 2002-2006, K A Fraser
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/lib.h>
#include <xen/mm.h>
#include <xen/sched.h>
#include <xen/domain.h>
#include <xen/event.h>
#include <xen/domain_page.h>
#include <xen/trace.h>
#include <xen/console.h>
#include <xen/iocap.h>
#include <xen/rcupdate.h>
#include <xen/guest_access.h>
#include <xen/bitmap.h>
#include <xen/shadow.h>
#include <asm/current.h>
#include <public/domctl.h>
#include <acm/acm_hooks.h>

extern long arch_do_domctl(
    struct xen_domctl *op, XEN_GUEST_HANDLE(xen_domctl_t) u_domctl);
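
/*
 * Copy a hypervisor cpumask_t into a guest-supplied xenctl_cpumap bitmap.
 * Guest bytes beyond the hypervisor's NR_CPUS worth of bits are explicitly
 * zeroed so the guest never sees stale data.
 */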
void cpumask_to_xenctl_cpumap(
    struct xenctl_cpumap *xenctl_cpumap, cpumask_t *cpumask)
{
    unsigned int guest_bytes, copy_bytes, i;
    uint8_t zero = 0;
    uint8_t bytemap[(NR_CPUS + 7) / 8];

    if ( guest_handle_is_null(xenctl_cpumap->bitmap) )
        return;

    guest_bytes = (xenctl_cpumap->nr_cpus + 7) / 8;
    copy_bytes = min_t(unsigned int, guest_bytes, sizeof(bytemap));

    bitmap_long_to_byte(bytemap, cpus_addr(*cpumask), NR_CPUS);

    copy_to_guest(xenctl_cpumap->bitmap, bytemap, copy_bytes);

    for ( i = copy_bytes; i < guest_bytes; i++ )
        copy_to_guest_offset(xenctl_cpumap->bitmap, i, &zero, 1);
}
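
/*
 * The reverse of the above: pull a guest bitmap into a cpumask_t. The
 * cpumask is cleared first, so bits the guest does not supply stay clear.
 */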
void xenctl_cpumap_to_cpumask(
    cpumask_t *cpumask, struct xenctl_cpumap *xenctl_cpumap)
{
    unsigned int guest_bytes, copy_bytes;
    uint8_t bytemap[(NR_CPUS + 7) / 8];

    guest_bytes = (xenctl_cpumap->nr_cpus + 7) / 8;
    copy_bytes = min_t(unsigned int, guest_bytes, sizeof(bytemap));

    cpus_clear(*cpumask);

    if ( guest_handle_is_null(xenctl_cpumap->bitmap) )
        return;

    /*
     * Zero the whole local buffer: tail bytes beyond what the guest
     * supplies must not leak uninitialised stack contents into the cpumask.
     */
    memset(bytemap, 0, sizeof(bytemap));
    copy_from_guest(bytemap, xenctl_cpumap->bitmap, copy_bytes);

    bitmap_byte_to_long(cpus_addr(*cpumask), bytemap, NR_CPUS);
}
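
/* A domid is free if it is below DOMID_FIRST_RESERVED and no domain owns it. */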
static inline int is_free_domid(domid_t dom)
{
    struct domain *d;

    if ( dom >= DOMID_FIRST_RESERVED )
        return 0;

    if ( (d = rcu_lock_domain_by_id(dom)) == NULL )
        return 1;

    rcu_unlock_domain(d);
    return 0;
}
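
/* Snapshot scheduling and accounting state of @d for XEN_DOMCTL_getdomaininfo. */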
void getdomaininfo(struct domain *d, struct xen_domctl_getdomaininfo *info)
{
    struct vcpu *v;
    u64 cpu_time = 0;
    int flags = XEN_DOMINF_blocked;
    struct vcpu_runstate_info runstate;

    info->domain = d->domain_id;
    info->nr_online_vcpus = 0;

    /*
     * - domain is marked as blocked only if all its vcpus are blocked
     * - domain is marked as running if any of its vcpus is running
     */
    for_each_vcpu ( d, v )
    {
        vcpu_runstate_get(v, &runstate);
        cpu_time += runstate.time[RUNSTATE_running];
        info->max_vcpu_id = v->vcpu_id;
        if ( !test_bit(_VPF_down, &v->pause_flags) )
        {
            if ( !(v->pause_flags & VPF_blocked) )
                flags &= ~XEN_DOMINF_blocked;
            if ( v->is_running )
                flags |= XEN_DOMINF_running;
            info->nr_online_vcpus++;
        }
    }

    info->cpu_time = cpu_time;

    info->flags = flags |
        (d->is_dying                ? XEN_DOMINF_dying    : 0) |
        (d->is_shut_down            ? XEN_DOMINF_shutdown : 0) |
        (d->is_paused_by_controller ? XEN_DOMINF_paused   : 0) |
        (d->debugger_attached       ? XEN_DOMINF_debugged : 0) |
        d->shutdown_code << XEN_DOMINF_shutdownshift;

    if ( is_hvm_domain(d) )
        info->flags |= XEN_DOMINF_hvm_guest;

    if ( d->ssid != NULL )
        info->ssidref = ((struct acm_ssid_domain *)d->ssid)->ssidref;
    else
        info->ssidref = ACM_DEFAULT_SSID;

    info->tot_pages         = d->tot_pages;
    info->max_pages         = d->max_pages;
    info->shared_info_frame = mfn_to_gmfn(d, __pa(d->shared_info)>>PAGE_SHIFT);

    memcpy(info->handle, d->handle, sizeof(xen_domain_handle_t));
}
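
/*
 * Choose a physical CPU for a new domain's VCPU0: pick the least-loaded
 * CPU, counting only VCPUs that are currently online.
 */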
static unsigned int default_vcpu0_location(void)
{
    struct domain *d;
    struct vcpu   *v;
    unsigned int   i, cpu, cnt[NR_CPUS] = { 0 };
    cpumask_t      cpu_exclude_map;

    /* Do an initial CPU placement. Pick the least-populated CPU. */
    rcu_read_lock(&domlist_read_lock);
    for_each_domain ( d )
        for_each_vcpu ( d, v )
            if ( !test_bit(_VPF_down, &v->pause_flags) )
                cnt[v->processor]++;
    rcu_read_unlock(&domlist_read_lock);

    /*
     * If we're on an HT system, we only auto-allocate to a non-primary HT.
     * We favour high-numbered CPUs in the event of a tie.
     */
    cpu = first_cpu(cpu_sibling_map[0]);
    if ( cpus_weight(cpu_sibling_map[0]) > 1 )
        cpu = next_cpu(cpu, cpu_sibling_map[0]);
    cpu_exclude_map = cpu_sibling_map[0];
    for_each_online_cpu ( i )
    {
        if ( cpu_isset(i, cpu_exclude_map) )
            continue;
        if ( (i == first_cpu(cpu_sibling_map[i])) &&
             (cpus_weight(cpu_sibling_map[i]) > 1) )
            continue;
        cpus_or(cpu_exclude_map, cpu_exclude_map, cpu_sibling_map[i]);
        if ( cnt[i] <= cnt[cpu] )
            cpu = i;
    }

    return cpu;
}
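
/*
 * Top-level dispatcher for domain-control hypercalls. All operations are
 * serialised by the single domctl_lock and are restricted to privileged
 * (control) domains.
 */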
long do_domctl(XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
{
    long ret = 0;
    struct xen_domctl curop, *op = &curop;
    static DEFINE_SPINLOCK(domctl_lock);

    if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( copy_from_guest(op, u_domctl, 1) )
        return -EFAULT;

    if ( op->interface_version != XEN_DOMCTL_INTERFACE_VERSION )
        return -EACCES;

    spin_lock(&domctl_lock);

    switch ( op->cmd )
    {

    case XEN_DOMCTL_setvcpucontext:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);
        vcpu_guest_context_u c = { .nat = NULL };
        unsigned int vcpu = op->u.vcpucontext.vcpu;
        struct vcpu *v;

        ret = -ESRCH;
        if ( d == NULL )
            break;

        ret = -EINVAL;
        if ( (vcpu >= MAX_VIRT_CPUS) || ((v = d->vcpu[vcpu]) == NULL) )
            goto svc_out;

        if ( guest_handle_is_null(op->u.vcpucontext.ctxt) )
        {
            ret = vcpu_reset(v);
            goto svc_out;
        }
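
        /*
         * The same heap allocation is accessed through the compat view of
         * the union below, so it must be at least as large as the compat
         * structure.
         */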
#ifdef CONFIG_COMPAT
        BUILD_BUG_ON(sizeof(struct vcpu_guest_context)
                     < sizeof(struct compat_vcpu_guest_context));
#endif
        ret = -ENOMEM;
        if ( (c.nat = xmalloc(struct vcpu_guest_context)) == NULL )
            goto svc_out;

        if ( !IS_COMPAT(v->domain) )
            ret = copy_from_guest(c.nat, op->u.vcpucontext.ctxt, 1);
#ifdef CONFIG_COMPAT
        else
            ret = copy_from_guest(c.cmp,
                                  guest_handle_cast(op->u.vcpucontext.ctxt,
                                                    void), 1);
#endif
        ret = ret ? -EFAULT : 0;

        if ( ret == 0 )
        {
            domain_pause(d);
            ret = arch_set_info_guest(v, c);
            domain_unpause(d);
        }

    svc_out:
        xfree(c.nat);
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_pausedomain:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);
        ret = -ESRCH;
        if ( d != NULL )
        {
            ret = -EINVAL;
            if ( d != current->domain )
            {
                domain_pause_by_systemcontroller(d);
                ret = 0;
            }
            rcu_unlock_domain(d);
        }
    }
    break;

    case XEN_DOMCTL_unpausedomain:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);

        ret = -ESRCH;
        if ( d == NULL )
            break;

        domain_unpause_by_systemcontroller(d);
        rcu_unlock_domain(d);
        ret = 0;
    }
    break;

    case XEN_DOMCTL_resumedomain:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);

        ret = -ESRCH;
        if ( d == NULL )
            break;

        domain_resume(d);
        rcu_unlock_domain(d);
        ret = 0;
    }
    break;

    case XEN_DOMCTL_createdomain:
    {
        struct domain *d;
        domid_t        dom;
        static domid_t rover = 0;
        unsigned int domcr_flags;

        ret = -EINVAL;
        if ( supervisor_mode_kernel ||
             (op->u.createdomain.flags & ~XEN_DOMCTL_CDF_hvm_guest) )
            break;

        dom = op->domain;
        if ( (dom > 0) && (dom < DOMID_FIRST_RESERVED) )
        {
            ret = -EINVAL;
            if ( !is_free_domid(dom) )
                break;
        }
        else
        {
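            /*
             * No specific domid requested: search circularly, starting just
             * after the last id handed out, for a free id below
             * DOMID_FIRST_RESERVED.
             */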
            for ( dom = rover + 1; dom != rover; dom++ )
            {
                if ( dom == DOMID_FIRST_RESERVED )
                    dom = 0;
                if ( is_free_domid(dom) )
                    break;
            }

            ret = -ENOMEM;
            if ( dom == rover )
                break;

            rover = dom;
        }

        domcr_flags = 0;
        if ( op->u.createdomain.flags & XEN_DOMCTL_CDF_hvm_guest )
            domcr_flags |= DOMCRF_hvm;

        ret = -ENOMEM;
        d = domain_create(dom, domcr_flags, op->u.createdomain.ssidref);
        if ( d == NULL )
            break;

        ret = 0;

        memcpy(d->handle, op->u.createdomain.handle,
               sizeof(xen_domain_handle_t));

        op->domain = d->domain_id;
        if ( copy_to_guest(u_domctl, op, 1) )
            ret = -EFAULT;
    }
    break;

    case XEN_DOMCTL_max_vcpus:
    {
        struct domain *d;
        unsigned int i, max = op->u.max_vcpus.max, cpu;

        ret = -EINVAL;
        if ( max > MAX_VIRT_CPUS )
            break;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
            break;

        /* Needed, for example, to ensure writable p.t. state is synced. */
        domain_pause(d);

        /* We cannot reduce maximum VCPUs. */
        ret = -EINVAL;
        if ( (max != MAX_VIRT_CPUS) && (d->vcpu[max] != NULL) )
            goto maxvcpu_out;

        ret = -ENOMEM;
        for ( i = 0; i < max; i++ )
        {
            if ( d->vcpu[i] != NULL )
                continue;

            cpu = (i == 0) ?
                default_vcpu0_location() :
                (d->vcpu[i-1]->processor + 1) % num_online_cpus();

            if ( alloc_vcpu(d, i, cpu) == NULL )
                goto maxvcpu_out;
        }

        ret = 0;

    maxvcpu_out:
        domain_unpause(d);
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_destroydomain:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);
        ret = -ESRCH;
        if ( d != NULL )
        {
            ret = -EINVAL;
            if ( d != current->domain )
            {
                domain_kill(d);
                ret = 0;
            }
            rcu_unlock_domain(d);
        }
    }
    break;

    case XEN_DOMCTL_setvcpuaffinity:
    case XEN_DOMCTL_getvcpuaffinity:
    {
        domid_t dom = op->domain;
        struct domain *d = rcu_lock_domain_by_id(dom);
        struct vcpu *v;
        cpumask_t new_affinity;

        ret = -ESRCH;
        if ( d == NULL )
            break;

        ret = -EINVAL;
        if ( op->u.vcpuaffinity.vcpu >= MAX_VIRT_CPUS )
            goto vcpuaffinity_out;

        ret = -ESRCH;
        if ( (v = d->vcpu[op->u.vcpuaffinity.vcpu]) == NULL )
            goto vcpuaffinity_out;

        if ( op->cmd == XEN_DOMCTL_setvcpuaffinity )
        {
            xenctl_cpumap_to_cpumask(
                &new_affinity, &op->u.vcpuaffinity.cpumap);
            ret = vcpu_set_affinity(v, &new_affinity);
        }
        else
        {
            cpumask_to_xenctl_cpumap(
                &op->u.vcpuaffinity.cpumap, &v->cpu_affinity);
            ret = 0;
        }

    vcpuaffinity_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_scheduler_op:
    {
        struct domain *d;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
            break;

        ret = sched_adjust(d, &op->u.scheduler_op);
        if ( copy_to_guest(u_domctl, op, 1) )
            ret = -EFAULT;

        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_getdomaininfo:
    {
        struct domain *d;
        domid_t dom;

        dom = op->domain;
        if ( dom == DOMID_SELF )
            dom = current->domain->domain_id;

        rcu_read_lock(&domlist_read_lock);
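
        /* Find the first existing domain with id >= the requested one. */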
        for_each_domain ( d )
        {
            if ( d->domain_id >= dom )
                break;
        }

        if ( d == NULL )
        {
            rcu_read_unlock(&domlist_read_lock);
            ret = -ESRCH;
            break;
        }

        getdomaininfo(d, &op->u.getdomaininfo);

        op->domain = op->u.getdomaininfo.domain;
        if ( copy_to_guest(u_domctl, op, 1) )
            ret = -EFAULT;

        rcu_read_unlock(&domlist_read_lock);
    }
    break;

    case XEN_DOMCTL_getvcpucontext:
    {
        vcpu_guest_context_u c = { .nat = NULL };
        struct domain       *d;
        struct vcpu         *v;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
            break;

        ret = -EINVAL;
        if ( op->u.vcpucontext.vcpu >= MAX_VIRT_CPUS )
            goto getvcpucontext_out;

        ret = -ESRCH;
        if ( (v = d->vcpu[op->u.vcpucontext.vcpu]) == NULL )
            goto getvcpucontext_out;

        ret = -ENODATA;
        if ( !v->is_initialised )
            goto getvcpucontext_out;
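
        /* As in setvcpucontext: the native buffer doubles as the compat one. */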
#ifdef CONFIG_COMPAT
        BUILD_BUG_ON(sizeof(struct vcpu_guest_context)
                     < sizeof(struct compat_vcpu_guest_context));
#endif
        ret = -ENOMEM;
        if ( (c.nat = xmalloc(struct vcpu_guest_context)) == NULL )
            goto getvcpucontext_out;

        if ( v != current )
            vcpu_pause(v);

        arch_get_info_guest(v, c);
        ret = 0;

        if ( v != current )
            vcpu_unpause(v);

        if ( !IS_COMPAT(v->domain) )
            ret = copy_to_guest(op->u.vcpucontext.ctxt, c.nat, 1);
#ifdef CONFIG_COMPAT
        else
            ret = copy_to_guest(guest_handle_cast(op->u.vcpucontext.ctxt,
                                                  void), c.cmp, 1);
#endif

        if ( copy_to_guest(u_domctl, op, 1) || ret )
            ret = -EFAULT;

    getvcpucontext_out:
        xfree(c.nat);
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_getvcpuinfo:
    {
        struct domain *d;
        struct vcpu   *v;
        struct vcpu_runstate_info runstate;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
            break;

        ret = -EINVAL;
        if ( op->u.getvcpuinfo.vcpu >= MAX_VIRT_CPUS )
            goto getvcpuinfo_out;

        ret = -ESRCH;
        if ( (v = d->vcpu[op->u.getvcpuinfo.vcpu]) == NULL )
            goto getvcpuinfo_out;

        vcpu_runstate_get(v, &runstate);

        op->u.getvcpuinfo.online   = !test_bit(_VPF_down, &v->pause_flags);
        op->u.getvcpuinfo.blocked  = test_bit(_VPF_blocked, &v->pause_flags);
        op->u.getvcpuinfo.running  = v->is_running;
        op->u.getvcpuinfo.cpu_time = runstate.time[RUNSTATE_running];
        op->u.getvcpuinfo.cpu      = v->processor;
        ret = 0;

        if ( copy_to_guest(u_domctl, op, 1) )
            ret = -EFAULT;

    getvcpuinfo_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_max_mem:
    {
        struct domain *d;
        unsigned long new_max;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = -EINVAL;
        new_max = op->u.max_mem.max_memkb >> (PAGE_SHIFT-10);
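
        /* Refuse to shrink the limit below the current allocation. */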
        spin_lock(&d->page_alloc_lock);
        if ( new_max >= d->tot_pages )
        {
            ret = guest_physmap_max_mem_pages(d, new_max);
            if ( ret == 0 )
                d->max_pages = new_max;
        }
        spin_unlock(&d->page_alloc_lock);

        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_setdomainhandle:
    {
        struct domain *d;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        memcpy(d->handle, op->u.setdomainhandle.handle,
               sizeof(xen_domain_handle_t));
        rcu_unlock_domain(d);
        ret = 0;
    }
    break;

    case XEN_DOMCTL_setdebugging:
    {
        struct domain *d;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        domain_pause(d);
        d->debugger_attached = !!op->u.setdebugging.enable;
        domain_unpause(d); /* causes guest to latch new status */
        rcu_unlock_domain(d);
        ret = 0;
    }
    break;

    case XEN_DOMCTL_irq_permission:
    {
        struct domain *d;
        unsigned int pirq = op->u.irq_permission.pirq;

        ret = -EINVAL;
        if ( pirq >= NR_IRQS )
            break;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        if ( op->u.irq_permission.allow_access )
            ret = irq_permit_access(d, pirq);
        else
            ret = irq_deny_access(d, pirq);

        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_iomem_permission:
    {
        struct domain *d;
        unsigned long mfn = op->u.iomem_permission.first_mfn;
        unsigned long nr_mfns = op->u.iomem_permission.nr_mfns;

        ret = -EINVAL;
        if ( (mfn + nr_mfns - 1) < mfn ) /* wrap? */
            break;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        if ( op->u.iomem_permission.allow_access )
            ret = iomem_permit_access(d, mfn, mfn + nr_mfns - 1);
        else
            ret = iomem_deny_access(d, mfn, mfn + nr_mfns - 1);

        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_settimeoffset:
    {
        struct domain *d;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d != NULL )
        {
            d->time_offset_seconds = op->u.settimeoffset.time_offset_seconds;
            rcu_unlock_domain(d);
            ret = 0;
        }
    }
    break;

    default:
        ret = arch_do_domctl(op, u_domctl);
        break;
    }

    spin_unlock(&domctl_lock);

    return ret;
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */