ia64/xen-unstable

view xen/common/dom0_ops.c @ 9776:72f9c751d3ea

Replace &foo[0] with foo where the latter seems cleaner
(which is usually, and particularly when it's an argument
to one of the bitops functions).

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Wed Apr 19 18:32:20 2006 +0100 (2006-04-19)
parents 1d8b3c85121d
children 4e1b8be54311
line source
1 /******************************************************************************
2 * dom0_ops.c
3 *
4 * Process command requests from domain-0 guest OS.
5 *
6 * Copyright (c) 2002, K A Fraser
7 */
9 #include <xen/config.h>
10 #include <xen/types.h>
11 #include <xen/lib.h>
12 #include <xen/mm.h>
13 #include <xen/sched.h>
14 #include <xen/domain.h>
15 #include <xen/event.h>
16 #include <xen/domain_page.h>
17 #include <xen/trace.h>
18 #include <xen/console.h>
19 #include <xen/iocap.h>
20 #include <xen/guest_access.h>
21 #include <asm/current.h>
22 #include <public/dom0_ops.h>
23 #include <public/sched_ctl.h>
24 #include <acm/acm_hooks.h>
26 extern long arch_do_dom0_op(
27 struct dom0_op *op, GUEST_HANDLE(dom0_op_t) u_dom0_op);
28 extern void arch_getdomaininfo_ctxt(
29 struct vcpu *, struct vcpu_guest_context *);
31 static inline int is_free_domid(domid_t dom)
32 {
33 struct domain *d;
35 if ( dom >= DOMID_FIRST_RESERVED )
36 return 0;
38 if ( (d = find_domain_by_id(dom)) == NULL )
39 return 1;
41 put_domain(d);
42 return 0;
43 }
45 static void getdomaininfo(struct domain *d, dom0_getdomaininfo_t *info)
46 {
47 struct vcpu *v;
48 u64 cpu_time = 0;
49 int flags = DOMFLAGS_BLOCKED;
50 struct vcpu_runstate_info runstate;
52 info->domain = d->domain_id;
53 info->nr_online_vcpus = 0;
55 /*
56 * - domain is marked as blocked only if all its vcpus are blocked
57 * - domain is marked as running if any of its vcpus is running
58 */
59 for_each_vcpu ( d, v ) {
60 vcpu_runstate_get(v, &runstate);
61 cpu_time += runstate.time[RUNSTATE_running];
62 info->max_vcpu_id = v->vcpu_id;
63 if ( !test_bit(_VCPUF_down, &v->vcpu_flags) )
64 {
65 if ( !(v->vcpu_flags & VCPUF_blocked) )
66 flags &= ~DOMFLAGS_BLOCKED;
67 if ( v->vcpu_flags & VCPUF_running )
68 flags |= DOMFLAGS_RUNNING;
69 info->nr_online_vcpus++;
70 }
71 }
73 info->cpu_time = cpu_time;
75 info->flags = flags |
76 ((d->domain_flags & DOMF_dying) ? DOMFLAGS_DYING : 0) |
77 ((d->domain_flags & DOMF_shutdown) ? DOMFLAGS_SHUTDOWN : 0) |
78 ((d->domain_flags & DOMF_ctrl_pause) ? DOMFLAGS_PAUSED : 0) |
79 d->shutdown_code << DOMFLAGS_SHUTDOWNSHIFT;
81 if (d->ssid != NULL)
82 info->ssidref = ((struct acm_ssid_domain *)d->ssid)->ssidref;
83 else
84 info->ssidref = ACM_DEFAULT_SSID;
86 info->tot_pages = d->tot_pages;
87 info->max_pages = d->max_pages;
88 info->shared_info_frame = __pa(d->shared_info) >> PAGE_SHIFT;
90 memcpy(info->handle, d->handle, sizeof(xen_domain_handle_t));
91 }
93 long do_dom0_op(GUEST_HANDLE(dom0_op_t) u_dom0_op)
94 {
95 long ret = 0;
96 struct dom0_op curop, *op = &curop;
97 void *ssid = NULL; /* save security ptr between pre and post/fail hooks */
98 static spinlock_t dom0_lock = SPIN_LOCK_UNLOCKED;
100 if ( !IS_PRIV(current->domain) )
101 return -EPERM;
103 if ( copy_from_guest(op, u_dom0_op, 1) )
104 return -EFAULT;
106 if ( op->interface_version != DOM0_INTERFACE_VERSION )
107 return -EACCES;
109 if ( acm_pre_dom0_op(op, &ssid) )
110 return -EPERM;
112 spin_lock(&dom0_lock);
114 switch ( op->cmd )
115 {
117 case DOM0_SETVCPUCONTEXT:
118 {
119 struct domain *d = find_domain_by_id(op->u.setvcpucontext.domain);
120 ret = -ESRCH;
121 if ( d != NULL )
122 {
123 ret = set_info_guest(d, &op->u.setvcpucontext);
124 put_domain(d);
125 }
126 }
127 break;
129 case DOM0_PAUSEDOMAIN:
130 {
131 struct domain *d = find_domain_by_id(op->u.pausedomain.domain);
132 ret = -ESRCH;
133 if ( d != NULL )
134 {
135 ret = -EINVAL;
136 if ( d != current->domain )
137 {
138 domain_pause_by_systemcontroller(d);
139 ret = 0;
140 }
141 put_domain(d);
142 }
143 }
144 break;
146 case DOM0_UNPAUSEDOMAIN:
147 {
148 struct domain *d = find_domain_by_id(op->u.unpausedomain.domain);
149 ret = -ESRCH;
150 if ( d != NULL )
151 {
152 ret = -EINVAL;
153 if ( (d != current->domain) &&
154 test_bit(_VCPUF_initialised, &d->vcpu[0]->vcpu_flags) )
155 {
156 domain_unpause_by_systemcontroller(d);
157 ret = 0;
158 }
159 put_domain(d);
160 }
161 }
162 break;
164 case DOM0_CREATEDOMAIN:
165 {
166 struct domain *d;
167 unsigned int pro;
168 domid_t dom;
169 struct vcpu *v;
170 unsigned int i, cnt[NR_CPUS] = { 0 };
171 cpumask_t cpu_exclude_map;
172 static domid_t rover = 0;
174 /*
175 * Running the domain 0 kernel in ring 0 is not compatible
176 * with multiple guests.
177 */
178 if ( supervisor_mode_kernel )
179 return -EINVAL;
181 dom = op->u.createdomain.domain;
182 if ( (dom > 0) && (dom < DOMID_FIRST_RESERVED) )
183 {
184 ret = -EINVAL;
185 if ( !is_free_domid(dom) )
186 break;
187 }
188 else
189 {
190 for ( dom = rover + 1; dom != rover; dom++ )
191 {
192 if ( dom == DOMID_FIRST_RESERVED )
193 dom = 0;
194 if ( is_free_domid(dom) )
195 break;
196 }
198 ret = -ENOMEM;
199 if ( dom == rover )
200 break;
202 rover = dom;
203 }
205 /* Do an initial CPU placement. Pick the least-populated CPU. */
206 read_lock(&domlist_lock);
207 for_each_domain ( d )
208 for_each_vcpu ( d, v )
209 if ( !test_bit(_VCPUF_down, &v->vcpu_flags) )
210 cnt[v->processor]++;
211 read_unlock(&domlist_lock);
213 /*
214 * If we're on a HT system, we only auto-allocate to a non-primary HT.
215 * We favour high numbered CPUs in the event of a tie.
216 */
217 pro = first_cpu(cpu_sibling_map[0]);
218 if ( cpus_weight(cpu_sibling_map[0]) > 1 )
219 pro = next_cpu(pro, cpu_sibling_map[0]);
220 cpu_exclude_map = cpu_sibling_map[0];
221 for_each_online_cpu ( i )
222 {
223 if ( cpu_isset(i, cpu_exclude_map) )
224 continue;
225 if ( (i == first_cpu(cpu_sibling_map[i])) &&
226 (cpus_weight(cpu_sibling_map[i]) > 1) )
227 continue;
228 cpus_or(cpu_exclude_map, cpu_exclude_map, cpu_sibling_map[i]);
229 if ( cnt[i] <= cnt[pro] )
230 pro = i;
231 }
233 ret = -ENOMEM;
234 if ( (d = domain_create(dom, pro)) == NULL )
235 break;
237 memcpy(d->handle, op->u.createdomain.handle,
238 sizeof(xen_domain_handle_t));
240 ret = 0;
242 op->u.createdomain.domain = d->domain_id;
243 if ( copy_to_guest(u_dom0_op, op, 1) )
244 ret = -EFAULT;
245 }
246 break;
248 case DOM0_MAX_VCPUS:
249 {
250 struct domain *d;
251 unsigned int i, max = op->u.max_vcpus.max, cpu;
253 ret = -EINVAL;
254 if ( max > MAX_VIRT_CPUS )
255 break;
257 ret = -ESRCH;
258 if ( (d = find_domain_by_id(op->u.max_vcpus.domain)) == NULL )
259 break;
261 /*
262 * Can only create new VCPUs while the domain is not fully constructed
263 * (and hence not runnable). Xen needs auditing for races before
264 * removing this check.
265 */
266 ret = -EINVAL;
267 if ( test_bit(_VCPUF_initialised, &d->vcpu[0]->vcpu_flags) )
268 goto maxvcpu_out;
270 /* We cannot reduce maximum VCPUs. */
271 ret = -EINVAL;
272 if ( (max != MAX_VIRT_CPUS) && (d->vcpu[max] != NULL) )
273 goto maxvcpu_out;
275 ret = -ENOMEM;
276 for ( i = 0; i < max; i++ )
277 {
278 if ( d->vcpu[i] == NULL )
279 {
280 cpu = (d->vcpu[i-1]->processor + 1) % num_online_cpus();
281 if ( alloc_vcpu(d, i, cpu) == NULL )
282 goto maxvcpu_out;
283 }
284 }
286 ret = 0;
288 maxvcpu_out:
289 put_domain(d);
290 }
291 break;
293 case DOM0_DESTROYDOMAIN:
294 {
295 struct domain *d = find_domain_by_id(op->u.destroydomain.domain);
296 ret = -ESRCH;
297 if ( d != NULL )
298 {
299 ret = -EINVAL;
300 if ( d != current->domain )
301 {
302 domain_kill(d);
303 ret = 0;
304 }
305 put_domain(d);
306 }
307 }
308 break;
310 case DOM0_SETVCPUAFFINITY:
311 {
312 domid_t dom = op->u.setvcpuaffinity.domain;
313 struct domain *d = find_domain_by_id(dom);
314 struct vcpu *v;
315 cpumask_t new_affinity;
317 if ( d == NULL )
318 {
319 ret = -ESRCH;
320 break;
321 }
323 if ( (op->u.setvcpuaffinity.vcpu >= MAX_VIRT_CPUS) ||
324 !d->vcpu[op->u.setvcpuaffinity.vcpu] )
325 {
326 ret = -EINVAL;
327 put_domain(d);
328 break;
329 }
331 v = d->vcpu[op->u.setvcpuaffinity.vcpu];
332 if ( v == NULL )
333 {
334 ret = -ESRCH;
335 put_domain(d);
336 break;
337 }
339 if ( v == current )
340 {
341 ret = -EINVAL;
342 put_domain(d);
343 break;
344 }
346 new_affinity = v->cpu_affinity;
347 memcpy(cpus_addr(new_affinity),
348 &op->u.setvcpuaffinity.cpumap,
349 min((int)(BITS_TO_LONGS(NR_CPUS) * sizeof(long)),
350 (int)sizeof(op->u.setvcpuaffinity.cpumap)));
352 ret = vcpu_set_affinity(v, &new_affinity);
354 put_domain(d);
355 }
356 break;
358 case DOM0_SCHEDCTL:
359 {
360 ret = sched_ctl(&op->u.schedctl);
361 if ( copy_to_guest(u_dom0_op, op, 1) )
362 ret = -EFAULT;
363 }
364 break;
366 case DOM0_ADJUSTDOM:
367 {
368 ret = sched_adjdom(&op->u.adjustdom);
369 if ( copy_to_guest(u_dom0_op, op, 1) )
370 ret = -EFAULT;
371 }
372 break;
374 case DOM0_GETDOMAININFO:
375 {
376 struct domain *d;
377 domid_t dom;
379 dom = op->u.getdomaininfo.domain;
380 if ( dom == DOMID_SELF )
381 dom = current->domain->domain_id;
383 read_lock(&domlist_lock);
385 for_each_domain ( d )
386 {
387 if ( d->domain_id >= dom )
388 break;
389 }
391 if ( (d == NULL) || !get_domain(d) )
392 {
393 read_unlock(&domlist_lock);
394 ret = -ESRCH;
395 break;
396 }
398 read_unlock(&domlist_lock);
400 getdomaininfo(d, &op->u.getdomaininfo);
402 if ( copy_to_guest(u_dom0_op, op, 1) )
403 ret = -EFAULT;
405 put_domain(d);
406 }
407 break;
409 case DOM0_GETDOMAININFOLIST:
410 {
411 struct domain *d;
412 dom0_getdomaininfo_t info;
413 u32 num_domains = 0;
415 read_lock(&domlist_lock);
417 for_each_domain ( d )
418 {
419 if ( d->domain_id < op->u.getdomaininfolist.first_domain )
420 continue;
421 if ( num_domains == op->u.getdomaininfolist.max_domains )
422 break;
423 if ( (d == NULL) || !get_domain(d) )
424 {
425 ret = -ESRCH;
426 break;
427 }
429 getdomaininfo(d, &info);
431 put_domain(d);
433 if ( copy_to_guest_offset(op->u.getdomaininfolist.buffer,
434 num_domains, &info, 1) )
435 {
436 ret = -EFAULT;
437 break;
438 }
440 num_domains++;
441 }
443 read_unlock(&domlist_lock);
445 if ( ret != 0 )
446 break;
448 op->u.getdomaininfolist.num_domains = num_domains;
450 if ( copy_to_guest(u_dom0_op, op, 1) )
451 ret = -EFAULT;
452 }
453 break;
455 case DOM0_GETVCPUCONTEXT:
456 {
457 struct vcpu_guest_context *c;
458 struct domain *d;
459 struct vcpu *v;
461 ret = -ESRCH;
462 if ( (d = find_domain_by_id(op->u.getvcpucontext.domain)) == NULL )
463 break;
465 ret = -EINVAL;
466 if ( op->u.getvcpucontext.vcpu >= MAX_VIRT_CPUS )
467 goto getvcpucontext_out;
469 ret = -ESRCH;
470 if ( (v = d->vcpu[op->u.getvcpucontext.vcpu]) == NULL )
471 goto getvcpucontext_out;
473 ret = -ENODATA;
474 if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
475 goto getvcpucontext_out;
477 ret = -ENOMEM;
478 if ( (c = xmalloc(struct vcpu_guest_context)) == NULL )
479 goto getvcpucontext_out;
481 if ( v != current )
482 vcpu_pause(v);
484 arch_getdomaininfo_ctxt(v,c);
485 ret = 0;
487 if ( v != current )
488 vcpu_unpause(v);
490 if ( copy_to_guest(op->u.getvcpucontext.ctxt, c, 1) )
491 ret = -EFAULT;
493 xfree(c);
495 if ( copy_to_guest(u_dom0_op, op, 1) )
496 ret = -EFAULT;
498 getvcpucontext_out:
499 put_domain(d);
500 }
501 break;
503 case DOM0_GETVCPUINFO:
504 {
505 struct domain *d;
506 struct vcpu *v;
507 struct vcpu_runstate_info runstate;
509 ret = -ESRCH;
510 if ( (d = find_domain_by_id(op->u.getvcpuinfo.domain)) == NULL )
511 break;
513 ret = -EINVAL;
514 if ( op->u.getvcpuinfo.vcpu >= MAX_VIRT_CPUS )
515 goto getvcpuinfo_out;
517 ret = -ESRCH;
518 if ( (v = d->vcpu[op->u.getvcpuinfo.vcpu]) == NULL )
519 goto getvcpuinfo_out;
521 vcpu_runstate_get(v, &runstate);
523 op->u.getvcpuinfo.online = !test_bit(_VCPUF_down, &v->vcpu_flags);
524 op->u.getvcpuinfo.blocked = test_bit(_VCPUF_blocked, &v->vcpu_flags);
525 op->u.getvcpuinfo.running = test_bit(_VCPUF_running, &v->vcpu_flags);
526 op->u.getvcpuinfo.cpu_time = runstate.time[RUNSTATE_running];
527 op->u.getvcpuinfo.cpu = v->processor;
528 op->u.getvcpuinfo.cpumap = 0;
529 memcpy(&op->u.getvcpuinfo.cpumap,
530 cpus_addr(v->cpu_affinity),
531 min((int)(BITS_TO_LONGS(NR_CPUS) * sizeof(long)),
532 (int)sizeof(op->u.getvcpuinfo.cpumap)));
533 ret = 0;
535 if ( copy_to_guest(u_dom0_op, op, 1) )
536 ret = -EFAULT;
538 getvcpuinfo_out:
539 put_domain(d);
540 }
541 break;
543 case DOM0_SETTIME:
544 {
545 do_settime(op->u.settime.secs,
546 op->u.settime.nsecs,
547 op->u.settime.system_time);
548 ret = 0;
549 }
550 break;
552 case DOM0_TBUFCONTROL:
553 {
554 ret = tb_control(&op->u.tbufcontrol);
555 if ( copy_to_guest(u_dom0_op, op, 1) )
556 ret = -EFAULT;
557 }
558 break;
560 case DOM0_READCONSOLE:
561 {
562 ret = read_console_ring(
563 op->u.readconsole.buffer,
564 &op->u.readconsole.count,
565 op->u.readconsole.clear);
566 if ( copy_to_guest(u_dom0_op, op, 1) )
567 ret = -EFAULT;
568 }
569 break;
571 case DOM0_SCHED_ID:
572 {
573 op->u.sched_id.sched_id = sched_id();
574 if ( copy_to_guest(u_dom0_op, op, 1) )
575 ret = -EFAULT;
576 else
577 ret = 0;
578 }
579 break;
581 case DOM0_SETDOMAINMAXMEM:
582 {
583 struct domain *d;
584 unsigned long new_max;
586 ret = -ESRCH;
587 d = find_domain_by_id(op->u.setdomainmaxmem.domain);
588 if ( d == NULL )
589 break;
591 ret = -EINVAL;
592 new_max = op->u.setdomainmaxmem.max_memkb >> (PAGE_SHIFT-10);
594 spin_lock(&d->page_alloc_lock);
595 if ( new_max >= d->tot_pages )
596 {
597 d->max_pages = new_max;
598 ret = 0;
599 }
600 spin_unlock(&d->page_alloc_lock);
602 put_domain(d);
603 }
604 break;
606 case DOM0_SETDOMAINHANDLE:
607 {
608 struct domain *d;
609 ret = -ESRCH;
610 d = find_domain_by_id(op->u.setdomainhandle.domain);
611 if ( d != NULL )
612 {
613 memcpy(d->handle, op->u.setdomainhandle.handle,
614 sizeof(xen_domain_handle_t));
615 put_domain(d);
616 ret = 0;
617 }
618 }
619 break;
621 case DOM0_SETDEBUGGING:
622 {
623 struct domain *d;
624 ret = -ESRCH;
625 d = find_domain_by_id(op->u.setdebugging.domain);
626 if ( d != NULL )
627 {
628 if ( op->u.setdebugging.enable )
629 set_bit(_DOMF_debugging, &d->domain_flags);
630 else
631 clear_bit(_DOMF_debugging, &d->domain_flags);
632 put_domain(d);
633 ret = 0;
634 }
635 }
636 break;
638 case DOM0_IRQ_PERMISSION:
639 {
640 struct domain *d;
641 unsigned int pirq = op->u.irq_permission.pirq;
643 ret = -EINVAL;
644 if ( pirq >= NR_PIRQS )
645 break;
647 ret = -ESRCH;
648 d = find_domain_by_id(op->u.irq_permission.domain);
649 if ( d == NULL )
650 break;
652 if ( op->u.irq_permission.allow_access )
653 ret = irq_permit_access(d, pirq);
654 else
655 ret = irq_deny_access(d, pirq);
657 put_domain(d);
658 }
659 break;
661 case DOM0_IOMEM_PERMISSION:
662 {
663 struct domain *d;
664 unsigned long mfn = op->u.iomem_permission.first_mfn;
665 unsigned long nr_mfns = op->u.iomem_permission.nr_mfns;
667 ret = -EINVAL;
668 if ( (mfn + nr_mfns - 1) < mfn ) /* wrap? */
669 break;
671 ret = -ESRCH;
672 d = find_domain_by_id(op->u.iomem_permission.domain);
673 if ( d == NULL )
674 break;
676 if ( op->u.iomem_permission.allow_access )
677 ret = iomem_permit_access(d, mfn, mfn + nr_mfns - 1);
678 else
679 ret = iomem_deny_access(d, mfn, mfn + nr_mfns - 1);
681 put_domain(d);
682 }
683 break;
685 #ifdef PERF_COUNTERS
686 case DOM0_PERFCCONTROL:
687 {
688 extern int perfc_control(dom0_perfccontrol_t *);
689 ret = perfc_control(&op->u.perfccontrol);
690 if ( copy_to_guest(u_dom0_op, op, 1) )
691 ret = -EFAULT;
692 }
693 break;
694 #endif
696 default:
697 ret = arch_do_dom0_op(op, u_dom0_op);
698 break;
699 }
701 spin_unlock(&dom0_lock);
703 if (!ret)
704 acm_post_dom0_op(op, ssid);
705 else
706 acm_fail_dom0_op(op, ssid);
708 return ret;
709 }
711 /*
712 * Local variables:
713 * mode: C
714 * c-set-style: "BSD"
715 * c-basic-offset: 4
716 * tab-width: 4
717 * indent-tabs-mode: nil
718 * End:
719 */