direct-io.hg

view xen/common/dom0_ops.c @ 8500:dd5649730b32

Fix a couple of bogus dom0_op names:
setdomaininfo -> setvcpucontext
pincpudomain -> setvcpuaffinity

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Fri Jan 06 12:53:19 2006 +0100 (2006-01-06)
parents 84cf56328ce0
children 3eeabf448f91
line source
1 /******************************************************************************
2 * dom0_ops.c
3 *
4 * Process command requests from domain-0 guest OS.
5 *
6 * Copyright (c) 2002, K A Fraser
7 */
9 #include <xen/config.h>
10 #include <xen/types.h>
11 #include <xen/lib.h>
12 #include <xen/mm.h>
13 #include <xen/sched.h>
14 #include <xen/domain.h>
15 #include <xen/event.h>
16 #include <xen/domain_page.h>
17 #include <xen/trace.h>
18 #include <xen/console.h>
19 #include <xen/iocap.h>
20 #include <asm/current.h>
21 #include <public/dom0_ops.h>
22 #include <public/sched_ctl.h>
23 #include <acm/acm_hooks.h>
25 extern long arch_do_dom0_op(dom0_op_t *op, dom0_op_t *u_dom0_op);
26 extern void arch_getdomaininfo_ctxt(
27 struct vcpu *, struct vcpu_guest_context *);
29 static inline int is_free_domid(domid_t dom)
30 {
31 struct domain *d;
33 if ( dom >= DOMID_FIRST_RESERVED )
34 return 0;
36 if ( (d = find_domain_by_id(dom)) == NULL )
37 return 1;
39 put_domain(d);
40 return 0;
41 }
43 static void getdomaininfo(struct domain *d, dom0_getdomaininfo_t *info)
44 {
45 struct vcpu *v;
46 u64 cpu_time = 0;
47 int flags = DOMFLAGS_BLOCKED;
49 info->domain = d->domain_id;
50 info->nr_online_vcpus = 0;
52 /*
53 * - domain is marked as blocked only if all its vcpus are blocked
54 * - domain is marked as running if any of its vcpus is running
55 */
56 for_each_vcpu ( d, v ) {
57 cpu_time += v->cpu_time;
58 info->max_vcpu_id = v->vcpu_id;
59 if ( !test_bit(_VCPUF_down, &v->vcpu_flags) )
60 {
61 if ( !(v->vcpu_flags & VCPUF_blocked) )
62 flags &= ~DOMFLAGS_BLOCKED;
63 if ( v->vcpu_flags & VCPUF_running )
64 flags |= DOMFLAGS_RUNNING;
65 info->nr_online_vcpus++;
66 }
67 }
69 info->cpu_time = cpu_time;
71 info->flags = flags |
72 ((d->domain_flags & DOMF_dying) ? DOMFLAGS_DYING : 0) |
73 ((d->domain_flags & DOMF_shutdown) ? DOMFLAGS_SHUTDOWN : 0) |
74 ((d->domain_flags & DOMF_ctrl_pause) ? DOMFLAGS_PAUSED : 0) |
75 d->shutdown_code << DOMFLAGS_SHUTDOWNSHIFT;
77 if (d->ssid != NULL)
78 info->ssidref = ((struct acm_ssid_domain *)d->ssid)->ssidref;
79 else
80 info->ssidref = ACM_DEFAULT_SSID;
82 info->tot_pages = d->tot_pages;
83 info->max_pages = d->max_pages;
84 info->shared_info_frame = __pa(d->shared_info) >> PAGE_SHIFT;
86 memcpy(info->handle, d->handle, sizeof(xen_domain_handle_t));
87 }
/*
 * do_dom0_op: top-level dispatcher for privileged (domain-0) control
 * operations.  Copies the request from guest memory, checks privilege,
 * interface version and ACM policy, then serialises all operations under
 * a single static spinlock before dispatching on op->cmd.  Commands not
 * handled here fall through to the architecture-specific handler.
 * Returns 0 on success or a negative errno-style value.
 */
89 long do_dom0_op(dom0_op_t *u_dom0_op)
90 {
91 long ret = 0;
92 dom0_op_t curop, *op = &curop;
93 void *ssid = NULL; /* save security ptr between pre and post/fail hooks */
94 static spinlock_t dom0_lock = SPIN_LOCK_UNLOCKED;
/* Only a privileged (control) domain may issue dom0 operations. */
96 if ( !IS_PRIV(current->domain) )
97 return -EPERM;
99 if ( copy_from_user(op, u_dom0_op, sizeof(*op)) )
100 return -EFAULT;
/* Caller must be built against the same dom0 interface version. */
102 if ( op->interface_version != DOM0_INTERFACE_VERSION )
103 return -EACCES;
105 if ( acm_pre_dom0_op(op, &ssid) )
106 return -EPERM;
/* All dom0 operations are fully serialised under one lock. */
108 spin_lock(&dom0_lock);
110 switch ( op->cmd )
111 {
/* Load a new register/context image into one of a domain's vcpus. */
113 case DOM0_SETVCPUCONTEXT:
114 {
115 struct domain *d = find_domain_by_id(op->u.setvcpucontext.domain);
116 ret = -ESRCH;
117 if ( d != NULL )
118 {
119 ret = set_info_guest(d, &op->u.setvcpucontext);
120 put_domain(d);
121 }
122 }
123 break;
/* Pause target domain; the caller may not pause itself this way. */
125 case DOM0_PAUSEDOMAIN:
126 {
127 struct domain *d = find_domain_by_id(op->u.pausedomain.domain);
128 ret = -ESRCH;
129 if ( d != NULL )
130 {
131 ret = -EINVAL;
132 if ( d != current->domain )
133 {
134 domain_pause_by_systemcontroller(d);
135 ret = 0;
136 }
137 put_domain(d);
138 }
139 }
140 break;
/* Unpause target; only valid once vcpu0 has been initialised. */
142 case DOM0_UNPAUSEDOMAIN:
143 {
144 struct domain *d = find_domain_by_id(op->u.unpausedomain.domain);
145 ret = -ESRCH;
146 if ( d != NULL )
147 {
148 ret = -EINVAL;
149 if ( (d != current->domain) &&
150 test_bit(_VCPUF_initialised, &d->vcpu[0]->vcpu_flags) )
151 {
152 domain_unpause_by_systemcontroller(d);
153 ret = 0;
154 }
155 put_domain(d);
156 }
157 }
158 break;
/* Create a new domain: validate or allocate a domid, then place it. */
160 case DOM0_CREATEDOMAIN:
161 {
162 struct domain *d;
163 unsigned int pro;
164 domid_t dom;
165 struct vcpu *v;
166 unsigned int i, cnt[NR_CPUS] = { 0 };
167 static domid_t rover = 0;
169 dom = op->u.createdomain.domain;
/* A non-zero requested id must be unreserved and currently unused. */
170 if ( (dom > 0) && (dom < DOMID_FIRST_RESERVED) )
171 {
172 ret = -EINVAL;
173 if ( !is_free_domid(dom) )
174 break;
175 }
176 else
177 {
/* Otherwise search for a free id, wrapping round the domid space. */
178 for ( dom = rover + 1; dom != rover; dom++ )
179 {
180 if ( dom == DOMID_FIRST_RESERVED )
181 dom = 0;
182 if ( is_free_domid(dom) )
183 break;
184 }
/* Coming back to the rover means every id is taken. */
186 ret = -ENOMEM;
187 if ( dom == rover )
188 break;
190 rover = dom;
191 }
193 /* Do an initial CPU placement. Pick the least-populated CPU. */
194 read_lock(&domlist_lock);
195 for_each_domain ( d )
196 for_each_vcpu ( d, v )
197 cnt[v->processor]++;
198 read_unlock(&domlist_lock);
200 /*
201 * If we're on a HT system, we only use the first HT for dom0, other
202 * domains will all share the second HT of each CPU. Since dom0 is on
203 * CPU 0, we favour high numbered CPUs in the event of a tie.
204 */
205 pro = smp_num_siblings - 1;
206 for ( i = pro; i < num_online_cpus(); i += smp_num_siblings )
207 if ( cnt[i] <= cnt[pro] )
208 pro = i;
210 ret = -ENOMEM;
211 if ( (d = do_createdomain(dom, pro)) == NULL )
212 break;
214 memcpy(d->handle, op->u.createdomain.handle,
215 sizeof(xen_domain_handle_t));
217 ret = 0;
/* Report the (possibly auto-allocated) id back to the caller. */
219 op->u.createdomain.domain = d->domain_id;
220 if ( copy_to_user(u_dom0_op, op, sizeof(*op)) )
221 ret = -EFAULT;
222 }
223 break;
/* Grow the vcpu count of a domain that has not started running yet. */
225 case DOM0_MAX_VCPUS:
226 {
227 struct domain *d;
228 unsigned int i, max = op->u.max_vcpus.max, cpu;
230 ret = -EINVAL;
231 if ( max > MAX_VIRT_CPUS )
232 break;
234 ret = -ESRCH;
235 if ( (d = find_domain_by_id(op->u.max_vcpus.domain)) == NULL )
236 break;
238 /*
239 * Can only create new VCPUs while the domain is not fully constructed
240 * (and hence not runnable). Xen needs auditing for races before
241 * removing this check.
242 */
243 ret = -EINVAL;
244 if ( test_bit(_VCPUF_initialised, &d->vcpu[0]->vcpu_flags) )
245 goto maxvcpu_out;
247 /* We cannot reduce maximum VCPUs. */
248 ret = -EINVAL;
249 if ( (max != MAX_VIRT_CPUS) && (d->vcpu[max] != NULL) )
250 goto maxvcpu_out;
252 ret = -ENOMEM;
253 for ( i = 0; i < max; i++ )
254 {
255 if ( d->vcpu[i] == NULL )
256 {
/* NOTE(review): relies on d->vcpu[0] always existing so that i-1
 * never underflows when i == 0 -- confirm do_createdomain guarantees
 * vcpu0 is allocated. */
257 cpu = (d->vcpu[i-1]->processor + 1) % num_online_cpus();
258 if ( alloc_vcpu(d, i, cpu) == NULL )
259 goto maxvcpu_out;
260 }
261 }
263 ret = 0;
265 maxvcpu_out:
266 put_domain(d);
267 }
268 break;
/* Mark target domain as dying; a domain cannot destroy itself here. */
270 case DOM0_DESTROYDOMAIN:
271 {
272 struct domain *d = find_domain_by_id(op->u.destroydomain.domain);
273 ret = -ESRCH;
274 if ( d != NULL )
275 {
276 ret = -EINVAL;
277 if ( d != current->domain )
278 {
279 domain_kill(d);
280 ret = 0;
281 }
282 put_domain(d);
283 }
284 }
285 break;
/* Set a vcpu's CPU affinity mask and migrate it to a permitted CPU. */
287 case DOM0_SETVCPUAFFINITY:
288 {
289 domid_t dom = op->u.setvcpuaffinity.domain;
290 struct domain *d = find_domain_by_id(dom);
291 struct vcpu *v;
293 if ( d == NULL )
294 {
295 ret = -ESRCH;
296 break;
297 }
299 if ( (op->u.setvcpuaffinity.vcpu >= MAX_VIRT_CPUS) ||
300 !d->vcpu[op->u.setvcpuaffinity.vcpu] )
301 {
302 ret = -EINVAL;
303 put_domain(d);
304 break;
305 }
307 v = d->vcpu[op->u.setvcpuaffinity.vcpu];
/* NOTE(review): v cannot be NULL here -- the same array slot was
 * checked just above -- so this branch looks dead. */
308 if ( v == NULL )
309 {
310 ret = -ESRCH;
311 put_domain(d);
312 break;
313 }
/* Refuse to change the affinity of the vcpu we are running on. */
315 if ( v == current )
316 {
317 ret = -EINVAL;
318 put_domain(d);
319 break;
320 }
/* NOTE(review): min() compares BITS_TO_LONGS(NR_CPUS) (a count of
 * longs) against sizeof(cpumap) (a count of bytes) -- the units look
 * mismatched; verify the intended copy length. */
322 memcpy(cpus_addr(v->cpu_affinity),
323 &op->u.setvcpuaffinity.cpumap,
324 min((int)BITS_TO_LONGS(NR_CPUS),
325 (int)sizeof(op->u.setvcpuaffinity.cpumap)));
/* Pause around the migration so the vcpu moves while quiescent. */
327 vcpu_pause(v);
328 vcpu_migrate_cpu(v, first_cpu(v->cpu_affinity));
329 vcpu_unpause(v);
331 put_domain(d);
332 }
333 break;
/* Global scheduler control. */
335 case DOM0_SCHEDCTL:
336 {
337 ret = sched_ctl(&op->u.schedctl);
338 if ( copy_to_user(u_dom0_op, op, sizeof(*op)) )
339 ret = -EFAULT;
340 }
341 break;
/* Per-domain scheduling parameter adjustment. */
343 case DOM0_ADJUSTDOM:
344 {
345 ret = sched_adjdom(&op->u.adjustdom);
346 if ( copy_to_user(u_dom0_op, op, sizeof(*op)) )
347 ret = -EFAULT;
348 }
349 break;
/* Return info for the first domain with id >= the requested id. */
351 case DOM0_GETDOMAININFO:
352 {
353 struct domain *d;
354 domid_t dom;
356 dom = op->u.getdomaininfo.domain;
357 if ( dom == DOMID_SELF )
358 dom = current->domain->domain_id;
360 read_lock(&domlist_lock);
362 for_each_domain ( d )
363 {
364 if ( d->domain_id >= dom )
365 break;
366 }
/* Take a reference before dropping the list lock, or fail. */
368 if ( (d == NULL) || !get_domain(d) )
369 {
370 read_unlock(&domlist_lock);
371 ret = -ESRCH;
372 break;
373 }
375 read_unlock(&domlist_lock);
377 getdomaininfo(d, &op->u.getdomaininfo);
379 if ( copy_to_user(u_dom0_op, op, sizeof(*op)) )
380 ret = -EFAULT;
382 put_domain(d);
383 }
384 break;
/* Copy info for up to max_domains domains into a guest buffer. */
388 case DOM0_GETDOMAININFOLIST:
389 {
390 struct domain *d;
391 dom0_getdomaininfo_t info;
392 dom0_getdomaininfo_t *buffer = op->u.getdomaininfolist.buffer;
393 u32 num_domains = 0;
395 read_lock(&domlist_lock);
397 for_each_domain ( d )
398 {
399 if ( d->domain_id < op->u.getdomaininfolist.first_domain )
400 continue;
401 if ( num_domains == op->u.getdomaininfolist.max_domains )
402 break;
/* NOTE(review): d cannot be NULL inside for_each_domain; the NULL
 * half of this test is redundant (harmless). */
403 if ( (d == NULL) || !get_domain(d) )
404 {
405 ret = -ESRCH;
406 break;
407 }
409 getdomaininfo(d, &info);
411 put_domain(d);
413 if ( copy_to_user(buffer, &info, sizeof(dom0_getdomaininfo_t)) )
414 {
415 ret = -EFAULT;
416 break;
417 }
419 buffer++;
420 num_domains++;
421 }
423 read_unlock(&domlist_lock);
425 if ( ret != 0 )
426 break;
428 op->u.getdomaininfolist.num_domains = num_domains;
430 if ( copy_to_user(u_dom0_op, op, sizeof(*op)) )
431 ret = -EFAULT;
432 }
433 break;
/* Snapshot one vcpu's full guest context into a guest buffer. */
435 case DOM0_GETVCPUCONTEXT:
436 {
437 struct vcpu_guest_context *c;
438 struct domain *d;
439 struct vcpu *v;
441 ret = -ESRCH;
442 if ( (d = find_domain_by_id(op->u.getvcpucontext.domain)) == NULL )
443 break;
445 ret = -EINVAL;
446 if ( op->u.getvcpucontext.vcpu >= MAX_VIRT_CPUS )
447 goto getvcpucontext_out;
449 ret = -ESRCH;
450 if ( (v = d->vcpu[op->u.getvcpucontext.vcpu]) == NULL )
451 goto getvcpucontext_out;
/* Context struct is large; allocate from the heap, not the stack. */
453 ret = -ENOMEM;
454 if ( (c = xmalloc(struct vcpu_guest_context)) == NULL )
455 goto getvcpucontext_out;
/* Pause the target vcpu (unless it is us) for a consistent snapshot. */
457 if ( v != current )
458 vcpu_pause(v);
460 arch_getdomaininfo_ctxt(v,c);
461 ret = 0;
463 if ( v != current )
464 vcpu_unpause(v);
466 if ( copy_to_user(op->u.getvcpucontext.ctxt, c, sizeof(*c)) )
467 ret = -EFAULT;
469 xfree(c);
471 if ( copy_to_user(u_dom0_op, op, sizeof(*op)) )
472 ret = -EFAULT;
474 getvcpucontext_out:
475 put_domain(d);
476 }
477 break;
/* Return scheduling state and affinity summary for one vcpu. */
479 case DOM0_GETVCPUINFO:
480 {
481 struct domain *d;
482 struct vcpu *v;
484 ret = -ESRCH;
485 if ( (d = find_domain_by_id(op->u.getvcpuinfo.domain)) == NULL )
486 break;
488 ret = -EINVAL;
489 if ( op->u.getvcpuinfo.vcpu >= MAX_VIRT_CPUS )
490 goto getvcpuinfo_out;
492 ret = -ESRCH;
493 if ( (v = d->vcpu[op->u.getvcpuinfo.vcpu]) == NULL )
494 goto getvcpuinfo_out;
496 op->u.getvcpuinfo.online = !test_bit(_VCPUF_down, &v->vcpu_flags);
497 op->u.getvcpuinfo.blocked = test_bit(_VCPUF_blocked, &v->vcpu_flags);
498 op->u.getvcpuinfo.running = test_bit(_VCPUF_running, &v->vcpu_flags);
499 op->u.getvcpuinfo.cpu_time = v->cpu_time;
500 op->u.getvcpuinfo.cpu = v->processor;
501 op->u.getvcpuinfo.cpumap = 0;
/* NOTE(review): same long-count vs byte-count min() as in
 * DOM0_SETVCPUAFFINITY -- verify the intended copy length. */
502 memcpy(&op->u.getvcpuinfo.cpumap,
503 cpus_addr(v->cpu_affinity),
504 min((int)BITS_TO_LONGS(NR_CPUS),
505 (int)sizeof(op->u.getvcpuinfo.cpumap)));
506 ret = 0;
508 if ( copy_to_user(u_dom0_op, op, sizeof(*op)) )
509 ret = -EFAULT;
511 getvcpuinfo_out:
512 put_domain(d);
513 }
514 break;
/* Set the hypervisor's wall-clock time. */
516 case DOM0_SETTIME:
517 {
518 do_settime(op->u.settime.secs,
519 op->u.settime.nsecs,
520 op->u.settime.system_time);
521 ret = 0;
522 }
523 break;
/* Trace-buffer control. */
525 case DOM0_TBUFCONTROL:
526 {
527 ret = tb_control(&op->u.tbufcontrol);
528 if ( copy_to_user(u_dom0_op, op, sizeof(*op)) )
529 ret = -EFAULT;
530 }
531 break;
/* Read (and optionally clear) the hypervisor console ring. */
533 case DOM0_READCONSOLE:
534 {
535 ret = read_console_ring(
536 &op->u.readconsole.buffer,
537 &op->u.readconsole.count,
538 op->u.readconsole.clear);
539 if ( copy_to_user(u_dom0_op, op, sizeof(*op)) )
540 ret = -EFAULT;
541 }
542 break;
/* Report which scheduler is active. */
544 case DOM0_SCHED_ID:
545 {
546 op->u.sched_id.sched_id = sched_id();
547 if ( copy_to_user(u_dom0_op, op, sizeof(*op)) )
548 ret = -EFAULT;
549 else
550 ret = 0;
551 }
552 break;
/* Set a domain's maximum memory reservation (kB -> pages). */
554 case DOM0_SETDOMAINMAXMEM:
555 {
556 struct domain *d;
557 ret = -ESRCH;
558 d = find_domain_by_id(op->u.setdomainmaxmem.domain);
559 if ( d != NULL )
560 {
561 d->max_pages = op->u.setdomainmaxmem.max_memkb >> (PAGE_SHIFT-10);
562 put_domain(d);
563 ret = 0;
564 }
565 }
566 break;
/* Attach an opaque toolstack handle (UUID) to a domain. */
568 case DOM0_SETDOMAINHANDLE:
569 {
570 struct domain *d;
571 ret = -ESRCH;
572 d = find_domain_by_id(op->u.setdomainhandle.domain);
573 if ( d != NULL )
574 {
575 memcpy(d->handle, op->u.setdomainhandle.handle,
576 sizeof(xen_domain_handle_t));
577 put_domain(d);
578 ret = 0;
579 }
580 }
581 break;
/* Toggle the per-domain debugging flag. */
583 case DOM0_SETDEBUGGING:
584 {
585 struct domain *d;
586 ret = -ESRCH;
587 d = find_domain_by_id(op->u.setdebugging.domain);
588 if ( d != NULL )
589 {
590 if ( op->u.setdebugging.enable )
591 set_bit(_DOMF_debugging, &d->domain_flags);
592 else
593 clear_bit(_DOMF_debugging, &d->domain_flags);
594 put_domain(d);
595 ret = 0;
596 }
597 }
598 break;
/* Grant or revoke a domain's access to a physical IRQ. */
600 case DOM0_IRQ_PERMISSION:
601 {
602 struct domain *d;
603 unsigned int pirq = op->u.irq_permission.pirq;
605 ret = -EINVAL;
606 if ( pirq >= NR_PIRQS )
607 break;
609 ret = -ESRCH;
610 d = find_domain_by_id(op->u.irq_permission.domain);
611 if ( d == NULL )
612 break;
614 if ( op->u.irq_permission.allow_access )
615 ret = irq_permit_access(d, pirq);
616 else
617 ret = irq_deny_access(d, pirq);
619 put_domain(d);
620 }
621 break;
/* Grant or revoke a domain's access to an I/O memory page range. */
623 case DOM0_IOMEM_PERMISSION:
624 {
625 struct domain *d;
626 unsigned long pfn = op->u.iomem_permission.first_pfn;
627 unsigned long nr_pfns = op->u.iomem_permission.nr_pfns;
629 ret = -EINVAL;
630 if ( (pfn + nr_pfns - 1) < pfn ) /* wrap? */
631 break;
633 ret = -ESRCH;
634 d = find_domain_by_id(op->u.iomem_permission.domain);
635 if ( d == NULL )
636 break;
638 if ( op->u.iomem_permission.allow_access )
639 ret = iomem_permit_access(d, pfn, pfn + nr_pfns - 1);
640 else
641 ret = iomem_deny_access(d, pfn, pfn + nr_pfns - 1);
643 put_domain(d);
644 }
645 break;
647 #ifdef PERF_COUNTERS
/* Performance-counter control (only in PERF_COUNTERS builds). */
648 case DOM0_PERFCCONTROL:
649 {
650 extern int perfc_control(dom0_perfccontrol_t *);
651 ret = perfc_control(&op->u.perfccontrol);
652 if ( copy_to_user(u_dom0_op, op, sizeof(*op)) )
653 ret = -EFAULT;
654 }
655 break;
656 #endif
/* Anything else is architecture-specific. */
658 default:
659 ret = arch_do_dom0_op(op,u_dom0_op);
661 }
663 spin_unlock(&dom0_lock);
/* Tell the access-control module whether the operation succeeded. */
665 if (!ret)
666 acm_post_dom0_op(op, ssid);
667 else
668 acm_fail_dom0_op(op, ssid);
670 return ret;
671 }
673 /*
674 * Local variables:
675 * mode: C
676 * c-set-style: "BSD"
677 * c-basic-offset: 4
678 * tab-width: 4
679 * indent-tabs-mode: nil
680 * End:
681 */