ia64/xen-unstable

view xen/common/dom0_ops.c @ 8400:6b1d39a56c2b

Add debugging flag for domains to make domU debugging a run-time option
Signed-off-by: Kip Macy kmacy@fsmware.ckm
author kaf24@firebug.cl.cam.ac.uk
date Thu Dec 15 21:50:12 2005 +0100 (2005-12-15)
parents 82e283d25f3c
children 381cafbbc3d2
line source
1 /******************************************************************************
2 * dom0_ops.c
3 *
4 * Process command requests from domain-0 guest OS.
5 *
6 * Copyright (c) 2002, K A Fraser
7 */
9 #include <xen/config.h>
10 #include <xen/types.h>
11 #include <xen/lib.h>
12 #include <xen/mm.h>
13 #include <xen/sched.h>
14 #include <xen/domain.h>
15 #include <xen/event.h>
16 #include <xen/domain_page.h>
17 #include <xen/trace.h>
18 #include <xen/console.h>
19 #include <asm/current.h>
20 #include <public/dom0_ops.h>
21 #include <public/sched_ctl.h>
22 #include <acm/acm_hooks.h>
24 extern long arch_do_dom0_op(dom0_op_t *op, dom0_op_t *u_dom0_op);
25 extern void arch_getdomaininfo_ctxt(
26 struct vcpu *, struct vcpu_guest_context *);
28 static inline int is_free_domid(domid_t dom)
29 {
30 struct domain *d;
32 if ( dom >= DOMID_FIRST_RESERVED )
33 return 0;
35 if ( (d = find_domain_by_id(dom)) == NULL )
36 return 1;
38 put_domain(d);
39 return 0;
40 }
/*
 * Fill in a dom0_getdomaininfo_t snapshot for domain @d: identity,
 * per-domain flags aggregated from vcpu state, accumulated cpu time,
 * page counts, shared-info frame, and the domain handle.
 */
static void getdomaininfo(struct domain *d, dom0_getdomaininfo_t *info)
{
    struct vcpu *v;
    u64 cpu_time = 0;
    /* Start from "blocked"; any non-blocked vcpu clears it below. */
    int flags = DOMFLAGS_BLOCKED;

    info->domain = d->domain_id;
    info->nr_online_vcpus = 0;

    /*
     * - domain is marked as blocked only if all its vcpus are blocked
     * - domain is marked as running if any of its vcpus is running
     */
    for_each_vcpu ( d, v ) {
        cpu_time += v->cpu_time;
        /* Ends as the id of the last vcpu iterated — presumably the
         * highest; confirm for_each_vcpu ordering. */
        info->max_vcpu_id = v->vcpu_id;
        if ( !test_bit(_VCPUF_down, &v->vcpu_flags) )
        {
            if ( !(v->vcpu_flags & VCPUF_blocked) )
                flags &= ~DOMFLAGS_BLOCKED;
            if ( v->vcpu_flags & VCPUF_running )
                flags |= DOMFLAGS_RUNNING;
            info->nr_online_vcpus++;
        }
    }

    info->cpu_time = cpu_time;

    /* Merge domain-level state bits and the shutdown code into flags. */
    info->flags = flags |
        ((d->domain_flags & DOMF_dying)      ? DOMFLAGS_DYING    : 0) |
        ((d->domain_flags & DOMF_shutdown)   ? DOMFLAGS_SHUTDOWN : 0) |
        ((d->domain_flags & DOMF_ctrl_pause) ? DOMFLAGS_PAUSED   : 0) |
        d->shutdown_code << DOMFLAGS_SHUTDOWNSHIFT;

    /* Report the ACM security id, or the default when none is attached. */
    if (d->ssid != NULL)
        info->ssidref = ((struct acm_ssid_domain *)d->ssid)->ssidref;
    else
        info->ssidref = ACM_DEFAULT_SSID;

    info->tot_pages = d->tot_pages;
    info->max_pages = d->max_pages;
    info->shared_info_frame = __pa(d->shared_info) >> PAGE_SHIFT;

    memcpy(info->handle, d->handle, sizeof(xen_domain_handle_t));
}
/*
 * Top-level handler for the dom0_op hypercall: privileged control
 * operations issued by the control domain (create/destroy/pause domains,
 * query domain/vcpu info, scheduler, time, trace-buffer and console
 * control, etc.).
 *
 * All operations are serialised through a single static spinlock.
 * Returns 0 on success or a negative errno-style value.
 */
long do_dom0_op(dom0_op_t *u_dom0_op)
{
    long ret = 0;
    dom0_op_t curop, *op = &curop;
    void *ssid = NULL; /* save security ptr between pre and post/fail hooks */
    static spinlock_t dom0_lock = SPIN_LOCK_UNLOCKED;

    /* Only a privileged (control) domain may issue dom0 operations. */
    if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( copy_from_user(op, u_dom0_op, sizeof(*op)) )
        return -EFAULT;

    /* Reject tools built against a different interface version. */
    if ( op->interface_version != DOM0_INTERFACE_VERSION )
        return -EACCES;

    /* ACM security pre-hook; may veto the operation outright. */
    if ( acm_pre_dom0_op(op, &ssid) )
        return -EPERM;

    spin_lock(&dom0_lock);

    switch ( op->cmd )
    {

    case DOM0_SETDOMAININFO:
    {
        struct domain *d = find_domain_by_id(op->u.setdomaininfo.domain);
        ret = -ESRCH;
        if ( d != NULL )
        {
            ret = set_info_guest(d, &op->u.setdomaininfo);
            put_domain(d);
        }
    }
    break;

    case DOM0_PAUSEDOMAIN:
    {
        struct domain *d = find_domain_by_id(op->u.pausedomain.domain);
        ret = -ESRCH;
        if ( d != NULL )
        {
            ret = -EINVAL;
            /* A domain may not pause itself via this path. */
            if ( d != current->domain )
            {
                domain_pause_by_systemcontroller(d);
                ret = 0;
            }
            put_domain(d);
        }
    }
    break;

    case DOM0_UNPAUSEDOMAIN:
    {
        struct domain *d = find_domain_by_id(op->u.unpausedomain.domain);
        ret = -ESRCH;
        if ( d != NULL )
        {
            ret = -EINVAL;
            /* Only unpause another domain, and only once its boot vcpu
             * has been initialised. */
            if ( (d != current->domain) &&
                 test_bit(_VCPUF_initialised, &d->vcpu[0]->vcpu_flags) )
            {
                domain_unpause_by_systemcontroller(d);
                ret = 0;
            }
            put_domain(d);
        }
    }
    break;

    case DOM0_CREATEDOMAIN:
    {
        struct domain *d;
        unsigned int pro;
        domid_t dom;
        struct vcpu *v;
        unsigned int i, cnt[NR_CPUS] = { 0 };
        static domid_t rover = 0; /* last ID handed out by the auto-scan */

        dom = op->u.createdomain.domain;
        if ( (dom > 0) && (dom < DOMID_FIRST_RESERVED) )
        {
            /* Caller requested a specific ID: it must be free. */
            ret = -EINVAL;
            if ( !is_free_domid(dom) )
                break;
        }
        else
        {
            /* Otherwise scan for a free ID, resuming after the last one
             * allocated; wrap at the reserved boundary. */
            for ( dom = rover + 1; dom != rover; dom++ )
            {
                if ( dom == DOMID_FIRST_RESERVED )
                    dom = 0;
                if ( is_free_domid(dom) )
                    break;
            }

            /* Full wrap-around with no free slot found. */
            ret = -ENOMEM;
            if ( dom == rover )
                break;

            rover = dom;
        }

        /* Do an initial CPU placement. Pick the least-populated CPU. */
        read_lock(&domlist_lock);
        for_each_domain ( d )
            for_each_vcpu ( d, v )
                cnt[v->processor]++;
        read_unlock(&domlist_lock);

        /*
         * If we're on a HT system, we only use the first HT for dom0, other
         * domains will all share the second HT of each CPU. Since dom0 is on
         * CPU 0, we favour high numbered CPUs in the event of a tie.
         */
        pro = smp_num_siblings - 1;
        for ( i = pro; i < num_online_cpus(); i += smp_num_siblings )
            if ( cnt[i] <= cnt[pro] )
                pro = i;

        ret = -ENOMEM;
        if ( (d = do_createdomain(dom, pro)) == NULL )
            break;

        memcpy(d->handle, op->u.createdomain.handle,
               sizeof(xen_domain_handle_t));

        ret = 0;

        /* Report the chosen ID back to the caller. */
        op->u.createdomain.domain = d->domain_id;
        if ( copy_to_user(u_dom0_op, op, sizeof(*op)) )
            ret = -EFAULT;
    }
    break;

    case DOM0_MAX_VCPUS:
    {
        struct domain *d;
        unsigned int i, max = op->u.max_vcpus.max, cpu;

        ret = -EINVAL;
        if ( max > MAX_VIRT_CPUS )
            break;

        ret = -ESRCH;
        if ( (d = find_domain_by_id(op->u.max_vcpus.domain)) == NULL )
            break;

        /*
         * Can only create new VCPUs while the domain is not fully constructed
         * (and hence not runnable). Xen needs auditing for races before
         * removing this check.
         */
        ret = -EINVAL;
        if ( test_bit(_VCPUF_initialised, &d->vcpu[0]->vcpu_flags) )
            goto maxvcpu_out;

        /* We cannot reduce maximum VCPUs. */
        ret = -EINVAL;
        if ( (max != MAX_VIRT_CPUS) && (d->vcpu[max] != NULL) )
            goto maxvcpu_out;

        ret = -ENOMEM;
        for ( i = 0; i < max; i++ )
        {
            if ( d->vcpu[i] == NULL )
            {
                /*
                 * Place each new vcpu on the CPU after its predecessor's.
                 * NOTE(review): if vcpu[0] could ever be NULL here, i-1
                 * would index out of bounds; presumably vcpu[0] always
                 * exists after domain creation — confirm.
                 */
                cpu = (d->vcpu[i-1]->processor + 1) % num_online_cpus();
                if ( alloc_vcpu(d, i, cpu) == NULL )
                    goto maxvcpu_out;
            }
        }

        ret = 0;

    maxvcpu_out:
        put_domain(d);
    }
    break;

    case DOM0_DESTROYDOMAIN:
    {
        struct domain *d = find_domain_by_id(op->u.destroydomain.domain);
        ret = -ESRCH;
        if ( d != NULL )
        {
            ret = -EINVAL;
            /* A domain may not destroy itself via this path. */
            if ( d != current->domain )
            {
                domain_kill(d);
                ret = 0;
            }
            put_domain(d);
        }
    }
    break;

    case DOM0_PINCPUDOMAIN:
    {
        domid_t dom = op->u.pincpudomain.domain;
        struct domain *d = find_domain_by_id(dom);
        struct vcpu *v;

        if ( d == NULL )
        {
            ret = -ESRCH;
            break;
        }

        if ( (op->u.pincpudomain.vcpu >= MAX_VIRT_CPUS) ||
             !d->vcpu[op->u.pincpudomain.vcpu] )
        {
            ret = -EINVAL;
            put_domain(d);
            break;
        }

        v = d->vcpu[op->u.pincpudomain.vcpu];
        /* NOTE(review): dead check — the !d->vcpu[...] test above already
         * rejected a NULL vcpu, so v cannot be NULL here. */
        if ( v == NULL )
        {
            ret = -ESRCH;
            put_domain(d);
            break;
        }

        /* Cannot repin the vcpu we are currently running on. */
        if ( v == current )
        {
            ret = -EINVAL;
            put_domain(d);
            break;
        }

        v->cpumap = op->u.pincpudomain.cpumap;

        if ( v->cpumap == CPUMAP_RUNANYWHERE )
        {
            /* Unrestricted map: clear the pin and let the scheduler roam. */
            clear_bit(_VCPUF_cpu_pinned, &v->vcpu_flags);
        }
        else
        {
            /* pick a new cpu from the usable map */
            int new_cpu;
            new_cpu = (int)find_first_set_bit(v->cpumap) % num_online_cpus();
            /* Pause around the migration so the vcpu moves atomically. */
            vcpu_pause(v);
            vcpu_migrate_cpu(v, new_cpu);
            set_bit(_VCPUF_cpu_pinned, &v->vcpu_flags);
            vcpu_unpause(v);
        }

        put_domain(d);
    }
    break;

    case DOM0_SCHEDCTL:
    {
        ret = sched_ctl(&op->u.schedctl);
        if ( copy_to_user(u_dom0_op, op, sizeof(*op)) )
            ret = -EFAULT;
    }
    break;

    case DOM0_ADJUSTDOM:
    {
        ret = sched_adjdom(&op->u.adjustdom);
        if ( copy_to_user(u_dom0_op, op, sizeof(*op)) )
            ret = -EFAULT;
    }
    break;

    case DOM0_GETDOMAININFO:
    {
        struct domain *d;

        /* Find the first domain with id >= the requested id. */
        read_lock(&domlist_lock);

        for_each_domain ( d )
        {
            if ( d->domain_id >= op->u.getdomaininfo.domain )
                break;
        }

        /* Take a reference before dropping the list lock. */
        if ( (d == NULL) || !get_domain(d) )
        {
            read_unlock(&domlist_lock);
            ret = -ESRCH;
            break;
        }

        read_unlock(&domlist_lock);

        getdomaininfo(d, &op->u.getdomaininfo);

        if ( copy_to_user(u_dom0_op, op, sizeof(*op)) )
            ret = -EFAULT;

        put_domain(d);
    }
    break;

    case DOM0_GETDOMAININFOLIST:
    {
        struct domain *d;
        dom0_getdomaininfo_t info;
        dom0_getdomaininfo_t *buffer = op->u.getdomaininfolist.buffer;
        u32 num_domains = 0;

        /* Walk the domain list, copying one info record per domain
         * (starting at first_domain, at most max_domains entries). */
        read_lock(&domlist_lock);

        for_each_domain ( d )
        {
            if ( d->domain_id < op->u.getdomaininfolist.first_domain )
                continue;
            if ( num_domains == op->u.getdomaininfolist.max_domains )
                break;
            if ( (d == NULL) || !get_domain(d) )
            {
                ret = -ESRCH;
                break;
            }

            getdomaininfo(d, &info);

            put_domain(d);

            if ( copy_to_user(buffer, &info, sizeof(dom0_getdomaininfo_t)) )
            {
                ret = -EFAULT;
                break;
            }

            buffer++;
            num_domains++;
        }

        read_unlock(&domlist_lock);

        if ( ret != 0 )
            break;

        op->u.getdomaininfolist.num_domains = num_domains;

        if ( copy_to_user(u_dom0_op, op, sizeof(*op)) )
            ret = -EFAULT;
    }
    break;

    case DOM0_GETVCPUCONTEXT:
    {
        struct vcpu_guest_context *c;
        struct domain *d;
        struct vcpu *v;

        ret = -ESRCH;
        if ( (d = find_domain_by_id(op->u.getvcpucontext.domain)) == NULL )
            break;

        ret = -EINVAL;
        if ( op->u.getvcpucontext.vcpu >= MAX_VIRT_CPUS )
            goto getvcpucontext_out;

        ret = -ESRCH;
        if ( (v = d->vcpu[op->u.getvcpucontext.vcpu]) == NULL )
            goto getvcpucontext_out;

        ret = -ENOMEM;
        if ( (c = xmalloc(struct vcpu_guest_context)) == NULL )
            goto getvcpucontext_out;

        /* Pause the target (unless it is us) for a consistent snapshot. */
        if ( v != current )
            vcpu_pause(v);

        arch_getdomaininfo_ctxt(v,c);
        ret = 0;

        if ( v != current )
            vcpu_unpause(v);

        if ( copy_to_user(op->u.getvcpucontext.ctxt, c, sizeof(*c)) )
            ret = -EFAULT;

        xfree(c);

        if ( copy_to_user(u_dom0_op, op, sizeof(*op)) )
            ret = -EFAULT;

    getvcpucontext_out:
        put_domain(d);
    }
    break;

    case DOM0_GETVCPUINFO:
    {
        struct domain *d;
        struct vcpu *v;

        ret = -ESRCH;
        if ( (d = find_domain_by_id(op->u.getvcpuinfo.domain)) == NULL )
            break;

        ret = -EINVAL;
        if ( op->u.getvcpuinfo.vcpu >= MAX_VIRT_CPUS )
            goto getvcpuinfo_out;

        ret = -ESRCH;
        if ( (v = d->vcpu[op->u.getvcpuinfo.vcpu]) == NULL )
            goto getvcpuinfo_out;

        op->u.getvcpuinfo.online   = !test_bit(_VCPUF_down, &v->vcpu_flags);
        op->u.getvcpuinfo.blocked  = test_bit(_VCPUF_blocked, &v->vcpu_flags);
        op->u.getvcpuinfo.running  = test_bit(_VCPUF_running, &v->vcpu_flags);
        op->u.getvcpuinfo.cpu_time = v->cpu_time;
        op->u.getvcpuinfo.cpu      = v->processor;
        op->u.getvcpuinfo.cpumap   = v->cpumap;
        ret = 0;

        if ( copy_to_user(u_dom0_op, op, sizeof(*op)) )
            ret = -EFAULT;

    getvcpuinfo_out:
        put_domain(d);
    }
    break;

    case DOM0_SETTIME:
    {
        do_settime(op->u.settime.secs,
                   op->u.settime.nsecs,
                   op->u.settime.system_time);
        ret = 0;
    }
    break;

    case DOM0_TBUFCONTROL:
    {
        ret = tb_control(&op->u.tbufcontrol);
        if ( copy_to_user(u_dom0_op, op, sizeof(*op)) )
            ret = -EFAULT;
    }
    break;

    case DOM0_READCONSOLE:
    {
        ret = read_console_ring(
            &op->u.readconsole.buffer,
            &op->u.readconsole.count,
            op->u.readconsole.clear);
        if ( copy_to_user(u_dom0_op, op, sizeof(*op)) )
            ret = -EFAULT;
    }
    break;

    case DOM0_SCHED_ID:
    {
        op->u.sched_id.sched_id = sched_id();
        if ( copy_to_user(u_dom0_op, op, sizeof(*op)) )
            ret = -EFAULT;
        else
            ret = 0;
    }
    break;

    case DOM0_SETDOMAINMAXMEM:
    {
        struct domain *d;
        ret = -ESRCH;
        d = find_domain_by_id(op->u.setdomainmaxmem.domain);
        if ( d != NULL )
        {
            /* Convert kB to pages. */
            d->max_pages = op->u.setdomainmaxmem.max_memkb >> (PAGE_SHIFT-10);
            put_domain(d);
            ret = 0;
        }
    }
    break;

    case DOM0_SETDOMAINHANDLE:
    {
        struct domain *d;
        ret = -ESRCH;
        d = find_domain_by_id(op->u.setdomainhandle.domain);
        if ( d != NULL )
        {
            memcpy(d->handle, op->u.setdomainhandle.handle,
                   sizeof(xen_domain_handle_t));
            put_domain(d);
            ret = 0;
        }
    }
    break;

    case DOM0_SETDEBUGGING:
    {
        struct domain *d;
        ret = -ESRCH;
        d = find_domain_by_id(op->u.setdebugging.domain);
        if ( d != NULL )
        {
            /* Toggle the per-domain debugging flag at run time. */
            if ( op->u.setdebugging.enable )
                set_bit(_DOMF_debugging, &d->domain_flags);
            else
                clear_bit(_DOMF_debugging, &d->domain_flags);
            put_domain(d);
            ret = 0;
        }
    }
    break;

#ifdef PERF_COUNTERS
    case DOM0_PERFCCONTROL:
    {
        extern int perfc_control(dom0_perfccontrol_t *);
        ret = perfc_control(&op->u.perfccontrol);
        if ( copy_to_user(u_dom0_op, op, sizeof(*op)) )
            ret = -EFAULT;
    }
    break;
#endif

    default:
        /* Unknown commands fall through to the architecture handler. */
        ret = arch_do_dom0_op(op,u_dom0_op);

    }

    spin_unlock(&dom0_lock);

    /* ACM post/fail hooks consume the security pointer saved by the
     * pre-hook above. */
    if (!ret)
        acm_post_dom0_op(op, ssid);
    else
        acm_fail_dom0_op(op, ssid);

    return ret;
}
623 /*
624 * Local variables:
625 * mode: C
626 * c-set-style: "BSD"
627 * c-basic-offset: 4
628 * tab-width: 4
629 * indent-tabs-mode: nil
630 * End:
631 */