ia64/xen-unstable

view xen/common/dom0_ops.c @ 7395:27d7c7f226f4

Extend VCPUINFO dom0_op to return status information about
run state of the VCPU. VCPUCONTEXT returns info about
hotplugged VCPUs.
Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Sat Oct 15 08:52:22 2005 +0100 (2005-10-15)
parents d48bc069122c
children fd7b8b051466
line source
1 /******************************************************************************
2 * dom0_ops.c
3 *
4 * Process command requests from domain-0 guest OS.
5 *
6 * Copyright (c) 2002, K A Fraser
7 */
9 #include <xen/config.h>
10 #include <xen/types.h>
11 #include <xen/lib.h>
12 #include <xen/mm.h>
13 #include <xen/sched.h>
14 #include <xen/domain.h>
15 #include <xen/event.h>
16 #include <xen/domain_page.h>
17 #include <xen/trace.h>
18 #include <xen/console.h>
19 #include <asm/current.h>
20 #include <public/dom0_ops.h>
21 #include <public/sched_ctl.h>
22 #include <acm/acm_hooks.h>
24 extern long arch_do_dom0_op(dom0_op_t *op, dom0_op_t *u_dom0_op);
25 extern void arch_getdomaininfo_ctxt(
26 struct vcpu *, struct vcpu_guest_context *);
28 static inline int is_free_domid(domid_t dom)
29 {
30 struct domain *d;
32 if ( dom >= DOMID_FIRST_RESERVED )
33 return 0;
35 if ( (d = find_domain_by_id(dom)) == NULL )
36 return 1;
38 put_domain(d);
39 return 0;
40 }
42 static void getdomaininfo(struct domain *d, dom0_getdomaininfo_t *info)
43 {
44 struct vcpu *v;
45 u64 cpu_time = 0;
46 int vcpu_count = 0;
47 int flags = DOMFLAGS_BLOCKED;
49 info->domain = d->domain_id;
51 /*
52 * - domain is marked as blocked only if all its vcpus are blocked
53 * - domain is marked as running if any of its vcpus is running
54 */
55 for_each_vcpu ( d, v ) {
56 cpu_time += v->cpu_time;
57 if ( !test_bit(_VCPUF_down, &v->vcpu_flags) )
58 {
59 if ( !(v->vcpu_flags & VCPUF_blocked) )
60 flags &= ~DOMFLAGS_BLOCKED;
61 if ( v->vcpu_flags & VCPUF_running )
62 flags |= DOMFLAGS_RUNNING;
63 vcpu_count++;
64 }
65 }
67 info->cpu_time = cpu_time;
68 info->n_vcpu = vcpu_count;
70 info->flags = flags |
71 ((d->domain_flags & DOMF_dying) ? DOMFLAGS_DYING : 0) |
72 ((d->domain_flags & DOMF_shutdown) ? DOMFLAGS_SHUTDOWN : 0) |
73 ((d->domain_flags & DOMF_ctrl_pause) ? DOMFLAGS_PAUSED : 0) |
74 d->shutdown_code << DOMFLAGS_SHUTDOWNSHIFT;
76 if (d->ssid != NULL)
77 info->ssidref = ((struct acm_ssid_domain *)d->ssid)->ssidref;
78 else
79 info->ssidref = ACM_DEFAULT_SSID;
81 info->tot_pages = d->tot_pages;
82 info->max_pages = d->max_pages;
83 info->shared_info_frame = __pa(d->shared_info) >> PAGE_SHIFT;
85 memcpy(info->handle, d->handle, sizeof(xen_domain_handle_t));
86 }
/*
 * Top-level handler for dom0_op hypercalls from the privileged (control)
 * domain.  Copies the request from guest memory, runs the ACM security
 * pre-hook, dispatches on op->cmd under a single global lock, and reports
 * the outcome to the ACM post/fail hooks.  Returns 0 on success or a
 * negative errno-style value.
 */
long do_dom0_op(dom0_op_t *u_dom0_op)
{
    long ret = 0;
    dom0_op_t curop, *op = &curop;
    void *ssid = NULL; /* save security ptr between pre and post/fail hooks */
    static spinlock_t dom0_lock = SPIN_LOCK_UNLOCKED;

    /* Only the privileged (control) domain may issue dom0 operations. */
    if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( copy_from_user(op, u_dom0_op, sizeof(*op)) )
        return -EFAULT;

    /* Reject tools built against a different dom0 interface version. */
    if ( op->interface_version != DOM0_INTERFACE_VERSION )
        return -EACCES;

    /* ACM security pre-hook; may veto the operation outright. */
    if ( acm_pre_dom0_op(op, &ssid) )
        return -EACCES;

    /* All dom0 operations are serialised system-wide under this lock. */
    spin_lock(&dom0_lock);

    switch ( op->cmd )
    {

    /* Load initial register/context state into a domain's VCPU. */
    case DOM0_SETDOMAININFO:
    {
        struct domain *d = find_domain_by_id(op->u.setdomaininfo.domain);
        ret = -ESRCH;
        if ( d != NULL )
        {
            ret = set_info_guest(d, &op->u.setdomaininfo);
            put_domain(d);
        }
    }
    break;

    /* Pause a domain on behalf of the system controller. */
    case DOM0_PAUSEDOMAIN:
    {
        struct domain *d = find_domain_by_id(op->u.pausedomain.domain);
        ret = -ESRCH;
        if ( d != NULL )
        {
            ret = -EINVAL;
            /* A domain may not pause itself through this interface. */
            if ( d != current->domain )
            {
                domain_pause_by_systemcontroller(d);
                ret = 0;
            }
            put_domain(d);
        }
    }
    break;

    /* Unpause a domain; only valid once VCPU0 has been initialised. */
    case DOM0_UNPAUSEDOMAIN:
    {
        struct domain *d = find_domain_by_id(op->u.unpausedomain.domain);
        ret = -ESRCH;
        if ( d != NULL )
        {
            ret = -EINVAL;
            if ( (d != current->domain) &&
                 test_bit(_VCPUF_initialised, &d->vcpu[0]->vcpu_flags) )
            {
                domain_unpause_by_systemcontroller(d);
                ret = 0;
            }
            put_domain(d);
        }
    }
    break;

    /* Create a new domain, auto-allocating an ID if none was requested. */
    case DOM0_CREATEDOMAIN:
    {
        struct domain *d;
        unsigned int pro;
        domid_t dom;
        struct vcpu *v;
        unsigned int i, cnt[NR_CPUS] = { 0 };
        static domid_t rover = 0; /* last auto-allocated domain ID */

        dom = op->u.createdomain.domain;
        if ( (dom > 0) && (dom < DOMID_FIRST_RESERVED) )
        {
            /* Caller requested a specific ID: it must be unused. */
            ret = -EINVAL;
            if ( !is_free_domid(dom) )
                break;
        }
        else
        {
            /* Scan the full ID space once, starting just past the rover. */
            for ( dom = rover + 1; dom != rover; dom++ )
            {
                if ( dom == DOMID_FIRST_RESERVED )
                    dom = 0; /* wrap around, skipping reserved IDs */
                if ( is_free_domid(dom) )
                    break;
            }

            ret = -ENOMEM;
            if ( dom == rover )
                break; /* wrapped all the way round: no free IDs */

            rover = dom;
        }

        /* Do an initial CPU placement. Pick the least-populated CPU. */
        read_lock(&domlist_lock);
        for_each_domain ( d )
            for_each_vcpu ( d, v )
                cnt[v->processor]++;
        read_unlock(&domlist_lock);

        /*
         * If we're on a HT system, we only use the first HT for dom0, other
         * domains will all share the second HT of each CPU. Since dom0 is on
         * CPU 0, we favour high numbered CPUs in the event of a tie.
         */
        pro = smp_num_siblings - 1;
        for ( i = pro; i < num_online_cpus(); i += smp_num_siblings )
            if ( cnt[i] <= cnt[pro] )
                pro = i;

        ret = -ENOMEM;
        if ( (d = do_createdomain(dom, pro)) == NULL )
            break;

        memcpy(d->handle, op->u.createdomain.handle,
               sizeof(xen_domain_handle_t));

        ret = 0;

        /* Report the (possibly auto-chosen) domain ID back to the caller. */
        op->u.createdomain.domain = d->domain_id;
        copy_to_user(u_dom0_op, op, sizeof(*op));
    }
    break;

    /* Grow a not-yet-running domain's VCPU count up to 'max'. */
    case DOM0_MAX_VCPUS:
    {
        struct domain *d;
        unsigned int i, max = op->u.max_vcpus.max, cpu;

        ret = -EINVAL;
        if ( max > MAX_VIRT_CPUS )
            break;

        ret = -ESRCH;
        if ( (d = find_domain_by_id(op->u.max_vcpus.domain)) == NULL )
            break;

        /*
         * Can only create new VCPUs while the domain is not fully constructed
         * (and hence not runnable). Xen needs auditing for races before
         * removing this check.
         */
        ret = -EINVAL;
        if ( test_bit(_VCPUF_initialised, &d->vcpu[0]->vcpu_flags) )
            goto maxvcpu_out;

        /* We cannot reduce maximum VCPUs. */
        ret = -EINVAL;
        if ( (max != MAX_VIRT_CPUS) && (d->vcpu[max] != NULL) )
            goto maxvcpu_out;

        ret = -ENOMEM;
        for ( i = 0; i < max; i++ )
        {
            if ( d->vcpu[i] == NULL )
            {
                /*
                 * Place each new VCPU on the CPU after its predecessor's.
                 * NOTE(review): the i-1 index is safe only because vcpu[0]
                 * is always allocated at domain creation, so the first NULL
                 * slot is never slot 0 — confirm against do_createdomain.
                 */
                cpu = (d->vcpu[i-1]->processor + 1) % num_online_cpus();
                if ( alloc_vcpu(d, i, cpu) == NULL )
                    goto maxvcpu_out;
            }
        }

        ret = 0;

    maxvcpu_out:
        put_domain(d);
    }
    break;

    /* Mark a domain as dying; actual teardown completes asynchronously. */
    case DOM0_DESTROYDOMAIN:
    {
        struct domain *d = find_domain_by_id(op->u.destroydomain.domain);
        ret = -ESRCH;
        if ( d != NULL )
        {
            ret = -EINVAL;
            /* A domain cannot destroy itself through this interface. */
            if ( d != current->domain )
            {
                domain_kill(d);
                ret = 0;
            }
            put_domain(d);
        }
    }
    break;

    /* Set a VCPU's CPU affinity map, migrating it if now pinned. */
    case DOM0_PINCPUDOMAIN:
    {
        domid_t dom = op->u.pincpudomain.domain;
        struct domain *d = find_domain_by_id(dom);
        struct vcpu *v;

        if ( d == NULL )
        {
            ret = -ESRCH;
            break;
        }

        if ( (op->u.pincpudomain.vcpu >= MAX_VIRT_CPUS) ||
             !d->vcpu[op->u.pincpudomain.vcpu] )
        {
            ret = -EINVAL;
            put_domain(d);
            break;
        }

        v = d->vcpu[op->u.pincpudomain.vcpu];
        /*
         * NOTE(review): this NULL check is redundant — the bounds check
         * above already rejected a NULL vcpu slot.  Harmless dead code.
         */
        if ( v == NULL )
        {
            ret = -ESRCH;
            put_domain(d);
            break;
        }

        /* Cannot repin the VCPU this hypercall is executing on. */
        if ( v == current )
        {
            ret = -EINVAL;
            put_domain(d);
            break;
        }

        v->cpumap = op->u.pincpudomain.cpumap;

        if ( v->cpumap == CPUMAP_RUNANYWHERE )
        {
            /* Unpin: scheduler may place the VCPU anywhere again. */
            clear_bit(_VCPUF_cpu_pinned, &v->vcpu_flags);
        }
        else
        {
            /* pick a new cpu from the usable map */
            int new_cpu;
            new_cpu = (int)find_first_set_bit(v->cpumap) % num_online_cpus();
            /* Pause around the migration so the VCPU is not running. */
            vcpu_pause(v);
            vcpu_migrate_cpu(v, new_cpu);
            set_bit(_VCPUF_cpu_pinned, &v->vcpu_flags);
            vcpu_unpause(v);
        }

        put_domain(d);
    }
    break;

    /* Global scheduler control; result copied back to the caller. */
    case DOM0_SCHEDCTL:
    {
        ret = sched_ctl(&op->u.schedctl);
        copy_to_user(u_dom0_op, op, sizeof(*op));
    }
    break;

    /* Per-domain scheduler parameter adjustment. */
    case DOM0_ADJUSTDOM:
    {
        ret = sched_adjdom(&op->u.adjustdom);
        copy_to_user(u_dom0_op, op, sizeof(*op));
    }
    break;

    /* Return info for the first domain with ID >= the requested one. */
    case DOM0_GETDOMAININFO:
    {
        struct domain *d;

        read_lock(&domlist_lock);

        for_each_domain ( d )
        {
            if ( d->domain_id >= op->u.getdomaininfo.domain )
                break;
        }

        /* Take a reference before dropping the list lock. */
        if ( (d == NULL) || !get_domain(d) )
        {
            read_unlock(&domlist_lock);
            ret = -ESRCH;
            break;
        }

        read_unlock(&domlist_lock);

        getdomaininfo(d, &op->u.getdomaininfo);

        if ( copy_to_user(u_dom0_op, op, sizeof(*op)) )
            ret = -EINVAL;

        put_domain(d);
    }
    break;

    /* Batch version: fill the caller's buffer with per-domain info. */
    case DOM0_GETDOMAININFOLIST:
    {
        struct domain *d;
        dom0_getdomaininfo_t info;
        dom0_getdomaininfo_t *buffer = op->u.getdomaininfolist.buffer;
        u32 num_domains = 0;

        read_lock(&domlist_lock);

        for_each_domain ( d )
        {
            if ( d->domain_id < op->u.getdomaininfolist.first_domain )
                continue;
            if ( num_domains == op->u.getdomaininfolist.max_domains )
                break;
            if ( (d == NULL) || !get_domain(d) )
            {
                ret = -ESRCH;
                break;
            }

            getdomaininfo(d, &info);

            put_domain(d);

            if ( copy_to_user(buffer, &info, sizeof(dom0_getdomaininfo_t)) )
            {
                ret = -EINVAL;
                break;
            }

            buffer++;
            num_domains++;
        }

        read_unlock(&domlist_lock);

        if ( ret != 0 )
            break;

        op->u.getdomaininfolist.num_domains = num_domains;

        if ( copy_to_user(u_dom0_op, op, sizeof(*op)) )
            ret = -EINVAL;
    }
    break;

    /* Return the full register context of one (initialised) VCPU. */
    case DOM0_GETVCPUCONTEXT:
    {
        struct vcpu_guest_context *c;
        struct domain *d;
        struct vcpu *v;

        ret = -ESRCH;
        if ( (d = find_domain_by_id(op->u.getvcpucontext.domain)) == NULL )
            break;

        ret = -EINVAL;
        if ( op->u.getvcpucontext.vcpu >= MAX_VIRT_CPUS )
            goto getvcpucontext_out;

        ret = -ESRCH;
        v = d->vcpu[op->u.getvcpucontext.vcpu];
        if ( (v == NULL) || !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
            goto getvcpucontext_out;

        ret = -ENOMEM;
        if ( (c = xmalloc(struct vcpu_guest_context)) == NULL )
            goto getvcpucontext_out;

        /* Pause the VCPU (unless it is us) so its context is stable. */
        if ( v != current )
            vcpu_pause(v);

        arch_getdomaininfo_ctxt(v,c);
        ret = 0;

        if ( v != current )
            vcpu_unpause(v);

        if ( copy_to_user(op->u.getvcpucontext.ctxt, c, sizeof(*c)) )
            ret = -EFAULT;

        xfree(c);

        if ( copy_to_user(u_dom0_op, op, sizeof(*op)) )
            ret = -EFAULT;

    getvcpucontext_out:
        put_domain(d);
    }
    break;

    /* Return lightweight run-state information about one VCPU. */
    case DOM0_GETVCPUINFO:
    {
        struct domain *d;
        struct vcpu *v;

        ret = -ESRCH;
        if ( (d = find_domain_by_id(op->u.getvcpuinfo.domain)) == NULL )
            break;

        ret = -EINVAL;
        if ( op->u.getvcpuinfo.vcpu >= MAX_VIRT_CPUS )
            goto getvcpuinfo_out;

        ret = -ESRCH;
        if ( (v = d->vcpu[op->u.getvcpuinfo.vcpu]) == NULL )
            goto getvcpuinfo_out;

        op->u.getvcpuinfo.online   = !test_bit(_VCPUF_down, &v->vcpu_flags);
        op->u.getvcpuinfo.blocked  = test_bit(_VCPUF_blocked, &v->vcpu_flags);
        op->u.getvcpuinfo.running  = test_bit(_VCPUF_running, &v->vcpu_flags);
        op->u.getvcpuinfo.cpu_time = v->cpu_time;
        op->u.getvcpuinfo.cpu      = v->processor;
        op->u.getvcpuinfo.cpumap   = v->cpumap;
        ret = 0;

        if ( copy_to_user(u_dom0_op, op, sizeof(*op)) )
            ret = -EFAULT;

    getvcpuinfo_out:
        put_domain(d);
    }
    break;

    /* Set the system wall-clock time. */
    case DOM0_SETTIME:
    {
        do_settime(op->u.settime.secs,
                   op->u.settime.nsecs,
                   op->u.settime.system_time);
        ret = 0;
    }
    break;

#ifdef TRACE_BUFFER
    /* Trace-buffer control (only when built with tracing support). */
    case DOM0_TBUFCONTROL:
    {
        ret = tb_control(&op->u.tbufcontrol);
        copy_to_user(u_dom0_op, op, sizeof(*op));
    }
    break;
#endif

    /* Read (and optionally clear) the hypervisor console ring. */
    case DOM0_READCONSOLE:
    {
        ret = read_console_ring(
            &op->u.readconsole.buffer,
            &op->u.readconsole.count,
            op->u.readconsole.clear);
        copy_to_user(u_dom0_op, op, sizeof(*op));
    }
    break;

    /* Report which scheduler is active. */
    case DOM0_SCHED_ID:
    {
        op->u.sched_id.sched_id = sched_id();
        copy_to_user(u_dom0_op, op, sizeof(*op));
        ret = 0;
    }
    break;

    /* Set a domain's memory ceiling (input is in KiB, stored in pages). */
    case DOM0_SETDOMAINMAXMEM:
    {
        struct domain *d;
        ret = -ESRCH;
        d = find_domain_by_id(op->u.setdomainmaxmem.domain);
        if ( d != NULL )
        {
            d->max_pages = op->u.setdomainmaxmem.max_memkb >> (PAGE_SHIFT-10);
            put_domain(d);
            ret = 0;
        }
    }
    break;

    /* Set a domain's opaque toolstack handle (e.g. a UUID). */
    case DOM0_SETDOMAINHANDLE:
    {
        struct domain *d;
        ret = -ESRCH;
        d = find_domain_by_id(op->u.setdomainhandle.domain);
        if ( d != NULL )
        {
            memcpy(d->handle, op->u.setdomainhandle.handle,
                   sizeof(xen_domain_handle_t));
            put_domain(d);
            ret = 0;
        }
    }
    break;

#ifdef PERF_COUNTERS
    /* Performance-counter control (only when built with perf counters). */
    case DOM0_PERFCCONTROL:
    {
        extern int perfc_control(dom0_perfccontrol_t *);
        ret = perfc_control(&op->u.perfccontrol);
        copy_to_user(u_dom0_op, op, sizeof(*op));
    }
    break;
#endif

    /* Unknown commands may be architecture-specific. */
    default:
        ret = arch_do_dom0_op(op,u_dom0_op);

    }

    spin_unlock(&dom0_lock);

    /* Tell the ACM module whether the operation succeeded or failed. */
    if (!ret)
        acm_post_dom0_op(op, ssid);
    else
        acm_fail_dom0_op(op, ssid);

    return ret;
}
602 /*
603 * Local variables:
604 * mode: C
605 * c-set-style: "BSD"
606 * c-basic-offset: 4
607 * tab-width: 4
608 * indent-tabs-mode: nil
609 * End:
610 */