ia64/xen-unstable

xen/common/dom0_ops.c @ 7396:fd7b8b051466

At least for the time being, GETVCPUCONTEXT needs to work
even for uninitialised VCPUs. xc_linux_build() depends on
it (rather stupidly).

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Sat Oct 15 09:32:10 2005 +0100 (2005-10-15)
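To make the xc_linux_build() dependency concrete, here is a minimal sketch of how a dom0 tool of this era could drive GETVCPUCONTEXT. The dom0_op_t layout and the getvcpucontext fields come from the handler below; fetch_vcpu_context() and do_dom0_op_userspace() (the privcmd-ioctl plumbing that libxc hides) are illustrative assumptions, not the actual tools code.

    /* Hypothetical caller -- a sketch, not the real xc_linux_build(). */
    #include <string.h>
    #include <public/dom0_ops.h>   /* dom0_op_t, DOM0_GETVCPUCONTEXT */

    /* Assumed helper that issues the dom0_op hypercall through the
     * privcmd device; in the real tools this lives inside libxc. */
    extern int do_dom0_op_userspace(dom0_op_t *op);

    static int fetch_vcpu_context(domid_t dom, unsigned int vcpu,
                                  struct vcpu_guest_context *ctxt)
    {
        dom0_op_t op;

        memset(&op, 0, sizeof(op));
        op.cmd = DOM0_GETVCPUCONTEXT;
        op.interface_version = DOM0_INTERFACE_VERSION;
        op.u.getvcpucontext.domain = dom;
        op.u.getvcpucontext.vcpu   = vcpu;
        op.u.getvcpucontext.ctxt   = ctxt;

        /* Must succeed even for an uninitialised VCPU: the hypervisor
         * pauses it, snapshots the (possibly blank) context, and copies
         * it out -- exactly the property this changeset preserves. */
        return do_dom0_op_userspace(&op);
    }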
/******************************************************************************
 * dom0_ops.c
 *
 * Process command requests from domain-0 guest OS.
 *
 * Copyright (c) 2002, K A Fraser
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/lib.h>
#include <xen/mm.h>
#include <xen/sched.h>
#include <xen/domain.h>
#include <xen/event.h>
#include <xen/domain_page.h>
#include <xen/trace.h>
#include <xen/console.h>
#include <asm/current.h>
#include <public/dom0_ops.h>
#include <public/sched_ctl.h>
#include <acm/acm_hooks.h>

extern long arch_do_dom0_op(dom0_op_t *op, dom0_op_t *u_dom0_op);
extern void arch_getdomaininfo_ctxt(
    struct vcpu *, struct vcpu_guest_context *);

static inline int is_free_domid(domid_t dom)
{
    struct domain *d;

    if ( dom >= DOMID_FIRST_RESERVED )
        return 0;

    if ( (d = find_domain_by_id(dom)) == NULL )
        return 1;

    put_domain(d);
    return 0;
}

static void getdomaininfo(struct domain *d, dom0_getdomaininfo_t *info)
{
    struct vcpu *v;
    u64 cpu_time = 0;
    int vcpu_count = 0;
    int flags = DOMFLAGS_BLOCKED;

    info->domain = d->domain_id;

    /*
     * - domain is marked as blocked only if all its vcpus are blocked
     * - domain is marked as running if any of its vcpus is running
     */
    for_each_vcpu ( d, v ) {
        cpu_time += v->cpu_time;
        if ( !test_bit(_VCPUF_down, &v->vcpu_flags) )
        {
            if ( !(v->vcpu_flags & VCPUF_blocked) )
                flags &= ~DOMFLAGS_BLOCKED;
            if ( v->vcpu_flags & VCPUF_running )
                flags |= DOMFLAGS_RUNNING;
            vcpu_count++;
        }
    }

    info->cpu_time = cpu_time;
    info->n_vcpu = vcpu_count;
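
    /*
     * Pack the coarse run-state bits and the guest's shutdown code into
     * a single flags word; the shutdown code occupies the bits above
     * DOMFLAGS_SHUTDOWNSHIFT.
     */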
    info->flags = flags |
        ((d->domain_flags & DOMF_dying)      ? DOMFLAGS_DYING    : 0) |
        ((d->domain_flags & DOMF_shutdown)   ? DOMFLAGS_SHUTDOWN : 0) |
        ((d->domain_flags & DOMF_ctrl_pause) ? DOMFLAGS_PAUSED   : 0) |
        d->shutdown_code << DOMFLAGS_SHUTDOWNSHIFT;

    if (d->ssid != NULL)
        info->ssidref = ((struct acm_ssid_domain *)d->ssid)->ssidref;
    else
        info->ssidref = ACM_DEFAULT_SSID;

    info->tot_pages         = d->tot_pages;
    info->max_pages         = d->max_pages;
    info->shared_info_frame = __pa(d->shared_info) >> PAGE_SHIFT;

    memcpy(info->handle, d->handle, sizeof(xen_domain_handle_t));
}

long do_dom0_op(dom0_op_t *u_dom0_op)
{
    long ret = 0;
    dom0_op_t curop, *op = &curop;
    void *ssid = NULL; /* save security ptr between pre and post/fail hooks */
    static spinlock_t dom0_lock = SPIN_LOCK_UNLOCKED;

    if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( copy_from_user(op, u_dom0_op, sizeof(*op)) )
        return -EFAULT;

    if ( op->interface_version != DOM0_INTERFACE_VERSION )
        return -EACCES;

    if ( acm_pre_dom0_op(op, &ssid) )
        return -EACCES;

    spin_lock(&dom0_lock);
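
    /* A single global lock serialises every dom0 control operation. */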
    switch ( op->cmd )
    {

    case DOM0_SETDOMAININFO:
    {
        struct domain *d = find_domain_by_id(op->u.setdomaininfo.domain);
        ret = -ESRCH;
        if ( d != NULL )
        {
            ret = set_info_guest(d, &op->u.setdomaininfo);
            put_domain(d);
        }
    }
    break;

    case DOM0_PAUSEDOMAIN:
    {
        struct domain *d = find_domain_by_id(op->u.pausedomain.domain);
        ret = -ESRCH;
        if ( d != NULL )
        {
            ret = -EINVAL;
            if ( d != current->domain )
            {
                domain_pause_by_systemcontroller(d);
                ret = 0;
            }
            put_domain(d);
        }
    }
    break;
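
    /*
     * Unlike pausing, unpausing also requires that VCPU0 has been given
     * an initial execution context (DOM0_SETDOMAININFO); an uninitialised
     * boot VCPU has nothing runnable to return to.
     */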
    case DOM0_UNPAUSEDOMAIN:
    {
        struct domain *d = find_domain_by_id(op->u.unpausedomain.domain);
        ret = -ESRCH;
        if ( d != NULL )
        {
            ret = -EINVAL;
            if ( (d != current->domain) &&
                 test_bit(_VCPUF_initialised, &d->vcpu[0]->vcpu_flags) )
            {
                domain_unpause_by_systemcontroller(d);
                ret = 0;
            }
            put_domain(d);
        }
    }
    break;

    case DOM0_CREATEDOMAIN:
    {
        struct domain *d;
        unsigned int pro;
        domid_t dom;
        struct vcpu *v;
        unsigned int i, cnt[NR_CPUS] = { 0 };
        static domid_t rover = 0;

        dom = op->u.createdomain.domain;
        if ( (dom > 0) && (dom < DOMID_FIRST_RESERVED) )
        {
            ret = -EINVAL;
            if ( !is_free_domid(dom) )
                break;
        }
        else
        {
            for ( dom = rover + 1; dom != rover; dom++ )
            {
                if ( dom == DOMID_FIRST_RESERVED )
                    dom = 0;
                if ( is_free_domid(dom) )
                    break;
            }

            ret = -ENOMEM;
            if ( dom == rover )
                break;

            rover = dom;
        }

        /* Do an initial CPU placement. Pick the least-populated CPU. */
        read_lock(&domlist_lock);
        for_each_domain ( d )
            for_each_vcpu ( d, v )
                cnt[v->processor]++;
        read_unlock(&domlist_lock);

        /*
         * If we're on a HT system, we only use the first HT for dom0, other
         * domains will all share the second HT of each CPU. Since dom0 is on
         * CPU 0, we favour high numbered CPUs in the event of a tie.
         */
        pro = smp_num_siblings - 1;
        for ( i = pro; i < num_online_cpus(); i += smp_num_siblings )
            if ( cnt[i] <= cnt[pro] )
                pro = i;
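
        /*
         * Worked example: with smp_num_siblings == 2 and CPUs 0-3 online,
         * the loop starts at pro = 1 and considers only CPUs 1 and 3; the
         * "<=" comparison makes CPU 3 win a tie, favouring the higher-
         * numbered CPU as the comment above intends.
         */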
        ret = -ENOMEM;
        if ( (d = do_createdomain(dom, pro)) == NULL )
            break;

        memcpy(d->handle, op->u.createdomain.handle,
               sizeof(xen_domain_handle_t));

        ret = 0;

        op->u.createdomain.domain = d->domain_id;
        copy_to_user(u_dom0_op, op, sizeof(*op));
    }
    break;

    case DOM0_MAX_VCPUS:
    {
        struct domain *d;
        unsigned int i, max = op->u.max_vcpus.max, cpu;

        ret = -EINVAL;
        if ( max > MAX_VIRT_CPUS )
            break;

        ret = -ESRCH;
        if ( (d = find_domain_by_id(op->u.max_vcpus.domain)) == NULL )
            break;

        /*
         * Can only create new VCPUs while the domain is not fully constructed
         * (and hence not runnable). Xen needs auditing for races before
         * removing this check.
         */
        ret = -EINVAL;
        if ( test_bit(_VCPUF_initialised, &d->vcpu[0]->vcpu_flags) )
            goto maxvcpu_out;

        /* We cannot reduce maximum VCPUs. */
        ret = -EINVAL;
        if ( (max != MAX_VIRT_CPUS) && (d->vcpu[max] != NULL) )
            goto maxvcpu_out;

        ret = -ENOMEM;
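        /*
         * Place each newly allocated VCPU on the CPU after its
         * predecessor's, round-robin over the online CPUs. Slot 0 never
         * takes the allocation branch in practice, since VCPU0 is created
         * with the domain, so the d->vcpu[i-1] access is safe.
         */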
        for ( i = 0; i < max; i++ )
        {
            if ( d->vcpu[i] == NULL )
            {
                cpu = (d->vcpu[i-1]->processor + 1) % num_online_cpus();
                if ( alloc_vcpu(d, i, cpu) == NULL )
                    goto maxvcpu_out;
            }
        }

        ret = 0;

    maxvcpu_out:
        put_domain(d);
    }
    break;

    case DOM0_DESTROYDOMAIN:
    {
        struct domain *d = find_domain_by_id(op->u.destroydomain.domain);
        ret = -ESRCH;
        if ( d != NULL )
        {
            ret = -EINVAL;
            if ( d != current->domain )
            {
                domain_kill(d);
                ret = 0;
            }
            put_domain(d);
        }
    }
    break;

    case DOM0_PINCPUDOMAIN:
    {
        domid_t dom = op->u.pincpudomain.domain;
        struct domain *d = find_domain_by_id(dom);
        struct vcpu *v;

        if ( d == NULL )
        {
            ret = -ESRCH;
            break;
        }

        if ( (op->u.pincpudomain.vcpu >= MAX_VIRT_CPUS) ||
             !d->vcpu[op->u.pincpudomain.vcpu] )
        {
            ret = -EINVAL;
            put_domain(d);
            break;
        }

        v = d->vcpu[op->u.pincpudomain.vcpu];
        if ( v == NULL )
        {
            ret = -ESRCH;
            put_domain(d);
            break;
        }

        if ( v == current )
        {
            ret = -EINVAL;
            put_domain(d);
            break;
        }
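
        /*
         * Each set bit in the cpumap marks a physical CPU this VCPU may
         * run on; CPUMAP_RUNANYWHERE lifts the restriction entirely.
         */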
        v->cpumap = op->u.pincpudomain.cpumap;

        if ( v->cpumap == CPUMAP_RUNANYWHERE )
        {
            clear_bit(_VCPUF_cpu_pinned, &v->vcpu_flags);
        }
        else
        {
            /* pick a new cpu from the usable map */
            int new_cpu;
            new_cpu = (int)find_first_set_bit(v->cpumap) % num_online_cpus();
            vcpu_pause(v);
            vcpu_migrate_cpu(v, new_cpu);
            set_bit(_VCPUF_cpu_pinned, &v->vcpu_flags);
            vcpu_unpause(v);
        }

        put_domain(d);
    }
    break;

    case DOM0_SCHEDCTL:
    {
        ret = sched_ctl(&op->u.schedctl);
        copy_to_user(u_dom0_op, op, sizeof(*op));
    }
    break;

    case DOM0_ADJUSTDOM:
    {
        ret = sched_adjdom(&op->u.adjustdom);
        copy_to_user(u_dom0_op, op, sizeof(*op));
    }
    break;
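
    /*
     * Returns info for the first domain whose ID is >= the requested ID,
     * so a caller can enumerate all domains by repeatedly asking for
     * (last returned ID + 1).
     */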
    case DOM0_GETDOMAININFO:
    {
        struct domain *d;

        read_lock(&domlist_lock);

        for_each_domain ( d )
        {
            if ( d->domain_id >= op->u.getdomaininfo.domain )
                break;
        }

        if ( (d == NULL) || !get_domain(d) )
        {
            read_unlock(&domlist_lock);
            ret = -ESRCH;
            break;
        }

        read_unlock(&domlist_lock);

        getdomaininfo(d, &op->u.getdomaininfo);

        if ( copy_to_user(u_dom0_op, op, sizeof(*op)) )
            ret = -EINVAL;

        put_domain(d);
    }
    break;

    case DOM0_GETDOMAININFOLIST:
    {
        struct domain *d;
        dom0_getdomaininfo_t info;
        dom0_getdomaininfo_t *buffer = op->u.getdomaininfolist.buffer;
        u32 num_domains = 0;

        read_lock(&domlist_lock);

        for_each_domain ( d )
        {
            if ( d->domain_id < op->u.getdomaininfolist.first_domain )
                continue;
            if ( num_domains == op->u.getdomaininfolist.max_domains )
                break;
            if ( (d == NULL) || !get_domain(d) )
            {
                ret = -ESRCH;
                break;
            }

            getdomaininfo(d, &info);

            put_domain(d);

            if ( copy_to_user(buffer, &info, sizeof(dom0_getdomaininfo_t)) )
            {
                ret = -EINVAL;
                break;
            }

            buffer++;
            num_domains++;
        }

        read_unlock(&domlist_lock);

        if ( ret != 0 )
            break;

        op->u.getdomaininfolist.num_domains = num_domains;

        if ( copy_to_user(u_dom0_op, op, sizeof(*op)) )
            ret = -EINVAL;
    }
    break;
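
    /*
     * Per this changeset: GETVCPUCONTEXT must keep working even for
     * uninitialised VCPUs, because xc_linux_build() depends on fetching
     * the context of a VCPU that has not yet been set up.
     */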
    case DOM0_GETVCPUCONTEXT:
    {
        struct vcpu_guest_context *c;
        struct domain *d;
        struct vcpu *v;

        ret = -ESRCH;
        if ( (d = find_domain_by_id(op->u.getvcpucontext.domain)) == NULL )
            break;

        ret = -EINVAL;
        if ( op->u.getvcpucontext.vcpu >= MAX_VIRT_CPUS )
            goto getvcpucontext_out;

        ret = -ESRCH;
        if ( (v = d->vcpu[op->u.getvcpucontext.vcpu]) == NULL )
            goto getvcpucontext_out;

        ret = -ENOMEM;
        if ( (c = xmalloc(struct vcpu_guest_context)) == NULL )
            goto getvcpucontext_out;

        if ( v != current )
            vcpu_pause(v);

        arch_getdomaininfo_ctxt(v,c);
        ret = 0;

        if ( v != current )
            vcpu_unpause(v);

        if ( copy_to_user(op->u.getvcpucontext.ctxt, c, sizeof(*c)) )
            ret = -EFAULT;

        xfree(c);

        if ( copy_to_user(u_dom0_op, op, sizeof(*op)) )
            ret = -EFAULT;

    getvcpucontext_out:
        put_domain(d);
    }
    break;

    case DOM0_GETVCPUINFO:
    {
        struct domain *d;
        struct vcpu *v;

        ret = -ESRCH;
        if ( (d = find_domain_by_id(op->u.getvcpuinfo.domain)) == NULL )
            break;

        ret = -EINVAL;
        if ( op->u.getvcpuinfo.vcpu >= MAX_VIRT_CPUS )
            goto getvcpuinfo_out;

        ret = -ESRCH;
        if ( (v = d->vcpu[op->u.getvcpuinfo.vcpu]) == NULL )
            goto getvcpuinfo_out;

        op->u.getvcpuinfo.online   = !test_bit(_VCPUF_down, &v->vcpu_flags);
        op->u.getvcpuinfo.blocked  = test_bit(_VCPUF_blocked, &v->vcpu_flags);
        op->u.getvcpuinfo.running  = test_bit(_VCPUF_running, &v->vcpu_flags);
        op->u.getvcpuinfo.cpu_time = v->cpu_time;
        op->u.getvcpuinfo.cpu      = v->processor;
        op->u.getvcpuinfo.cpumap   = v->cpumap;
        ret = 0;

        if ( copy_to_user(u_dom0_op, op, sizeof(*op)) )
            ret = -EFAULT;

    getvcpuinfo_out:
        put_domain(d);
    }
    break;

    case DOM0_SETTIME:
    {
        do_settime(op->u.settime.secs,
                   op->u.settime.nsecs,
                   op->u.settime.system_time);
        ret = 0;
    }
    break;

#ifdef TRACE_BUFFER
    case DOM0_TBUFCONTROL:
    {
        ret = tb_control(&op->u.tbufcontrol);
        copy_to_user(u_dom0_op, op, sizeof(*op));
    }
    break;
#endif

    case DOM0_READCONSOLE:
    {
        ret = read_console_ring(
            &op->u.readconsole.buffer,
            &op->u.readconsole.count,
            op->u.readconsole.clear);
        copy_to_user(u_dom0_op, op, sizeof(*op));
    }
    break;

    case DOM0_SCHED_ID:
    {
        op->u.sched_id.sched_id = sched_id();
        copy_to_user(u_dom0_op, op, sizeof(*op));
        ret = 0;
    }
    break;

    case DOM0_SETDOMAINMAXMEM:
    {
        struct domain *d;
        ret = -ESRCH;
        d = find_domain_by_id(op->u.setdomainmaxmem.domain);
        if ( d != NULL )
        {
            d->max_pages = op->u.setdomainmaxmem.max_memkb >> (PAGE_SHIFT-10);
            put_domain(d);
            ret = 0;
        }
    }
    break;

    case DOM0_SETDOMAINHANDLE:
    {
        struct domain *d;
        ret = -ESRCH;
        d = find_domain_by_id(op->u.setdomainhandle.domain);
        if ( d != NULL )
        {
            memcpy(d->handle, op->u.setdomainhandle.handle,
                   sizeof(xen_domain_handle_t));
            put_domain(d);
            ret = 0;
        }
    }
    break;

#ifdef PERF_COUNTERS
    case DOM0_PERFCCONTROL:
    {
        extern int perfc_control(dom0_perfccontrol_t *);
        ret = perfc_control(&op->u.perfccontrol);
        copy_to_user(u_dom0_op, op, sizeof(*op));
    }
    break;
#endif

    default:
        ret = arch_do_dom0_op(op,u_dom0_op);

    }

    spin_unlock(&dom0_lock);
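
    /* Report the outcome to the access-control module; ssid was stashed
     * by acm_pre_dom0_op() above. */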
    if (!ret)
        acm_post_dom0_op(op, ssid);
    else
        acm_fail_dom0_op(op, ssid);

    return ret;
}
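
/*
 * Usage sketch (hypothetical, not part of this file): enumerating
 * domains through DOM0_GETDOMAININFOLIST. Field names come from the
 * handler above; do_dom0_op_userspace() is the same assumed privcmd
 * wrapper as in the sketch near the top of this page. Disabled with
 * #if 0 so the listing stays a single translation unit.
 */
#if 0
static int list_domains(void)
{
    dom0_getdomaininfo_t info[16];
    dom0_op_t op;

    memset(&op, 0, sizeof(op));
    op.cmd = DOM0_GETDOMAININFOLIST;
    op.interface_version = DOM0_INTERFACE_VERSION;
    op.u.getdomaininfolist.first_domain = 0;
    op.u.getdomaininfolist.max_domains  = 16;
    op.u.getdomaininfolist.buffer       = info;

    if ( do_dom0_op_userspace(&op) != 0 )
        return -1;

    /* The first op.u.getdomaininfolist.num_domains entries are valid;
     * to continue, resubmit with first_domain = last domain ID + 1. */
    return (int)op.u.getdomaininfolist.num_domains;
}
#endif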

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */