ia64/xen-unstable

xen/arch/x86/domctl.c @ 15828:3b50a7e52ff2

Implement x86 continuable domain destroy.
This patch addresses the following bug report.
http://bugzilla.xensource.com/bugzilla/show_bug.cgi?id=1037
Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author kfraser@localhost.localdomain
date Fri Aug 31 17:00:11 2007 +0100 (2007-08-31)
parents 96f64f4c42f0
children 4b5f3a087737
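
The destroy path this commit makes restartable lives in the common domctl/domain-kill code rather than in the arch-specific handler shown below. As background only, a long-running domctl in Xen is usually made continuable by doing a bounded amount of work and re-issuing the same hypercall; a minimal sketch of that pattern, with names assumed from common Xen conventions rather than taken from this file:

    /* Illustrative sketch only; not code from this changeset. The names
     * d, ret and u_domctl are assumed from the common domctl dispatcher. */
    case XEN_DOMCTL_destroydomain:
        ret = domain_kill(d);        /* tears the domain down in bounded steps */
        if ( ret == -EAGAIN )        /* work remains: restart this hypercall   */
            ret = hypercall_create_continuation(
                __HYPERVISOR_domctl, "h", u_domctl);
        break;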
line source
/******************************************************************************
 * Arch-specific domctl.c
 *
 * Copyright (c) 2002-2006, K A Fraser
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/lib.h>
#include <xen/mm.h>
#include <xen/guest_access.h>
#include <xen/compat.h>
#include <public/domctl.h>
#include <xen/sched.h>
#include <xen/domain.h>
#include <xen/event.h>
#include <xen/domain_page.h>
#include <asm/msr.h>
#include <xen/trace.h>
#include <xen/console.h>
#include <xen/iocap.h>
#include <asm/paging.h>
#include <asm/irq.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/support.h>
#include <asm/processor.h>
#include <xsm/xsm.h>
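
/* Dispatcher for the x86-specific subset of XEN_DOMCTL_* operations. */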
long arch_do_domctl(
    struct xen_domctl *domctl,
    XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
{
    long ret = 0;

    switch ( domctl->cmd )
    {

    case XEN_DOMCTL_shadow_op:
    {
        struct domain *d;
        ret = -ESRCH;
        d = rcu_lock_domain_by_id(domctl->domain);
        if ( d != NULL )
        {
            ret = paging_domctl(d,
                                &domctl->u.shadow_op,
                                guest_handle_cast(u_domctl, void));
            rcu_unlock_domain(d);
            copy_to_guest(u_domctl, domctl, 1);
        }
    }
    break;

    case XEN_DOMCTL_ioport_permission:
    {
        struct domain *d;
        unsigned int fp = domctl->u.ioport_permission.first_port;
        unsigned int np = domctl->u.ioport_permission.nr_ports;

        ret = -EINVAL;
        if ( (fp + np) > 65536 )
            break;

        ret = -ESRCH;
        if ( unlikely((d = rcu_lock_domain_by_id(domctl->domain)) == NULL) )
            break;

        ret = xsm_ioport_permission(d, fp,
                                    domctl->u.ioport_permission.allow_access);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        if ( np == 0 )
            ret = 0;
        else if ( domctl->u.ioport_permission.allow_access )
            ret = ioports_permit_access(d, fp, fp + np - 1);
        else
            ret = ioports_deny_access(d, fp, fp + np - 1);

        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_getpageframeinfo:
    {
        struct page_info *page;
        unsigned long mfn = domctl->u.getpageframeinfo.gmfn;
        domid_t dom = domctl->domain;
        struct domain *d;

        ret = -EINVAL;

        if ( unlikely(!mfn_valid(mfn)) ||
             unlikely((d = rcu_lock_domain_by_id(dom)) == NULL) )
            break;

        page = mfn_to_page(mfn);

        ret = xsm_getpageframeinfo(page);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        if ( likely(get_page(page, d)) )
        {
            ret = 0;

            domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_NOTAB;

            if ( (page->u.inuse.type_info & PGT_count_mask) != 0 )
            {
                switch ( page->u.inuse.type_info & PGT_type_mask )
                {
                case PGT_l1_page_table:
                    domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_L1TAB;
                    break;
                case PGT_l2_page_table:
                    domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_L2TAB;
                    break;
                case PGT_l3_page_table:
                    domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_L3TAB;
                    break;
                case PGT_l4_page_table:
                    domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_L4TAB;
                    break;
                }
            }

            put_page(page);
        }

        rcu_unlock_domain(d);

        copy_to_guest(u_domctl, domctl, 1);
    }
    break;

    case XEN_DOMCTL_getpageframeinfo2:
    {
        int n,j;
        int num = domctl->u.getpageframeinfo2.num;
        domid_t dom = domctl->domain;
        struct domain *d;
        uint32_t *arr32;
        ret = -ESRCH;

        if ( unlikely((d = rcu_lock_domain_by_id(dom)) == NULL) )
            break;

        if ( unlikely(num > 1024) )
        {
            ret = -E2BIG;
            rcu_unlock_domain(d);
            break;
        }

        arr32 = alloc_xenheap_page();
        if ( !arr32 )
        {
            ret = -ENOMEM;
            rcu_unlock_domain(d); /* domain is only RCU-locked here, not referenced */
            break;
        }

        ret = 0;
        for ( n = 0; n < num; )
        {
            int k = PAGE_SIZE / 4;
            if ( (num - n) < k )
                k = num - n;

            if ( copy_from_guest_offset(arr32,
                                        domctl->u.getpageframeinfo2.array,
                                        n, k) )
            {
                ret = -EFAULT;
                break;
            }

            for ( j = 0; j < k; j++ )
            {
                struct page_info *page;
                unsigned long mfn = arr32[j];

                page = mfn_to_page(mfn);

                ret = xsm_getpageframeinfo(page);
                if ( ret )
                    continue;

                if ( likely(mfn_valid(mfn) && get_page(page, d)) )
                {
                    unsigned long type = 0;

                    switch( page->u.inuse.type_info & PGT_type_mask )
                    {
                    case PGT_l1_page_table:
                        type = XEN_DOMCTL_PFINFO_L1TAB;
                        break;
                    case PGT_l2_page_table:
                        type = XEN_DOMCTL_PFINFO_L2TAB;
                        break;
                    case PGT_l3_page_table:
                        type = XEN_DOMCTL_PFINFO_L3TAB;
                        break;
                    case PGT_l4_page_table:
                        type = XEN_DOMCTL_PFINFO_L4TAB;
                        break;
                    }

                    if ( page->u.inuse.type_info & PGT_pinned )
                        type |= XEN_DOMCTL_PFINFO_LPINTAB;
                    arr32[j] |= type;
                    put_page(page);
                }
                else
                    arr32[j] |= XEN_DOMCTL_PFINFO_XTAB;

            }

            if ( copy_to_guest_offset(domctl->u.getpageframeinfo2.array,
                                      n, arr32, k) )
            {
                ret = -EFAULT;
                break;
            }

            n += k;
        }

        free_xenheap_page(arr32);

        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_getmemlist:
    {
        int i;
        struct domain *d = rcu_lock_domain_by_id(domctl->domain);
        unsigned long max_pfns = domctl->u.getmemlist.max_pfns;
        uint64_t mfn;
        struct list_head *list_ent;

        ret = -EINVAL;
        if ( d != NULL )
        {
            ret = xsm_getmemlist(d);
            if ( ret )
            {
                rcu_unlock_domain(d);
                break;
            }

            spin_lock(&d->page_alloc_lock);

            if ( unlikely(d->is_dying) ) {
                spin_unlock(&d->page_alloc_lock);
                goto getmemlist_out;
            }

            ret = 0;
            list_ent = d->page_list.next;
            for ( i = 0; (i < max_pfns) && (list_ent != &d->page_list); i++ )
            {
                mfn = page_to_mfn(list_entry(
                    list_ent, struct page_info, list));
                if ( copy_to_guest_offset(domctl->u.getmemlist.buffer,
                                          i, &mfn, 1) )
                {
                    ret = -EFAULT;
                    break;
                }
                list_ent = mfn_to_page(mfn)->list.next;
            }

            spin_unlock(&d->page_alloc_lock);

            domctl->u.getmemlist.num_pfns = i;
            copy_to_guest(u_domctl, domctl, 1);
        getmemlist_out:
            rcu_unlock_domain(d);
        }
    }
    break;

    case XEN_DOMCTL_hypercall_init:
    {
        struct domain *d = rcu_lock_domain_by_id(domctl->domain);
        unsigned long gmfn = domctl->u.hypercall_init.gmfn;
        unsigned long mfn;
        void *hypercall_page;

        ret = -ESRCH;
        if ( unlikely(d == NULL) )
            break;

        ret = xsm_hypercall_init(d);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        mfn = gmfn_to_mfn(d, gmfn);

        ret = -EACCES;
        if ( !mfn_valid(mfn) ||
             !get_page_and_type(mfn_to_page(mfn), d, PGT_writable_page) )
        {
            rcu_unlock_domain(d);
            break;
        }

        ret = 0;

        hypercall_page = map_domain_page(mfn);
        hypercall_page_initialise(d, hypercall_page);
        unmap_domain_page(hypercall_page);

        put_page_and_type(mfn_to_page(mfn));

        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_sethvmcontext:
    {
        struct hvm_domain_context c;
        struct domain *d;

        c.cur = 0;
        c.size = domctl->u.hvmcontext.size;
        c.data = NULL;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
            break;

        ret = xsm_hvmcontext(d, domctl->cmd);
        if ( ret )
            goto sethvmcontext_out;

        ret = -EINVAL;
        if ( !is_hvm_domain(d) )
            goto sethvmcontext_out;

        ret = -ENOMEM;
        if ( (c.data = xmalloc_bytes(c.size)) == NULL )
            goto sethvmcontext_out;

        ret = -EFAULT;
        if ( copy_from_guest(c.data, domctl->u.hvmcontext.buffer, c.size) != 0)
            goto sethvmcontext_out;

        domain_pause(d);
        ret = hvm_load(d, &c);
        domain_unpause(d);

    sethvmcontext_out:
        if ( c.data != NULL )
            xfree(c.data);

        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_gethvmcontext:
    {
        struct hvm_domain_context c;
        struct domain *d;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
            break;

        ret = xsm_hvmcontext(d, domctl->cmd);
        if ( ret )
            goto gethvmcontext_out;

        ret = -EINVAL;
        if ( !is_hvm_domain(d) )
            goto gethvmcontext_out;

        c.cur = 0;
        c.size = hvm_save_size(d);
        c.data = NULL;

        if ( guest_handle_is_null(domctl->u.hvmcontext.buffer) )
        {
            /* Client is querying for the correct buffer size */
            domctl->u.hvmcontext.size = c.size;
            ret = 0;
            goto gethvmcontext_out;
        }

        /* Check that the client has a big enough buffer */
        ret = -ENOSPC;
        if ( domctl->u.hvmcontext.size < c.size )
            goto gethvmcontext_out;

        /* Allocate our own marshalling buffer */
        ret = -ENOMEM;
        if ( (c.data = xmalloc_bytes(c.size)) == NULL )
            goto gethvmcontext_out;

        domain_pause(d);
        ret = hvm_save(d, &c);
        domain_unpause(d);

        domctl->u.hvmcontext.size = c.cur;
        if ( copy_to_guest(domctl->u.hvmcontext.buffer, c.data, c.size) != 0 )
            ret = -EFAULT;

    gethvmcontext_out:
        if ( copy_to_guest(u_domctl, domctl, 1) )
            ret = -EFAULT;

        if ( c.data != NULL )
            xfree(c.data);

        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_set_address_size:
    {
        struct domain *d;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
            break;

        ret = xsm_address_size(d, domctl->cmd);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        switch ( domctl->u.address_size.size )
        {
#ifdef CONFIG_COMPAT
        case 32:
            ret = switch_compat(d);
            break;
        case 64:
            ret = switch_native(d);
            break;
#endif
        default:
            ret = (domctl->u.address_size.size == BITS_PER_LONG) ? 0 : -EINVAL;
            break;
        }

        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_get_address_size:
    {
        struct domain *d;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
            break;

        ret = xsm_address_size(d, domctl->cmd);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        domctl->u.address_size.size = BITS_PER_GUEST_LONG(d);

        ret = 0;
        rcu_unlock_domain(d);

        if ( copy_to_guest(u_domctl, domctl, 1) )
            ret = -EFAULT;
    }
    break;

    case XEN_DOMCTL_sendtrigger:
    {
        struct domain *d;
        struct vcpu *v;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
            break;

        ret = -EINVAL;
        if ( domctl->u.sendtrigger.vcpu >= MAX_VIRT_CPUS )
            goto sendtrigger_out;

        ret = -ESRCH;
        if ( (v = d->vcpu[domctl->u.sendtrigger.vcpu]) == NULL )
            goto sendtrigger_out;

        switch ( domctl->u.sendtrigger.trigger )
        {
        case XEN_DOMCTL_SENDTRIGGER_NMI:
        {
            ret = -ENOSYS;
            if ( !is_hvm_domain(d) )
                break;

            ret = 0;
            if ( !test_and_set_bool(v->arch.hvm_vcpu.nmi_pending) )
                vcpu_kick(v);
        }
        break;

        default:
            ret = -ENOSYS;
        }

    sendtrigger_out:
        rcu_unlock_domain(d);
    }
    break;

    default:
        ret = -ENOSYS;
        break;
    }

    return ret;
}
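
/* Copy vcpu v's guest-visible context out to the caller's buffer, using the
 * native or compat layout as appropriate. */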
void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c)
{
#ifdef CONFIG_COMPAT
#define c(fld) (!is_pv_32on64_domain(v->domain) ? (c.nat->fld) : (c.cmp->fld))
#else
#define c(fld) (c.nat->fld)
#endif

    if ( !is_pv_32on64_domain(v->domain) )
        memcpy(c.nat, &v->arch.guest_context, sizeof(*c.nat));
#ifdef CONFIG_COMPAT
    else
        XLAT_vcpu_guest_context(c.cmp, &v->arch.guest_context);
#endif

    c(flags &= ~(VGCF_i387_valid|VGCF_in_kernel));
    if ( v->fpu_initialised )
        c(flags |= VGCF_i387_valid);
    if ( !test_bit(_VPF_down, &v->pause_flags) )
        c(flags |= VGCF_online);

    if ( is_hvm_vcpu(v) )
    {
        if ( !is_pv_32on64_domain(v->domain) )
            hvm_store_cpu_guest_regs(v, &c.nat->user_regs, c.nat->ctrlreg);
#ifdef CONFIG_COMPAT
        else
        {
            struct cpu_user_regs user_regs;
            typeof(c.nat->ctrlreg) ctrlreg;
            unsigned i;

            hvm_store_cpu_guest_regs(v, &user_regs, ctrlreg);
            XLAT_cpu_user_regs(&c.cmp->user_regs, &user_regs);
            for ( i = 0; i < ARRAY_SIZE(c.cmp->ctrlreg); ++i )
                c.cmp->ctrlreg[i] = ctrlreg[i];
        }
#endif
    }
    else
    {
        /* IOPL privileges are virtualised: merge back into returned eflags. */
        BUG_ON((c(user_regs.eflags) & EF_IOPL) != 0);
        c(user_regs.eflags |= v->arch.iopl << 12);

        if ( !is_pv_32on64_domain(v->domain) )
        {
            c.nat->ctrlreg[3] = xen_pfn_to_cr3(
                pagetable_get_pfn(v->arch.guest_table));
#ifdef __x86_64__
            if ( !pagetable_is_null(v->arch.guest_table_user) )
                c.nat->ctrlreg[1] = xen_pfn_to_cr3(
                    pagetable_get_pfn(v->arch.guest_table_user));
#endif
        }
#ifdef CONFIG_COMPAT
        else
        {
            l4_pgentry_t *l4e = __va(pagetable_get_paddr(v->arch.guest_table));
            c.cmp->ctrlreg[3] = compat_pfn_to_cr3(l4e_get_pfn(*l4e));
        }
#endif

        if ( guest_kernel_mode(v, &v->arch.guest_context.user_regs) )
            c(flags |= VGCF_in_kernel);
    }

    c(vm_assist = v->domain->vm_assist);
#undef c
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */