ia64/xen-unstable: xen/arch/x86/domctl.c @ 15252:2d3034d0b36b

hvm: Pause domain during state save/load.
Signed-off-by: Keir Fraser <keir@xensource.com>

author    kfraser@localhost.localdomain
date      Thu May 31 16:05:27 2007 +0100
parents   405573aedd24
children  7eeddd787d2f
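
The change being viewed wraps hvm_save()/hvm_load() in domain_pause()/domain_unpause() inside the sethvmcontext/gethvmcontext handlers below, so the domain cannot run while its state is being marshalled. As a caller-side illustration only, here is a minimal sketch of how a toolstack might drive the two-phase gethvmcontext protocol implemented in this file; the do_domctl() helper is a placeholder for whatever routine actually issues the hypercall, the header paths are indicative, and error handling is pared down.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <xen/domctl.h>   /* struct xen_domctl, XEN_DOMCTL_gethvmcontext */

/* Placeholder: issue the domctl hypercall; returns 0 on success. */
extern int do_domctl(struct xen_domctl *domctl);

/* Fetch a domain's HVM context; pausing is done by the hypervisor. */
static int fetch_hvm_context(domid_t dom, uint8_t **buf, uint32_t *len)
{
    struct xen_domctl domctl;

    memset(&domctl, 0, sizeof(domctl));
    domctl.cmd = XEN_DOMCTL_gethvmcontext;
    domctl.interface_version = XEN_DOMCTL_INTERFACE_VERSION;
    domctl.domain = dom;

    /* First call with a NULL buffer: the handler replies with the size. */
    if ( do_domctl(&domctl) != 0 )
        return -1;
    *len = domctl.u.hvmcontext.size;

    if ( (*buf = malloc(*len)) == NULL )
        return -1;

    /* Second call: hvm_save() runs with the domain paused and fills *buf. */
    set_xen_guest_handle(domctl.u.hvmcontext.buffer, *buf);
    domctl.u.hvmcontext.size = *len;
    return do_domctl(&domctl);
}

The NULL-buffer convention visible in the handler keeps buffer sizing and data transfer within a single domctl, and because the pause is now taken in the hypervisor the saved image is internally consistent regardless of what the toolstack does around the call.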

/******************************************************************************
 * Arch-specific domctl.c
 *
 * Copyright (c) 2002-2006, K A Fraser
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/lib.h>
#include <xen/mm.h>
#include <xen/guest_access.h>
#include <xen/compat.h>
#include <public/domctl.h>
#include <xen/sched.h>
#include <xen/domain.h>
#include <xen/event.h>
#include <xen/domain_page.h>
#include <asm/msr.h>
#include <xen/trace.h>
#include <xen/console.h>
#include <xen/iocap.h>
#include <asm/paging.h>
#include <asm/irq.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/support.h>
#include <asm/processor.h>
#include <public/hvm/e820.h>

long arch_do_domctl(
    struct xen_domctl *domctl,
    XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
{
    long ret = 0;

    switch ( domctl->cmd )
    {
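
    /* Shadow/paging control: forward the sub-op to paging_domctl(). */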
    case XEN_DOMCTL_shadow_op:
    {
        struct domain *d;
        ret = -ESRCH;
        d = rcu_lock_domain_by_id(domctl->domain);
        if ( d != NULL )
        {
            ret = paging_domctl(d,
                                &domctl->u.shadow_op,
                                guest_handle_cast(u_domctl, void));
            rcu_unlock_domain(d);
            copy_to_guest(u_domctl, domctl, 1);
        }
    }
    break;
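
    /* Grant or revoke a domain's access to a range of I/O ports. */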
    case XEN_DOMCTL_ioport_permission:
    {
        struct domain *d;
        unsigned int fp = domctl->u.ioport_permission.first_port;
        unsigned int np = domctl->u.ioport_permission.nr_ports;

        ret = -EINVAL;
        if ( (fp + np) > 65536 )
            break;

        ret = -ESRCH;
        if ( unlikely((d = rcu_lock_domain_by_id(domctl->domain)) == NULL) )
            break;

        if ( np == 0 )
            ret = 0;
        else if ( domctl->u.ioport_permission.allow_access )
            ret = ioports_permit_access(d, fp, fp + np - 1);
        else
            ret = ioports_deny_access(d, fp, fp + np - 1);

        rcu_unlock_domain(d);
    }
    break;
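
    /* Report the page-table type (L1..L4 or none) of one machine frame. */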
    case XEN_DOMCTL_getpageframeinfo:
    {
        struct page_info *page;
        unsigned long mfn = domctl->u.getpageframeinfo.gmfn;
        domid_t dom = domctl->domain;
        struct domain *d;

        ret = -EINVAL;

        if ( unlikely(!mfn_valid(mfn)) ||
             unlikely((d = rcu_lock_domain_by_id(dom)) == NULL) )
            break;

        page = mfn_to_page(mfn);

        if ( likely(get_page(page, d)) )
        {
            ret = 0;

            domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_NOTAB;

            if ( (page->u.inuse.type_info & PGT_count_mask) != 0 )
            {
                switch ( page->u.inuse.type_info & PGT_type_mask )
                {
                case PGT_l1_page_table:
                    domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_L1TAB;
                    break;
                case PGT_l2_page_table:
                    domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_L2TAB;
                    break;
                case PGT_l3_page_table:
                    domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_L3TAB;
                    break;
                case PGT_l4_page_table:
                    domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_L4TAB;
                    break;
                }
            }

            put_page(page);
        }

        rcu_unlock_domain(d);

        copy_to_guest(u_domctl, domctl, 1);
    }
    break;
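
    /*
     * Batched variant of getpageframeinfo: up to 1024 frames per call,
     * marshalled in page-sized chunks through a temporary xenheap page.
     */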
    case XEN_DOMCTL_getpageframeinfo2:
    {
        int n, j;
        int num = domctl->u.getpageframeinfo2.num;
        domid_t dom = domctl->domain;
        struct domain *d;
        uint32_t *arr32;
        ret = -ESRCH;

        if ( unlikely((d = rcu_lock_domain_by_id(dom)) == NULL) )
            break;

        if ( unlikely(num > 1024) )
        {
            ret = -E2BIG;
            rcu_unlock_domain(d);
            break;
        }
        arr32 = alloc_xenheap_page();
        if ( !arr32 )
        {
            ret = -ENOMEM;
            rcu_unlock_domain(d); /* pairs with rcu_lock_domain_by_id() above */
            break;
        }
        ret = 0;
        for ( n = 0; n < num; )
        {
            int k = PAGE_SIZE / 4;
            if ( (num - n) < k )
                k = num - n;

            if ( copy_from_guest_offset(arr32,
                                        domctl->u.getpageframeinfo2.array,
                                        n, k) )
            {
                ret = -EFAULT;
                break;
            }

            for ( j = 0; j < k; j++ )
            {
                struct page_info *page;
                unsigned long mfn = arr32[j];

                page = mfn_to_page(mfn);

                if ( likely(mfn_valid(mfn) && get_page(page, d)) )
                {
                    unsigned long type = 0;

                    switch( page->u.inuse.type_info & PGT_type_mask )
                    {
                    case PGT_l1_page_table:
                        type = XEN_DOMCTL_PFINFO_L1TAB;
                        break;
                    case PGT_l2_page_table:
                        type = XEN_DOMCTL_PFINFO_L2TAB;
                        break;
                    case PGT_l3_page_table:
                        type = XEN_DOMCTL_PFINFO_L3TAB;
                        break;
                    case PGT_l4_page_table:
                        type = XEN_DOMCTL_PFINFO_L4TAB;
                        break;
                    }

                    if ( page->u.inuse.type_info & PGT_pinned )
                        type |= XEN_DOMCTL_PFINFO_LPINTAB;
                    arr32[j] |= type;
                    put_page(page);
                }
                else
                    arr32[j] |= XEN_DOMCTL_PFINFO_XTAB;

            }

            if ( copy_to_guest_offset(domctl->u.getpageframeinfo2.array,
                                      n, arr32, k) )
            {
                ret = -EFAULT;
                break;
            }

            n += k;
        }

        free_xenheap_page(arr32);

        rcu_unlock_domain(d);
    }
    break;
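
    /* Copy up to max_pfns MFNs from the domain's page list to the caller. */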
    case XEN_DOMCTL_getmemlist:
    {
        int i;
        struct domain *d = rcu_lock_domain_by_id(domctl->domain);
        unsigned long max_pfns = domctl->u.getmemlist.max_pfns;
        uint64_t mfn;
        struct list_head *list_ent;

        ret = -EINVAL;
        if ( d != NULL )
        {
            ret = 0;

            spin_lock(&d->page_alloc_lock);

            list_ent = d->page_list.next;
            for ( i = 0; (i < max_pfns) && (list_ent != &d->page_list); i++ )
            {
                mfn = page_to_mfn(list_entry(
                    list_ent, struct page_info, list));
                if ( copy_to_guest_offset(domctl->u.getmemlist.buffer,
                                          i, &mfn, 1) )
                {
                    ret = -EFAULT;
                    break;
                }
                list_ent = mfn_to_page(mfn)->list.next;
            }

            spin_unlock(&d->page_alloc_lock);

            domctl->u.getmemlist.num_pfns = i;
            copy_to_guest(u_domctl, domctl, 1);

            rcu_unlock_domain(d);
        }
    }
    break;
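
    /* Initialise the hypercall page in a frame nominated by the caller. */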
    case XEN_DOMCTL_hypercall_init:
    {
        struct domain *d = rcu_lock_domain_by_id(domctl->domain);
        unsigned long gmfn = domctl->u.hypercall_init.gmfn;
        unsigned long mfn;
        void *hypercall_page;

        ret = -ESRCH;
        if ( unlikely(d == NULL) )
            break;

        mfn = gmfn_to_mfn(d, gmfn);

        ret = -EACCES;
        if ( !mfn_valid(mfn) ||
             !get_page_and_type(mfn_to_page(mfn), d, PGT_writable_page) )
        {
            rcu_unlock_domain(d);
            break;
        }

        ret = 0;

        hypercall_page = map_domain_page(mfn);
        hypercall_page_initialise(d, hypercall_page);
        unmap_domain_page(hypercall_page);

        put_page_and_type(mfn_to_page(mfn));

        rcu_unlock_domain(d);
    }
    break;
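
    /*
     * Load HVM state from a caller-supplied buffer.  The domain is paused
     * around hvm_load() so no vcpu runs while state is being restored.
     */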
    case XEN_DOMCTL_sethvmcontext:
    {
        struct hvm_domain_context c;
        struct domain *d;

        c.cur = 0;
        c.size = domctl->u.hvmcontext.size;
        c.data = NULL;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
            break;

        ret = -EINVAL;
        if ( !is_hvm_domain(d) )
            goto sethvmcontext_out;

        ret = -ENOMEM;
        if ( (c.data = xmalloc_bytes(c.size)) == NULL )
            goto sethvmcontext_out;

        ret = -EFAULT;
        if ( copy_from_guest(c.data, domctl->u.hvmcontext.buffer, c.size) != 0)
            goto sethvmcontext_out;

        domain_pause(d);
        ret = hvm_load(d, &c);
        domain_unpause(d);

    sethvmcontext_out:
        if ( c.data != NULL )
            xfree(c.data);

        rcu_unlock_domain(d);
    }
    break;
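
    /*
     * Save HVM state.  A NULL buffer is a size query; otherwise the domain
     * is paused around hvm_save() and the context is copied back out.
     */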
    case XEN_DOMCTL_gethvmcontext:
    {
        struct hvm_domain_context c;
        struct domain *d;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
            break;

        ret = -EINVAL;
        if ( !is_hvm_domain(d) )
            goto gethvmcontext_out;

        c.cur = 0;
        c.size = hvm_save_size(d);
        c.data = NULL;

        if ( guest_handle_is_null(domctl->u.hvmcontext.buffer) )
        {
            /* Client is querying for the correct buffer size */
            domctl->u.hvmcontext.size = c.size;
            ret = 0;
            goto gethvmcontext_out;
        }

        /* Check that the client has a big enough buffer */
        ret = -ENOSPC;
        if ( domctl->u.hvmcontext.size < c.size )
            goto gethvmcontext_out;

        /* Allocate our own marshalling buffer */
        ret = -ENOMEM;
        if ( (c.data = xmalloc_bytes(c.size)) == NULL )
            goto gethvmcontext_out;

        domain_pause(d);
        ret = hvm_save(d, &c);
        domain_unpause(d);

        domctl->u.hvmcontext.size = c.cur;
        if ( copy_to_guest(domctl->u.hvmcontext.buffer, c.data, c.size) != 0 )
            ret = -EFAULT;

    gethvmcontext_out:
        if ( copy_to_guest(u_domctl, domctl, 1) )
            ret = -EFAULT;

        if ( c.data != NULL )
            xfree(c.data);

        rcu_unlock_domain(d);
    }
    break;
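
    /* Switch a domain between native and compat (32-on-64) guest modes. */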
    case XEN_DOMCTL_set_address_size:
    {
        struct domain *d;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
            break;

        switch ( domctl->u.address_size.size )
        {
#ifdef CONFIG_COMPAT
        case 32:
            ret = switch_compat(d);
            break;
        case 64:
            ret = switch_native(d);
            break;
#endif
        default:
            ret = (domctl->u.address_size.size == BITS_PER_LONG) ? 0 : -EINVAL;
            break;
        }

        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_get_address_size:
    {
        struct domain *d;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
            break;

        domctl->u.address_size.size = BITS_PER_GUEST_LONG(d);

        ret = 0;
        rcu_unlock_domain(d);

        if ( copy_to_guest(u_domctl, domctl, 1) )
            ret = -EFAULT;
    }
    break;

    default:
        ret = -ENOSYS;
        break;
    }

    return ret;
}
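
/*
 * Fill a vcpu_guest_context (native or compat layout) with @v's current
 * architectural state; called from the common getvcpucontext domctl path.
 */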
void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c)
{
#ifdef CONFIG_COMPAT
#define c(fld) (!is_pv_32on64_domain(v->domain) ? (c.nat->fld) : (c.cmp->fld))
#else
#define c(fld) (c.nat->fld)
#endif

    if ( !is_pv_32on64_domain(v->domain) )
        memcpy(c.nat, &v->arch.guest_context, sizeof(*c.nat));
#ifdef CONFIG_COMPAT
    else
        XLAT_vcpu_guest_context(c.cmp, &v->arch.guest_context);
#endif

    c(flags &= ~(VGCF_i387_valid|VGCF_in_kernel));
    if ( v->fpu_initialised )
        c(flags |= VGCF_i387_valid);
    if ( !test_bit(_VPF_down, &v->pause_flags) )
        c(flags |= VGCF_online);

    if ( is_hvm_vcpu(v) )
    {
        if ( !is_pv_32on64_domain(v->domain) )
            hvm_store_cpu_guest_regs(v, &c.nat->user_regs, c.nat->ctrlreg);
#ifdef CONFIG_COMPAT
        else
        {
            struct cpu_user_regs user_regs;
            typeof(c.nat->ctrlreg) ctrlreg;
            unsigned i;

            hvm_store_cpu_guest_regs(v, &user_regs, ctrlreg);
            XLAT_cpu_user_regs(&c.cmp->user_regs, &user_regs);
            for ( i = 0; i < ARRAY_SIZE(c.cmp->ctrlreg); ++i )
                c.cmp->ctrlreg[i] = ctrlreg[i];
        }
#endif
    }
    else
    {
        /* IOPL privileges are virtualised: merge back into returned eflags. */
        BUG_ON((c(user_regs.eflags) & EF_IOPL) != 0);
        c(user_regs.eflags |= v->arch.iopl << 12);

        if ( !is_pv_32on64_domain(v->domain) )
        {
            c.nat->ctrlreg[3] = xen_pfn_to_cr3(
                pagetable_get_pfn(v->arch.guest_table));
#ifdef __x86_64__
            if ( !pagetable_is_null(v->arch.guest_table_user) )
                c.nat->ctrlreg[1] = xen_pfn_to_cr3(
                    pagetable_get_pfn(v->arch.guest_table_user));
#endif
        }
#ifdef CONFIG_COMPAT
        else
        {
            l4_pgentry_t *l4e = __va(pagetable_get_paddr(v->arch.guest_table));
            c.cmp->ctrlreg[3] = compat_pfn_to_cr3(l4e_get_pfn(*l4e));
        }
#endif

        if ( guest_kernel_mode(v, &v->arch.guest_context.user_regs) )
            c(flags |= VGCF_in_kernel);
    }

    c(vm_assist = v->domain->vm_assist);
#undef c
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */