ia64/xen-unstable: view xen/arch/x86/domctl.c @ 13661:19a600376688

Don't clobber vcpu flags when getting vcpu context.
Signed-off-by: Keir Fraser <keir@xensource.com>

author    kaf24@localhost.localdomain
date      Fri Jan 26 17:33:58 2007 +0000
parents   ace66ef96b5e
children  271ffb1c12eb
/******************************************************************************
 * Arch-specific domctl.c
 *
 * Copyright (c) 2002-2006, K A Fraser
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/lib.h>
#include <xen/mm.h>
#include <xen/guest_access.h>
#include <xen/compat.h>
#include <public/domctl.h>
#include <xen/sched.h>
#include <xen/domain.h>
#include <xen/event.h>
#include <xen/domain_page.h>
#include <asm/msr.h>
#include <xen/trace.h>
#include <xen/console.h>
#include <xen/iocap.h>
#include <asm/shadow.h>
#include <asm/irq.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/support.h>
#include <asm/processor.h>
#include <public/hvm/e820.h>

long arch_do_domctl(
    struct xen_domctl *domctl,
    XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
{
    long ret = 0;

    switch ( domctl->cmd )
    {
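
    /*
     * XEN_DOMCTL_shadow_op: shadow-mode control operations are handed off
     * wholesale to shadow_domctl(); the (possibly updated) domctl is then
     * copied back to the caller.
     */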
    case XEN_DOMCTL_shadow_op:
    {
        struct domain *d;
        ret = -ESRCH;
        d = find_domain_by_id(domctl->domain);
        if ( d != NULL )
        {
            ret = shadow_domctl(d,
                                &domctl->u.shadow_op,
                                guest_handle_cast(u_domctl, void));
            put_domain(d);
            copy_to_guest(u_domctl, domctl, 1);
        }
    }
    break;
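
    /*
     * XEN_DOMCTL_ioport_permission: grant or revoke the domain's access to
     * a range of I/O ports, which must lie within the 16-bit port space.
     */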
    case XEN_DOMCTL_ioport_permission:
    {
        struct domain *d;
        unsigned int fp = domctl->u.ioport_permission.first_port;
        unsigned int np = domctl->u.ioport_permission.nr_ports;

        ret = -EINVAL;
        if ( (fp + np) > 65536 )
            break;

        ret = -ESRCH;
        if ( unlikely((d = find_domain_by_id(domctl->domain)) == NULL) )
            break;

        if ( np == 0 )
            ret = 0;
        else if ( domctl->u.ioport_permission.allow_access )
            ret = ioports_permit_access(d, fp, fp + np - 1);
        else
            ret = ioports_deny_access(d, fp, fp + np - 1);

        put_domain(d);
    }
    break;
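
    /*
     * XEN_DOMCTL_getpageframeinfo: report whether a single frame owned by
     * the domain is in use as an L1..L4 pagetable (NOTAB otherwise).
     */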
    case XEN_DOMCTL_getpageframeinfo:
    {
        struct page_info *page;
        unsigned long mfn = domctl->u.getpageframeinfo.gmfn;
        domid_t dom = domctl->domain;
        struct domain *d;

        ret = -EINVAL;

        if ( unlikely(!mfn_valid(mfn)) ||
             unlikely((d = find_domain_by_id(dom)) == NULL) )
            break;

        page = mfn_to_page(mfn);

        if ( likely(get_page(page, d)) )
        {
            ret = 0;

            domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_NOTAB;

            if ( (page->u.inuse.type_info & PGT_count_mask) != 0 )
            {
                switch ( page->u.inuse.type_info & PGT_type_mask )
                {
                case PGT_l1_page_table:
                    domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_L1TAB;
                    break;
                case PGT_l2_page_table:
                    domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_L2TAB;
                    break;
                case PGT_l3_page_table:
                    domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_L3TAB;
                    break;
                case PGT_l4_page_table:
                    domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_L4TAB;
                    break;
                }
            }

            put_page(page);
        }

        put_domain(d);

        copy_to_guest(u_domctl, domctl, 1);
    }
    break;
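
    /*
     * XEN_DOMCTL_getpageframeinfo2: batched variant of the above, handling
     * up to 1024 frames per call. The caller's 32-bit MFN array is staged
     * through a single xenheap page in chunks of PAGE_SIZE/4 entries;
     * pinned pagetables are additionally flagged with LPINTAB.
     */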
    case XEN_DOMCTL_getpageframeinfo2:
    {
        int n, j;
        int num = domctl->u.getpageframeinfo2.num;
        domid_t dom = domctl->domain;
        struct domain *d;
        uint32_t *arr32;
        ret = -ESRCH;

        if ( unlikely((d = find_domain_by_id(dom)) == NULL) )
            break;

        if ( unlikely(num > 1024) )
        {
            ret = -E2BIG;
            put_domain(d);
            break;
        }

        arr32 = alloc_xenheap_page();
        if ( arr32 == NULL )
        {
            ret = -ENOMEM;
            put_domain(d);
            break;
        }

        ret = 0;
        for ( n = 0; n < num; )
        {
            int k = PAGE_SIZE / 4;
            if ( (num - n) < k )
                k = num - n;

            if ( copy_from_guest_offset(arr32,
                                        domctl->u.getpageframeinfo2.array,
                                        n, k) )
            {
                ret = -EINVAL;
                break;
            }

            for ( j = 0; j < k; j++ )
            {
                struct page_info *page;
                unsigned long mfn = arr32[j];

                page = mfn_to_page(mfn);

                if ( likely(mfn_valid(mfn) && get_page(page, d)) )
                {
                    unsigned long type = 0;

                    switch ( page->u.inuse.type_info & PGT_type_mask )
                    {
                    case PGT_l1_page_table:
                        type = XEN_DOMCTL_PFINFO_L1TAB;
                        break;
                    case PGT_l2_page_table:
                        type = XEN_DOMCTL_PFINFO_L2TAB;
                        break;
                    case PGT_l3_page_table:
                        type = XEN_DOMCTL_PFINFO_L3TAB;
                        break;
                    case PGT_l4_page_table:
                        type = XEN_DOMCTL_PFINFO_L4TAB;
                        break;
                    }

                    if ( page->u.inuse.type_info & PGT_pinned )
                        type |= XEN_DOMCTL_PFINFO_LPINTAB;
                    arr32[j] |= type;
                    put_page(page);
                }
                else
                    arr32[j] |= XEN_DOMCTL_PFINFO_XTAB;
            }

            if ( copy_to_guest_offset(domctl->u.getpageframeinfo2.array,
                                      n, arr32, k) )
            {
                ret = -EINVAL;
                break;
            }

            n += k;
        }

        free_xenheap_page(arr32);

        put_domain(d);
    }
    break;
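
    /*
     * XEN_DOMCTL_getmemlist: copy up to max_pfns MFNs from the domain's
     * page list into the caller's buffer, reporting in num_pfns how many
     * were actually written.
     */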
    case XEN_DOMCTL_getmemlist:
    {
        int i;
        struct domain *d = find_domain_by_id(domctl->domain);
        unsigned long max_pfns = domctl->u.getmemlist.max_pfns;
        uint64_t mfn;
        struct list_head *list_ent;

        ret = -EINVAL;
        if ( d != NULL )
        {
            ret = 0;

            spin_lock(&d->page_alloc_lock);

            list_ent = d->page_list.next;
            for ( i = 0; (i < max_pfns) && (list_ent != &d->page_list); i++ )
            {
                mfn = page_to_mfn(list_entry(
                    list_ent, struct page_info, list));
                if ( copy_to_guest_offset(domctl->u.getmemlist.buffer,
                                          i, &mfn, 1) )
                {
                    ret = -EFAULT;
                    break;
                }
                list_ent = mfn_to_page(mfn)->list.next;
            }

            spin_unlock(&d->page_alloc_lock);

            domctl->u.getmemlist.num_pfns = i;
            copy_to_guest(u_domctl, domctl, 1);

            put_domain(d);
        }
    }
    break;
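
    /*
     * XEN_DOMCTL_hypercall_init: the caller nominates a guest frame (by
     * GMFN) into which Xen writes the domain's hypercall transfer page.
     */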
    case XEN_DOMCTL_hypercall_init:
    {
        struct domain *d = find_domain_by_id(domctl->domain);
        unsigned long gmfn = domctl->u.hypercall_init.gmfn;
        unsigned long mfn;
        void *hypercall_page;

        ret = -ESRCH;
        if ( unlikely(d == NULL) )
            break;

        mfn = gmfn_to_mfn(d, gmfn);

        ret = -EACCES;
        if ( !mfn_valid(mfn) ||
             !get_page_and_type(mfn_to_page(mfn), d, PGT_writable_page) )
        {
            put_domain(d);
            break;
        }

        ret = 0;

        hypercall_page = map_domain_page(mfn);
        hypercall_page_initialise(d, hypercall_page);
        unmap_domain_page(hypercall_page);

        put_page_and_type(mfn_to_page(mfn));

        put_domain(d);
    }
    break;
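
    /*
     * XEN_DOMCTL_sethvmcontext: load a complete HVM architectural state
     * image, staged through a temporary xenheap buffer, into the domain
     * (via vcpu0).
     */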
    case XEN_DOMCTL_sethvmcontext:
    {
        struct hvm_domain_context *c;
        struct domain *d;
        struct vcpu *v;

        ret = -ESRCH;
        if ( (d = find_domain_by_id(domctl->domain)) == NULL )
            break;

        ret = -ENOMEM;
        if ( (c = xmalloc(struct hvm_domain_context)) == NULL )
            goto sethvmcontext_out;

        v = d->vcpu[0];

        ret = -EFAULT;
        if ( copy_from_guest(c, domctl->u.hvmcontext.ctxt, 1) != 0 )
        {
            xfree(c); /* Don't leak the staging buffer on a failed copy. */
            goto sethvmcontext_out;
        }

        ret = arch_sethvm_ctxt(v, c);

        xfree(c);

    sethvmcontext_out:
        put_domain(d);
    }
    break;
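
    /*
     * XEN_DOMCTL_gethvmcontext: snapshot the domain's HVM architectural
     * state (via vcpu0) into a caller-supplied buffer; fails with ENODATA
     * if the vcpu has not yet been initialised.
     */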
    case XEN_DOMCTL_gethvmcontext:
    {
        struct hvm_domain_context *c;
        struct domain *d;
        struct vcpu *v;

        ret = -ESRCH;
        if ( (d = find_domain_by_id(domctl->domain)) == NULL )
            break;

        ret = -ENOMEM;
        if ( (c = xmalloc(struct hvm_domain_context)) == NULL )
            goto gethvmcontext_out;

        v = d->vcpu[0];

        ret = -ENODATA;
        if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
        {
            xfree(c); /* Don't leak the staging buffer on this error path. */
            goto gethvmcontext_out;
        }

        ret = 0;
        if ( arch_gethvm_ctxt(v, c) == -1 )
            ret = -EFAULT;

        if ( copy_to_guest(domctl->u.hvmcontext.ctxt, c, 1) )
            ret = -EFAULT;

        xfree(c);

        if ( copy_to_guest(u_domctl, domctl, 1) )
            ret = -EFAULT;

    gethvmcontext_out:
        put_domain(d);
    }
    break;
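
    /*
     * XEN_DOMCTL_set_address_size: switch the domain between 32-bit
     * (compat) and 64-bit guest modes where CONFIG_COMPAT provides the
     * machinery; otherwise only the native word size is accepted.
     */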
    case XEN_DOMCTL_set_address_size:
    {
        struct domain *d;

        ret = -ESRCH;
        if ( (d = find_domain_by_id(domctl->domain)) == NULL )
            break;

        switch ( domctl->u.address_size.size )
        {
#ifdef CONFIG_COMPAT
        case 32:
            ret = switch_compat(d);
            break;
        case 64:
            ret = switch_native(d);
            break;
#endif
        default:
            ret = (domctl->u.address_size.size == BITS_PER_LONG) ? 0 : -EINVAL;
            break;
        }

        put_domain(d);
    }
    break;

    case XEN_DOMCTL_get_address_size:
    {
        struct domain *d;

        ret = -ESRCH;
        if ( (d = find_domain_by_id(domctl->domain)) == NULL )
            break;

        domctl->u.address_size.size = BITS_PER_GUEST_LONG(d);

        ret = 0;
        /* Copy the result back to the caller, as the query ops above do. */
        copy_to_guest(u_domctl, domctl, 1);

        put_domain(d);
    }
    break;

    default:
        ret = -ENOSYS;
        break;
    }

    return ret;
}
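
/*
 * Fill in a vcpu_guest_context for the given vcpu, in native or, under
 * CONFIG_COMPAT, 32-bit compat layout.
 */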
void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c)
{
#ifdef CONFIG_COMPAT
#define c(fld) (!IS_COMPAT(v->domain) ? (c.nat->fld) : (c.cmp->fld))
#else
#define c(fld) (c.nat->fld)
#endif

    if ( !IS_COMPAT(v->domain) )
        memcpy(c.nat, &v->arch.guest_context, sizeof(*c.nat));
#ifdef CONFIG_COMPAT
    else
        XLAT_vcpu_guest_context(c.cmp, &v->arch.guest_context);
#endif

    if ( is_hvm_vcpu(v) )
    {
        if ( !IS_COMPAT(v->domain) )
            hvm_store_cpu_guest_regs(v, &c.nat->user_regs, c.nat->ctrlreg);
#ifdef CONFIG_COMPAT
        else
        {
            struct cpu_user_regs user_regs;
            typeof(c.nat->ctrlreg) ctrlreg;
            unsigned i;

            hvm_store_cpu_guest_regs(v, &user_regs, ctrlreg);
            XLAT_cpu_user_regs(&c.cmp->user_regs, &user_regs);
            for ( i = 0; i < ARRAY_SIZE(c.cmp->ctrlreg); ++i )
                c.cmp->ctrlreg[i] = ctrlreg[i];
        }
#endif
    }
    else
    {
        /* IOPL privileges are virtualised: merge back into returned eflags. */
        BUG_ON((c(user_regs.eflags) & EF_IOPL) != 0);
        c(user_regs.eflags |= v->arch.iopl << 12);
    }
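
    /*
     * Clear only the flag bits recomputed here; any other flags already in
     * the caller's buffer must survive (cf. this changeset: "Don't clobber
     * vcpu flags when getting vcpu context").
     */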
    c(flags &= ~(VGCF_i387_valid|VGCF_in_kernel));
    if ( test_bit(_VCPUF_fpu_initialised, &v->vcpu_flags) )
        c(flags |= VGCF_i387_valid);
    if ( guest_kernel_mode(v, &v->arch.guest_context.user_regs) )
        c(flags |= VGCF_in_kernel);

    if ( !IS_COMPAT(v->domain) )
        c.nat->ctrlreg[3] = xen_pfn_to_cr3(pagetable_get_pfn(v->arch.guest_table));
#ifdef CONFIG_COMPAT
    else
    {
        l4_pgentry_t *l4e = __va(pagetable_get_paddr(v->arch.guest_table));
        c.cmp->ctrlreg[3] = compat_pfn_to_cr3(l4e_get_pfn(*l4e));
    }
#endif

    c(vm_assist = v->domain->vm_assist);
#undef c
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */