ia64/xen-unstable

view xen/arch/x86/domctl.c @ 13580:fe01db0e59a3

[HVM] fix save/restore on 64b HV

Enable hvm_ctxt ops for compat mode to fix HVM save/restore on 64b HV.

Signed-off-by: Zhai Edwin <edwin.zhai@intel.com>
author kfraser@localhost.localdomain
date Tue Jan 23 15:26:17 2007 +0000 (2007-01-23)
parents 5dc5e6ba42d2
children 30af6cfdb05c
line source
/******************************************************************************
 * Arch-specific domctl.c
 *
 * Copyright (c) 2002-2006, K A Fraser
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/lib.h>
#include <xen/mm.h>
#include <xen/guest_access.h>
#include <public/domctl.h>
#include <xen/sched.h>
#include <xen/domain.h>
#include <xen/event.h>
#include <xen/domain_page.h>
#include <asm/msr.h>
#include <xen/trace.h>
#include <xen/console.h>
#include <xen/iocap.h>
#include <asm/shadow.h>
#include <asm/irq.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/support.h>
#include <asm/processor.h>
#include <public/hvm/e820.h>
#ifdef CONFIG_COMPAT
#include <compat/xen.h>
#endif
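
/*
 * Note: this file can also be built with COMPAT defined (for the 32-on-64
 * guest ABI); that build is presumably expected to supply its own _long and
 * copy_{from,to}_xxx_offset definitions, which is why the native mappings
 * below are guarded by #ifndef COMPAT.
 */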
#ifndef COMPAT
#define _long long
#define copy_from_xxx_offset copy_from_guest_offset
#define copy_to_xxx_offset copy_to_guest_offset
#endif

_long arch_do_domctl(
    struct xen_domctl *domctl,
    XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
{
    _long ret = 0;

    switch ( domctl->cmd )
    {
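
    /*
     * XEN_DOMCTL_shadow_op: hand the shadow-mode control request over to
     * shadow_domctl() for the target domain, then copy the (possibly
     * updated) domctl back to the caller.
     */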
    case XEN_DOMCTL_shadow_op:
    {
        struct domain *d;
        ret = -ESRCH;
        d = find_domain_by_id(domctl->domain);
        if ( d != NULL )
        {
            ret = shadow_domctl(d,
                                &domctl->u.shadow_op,
                                guest_handle_cast(u_domctl, void));
            put_domain(d);
            copy_to_guest(u_domctl, domctl, 1);
        }
    }
    break;
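
    /*
     * XEN_DOMCTL_ioport_permission: grant or revoke the target domain's
     * access to the I/O port range [first_port, first_port + nr_ports - 1].
     */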
    case XEN_DOMCTL_ioport_permission:
    {
        struct domain *d;
        unsigned int fp = domctl->u.ioport_permission.first_port;
        unsigned int np = domctl->u.ioport_permission.nr_ports;

        ret = -EINVAL;
        if ( (fp + np) > 65536 )
            break;

        ret = -ESRCH;
        if ( unlikely((d = find_domain_by_id(domctl->domain)) == NULL) )
            break;

        if ( np == 0 )
            ret = 0;
        else if ( domctl->u.ioport_permission.allow_access )
            ret = ioports_permit_access(d, fp, fp + np - 1);
        else
            ret = ioports_deny_access(d, fp, fp + np - 1);

        put_domain(d);
    }
    break;
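
    /*
     * XEN_DOMCTL_getpageframeinfo: report whether a single frame is in use
     * as an L1..L4 pagetable (or as no pagetable at all).
     */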
    case XEN_DOMCTL_getpageframeinfo:
    {
        struct page_info *page;
        unsigned long mfn = domctl->u.getpageframeinfo.gmfn;
        domid_t dom = domctl->domain;
        struct domain *d;

        ret = -EINVAL;

        if ( unlikely(!mfn_valid(mfn)) ||
             unlikely((d = find_domain_by_id(dom)) == NULL) )
            break;

        page = mfn_to_page(mfn);

        if ( likely(get_page(page, d)) )
        {
            ret = 0;

            domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_NOTAB;

            if ( (page->u.inuse.type_info & PGT_count_mask) != 0 )
            {
                switch ( page->u.inuse.type_info & PGT_type_mask )
                {
                case PGT_l1_page_table:
                    domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_L1TAB;
                    break;
                case PGT_l2_page_table:
                    domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_L2TAB;
                    break;
                case PGT_l3_page_table:
                    domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_L3TAB;
                    break;
                case PGT_l4_page_table:
                    domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_L4TAB;
                    break;
                }
            }

            put_page(page);
        }

        put_domain(d);

        copy_to_guest(u_domctl, domctl, 1);
    }
    break;
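
    /*
     * XEN_DOMCTL_getpageframeinfo2: batched variant of the above; pagetable
     * type flags are OR'd into the caller's MFN array, GPF2_BATCH entries
     * at a time.
     */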
    case XEN_DOMCTL_getpageframeinfo2:
    {
#define GPF2_BATCH (PAGE_SIZE / sizeof(_long))
        int n,j;
        int num = domctl->u.getpageframeinfo2.num;
        domid_t dom = domctl->domain;
        struct domain *d;
        unsigned _long *l_arr;
        ret = -ESRCH;

        if ( unlikely((d = find_domain_by_id(dom)) == NULL) )
            break;

        if ( unlikely(num > 1024) )
        {
            ret = -E2BIG;
            put_domain(d);
            break;
        }

        l_arr = alloc_xenheap_page();

        ret = 0;
        for ( n = 0; n < num; )
        {
            int k = ((num-n)>GPF2_BATCH)?GPF2_BATCH:(num-n);

            if ( copy_from_xxx_offset(l_arr,
                                      domctl->u.getpageframeinfo2.array,
                                      n, k) )
            {
                ret = -EINVAL;
                break;
            }

            for ( j = 0; j < k; j++ )
            {
                struct page_info *page;
                unsigned _long mfn = l_arr[j];

                page = mfn_to_page(mfn);

                if ( likely(mfn_valid(mfn) && get_page(page, d)) )
                {
                    unsigned _long type = 0;

                    switch( page->u.inuse.type_info & PGT_type_mask )
                    {
                    case PGT_l1_page_table:
                        type = XEN_DOMCTL_PFINFO_L1TAB;
                        break;
                    case PGT_l2_page_table:
                        type = XEN_DOMCTL_PFINFO_L2TAB;
                        break;
                    case PGT_l3_page_table:
                        type = XEN_DOMCTL_PFINFO_L3TAB;
                        break;
                    case PGT_l4_page_table:
                        type = XEN_DOMCTL_PFINFO_L4TAB;
                        break;
                    }

                    if ( page->u.inuse.type_info & PGT_pinned )
                        type |= XEN_DOMCTL_PFINFO_LPINTAB;
                    l_arr[j] |= type;
                    put_page(page);
                }
                else
                    l_arr[j] |= XEN_DOMCTL_PFINFO_XTAB;

            }

            if ( copy_to_xxx_offset(domctl->u.getpageframeinfo2.array,
                                    n, l_arr, k) )
            {
                ret = -EINVAL;
                break;
            }

            n += k;
        }

        free_xenheap_page(l_arr);

        put_domain(d);
    }
    break;
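
    /*
     * XEN_DOMCTL_getmemlist: copy up to max_pfns MFNs from the domain's
     * page list into the caller-supplied buffer.
     */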
    case XEN_DOMCTL_getmemlist:
    {
        int i;
        struct domain *d = find_domain_by_id(domctl->domain);
        unsigned long max_pfns = domctl->u.getmemlist.max_pfns;
        xen_pfn_t mfn;
        struct list_head *list_ent;

        ret = -EINVAL;
        if ( d != NULL )
        {
            ret = 0;

            spin_lock(&d->page_alloc_lock);

            list_ent = d->page_list.next;
            for ( i = 0; (i < max_pfns) && (list_ent != &d->page_list); i++ )
            {
                mfn = page_to_mfn(list_entry(
                    list_ent, struct page_info, list));
                if ( copy_to_xxx_offset(domctl->u.getmemlist.buffer,
                                        i, &mfn, 1) )
                {
                    ret = -EFAULT;
                    break;
                }
                list_ent = mfn_to_page(mfn)->list.next;
            }

            spin_unlock(&d->page_alloc_lock);

            domctl->u.getmemlist.num_pfns = i;
            copy_to_guest(u_domctl, domctl, 1);

            put_domain(d);
        }
    }
    break;
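
    /*
     * XEN_DOMCTL_hypercall_init: write the hypercall transfer page into the
     * guest frame nominated by the caller.
     */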
    case XEN_DOMCTL_hypercall_init:
    {
        struct domain *d = find_domain_by_id(domctl->domain);
        unsigned long gmfn = domctl->u.hypercall_init.gmfn;
        unsigned long mfn;
        void *hypercall_page;

        ret = -ESRCH;
        if ( unlikely(d == NULL) )
            break;

        mfn = gmfn_to_mfn(d, gmfn);

        ret = -EACCES;
        if ( !mfn_valid(mfn) ||
             !get_page_and_type(mfn_to_page(mfn), d, PGT_writable_page) )
        {
            put_domain(d);
            break;
        }

        ret = 0;

        hypercall_page = map_domain_page(mfn);
        hypercall_page_initialise(d, hypercall_page);
        unmap_domain_page(hypercall_page);

        put_page_and_type(mfn_to_page(mfn));

        put_domain(d);
    }
    break;
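
    /*
     * XEN_DOMCTL_sethvmcontext: load a saved HVM context into vcpu0 of the
     * target domain. The COMPAT variant differs only in how the guest
     * handle is cast before the context is copied in.
     */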
    case XEN_DOMCTL_sethvmcontext:
    {
        struct hvm_domain_context *c;
        struct domain *d;
        struct vcpu *v;

        ret = -ESRCH;
        if ( (d = find_domain_by_id(domctl->domain)) == NULL )
            break;

        ret = -ENOMEM;
        if ( (c = xmalloc(struct hvm_domain_context)) == NULL )
            goto sethvmcontext_out;

        v = d->vcpu[0];

        ret = -EFAULT;

#ifndef COMPAT
        if ( copy_from_guest(c, domctl->u.hvmcontext.ctxt, 1) != 0 )
#else
        if ( copy_from_guest(c,
                             compat_handle_cast(domctl->u.hvmcontext.ctxt, void),
                             1) != 0 )
#endif
            goto sethvmcontext_out;

        ret = arch_sethvm_ctxt(v, c);

        xfree(c);

    sethvmcontext_out:
        put_domain(d);

    }
    break;
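
    /*
     * XEN_DOMCTL_gethvmcontext: extract vcpu0's HVM context and copy it out
     * to the caller; again the COMPAT variant only changes the handle cast.
     */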
    case XEN_DOMCTL_gethvmcontext:
    {
        struct hvm_domain_context *c;
        struct domain *d;
        struct vcpu *v;

        ret = -ESRCH;
        if ( (d = find_domain_by_id(domctl->domain)) == NULL )
            break;

        ret = -ENOMEM;
        if ( (c = xmalloc(struct hvm_domain_context)) == NULL )
            goto gethvmcontext_out;

        v = d->vcpu[0];

        ret = -ENODATA;
        if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
            goto gethvmcontext_out;

        ret = 0;
        if ( arch_gethvm_ctxt(v, c) == -1 )
            ret = -EFAULT;

#ifndef COMPAT
        if ( copy_to_guest(domctl->u.hvmcontext.ctxt, c, 1) )
#else
        if ( copy_to_guest(compat_handle_cast(domctl->u.hvmcontext.ctxt,
                                              void),
                           c, 1) )
#endif
            ret = -EFAULT;

        xfree(c);

        if ( copy_to_guest(u_domctl, domctl, 1) )
            ret = -EFAULT;

    gethvmcontext_out:
        put_domain(d);

    }
    break;

    default:
        ret = -ENOSYS;
        break;
    }

    return ret;
}
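
/*
 * arch_get_info_guest() fills in a native or compat vcpu_guest_context,
 * translating register state through the XLAT_* helpers for 32-on-64
 * guests. Only the native build of this file defines it.
 */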
#ifndef COMPAT
void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c)
{
#ifdef CONFIG_COMPAT
#define c(fld) (!IS_COMPAT(v->domain) ? (c.nat->fld) : (c.cmp->fld))
#else
#define c(fld) (c.nat->fld)
#endif
    unsigned long flags;

    if ( !IS_COMPAT(v->domain) )
        memcpy(c.nat, &v->arch.guest_context, sizeof(*c.nat));
#ifdef CONFIG_COMPAT
    else
    {
        XLAT_vcpu_guest_context(c.cmp, &v->arch.guest_context);
    }
#endif

    if ( is_hvm_vcpu(v) )
    {
        if ( !IS_COMPAT(v->domain) )
            hvm_store_cpu_guest_regs(v, &c.nat->user_regs, c.nat->ctrlreg);
#ifdef CONFIG_COMPAT
        else
        {
            struct cpu_user_regs user_regs;
            typeof(c.nat->ctrlreg) ctrlreg;
            unsigned i;

            hvm_store_cpu_guest_regs(v, &user_regs, ctrlreg);
            XLAT_cpu_user_regs(&c.cmp->user_regs, &user_regs);
            for ( i = 0; i < ARRAY_SIZE(c.cmp->ctrlreg); ++i )
                c.cmp->ctrlreg[i] = ctrlreg[i];
        }
#endif
    }
    else
    {
        /* IOPL privileges are virtualised: merge back into returned eflags. */
        BUG_ON((c(user_regs.eflags) & EF_IOPL) != 0);
        c(user_regs.eflags |= v->arch.iopl << 12);
    }

    flags = 0;
    if ( test_bit(_VCPUF_fpu_initialised, &v->vcpu_flags) )
        flags |= VGCF_i387_valid;
    if ( guest_kernel_mode(v, &v->arch.guest_context.user_regs) )
        flags |= VGCF_in_kernel;
    c(flags = flags);

    if ( !IS_COMPAT(v->domain) )
        c.nat->ctrlreg[3] = xen_pfn_to_cr3(pagetable_get_pfn(v->arch.guest_table));
#ifdef CONFIG_COMPAT
    else
    {
        l4_pgentry_t *l4e = __va(pagetable_get_paddr(v->arch.guest_table));
        c.cmp->ctrlreg[3] = compat_pfn_to_cr3(l4e_get_pfn(*l4e));
    }
#endif

    c(vm_assist = v->domain->vm_assist);
#undef c
}
#endif

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */