ia64/xen-unstable

view xen/arch/x86/domctl.c @ 16509:0e8e68cfc8ac

vt-d: Print messages when:
- VT-d has been enabled by Xen
- a user attempts to assign a PCI device that has already been assigned to another HVM guest

Signed-off-by: Allen Kay <allen.m.kay@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Tue Dec 04 10:29:00 2007 +0000 (2007-12-04)
parents c555a5f97982
children ef83b50fc4a4
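
The XEN_DOMCTL_assign_device handler below decodes a PCI device address
from the 32-bit machine_bdf field (bus in bits 23:16, devfn in bits 15:8).
As a minimal sketch of the encoding that handler assumes (pack_bdf() is a
hypothetical illustration, not a Xen API), a caller would pack the field
like this:

    /* Hypothetical helper: pack bus/device/function to match the shifts
     * used in the XEN_DOMCTL_assign_device handler below. devfn is the
     * usual ((dev << 3) | fn) encoding. */
    static inline uint32_t pack_bdf(uint8_t bus, uint8_t dev, uint8_t fn)
    {
        return ((uint32_t)bus << 16) | ((uint32_t)((dev << 3) | fn) << 8);
    }
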
/******************************************************************************
 * Arch-specific domctl.c
 *
 * Copyright (c) 2002-2006, K A Fraser
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/lib.h>
#include <xen/mm.h>
#include <xen/guest_access.h>
#include <xen/compat.h>
#include <public/domctl.h>
#include <xen/sched.h>
#include <xen/domain.h>
#include <xen/event.h>
#include <xen/domain_page.h>
#include <asm/msr.h>
#include <xen/trace.h>
#include <xen/console.h>
#include <xen/iocap.h>
#include <asm/paging.h>
#include <asm/irq.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/support.h>
#include <asm/hvm/cacheattr.h>
#include <asm/processor.h>
#include <xsm/xsm.h>
#include <asm/iommu.h>
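
/*
 * arch_do_domctl() handles the x86-specific subset of the domctl
 * hypercall; operations common to all architectures are handled in
 * common/domctl.c.
 */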
long arch_do_domctl(
    struct xen_domctl *domctl,
    XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
{
    long ret = 0;

    switch ( domctl->cmd )
    {

    case XEN_DOMCTL_shadow_op:
    {
        struct domain *d;
        ret = -ESRCH;
        d = rcu_lock_domain_by_id(domctl->domain);
        if ( d != NULL )
        {
            ret = paging_domctl(d,
                                &domctl->u.shadow_op,
                                guest_handle_cast(u_domctl, void));
            rcu_unlock_domain(d);
            copy_to_guest(u_domctl, domctl, 1);
        }
    }
    break;

    case XEN_DOMCTL_ioport_permission:
    {
        struct domain *d;
        unsigned int fp = domctl->u.ioport_permission.first_port;
        unsigned int np = domctl->u.ioport_permission.nr_ports;

        ret = -EINVAL;
        if ( (fp + np) > 65536 )
            break;

        ret = -ESRCH;
        if ( unlikely((d = rcu_lock_domain_by_id(domctl->domain)) == NULL) )
            break;

        ret = xsm_ioport_permission(d, fp,
                                    domctl->u.ioport_permission.allow_access);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        if ( np == 0 )
            ret = 0;
        else if ( domctl->u.ioport_permission.allow_access )
            ret = ioports_permit_access(d, fp, fp + np - 1);
        else
            ret = ioports_deny_access(d, fp, fp + np - 1);

        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_getpageframeinfo:
    {
        struct page_info *page;
        unsigned long mfn = domctl->u.getpageframeinfo.gmfn;
        domid_t dom = domctl->domain;
        struct domain *d;

        ret = -EINVAL;

        if ( unlikely(!mfn_valid(mfn)) ||
             unlikely((d = rcu_lock_domain_by_id(dom)) == NULL) )
            break;

        page = mfn_to_page(mfn);

        ret = xsm_getpageframeinfo(page);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        if ( likely(get_page(page, d)) )
        {
            ret = 0;

            domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_NOTAB;

            if ( (page->u.inuse.type_info & PGT_count_mask) != 0 )
            {
                switch ( page->u.inuse.type_info & PGT_type_mask )
                {
                case PGT_l1_page_table:
                    domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_L1TAB;
                    break;
                case PGT_l2_page_table:
                    domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_L2TAB;
                    break;
                case PGT_l3_page_table:
                    domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_L3TAB;
                    break;
                case PGT_l4_page_table:
                    domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_L4TAB;
                    break;
                }
            }

            put_page(page);
        }

        rcu_unlock_domain(d);

        copy_to_guest(u_domctl, domctl, 1);
    }
    break;

    case XEN_DOMCTL_getpageframeinfo2:
    {
        int n, j;
        int num = domctl->u.getpageframeinfo2.num;
        domid_t dom = domctl->domain;
        struct domain *d;
        uint32_t *arr32;

        ret = -ESRCH;
        if ( unlikely((d = rcu_lock_domain_by_id(dom)) == NULL) )
            break;

        if ( unlikely(num > 1024) )
        {
            ret = -E2BIG;
            rcu_unlock_domain(d);
            break;
        }

        arr32 = alloc_xenheap_page();
        if ( !arr32 )
        {
            ret = -ENOMEM;
            /* The domain was locked with rcu_lock_domain_by_id(), so it
             * must be released with rcu_unlock_domain(), not put_domain(). */
            rcu_unlock_domain(d);
            break;
        }
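
        /* Use the xenheap page as a bounce buffer, handling the guest's
         * MFN array in chunks of up to PAGE_SIZE/4 32-bit entries. */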
        ret = 0;
        for ( n = 0; n < num; )
        {
            int k = PAGE_SIZE / 4;
            if ( (num - n) < k )
                k = num - n;

            if ( copy_from_guest_offset(arr32,
                                        domctl->u.getpageframeinfo2.array,
                                        n, k) )
            {
                ret = -EFAULT;
                break;
            }

            for ( j = 0; j < k; j++ )
            {
                struct page_info *page;
                unsigned long mfn = arr32[j];

                page = mfn_to_page(mfn);

                ret = xsm_getpageframeinfo(page);
                if ( ret )
                    continue;

                if ( likely(mfn_valid(mfn) && get_page(page, d)) )
                {
                    unsigned long type = 0;

                    switch ( page->u.inuse.type_info & PGT_type_mask )
                    {
                    case PGT_l1_page_table:
                        type = XEN_DOMCTL_PFINFO_L1TAB;
                        break;
                    case PGT_l2_page_table:
                        type = XEN_DOMCTL_PFINFO_L2TAB;
                        break;
                    case PGT_l3_page_table:
                        type = XEN_DOMCTL_PFINFO_L3TAB;
                        break;
                    case PGT_l4_page_table:
                        type = XEN_DOMCTL_PFINFO_L4TAB;
                        break;
                    }

                    if ( page->u.inuse.type_info & PGT_pinned )
                        type |= XEN_DOMCTL_PFINFO_LPINTAB;
                    arr32[j] |= type;
                    put_page(page);
                }
                else
                    arr32[j] |= XEN_DOMCTL_PFINFO_XTAB;
            }

            if ( copy_to_guest_offset(domctl->u.getpageframeinfo2.array,
                                      n, arr32, k) )
            {
                ret = -EFAULT;
                break;
            }

            n += k;
        }

        free_xenheap_page(arr32);

        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_getmemlist:
    {
        int i;
        struct domain *d = rcu_lock_domain_by_id(domctl->domain);
        unsigned long max_pfns = domctl->u.getmemlist.max_pfns;
        uint64_t mfn;
        struct list_head *list_ent;

        ret = -EINVAL;
        if ( d != NULL )
        {
            ret = xsm_getmemlist(d);
            if ( ret )
            {
                rcu_unlock_domain(d);
                break;
            }
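
            /* Hold page_alloc_lock while walking the page list, so the
             * list cannot change beneath us as entries are copied out. */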
            spin_lock(&d->page_alloc_lock);

            if ( unlikely(d->is_dying) )
            {
                spin_unlock(&d->page_alloc_lock);
                goto getmemlist_out;
            }

            ret = 0;
            list_ent = d->page_list.next;
            for ( i = 0; (i < max_pfns) && (list_ent != &d->page_list); i++ )
            {
                mfn = page_to_mfn(list_entry(
                    list_ent, struct page_info, list));
                if ( copy_to_guest_offset(domctl->u.getmemlist.buffer,
                                          i, &mfn, 1) )
                {
                    ret = -EFAULT;
                    break;
                }
                list_ent = mfn_to_page(mfn)->list.next;
            }

            spin_unlock(&d->page_alloc_lock);

            domctl->u.getmemlist.num_pfns = i;
            copy_to_guest(u_domctl, domctl, 1);
        getmemlist_out:
            rcu_unlock_domain(d);
        }
    }
    break;

    case XEN_DOMCTL_hypercall_init:
    {
        struct domain *d = rcu_lock_domain_by_id(domctl->domain);
        unsigned long gmfn = domctl->u.hypercall_init.gmfn;
        unsigned long mfn;
        void *hypercall_page;

        ret = -ESRCH;
        if ( unlikely(d == NULL) )
            break;

        ret = xsm_hypercall_init(d);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        mfn = gmfn_to_mfn(d, gmfn);

        ret = -EACCES;
        if ( !mfn_valid(mfn) ||
             !get_page_and_type(mfn_to_page(mfn), d, PGT_writable_page) )
        {
            rcu_unlock_domain(d);
            break;
        }

        ret = 0;
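
        /* Map the guest-nominated page and fill it with the hypercall
         * transfer stubs appropriate to this domain type. */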
        hypercall_page = map_domain_page(mfn);
        hypercall_page_initialise(d, hypercall_page);
        unmap_domain_page(hypercall_page);

        put_page_and_type(mfn_to_page(mfn));

        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_sethvmcontext:
    {
        struct hvm_domain_context c;
        struct domain *d;

        c.cur = 0;
        c.size = domctl->u.hvmcontext.size;
        c.data = NULL;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
            break;

        ret = xsm_hvmcontext(d, domctl->cmd);
        if ( ret )
            goto sethvmcontext_out;

        ret = -EINVAL;
        if ( !is_hvm_domain(d) )
            goto sethvmcontext_out;

        ret = -ENOMEM;
        if ( (c.data = xmalloc_bytes(c.size)) == NULL )
            goto sethvmcontext_out;

        ret = -EFAULT;
        if ( copy_from_guest(c.data, domctl->u.hvmcontext.buffer, c.size) != 0 )
            goto sethvmcontext_out;

        domain_pause(d);
        ret = hvm_load(d, &c);
        domain_unpause(d);

    sethvmcontext_out:
        if ( c.data != NULL )
            xfree(c.data);

        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_gethvmcontext:
    {
        struct hvm_domain_context c;
        struct domain *d;

        /* Initialise before any goto: gethvmcontext_out inspects c.data. */
        c.cur = 0;
        c.size = 0;
        c.data = NULL;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
            break;

        ret = xsm_hvmcontext(d, domctl->cmd);
        if ( ret )
            goto gethvmcontext_out;

        ret = -EINVAL;
        if ( !is_hvm_domain(d) )
            goto gethvmcontext_out;

        c.size = hvm_save_size(d);

        if ( guest_handle_is_null(domctl->u.hvmcontext.buffer) )
        {
            /* Client is querying for the correct buffer size. */
            domctl->u.hvmcontext.size = c.size;
            ret = 0;
            goto gethvmcontext_out;
        }

        /* Check that the client has a big enough buffer. */
        ret = -ENOSPC;
        if ( domctl->u.hvmcontext.size < c.size )
            goto gethvmcontext_out;

        /* Allocate our own marshalling buffer. */
        ret = -ENOMEM;
        if ( (c.data = xmalloc_bytes(c.size)) == NULL )
            goto gethvmcontext_out;

        domain_pause(d);
        ret = hvm_save(d, &c);
        domain_unpause(d);

        domctl->u.hvmcontext.size = c.cur;
        if ( copy_to_guest(domctl->u.hvmcontext.buffer, c.data, c.size) != 0 )
            ret = -EFAULT;

    gethvmcontext_out:
        if ( copy_to_guest(u_domctl, domctl, 1) )
            ret = -EFAULT;

        if ( c.data != NULL )
            xfree(c.data);

        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_set_address_size:
    {
        struct domain *d;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
            break;

        ret = xsm_address_size(d, domctl->cmd);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        switch ( domctl->u.address_size.size )
        {
#ifdef CONFIG_COMPAT
        case 32:
            ret = switch_compat(d);
            break;
        case 64:
            ret = switch_native(d);
            break;
#endif
        default:
            ret = (domctl->u.address_size.size == BITS_PER_LONG) ? 0 : -EINVAL;
            break;
        }

        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_get_address_size:
    {
        struct domain *d;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
            break;

        ret = xsm_address_size(d, domctl->cmd);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        domctl->u.address_size.size = BITS_PER_GUEST_LONG(d);

        ret = 0;
        rcu_unlock_domain(d);

        if ( copy_to_guest(u_domctl, domctl, 1) )
            ret = -EFAULT;
    }
    break;

    case XEN_DOMCTL_sendtrigger:
    {
        struct domain *d;
        struct vcpu *v;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
            break;

        ret = -EINVAL;
        if ( domctl->u.sendtrigger.vcpu >= MAX_VIRT_CPUS )
            goto sendtrigger_out;

        ret = -ESRCH;
        if ( (v = d->vcpu[domctl->u.sendtrigger.vcpu]) == NULL )
            goto sendtrigger_out;

        switch ( domctl->u.sendtrigger.trigger )
        {
        case XEN_DOMCTL_SENDTRIGGER_NMI:
        {
            ret = 0;
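            /* Kick the vcpu only if an NMI was not already pending;
             * repeated triggers while one is pending are no-ops. */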
            if ( !test_and_set_bool(v->nmi_pending) )
                vcpu_kick(v);
        }
        break;

        default:
            ret = -ENOSYS;
        }

    sendtrigger_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_assign_device:
    {
        struct domain *d;
        struct hvm_iommu *hd;
        u8 bus, devfn;

        ret = -EINVAL;
        if ( !vtd_enabled )
            break;

        if ( unlikely((d = get_domain_by_id(domctl->domain)) == NULL) )
        {
            gdprintk(XENLOG_ERR,
                     "XEN_DOMCTL_assign_device: get_domain_by_id() failed\n");
            break;
        }
        hd = domain_hvm_iommu(d);
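        /* machine_bdf layout: bus in bits 23:16, devfn in bits 15:8. */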
        bus = (domctl->u.assign_device.machine_bdf >> 16) & 0xff;
        devfn = (domctl->u.assign_device.machine_bdf >> 8) & 0xff;

        if ( device_assigned(bus, devfn) )
        {
            gdprintk(XENLOG_ERR, "XEN_DOMCTL_assign_device: "
                     "%x:%x:%x already assigned\n",
                     bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
            /* Drop the reference taken by get_domain_by_id() on this
             * early exit, as the success path below does. */
            put_domain(d);
            break;
        }

        ret = assign_device(d, bus, devfn);
        gdprintk(XENLOG_INFO, "XEN_DOMCTL_assign_device: bdf = %x:%x:%x\n",
                 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
        put_domain(d);
    }
    break;

    case XEN_DOMCTL_bind_pt_irq:
    {
        struct domain *d;
        xen_domctl_bind_pt_irq_t *bind;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
            break;
        bind = &(domctl->u.bind_pt_irq);
        if ( vtd_enabled )
            ret = pt_irq_create_bind_vtd(d, bind);
        if ( ret < 0 )
            gdprintk(XENLOG_ERR, "pt_irq_create_bind failed!\n");
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_memory_mapping:
    {
        struct domain *d;
        unsigned long gfn = domctl->u.memory_mapping.first_gfn;
        unsigned long mfn = domctl->u.memory_mapping.first_mfn;
        unsigned long nr_mfns = domctl->u.memory_mapping.nr_mfns;
        int i;

        ret = -EINVAL;
        if ( (mfn + nr_mfns - 1) < mfn ) /* wrap? */
            break;

        ret = -ESRCH;
        if ( unlikely((d = rcu_lock_domain_by_id(domctl->domain)) == NULL) )
            break;
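
        /* Adding a mapping grants iomem access and installs MMIO p2m
         * entries; removing clears the p2m entries and then revokes
         * iomem access. */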
        ret = 0;
        if ( domctl->u.memory_mapping.add_mapping )
        {
            gdprintk(XENLOG_INFO,
                     "memory_map:add: gfn=%lx mfn=%lx nr_mfns=%lx\n",
                     gfn, mfn, nr_mfns);

            ret = iomem_permit_access(d, mfn, mfn + nr_mfns - 1);
            for ( i = 0; i < nr_mfns; i++ )
                set_mmio_p2m_entry(d, gfn+i, _mfn(mfn+i));
        }
        else
        {
            gdprintk(XENLOG_INFO,
                     "memory_map:remove: gfn=%lx mfn=%lx nr_mfns=%lx\n",
                     gfn, mfn, nr_mfns);

            for ( i = 0; i < nr_mfns; i++ )
                clear_mmio_p2m_entry(d, gfn+i);
            ret = iomem_deny_access(d, mfn, mfn + nr_mfns - 1);
        }

        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_ioport_mapping:
    {
#define MAX_IOPORTS 0x10000
        struct domain *d;
        struct hvm_iommu *hd;
        unsigned int fgp = domctl->u.ioport_mapping.first_gport;
        unsigned int fmp = domctl->u.ioport_mapping.first_mport;
        unsigned int np = domctl->u.ioport_mapping.nr_ports;
        struct g2m_ioport *g2m_ioport;
        int found = 0;

        ret = -EINVAL;
        if ( (np == 0) || (fgp > MAX_IOPORTS) || (fmp > MAX_IOPORTS) ||
             ((fgp + np) > MAX_IOPORTS) || ((fmp + np) > MAX_IOPORTS) )
        {
            gdprintk(XENLOG_ERR,
                     "ioport_map:invalid:gport=%x mport=%x nr_ports=%x\n",
                     fgp, fmp, np);
            break;
        }

        ret = -ESRCH;
        if ( unlikely((d = rcu_lock_domain_by_id(domctl->domain)) == NULL) )
            break;

        hd = domain_hvm_iommu(d);
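        /* hd->g2m_ioport_list records guest-port to machine-port
         * mappings for passthrough devices: update the entry for this
         * machine port if one exists, otherwise append a new one. */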
        if ( domctl->u.ioport_mapping.add_mapping )
        {
            gdprintk(XENLOG_INFO,
                     "ioport_map:add f_gport=%x f_mport=%x np=%x\n",
                     fgp, fmp, np);

            list_for_each_entry(g2m_ioport, &hd->g2m_ioport_list, list)
                if ( g2m_ioport->mport == fmp )
                {
                    g2m_ioport->gport = fgp;
                    g2m_ioport->np = np;
                    found = 1;
                    break;
                }
            if ( !found )
            {
                g2m_ioport = xmalloc(struct g2m_ioport);
                if ( !g2m_ioport )
                {
                    ret = -ENOMEM;
                    rcu_unlock_domain(d);
                    break;
                }
                g2m_ioport->gport = fgp;
                g2m_ioport->mport = fmp;
                g2m_ioport->np = np;
                list_add_tail(&g2m_ioport->list, &hd->g2m_ioport_list);
            }
            ret = ioports_permit_access(d, fmp, fmp + np - 1);
        }
        else
        {
            gdprintk(XENLOG_INFO,
                     "ioport_map:remove f_gport=%x f_mport=%x np=%x\n",
                     fgp, fmp, np);

            list_for_each_entry(g2m_ioport, &hd->g2m_ioport_list, list)
                if ( g2m_ioport->mport == fmp )
                {
                    list_del(&g2m_ioport->list);
                    xfree(g2m_ioport);
                    break;
                }
            ret = ioports_deny_access(d, fmp, fmp + np - 1);
        }
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_pin_mem_cacheattr:
    {
        struct domain *d;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(domctl->domain);
        if ( d == NULL )
            break;

        ret = hvm_set_mem_pinned_cacheattr(
            d, domctl->u.pin_mem_cacheattr.start,
            domctl->u.pin_mem_cacheattr.end,
            domctl->u.pin_mem_cacheattr.type);

        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_set_ext_vcpucontext:
    case XEN_DOMCTL_get_ext_vcpucontext:
    {
        struct xen_domctl_ext_vcpucontext *evc;
        struct domain *d;
        struct vcpu *v;

        evc = &domctl->u.ext_vcpucontext;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(domctl->domain);
        if ( d == NULL )
            break;

        ret = -ESRCH;
        if ( (evc->vcpu >= MAX_VIRT_CPUS) ||
             ((v = d->vcpu[evc->vcpu]) == NULL) )
            goto ext_vcpucontext_out;

        if ( domctl->cmd == XEN_DOMCTL_get_ext_vcpucontext )
        {
            evc->size = sizeof(*evc);
#ifdef __x86_64__
            evc->sysenter_callback_cs = v->arch.sysenter_callback_cs;
            evc->sysenter_callback_eip = v->arch.sysenter_callback_eip;
            evc->sysenter_disables_events = v->arch.sysenter_disables_events;
            evc->syscall32_callback_cs = v->arch.syscall32_callback_cs;
            evc->syscall32_callback_eip = v->arch.syscall32_callback_eip;
            evc->syscall32_disables_events = v->arch.syscall32_disables_events;
#else
            evc->sysenter_callback_cs = 0;
            evc->sysenter_callback_eip = 0;
            evc->sysenter_disables_events = 0;
            evc->syscall32_callback_cs = 0;
            evc->syscall32_callback_eip = 0;
            evc->syscall32_disables_events = 0;
#endif
        }
        else
        {
            ret = -EINVAL;
            if ( evc->size != sizeof(*evc) )
                goto ext_vcpucontext_out;
#ifdef __x86_64__
            fixup_guest_code_selector(d, evc->sysenter_callback_cs);
            v->arch.sysenter_callback_cs = evc->sysenter_callback_cs;
            v->arch.sysenter_callback_eip = evc->sysenter_callback_eip;
            v->arch.sysenter_disables_events = evc->sysenter_disables_events;
            fixup_guest_code_selector(d, evc->syscall32_callback_cs);
            v->arch.syscall32_callback_cs = evc->syscall32_callback_cs;
            v->arch.syscall32_callback_eip = evc->syscall32_callback_eip;
            v->arch.syscall32_disables_events = evc->syscall32_disables_events;
#else
            /* We do not support syscall/syscall32/sysenter on 32-bit Xen. */
            if ( (evc->sysenter_callback_cs & ~3) ||
                 evc->sysenter_callback_eip ||
                 (evc->syscall32_callback_cs & ~3) ||
                 evc->syscall32_callback_eip )
                goto ext_vcpucontext_out;
#endif
        }

        ret = 0;

    ext_vcpucontext_out:
        rcu_unlock_domain(d);
        if ( (domctl->cmd == XEN_DOMCTL_get_ext_vcpucontext) &&
             copy_to_guest(u_domctl, domctl, 1) )
            ret = -EFAULT;
    }
    break;

    default:
        ret = -ENOSYS;
        break;
    }

    return ret;
}

void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c)
{
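    /*
     * c(fld) reads or updates field 'fld' in whichever guest-context
     * layout (native, or 32-on-64 compat) applies to this vcpu.
     */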
#ifdef CONFIG_COMPAT
#define c(fld) (!is_pv_32on64_domain(v->domain) ? (c.nat->fld) : (c.cmp->fld))
#else
#define c(fld) (c.nat->fld)
#endif

    if ( !is_pv_32on64_domain(v->domain) )
        memcpy(c.nat, &v->arch.guest_context, sizeof(*c.nat));
#ifdef CONFIG_COMPAT
    else
        XLAT_vcpu_guest_context(c.cmp, &v->arch.guest_context);
#endif

    c(flags &= ~(VGCF_i387_valid|VGCF_in_kernel));
    if ( v->fpu_initialised )
        c(flags |= VGCF_i387_valid);
    if ( !test_bit(_VPF_down, &v->pause_flags) )
        c(flags |= VGCF_online);

    if ( is_hvm_vcpu(v) )
    {
        memset(c.nat->ctrlreg, 0, sizeof(c.nat->ctrlreg));
        c.nat->ctrlreg[0] = v->arch.hvm_vcpu.guest_cr[0];
        c.nat->ctrlreg[2] = v->arch.hvm_vcpu.guest_cr[2];
        c.nat->ctrlreg[3] = v->arch.hvm_vcpu.guest_cr[3];
        c.nat->ctrlreg[4] = v->arch.hvm_vcpu.guest_cr[4];
    }
    else
    {
        /* IOPL privileges are virtualised: merge back into returned eflags. */
        BUG_ON((c(user_regs.eflags) & EF_IOPL) != 0);
        c(user_regs.eflags |= v->arch.iopl << 12);

        if ( !is_pv_32on64_domain(v->domain) )
        {
            c.nat->ctrlreg[3] = xen_pfn_to_cr3(
                pagetable_get_pfn(v->arch.guest_table));
#ifdef __x86_64__
            if ( !pagetable_is_null(v->arch.guest_table_user) )
                c.nat->ctrlreg[1] = xen_pfn_to_cr3(
                    pagetable_get_pfn(v->arch.guest_table_user));
#endif

            /* Merge shadow DR7 bits into real DR7. */
            c.nat->debugreg[7] |= c.nat->debugreg[5];
            c.nat->debugreg[5] = 0;
        }
#ifdef CONFIG_COMPAT
        else
        {
            l4_pgentry_t *l4e = __va(pagetable_get_paddr(v->arch.guest_table));
            c.cmp->ctrlreg[3] = compat_pfn_to_cr3(l4e_get_pfn(*l4e));

            /* Merge shadow DR7 bits into real DR7. */
            c.cmp->debugreg[7] |= c.cmp->debugreg[5];
            c.cmp->debugreg[5] = 0;
        }
#endif

        if ( guest_kernel_mode(v, &v->arch.guest_context.user_regs) )
            c(flags |= VGCF_in_kernel);
    }

    c(vm_assist = v->domain->vm_assist);
#undef c
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */