xen/arch/x86/domctl.c @ 18786:7e8db19d72a5 (ia64/xen-unstable)

x86: fix getvcpucontext for HVM segment registers

Signed-off-by: Kouya Shimura <kouya@jp.fujitsu.com>
Signed-off-by: Akio Takebe <takebe_akio@jp.fujitsu.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>

Author:   Keir Fraser <keir.fraser@citrix.com>
Date:     Tue Nov 11 11:13:57 2008 +0000
Parents:  3603e95245fa
Children: 00a15b45cae3

/******************************************************************************
 * Arch-specific domctl.c
 *
 * Copyright (c) 2002-2006, K A Fraser
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/lib.h>
#include <xen/mm.h>
#include <xen/guest_access.h>
#include <xen/compat.h>
#include <xen/pci.h>
#include <public/domctl.h>
#include <xen/sched.h>
#include <xen/domain.h>
#include <xen/event.h>
#include <xen/domain_page.h>
#include <asm/msr.h>
#include <xen/trace.h>
#include <xen/console.h>
#include <xen/iocap.h>
#include <xen/paging.h>
#include <asm/irq.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/support.h>
#include <asm/hvm/cacheattr.h>
#include <asm/processor.h>
#include <xsm/xsm.h>
#include <xen/iommu.h>
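
/*
 * arch_do_domctl() implements the x86-specific XEN_DOMCTL_* sub-operations;
 * sub-ops not handled by the common domctl code end up here, and anything
 * this switch does not recognise falls through to the default case and
 * returns -ENOSYS.
 */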
long arch_do_domctl(
    struct xen_domctl *domctl,
    XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
{
    long ret = 0;

    switch ( domctl->cmd )
    {
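
    /*
     * XEN_DOMCTL_shadow_op: forward shadow/log-dirty paging control
     * operations to paging_domctl() and copy the (possibly updated)
     * domctl back to the caller.
     */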
    case XEN_DOMCTL_shadow_op:
    {
        struct domain *d;
        ret = -ESRCH;
        d = rcu_lock_domain_by_id(domctl->domain);
        if ( d != NULL )
        {
            ret = paging_domctl(d,
                                &domctl->u.shadow_op,
                                guest_handle_cast(u_domctl, void));
            rcu_unlock_domain(d);
            copy_to_guest(u_domctl, domctl, 1);
        }
    }
    break;
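
    /*
     * XEN_DOMCTL_ioport_permission: grant or revoke the domain's right to
     * access a contiguous range of x86 I/O ports.
     */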
    case XEN_DOMCTL_ioport_permission:
    {
        struct domain *d;
        unsigned int fp = domctl->u.ioport_permission.first_port;
        unsigned int np = domctl->u.ioport_permission.nr_ports;

        ret = -EINVAL;
        if ( (fp + np) > 65536 )
            break;

        ret = -ESRCH;
        if ( unlikely((d = rcu_lock_domain_by_id(domctl->domain)) == NULL) )
            break;

        if ( np == 0 )
            ret = 0;
        else if ( domctl->u.ioport_permission.allow_access )
            ret = ioports_permit_access(d, fp, fp + np - 1);
        else
            ret = ioports_deny_access(d, fp, fp + np - 1);

        rcu_unlock_domain(d);
    }
    break;
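
    /*
     * XEN_DOMCTL_getpageframeinfo: report the page-table type (L1-L4 or
     * none) of a single machine frame owned by the target domain.
     */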
    case XEN_DOMCTL_getpageframeinfo:
    {
        struct page_info *page;
        unsigned long mfn = domctl->u.getpageframeinfo.gmfn;
        domid_t dom = domctl->domain;
        struct domain *d;

        ret = -EINVAL;

        if ( unlikely(!mfn_valid(mfn)) ||
             unlikely((d = rcu_lock_domain_by_id(dom)) == NULL) )
            break;

        page = mfn_to_page(mfn);

        ret = xsm_getpageframeinfo(page);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        if ( likely(get_page(page, d)) )
        {
            ret = 0;

            domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_NOTAB;

            if ( (page->u.inuse.type_info & PGT_count_mask) != 0 )
            {
                switch ( page->u.inuse.type_info & PGT_type_mask )
                {
                case PGT_l1_page_table:
                    domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_L1TAB;
                    break;
                case PGT_l2_page_table:
                    domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_L2TAB;
                    break;
                case PGT_l3_page_table:
                    domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_L3TAB;
                    break;
                case PGT_l4_page_table:
                    domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_L4TAB;
                    break;
                }
            }

            put_page(page);
        }

        rcu_unlock_domain(d);

        copy_to_guest(u_domctl, domctl, 1);
    }
    break;
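
    /*
     * XEN_DOMCTL_getpageframeinfo2: batched variant of the above.  The
     * caller passes an array of MFNs (at most 1024 per call); each entry is
     * rewritten in place with the frame's page-table type flags.
     */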
    case XEN_DOMCTL_getpageframeinfo2:
    {
        int n,j;
        int num = domctl->u.getpageframeinfo2.num;
        domid_t dom = domctl->domain;
        struct domain *d;
        uint32_t *arr32;
        ret = -ESRCH;

        if ( unlikely((d = rcu_lock_domain_by_id(dom)) == NULL) )
            break;

        if ( unlikely(num > 1024) )
        {
            ret = -E2BIG;
            rcu_unlock_domain(d);
            break;
        }

        arr32 = alloc_xenheap_page();
        if ( !arr32 )
        {
            ret = -ENOMEM;
            put_domain(d);
            break;
        }

        ret = 0;
        for ( n = 0; n < num; )
        {
            int k = PAGE_SIZE / 4;
            if ( (num - n) < k )
                k = num - n;

            if ( copy_from_guest_offset(arr32,
                                        domctl->u.getpageframeinfo2.array,
                                        n, k) )
            {
                ret = -EFAULT;
                break;
            }

            for ( j = 0; j < k; j++ )
            {
                struct page_info *page;
                unsigned long mfn = arr32[j];

                page = mfn_to_page(mfn);

                ret = xsm_getpageframeinfo(page);
                if ( ret )
                    continue;

                if ( likely(mfn_valid(mfn) && get_page(page, d)) )
                {
                    unsigned long type = 0;

                    switch( page->u.inuse.type_info & PGT_type_mask )
                    {
                    case PGT_l1_page_table:
                        type = XEN_DOMCTL_PFINFO_L1TAB;
                        break;
                    case PGT_l2_page_table:
                        type = XEN_DOMCTL_PFINFO_L2TAB;
                        break;
                    case PGT_l3_page_table:
                        type = XEN_DOMCTL_PFINFO_L3TAB;
                        break;
                    case PGT_l4_page_table:
                        type = XEN_DOMCTL_PFINFO_L4TAB;
                        break;
                    }

                    if ( page->u.inuse.type_info & PGT_pinned )
                        type |= XEN_DOMCTL_PFINFO_LPINTAB;
                    arr32[j] |= type;
                    put_page(page);
                }
                else
                    arr32[j] |= XEN_DOMCTL_PFINFO_XTAB;
            }

            if ( copy_to_guest_offset(domctl->u.getpageframeinfo2.array,
                                      n, arr32, k) )
            {
                ret = -EFAULT;
                break;
            }

            n += k;
        }

        free_xenheap_page(arr32);

        rcu_unlock_domain(d);
    }
    break;
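
    /*
     * XEN_DOMCTL_getmemlist: copy up to max_pfns MFNs from the head of the
     * domain's page list into the caller's buffer, reporting how many were
     * actually written back in num_pfns.
     */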
    case XEN_DOMCTL_getmemlist:
    {
        int i;
        struct domain *d = rcu_lock_domain_by_id(domctl->domain);
        unsigned long max_pfns = domctl->u.getmemlist.max_pfns;
        uint64_t mfn;
        struct list_head *list_ent;

        ret = -EINVAL;
        if ( d != NULL )
        {
            ret = xsm_getmemlist(d);
            if ( ret )
            {
                rcu_unlock_domain(d);
                break;
            }

            spin_lock(&d->page_alloc_lock);

            if ( unlikely(d->is_dying) ) {
                spin_unlock(&d->page_alloc_lock);
                goto getmemlist_out;
            }

            ret = 0;
            list_ent = d->page_list.next;
            for ( i = 0; (i < max_pfns) && (list_ent != &d->page_list); i++ )
            {
                mfn = page_to_mfn(list_entry(
                    list_ent, struct page_info, list));
                if ( copy_to_guest_offset(domctl->u.getmemlist.buffer,
                                          i, &mfn, 1) )
                {
                    ret = -EFAULT;
                    break;
                }
                list_ent = mfn_to_page(mfn)->list.next;
            }

            spin_unlock(&d->page_alloc_lock);

            domctl->u.getmemlist.num_pfns = i;
            copy_to_guest(u_domctl, domctl, 1);
        getmemlist_out:
            rcu_unlock_domain(d);
        }
    }
    break;
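
    /*
     * XEN_DOMCTL_hypercall_init: write the hypercall transfer page into the
     * guest frame named by gmfn, after translating it to an MFN and taking
     * a writable type reference on it.
     */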
    case XEN_DOMCTL_hypercall_init:
    {
        struct domain *d = rcu_lock_domain_by_id(domctl->domain);
        unsigned long gmfn = domctl->u.hypercall_init.gmfn;
        unsigned long mfn;
        void *hypercall_page;

        ret = -ESRCH;
        if ( unlikely(d == NULL) )
            break;

        ret = xsm_hypercall_init(d);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        mfn = gmfn_to_mfn(d, gmfn);

        ret = -EACCES;
        if ( !mfn_valid(mfn) ||
             !get_page_and_type(mfn_to_page(mfn), d, PGT_writable_page) )
        {
            rcu_unlock_domain(d);
            break;
        }

        ret = 0;

        hypercall_page = map_domain_page(mfn);
        hypercall_page_initialise(d, hypercall_page);
        unmap_domain_page(hypercall_page);

        put_page_and_type(mfn_to_page(mfn));

        rcu_unlock_domain(d);
    }
    break;
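
    /*
     * XEN_DOMCTL_sethvmcontext: load a previously saved HVM state image
     * (e.g. from a save/restore or migration stream) into the target HVM
     * domain via hvm_load(), with the domain paused for the duration.
     */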
    case XEN_DOMCTL_sethvmcontext:
    {
        struct hvm_domain_context c;
        struct domain *d;

        c.cur = 0;
        c.size = domctl->u.hvmcontext.size;
        c.data = NULL;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
            break;

        ret = xsm_hvmcontext(d, domctl->cmd);
        if ( ret )
            goto sethvmcontext_out;

        ret = -EINVAL;
        if ( !is_hvm_domain(d) )
            goto sethvmcontext_out;

        ret = -ENOMEM;
        if ( (c.data = xmalloc_bytes(c.size)) == NULL )
            goto sethvmcontext_out;

        ret = -EFAULT;
        if ( copy_from_guest(c.data, domctl->u.hvmcontext.buffer, c.size) != 0)
            goto sethvmcontext_out;

        domain_pause(d);
        ret = hvm_load(d, &c);
        domain_unpause(d);

    sethvmcontext_out:
        if ( c.data != NULL )
            xfree(c.data);

        rcu_unlock_domain(d);
    }
    break;
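
    /*
     * XEN_DOMCTL_gethvmcontext: save the domain's HVM state via hvm_save().
     * Passing a NULL buffer is a size query: only the required buffer size
     * is returned to the caller.
     */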
    case XEN_DOMCTL_gethvmcontext:
    {
        struct hvm_domain_context c;
        struct domain *d;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
            break;

        ret = xsm_hvmcontext(d, domctl->cmd);
        if ( ret )
            goto gethvmcontext_out;

        ret = -EINVAL;
        if ( !is_hvm_domain(d) )
            goto gethvmcontext_out;

        c.cur = 0;
        c.size = hvm_save_size(d);
        c.data = NULL;

        if ( guest_handle_is_null(domctl->u.hvmcontext.buffer) )
        {
            /* Client is querying for the correct buffer size */
            domctl->u.hvmcontext.size = c.size;
            ret = 0;
            goto gethvmcontext_out;
        }

        /* Check that the client has a big enough buffer */
        ret = -ENOSPC;
        if ( domctl->u.hvmcontext.size < c.size )
            goto gethvmcontext_out;

        /* Allocate our own marshalling buffer */
        ret = -ENOMEM;
        if ( (c.data = xmalloc_bytes(c.size)) == NULL )
            goto gethvmcontext_out;

        domain_pause(d);
        ret = hvm_save(d, &c);
        domain_unpause(d);

        domctl->u.hvmcontext.size = c.cur;
        if ( copy_to_guest(domctl->u.hvmcontext.buffer, c.data, c.size) != 0 )
            ret = -EFAULT;

    gethvmcontext_out:
        if ( copy_to_guest(u_domctl, domctl, 1) )
            ret = -EFAULT;

        if ( c.data != NULL )
            xfree(c.data);

        rcu_unlock_domain(d);
    }
    break;
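
    /*
     * XEN_DOMCTL_{set,get}_address_size and the machine_address_size pair:
     * switch a PV guest between 32-bit (compat) and native 64-bit mode,
     * query the guest word size, and set or query the physical address
     * width the domain may rely on.
     */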
    case XEN_DOMCTL_set_address_size:
    {
        struct domain *d;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
            break;

        ret = xsm_address_size(d, domctl->cmd);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        switch ( domctl->u.address_size.size )
        {
#ifdef CONFIG_COMPAT
        case 32:
            ret = switch_compat(d);
            break;
        case 64:
            ret = switch_native(d);
            break;
#endif
        default:
            ret = (domctl->u.address_size.size == BITS_PER_LONG) ? 0 : -EINVAL;
            break;
        }

        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_get_address_size:
    {
        struct domain *d;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
            break;

        ret = xsm_address_size(d, domctl->cmd);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        domctl->u.address_size.size = BITS_PER_GUEST_LONG(d);

        ret = 0;
        rcu_unlock_domain(d);

        if ( copy_to_guest(u_domctl, domctl, 1) )
            ret = -EFAULT;
    }
    break;

    case XEN_DOMCTL_set_machine_address_size:
    {
        struct domain *d;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
            break;

        ret = xsm_machine_address_size(d, domctl->cmd);
        if ( ret )
            rcu_unlock_domain(d);

        ret = -EBUSY;
        if ( d->tot_pages > 0 )
            goto set_machine_address_size_out;

        d->arch.physaddr_bitsize = domctl->u.address_size.size;

        ret = 0;
    set_machine_address_size_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_get_machine_address_size:
    {
        struct domain *d;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
            break;

        ret = xsm_machine_address_size(d, domctl->cmd);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        domctl->u.address_size.size = d->arch.physaddr_bitsize;

        ret = 0;
        rcu_unlock_domain(d);

        if ( copy_to_guest(u_domctl, domctl, 1) )
            ret = -EFAULT;
    }
    break;
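
    /*
     * XEN_DOMCTL_sendtrigger: inject an asynchronous event into a given
     * VCPU.  Only the NMI trigger is implemented here; anything else
     * returns -ENOSYS.
     */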
    case XEN_DOMCTL_sendtrigger:
    {
        struct domain *d;
        struct vcpu *v;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
            break;

        ret = xsm_sendtrigger(d);
        if ( ret )
            goto sendtrigger_out;

        ret = -EINVAL;
        if ( domctl->u.sendtrigger.vcpu >= MAX_VIRT_CPUS )
            goto sendtrigger_out;

        ret = -ESRCH;
        if ( (v = d->vcpu[domctl->u.sendtrigger.vcpu]) == NULL )
            goto sendtrigger_out;

        switch ( domctl->u.sendtrigger.trigger )
        {
        case XEN_DOMCTL_SENDTRIGGER_NMI:
        {
            ret = 0;
            if ( !test_and_set_bool(v->nmi_pending) )
                vcpu_kick(v);
        }
        break;

        default:
            ret = -ENOSYS;
        }

    sendtrigger_out:
        rcu_unlock_domain(d);
    }
    break;
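
    /*
     * PCI passthrough control: the next four sub-ops query a device's
     * sibling group, test whether a device can be assigned, and assign or
     * deassign a device to/from a domain's IOMMU context.  machine_bdf is
     * encoded as (bus << 16) | (devfn << 8).
     */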
    case XEN_DOMCTL_get_device_group:
    {
        struct domain *d;
        u32 max_sdevs;
        u8 bus, devfn;
        XEN_GUEST_HANDLE_64(uint32) sdevs;
        int num_sdevs;

        ret = -ENOSYS;
        if ( !iommu_enabled )
            break;

        ret = -EINVAL;
        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
            break;

        bus = (domctl->u.get_device_group.machine_bdf >> 16) & 0xff;
        devfn = (domctl->u.get_device_group.machine_bdf >> 8) & 0xff;
        max_sdevs = domctl->u.get_device_group.max_sdevs;
        sdevs = domctl->u.get_device_group.sdev_array;

        num_sdevs = iommu_get_device_group(d, bus, devfn, sdevs, max_sdevs);
        if ( num_sdevs < 0 )
        {
            dprintk(XENLOG_ERR, "iommu_get_device_group() failed!\n");
            ret = -EFAULT;
            domctl->u.get_device_group.num_sdevs = 0;
        }
        else
        {
            ret = 0;
            domctl->u.get_device_group.num_sdevs = num_sdevs;
        }
        if ( copy_to_guest(u_domctl, domctl, 1) )
            ret = -EFAULT;
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_test_assign_device:
    {
        u8 bus, devfn;

        ret = -ENOSYS;
        if ( !iommu_enabled )
            break;

        ret = xsm_test_assign_device(domctl->u.assign_device.machine_bdf);
        if ( ret )
            break;

        ret = -EINVAL;
        bus = (domctl->u.assign_device.machine_bdf >> 16) & 0xff;
        devfn = (domctl->u.assign_device.machine_bdf >> 8) & 0xff;

        if ( device_assigned(bus, devfn) )
        {
            gdprintk(XENLOG_ERR, "XEN_DOMCTL_test_assign_device: "
                     "%x:%x:%x already assigned, or non-existent\n",
                     bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
            break;
        }
        ret = 0;
    }
    break;

    case XEN_DOMCTL_assign_device:
    {
        struct domain *d;
        u8 bus, devfn;

        ret = -ENOSYS;
        if ( !iommu_enabled )
            break;

        ret = -EINVAL;
        if ( unlikely((d = get_domain_by_id(domctl->domain)) == NULL) )
        {
            gdprintk(XENLOG_ERR,
                     "XEN_DOMCTL_assign_device: get_domain_by_id() failed\n");
            break;
        }

        ret = xsm_assign_device(d, domctl->u.assign_device.machine_bdf);
        if ( ret )
            goto assign_device_out;

        bus = (domctl->u.assign_device.machine_bdf >> 16) & 0xff;
        devfn = (domctl->u.assign_device.machine_bdf >> 8) & 0xff;

        if ( !iommu_pv_enabled && !is_hvm_domain(d) )
        {
            ret = -ENOSYS;
            put_domain(d);
            break;
        }

        ret = -EINVAL;
        if ( device_assigned(bus, devfn) )
        {
            gdprintk(XENLOG_ERR, "XEN_DOMCTL_assign_device: "
                     "%x:%x:%x already assigned, or non-existent\n",
                     bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
            put_domain(d);
            break;
        }

        ret = assign_device(d, bus, devfn);
        if ( ret )
            gdprintk(XENLOG_ERR, "XEN_DOMCTL_assign_device: "
                     "assign device (%x:%x:%x) failed\n",
                     bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

    assign_device_out:
        put_domain(d);
    }
    break;

    case XEN_DOMCTL_deassign_device:
    {
        struct domain *d;
        u8 bus, devfn;

        ret = -ENOSYS;
        if ( !iommu_enabled )
            break;

        ret = -EINVAL;
        if ( unlikely((d = get_domain_by_id(domctl->domain)) == NULL) )
        {
            gdprintk(XENLOG_ERR,
                     "XEN_DOMCTL_deassign_device: get_domain_by_id() failed\n");
            break;
        }

        ret = xsm_assign_device(d, domctl->u.assign_device.machine_bdf);
        if ( ret )
            goto deassign_device_out;

        bus = (domctl->u.assign_device.machine_bdf >> 16) & 0xff;
        devfn = (domctl->u.assign_device.machine_bdf >> 8) & 0xff;

        if ( !iommu_pv_enabled && !is_hvm_domain(d) )
        {
            ret = -ENOSYS;
            put_domain(d);
            break;
        }

        if ( !device_assigned(bus, devfn) )
        {
            put_domain(d);
            break;
        }

        ret = 0;
        deassign_device(d, bus, devfn);
        gdprintk(XENLOG_INFO, "XEN_DOMCTL_deassign_device: bdf = %x:%x:%x\n",
                 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

    deassign_device_out:
        put_domain(d);
    }
    break;
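
    /*
     * XEN_DOMCTL_{bind,unbind}_pt_irq: route (or tear down the routing of)
     * a passed-through device's physical IRQ to/from the guest, via the
     * VT-d passthrough IRQ binding code.
     */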
    case XEN_DOMCTL_bind_pt_irq:
    {
        struct domain * d;
        xen_domctl_bind_pt_irq_t * bind;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
            break;
        bind = &(domctl->u.bind_pt_irq);

        ret = xsm_bind_pt_irq(d, bind);
        if ( ret )
            goto bind_out;

        ret = -ESRCH;
        if ( iommu_enabled )
            ret = pt_irq_create_bind_vtd(d, bind);
        if ( ret < 0 )
            gdprintk(XENLOG_ERR, "pt_irq_create_bind failed!\n");

    bind_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_unbind_pt_irq:
    {
        struct domain * d;
        xen_domctl_bind_pt_irq_t * bind;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
            break;
        bind = &(domctl->u.bind_pt_irq);
        if ( iommu_enabled )
            ret = pt_irq_destroy_bind_vtd(d, bind);
        if ( ret < 0 )
            gdprintk(XENLOG_ERR, "pt_irq_destroy_bind failed!\n");
        rcu_unlock_domain(d);
    }
    break;
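
    /*
     * XEN_DOMCTL_memory_mapping and XEN_DOMCTL_ioport_mapping: map (or
     * unmap) a machine MMIO range into the guest physical address space,
     * and associate guest I/O port ranges with machine port ranges for a
     * passed-through device, updating the domain's access permissions.
     */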
    case XEN_DOMCTL_memory_mapping:
    {
        struct domain *d;
        unsigned long gfn = domctl->u.memory_mapping.first_gfn;
        unsigned long mfn = domctl->u.memory_mapping.first_mfn;
        unsigned long nr_mfns = domctl->u.memory_mapping.nr_mfns;
        int i;

        ret = -EINVAL;
        if ( (mfn + nr_mfns - 1) < mfn ) /* wrap? */
            break;

        ret = -ESRCH;
        if ( unlikely((d = rcu_lock_domain_by_id(domctl->domain)) == NULL) )
            break;

        ret=0;
        if ( domctl->u.memory_mapping.add_mapping )
        {
            gdprintk(XENLOG_INFO,
                     "memory_map:add: gfn=%lx mfn=%lx nr_mfns=%lx\n",
                     gfn, mfn, nr_mfns);

            ret = iomem_permit_access(d, mfn, mfn + nr_mfns - 1);
            for ( i = 0; i < nr_mfns; i++ )
                set_mmio_p2m_entry(d, gfn+i, _mfn(mfn+i));
        }
        else
        {
            gdprintk(XENLOG_INFO,
                     "memory_map:remove: gfn=%lx mfn=%lx nr_mfns=%lx\n",
                     gfn, mfn, nr_mfns);

            for ( i = 0; i < nr_mfns; i++ )
                clear_mmio_p2m_entry(d, gfn+i);
            ret = iomem_deny_access(d, mfn, mfn + nr_mfns - 1);
        }

        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_ioport_mapping:
    {
#define MAX_IOPORTS 0x10000
        struct domain *d;
        struct hvm_iommu *hd;
        unsigned int fgp = domctl->u.ioport_mapping.first_gport;
        unsigned int fmp = domctl->u.ioport_mapping.first_mport;
        unsigned int np = domctl->u.ioport_mapping.nr_ports;
        struct g2m_ioport *g2m_ioport;
        int found = 0;

        ret = -EINVAL;
        if ( (np == 0) || (fgp > MAX_IOPORTS) || (fmp > MAX_IOPORTS) ||
             ((fgp + np) > MAX_IOPORTS) || ((fmp + np) > MAX_IOPORTS) )
        {
            gdprintk(XENLOG_ERR,
                     "ioport_map:invalid:gport=%x mport=%x nr_ports=%x\n",
                     fgp, fmp, np);
            break;
        }

        ret = -ESRCH;
        if ( unlikely((d = rcu_lock_domain_by_id(domctl->domain)) == NULL) )
            break;

        hd = domain_hvm_iommu(d);
        if ( domctl->u.ioport_mapping.add_mapping )
        {
            gdprintk(XENLOG_INFO,
                     "ioport_map:add f_gport=%x f_mport=%x np=%x\n",
                     fgp, fmp, np);

            list_for_each_entry(g2m_ioport, &hd->g2m_ioport_list, list)
                if (g2m_ioport->mport == fmp )
                {
                    g2m_ioport->gport = fgp;
                    g2m_ioport->np = np;
                    found = 1;
                    break;
                }
            if ( !found )
            {
                g2m_ioport = xmalloc(struct g2m_ioport);
                g2m_ioport->gport = fgp;
                g2m_ioport->mport = fmp;
                g2m_ioport->np = np;
                list_add_tail(&g2m_ioport->list, &hd->g2m_ioport_list);
            }
            ret = ioports_permit_access(d, fmp, fmp + np - 1);
        }
        else
        {
            gdprintk(XENLOG_INFO,
                     "ioport_map:remove f_gport=%x f_mport=%x np=%x\n",
                     fgp, fmp, np);
            list_for_each_entry(g2m_ioport, &hd->g2m_ioport_list, list)
                if ( g2m_ioport->mport == fmp )
                {
                    list_del(&g2m_ioport->list);
                    xfree(g2m_ioport);
                    break;
                }
            ret = ioports_deny_access(d, fmp, fmp + np - 1);
        }
        rcu_unlock_domain(d);
    }
    break;
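
    /*
     * XEN_DOMCTL_pin_mem_cacheattr: pin a guest physical address range to a
     * fixed cache attribute (e.g. UC or WC) for an HVM domain.
     */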
    case XEN_DOMCTL_pin_mem_cacheattr:
    {
        struct domain *d;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(domctl->domain);
        if ( d == NULL )
            break;

        ret = xsm_pin_mem_cacheattr(d);
        if ( ret )
            goto pin_out;

        ret = hvm_set_mem_pinned_cacheattr(
            d, domctl->u.pin_mem_cacheattr.start,
            domctl->u.pin_mem_cacheattr.end,
            domctl->u.pin_mem_cacheattr.type);

    pin_out:
        rcu_unlock_domain(d);
    }
    break;
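
    /*
     * XEN_DOMCTL_{set,get}_ext_vcpucontext: access the "extended" per-VCPU
     * state that does not fit in vcpu_guest_context, i.e. the sysenter and
     * syscall32 callback registration (64-bit Xen only).
     */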
    case XEN_DOMCTL_set_ext_vcpucontext:
    case XEN_DOMCTL_get_ext_vcpucontext:
    {
        struct xen_domctl_ext_vcpucontext *evc;
        struct domain *d;
        struct vcpu *v;

        evc = &domctl->u.ext_vcpucontext;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(domctl->domain);
        if ( d == NULL )
            break;

        ret = xsm_ext_vcpucontext(d, domctl->cmd);
        if ( ret )
            goto ext_vcpucontext_out;

        ret = -ESRCH;
        if ( (evc->vcpu >= MAX_VIRT_CPUS) ||
             ((v = d->vcpu[evc->vcpu]) == NULL) )
            goto ext_vcpucontext_out;

        if ( domctl->cmd == XEN_DOMCTL_get_ext_vcpucontext )
        {
            evc->size = sizeof(*evc);
#ifdef __x86_64__
            evc->sysenter_callback_cs = v->arch.sysenter_callback_cs;
            evc->sysenter_callback_eip = v->arch.sysenter_callback_eip;
            evc->sysenter_disables_events = v->arch.sysenter_disables_events;
            evc->syscall32_callback_cs = v->arch.syscall32_callback_cs;
            evc->syscall32_callback_eip = v->arch.syscall32_callback_eip;
            evc->syscall32_disables_events = v->arch.syscall32_disables_events;
#else
            evc->sysenter_callback_cs = 0;
            evc->sysenter_callback_eip = 0;
            evc->sysenter_disables_events = 0;
            evc->syscall32_callback_cs = 0;
            evc->syscall32_callback_eip = 0;
            evc->syscall32_disables_events = 0;
#endif
        }
        else
        {
            ret = -EINVAL;
            if ( evc->size != sizeof(*evc) )
                goto ext_vcpucontext_out;
#ifdef __x86_64__
            fixup_guest_code_selector(d, evc->sysenter_callback_cs);
            v->arch.sysenter_callback_cs = evc->sysenter_callback_cs;
            v->arch.sysenter_callback_eip = evc->sysenter_callback_eip;
            v->arch.sysenter_disables_events = evc->sysenter_disables_events;
            fixup_guest_code_selector(d, evc->syscall32_callback_cs);
            v->arch.syscall32_callback_cs = evc->syscall32_callback_cs;
            v->arch.syscall32_callback_eip = evc->syscall32_callback_eip;
            v->arch.syscall32_disables_events = evc->syscall32_disables_events;
#else
            /* We do not support syscall/syscall32/sysenter on 32-bit Xen. */
            if ( (evc->sysenter_callback_cs & ~3) ||
                 evc->sysenter_callback_eip ||
                 (evc->syscall32_callback_cs & ~3) ||
                 evc->syscall32_callback_eip )
                goto ext_vcpucontext_out;
#endif
        }

        ret = 0;

    ext_vcpucontext_out:
        rcu_unlock_domain(d);
        if ( (domctl->cmd == XEN_DOMCTL_get_ext_vcpucontext) &&
             copy_to_guest(u_domctl, domctl, 1) )
            ret = -EFAULT;
    }
    break;
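
    /*
     * XEN_DOMCTL_set_cpuid: install one entry in the domain's CPUID policy
     * table.  An existing entry with a matching input leaf/subleaf is
     * overwritten; otherwise the first unused slot is used, and -ENOENT is
     * returned once all MAX_CPUID_INPUT slots are taken.
     */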
    case XEN_DOMCTL_set_cpuid:
    {
        struct domain *d;
        xen_domctl_cpuid_t *ctl = &domctl->u.cpuid;
        cpuid_input_t *cpuid = NULL;
        int i;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(domctl->domain);
        if ( d == NULL )
            break;

        for ( i = 0; i < MAX_CPUID_INPUT; i++ )
        {
            cpuid = &d->arch.cpuids[i];

            if ( cpuid->input[0] == XEN_CPUID_INPUT_UNUSED )
                break;

            if ( (cpuid->input[0] == ctl->input[0]) &&
                 ((cpuid->input[1] == XEN_CPUID_INPUT_UNUSED) ||
                  (cpuid->input[1] == ctl->input[1])) )
                break;
        }

        if ( i == MAX_CPUID_INPUT )
        {
            ret = -ENOENT;
        }
        else
        {
            memcpy(cpuid, ctl, sizeof(cpuid_input_t));
            ret = 0;
        }

        rcu_unlock_domain(d);
    }
    break;
    case XEN_DOMCTL_suppress_spurious_page_faults:
    {
        struct domain *d;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(domctl->domain);
        if ( d != NULL )
        {
            d->arch.suppress_spurious_page_faults = 1;
            rcu_unlock_domain(d);
            ret = 0;
        }
    }
    break;

    default:
        ret = -ENOSYS;
        break;
    }

    return ret;
}
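
/*
 * arch_get_info_guest() fills in a vcpu_guest_context (native layout, or
 * compat layout for 32-on-64 PV guests) on behalf of XEN_DOMCTL_getvcpucontext.
 * For HVM vcpus the segment selectors are read back through
 * hvm_get_segment_register(); those calls are the fix this changeset's
 * description refers to.
 */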
void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c)
{
#ifdef CONFIG_COMPAT
#define c(fld) (!is_pv_32on64_domain(v->domain) ? (c.nat->fld) : (c.cmp->fld))
#else
#define c(fld) (c.nat->fld)
#endif

    if ( !is_pv_32on64_domain(v->domain) )
        memcpy(c.nat, &v->arch.guest_context, sizeof(*c.nat));
#ifdef CONFIG_COMPAT
    else
        XLAT_vcpu_guest_context(c.cmp, &v->arch.guest_context);
#endif

    c(flags &= ~(VGCF_i387_valid|VGCF_in_kernel));
    if ( v->fpu_initialised )
        c(flags |= VGCF_i387_valid);
    if ( !test_bit(_VPF_down, &v->pause_flags) )
        c(flags |= VGCF_online);

    if ( is_hvm_vcpu(v) )
    {
        struct segment_register sreg;
        memset(c.nat->ctrlreg, 0, sizeof(c.nat->ctrlreg));
        c.nat->ctrlreg[0] = v->arch.hvm_vcpu.guest_cr[0];
        c.nat->ctrlreg[2] = v->arch.hvm_vcpu.guest_cr[2];
        c.nat->ctrlreg[3] = v->arch.hvm_vcpu.guest_cr[3];
        c.nat->ctrlreg[4] = v->arch.hvm_vcpu.guest_cr[4];
        hvm_get_segment_register(v, x86_seg_cs, &sreg);
        c.nat->user_regs.cs = sreg.sel;
        hvm_get_segment_register(v, x86_seg_ss, &sreg);
        c.nat->user_regs.ss = sreg.sel;
        hvm_get_segment_register(v, x86_seg_ds, &sreg);
        c.nat->user_regs.ds = sreg.sel;
        hvm_get_segment_register(v, x86_seg_es, &sreg);
        c.nat->user_regs.es = sreg.sel;
        hvm_get_segment_register(v, x86_seg_fs, &sreg);
        c.nat->user_regs.fs = sreg.sel;
        hvm_get_segment_register(v, x86_seg_gs, &sreg);
        c.nat->user_regs.gs = sreg.sel;
    }
    else
    {
        /* IOPL privileges are virtualised: merge back into returned eflags. */
        BUG_ON((c(user_regs.eflags) & EF_IOPL) != 0);
        c(user_regs.eflags |= v->arch.iopl << 12);

        if ( !is_pv_32on64_domain(v->domain) )
        {
            c.nat->ctrlreg[3] = xen_pfn_to_cr3(
                pagetable_get_pfn(v->arch.guest_table));
#ifdef __x86_64__
            if ( !pagetable_is_null(v->arch.guest_table_user) )
                c.nat->ctrlreg[1] = xen_pfn_to_cr3(
                    pagetable_get_pfn(v->arch.guest_table_user));
#endif

            /* Merge shadow DR7 bits into real DR7. */
            c.nat->debugreg[7] |= c.nat->debugreg[5];
            c.nat->debugreg[5] = 0;
        }
#ifdef CONFIG_COMPAT
        else
        {
            l4_pgentry_t *l4e = __va(pagetable_get_paddr(v->arch.guest_table));
            c.cmp->ctrlreg[3] = compat_pfn_to_cr3(l4e_get_pfn(*l4e));

            /* Merge shadow DR7 bits into real DR7. */
            c.cmp->debugreg[7] |= c.cmp->debugreg[5];
            c.cmp->debugreg[5] = 0;
        }
#endif

        if ( guest_kernel_mode(v, &v->arch.guest_context.user_regs) )
            c(flags |= VGCF_in_kernel);
    }

    c(vm_assist = v->domain->vm_assist);
#undef c
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */