ia64/xen-unstable

view xen/arch/x86/domctl.c @ 18432:1e98ea5c8604

x86: Fix guest_handle_okay/guest_handle_subrange_okay

The guest handle checks should use paging_* predicates, not shadow_*.
Also tidy up a few places where p2m definitions were being imported
via asm/guest_access.h -> asm/shadow.h -> asm/p2m.h

Signed-off-by: Tim Deegan <Tim.Deegan@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Sep 03 14:16:35 2008 +0100 (2008-09-03)
parents 9b8f3ec6c5fd
children 44f039c4aee4
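
The fix described above lands in the guest-handle validation macros in asm-x86/guest_access.h, which skip the range check for externally-paged guests. A minimal sketch of the post-fix shape, assuming the macros wrap array_access_ok() (illustrative only, not the exact patch hunk):

    /* Illustrative sketch only; predicate names per the description above. */
    #define guest_handle_okay(hnd, nr)                            \
        (paging_mode_external(current->domain) ||                 \
         array_access_ok((hnd).p, (nr), sizeof(*(hnd).p)))

    #define guest_handle_subrange_okay(hnd, first, last)          \
        (paging_mode_external(current->domain) ||                 \
         array_access_ok((hnd).p + (first), (last) - (first) + 1, \
                         sizeof(*(hnd).p)))

Before the fix the same checks used a shadow_* predicate, which pulled p2m definitions in via asm/guest_access.h -> asm/shadow.h -> asm/p2m.h, as noted above.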
line source
/******************************************************************************
 * Arch-specific domctl.c
 *
 * Copyright (c) 2002-2006, K A Fraser
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/lib.h>
#include <xen/mm.h>
#include <xen/guest_access.h>
#include <xen/compat.h>
#include <xen/pci.h>
#include <public/domctl.h>
#include <xen/sched.h>
#include <xen/domain.h>
#include <xen/event.h>
#include <xen/domain_page.h>
#include <asm/msr.h>
#include <xen/trace.h>
#include <xen/console.h>
#include <xen/iocap.h>
#include <xen/paging.h>
#include <asm/irq.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/support.h>
#include <asm/hvm/cacheattr.h>
#include <asm/processor.h>
#include <xsm/xsm.h>
#include <xen/iommu.h>

long arch_do_domctl(
    struct xen_domctl *domctl,
    XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
{
    long ret = 0;

    switch ( domctl->cmd )
    {
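
    /*
     * Shadow/paging control operations are handed straight to the common
     * paging layer; the updated op structure is copied back to the caller.
     */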
    case XEN_DOMCTL_shadow_op:
    {
        struct domain *d;
        ret = -ESRCH;
        d = rcu_lock_domain_by_id(domctl->domain);
        if ( d != NULL )
        {
            ret = paging_domctl(d,
                                &domctl->u.shadow_op,
                                guest_handle_cast(u_domctl, void));
            rcu_unlock_domain(d);
            copy_to_guest(u_domctl, domctl, 1);
        }
    }
    break;
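
    /* Grant or revoke the domain's access to a range of legacy I/O ports. */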
    case XEN_DOMCTL_ioport_permission:
    {
        struct domain *d;
        unsigned int fp = domctl->u.ioport_permission.first_port;
        unsigned int np = domctl->u.ioport_permission.nr_ports;

        ret = -EINVAL;
        if ( (fp + np) > 65536 )
            break;

        ret = -ESRCH;
        if ( unlikely((d = rcu_lock_domain_by_id(domctl->domain)) == NULL) )
            break;

        ret = xsm_ioport_permission(d, fp,
            domctl->u.ioport_permission.allow_access);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        if ( np == 0 )
            ret = 0;
        else if ( domctl->u.ioport_permission.allow_access )
            ret = ioports_permit_access(d, fp, fp + np - 1);
        else
            ret = ioports_deny_access(d, fp, fp + np - 1);

        rcu_unlock_domain(d);
    }
    break;
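
    /*
     * Report the pagetable type (L1..L4 or none) of a single machine frame
     * owned by the target domain.
     */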
    case XEN_DOMCTL_getpageframeinfo:
    {
        struct page_info *page;
        unsigned long mfn = domctl->u.getpageframeinfo.gmfn;
        domid_t dom = domctl->domain;
        struct domain *d;

        ret = -EINVAL;

        if ( unlikely(!mfn_valid(mfn)) ||
             unlikely((d = rcu_lock_domain_by_id(dom)) == NULL) )
            break;

        page = mfn_to_page(mfn);

        ret = xsm_getpageframeinfo(page);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        if ( likely(get_page(page, d)) )
        {
            ret = 0;

            domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_NOTAB;

            if ( (page->u.inuse.type_info & PGT_count_mask) != 0 )
            {
                switch ( page->u.inuse.type_info & PGT_type_mask )
                {
                case PGT_l1_page_table:
                    domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_L1TAB;
                    break;
                case PGT_l2_page_table:
                    domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_L2TAB;
                    break;
                case PGT_l3_page_table:
                    domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_L3TAB;
                    break;
                case PGT_l4_page_table:
                    domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_L4TAB;
                    break;
                }
            }

            put_page(page);
        }

        rcu_unlock_domain(d);

        copy_to_guest(u_domctl, domctl, 1);
    }
    break;
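
    /*
     * Batched variant of getpageframeinfo: pagetable type flags are folded
     * into the caller-supplied MFN array, one Xen heap page worth at a time.
     */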
    case XEN_DOMCTL_getpageframeinfo2:
    {
        int n,j;
        int num = domctl->u.getpageframeinfo2.num;
        domid_t dom = domctl->domain;
        struct domain *d;
        uint32_t *arr32;
        ret = -ESRCH;

        if ( unlikely((d = rcu_lock_domain_by_id(dom)) == NULL) )
            break;

        if ( unlikely(num > 1024) )
        {
            ret = -E2BIG;
            rcu_unlock_domain(d);
            break;
        }

        arr32 = alloc_xenheap_page();
        if ( !arr32 )
        {
            ret = -ENOMEM;
            rcu_unlock_domain(d);
            break;
        }

        ret = 0;
        for ( n = 0; n < num; )
        {
            int k = PAGE_SIZE / 4;
            if ( (num - n) < k )
                k = num - n;

            if ( copy_from_guest_offset(arr32,
                                        domctl->u.getpageframeinfo2.array,
                                        n, k) )
            {
                ret = -EFAULT;
                break;
            }

            for ( j = 0; j < k; j++ )
            {
                struct page_info *page;
                unsigned long mfn = arr32[j];

                page = mfn_to_page(mfn);

                ret = xsm_getpageframeinfo(page);
                if ( ret )
                    continue;

                if ( likely(mfn_valid(mfn) && get_page(page, d)) )
                {
                    unsigned long type = 0;

                    switch( page->u.inuse.type_info & PGT_type_mask )
                    {
                    case PGT_l1_page_table:
                        type = XEN_DOMCTL_PFINFO_L1TAB;
                        break;
                    case PGT_l2_page_table:
                        type = XEN_DOMCTL_PFINFO_L2TAB;
                        break;
                    case PGT_l3_page_table:
                        type = XEN_DOMCTL_PFINFO_L3TAB;
                        break;
                    case PGT_l4_page_table:
                        type = XEN_DOMCTL_PFINFO_L4TAB;
                        break;
                    }

                    if ( page->u.inuse.type_info & PGT_pinned )
                        type |= XEN_DOMCTL_PFINFO_LPINTAB;
                    arr32[j] |= type;
                    put_page(page);
                }
                else
                    arr32[j] |= XEN_DOMCTL_PFINFO_XTAB;

            }

            if ( copy_to_guest_offset(domctl->u.getpageframeinfo2.array,
                                      n, arr32, k) )
            {
                ret = -EFAULT;
                break;
            }

            n += k;
        }

        free_xenheap_page(arr32);

        rcu_unlock_domain(d);
    }
    break;
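
    /* Copy up to max_pfns MFNs from the domain's page list back to the caller. */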
    case XEN_DOMCTL_getmemlist:
    {
        int i;
        struct domain *d = rcu_lock_domain_by_id(domctl->domain);
        unsigned long max_pfns = domctl->u.getmemlist.max_pfns;
        uint64_t mfn;
        struct list_head *list_ent;

        ret = -EINVAL;
        if ( d != NULL )
        {
            ret = xsm_getmemlist(d);
            if ( ret )
            {
                rcu_unlock_domain(d);
                break;
            }

            spin_lock(&d->page_alloc_lock);

            if ( unlikely(d->is_dying) ) {
                spin_unlock(&d->page_alloc_lock);
                goto getmemlist_out;
            }

            ret = 0;
            list_ent = d->page_list.next;
            for ( i = 0; (i < max_pfns) && (list_ent != &d->page_list); i++ )
            {
                mfn = page_to_mfn(list_entry(
                    list_ent, struct page_info, list));
                if ( copy_to_guest_offset(domctl->u.getmemlist.buffer,
                                          i, &mfn, 1) )
                {
                    ret = -EFAULT;
                    break;
                }
                list_ent = mfn_to_page(mfn)->list.next;
            }

            spin_unlock(&d->page_alloc_lock);

            domctl->u.getmemlist.num_pfns = i;
            copy_to_guest(u_domctl, domctl, 1);
        getmemlist_out:
            rcu_unlock_domain(d);
        }
    }
    break;
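
    /* Install the hypercall transfer page in the guest frame named by gmfn. */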
    case XEN_DOMCTL_hypercall_init:
    {
        struct domain *d = rcu_lock_domain_by_id(domctl->domain);
        unsigned long gmfn = domctl->u.hypercall_init.gmfn;
        unsigned long mfn;
        void *hypercall_page;

        ret = -ESRCH;
        if ( unlikely(d == NULL) )
            break;

        ret = xsm_hypercall_init(d);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        mfn = gmfn_to_mfn(d, gmfn);

        ret = -EACCES;
        if ( !mfn_valid(mfn) ||
             !get_page_and_type(mfn_to_page(mfn), d, PGT_writable_page) )
        {
            rcu_unlock_domain(d);
            break;
        }

        ret = 0;

        hypercall_page = map_domain_page(mfn);
        hypercall_page_initialise(d, hypercall_page);
        unmap_domain_page(hypercall_page);

        put_page_and_type(mfn_to_page(mfn));

        rcu_unlock_domain(d);
    }
    break;
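
    /* Load a complete HVM architectural state image into the paused domain. */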
    case XEN_DOMCTL_sethvmcontext:
    {
        struct hvm_domain_context c;
        struct domain *d;

        c.cur = 0;
        c.size = domctl->u.hvmcontext.size;
        c.data = NULL;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
            break;

        ret = xsm_hvmcontext(d, domctl->cmd);
        if ( ret )
            goto sethvmcontext_out;

        ret = -EINVAL;
        if ( !is_hvm_domain(d) )
            goto sethvmcontext_out;

        ret = -ENOMEM;
        if ( (c.data = xmalloc_bytes(c.size)) == NULL )
            goto sethvmcontext_out;

        ret = -EFAULT;
        if ( copy_from_guest(c.data, domctl->u.hvmcontext.buffer, c.size) != 0)
            goto sethvmcontext_out;

        domain_pause(d);
        ret = hvm_load(d, &c);
        domain_unpause(d);

    sethvmcontext_out:
        if ( c.data != NULL )
            xfree(c.data);

        rcu_unlock_domain(d);
    }
    break;
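
    /*
     * Save the domain's HVM state. A NULL buffer is a pure size query;
     * otherwise the state is marshalled locally and copied to the caller.
     */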
    case XEN_DOMCTL_gethvmcontext:
    {
        struct hvm_domain_context c;
        struct domain *d;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
            break;

        ret = xsm_hvmcontext(d, domctl->cmd);
        if ( ret )
            goto gethvmcontext_out;

        ret = -EINVAL;
        if ( !is_hvm_domain(d) )
            goto gethvmcontext_out;

        c.cur = 0;
        c.size = hvm_save_size(d);
        c.data = NULL;

        if ( guest_handle_is_null(domctl->u.hvmcontext.buffer) )
        {
            /* Client is querying for the correct buffer size */
            domctl->u.hvmcontext.size = c.size;
            ret = 0;
            goto gethvmcontext_out;
        }

        /* Check that the client has a big enough buffer */
        ret = -ENOSPC;
        if ( domctl->u.hvmcontext.size < c.size )
            goto gethvmcontext_out;

        /* Allocate our own marshalling buffer */
        ret = -ENOMEM;
        if ( (c.data = xmalloc_bytes(c.size)) == NULL )
            goto gethvmcontext_out;

        domain_pause(d);
        ret = hvm_save(d, &c);
        domain_unpause(d);

        domctl->u.hvmcontext.size = c.cur;
        if ( copy_to_guest(domctl->u.hvmcontext.buffer, c.data, c.size) != 0 )
            ret = -EFAULT;

    gethvmcontext_out:
        if ( copy_to_guest(u_domctl, domctl, 1) )
            ret = -EFAULT;

        if ( c.data != NULL )
            xfree(c.data);

        rcu_unlock_domain(d);
    }
    break;
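
    /*
     * Set or query the guest address width: 32 selects the compat (32-on-64)
     * ABI, 64 the native one, where CONFIG_COMPAT makes that possible.
     */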
    case XEN_DOMCTL_set_address_size:
    {
        struct domain *d;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
            break;

        ret = xsm_address_size(d, domctl->cmd);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        switch ( domctl->u.address_size.size )
        {
#ifdef CONFIG_COMPAT
        case 32:
            ret = switch_compat(d);
            break;
        case 64:
            ret = switch_native(d);
            break;
#endif
        default:
            ret = (domctl->u.address_size.size == BITS_PER_LONG) ? 0 : -EINVAL;
            break;
        }

        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_get_address_size:
    {
        struct domain *d;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
            break;

        ret = xsm_address_size(d, domctl->cmd);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        domctl->u.address_size.size = BITS_PER_GUEST_LONG(d);

        ret = 0;
        rcu_unlock_domain(d);

        if ( copy_to_guest(u_domctl, domctl, 1) )
            ret = -EFAULT;
    }
    break;
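
    /*
     * Set or query the domain's machine address width; setting is only
     * permitted while the domain still owns no memory at all.
     */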
    case XEN_DOMCTL_set_machine_address_size:
    {
        struct domain *d;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
            break;

        ret = xsm_machine_address_size(d, domctl->cmd);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        ret = -EBUSY;
        if ( d->tot_pages > 0 )
            goto set_machine_address_size_out;

        d->arch.physaddr_bitsize = domctl->u.address_size.size;

        ret = 0;
    set_machine_address_size_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_get_machine_address_size:
    {
        struct domain *d;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
            break;

        ret = xsm_machine_address_size(d, domctl->cmd);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        domctl->u.address_size.size = d->arch.physaddr_bitsize;

        ret = 0;
        rcu_unlock_domain(d);

        if ( copy_to_guest(u_domctl, domctl, 1) )
            ret = -EFAULT;
    }
    break;
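
    /* Inject a trigger (currently only a virtual NMI) into a chosen VCPU. */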
    case XEN_DOMCTL_sendtrigger:
    {
        struct domain *d;
        struct vcpu *v;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
            break;

        ret = -EINVAL;
        if ( domctl->u.sendtrigger.vcpu >= MAX_VIRT_CPUS )
            goto sendtrigger_out;

        ret = -ESRCH;
        if ( (v = d->vcpu[domctl->u.sendtrigger.vcpu]) == NULL )
            goto sendtrigger_out;

        switch ( domctl->u.sendtrigger.trigger )
        {
        case XEN_DOMCTL_SENDTRIGGER_NMI:
        {
            ret = 0;
            if ( !test_and_set_bool(v->nmi_pending) )
                vcpu_kick(v);
        }
        break;

        default:
            ret = -ENOSYS;
        }

    sendtrigger_out:
        rcu_unlock_domain(d);
    }
    break;
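
    /*
     * PCI passthrough operations: device-group queries, (de)assignment of
     * devices to domains, and binding/unbinding of passed-through IRQs.
     * All of them depend on an enabled IOMMU.
     */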
    case XEN_DOMCTL_get_device_group:
    {
        struct domain *d;
        u32 max_sdevs;
        u8 bus, devfn;
        XEN_GUEST_HANDLE_64(uint32) sdevs;
        int num_sdevs;

        ret = -ENOSYS;
        if ( !iommu_enabled )
            break;

        ret = -EINVAL;
        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
            break;

        bus = (domctl->u.get_device_group.machine_bdf >> 16) & 0xff;
        devfn = (domctl->u.get_device_group.machine_bdf >> 8) & 0xff;
        max_sdevs = domctl->u.get_device_group.max_sdevs;
        sdevs = domctl->u.get_device_group.sdev_array;

        num_sdevs = iommu_get_device_group(d, bus, devfn, sdevs, max_sdevs);
        if ( num_sdevs < 0 )
        {
            dprintk(XENLOG_ERR, "iommu_get_device_group() failed!\n");
            ret = -EFAULT;
            domctl->u.get_device_group.num_sdevs = 0;
        }
        else
        {
            ret = 0;
            domctl->u.get_device_group.num_sdevs = num_sdevs;
        }
        if ( copy_to_guest(u_domctl, domctl, 1) )
            ret = -EFAULT;
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_test_assign_device:
    {
        u8 bus, devfn;

        ret = -ENOSYS;
        if ( !iommu_enabled )
            break;

        ret = -EINVAL;
        bus = (domctl->u.assign_device.machine_bdf >> 16) & 0xff;
        devfn = (domctl->u.assign_device.machine_bdf >> 8) & 0xff;

        if ( device_assigned(bus, devfn) )
        {
            gdprintk(XENLOG_ERR, "XEN_DOMCTL_test_assign_device: "
                     "%x:%x:%x already assigned, or non-existent\n",
                     bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
            break;
        }
        ret = 0;
    }
    break;

    case XEN_DOMCTL_assign_device:
    {
        struct domain *d;
        u8 bus, devfn;

        ret = -ENOSYS;
        if ( !iommu_enabled )
            break;

        ret = -EINVAL;
        if ( unlikely((d = get_domain_by_id(domctl->domain)) == NULL) )
        {
            gdprintk(XENLOG_ERR,
                "XEN_DOMCTL_assign_device: get_domain_by_id() failed\n");
            break;
        }
        bus = (domctl->u.assign_device.machine_bdf >> 16) & 0xff;
        devfn = (domctl->u.assign_device.machine_bdf >> 8) & 0xff;

        if ( !iommu_pv_enabled && !is_hvm_domain(d) )
        {
            ret = -ENOSYS;
            put_domain(d);
            break;
        }

        if ( device_assigned(bus, devfn) )
        {
            gdprintk(XENLOG_ERR, "XEN_DOMCTL_assign_device: "
                     "%x:%x:%x already assigned, or non-existent\n",
                     bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
            put_domain(d);
            break;
        }

        ret = assign_device(d, bus, devfn);
        if ( ret )
            gdprintk(XENLOG_ERR, "XEN_DOMCTL_assign_device: "
                     "assign device (%x:%x:%x) failed\n",
                     bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

        put_domain(d);
    }
    break;

    case XEN_DOMCTL_deassign_device:
    {
        struct domain *d;
        u8 bus, devfn;

        ret = -ENOSYS;
        if ( !iommu_enabled )
            break;

        ret = -EINVAL;
        if ( unlikely((d = get_domain_by_id(domctl->domain)) == NULL) )
        {
            gdprintk(XENLOG_ERR,
                "XEN_DOMCTL_deassign_device: get_domain_by_id() failed\n");
            break;
        }
        bus = (domctl->u.assign_device.machine_bdf >> 16) & 0xff;
        devfn = (domctl->u.assign_device.machine_bdf >> 8) & 0xff;

        if ( !iommu_pv_enabled && !is_hvm_domain(d) )
        {
            ret = -ENOSYS;
            put_domain(d);
            break;
        }

        if ( !device_assigned(bus, devfn) )
        {
            put_domain(d);
            break;
        }

        ret = 0;
        deassign_device(d, bus, devfn);
        gdprintk(XENLOG_INFO, "XEN_DOMCTL_deassign_device: bdf = %x:%x:%x\n",
            bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
        put_domain(d);
    }
    break;

    case XEN_DOMCTL_bind_pt_irq:
    {
        struct domain * d;
        xen_domctl_bind_pt_irq_t * bind;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
            break;
        bind = &(domctl->u.bind_pt_irq);
        if ( iommu_enabled )
            ret = pt_irq_create_bind_vtd(d, bind);
        if ( ret < 0 )
            gdprintk(XENLOG_ERR, "pt_irq_create_bind failed!\n");
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_unbind_pt_irq:
    {
        struct domain * d;
        xen_domctl_bind_pt_irq_t * bind;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
            break;
        bind = &(domctl->u.bind_pt_irq);
        if ( iommu_enabled )
            ret = pt_irq_destroy_bind_vtd(d, bind);
        if ( ret < 0 )
            gdprintk(XENLOG_ERR, "pt_irq_destroy_bind failed!\n");
        rcu_unlock_domain(d);
    }
    break;
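
    /* Map or unmap a contiguous range of machine frames (MMIO) into a guest. */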
    case XEN_DOMCTL_memory_mapping:
    {
        struct domain *d;
        unsigned long gfn = domctl->u.memory_mapping.first_gfn;
        unsigned long mfn = domctl->u.memory_mapping.first_mfn;
        unsigned long nr_mfns = domctl->u.memory_mapping.nr_mfns;
        int i;

        ret = -EINVAL;
        if ( (mfn + nr_mfns - 1) < mfn ) /* wrap? */
            break;

        ret = -ESRCH;
        if ( unlikely((d = rcu_lock_domain_by_id(domctl->domain)) == NULL) )
            break;

        ret = 0;
        if ( domctl->u.memory_mapping.add_mapping )
        {
            gdprintk(XENLOG_INFO,
                "memory_map:add: gfn=%lx mfn=%lx nr_mfns=%lx\n",
                gfn, mfn, nr_mfns);

            ret = iomem_permit_access(d, mfn, mfn + nr_mfns - 1);
            for ( i = 0; i < nr_mfns; i++ )
                set_mmio_p2m_entry(d, gfn+i, _mfn(mfn+i));
        }
        else
        {
            gdprintk(XENLOG_INFO,
                "memory_map:remove: gfn=%lx mfn=%lx nr_mfns=%lx\n",
                gfn, mfn, nr_mfns);

            for ( i = 0; i < nr_mfns; i++ )
                clear_mmio_p2m_entry(d, gfn+i);
            ret = iomem_deny_access(d, mfn, mfn + nr_mfns - 1);
        }

        rcu_unlock_domain(d);
    }
    break;
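
    /*
     * Map or unmap a range of machine I/O ports at a guest port range,
     * tracking the translation in the domain's g2m_ioport list.
     */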
    case XEN_DOMCTL_ioport_mapping:
    {
#define MAX_IOPORTS    0x10000
        struct domain *d;
        struct hvm_iommu *hd;
        unsigned int fgp = domctl->u.ioport_mapping.first_gport;
        unsigned int fmp = domctl->u.ioport_mapping.first_mport;
        unsigned int np = domctl->u.ioport_mapping.nr_ports;
        struct g2m_ioport *g2m_ioport;
        int found = 0;

        ret = -EINVAL;
        if ( (np == 0) || (fgp > MAX_IOPORTS) || (fmp > MAX_IOPORTS) ||
            ((fgp + np) > MAX_IOPORTS) || ((fmp + np) > MAX_IOPORTS) )
        {
            gdprintk(XENLOG_ERR,
                "ioport_map:invalid:gport=%x mport=%x nr_ports=%x\n",
                fgp, fmp, np);
            break;
        }

        ret = -ESRCH;
        if ( unlikely((d = rcu_lock_domain_by_id(domctl->domain)) == NULL) )
            break;

        hd = domain_hvm_iommu(d);
        if ( domctl->u.ioport_mapping.add_mapping )
        {
            gdprintk(XENLOG_INFO,
                "ioport_map:add f_gport=%x f_mport=%x np=%x\n",
                fgp, fmp, np);

            list_for_each_entry(g2m_ioport, &hd->g2m_ioport_list, list)
                if ( g2m_ioport->mport == fmp )
                {
                    g2m_ioport->gport = fgp;
                    g2m_ioport->np = np;
                    found = 1;
                    break;
                }
            if ( !found )
            {
                g2m_ioport = xmalloc(struct g2m_ioport);
                g2m_ioport->gport = fgp;
                g2m_ioport->mport = fmp;
                g2m_ioport->np = np;
                list_add_tail(&g2m_ioport->list, &hd->g2m_ioport_list);
            }
            ret = ioports_permit_access(d, fmp, fmp + np - 1);
        }
        else
        {
            gdprintk(XENLOG_INFO,
                "ioport_map:remove f_gport=%x f_mport=%x np=%x\n",
                fgp, fmp, np);
            list_for_each_entry(g2m_ioport, &hd->g2m_ioport_list, list)
                if ( g2m_ioport->mport == fmp )
                {
                    list_del(&g2m_ioport->list);
                    xfree(g2m_ioport);
                    break;
                }
            ret = ioports_deny_access(d, fmp, fmp + np - 1);
        }
        rcu_unlock_domain(d);
    }
    break;
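
    /* Pin a caching attribute onto a range of an HVM guest's physical memory. */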
    case XEN_DOMCTL_pin_mem_cacheattr:
    {
        struct domain *d;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(domctl->domain);
        if ( d == NULL )
            break;

        ret = hvm_set_mem_pinned_cacheattr(
            d, domctl->u.pin_mem_cacheattr.start,
            domctl->u.pin_mem_cacheattr.end,
            domctl->u.pin_mem_cacheattr.type);

        rcu_unlock_domain(d);
    }
    break;
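
    /*
     * Get or set the extended VCPU context: the SYSENTER and SYSCALL32
     * callback registration that the plain vcpu_guest_context omits
     * (only meaningful on 64-bit Xen).
     */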
    case XEN_DOMCTL_set_ext_vcpucontext:
    case XEN_DOMCTL_get_ext_vcpucontext:
    {
        struct xen_domctl_ext_vcpucontext *evc;
        struct domain *d;
        struct vcpu *v;

        evc = &domctl->u.ext_vcpucontext;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(domctl->domain);
        if ( d == NULL )
            break;

        ret = -ESRCH;
        if ( (evc->vcpu >= MAX_VIRT_CPUS) ||
             ((v = d->vcpu[evc->vcpu]) == NULL) )
            goto ext_vcpucontext_out;

        if ( domctl->cmd == XEN_DOMCTL_get_ext_vcpucontext )
        {
            evc->size = sizeof(*evc);
#ifdef __x86_64__
            evc->sysenter_callback_cs = v->arch.sysenter_callback_cs;
            evc->sysenter_callback_eip = v->arch.sysenter_callback_eip;
            evc->sysenter_disables_events = v->arch.sysenter_disables_events;
            evc->syscall32_callback_cs = v->arch.syscall32_callback_cs;
            evc->syscall32_callback_eip = v->arch.syscall32_callback_eip;
            evc->syscall32_disables_events = v->arch.syscall32_disables_events;
#else
            evc->sysenter_callback_cs = 0;
            evc->sysenter_callback_eip = 0;
            evc->sysenter_disables_events = 0;
            evc->syscall32_callback_cs = 0;
            evc->syscall32_callback_eip = 0;
            evc->syscall32_disables_events = 0;
#endif
        }
        else
        {
            ret = -EINVAL;
            if ( evc->size != sizeof(*evc) )
                goto ext_vcpucontext_out;
#ifdef __x86_64__
            fixup_guest_code_selector(d, evc->sysenter_callback_cs);
            v->arch.sysenter_callback_cs = evc->sysenter_callback_cs;
            v->arch.sysenter_callback_eip = evc->sysenter_callback_eip;
            v->arch.sysenter_disables_events = evc->sysenter_disables_events;
            fixup_guest_code_selector(d, evc->syscall32_callback_cs);
            v->arch.syscall32_callback_cs = evc->syscall32_callback_cs;
            v->arch.syscall32_callback_eip = evc->syscall32_callback_eip;
            v->arch.syscall32_disables_events = evc->syscall32_disables_events;
#else
            /* We do not support syscall/syscall32/sysenter on 32-bit Xen. */
            if ( (evc->sysenter_callback_cs & ~3) ||
                 evc->sysenter_callback_eip ||
                 (evc->syscall32_callback_cs & ~3) ||
                 evc->syscall32_callback_eip )
                goto ext_vcpucontext_out;
#endif
        }

        ret = 0;

    ext_vcpucontext_out:
        rcu_unlock_domain(d);
        if ( (domctl->cmd == XEN_DOMCTL_get_ext_vcpucontext) &&
             copy_to_guest(u_domctl, domctl, 1) )
            ret = -EFAULT;
    }
    break;
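
    /* Install one CPUID policy leaf, reusing a matching or unused slot. */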
    case XEN_DOMCTL_set_cpuid:
    {
        struct domain *d;
        xen_domctl_cpuid_t *ctl = &domctl->u.cpuid;
        cpuid_input_t *cpuid = NULL;
        int i;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(domctl->domain);
        if ( d == NULL )
            break;

        for ( i = 0; i < MAX_CPUID_INPUT; i++ )
        {
            cpuid = &d->arch.cpuids[i];

            if ( cpuid->input[0] == XEN_CPUID_INPUT_UNUSED )
                break;

            if ( (cpuid->input[0] == ctl->input[0]) &&
                 ((cpuid->input[1] == XEN_CPUID_INPUT_UNUSED) ||
                  (cpuid->input[1] == ctl->input[1])) )
                break;
        }

        if ( i == MAX_CPUID_INPUT )
        {
            ret = -ENOENT;
        }
        else
        {
            memcpy(cpuid, ctl, sizeof(cpuid_input_t));
            ret = 0;
        }

        rcu_unlock_domain(d);
    }
    break;

    default:
        ret = -ENOSYS;
        break;
    }

    return ret;
}
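
/*
 * Fill in a vcpu_guest_context (native or compat layout) with the current
 * architectural state of @v, for consumption by the toolstack.
 */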
void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c)
{
#ifdef CONFIG_COMPAT
#define c(fld) (!is_pv_32on64_domain(v->domain) ? (c.nat->fld) : (c.cmp->fld))
#else
#define c(fld) (c.nat->fld)
#endif

    if ( !is_pv_32on64_domain(v->domain) )
        memcpy(c.nat, &v->arch.guest_context, sizeof(*c.nat));
#ifdef CONFIG_COMPAT
    else
        XLAT_vcpu_guest_context(c.cmp, &v->arch.guest_context);
#endif

    c(flags &= ~(VGCF_i387_valid|VGCF_in_kernel));
    if ( v->fpu_initialised )
        c(flags |= VGCF_i387_valid);
    if ( !test_bit(_VPF_down, &v->pause_flags) )
        c(flags |= VGCF_online);

    if ( is_hvm_vcpu(v) )
    {
        memset(c.nat->ctrlreg, 0, sizeof(c.nat->ctrlreg));
        c.nat->ctrlreg[0] = v->arch.hvm_vcpu.guest_cr[0];
        c.nat->ctrlreg[2] = v->arch.hvm_vcpu.guest_cr[2];
        c.nat->ctrlreg[3] = v->arch.hvm_vcpu.guest_cr[3];
        c.nat->ctrlreg[4] = v->arch.hvm_vcpu.guest_cr[4];
    }
    else
    {
        /* IOPL privileges are virtualised: merge back into returned eflags. */
        BUG_ON((c(user_regs.eflags) & EF_IOPL) != 0);
        c(user_regs.eflags |= v->arch.iopl << 12);

        if ( !is_pv_32on64_domain(v->domain) )
        {
            c.nat->ctrlreg[3] = xen_pfn_to_cr3(
                pagetable_get_pfn(v->arch.guest_table));
#ifdef __x86_64__
            if ( !pagetable_is_null(v->arch.guest_table_user) )
                c.nat->ctrlreg[1] = xen_pfn_to_cr3(
                    pagetable_get_pfn(v->arch.guest_table_user));
#endif

            /* Merge shadow DR7 bits into real DR7. */
            c.nat->debugreg[7] |= c.nat->debugreg[5];
            c.nat->debugreg[5] = 0;
        }
#ifdef CONFIG_COMPAT
        else
        {
            l4_pgentry_t *l4e = __va(pagetable_get_paddr(v->arch.guest_table));
            c.cmp->ctrlreg[3] = compat_pfn_to_cr3(l4e_get_pfn(*l4e));

            /* Merge shadow DR7 bits into real DR7. */
            c.cmp->debugreg[7] |= c.cmp->debugreg[5];
            c.cmp->debugreg[5] = 0;
        }
#endif

        if ( guest_kernel_mode(v, &v->arch.guest_context.user_regs) )
            c(flags |= VGCF_in_kernel);
    }

    c(vm_assist = v->domain->vm_assist);
#undef c
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */