ia64/xen-unstable

xen/arch/x86/domctl.c @ 19311:e6b7b747d122

passthrough: fix some spinlock issues in vmsi

Besides improving efficiency, this urgently fixes an assertion failure.

- acquire pcidevs_lock before calling pt_irq_xxx_bind_vtd
- allocate msixtbl_entry beforehand
- check return value from domain_spin_lock_irq_desc()
- typo: spin_unlock(&irq_desc->lock) -> spin_unlock_irq(&irq_desc->lock)
- acquire msixtbl_list_lock with irq_disabled

Signed-off-by: Kouya Shimura <kouya@jp.fujitsu.com>
author Keir Fraser <keir.fraser@citrix.com>
date Wed Mar 11 10:09:21 2009 +0000 (2009-03-11)
parents 71af89e70fee
children 567d312e80ad
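
The pcidevs_lock change from the first bullet can be seen in the XEN_DOMCTL_bind_pt_irq and XEN_DOMCTL_unbind_pt_irq handlers in the file below. As a minimal sketch of that locking pattern, excerpted from the bind case (XSM check and the rcu_unlock_domain() exit path elided; the remaining bullets touch the vmsi/io passthrough code rather than this file):

    /* pcidevs_lock is taken before pt_irq_create_bind_vtd(), per the first
     * bullet above, so the vmsi binding path runs with the PCI device list
     * protected. */
    ret = -ESRCH;
    if ( iommu_enabled )
    {
        spin_lock(&pcidevs_lock);
        ret = pt_irq_create_bind_vtd(d, bind);
        spin_unlock(&pcidevs_lock);
    }
    if ( ret < 0 )
        gdprintk(XENLOG_ERR, "pt_irq_create_bind failed!\n");

The full file at this changeset follows.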
1 /******************************************************************************
2 * Arch-specific domctl.c
3 *
4 * Copyright (c) 2002-2006, K A Fraser
5 */
7 #include <xen/config.h>
8 #include <xen/types.h>
9 #include <xen/lib.h>
10 #include <xen/mm.h>
11 #include <xen/guest_access.h>
12 #include <xen/compat.h>
13 #include <xen/pci.h>
14 #include <public/domctl.h>
15 #include <xen/sched.h>
16 #include <xen/domain.h>
17 #include <xen/event.h>
18 #include <xen/domain_page.h>
19 #include <asm/msr.h>
20 #include <xen/trace.h>
21 #include <xen/console.h>
22 #include <xen/iocap.h>
23 #include <xen/paging.h>
24 #include <asm/irq.h>
25 #include <asm/hvm/hvm.h>
26 #include <asm/hvm/support.h>
27 #include <asm/hvm/cacheattr.h>
28 #include <asm/processor.h>
29 #include <xsm/xsm.h>
30 #include <xen/iommu.h>
32 long arch_do_domctl(
33 struct xen_domctl *domctl,
34 XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
35 {
36 long ret = 0;
38 switch ( domctl->cmd )
39 {
41 case XEN_DOMCTL_shadow_op:
42 {
43 struct domain *d;
44 ret = -ESRCH;
45 d = rcu_lock_domain_by_id(domctl->domain);
46 if ( d != NULL )
47 {
48 ret = paging_domctl(d,
49 &domctl->u.shadow_op,
50 guest_handle_cast(u_domctl, void));
51 rcu_unlock_domain(d);
52 copy_to_guest(u_domctl, domctl, 1);
53 }
54 }
55 break;
57 case XEN_DOMCTL_ioport_permission:
58 {
59 struct domain *d;
60 unsigned int fp = domctl->u.ioport_permission.first_port;
61 unsigned int np = domctl->u.ioport_permission.nr_ports;
63 ret = -EINVAL;
64 if ( (fp + np) > 65536 )
65 break;
67 ret = -ESRCH;
68 if ( unlikely((d = rcu_lock_domain_by_id(domctl->domain)) == NULL) )
69 break;
71 if ( np == 0 )
72 ret = 0;
73 else if ( domctl->u.ioport_permission.allow_access )
74 ret = ioports_permit_access(d, fp, fp + np - 1);
75 else
76 ret = ioports_deny_access(d, fp, fp + np - 1);
78 rcu_unlock_domain(d);
79 }
80 break;
82 case XEN_DOMCTL_getpageframeinfo:
83 {
84 struct page_info *page;
85 unsigned long mfn = domctl->u.getpageframeinfo.gmfn;
86 domid_t dom = domctl->domain;
87 struct domain *d;
89 ret = -EINVAL;
91 if ( unlikely(!mfn_valid(mfn)) ||
92 unlikely((d = rcu_lock_domain_by_id(dom)) == NULL) )
93 break;
95 page = mfn_to_page(mfn);
97 ret = xsm_getpageframeinfo(page);
98 if ( ret )
99 {
100 rcu_unlock_domain(d);
101 break;
102 }
104 if ( likely(get_page(page, d)) )
105 {
106 ret = 0;
108 domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_NOTAB;
110 if ( (page->u.inuse.type_info & PGT_count_mask) != 0 )
111 {
112 switch ( page->u.inuse.type_info & PGT_type_mask )
113 {
114 case PGT_l1_page_table:
115 domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_L1TAB;
116 break;
117 case PGT_l2_page_table:
118 domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_L2TAB;
119 break;
120 case PGT_l3_page_table:
121 domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_L3TAB;
122 break;
123 case PGT_l4_page_table:
124 domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_L4TAB;
125 break;
126 }
127 }
129 put_page(page);
130 }
132 rcu_unlock_domain(d);
134 copy_to_guest(u_domctl, domctl, 1);
135 }
136 break;
138 case XEN_DOMCTL_getpageframeinfo2:
139 {
140 int n,j;
141 int num = domctl->u.getpageframeinfo2.num;
142 domid_t dom = domctl->domain;
143 struct domain *d;
144 uint32_t *arr32;
145 ret = -ESRCH;
147 if ( unlikely((d = rcu_lock_domain_by_id(dom)) == NULL) )
148 break;
150 if ( unlikely(num > 1024) )
151 {
152 ret = -E2BIG;
153 rcu_unlock_domain(d);
154 break;
155 }
157 arr32 = alloc_xenheap_page();
158 if ( !arr32 )
159 {
160 ret = -ENOMEM;
161 put_domain(d);
162 break;
163 }
165 ret = 0;
166 for ( n = 0; n < num; )
167 {
168 int k = PAGE_SIZE / 4;
169 if ( (num - n) < k )
170 k = num - n;
172 if ( copy_from_guest_offset(arr32,
173 domctl->u.getpageframeinfo2.array,
174 n, k) )
175 {
176 ret = -EFAULT;
177 break;
178 }
180 for ( j = 0; j < k; j++ )
181 {
182 struct page_info *page;
183 unsigned long mfn = arr32[j];
185 page = mfn_to_page(mfn);
187 ret = xsm_getpageframeinfo(page);
188 if ( ret )
189 continue;
191 if ( likely(mfn_valid(mfn) && get_page(page, d)) )
192 {
193 unsigned long type = 0;
195 switch( page->u.inuse.type_info & PGT_type_mask )
196 {
197 case PGT_l1_page_table:
198 type = XEN_DOMCTL_PFINFO_L1TAB;
199 break;
200 case PGT_l2_page_table:
201 type = XEN_DOMCTL_PFINFO_L2TAB;
202 break;
203 case PGT_l3_page_table:
204 type = XEN_DOMCTL_PFINFO_L3TAB;
205 break;
206 case PGT_l4_page_table:
207 type = XEN_DOMCTL_PFINFO_L4TAB;
208 break;
209 }
211 if ( page->u.inuse.type_info & PGT_pinned )
212 type |= XEN_DOMCTL_PFINFO_LPINTAB;
213 arr32[j] |= type;
214 put_page(page);
215 }
216 else
217 arr32[j] |= XEN_DOMCTL_PFINFO_XTAB;
219 }
221 if ( copy_to_guest_offset(domctl->u.getpageframeinfo2.array,
222 n, arr32, k) )
223 {
224 ret = -EFAULT;
225 break;
226 }
228 n += k;
229 }
231 free_xenheap_page(arr32);
233 rcu_unlock_domain(d);
234 }
235 break;
237 case XEN_DOMCTL_getmemlist:
238 {
239 int i;
240 struct domain *d = rcu_lock_domain_by_id(domctl->domain);
241 unsigned long max_pfns = domctl->u.getmemlist.max_pfns;
242 uint64_t mfn;
243 struct page_info *page;
245 ret = -EINVAL;
246 if ( d != NULL )
247 {
248 ret = xsm_getmemlist(d);
249 if ( ret )
250 {
251 rcu_unlock_domain(d);
252 break;
253 }
255 spin_lock(&d->page_alloc_lock);
257 if ( unlikely(d->is_dying) ) {
258 spin_unlock(&d->page_alloc_lock);
259 goto getmemlist_out;
260 }
262 ret = i = 0;
263 page_list_for_each(page, &d->page_list)
264 {
265 if ( i >= max_pfns )
266 break;
267 mfn = page_to_mfn(page);
268 if ( copy_to_guest_offset(domctl->u.getmemlist.buffer,
269 i, &mfn, 1) )
270 {
271 ret = -EFAULT;
272 break;
273 }
274 ++i;
275 }
277 spin_unlock(&d->page_alloc_lock);
279 domctl->u.getmemlist.num_pfns = i;
280 copy_to_guest(u_domctl, domctl, 1);
281 getmemlist_out:
282 rcu_unlock_domain(d);
283 }
284 }
285 break;
287 case XEN_DOMCTL_hypercall_init:
288 {
289 struct domain *d = rcu_lock_domain_by_id(domctl->domain);
290 unsigned long gmfn = domctl->u.hypercall_init.gmfn;
291 unsigned long mfn;
292 void *hypercall_page;
294 ret = -ESRCH;
295 if ( unlikely(d == NULL) )
296 break;
298 ret = xsm_hypercall_init(d);
299 if ( ret )
300 {
301 rcu_unlock_domain(d);
302 break;
303 }
305 mfn = gmfn_to_mfn(d, gmfn);
307 ret = -EACCES;
308 if ( !mfn_valid(mfn) ||
309 !get_page_and_type(mfn_to_page(mfn), d, PGT_writable_page) )
310 {
311 rcu_unlock_domain(d);
312 break;
313 }
315 ret = 0;
317 hypercall_page = map_domain_page(mfn);
318 hypercall_page_initialise(d, hypercall_page);
319 unmap_domain_page(hypercall_page);
321 put_page_and_type(mfn_to_page(mfn));
323 rcu_unlock_domain(d);
324 }
325 break;
327 case XEN_DOMCTL_sethvmcontext:
328 {
329 struct hvm_domain_context c = { .size = domctl->u.hvmcontext.size };
330 struct domain *d;
332 ret = -ESRCH;
333 if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
334 break;
336 ret = xsm_hvmcontext(d, domctl->cmd);
337 if ( ret )
338 goto sethvmcontext_out;
340 ret = -EINVAL;
341 if ( !is_hvm_domain(d) )
342 goto sethvmcontext_out;
344 ret = -ENOMEM;
345 if ( (c.data = xmalloc_bytes(c.size)) == NULL )
346 goto sethvmcontext_out;
348 ret = -EFAULT;
349 if ( copy_from_guest(c.data, domctl->u.hvmcontext.buffer, c.size) != 0)
350 goto sethvmcontext_out;
352 domain_pause(d);
353 ret = hvm_load(d, &c);
354 domain_unpause(d);
356 sethvmcontext_out:
357 if ( c.data != NULL )
358 xfree(c.data);
360 rcu_unlock_domain(d);
361 }
362 break;
364 case XEN_DOMCTL_gethvmcontext:
365 {
366 struct hvm_domain_context c = { 0 };
367 struct domain *d;
369 ret = -ESRCH;
370 if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
371 break;
373 ret = xsm_hvmcontext(d, domctl->cmd);
374 if ( ret )
375 goto gethvmcontext_out;
377 ret = -EINVAL;
378 if ( !is_hvm_domain(d) )
379 goto gethvmcontext_out;
381 c.size = hvm_save_size(d);
383 if ( guest_handle_is_null(domctl->u.hvmcontext.buffer) )
384 {
385 /* Client is querying for the correct buffer size */
386 domctl->u.hvmcontext.size = c.size;
387 ret = 0;
388 goto gethvmcontext_out;
389 }
391 /* Check that the client has a big enough buffer */
392 ret = -ENOSPC;
393 if ( domctl->u.hvmcontext.size < c.size )
394 goto gethvmcontext_out;
396 /* Allocate our own marshalling buffer */
397 ret = -ENOMEM;
398 if ( (c.data = xmalloc_bytes(c.size)) == NULL )
399 goto gethvmcontext_out;
401 domain_pause(d);
402 ret = hvm_save(d, &c);
403 domain_unpause(d);
405 domctl->u.hvmcontext.size = c.cur;
406 if ( copy_to_guest(domctl->u.hvmcontext.buffer, c.data, c.size) != 0 )
407 ret = -EFAULT;
409 gethvmcontext_out:
410 if ( copy_to_guest(u_domctl, domctl, 1) )
411 ret = -EFAULT;
413 if ( c.data != NULL )
414 xfree(c.data);
416 rcu_unlock_domain(d);
417 }
418 break;
420 case XEN_DOMCTL_gethvmcontext_partial:
421 {
422 struct domain *d;
424 ret = -ESRCH;
425 if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
426 break;
428 ret = xsm_hvmcontext(d, domctl->cmd);
429 if ( ret )
430 goto gethvmcontext_partial_out;
432 ret = -EINVAL;
433 if ( !is_hvm_domain(d) )
434 goto gethvmcontext_partial_out;
436 domain_pause(d);
437 ret = hvm_save_one(d, domctl->u.hvmcontext_partial.type,
438 domctl->u.hvmcontext_partial.instance,
439 domctl->u.hvmcontext_partial.buffer);
440 domain_unpause(d);
442 gethvmcontext_partial_out:
443 rcu_unlock_domain(d);
444 }
445 break;
448 case XEN_DOMCTL_set_address_size:
449 {
450 struct domain *d;
452 ret = -ESRCH;
453 if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
454 break;
456 ret = xsm_address_size(d, domctl->cmd);
457 if ( ret )
458 {
459 rcu_unlock_domain(d);
460 break;
461 }
463 switch ( domctl->u.address_size.size )
464 {
465 #ifdef CONFIG_COMPAT
466 case 32:
467 ret = switch_compat(d);
468 break;
469 case 64:
470 ret = switch_native(d);
471 break;
472 #endif
473 default:
474 ret = (domctl->u.address_size.size == BITS_PER_LONG) ? 0 : -EINVAL;
475 break;
476 }
478 rcu_unlock_domain(d);
479 }
480 break;
482 case XEN_DOMCTL_get_address_size:
483 {
484 struct domain *d;
486 ret = -ESRCH;
487 if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
488 break;
490 ret = xsm_address_size(d, domctl->cmd);
491 if ( ret )
492 {
493 rcu_unlock_domain(d);
494 break;
495 }
497 domctl->u.address_size.size =
498 is_pv_32on64_domain(d) ? 32 : BITS_PER_LONG;
500 ret = 0;
501 rcu_unlock_domain(d);
503 if ( copy_to_guest(u_domctl, domctl, 1) )
504 ret = -EFAULT;
505 }
506 break;
508 case XEN_DOMCTL_set_machine_address_size:
509 {
510 struct domain *d;
512 ret = -ESRCH;
513 if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
514 break;
516 ret = xsm_machine_address_size(d, domctl->cmd);
517 if ( ret )
518 rcu_unlock_domain(d);
520 ret = -EBUSY;
521 if ( d->tot_pages > 0 )
522 goto set_machine_address_size_out;
524 d->arch.physaddr_bitsize = domctl->u.address_size.size;
526 ret = 0;
527 set_machine_address_size_out:
528 rcu_unlock_domain(d);
529 }
530 break;
532 case XEN_DOMCTL_get_machine_address_size:
533 {
534 struct domain *d;
536 ret = -ESRCH;
537 if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
538 break;
540 ret = xsm_machine_address_size(d, domctl->cmd);
541 if ( ret )
542 {
543 rcu_unlock_domain(d);
544 break;
545 }
547 domctl->u.address_size.size = d->arch.physaddr_bitsize;
549 ret = 0;
550 rcu_unlock_domain(d);
552 if ( copy_to_guest(u_domctl, domctl, 1) )
553 ret = -EFAULT;
556 }
557 break;
559 case XEN_DOMCTL_sendtrigger:
560 {
561 struct domain *d;
562 struct vcpu *v;
564 ret = -ESRCH;
565 if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
566 break;
568 ret = xsm_sendtrigger(d);
569 if ( ret )
570 goto sendtrigger_out;
572 ret = -EINVAL;
573 if ( domctl->u.sendtrigger.vcpu >= MAX_VIRT_CPUS )
574 goto sendtrigger_out;
576 ret = -ESRCH;
577 if ( (v = d->vcpu[domctl->u.sendtrigger.vcpu]) == NULL )
578 goto sendtrigger_out;
580 switch ( domctl->u.sendtrigger.trigger )
581 {
582 case XEN_DOMCTL_SENDTRIGGER_NMI:
583 {
584 ret = 0;
585 if ( !test_and_set_bool(v->nmi_pending) )
586 vcpu_kick(v);
587 }
588 break;
590 default:
591 ret = -ENOSYS;
592 }
594 sendtrigger_out:
595 rcu_unlock_domain(d);
596 }
597 break;
599 case XEN_DOMCTL_get_device_group:
600 {
601 struct domain *d;
602 u32 max_sdevs;
603 u8 bus, devfn;
604 XEN_GUEST_HANDLE_64(uint32) sdevs;
605 int num_sdevs;
607 ret = -ENOSYS;
608 if ( !iommu_enabled )
609 break;
611 ret = -EINVAL;
612 if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
613 break;
615 bus = (domctl->u.get_device_group.machine_bdf >> 16) & 0xff;
616 devfn = (domctl->u.get_device_group.machine_bdf >> 8) & 0xff;
617 max_sdevs = domctl->u.get_device_group.max_sdevs;
618 sdevs = domctl->u.get_device_group.sdev_array;
620 num_sdevs = iommu_get_device_group(d, bus, devfn, sdevs, max_sdevs);
621 if ( num_sdevs < 0 )
622 {
623 dprintk(XENLOG_ERR, "iommu_get_device_group() failed!\n");
624 ret = -EFAULT;
625 domctl->u.get_device_group.num_sdevs = 0;
626 }
627 else
628 {
629 ret = 0;
630 domctl->u.get_device_group.num_sdevs = num_sdevs;
631 }
632 if ( copy_to_guest(u_domctl, domctl, 1) )
633 ret = -EFAULT;
634 rcu_unlock_domain(d);
635 }
636 break;
638 case XEN_DOMCTL_test_assign_device:
639 {
640 u8 bus, devfn;
642 ret = -ENOSYS;
643 if ( !iommu_enabled )
644 break;
646 ret = xsm_test_assign_device(domctl->u.assign_device.machine_bdf);
647 if ( ret )
648 break;
650 ret = -EINVAL;
651 bus = (domctl->u.assign_device.machine_bdf >> 16) & 0xff;
652 devfn = (domctl->u.assign_device.machine_bdf >> 8) & 0xff;
654 if ( device_assigned(bus, devfn) )
655 {
656 gdprintk(XENLOG_ERR, "XEN_DOMCTL_test_assign_device: "
657 "%x:%x:%x already assigned, or non-existent\n",
658 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
659 break;
660 }
661 ret = 0;
662 }
663 break;
665 case XEN_DOMCTL_assign_device:
666 {
667 struct domain *d;
668 u8 bus, devfn;
670 ret = -ENOSYS;
671 if ( !iommu_enabled )
672 break;
674 ret = -EINVAL;
675 if ( unlikely((d = get_domain_by_id(domctl->domain)) == NULL) )
676 {
677 gdprintk(XENLOG_ERR,
678 "XEN_DOMCTL_assign_device: get_domain_by_id() failed\n");
679 break;
680 }
682 ret = xsm_assign_device(d, domctl->u.assign_device.machine_bdf);
683 if ( ret )
684 goto assign_device_out;
686 bus = (domctl->u.assign_device.machine_bdf >> 16) & 0xff;
687 devfn = (domctl->u.assign_device.machine_bdf >> 8) & 0xff;
689 if ( !iommu_pv_enabled && !is_hvm_domain(d) )
690 {
691 ret = -ENOSYS;
692 put_domain(d);
693 break;
694 }
696 ret = -EINVAL;
698 ret = assign_device(d, bus, devfn);
699 if ( ret )
700 gdprintk(XENLOG_ERR, "XEN_DOMCTL_assign_device: "
701 "assign device (%x:%x:%x) failed\n",
702 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
704 assign_device_out:
705 put_domain(d);
706 }
707 break;
709 case XEN_DOMCTL_deassign_device:
710 {
711 struct domain *d;
712 u8 bus, devfn;
714 ret = -ENOSYS;
715 if ( !iommu_enabled )
716 break;
718 ret = -EINVAL;
719 if ( unlikely((d = get_domain_by_id(domctl->domain)) == NULL) )
720 {
721 gdprintk(XENLOG_ERR,
722 "XEN_DOMCTL_deassign_device: get_domain_by_id() failed\n");
723 break;
724 }
726 ret = xsm_assign_device(d, domctl->u.assign_device.machine_bdf);
727 if ( ret )
728 goto deassign_device_out;
730 bus = (domctl->u.assign_device.machine_bdf >> 16) & 0xff;
731 devfn = (domctl->u.assign_device.machine_bdf >> 8) & 0xff;
733 if ( !iommu_pv_enabled && !is_hvm_domain(d) )
734 {
735 ret = -ENOSYS;
736 put_domain(d);
737 break;
738 }
739 ret = 0;
740 spin_lock(&pcidevs_lock);
741 ret = deassign_device(d, bus, devfn);
742 spin_unlock(&pcidevs_lock);
743 gdprintk(XENLOG_INFO, "XEN_DOMCTL_deassign_device: bdf = %x:%x:%x\n",
744 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
746 deassign_device_out:
747 put_domain(d);
748 }
749 break;
751 case XEN_DOMCTL_bind_pt_irq:
752 {
753 struct domain * d;
754 xen_domctl_bind_pt_irq_t * bind;
756 ret = -ESRCH;
757 if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
758 break;
759 bind = &(domctl->u.bind_pt_irq);
761 ret = xsm_bind_pt_irq(d, bind);
762 if ( ret )
763 goto bind_out;
765 ret = -ESRCH;
766 if ( iommu_enabled )
767 {
768 spin_lock(&pcidevs_lock);
769 ret = pt_irq_create_bind_vtd(d, bind);
770 spin_unlock(&pcidevs_lock);
771 }
772 if ( ret < 0 )
773 gdprintk(XENLOG_ERR, "pt_irq_create_bind failed!\n");
775 bind_out:
776 rcu_unlock_domain(d);
777 }
778 break;
780 case XEN_DOMCTL_unbind_pt_irq:
781 {
782 struct domain * d;
783 xen_domctl_bind_pt_irq_t * bind;
785 ret = -ESRCH;
786 if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
787 break;
788 bind = &(domctl->u.bind_pt_irq);
789 if ( iommu_enabled )
790 {
791 spin_lock(&pcidevs_lock);
792 ret = pt_irq_destroy_bind_vtd(d, bind);
793 spin_unlock(&pcidevs_lock);
794 }
795 if ( ret < 0 )
796 gdprintk(XENLOG_ERR, "pt_irq_destroy_bind failed!\n");
797 rcu_unlock_domain(d);
798 }
799 break;
801 case XEN_DOMCTL_memory_mapping:
802 {
803 struct domain *d;
804 unsigned long gfn = domctl->u.memory_mapping.first_gfn;
805 unsigned long mfn = domctl->u.memory_mapping.first_mfn;
806 unsigned long nr_mfns = domctl->u.memory_mapping.nr_mfns;
807 int i;
809 ret = -EINVAL;
810 if ( (mfn + nr_mfns - 1) < mfn ) /* wrap? */
811 break;
813 ret = -ESRCH;
814 if ( unlikely((d = rcu_lock_domain_by_id(domctl->domain)) == NULL) )
815 break;
817 ret=0;
818 if ( domctl->u.memory_mapping.add_mapping )
819 {
820 gdprintk(XENLOG_INFO,
821 "memory_map:add: gfn=%lx mfn=%lx nr_mfns=%lx\n",
822 gfn, mfn, nr_mfns);
824 ret = iomem_permit_access(d, mfn, mfn + nr_mfns - 1);
825 for ( i = 0; i < nr_mfns; i++ )
826 set_mmio_p2m_entry(d, gfn+i, _mfn(mfn+i));
827 }
828 else
829 {
830 gdprintk(XENLOG_INFO,
831 "memory_map:remove: gfn=%lx mfn=%lx nr_mfns=%lx\n",
832 gfn, mfn, nr_mfns);
834 for ( i = 0; i < nr_mfns; i++ )
835 clear_mmio_p2m_entry(d, gfn+i);
836 ret = iomem_deny_access(d, mfn, mfn + nr_mfns - 1);
837 }
839 rcu_unlock_domain(d);
840 }
841 break;
843 case XEN_DOMCTL_ioport_mapping:
844 {
845 #define MAX_IOPORTS 0x10000
846 struct domain *d;
847 struct hvm_iommu *hd;
848 unsigned int fgp = domctl->u.ioport_mapping.first_gport;
849 unsigned int fmp = domctl->u.ioport_mapping.first_mport;
850 unsigned int np = domctl->u.ioport_mapping.nr_ports;
851 struct g2m_ioport *g2m_ioport;
852 int found = 0;
854 ret = -EINVAL;
855 if ( (np == 0) || (fgp > MAX_IOPORTS) || (fmp > MAX_IOPORTS) ||
856 ((fgp + np) > MAX_IOPORTS) || ((fmp + np) > MAX_IOPORTS) )
857 {
858 gdprintk(XENLOG_ERR,
859 "ioport_map:invalid:gport=%x mport=%x nr_ports=%x\n",
860 fgp, fmp, np);
861 break;
862 }
864 ret = -ESRCH;
865 if ( unlikely((d = rcu_lock_domain_by_id(domctl->domain)) == NULL) )
866 break;
868 hd = domain_hvm_iommu(d);
869 if ( domctl->u.ioport_mapping.add_mapping )
870 {
871 gdprintk(XENLOG_INFO,
872 "ioport_map:add f_gport=%x f_mport=%x np=%x\n",
873 fgp, fmp, np);
875 list_for_each_entry(g2m_ioport, &hd->g2m_ioport_list, list)
876 if (g2m_ioport->mport == fmp )
877 {
878 g2m_ioport->gport = fgp;
879 g2m_ioport->np = np;
880 found = 1;
881 break;
882 }
883 if ( !found )
884 {
885 g2m_ioport = xmalloc(struct g2m_ioport);
886 g2m_ioport->gport = fgp;
887 g2m_ioport->mport = fmp;
888 g2m_ioport->np = np;
889 list_add_tail(&g2m_ioport->list, &hd->g2m_ioport_list);
890 }
891 ret = ioports_permit_access(d, fmp, fmp + np - 1);
892 }
893 else
894 {
895 gdprintk(XENLOG_INFO,
896 "ioport_map:remove f_gport=%x f_mport=%x np=%x\n",
897 fgp, fmp, np);
898 list_for_each_entry(g2m_ioport, &hd->g2m_ioport_list, list)
899 if ( g2m_ioport->mport == fmp )
900 {
901 list_del(&g2m_ioport->list);
902 xfree(g2m_ioport);
903 break;
904 }
905 ret = ioports_deny_access(d, fmp, fmp + np - 1);
906 }
907 rcu_unlock_domain(d);
908 }
909 break;
911 case XEN_DOMCTL_pin_mem_cacheattr:
912 {
913 struct domain *d;
915 ret = -ESRCH;
916 d = rcu_lock_domain_by_id(domctl->domain);
917 if ( d == NULL )
918 break;
920 ret = xsm_pin_mem_cacheattr(d);
921 if ( ret )
922 goto pin_out;
924 ret = hvm_set_mem_pinned_cacheattr(
925 d, domctl->u.pin_mem_cacheattr.start,
926 domctl->u.pin_mem_cacheattr.end,
927 domctl->u.pin_mem_cacheattr.type);
929 pin_out:
930 rcu_unlock_domain(d);
931 }
932 break;
934 case XEN_DOMCTL_set_ext_vcpucontext:
935 case XEN_DOMCTL_get_ext_vcpucontext:
936 {
937 struct xen_domctl_ext_vcpucontext *evc;
938 struct domain *d;
939 struct vcpu *v;
941 evc = &domctl->u.ext_vcpucontext;
943 ret = -ESRCH;
944 d = rcu_lock_domain_by_id(domctl->domain);
945 if ( d == NULL )
946 break;
948 ret = xsm_ext_vcpucontext(d, domctl->cmd);
949 if ( ret )
950 goto ext_vcpucontext_out;
952 ret = -ESRCH;
953 if ( (evc->vcpu >= MAX_VIRT_CPUS) ||
954 ((v = d->vcpu[evc->vcpu]) == NULL) )
955 goto ext_vcpucontext_out;
957 if ( domctl->cmd == XEN_DOMCTL_get_ext_vcpucontext )
958 {
959 evc->size = sizeof(*evc);
960 #ifdef __x86_64__
961 evc->sysenter_callback_cs = v->arch.sysenter_callback_cs;
962 evc->sysenter_callback_eip = v->arch.sysenter_callback_eip;
963 evc->sysenter_disables_events = v->arch.sysenter_disables_events;
964 evc->syscall32_callback_cs = v->arch.syscall32_callback_cs;
965 evc->syscall32_callback_eip = v->arch.syscall32_callback_eip;
966 evc->syscall32_disables_events = v->arch.syscall32_disables_events;
967 #else
968 evc->sysenter_callback_cs = 0;
969 evc->sysenter_callback_eip = 0;
970 evc->sysenter_disables_events = 0;
971 evc->syscall32_callback_cs = 0;
972 evc->syscall32_callback_eip = 0;
973 evc->syscall32_disables_events = 0;
974 #endif
975 }
976 else
977 {
978 ret = -EINVAL;
979 if ( evc->size != sizeof(*evc) )
980 goto ext_vcpucontext_out;
981 #ifdef __x86_64__
982 fixup_guest_code_selector(d, evc->sysenter_callback_cs);
983 v->arch.sysenter_callback_cs = evc->sysenter_callback_cs;
984 v->arch.sysenter_callback_eip = evc->sysenter_callback_eip;
985 v->arch.sysenter_disables_events = evc->sysenter_disables_events;
986 fixup_guest_code_selector(d, evc->syscall32_callback_cs);
987 v->arch.syscall32_callback_cs = evc->syscall32_callback_cs;
988 v->arch.syscall32_callback_eip = evc->syscall32_callback_eip;
989 v->arch.syscall32_disables_events = evc->syscall32_disables_events;
990 #else
991 /* We do not support syscall/syscall32/sysenter on 32-bit Xen. */
992 if ( (evc->sysenter_callback_cs & ~3) ||
993 evc->sysenter_callback_eip ||
994 (evc->syscall32_callback_cs & ~3) ||
995 evc->syscall32_callback_eip )
996 goto ext_vcpucontext_out;
997 #endif
998 }
1000 ret = 0;
1002 ext_vcpucontext_out:
1003 rcu_unlock_domain(d);
1004 if ( (domctl->cmd == XEN_DOMCTL_get_ext_vcpucontext) &&
1005 copy_to_guest(u_domctl, domctl, 1) )
1006 ret = -EFAULT;
1007 }
1008 break;
1010 case XEN_DOMCTL_set_cpuid:
1011 {
1012 struct domain *d;
1013 xen_domctl_cpuid_t *ctl = &domctl->u.cpuid;
1014 cpuid_input_t *cpuid = NULL;
1015 int i;
1017 ret = -ESRCH;
1018 d = rcu_lock_domain_by_id(domctl->domain);
1019 if ( d == NULL )
1020 break;
1022 for ( i = 0; i < MAX_CPUID_INPUT; i++ )
1023 {
1024 cpuid = &d->arch.cpuids[i];
1026 if ( cpuid->input[0] == XEN_CPUID_INPUT_UNUSED )
1027 break;
1029 if ( (cpuid->input[0] == ctl->input[0]) &&
1030 ((cpuid->input[1] == XEN_CPUID_INPUT_UNUSED) ||
1031 (cpuid->input[1] == ctl->input[1])) )
1032 break;
1033 }
1035 if ( i == MAX_CPUID_INPUT )
1036 {
1037 ret = -ENOENT;
1038 }
1039 else
1040 {
1041 memcpy(cpuid, ctl, sizeof(cpuid_input_t));
1042 ret = 0;
1043 }
1045 rcu_unlock_domain(d);
1046 }
1047 break;
1049 case XEN_DOMCTL_suppress_spurious_page_faults:
1050 {
1051 struct domain *d;
1053 ret = -ESRCH;
1054 d = rcu_lock_domain_by_id(domctl->domain);
1055 if ( d != NULL )
1056 {
1057 d->arch.suppress_spurious_page_faults = 1;
1058 rcu_unlock_domain(d);
1059 ret = 0;
1060 }
1061 }
1062 break;
1064 case XEN_DOMCTL_debug_op:
1065 {
1066 struct domain *d;
1067 struct vcpu *v;
1069 ret = -ESRCH;
1070 d = rcu_lock_domain_by_id(domctl->domain);
1071 if ( d == NULL )
1072 break;
1074 ret = -EINVAL;
1075 if ( (domctl->u.debug_op.vcpu >= MAX_VIRT_CPUS) ||
1076 ((v = d->vcpu[domctl->u.debug_op.vcpu]) == NULL) )
1077 goto debug_op_out;
1079 ret = -EINVAL;
1080 if ( !is_hvm_domain(d))
1081 goto debug_op_out;
1083 ret = hvm_debug_op(v, domctl->u.debug_op.op);
1085 debug_op_out:
1086 rcu_unlock_domain(d);
1087 }
1088 break;
1090 default:
1091 ret = -ENOSYS;
1092 break;
1093 }
1095 return ret;
1096 }
1098 void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c)
1099 {
1100 #ifdef CONFIG_COMPAT
1101 #define c(fld) (!is_pv_32on64_domain(v->domain) ? (c.nat->fld) : (c.cmp->fld))
1102 #else
1103 #define c(fld) (c.nat->fld)
1104 #endif
1106 if ( !is_pv_32on64_domain(v->domain) )
1107 memcpy(c.nat, &v->arch.guest_context, sizeof(*c.nat));
1108 #ifdef CONFIG_COMPAT
1109 else
1110 XLAT_vcpu_guest_context(c.cmp, &v->arch.guest_context);
1111 #endif
1113 c(flags &= ~(VGCF_i387_valid|VGCF_in_kernel));
1114 if ( v->fpu_initialised )
1115 c(flags |= VGCF_i387_valid);
1116 if ( !test_bit(_VPF_down, &v->pause_flags) )
1117 c(flags |= VGCF_online);
1119 if ( is_hvm_vcpu(v) )
1120 {
1121 struct segment_register sreg;
1122 memset(c.nat->ctrlreg, 0, sizeof(c.nat->ctrlreg));
1123 c.nat->ctrlreg[0] = v->arch.hvm_vcpu.guest_cr[0];
1124 c.nat->ctrlreg[2] = v->arch.hvm_vcpu.guest_cr[2];
1125 c.nat->ctrlreg[3] = v->arch.hvm_vcpu.guest_cr[3];
1126 c.nat->ctrlreg[4] = v->arch.hvm_vcpu.guest_cr[4];
1127 hvm_get_segment_register(v, x86_seg_cs, &sreg);
1128 c.nat->user_regs.cs = sreg.sel;
1129 hvm_get_segment_register(v, x86_seg_ss, &sreg);
1130 c.nat->user_regs.ss = sreg.sel;
1131 hvm_get_segment_register(v, x86_seg_ds, &sreg);
1132 c.nat->user_regs.ds = sreg.sel;
1133 hvm_get_segment_register(v, x86_seg_es, &sreg);
1134 c.nat->user_regs.es = sreg.sel;
1135 hvm_get_segment_register(v, x86_seg_fs, &sreg);
1136 c.nat->user_regs.fs = sreg.sel;
1137 hvm_get_segment_register(v, x86_seg_gs, &sreg);
1138 c.nat->user_regs.gs = sreg.sel;
1139 }
1140 else
1141 {
1142 /* IOPL privileges are virtualised: merge back into returned eflags. */
1143 BUG_ON((c(user_regs.eflags) & EF_IOPL) != 0);
1144 c(user_regs.eflags |= v->arch.iopl << 12);
1146 if ( !is_pv_32on64_domain(v->domain) )
1147 {
1148 c.nat->ctrlreg[3] = xen_pfn_to_cr3(
1149 pagetable_get_pfn(v->arch.guest_table));
1150 #ifdef __x86_64__
1151 if ( !pagetable_is_null(v->arch.guest_table_user) )
1152 c.nat->ctrlreg[1] = xen_pfn_to_cr3(
1153 pagetable_get_pfn(v->arch.guest_table_user));
1154 #endif
1156 /* Merge shadow DR7 bits into real DR7. */
1157 c.nat->debugreg[7] |= c.nat->debugreg[5];
1158 c.nat->debugreg[5] = 0;
1159 }
1160 #ifdef CONFIG_COMPAT
1161 else
1162 {
1163 l4_pgentry_t *l4e = __va(pagetable_get_paddr(v->arch.guest_table));
1164 c.cmp->ctrlreg[3] = compat_pfn_to_cr3(l4e_get_pfn(*l4e));
1166 /* Merge shadow DR7 bits into real DR7. */
1167 c.cmp->debugreg[7] |= c.cmp->debugreg[5];
1168 c.cmp->debugreg[5] = 0;
1169 }
1170 #endif
1171 }
1172 if ( guest_kernel_mode(v, &v->arch.guest_context.user_regs) )
1173 c(flags |= VGCF_in_kernel);
1176 c(vm_assist = v->domain->vm_assist);
1177 #undef c
1178 }
1180 /*
1181 * Local variables:
1182 * mode: C
1183 * c-set-style: "BSD"
1184 * c-basic-offset: 4
1185 * tab-width: 4
1186 * indent-tabs-mode: nil
1187 * End:
1188 */