xen/arch/x86/domctl.c @ 19835:edfdeb150f27

/******************************************************************************
 * Arch-specific domctl.c
 *
 * Copyright (c) 2002-2006, K A Fraser
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/lib.h>
#include <xen/mm.h>
#include <xen/guest_access.h>
#include <xen/compat.h>
#include <xen/pci.h>
#include <public/domctl.h>
#include <xen/sched.h>
#include <xen/domain.h>
#include <xen/event.h>
#include <xen/domain_page.h>
#include <asm/msr.h>
#include <xen/trace.h>
#include <xen/console.h>
#include <xen/iocap.h>
#include <xen/paging.h>
#include <asm/irq.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/support.h>
#include <asm/hvm/cacheattr.h>
#include <asm/processor.h>
#include <xsm/xsm.h>
#include <xen/iommu.h>

long arch_do_domctl(
    struct xen_domctl *domctl,
    XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
{
    long ret = 0;

    switch ( domctl->cmd )
    {

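    /* Shadow/HAP paging control: the work is done by paging_domctl(). */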
    case XEN_DOMCTL_shadow_op:
    {
        struct domain *d;
        ret = -ESRCH;
        d = rcu_lock_domain_by_id(domctl->domain);
        if ( d != NULL )
        {
            ret = paging_domctl(d,
                                &domctl->u.shadow_op,
                                guest_handle_cast(u_domctl, void));
            rcu_unlock_domain(d);
            copy_to_guest(u_domctl, domctl, 1);
        }
    }
    break;

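    /* Grant or revoke the domain's access to a range of x86 I/O ports. */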
    case XEN_DOMCTL_ioport_permission:
    {
        struct domain *d;
        unsigned int fp = domctl->u.ioport_permission.first_port;
        unsigned int np = domctl->u.ioport_permission.nr_ports;

        ret = -EINVAL;
        if ( (fp + np) > 65536 )
            break;

        ret = -ESRCH;
        if ( unlikely((d = rcu_lock_domain_by_id(domctl->domain)) == NULL) )
            break;

        if ( np == 0 )
            ret = 0;
        else if ( domctl->u.ioport_permission.allow_access )
            ret = ioports_permit_access(d, fp, fp + np - 1);
        else
            ret = ioports_deny_access(d, fp, fp + np - 1);

        rcu_unlock_domain(d);
    }
    break;

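    /* Report whether one machine frame is in use as an L1..L4 page table. */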
    case XEN_DOMCTL_getpageframeinfo:
    {
        struct page_info *page;
        unsigned long mfn = domctl->u.getpageframeinfo.gmfn;
        domid_t dom = domctl->domain;
        struct domain *d;

        ret = -EINVAL;

        if ( unlikely(!mfn_valid(mfn)) ||
             unlikely((d = rcu_lock_domain_by_id(dom)) == NULL) )
            break;

        page = mfn_to_page(mfn);

        ret = xsm_getpageframeinfo(page);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        if ( likely(get_page(page, d)) )
        {
            ret = 0;

            domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_NOTAB;

            if ( (page->u.inuse.type_info & PGT_count_mask) != 0 )
            {
                switch ( page->u.inuse.type_info & PGT_type_mask )
                {
                case PGT_l1_page_table:
                    domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_L1TAB;
                    break;
                case PGT_l2_page_table:
                    domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_L2TAB;
                    break;
                case PGT_l3_page_table:
                    domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_L3TAB;
                    break;
                case PGT_l4_page_table:
                    domctl->u.getpageframeinfo.type = XEN_DOMCTL_PFINFO_L4TAB;
                    break;
                }
            }

            put_page(page);
        }

        rcu_unlock_domain(d);

        copy_to_guest(u_domctl, domctl, 1);
    }
    break;

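    /* Batched page-type query: classify up to 1024 frames per call. */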
    case XEN_DOMCTL_getpageframeinfo2:
    {
        int n, j;
        int num = domctl->u.getpageframeinfo2.num;
        domid_t dom = domctl->domain;
        struct domain *d;
        uint32_t *arr32;
        ret = -ESRCH;

        if ( unlikely((d = rcu_lock_domain_by_id(dom)) == NULL) )
            break;

        if ( unlikely(num > 1024) )
        {
            ret = -E2BIG;
            rcu_unlock_domain(d);
            break;
        }

        arr32 = alloc_xenheap_page();
        if ( !arr32 )
        {
            ret = -ENOMEM;
            put_domain(d);
            break;
        }

        ret = 0;
        for ( n = 0; n < num; )
        {
            int k = PAGE_SIZE / 4;
            if ( (num - n) < k )
                k = num - n;

            if ( copy_from_guest_offset(arr32,
                                        domctl->u.getpageframeinfo2.array,
                                        n, k) )
            {
                ret = -EFAULT;
                break;
            }

            for ( j = 0; j < k; j++ )
            {
                struct page_info *page;
                unsigned long mfn = arr32[j];

                page = mfn_to_page(mfn);

                ret = xsm_getpageframeinfo(page);
                if ( ret )
                    continue;

                if ( likely(mfn_valid(mfn) && get_page(page, d)) )
                {
                    unsigned long type = 0;

                    switch ( page->u.inuse.type_info & PGT_type_mask )
                    {
                    case PGT_l1_page_table:
                        type = XEN_DOMCTL_PFINFO_L1TAB;
                        break;
                    case PGT_l2_page_table:
                        type = XEN_DOMCTL_PFINFO_L2TAB;
                        break;
                    case PGT_l3_page_table:
                        type = XEN_DOMCTL_PFINFO_L3TAB;
                        break;
                    case PGT_l4_page_table:
                        type = XEN_DOMCTL_PFINFO_L4TAB;
                        break;
                    }

                    if ( page->u.inuse.type_info & PGT_pinned )
                        type |= XEN_DOMCTL_PFINFO_LPINTAB;
                    arr32[j] |= type;
                    put_page(page);
                }
                else
                    arr32[j] |= XEN_DOMCTL_PFINFO_XTAB;

            }

            if ( copy_to_guest_offset(domctl->u.getpageframeinfo2.array,
                                      n, arr32, k) )
            {
                ret = -EFAULT;
                break;
            }

            n += k;
        }

        free_xenheap_page(arr32);

        rcu_unlock_domain(d);
    }
    break;

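    /* Copy up to max_pfns of the domain's machine frame numbers to the caller. */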
    case XEN_DOMCTL_getmemlist:
    {
        int i;
        struct domain *d = rcu_lock_domain_by_id(domctl->domain);
        unsigned long max_pfns = domctl->u.getmemlist.max_pfns;
        uint64_t mfn;
        struct page_info *page;

        ret = -EINVAL;
        if ( d != NULL )
        {
            ret = xsm_getmemlist(d);
            if ( ret )
            {
                rcu_unlock_domain(d);
                break;
            }

            spin_lock(&d->page_alloc_lock);

            if ( unlikely(d->is_dying) ) {
                spin_unlock(&d->page_alloc_lock);
                goto getmemlist_out;
            }

            ret = i = 0;
            page_list_for_each(page, &d->page_list)
            {
                if ( i >= max_pfns )
                    break;
                mfn = page_to_mfn(page);
                if ( copy_to_guest_offset(domctl->u.getmemlist.buffer,
                                          i, &mfn, 1) )
                {
                    ret = -EFAULT;
                    break;
                }
                ++i;
            }

            spin_unlock(&d->page_alloc_lock);

            domctl->u.getmemlist.num_pfns = i;
            copy_to_guest(u_domctl, domctl, 1);
        getmemlist_out:
            rcu_unlock_domain(d);
        }
    }
    break;

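    /* Fill the nominated guest frame with the hypercall trampoline page. */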
    case XEN_DOMCTL_hypercall_init:
    {
        struct domain *d = rcu_lock_domain_by_id(domctl->domain);
        unsigned long gmfn = domctl->u.hypercall_init.gmfn;
        unsigned long mfn;
        void *hypercall_page;

        ret = -ESRCH;
        if ( unlikely(d == NULL) )
            break;

        ret = xsm_hypercall_init(d);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        mfn = gmfn_to_mfn(d, gmfn);

        ret = -EACCES;
        if ( !mfn_valid(mfn) ||
             !get_page_and_type(mfn_to_page(mfn), d, PGT_writable_page) )
        {
            rcu_unlock_domain(d);
            break;
        }

        ret = 0;

        hypercall_page = map_domain_page(mfn);
        hypercall_page_initialise(d, hypercall_page);
        unmap_domain_page(hypercall_page);

        put_page_and_type(mfn_to_page(mfn));

        rcu_unlock_domain(d);
    }
    break;

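    /* Load a complete HVM save image into a (temporarily paused) domain. */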
    case XEN_DOMCTL_sethvmcontext:
    {
        struct hvm_domain_context c = { .size = domctl->u.hvmcontext.size };
        struct domain *d;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
            break;

        ret = xsm_hvmcontext(d, domctl->cmd);
        if ( ret )
            goto sethvmcontext_out;

        ret = -EINVAL;
        if ( !is_hvm_domain(d) )
            goto sethvmcontext_out;

        ret = -ENOMEM;
        if ( (c.data = xmalloc_bytes(c.size)) == NULL )
            goto sethvmcontext_out;

        ret = -EFAULT;
        if ( copy_from_guest(c.data, domctl->u.hvmcontext.buffer, c.size) != 0 )
            goto sethvmcontext_out;

        domain_pause(d);
        ret = hvm_load(d, &c);
        domain_unpause(d);

    sethvmcontext_out:
        if ( c.data != NULL )
            xfree(c.data);

        rcu_unlock_domain(d);
    }
    break;

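    /* Save the full HVM state; a NULL buffer only queries the required size. */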
    case XEN_DOMCTL_gethvmcontext:
    {
        struct hvm_domain_context c = { 0 };
        struct domain *d;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
            break;

        ret = xsm_hvmcontext(d, domctl->cmd);
        if ( ret )
            goto gethvmcontext_out;

        ret = -EINVAL;
        if ( !is_hvm_domain(d) )
            goto gethvmcontext_out;

        c.size = hvm_save_size(d);

        if ( guest_handle_is_null(domctl->u.hvmcontext.buffer) )
        {
            /* Client is querying for the correct buffer size */
            domctl->u.hvmcontext.size = c.size;
            ret = 0;
            goto gethvmcontext_out;
        }

        /* Check that the client has a big enough buffer */
        ret = -ENOSPC;
        if ( domctl->u.hvmcontext.size < c.size )
            goto gethvmcontext_out;

        /* Allocate our own marshalling buffer */
        ret = -ENOMEM;
        if ( (c.data = xmalloc_bytes(c.size)) == NULL )
            goto gethvmcontext_out;

        domain_pause(d);
        ret = hvm_save(d, &c);
        domain_unpause(d);

        domctl->u.hvmcontext.size = c.cur;
        if ( copy_to_guest(domctl->u.hvmcontext.buffer, c.data, c.size) != 0 )
            ret = -EFAULT;

    gethvmcontext_out:
        if ( copy_to_guest(u_domctl, domctl, 1) )
            ret = -EFAULT;

        if ( c.data != NULL )
            xfree(c.data);

        rcu_unlock_domain(d);
    }
    break;

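    /* Save a single HVM state record, selected by type and instance. */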
    case XEN_DOMCTL_gethvmcontext_partial:
    {
        struct domain *d;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
            break;

        ret = xsm_hvmcontext(d, domctl->cmd);
        if ( ret )
            goto gethvmcontext_partial_out;

        ret = -EINVAL;
        if ( !is_hvm_domain(d) )
            goto gethvmcontext_partial_out;

        domain_pause(d);
        ret = hvm_save_one(d, domctl->u.hvmcontext_partial.type,
                           domctl->u.hvmcontext_partial.instance,
                           domctl->u.hvmcontext_partial.buffer);
        domain_unpause(d);

    gethvmcontext_partial_out:
        rcu_unlock_domain(d);
    }
    break;

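    /* Switch a PV guest between 32-bit (compat) and native 64-bit mode. */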
    case XEN_DOMCTL_set_address_size:
    {
        struct domain *d;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
            break;

        ret = xsm_address_size(d, domctl->cmd);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        switch ( domctl->u.address_size.size )
        {
#ifdef CONFIG_COMPAT
        case 32:
            ret = switch_compat(d);
            break;
        case 64:
            ret = switch_native(d);
            break;
#endif
        default:
            ret = (domctl->u.address_size.size == BITS_PER_LONG) ? 0 : -EINVAL;
            break;
        }

        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_get_address_size:
    {
        struct domain *d;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
            break;

        ret = xsm_address_size(d, domctl->cmd);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        domctl->u.address_size.size =
            is_pv_32on64_domain(d) ? 32 : BITS_PER_LONG;

        ret = 0;
        rcu_unlock_domain(d);

        if ( copy_to_guest(u_domctl, domctl, 1) )
            ret = -EFAULT;
    }
    break;

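    /* Record the machine address width usable by the domain; only allowed while it owns no memory. */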
    case XEN_DOMCTL_set_machine_address_size:
    {
        struct domain *d;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
            break;

        ret = xsm_machine_address_size(d, domctl->cmd);
        if ( ret )
            rcu_unlock_domain(d);

        ret = -EBUSY;
        if ( d->tot_pages > 0 )
            goto set_machine_address_size_out;

        d->arch.physaddr_bitsize = domctl->u.address_size.size;

        ret = 0;
    set_machine_address_size_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_get_machine_address_size:
    {
        struct domain *d;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
            break;

        ret = xsm_machine_address_size(d, domctl->cmd);
        if ( ret )
        {
            rcu_unlock_domain(d);
            break;
        }

        domctl->u.address_size.size = d->arch.physaddr_bitsize;

        ret = 0;
        rcu_unlock_domain(d);

        if ( copy_to_guest(u_domctl, domctl, 1) )
            ret = -EFAULT;

    }
    break;

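    /* Inject an asynchronous event (NMI, or ACPI power button for HVM) into the domain. */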
    case XEN_DOMCTL_sendtrigger:
    {
        struct domain *d;
        struct vcpu *v;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
            break;

        ret = xsm_sendtrigger(d);
        if ( ret )
            goto sendtrigger_out;

        ret = -EINVAL;
        if ( domctl->u.sendtrigger.vcpu >= MAX_VIRT_CPUS )
            goto sendtrigger_out;

        ret = -ESRCH;
        if ( domctl->u.sendtrigger.vcpu >= d->max_vcpus ||
             (v = d->vcpu[domctl->u.sendtrigger.vcpu]) == NULL )
            goto sendtrigger_out;

        switch ( domctl->u.sendtrigger.trigger )
        {
        case XEN_DOMCTL_SENDTRIGGER_NMI:
        {
            ret = 0;
            if ( !test_and_set_bool(v->nmi_pending) )
                vcpu_kick(v);
        }
        break;

        case XEN_DOMCTL_SENDTRIGGER_POWER:
        {
            extern void hvm_acpi_power_button(struct domain *d);

            ret = -EINVAL;
            if ( is_hvm_domain(d) )
            {
                ret = 0;
                hvm_acpi_power_button(d);
            }
        }
        break;

        default:
            ret = -ENOSYS;
        }

    sendtrigger_out:
        rcu_unlock_domain(d);
    }
    break;

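    /* List the PCI devices that share an IOMMU assignment group with the given device. */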
    case XEN_DOMCTL_get_device_group:
    {
        struct domain *d;
        u32 max_sdevs;
        u8 bus, devfn;
        XEN_GUEST_HANDLE_64(uint32) sdevs;
        int num_sdevs;

        ret = -ENOSYS;
        if ( !iommu_enabled )
            break;

        ret = -EINVAL;
        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
            break;

        bus = (domctl->u.get_device_group.machine_bdf >> 16) & 0xff;
        devfn = (domctl->u.get_device_group.machine_bdf >> 8) & 0xff;
        max_sdevs = domctl->u.get_device_group.max_sdevs;
        sdevs = domctl->u.get_device_group.sdev_array;

        num_sdevs = iommu_get_device_group(d, bus, devfn, sdevs, max_sdevs);
        if ( num_sdevs < 0 )
        {
            dprintk(XENLOG_ERR, "iommu_get_device_group() failed!\n");
            ret = -EFAULT;
            domctl->u.get_device_group.num_sdevs = 0;
        }
        else
        {
            ret = 0;
            domctl->u.get_device_group.num_sdevs = num_sdevs;
        }
        if ( copy_to_guest(u_domctl, domctl, 1) )
            ret = -EFAULT;
        rcu_unlock_domain(d);
    }
    break;

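    /* Check whether a PCI device exists and is still free to be assigned. */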
    case XEN_DOMCTL_test_assign_device:
    {
        u8 bus, devfn;

        ret = -ENOSYS;
        if ( !iommu_enabled )
            break;

        ret = xsm_test_assign_device(domctl->u.assign_device.machine_bdf);
        if ( ret )
            break;

        ret = -EINVAL;
        bus = (domctl->u.assign_device.machine_bdf >> 16) & 0xff;
        devfn = (domctl->u.assign_device.machine_bdf >> 8) & 0xff;

        if ( device_assigned(bus, devfn) )
        {
            gdprintk(XENLOG_ERR, "XEN_DOMCTL_test_assign_device: "
                     "%x:%x:%x already assigned, or non-existent\n",
                     bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
            break;
        }
        ret = 0;
    }
    break;

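    /* Pass a PCI device through to the domain via the IOMMU. */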
    case XEN_DOMCTL_assign_device:
    {
        struct domain *d;
        u8 bus, devfn;

        ret = -ENOSYS;
        if ( !iommu_enabled )
            break;

        ret = -EINVAL;
        if ( unlikely((d = get_domain_by_id(domctl->domain)) == NULL) )
        {
            gdprintk(XENLOG_ERR,
                     "XEN_DOMCTL_assign_device: get_domain_by_id() failed\n");
            break;
        }

        ret = xsm_assign_device(d, domctl->u.assign_device.machine_bdf);
        if ( ret )
            goto assign_device_out;

        bus = (domctl->u.assign_device.machine_bdf >> 16) & 0xff;
        devfn = (domctl->u.assign_device.machine_bdf >> 8) & 0xff;

        if ( !iommu_pv_enabled && !is_hvm_domain(d) )
        {
            ret = -ENOSYS;
            put_domain(d);
            break;
        }

        ret = -EINVAL;

        ret = assign_device(d, bus, devfn);
        if ( ret )
            gdprintk(XENLOG_ERR, "XEN_DOMCTL_assign_device: "
                     "assign device (%x:%x:%x) failed\n",
                     bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

    assign_device_out:
        put_domain(d);
    }
    break;

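    /* Remove a passed-through PCI device from the domain. */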
    case XEN_DOMCTL_deassign_device:
    {
        struct domain *d;
        u8 bus, devfn;

        ret = -ENOSYS;
        if ( !iommu_enabled )
            break;

        ret = -EINVAL;
        if ( unlikely((d = get_domain_by_id(domctl->domain)) == NULL) )
        {
            gdprintk(XENLOG_ERR,
                     "XEN_DOMCTL_deassign_device: get_domain_by_id() failed\n");
            break;
        }

        ret = xsm_assign_device(d, domctl->u.assign_device.machine_bdf);
        if ( ret )
            goto deassign_device_out;

        bus = (domctl->u.assign_device.machine_bdf >> 16) & 0xff;
        devfn = (domctl->u.assign_device.machine_bdf >> 8) & 0xff;

        if ( !iommu_pv_enabled && !is_hvm_domain(d) )
        {
            ret = -ENOSYS;
            put_domain(d);
            break;
        }
        ret = 0;
        spin_lock(&pcidevs_lock);
        ret = deassign_device(d, bus, devfn);
        spin_unlock(&pcidevs_lock);
        gdprintk(XENLOG_INFO, "XEN_DOMCTL_deassign_device: bdf = %x:%x:%x\n",
                 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

    deassign_device_out:
        put_domain(d);
    }
    break;

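    /* Bind a passed-through device interrupt to the guest. */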
    case XEN_DOMCTL_bind_pt_irq:
    {
        struct domain *d;
        xen_domctl_bind_pt_irq_t *bind;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
            break;
        bind = &(domctl->u.bind_pt_irq);

        ret = xsm_bind_pt_irq(d, bind);
        if ( ret )
            goto bind_out;

        ret = -ESRCH;
        if ( iommu_enabled )
        {
            spin_lock(&pcidevs_lock);
            ret = pt_irq_create_bind_vtd(d, bind);
            spin_unlock(&pcidevs_lock);
        }
        if ( ret < 0 )
            gdprintk(XENLOG_ERR, "pt_irq_create_bind failed!\n");

    bind_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_unbind_pt_irq:
    {
        struct domain *d;
        xen_domctl_bind_pt_irq_t *bind;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL )
            break;
        bind = &(domctl->u.bind_pt_irq);
        if ( iommu_enabled )
        {
            spin_lock(&pcidevs_lock);
            ret = pt_irq_destroy_bind_vtd(d, bind);
            spin_unlock(&pcidevs_lock);
        }
        if ( ret < 0 )
            gdprintk(XENLOG_ERR, "pt_irq_destroy_bind failed!\n");
        rcu_unlock_domain(d);
    }
    break;

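    /* Add or remove a direct MMIO mapping (guest frame -> machine frame range). */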
    case XEN_DOMCTL_memory_mapping:
    {
        struct domain *d;
        unsigned long gfn = domctl->u.memory_mapping.first_gfn;
        unsigned long mfn = domctl->u.memory_mapping.first_mfn;
        unsigned long nr_mfns = domctl->u.memory_mapping.nr_mfns;
        int i;

        ret = -EINVAL;
        if ( (mfn + nr_mfns - 1) < mfn ) /* wrap? */
            break;

        ret = -ESRCH;
        if ( unlikely((d = rcu_lock_domain_by_id(domctl->domain)) == NULL) )
            break;

        ret = 0;
        if ( domctl->u.memory_mapping.add_mapping )
        {
            gdprintk(XENLOG_INFO,
                     "memory_map:add: gfn=%lx mfn=%lx nr_mfns=%lx\n",
                     gfn, mfn, nr_mfns);

            ret = iomem_permit_access(d, mfn, mfn + nr_mfns - 1);
            for ( i = 0; i < nr_mfns; i++ )
                set_mmio_p2m_entry(d, gfn+i, _mfn(mfn+i));
        }
        else
        {
            gdprintk(XENLOG_INFO,
                     "memory_map:remove: gfn=%lx mfn=%lx nr_mfns=%lx\n",
                     gfn, mfn, nr_mfns);

            for ( i = 0; i < nr_mfns; i++ )
                clear_mmio_p2m_entry(d, gfn+i);
            ret = iomem_deny_access(d, mfn, mfn + nr_mfns - 1);
        }

        rcu_unlock_domain(d);
    }
    break;

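    /* Map or unmap a range of guest I/O ports onto machine I/O ports. */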
    case XEN_DOMCTL_ioport_mapping:
    {
#define MAX_IOPORTS 0x10000
        struct domain *d;
        struct hvm_iommu *hd;
        unsigned int fgp = domctl->u.ioport_mapping.first_gport;
        unsigned int fmp = domctl->u.ioport_mapping.first_mport;
        unsigned int np = domctl->u.ioport_mapping.nr_ports;
        struct g2m_ioport *g2m_ioport;
        int found = 0;

        ret = -EINVAL;
        if ( (np == 0) || (fgp > MAX_IOPORTS) || (fmp > MAX_IOPORTS) ||
             ((fgp + np) > MAX_IOPORTS) || ((fmp + np) > MAX_IOPORTS) )
        {
            gdprintk(XENLOG_ERR,
                     "ioport_map:invalid:gport=%x mport=%x nr_ports=%x\n",
                     fgp, fmp, np);
            break;
        }

        ret = -ESRCH;
        if ( unlikely((d = rcu_lock_domain_by_id(domctl->domain)) == NULL) )
            break;

        hd = domain_hvm_iommu(d);
        if ( domctl->u.ioport_mapping.add_mapping )
        {
            gdprintk(XENLOG_INFO,
                     "ioport_map:add f_gport=%x f_mport=%x np=%x\n",
                     fgp, fmp, np);

            list_for_each_entry(g2m_ioport, &hd->g2m_ioport_list, list)
                if ( g2m_ioport->mport == fmp )
                {
                    g2m_ioport->gport = fgp;
                    g2m_ioport->np = np;
                    found = 1;
                    break;
                }
            if ( !found )
            {
                g2m_ioport = xmalloc(struct g2m_ioport);
                g2m_ioport->gport = fgp;
                g2m_ioport->mport = fmp;
                g2m_ioport->np = np;
                list_add_tail(&g2m_ioport->list, &hd->g2m_ioport_list);
            }
            ret = ioports_permit_access(d, fmp, fmp + np - 1);
        }
        else
        {
            gdprintk(XENLOG_INFO,
                     "ioport_map:remove f_gport=%x f_mport=%x np=%x\n",
                     fgp, fmp, np);
            list_for_each_entry(g2m_ioport, &hd->g2m_ioport_list, list)
                if ( g2m_ioport->mport == fmp )
                {
                    list_del(&g2m_ioport->list);
                    xfree(g2m_ioport);
                    break;
                }
            ret = ioports_deny_access(d, fmp, fmp + np - 1);
        }
        rcu_unlock_domain(d);
    }
    break;

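    /* Pin a fixed cache attribute onto a range of HVM guest physical memory. */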
    case XEN_DOMCTL_pin_mem_cacheattr:
    {
        struct domain *d;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(domctl->domain);
        if ( d == NULL )
            break;

        ret = xsm_pin_mem_cacheattr(d);
        if ( ret )
            goto pin_out;

        ret = hvm_set_mem_pinned_cacheattr(
            d, domctl->u.pin_mem_cacheattr.start,
            domctl->u.pin_mem_cacheattr.end,
            domctl->u.pin_mem_cacheattr.type);

    pin_out:
        rcu_unlock_domain(d);
    }
    break;

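    /* Get or set the extended vCPU context: sysenter/syscall32 callback state. */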
    case XEN_DOMCTL_set_ext_vcpucontext:
    case XEN_DOMCTL_get_ext_vcpucontext:
    {
        struct xen_domctl_ext_vcpucontext *evc;
        struct domain *d;
        struct vcpu *v;

        evc = &domctl->u.ext_vcpucontext;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(domctl->domain);
        if ( d == NULL )
            break;

        ret = xsm_ext_vcpucontext(d, domctl->cmd);
        if ( ret )
            goto ext_vcpucontext_out;

        ret = -ESRCH;
        if ( (evc->vcpu >= d->max_vcpus) ||
             ((v = d->vcpu[evc->vcpu]) == NULL) )
            goto ext_vcpucontext_out;

        if ( domctl->cmd == XEN_DOMCTL_get_ext_vcpucontext )
        {
            evc->size = sizeof(*evc);
#ifdef __x86_64__
            evc->sysenter_callback_cs = v->arch.sysenter_callback_cs;
            evc->sysenter_callback_eip = v->arch.sysenter_callback_eip;
            evc->sysenter_disables_events = v->arch.sysenter_disables_events;
            evc->syscall32_callback_cs = v->arch.syscall32_callback_cs;
            evc->syscall32_callback_eip = v->arch.syscall32_callback_eip;
            evc->syscall32_disables_events = v->arch.syscall32_disables_events;
#else
            evc->sysenter_callback_cs = 0;
            evc->sysenter_callback_eip = 0;
            evc->sysenter_disables_events = 0;
            evc->syscall32_callback_cs = 0;
            evc->syscall32_callback_eip = 0;
            evc->syscall32_disables_events = 0;
#endif
        }
        else
        {
            ret = -EINVAL;
            if ( evc->size != sizeof(*evc) )
                goto ext_vcpucontext_out;
#ifdef __x86_64__
            fixup_guest_code_selector(d, evc->sysenter_callback_cs);
            v->arch.sysenter_callback_cs = evc->sysenter_callback_cs;
            v->arch.sysenter_callback_eip = evc->sysenter_callback_eip;
            v->arch.sysenter_disables_events = evc->sysenter_disables_events;
            fixup_guest_code_selector(d, evc->syscall32_callback_cs);
            v->arch.syscall32_callback_cs = evc->syscall32_callback_cs;
            v->arch.syscall32_callback_eip = evc->syscall32_callback_eip;
            v->arch.syscall32_disables_events = evc->syscall32_disables_events;
#else
            /* We do not support syscall/syscall32/sysenter on 32-bit Xen. */
            if ( (evc->sysenter_callback_cs & ~3) ||
                 evc->sysenter_callback_eip ||
                 (evc->syscall32_callback_cs & ~3) ||
                 evc->syscall32_callback_eip )
                goto ext_vcpucontext_out;
#endif
        }

        ret = 0;

    ext_vcpucontext_out:
        rcu_unlock_domain(d);
        if ( (domctl->cmd == XEN_DOMCTL_get_ext_vcpucontext) &&
             copy_to_guest(u_domctl, domctl, 1) )
            ret = -EFAULT;
    }
    break;

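    /* Install or update one CPUID policy leaf in the domain's cpuids[] table. */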
    case XEN_DOMCTL_set_cpuid:
    {
        struct domain *d;
        xen_domctl_cpuid_t *ctl = &domctl->u.cpuid;
        cpuid_input_t *cpuid = NULL;
        int i;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(domctl->domain);
        if ( d == NULL )
            break;

        for ( i = 0; i < MAX_CPUID_INPUT; i++ )
        {
            cpuid = &d->arch.cpuids[i];

            if ( cpuid->input[0] == XEN_CPUID_INPUT_UNUSED )
                break;

            if ( (cpuid->input[0] == ctl->input[0]) &&
                 ((cpuid->input[1] == XEN_CPUID_INPUT_UNUSED) ||
                  (cpuid->input[1] == ctl->input[1])) )
                break;
        }

        if ( i == MAX_CPUID_INPUT )
        {
            ret = -ENOENT;
        }
        else
        {
            memcpy(cpuid, ctl, sizeof(cpuid_input_t));
            ret = 0;
        }

        rcu_unlock_domain(d);
    }
    break;

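    /* Ask Xen to suppress spurious page faults for this domain. */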
    case XEN_DOMCTL_suppress_spurious_page_faults:
    {
        struct domain *d;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(domctl->domain);
        if ( d != NULL )
        {
            d->arch.suppress_spurious_page_faults = 1;
            rcu_unlock_domain(d);
            ret = 0;
        }
    }
    break;

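    /* Forward a debugger operation (e.g. single-step control) to the HVM code. */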
    case XEN_DOMCTL_debug_op:
    {
        struct domain *d;
        struct vcpu *v;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(domctl->domain);
        if ( d == NULL )
            break;

        ret = -EINVAL;
        if ( (domctl->u.debug_op.vcpu >= d->max_vcpus) ||
             ((v = d->vcpu[domctl->u.debug_op.vcpu]) == NULL) )
            goto debug_op_out;

        ret = -EINVAL;
        if ( !is_hvm_domain(d) )
            goto debug_op_out;

        ret = hvm_debug_op(v, domctl->u.debug_op.op);

    debug_op_out:
        rcu_unlock_domain(d);
    }
    break;

    default:
        ret = -ENOSYS;
        break;
    }

    return ret;
}

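/* Fill in a vcpu_guest_context (native or compat layout) for the given vCPU. */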
void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c)
{
#ifdef CONFIG_COMPAT
#define c(fld) (!is_pv_32on64_domain(v->domain) ? (c.nat->fld) : (c.cmp->fld))
#else
#define c(fld) (c.nat->fld)
#endif

    if ( !is_pv_32on64_domain(v->domain) )
        memcpy(c.nat, &v->arch.guest_context, sizeof(*c.nat));
#ifdef CONFIG_COMPAT
    else
        XLAT_vcpu_guest_context(c.cmp, &v->arch.guest_context);
#endif

    c(flags &= ~(VGCF_i387_valid|VGCF_in_kernel));
    if ( v->fpu_initialised )
        c(flags |= VGCF_i387_valid);
    if ( !test_bit(_VPF_down, &v->pause_flags) )
        c(flags |= VGCF_online);

    if ( is_hvm_vcpu(v) )
    {
        struct segment_register sreg;
        memset(c.nat->ctrlreg, 0, sizeof(c.nat->ctrlreg));
        c.nat->ctrlreg[0] = v->arch.hvm_vcpu.guest_cr[0];
        c.nat->ctrlreg[2] = v->arch.hvm_vcpu.guest_cr[2];
        c.nat->ctrlreg[3] = v->arch.hvm_vcpu.guest_cr[3];
        c.nat->ctrlreg[4] = v->arch.hvm_vcpu.guest_cr[4];
        hvm_get_segment_register(v, x86_seg_cs, &sreg);
        c.nat->user_regs.cs = sreg.sel;
        hvm_get_segment_register(v, x86_seg_ss, &sreg);
        c.nat->user_regs.ss = sreg.sel;
        hvm_get_segment_register(v, x86_seg_ds, &sreg);
        c.nat->user_regs.ds = sreg.sel;
        hvm_get_segment_register(v, x86_seg_es, &sreg);
        c.nat->user_regs.es = sreg.sel;
        hvm_get_segment_register(v, x86_seg_fs, &sreg);
        c.nat->user_regs.fs = sreg.sel;
        hvm_get_segment_register(v, x86_seg_gs, &sreg);
        c.nat->user_regs.gs = sreg.sel;
    }
    else
    {
        /* IOPL privileges are virtualised: merge back into returned eflags. */
        BUG_ON((c(user_regs.eflags) & EF_IOPL) != 0);
        c(user_regs.eflags |= v->arch.iopl << 12);

        if ( !is_pv_32on64_domain(v->domain) )
        {
            c.nat->ctrlreg[3] = xen_pfn_to_cr3(
                pagetable_get_pfn(v->arch.guest_table));
#ifdef __x86_64__
            c.nat->ctrlreg[1] =
                pagetable_is_null(v->arch.guest_table_user) ? 0
                : xen_pfn_to_cr3(pagetable_get_pfn(v->arch.guest_table_user));
#endif

            /* Merge shadow DR7 bits into real DR7. */
            c.nat->debugreg[7] |= c.nat->debugreg[5];
            c.nat->debugreg[5] = 0;
        }
#ifdef CONFIG_COMPAT
        else
        {
            l4_pgentry_t *l4e = __va(pagetable_get_paddr(v->arch.guest_table));
            c.cmp->ctrlreg[3] = compat_pfn_to_cr3(l4e_get_pfn(*l4e));

            /* Merge shadow DR7 bits into real DR7. */
            c.cmp->debugreg[7] |= c.cmp->debugreg[5];
            c.cmp->debugreg[5] = 0;
        }
#endif

        if ( guest_kernel_mode(v, &v->arch.guest_context.user_regs) )
            c(flags |= VGCF_in_kernel);
    }

    c(vm_assist = v->domain->vm_assist);
#undef c
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */