ia64/xen-unstable

xen/arch/ia64/xen/dom0_ops.c @ 19848:5839491bbf20

[IA64] Replace MAX_VCPUS with d->max_vcpus where necessary.

Stop using the build-time MAX_VCPUS constant and use the per-domain
domain::max_vcpus instead. Changeset 2f9e1348aa98 introduced max_vcpus
to allow more vcpus per guest; this patch is the ia64 counterpart.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Isaku Yamahata <yamahata@valinux.co.jp>
date Mon Jun 29 11:26:05 2009 +0900 (2009-06-29)
parents 4fd4dcf2f891
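The core of the change is a bounds check against the per-domain limit instead of
a global constant. The sketch below illustrates the pattern with simplified
stand-in structures, not the actual Xen definitions (the real struct domain lives
in xen/include/xen/sched.h); only the shape of the check matters:

    /* Simplified stand-ins for illustration; not the actual Xen definitions. */
    struct vcpu;

    struct domain {
        unsigned int max_vcpus;   /* per-guest limit, set at domain creation */
        struct vcpu **vcpu;       /* array of max_vcpus entries, NULL if unallocated */
    };

    /* Look up a vcpu by id, bounded by the per-domain limit rather than a
     * build-time MAX_VCPUS constant. */
    static struct vcpu *vcpu_lookup(struct domain *d, unsigned int id)
    {
        if (id >= d->max_vcpus || d->vcpu[id] == NULL)
            return NULL;
        return d->vcpu[id];
    }

The XEN_DOMCTL_sendtrigger handler in the source below performs this same check
against d->max_vcpus before dereferencing d->vcpu[].
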
/******************************************************************************
 * Arch-specific dom0_ops.c
 *
 * Process command requests from domain-0 guest OS.
 *
 * Copyright (c) 2002, K A Fraser
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/lib.h>
#include <xen/mm.h>
#include <public/domctl.h>
#include <public/sysctl.h>
#include <xen/sched.h>
#include <xen/event.h>
#include <asm/pdb.h>
#include <xen/trace.h>
#include <xen/console.h>
#include <xen/guest_access.h>
#include <xen/pci.h>
#include <asm/vmx.h>
#include <asm/dom_fw.h>
#include <asm/vhpt.h>
#include <xen/iocap.h>
#include <xen/errno.h>
#include <xen/nodemask.h>
#include <asm/dom_fw_utils.h>
#include <asm/hvm/support.h>
#include <xsm/xsm.h>
#include <public/hvm/save.h>

#define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0)

extern unsigned long total_pages;

long arch_do_domctl(xen_domctl_t *op, XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
{
    long ret = 0;

    switch ( op->cmd )
    {
    case XEN_DOMCTL_getmemlist:
    {
        unsigned long i;
        struct domain *d = rcu_lock_domain_by_id(op->domain);
        unsigned long start_page = op->u.getmemlist.start_pfn;
        unsigned long nr_pages = op->u.getmemlist.max_pfns;
        uint64_t mfn;

        if ( d == NULL ) {
            ret = -EINVAL;
            break;
        }

        if ( !IS_PRIV_FOR(current->domain, d) ) {
            ret = -EPERM;
            rcu_unlock_domain(d);
            break;
        }

        for (i = 0 ; i < nr_pages ; i++) {
            pte_t *pte;

            pte = (pte_t *)lookup_noalloc_domain_pte(d,
                                           (start_page + i) << PAGE_SHIFT);
            if (pte && pte_present(*pte))
                mfn = start_page + i;
            else
                mfn = INVALID_MFN;

            if ( copy_to_guest_offset(op->u.getmemlist.buffer, i, &mfn, 1) ) {
                ret = -EFAULT;
                break;
            }
        }

        op->u.getmemlist.num_pfns = i;
        if (copy_to_guest(u_domctl, op, 1))
            ret = -EFAULT;
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_arch_setup:
    {
        xen_domctl_arch_setup_t *ds = &op->u.arch_setup;
        struct domain *d = rcu_lock_domain_by_id(op->domain);

        if ( d == NULL) {
            ret = -EINVAL;
            break;
        }

        if ( !IS_PRIV_FOR(current->domain, d) ) {
            ret = -EPERM;
            rcu_unlock_domain(d);
            break;
        }

        if (ds->flags & XEN_DOMAINSETUP_query) {
            /* Set flags. */
            if (is_hvm_domain(d))
                ds->flags |= XEN_DOMAINSETUP_hvm_guest;
            /* Set params. */
            ds->bp = 0;         /* unknown. */
            ds->maxmem = d->arch.convmem_end;
            ds->xsi_va = d->arch.shared_info_va;
            ds->hypercall_imm = d->arch.breakimm;
#ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
            ds->vhpt_size_log2 = d->arch.vhpt_size_log2;
#endif
            /* Copy back. */
            if ( copy_to_guest(u_domctl, op, 1) )
                ret = -EFAULT;
        }
        else {
            if (is_hvm_domain(d)
                || (ds->flags & (XEN_DOMAINSETUP_hvm_guest
                                 | XEN_DOMAINSETUP_sioemu_guest))) {
                if (!vmx_enabled) {
                    printk("No VMX hardware feature for vmx domain.\n");
                    ret = -EINVAL;
                } else {
                    d->is_hvm = 1;
                    if (ds->flags & XEN_DOMAINSETUP_sioemu_guest)
                        d->arch.is_sioemu = 1;
                    xen_ia64_set_convmem_end(d, ds->maxmem);
                    ret = vmx_setup_platform(d);
                }
            }
            else {
                if (ds->hypercall_imm) {
                    /* dom_fw_setup() reads d->arch.breakimm */
                    struct vcpu *v;
                    d->arch.breakimm = ds->hypercall_imm;
                    for_each_vcpu (d, v)
                        v->arch.breakimm = d->arch.breakimm;
                }
                domain_set_vhpt_size(d, ds->vhpt_size_log2);
                if (ds->xsi_va)
                    d->arch.shared_info_va = ds->xsi_va;
                ret = dom_fw_setup(d, ds->bp, ds->maxmem);
            }
            if (ret == 0) {
                /*
                 * XXX IA64_SHARED_INFO_PADDR
                 * Assign these pages into the guest pseudo-physical address
                 * space so that dom0 can map this page by gmfn.  This is
                 * necessary for domain build, save, restore and dump-core.
                 */
                unsigned long i;
                for (i = 0; i < XSI_SIZE; i += PAGE_SIZE)
                    assign_domain_page(d, IA64_SHARED_INFO_PADDR + i,
                                       virt_to_maddr(d->shared_info + i));
            }
        }

        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_shadow_op:
    {
        struct domain *d;
        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d != NULL )
        {
            if ( !IS_PRIV_FOR(current->domain, d) ) {
                ret = -EPERM;
                rcu_unlock_domain(d);
                break;
            }

            ret = shadow_mode_control(d, &op->u.shadow_op);
            rcu_unlock_domain(d);
            if (copy_to_guest(u_domctl, op, 1))
                ret = -EFAULT;
        }
    }
    break;

    case XEN_DOMCTL_ioport_permission:
    {
        struct domain *d;
        unsigned int fp = op->u.ioport_permission.first_port;
        unsigned int np = op->u.ioport_permission.nr_ports;
        unsigned int lp = fp + np - 1;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if (unlikely(d == NULL))
            break;

        if ( !IS_PRIV_FOR(current->domain, d) ) {
            ret = -EPERM;
            rcu_unlock_domain(d);
            break;
        }

        if (np == 0)
            ret = 0;
        else {
            if (op->u.ioport_permission.allow_access)
                ret = ioports_permit_access(d, fp, fp, lp);
            else
                ret = ioports_deny_access(d, fp, lp);
        }

        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_sendtrigger:
    {
        struct domain *d;
        struct vcpu *v;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = -EPERM;
        if ( !IS_PRIV_FOR(current->domain, d) ) {
            goto sendtrigger_out;
        }

        ret = -EINVAL;
        if ( op->u.sendtrigger.vcpu >= MAX_VIRT_CPUS )
            goto sendtrigger_out;
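
        /*
         * The vcpu id must also fall within this domain's per-guest limit
         * (d->max_vcpus) and the vcpu must actually have been allocated;
         * this is the bound the changeset switches to from MAX_VCPUS.
         */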
        ret = -ESRCH;
        if ( op->u.sendtrigger.vcpu >= d->max_vcpus ||
             (v = d->vcpu[op->u.sendtrigger.vcpu]) == NULL )
            goto sendtrigger_out;

        ret = 0;
        switch (op->u.sendtrigger.trigger)
        {
        case XEN_DOMCTL_SENDTRIGGER_INIT:
        {
            if (VMX_DOMAIN(v))
                vmx_pend_pal_init(d);
            else
                ret = -ENOSYS;
        }
        break;

        default:
            ret = -ENOSYS;
        }

    sendtrigger_out:
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_get_device_group:
    {
        struct domain *d;
        u32 max_sdevs;
        u8 bus, devfn;
        XEN_GUEST_HANDLE_64(uint32) sdevs;
        int num_sdevs;

        ret = -ENOSYS;
        if ( !iommu_enabled )
            break;

        ret = -EINVAL;
        if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
            break;

        bus = (op->u.get_device_group.machine_bdf >> 16) & 0xff;
        devfn = (op->u.get_device_group.machine_bdf >> 8) & 0xff;
        max_sdevs = op->u.get_device_group.max_sdevs;
        sdevs = op->u.get_device_group.sdev_array;

        num_sdevs = iommu_get_device_group(d, bus, devfn, sdevs, max_sdevs);
        if ( num_sdevs < 0 )
        {
            dprintk(XENLOG_ERR, "iommu_get_device_group() failed!\n");
            ret = -EFAULT;
            op->u.get_device_group.num_sdevs = 0;
        }
        else
        {
            ret = 0;
            op->u.get_device_group.num_sdevs = num_sdevs;
        }
        if ( copy_to_guest(u_domctl, op, 1) )
            ret = -EFAULT;
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_test_assign_device:
    {
        u8 bus, devfn;

        ret = -ENOSYS;
        if ( !iommu_enabled )
            break;

        ret = -EINVAL;
        bus = (op->u.assign_device.machine_bdf >> 16) & 0xff;
        devfn = (op->u.assign_device.machine_bdf >> 8) & 0xff;

        if ( device_assigned(bus, devfn) )
        {
            printk( "XEN_DOMCTL_test_assign_device: "
                    "%x:%x:%x already assigned, or non-existent\n",
                    bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
            break;
        }
        ret = 0;
    }
    break;

    case XEN_DOMCTL_assign_device:
    {
        struct domain *d;
        u8 bus, devfn;

        ret = -ENOSYS;
        if ( !iommu_enabled )
            break;

        ret = -EINVAL;
        if ( unlikely((d = get_domain_by_id(op->domain)) == NULL) )
        {
            gdprintk(XENLOG_ERR,
                     "XEN_DOMCTL_assign_device: get_domain_by_id() failed\n");
            break;
        }
        bus = (op->u.assign_device.machine_bdf >> 16) & 0xff;
        devfn = (op->u.assign_device.machine_bdf >> 8) & 0xff;

        if ( !iommu_pv_enabled && !is_hvm_domain(d) )
        {
            ret = -ENOSYS;
            break;
        }

        if ( device_assigned(bus, devfn) )
        {
            gdprintk(XENLOG_ERR, "XEN_DOMCTL_assign_device: "
                     "%x:%x:%x already assigned, or non-existent\n",
                     bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
            break;
        }

        ret = assign_device(d, bus, devfn);
        gdprintk(XENLOG_INFO, "XEN_DOMCTL_assign_device: bdf = %x:%x:%x\n",
                 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
        put_domain(d);
    }
    break;

    case XEN_DOMCTL_deassign_device:
    {
        struct domain *d;
        u8 bus, devfn;

        ret = -ENOSYS;
        if ( !iommu_enabled )
            break;

        ret = -EINVAL;
        if ( unlikely((d = get_domain_by_id(op->domain)) == NULL) )
        {
            gdprintk(XENLOG_ERR,
                     "XEN_DOMCTL_deassign_device: get_domain_by_id() failed\n");
            break;
        }
        bus = (op->u.assign_device.machine_bdf >> 16) & 0xff;
        devfn = (op->u.assign_device.machine_bdf >> 8) & 0xff;

        if ( !iommu_pv_enabled && !is_hvm_domain(d) )
        {
            ret = -ENOSYS;
            break;
        }

        if ( !device_assigned(bus, devfn) )
            break;

        ret = 0;
        deassign_device(d, bus, devfn);
        gdprintk(XENLOG_INFO, "XEN_DOMCTL_deassign_device: bdf = %x:%x:%x\n",
                 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
        put_domain(d);
    }
    break;

    case XEN_DOMCTL_bind_pt_irq:
    {
        struct domain * d;
        xen_domctl_bind_pt_irq_t * bind;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
            break;
        bind = &(op->u.bind_pt_irq);
        if ( iommu_enabled )
            ret = pt_irq_create_bind_vtd(d, bind);
        if ( ret < 0 )
            gdprintk(XENLOG_ERR, "pt_irq_create_bind failed!\n");
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_unbind_pt_irq:
    {
        struct domain * d;
        xen_domctl_bind_pt_irq_t * bind;

        ret = -ESRCH;
        if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL )
            break;
        bind = &(op->u.bind_pt_irq);
        if ( iommu_enabled )
            ret = pt_irq_destroy_bind_vtd(d, bind);
        if ( ret < 0 )
            gdprintk(XENLOG_ERR, "pt_irq_destroy_bind failed!\n");
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_memory_mapping:
    {
        struct domain *d;
        unsigned long gfn = op->u.memory_mapping.first_gfn;
        unsigned long mfn = op->u.memory_mapping.first_mfn;
        unsigned long nr_mfns = op->u.memory_mapping.nr_mfns;
        int i;

        ret = -EINVAL;
        if ( (mfn + nr_mfns - 1) < mfn ) /* wrap? */
            break;

        ret = -ESRCH;
        if ( unlikely((d = rcu_lock_domain_by_id(op->domain)) == NULL) )
            break;

        ret = 0;
        if ( op->u.memory_mapping.add_mapping )
        {
            gdprintk(XENLOG_INFO,
                     "memory_map:add: gfn=%lx mfn=%lx nr_mfns=%lx\n",
                     gfn, mfn, nr_mfns);

            ret = iomem_permit_access(d, mfn, mfn + nr_mfns - 1);
            for ( i = 0; i < nr_mfns; i++ )
                assign_domain_mmio_page(d, (gfn+i)<<PAGE_SHIFT,
                                        (mfn+i)<<PAGE_SHIFT, PAGE_SIZE,
                                        ASSIGN_writable | ASSIGN_nocache);
        }
        else
        {
            gdprintk(XENLOG_INFO,
                     "memory_map:remove: gfn=%lx mfn=%lx nr_mfns=%lx\n",
                     gfn, mfn, nr_mfns);

            for ( i = 0; i < nr_mfns; i++ )
                deassign_domain_mmio_page(d, (gfn+i)<<PAGE_SHIFT,
                                          (mfn+i)<<PAGE_SHIFT, PAGE_SIZE);
            ret = iomem_deny_access(d, mfn, mfn + nr_mfns - 1);
        }

        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_ioport_mapping:
    {

#define MAX_IOPORTS 0x10000
        struct domain *d;
        unsigned int fgp = op->u.ioport_mapping.first_gport;
        unsigned int fmp = op->u.ioport_mapping.first_mport;
        unsigned int np = op->u.ioport_mapping.nr_ports;

        ret = -EINVAL;
        if ( (np == 0) || (fgp > MAX_IOPORTS) || (fmp > MAX_IOPORTS) ||
             ((fgp + np) > MAX_IOPORTS) || ((fmp + np) > MAX_IOPORTS) )
        {
            gdprintk(XENLOG_ERR,
                     "ioport_map:invalid:gport=%x mport=%x nr_ports=%x\n",
                     fgp, fmp, np);
            break;
        }

        ret = -ESRCH;
        if ( unlikely((d = rcu_lock_domain_by_id(op->domain)) == NULL) )
            break;

        if ( op->u.ioport_mapping.add_mapping )
        {
            gdprintk(XENLOG_INFO,
                     "ioport_map:add f_gport=%x f_mport=%x np=%x\n",
                     fgp, fmp, np);

            ret = ioports_permit_access(d, fgp, fmp, fmp + np - 1);
        }
        else
        {
            gdprintk(XENLOG_INFO,
                     "ioport_map:remove f_gport=%x f_mport=%x np=%x\n",
                     fgp, fmp, np);

            ret = ioports_deny_access(d, fgp, fgp + np - 1);
        }
        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_sethvmcontext:
    {
        struct hvm_domain_context c;
        struct domain *d;

        c.cur = 0;
        c.size = op->u.hvmcontext.size;
        c.data = NULL;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if (d == NULL)
            break;

        ret = -EPERM;
        if ( !IS_PRIV_FOR(current->domain, d) )
            goto sethvmcontext_out;

#ifdef CONFIG_X86
        ret = xsm_hvmcontext(d, op->cmd);
        if (ret)
            goto sethvmcontext_out;
#endif /* CONFIG_X86 */

        ret = -EINVAL;
        if (!is_hvm_domain(d))
            goto sethvmcontext_out;

        ret = -ENOMEM;
        c.data = xmalloc_bytes(c.size);
        if (c.data == NULL)
            goto sethvmcontext_out;

        ret = -EFAULT;
        if (copy_from_guest(c.data, op->u.hvmcontext.buffer, c.size) != 0)
            goto sethvmcontext_out;

        domain_pause(d);
        ret = hvm_load(d, &c);
        domain_unpause(d);

    sethvmcontext_out:
        if (c.data != NULL)
            xfree(c.data);

        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_gethvmcontext:
    {
        struct hvm_domain_context c;
        struct domain *d;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if (d == NULL)
            break;

        ret = -EPERM;
        if ( !IS_PRIV_FOR(current->domain, d) )
            goto gethvmcontext_out;

#ifdef CONFIG_X86
        ret = xsm_hvmcontext(d, op->cmd);
        if (ret)
            goto gethvmcontext_out;
#endif /* CONFIG_X86 */

        ret = -EINVAL;
        if (!is_hvm_domain(d))
            goto gethvmcontext_out;

        c.cur = 0;
        c.size = hvm_save_size(d);
        c.data = NULL;

        if (guest_handle_is_null(op->u.hvmcontext.buffer)) {
            /* Client is querying for the correct buffer size */
            op->u.hvmcontext.size = c.size;
            ret = 0;
            goto gethvmcontext_out;
        }

        /* Check that the client has a big enough buffer */
        ret = -ENOSPC;
        if (op->u.hvmcontext.size < c.size)
            goto gethvmcontext_out;

        /* Allocate our own marshalling buffer */
        ret = -ENOMEM;
        c.data = xmalloc_bytes(c.size);
        if (c.data == NULL)
            goto gethvmcontext_out;

        domain_pause(d);
        ret = hvm_save(d, &c);
        domain_unpause(d);

        op->u.hvmcontext.size = c.cur;
        if (copy_to_guest(op->u.hvmcontext.buffer, c.data, c.size) != 0)
            ret = -EFAULT;

    gethvmcontext_out:
        if (copy_to_guest(u_domctl, op, 1))
            ret = -EFAULT;

        if (c.data != NULL)
            xfree(c.data);

        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_set_opt_feature:
    {
        struct xen_ia64_opt_feature *optf = &op->u.set_opt_feature.optf;
        struct domain *d = rcu_lock_domain_by_id(op->domain);

        if (d == NULL) {
            ret = -EINVAL;
            break;
        }

        ret = -EPERM;
        if ( IS_PRIV_FOR(current->domain, d) )
            ret = domain_opt_feature(d, optf);

        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_set_address_size:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);

        ret = -ESRCH;
        if (d == NULL)
            break;

        ret = -EINVAL;
        if (op->u.address_size.size == BITS_PER_LONG)
            ret = 0;

        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_get_address_size:
    {
        struct domain *d = rcu_lock_domain_by_id(op->domain);

        ret = -ESRCH;
        if (d == NULL)
            break;

        ret = 0;
        op->u.address_size.size = BITS_PER_LONG;
        rcu_unlock_domain(d);

        if (copy_to_guest(u_domctl, op, 1))
            ret = -EFAULT;
    }
    break;

    default:
        printk("arch_do_domctl: unrecognized domctl: %d!!!\n", op->cmd);
        ret = -ENOSYS;

    }

    return ret;
}

long arch_do_sysctl(xen_sysctl_t *op, XEN_GUEST_HANDLE(xen_sysctl_t) u_sysctl)
{
    long ret = 0;

    switch ( op->cmd )
    {
    case XEN_SYSCTL_physinfo:
    {
        int i;
        uint32_t max_array_ent;
        XEN_GUEST_HANDLE_64(uint32) cpu_to_node_arr;

        xen_sysctl_physinfo_t *pi = &op->u.physinfo;

        max_array_ent = pi->max_cpu_id;
        cpu_to_node_arr = pi->cpu_to_node;

        memset(pi, 0, sizeof(*pi));
        pi->cpu_to_node = cpu_to_node_arr;
        pi->threads_per_core = cpus_weight(cpu_sibling_map[0]);
        pi->cores_per_socket =
            cpus_weight(cpu_core_map[0]) / pi->threads_per_core;
        pi->nr_cpus = (u32)num_online_cpus();
        pi->nr_nodes = num_online_nodes();
        pi->total_pages = total_pages;
        pi->free_pages = avail_domheap_pages();
        pi->scrub_pages = avail_scrub_pages();
        pi->cpu_khz = local_cpu_data->proc_freq / 1000;

        pi->max_cpu_id = last_cpu(cpu_online_map);
        max_array_ent = min_t(uint32_t, max_array_ent, pi->max_cpu_id);

        ret = 0;

        if (!guest_handle_is_null(cpu_to_node_arr)) {
            for (i = 0; i <= max_array_ent; i++) {
                uint32_t node = cpu_online(i) ? cpu_to_node(i) : ~0u;
                if (copy_to_guest_offset(cpu_to_node_arr, i, &node, 1)) {
                    ret = -EFAULT;
                    break;
                }
            }
        }

        if ( copy_to_guest(u_sysctl, op, 1) )
            ret = -EFAULT;
    }
    break;

    default:
        printk("arch_do_sysctl: unrecognized sysctl: %d!!!\n", op->cmd);
        ret = -ENOSYS;

    }

    return ret;
}

static unsigned long
dom0vp_ioremap(struct domain *d, unsigned long mpaddr, unsigned long size)
{
    unsigned long end;

    /* Linux may use a 0 size! */
    if (size == 0)
        size = PAGE_SIZE;

    if (size == 0)
        printk(XENLOG_WARNING "ioremap(): Trying to map %lx, size 0\n", mpaddr);

    end = PAGE_ALIGN(mpaddr + size);

    if (!iomem_access_permitted(d, mpaddr >> PAGE_SHIFT,
                                (end >> PAGE_SHIFT) - 1))
        return -EPERM;

    return assign_domain_mmio_page(d, mpaddr, mpaddr, size,
                                   ASSIGN_writable | ASSIGN_nocache);
}

static unsigned long
dom0vp_fpswa_revision(XEN_GUEST_HANDLE(uint) revision)
{
    if (fpswa_interface == NULL)
        return -ENOSYS;
    if (copy_to_guest(revision, &fpswa_interface->revision, 1))
        return -EFAULT;
    return 0;
}

static unsigned long
dom0vp_add_io_space(struct domain *d, unsigned long phys_base,
                    unsigned long sparse, unsigned long space_number)
{
    unsigned int fp, lp;

    /*
     * Registering new io_space roughly based on linux
     * arch/ia64/pci/pci.c:new_space()
     */

    /* Skip legacy I/O port space, we already know about it */
    if (phys_base == 0)
        return 0;

    /*
     * Dom0 Linux initializes io spaces sequentially, if that changes,
     * we'll need to add thread protection and the ability to handle
     * a sparsely populated io_space array.
     */
    if (space_number > MAX_IO_SPACES || space_number != num_io_spaces)
        return -EINVAL;

    io_space[space_number].mmio_base = phys_base;
    io_space[space_number].sparse = sparse;

    num_io_spaces++;

    fp = space_number << IO_SPACE_BITS;
    lp = fp | 0xffff;

    return ioports_permit_access(d, fp, fp, lp);
}

unsigned long
do_dom0vp_op(unsigned long cmd,
             unsigned long arg0, unsigned long arg1, unsigned long arg2,
             unsigned long arg3)
{
    unsigned long ret = 0;
    struct domain *d = current->domain;

    switch (cmd) {
    case IA64_DOM0VP_ioremap:
        ret = dom0vp_ioremap(d, arg0, arg1);
        break;
    case IA64_DOM0VP_phystomach:
        ret = ____lookup_domain_mpa(d, arg0 << PAGE_SHIFT);
        if (ret == INVALID_MFN) {
            dprintk(XENLOG_INFO, "%s: INVALID_MFN ret: 0x%lx\n",
                    __func__, ret);
        } else {
            ret = pte_pfn(__pte(ret));
        }
        perfc_incr(dom0vp_phystomach);
        break;
    case IA64_DOM0VP_machtophys:
        if (!mfn_valid(arg0)) {
            ret = INVALID_M2P_ENTRY;
            break;
        }
        ret = get_gpfn_from_mfn(arg0);
        perfc_incr(dom0vp_machtophys);
        break;
    case IA64_DOM0VP_zap_physmap:
        ret = dom0vp_zap_physmap(d, arg0, (unsigned int)arg1);
        break;
    case IA64_DOM0VP_add_physmap:
        if (!IS_PRIV(d))
            return -EPERM;
        ret = dom0vp_add_physmap(d, arg0, arg1, (unsigned int)arg2,
                                 (domid_t)arg3);
        break;
    case IA64_DOM0VP_add_physmap_with_gmfn:
        if (!IS_PRIV(d))
            return -EPERM;
        ret = dom0vp_add_physmap_with_gmfn(d, arg0, arg1, (unsigned int)arg2,
                                           (domid_t)arg3);
        break;
    case IA64_DOM0VP_expose_p2m:
        ret = dom0vp_expose_p2m(d, arg0, arg1, arg2, arg3);
        break;
    case IA64_DOM0VP_perfmon: {
        XEN_GUEST_HANDLE(void) hnd;
        set_xen_guest_handle(hnd, (void*)arg1);
        ret = do_perfmon_op(arg0, hnd, arg2);
        break;
    }
    case IA64_DOM0VP_fpswa_revision: {
        XEN_GUEST_HANDLE(uint) hnd;
        set_xen_guest_handle(hnd, (uint*)arg0);
        ret = dom0vp_fpswa_revision(hnd);
        break;
    }
    case IA64_DOM0VP_add_io_space:
        ret = dom0vp_add_io_space(d, arg0, arg1, arg2);
        break;
    case IA64_DOM0VP_expose_foreign_p2m: {
        XEN_GUEST_HANDLE(char) hnd;
        set_xen_guest_handle(hnd, (char*)arg2);
        ret = dom0vp_expose_foreign_p2m(d, arg0, (domid_t)arg1, hnd, arg3);
        break;
    }
    case IA64_DOM0VP_unexpose_foreign_p2m:
        ret = dom0vp_unexpose_foreign_p2m(d, arg0, arg1);
        break;
    case IA64_DOM0VP_get_memmap: {
        XEN_GUEST_HANDLE(char) hnd;
        set_xen_guest_handle(hnd, (char*)arg1);
        ret = dom0vp_get_memmap((domid_t)arg0, hnd);
        break;
    }
    default:
        ret = -1;
        printk("unknown dom0_vp_op 0x%lx\n", cmd);
        break;
    }

    return ret;
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */