ia64/xen-unstable

view xen/arch/ia64/xen/dom0_ops.c @ 18633:f27787b9f8d7

[IA64] Change the ioports_permit_access() interface.

When VT-d is used to assign a device, the guest port may not be equal
to the host port.  Change the ioports_permit_access() interface to also
take the guest pseudo-physical address.

Signed-off-by: Anthony Xu <anthony.xu@intel.com>
author Isaku Yamahata <yamahata@valinux.co.jp>
date Fri Oct 17 17:40:15 2008 +0900 (2008-10-17)
parents 89ef37e0f4b8
children 6db3c096c244
line source
1 /******************************************************************************
2 * Arch-specific dom0_ops.c
3 *
4 * Process command requests from domain-0 guest OS.
5 *
6 * Copyright (c) 2002, K A Fraser
7 */
9 #include <xen/config.h>
10 #include <xen/types.h>
11 #include <xen/lib.h>
12 #include <xen/mm.h>
13 #include <public/domctl.h>
14 #include <public/sysctl.h>
15 #include <xen/sched.h>
16 #include <xen/event.h>
17 #include <asm/pdb.h>
18 #include <xen/trace.h>
19 #include <xen/console.h>
20 #include <xen/guest_access.h>
21 #include <asm/vmx.h>
22 #include <asm/dom_fw.h>
23 #include <asm/vhpt.h>
24 #include <xen/iocap.h>
25 #include <xen/errno.h>
26 #include <xen/nodemask.h>
27 #include <asm/dom_fw_utils.h>
28 #include <asm/hvm/support.h>
29 #include <xsm/xsm.h>
30 #include <public/hvm/save.h>
32 #define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0)
34 extern unsigned long total_pages;
36 long arch_do_domctl(xen_domctl_t *op, XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
37 {
38 long ret = 0;
40 switch ( op->cmd )
41 {
42 case XEN_DOMCTL_getmemlist:
43 {
44 unsigned long i;
45 struct domain *d = rcu_lock_domain_by_id(op->domain);
46 unsigned long start_page = op->u.getmemlist.start_pfn;
47 unsigned long nr_pages = op->u.getmemlist.max_pfns;
48 uint64_t mfn;
50 if ( d == NULL ) {
51 ret = -EINVAL;
52 break;
53 }
55 if ( !IS_PRIV_FOR(current->domain, d) ) {
56 ret = -EPERM;
57 rcu_unlock_domain(d);
58 break;
59 }
61 for (i = 0 ; i < nr_pages ; i++) {
62 pte_t *pte;
64 pte = (pte_t *)lookup_noalloc_domain_pte(d,
65 (start_page + i) << PAGE_SHIFT);
66 if (pte && pte_present(*pte))
67 mfn = start_page + i;
68 else
69 mfn = INVALID_MFN;
71 if ( copy_to_guest_offset(op->u.getmemlist.buffer, i, &mfn, 1) ) {
72 ret = -EFAULT;
73 break;
74 }
75 }
77 op->u.getmemlist.num_pfns = i;
78 if (copy_to_guest(u_domctl, op, 1))
79 ret = -EFAULT;
80 rcu_unlock_domain(d);
81 }
82 break;
84 case XEN_DOMCTL_arch_setup:
85 {
86 xen_domctl_arch_setup_t *ds = &op->u.arch_setup;
87 struct domain *d = rcu_lock_domain_by_id(op->domain);
89 if ( d == NULL) {
90 ret = -EINVAL;
91 break;
92 }
94 if ( !IS_PRIV_FOR(current->domain, d) ) {
95 ret = -EPERM;
96 rcu_unlock_domain(d);
97 break;
98 }
100 if (ds->flags & XEN_DOMAINSETUP_query) {
101 /* Set flags. */
102 if (is_hvm_domain(d))
103 ds->flags |= XEN_DOMAINSETUP_hvm_guest;
104 /* Set params. */
105 ds->bp = 0; /* unknown. */
106 ds->maxmem = d->arch.convmem_end;
107 ds->xsi_va = d->arch.shared_info_va;
108 ds->hypercall_imm = d->arch.breakimm;
109 #ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
110 ds->vhpt_size_log2 = d->arch.vhpt_size_log2;
111 #endif
112 /* Copy back. */
113 if ( copy_to_guest(u_domctl, op, 1) )
114 ret = -EFAULT;
115 }
116 else {
117 if (is_hvm_domain(d)
118 || (ds->flags & (XEN_DOMAINSETUP_hvm_guest
119 | XEN_DOMAINSETUP_sioemu_guest))) {
120 if (!vmx_enabled) {
121 printk("No VMX hardware feature for vmx domain.\n");
122 ret = -EINVAL;
123 } else {
124 d->is_hvm = 1;
125 if (ds->flags & XEN_DOMAINSETUP_sioemu_guest)
126 d->arch.is_sioemu = 1;
127 xen_ia64_set_convmem_end(d, ds->maxmem);
128 ret = vmx_setup_platform(d);
129 }
130 }
131 else {
132 if (ds->hypercall_imm) {
133 /* dom_fw_setup() reads d->arch.breakimm */
134 struct vcpu *v;
135 d->arch.breakimm = ds->hypercall_imm;
136 for_each_vcpu (d, v)
137 v->arch.breakimm = d->arch.breakimm;
138 }
139 domain_set_vhpt_size(d, ds->vhpt_size_log2);
140 if (ds->xsi_va)
141 d->arch.shared_info_va = ds->xsi_va;
142 ret = dom_fw_setup(d, ds->bp, ds->maxmem);
143 }
144 if (ret == 0) {
145 /*
146 * XXX IA64_SHARED_INFO_PADDR
147 * assign these pages into guest psudo physical address
148 * space for dom0 to map this page by gmfn.
149 * this is necessary for domain build, save, restore and
150 * dump-core.
151 */
152 unsigned long i;
153 for (i = 0; i < XSI_SIZE; i += PAGE_SIZE)
154 assign_domain_page(d, IA64_SHARED_INFO_PADDR + i,
155 virt_to_maddr(d->shared_info + i));
156 }
157 }
159 rcu_unlock_domain(d);
160 }
161 break;
163 case XEN_DOMCTL_shadow_op:
164 {
165 struct domain *d;
166 ret = -ESRCH;
167 d = rcu_lock_domain_by_id(op->domain);
168 if ( d != NULL )
169 {
170 if ( !IS_PRIV_FOR(current->domain, d) ) {
171 ret = -EPERM;
172 rcu_unlock_domain(d);
173 break;
174 }
176 ret = shadow_mode_control(d, &op->u.shadow_op);
177 rcu_unlock_domain(d);
178 if (copy_to_guest(u_domctl, op, 1))
179 ret = -EFAULT;
180 }
181 }
182 break;
184 case XEN_DOMCTL_ioport_permission:
185 {
186 struct domain *d;
187 unsigned int fp = op->u.ioport_permission.first_port;
188 unsigned int np = op->u.ioport_permission.nr_ports;
189 unsigned int lp = fp + np - 1;
191 ret = -ESRCH;
192 d = rcu_lock_domain_by_id(op->domain);
193 if (unlikely(d == NULL))
194 break;
196 if ( !IS_PRIV_FOR(current->domain, d) ) {
197 ret = -EPERM;
198 rcu_unlock_domain(d);
199 break;
200 }
202 if (np == 0)
203 ret = 0;
204 else {
205 if (op->u.ioport_permission.allow_access)
206 ret = ioports_permit_access(d, fp, fp, lp);
207 else
208 ret = ioports_deny_access(d, fp, lp);
209 }
211 rcu_unlock_domain(d);
212 }
213 break;
215 case XEN_DOMCTL_sendtrigger:
216 {
217 struct domain *d;
218 struct vcpu *v;
220 ret = -ESRCH;
221 d = rcu_lock_domain_by_id(op->domain);
222 if ( d == NULL )
223 break;
225 ret = -EPERM;
226 if ( !IS_PRIV_FOR(current->domain, d) ) {
227 goto sendtrigger_out;
228 }
230 ret = -EINVAL;
231 if ( op->u.sendtrigger.vcpu >= MAX_VIRT_CPUS )
232 goto sendtrigger_out;
234 ret = -ESRCH;
235 if ( (v = d->vcpu[op->u.sendtrigger.vcpu]) == NULL )
236 goto sendtrigger_out;
238 ret = 0;
239 switch (op->u.sendtrigger.trigger)
240 {
241 case XEN_DOMCTL_SENDTRIGGER_INIT:
242 {
243 if (VMX_DOMAIN(v))
244 vmx_pend_pal_init(d);
245 else
246 ret = -ENOSYS;
247 }
248 break;
250 default:
251 ret = -ENOSYS;
252 }
254 sendtrigger_out:
255 rcu_unlock_domain(d);
256 }
257 break;
259 case XEN_DOMCTL_sethvmcontext:
260 {
261 struct hvm_domain_context c;
262 struct domain *d;
264 c.cur = 0;
265 c.size = op->u.hvmcontext.size;
266 c.data = NULL;
268 ret = -ESRCH;
269 d = rcu_lock_domain_by_id(op->domain);
270 if (d == NULL)
271 break;
273 ret = -EPERM;
274 if ( !IS_PRIV_FOR(current->domain, d) )
275 goto sethvmcontext_out;
277 #ifdef CONFIG_X86
278 ret = xsm_hvmcontext(d, op->cmd);
279 if (ret)
280 goto sethvmcontext_out;
281 #endif /* CONFIG_X86 */
283 ret = -EINVAL;
284 if (!is_hvm_domain(d))
285 goto sethvmcontext_out;
287 ret = -ENOMEM;
288 c.data = xmalloc_bytes(c.size);
289 if (c.data == NULL)
290 goto sethvmcontext_out;
292 ret = -EFAULT;
293 if (copy_from_guest(c.data, op->u.hvmcontext.buffer, c.size) != 0)
294 goto sethvmcontext_out;
296 domain_pause(d);
297 ret = hvm_load(d, &c);
298 domain_unpause(d);
300 sethvmcontext_out:
301 if (c.data != NULL)
302 xfree(c.data);
304 rcu_unlock_domain(d);
305 }
306 break;
308 case XEN_DOMCTL_gethvmcontext:
309 {
310 struct hvm_domain_context c;
311 struct domain *d;
313 ret = -ESRCH;
314 d = rcu_lock_domain_by_id(op->domain);
315 if (d == NULL)
316 break;
318 ret = -EPERM;
319 if ( !IS_PRIV_FOR(current->domain, d) )
320 goto gethvmcontext_out;
322 #ifdef CONFIG_X86
323 ret = xsm_hvmcontext(d, op->cmd);
324 if (ret)
325 goto gethvmcontext_out;
326 #endif /* CONFIG_X86 */
328 ret = -EINVAL;
329 if (!is_hvm_domain(d))
330 goto gethvmcontext_out;
332 c.cur = 0;
333 c.size = hvm_save_size(d);
334 c.data = NULL;
336 if (guest_handle_is_null(op->u.hvmcontext.buffer)) {
337 /* Client is querying for the correct buffer size */
338 op->u.hvmcontext.size = c.size;
339 ret = 0;
340 goto gethvmcontext_out;
341 }
343 /* Check that the client has a big enough buffer */
344 ret = -ENOSPC;
345 if (op->u.hvmcontext.size < c.size)
346 goto gethvmcontext_out;
348 /* Allocate our own marshalling buffer */
349 ret = -ENOMEM;
350 c.data = xmalloc_bytes(c.size);
351 if (c.data == NULL)
352 goto gethvmcontext_out;
354 domain_pause(d);
355 ret = hvm_save(d, &c);
356 domain_unpause(d);
358 op->u.hvmcontext.size = c.cur;
359 if (copy_to_guest(op->u.hvmcontext.buffer, c.data, c.size) != 0)
360 ret = -EFAULT;
362 gethvmcontext_out:
363 if (copy_to_guest(u_domctl, op, 1))
364 ret = -EFAULT;
366 if (c.data != NULL)
367 xfree(c.data);
369 rcu_unlock_domain(d);
370 }
371 break;
373 case XEN_DOMCTL_set_opt_feature:
374 {
375 struct xen_ia64_opt_feature *optf = &op->u.set_opt_feature.optf;
376 struct domain *d = rcu_lock_domain_by_id(op->domain);
378 if (d == NULL) {
379 ret = -EINVAL;
380 break;
381 }
383 ret = -EPERM;
384 if ( IS_PRIV_FOR(current->domain, d) )
385 ret = domain_opt_feature(d, optf);
387 rcu_unlock_domain(d);
388 }
389 break;
391 case XEN_DOMCTL_assign_device:
392 ret = -ENOSYS;
393 break;
395 default:
396 printk("arch_do_domctl: unrecognized domctl: %d!!!\n",op->cmd);
397 ret = -ENOSYS;
399 }
401 return ret;
402 }
404 long arch_do_sysctl(xen_sysctl_t *op, XEN_GUEST_HANDLE(xen_sysctl_t) u_sysctl)
405 {
406 long ret = 0;
408 switch ( op->cmd )
409 {
410 case XEN_SYSCTL_physinfo:
411 {
412 int i;
413 uint32_t max_array_ent;
414 XEN_GUEST_HANDLE_64(uint32) cpu_to_node_arr;
416 xen_sysctl_physinfo_t *pi = &op->u.physinfo;
418 max_array_ent = pi->max_cpu_id;
419 cpu_to_node_arr = pi->cpu_to_node;
421 memset(pi, 0, sizeof(*pi));
422 pi->cpu_to_node = cpu_to_node_arr;
423 pi->threads_per_core = cpus_weight(cpu_sibling_map[0]);
424 pi->cores_per_socket =
425 cpus_weight(cpu_core_map[0]) / pi->threads_per_core;
426 pi->nr_cpus = (u32)num_online_cpus();
427 pi->nr_nodes = num_online_nodes();
428 pi->total_pages = total_pages;
429 pi->free_pages = avail_domheap_pages();
430 pi->scrub_pages = avail_scrub_pages();
431 pi->cpu_khz = local_cpu_data->proc_freq / 1000;
433 pi->max_cpu_id = last_cpu(cpu_online_map);
434 max_array_ent = min_t(uint32_t, max_array_ent, pi->max_cpu_id);
436 ret = 0;
438 if (!guest_handle_is_null(cpu_to_node_arr)) {
439 for (i = 0; i <= max_array_ent; i++) {
440 uint32_t node = cpu_online(i) ? cpu_to_node(i) : ~0u;
441 if (copy_to_guest_offset(cpu_to_node_arr, i, &node, 1)) {
442 ret = -EFAULT;
443 break;
444 }
445 }
446 }
448 if ( copy_to_guest(u_sysctl, op, 1) )
449 ret = -EFAULT;
450 }
451 break;
453 default:
454 printk("arch_do_sysctl: unrecognized sysctl: %d!!!\n",op->cmd);
455 ret = -ENOSYS;
457 }
459 return ret;
460 }
462 static unsigned long
463 dom0vp_ioremap(struct domain *d, unsigned long mpaddr, unsigned long size)
464 {
465 unsigned long end;
467 /* Linux may use a 0 size! */
468 if (size == 0)
469 size = PAGE_SIZE;
471 if (size == 0)
472 printk(XENLOG_WARNING "ioremap(): Trying to map %lx, size 0\n", mpaddr);
474 end = PAGE_ALIGN(mpaddr + size);
476 if (!iomem_access_permitted(d, mpaddr >> PAGE_SHIFT,
477 (end >> PAGE_SHIFT) - 1))
478 return -EPERM;
480 return assign_domain_mmio_page(d, mpaddr, mpaddr, size,
481 ASSIGN_writable | ASSIGN_nocache);
482 }
484 static unsigned long
485 dom0vp_fpswa_revision(XEN_GUEST_HANDLE(uint) revision)
486 {
487 if (fpswa_interface == NULL)
488 return -ENOSYS;
489 if (copy_to_guest(revision, &fpswa_interface->revision, 1))
490 return -EFAULT;
491 return 0;
492 }
494 static unsigned long
495 dom0vp_add_io_space(struct domain *d, unsigned long phys_base,
496 unsigned long sparse, unsigned long space_number)
497 {
498 unsigned int fp, lp;
500 /*
501 * Registering new io_space roughly based on linux
502 * arch/ia64/pci/pci.c:new_space()
503 */
505 /* Skip legacy I/O port space, we already know about it */
506 if (phys_base == 0)
507 return 0;
509 /*
510 * Dom0 Linux initializes io spaces sequentially, if that changes,
511 * we'll need to add thread protection and the ability to handle
512 * a sparsely populated io_space array.
513 */
514 if (space_number > MAX_IO_SPACES || space_number != num_io_spaces)
515 return -EINVAL;
517 io_space[space_number].mmio_base = phys_base;
518 io_space[space_number].sparse = sparse;
520 num_io_spaces++;
522 fp = space_number << IO_SPACE_BITS;
523 lp = fp | 0xffff;
525 return ioports_permit_access(d, fp, fp, lp);
526 }
528 unsigned long
529 do_dom0vp_op(unsigned long cmd,
530 unsigned long arg0, unsigned long arg1, unsigned long arg2,
531 unsigned long arg3)
532 {
533 unsigned long ret = 0;
534 struct domain *d = current->domain;
536 switch (cmd) {
537 case IA64_DOM0VP_ioremap:
538 ret = dom0vp_ioremap(d, arg0, arg1);
539 break;
540 case IA64_DOM0VP_phystomach:
541 ret = ____lookup_domain_mpa(d, arg0 << PAGE_SHIFT);
542 if (ret == INVALID_MFN) {
543 dprintk(XENLOG_INFO, "%s: INVALID_MFN ret: 0x%lx\n",
544 __func__, ret);
545 } else {
546 ret = (ret & _PFN_MASK) >> PAGE_SHIFT;//XXX pte_pfn()
547 }
548 perfc_incr(dom0vp_phystomach);
549 break;
550 case IA64_DOM0VP_machtophys:
551 if (!mfn_valid(arg0)) {
552 ret = INVALID_M2P_ENTRY;
553 break;
554 }
555 ret = get_gpfn_from_mfn(arg0);
556 perfc_incr(dom0vp_machtophys);
557 break;
558 case IA64_DOM0VP_zap_physmap:
559 ret = dom0vp_zap_physmap(d, arg0, (unsigned int)arg1);
560 break;
561 case IA64_DOM0VP_add_physmap:
562 if (!IS_PRIV(d))
563 return -EPERM;
564 ret = dom0vp_add_physmap(d, arg0, arg1, (unsigned int)arg2,
565 (domid_t)arg3);
566 break;
567 case IA64_DOM0VP_add_physmap_with_gmfn:
568 if (!IS_PRIV(d))
569 return -EPERM;
570 ret = dom0vp_add_physmap_with_gmfn(d, arg0, arg1, (unsigned int)arg2,
571 (domid_t)arg3);
572 break;
573 case IA64_DOM0VP_expose_p2m:
574 ret = dom0vp_expose_p2m(d, arg0, arg1, arg2, arg3);
575 break;
576 case IA64_DOM0VP_perfmon: {
577 XEN_GUEST_HANDLE(void) hnd;
578 set_xen_guest_handle(hnd, (void*)arg1);
579 ret = do_perfmon_op(arg0, hnd, arg2);
580 break;
581 }
582 case IA64_DOM0VP_fpswa_revision: {
583 XEN_GUEST_HANDLE(uint) hnd;
584 set_xen_guest_handle(hnd, (uint*)arg0);
585 ret = dom0vp_fpswa_revision(hnd);
586 break;
587 }
588 case IA64_DOM0VP_add_io_space:
589 ret = dom0vp_add_io_space(d, arg0, arg1, arg2);
590 break;
591 case IA64_DOM0VP_expose_foreign_p2m: {
592 XEN_GUEST_HANDLE(char) hnd;
593 set_xen_guest_handle(hnd, (char*)arg2);
594 ret = dom0vp_expose_foreign_p2m(d, arg0, (domid_t)arg1, hnd, arg3);
595 break;
596 }
597 case IA64_DOM0VP_unexpose_foreign_p2m:
598 ret = dom0vp_unexpose_foreign_p2m(d, arg0, arg1);
599 break;
600 case IA64_DOM0VP_get_memmap: {
601 XEN_GUEST_HANDLE(char) hnd;
602 set_xen_guest_handle(hnd, (char*)arg1);
603 ret = dom0vp_get_memmap((domid_t)arg0, hnd);
604 break;
605 }
606 default:
607 ret = -1;
608 printk("unknown dom0_vp_op 0x%lx\n", cmd);
609 break;
610 }
612 return ret;
613 }
615 /*
616 * Local variables:
617 * mode: C
618 * c-set-style: "BSD"
619 * c-basic-offset: 4
620 * tab-width: 4
621 * indent-tabs-mode: nil
622 * End:
623 */