ia64/xen-unstable
xen/arch/ia64/xen/dom0_ops.c @ 16785:af3550f53874

[IA64] domheap: Don't pin xenheap down. Now it's unnecessary.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>

author   Alex Williamson <alex.williamson@hp.com>
date     Thu Jan 17 12:05:43 2008 -0700
parents  09cd682ac68e
children 0ace9a451a25
/******************************************************************************
 * Arch-specific dom0_ops.c
 *
 * Process command requests from domain-0 guest OS.
 *
 * Copyright (c) 2002, K A Fraser
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/lib.h>
#include <xen/mm.h>
#include <public/domctl.h>
#include <public/sysctl.h>
#include <xen/sched.h>
#include <xen/event.h>
#include <asm/pdb.h>
#include <xen/trace.h>
#include <xen/console.h>
#include <xen/guest_access.h>
#include <asm/vmx.h>
#include <asm/dom_fw.h>
#include <xen/iocap.h>
#include <xen/errno.h>
#include <xen/nodemask.h>
#include <asm/dom_fw_utils.h>
#include <asm/hvm/support.h>
#include <xsm/xsm.h>
#include <public/hvm/save.h>

#define get_xen_guest_handle(val, hnd)  do { val = (hnd).p; } while (0)

extern unsigned long total_pages;
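
/*
 * Dispatch arch-specific domctl operations for IA64.  Only a privileged
 * domain (normally dom0) may issue these; each case looks up the target
 * domain, performs the request and copies results back to the caller.
 */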
long arch_do_domctl(xen_domctl_t *op, XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
{
    long ret = 0;

    if ( !IS_PRIV(current->domain) )
        return -EPERM;

    switch ( op->cmd )
    {
    case XEN_DOMCTL_getmemlist:
    {
        unsigned long i;
        struct domain *d = get_domain_by_id(op->domain);
        unsigned long start_page = op->u.getmemlist.start_pfn;
        unsigned long nr_pages = op->u.getmemlist.max_pfns;
        uint64_t mfn;

        if ( d == NULL ) {
            ret = -EINVAL;
            break;
        }
        for (i = 0 ; i < nr_pages ; i++) {
            pte_t *pte;

            pte = (pte_t *)lookup_noalloc_domain_pte(d,
                                             (start_page + i) << PAGE_SHIFT);
            if (pte && pte_present(*pte))
                mfn = start_page + i;
            else
                mfn = INVALID_MFN;

            if ( copy_to_guest_offset(op->u.getmemlist.buffer, i, &mfn, 1) ) {
                ret = -EFAULT;
                break;
            }
        }

        op->u.getmemlist.num_pfns = i;
        if (copy_to_guest(u_domctl, op, 1))
            ret = -EFAULT;

        put_domain(d);
    }
    break;
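
    /*
     * Query or set per-domain architecture parameters: for HVM/VMX guests
     * this performs VMX platform setup, for PV guests it configures the
     * hypercall break immediate, the (optionally per-vcpu) VHPT and the
     * guest firmware; on success the shared_info pages are mapped at
     * IA64_SHARED_INFO_PADDR.
     */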
    case XEN_DOMCTL_arch_setup:
    {
        xen_domctl_arch_setup_t *ds = &op->u.arch_setup;
        struct domain *d = get_domain_by_id(op->domain);

        if ( d == NULL) {
            ret = -EINVAL;
            break;
        }

        if (ds->flags & XEN_DOMAINSETUP_query) {
            /* Set flags.  */
            if (is_hvm_domain(d))
                ds->flags |= XEN_DOMAINSETUP_hvm_guest;
            /* Set params.  */
            ds->bp = 0; /* unknown. */
            ds->maxmem = d->arch.convmem_end;
            ds->xsi_va = d->arch.shared_info_va;
            ds->hypercall_imm = d->arch.breakimm;
#ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
            ds->vhpt_size_log2 = d->arch.vhpt_size_log2;
#endif
            /* Copy back.  */
            if ( copy_to_guest(u_domctl, op, 1) )
                ret = -EFAULT;
        }
        else {
            if (is_hvm_domain(d) || (ds->flags & XEN_DOMAINSETUP_hvm_guest)) {
                if (!vmx_enabled) {
                    printk("No VMX hardware feature for vmx domain.\n");
                    ret = -EINVAL;
                } else {
                    d->is_hvm = 1;
                    xen_ia64_set_convmem_end(d, ds->maxmem);
                    ret = vmx_setup_platform(d);
                }
            }
            else {
                if (ds->hypercall_imm) {
                    /* dom_fw_setup() reads d->arch.breakimm */
                    struct vcpu *v;
                    d->arch.breakimm = ds->hypercall_imm;
                    for_each_vcpu (d, v)
                        v->arch.breakimm = d->arch.breakimm;
                }
#ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
                if (ds->vhpt_size_log2 == -1) {
                    d->arch.has_pervcpu_vhpt = 0;
                    ds->vhpt_size_log2 = -1;
                    printk(XENLOG_INFO "XEN_DOMCTL_arch_setup: "
                           "domain %d VHPT is global.\n", d->domain_id);
                } else {
                    d->arch.has_pervcpu_vhpt = 1;
                    d->arch.vhpt_size_log2 = ds->vhpt_size_log2;
                    printk(XENLOG_INFO "XEN_DOMCTL_arch_setup: "
                           "domain %d VHPT is per vcpu. size=2**%d\n",
                           d->domain_id, ds->vhpt_size_log2);
                }
#endif
                if (ds->xsi_va)
                    d->arch.shared_info_va = ds->xsi_va;
                ret = dom_fw_setup(d, ds->bp, ds->maxmem);
            }
            if (ret == 0) {
                /*
                 * XXX IA64_SHARED_INFO_PADDR
                 * Assign these pages into the guest pseudo-physical address
                 * space so that dom0 can map this page by gmfn.
                 * This is necessary for domain build, save, restore and
                 * dump-core.
                 */
                unsigned long i;
                for (i = 0; i < XSI_SIZE; i += PAGE_SIZE)
                    assign_domain_page(d, IA64_SHARED_INFO_PADDR + i,
                                       virt_to_maddr(d->shared_info + i));
            }
        }

        put_domain(d);
    }
    break;
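
    /* Control the domain's shadow (log-dirty) mode, e.g. for live migration. */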
    case XEN_DOMCTL_shadow_op:
    {
        struct domain *d;
        ret = -ESRCH;
        d = get_domain_by_id(op->domain);
        if ( d != NULL )
        {
            ret = shadow_mode_control(d, &op->u.shadow_op);
            put_domain(d);
            if (copy_to_guest(u_domctl, op, 1))
                ret = -EFAULT;
        }
    }
    break;
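
    /* Grant or revoke the domain's access to a range of legacy I/O ports. */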
    case XEN_DOMCTL_ioport_permission:
    {
        struct domain *d;
        unsigned int fp = op->u.ioport_permission.first_port;
        unsigned int np = op->u.ioport_permission.nr_ports;
        unsigned int lp = fp + np - 1;

        ret = -ESRCH;
        d = get_domain_by_id(op->domain);
        if (unlikely(d == NULL))
            break;

        if (np == 0)
            ret = 0;
        else {
            if (op->u.ioport_permission.allow_access)
                ret = ioports_permit_access(d, fp, lp);
            else
                ret = ioports_deny_access(d, fp, lp);
        }

        put_domain(d);
    }
    break;
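
    /*
     * Deliver a trigger to the domain; only INIT, and only for VMX (HVM)
     * domains, is currently supported.
     */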
    case XEN_DOMCTL_sendtrigger:
    {
        struct domain *d;
        struct vcpu *v;

        ret = -ESRCH;
        d = get_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = -EINVAL;
        if ( op->u.sendtrigger.vcpu >= MAX_VIRT_CPUS )
            goto sendtrigger_out;

        ret = -ESRCH;
        if ( (v = d->vcpu[op->u.sendtrigger.vcpu]) == NULL )
            goto sendtrigger_out;

        ret = 0;
        switch (op->u.sendtrigger.trigger)
        {
        case XEN_DOMCTL_SENDTRIGGER_INIT:
        {
            if (VMX_DOMAIN(v))
                vmx_pend_pal_init(d);
            else
                ret = -ENOSYS;
        }
        break;

        default:
            ret = -ENOSYS;
        }

    sendtrigger_out:
        put_domain(d);
    }
    break;
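
    /*
     * Load a saved HVM context image into the domain.  The domain is
     * paused around hvm_load() so the restore happens atomically with
     * respect to the running vcpus.
     */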
    case XEN_DOMCTL_sethvmcontext:
    {
        struct hvm_domain_context c;
        struct domain *d;

        c.cur = 0;
        c.size = op->u.hvmcontext.size;
        c.data = NULL;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if (d == NULL)
            break;

#ifdef CONFIG_X86
        ret = xsm_hvmcontext(d, op->cmd);
        if (ret)
            goto sethvmcontext_out;
#endif /* CONFIG_X86 */

        ret = -EINVAL;
        if (!is_hvm_domain(d))
            goto sethvmcontext_out;

        ret = -ENOMEM;
        c.data = xmalloc_bytes(c.size);
        if (c.data == NULL)
            goto sethvmcontext_out;

        ret = -EFAULT;
        if (copy_from_guest(c.data, op->u.hvmcontext.buffer, c.size) != 0)
            goto sethvmcontext_out;

        domain_pause(d);
        ret = hvm_load(d, &c);
        domain_unpause(d);

    sethvmcontext_out:
        if (c.data != NULL)
            xfree(c.data);

        rcu_unlock_domain(d);
    }
    break;
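
    /*
     * Save the domain's HVM context.  A NULL guest buffer requests the
     * required size only; otherwise the context is marshalled into a
     * local buffer and copied back to the caller.
     */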
    case XEN_DOMCTL_gethvmcontext:
    {
        struct hvm_domain_context c;
        struct domain *d;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if (d == NULL)
            break;

#ifdef CONFIG_X86
        ret = xsm_hvmcontext(d, op->cmd);
        if (ret)
            goto gethvmcontext_out;
#endif /* CONFIG_X86 */

        ret = -EINVAL;
        if (!is_hvm_domain(d))
            goto gethvmcontext_out;

        c.cur = 0;
        c.size = hvm_save_size(d);
        c.data = NULL;

        if (guest_handle_is_null(op->u.hvmcontext.buffer)) {
            /* Client is querying for the correct buffer size */
            op->u.hvmcontext.size = c.size;
            ret = 0;
            goto gethvmcontext_out;
        }

        /* Check that the client has a big enough buffer */
        ret = -ENOSPC;
        if (op->u.hvmcontext.size < c.size)
            goto gethvmcontext_out;

        /* Allocate our own marshalling buffer */
        ret = -ENOMEM;
        c.data = xmalloc_bytes(c.size);
        if (c.data == NULL)
            goto gethvmcontext_out;

        domain_pause(d);
        ret = hvm_save(d, &c);
        domain_unpause(d);

        op->u.hvmcontext.size = c.cur;
        if (copy_to_guest(op->u.hvmcontext.buffer, c.data, c.size) != 0)
            ret = -EFAULT;

    gethvmcontext_out:
        if (copy_to_guest(u_domctl, op, 1))
            ret = -EFAULT;

        if (c.data != NULL)
            xfree(c.data);

        rcu_unlock_domain(d);
    }
    break;
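
    /* Apply an optional IA64 feature setting requested by the toolstack. */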
    case XEN_DOMCTL_set_opt_feature:
    {
        struct xen_ia64_opt_feature *optf = &op->u.set_opt_feature.optf;
        struct domain *d = get_domain_by_id(op->domain);

        if (d == NULL) {
            ret = -EINVAL;
            break;
        }

        ret = domain_opt_feature(d, optf);
        put_domain(d);
    }
    break;

    default:
        printk("arch_do_domctl: unrecognized domctl: %d!!!\n", op->cmd);
        ret = -ENOSYS;
    }

    return ret;
}
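
/*
 * Arch-specific sysctl handler.  Only XEN_SYSCTL_physinfo is implemented:
 * it reports CPU topology, memory statistics and an optional cpu-to-node
 * map to the caller.
 */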
long arch_do_sysctl(xen_sysctl_t *op, XEN_GUEST_HANDLE(xen_sysctl_t) u_sysctl)
{
    long ret = 0;

    switch ( op->cmd )
    {
    case XEN_SYSCTL_physinfo:
    {
        int i;
        uint32_t max_array_ent;

        xen_sysctl_physinfo_t *pi = &op->u.physinfo;

        pi->threads_per_core = cpus_weight(cpu_sibling_map[0]);
        pi->cores_per_socket =
            cpus_weight(cpu_core_map[0]) / pi->threads_per_core;
        pi->nr_cpus = (u32)num_online_cpus();
        pi->nr_nodes = num_online_nodes();
        pi->total_pages = total_pages;
        pi->free_pages = avail_domheap_pages();
        pi->scrub_pages = avail_scrub_pages();
        pi->cpu_khz = local_cpu_data->proc_freq / 1000;
        memset(pi->hw_cap, 0, sizeof(pi->hw_cap));

        max_array_ent = pi->max_cpu_id;
        pi->max_cpu_id = last_cpu(cpu_online_map);
        max_array_ent = min_t(uint32_t, max_array_ent, pi->max_cpu_id);

        ret = 0;

        if (!guest_handle_is_null(pi->cpu_to_node)) {
            for (i = 0; i <= max_array_ent; i++) {
                uint32_t node = cpu_online(i) ? cpu_to_node(i) : ~0u;
                if (copy_to_guest_offset(pi->cpu_to_node, i, &node, 1)) {
                    ret = -EFAULT;
                    break;
                }
            }
        }

        if ( copy_to_guest(u_sysctl, op, 1) )
            ret = -EFAULT;
    }
    break;

    default:
        printk("arch_do_sysctl: unrecognized sysctl: %d!!!\n", op->cmd);
        ret = -ENOSYS;
    }

    return ret;
}
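
/*
 * Map an MMIO range into the calling domain's metaphysical address space.
 * The domain must already have been granted access to the underlying
 * I/O memory.
 */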
static unsigned long
dom0vp_ioremap(struct domain *d, unsigned long mpaddr, unsigned long size)
{
    unsigned long end;

    /* Linux may use a 0 size!  */
    if (size == 0)
        size = PAGE_SIZE;

    if (size == 0)
        printk(XENLOG_WARNING "ioremap(): Trying to map %lx, size 0\n", mpaddr);

    end = PAGE_ALIGN(mpaddr + size);

    if (!iomem_access_permitted(d, mpaddr >> PAGE_SHIFT,
                                (end >> PAGE_SHIFT) - 1))
        return -EPERM;

    return assign_domain_mmio_page(d, mpaddr, mpaddr, size,
                                   ASSIGN_writable | ASSIGN_nocache);
}
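
/*
 * Report the revision of the FPSWA (floating-point software assist)
 * firmware interface, if one is present, to a guest-supplied buffer.
 */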
static unsigned long
dom0vp_fpswa_revision(XEN_GUEST_HANDLE(uint) revision)
{
    if (fpswa_interface == NULL)
        return -ENOSYS;
    if (copy_to_guest(revision, &fpswa_interface->revision, 1))
        return -EFAULT;
    return 0;
}
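
/*
 * Register an additional I/O port space (as reported by dom0) and grant
 * the calling domain access to its 64K port range.
 */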
static unsigned long
dom0vp_add_io_space(struct domain *d, unsigned long phys_base,
                    unsigned long sparse, unsigned long space_number)
{
    unsigned int fp, lp;

    /*
     * Registering new io_space roughly based on linux
     * arch/ia64/pci/pci.c:new_space()
     */

    /* Skip legacy I/O port space, we already know about it */
    if (phys_base == 0)
        return 0;

    /*
     * Dom0 Linux initializes io spaces sequentially, if that changes,
     * we'll need to add thread protection and the ability to handle
     * a sparsely populated io_space array.
     */
    if (space_number > MAX_IO_SPACES || space_number != num_io_spaces)
        return -EINVAL;

    io_space[space_number].mmio_base = phys_base;
    io_space[space_number].sparse = sparse;

    num_io_spaces++;

    fp = space_number << IO_SPACE_BITS;
    lp = fp | 0xffff;

    return ioports_permit_access(d, fp, lp);
}
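
/*
 * Entry point for the IA64_DOM0VP_* hypercalls used by a paravirtualized
 * domain: physical<->machine translation lookups, physmap manipulation,
 * p2m table exposure, perfmon control and I/O space registration.
 */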
unsigned long
do_dom0vp_op(unsigned long cmd,
             unsigned long arg0, unsigned long arg1, unsigned long arg2,
             unsigned long arg3)
{
    unsigned long ret = 0;
    struct domain *d = current->domain;

    switch (cmd) {
    case IA64_DOM0VP_ioremap:
        ret = dom0vp_ioremap(d, arg0, arg1);
        break;
    case IA64_DOM0VP_phystomach:
        ret = ____lookup_domain_mpa(d, arg0 << PAGE_SHIFT);
        if (ret == INVALID_MFN) {
            dprintk(XENLOG_INFO, "%s: INVALID_MFN ret: 0x%lx\n",
                    __func__, ret);
        } else {
            ret = (ret & _PFN_MASK) >> PAGE_SHIFT; /* XXX pte_pfn() */
        }
        perfc_incr(dom0vp_phystomach);
        break;
    case IA64_DOM0VP_machtophys:
        if (!mfn_valid(arg0)) {
            ret = INVALID_M2P_ENTRY;
            break;
        }
        ret = get_gpfn_from_mfn(arg0);
        perfc_incr(dom0vp_machtophys);
        break;
    case IA64_DOM0VP_zap_physmap:
        ret = dom0vp_zap_physmap(d, arg0, (unsigned int)arg1);
        break;
    case IA64_DOM0VP_add_physmap:
        if (!IS_PRIV(d))
            return -EPERM;
        ret = dom0vp_add_physmap(d, arg0, arg1, (unsigned int)arg2,
                                 (domid_t)arg3);
        break;
    case IA64_DOM0VP_add_physmap_with_gmfn:
        if (!IS_PRIV(d))
            return -EPERM;
        ret = dom0vp_add_physmap_with_gmfn(d, arg0, arg1, (unsigned int)arg2,
                                           (domid_t)arg3);
        break;
    case IA64_DOM0VP_expose_p2m:
        ret = dom0vp_expose_p2m(d, arg0, arg1, arg2, arg3);
        break;
    case IA64_DOM0VP_perfmon: {
        XEN_GUEST_HANDLE(void) hnd;
        set_xen_guest_handle(hnd, (void*)arg1);
        ret = do_perfmon_op(arg0, hnd, arg2);
        break;
    }
    case IA64_DOM0VP_fpswa_revision: {
        XEN_GUEST_HANDLE(uint) hnd;
        set_xen_guest_handle(hnd, (uint*)arg0);
        ret = dom0vp_fpswa_revision(hnd);
        break;
    }
    case IA64_DOM0VP_add_io_space:
        ret = dom0vp_add_io_space(d, arg0, arg1, arg2);
        break;
    case IA64_DOM0VP_expose_foreign_p2m: {
        XEN_GUEST_HANDLE(char) hnd;
        set_xen_guest_handle(hnd, (char*)arg2);
        ret = dom0vp_expose_foreign_p2m(d, arg0, (domid_t)arg1, hnd, arg3);
        break;
    }
    case IA64_DOM0VP_unexpose_foreign_p2m:
        ret = dom0vp_unexpose_foreign_p2m(d, arg0, arg1);
        break;
    default:
        ret = -1;
        printk("unknown dom0_vp_op 0x%lx\n", cmd);
        break;
    }

    return ret;
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */