ia64/xen-unstable
xen/arch/ia64/xen/dom0_ops.c @ 16817:564fa97594a6

[IA64] Introduce dom0_vhpt_size_log2 boot option to change dom0 vhpt size

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
Author:   Alex Williamson <alex.williamson@hp.com>
Date:     Tue Jan 22 08:26:20 2008 -0700
Parents:  0ace9a451a25
Children: cff4c8a1aa28
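
The option named in the patch title is a hypervisor boot parameter. As a
sketch (the name=value form and the example exponent are assumptions based
on Xen's usual integer boot options; the accepted range is enforced
elsewhere in the tree), a GRUB entry requesting a 2^24-byte (16MB) dom0
VHPT might read:

    kernel /boot/xen.gz ... dom0_vhpt_size_log2=24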

/******************************************************************************
 * Arch-specific dom0_ops.c
 *
 * Process command requests from domain-0 guest OS.
 *
 * Copyright (c) 2002, K A Fraser
 */

#include <xen/config.h>
#include <xen/types.h>
#include <xen/lib.h>
#include <xen/mm.h>
#include <public/domctl.h>
#include <public/sysctl.h>
#include <xen/sched.h>
#include <xen/event.h>
#include <asm/pdb.h>
#include <xen/trace.h>
#include <xen/console.h>
#include <xen/guest_access.h>
#include <asm/vmx.h>
#include <asm/dom_fw.h>
#include <asm/vhpt.h>
#include <xen/iocap.h>
#include <xen/errno.h>
#include <xen/nodemask.h>
#include <asm/dom_fw_utils.h>
#include <asm/hvm/support.h>
#include <xsm/xsm.h>
#include <public/hvm/save.h>

#define get_xen_guest_handle(val, hnd)  do { val = (hnd).p; } while (0)

extern unsigned long total_pages;
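
/*
 * IA64-specific handler for the domctl hypercall.  Only privileged
 * (dom0) callers are allowed; each command looks up the target domain
 * under the RCU lock and drops it before returning.
 */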
long arch_do_domctl(xen_domctl_t *op, XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
{
    long ret = 0;

    if ( !IS_PRIV(current->domain) )
        return -EPERM;

    switch ( op->cmd )
    {
    case XEN_DOMCTL_getmemlist:
    {
        unsigned long i;
        struct domain *d = rcu_lock_domain_by_id(op->domain);
        unsigned long start_page = op->u.getmemlist.start_pfn;
        unsigned long nr_pages = op->u.getmemlist.max_pfns;
        uint64_t mfn;

        if ( d == NULL ) {
            ret = -EINVAL;
            break;
        }
        for (i = 0 ; i < nr_pages ; i++) {
            pte_t *pte;

            pte = (pte_t *)lookup_noalloc_domain_pte(d,
                                           (start_page + i) << PAGE_SHIFT);
            if (pte && pte_present(*pte))
                mfn = start_page + i;
            else
                mfn = INVALID_MFN;

            if ( copy_to_guest_offset(op->u.getmemlist.buffer, i, &mfn, 1) ) {
                ret = -EFAULT;
                break;
            }
        }

        op->u.getmemlist.num_pfns = i;
        if (copy_to_guest(u_domctl, op, 1))
            ret = -EFAULT;

        rcu_unlock_domain(d);
    }
    break;
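
    /*
     * XEN_DOMCTL_arch_setup: with XEN_DOMAINSETUP_query set, report the
     * domain's current setup (including vhpt_size_log2 when per-vCPU
     * VHPTs are configured); otherwise apply it -- VMX platform setup
     * for HVM guests, or breakimm/VHPT-size/firmware setup for
     * paravirtualized ones.
     */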
    case XEN_DOMCTL_arch_setup:
    {
        xen_domctl_arch_setup_t *ds = &op->u.arch_setup;
        struct domain *d = rcu_lock_domain_by_id(op->domain);

        if ( d == NULL) {
            ret = -EINVAL;
            break;
        }

        if (ds->flags & XEN_DOMAINSETUP_query) {
            /* Set flags.  */
            if (is_hvm_domain(d))
                ds->flags |= XEN_DOMAINSETUP_hvm_guest;
            /* Set params.  */
            ds->bp = 0;  /* unknown.  */
            ds->maxmem = d->arch.convmem_end;
            ds->xsi_va = d->arch.shared_info_va;
            ds->hypercall_imm = d->arch.breakimm;
#ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
            ds->vhpt_size_log2 = d->arch.vhpt_size_log2;
#endif
            /* Copy back.  */
            if ( copy_to_guest(u_domctl, op, 1) )
                ret = -EFAULT;
        }
        else {
            if (is_hvm_domain(d) || (ds->flags & XEN_DOMAINSETUP_hvm_guest)) {
                if (!vmx_enabled) {
                    printk("No VMX hardware feature for vmx domain.\n");
                    ret = -EINVAL;
                } else {
                    d->is_hvm = 1;
                    xen_ia64_set_convmem_end(d, ds->maxmem);
                    ret = vmx_setup_platform(d);
                }
            }
            else {
                if (ds->hypercall_imm) {
                    /* dom_fw_setup() reads d->arch.breakimm */
                    struct vcpu *v;
                    d->arch.breakimm = ds->hypercall_imm;
                    for_each_vcpu (d, v)
                        v->arch.breakimm = d->arch.breakimm;
                }
                domain_set_vhpt_size(d, ds->vhpt_size_log2);
                if (ds->xsi_va)
                    d->arch.shared_info_va = ds->xsi_va;
                ret = dom_fw_setup(d, ds->bp, ds->maxmem);
            }
            if (ret == 0) {
                /*
                 * XXX IA64_SHARED_INFO_PADDR
                 * assign these pages into guest pseudo physical address
                 * space for dom0 to map this page by gmfn.
                 * this is necessary for domain build, save, restore and
                 * dump-core.
                 */
                unsigned long i;
                for (i = 0; i < XSI_SIZE; i += PAGE_SIZE)
                    assign_domain_page(d, IA64_SHARED_INFO_PADDR + i,
                                       virt_to_maddr(d->shared_info + i));
            }
        }

        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_shadow_op:
    {
        struct domain *d;
        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d != NULL )
        {
            ret = shadow_mode_control(d, &op->u.shadow_op);
            rcu_unlock_domain(d);
            if (copy_to_guest(u_domctl, op, 1))
                ret = -EFAULT;
        }
    }
    break;

    case XEN_DOMCTL_ioport_permission:
    {
        struct domain *d;
        unsigned int fp = op->u.ioport_permission.first_port;
        unsigned int np = op->u.ioport_permission.nr_ports;
        unsigned int lp = fp + np - 1;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if (unlikely(d == NULL))
            break;

        if (np == 0)
            ret = 0;
        else {
            if (op->u.ioport_permission.allow_access)
                ret = ioports_permit_access(d, fp, lp);
            else
                ret = ioports_deny_access(d, fp, lp);
        }

        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_sendtrigger:
    {
        struct domain *d;
        struct vcpu *v;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if ( d == NULL )
            break;

        ret = -EINVAL;
        if ( op->u.sendtrigger.vcpu >= MAX_VIRT_CPUS )
            goto sendtrigger_out;

        ret = -ESRCH;
        if ( (v = d->vcpu[op->u.sendtrigger.vcpu]) == NULL )
            goto sendtrigger_out;

        ret = 0;
        switch (op->u.sendtrigger.trigger)
        {
        case XEN_DOMCTL_SENDTRIGGER_INIT:
        {
            if (VMX_DOMAIN(v))
                vmx_pend_pal_init(d);
            else
                ret = -ENOSYS;
        }
        break;

        default:
            ret = -ENOSYS;
        }

    sendtrigger_out:
        rcu_unlock_domain(d);
    }
    break;
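
    /*
     * XEN_DOMCTL_sethvmcontext/gethvmcontext marshal HVM state through a
     * temporary xmalloc'd buffer while the domain is paused.  A caller
     * may pass a null buffer to gethvmcontext first to learn the
     * required size, then retry with a buffer of at least that size.
     */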

    case XEN_DOMCTL_sethvmcontext:
    {
        struct hvm_domain_context c;
        struct domain *d;

        c.cur = 0;
        c.size = op->u.hvmcontext.size;
        c.data = NULL;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if (d == NULL)
            break;

#ifdef CONFIG_X86
        ret = xsm_hvmcontext(d, op->cmd);
        if (ret)
            goto sethvmcontext_out;
#endif /* CONFIG_X86 */

        ret = -EINVAL;
        if (!is_hvm_domain(d))
            goto sethvmcontext_out;

        ret = -ENOMEM;
        c.data = xmalloc_bytes(c.size);
        if (c.data == NULL)
            goto sethvmcontext_out;

        ret = -EFAULT;
        if (copy_from_guest(c.data, op->u.hvmcontext.buffer, c.size) != 0)
            goto sethvmcontext_out;

        domain_pause(d);
        ret = hvm_load(d, &c);
        domain_unpause(d);

    sethvmcontext_out:
        if (c.data != NULL)
            xfree(c.data);

        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_gethvmcontext:
    {
        struct hvm_domain_context c;
        struct domain *d;

        /* Initialize before any goto, so the cleanup path never
         * xfree()s an uninitialized pointer. */
        c.cur = 0;
        c.data = NULL;

        ret = -ESRCH;
        d = rcu_lock_domain_by_id(op->domain);
        if (d == NULL)
            break;

#ifdef CONFIG_X86
        ret = xsm_hvmcontext(d, op->cmd);
        if (ret)
            goto gethvmcontext_out;
#endif /* CONFIG_X86 */

        ret = -EINVAL;
        if (!is_hvm_domain(d))
            goto gethvmcontext_out;

        c.size = hvm_save_size(d);

        if (guest_handle_is_null(op->u.hvmcontext.buffer)) {
            /* Client is querying for the correct buffer size */
            op->u.hvmcontext.size = c.size;
            ret = 0;
            goto gethvmcontext_out;
        }

        /* Check that the client has a big enough buffer */
        ret = -ENOSPC;
        if (op->u.hvmcontext.size < c.size)
            goto gethvmcontext_out;

        /* Allocate our own marshalling buffer */
        ret = -ENOMEM;
        c.data = xmalloc_bytes(c.size);
        if (c.data == NULL)
            goto gethvmcontext_out;

        domain_pause(d);
        ret = hvm_save(d, &c);
        domain_unpause(d);

        op->u.hvmcontext.size = c.cur;
        if (copy_to_guest(op->u.hvmcontext.buffer, c.data, c.size) != 0)
            ret = -EFAULT;

    gethvmcontext_out:
        if (copy_to_guest(u_domctl, op, 1))
            ret = -EFAULT;

        if (c.data != NULL)
            xfree(c.data);

        rcu_unlock_domain(d);
    }
    break;

    case XEN_DOMCTL_set_opt_feature:
    {
        struct xen_ia64_opt_feature *optf = &op->u.set_opt_feature.optf;
        struct domain *d = rcu_lock_domain_by_id(op->domain);

        if (d == NULL) {
            ret = -EINVAL;
            break;
        }

        ret = domain_opt_feature(d, optf);
        rcu_unlock_domain(d);
    }
    break;

    default:
        printk("arch_do_domctl: unrecognized domctl: %d!!!\n", op->cmd);
        ret = -ENOSYS;
    }

    return ret;
}
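
/*
 * IA64-specific handler for the sysctl hypercall.  XEN_SYSCTL_physinfo
 * reports CPU topology and memory totals and, if the caller supplies a
 * cpu_to_node array, the node of each online CPU up to max_cpu_id.
 */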
long arch_do_sysctl(xen_sysctl_t *op, XEN_GUEST_HANDLE(xen_sysctl_t) u_sysctl)
{
    long ret = 0;

    switch ( op->cmd )
    {
    case XEN_SYSCTL_physinfo:
    {
        int i;
        uint32_t max_array_ent;

        xen_sysctl_physinfo_t *pi = &op->u.physinfo;

        pi->threads_per_core = cpus_weight(cpu_sibling_map[0]);
        pi->cores_per_socket =
            cpus_weight(cpu_core_map[0]) / pi->threads_per_core;
        pi->nr_cpus = (u32)num_online_cpus();
        pi->nr_nodes = num_online_nodes();
        pi->total_pages = total_pages;
        pi->free_pages = avail_domheap_pages();
        pi->scrub_pages = avail_scrub_pages();
        pi->cpu_khz = local_cpu_data->proc_freq / 1000;
        memset(pi->hw_cap, 0, sizeof(pi->hw_cap));

        max_array_ent = pi->max_cpu_id;
        pi->max_cpu_id = last_cpu(cpu_online_map);
        max_array_ent = min_t(uint32_t, max_array_ent, pi->max_cpu_id);

        ret = 0;

        if (!guest_handle_is_null(pi->cpu_to_node)) {
            for (i = 0; i <= max_array_ent; i++) {
                uint32_t node = cpu_online(i) ? cpu_to_node(i) : ~0u;
                if (copy_to_guest_offset(pi->cpu_to_node, i, &node, 1)) {
                    ret = -EFAULT;
                    break;
                }
            }
        }

        if ( copy_to_guest(u_sysctl, op, 1) )
            ret = -EFAULT;
    }
    break;

    default:
        printk("arch_do_sysctl: unrecognized sysctl: %d!!!\n", op->cmd);
        ret = -ENOSYS;
    }

    return ret;
}
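
/*
 * Helpers for the dom0vp hypercall, dispatched from do_dom0vp_op()
 * below.  dom0vp_ioremap() gives the calling domain an uncacheable,
 * writable mapping of an MMIO range, provided iomem access to every
 * page of the range has been permitted.
 */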
static unsigned long
dom0vp_ioremap(struct domain *d, unsigned long mpaddr, unsigned long size)
{
    unsigned long end;

    /* Linux may use a 0 size!  Warn, then fall back to a single page.
     * (The original code set size = PAGE_SIZE before testing for 0, so
     * the warning was unreachable.) */
    if (size == 0) {
        printk(XENLOG_WARNING "ioremap(): Trying to map %lx, size 0\n",
               mpaddr);
        size = PAGE_SIZE;
    }

    end = PAGE_ALIGN(mpaddr + size);

    if (!iomem_access_permitted(d, mpaddr >> PAGE_SHIFT,
                                (end >> PAGE_SHIFT) - 1))
        return -EPERM;

    return assign_domain_mmio_page(d, mpaddr, mpaddr, size,
                                   ASSIGN_writable | ASSIGN_nocache);
}

static unsigned long
dom0vp_fpswa_revision(XEN_GUEST_HANDLE(uint) revision)
{
    if (fpswa_interface == NULL)
        return -ENOSYS;
    if (copy_to_guest(revision, &fpswa_interface->revision, 1))
        return -EFAULT;
    return 0;
}

static unsigned long
dom0vp_add_io_space(struct domain *d, unsigned long phys_base,
                    unsigned long sparse, unsigned long space_number)
{
    unsigned int fp, lp;

    /*
     * Registering a new io_space, roughly based on Linux
     * arch/ia64/pci/pci.c:new_space()
     */

    /* Skip legacy I/O port space, we already know about it */
    if (phys_base == 0)
        return 0;

    /*
     * Dom0 Linux initializes io spaces sequentially, if that changes,
     * we'll need to add thread protection and the ability to handle
     * a sparsely populated io_space array.
     * io_space[] holds MAX_IO_SPACES entries, so the index must stay
     * strictly below it ('>' here would allow an off-by-one overrun).
     */
    if (space_number >= MAX_IO_SPACES || space_number != num_io_spaces)
        return -EINVAL;

    io_space[space_number].mmio_base = phys_base;
    io_space[space_number].sparse = sparse;

    num_io_spaces++;

    fp = space_number << IO_SPACE_BITS;
    lp = fp | 0xffff;

    return ioports_permit_access(d, fp, lp);
}
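
/*
 * Entry point for the IA64_DOM0VP_* hypercall family.  Commands take up
 * to four scalar arguments; pointer arguments arrive as unsigned longs
 * and are wrapped into guest handles before use.  The add_physmap
 * variants are restricted to privileged callers.
 */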
unsigned long
do_dom0vp_op(unsigned long cmd,
             unsigned long arg0, unsigned long arg1, unsigned long arg2,
             unsigned long arg3)
{
    unsigned long ret = 0;
    struct domain *d = current->domain;

    switch (cmd) {
    case IA64_DOM0VP_ioremap:
        ret = dom0vp_ioremap(d, arg0, arg1);
        break;
    case IA64_DOM0VP_phystomach:
        ret = ____lookup_domain_mpa(d, arg0 << PAGE_SHIFT);
        if (ret == INVALID_MFN) {
            dprintk(XENLOG_INFO, "%s: INVALID_MFN ret: 0x%lx\n",
                    __func__, ret);
        } else {
            ret = (ret & _PFN_MASK) >> PAGE_SHIFT; /* XXX pte_pfn() */
        }
        perfc_incr(dom0vp_phystomach);
        break;
    case IA64_DOM0VP_machtophys:
        if (!mfn_valid(arg0)) {
            ret = INVALID_M2P_ENTRY;
            break;
        }
        ret = get_gpfn_from_mfn(arg0);
        perfc_incr(dom0vp_machtophys);
        break;
    case IA64_DOM0VP_zap_physmap:
        ret = dom0vp_zap_physmap(d, arg0, (unsigned int)arg1);
        break;
    case IA64_DOM0VP_add_physmap:
        if (!IS_PRIV(d))
            return -EPERM;
        ret = dom0vp_add_physmap(d, arg0, arg1, (unsigned int)arg2,
                                 (domid_t)arg3);
        break;
    case IA64_DOM0VP_add_physmap_with_gmfn:
        if (!IS_PRIV(d))
            return -EPERM;
        ret = dom0vp_add_physmap_with_gmfn(d, arg0, arg1, (unsigned int)arg2,
                                           (domid_t)arg3);
        break;
    case IA64_DOM0VP_expose_p2m:
        ret = dom0vp_expose_p2m(d, arg0, arg1, arg2, arg3);
        break;
    case IA64_DOM0VP_perfmon: {
        XEN_GUEST_HANDLE(void) hnd;
        set_xen_guest_handle(hnd, (void*)arg1);
        ret = do_perfmon_op(arg0, hnd, arg2);
        break;
    }
    case IA64_DOM0VP_fpswa_revision: {
        XEN_GUEST_HANDLE(uint) hnd;
        set_xen_guest_handle(hnd, (uint*)arg0);
        ret = dom0vp_fpswa_revision(hnd);
        break;
    }
    case IA64_DOM0VP_add_io_space:
        ret = dom0vp_add_io_space(d, arg0, arg1, arg2);
        break;
    case IA64_DOM0VP_expose_foreign_p2m: {
        XEN_GUEST_HANDLE(char) hnd;
        set_xen_guest_handle(hnd, (char*)arg2);
        ret = dom0vp_expose_foreign_p2m(d, arg0, (domid_t)arg1, hnd, arg3);
        break;
    }
    case IA64_DOM0VP_unexpose_foreign_p2m:
        ret = dom0vp_unexpose_foreign_p2m(d, arg0, arg1);
        break;
    default:
        ret = -1;
        printk("unknown dom0_vp_op 0x%lx\n", cmd);
        break;
    }

    return ret;
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */