ia64/xen-unstable

annotate xen/arch/ia64/xen/dom0_ops.c @ 16785:af3550f53874

[IA64] domheap: Don't pin xenheap down. Now it's unnecessary.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Alex Williamson <alex.williamson@hp.com>
date Thu Jan 17 12:05:43 2008 -0700 (2008-01-17)
parents 09cd682ac68e
children 0ace9a451a25
rev   line source
djm@6458 1 /******************************************************************************
djm@6458 2 * Arch-specific dom0_ops.c
djm@6458 3 *
djm@6458 4 * Process command requests from domain-0 guest OS.
djm@6458 5 *
djm@6458 6 * Copyright (c) 2002, K A Fraser
djm@6458 7 */
djm@6458 8
djm@6458 9 #include <xen/config.h>
djm@6458 10 #include <xen/types.h>
djm@6458 11 #include <xen/lib.h>
djm@6458 12 #include <xen/mm.h>
kfraser@11296 13 #include <public/domctl.h>
kfraser@11296 14 #include <public/sysctl.h>
djm@6458 15 #include <xen/sched.h>
djm@6458 16 #include <xen/event.h>
djm@6458 17 #include <asm/pdb.h>
djm@6458 18 #include <xen/trace.h>
djm@6458 19 #include <xen/console.h>
kaf24@9133 20 #include <xen/guest_access.h>
awilliam@9005 21 #include <asm/vmx.h>
awilliam@10570 22 #include <asm/dom_fw.h>
awilliam@10816 23 #include <xen/iocap.h>
awilliam@11048 24 #include <xen/errno.h>
awilliam@12003 25 #include <xen/nodemask.h>
alex@15841 26 #include <asm/dom_fw_utils.h>
alex@16172 27 #include <asm/hvm/support.h>
alex@16172 28 #include <xsm/xsm.h>
alex@16172 29 #include <public/hvm/save.h>
awilliam@10570 30
awilliam@12003 31 #define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0)
awilliam@10570 32
awilliam@9773 33 extern unsigned long total_pages;
kfraser@11296 34
kfraser@11296 35 long arch_do_domctl(xen_domctl_t *op, XEN_GUEST_HANDLE(xen_domctl_t) u_domctl)
djm@6458 36 {
djm@6458 37 long ret = 0;
djm@6458 38
djm@6458 39 if ( !IS_PRIV(current->domain) )
djm@6458 40 return -EPERM;
djm@6458 41
djm@6458 42 switch ( op->cmd )
djm@6458 43 {
kfraser@11296 44 case XEN_DOMCTL_getmemlist:
djm@6458 45 {
awilliam@10570 46 unsigned long i;
kaf24@13663 47 struct domain *d = get_domain_by_id(op->domain);
kaf24@11356 48 unsigned long start_page = op->u.getmemlist.start_pfn;
kaf24@11356 49 unsigned long nr_pages = op->u.getmemlist.max_pfns;
kfraser@13608 50 uint64_t mfn;
djm@6458 51
awilliam@10663 52 if ( d == NULL ) {
awilliam@10663 53 ret = -EINVAL;
awilliam@10570 54 break;
awilliam@10663 55 }
awilliam@10570 56 for (i = 0 ; i < nr_pages ; i++) {
awilliam@10570 57 pte_t *pte;
djm@6799 58
awilliam@10570 59 pte = (pte_t *)lookup_noalloc_domain_pte(d,
awilliam@10570 60 (start_page + i) << PAGE_SHIFT);
awilliam@10570 61 if (pte && pte_present(*pte))
awilliam@12885 62 mfn = start_page + i;
awilliam@10570 63 else
awilliam@10570 64 mfn = INVALID_MFN;
djm@7333 65
awilliam@10570 66 if ( copy_to_guest_offset(op->u.getmemlist.buffer, i, &mfn, 1) ) {
awilliam@10570 67 ret = -EFAULT;
awilliam@10570 68 break;
awilliam@10570 69 }
awilliam@10570 70 }
djm@6458 71
awilliam@10570 72 op->u.getmemlist.num_pfns = i;
kfraser@11296 73 if (copy_to_guest(u_domctl, op, 1))
awilliam@10570 74 ret = -EFAULT;
awilliam@10570 75
awilliam@10570 76 put_domain(d);
djm@6458 77 }
djm@6458 78 break;
djm@7924 79
kfraser@11296 80 case XEN_DOMCTL_arch_setup:
djm@7924 81 {
kfraser@11296 82 xen_domctl_arch_setup_t *ds = &op->u.arch_setup;
kaf24@13663 83 struct domain *d = get_domain_by_id(op->domain);
awilliam@10570 84
awilliam@10570 85 if ( d == NULL) {
awilliam@10570 86 ret = -EINVAL;
awilliam@10570 87 break;
awilliam@10570 88 }
awilliam@10570 89
awilliam@10692 90 if (ds->flags & XEN_DOMAINSETUP_query) {
awilliam@10692 91 /* Set flags. */
alex@16748 92 if (is_hvm_domain(d))
awilliam@10692 93 ds->flags |= XEN_DOMAINSETUP_hvm_guest;
awilliam@10692 94 /* Set params. */
awilliam@10692 95 ds->bp = 0; /* unknown. */
alex@15139 96 ds->maxmem = d->arch.convmem_end;
awilliam@10692 97 ds->xsi_va = d->arch.shared_info_va;
awilliam@10692 98 ds->hypercall_imm = d->arch.breakimm;
keir@16158 99 #ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
keir@16158 100 ds->vhpt_size_log2 = d->arch.vhpt_size_log2;
keir@16158 101 #endif
awilliam@10692 102 /* Copy back. */
kfraser@11296 103 if ( copy_to_guest(u_domctl, op, 1) )
awilliam@10692 104 ret = -EFAULT;
awilliam@10570 105 }
awilliam@10570 106 else {
alex@16748 107 if (is_hvm_domain(d) || (ds->flags & XEN_DOMAINSETUP_hvm_guest)) {
awilliam@10692 108 if (!vmx_enabled) {
awilliam@10692 109 printk("No VMX hardware feature for vmx domain.\n");
awilliam@10692 110 ret = -EINVAL;
alex@15119 111 } else {
alex@16748 112 d->is_hvm = 1;
alex@15841 113 xen_ia64_set_convmem_end(d, ds->maxmem);
alex@16106 114 ret = vmx_setup_platform(d);
awilliam@10692 115 }
awilliam@10692 116 }
awilliam@10692 117 else {
awilliam@10692 118 if (ds->hypercall_imm) {
alex@15119 119 /* dom_fw_setup() reads d->arch.breakimm */
awilliam@10692 120 struct vcpu *v;
awilliam@10692 121 d->arch.breakimm = ds->hypercall_imm;
awilliam@10692 122 for_each_vcpu (d, v)
awilliam@10692 123 v->arch.breakimm = d->arch.breakimm;
awilliam@10692 124 }
keir@16158 125 #ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
keir@16158 126 if (ds->vhpt_size_log2 == -1) {
keir@16158 127 d->arch.has_pervcpu_vhpt = 0;
keir@16158 128 ds->vhpt_size_log2 = -1;
keir@16158 129 printk(XENLOG_INFO "XEN_DOMCTL_arch_setup: "
keir@16158 130 "domain %d VHPT is global.\n", d->domain_id);
keir@16158 131 } else {
keir@16158 132 d->arch.has_pervcpu_vhpt = 1;
keir@16158 133 d->arch.vhpt_size_log2 = ds->vhpt_size_log2;
keir@16158 134 printk(XENLOG_INFO "XEN_DOMCTL_arch_setup: "
keir@16158 135 "domain %d VHPT is per vcpu. size=2**%d\n",
keir@16158 136 d->domain_id, ds->vhpt_size_log2);
keir@16158 137 }
keir@16158 138 #endif
alex@15119 139 if (ds->xsi_va)
alex@15119 140 d->arch.shared_info_va = ds->xsi_va;
alex@15119 141 ret = dom_fw_setup(d, ds->bp, ds->maxmem);
alex@15128 142 }
alex@15128 143 if (ret == 0) {
alex@15128 144 /*
alex@15128 145 * XXX IA64_SHARED_INFO_PADDR
alex@15128 146 * Assign these pages into the guest pseudo-physical address
alex@15128 147 * space so that dom0 can map this page by gmfn.
alex@15128 148 * This is necessary for domain build, save, restore and
alex@15128 149 * dump-core.
alex@15128 150 */
alex@15128 151 unsigned long i;
alex@15128 152 for (i = 0; i < XSI_SIZE; i += PAGE_SIZE)
alex@15128 153 assign_domain_page(d, IA64_SHARED_INFO_PADDR + i,
alex@15128 154 virt_to_maddr(d->shared_info + i));
awilliam@10692 155 }
awilliam@10570 156 }
awilliam@10692 157
awilliam@10570 158 put_domain(d);
awilliam@10570 159 }
awilliam@10570 160 break;
awilliam@10570 161
kfraser@11296 162 case XEN_DOMCTL_shadow_op:
awilliam@10786 163 {
awilliam@10786 164 struct domain *d;
awilliam@10786 165 ret = -ESRCH;
kaf24@13663 166 d = get_domain_by_id(op->domain);
awilliam@10786 167 if ( d != NULL )
awilliam@10786 168 {
kfraser@11296 169 ret = shadow_mode_control(d, &op->u.shadow_op);
awilliam@10786 170 put_domain(d);
alex@16680 171 if (copy_to_guest(u_domctl, op, 1))
alex@16680 172 ret = -EFAULT;
awilliam@10786 173 }
awilliam@10786 174 }
awilliam@10786 175 break;
awilliam@10786 176
kfraser@11296 177 case XEN_DOMCTL_ioport_permission:
awilliam@10816 178 {
awilliam@10816 179 struct domain *d;
awilliam@10816 180 unsigned int fp = op->u.ioport_permission.first_port;
awilliam@10816 181 unsigned int np = op->u.ioport_permission.nr_ports;
awilliam@10818 182 unsigned int lp = fp + np - 1;
awilliam@10816 183
awilliam@10816 184 ret = -ESRCH;
kaf24@13663 185 d = get_domain_by_id(op->domain);
awilliam@10816 186 if (unlikely(d == NULL))
awilliam@10816 187 break;
awilliam@10816 188
awilliam@10816 189 if (np == 0)
awilliam@10816 190 ret = 0;
awilliam@10816 191 else {
awilliam@10816 192 if (op->u.ioport_permission.allow_access)
awilliam@10818 193 ret = ioports_permit_access(d, fp, lp);
awilliam@10816 194 else
awilliam@10818 195 ret = ioports_deny_access(d, fp, lp);
awilliam@10816 196 }
awilliam@10816 197
awilliam@10816 198 put_domain(d);
awilliam@10816 199 }
awilliam@10816 200 break;
keir@14108 201
keir@14108 202 case XEN_DOMCTL_sendtrigger:
keir@14108 203 {
keir@14108 204 struct domain *d;
keir@14108 205 struct vcpu *v;
keir@14108 206
keir@14108 207 ret = -ESRCH;
keir@14108 208 d = get_domain_by_id(op->domain);
keir@14108 209 if ( d == NULL )
keir@14108 210 break;
keir@14108 211
keir@14108 212 ret = -EINVAL;
keir@14108 213 if ( op->u.sendtrigger.vcpu >= MAX_VIRT_CPUS )
keir@14108 214 goto sendtrigger_out;
keir@14108 215
keir@14108 216 ret = -ESRCH;
keir@14108 217 if ( (v = d->vcpu[op->u.sendtrigger.vcpu]) == NULL )
keir@14108 218 goto sendtrigger_out;
keir@14108 219
keir@14108 220 ret = 0;
keir@14108 221 switch (op->u.sendtrigger.trigger)
keir@14108 222 {
keir@14108 223 case XEN_DOMCTL_SENDTRIGGER_INIT:
keir@14108 224 {
keir@14108 225 if (VMX_DOMAIN(v))
keir@14108 226 vmx_pend_pal_init(d);
keir@14108 227 else
keir@14108 228 ret = -ENOSYS;
keir@14108 229 }
keir@14108 230 break;
keir@14108 231
keir@14108 232 default:
keir@14108 233 ret = -ENOSYS;
keir@14108 234 }
keir@14108 235
keir@14108 236 sendtrigger_out:
keir@14108 237 put_domain(d);
keir@14108 238 }
keir@14108 239 break;
keir@14108 240
alex@16172 241 case XEN_DOMCTL_sethvmcontext:
alex@16172 242 {
alex@16172 243 struct hvm_domain_context c;
alex@16172 244 struct domain *d;
alex@16172 245
alex@16172 246 c.cur = 0;
alex@16172 247 c.size = op->u.hvmcontext.size;
alex@16172 248 c.data = NULL;
alex@16172 249
alex@16172 250 ret = -ESRCH;
alex@16172 251 d = rcu_lock_domain_by_id(op->domain);
alex@16172 252 if (d == NULL)
alex@16172 253 break;
alex@16172 254
alex@16172 255 #ifdef CONFIG_X86
alex@16172 256 ret = xsm_hvmcontext(d, op->cmd);
alex@16172 257 if (ret)
alex@16172 258 goto sethvmcontext_out;
alex@16172 259 #endif /* CONFIG_X86 */
alex@16172 260
alex@16172 261 ret = -EINVAL;
alex@16172 262 if (!is_hvm_domain(d))
alex@16172 263 goto sethvmcontext_out;
alex@16172 264
alex@16172 265 ret = -ENOMEM;
alex@16172 266 c.data = xmalloc_bytes(c.size);
alex@16172 267 if (c.data == NULL)
alex@16172 268 goto sethvmcontext_out;
alex@16172 269
alex@16172 270 ret = -EFAULT;
alex@16172 271 if (copy_from_guest(c.data, op->u.hvmcontext.buffer, c.size) != 0)
alex@16172 272 goto sethvmcontext_out;
alex@16172 273
alex@16172 274 domain_pause(d);
alex@16172 275 ret = hvm_load(d, &c);
alex@16172 276 domain_unpause(d);
alex@16172 277
alex@16172 278 sethvmcontext_out:
alex@16172 279 if (c.data != NULL)
alex@16172 280 xfree(c.data);
alex@16172 281
alex@16172 282 rcu_unlock_domain(d);
alex@16172 283 }
alex@16172 284 break;
alex@16172 285
alex@16172 286 case XEN_DOMCTL_gethvmcontext:
alex@16172 287 {
alex@16172 288 struct hvm_domain_context c;
alex@16172 289 struct domain *d;
alex@16172 290
alex@16172 291 ret = -ESRCH;
alex@16172 292 d = rcu_lock_domain_by_id(op->domain);
alex@16172 293 if (d == NULL)
alex@16172 294 break;
alex@16172 295
alex@16172 296 #ifdef CONFIG_X86
alex@16172 297 ret = xsm_hvmcontext(d, op->cmd);
alex@16172 298 if (ret)
alex@16172 299 goto gethvmcontext_out;
alex@16172 300 #endif /* CONFIG_X86 */
alex@16172 301
alex@16172 302 ret = -EINVAL;
alex@16172 303 if (!is_hvm_domain(d))
alex@16172 304 goto gethvmcontext_out;
alex@16172 305
alex@16172 306 c.cur = 0;
alex@16172 307 c.size = hvm_save_size(d);
alex@16172 308 c.data = NULL;
alex@16172 309
alex@16172 310 if (guest_handle_is_null(op->u.hvmcontext.buffer)) {
alex@16172 311 /* Client is querying for the correct buffer size */
alex@16172 312 op->u.hvmcontext.size = c.size;
alex@16172 313 ret = 0;
alex@16172 314 goto gethvmcontext_out;
alex@16172 315 }
alex@16172 316
alex@16172 317 /* Check that the client has a big enough buffer */
alex@16172 318 ret = -ENOSPC;
alex@16172 319 if (op->u.hvmcontext.size < c.size)
alex@16172 320 goto gethvmcontext_out;
alex@16172 321
alex@16172 322 /* Allocate our own marshalling buffer */
alex@16172 323 ret = -ENOMEM;
alex@16172 324 c.data = xmalloc_bytes(c.size);
alex@16172 325 if (c.data == NULL)
alex@16172 326 goto gethvmcontext_out;
alex@16172 327
alex@16172 328 domain_pause(d);
alex@16172 329 ret = hvm_save(d, &c);
alex@16172 330 domain_unpause(d);
alex@16172 331
alex@16172 332 op->u.hvmcontext.size = c.cur;
alex@16172 333 if (copy_to_guest(op->u.hvmcontext.buffer, c.data, c.size) != 0)
alex@16172 334 ret = -EFAULT;
alex@16172 335
alex@16172 336 gethvmcontext_out:
alex@16172 337 if (copy_to_guest(u_domctl, op, 1))
alex@16172 338 ret = -EFAULT;
alex@16172 339
alex@16172 340 if (c.data != NULL)
alex@16172 341 xfree(c.data);
alex@16172 342
alex@16172 343 rcu_unlock_domain(d);
alex@16172 344 }
alex@16172 345 break;
alex@16172 346
alex@16476 347 case XEN_DOMCTL_set_opt_feature:
alex@16476 348 {
alex@16476 349 struct xen_ia64_opt_feature *optf = &op->u.set_opt_feature.optf;
alex@16476 350 struct domain *d = get_domain_by_id(op->domain);
alex@16476 351
alex@16476 352 if (d == NULL) {
alex@16476 353 ret = -EINVAL;
alex@16476 354 break;
alex@16476 355 }
alex@16476 356
alex@16476 357 ret = domain_opt_feature(d, optf);
alex@16652 358 put_domain(d);
alex@16476 359 }
alex@16476 360 break;
alex@16476 361
djm@6458 362 default:
kfraser@11947 363 printk("arch_do_domctl: unrecognized domctl: %d!!!\n",op->cmd);
kfraser@11296 364 ret = -ENOSYS;
kfraser@11296 365
kfraser@11296 366 }
kfraser@11296 367
kfraser@11296 368 return ret;
kfraser@11296 369 }
kfraser@11296 370
kfraser@11296 371 long arch_do_sysctl(xen_sysctl_t *op, XEN_GUEST_HANDLE(xen_sysctl_t) u_sysctl)
kfraser@11296 372 {
kfraser@11296 373 long ret = 0;
kfraser@11296 374
kfraser@11296 375 switch ( op->cmd )
kfraser@11296 376 {
kfraser@11296 377 case XEN_SYSCTL_physinfo:
kfraser@11296 378 {
keir@16166 379 int i;
alex@15557 380 uint32_t max_array_ent;
awilliam@12003 381
kfraser@11296 382 xen_sysctl_physinfo_t *pi = &op->u.physinfo;
kfraser@11296 383
alex@15557 384 pi->threads_per_core = cpus_weight(cpu_sibling_map[0]);
kfraser@11296 385 pi->cores_per_socket =
kfraser@11296 386 cpus_weight(cpu_core_map[0]) / pi->threads_per_core;
keir@16166 387 pi->nr_cpus = (u32)num_online_cpus();
kfraser@15518 388 pi->nr_nodes = num_online_nodes();
kfraser@11296 389 pi->total_pages = total_pages;
kfraser@11296 390 pi->free_pages = avail_domheap_pages();
awilliam@13122 391 pi->scrub_pages = avail_scrub_pages();
kfraser@11296 392 pi->cpu_khz = local_cpu_data->proc_freq / 1000;
kfraser@11296 393 memset(pi->hw_cap, 0, sizeof(pi->hw_cap));
alex@15557 394
alex@15557 395 max_array_ent = pi->max_cpu_id;
alex@15557 396 pi->max_cpu_id = last_cpu(cpu_online_map);
alex@15557 397 max_array_ent = min_t(uint32_t, max_array_ent, pi->max_cpu_id);
alex@15557 398
kfraser@11296 399 ret = 0;
awilliam@12003 400
alex@15557 401 if (!guest_handle_is_null(pi->cpu_to_node)) {
alex@15557 402 for (i = 0; i <= max_array_ent; i++) {
alex@15557 403 uint32_t node = cpu_online(i) ? cpu_to_node(i) : ~0u;
alex@15557 404 if (copy_to_guest_offset(pi->cpu_to_node, i, &node, 1)) {
awilliam@12003 405 ret = -EFAULT;
awilliam@12003 406 break;
awilliam@12003 407 }
awilliam@12003 408 }
awilliam@12003 409 }
awilliam@12003 410
kfraser@11296 411 if ( copy_to_guest(u_sysctl, op, 1) )
kfraser@11296 412 ret = -EFAULT;
kfraser@11296 413 }
kfraser@11296 414 break;
kfraser@11296 415
kfraser@11296 416 default:
kfraser@11947 417 printk("arch_do_sysctl: unrecognized sysctl: %d!!!\n",op->cmd);
djm@6458 418 ret = -ENOSYS;
djm@6458 419
djm@6458 420 }
djm@6458 421
djm@6458 422 return ret;
djm@6458 423 }
kaf24@8750 424
awilliam@10816 425 static unsigned long
awilliam@10816 426 dom0vp_ioremap(struct domain *d, unsigned long mpaddr, unsigned long size)
awilliam@10816 427 {
awilliam@10816 428 unsigned long end;
awilliam@10816 429
awilliam@10816 430 /* Linux may use a 0 size! */
awilliam@10816 431 if (size == 0) {
awilliam@14076 432 printk(XENLOG_WARNING "ioremap(): Trying to map %lx, size 0\n", mpaddr);
awilliam@10816 433 size = PAGE_SIZE;
awilliam@14076 434 }
awilliam@14076 436
awilliam@10816 437 end = PAGE_ALIGN(mpaddr + size);
awilliam@10816 438
awilliam@10816 439 if (!iomem_access_permitted(d, mpaddr >> PAGE_SHIFT,
awilliam@10816 440 (end >> PAGE_SHIFT) - 1))
awilliam@10816 441 return -EPERM;
awilliam@10816 442
awilliam@14076 443 return assign_domain_mmio_page(d, mpaddr, mpaddr, size,
awilliam@14076 444 ASSIGN_writable | ASSIGN_nocache);
awilliam@10816 445 }
awilliam@10816 446
alex@15117 447 static unsigned long
alex@15117 448 dom0vp_fpswa_revision(XEN_GUEST_HANDLE(uint) revision)
alex@15117 449 {
alex@15117 450 if (fpswa_interface == NULL)
alex@15117 451 return -ENOSYS;
alex@15117 452 if (copy_to_guest(revision, &fpswa_interface->revision, 1))
alex@15117 453 return -EFAULT;
alex@15117 454 return 0;
alex@15117 455 }
alex@15117 456
alex@15322 457 static unsigned long
alex@15322 458 dom0vp_add_io_space(struct domain *d, unsigned long phys_base,
alex@15322 459 unsigned long sparse, unsigned long space_number)
alex@15322 460 {
alex@15322 461 unsigned int fp, lp;
alex@15322 462
alex@15322 463 /*
alex@15322 464 * Register a new io_space, roughly based on Linux
alex@15322 465 * arch/ia64/pci/pci.c:new_space().
alex@15322 466 */
alex@15322 467
alex@15322 468 /* Skip legacy I/O port space, we already know about it */
alex@15322 469 if (phys_base == 0)
alex@15322 470 return 0;
alex@15322 471
alex@15322 472 /*
alex@15322 473 * Dom0 Linux initializes io spaces sequentially; if that changes,
alex@15322 474 * we'll need to add thread protection and the ability to handle
alex@15322 475 * a sparsely populated io_space array.
alex@15322 476 */
alex@15322 477 if (space_number > MAX_IO_SPACES || space_number != num_io_spaces)
alex@15322 478 return -EINVAL;
alex@15322 479
alex@15322 480 io_space[space_number].mmio_base = phys_base;
alex@15322 481 io_space[space_number].sparse = sparse;
alex@15322 482
alex@15322 483 num_io_spaces++;
alex@15322 484
alex@15322 485 fp = space_number << IO_SPACE_BITS;
alex@15322 486 lp = fp | 0xffff;
alex@15322 487
alex@15322 488 return ioports_permit_access(d, fp, lp);
alex@15322 489 }
alex@15322 490
awilliam@9759 491 unsigned long
awilliam@9759 492 do_dom0vp_op(unsigned long cmd,
awilliam@9759 493 unsigned long arg0, unsigned long arg1, unsigned long arg2,
awilliam@9759 494 unsigned long arg3)
awilliam@9759 495 {
awilliam@9759 496 unsigned long ret = 0;
awilliam@9759 497 struct domain *d = current->domain;
awilliam@9759 498
awilliam@9759 499 switch (cmd) {
awilliam@9759 500 case IA64_DOM0VP_ioremap:
awilliam@10816 501 ret = dom0vp_ioremap(d, arg0, arg1);
awilliam@9759 502 break;
awilliam@9759 503 case IA64_DOM0VP_phystomach:
awilliam@9759 504 ret = ____lookup_domain_mpa(d, arg0 << PAGE_SHIFT);
awilliam@9759 505 if (ret == INVALID_MFN) {
kaf24@12038 506 dprintk(XENLOG_INFO, "%s: INVALID_MFN ret: 0x%lx\n",
kaf24@12035 507 __func__, ret);
awilliam@9759 508 } else {
awilliam@9759 509 ret = (ret & _PFN_MASK) >> PAGE_SHIFT;//XXX pte_pfn()
awilliam@9759 510 }
kfraser@14595 511 perfc_incr(dom0vp_phystomach);
awilliam@9759 512 break;
awilliam@9759 513 case IA64_DOM0VP_machtophys:
awilliam@10451 514 if (!mfn_valid(arg0)) {
awilliam@9759 515 ret = INVALID_M2P_ENTRY;
awilliam@9759 516 break;
awilliam@9759 517 }
awilliam@9759 518 ret = get_gpfn_from_mfn(arg0);
kfraser@14595 519 perfc_incr(dom0vp_machtophys);
awilliam@9759 520 break;
awilliam@9759 521 case IA64_DOM0VP_zap_physmap:
awilliam@9759 522 ret = dom0vp_zap_physmap(d, arg0, (unsigned int)arg1);
awilliam@9759 523 break;
awilliam@9759 524 case IA64_DOM0VP_add_physmap:
alex@16714 525 if (!IS_PRIV(d))
alex@16714 526 return -EPERM;
awilliam@9759 527 ret = dom0vp_add_physmap(d, arg0, arg1, (unsigned int)arg2,
awilliam@9759 528 (domid_t)arg3);
awilliam@9759 529 break;
awilliam@12794 530 case IA64_DOM0VP_add_physmap_with_gmfn:
alex@16714 531 if (!IS_PRIV(d))
alex@16714 532 return -EPERM;
awilliam@12794 533 ret = dom0vp_add_physmap_with_gmfn(d, arg0, arg1, (unsigned int)arg2,
awilliam@12794 534 (domid_t)arg3);
awilliam@12794 535 break;
awilliam@11726 536 case IA64_DOM0VP_expose_p2m:
awilliam@11726 537 ret = dom0vp_expose_p2m(d, arg0, arg1, arg2, arg3);
awilliam@11726 538 break;
awilliam@12629 539 case IA64_DOM0VP_perfmon: {
awilliam@12629 540 XEN_GUEST_HANDLE(void) hnd;
awilliam@12629 541 set_xen_guest_handle(hnd, (void*)arg1);
awilliam@12629 542 ret = do_perfmon_op(arg0, hnd, arg2);
awilliam@12629 543 break;
awilliam@12629 544 }
alex@15117 545 case IA64_DOM0VP_fpswa_revision: {
alex@15117 546 XEN_GUEST_HANDLE(uint) hnd;
alex@15117 547 set_xen_guest_handle(hnd, (uint*)arg0);
alex@15117 548 ret = dom0vp_fpswa_revision(hnd);
alex@15117 549 break;
alex@15117 550 }
alex@15322 551 case IA64_DOM0VP_add_io_space:
alex@15322 552 ret = dom0vp_add_io_space(d, arg0, arg1, arg2);
alex@15322 553 break;
alex@15842 554 case IA64_DOM0VP_expose_foreign_p2m: {
alex@15842 555 XEN_GUEST_HANDLE(char) hnd;
alex@15842 556 set_xen_guest_handle(hnd, (char*)arg2);
alex@15842 557 ret = dom0vp_expose_foreign_p2m(d, arg0, (domid_t)arg1, hnd, arg3);
alex@15842 558 break;
alex@15842 559 }
alex@15842 560 case IA64_DOM0VP_unexpose_foreign_p2m:
alex@15842 561 ret = dom0vp_unexpose_foreign_p2m(d, arg0, arg1);
alex@15842 562 break;
awilliam@9759 563 default:
awilliam@9759 564 ret = -1;
kfraser@11947 565 printk("unknown dom0_vp_op 0x%lx\n", cmd);
awilliam@9759 566 break;
awilliam@9759 567 }
awilliam@9759 568
awilliam@9759 569 return ret;
awilliam@9759 570 }
awilliam@9759 571
kaf24@8750 572 /*
kaf24@8750 573 * Local variables:
kaf24@8750 574 * mode: C
kaf24@8750 575 * c-set-style: "BSD"
kaf24@8750 576 * c-basic-offset: 4
kaf24@8750 577 * tab-width: 4
kaf24@8750 578 * indent-tabs-mode: nil
kaf24@8750 579 * End:
kaf24@8750 580 */
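
A minimal caller-side sketch of the XEN_DOMCTL_getmemlist interface handled above: the handler walks the guest's pseudo-physical space, writes one MFN per requested page into a caller-supplied buffer, and reports the count back in num_pfns. The issue_domctl() wrapper and the include paths below are assumptions standing in for the toolstack's real domctl hypercall plumbing (e.g. libxc), which must also make the buffer writable for copy_to_guest_offset(); only the cmd, domain, and u.getmemlist fields are taken from the handler itself.

/*
 * Caller-side sketch for XEN_DOMCTL_getmemlist.  issue_domctl() is a
 * hypothetical wrapper for the domctl hypercall, not a real API.
 */
#include <string.h>
#include <public/xen.h>
#include <public/domctl.h>

extern int issue_domctl(xen_domctl_t *op);      /* assumed wrapper */

static int get_mfn_list(domid_t domid, uint64_t start_pfn,
                        uint64_t *mfns, uint64_t max_pfns)
{
    xen_domctl_t op;

    memset(&op, 0, sizeof(op));
    op.cmd = XEN_DOMCTL_getmemlist;
    op.interface_version = XEN_DOMCTL_INTERFACE_VERSION;
    op.domain = domid;
    op.u.getmemlist.start_pfn = start_pfn;      /* first GPFN to look up */
    op.u.getmemlist.max_pfns = max_pfns;        /* capacity of mfns[] */
    set_xen_guest_handle(op.u.getmemlist.buffer, mfns);

    if (issue_domctl(&op) != 0)
        return -1;

    /* The handler reports how many entries it actually wrote. */
    return (int)op.u.getmemlist.num_pfns;
}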