ia64/linux-2.6.18-xen.hg

annotate arch/ia64/xen/hypervisor.c @ 225:11646829a25f

[IA64] Kexec: Add xen_limit_pages_to_max_mfn()

Add xen_limit_pages_to_max_mfn() in keeping with x86.
On ia64 it seems that it just needs to be a wrapper
for xen_create_contiguous_region().

Signed-off-by: Simon Horman <horms@verge.net.au>
author Alex Williamson <alex.williamson@hp.com>
date Thu Sep 27 13:42:38 2007 -0600 (2007-09-27)
parents e1466633683c
children 9514c93e8053 c235ca877ee0
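A minimal sketch of the wrapper's intended use (editorial, not part of this changeset; the order-2 allocation and the 32-bit address width are illustrative values):

        /* hypothetical caller: keep an order-2 allocation below 32 address bits */
        struct page *pages = alloc_pages(GFP_KERNEL, 2);
        if (pages != NULL && xen_limit_pages_to_max_mfn(pages, 2, 32) != 0) {
                __free_pages(pages, 2); /* the address limit could not be met */
                pages = NULL;
        }

On ia64 this simply funnels into xen_create_contiguous_region(), defined later in this file.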
rev   line source
ian@26 1 /******************************************************************************
ian@26 2 * include/asm-ia64/shadow.h
ian@26 3 *
ian@26 4 * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
ian@26 5 * VA Linux Systems Japan K.K.
ian@26 6 *
ian@26 7 * This program is free software; you can redistribute it and/or modify
ian@26 8 * it under the terms of the GNU General Public License as published by
ian@26 9 * the Free Software Foundation; either version 2 of the License, or
ian@26 10 * (at your option) any later version.
ian@26 11 *
ian@26 12 * This program is distributed in the hope that it will be useful,
ian@26 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
ian@26 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
ian@26 15 * GNU General Public License for more details.
ian@26 16 *
ian@26 17 * You should have received a copy of the GNU General Public License
ian@26 18 * along with this program; if not, write to the Free Software
ian@26 19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
ian@26 20 *
ian@26 21 */
ian@26 22
ian@26 23 //#include <linux/kernel.h>
ian@26 24 #include <linux/spinlock.h>
ian@26 25 #include <linux/bootmem.h>
ian@26 26 #include <linux/module.h>
ian@26 27 #include <linux/vmalloc.h>
ian@26 28 #include <linux/efi.h>
ian@26 29 #include <asm/page.h>
ian@26 30 #include <asm/pgalloc.h>
ian@26 31 #include <asm/meminit.h>
ian@26 32 #include <asm/hypervisor.h>
ian@26 33 #include <asm/hypercall.h>
ian@26 34 #include <xen/interface/memory.h>
ian@26 35 #include <xen/xencons.h>
ian@26 36 #include <xen/balloon.h>
ian@26 37
alex@90 38 shared_info_t *HYPERVISOR_shared_info __read_mostly = (shared_info_t *)XSI_BASE;
ian@26 39 EXPORT_SYMBOL(HYPERVISOR_shared_info);
ian@26 40
ian@26 41 start_info_t *xen_start_info;
ian@26 42 EXPORT_SYMBOL(xen_start_info);
ian@26 43
ian@26 44 EXPORT_SYMBOL(running_on_xen);
ian@26 45
ian@26 46 #ifdef CONFIG_XEN_IA64_EXPOSE_P2M
ian@26 47 static int p2m_expose_init(void);
ian@26 48 #else
ian@26 49 #define p2m_expose_init() (-ENOSYS)
ian@26 50 #define p2m_expose_resume() ((void)0)
ian@26 51 #endif
ian@26 52
ian@26 53 EXPORT_SYMBOL(__hypercall);
ian@26 54
ian@26 55 void __init
ian@26 56 xen_setup(char **cmdline_p)
ian@26 57 {
ian@26 58 extern void dig_setup(char **cmdline_p);
ian@26 59 if (ia64_platform_is("xen"))
ian@26 60 dig_setup(cmdline_p);
ian@26 61
ian@26 62 if (!is_running_on_xen() || !is_initial_xendomain())
ian@26 63 return;
ian@26 64
ian@26 65 if (xen_start_info->console.dom0.info_size >=
ian@26 66 sizeof(struct dom0_vga_console_info)) {
ian@26 67 const struct dom0_vga_console_info *info =
ian@26 68 (struct dom0_vga_console_info *)(
ian@26 69 (char *)xen_start_info +
ian@26 70 xen_start_info->console.dom0.info_off);
ian@26 71 dom0_init_screen_info(info);
ian@26 72 }
ian@26 73 xen_start_info->console.domU.mfn = 0;
ian@26 74 xen_start_info->console.domU.evtchn = 0;
ian@26 75 }
ian@26 76
ian@26 77 void __cpuinit
ian@26 78 xen_cpu_init(void)
ian@26 79 {
ian@26 80 extern void xen_smp_intr_init(void);
ian@26 81 xen_smp_intr_init();
ian@26 82 }
ian@26 83
ian@26 84 //XXX same as i386, x86_64 contiguous_bitmap_set(), contiguous_bitmap_clear()
ian@26 85 // move those to lib/contiguous_bitmap?
ian@26 86 //XXX discontigmem/sparsemem
ian@26 87
ian@26 88 /*
ian@26 89 * Bitmap is indexed by page number. If bit is set, the page is part of a
ian@26 90 * xen_create_contiguous_region() area of memory.
ian@26 91 */
alex@90 92 unsigned long *contiguous_bitmap __read_mostly;
ian@26 93
ian@26 94 #ifdef CONFIG_VIRTUAL_MEM_MAP
ian@26 95 /* Following logic is stolen from create_mem_map_table() for virtual memmap */
ian@26 96 static int
ian@26 97 create_contiguous_bitmap(u64 start, u64 end, void *arg)
ian@26 98 {
ian@26 99 unsigned long address, start_page, end_page;
ian@26 100 unsigned long bitmap_start, bitmap_end;
ian@26 101 unsigned char *bitmap;
ian@26 102 int node;
ian@26 103 pgd_t *pgd;
ian@26 104 pud_t *pud;
ian@26 105 pmd_t *pmd;
ian@26 106 pte_t *pte;
ian@26 107
ian@26 108 bitmap_start = (unsigned long)contiguous_bitmap +
ian@26 109 ((__pa(start) >> PAGE_SHIFT) >> 3);
ian@26 110 bitmap_end = (unsigned long)contiguous_bitmap +
ian@26 111 (((__pa(end) >> PAGE_SHIFT) + 2 * BITS_PER_LONG) >> 3);
ian@26 112
ian@26 113 start_page = bitmap_start & PAGE_MASK;
ian@26 114 end_page = PAGE_ALIGN(bitmap_end);
ian@26 115 node = paddr_to_nid(__pa(start));
ian@26 116
ian@26 117 bitmap = alloc_bootmem_pages_node(NODE_DATA(node),
ian@26 118 end_page - start_page);
ian@26 119 BUG_ON(!bitmap);
ian@26 120 memset(bitmap, 0, end_page - start_page);
ian@26 121
ian@26 122 for (address = start_page; address < end_page; address += PAGE_SIZE) {
ian@26 123 pgd = pgd_offset_k(address);
ian@26 124 if (pgd_none(*pgd))
ian@26 125 pgd_populate(&init_mm, pgd,
ian@26 126 alloc_bootmem_pages_node(NODE_DATA(node),
ian@26 127 PAGE_SIZE));
ian@26 128 pud = pud_offset(pgd, address);
ian@26 129
ian@26 130 if (pud_none(*pud))
ian@26 131 pud_populate(&init_mm, pud,
ian@26 132 alloc_bootmem_pages_node(NODE_DATA(node),
ian@26 133 PAGE_SIZE));
ian@26 134 pmd = pmd_offset(pud, address);
ian@26 135
ian@26 136 if (pmd_none(*pmd))
ian@26 137 pmd_populate_kernel(&init_mm, pmd,
ian@26 138 alloc_bootmem_pages_node
ian@26 139 (NODE_DATA(node), PAGE_SIZE));
ian@26 140 pte = pte_offset_kernel(pmd, address);
ian@26 141
ian@26 142 if (pte_none(*pte))
ian@26 143 set_pte(pte,
ian@26 144 pfn_pte(__pa(bitmap + (address - start_page))
ian@26 145 >> PAGE_SHIFT, PAGE_KERNEL));
ian@26 146 }
ian@26 147 return 0;
ian@26 148 }
ian@26 149 #endif
ian@26 150
ian@26 151 static void
ian@26 152 __contiguous_bitmap_init(unsigned long size)
ian@26 153 {
ian@26 154 contiguous_bitmap = alloc_bootmem_pages(size);
ian@26 155 BUG_ON(!contiguous_bitmap);
ian@26 156 memset(contiguous_bitmap, 0, size);
ian@26 157 }
ian@26 158
ian@26 159 void
alex@43 160 xen_contiguous_bitmap_init(unsigned long end_pfn)
ian@26 161 {
ian@26 162 unsigned long size = (end_pfn + 2 * BITS_PER_LONG) >> 3;
ian@26 163 #ifndef CONFIG_VIRTUAL_MEM_MAP
ian@26 164 __contiguous_bitmap_init(size);
ian@26 165 #else
ian@26 166 unsigned long max_gap = 0;
ian@26 167
ian@26 168 efi_memmap_walk(find_largest_hole, (u64*)&max_gap);
ian@26 169 if (max_gap < LARGE_GAP) {
ian@26 170 __contiguous_bitmap_init(size);
ian@26 171 } else {
ian@26 172 unsigned long map_size = PAGE_ALIGN(size);
ian@26 173 vmalloc_end -= map_size;
ian@26 174 contiguous_bitmap = (unsigned long*)vmalloc_end;
ian@26 175 efi_memmap_walk(create_contiguous_bitmap, NULL);
ian@26 176 }
ian@26 177 #endif
ian@26 178 }
ian@26 179
ian@26 180 #if 0
ian@26 181 int
ian@26 182 contiguous_bitmap_test(void* p)
ian@26 183 {
ian@26 184 return test_bit(__pa(p) >> PAGE_SHIFT, contiguous_bitmap);
ian@26 185 }
ian@26 186 #endif
ian@26 187
ian@26 188 static void contiguous_bitmap_set(
ian@26 189 unsigned long first_page, unsigned long nr_pages)
ian@26 190 {
ian@26 191 unsigned long start_off, end_off, curr_idx, end_idx;
ian@26 192
ian@26 193 curr_idx = first_page / BITS_PER_LONG;
ian@26 194 start_off = first_page & (BITS_PER_LONG-1);
ian@26 195 end_idx = (first_page + nr_pages) / BITS_PER_LONG;
ian@26 196 end_off = (first_page + nr_pages) & (BITS_PER_LONG-1);
ian@26 197
ian@26 198 if (curr_idx == end_idx) {
ian@26 199 contiguous_bitmap[curr_idx] |=
ian@26 200 ((1UL<<end_off)-1) & -(1UL<<start_off);
ian@26 201 } else {
ian@26 202 contiguous_bitmap[curr_idx] |= -(1UL<<start_off);
ian@26 203 while ( ++curr_idx < end_idx )
ian@26 204 contiguous_bitmap[curr_idx] = ~0UL;
ian@26 205 contiguous_bitmap[curr_idx] |= (1UL<<end_off)-1;
ian@26 206 }
ian@26 207 }
ian@26 208
ian@26 209 static void contiguous_bitmap_clear(
ian@26 210 unsigned long first_page, unsigned long nr_pages)
ian@26 211 {
ian@26 212 unsigned long start_off, end_off, curr_idx, end_idx;
ian@26 213
ian@26 214 curr_idx = first_page / BITS_PER_LONG;
ian@26 215 start_off = first_page & (BITS_PER_LONG-1);
ian@26 216 end_idx = (first_page + nr_pages) / BITS_PER_LONG;
ian@26 217 end_off = (first_page + nr_pages) & (BITS_PER_LONG-1);
ian@26 218
ian@26 219 if (curr_idx == end_idx) {
ian@26 220 contiguous_bitmap[curr_idx] &=
ian@26 221 -(1UL<<end_off) | ((1UL<<start_off)-1);
ian@26 222 } else {
ian@26 223 contiguous_bitmap[curr_idx] &= (1UL<<start_off)-1;
ian@26 224 while ( ++curr_idx != end_idx )
ian@26 225 contiguous_bitmap[curr_idx] = 0;
ian@26 226 contiguous_bitmap[curr_idx] &= -(1UL<<end_off);
ian@26 227 }
ian@26 228 }
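/*
 * Worked example (editorial note, assuming BITS_PER_LONG == 64):
 * contiguous_bitmap_set(3, 4) takes the curr_idx == end_idx branch
 * with start_off == 3 and end_off == 7, so the word is OR-ed with
 * ((1UL<<7)-1) & -(1UL<<3) == 0x78, i.e. bits 3..6, exactly the
 * four pages starting at page 3.  contiguous_bitmap_clear() applies
 * the complementary masks to clear the same range.
 */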
ian@26 229
ian@26 230 // __xen_create_contiguous_region(), __xen_destroy_contiguous_region()
ian@26 231 // are based on i386 xen_create_contiguous_region(),
ian@26 232 // xen_destroy_contiguous_region()
ian@26 233
ian@26 234 /* Protected by balloon_lock. */
ian@26 235 #define MAX_CONTIG_ORDER 7
ian@26 236 static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
ian@26 237
ian@26 238 /* Ensure multi-page extents are contiguous in machine memory. */
ian@26 239 int
ian@26 240 __xen_create_contiguous_region(unsigned long vstart,
ian@26 241 unsigned int order, unsigned int address_bits)
ian@26 242 {
ian@26 243 unsigned long error = 0;
ian@26 244 unsigned long gphys = __pa(vstart);
ian@26 245 unsigned long start_gpfn = gphys >> PAGE_SHIFT;
ian@26 246 unsigned long num_gpfn = 1 << order;
ian@26 247 unsigned long i;
ian@26 248 unsigned long flags;
ian@26 249
ian@26 250 unsigned long *in_frames = discontig_frames, out_frame;
ian@26 251 int success;
ian@26 252 struct xen_memory_exchange exchange = {
ian@26 253 .in = {
ian@26 254 .nr_extents = num_gpfn,
ian@26 255 .extent_order = 0,
ian@26 256 .domid = DOMID_SELF
ian@26 257 },
ian@26 258 .out = {
ian@26 259 .nr_extents = 1,
ian@26 260 .extent_order = order,
ian@26 261 .address_bits = address_bits,
ian@26 262 .domid = DOMID_SELF
ian@26 263 },
ian@26 264 .nr_exchanged = 0
ian@26 265 };
ian@26 266
ian@26 267 if (unlikely(order > MAX_CONTIG_ORDER))
ian@26 268 return -ENOMEM;
ian@26 269
ian@26 270 set_xen_guest_handle(exchange.in.extent_start, in_frames);
ian@26 271 set_xen_guest_handle(exchange.out.extent_start, &out_frame);
ian@26 272
ian@26 273 scrub_pages(vstart, num_gpfn);
ian@26 274
ian@26 275 balloon_lock(flags);
ian@26 276
ian@26 277 /* Get a new contiguous memory extent. */
ian@26 278 for (i = 0; i < num_gpfn; i++) {
ian@26 279 in_frames[i] = start_gpfn + i;
ian@26 280 }
ian@26 281 out_frame = start_gpfn;
ian@26 282 error = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
ian@26 283 success = (exchange.nr_exchanged == num_gpfn);
ian@26 284 BUG_ON(!success && ((exchange.nr_exchanged != 0) || (error == 0)));
ian@26 285 BUG_ON(success && (error != 0));
ian@26 286 if (unlikely(error == -ENOSYS)) {
ian@26 287 /* Compatibility when XENMEM_exchange is unsupported. */
ian@26 288 error = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
ian@26 289 &exchange.in);
ian@26 290 BUG_ON(error != num_gpfn);
ian@26 291 error = HYPERVISOR_memory_op(XENMEM_populate_physmap,
ian@26 292 &exchange.out);
ian@26 293 if (error != 1) {
ian@26 294 /* Couldn't get special memory: fall back to normal. */
ian@26 295 for (i = 0; i < num_gpfn; i++) {
ian@26 296 in_frames[i] = start_gpfn + i;
ian@26 297 }
ian@26 298 error = HYPERVISOR_memory_op(XENMEM_populate_physmap,
ian@26 299 &exchange.in);
ian@26 300 BUG_ON(error != num_gpfn);
ian@26 301 success = 0;
ian@26 302 } else
ian@26 303 success = 1;
ian@26 304 }
ian@26 305 if (success)
ian@26 306 contiguous_bitmap_set(start_gpfn, num_gpfn);
ian@26 307 #if 0
ian@26 308 if (success) {
ian@26 309 unsigned long mfn;
ian@26 310 unsigned long mfn_prev = ~0UL;
ian@26 311 for (i = 0; i < num_gpfn; i++) {
ian@26 312 mfn = pfn_to_mfn_for_dma(start_gpfn + i);
ian@26 313 if (mfn_prev != ~0UL && mfn != mfn_prev + 1) {
ian@26 314 xprintk("\n");
ian@26 315 xprintk("%s:%d order %d "
ian@26 316 "start 0x%lx bus 0x%lx "
ian@26 317 "machine 0x%lx\n",
ian@26 318 __func__, __LINE__, order,
ian@26 319 vstart, virt_to_bus((void*)vstart),
ian@26 320 phys_to_machine_for_dma(gphys));
ian@26 321 xprintk("mfn: ");
ian@26 322 for (i = 0; i < num_gpfn; i++) {
ian@26 323 mfn = pfn_to_mfn_for_dma(
ian@26 324 start_gpfn + i);
ian@26 325 xprintk("0x%lx ", mfn);
ian@26 326 }
ian@26 327 xprintk("\n");
ian@26 328 break;
ian@26 329 }
ian@26 330 mfn_prev = mfn;
ian@26 331 }
ian@26 332 }
ian@26 333 #endif
ian@26 334 balloon_unlock(flags);
ian@26 335 return success ? 0 : -ENOMEM;
ian@26 336 }
ian@26 337
ian@26 338 void
ian@26 339 __xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
ian@26 340 {
ian@26 341 unsigned long flags;
ian@26 342 unsigned long error = 0;
ian@26 343 unsigned long start_gpfn = __pa(vstart) >> PAGE_SHIFT;
ian@26 344 unsigned long num_gpfn = 1UL << order;
ian@26 345 unsigned long i;
ian@26 346
ian@26 347 unsigned long *out_frames = discontig_frames, in_frame;
ian@26 348 int success;
ian@26 349 struct xen_memory_exchange exchange = {
ian@26 350 .in = {
ian@26 351 .nr_extents = 1,
ian@26 352 .extent_order = order,
ian@26 353 .domid = DOMID_SELF
ian@26 354 },
ian@26 355 .out = {
ian@26 356 .nr_extents = num_gpfn,
ian@26 357 .extent_order = 0,
ian@26 358 .address_bits = 0,
ian@26 359 .domid = DOMID_SELF
ian@26 360 },
ian@26 361 .nr_exchanged = 0
ian@26 362 };
ian@26 363
ian@26 364
ian@26 365 if (!test_bit(start_gpfn, contiguous_bitmap))
ian@26 366 return;
ian@26 367
ian@26 368 if (unlikely(order > MAX_CONTIG_ORDER))
ian@26 369 return;
ian@26 370
ian@26 371 set_xen_guest_handle(exchange.in.extent_start, &in_frame);
ian@26 372 set_xen_guest_handle(exchange.out.extent_start, out_frames);
ian@26 373
ian@26 374 scrub_pages(vstart, num_gpfn);
ian@26 375
ian@26 376 balloon_lock(flags);
ian@26 377
ian@26 378 contiguous_bitmap_clear(start_gpfn, num_gpfn);
ian@26 379
ian@26 380 /* Do the exchange for non-contiguous MFNs. */
ian@26 381 in_frame = start_gpfn;
ian@26 382 for (i = 0; i < num_gpfn; i++) {
ian@26 383 out_frames[i] = start_gpfn + i;
ian@26 384 }
ian@26 385 error = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
ian@26 386 success = (exchange.nr_exchanged == 1);
ian@26 387 BUG_ON(!success && ((exchange.nr_exchanged != 0) || (error == 0)));
ian@26 388 BUG_ON(success && (error != 0));
ian@26 389 if (unlikely(error == -ENOSYS)) {
ian@26 390 /* Compatibility when XENMEM_exchange is unsupported. */
ian@26 391 error = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
ian@26 392 &exchange.in);
ian@26 393 BUG_ON(error != 1);
ian@26 394
ian@26 395 error = HYPERVISOR_memory_op(XENMEM_populate_physmap,
ian@26 396 &exchange.out);
ian@26 397 BUG_ON(error != num_gpfn);
ian@26 398 }
ian@26 399 balloon_unlock(flags);
ian@26 400 }
ian@26 401
alex@225 402 int
alex@225 403 xen_limit_pages_to_max_mfn(struct page *pages, unsigned int order,
alex@225 404 unsigned int address_bits)
alex@225 405 {
alex@225 406 return xen_create_contiguous_region((unsigned long)page_address(pages),
alex@225 407 order, address_bits);
alex@225 408 }
alex@225 409
ian@26 410
ian@26 411 ///////////////////////////////////////////////////////////////////////////
ian@26 412 // grant table hack
ian@26 413 // cmd: GNTTABOP_xxx
ian@26 414
ian@26 415 #include <linux/mm.h>
ian@26 416 #include <xen/interface/xen.h>
ian@26 417 #include <xen/gnttab.h>
ian@26 418
kfraser@106 419 void *arch_gnttab_alloc_shared(unsigned long *frames)
kfraser@106 420 {
kfraser@106 421 return __va(frames[0] << PAGE_SHIFT);
kfraser@106 422 }
kfraser@106 423
ian@26 424 static void
ian@26 425 gnttab_map_grant_ref_pre(struct gnttab_map_grant_ref *uop)
ian@26 426 {
ian@26 427 uint32_t flags;
ian@26 428
ian@26 429 flags = uop->flags;
ian@26 430
ian@26 431 if (flags & GNTMAP_host_map) {
ian@26 432 if (flags & GNTMAP_application_map) {
ian@26 433 xprintd("GNTMAP_application_map is not supported yet: flags 0x%x\n", flags);
ian@26 434 BUG();
ian@26 435 }
ian@26 436 if (flags & GNTMAP_contains_pte) {
ian@26 437 xprintd("GNTMAP_contains_pte is not supported yet flags 0x%x\n", flags);
ian@26 438 BUG();
ian@26 439 }
ian@26 440 } else if (flags & GNTMAP_device_map) {
ian@26 441 xprintd("GNTMAP_device_map is not supported yet 0x%x\n", flags);
ian@26 442 BUG();//XXX not yet. actually this flag is not used.
ian@26 443 } else {
ian@26 444 BUG();
ian@26 445 }
ian@26 446 }
ian@26 447
ian@26 448 int
ian@26 449 HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count)
ian@26 450 {
ian@26 451 if (cmd == GNTTABOP_map_grant_ref) {
ian@26 452 unsigned int i;
ian@26 453 for (i = 0; i < count; i++) {
ian@26 454 gnttab_map_grant_ref_pre(
ian@26 455 (struct gnttab_map_grant_ref*)uop + i);
ian@26 456 }
ian@26 457 }
alex@187 458 return xencomm_hypercall_grant_table_op(cmd, uop, count);
ian@26 459 }
ian@26 460 EXPORT_SYMBOL(HYPERVISOR_grant_table_op);
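/*
 * Editorial sketch of what the pre-check above permits, from a
 * caller's point of view (hypothetical values; gnttab_set_map_op()
 * is assumed to be the usual helper from xen/gnttab.h):
 *
 *      struct gnttab_map_grant_ref op;
 *      gnttab_set_map_op(&op, addr, GNTMAP_host_map, ref, domid);
 *      if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
 *              BUG();
 *
 * Only plain GNTMAP_host_map mappings (no GNTMAP_application_map,
 * no GNTMAP_contains_pte) pass gnttab_map_grant_ref_pre() on ia64.
 */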
ian@26 461
ian@26 462 ///////////////////////////////////////////////////////////////////////////
ian@26 463 // foreign mapping
ian@26 464 #include <linux/efi.h>
ian@26 465 #include <asm/meminit.h> // for IA64_GRANULE_SIZE, GRANULEROUND{UP,DOWN}()
ian@26 466
ian@26 467 static unsigned long privcmd_resource_min = 0;
ian@26 468 // Xen/ia64 can currently handle pseudo physical addresses of up
ian@26 469 // to (PAGE_SHIFT * 3) bits.
ian@26 470 static unsigned long privcmd_resource_max = GRANULEROUNDDOWN((1UL << (PAGE_SHIFT * 3)) - 1);
ian@26 471 static unsigned long privcmd_resource_align = IA64_GRANULE_SIZE;
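// For example, with a 16KB page configuration (PAGE_SHIFT == 14)
// that limit is 3 * 14 == 42 bits, i.e. a 4TB pseudo physical
// address space.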
ian@26 472
ian@26 473 static unsigned long
ian@26 474 md_end_addr(const efi_memory_desc_t *md)
ian@26 475 {
ian@26 476 return md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
ian@26 477 }
ian@26 478
ian@26 479 #define XEN_IA64_PRIVCMD_LEAST_GAP_SIZE (1024 * 1024 * 1024UL)
ian@26 480 static int
ian@26 481 xen_ia64_privcmd_check_size(unsigned long start, unsigned long end)
ian@26 482 {
ian@26 483 return (start < end &&
ian@26 484 (end - start) > XEN_IA64_PRIVCMD_LEAST_GAP_SIZE);
ian@26 485 }
ian@26 486
ian@26 487 static int __init
ian@26 488 xen_ia64_privcmd_init(void)
ian@26 489 {
ian@26 490 void *efi_map_start, *efi_map_end, *p;
ian@26 491 u64 efi_desc_size;
ian@26 492 efi_memory_desc_t *md;
ian@26 493 unsigned long tmp_min;
ian@26 494 unsigned long tmp_max;
ian@26 495 unsigned long gap_size;
ian@26 496 unsigned long prev_end;
ian@26 497
ian@26 498 if (!is_running_on_xen())
ian@26 499 return -1;
ian@26 500
ian@26 501 efi_map_start = __va(ia64_boot_param->efi_memmap);
ian@26 502 efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
ian@26 503 efi_desc_size = ia64_boot_param->efi_memdesc_size;
ian@26 504
ian@26 505 // first, check the highest used address
ian@26 506 for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
ian@26 507 // nothing
ian@26 508 }
ian@26 509 md = p - efi_desc_size;
ian@26 510 privcmd_resource_min = GRANULEROUNDUP(md_end_addr(md));
ian@26 511 if (xen_ia64_privcmd_check_size(privcmd_resource_min,
ian@26 512 privcmd_resource_max)) {
ian@26 513 goto out;
ian@26 514 }
ian@26 515
ian@26 516 // the highest used address is too large; try to find the largest gap.
ian@26 517 tmp_min = privcmd_resource_max;
ian@26 518 tmp_max = 0;
ian@26 519 gap_size = 0;
ian@26 520 prev_end = 0;
ian@26 521 for (p = efi_map_start;
ian@26 522 p < efi_map_end - efi_desc_size;
ian@26 523 p += efi_desc_size) {
ian@26 524 unsigned long end;
ian@26 525 efi_memory_desc_t* next;
ian@26 526 unsigned long next_start;
ian@26 527
ian@26 528 md = p;
ian@26 529 end = md_end_addr(md);
ian@26 530 if (end > privcmd_resource_max) {
ian@26 531 break;
ian@26 532 }
ian@26 533 if (end < prev_end) {
ian@26 534 // workaround:
ian@26 535 // Xen may pass incompletely sorted memory
ian@26 536 // descriptors, e.g.
ian@26 537 // [x, x + length]
ian@26 538 // [x, x]
ian@26 539 // where the order should be the reverse.
ian@26 540 continue;
ian@26 541 }
ian@26 542 next = p + efi_desc_size;
ian@26 543 next_start = next->phys_addr;
ian@26 544 if (next_start > privcmd_resource_max) {
ian@26 545 next_start = privcmd_resource_max;
ian@26 546 }
ian@26 547 if (end < next_start && gap_size < (next_start - end)) {
ian@26 548 tmp_min = end;
ian@26 549 tmp_max = next_start;
ian@26 550 gap_size = tmp_max - tmp_min;
ian@26 551 }
ian@26 552 prev_end = end;
ian@26 553 }
ian@26 554
ian@26 555 privcmd_resource_min = GRANULEROUNDUP(tmp_min);
ian@26 556 if (xen_ia64_privcmd_check_size(privcmd_resource_min, tmp_max)) {
ian@26 557 privcmd_resource_max = tmp_max;
ian@26 558 goto out;
ian@26 559 }
ian@26 560
ian@26 561 privcmd_resource_min = tmp_min;
ian@26 562 privcmd_resource_max = tmp_max;
ian@26 563 if (!xen_ia64_privcmd_check_size(privcmd_resource_min,
ian@26 564 privcmd_resource_max)) {
ian@26 565 // No large enough gap was found.
ian@26 566 // Go ahead anyway, with a warning, hoping that no large
ian@26 567 // region will be requested.
ian@26 568 printk(KERN_WARNING "xen privcmd: no large enough region for privcmd mmap was found.\n");
ian@26 569 }
ian@26 570
ian@26 571 out:
ian@26 572 printk(KERN_INFO "xen privcmd uses pseudo physical addr range [0x%lx, 0x%lx] (%ldMB)\n",
ian@26 573 privcmd_resource_min, privcmd_resource_max,
ian@26 574 (privcmd_resource_max - privcmd_resource_min) >> 20);
ian@26 575 BUG_ON(privcmd_resource_min >= privcmd_resource_max);
ian@26 576
ian@26 577 // XXX this should be somewhere appropriate
ian@26 578 (void)p2m_expose_init();
ian@26 579
ian@26 580 return 0;
ian@26 581 }
ian@26 582 late_initcall(xen_ia64_privcmd_init);
ian@26 583
ian@26 584 struct xen_ia64_privcmd_entry {
ian@26 585 atomic_t map_count;
ian@26 586 #define INVALID_GPFN (~0UL)
ian@26 587 unsigned long gpfn;
ian@26 588 };
ian@26 589
ian@26 590 struct xen_ia64_privcmd_range {
ian@26 591 atomic_t ref_count;
ian@26 592 unsigned long pgoff; // in PAGE_SIZE
ian@26 593 struct resource* res;
ian@26 594
alex@197 595 // for foreign domain p2m mapping
alex@197 596 void* private;
alex@197 597 void (*callback)(struct xen_ia64_privcmd_range* range, void* arg);
alex@197 598
ian@26 599 unsigned long num_entries;
ian@26 600 struct xen_ia64_privcmd_entry entries[0];
ian@26 601 };
ian@26 602
ian@26 603 struct xen_ia64_privcmd_vma {
ian@26 604 int is_privcmd_mmapped;
ian@26 605 struct xen_ia64_privcmd_range* range;
ian@26 606
ian@26 607 unsigned long num_entries;
ian@26 608 struct xen_ia64_privcmd_entry* entries;
ian@26 609 };
ian@26 610
ian@26 611 static void
ian@26 612 xen_ia64_privcmd_init_entry(struct xen_ia64_privcmd_entry* entry)
ian@26 613 {
ian@26 614 atomic_set(&entry->map_count, 0);
ian@26 615 entry->gpfn = INVALID_GPFN;
ian@26 616 }
ian@26 617
ian@26 618 static int
ian@26 619 xen_ia64_privcmd_entry_mmap(struct vm_area_struct* vma,
ian@26 620 unsigned long addr,
ian@26 621 struct xen_ia64_privcmd_range* privcmd_range,
ian@26 622 int i,
ian@26 623 unsigned long gmfn,
ian@26 624 pgprot_t prot,
ian@26 625 domid_t domid)
ian@26 626 {
ian@26 627 int error = 0;
ian@26 628 struct xen_ia64_privcmd_entry* entry = &privcmd_range->entries[i];
ian@26 629 unsigned long gpfn;
ian@26 630 unsigned long flags;
ian@26 631
ian@26 632 if ((addr & ~PAGE_MASK) != 0 || gmfn == INVALID_MFN) {
ian@26 633 error = -EINVAL;
ian@26 634 goto out;
ian@26 635 }
ian@26 636
ian@26 637 if (entry->gpfn != INVALID_GPFN) {
ian@26 638 error = -EBUSY;
ian@26 639 goto out;
ian@26 640 }
ian@26 641 gpfn = (privcmd_range->res->start >> PAGE_SHIFT) + i;
ian@26 642
ian@26 643 flags = ASSIGN_writable;
ian@26 644 if (pgprot_val(prot) == PROT_READ) {
ian@26 645 flags = ASSIGN_readonly;
ian@26 646 }
ian@26 647 error = HYPERVISOR_add_physmap_with_gmfn(gpfn, gmfn, flags, domid);
ian@26 648 if (error != 0) {
ian@26 649 goto out;
ian@26 650 }
ian@26 651
ian@26 652 prot = vma->vm_page_prot;
ian@26 653 error = remap_pfn_range(vma, addr, gpfn, 1 << PAGE_SHIFT, prot);
ian@26 654 if (error != 0) {
ian@26 655 error = HYPERVISOR_zap_physmap(gpfn, 0);
ian@26 656 if (error) {
ian@26 657 BUG();//XXX
ian@26 658 }
ian@26 659 } else {
ian@26 660 atomic_inc(&entry->map_count);
ian@26 661 entry->gpfn = gpfn;
ian@26 662 }
ian@26 663
ian@26 664 out:
ian@26 665 return error;
ian@26 666 }
ian@26 667
ian@26 668 static void
ian@26 669 xen_ia64_privcmd_entry_munmap(struct xen_ia64_privcmd_range* privcmd_range,
ian@26 670 int i)
ian@26 671 {
ian@26 672 struct xen_ia64_privcmd_entry* entry = &privcmd_range->entries[i];
ian@26 673 unsigned long gpfn = entry->gpfn;
ian@26 674 //gpfn = (privcmd_range->res->start >> PAGE_SHIFT) +
ian@26 675 // (vma->vm_pgoff - privcmd_range->pgoff);
ian@26 676 int error;
ian@26 677
ian@26 678 error = HYPERVISOR_zap_physmap(gpfn, 0);
ian@26 679 if (error) {
ian@26 680 BUG();//XXX
ian@26 681 }
ian@26 682 entry->gpfn = INVALID_GPFN;
ian@26 683 }
ian@26 684
ian@26 685 static void
ian@26 686 xen_ia64_privcmd_entry_open(struct xen_ia64_privcmd_range* privcmd_range,
ian@26 687 int i)
ian@26 688 {
ian@26 689 struct xen_ia64_privcmd_entry* entry = &privcmd_range->entries[i];
ian@26 690 if (entry->gpfn != INVALID_GPFN) {
ian@26 691 atomic_inc(&entry->map_count);
ian@26 692 } else {
ian@26 693 BUG_ON(atomic_read(&entry->map_count) != 0);
ian@26 694 }
ian@26 695 }
ian@26 696
ian@26 697 static void
ian@26 698 xen_ia64_privcmd_entry_close(struct xen_ia64_privcmd_range* privcmd_range,
ian@26 699 int i)
ian@26 700 {
ian@26 701 struct xen_ia64_privcmd_entry* entry = &privcmd_range->entries[i];
ian@26 702 if (entry->gpfn != INVALID_GPFN &&
ian@26 703 atomic_dec_and_test(&entry->map_count)) {
ian@26 704 xen_ia64_privcmd_entry_munmap(privcmd_range, i);
ian@26 705 }
ian@26 706 }
ian@26 707
ian@26 708 static void xen_ia64_privcmd_vma_open(struct vm_area_struct* vma);
ian@26 709 static void xen_ia64_privcmd_vma_close(struct vm_area_struct* vma);
ian@26 710
ian@26 711 struct vm_operations_struct xen_ia64_privcmd_vm_ops = {
ian@26 712 .open = &xen_ia64_privcmd_vma_open,
ian@26 713 .close = &xen_ia64_privcmd_vma_close,
ian@26 714 };
ian@26 715
ian@26 716 static void
ian@26 717 __xen_ia64_privcmd_vma_open(struct vm_area_struct* vma,
ian@26 718 struct xen_ia64_privcmd_vma* privcmd_vma,
ian@26 719 struct xen_ia64_privcmd_range* privcmd_range)
ian@26 720 {
ian@26 721 unsigned long entry_offset = vma->vm_pgoff - privcmd_range->pgoff;
ian@26 722 unsigned long num_entries = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
ian@26 723 unsigned long i;
ian@26 724
ian@26 725 BUG_ON(entry_offset < 0);
ian@26 726 BUG_ON(entry_offset + num_entries > privcmd_range->num_entries);
ian@26 727
ian@26 728 privcmd_vma->range = privcmd_range;
ian@26 729 privcmd_vma->num_entries = num_entries;
ian@26 730 privcmd_vma->entries = &privcmd_range->entries[entry_offset];
ian@26 731 vma->vm_private_data = privcmd_vma;
ian@26 732 for (i = 0; i < privcmd_vma->num_entries; i++) {
ian@26 733 xen_ia64_privcmd_entry_open(privcmd_range, entry_offset + i);
ian@26 734 }
ian@26 735
ian@26 736 vma->vm_private_data = privcmd_vma;
ian@26 737 vma->vm_ops = &xen_ia64_privcmd_vm_ops;
ian@26 738 }
ian@26 739
ian@26 740 static void
ian@26 741 xen_ia64_privcmd_vma_open(struct vm_area_struct* vma)
ian@26 742 {
ian@26 743 struct xen_ia64_privcmd_vma* old_privcmd_vma = (struct xen_ia64_privcmd_vma*)vma->vm_private_data;
ian@26 744 struct xen_ia64_privcmd_vma* privcmd_vma = (struct xen_ia64_privcmd_vma*)vma->vm_private_data;
ian@26 745 struct xen_ia64_privcmd_range* privcmd_range = privcmd_vma->range;
ian@26 746
ian@26 747 atomic_inc(&privcmd_range->ref_count);
ian@26 748 // vm_op->open() can't fail.
ian@26 749 privcmd_vma = kmalloc(sizeof(*privcmd_vma), GFP_KERNEL | __GFP_NOFAIL);
ian@26 750 // copy original value if necessary
ian@26 751 privcmd_vma->is_privcmd_mmapped = old_privcmd_vma->is_privcmd_mmapped;
ian@26 752
ian@26 753 __xen_ia64_privcmd_vma_open(vma, privcmd_vma, privcmd_range);
ian@26 754 }
ian@26 755
ian@26 756 static void
ian@26 757 xen_ia64_privcmd_vma_close(struct vm_area_struct* vma)
ian@26 758 {
ian@26 759 struct xen_ia64_privcmd_vma* privcmd_vma =
ian@26 760 (struct xen_ia64_privcmd_vma*)vma->vm_private_data;
ian@26 761 struct xen_ia64_privcmd_range* privcmd_range = privcmd_vma->range;
ian@26 762 unsigned long entry_offset = vma->vm_pgoff - privcmd_range->pgoff;
ian@26 763 unsigned long i;
ian@26 764
ian@26 765 for (i = 0; i < privcmd_vma->num_entries; i++) {
ian@26 766 xen_ia64_privcmd_entry_close(privcmd_range, entry_offset + i);
ian@26 767 }
ian@26 768 vma->vm_private_data = NULL;
ian@26 769 kfree(privcmd_vma);
ian@26 770
ian@26 771 if (atomic_dec_and_test(&privcmd_range->ref_count)) {
ian@26 772 #if 1
ian@26 773 for (i = 0; i < privcmd_range->num_entries; i++) {
ian@26 774 struct xen_ia64_privcmd_entry* entry =
ian@26 775 &privcmd_range->entries[i];
ian@26 776 BUG_ON(atomic_read(&entry->map_count) != 0);
ian@26 777 BUG_ON(entry->gpfn != INVALID_GPFN);
ian@26 778 }
ian@26 779 #endif
alex@197 780 if (privcmd_range->callback)
alex@197 781 (*privcmd_range->callback)(privcmd_range,
alex@197 782 privcmd_range->private);
ian@26 783 release_resource(privcmd_range->res);
ian@26 784 kfree(privcmd_range->res);
ian@26 785 vfree(privcmd_range);
ian@26 786 }
ian@26 787 }
ian@26 788
ian@26 789 int
ian@26 790 privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma)
ian@26 791 {
ian@26 792 struct xen_ia64_privcmd_vma* privcmd_vma =
ian@26 793 (struct xen_ia64_privcmd_vma *)vma->vm_private_data;
ian@26 794 return (xchg(&privcmd_vma->is_privcmd_mmapped, 1) == 0);
ian@26 795 }
ian@26 796
ian@26 797 int
ian@26 798 privcmd_mmap(struct file * file, struct vm_area_struct * vma)
ian@26 799 {
ian@26 800 int error;
ian@26 801 unsigned long size = vma->vm_end - vma->vm_start;
ian@26 802 unsigned long num_entries = size >> PAGE_SHIFT;
ian@26 803 struct xen_ia64_privcmd_range* privcmd_range = NULL;
ian@26 804 struct xen_ia64_privcmd_vma* privcmd_vma = NULL;
ian@26 805 struct resource* res = NULL;
ian@26 806 unsigned long i;
ian@26 807 BUG_ON(!is_running_on_xen());
ian@26 808
ian@26 809 BUG_ON(file->private_data != NULL);
ian@26 810
ian@26 811 error = -ENOMEM;
ian@26 812 privcmd_range =
ian@26 813 vmalloc(sizeof(*privcmd_range) +
ian@26 814 sizeof(privcmd_range->entries[0]) * num_entries);
ian@26 815 if (privcmd_range == NULL) {
ian@26 816 goto out_enomem0;
ian@26 817 }
ian@26 818 privcmd_vma = kmalloc(sizeof(*privcmd_vma), GFP_KERNEL);
ian@26 819 if (privcmd_vma == NULL) {
ian@26 820 goto out_enomem1;
ian@26 821 }
ian@26 822 privcmd_vma->is_privcmd_mmapped = 0;
ian@26 823
ian@26 824 res = kzalloc(sizeof(*res), GFP_KERNEL);
ian@26 825 if (res == NULL) {
ian@26 826 goto out_enomem1;
ian@26 827 }
ian@26 828 res->name = "Xen privcmd mmap";
ian@26 829 error = allocate_resource(&iomem_resource, res, size,
ian@26 830 privcmd_resource_min, privcmd_resource_max,
ian@26 831 privcmd_resource_align, NULL, NULL);
ian@26 832 if (error) {
ian@26 833 goto out_enomem1;
ian@26 834 }
ian@26 835 privcmd_range->res = res;
ian@26 836
ian@26 837 /* DONTCOPY is essential for Xen as copy_page_range is broken. */
ian@26 838 vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY | VM_PFNMAP;
ian@26 839
ian@26 840 atomic_set(&privcmd_range->ref_count, 1);
ian@26 841 privcmd_range->pgoff = vma->vm_pgoff;
ian@26 842 privcmd_range->num_entries = num_entries;
alex@197 843 privcmd_range->private = NULL;
alex@197 844 privcmd_range->callback = NULL;
ian@26 845 for (i = 0; i < privcmd_range->num_entries; i++) {
ian@26 846 xen_ia64_privcmd_init_entry(&privcmd_range->entries[i]);
ian@26 847 }
ian@26 848
ian@26 849 __xen_ia64_privcmd_vma_open(vma, privcmd_vma, privcmd_range);
ian@26 850 return 0;
ian@26 851
ian@26 852 out_enomem1:
ian@26 853 kfree(res);
ian@26 854 kfree(privcmd_vma);
ian@26 855 out_enomem0:
ian@26 856 vfree(privcmd_range);
ian@26 857 return error;
ian@26 858 }
ian@26 859
ian@26 860 int
ian@26 861 direct_remap_pfn_range(struct vm_area_struct *vma,
ian@26 862 unsigned long address, // process virtual address
ian@26 863 unsigned long gmfn, // gmfn, gmfn + 1, ... gmfn + size/PAGE_SIZE
ian@26 864 unsigned long size,
ian@26 865 pgprot_t prot,
ian@26 866 domid_t domid) // target domain
ian@26 867 {
ian@26 868 struct xen_ia64_privcmd_vma* privcmd_vma =
ian@26 869 (struct xen_ia64_privcmd_vma*)vma->vm_private_data;
ian@26 870 struct xen_ia64_privcmd_range* privcmd_range = privcmd_vma->range;
ian@26 871 unsigned long entry_offset = vma->vm_pgoff - privcmd_range->pgoff;
ian@26 872
ian@26 873 unsigned long i;
ian@26 874 unsigned long offset;
ian@26 875 int error = 0;
ian@26 876 BUG_ON(!is_running_on_xen());
ian@26 877
ian@26 878 #if 0
ian@26 879 if (prot != vm->vm_page_prot) {
ian@26 880 return -EINVAL;
ian@26 881 }
ian@26 882 #endif
ian@26 883
ian@26 884 i = (address - vma->vm_start) >> PAGE_SHIFT;
ian@26 885 for (offset = 0; offset < size; offset += PAGE_SIZE) {
ian@26 886 error = xen_ia64_privcmd_entry_mmap(vma, (address + offset) & PAGE_MASK, privcmd_range, entry_offset + i, gmfn, prot, domid);
ian@26 887 if (error != 0) {
ian@26 888 break;
ian@26 889 }
ian@26 890
ian@26 891 i++;
ian@26 892 gmfn++;
ian@26 893 }
ian@26 894
ian@26 895 return error;
ian@26 896 }
ian@26 897
ian@26 898
ian@26 899 ///////////////////////////////////////////////////////////////////////////
ian@26 900 // expose p2m table
ian@26 901 #ifdef CONFIG_XEN_IA64_EXPOSE_P2M
ian@26 902 #include <linux/cpu.h>
ian@26 903 #include <asm/uaccess.h>
ian@26 904
ian@26 905 int p2m_initialized __read_mostly = 0;
ian@26 906
ian@26 907 unsigned long p2m_min_low_pfn __read_mostly;
ian@26 908 unsigned long p2m_max_low_pfn __read_mostly;
ian@26 909 unsigned long p2m_convert_min_pfn __read_mostly;
ian@26 910 unsigned long p2m_convert_max_pfn __read_mostly;
ian@26 911
ian@26 912 static struct resource p2m_resource = {
ian@26 913 .name = "Xen p2m table",
ian@26 914 .flags = IORESOURCE_MEM,
ian@26 915 };
ian@26 916 static unsigned long p2m_assign_start_pfn __read_mostly;
ian@26 917 static unsigned long p2m_assign_end_pfn __read_mostly;
ian@26 918 static unsigned long p2m_expose_size; // this is referenced only at resume time,
ian@26 919 // so __read_mostly doesn't make sense.
ian@26 920 volatile const pte_t* p2m_pte __read_mostly;
ian@26 921
alex@87 922 #define GRANULE_PFN PTRS_PER_PTE
alex@87 923 static unsigned long p2m_granule_pfn __read_mostly = GRANULE_PFN;
ian@26 924
ian@26 925 #define ROUNDDOWN(x, y) ((x) & ~((y) - 1))
ian@26 926 #define ROUNDUP(x, y) (((x) + (y) - 1) & ~((y) - 1))
ian@26 927
ian@26 928 #define P2M_PREFIX "Xen p2m: "
ian@26 929
ian@26 930 static int xen_ia64_p2m_expose __read_mostly = 1;
ian@26 931 module_param(xen_ia64_p2m_expose, int, 0);
ian@26 932 MODULE_PARM_DESC(xen_ia64_p2m_expose,
ian@26 933 "enable/disable xen/ia64 p2m exposure optimization\n");
ian@26 934
ian@26 935 #ifdef CONFIG_XEN_IA64_EXPOSE_P2M_USE_DTR
ian@26 936 static int xen_ia64_p2m_expose_use_dtr __read_mostly = 1;
ian@26 937 module_param(xen_ia64_p2m_expose_use_dtr, int, 0);
ian@26 938 MODULE_PARM_DESC(xen_ia64_p2m_expose_use_dtr,
ian@26 939 "use/unuse dtr to map exposed p2m table\n");
ian@26 940
ian@26 941 static const int p2m_page_shifts[] = {
ian@26 942 _PAGE_SIZE_4K,
ian@26 943 _PAGE_SIZE_8K,
ian@26 944 _PAGE_SIZE_16K,
ian@26 945 _PAGE_SIZE_64K,
ian@26 946 _PAGE_SIZE_256K,
ian@26 947 _PAGE_SIZE_1M,
ian@26 948 _PAGE_SIZE_4M,
ian@26 949 _PAGE_SIZE_16M,
ian@26 950 _PAGE_SIZE_64M,
ian@26 951 _PAGE_SIZE_256M,
ian@26 952 };
ian@26 953
ian@26 954 struct p2m_itr_arg {
ian@26 955 unsigned long vaddr;
ian@26 956 unsigned long pteval;
ian@26 957 unsigned long log_page_size;
ian@26 958 };
ian@26 959 static struct p2m_itr_arg p2m_itr_arg __read_mostly;
ian@26 960
ian@26 961 // This should be in asm-ia64/kregs.h
ian@26 962 #define IA64_TR_P2M_TABLE 3
ian@26 963
ian@26 964 static void
ian@26 965 p2m_itr(void* info)
ian@26 966 {
ian@26 967 struct p2m_itr_arg* arg = (struct p2m_itr_arg*)info;
ian@26 968 ia64_itr(0x2, IA64_TR_P2M_TABLE,
ian@26 969 arg->vaddr, arg->pteval, arg->log_page_size);
ian@26 970 ia64_srlz_d();
ian@26 971 }
ian@26 972
ian@26 973 static int
ian@26 974 p2m_expose_dtr_call(struct notifier_block *self,
ian@26 975 unsigned long event, void* ptr)
ian@26 976 {
ian@26 977 unsigned int cpu = (unsigned int)(long)ptr;
ian@26 978 if (event != CPU_ONLINE)
ian@26 979 return 0;
ian@26 980 if (p2m_initialized && xen_ia64_p2m_expose_use_dtr) {
ian@26 981 unsigned int me = get_cpu();
ian@26 982 if (cpu == me)
ian@26 983 p2m_itr(&p2m_itr_arg);
ian@26 984 else
ian@26 985 smp_call_function_single(cpu, &p2m_itr, &p2m_itr_arg,
ian@26 986 1, 1);
ian@26 987 put_cpu();
ian@26 988 }
ian@26 989 return 0;
ian@26 990 }
ian@26 991
ian@26 992 static struct notifier_block p2m_expose_dtr_hotplug_notifier = {
ian@26 993 .notifier_call = p2m_expose_dtr_call,
ian@26 994 .next = NULL,
ian@26 995 .priority = 0
ian@26 996 };
ian@26 997 #endif
ian@26 998
alex@197 999 static inline unsigned long
alex@197 1000 p2m_table_size(unsigned long num_pfn)
alex@197 1001 {
alex@197 1002 return ((num_pfn + PTRS_PER_PTE - 1) / PTRS_PER_PTE) << PAGE_SHIFT;
alex@197 1003 }
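/*
 * Worked example (editorial, assuming 16KB pages, so that
 * PTRS_PER_PTE == PAGE_SIZE / sizeof(pte_t) == 2048): a 4GB guest
 * has num_pfn == 262144, which rounds up to 128 table pages, i.e.
 * a 2MB p2m table holding one 8-byte pte per guest pfn.
 */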
alex@197 1004
ian@26 1005 static int
ian@26 1006 p2m_expose_init(void)
ian@26 1007 {
ian@26 1008 unsigned long num_pfn;
ian@26 1009 unsigned long p2m_size = 0;
ian@26 1010 unsigned long align = ~0UL;
ian@26 1011 int error = 0;
ian@26 1012 #ifdef CONFIG_XEN_IA64_EXPOSE_P2M_USE_DTR
ian@26 1013 int i;
ian@26 1014 unsigned long log_page_size = 0;
ian@26 1015 #endif
ian@26 1016
ian@26 1017 if (!xen_ia64_p2m_expose)
ian@26 1018 return -ENOSYS;
ian@26 1019 if (p2m_initialized)
ian@26 1020 return 0;
ian@26 1021
ian@26 1022 #ifdef CONFIG_XEN_IA64_EXPOSE_P2M_USE_DTR
ian@26 1023 error = register_cpu_notifier(&p2m_expose_dtr_hotplug_notifier);
ian@26 1024 if (error < 0)
ian@26 1025 return error;
ian@26 1026 #endif
ian@26 1027
ian@26 1028 lock_cpu_hotplug();
ian@26 1029 if (p2m_initialized)
ian@26 1030 goto out;
ian@26 1031
ian@26 1032 #ifdef CONFIG_DISCONTIGMEM
ian@26 1033 p2m_min_low_pfn = min_low_pfn;
ian@26 1034 p2m_max_low_pfn = max_low_pfn;
ian@26 1035 #else
ian@26 1036 p2m_min_low_pfn = 0;
ian@26 1037 p2m_max_low_pfn = max_pfn;
ian@26 1038 #endif
ian@26 1039
ian@26 1040 #ifdef CONFIG_XEN_IA64_EXPOSE_P2M_USE_DTR
ian@26 1041 if (xen_ia64_p2m_expose_use_dtr) {
alex@85 1042 unsigned long page_size = 0;
ian@26 1043 unsigned long granule_pfn = 0;
alex@197 1044 p2m_size = p2m_table_size(p2m_max_low_pfn - p2m_min_low_pfn);
ian@26 1045 for (i = 0;
ian@26 1046 i < sizeof(p2m_page_shifts)/sizeof(p2m_page_shifts[0]);
ian@26 1047 i++) {
ian@26 1048 log_page_size = p2m_page_shifts[i];
ian@26 1049 page_size = 1UL << log_page_size;
ian@26 1050 if (page_size < p2m_size)
ian@26 1051 continue;
ian@26 1052
ian@26 1053 granule_pfn = max(page_size >> PAGE_SHIFT,
ian@26 1054 p2m_granule_pfn);
ian@26 1055 p2m_convert_min_pfn = ROUNDDOWN(p2m_min_low_pfn,
ian@26 1056 granule_pfn);
ian@26 1057 p2m_convert_max_pfn = ROUNDUP(p2m_max_low_pfn,
ian@26 1058 granule_pfn);
ian@26 1059 num_pfn = p2m_convert_max_pfn - p2m_convert_min_pfn;
ian@26 1060 p2m_expose_size = num_pfn << PAGE_SHIFT;
alex@197 1061 p2m_size = p2m_table_size(num_pfn);
ian@26 1062 p2m_size = ROUNDUP(p2m_size, granule_pfn << PAGE_SHIFT);
ian@26 1063 if (p2m_size == page_size)
ian@26 1064 break;
ian@26 1065 }
ian@26 1066 if (p2m_size != page_size) {
ian@26 1067 printk(KERN_ERR "p2m_size != page_size\n");
ian@26 1068 error = -EINVAL;
ian@26 1069 goto out;
ian@26 1070 }
ian@26 1071 align = max(privcmd_resource_align, granule_pfn << PAGE_SHIFT);
ian@26 1072 } else
ian@26 1073 #endif
ian@26 1074 {
ian@26 1075 BUG_ON(p2m_granule_pfn & (p2m_granule_pfn - 1));
ian@26 1076 p2m_convert_min_pfn = ROUNDDOWN(p2m_min_low_pfn,
ian@26 1077 p2m_granule_pfn);
ian@26 1078 p2m_convert_max_pfn = ROUNDUP(p2m_max_low_pfn, p2m_granule_pfn);
ian@26 1079 num_pfn = p2m_convert_max_pfn - p2m_convert_min_pfn;
ian@26 1080 p2m_expose_size = num_pfn << PAGE_SHIFT;
alex@197 1081 p2m_size = p2m_table_size(num_pfn);
ian@26 1082 p2m_size = ROUNDUP(p2m_size, p2m_granule_pfn << PAGE_SHIFT);
ian@26 1083 align = max(privcmd_resource_align,
ian@26 1084 p2m_granule_pfn << PAGE_SHIFT);
ian@26 1085 }
ian@26 1086
ian@26 1087 // use privcmd region
ian@26 1088 error = allocate_resource(&iomem_resource, &p2m_resource, p2m_size,
ian@26 1089 privcmd_resource_min, privcmd_resource_max,
ian@26 1090 align, NULL, NULL);
ian@26 1091 if (error) {
ian@26 1092 printk(KERN_ERR P2M_PREFIX
ian@26 1093 "can't allocate region for p2m exposure "
alex@62 1094 "[0x%016lx, 0x%016lx] 0x%016lx\n",
ian@26 1095 p2m_convert_min_pfn, p2m_convert_max_pfn, p2m_size);
ian@26 1096 goto out;
ian@26 1097 }
ian@26 1098
ian@26 1099 p2m_assign_start_pfn = p2m_resource.start >> PAGE_SHIFT;
ian@26 1100 p2m_assign_end_pfn = p2m_resource.end >> PAGE_SHIFT;
ian@26 1101
ian@26 1102 error = HYPERVISOR_expose_p2m(p2m_convert_min_pfn,
ian@26 1103 p2m_assign_start_pfn,
ian@26 1104 p2m_expose_size, p2m_granule_pfn);
ian@26 1105 if (error) {
ian@26 1106 printk(KERN_ERR P2M_PREFIX "failed expose p2m hypercall %d\n",
ian@26 1107 error);
ian@26 1108 printk(KERN_ERR P2M_PREFIX "conv 0x%016lx assign 0x%016lx "
ian@26 1109 "expose_size 0x%016lx granule 0x%016lx\n",
ian@26 1110 p2m_convert_min_pfn, p2m_assign_start_pfn,
ian@26 1111 p2m_expose_size, p2m_granule_pfn);
ian@26 1112 release_resource(&p2m_resource);
ian@26 1113 goto out;
ian@26 1114 }
ian@26 1115 p2m_pte = (volatile const pte_t*)pfn_to_kaddr(p2m_assign_start_pfn);
ian@26 1116 #ifdef CONFIG_XEN_IA64_EXPOSE_P2M_USE_DTR
ian@26 1117 if (xen_ia64_p2m_expose_use_dtr) {
ian@26 1118 p2m_itr_arg.vaddr = (unsigned long)__va(p2m_assign_start_pfn
ian@26 1119 << PAGE_SHIFT);
ian@26 1120 p2m_itr_arg.pteval = pte_val(pfn_pte(p2m_assign_start_pfn,
ian@26 1121 PAGE_KERNEL));
ian@26 1122 p2m_itr_arg.log_page_size = log_page_size;
ian@26 1123 smp_mb();
ian@26 1124 smp_call_function(&p2m_itr, &p2m_itr_arg, 1, 1);
ian@26 1125 p2m_itr(&p2m_itr_arg);
ian@26 1126 }
ian@26 1127 #endif
ian@26 1128 smp_mb();
ian@26 1129 p2m_initialized = 1;
ian@26 1130 printk(P2M_PREFIX "assign p2m table of [0x%016lx, 0x%016lx)\n",
ian@26 1131 p2m_convert_min_pfn << PAGE_SHIFT,
alex@62 1132 (p2m_convert_max_pfn << PAGE_SHIFT) + PAGE_SIZE);
ian@26 1133 printk(P2M_PREFIX "to [0x%016lx, 0x%016lx) (%ld KBytes)\n",
ian@26 1134 p2m_assign_start_pfn << PAGE_SHIFT,
alex@62 1135 (p2m_assign_end_pfn << PAGE_SHIFT) + PAGE_SIZE,
ian@26 1136 p2m_size / 1024);
ian@26 1137 out:
ian@26 1138 unlock_cpu_hotplug();
ian@26 1139 return error;
ian@26 1140 }
ian@26 1141
ian@26 1142 #ifdef notyet
ian@26 1143 void
ian@26 1144 p2m_expose_cleanup(void)
ian@26 1145 {
ian@26 1146 BUG_ON(!p2m_initialized);
ian@26 1147 #ifdef CONFIG_XEN_IA64_EXPOSE_P2M_USE_DTR
ian@26 1148 unregister_cpu_notifier(&p2m_expose_dtr_hotplug_notifier);
ian@26 1149 #endif
ian@26 1150 release_resource(&p2m_resource);
ian@26 1151 }
ian@26 1152 #endif
ian@26 1153
ian@26 1154 static void
ian@26 1155 p2m_expose_resume(void)
ian@26 1156 {
ian@26 1157 int error;
ian@26 1158
ian@26 1159 if (!xen_ia64_p2m_expose || !p2m_initialized)
ian@26 1160 return;
ian@26 1161
ian@26 1162 /*
ian@26 1163 * We can't call {lock, unlock}_cpu_hotplug() because
ian@26 1164 * they require process context.
ian@26 1165 * We don't need them because only one CPU is running and
ian@26 1166 * interrupts are masked at resume time.
ian@26 1167 */
ian@26 1168 error = HYPERVISOR_expose_p2m(p2m_convert_min_pfn,
ian@26 1169 p2m_assign_start_pfn,
ian@26 1170 p2m_expose_size, p2m_granule_pfn);
ian@26 1171 if (error) {
ian@26 1172 printk(KERN_ERR P2M_PREFIX "failed expose p2m hypercall %d\n",
ian@26 1173 error);
ian@26 1174 printk(KERN_ERR P2M_PREFIX "conv 0x%016lx assign 0x%016lx "
ian@26 1175 "expose_size 0x%016lx granule 0x%016lx\n",
ian@26 1176 p2m_convert_min_pfn, p2m_assign_start_pfn,
ian@26 1177 p2m_expose_size, p2m_granule_pfn);
ian@26 1178 p2m_initialized = 0;
ian@26 1179 smp_mb();
ian@26 1180 ia64_ptr(0x2, p2m_itr_arg.vaddr, p2m_itr_arg.log_page_size);
ian@26 1181
ian@26 1182 /*
ian@26 1183 * We can't call those clean up functions because they
ian@26 1184 * require process context.
ian@26 1185 */
ian@26 1186 #if 0
ian@26 1187 #ifdef CONFIG_XEN_IA64_EXPOSE_P2M_USE_DTR
ian@26 1188 if (xen_ia64_p2m_expose_use_dtr)
ian@26 1189 unregister_cpu_notifier(
ian@26 1190 &p2m_expose_dtr_hotplug_notifier);
ian@26 1191 #endif
ian@26 1192 release_resource(&p2m_resource);
ian@26 1193 #endif
ian@26 1194 }
ian@26 1195 }
ian@26 1196
ian@26 1197 //XXX inlinize?
ian@26 1198 unsigned long
ian@26 1199 p2m_phystomach(unsigned long gpfn)
ian@26 1200 {
ian@26 1201 volatile const pte_t* pte;
ian@26 1202 unsigned long mfn;
ian@26 1203 unsigned long pteval;
ian@26 1204
ian@26 1205 if (!p2m_initialized ||
ian@26 1206 gpfn < p2m_min_low_pfn || gpfn > p2m_max_low_pfn
ian@26 1207 /* || !pfn_valid(gpfn) */)
ian@26 1208 return INVALID_MFN;
ian@26 1209 pte = p2m_pte + (gpfn - p2m_convert_min_pfn);
ian@26 1210
ian@26 1211 mfn = INVALID_MFN;
ian@26 1212 if (likely(__get_user(pteval, (unsigned long __user *)pte) == 0 &&
ian@26 1213 pte_present(__pte(pteval)) &&
ian@26 1214 pte_pfn(__pte(pteval)) != (INVALID_MFN >> PAGE_SHIFT)))
ian@26 1215 mfn = (pteval & _PFN_MASK) >> PAGE_SHIFT;
ian@26 1216
ian@26 1217 return mfn;
ian@26 1218 }
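/*
 * Editorial usage sketch (hypothetical caller): the INVALID_MFN
 * return value must be checked before the result is used, e.g.
 *
 *      unsigned long mfn = p2m_phystomach(gpfn);
 *      if (mfn != INVALID_MFN)
 *              maddr = (mfn << PAGE_SHIFT) | (gaddr & ~PAGE_MASK);
 */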
ian@26 1219
ian@26 1220 EXPORT_SYMBOL_GPL(p2m_initialized);
ian@26 1221 EXPORT_SYMBOL_GPL(p2m_min_low_pfn);
ian@26 1222 EXPORT_SYMBOL_GPL(p2m_max_low_pfn);
ian@26 1223 EXPORT_SYMBOL_GPL(p2m_convert_min_pfn);
ian@26 1224 EXPORT_SYMBOL_GPL(p2m_convert_max_pfn);
ian@26 1225 EXPORT_SYMBOL_GPL(p2m_pte);
ian@26 1226 EXPORT_SYMBOL_GPL(p2m_phystomach);
alex@197 1227
alex@197 1228 ///////////////////////////////////////////////////////////////////////////
alex@197 1229 // foreign domain p2m mapping
alex@197 1230 #include <asm/xen/xencomm.h>
alex@197 1231 #include <xen/public/privcmd.h>
alex@197 1232
alex@197 1233 struct foreign_p2m_private {
alex@197 1234 unsigned long gpfn;
alex@197 1235 domid_t domid;
alex@197 1236 };
alex@197 1237
alex@197 1238 static void
alex@197 1239 xen_foreign_p2m_unexpose(struct xen_ia64_privcmd_range* privcmd_range,
alex@197 1240 void* arg)
alex@197 1241 {
alex@197 1242 struct foreign_p2m_private* private = (struct foreign_p2m_private*)arg;
alex@197 1243 int ret;
alex@197 1244
alex@197 1245 privcmd_range->private = NULL;
alex@197 1246 privcmd_range->callback = NULL;
alex@197 1247
alex@197 1248 ret = HYPERVISOR_unexpose_foreign_p2m(private->gpfn, private->domid);
alex@197 1249 if (ret)
alex@197 1250 printk(KERN_WARNING
alex@197 1251 "unexpose_foreign_p2m hypercall failed.\n");
alex@197 1252 kfree(private);
alex@197 1253 }
alex@197 1254
alex@197 1255 int
alex@197 1256 xen_foreign_p2m_expose(privcmd_hypercall_t* hypercall)
alex@197 1257 {
alex@197 1258 // hypercall->
alex@197 1259 // arg0: cmd = IA64_DOM0VP_expose_foreign_p2m
alex@197 1260 // arg1: va
alex@197 1261 // arg2: domid
alex@197 1262 // arg3: __user* memmap_info
alex@197 1263 // arg4: flags
alex@197 1264
alex@197 1265 int ret = 0;
alex@197 1266 struct mm_struct* mm = current->mm;
alex@197 1267
alex@197 1268 unsigned long vaddr = hypercall->arg[1];
alex@197 1269 domid_t domid = hypercall->arg[2];
alex@197 1270 struct xen_ia64_memmap_info __user *u_memmap_info =
alex@197 1271 (struct xen_ia64_memmap_info __user *)hypercall->arg[3];
alex@197 1272
alex@197 1273 struct xen_ia64_memmap_info memmap_info;
alex@197 1274 size_t memmap_size;
alex@197 1275 struct xen_ia64_memmap_info* k_memmap_info = NULL;
alex@197 1276 long max_gpfn; /* signed, so the error check below works */
alex@197 1277 unsigned long p2m_size;
alex@197 1278 struct resource* res;
alex@197 1279 unsigned long gpfn;
alex@197 1280
alex@197 1281 struct vm_area_struct* vma;
alex@197 1282 void* p;
alex@197 1283 unsigned long prev_src_gpfn_end;
alex@197 1284
alex@197 1285 struct xen_ia64_privcmd_vma* privcmd_vma;
alex@197 1286 struct xen_ia64_privcmd_range* privcmd_range;
alex@197 1287 struct foreign_p2m_private* private = NULL;
alex@197 1288
alex@197 1289 BUG_ON(hypercall->arg[0] != IA64_DOM0VP_expose_foreign_p2m);
alex@197 1290
alex@197 1291 private = kmalloc(sizeof(*private), GFP_KERNEL);
alex@197 1292 if (private == NULL)
alex@197 1293 { ret = -ENOMEM; goto kfree_out; }
alex@197 1294
alex@197 1295 if (copy_from_user(&memmap_info, u_memmap_info, sizeof(memmap_info)))
alex@197 1296 { ret = -EFAULT; goto kfree_out; } /* don't leak 'private' */
alex@197 1297 /* memmap_info integrity check */
alex@197 1298 if (memmap_info.efi_memdesc_size < sizeof(efi_memory_desc_t) ||
alex@197 1299 memmap_info.efi_memmap_size < memmap_info.efi_memdesc_size ||
alex@197 1300 (memmap_info.efi_memmap_size % memmap_info.efi_memdesc_size)
alex@197 1301 != 0) {
alex@197 1302 ret = -EINVAL;
alex@197 1303 goto kfree_out;
alex@197 1304 }
alex@197 1305
alex@197 1306 memmap_size = sizeof(*k_memmap_info) + memmap_info.efi_memmap_size;
alex@197 1307 k_memmap_info = kmalloc(memmap_size, GFP_KERNEL);
alex@197 1308 if (k_memmap_info == NULL)
alex@197 1309 { ret = -ENOMEM; goto kfree_out; } /* frees 'private' too */
alex@197 1310 if (copy_from_user(k_memmap_info, u_memmap_info, memmap_size)) {
alex@197 1311 ret = -EFAULT;
alex@197 1312 goto kfree_out;
alex@197 1313 }
alex@197 1314 /* k_memmap_info integrity check is done by the expose foreign p2m
alex@197 1315 hypercall */
alex@197 1316
alex@197 1317 max_gpfn = HYPERVISOR_memory_op(XENMEM_maximum_gpfn, &domid);
alex@197 1318 if (max_gpfn < 0) {
alex@197 1319 ret = max_gpfn;
alex@197 1320 goto kfree_out;
alex@197 1321 }
alex@197 1322 p2m_size = p2m_table_size(max_gpfn + 1);
alex@197 1323
alex@197 1324 down_write(&mm->mmap_sem);
alex@197 1325
alex@197 1326 vma = find_vma(mm, vaddr);
alex@197 1327 if (vma == NULL || vma->vm_ops != &xen_ia64_privcmd_vm_ops ||
alex@197 1328 vaddr != vma->vm_start ||
alex@197 1329 (vma->vm_flags & VM_WRITE) || (vma->vm_flags & VM_EXEC) ||
alex@197 1330 !privcmd_enforce_singleshot_mapping(vma))
alex@197 1331 goto mmap_out;
alex@197 1332
alex@197 1333 privcmd_vma = (struct xen_ia64_privcmd_vma*)vma->vm_private_data;
alex@197 1334 res = privcmd_vma->range->res;
alex@197 1335 if (p2m_size > (res->end - res->start + 1) ||
alex@197 1336 p2m_size > vma->vm_end - vma->vm_start) {
alex@197 1337 ret = -EINVAL;
alex@197 1338 goto mmap_out;
alex@197 1339 }
alex@197 1340
alex@197 1341 gpfn = res->start >> PAGE_SHIFT;
alex@197 1342 // arg0: dest_gpfn
alex@197 1343 // arg1: domid
alex@197 1344 // arg2: XEN_GUEST_HANDLE(char) buffer: memmap_info
alex@197 1345 // arg3: flags
alex@197 1346 // The hypercall checks its integrity, simplifies it, and
alex@197 1347 // copies it back for us.
alex@197 1348 ret = xencomm_arch_expose_foreign_p2m(gpfn, domid,
alex@197 1349 xencomm_map_no_alloc(k_memmap_info, memmap_size),
alex@197 1350 hypercall->arg[4]);
alex@197 1351 if (ret)
alex@197 1352 goto mmap_out;
alex@197 1353
alex@197 1354 privcmd_range = (struct xen_ia64_privcmd_range*)privcmd_vma->range;
alex@197 1355 prev_src_gpfn_end = 0;
alex@197 1356 for (p = k_memmap_info->memdesc;
alex@197 1357 p < (void*)&k_memmap_info->memdesc[0] +
alex@197 1358 k_memmap_info->efi_memmap_size;
alex@197 1359 p += k_memmap_info->efi_memdesc_size) {
alex@197 1360 efi_memory_desc_t* md = p;
alex@197 1361 unsigned long src_gpfn = md->phys_addr >> PAGE_SHIFT;
alex@197 1362 unsigned long src_gpfn_end =
alex@197 1363 (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >>
alex@197 1364 PAGE_SHIFT;
alex@197 1365 unsigned long num_src_gpfn;
alex@197 1366 unsigned long gpfn_offset;
alex@197 1367 unsigned long size;
alex@197 1368 unsigned int i;
alex@197 1369
alex@197 1370 if (src_gpfn <= prev_src_gpfn_end)
alex@197 1371 src_gpfn = prev_src_gpfn_end + 1;
alex@197 1372 if (src_gpfn_end <= prev_src_gpfn_end)
alex@197 1373 continue;
alex@197 1374
alex@197 1375 src_gpfn &= ~(PTRS_PER_PTE - 1);
alex@197 1376 src_gpfn_end = (src_gpfn_end + PTRS_PER_PTE - 1) &
alex@197 1377 ~(PTRS_PER_PTE - 1);
alex@197 1378 num_src_gpfn = src_gpfn_end - src_gpfn;
alex@197 1379 gpfn_offset = src_gpfn / PTRS_PER_PTE;
alex@197 1380 size = p2m_table_size(num_src_gpfn);
alex@197 1381
alex@197 1382 prev_src_gpfn_end = src_gpfn_end;
alex@197 1383 ret = remap_pfn_range(vma,
alex@197 1384 vaddr + (gpfn_offset << PAGE_SHIFT),
alex@197 1385 gpfn + gpfn_offset, size,
alex@197 1386 vma->vm_page_prot);
alex@197 1387 if (ret) {
alex@197 1388 for (i = 0; i < gpfn + gpfn_offset; i++) {
alex@197 1389 struct xen_ia64_privcmd_entry* entry =
alex@197 1390 &privcmd_range->entries[i];
alex@197 1391 BUG_ON(atomic_read(&entry->map_count) != 1 &&
alex@197 1392 atomic_read(&entry->map_count) != 0);
alex@197 1393 atomic_set(&entry->map_count, 0);
alex@197 1394 entry->gpfn = INVALID_GPFN;
alex@197 1395 }
alex@197 1396 (void)HYPERVISOR_unexpose_foreign_p2m(gpfn, domid);
alex@197 1397 goto mmap_out;
alex@197 1398 }
alex@197 1399
alex@197 1400 for (i = gpfn_offset;
alex@197 1401 i < gpfn_offset + (size >> PAGE_SHIFT);
alex@197 1402 i++) {
alex@197 1403 struct xen_ia64_privcmd_entry* entry =
alex@197 1404 &privcmd_range->entries[i];
alex@197 1405 BUG_ON(atomic_read(&entry->map_count) != 0);
alex@197 1406 BUG_ON(entry->gpfn != INVALID_GPFN);
alex@197 1407 atomic_inc(&entry->map_count);
alex@197 1408 entry->gpfn = gpfn + i;
alex@197 1409 }
alex@197 1410 }
alex@197 1411
alex@197 1412 private->gpfn = gpfn;
alex@197 1413 private->domid = domid;
alex@197 1414
alex@197 1415 privcmd_range->callback = &xen_foreign_p2m_unexpose;
alex@197 1416 privcmd_range->private = private;
alex@197 1417
alex@197 1418 mmap_out:
alex@197 1419 up_write(&mm->mmap_sem);
alex@197 1420 kfree_out:
alex@197 1421 kfree(k_memmap_info);
alex@197 1422 if (ret != 0)
alex@197 1423 kfree(private);
alex@197 1424 return ret;
alex@197 1425 }
ian@26 1426 #endif
ian@26 1427
ian@26 1428 ///////////////////////////////////////////////////////////////////////////
ian@26 1429 // for xenoprof
ian@26 1430
ian@26 1431 struct resource*
ian@26 1432 xen_ia64_allocate_resource(unsigned long size)
ian@26 1433 {
ian@26 1434 struct resource* res;
ian@26 1435 int error;
ian@26 1436
alex@43 1437 res = kzalloc(sizeof(*res), GFP_KERNEL);
ian@26 1438 if (res == NULL)
ian@26 1439 return ERR_PTR(-ENOMEM);
ian@26 1440
ian@26 1441 res->name = "Xen";
ian@26 1442 res->flags = IORESOURCE_MEM;
ian@26 1443 error = allocate_resource(&iomem_resource, res, PAGE_ALIGN(size),
ian@26 1444 privcmd_resource_min, privcmd_resource_max,
ian@26 1445 IA64_GRANULE_SIZE, NULL, NULL);
ian@26 1446 if (error) {
ian@26 1447 kfree(res);
ian@26 1448 return ERR_PTR(error);
ian@26 1449 }
ian@26 1450 return res;
ian@26 1451 }
ian@26 1452 EXPORT_SYMBOL_GPL(xen_ia64_allocate_resource);
ian@26 1453
ian@26 1454 void
ian@26 1455 xen_ia64_release_resource(struct resource* res)
ian@26 1456 {
ian@26 1457 release_resource(res);
ian@26 1458 kfree(res);
ian@26 1459 }
ian@26 1460 EXPORT_SYMBOL_GPL(xen_ia64_release_resource);
ian@26 1461
ian@26 1462 void
ian@26 1463 xen_ia64_unmap_resource(struct resource* res)
ian@26 1464 {
ian@26 1465 unsigned long gpfn = res->start >> PAGE_SHIFT;
ian@26 1466 unsigned long nr_pages = (res->end - res->start) >> PAGE_SHIFT;
ian@26 1467 unsigned long i;
ian@26 1468
ian@26 1469 for (i = 0; i < nr_pages; i++) {
ian@26 1470 int error = HYPERVISOR_zap_physmap(gpfn + i, 0);
ian@26 1471 if (error)
ian@26 1472 printk(KERN_ERR
ian@26 1473 "%s:%d zap_phsymap failed %d gpfn %lx\n",
ian@26 1474 __func__, __LINE__, error, gpfn + i);
ian@26 1475 }
ian@26 1476 xen_ia64_release_resource(res);
ian@26 1477 }
ian@26 1478 EXPORT_SYMBOL_GPL(xen_ia64_unmap_resource);
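/*
 * Editorial sketch of the expected pairing (hypothetical
 * xenoprof-style caller):
 *
 *      struct resource* res = xen_ia64_allocate_resource(size);
 *      if (IS_ERR(res))
 *              return PTR_ERR(res);
 *      ... map it, use it ...
 *      xen_ia64_unmap_resource(res);   // zaps and releases
 */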
ian@26 1479
ian@26 1480 ///////////////////////////////////////////////////////////////////////////
ian@26 1481 // suspend/resume
ian@26 1482 void
ian@26 1483 xen_post_suspend(int suspend_cancelled)
ian@26 1484 {
ian@26 1485 if (suspend_cancelled)
ian@26 1486 return;
ian@26 1487
ian@26 1488 p2m_expose_resume();
ian@26 1489 /* add more if necessary */
ian@26 1490 }