ia64/linux-2.6.18-xen.hg

annotate arch/ia64/xen/hypervisor.c @ 392:71a415f9179b

[IA64] Coding style fix

Mainly whitespace, // comments and * ops.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Alex Williamson <alex.williamson@hp.com>
date Fri Jan 18 14:20:59 2008 -0700 (2008-01-18)
parents 32df30e24087
children 99478ffd81ee
rev   line source
ian@26 1 /******************************************************************************
ian@26 2 * arch/ia64/xen/hypervisor.c
ian@26 3 *
ian@26 4 * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
ian@26 5 * VA Linux Systems Japan K.K.
ian@26 6 *
ian@26 7 * This program is free software; you can redistribute it and/or modify
ian@26 8 * it under the terms of the GNU General Public License as published by
ian@26 9 * the Free Software Foundation; either version 2 of the License, or
ian@26 10 * (at your option) any later version.
ian@26 11 *
ian@26 12 * This program is distributed in the hope that it will be useful,
ian@26 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
ian@26 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
ian@26 15 * GNU General Public License for more details.
ian@26 16 *
ian@26 17 * You should have received a copy of the GNU General Public License
ian@26 18 * along with this program; if not, write to the Free Software
ian@26 19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
ian@26 20 *
ian@26 21 */
ian@26 22
ian@26 23 #include <linux/spinlock.h>
ian@26 24 #include <linux/bootmem.h>
ian@26 25 #include <linux/module.h>
ian@26 26 #include <linux/vmalloc.h>
ian@26 27 #include <linux/efi.h>
ian@26 28 #include <asm/page.h>
ian@26 29 #include <asm/pgalloc.h>
ian@26 30 #include <asm/meminit.h>
ian@26 31 #include <asm/hypervisor.h>
ian@26 32 #include <asm/hypercall.h>
ian@26 33 #include <xen/interface/memory.h>
ian@26 34 #include <xen/xencons.h>
ian@26 35 #include <xen/balloon.h>
ian@26 36
alex@392 37 shared_info_t *HYPERVISOR_shared_info __read_mostly =
alex@392 38 (shared_info_t *)XSI_BASE;
ian@26 39 EXPORT_SYMBOL(HYPERVISOR_shared_info);
ian@26 40
ian@26 41 start_info_t *xen_start_info;
ian@26 42 EXPORT_SYMBOL(xen_start_info);
ian@26 43
ian@26 44 EXPORT_SYMBOL(running_on_xen);
ian@26 45
ian@26 46 #ifdef CONFIG_XEN_IA64_EXPOSE_P2M
ian@26 47 static int p2m_expose_init(void);
ian@26 48 #else
ian@26 49 #define p2m_expose_init() (-ENOSYS)
ian@26 50 #define p2m_expose_resume() ((void)0)
ian@26 51 #endif
ian@26 52
ian@26 53 EXPORT_SYMBOL(__hypercall);
ian@26 54
ian@26 55 void __init
ian@26 56 xen_setup(char **cmdline_p)
ian@26 57 {
keir@261 58 struct dom0_vga_console_info *info;
ian@26 59 extern void dig_setup(char **cmdline_p);
keir@261 60
ian@26 61 if (ia64_platform_is("xen"))
ian@26 62 dig_setup(cmdline_p);
alex@392 63
ian@26 64 if (!is_running_on_xen() || !is_initial_xendomain())
ian@26 65 return;
ian@26 66
keir@261 67 info = (void *)((char *)xen_start_info +
keir@261 68 xen_start_info->console.dom0.info_off);
keir@261 69 dom0_init_screen_info(info, xen_start_info->console.dom0.info_size);
keir@261 70
ian@26 71 xen_start_info->console.domU.mfn = 0;
ian@26 72 xen_start_info->console.domU.evtchn = 0;
ian@26 73 }
ian@26 74
ian@26 75 void __cpuinit
ian@26 76 xen_cpu_init(void)
ian@26 77 {
ian@26 78 extern void xen_smp_intr_init(void);
ian@26 79 xen_smp_intr_init();
ian@26 80 }
ian@26 81
alex@392 82 /*
alex@392 83 * XXX: same as the i386/x86_64 contiguous_bitmap_set(), contiguous_bitmap_clear().
alex@392 84 * Move those to lib/contiguous_bitmap?
alex@392 85 * XXX: discontigmem/sparsemem
alex@392 86 */
ian@26 87
ian@26 88 /*
ian@26 89 * Bitmap is indexed by page number. If bit is set, the page is part of a
ian@26 90 * xen_create_contiguous_region() area of memory.
ian@26 91 */
alex@90 92 unsigned long *contiguous_bitmap __read_mostly;
ian@26 93
ian@26 94 #ifdef CONFIG_VIRTUAL_MEM_MAP
ian@26 95 /* The following logic is stolen from create_mem_map_table() for virtual memmap */
ian@26 96 static int
ian@26 97 create_contiguous_bitmap(u64 start, u64 end, void *arg)
ian@26 98 {
ian@26 99 unsigned long address, start_page, end_page;
ian@26 100 unsigned long bitmap_start, bitmap_end;
ian@26 101 unsigned char *bitmap;
ian@26 102 int node;
ian@26 103 pgd_t *pgd;
ian@26 104 pud_t *pud;
ian@26 105 pmd_t *pmd;
ian@26 106 pte_t *pte;
ian@26 107
ian@26 108 bitmap_start = (unsigned long)contiguous_bitmap +
alex@392 109 ((__pa(start) >> PAGE_SHIFT) >> 3);
ian@26 110 bitmap_end = (unsigned long)contiguous_bitmap +
alex@392 111 (((__pa(end) >> PAGE_SHIFT) + 2 * BITS_PER_LONG) >> 3);
ian@26 112
ian@26 113 start_page = bitmap_start & PAGE_MASK;
ian@26 114 end_page = PAGE_ALIGN(bitmap_end);
ian@26 115 node = paddr_to_nid(__pa(start));
ian@26 116
ian@26 117 bitmap = alloc_bootmem_pages_node(NODE_DATA(node),
alex@392 118 end_page - start_page);
ian@26 119 BUG_ON(!bitmap);
ian@26 120 memset(bitmap, 0, end_page - start_page);
ian@26 121
ian@26 122 for (address = start_page; address < end_page; address += PAGE_SIZE) {
ian@26 123 pgd = pgd_offset_k(address);
ian@26 124 if (pgd_none(*pgd))
ian@26 125 pgd_populate(&init_mm, pgd,
alex@392 126 alloc_bootmem_pages_node(NODE_DATA(node),
alex@392 127 PAGE_SIZE));
ian@26 128 pud = pud_offset(pgd, address);
ian@26 129
ian@26 130 if (pud_none(*pud))
ian@26 131 pud_populate(&init_mm, pud,
alex@392 132 alloc_bootmem_pages_node(NODE_DATA(node),
alex@392 133 PAGE_SIZE));
ian@26 134 pmd = pmd_offset(pud, address);
ian@26 135
ian@26 136 if (pmd_none(*pmd))
ian@26 137 pmd_populate_kernel(&init_mm, pmd,
alex@392 138 alloc_bootmem_pages_node
alex@392 139 (NODE_DATA(node), PAGE_SIZE));
ian@26 140 pte = pte_offset_kernel(pmd, address);
ian@26 141
ian@26 142 if (pte_none(*pte))
ian@26 143 set_pte(pte,
alex@392 144 pfn_pte(__pa(bitmap + (address - start_page))
alex@392 145 >> PAGE_SHIFT, PAGE_KERNEL));
ian@26 146 }
ian@26 147 return 0;
ian@26 148 }
ian@26 149 #endif
ian@26 150
ian@26 151 static void
ian@26 152 __contiguous_bitmap_init(unsigned long size)
ian@26 153 {
ian@26 154 contiguous_bitmap = alloc_bootmem_pages(size);
ian@26 155 BUG_ON(!contiguous_bitmap);
ian@26 156 memset(contiguous_bitmap, 0, size);
ian@26 157 }
ian@26 158
ian@26 159 void
alex@43 160 xen_contiguous_bitmap_init(unsigned long end_pfn)
ian@26 161 {
ian@26 162 unsigned long size = (end_pfn + 2 * BITS_PER_LONG) >> 3;
ian@26 163 #ifndef CONFIG_VIRTUAL_MEM_MAP
ian@26 164 __contiguous_bitmap_init(size);
ian@26 165 #else
ian@26 166 unsigned long max_gap = 0;
ian@26 167
ian@26 168 efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
ian@26 169 if (max_gap < LARGE_GAP) {
ian@26 170 __contiguous_bitmap_init(size);
ian@26 171 } else {
ian@26 172 unsigned long map_size = PAGE_ALIGN(size);
ian@26 173 vmalloc_end -= map_size;
ian@26 174 contiguous_bitmap = (unsigned long *)vmalloc_end;
ian@26 175 efi_memmap_walk(create_contiguous_bitmap, NULL);
ian@26 176 }
ian@26 177 #endif
ian@26 178 }
ian@26 179
ian@26 180 #if 0
ian@26 181 int
ian@26 182 contiguous_bitmap_test(void *p)
ian@26 183 {
ian@26 184 return test_bit(__pa(p) >> PAGE_SHIFT, contiguous_bitmap);
ian@26 185 }
ian@26 186 #endif
ian@26 187
ian@26 188 static void contiguous_bitmap_set(
ian@26 189 unsigned long first_page, unsigned long nr_pages)
ian@26 190 {
ian@26 191 unsigned long start_off, end_off, curr_idx, end_idx;
ian@26 192
ian@26 193 curr_idx = first_page / BITS_PER_LONG;
ian@26 194 start_off = first_page & (BITS_PER_LONG-1);
ian@26 195 end_idx = (first_page + nr_pages) / BITS_PER_LONG;
ian@26 196 end_off = (first_page + nr_pages) & (BITS_PER_LONG-1);
ian@26 197
ian@26 198 if (curr_idx == end_idx) {
ian@26 199 contiguous_bitmap[curr_idx] |=
ian@26 200 ((1UL<<end_off)-1) & -(1UL<<start_off);
ian@26 201 } else {
ian@26 202 contiguous_bitmap[curr_idx] |= -(1UL<<start_off);
ian@26 203 while (++curr_idx < end_idx)
ian@26 204 contiguous_bitmap[curr_idx] = ~0UL;
ian@26 205 contiguous_bitmap[curr_idx] |= (1UL<<end_off)-1;
ian@26 206 }
ian@26 207 }
ian@26 208
ian@26 209 static void contiguous_bitmap_clear(
ian@26 210 unsigned long first_page, unsigned long nr_pages)
ian@26 211 {
ian@26 212 unsigned long start_off, end_off, curr_idx, end_idx;
ian@26 213
ian@26 214 curr_idx = first_page / BITS_PER_LONG;
ian@26 215 start_off = first_page & (BITS_PER_LONG-1);
ian@26 216 end_idx = (first_page + nr_pages) / BITS_PER_LONG;
ian@26 217 end_off = (first_page + nr_pages) & (BITS_PER_LONG-1);
ian@26 218
ian@26 219 if (curr_idx == end_idx) {
ian@26 220 contiguous_bitmap[curr_idx] &=
ian@26 221 -(1UL<<end_off) | ((1UL<<start_off)-1);
ian@26 222 } else {
ian@26 223 contiguous_bitmap[curr_idx] &= (1UL<<start_off)-1;
ian@26 224 while (++curr_idx != end_idx)
ian@26 225 contiguous_bitmap[curr_idx] = 0;
ian@26 226 contiguous_bitmap[curr_idx] &= -(1UL<<end_off);
ian@26 227 }
ian@26 228 }
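
The single-word cases above rely on two mask identities: -(1UL << s) has bits s and above set, while (1UL << e) - 1 has the bits below e set, so ANDing them selects exactly the range [s, e). A minimal userspace sketch, assuming 64-bit longs (not part of this file):

	#include <stdio.h>

	int main(void)
	{
		unsigned long start_off = 3, end_off = 9;
		unsigned long upper = -(1UL << start_off);	/* bits 3..63 set */
		unsigned long lower = (1UL << end_off) - 1;	/* bits 0..8 set */

		/* prints 0x1f8: exactly bits 3..8, i.e. [start_off, end_off) */
		printf("%#lx\n", upper & lower);
		return 0;
	}
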
ian@26 229
alex@392 230 /*
alex@392 231 * __xen_create_contiguous_region(), __xen_destroy_contiguous_region()
alex@392 232 * are based on i386 xen_create_contiguous_region(),
alex@392 233 * xen_destroy_contiguous_region()
alex@392 234 */
ian@26 235
ian@26 236 /* Protected by balloon_lock. */
ian@26 237 #define MAX_CONTIG_ORDER 7
ian@26 238 static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
ian@26 239
ian@26 240 /* Ensure multi-page extents are contiguous in machine memory. */
ian@26 241 int
ian@26 242 __xen_create_contiguous_region(unsigned long vstart,
ian@26 243 unsigned int order, unsigned int address_bits)
ian@26 244 {
ian@26 245 unsigned long error = 0;
ian@26 246 unsigned long gphys = __pa(vstart);
ian@26 247 unsigned long start_gpfn = gphys >> PAGE_SHIFT;
ian@26 248 unsigned long num_gpfn = 1 << order;
ian@26 249 unsigned long i;
ian@26 250 unsigned long flags;
ian@26 251
ian@26 252 unsigned long *in_frames = discontig_frames, out_frame;
ian@26 253 int success;
ian@26 254 struct xen_memory_exchange exchange = {
ian@26 255 .in = {
ian@26 256 .nr_extents = num_gpfn,
ian@26 257 .extent_order = 0,
ian@26 258 .domid = DOMID_SELF
ian@26 259 },
ian@26 260 .out = {
ian@26 261 .nr_extents = 1,
ian@26 262 .extent_order = order,
ian@26 263 .address_bits = address_bits,
ian@26 264 .domid = DOMID_SELF
ian@26 265 },
ian@26 266 .nr_exchanged = 0
ian@26 267 };
ian@26 268
ian@26 269 if (unlikely(order > MAX_CONTIG_ORDER))
ian@26 270 return -ENOMEM;
ian@26 271
ian@26 272 set_xen_guest_handle(exchange.in.extent_start, in_frames);
ian@26 273 set_xen_guest_handle(exchange.out.extent_start, &out_frame);
ian@26 274
ian@26 275 scrub_pages(vstart, num_gpfn);
ian@26 276
ian@26 277 balloon_lock(flags);
ian@26 278
ian@26 279 /* Get a new contiguous memory extent. */
alex@392 280 for (i = 0; i < num_gpfn; i++)
ian@26 281 in_frames[i] = start_gpfn + i;
ian@26 282 out_frame = start_gpfn;
ian@26 283 error = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
ian@26 284 success = (exchange.nr_exchanged == num_gpfn);
ian@26 285 BUG_ON(!success && ((exchange.nr_exchanged != 0) || (error == 0)));
ian@26 286 BUG_ON(success && (error != 0));
ian@26 287 if (unlikely(error == -ENOSYS)) {
ian@26 288 /* Compatibility when XENMEM_exchange is unsupported. */
ian@26 289 error = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
ian@26 290 &exchange.in);
ian@26 291 BUG_ON(error != num_gpfn);
ian@26 292 error = HYPERVISOR_memory_op(XENMEM_populate_physmap,
ian@26 293 &exchange.out);
ian@26 294 if (error != 1) {
ian@26 295 /* Couldn't get special memory: fall back to normal. */
ian@26 296 for (i = 0; i < num_gpfn; i++) {
ian@26 297 in_frames[i] = start_gpfn + i;
ian@26 298 }
ian@26 299 error = HYPERVISOR_memory_op(XENMEM_populate_physmap,
ian@26 300 &exchange.in);
ian@26 301 BUG_ON(error != num_gpfn);
ian@26 302 success = 0;
ian@26 303 } else
ian@26 304 success = 1;
ian@26 305 }
ian@26 306 if (success)
ian@26 307 contiguous_bitmap_set(start_gpfn, num_gpfn);
ian@26 308 #if 0
ian@26 309 if (success) {
ian@26 310 unsigned long mfn;
ian@26 311 unsigned long mfn_prev = ~0UL;
ian@26 312 for (i = 0; i < num_gpfn; i++) {
ian@26 313 mfn = pfn_to_mfn_for_dma(start_gpfn + i);
ian@26 314 if (mfn_prev != ~0UL && mfn != mfn_prev + 1) {
ian@26 315 xprintk("\n");
ian@26 316 xprintk("%s:%d order %d "
ian@26 317 "start 0x%lx bus 0x%lx "
ian@26 318 "machine 0x%lx\n",
ian@26 319 __func__, __LINE__, order,
ian@26 320 vstart, virt_to_bus((void*)vstart),
ian@26 321 phys_to_machine_for_dma(gphys));
ian@26 322 xprintk("mfn: ");
ian@26 323 for (i = 0; i < num_gpfn; i++) {
ian@26 324 mfn = pfn_to_mfn_for_dma(
ian@26 325 start_gpfn + i);
ian@26 326 xprintk("0x%lx ", mfn);
ian@26 327 }
ian@26 328 xprintk("\n");
ian@26 329 break;
ian@26 330 }
ian@26 331 mfn_prev = mfn;
ian@26 332 }
ian@26 333 }
ian@26 334 #endif
ian@26 335 balloon_unlock(flags);
ian@26 336 return success ? 0 : -ENOMEM;
ian@26 337 }
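
A hypothetical caller, say a DMA buffer allocator, would pair this with the destroy path below. A sketch only, assuming the xen_create_contiguous_region() wrapper that this file's xen_limit_pages_to_max_mfn() also uses:

	/* Sketch: request machine-contiguous backing below 2^32 for a
	 * freshly allocated block; give the pages back if Xen declines. */
	static void *alloc_machine_contiguous(unsigned int order)
	{
		unsigned long vstart = __get_free_pages(GFP_KERNEL, order);

		if (vstart == 0)
			return NULL;
		if (xen_create_contiguous_region(vstart, order, 32) != 0) {
			free_pages(vstart, order);
			return NULL;
		}
		/* undo later with xen_destroy_contiguous_region() */
		return (void *)vstart;
	}
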
ian@26 338
ian@26 339 void
ian@26 340 __xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
ian@26 341 {
ian@26 342 unsigned long flags;
ian@26 343 unsigned long error = 0;
ian@26 344 unsigned long start_gpfn = __pa(vstart) >> PAGE_SHIFT;
ian@26 345 unsigned long num_gpfn = 1UL << order;
ian@26 346 unsigned long i;
ian@26 347
ian@26 348 unsigned long *out_frames = discontig_frames, in_frame;
ian@26 349 int success;
ian@26 350 struct xen_memory_exchange exchange = {
ian@26 351 .in = {
ian@26 352 .nr_extents = 1,
ian@26 353 .extent_order = order,
ian@26 354 .domid = DOMID_SELF
ian@26 355 },
ian@26 356 .out = {
ian@26 357 .nr_extents = num_gpfn,
ian@26 358 .extent_order = 0,
ian@26 359 .address_bits = 0,
ian@26 360 .domid = DOMID_SELF
ian@26 361 },
ian@26 362 .nr_exchanged = 0
alex@392 363 };
ian@26 364
ian@26 365
ian@26 366 if (!test_bit(start_gpfn, contiguous_bitmap))
ian@26 367 return;
ian@26 368
ian@26 369 if (unlikely(order > MAX_CONTIG_ORDER))
ian@26 370 return;
ian@26 371
ian@26 372 set_xen_guest_handle(exchange.in.extent_start, &in_frame);
ian@26 373 set_xen_guest_handle(exchange.out.extent_start, out_frames);
ian@26 374
ian@26 375 scrub_pages(vstart, num_gpfn);
ian@26 376
ian@26 377 balloon_lock(flags);
ian@26 378
ian@26 379 contiguous_bitmap_clear(start_gpfn, num_gpfn);
ian@26 380
alex@392 381 /* Do the exchange for non-contiguous MFNs. */
ian@26 382 in_frame = start_gpfn;
alex@392 383 for (i = 0; i < num_gpfn; i++)
ian@26 384 out_frames[i] = start_gpfn + i;
ian@26 385 error = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
ian@26 386 success = (exchange.nr_exchanged == 1);
ian@26 387 BUG_ON(!success && ((exchange.nr_exchanged != 0) || (error == 0)));
ian@26 388 BUG_ON(success && (error != 0));
ian@26 389 if (unlikely(error == -ENOSYS)) {
alex@392 390 /* Compatibility when XENMEM_exchange is unsupported. */
ian@26 391 error = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
ian@26 392 &exchange.in);
ian@26 393 BUG_ON(error != 1);
ian@26 394
ian@26 395 error = HYPERVISOR_memory_op(XENMEM_populate_physmap,
ian@26 396 &exchange.out);
ian@26 397 BUG_ON(error != num_gpfn);
ian@26 398 }
ian@26 399 balloon_unlock(flags);
ian@26 400 }
ian@26 401
alex@225 402 int
alex@225 403 xen_limit_pages_to_max_mfn(struct page *pages, unsigned int order,
alex@225 404 unsigned int address_bits)
alex@225 405 {
alex@225 406 return xen_create_contiguous_region((unsigned long)page_address(pages),
alex@225 407 order, address_bits);
alex@225 408 }
alex@225 409
alex@392 410 /****************************************************************************
alex@392 411 * grant table hack
alex@392 412 * cmd: GNTTABOP_xxx
alex@392 413 */
ian@26 414 #include <linux/mm.h>
ian@26 415 #include <xen/interface/xen.h>
ian@26 416 #include <xen/gnttab.h>
ian@26 417
kfraser@106 418 void *arch_gnttab_alloc_shared(unsigned long *frames)
kfraser@106 419 {
kfraser@106 420 return __va(frames[0] << PAGE_SHIFT);
kfraser@106 421 }
kfraser@106 422
ian@26 423 static void
ian@26 424 gnttab_map_grant_ref_pre(struct gnttab_map_grant_ref *uop)
ian@26 425 {
ian@26 426 uint32_t flags;
ian@26 427
ian@26 428 flags = uop->flags;
ian@26 429
ian@26 430 if (flags & GNTMAP_host_map) {
ian@26 431 if (flags & GNTMAP_application_map) {
alex@392 432 xprintd("GNTMAP_application_map is not supported yet:"
alex@392 433 " flags 0x%x\n", flags);
ian@26 434 BUG();
ian@26 435 }
ian@26 436 if (flags & GNTMAP_contains_pte) {
alex@392 437 xprintd("GNTMAP_contains_pte is not supported yet"
alex@392 438 " flags 0x%x\n", flags);
ian@26 439 BUG();
ian@26 440 }
ian@26 441 } else if (flags & GNTMAP_device_map) {
alex@392 442 xprintd("GNTMAP_device_map is not supported yet 0x%x\n",
alex@392 443 flags);
alex@392 444 BUG(); /* XXX not yet. actually this flag is not used. */
ian@26 445 } else {
ian@26 446 BUG();
ian@26 447 }
ian@26 448 }
ian@26 449
ian@26 450 int
ian@26 451 HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count)
ian@26 452 {
ian@26 453 if (cmd == GNTTABOP_map_grant_ref) {
ian@26 454 unsigned int i;
ian@26 455 for (i = 0; i < count; i++) {
ian@26 456 gnttab_map_grant_ref_pre(
ian@26 457 (struct gnttab_map_grant_ref *)uop + i);
ian@26 458 }
ian@26 459 }
alex@187 460 return xencomm_hypercall_grant_table_op(cmd, uop, count);
ian@26 461 }
ian@26 462 EXPORT_SYMBOL(HYPERVISOR_grant_table_op);
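
For reference, a request that passes the pre-check above uses GNTMAP_host_map and none of the unsupported flags. A hypothetical sketch, where vaddr, gref and peer_domid are placeholders supplied by the caller:

	/* Sketch only: map one page granted by peer_domid at vaddr. */
	struct gnttab_map_grant_ref op = {
		.host_addr = vaddr,		/* mapping address in this domain */
		.flags     = GNTMAP_host_map,	/* the only mode supported here */
		.ref       = gref,		/* grant reference from the peer */
		.dom       = peer_domid,
	};

	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1) != 0 ||
	    op.status != GNTST_okay)
		/* handle failure; op.status holds the grant-table error */;
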
ian@26 463
alex@392 464 /**************************************************************************
alex@392 465 * foreign mapping
alex@392 466 */
ian@26 467 #include <linux/efi.h>
alex@392 468 #include <asm/meminit.h> /* for IA64_GRANULE_SIZE, GRANULEROUND{UP,DOWN}() */
ian@26 469
ian@26 470 static unsigned long privcmd_resource_min = 0;
alex@392 471 /* Xen/ia64 can currently handle pseudo-physical addresses
alex@392 472 * of up to (PAGE_SHIFT * 3) bits. */
alex@392 473 static unsigned long privcmd_resource_max =
alex@392 474 GRANULEROUNDDOWN((1UL << (PAGE_SHIFT * 3)) - 1);
ian@26 475 static unsigned long privcmd_resource_align = IA64_GRANULE_SIZE;
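
As a worked example, with the common 16KB page configuration (PAGE_SHIFT = 14) the cap above is GRANULEROUNDDOWN(2^42 - 1), so the privcmd pseudo-physical window tops out just under 4TB.
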
ian@26 476
ian@26 477 static unsigned long
ian@26 478 md_end_addr(const efi_memory_desc_t *md)
ian@26 479 {
ian@26 480 return md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
ian@26 481 }
ian@26 482
ian@26 483 #define XEN_IA64_PRIVCMD_LEAST_GAP_SIZE (1024 * 1024 * 1024UL)
ian@26 484 static int
ian@26 485 xen_ia64_privcmd_check_size(unsigned long start, unsigned long end)
ian@26 486 {
ian@26 487 return (start < end &&
ian@26 488 (end - start) > XEN_IA64_PRIVCMD_LEAST_GAP_SIZE);
ian@26 489 }
ian@26 490
ian@26 491 static int __init
ian@26 492 xen_ia64_privcmd_init(void)
ian@26 493 {
ian@26 494 void *efi_map_start, *efi_map_end, *p;
ian@26 495 u64 efi_desc_size;
ian@26 496 efi_memory_desc_t *md;
ian@26 497 unsigned long tmp_min;
ian@26 498 unsigned long tmp_max;
ian@26 499 unsigned long gap_size;
ian@26 500 unsigned long prev_end;
ian@26 501
ian@26 502 if (!is_running_on_xen())
ian@26 503 return -1;
ian@26 504
ian@26 505 efi_map_start = __va(ia64_boot_param->efi_memmap);
ian@26 506 efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
ian@26 507 efi_desc_size = ia64_boot_param->efi_memdesc_size;
ian@26 508
alex@392 509 /* First, check the highest address in use. */
ian@26 510 for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
alex@392 511 /* nothing */;
ian@26 512 }
ian@26 513 md = p - efi_desc_size;
ian@26 514 privcmd_resource_min = GRANULEROUNDUP(md_end_addr(md));
ian@26 515 if (xen_ia64_privcmd_check_size(privcmd_resource_min,
alex@392 516 privcmd_resource_max))
ian@26 517 goto out;
ian@26 518
alex@392 519 /* The highest address in use is too large.
alex@392 520 * Try to find the largest gap. */
ian@26 521 tmp_min = privcmd_resource_max;
ian@26 522 tmp_max = 0;
ian@26 523 gap_size = 0;
ian@26 524 prev_end = 0;
ian@26 525 for (p = efi_map_start;
ian@26 526 p < efi_map_end - efi_desc_size;
ian@26 527 p += efi_desc_size) {
ian@26 528 unsigned long end;
ian@26 529 efi_memory_desc_t *next;
ian@26 530 unsigned long next_start;
ian@26 531
ian@26 532 md = p;
ian@26 533 end = md_end_addr(md);
alex@392 534 if (end > privcmd_resource_max)
ian@26 535 break;
ian@26 536 if (end < prev_end) {
alex@392 537 /* Work around:
alex@392 538 * Xen may pass incompletely sorted memory
alex@392 539 * descriptors like
alex@392 540 * [x, x + length]
alex@392 541 * [x, x]
alex@392 542 * and this order should be reversed. */
ian@26 543 continue;
ian@26 544 }
ian@26 545 next = p + efi_desc_size;
ian@26 546 next_start = next->phys_addr;
alex@392 547 if (next_start > privcmd_resource_max)
ian@26 548 next_start = privcmd_resource_max;
ian@26 549 if (end < next_start && gap_size < (next_start - end)) {
ian@26 550 tmp_min = end;
ian@26 551 tmp_max = next_start;
ian@26 552 gap_size = tmp_max - tmp_min;
ian@26 553 }
ian@26 554 prev_end = end;
ian@26 555 }
ian@26 556
ian@26 557 privcmd_resource_min = GRANULEROUNDUP(tmp_min);
ian@26 558 if (xen_ia64_privcmd_check_size(privcmd_resource_min, tmp_max)) {
ian@26 559 privcmd_resource_max = tmp_max;
ian@26 560 goto out;
ian@26 561 }
ian@26 562
ian@26 563 privcmd_resource_min = tmp_min;
ian@26 564 privcmd_resource_max = tmp_max;
ian@26 565 if (!xen_ia64_privcmd_check_size(privcmd_resource_min,
ian@26 566 privcmd_resource_max)) {
alex@392 567 /* No large enough gap was found.
alex@392 568 * Go ahead anyway with a warning, hoping that a large
alex@392 569 * region won't be requested. */
alex@392 570 printk(KERN_WARNING "xen privcmd: "
alex@392 571 "no large enough region for privcmd mmap was found.\n");
ian@26 572 }
ian@26 573
ian@26 574 out:
alex@392 575 printk(KERN_INFO "xen privcmd uses pseudo physical addr range "
alex@392 576 "[0x%lx, 0x%lx] (%ldMB)\n",
ian@26 577 privcmd_resource_min, privcmd_resource_max,
ian@26 578 (privcmd_resource_max - privcmd_resource_min) >> 20);
ian@26 579 BUG_ON(privcmd_resource_min >= privcmd_resource_max);
ian@26 580
alex@392 581 /* XXX this should be somewhere appropriate */
ian@26 582 (void)p2m_expose_init();
ian@26 583
ian@26 584 return 0;
ian@26 585 }
ian@26 586 late_initcall(xen_ia64_privcmd_init);
ian@26 587
ian@26 588 struct xen_ia64_privcmd_entry {
ian@26 589 atomic_t map_count;
ian@26 590 #define INVALID_GPFN (~0UL)
ian@26 591 unsigned long gpfn;
ian@26 592 };
ian@26 593
ian@26 594 struct xen_ia64_privcmd_range {
ian@26 595 atomic_t ref_count;
alex@392 596 unsigned long pgoff; /* in PAGE_SIZE */
alex@392 597 struct resource *res;
ian@26 598
alex@392 599 /* for foreign domain p2m mapping */
alex@392 600 void *private;
alex@392 601 void (*callback)(struct xen_ia64_privcmd_range *range, void *arg);
alex@197 602
ian@26 603 unsigned long num_entries;
ian@26 604 struct xen_ia64_privcmd_entry entries[0];
ian@26 605 };
ian@26 606
ian@26 607 struct xen_ia64_privcmd_vma {
ian@26 608 int is_privcmd_mmapped;
alex@392 609 struct xen_ia64_privcmd_range *range;
ian@26 610
ian@26 611 unsigned long num_entries;
alex@392 612 struct xen_ia64_privcmd_entry *entries;
ian@26 613 };
ian@26 614
ian@26 615 static void
alex@392 616 xen_ia64_privcmd_init_entry(struct xen_ia64_privcmd_entry *entry)
ian@26 617 {
ian@26 618 atomic_set(&entry->map_count, 0);
ian@26 619 entry->gpfn = INVALID_GPFN;
ian@26 620 }
ian@26 621
ian@26 622 static int
alex@392 623 xen_ia64_privcmd_entry_mmap(struct vm_area_struct *vma,
ian@26 624 unsigned long addr,
alex@392 625 struct xen_ia64_privcmd_range *privcmd_range,
ian@26 626 int i,
ian@26 627 unsigned long gmfn,
ian@26 628 pgprot_t prot,
ian@26 629 domid_t domid)
ian@26 630 {
ian@26 631 int error = 0;
alex@392 632 struct xen_ia64_privcmd_entry *entry = &privcmd_range->entries[i];
ian@26 633 unsigned long gpfn;
ian@26 634 unsigned long flags;
ian@26 635
ian@26 636 if ((addr & ~PAGE_MASK) != 0 || gmfn == INVALID_MFN) {
ian@26 637 error = -EINVAL;
ian@26 638 goto out;
ian@26 639 }
ian@26 640
ian@26 641 if (entry->gpfn != INVALID_GPFN) {
ian@26 642 error = -EBUSY;
ian@26 643 goto out;
ian@26 644 }
ian@26 645 gpfn = (privcmd_range->res->start >> PAGE_SHIFT) + i;
ian@26 646
ian@26 647 flags = ASSIGN_writable;
alex@392 648 if (pgprot_val(prot) == PROT_READ)
ian@26 649 flags = ASSIGN_readonly;
ian@26 650 error = HYPERVISOR_add_physmap_with_gmfn(gpfn, gmfn, flags, domid);
alex@392 651 if (error != 0)
ian@26 652 goto out;
ian@26 653
ian@26 654 prot = vma->vm_page_prot;
ian@26 655 error = remap_pfn_range(vma, addr, gpfn, 1 << PAGE_SHIFT, prot);
ian@26 656 if (error != 0) {
ian@26 657 error = HYPERVISOR_zap_physmap(gpfn, 0);
alex@392 658 if (error)
alex@392 659 BUG(); /* XXX */
ian@26 660 } else {
ian@26 661 atomic_inc(&entry->map_count);
ian@26 662 entry->gpfn = gpfn;
ian@26 663 }
ian@26 664
ian@26 665 out:
ian@26 666 return error;
ian@26 667 }
ian@26 668
ian@26 669 static void
alex@392 670 xen_ia64_privcmd_entry_munmap(struct xen_ia64_privcmd_range *privcmd_range,
ian@26 671 int i)
ian@26 672 {
alex@392 673 struct xen_ia64_privcmd_entry *entry = &privcmd_range->entries[i];
ian@26 674 unsigned long gpfn = entry->gpfn;
alex@392 675 /* gpfn = (privcmd_range->res->start >> PAGE_SHIFT) +
alex@392 676 (vma->vm_pgoff - privcmd_range->pgoff); */
ian@26 677 int error;
ian@26 678
ian@26 679 error = HYPERVISOR_zap_physmap(gpfn, 0);
alex@392 680 if (error)
alex@392 681 BUG(); /* XXX */
ian@26 682 entry->gpfn = INVALID_GPFN;
ian@26 683 }
ian@26 684
ian@26 685 static void
alex@392 686 xen_ia64_privcmd_entry_open(struct xen_ia64_privcmd_range *privcmd_range,
ian@26 687 int i)
ian@26 688 {
alex@392 689 struct xen_ia64_privcmd_entry *entry = &privcmd_range->entries[i];
alex@392 690 if (entry->gpfn != INVALID_GPFN)
ian@26 691 atomic_inc(&entry->map_count);
alex@392 692 else
ian@26 693 BUG_ON(atomic_read(&entry->map_count) != 0);
ian@26 694 }
ian@26 695
ian@26 696 static void
alex@392 697 xen_ia64_privcmd_entry_close(struct xen_ia64_privcmd_range *privcmd_range,
ian@26 698 int i)
ian@26 699 {
alex@392 700 struct xen_ia64_privcmd_entry *entry = &privcmd_range->entries[i];
ian@26 701 if (entry->gpfn != INVALID_GPFN &&
alex@392 702 atomic_dec_and_test(&entry->map_count))
ian@26 703 xen_ia64_privcmd_entry_munmap(privcmd_range, i);
ian@26 704 }
ian@26 705
alex@392 706 static void xen_ia64_privcmd_vma_open(struct vm_area_struct *vma);
alex@392 707 static void xen_ia64_privcmd_vma_close(struct vm_area_struct *vma);
ian@26 708
ian@26 709 struct vm_operations_struct xen_ia64_privcmd_vm_ops = {
ian@26 710 .open = &xen_ia64_privcmd_vma_open,
ian@26 711 .close = &xen_ia64_privcmd_vma_close,
ian@26 712 };
ian@26 713
ian@26 714 static void
alex@392 715 __xen_ia64_privcmd_vma_open(struct vm_area_struct *vma,
alex@392 716 struct xen_ia64_privcmd_vma *privcmd_vma,
alex@392 717 struct xen_ia64_privcmd_range *privcmd_range)
ian@26 718 {
ian@26 719 unsigned long entry_offset = vma->vm_pgoff - privcmd_range->pgoff;
alex@392 720 unsigned long num_entries =
alex@392 721 (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
ian@26 722 unsigned long i;
ian@26 723
ian@26 724 BUG_ON(vma->vm_pgoff < privcmd_range->pgoff); /* entry_offset is unsigned */
ian@26 725 BUG_ON(entry_offset + num_entries > privcmd_range->num_entries);
ian@26 726
ian@26 727 privcmd_vma->range = privcmd_range;
ian@26 728 privcmd_vma->num_entries = num_entries;
ian@26 729 privcmd_vma->entries = &privcmd_range->entries[entry_offset];
ian@26 730 vma->vm_private_data = privcmd_vma;
alex@392 731 for (i = 0; i < privcmd_vma->num_entries; i++)
ian@26 732 xen_ia64_privcmd_entry_open(privcmd_range, entry_offset + i);
ian@26 733
ian@26 734 vma->vm_private_data = privcmd_vma;
ian@26 735 vma->vm_ops = &xen_ia64_privcmd_vm_ops;
ian@26 736 }
ian@26 737
ian@26 738 static void
alex@392 739 xen_ia64_privcmd_vma_open(struct vm_area_struct *vma)
ian@26 740 {
alex@392 741 struct xen_ia64_privcmd_vma *old_privcmd_vma =
alex@392 742 (struct xen_ia64_privcmd_vma *)vma->vm_private_data;
alex@392 743 struct xen_ia64_privcmd_vma *privcmd_vma =
alex@392 744 (struct xen_ia64_privcmd_vma *)vma->vm_private_data;
alex@392 745 struct xen_ia64_privcmd_range *privcmd_range = privcmd_vma->range;
ian@26 746
ian@26 747 atomic_inc(&privcmd_range->ref_count);
alex@392 748 /* vm_op->open() can't fail. */
ian@26 749 privcmd_vma = kmalloc(sizeof(*privcmd_vma), GFP_KERNEL | __GFP_NOFAIL);
alex@392 750 /* copy original value if necessary */
ian@26 751 privcmd_vma->is_privcmd_mmapped = old_privcmd_vma->is_privcmd_mmapped;
ian@26 752
ian@26 753 __xen_ia64_privcmd_vma_open(vma, privcmd_vma, privcmd_range);
ian@26 754 }
ian@26 755
ian@26 756 static void
alex@392 757 xen_ia64_privcmd_vma_close(struct vm_area_struct *vma)
ian@26 758 {
alex@392 759 struct xen_ia64_privcmd_vma *privcmd_vma =
ian@26 760 (struct xen_ia64_privcmd_vma *)vma->vm_private_data;
alex@392 761 struct xen_ia64_privcmd_range *privcmd_range = privcmd_vma->range;
ian@26 762 unsigned long entry_offset = vma->vm_pgoff - privcmd_range->pgoff;
ian@26 763 unsigned long i;
ian@26 764
ian@26 765 for (i = 0; i < privcmd_vma->num_entries; i++) {
ian@26 766 xen_ia64_privcmd_entry_close(privcmd_range, entry_offset + i);
alex@277 767 cond_resched();
ian@26 768 }
ian@26 769 vma->vm_private_data = NULL;
ian@26 770 kfree(privcmd_vma);
ian@26 771
ian@26 772 if (atomic_dec_and_test(&privcmd_range->ref_count)) {
ian@26 773 #if 1
ian@26 774 for (i = 0; i < privcmd_range->num_entries; i++) {
alex@392 775 struct xen_ia64_privcmd_entry *entry =
ian@26 776 &privcmd_range->entries[i];
ian@26 777 BUG_ON(atomic_read(&entry->map_count) != 0);
ian@26 778 BUG_ON(entry->gpfn != INVALID_GPFN);
ian@26 779 }
ian@26 780 #endif
alex@197 781 if (privcmd_range->callback)
alex@197 782 (*privcmd_range->callback)(privcmd_range,
alex@197 783 privcmd_range->private);
ian@26 784 release_resource(privcmd_range->res);
ian@26 785 kfree(privcmd_range->res);
ian@26 786 vfree(privcmd_range);
ian@26 787 }
ian@26 788 }
ian@26 789
ian@26 790 int
ian@26 791 privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma)
ian@26 792 {
alex@392 793 struct xen_ia64_privcmd_vma *privcmd_vma =
ian@26 794 (struct xen_ia64_privcmd_vma *)vma->vm_private_data;
ian@26 795 return (xchg(&privcmd_vma->is_privcmd_mmapped, 1) == 0);
ian@26 796 }
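
The xchg() above acts as a one-shot gate: the first mmap'er reads back 0 and wins, every later caller reads back 1 and is refused. The same idiom in isolation, as a hypothetical sketch:

	/* Sketch only: at most one caller ever gets a non-zero result. */
	static int claimed;

	static int claim_once(void)
	{
		return xchg(&claimed, 1) == 0;	/* atomically swap in 1 */
	}
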
ian@26 797
ian@26 798 int
ian@26 799 privcmd_mmap(struct file * file, struct vm_area_struct * vma)
ian@26 800 {
ian@26 801 int error;
ian@26 802 unsigned long size = vma->vm_end - vma->vm_start;
ian@26 803 unsigned long num_entries = size >> PAGE_SHIFT;
alex@392 804 struct xen_ia64_privcmd_range *privcmd_range = NULL;
alex@392 805 struct xen_ia64_privcmd_vma *privcmd_vma = NULL;
alex@392 806 struct resource *res = NULL;
ian@26 807 unsigned long i;
ian@26 808 BUG_ON(!is_running_on_xen());
ian@26 809
ian@26 810 BUG_ON(file->private_data != NULL);
ian@26 811
ian@26 812 error = -ENOMEM;
ian@26 813 privcmd_range =
ian@26 814 vmalloc(sizeof(*privcmd_range) +
ian@26 815 sizeof(privcmd_range->entries[0]) * num_entries);
alex@392 816 if (privcmd_range == NULL)
ian@26 817 goto out_enomem0;
ian@26 818 privcmd_vma = kmalloc(sizeof(*privcmd_vma), GFP_KERNEL);
alex@392 819 if (privcmd_vma == NULL)
ian@26 820 goto out_enomem1;
ian@26 821 privcmd_vma->is_privcmd_mmapped = 0;
ian@26 822
ian@26 823 res = kzalloc(sizeof(*res), GFP_KERNEL);
alex@392 824 if (res == NULL)
ian@26 825 goto out_enomem1;
ian@26 826 res->name = "Xen privcmd mmap";
ian@26 827 error = allocate_resource(&iomem_resource, res, size,
ian@26 828 privcmd_resource_min, privcmd_resource_max,
ian@26 829 privcmd_resource_align, NULL, NULL);
alex@392 830 if (error)
ian@26 831 goto out_enomem1;
ian@26 832 privcmd_range->res = res;
ian@26 833
ian@26 834 /* DONTCOPY is essential for Xen as copy_page_range is broken. */
ian@26 835 vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY | VM_PFNMAP;
ian@26 836
ian@26 837 atomic_set(&privcmd_range->ref_count, 1);
ian@26 838 privcmd_range->pgoff = vma->vm_pgoff;
ian@26 839 privcmd_range->num_entries = num_entries;
alex@197 840 privcmd_range->private = NULL;
alex@197 841 privcmd_range->callback = NULL;
alex@392 842 for (i = 0; i < privcmd_range->num_entries; i++)
ian@26 843 xen_ia64_privcmd_init_entry(&privcmd_range->entries[i]);
ian@26 844
ian@26 845 __xen_ia64_privcmd_vma_open(vma, privcmd_vma, privcmd_range);
ian@26 846 return 0;
ian@26 847
ian@26 848 out_enomem1:
ian@26 849 kfree(res);
ian@26 850 kfree(privcmd_vma);
ian@26 851 out_enomem0:
ian@26 852 vfree(privcmd_range);
ian@26 853 return error;
ian@26 854 }
ian@26 855
ian@26 856 int
ian@26 857 direct_remap_pfn_range(struct vm_area_struct *vma,
alex@392 858 unsigned long address, /* process virtual address */
alex@392 859 unsigned long gmfn, /* gmfn, gmfn + 1, ... gmfn + size/PAGE_SIZE */
ian@26 860 unsigned long size,
ian@26 861 pgprot_t prot,
alex@392 862 domid_t domid) /* target domain */
ian@26 863 {
alex@392 864 struct xen_ia64_privcmd_vma *privcmd_vma =
ian@26 865 (struct xen_ia64_privcmd_vma *)vma->vm_private_data;
alex@392 866 struct xen_ia64_privcmd_range *privcmd_range = privcmd_vma->range;
ian@26 867 unsigned long entry_offset = vma->vm_pgoff - privcmd_range->pgoff;
ian@26 868
ian@26 869 unsigned long i;
ian@26 870 unsigned long offset;
ian@26 871 int error = 0;
ian@26 872 BUG_ON(!is_running_on_xen());
ian@26 873
ian@26 874 #if 0
alex@392 875 if (pgprot_val(prot) != pgprot_val(vma->vm_page_prot))
ian@26 876 return -EINVAL;
ian@26 877 #endif
ian@26 878
ian@26 879 i = (address - vma->vm_start) >> PAGE_SHIFT;
ian@26 880 for (offset = 0; offset < size; offset += PAGE_SIZE) {
ian@26 881 error = xen_ia64_privcmd_entry_mmap(vma, (address + offset) & PAGE_MASK, privcmd_range, entry_offset + i, gmfn, prot, domid);
alex@392 882 if (error != 0)
ian@26 883 break;
ian@26 884
ian@26 885 i++;
ian@26 886 gmfn++;
alex@392 887 }
ian@26 888
ian@26 889 return error;
ian@26 890 }
ian@26 891
ian@26 892
alex@392 893 /**************************************************************************
alex@392 894 * expose p2m table
alex@392 895 */
ian@26 896 #ifdef CONFIG_XEN_IA64_EXPOSE_P2M
ian@26 897 #include <linux/cpu.h>
ian@26 898 #include <asm/uaccess.h>
ian@26 899
ian@26 900 int p2m_initialized __read_mostly = 0;
ian@26 901
ian@26 902 unsigned long p2m_min_low_pfn __read_mostly;
ian@26 903 unsigned long p2m_max_low_pfn __read_mostly;
ian@26 904 unsigned long p2m_convert_min_pfn __read_mostly;
ian@26 905 unsigned long p2m_convert_max_pfn __read_mostly;
ian@26 906
ian@26 907 static struct resource p2m_resource = {
ian@26 908 .name = "Xen p2m table",
ian@26 909 .flags = IORESOURCE_MEM,
ian@26 910 };
ian@26 911 static unsigned long p2m_assign_start_pfn __read_mostly;
ian@26 912 static unsigned long p2m_assign_end_pfn __read_mostly;
alex@392 913 static unsigned long p2m_expose_size; /* This is referenced only on resume,
alex@392 914 * so __read_mostly doesn't make sense.
alex@392 915 */
alex@392 916 volatile const pte_t *p2m_pte __read_mostly;
ian@26 917
alex@87 918 #define GRANULE_PFN PTRS_PER_PTE
alex@87 919 static unsigned long p2m_granule_pfn __read_mostly = GRANULE_PFN;
ian@26 920
ian@26 921 #define ROUNDDOWN(x, y) ((x) & ~((y) - 1))
ian@26 922 #define ROUNDUP(x, y) (((x) + (y) - 1) & ~((y) - 1))
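
Both macros assume y is a power of two. A worked example with y = 0x4000 (one 16KB page):

	/*
	 *   ROUNDDOWN(0x4001, 0x4000) == 0x4000
	 *   ROUNDUP(0x4001, 0x4000)   == 0x8000
	 *   ROUNDUP(0x4000, 0x4000)   == 0x4000   (already aligned)
	 */
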
ian@26 923
ian@26 924 #define P2M_PREFIX "Xen p2m: "
ian@26 925
ian@26 926 static int xen_ia64_p2m_expose __read_mostly = 1;
ian@26 927 module_param(xen_ia64_p2m_expose, int, 0);
ian@26 928 MODULE_PARM_DESC(xen_ia64_p2m_expose,
alex@392 929 "enable/disable xen/ia64 p2m exposure optimization\n");
ian@26 930
ian@26 931 #ifdef CONFIG_XEN_IA64_EXPOSE_P2M_USE_DTR
ian@26 932 static int xen_ia64_p2m_expose_use_dtr __read_mostly = 1;
ian@26 933 module_param(xen_ia64_p2m_expose_use_dtr, int, 0);
ian@26 934 MODULE_PARM_DESC(xen_ia64_p2m_expose_use_dtr,
alex@392 935 "use/unuse dtr to map exposed p2m table\n");
ian@26 936
ian@26 937 static const int p2m_page_shifts[] = {
ian@26 938 _PAGE_SIZE_4K,
ian@26 939 _PAGE_SIZE_8K,
ian@26 940 _PAGE_SIZE_16K,
ian@26 941 _PAGE_SIZE_64K,
ian@26 942 _PAGE_SIZE_256K,
ian@26 943 _PAGE_SIZE_1M,
ian@26 944 _PAGE_SIZE_4M,
ian@26 945 _PAGE_SIZE_16M,
ian@26 946 _PAGE_SIZE_64M,
ian@26 947 _PAGE_SIZE_256M,
ian@26 948 };
ian@26 949
ian@26 950 struct p2m_itr_arg {
ian@26 951 unsigned long vaddr;
ian@26 952 unsigned long pteval;
ian@26 953 unsigned long log_page_size;
ian@26 954 };
ian@26 955 static struct p2m_itr_arg p2m_itr_arg __read_mostly;
ian@26 956
alex@392 957 /* This should be in asm-ia64/kregs.h */
ian@26 958 #define IA64_TR_P2M_TABLE 3
ian@26 959
ian@26 960 static void
alex@392 961 p2m_itr(void *info)
ian@26 962 {
alex@392 963 struct p2m_itr_arg *arg = (struct p2m_itr_arg *)info;
ian@26 964 ia64_itr(0x2, IA64_TR_P2M_TABLE,
alex@392 965 arg->vaddr, arg->pteval, arg->log_page_size);
ian@26 966 ia64_srlz_d();
ian@26 967 }
ian@26 968
ian@26 969 static int
ian@26 970 p2m_expose_dtr_call(struct notifier_block *self,
alex@392 971 unsigned long event, void *ptr)
ian@26 972 {
ian@26 973 unsigned int cpu = (unsigned int)(long)ptr;
ian@26 974 if (event != CPU_ONLINE)
ian@26 975 return 0;
ian@26 976 if (p2m_initialized && xen_ia64_p2m_expose_use_dtr) {
ian@26 977 unsigned int me = get_cpu();
ian@26 978 if (cpu == me)
ian@26 979 p2m_itr(&p2m_itr_arg);
ian@26 980 else
ian@26 981 smp_call_function_single(cpu, &p2m_itr, &p2m_itr_arg,
ian@26 982 1, 1);
ian@26 983 put_cpu();
ian@26 984 }
ian@26 985 return 0;
ian@26 986 }
ian@26 987
ian@26 988 static struct notifier_block p2m_expose_dtr_hotplug_notifier = {
ian@26 989 .notifier_call = p2m_expose_dtr_call,
ian@26 990 .next = NULL,
ian@26 991 .priority = 0
ian@26 992 };
ian@26 993 #endif
ian@26 994
alex@197 995 static inline unsigned long
alex@197 996 p2m_table_size(unsigned long num_pfn)
alex@197 997 {
alex@197 998 return ((num_pfn + PTRS_PER_PTE - 1) / PTRS_PER_PTE) << PAGE_SHIFT;
alex@197 999 }
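
That is one 8-byte PTE per frame, rounded up to whole table pages. For example, assuming 16KB pages (so PTRS_PER_PTE = 2048), covering 4GB of pseudo-physical memory (2^18 frames) takes ceil(2^18 / 2048) = 128 table pages, i.e. 2MB.
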
alex@197 1000
ian@26 1001 static int
ian@26 1002 p2m_expose_init(void)
ian@26 1003 {
ian@26 1004 unsigned long num_pfn;
ian@26 1005 unsigned long p2m_size = 0;
ian@26 1006 unsigned long align = ~0UL;
ian@26 1007 int error = 0;
ian@26 1008 #ifdef CONFIG_XEN_IA64_EXPOSE_P2M_USE_DTR
ian@26 1009 int i;
ian@26 1010 unsigned long log_page_size = 0;
ian@26 1011 #endif
ian@26 1012
ian@26 1013 if (!xen_ia64_p2m_expose)
ian@26 1014 return -ENOSYS;
ian@26 1015 if (p2m_initialized)
ian@26 1016 return 0;
ian@26 1017
ian@26 1018 #ifdef CONFIG_XEN_IA64_EXPOSE_P2M_USE_DTR
ian@26 1019 error = register_cpu_notifier(&p2m_expose_dtr_hotplug_notifier);
ian@26 1020 if (error < 0)
ian@26 1021 return error;
ian@26 1022 #endif
ian@26 1023
ian@26 1024 lock_cpu_hotplug();
ian@26 1025 if (p2m_initialized)
ian@26 1026 goto out;
ian@26 1027
ian@26 1028 #ifdef CONFIG_DISCONTIGMEM
ian@26 1029 p2m_min_low_pfn = min_low_pfn;
ian@26 1030 p2m_max_low_pfn = max_low_pfn;
ian@26 1031 #else
ian@26 1032 p2m_min_low_pfn = 0;
ian@26 1033 p2m_max_low_pfn = max_pfn;
ian@26 1034 #endif
ian@26 1035
ian@26 1036 #ifdef CONFIG_XEN_IA64_EXPOSE_P2M_USE_DTR
ian@26 1037 if (xen_ia64_p2m_expose_use_dtr) {
alex@85 1038 unsigned long page_size = 0;
ian@26 1039 unsigned long granule_pfn = 0;
alex@197 1040 p2m_size = p2m_table_size(p2m_max_low_pfn - p2m_min_low_pfn);
ian@26 1041 for (i = 0;
ian@26 1042 i < ARRAY_SIZE(p2m_page_shifts);
ian@26 1043 i++) {
ian@26 1044 log_page_size = p2m_page_shifts[i];
ian@26 1045 page_size = 1UL << log_page_size;
ian@26 1046 if (page_size < p2m_size)
ian@26 1047 continue;
ian@26 1048
ian@26 1049 granule_pfn = max(page_size >> PAGE_SHIFT,
alex@392 1050 p2m_granule_pfn);
ian@26 1051 p2m_convert_min_pfn = ROUNDDOWN(p2m_min_low_pfn,
alex@392 1052 granule_pfn);
ian@26 1053 p2m_convert_max_pfn = ROUNDUP(p2m_max_low_pfn,
alex@392 1054 granule_pfn);
ian@26 1055 num_pfn = p2m_convert_max_pfn - p2m_convert_min_pfn;
ian@26 1056 p2m_expose_size = num_pfn << PAGE_SHIFT;
alex@197 1057 p2m_size = p2m_table_size(num_pfn);
alex@392 1058 p2m_size = ROUNDUP(p2m_size,
alex@392 1059 granule_pfn << PAGE_SHIFT);
ian@26 1060 if (p2m_size == page_size)
ian@26 1061 break;
ian@26 1062 }
ian@26 1063 if (p2m_size != page_size) {
ian@26 1064 printk(KERN_ERR "p2m_size != page_size\n");
ian@26 1065 error = -EINVAL;
ian@26 1066 goto out;
ian@26 1067 }
ian@26 1068 align = max(privcmd_resource_align, granule_pfn << PAGE_SHIFT);
ian@26 1069 } else
ian@26 1070 #endif
ian@26 1071 {
ian@26 1072 BUG_ON(p2m_granule_pfn & (p2m_granule_pfn - 1));
ian@26 1073 p2m_convert_min_pfn = ROUNDDOWN(p2m_min_low_pfn,
alex@392 1074 p2m_granule_pfn);
alex@392 1075 p2m_convert_max_pfn = ROUNDUP(p2m_max_low_pfn,
alex@392 1076 p2m_granule_pfn);
ian@26 1077 num_pfn = p2m_convert_max_pfn - p2m_convert_min_pfn;
ian@26 1078 p2m_expose_size = num_pfn << PAGE_SHIFT;
alex@197 1079 p2m_size = p2m_table_size(num_pfn);
ian@26 1080 p2m_size = ROUNDUP(p2m_size, p2m_granule_pfn << PAGE_SHIFT);
ian@26 1081 align = max(privcmd_resource_align,
alex@392 1082 p2m_granule_pfn << PAGE_SHIFT);
ian@26 1083 }
ian@26 1084
alex@392 1085 /* use privcmd region */
ian@26 1086 error = allocate_resource(&iomem_resource, &p2m_resource, p2m_size,
alex@392 1087 privcmd_resource_min, privcmd_resource_max,
alex@392 1088 align, NULL, NULL);
ian@26 1089 if (error) {
ian@26 1090 printk(KERN_ERR P2M_PREFIX
ian@26 1091 "can't allocate region for p2m exposure "
alex@62 1092 "[0x%016lx, 0x%016lx] 0x%016lx\n",
ian@26 1093 p2m_convert_min_pfn, p2m_convert_max_pfn, p2m_size);
ian@26 1094 goto out;
ian@26 1095 }
ian@26 1096
ian@26 1097 p2m_assign_start_pfn = p2m_resource.start >> PAGE_SHIFT;
ian@26 1098 p2m_assign_end_pfn = p2m_resource.end >> PAGE_SHIFT;
ian@26 1099
ian@26 1100 error = HYPERVISOR_expose_p2m(p2m_convert_min_pfn,
alex@392 1101 p2m_assign_start_pfn,
alex@392 1102 p2m_expose_size, p2m_granule_pfn);
ian@26 1103 if (error) {
ian@26 1104 printk(KERN_ERR P2M_PREFIX "failed expose p2m hypercall %d\n",
ian@26 1105 error);
ian@26 1106 printk(KERN_ERR P2M_PREFIX "conv 0x%016lx assign 0x%016lx "
ian@26 1107 "expose_size 0x%016lx granule 0x%016lx\n",
ian@26 1108 p2m_convert_min_pfn, p2m_assign_start_pfn,
ian@26 1109 p2m_expose_size, p2m_granule_pfn);
ian@26 1110 release_resource(&p2m_resource);
ian@26 1111 goto out;
ian@26 1112 }
ian@26 1113 p2m_pte = (volatile const pte_t *)pfn_to_kaddr(p2m_assign_start_pfn);
ian@26 1114 #ifdef CONFIG_XEN_IA64_EXPOSE_P2M_USE_DTR
ian@26 1115 if (xen_ia64_p2m_expose_use_dtr) {
ian@26 1116 p2m_itr_arg.vaddr = (unsigned long)__va(p2m_assign_start_pfn
alex@392 1117 << PAGE_SHIFT);
ian@26 1118 p2m_itr_arg.pteval = pte_val(pfn_pte(p2m_assign_start_pfn,
alex@392 1119 PAGE_KERNEL));
ian@26 1120 p2m_itr_arg.log_page_size = log_page_size;
ian@26 1121 smp_mb();
ian@26 1122 smp_call_function(&p2m_itr, &p2m_itr_arg, 1, 1);
ian@26 1123 p2m_itr(&p2m_itr_arg);
ian@26 1124 }
ian@26 1125 #endif
ian@26 1126 smp_mb();
ian@26 1127 p2m_initialized = 1;
ian@26 1128 printk(P2M_PREFIX "assign p2m table of [0x%016lx, 0x%016lx)\n",
ian@26 1129 p2m_convert_min_pfn << PAGE_SHIFT,
alex@62 1130 (p2m_convert_max_pfn << PAGE_SHIFT) + PAGE_SIZE);
ian@26 1131 printk(P2M_PREFIX "to [0x%016lx, 0x%016lx) (%ld KBytes)\n",
ian@26 1132 p2m_assign_start_pfn << PAGE_SHIFT,
alex@62 1133 (p2m_assign_end_pfn << PAGE_SHIFT) + PAGE_SIZE,
ian@26 1134 p2m_size / 1024);
ian@26 1135 out:
ian@26 1136 unlock_cpu_hotplug();
ian@26 1137 return error;
ian@26 1138 }
ian@26 1139
ian@26 1140 #ifdef notyet
ian@26 1141 void
ian@26 1142 p2m_expose_cleanup(void)
ian@26 1143 {
ian@26 1144 BUG_ON(!p2m_initialized);
ian@26 1145 #ifdef CONFIG_XEN_IA64_EXPOSE_P2M_USE_DTR
ian@26 1146 unregister_cpu_notifier(&p2m_expose_dtr_hotplug_notifier);
ian@26 1147 #endif
ian@26 1148 release_resource(&p2m_resource);
ian@26 1149 }
ian@26 1150 #endif
ian@26 1151
ian@26 1152 static void
ian@26 1153 p2m_expose_resume(void)
ian@26 1154 {
ian@26 1155 int error;
ian@26 1156
ian@26 1157 if (!xen_ia64_p2m_expose || !p2m_initialized)
ian@26 1158 return;
ian@26 1159
ian@26 1160 /*
ian@26 1161 * We can't call {lock, unlock}_cpu_hotplug() because
ian@26 1162 * they require process context.
ian@26 1163 * We don't need them because only one CPU is running and
ian@26 1164 * interrupts are masked during resume.
ian@26 1165 */
ian@26 1166 error = HYPERVISOR_expose_p2m(p2m_convert_min_pfn,
alex@392 1167 p2m_assign_start_pfn,
alex@392 1168 p2m_expose_size, p2m_granule_pfn);
ian@26 1169 if (error) {
ian@26 1170 printk(KERN_ERR P2M_PREFIX "failed expose p2m hypercall %d\n",
ian@26 1171 error);
ian@26 1172 printk(KERN_ERR P2M_PREFIX "conv 0x%016lx assign 0x%016lx "
ian@26 1173 "expose_size 0x%016lx granule 0x%016lx\n",
ian@26 1174 p2m_convert_min_pfn, p2m_assign_start_pfn,
ian@26 1175 p2m_expose_size, p2m_granule_pfn);
ian@26 1176 p2m_initialized = 0;
ian@26 1177 smp_mb();
ian@26 1178 ia64_ptr(0x2, p2m_itr_arg.vaddr, p2m_itr_arg.log_page_size);
ian@26 1179
ian@26 1180 /*
ian@26 1181 * We can't call those cleanup functions because they
ian@26 1182 * require process context.
ian@26 1183 */
ian@26 1184 #if 0
ian@26 1185 #ifdef CONFIG_XEN_IA64_EXPOSE_P2M_USE_DTR
ian@26 1186 if (xen_ia64_p2m_expose_use_dtr)
ian@26 1187 unregister_cpu_notifier(
ian@26 1188 &p2m_expose_dtr_hotplug_notifier);
ian@26 1189 #endif
ian@26 1190 release_resource(&p2m_resource);
ian@26 1191 #endif
ian@26 1192 }
ian@26 1193 }
ian@26 1194
alex@392 1195 /* XXX make this inline? */
ian@26 1196 unsigned long
ian@26 1197 p2m_phystomach(unsigned long gpfn)
ian@26 1198 {
alex@392 1199 volatile const pte_t *pte;
ian@26 1200 unsigned long mfn;
ian@26 1201 unsigned long pteval;
ian@26 1202
ian@26 1203 if (!p2m_initialized ||
ian@26 1204 gpfn < p2m_min_low_pfn || gpfn > p2m_max_low_pfn
ian@26 1205 /* || !pfn_valid(gpfn) */)
ian@26 1206 return INVALID_MFN;
ian@26 1207 pte = p2m_pte + (gpfn - p2m_convert_min_pfn);
ian@26 1208
ian@26 1209 mfn = INVALID_MFN;
ian@26 1210 if (likely(__get_user(pteval, (unsigned long __user *)pte) == 0 &&
alex@392 1211 pte_present(__pte(pteval)) &&
alex@392 1212 pte_pfn(__pte(pteval)) != (INVALID_MFN >> PAGE_SHIFT)))
ian@26 1213 mfn = (pteval & _PFN_MASK) >> PAGE_SHIFT;
ian@26 1214
ian@26 1215 return mfn;
ian@26 1216 }
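
A hypothetical caller translating a full pseudo-physical address would shift, look up, and re-attach the page offset, treating INVALID_MFN as "no translation":

	/* Sketch only: pseudo-physical to machine address via the exposed p2m. */
	static unsigned long sketch_phys_to_machine(unsigned long paddr)
	{
		unsigned long mfn = p2m_phystomach(paddr >> PAGE_SHIFT);

		if (mfn == INVALID_MFN)
			return INVALID_MFN;	/* not covered by the table */
		return (mfn << PAGE_SHIFT) | (paddr & (PAGE_SIZE - 1));
	}
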
ian@26 1217
ian@26 1218 EXPORT_SYMBOL_GPL(p2m_initialized);
ian@26 1219 EXPORT_SYMBOL_GPL(p2m_min_low_pfn);
ian@26 1220 EXPORT_SYMBOL_GPL(p2m_max_low_pfn);
ian@26 1221 EXPORT_SYMBOL_GPL(p2m_convert_min_pfn);
ian@26 1222 EXPORT_SYMBOL_GPL(p2m_convert_max_pfn);
ian@26 1223 EXPORT_SYMBOL_GPL(p2m_pte);
ian@26 1224 EXPORT_SYMBOL_GPL(p2m_phystomach);
alex@197 1225
alex@392 1226 /**************************************************************************
alex@392 1227 * foreign domain p2m mapping
alex@392 1228 */
alex@197 1229 #include <asm/xen/xencomm.h>
alex@197 1230 #include <xen/public/privcmd.h>
alex@197 1231
alex@197 1232 struct foreign_p2m_private {
alex@197 1233 unsigned long gpfn;
alex@197 1234 domid_t domid;
alex@197 1235 };
alex@197 1236
alex@197 1237 static void
alex@392 1238 xen_foreign_p2m_unexpose(struct xen_ia64_privcmd_range *privcmd_range,
alex@392 1239 void *arg)
alex@197 1240 {
alex@392 1241 struct foreign_p2m_private *private = (struct foreign_p2m_private *)arg;
alex@197 1242 int ret;
alex@197 1243
alex@197 1244 privcmd_range->private = NULL;
alex@197 1245 privcmd_range->callback = NULL;
alex@197 1246
alex@197 1247 ret = HYPERVISOR_unexpose_foreign_p2m(private->gpfn, private->domid);
alex@197 1248 if (ret)
alex@197 1249 printk(KERN_WARNING
alex@197 1250 "unexpose_foreign_p2m hypercall failed.\n");
alex@197 1251 kfree(private);
alex@197 1252 }
alex@197 1253
alex@197 1254 int
alex@392 1255 xen_foreign_p2m_expose(privcmd_hypercall_t *hypercall)
alex@197 1256 {
alex@392 1257 /*
alex@392 1258 * hypercall->
alex@392 1259 * arg0: cmd = IA64_DOM0VP_expose_foreign_p2m
alex@392 1260 * arg1: va
alex@392 1261 * arg2: domid
alex@392 1262 * arg3: __user* memmap_info
alex@392 1263 * arg4: flags
alex@392 1264 */
alex@197 1265
alex@197 1266 int ret = 0;
alex@392 1267 struct mm_struct *mm = current->mm;
alex@197 1268
alex@197 1269 unsigned long vaddr = hypercall->arg[1];
alex@197 1270 domid_t domid = hypercall->arg[2];
alex@197 1271 struct xen_ia64_memmap_info __user *u_memmap_info =
alex@197 1272 (struct xen_ia64_memmap_info __user *)hypercall->arg[3];
alex@197 1273
alex@197 1274 struct xen_ia64_memmap_info memmap_info;
alex@197 1275 size_t memmap_size;
alex@392 1276 struct xen_ia64_memmap_info *k_memmap_info = NULL;
alex@197 1277 long max_gpfn; /* signed: may hold a -errno from the hypercall */
alex@197 1278 unsigned long p2m_size;
alex@392 1279 struct resource *res;
alex@197 1280 unsigned long gpfn;
alex@197 1281
alex@392 1282 struct vm_area_struct *vma;
alex@392 1283 void *p;
alex@197 1284 unsigned long prev_src_gpfn_end;
alex@197 1285
alex@392 1286 struct xen_ia64_privcmd_vma *privcmd_vma;
alex@392 1287 struct xen_ia64_privcmd_range *privcmd_range;
alex@392 1288 struct foreign_p2m_private *private = NULL;
alex@197 1289
alex@197 1290 BUG_ON(hypercall->arg[0] != IA64_DOM0VP_expose_foreign_p2m);
alex@197 1291
alex@197 1292 private = kmalloc(sizeof(*private), GFP_KERNEL);
alex@197 1293 if (private == NULL)
alex@197 1294 return -ENOMEM;
alex@197 1295
alex@197 1296 if (copy_from_user(&memmap_info, u_memmap_info, sizeof(memmap_info)))
alex@197 1297 { ret = -EFAULT; goto kfree_out; }
alex@197 1298 /* memmap_info integrity check */
alex@197 1299 if (memmap_info.efi_memdesc_size < sizeof(efi_memory_desc_t) ||
alex@197 1300 memmap_info.efi_memmap_size < memmap_info.efi_memdesc_size ||
alex@197 1301 (memmap_info.efi_memmap_size % memmap_info.efi_memdesc_size)
alex@197 1302 != 0) {
alex@197 1303 ret = -EINVAL;
alex@197 1304 goto kfree_out;
alex@197 1305 }
alex@197 1306
alex@197 1307 memmap_size = sizeof(*k_memmap_info) + memmap_info.efi_memmap_size;
alex@197 1308 k_memmap_info = kmalloc(memmap_size, GFP_KERNEL);
alex@197 1309 if (k_memmap_info == NULL)
alex@197 1310 { ret = -ENOMEM; goto kfree_out; }
alex@197 1311 if (copy_from_user(k_memmap_info, u_memmap_info, memmap_size)) {
alex@197 1312 ret = -EFAULT;
alex@197 1313 goto kfree_out;
alex@197 1314 }
alex@197 1315 /* The k_memmap_info integrity check is done by the expose foreign
alex@197 1316 p2m hypercall */
alex@197 1317
alex@197 1318 max_gpfn = HYPERVISOR_memory_op(XENMEM_maximum_gpfn, &domid);
alex@197 1319 if (max_gpfn < 0) {
alex@197 1320 ret = max_gpfn;
alex@197 1321 goto kfree_out;
alex@197 1322 }
alex@197 1323 p2m_size = p2m_table_size(max_gpfn + 1);
alex@197 1324
alex@197 1325 down_write(&mm->mmap_sem);
alex@197 1326
alex@197 1327 vma = find_vma(mm, vaddr);
alex@197 1328 if (vma == NULL || vma->vm_ops != &xen_ia64_privcmd_vm_ops ||
alex@197 1329 vaddr != vma->vm_start ||
alex@197 1330 (vma->vm_flags & VM_WRITE) || (vma->vm_flags & VM_EXEC) ||
alex@197 1331 !privcmd_enforce_singleshot_mapping(vma))
alex@197 1332 { ret = -EINVAL; goto mmap_out; }
alex@197 1333
alex@197 1334 privcmd_vma = (struct xen_ia64_privcmd_vma *)vma->vm_private_data;
alex@197 1335 res = privcmd_vma->range->res;
alex@197 1336 if (p2m_size > (res->end - res->start + 1) ||
alex@197 1337 p2m_size > vma->vm_end - vma->vm_start) {
alex@197 1338 ret = -EINVAL;
alex@197 1339 goto mmap_out;
alex@197 1340 }
alex@197 1341
alex@197 1342 gpfn = res->start >> PAGE_SHIFT;
alex@392 1343 /*
alex@392 1344 * arg0: dest_gpfn
alex@392 1345 * arg1: domid
alex@392 1346 * arg2: XEN_GUEST_HANDLE(char) buffer: memmap_info
alex@392 1347 * arg3: flags
alex@392 1348 * The hypercall checks its integrity, simplifies it, and
alex@392 1349 * copies it back for us.
alex@392 1350 */
alex@197 1351 ret = xencomm_arch_expose_foreign_p2m(gpfn, domid,
alex@197 1352 xencomm_map_no_alloc(k_memmap_info, memmap_size),
alex@197 1353 hypercall->arg[4]);
alex@197 1354 if (ret)
alex@197 1355 goto mmap_out;
alex@197 1356
alex@197 1357 privcmd_range = (struct xen_ia64_privcmd_range *)privcmd_vma->range;
alex@197 1358 prev_src_gpfn_end = 0;
alex@197 1359 for (p = k_memmap_info->memdesc;
alex@197 1360 p < (void *)&k_memmap_info->memdesc[0] +
alex@197 1361 k_memmap_info->efi_memmap_size;
alex@197 1362 p += k_memmap_info->efi_memdesc_size) {
alex@197 1363 efi_memory_desc_t *md = p;
alex@197 1364 unsigned long src_gpfn = md->phys_addr >> PAGE_SHIFT;
alex@197 1365 unsigned long src_gpfn_end =
alex@197 1366 (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >>
alex@197 1367 PAGE_SHIFT;
alex@197 1368 unsigned long num_src_gpfn;
alex@197 1369 unsigned long gpfn_offset;
alex@197 1370 unsigned long size;
alex@197 1371 unsigned int i;
alex@197 1372
alex@197 1373 if (src_gpfn <= prev_src_gpfn_end)
alex@197 1374 src_gpfn = prev_src_gpfn_end + 1;
alex@197 1375 if (src_gpfn_end <= prev_src_gpfn_end)
alex@197 1376 continue;
alex@197 1377
alex@197 1378 src_gpfn &= ~(PTRS_PER_PTE - 1);
alex@197 1379 src_gpfn_end = (src_gpfn_end + PTRS_PER_PTE - 1) &
alex@197 1380 ~(PTRS_PER_PTE - 1);
alex@197 1381 num_src_gpfn = src_gpfn_end - src_gpfn;
alex@197 1382 gpfn_offset = src_gpfn / PTRS_PER_PTE;
alex@197 1383 size = p2m_table_size(num_src_gpfn);
alex@197 1384
alex@197 1385 prev_src_gpfn_end = src_gpfn_end;
alex@197 1386 ret = remap_pfn_range(vma,
alex@197 1387 vaddr + (gpfn_offset << PAGE_SHIFT),
alex@197 1388 gpfn + gpfn_offset, size,
alex@197 1389 vma->vm_page_prot);
alex@197 1390 if (ret) {
alex@197 1391 for (i = 0; i < gpfn_offset; i++) { /* undo entries mapped so far */
alex@392 1392 struct xen_ia64_privcmd_entry *entry =
alex@197 1393 &privcmd_range->entries[i];
alex@197 1394 BUG_ON(atomic_read(&entry->map_count) != 1 &&
alex@197 1395 atomic_read(&entry->map_count) != 0);
alex@197 1396 atomic_set(&entry->map_count, 0);
alex@197 1397 entry->gpfn = INVALID_GPFN;
alex@197 1398 }
alex@197 1399 (void)HYPERVISOR_unexpose_foreign_p2m(gpfn, domid);
alex@197 1400 goto mmap_out;
alex@197 1401 }
alex@197 1402
alex@197 1403 for (i = gpfn_offset;
alex@197 1404 i < gpfn_offset + (size >> PAGE_SHIFT);
alex@197 1405 i++) {
alex@392 1406 struct xen_ia64_privcmd_entry *entry =
alex@197 1407 &privcmd_range->entries[i];
alex@197 1408 BUG_ON(atomic_read(&entry->map_count) != 0);
alex@197 1409 BUG_ON(entry->gpfn != INVALID_GPFN);
alex@197 1410 atomic_inc(&entry->map_count);
alex@197 1411 entry->gpfn = gpfn + i;
alex@197 1412 }
alex@197 1413 }
alex@197 1414
alex@197 1415 private->gpfn = gpfn;
alex@197 1416 private->domid = domid;
alex@197 1417
alex@197 1418 privcmd_range->callback = &xen_foreign_p2m_unexpose;
alex@197 1419 privcmd_range->private = private;
alex@197 1420
alex@197 1421 mmap_out:
alex@197 1422 up_write(&mm->mmap_sem);
alex@197 1423 kfree_out:
alex@197 1424 kfree(k_memmap_info);
alex@197 1425 if (ret != 0)
alex@197 1426 kfree(private);
alex@197 1427 return ret;
alex@197 1428 }
ian@26 1429 #endif
ian@26 1430
alex@392 1431 /**************************************************************************
alex@392 1432 * for xenoprof
alex@392 1433 */
ian@26 1434 struct resource*
ian@26 1435 xen_ia64_allocate_resource(unsigned long size)
ian@26 1436 {
alex@392 1437 struct resource *res;
ian@26 1438 int error;
ian@26 1439
alex@43 1440 res = kzalloc(sizeof(*res), GFP_KERNEL);
ian@26 1441 if (res == NULL)
ian@26 1442 return ERR_PTR(-ENOMEM);
ian@26 1443
ian@26 1444 res->name = "Xen";
ian@26 1445 res->flags = IORESOURCE_MEM;
ian@26 1446 error = allocate_resource(&iomem_resource, res, PAGE_ALIGN(size),
alex@392 1447 privcmd_resource_min, privcmd_resource_max,
alex@392 1448 IA64_GRANULE_SIZE, NULL, NULL);
ian@26 1449 if (error) {
ian@26 1450 kfree(res);
ian@26 1451 return ERR_PTR(error);
ian@26 1452 }
ian@26 1453 return res;
ian@26 1454 }
ian@26 1455 EXPORT_SYMBOL_GPL(xen_ia64_allocate_resource);
ian@26 1456
ian@26 1457 void
alex@392 1458 xen_ia64_release_resource(struct resource *res)
ian@26 1459 {
ian@26 1460 release_resource(res);
ian@26 1461 kfree(res);
ian@26 1462 }
ian@26 1463 EXPORT_SYMBOL_GPL(xen_ia64_release_resource);
ian@26 1464
ian@26 1465 void
alex@392 1466 xen_ia64_unmap_resource(struct resource *res)
ian@26 1467 {
ian@26 1468 unsigned long gpfn = res->start >> PAGE_SHIFT;
ian@26 1469 unsigned long nr_pages = (res->end - res->start) >> PAGE_SHIFT;
ian@26 1470 unsigned long i;
ian@26 1471
ian@26 1472 for (i = 0; i < nr_pages; i++) {
ian@26 1473 int error = HYPERVISOR_zap_physmap(gpfn + i, 0);
ian@26 1474 if (error)
ian@26 1475 printk(KERN_ERR
ian@26 1476 "%s:%d zap_phsymap failed %d gpfn %lx\n",
ian@26 1477 __func__, __LINE__, error, gpfn + i);
ian@26 1478 }
ian@26 1479 xen_ia64_release_resource(res);
ian@26 1480 }
ian@26 1481 EXPORT_SYMBOL_GPL(xen_ia64_unmap_resource);
ian@26 1482
alex@392 1483 /**************************************************************************
alex@392 1484 * opt feature
alex@392 1485 */
alex@256 1486 void
alex@256 1487 xen_ia64_enable_opt_feature(void)
alex@256 1488 {
alex@256 1489 /* Enable region 7 identity map optimizations in Xen */
alex@256 1490 struct xen_ia64_opt_feature optf;
alex@256 1491
alex@256 1492 optf.cmd = XEN_IA64_OPTF_IDENT_MAP_REG7;
alex@256 1493 optf.on = XEN_IA64_OPTF_ON;
alex@256 1494 optf.pgprot = pgprot_val(PAGE_KERNEL);
alex@256 1495 optf.key = 0; /* No key on Linux. */
alex@256 1496 HYPERVISOR_opt_feature(&optf);
alex@256 1497 }
alex@256 1498
alex@392 1499 /**************************************************************************
alex@392 1500 * suspend/resume
alex@392 1501 */
ian@26 1502 void
ian@26 1503 xen_post_suspend(int suspend_cancelled)
ian@26 1504 {
ian@26 1505 if (suspend_cancelled)
ian@26 1506 return;
ian@26 1507
ian@26 1508 p2m_expose_resume();
alex@256 1509 xen_ia64_enable_opt_feature();
ian@26 1510 /* add more if necessary */
ian@26 1511 }