ia64/xen-unstable

view xen/arch/ia64/xen/xenmem.c @ 9405:29dfadcc5029

[IA64] Followup to xen time cleanup

Clean up the xen time handler. Tristan had #if 0'd some code because it
seemed redundant; that logic is actually problematic, however, and is the
cause of an intermittent timer oops in dom0. So delete it now.

Also remove the vcpu_wake call, since waking up the current vcpu does
nothing meaningful and simply wastes cpu cycles.

Signed-off-by: Kevin Tian <kevin.tian@intel.com>
author awilliam@xenbuild.aw
date Mon Mar 27 15:32:08 2006 -0700 (2006-03-27)
parents ccde0eab2545
children 2d28f31aae8b
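
The vcpu_wake removal itself is in the timer handler, not in the file shown
below. As a purely hypothetical sketch of the pattern being dropped -- the
handler name and its body are assumptions, not code from this changeset --
it amounts to the following:

/* Hypothetical illustration only: vcpu_wake() and current are real Xen
 * symbols, but this handler and its contents are invented for the sketch. */
static void timer_tick_handler(void)
{
        /* ... deliver the timer interrupt to the guest, reprogram the
         * timer, etc. ... */

        /* Removed: waking the vcpu that is already running is a no-op,
         * so the call only burned cycles in the interrupt path. */
        /* vcpu_wake(current); */
}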
line source
/*
 * Xen memory allocator routines
 *
 * Copyright (C) 2005 Hewlett-Packard Co
 *    Dan Magenheimer <dan.magenheimer@hp.com>
 * Copyright (C) 2005 Intel Corp.
 *
 * Routines used by ia64 machines with contiguous (or virtually contiguous)
 * memory.
 */

#include <linux/config.h>
#include <asm/pgtable.h>
#include <xen/mm.h>

struct page_info *frame_table;
unsigned long frame_table_size;
unsigned long max_page;

struct page_info *mem_map;
#define MAX_DMA_ADDRESS ~0UL	// FIXME???

#ifdef CONFIG_VIRTUAL_MEM_MAP
static unsigned long num_dma_physpages;
#endif

/*
 * Set up the page tables.
 */
unsigned long *mpt_table;
unsigned long mpt_table_size;

void
paging_init (void)
{
        unsigned int mpt_order;
        unsigned long i;

        /* Create the machine to physical mapping table.
         * NOTE: as with the frame table, we may later need a virtually
         * mapped mpt table if large holes exist. Also, MAX_ORDER needs
         * to be changed in common code, which only supports 16MB so far.
         */
        mpt_table_size = max_page * sizeof(unsigned long);
        mpt_order = get_order(mpt_table_size);
        ASSERT(mpt_order <= MAX_ORDER);
        if ((mpt_table = alloc_xenheap_pages(mpt_order)) == NULL)
                panic("Not enough memory to bootstrap Xen.\n");

        printk("machine to physical table: 0x%lx\n", (u64)mpt_table);
        for (i = 0; i < max_page; i++) {
                mpt_table[i] = INVALID_M2P_ENTRY;
        }
}
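
/* The table built above maps a machine frame number (mfn) to the guest
 * physical frame that owns it, with every entry starting out as
 * INVALID_M2P_ENTRY. A minimal sketch of how such a table is used --
 * m2p_record()/m2p_lookup() are illustrative helpers, not functions from
 * this file:
 */
static inline void m2p_record(unsigned long mfn, unsigned long gpfn)
{
        mpt_table[mfn] = gpfn;  /* machine frame mfn now backs guest frame gpfn */
}

static inline unsigned long m2p_lookup(unsigned long mfn)
{
        if (mfn >= max_page)
                return INVALID_M2P_ENTRY;  /* outside the table sized in paging_init() */
        return mpt_table[mfn];
}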

/* FIXME: support for machines with big holes between physical memory ranges
 * is postponed. The current hack only allows efi memdescs up to 4GB. (See efi.c)
 */
#ifndef CONFIG_VIRTUAL_MEM_MAP
#define FT_ALIGN_SIZE	(16UL << 20)
void __init init_frametable(void)
{
        unsigned long pfn;
        frame_table_size = max_page * sizeof(struct page_info);
        frame_table_size = (frame_table_size + PAGE_SIZE - 1) & PAGE_MASK;

        /* Request a contiguous chunk from the boot allocator, since the HV
         * address space is identity mapped */
        pfn = alloc_boot_pages(
                frame_table_size >> PAGE_SHIFT, FT_ALIGN_SIZE >> PAGE_SHIFT);
        if (pfn == 0)
                panic("Not enough memory for frame table.\n");

        frame_table = __va(pfn << PAGE_SHIFT);
        memset(frame_table, 0, frame_table_size);
        printk("size of frame_table: %lukB\n",
               frame_table_size >> 10);
}
#endif
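
A short, illustrative addendum (not part of xenmem.c): frame_table is indexed
directly by machine frame number, one struct page_info per frame, which is the
relationship Xen's mfn_to_page()/page_to_mfn() style macros are usually built
on. The helper names below are assumptions for the sketch only.

/* Illustrative helpers, not from this file: converting between a
 * struct page_info pointer and an mfn is plain pointer arithmetic
 * on frame_table.
 */
static inline struct page_info *frame_to_page(unsigned long mfn)
{
        return frame_table + mfn;  /* one page_info per machine frame */
}

static inline unsigned long page_to_frame(struct page_info *pg)
{
        return (unsigned long)(pg - frame_table);
}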