ia64/xen-unstable

annotate xen/arch/ia64/xen/xenmem.c @ 16785:af3550f53874

[IA64] domheap: Don't pin xenheap down. Now it's unnecessary.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Alex Williamson <alex.williamson@hp.com>
date Thu Jan 17 12:05:43 2008 -0700 (2008-01-17)
parents 005dd6b1cf8e
children 0df3bf8aac1e
rev   line source
djm@6458 1 /*
djm@6458 2 * Xen memory allocator routines
djm@6458 3 *
djm@6458 4 * Copyright (C) 2005 Hewlett-Packard Co
djm@6458 5 * Dan Magenheimer <dan.magenheimer@hp.com>
djm@6458 6 * Copyright (C) 2005 Intel Corp.
djm@6458 7 *
djm@6458 8 * Routines used by ia64 machines with contiguous (or virtually contiguous)
djm@6458 9 * memory.
djm@6458 10 */
djm@6458 11
djm@6458 12 #include <linux/config.h>
djm@6458 13 #include <asm/pgtable.h>
djm@6458 14 #include <xen/mm.h>
djm@6458 15
#ifdef CONFIG_VIRTUAL_FRAME_TABLE
#include <linux/efi.h>
#include <asm/pgalloc.h>

/* Root of the boot-time page table covering the virtual frame-table
 * region; defined in ia64 assembly/boot code. */
extern unsigned long frametable_pg_dir[];

/*
 * Software page-table walkers for the virtual frame-table region.
 * Each level holds one page of 8-byte entries, hence PAGE_SHIFT - 3
 * index bits per level.  The PGD level indexes directly into the
 * statically allocated frametable_pg_dir array; the PMD and PTE
 * macros take the *value* of the parent entry (which, per
 * alloc_dir_page(), is the physical address of the next-level page)
 * and convert through __va() to reach it.
 */
#define FRAMETABLE_PGD_OFFSET(ADDR) \
(frametable_pg_dir + (((ADDR) >> PGDIR_SHIFT) & \
((1UL << (PAGE_SHIFT - 3)) - 1)))

#define FRAMETABLE_PMD_OFFSET(PGD, ADDR) \
__va((unsigned long *)(PGD) + (((ADDR) >> PMD_SHIFT) & \
((1UL << (PAGE_SHIFT - 3)) - 1)))

#define FRAMETABLE_PTE_OFFSET(PMD, ADDR) \
(pte_t *)__va((unsigned long *)(PMD) + (((ADDR) >> PAGE_SHIFT) & \
((1UL << (PAGE_SHIFT - 3)) - 1)))

/* Number of pages consumed so far while building one virtual table;
 * reset per table by init_virtual_frametable() for the size report. */
static unsigned long table_size;
/* "contig_mem" boot option: nonzero selects the contiguous (physically
 * mapped) frame/mpt tables instead of the virtually mapped ones. */
static int opt_contig_mem = 0;
boolean_param("contig_mem", opt_contig_mem);
#else
/* Without the virtual frame table, memory is always treated as contiguous. */
#define opt_contig_mem 1
#endif
djm@6458 40
/* Array of per-page metadata, indexed by machine frame number.
 * Points either at a physically contiguous xenheap allocation
 * (init_frametable) or at VIRT_FRAME_TABLE_ADDR when virtually
 * mapped (init_virtual_frametable). */
struct page_info *frame_table __read_mostly;
/* One past the highest machine frame number present. */
unsigned long max_page;

/*
 * Set up the page tables.
 */
/* Machine-to-physical translation table, one entry per machine frame;
 * entries start as INVALID_M2P_ENTRY. */
volatile unsigned long *mpt_table __read_mostly;
djm@6458 48
alex@15131 49 void __init
djm@6458 50 paging_init (void)
djm@6458 51 {
djm@6458 52 unsigned int mpt_order;
awilliam@9689 53 unsigned long mpt_table_size;
awilliam@9082 54 unsigned long i;
awilliam@9082 55
awilliam@9689 56 if (!opt_contig_mem) {
awilliam@9689 57 /* mpt_table is already allocated at this point. */
awilliam@9689 58 return;
awilliam@9689 59 }
awilliam@9689 60
djm@6458 61 /* Create machine to physical mapping table
djm@6458 62 * NOTE: similar to frame table, later we may need virtually
djm@6458 63 * mapped mpt table if large hole exists. Also MAX_ORDER needs
djm@6458 64 * to be changed in common code, which only support 16M by far
djm@6458 65 */
djm@6458 66 mpt_table_size = max_page * sizeof(unsigned long);
djm@6458 67 mpt_order = get_order(mpt_table_size);
djm@6458 68 ASSERT(mpt_order <= MAX_ORDER);
djm@6458 69 if ((mpt_table = alloc_xenheap_pages(mpt_order)) == NULL)
djm@6458 70 panic("Not enough memory to bootstrap Xen.\n");
djm@6458 71
awilliam@9491 72 printk("machine to physical table: 0x%lx mpt_table_size 0x%lx\n"
awilliam@9491 73 "mpt_order %u max_page 0x%lx\n",
awilliam@9491 74 (u64)mpt_table, mpt_table_size, mpt_order, max_page);
awilliam@9491 75 for (i = 0;
awilliam@9491 76 i < ((1UL << mpt_order) << PAGE_SHIFT) / sizeof(mpt_table[0]);
awilliam@9491 77 i++) {
awilliam@9082 78 mpt_table[i] = INVALID_M2P_ENTRY;
awilliam@9082 79 }
djm@6458 80 }
djm@6458 81
awilliam@9689 82 #ifdef CONFIG_VIRTUAL_FRAME_TABLE
awilliam@9689 83
alex@15131 84 static unsigned long __init
awilliam@9689 85 alloc_dir_page(void)
awilliam@9689 86 {
awilliam@9689 87 unsigned long mfn = alloc_boot_pages(1, 1);
awilliam@9689 88 unsigned long dir;
awilliam@9689 89 if (!mfn)
awilliam@9689 90 panic("Not enough memory for virtual frame table!\n");
awilliam@9689 91 ++table_size;
awilliam@9689 92 dir = mfn << PAGE_SHIFT;
kfraser@15405 93 clear_page(__va(dir));
awilliam@11609 94 return dir;
awilliam@9689 95 }
awilliam@9689 96
alex@15131 97 static inline unsigned long __init
awilliam@9689 98 alloc_table_page(unsigned long fill)
awilliam@9689 99 {
awilliam@9689 100 unsigned long mfn = alloc_boot_pages(1, 1);
awilliam@9689 101 unsigned long *table;
awilliam@9689 102 unsigned long i;
awilliam@9689 103 if (!mfn)
awilliam@9689 104 panic("Not enough memory for virtual frame table!\n");
awilliam@9689 105 ++table_size;
awilliam@9689 106 table = (unsigned long *)__va((mfn << PAGE_SHIFT));
awilliam@9689 107 for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++)
awilliam@9689 108 table[i] = fill;
awilliam@9689 109 return mfn;
awilliam@9689 110 }
awilliam@9689 111
/*
 * Map the virtual range [start_page, end_page) in the frame-table
 * page tables, allocating missing directory and leaf pages on demand.
 * Leaf pages are pre-filled with 'fill' (0 for the frame table,
 * INVALID_M2P_ENTRY for the mpt table).  'dir' is reused for both the
 * PGD and PMD levels; directory entries store physical addresses
 * (from alloc_dir_page()) which the FRAMETABLE_*_OFFSET macros
 * translate with __va() when descending.
 */
static void __init
create_page_table(unsigned long start_page, unsigned long end_page,
unsigned long fill)
{
unsigned long address;
unsigned long *dir;
pte_t *pteptr;

for (address = start_page; address < end_page; address += PAGE_SIZE) {
/* Level 1: index the static root directory. */
dir = FRAMETABLE_PGD_OFFSET(address);
if (!*dir)
*dir = alloc_dir_page();
/* Level 2: follow the phys addr stored in the PGD entry. */
dir = FRAMETABLE_PMD_OFFSET(*dir, address);
if (!*dir)
*dir = alloc_dir_page();
/* Level 3: install a pre-filled leaf page if none mapped yet. */
pteptr = FRAMETABLE_PTE_OFFSET(*dir, address);
if (pte_none(*pteptr))
set_pte(pteptr, pfn_pte(alloc_table_page(fill),
PAGE_KERNEL));
}
}
awilliam@11609 133
alex@15131 134 static int __init
awilliam@9689 135 create_frametable_page_table (u64 start, u64 end, void *arg)
awilliam@9689 136 {
awilliam@9689 137 struct page_info *map_start, *map_end;
awilliam@11609 138 unsigned long start_page, end_page;
awilliam@9689 139
awilliam@9689 140 map_start = frame_table + (__pa(start) >> PAGE_SHIFT);
awilliam@9689 141 map_end = frame_table + (__pa(end) >> PAGE_SHIFT);
awilliam@9689 142
awilliam@9689 143 start_page = (unsigned long) map_start & PAGE_MASK;
awilliam@9689 144 end_page = PAGE_ALIGN((unsigned long) map_end);
awilliam@9689 145
awilliam@11609 146 create_page_table(start_page, end_page, 0L);
awilliam@9689 147 return 0;
awilliam@9689 148 }
awilliam@9689 149
alex@15131 150 static int __init
awilliam@9689 151 create_mpttable_page_table (u64 start, u64 end, void *arg)
awilliam@9689 152 {
awilliam@10420 153 unsigned long map_start, map_end;
awilliam@11609 154 unsigned long start_page, end_page;
awilliam@9689 155
awilliam@10420 156 map_start = (unsigned long)(mpt_table + (__pa(start) >> PAGE_SHIFT));
awilliam@10420 157 map_end = (unsigned long)(mpt_table + (__pa(end) >> PAGE_SHIFT));
awilliam@9689 158
awilliam@10420 159 start_page = map_start & PAGE_MASK;
awilliam@10420 160 end_page = PAGE_ALIGN(map_end);
awilliam@9689 161
awilliam@11609 162 create_page_table(start_page, end_page, INVALID_M2P_ENTRY);
awilliam@9689 163 return 0;
awilliam@9689 164 }
awilliam@9689 165
alex@15131 166 void __init init_virtual_frametable(void)
awilliam@9689 167 {
awilliam@9689 168 /* Allocate virtual frame_table */
awilliam@9689 169 frame_table = (struct page_info *) VIRT_FRAME_TABLE_ADDR;
awilliam@9689 170 table_size = 0;
awilliam@9689 171 efi_memmap_walk(create_frametable_page_table, NULL);
awilliam@9689 172
awilliam@9689 173 printk("size of virtual frame_table: %lukB\n",
awilliam@9689 174 ((table_size << PAGE_SHIFT) >> 10));
awilliam@9689 175
awilliam@9689 176 /* Allocate virtual mpt_table */
awilliam@9689 177 table_size = 0;
awilliam@9689 178 mpt_table = (unsigned long *)VIRT_FRAME_TABLE_END - max_page;
awilliam@9689 179 efi_memmap_walk(create_mpttable_page_table, NULL);
awilliam@9689 180
awilliam@9689 181 printk("virtual machine to physical table: %p size: %lukB\n"
awilliam@9689 182 "max_page: 0x%lx\n",
awilliam@9689 183 mpt_table, ((table_size << PAGE_SHIFT) >> 10), max_page);
awilliam@12795 184
awilliam@12795 185 /*
awilliam@12795 186 * XXX work around for translate_domain_pte().
awilliam@12795 187 * It returns mfn=0 when the machine page isn't present. This
awilliam@12795 188 * behavior is a work around for memory mapped I/O where no device
awilliam@12795 189 * is assigned. Xen might access page_info of mfn=0, so it must
awilliam@12795 190 * be guaranteed that it exists. Otherwise xen panics with tlb miss
awilliam@12795 191 * fault in xen's virtual address area.
awilliam@12795 192 *
awilliam@12795 193 * Once translate_domain_pte() is fixed correctly, this will
awilliam@12795 194 * be removed.
awilliam@12795 195 */
awilliam@12795 196 if (!mfn_valid(0)) {
awilliam@12795 197 printk("allocating frame table/mpt table at mfn 0.\n");
awilliam@12795 198 create_frametable_page_table(0, PAGE_SIZE, NULL);
awilliam@12795 199 create_mpttable_page_table(0, PAGE_SIZE, NULL);
awilliam@12795 200 }
awilliam@9689 201 }
awilliam@9689 202
awilliam@9689 203 int
awilliam@9689 204 ia64_mfn_valid (unsigned long pfn)
awilliam@9689 205 {
awilliam@9689 206 extern long ia64_frametable_probe(unsigned long);
awilliam@9689 207 struct page_info *pg;
awilliam@9689 208 int valid;
awilliam@9689 209
awilliam@9689 210 if (opt_contig_mem)
awilliam@9689 211 return 1;
awilliam@9689 212 pg = mfn_to_page(pfn);
awilliam@9689 213 valid = ia64_frametable_probe((unsigned long)pg);
awilliam@9689 214 /* more check the whole struct of page_info */
awilliam@9689 215 if (valid)
awilliam@9689 216 valid = ia64_frametable_probe((unsigned long)(pg+1)-1);
awilliam@9689 217 return valid;
awilliam@9689 218 }
awilliam@9689 219
awilliam@9689 220 EXPORT_SYMBOL(ia64_mfn_valid);
awilliam@9689 221
awilliam@9689 222 #endif /* CONFIG_VIRTUAL_FRAME_TABLE */
awilliam@9689 223
djm@6458 224 /* FIXME: postpone support to machines with big holes between physical memorys.
djm@6458 225 * Current hack allows only efi memdesc upto 4G place. (See efi.c)
djm@6458 226 */
djm@6458 227 #define FT_ALIGN_SIZE (16UL << 20)
djm@6458 228 void __init init_frametable(void)
djm@6458 229 {
awilliam@9005 230 unsigned long pfn;
awilliam@9689 231 unsigned long frame_table_size;
awilliam@9689 232
awilliam@9689 233 #ifdef CONFIG_VIRTUAL_FRAME_TABLE
awilliam@9689 234 if (!opt_contig_mem) {
awilliam@9689 235 init_virtual_frametable();
awilliam@9689 236 return;
awilliam@9689 237 }
awilliam@9689 238 #endif
awilliam@9689 239
kaf24@8726 240 frame_table_size = max_page * sizeof(struct page_info);
djm@6458 241 frame_table_size = (frame_table_size + PAGE_SIZE - 1) & PAGE_MASK;
djm@6458 242
djm@6458 243 /* Request continuous trunk from boot allocator, since HV
djm@6458 244 * address is identity mapped */
djm@6458 245 pfn = alloc_boot_pages(
djm@6458 246 frame_table_size >> PAGE_SHIFT, FT_ALIGN_SIZE >> PAGE_SHIFT);
djm@6458 247 if (pfn == 0)
djm@6458 248 panic("Not enough memory for frame table.\n");
djm@6458 249
djm@6458 250 frame_table = __va(pfn << PAGE_SHIFT);
djm@6458 251 memset(frame_table, 0, frame_table_size);
djm@6458 252 printk("size of frame_table: %lukB\n",
djm@6458 253 frame_table_size >> 10);
djm@6458 254 }