ia64/xen-unstable

xen/arch/ia64/xen/xenmem.c @ 12795:85b9711cb895

[IA64] Allocate frame table/mpt table at mfn=0

Allocate frame table/mpt table at mfn=0 even when memory isn't assigned at
mfn=0, as a workaround for translate_domain_pte().

translate_domain_pte() returns mfn=0 when the machine page isn't present,
as a workaround for memory-mapped I/O where no device is assigned.
Xen might access the page_info of mfn=0, so it must be guaranteed to
exist. Otherwise Xen panics with a TLB miss fault in Xen's virtual
address area.

Once translate_domain_pte() is fixed properly, this workaround will be removed.
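
The guard this change adds at the end of init_virtual_frametable() (shown
in full in the source below) is:

    if (!mfn_valid(0)) {
        printk("allocating frame table/mpt table at mfn 0.\n");
        create_frametable_page_table(0, PAGE_SIZE, NULL);
        create_mpttable_page_table(0, PAGE_SIZE, NULL);
    }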

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author awilliam@xenbuild.aw
date Tue Dec 05 13:42:37 2006 -0700 (2006-12-05)
parents 6fae3a36f50b
children 26492c6476f0
/*
 * Xen memory allocator routines
 *
 * Copyright (C) 2005 Hewlett-Packard Co
 *      Dan Magenheimer <dan.magenheimer@hp.com>
 * Copyright (C) 2005 Intel Corp.
 *
 * Routines used by ia64 machines with contiguous (or virtually contiguous)
 * memory.
 */

#include <linux/config.h>
#include <asm/pgtable.h>
#include <xen/mm.h>

#ifdef CONFIG_VIRTUAL_FRAME_TABLE
#include <linux/efi.h>
#include <asm/pgalloc.h>

extern unsigned long frametable_pg_dir[];
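
/*
 * Each level of this three-level tree is one page of 8-byte entries,
 * hence the (1UL << (PAGE_SHIFT - 3)) - 1 index mask in the offset
 * macros below.
 */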
#define FRAMETABLE_PGD_OFFSET(ADDR) \
        (frametable_pg_dir + (((ADDR) >> PGDIR_SHIFT) & \
        ((1UL << (PAGE_SHIFT - 3)) - 1)))

#define FRAMETABLE_PMD_OFFSET(PGD, ADDR) \
        __va((unsigned long *)(PGD) + (((ADDR) >> PMD_SHIFT) & \
        ((1UL << (PAGE_SHIFT - 3)) - 1)))

#define FRAMETABLE_PTE_OFFSET(PMD, ADDR) \
        (pte_t *)__va((unsigned long *)(PMD) + (((ADDR) >> PAGE_SHIFT) & \
        ((1UL << (PAGE_SHIFT - 3)) - 1)))

static unsigned long table_size;
static int opt_contig_mem = 0;
boolean_param("contig_mem", opt_contig_mem);
#else
#define opt_contig_mem 1
#endif
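
/* Without CONFIG_VIRTUAL_FRAME_TABLE the contiguous layout is always
 * used; with it, passing "contig_mem" on the Xen command line can
 * still force the contiguous layout (the default is off). */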

struct page_info *frame_table __read_mostly;
unsigned long max_page;

/*
 * Set up the page tables.
 */
volatile unsigned long *mpt_table __read_mostly;

void
paging_init (void)
{
        unsigned int mpt_order;
        unsigned long mpt_table_size;
        unsigned long i;

        if (!opt_contig_mem) {
                /* mpt_table is already allocated at this point. */
                return;
        }

        /* Create the machine-to-physical mapping table.
         * NOTE: as with the frame table, we may later need a virtually
         * mapped mpt table if a large hole exists. Also, MAX_ORDER needs
         * to be changed in common code, which only supports 16MB so far.
         */
        mpt_table_size = max_page * sizeof(unsigned long);
        mpt_order = get_order(mpt_table_size);
        ASSERT(mpt_order <= MAX_ORDER);
        if ((mpt_table = alloc_xenheap_pages(mpt_order)) == NULL)
                panic("Not enough memory to bootstrap Xen.\n");

        printk("machine to physical table: 0x%lx mpt_table_size 0x%lx\n"
               "mpt_order %u max_page 0x%lx\n",
               (u64)mpt_table, mpt_table_size, mpt_order, max_page);
        for (i = 0;
             i < ((1UL << mpt_order) << PAGE_SHIFT) / sizeof(mpt_table[0]);
             i++) {
                mpt_table[i] = INVALID_M2P_ENTRY;
        }
}
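
/*
 * Sizing illustration (assuming 16KB pages): with 4GB of memory,
 * max_page = 0x40000, so mpt_table needs 0x40000 * 8 bytes = 2MB,
 * allocated by paging_init() above as one power-of-two xenheap block
 * with every entry initialized to INVALID_M2P_ENTRY.
 */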

#ifdef CONFIG_VIRTUAL_FRAME_TABLE

static unsigned long
alloc_dir_page(void)
{
        unsigned long mfn = alloc_boot_pages(1, 1);
        unsigned long dir;
        if (!mfn)
                panic("Not enough memory for virtual frame table!\n");
        ++table_size;
        dir = mfn << PAGE_SHIFT;
        memset(__va(dir), 0, PAGE_SIZE);
        return dir;
}

static inline unsigned long
alloc_table_page(unsigned long fill)
{
        unsigned long mfn = alloc_boot_pages(1, 1);
        unsigned long *table;
        unsigned long i;
        if (!mfn)
                panic("Not enough memory for virtual frame table!\n");
        ++table_size;
        table = (unsigned long *)__va((mfn << PAGE_SHIFT));
        for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++)
                table[i] = fill;
        return mfn;
}
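
/*
 * Back the virtual range [start_page, end_page) with freshly allocated
 * pages, building the PGD/PMD/PTE levels on demand; each new leaf page
 * is pre-filled with 'fill'.
 */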
static void
create_page_table(unsigned long start_page, unsigned long end_page,
                  unsigned long fill)
{
        unsigned long address;
        unsigned long *dir;
        pte_t *pteptr;

        for (address = start_page; address < end_page; address += PAGE_SIZE) {
                dir = FRAMETABLE_PGD_OFFSET(address);
                if (!*dir)
                        *dir = alloc_dir_page();
                dir = FRAMETABLE_PMD_OFFSET(*dir, address);
                if (!*dir)
                        *dir = alloc_dir_page();
                pteptr = FRAMETABLE_PTE_OFFSET(*dir, address);
                if (pte_none(*pteptr))
                        set_pte(pteptr, pfn_pte(alloc_table_page(fill),
                                                PAGE_KERNEL));
        }
}
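
/*
 * efi_memmap_walk() callbacks: back only the slices of frame_table and
 * mpt_table that describe memory actually present in the EFI memory
 * map, leaving the rest of the virtual table unmapped.
 */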
static int
create_frametable_page_table (u64 start, u64 end, void *arg)
{
        struct page_info *map_start, *map_end;
        unsigned long start_page, end_page;

        map_start = frame_table + (__pa(start) >> PAGE_SHIFT);
        map_end = frame_table + (__pa(end) >> PAGE_SHIFT);

        start_page = (unsigned long) map_start & PAGE_MASK;
        end_page = PAGE_ALIGN((unsigned long) map_end);

        create_page_table(start_page, end_page, 0L);
        return 0;
}

static int
create_mpttable_page_table (u64 start, u64 end, void *arg)
{
        unsigned long map_start, map_end;
        unsigned long start_page, end_page;

        map_start = (unsigned long)(mpt_table + (__pa(start) >> PAGE_SHIFT));
        map_end = (unsigned long)(mpt_table + (__pa(end) >> PAGE_SHIFT));

        start_page = map_start & PAGE_MASK;
        end_page = PAGE_ALIGN(map_end);

        create_page_table(start_page, end_page, INVALID_M2P_ENTRY);
        return 0;
}

void init_virtual_frametable(void)
{
        /* Allocate virtual frame_table */
        frame_table = (struct page_info *) VIRT_FRAME_TABLE_ADDR;
        table_size = 0;
        efi_memmap_walk(create_frametable_page_table, NULL);

        printk("size of virtual frame_table: %lukB\n",
               ((table_size << PAGE_SHIFT) >> 10));

        /* Allocate virtual mpt_table */
        table_size = 0;
        mpt_table = (unsigned long *)VIRT_FRAME_TABLE_END - max_page;
        efi_memmap_walk(create_mpttable_page_table, NULL);

        printk("virtual machine to physical table: %p size: %lukB\n"
               "max_page: 0x%lx\n",
               mpt_table, ((table_size << PAGE_SHIFT) >> 10), max_page);

        /*
         * XXX workaround for translate_domain_pte().
         * It returns mfn=0 when the machine page isn't present. This
         * behavior is a workaround for memory-mapped I/O where no device
         * is assigned. Xen might access the page_info of mfn=0, so it
         * must be guaranteed that it exists. Otherwise Xen panics with a
         * TLB miss fault in Xen's virtual address area.
         *
         * Once translate_domain_pte() is fixed properly, this will
         * be removed.
         */
        if (!mfn_valid(0)) {
                printk("allocating frame table/mpt table at mfn 0.\n");
                create_frametable_page_table(0, PAGE_SIZE, NULL);
                create_mpttable_page_table(0, PAGE_SIZE, NULL);
        }
}

int
ia64_mfn_valid (unsigned long pfn)
{
        extern long ia64_frametable_probe(unsigned long);
        struct page_info *pg;
        int valid;

        if (opt_contig_mem)
                return 1;
        pg = mfn_to_page(pfn);
        valid = ia64_frametable_probe((unsigned long)pg);
        /* Also probe the last byte, so the whole page_info
         * struct is known to be mapped. */
        if (valid)
                valid = ia64_frametable_probe((unsigned long)(pg+1)-1);
        return valid;
}

EXPORT_SYMBOL(ia64_mfn_valid);

#endif /* CONFIG_VIRTUAL_FRAME_TABLE */

/* FIXME: postpone support for machines with big holes between physical
 * memories. The current hack only allows EFI memory descriptors up to
 * the 4GB mark. (See efi.c)
 */
#define FT_ALIGN_SIZE (16UL << 20)
void __init init_frametable(void)
{
        unsigned long pfn;
        unsigned long frame_table_size;

#ifdef CONFIG_VIRTUAL_FRAME_TABLE
        if (!opt_contig_mem) {
                init_virtual_frametable();
                return;
        }
#endif

        frame_table_size = max_page * sizeof(struct page_info);
        frame_table_size = (frame_table_size + PAGE_SIZE - 1) & PAGE_MASK;

        /* Request a contiguous chunk from the boot allocator, since the
         * hypervisor address space is identity mapped */
        pfn = alloc_boot_pages(
                frame_table_size >> PAGE_SHIFT, FT_ALIGN_SIZE >> PAGE_SHIFT);
        if (pfn == 0)
                panic("Not enough memory for frame table.\n");

        frame_table = __va(pfn << PAGE_SHIFT);
        memset(frame_table, 0, frame_table_size);
        printk("size of frame_table: %lukB\n",
               frame_table_size >> 10);
}
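
/*
 * Sizing illustration (assuming a 32-byte struct page_info and 16KB
 * pages): 4GB of memory gives max_page = 0x40000, so frame_table_size
 * = 0x40000 * 32 = 8MB, rounded up to a page multiple and allocated on
 * a 16MB boundary per FT_ALIGN_SIZE.
 */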