ia64/xen-unstable

xen/arch/ia64/xen/xenmem.c @ 19848:5839491bbf20

[IA64] replace MAX_VCPUS with d->max_vcpus where necessary.

Don't use MAX_VCPUS; use domain::max_vcpus instead.
Changeset 2f9e1348aa98 introduced max_vcpus to allow more vcpus
per guest. This patch is the ia64 counterpart (see the sketch below).

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Isaku Yamahata <yamahata@valinux.co.jp>
date Mon Jun 29 11:26:05 2009 +0900 (2009-06-29)
parents 78bea2f2b0e5
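
A minimal sketch of the pattern this changeset describes (illustrative
only, not code from the patch; vcpu_work() is a hypothetical helper):
per-domain vcpu arrays are iterated up to d->max_vcpus rather than the
compile-time MAX_VCPUS ceiling.

    /* Before: compile-time bound shared by all guests. */
    for (i = 0; i < MAX_VCPUS; i++)
        if (d->vcpu[i] != NULL)
            vcpu_work(d->vcpu[i]);  /* hypothetical per-vcpu work */

    /* After: per-domain bound introduced by changeset 2f9e1348aa98. */
    for (i = 0; i < d->max_vcpus; i++)
        if (d->vcpu[i] != NULL)
            vcpu_work(d->vcpu[i]);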
/*
 * Xen memory allocator routines
 *
 * Copyright (C) 2005 Hewlett-Packard Co
 *     Dan Magenheimer <dan.magenheimer@hp.com>
 * Copyright (C) 2005 Intel Corp.
 *
 * Routines used by ia64 machines with contiguous (or virtually contiguous)
 * memory.
 */

#include <linux/config.h>
#include <asm/pgtable.h>
#include <xen/mm.h>

#ifdef CONFIG_VIRTUAL_FRAME_TABLE
#include <linux/efi.h>
#include <asm/pgalloc.h>

#define FRAMETABLE_PGD_OFFSET(ADDR) \
    (frametable_pg_dir + (((ADDR) >> PGDIR_SHIFT) & \
    ((1UL << (PAGE_SHIFT - 3)) - 1)))

#define FRAMETABLE_PMD_OFFSET(PGD, ADDR) \
    __va((unsigned long *)(PGD) + (((ADDR) >> PMD_SHIFT) & \
    ((1UL << (PAGE_SHIFT - 3)) - 1)))

#define FRAMETABLE_PTE_OFFSET(PMD, ADDR) \
    (pte_t *)__va((unsigned long *)(PMD) + (((ADDR) >> PAGE_SHIFT) & \
    ((1UL << (PAGE_SHIFT - 3)) - 1)))
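
/*
 * Illustrative note (not part of the original file): each level of the
 * frame-table page table is one page of 8-byte entries, hence the
 * (PAGE_SHIFT - 3) in the masks above. Assuming the usual ia64 Xen
 * configuration of 16KB pages (PAGE_SHIFT == 14):
 *
 *     entries per level              = 1UL << (14 - 3) = 2048
 *     span of one populated PTE page = 2048 * 16KB     = 32MB
 */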
static unsigned long table_size;
static int opt_contig_mem = 0;
boolean_param("contig_mem", opt_contig_mem);
#else
#define opt_contig_mem 1
#endif

struct page_info *frame_table __read_mostly;
unsigned long max_page;

/*
 * Set up the page tables.
 */
volatile unsigned long *mpt_table __read_mostly;
void __init
paging_init (void)
{
    unsigned int mpt_order;
    unsigned long mpt_table_size;
    struct page_info *page;
    unsigned long i;

    if (!opt_contig_mem) {
        /* mpt_table is already allocated at this point. */
        return;
    }

    /* Create the machine-to-physical mapping table.
     * NOTE: as with the frame table, we may later need a virtually
     * mapped mpt table if a large memory hole exists. MAX_ORDER would
     * also need to be changed in common code, which only supports 16MB
     * so far.
     */
    mpt_table_size = max_page * sizeof(unsigned long);
    mpt_order = get_order(mpt_table_size);
    ASSERT(mpt_order <= MAX_ORDER);
    page = alloc_domheap_pages(NULL, mpt_order, 0);
    if (page == NULL)
        panic("Not enough memory to bootstrap Xen.\n");

    mpt_table = page_to_virt(page);
    printk("machine to physical table: 0x%lx mpt_table_size 0x%lx\n"
           "mpt_order %u max_page 0x%lx\n",
           (u64)mpt_table, mpt_table_size, mpt_order, max_page);
    for (i = 0;
         i < ((1UL << mpt_order) << PAGE_SHIFT) / sizeof(mpt_table[0]);
         i++) {
        mpt_table[i] = INVALID_M2P_ENTRY;
    }
}
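
/*
 * Illustrative sketch (hypothetical helper, not part of this file): once
 * paging_init() has run, a machine-to-physical lookup is just an index
 * into mpt_table, with INVALID_M2P_ENTRY marking unmapped frames.
 */
static inline unsigned long sketch_machine_to_phys(unsigned long mfn)
{
    /* Caller must ensure mfn < max_page. */
    return mpt_table[mfn];  /* INVALID_M2P_ENTRY if not owned by a guest */
}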
#ifdef CONFIG_VIRTUAL_FRAME_TABLE

static unsigned long __init
alloc_dir_page(void)
{
    unsigned long mfn = alloc_boot_pages(1, 1);
    unsigned long dir;
    if (!mfn)
        panic("Not enough memory for virtual frame table!\n");
    ++table_size;
    dir = mfn << PAGE_SHIFT;
    clear_page(__va(dir));
    return dir;
}

static inline unsigned long __init
alloc_table_page(unsigned long fill)
{
    unsigned long mfn = alloc_boot_pages(1, 1);
    unsigned long *table;
    unsigned long i;
    if (!mfn)
        panic("Not enough memory for virtual frame table!\n");
    ++table_size;
    table = (unsigned long *)__va((mfn << PAGE_SHIFT));
    for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++)
        table[i] = fill;
    return mfn;
}

static void __init
create_page_table(unsigned long start_page, unsigned long end_page,
                  unsigned long fill)
{
    unsigned long address;
    unsigned long *dir;
    pte_t *pteptr;

    for (address = start_page; address < end_page; address += PAGE_SIZE) {
        dir = FRAMETABLE_PGD_OFFSET(address);
        if (!*dir)
            *dir = alloc_dir_page();
        dir = FRAMETABLE_PMD_OFFSET(*dir, address);
        if (!*dir)
            *dir = alloc_dir_page();
        pteptr = FRAMETABLE_PTE_OFFSET(*dir, address);
        if (pte_none(*pteptr))
            set_pte(pteptr, pfn_pte(alloc_table_page(fill),
                                    PAGE_KERNEL));
    }
}
static int __init
create_frametable_page_table (u64 start, u64 end, void *arg)
{
    struct page_info *map_start, *map_end;
    unsigned long start_page, end_page;

    map_start = frame_table + (__pa(start) >> PAGE_SHIFT);
    map_end = frame_table + (__pa(end) >> PAGE_SHIFT);

    start_page = (unsigned long) map_start & PAGE_MASK;
    end_page = PAGE_ALIGN((unsigned long) map_end);

    create_page_table(start_page, end_page, 0L);
    return 0;
}

static int __init
create_mpttable_page_table (u64 start, u64 end, void *arg)
{
    unsigned long map_start, map_end;
    unsigned long start_page, end_page;

    map_start = (unsigned long)(mpt_table + (__pa(start) >> PAGE_SHIFT));
    map_end = (unsigned long)(mpt_table + (__pa(end) >> PAGE_SHIFT));

    start_page = map_start & PAGE_MASK;
    end_page = PAGE_ALIGN(map_end);

    create_page_table(start_page, end_page, INVALID_M2P_ENTRY);
    return 0;
}
void __init init_virtual_frametable(void)
{
    /* Allocate virtual frame_table */
    frame_table = (struct page_info *) VIRT_FRAME_TABLE_ADDR;
    table_size = 0;
    efi_memmap_walk(create_frametable_page_table, NULL);

    printk("size of virtual frame_table: %lukB\n",
           ((table_size << PAGE_SHIFT) >> 10));

    /* Allocate virtual mpt_table */
    table_size = 0;
    mpt_table = (unsigned long *)VIRT_FRAME_TABLE_END - max_page;
    efi_memmap_walk(create_mpttable_page_table, NULL);

    printk("virtual machine to physical table: %p size: %lukB\n"
           "max_page: 0x%lx\n",
           mpt_table, ((table_size << PAGE_SHIFT) >> 10), max_page);

    /*
     * XXX workaround for translate_domain_pte().
     * It returns mfn=0 when the machine page isn't present. That
     * behavior is a workaround for memory-mapped I/O where no device
     * is assigned. Xen might access the page_info of mfn=0, so its
     * existence must be guaranteed; otherwise Xen panics with a
     * TLB-miss fault in its virtual address area.
     *
     * Once translate_domain_pte() is fixed, this will be removed.
     */
    if (!mfn_valid(0)) {
        printk("allocating frame table/mpt table at mfn 0.\n");
        create_frametable_page_table(0, PAGE_SIZE, NULL);
        create_mpttable_page_table(0, PAGE_SIZE, NULL);
    }
}
int
ia64_mfn_valid (unsigned long pfn)
{
    extern long ia64_frametable_probe(unsigned long);
    struct page_info *pg;
    int valid;

    if (opt_contig_mem)
        return 1;
    pg = mfn_to_page(pfn);
    valid = ia64_frametable_probe((unsigned long)pg);
    /* also probe the end, to check the whole page_info struct */
    if (valid)
        valid = ia64_frametable_probe((unsigned long)(pg+1)-1);
    return valid;
}

EXPORT_SYMBOL(ia64_mfn_valid);

#endif /* CONFIG_VIRTUAL_FRAME_TABLE */
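
/*
 * Illustrative usage (hypothetical caller, not part of this file): with a
 * virtual frame table, the struct page_info backing an arbitrary mfn may
 * be unmapped, so probe it with ia64_mfn_valid() before dereferencing.
 */
static inline struct page_info *sketch_lookup_page(unsigned long mfn)
{
    if (!ia64_mfn_valid(mfn))
        return NULL;            /* no frame table entry backing this mfn */
    return mfn_to_page(mfn);    /* now safe to dereference */
}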
/* FIXME: support for machines with big holes between physical memory
 * ranges is postponed. The current hack only allows EFI memory
 * descriptors placed below 4GB. (See efi.c.)
 */
#define FT_ALIGN_SIZE (16UL << 20)
void __init init_frametable(void)
{
    unsigned long pfn;
    unsigned long frame_table_size;

#ifdef CONFIG_VIRTUAL_FRAME_TABLE
    if (!opt_contig_mem) {
        init_virtual_frametable();
        return;
    }
#endif

    frame_table_size = max_page * sizeof(struct page_info);
    frame_table_size = (frame_table_size + PAGE_SIZE - 1) & PAGE_MASK;

    /* Request a contiguous chunk from the boot allocator, since the
     * hypervisor address space is identity mapped */
    pfn = alloc_boot_pages(
        frame_table_size >> PAGE_SHIFT, FT_ALIGN_SIZE >> PAGE_SHIFT);
    if (pfn == 0)
        panic("Not enough memory for frame table.\n");

    frame_table = __va(pfn << PAGE_SHIFT);
    memset(frame_table, 0, frame_table_size);
    printk("size of frame_table: %lukB\n",
           frame_table_size >> 10);
}
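
/*
 * Worked sizing example (illustrative; sizeof(struct page_info) is
 * configuration dependent and assumed here to be 64 bytes): a machine
 * with max_page = 0x100000 (1M frames of 16KB, i.e. 16GB of RAM) needs
 *
 *     frame_table_size = 0x100000 * 64 = 64MB
 *
 * rounded up to a page multiple, which alloc_boot_pages() must satisfy
 * as one physically contiguous block aligned to FT_ALIGN_SIZE (16MB).
 */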