ia64/xen-unstable

view xen/arch/ia64/xen/xenmem.c @ 9768:63af1c14fa18

[IA64] missed chunk of Kevin's hypercall cleanup patch

Missed this chunk of Kevin's patch when merging with dom0vp changes

Signed-off-by: Kevin Tian <kevin.tian@intel.com>
author awilliam@xenbuild.aw
date Tue Apr 25 22:30:07 2006 -0600 (2006-04-25)
parents f6e8c269f6af
children fcfc614d3713
line source
/*
 * Xen memory allocator routines
 *
 * Copyright (C) 2005 Hewlett-Packard Co
 *     Dan Magenheimer <dan.magenheimer@hp.com>
 * Copyright (C) 2005 Intel Corp.
 *
 * Routines used by ia64 machines with contiguous (or virtually contiguous)
 * memory.
 */

#include <linux/config.h>
#include <asm/pgtable.h>
#include <xen/mm.h>

#ifdef CONFIG_VIRTUAL_FRAME_TABLE
#include <linux/efi.h>
#include <asm/pgalloc.h>

extern pgd_t frametable_pg_dir[];
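
/*
 * frametable_pg_dir is a dedicated page directory for the virtual
 * frame table region; frametable_pgd_offset() picks the pgd entry for
 * a virtual address by masking its top-level index bits.
 */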
#define frametable_pgd_offset(addr) \
    (frametable_pg_dir + (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)))

static unsigned long table_size;
static int opt_contig_mem = 0;
boolean_param("contig_mem", opt_contig_mem);
#else
#define opt_contig_mem 1
#endif

struct page_info *frame_table;
unsigned long max_page;

/*
 * Set up the page tables.
 */
unsigned long *mpt_table;
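
/*
 * Allocate and initialize the machine-to-physical (M2P) table for the
 * contiguous-memory case: one unsigned long per machine frame, with
 * every entry starting out as INVALID_M2P_ENTRY.
 */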
void
paging_init (void)
{
    unsigned int mpt_order;
    unsigned long mpt_table_size;
    unsigned long i;

    if (!opt_contig_mem) {
        /* mpt_table is already allocated at this point. */
        return;
    }

    /* Create the machine-to-physical mapping table.
     * NOTE: as with the frame table, we may later need a virtually
     * mapped mpt table if large memory holes exist.  MAX_ORDER also
     * needs to be raised in common code, which so far only supports
     * 16MB allocations.
     */
    mpt_table_size = max_page * sizeof(unsigned long);
    mpt_order = get_order(mpt_table_size);
    ASSERT(mpt_order <= MAX_ORDER);
    if ((mpt_table = alloc_xenheap_pages(mpt_order)) == NULL)
        panic("Not enough memory to bootstrap Xen.\n");

    printk("machine to physical table: 0x%lx mpt_table_size 0x%lx\n"
           "mpt_order %u max_page 0x%lx\n",
           (u64)mpt_table, mpt_table_size, mpt_order, max_page);
    for (i = 0;
         i < ((1UL << mpt_order) << PAGE_SHIFT) / sizeof(mpt_table[0]);
         i++) {
        mpt_table[i] = INVALID_M2P_ENTRY;
    }
}

#ifdef CONFIG_VIRTUAL_FRAME_TABLE
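
/*
 * Take one page from the boot allocator for use as an intermediate
 * directory level (pgd/pud/pmd) of a table mapping: zero it, account
 * for it in table_size, and return its physical address.
 */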
static inline void *
alloc_dir_page(void)
{
    unsigned long mfn = alloc_boot_pages(1, 1);
    unsigned long dir;
    if (!mfn)
        panic("Not enough memory for virtual frame table!\n");
    ++table_size;
    dir = mfn << PAGE_SHIFT;
    memset(__va(dir), 0, PAGE_SIZE);
    return (void *)dir;
}
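
/*
 * Like alloc_dir_page(), but for leaf pages of a table mapping: every
 * unsigned-long slot of the new page is set to 'fill', and its machine
 * frame number is returned so the caller can install it with set_pte().
 */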
static inline unsigned long
alloc_table_page(unsigned long fill)
{
    unsigned long mfn = alloc_boot_pages(1, 1);
    unsigned long *table;
    unsigned long i;
    if (!mfn)
        panic("Not enough memory for virtual frame table!\n");
    ++table_size;
    table = (unsigned long *)__va((mfn << PAGE_SHIFT));
    for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++)
        table[i] = fill;
    return mfn;
}
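
/*
 * efi_memmap_walk() callback: make sure the slice of the virtual frame
 * table describing the physical range [start, end) is backed by real,
 * zero-filled pages, allocating any missing directory levels on the
 * way down.
 */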
int
create_frametable_page_table (u64 start, u64 end, void *arg)
{
    unsigned long address, start_page, end_page;
    struct page_info *map_start, *map_end;
    pgd_t *pgd;
    pud_t *pud;
    pmd_t *pmd;
    pte_t *pte;

    map_start = frame_table + (__pa(start) >> PAGE_SHIFT);
    map_end = frame_table + (__pa(end) >> PAGE_SHIFT);

    start_page = (unsigned long) map_start & PAGE_MASK;
    end_page = PAGE_ALIGN((unsigned long) map_end);

    for (address = start_page; address < end_page; address += PAGE_SIZE) {
        pgd = frametable_pgd_offset(address);
        if (pgd_none(*pgd))
            pgd_populate(NULL, pgd, alloc_dir_page());
        pud = pud_offset(pgd, address);

        if (pud_none(*pud))
            pud_populate(NULL, pud, alloc_dir_page());
        pmd = pmd_offset(pud, address);

        if (pmd_none(*pmd))
            pmd_populate_kernel(NULL, pmd, alloc_dir_page());
        pte = pte_offset_kernel(pmd, address);

        if (pte_none(*pte))
            set_pte(pte, pfn_pte(alloc_table_page(0), PAGE_KERNEL));
    }
    return 0;
}
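
/*
 * efi_memmap_walk() callback: same page-table walk as above, but for
 * the virtual M2P table; leaf pages are pre-filled with
 * INVALID_M2P_ENTRY instead of zero.
 */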
int
create_mpttable_page_table (u64 start, u64 end, void *arg)
{
    unsigned long address, start_page, end_page;
    unsigned long *map_start, *map_end;
    pgd_t *pgd;
    pud_t *pud;
    pmd_t *pmd;
    pte_t *pte;

    map_start = mpt_table + (__pa(start) >> PAGE_SHIFT);
    map_end = mpt_table + (__pa(end) >> PAGE_SHIFT);

    start_page = (unsigned long) map_start & PAGE_MASK;
    end_page = PAGE_ALIGN((unsigned long) map_end);

    for (address = start_page; address < end_page; address += PAGE_SIZE) {
        pgd = frametable_pgd_offset(address);
        if (pgd_none(*pgd))
            pgd_populate(NULL, pgd, alloc_dir_page());
        pud = pud_offset(pgd, address);

        if (pud_none(*pud))
            pud_populate(NULL, pud, alloc_dir_page());
        pmd = pmd_offset(pud, address);

        if (pmd_none(*pmd))
            pmd_populate_kernel(NULL, pmd, alloc_dir_page());
        pte = pte_offset_kernel(pmd, address);

        if (pte_none(*pte))
            set_pte(pte, pfn_pte(alloc_table_page(INVALID_M2P_ENTRY), PAGE_KERNEL));
    }
    return 0;
}
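
/*
 * Build the virtually mapped frame table and M2P table by walking the
 * EFI memory map, so that only the parts covering real memory ranges
 * consume pages.
 */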
void init_virtual_frametable(void)
{
    /* Allocate virtual frame_table */
    frame_table = (struct page_info *) VIRT_FRAME_TABLE_ADDR;
    table_size = 0;
    efi_memmap_walk(create_frametable_page_table, NULL);

    printk("size of virtual frame_table: %lukB\n",
           ((table_size << PAGE_SHIFT) >> 10));

    /* Allocate virtual mpt_table */
    table_size = 0;
    mpt_table = (unsigned long *)VIRT_FRAME_TABLE_END - max_page;
    efi_memmap_walk(create_mpttable_page_table, NULL);

    printk("virtual machine to physical table: %p size: %lukB\n"
           "max_page: 0x%lx\n",
           mpt_table, ((table_size << PAGE_SHIFT) >> 10), max_page);
}
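
/*
 * Report whether 'pfn' has a usable page_info entry.  Every frame is
 * valid with a contiguous frame table; otherwise probe both the first
 * and the last byte of the entry, since a page_info may straddle an
 * unmapped page of the virtual frame table.
 */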
int
ia64_mfn_valid (unsigned long pfn)
{
    extern long ia64_frametable_probe(unsigned long);
    struct page_info *pg;
    int valid;

    if (opt_contig_mem)
        return 1;
    pg = mfn_to_page(pfn);
    valid = ia64_frametable_probe((unsigned long)pg);
    /* also probe the last byte, so the whole page_info struct is
     * known to be mapped */
    if (valid)
        valid = ia64_frametable_probe((unsigned long)(pg+1)-1);
    return valid;
}

EXPORT_SYMBOL(ia64_mfn_valid);

#endif /* CONFIG_VIRTUAL_FRAME_TABLE */

/* FIXME: support for machines with big holes between physical memory
 * regions is postponed.  The current hack only allows EFI memory
 * descriptors below the 4GB boundary.  (See efi.c)
 */
#define FT_ALIGN_SIZE (16UL << 20)
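
/*
 * Top-level frame table setup: defer to init_virtual_frametable() when
 * a virtual frame table is configured and contig_mem is off; otherwise
 * take one contiguous, 16MB-aligned chunk from the boot allocator and
 * zero it.
 */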
void __init init_frametable(void)
{
    unsigned long pfn;
    unsigned long frame_table_size;

#ifdef CONFIG_VIRTUAL_FRAME_TABLE
    if (!opt_contig_mem) {
        init_virtual_frametable();
        return;
    }
#endif

    frame_table_size = max_page * sizeof(struct page_info);
    frame_table_size = (frame_table_size + PAGE_SIZE - 1) & PAGE_MASK;

    /* Request a contiguous chunk from the boot allocator, since the
     * hypervisor address space is identity mapped */
    pfn = alloc_boot_pages(
        frame_table_size >> PAGE_SHIFT, FT_ALIGN_SIZE >> PAGE_SHIFT);
    if (pfn == 0)
        panic("Not enough memory for frame table.\n");

    frame_table = __va(pfn << PAGE_SHIFT);
    memset(frame_table, 0, frame_table_size);
    printk("size of frame_table: %lukB\n",
           frame_table_size >> 10);
}