ia64/xen-unstable

view xen/arch/ia64/xen/xenmem.c @ 17871:0df3bf8aac1e

[IA64] trivial compilation fix caused by c/s 17847:8a0415fac759.

This patch fixes the following compilation error caused
by c/s 17847:8a0415fac759.

machine_kexec.c: In function 'arch_crash_save_vmcoreinfo':
machine_kexec.c:201: error: 'frametable_pg_dir' undeclared (first use in this function)
machine_kexec.c:201: error: (Each undeclared identifier is reported only once
machine_kexec.c:201: error: for each function it appears in.)

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Keir Fraser <keir.fraser@citrix.com>
date Mon Jun 16 16:35:17 2008 +0100 (2008-06-16)
parents 005dd6b1cf8e
children 696351cde9a4
line source
1 /*
2 * Xen memory allocator routines
3 *
4 * Copyright (C) 2005 Hewlett-Packard Co
5 * Dan Magenheimer <dan.magenheimer@hp.com>
6 * Copyright (C) 2005 Intel Corp.
7 *
8 * Routines used by ia64 machines with contiguous (or virtually contiguous)
9 * memory.
10 */
12 #include <linux/config.h>
13 #include <asm/pgtable.h>
14 #include <xen/mm.h>
16 #ifdef CONFIG_VIRTUAL_FRAME_TABLE
17 #include <linux/efi.h>
18 #include <asm/pgalloc.h>
20 #define FRAMETABLE_PGD_OFFSET(ADDR) \
21 (frametable_pg_dir + (((ADDR) >> PGDIR_SHIFT) & \
22 ((1UL << (PAGE_SHIFT - 3)) - 1)))
24 #define FRAMETABLE_PMD_OFFSET(PGD, ADDR) \
25 __va((unsigned long *)(PGD) + (((ADDR) >> PMD_SHIFT) & \
26 ((1UL << (PAGE_SHIFT - 3)) - 1)))
28 #define FRAMETABLE_PTE_OFFSET(PMD, ADDR) \
29 (pte_t *)__va((unsigned long *)(PMD) + (((ADDR) >> PAGE_SHIFT) & \
30 ((1UL << (PAGE_SHIFT - 3)) - 1)))
32 static unsigned long table_size;
33 static int opt_contig_mem = 0;
34 boolean_param("contig_mem", opt_contig_mem);
35 #else
36 #define opt_contig_mem 1
37 #endif
39 struct page_info *frame_table __read_mostly;
40 unsigned long max_page;
42 /*
43 * Set up the page tables.
44 */
45 volatile unsigned long *mpt_table __read_mostly;
47 void __init
48 paging_init (void)
49 {
50 unsigned int mpt_order;
51 unsigned long mpt_table_size;
52 unsigned long i;
54 if (!opt_contig_mem) {
55 /* mpt_table is already allocated at this point. */
56 return;
57 }
59 /* Create machine to physical mapping table
60 * NOTE: similar to frame table, later we may need virtually
61 * mapped mpt table if large hole exists. Also MAX_ORDER needs
62 * to be changed in common code, which only support 16M by far
63 */
64 mpt_table_size = max_page * sizeof(unsigned long);
65 mpt_order = get_order(mpt_table_size);
66 ASSERT(mpt_order <= MAX_ORDER);
67 if ((mpt_table = alloc_xenheap_pages(mpt_order)) == NULL)
68 panic("Not enough memory to bootstrap Xen.\n");
70 printk("machine to physical table: 0x%lx mpt_table_size 0x%lx\n"
71 "mpt_order %u max_page 0x%lx\n",
72 (u64)mpt_table, mpt_table_size, mpt_order, max_page);
73 for (i = 0;
74 i < ((1UL << mpt_order) << PAGE_SHIFT) / sizeof(mpt_table[0]);
75 i++) {
76 mpt_table[i] = INVALID_M2P_ENTRY;
77 }
78 }
80 #ifdef CONFIG_VIRTUAL_FRAME_TABLE
82 static unsigned long __init
83 alloc_dir_page(void)
84 {
85 unsigned long mfn = alloc_boot_pages(1, 1);
86 unsigned long dir;
87 if (!mfn)
88 panic("Not enough memory for virtual frame table!\n");
89 ++table_size;
90 dir = mfn << PAGE_SHIFT;
91 clear_page(__va(dir));
92 return dir;
93 }
95 static inline unsigned long __init
96 alloc_table_page(unsigned long fill)
97 {
98 unsigned long mfn = alloc_boot_pages(1, 1);
99 unsigned long *table;
100 unsigned long i;
101 if (!mfn)
102 panic("Not enough memory for virtual frame table!\n");
103 ++table_size;
104 table = (unsigned long *)__va((mfn << PAGE_SHIFT));
105 for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++)
106 table[i] = fill;
107 return mfn;
108 }
/*
 * Populate the 3-level page tables that back the virtual range
 * [start_page, end_page).  Missing directory levels are allocated on
 * demand, and each newly mapped leaf page is pre-filled with 'fill'.
 *
 * NOTE(review): FRAMETABLE_PMD_OFFSET applies __va() to the stored
 * entry, which suggests directory entries hold machine addresses as
 * returned by alloc_dir_page() -- confirm against the macro
 * definitions at the top of this file.
 */
static void __init
create_page_table(unsigned long start_page, unsigned long end_page,
                  unsigned long fill)
{
	unsigned long address;
	unsigned long *dir;
	pte_t *pteptr;

	for (address = start_page; address < end_page; address += PAGE_SIZE) {
		/* Level 1: allocate the pgd-level directory page if absent. */
		dir = FRAMETABLE_PGD_OFFSET(address);
		if (!*dir)
			*dir = alloc_dir_page();
		/* Level 2: descend into (allocating if needed) the pmd level. */
		dir = FRAMETABLE_PMD_OFFSET(*dir, address);
		if (!*dir)
			*dir = alloc_dir_page();
		/* Level 3: install a pre-filled data page if the pte is empty. */
		pteptr = FRAMETABLE_PTE_OFFSET(*dir, address);
		if (pte_none(*pteptr))
			set_pte(pteptr, pfn_pte(alloc_table_page(fill),
			                        PAGE_KERNEL));
	}
}
132 static int __init
133 create_frametable_page_table (u64 start, u64 end, void *arg)
134 {
135 struct page_info *map_start, *map_end;
136 unsigned long start_page, end_page;
138 map_start = frame_table + (__pa(start) >> PAGE_SHIFT);
139 map_end = frame_table + (__pa(end) >> PAGE_SHIFT);
141 start_page = (unsigned long) map_start & PAGE_MASK;
142 end_page = PAGE_ALIGN((unsigned long) map_end);
144 create_page_table(start_page, end_page, 0L);
145 return 0;
146 }
148 static int __init
149 create_mpttable_page_table (u64 start, u64 end, void *arg)
150 {
151 unsigned long map_start, map_end;
152 unsigned long start_page, end_page;
154 map_start = (unsigned long)(mpt_table + (__pa(start) >> PAGE_SHIFT));
155 map_end = (unsigned long)(mpt_table + (__pa(end) >> PAGE_SHIFT));
157 start_page = map_start & PAGE_MASK;
158 end_page = PAGE_ALIGN(map_end);
160 create_page_table(start_page, end_page, INVALID_M2P_ENTRY);
161 return 0;
162 }
/*
 * Set up the virtually mapped frame table and M2P table.  Instead of
 * one contiguous allocation, only the table pages that describe memory
 * actually present in the EFI memory map are mapped, which avoids
 * wasting space when physical memory has large holes.
 *
 * Note the ordering: table_size is a file-scope page counter reset
 * before each walk so each printk reports that table's size alone.
 */
void __init init_virtual_frametable(void)
{
	/* Allocate virtual frame_table */
	frame_table = (struct page_info *) VIRT_FRAME_TABLE_ADDR;
	table_size = 0;
	efi_memmap_walk(create_frametable_page_table, NULL);

	printk("size of virtual frame_table: %lukB\n",
	       ((table_size << PAGE_SHIFT) >> 10));

	/* Allocate virtual mpt_table */
	table_size = 0;
	/* Place mpt_table so its last entry ends at VIRT_FRAME_TABLE_END. */
	mpt_table = (unsigned long *)VIRT_FRAME_TABLE_END - max_page;
	efi_memmap_walk(create_mpttable_page_table, NULL);

	printk("virtual machine to physical table: %p size: %lukB\n"
	       "max_page: 0x%lx\n",
	       mpt_table, ((table_size << PAGE_SHIFT) >> 10), max_page);

	/*
	 * XXX work around for translate_domain_pte().
	 * It returns mfn=0 when the machine page isn't present. This
	 * behavior is a work around for memory mapped I/O where no device
	 * is assigned. Xen might access page_info of mfn=0, so it must
	 * be guaranteed that it exists. Otherwise xen panics with tlb miss
	 * fault in xen's virtual address area.
	 *
	 * Once translate_domain_pte() is fixed correctly, this will
	 * be removed.
	 */
	if (!mfn_valid(0)) {
		printk("allocating frame table/mpt table at mfn 0.\n");
		create_frametable_page_table(0, PAGE_SIZE, NULL);
		create_mpttable_page_table(0, PAGE_SIZE, NULL);
	}
}
201 int
202 ia64_mfn_valid (unsigned long pfn)
203 {
204 extern long ia64_frametable_probe(unsigned long);
205 struct page_info *pg;
206 int valid;
208 if (opt_contig_mem)
209 return 1;
210 pg = mfn_to_page(pfn);
211 valid = ia64_frametable_probe((unsigned long)pg);
212 /* more check the whole struct of page_info */
213 if (valid)
214 valid = ia64_frametable_probe((unsigned long)(pg+1)-1);
215 return valid;
216 }
218 EXPORT_SYMBOL(ia64_mfn_valid);
220 #endif /* CONFIG_VIRTUAL_FRAME_TABLE */
222 /* FIXME: postpone support to machines with big holes between physical memorys.
223 * Current hack allows only efi memdesc upto 4G place. (See efi.c)
224 */
225 #define FT_ALIGN_SIZE (16UL << 20)
226 void __init init_frametable(void)
227 {
228 unsigned long pfn;
229 unsigned long frame_table_size;
231 #ifdef CONFIG_VIRTUAL_FRAME_TABLE
232 if (!opt_contig_mem) {
233 init_virtual_frametable();
234 return;
235 }
236 #endif
238 frame_table_size = max_page * sizeof(struct page_info);
239 frame_table_size = (frame_table_size + PAGE_SIZE - 1) & PAGE_MASK;
241 /* Request continuous trunk from boot allocator, since HV
242 * address is identity mapped */
243 pfn = alloc_boot_pages(
244 frame_table_size >> PAGE_SHIFT, FT_ALIGN_SIZE >> PAGE_SHIFT);
245 if (pfn == 0)
246 panic("Not enough memory for frame table.\n");
248 frame_table = __va(pfn << PAGE_SHIFT);
249 memset(frame_table, 0, frame_table_size);
250 printk("size of frame_table: %lukB\n",
251 frame_table_size >> 10);
252 }