/******************************************************************************
 * arch/x86/x86_32/mm.c
 *
 * Modifications to Linux original are copyright (c) 2004, K A Fraser
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <xen/config.h>
#include <xen/lib.h>
#include <xen/init.h>
#include <xen/mm.h>
#include <xen/sched.h>
#include <asm/current.h>
#include <asm/page.h>
#include <asm/flushtlb.h>
#include <asm/fixmap.h>

extern l1_pgentry_t *mapcache;

unsigned int PAGE_HYPERVISOR = __PAGE_HYPERVISOR;
unsigned int PAGE_HYPERVISOR_NOCACHE = __PAGE_HYPERVISOR_NOCACHE;

static unsigned long mpt_size;
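
/*
 * During early boot, before the xenheap allocator is up, page-table pages
 * are carved straight off the start of the Xen heap by advancing
 * xenheap_phys_start one page at a time.
 */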
struct pfn_info *alloc_xen_pagetable(void)
{
    extern int early_boot;
    extern unsigned long xenheap_phys_start;
    struct pfn_info *pg;

    if ( !early_boot )
    {
        void *v = alloc_xenheap_page();
        return ((v == NULL) ? NULL : virt_to_page(v));
    }

    pg = phys_to_page(xenheap_phys_start);
    xenheap_phys_start += PAGE_SIZE;
    return pg;
}

void free_xen_pagetable(struct pfn_info *pg)
{
    free_xenheap_page(page_to_virt(pg));
}

l2_pgentry_t *virt_to_xen_l2e(unsigned long v)
{
    return &idle_pg_table_l2[l2_linear_offset(v)];
}

void __init paging_init(void)
{
    void *ioremap_pt;
    unsigned long v;
    struct pfn_info *pg;
    int i, mapcache_order;

#ifdef CONFIG_X86_PAE
    printk("PAE enabled, limit: %d GB\n", MACHPHYS_MBYTES);
#else
    printk("PAE disabled.\n");
#endif

    idle0_vcpu.arch.monitor_table = mk_pagetable(__pa(idle_pg_table));

    if ( cpu_has_pge )
    {
        /* Suitable Xen mapping can be GLOBAL. */
        set_in_cr4(X86_CR4_PGE);
        PAGE_HYPERVISOR |= _PAGE_GLOBAL;
        PAGE_HYPERVISOR_NOCACHE |= _PAGE_GLOBAL;
        /* Transform early mappings (e.g., the frametable). */
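        /*
         * NB: the loop below terminates when 'v' wraps around to zero after
         * stepping past the top of the 32-bit address space.
         */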
        for ( v = HYPERVISOR_VIRT_START; v; v += (1 << L2_PAGETABLE_SHIFT) )
            if ( (l2e_get_flags(idle_pg_table_l2[l2_linear_offset(v)]) &
                  (_PAGE_PSE|_PAGE_PRESENT)) == (_PAGE_PSE|_PAGE_PRESENT) )
                l2e_add_flags(idle_pg_table_l2[l2_linear_offset(v)],
                              _PAGE_GLOBAL);
    }

    /*
     * Allocate and map the machine-to-phys table and create read-only mapping
     * of MPT for guest-OS use.
     */
    mpt_size  = (max_page * BYTES_PER_LONG) + (1UL << L2_PAGETABLE_SHIFT) - 1;
    mpt_size &= ~((1UL << L2_PAGETABLE_SHIFT) - 1UL);
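    /*
     * e.g., with 1GB of RAM (max_page == 0x40000) and 4-byte longs the table
     * is 1MB, rounded up here to one 4MB superpage (assuming the non-PAE
     * L2_PAGETABLE_SHIFT of 22).
     */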
    for ( i = 0; i < (mpt_size >> L2_PAGETABLE_SHIFT); i++ )
    {
        if ( (pg = alloc_domheap_pages(NULL, PAGETABLE_ORDER, 0)) == NULL )
            panic("Not enough memory to bootstrap Xen.\n");
        idle_pg_table_l2[l2_linear_offset(RDWR_MPT_VIRT_START) + i] =
            l2e_from_page(pg, PAGE_HYPERVISOR | _PAGE_PSE);
        idle_pg_table_l2[l2_linear_offset(RO_MPT_VIRT_START) + i] =
            l2e_from_page(pg, (__PAGE_HYPERVISOR | _PAGE_PSE) & ~_PAGE_RW);
    }
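    /* Fill with a recognisable poison value; real M2P entries come later. */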
    memset((void *)RDWR_MPT_VIRT_START, 0x55, mpt_size);

    /* Create page tables for ioremap(). */
    for ( i = 0; i < (IOREMAP_MBYTES >> (L2_PAGETABLE_SHIFT - 20)); i++ )
    {
        ioremap_pt = alloc_xenheap_page();
        clear_page(ioremap_pt);
        idle_pg_table_l2[l2_linear_offset(IOREMAP_VIRT_START) + i] =
            l2e_from_page(virt_to_page(ioremap_pt), __PAGE_HYPERVISOR);
    }

    /* Set up mapping cache for domain pages. */
    mapcache_order = get_order_from_bytes(
        MAPCACHE_MBYTES << (20 - PAGETABLE_ORDER));
    mapcache = alloc_xenheap_pages(mapcache_order);
    memset(mapcache, 0, PAGE_SIZE << mapcache_order);
    for ( i = 0; i < (MAPCACHE_MBYTES >> (L2_PAGETABLE_SHIFT - 20)); i++ )
        idle_pg_table_l2[l2_linear_offset(MAPCACHE_VIRT_START) + i] =
            l2e_from_page(virt_to_page(mapcache) + i, __PAGE_HYPERVISOR);
}
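
/*
 * Remove the transient low-memory 1:1 mappings used while booting: only L2
 * entries that still map physical address == virtual address are zapped.
 */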
void __init zap_low_mappings(l2_pgentry_t *base)
{
    int i;
    u32 addr;

    for ( i = 0; ; i++ )
    {
        addr = (i << L2_PAGETABLE_SHIFT);
        if ( addr >= HYPERVISOR_VIRT_START )
            break;
        if ( l2e_get_paddr(base[i]) != addr )
            continue;
        base[i] = l2e_empty();
    }

    flush_tlb_all_pge();
}

void subarch_init_memory(struct domain *dom_xen)
{
    unsigned long m2p_start_mfn;
    unsigned int i, j;

    /*
     * We are rather picky about the layout of 'struct pfn_info'. The
     * count_info and domain fields must be adjacent, as we perform atomic
     * 64-bit operations on them. Also, just for sanity, we assert the size
     * of the structure here.
     */
    if ( (offsetof(struct pfn_info, u.inuse._domain) !=
          (offsetof(struct pfn_info, count_info) + sizeof(u32))) ||
         (sizeof(struct pfn_info) != 24) )
    {
        printk("Weird pfn_info layout (%ld,%ld,%d)\n",
               offsetof(struct pfn_info, count_info),
               offsetof(struct pfn_info, u.inuse._domain),
               sizeof(struct pfn_info));
        BUG();
    }

    /* M2P table is mappable read-only by privileged domains. */
    for ( i = 0; i < (mpt_size >> L2_PAGETABLE_SHIFT); i++ )
    {
        m2p_start_mfn = l2e_get_pfn(
            idle_pg_table_l2[l2_linear_offset(RDWR_MPT_VIRT_START) + i]);
        for ( j = 0; j < L2_PAGETABLE_ENTRIES; j++ )
        {
            frame_table[m2p_start_mfn+j].count_info = PGC_allocated | 1;
            /* Ensure it's only mapped read-only by domains. */
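            /* (A GDT-type page can never be mapped writable by a guest.) */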
            frame_table[m2p_start_mfn+j].u.inuse.type_info = PGT_gdt_page | 1;
            page_set_owner(&frame_table[m2p_start_mfn+j], dom_xen);
        }
    }
}

long do_stack_switch(unsigned long ss, unsigned long esp)
{
    int nr = smp_processor_id();
    struct tss_struct *t = &init_tss[nr];
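
    /* Guest kernels run in ring 1 on x86/32, so the new SS must have RPL 1. */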
    if ( (ss & 3) != 1 )
        return -EPERM;

    current->arch.guest_context.kernel_ss = ss;
    current->arch.guest_context.kernel_sp = esp;
    t->ss1  = ss;
    t->esp1 = esp;

    return 0;
}

/* Returns TRUE if given descriptor is valid for GDT or LDT. */
int check_descriptor(struct desc_struct *d)
{
    unsigned long base, limit;
    u32 a = d->a, b = d->b;

    /* A not-present descriptor will always fault, so is safe. */
    if ( !(b & _SEGMENT_P) )
        goto good;

    /*
     * We don't allow a DPL of zero. There is no legitimate reason for
     * specifying DPL==0, and it gets rather dangerous if we also accept call
     * gates (consider a call gate pointing at another kernel descriptor with
     * DPL 0 -- this would give the OS ring-0 privileges).
     */
    if ( (b & _SEGMENT_DPL) == 0 )
        goto bad;

    if ( !(b & _SEGMENT_S) )
    {
        /*
         * System segment:
         *  1. Don't allow interrupt or trap gates as they belong in the IDT.
         *  2. Don't allow TSS descriptors or task gates as we don't
         *     virtualise x86 tasks.
         *  3. Don't allow LDT descriptors because they're unnecessary and
         *     I'm uneasy about allowing an LDT page to contain LDT
         *     descriptors. In any case, Xen automatically creates the
         *     required descriptor when reloading the LDT register.
         *  4. We allow call gates but they must not jump to a private
         *     segment.
         */

        /* Disallow everything but call gates. */
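        /* (System-descriptor type 0xC is a 32-bit call gate.) */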
        if ( (b & _SEGMENT_TYPE) != 0xc00 )
            goto bad;

        /* Can't allow far jump to a Xen-private segment. */
        if ( !VALID_CODESEL(a>>16) )
            goto bad;

        /* Reserved bits must be zero. */
        if ( (b & 0xe0) != 0 )
            goto bad;

        /* No base/limit check is needed for a call gate. */
        goto good;
    }

    /* Check that base is at least a page away from Xen-private area. */
    base = (b & (0xff<<24)) | ((b & 0xff) << 16) | (a >> 16);
    if ( base >= (GUEST_SEGMENT_MAX_ADDR - PAGE_SIZE) )
        goto bad;

    /* Check and truncate the limit if necessary. */
    limit = (b & 0xf0000) | (a & 0xffff);
    limit++; /* We add one because limit is inclusive. */
    if ( (b & _SEGMENT_G) )
        limit <<= 12;
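    /*
     * e.g., with G set, a limit field of 0x003FF covers (0x3FF + 1) << 12
     * bytes, i.e. 4MB.
     */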

    if ( (b & (_SEGMENT_CODE | _SEGMENT_EC)) == _SEGMENT_EC )
    {
        /*
         * DATA, GROWS-DOWN.
         * Grows-down limit check.
         * NB. limit == 0xFFFFF provides no access      (if G=1).
         *     limit == 0x00000 provides 4GB-4kB access (if G=1).
         */
        if ( (base + limit) > base )
        {
            limit = -(base & PAGE_MASK);
            goto truncate;
        }
    }
    else
    {
        /*
         * DATA, GROWS-UP.
         * CODE (CONFORMING AND NON-CONFORMING).
         * Grows-up limit check.
         * NB. limit == 0xFFFFF provides 4GB access (if G=1).
         *     limit == 0x00000 provides 4kB access (if G=1).
         */
        if ( ((base + limit) <= base) ||
             ((base + limit) > GUEST_SEGMENT_MAX_ADDR) )
        {
            limit = GUEST_SEGMENT_MAX_ADDR - base;
        truncate:
            if ( !(b & _SEGMENT_G) )
                goto bad; /* too dangerous; too hard to work out... */
            limit = (limit >> 12) - 1;
            d->a &= ~0x0ffff; d->a |= limit & 0x0ffff;
            d->b &= ~0xf0000; d->b |= limit & 0xf0000;
        }
    }

 good:
    return 1;
 bad:
    return 0;
}
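
/*
 * Guard the page at the base of the primary stack so that an overrun faults
 * immediately rather than silently corrupting adjacent memory.
 */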
void memguard_guard_stack(void *p)
{
    memguard_guard_range(p, PAGE_SIZE);
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */