ia64/xen-unstable

view xen/arch/ia64/xen/mm_init.c @ 9405:29dfadcc5029

[IA64] Followup to xen time cleanup

Clean up the Xen time handler. Tristan had #if 0'd some code because it
looked redundant, but that logic was actually problematic and the cause of
an intermittent timer oops in dom0, so delete it now.

Also remove vcpu_wake: waking the current vcpu accomplishes nothing and
simply wastes CPU cycles.

Signed-off-by: Kevin Tian <kevin.tian@intel.com>
author awilliam@xenbuild.aw
date Mon Mar 27 15:32:08 2006 -0700 (2006-03-27)
parents 815758308556
children 4ed269e73e95
line source
/*
 * Initialize MMU support.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>

#ifdef XEN
#include <xen/sched.h>
#endif
#include <linux/bootmem.h>
#include <linux/efi.h>
#include <linux/elf.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#ifndef XEN
#include <linux/personality.h>
#endif
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/swap.h>
#ifndef XEN
#include <linux/proc_fs.h>
#endif

#ifndef XEN
#include <asm/a.out.h>
#endif
#include <asm/bitops.h>
#include <asm/dma.h>
#ifndef XEN
#include <asm/ia32.h>
#endif
#include <asm/io.h>
#include <asm/machvec.h>
#include <asm/numa.h>
#include <asm/patch.h>
#include <asm/pgalloc.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/tlb.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/mca.h>
#include <asm/vhpt.h>

#ifndef XEN
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
#endif

extern void ia64_tlb_init (void);

unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;

#ifdef CONFIG_VIRTUAL_MEM_MAP
unsigned long vmalloc_end = VMALLOC_END_INIT;
EXPORT_SYMBOL(vmalloc_end);
struct page_info *vmem_map;
EXPORT_SYMBOL(vmem_map);
#endif

// static int pgt_cache_water[2] = { 25, 50 };

#ifndef XEN
struct page_info *zero_page_memmap_ptr;         /* map entry for zero page */
EXPORT_SYMBOL(zero_page_memmap_ptr);

void *high_memory;
EXPORT_SYMBOL(high_memory);

/////////////////////////////////////////////
// following from linux-2.6.7/mm/mmap.c
/* description of effects of mapping type and prot in current implementation.
 * this is due to the limited x86 page protection hardware.  The expected
 * behavior is in parens:
 *
 * map_type     prot
 *              PROT_NONE       PROT_READ       PROT_WRITE      PROT_EXEC
 * MAP_SHARED   r: (no) no      r: (yes) yes    r: (no) yes     r: (no) yes
 *              w: (no) no      w: (no) no      w: (yes) yes    w: (no) no
 *              x: (no) no      x: (no) yes     x: (no) yes     x: (yes) yes
 *
 * MAP_PRIVATE  r: (no) no      r: (yes) yes    r: (no) yes     r: (no) yes
 *              w: (no) no      w: (no) no      w: (copy) copy  w: (no) no
 *              x: (no) no      x: (no) yes     x: (no) yes     x: (yes) yes
 *
 */
pgprot_t protection_map[16] = {
        __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
        __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
};
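
/*
 * Worked example of how the table above is meant to be indexed (sketch only;
 * the actual lookup lives in the generic mm code, not in this file): the low
 * four vm_flags bits -- VM_READ (0x1), VM_WRITE (0x2), VM_EXEC (0x4),
 * VM_SHARED (0x8) -- form the index, so a MAP_PRIVATE mapping created with
 * PROT_READ|PROT_WRITE uses index 0x3 and gets __P011 (readable,
 * copy-on-write), while the MAP_SHARED variant uses index 0xb and gets
 * __S011 (readable, writable).
 */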

void insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
{
        printf("insert_vm_struct: called, not implemented yet\n");
}

/////////////////////////////////////////////
//following from linux/mm/memory.c

#ifndef __ARCH_HAS_4LEVEL_HACK
/*
 * Allocate page upper directory.
 *
 * We've already handled the fast-path in-line, and we own the
 * page table lock.
 *
 * On a two-level or three-level page table, this ends up actually being
 * entirely optimized away.
 */
pud_t fastcall *__pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
{
        pud_t *new;

        spin_unlock(&mm->page_table_lock);
        new = pud_alloc_one(mm, address);
        spin_lock(&mm->page_table_lock);
        if (!new)
                return NULL;

        /*
         * Because we dropped the lock, we should re-check the
         * entry, as somebody else could have populated it..
         */
        if (pgd_present(*pgd)) {
                pud_free(new);
                goto out;
        }
        pgd_populate(mm, pgd, new);
 out:
        return pud_offset(pgd, address);
}

/*
 * Allocate page middle directory.
 *
 * We've already handled the fast-path in-line, and we own the
 * page table lock.
 *
 * On a two-level page table, this ends up actually being entirely
 * optimized away.
 */
pmd_t fastcall *__pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
        pmd_t *new;

        spin_unlock(&mm->page_table_lock);
        new = pmd_alloc_one(mm, address);
        spin_lock(&mm->page_table_lock);
        if (!new)
                return NULL;

        /*
         * Because we dropped the lock, we should re-check the
         * entry, as somebody else could have populated it..
         */
        if (pud_present(*pud)) {
                pmd_free(new);
                goto out;
        }
        pud_populate(mm, pud, new);
 out:
        return pmd_offset(pud, address);
}
#endif

pte_t fastcall * pte_alloc_map(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
{
        if (!pmd_present(*pmd)) {
                struct page_info *new;

                spin_unlock(&mm->page_table_lock);
                new = pte_alloc_one(mm, address);
                spin_lock(&mm->page_table_lock);
                if (!new)
                        return NULL;

                /*
                 * Because we dropped the lock, we should re-check the
                 * entry, as somebody else could have populated it..
                 */
                if (pmd_present(*pmd)) {
                        pte_free(new);
                        goto out;
                }
                inc_page_state(nr_page_table_pages);
                pmd_populate(mm, pmd, new);
        }
 out:
        return pte_offset_map(pmd, address);
}
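
/*
 * All three allocators above follow the same locking idiom; a minimal sketch,
 * with hypothetical alloc_level()/level_present()/free_level()/populate_level()
 * helpers standing in for the real pud/pmd/pte primitives:
 *
 *      spin_unlock(&mm->page_table_lock);      // allocation may sleep
 *      new = alloc_level(mm, address);
 *      spin_lock(&mm->page_table_lock);
 *      if (!new)
 *              return NULL;
 *      if (level_present(*upper))              // somebody raced us while unlocked
 *              free_level(new);                // keep theirs, drop ours
 *      else
 *              populate_level(mm, upper, new);
 *      return offset_in_level(upper, address);
 */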
/////////////////////////////////////////////
#endif /* XEN */

#if 0
void
update_mmu_cache (struct vm_area_struct *vma, unsigned long vaddr, pte_t pte)
{
        unsigned long addr;
        struct page_info *page;

        if (!pte_exec(pte))
                return;                         /* not an executable page... */

        page = pte_page(pte);
        /* don't use VADDR: it may not be mapped on this CPU (or may have just been flushed): */
        addr = (unsigned long) page_address(page);

        if (test_bit(PG_arch_1, &page->flags))
                return;                         /* i-cache is already coherent with d-cache */

        flush_icache_range(addr, addr + PAGE_SIZE);
        set_bit(PG_arch_1, &page->flags);       /* mark page as clean */
}
#endif

#if 0
inline void
ia64_set_rbs_bot (void)
{
#ifdef XEN
        unsigned long stack_size = MAX_USER_STACK_SIZE;
#else
        unsigned long stack_size = current->rlim[RLIMIT_STACK].rlim_max & -16;
#endif

        if (stack_size > MAX_USER_STACK_SIZE)
                stack_size = MAX_USER_STACK_SIZE;
        current->arch._thread.rbs_bot = STACK_TOP - stack_size;
}
#endif

/*
 * This performs some platform-dependent address space initialization.
 * On IA-64, we want to setup the VM area for the register backing
 * store (which grows upwards) and install the gateway page which is
 * used for signal trampolines, etc.
 */
#if 0
void
ia64_init_addr_space (void)
{
#ifdef XEN
        printf("ia64_init_addr_space: called, not implemented\n");
#else
        struct vm_area_struct *vma;

        ia64_set_rbs_bot();

        /*
         * If we're out of memory and kmem_cache_alloc() returns NULL, we simply ignore
         * the problem.  When the process attempts to write to the register backing store
         * for the first time, it will get a SEGFAULT in this case.
         */
        vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
        if (vma) {
                memset(vma, 0, sizeof(*vma));
                vma->vm_mm = current->mm;
                vma->vm_start = current->arch._thread.rbs_bot & PAGE_MASK;
                vma->vm_end = vma->vm_start + PAGE_SIZE;
                vma->vm_page_prot = protection_map[VM_DATA_DEFAULT_FLAGS & 0x7];
                vma->vm_flags = VM_READ|VM_WRITE|VM_MAYREAD|VM_MAYWRITE|VM_GROWSUP;
                insert_vm_struct(current->mm, vma);
        }

        /* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
        if (!(current->personality & MMAP_PAGE_ZERO)) {
                vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
                if (vma) {
                        memset(vma, 0, sizeof(*vma));
                        vma->vm_mm = current->mm;
                        vma->vm_end = PAGE_SIZE;
                        vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
                        vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO | VM_RESERVED;
                        insert_vm_struct(current->mm, vma);
                }
        }
#endif
}
#endif

void setup_gate (void)
{
        printk("setup_gate not-implemented.\n");
}

void __devinit
ia64_mmu_init (void *my_cpu_data)
{
        unsigned long psr, impl_va_bits;
#if 0
        unsigned long pta;
#endif
        extern void __devinit tlb_init (void);
        int cpu;

#ifdef CONFIG_DISABLE_VHPT
#       define VHPT_ENABLE_BIT  0
#else
#       define VHPT_ENABLE_BIT  1
#endif

        /* Pin mapping for percpu area into TLB */
        psr = ia64_clear_ic();
        ia64_itr(0x2, IA64_TR_PERCPU_DATA, PERCPU_ADDR,
                 pte_val(pfn_pte(__pa(my_cpu_data) >> PAGE_SHIFT, PAGE_KERNEL)),
                 PERCPU_PAGE_SHIFT);

        ia64_set_psr(psr);
        ia64_srlz_i();
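
        /*
         * The ia64_itr() call above pins a translation: 0x2 selects the data
         * TLB, IA64_TR_PERCPU_DATA names the translation-register slot, and
         * the entry maps PERCPU_ADDR to this CPU's per-cpu area with a page
         * of 2^PERCPU_PAGE_SHIFT bytes.  PSR.ic is kept clear across the
         * insertion and an instruction serialization follows.
         */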

        /*
         * Check if the virtually mapped linear page table (VMLPT) overlaps with a mapped
         * address space.  The IA-64 architecture guarantees that at least 50 bits of
         * virtual address space are implemented but if we pick a large enough page size
         * (e.g., 64KB), the mapped address space is big enough that it will overlap with
         * VMLPT.  I assume that once we run on machines big enough to warrant 64KB pages,
         * IMPL_VA_MSB will be significantly bigger, so this is unlikely to become a
         * problem in practice.  Alternatively, we could truncate the top of the mapped
         * address space to not permit mappings that would overlap with the VMLPT.
         * --davidm 00/12/06
         */
#       define pte_bits                 3
#       define mapped_space_bits        (3*(PAGE_SHIFT - pte_bits) + PAGE_SHIFT)
        /*
         * The virtual page table has to cover the entire implemented address space within
         * a region even though not all of this space may be mappable.  The reason for
         * this is that the Access bit and Dirty bit fault handlers perform
         * non-speculative accesses to the virtual page table, so the address range of the
         * virtual page table itself needs to be covered by the virtual page table.
         */
#       define vmlpt_bits               (impl_va_bits - PAGE_SHIFT + pte_bits)
#       define POW2(n)                  (1ULL << (n))

        impl_va_bits = ffz(~(local_cpu_data->unimpl_va_mask | (7UL << 61)));

        if (impl_va_bits < 51 || impl_va_bits > 61)
                panic("CPU has bogus IMPL_VA_MSB value of %lu!\n", impl_va_bits - 1);

#ifdef XEN
        vhpt_init();
#endif
#if 0
        /* place the VMLPT at the end of each page-table mapped region: */
        pta = POW2(61) - POW2(vmlpt_bits);

        if (POW2(mapped_space_bits) >= pta)
                panic("mm/init: overlap between virtually mapped linear page table and "
                      "mapped kernel space!");
        /*
         * Set the (virtually mapped linear) page table address.  Bit
         * 8 selects between the short and long format, bits 2-7 the
         * size of the table, and bit 0 whether the VHPT walker is
         * enabled.
         */
        ia64_set_pta(pta | (0 << 8) | (vmlpt_bits << 2) | VHPT_ENABLE_BIT);
#endif
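
        /*
         * Worked example for the (currently disabled) overlap check above,
         * assuming 16KB pages (PAGE_SHIFT = 14) and IMPL_VA_MSB = 50, i.e.
         * impl_va_bits = 51:
         *
         *      mapped_space_bits = 3*(14 - 3) + 14 = 47
         *      vmlpt_bits        = 51 - 14 + 3     = 40
         *      pta               = 2^61 - 2^40
         *
         * 2^47 is well below 2^61 - 2^40, so the page-table mapped space and
         * the VMLPT would not collide in this configuration.
         */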
        ia64_tlb_init();

#ifdef CONFIG_HUGETLB_PAGE
        ia64_set_rr(HPAGE_REGION_BASE, HPAGE_SHIFT << 2);
        ia64_srlz_d();
#endif

        cpu = smp_processor_id();

#ifndef XEN
        /* mca handler uses cr.lid as key to pick the right entry */
        ia64_mca_tlb_list[cpu].cr_lid = ia64_getreg(_IA64_REG_CR_LID);

        /* insert this percpu data information into our list for MCA recovery purposes */
        ia64_mca_tlb_list[cpu].percpu_paddr = pte_val(mk_pte_phys(__pa(my_cpu_data), PAGE_KERNEL));
        /* Also save per-cpu tlb flush recipe for use in physical mode mca handler */
        ia64_mca_tlb_list[cpu].ptce_base = local_cpu_data->ptce_base;
        ia64_mca_tlb_list[cpu].ptce_count[0] = local_cpu_data->ptce_count[0];
        ia64_mca_tlb_list[cpu].ptce_count[1] = local_cpu_data->ptce_count[1];
        ia64_mca_tlb_list[cpu].ptce_stride[0] = local_cpu_data->ptce_stride[0];
        ia64_mca_tlb_list[cpu].ptce_stride[1] = local_cpu_data->ptce_stride[1];
#endif
}

#ifdef CONFIG_VIRTUAL_MEM_MAP

int
create_mem_map_page_table (u64 start, u64 end, void *arg)
{
        unsigned long address, start_page, end_page;
        struct page_info *map_start, *map_end;
        int node;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;

        map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
        map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);

        start_page = (unsigned long) map_start & PAGE_MASK;
        end_page = PAGE_ALIGN((unsigned long) map_end);
        node = paddr_to_nid(__pa(start));

        for (address = start_page; address < end_page; address += PAGE_SIZE) {
                pgd = pgd_offset_k(address);
                if (pgd_none(*pgd))
                        pgd_populate(&init_mm, pgd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
                pmd = pmd_offset(pgd, address);

                if (pmd_none(*pmd))
                        pmd_populate_kernel(&init_mm, pmd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
                pte = pte_offset_kernel(pmd, address);

                if (pte_none(*pte))
                        set_pte(pte, pfn_pte(__pa(alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE)) >> PAGE_SHIFT,
                                PAGE_KERNEL));
        }
        return 0;
}
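
/*
 * create_mem_map_page_table() is an efi_memmap_walk() callback; the platform
 * paging setup typically drives it over every usable memory descriptor,
 * roughly as in
 *
 *      efi_memmap_walk(create_mem_map_page_table, NULL);
 *
 * so that vmem_map has backing pages before memmap_init() touches it.
 * (Sketch only -- the actual call site is not in this file.)
 */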

struct memmap_init_callback_data {
        struct page_info *start;
        struct page_info *end;
        int nid;
        unsigned long zone;
};

static int
virtual_memmap_init (u64 start, u64 end, void *arg)
{
        struct memmap_init_callback_data *args;
        struct page_info *map_start, *map_end;

        args = (struct memmap_init_callback_data *) arg;

        map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
        map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);

        if (map_start < args->start)
                map_start = args->start;
        if (map_end > args->end)
                map_end = args->end;

        /*
         * We have to initialize "out of bounds" struct page_info elements that fit completely
         * on the same pages that were allocated for the "in bounds" elements because they
         * may be referenced later (and found to be "reserved").
         */
        map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1)) / sizeof(struct page_info);
        map_end += ((PAGE_ALIGN((unsigned long) map_end) - (unsigned long) map_end)
                    / sizeof(struct page_info));

        if (map_start < map_end)
                memmap_init_zone(map_start, (unsigned long) (map_end - map_start),
                                 args->nid, args->zone, page_to_mfn(map_start));
        return 0;
}

void
memmap_init (struct page_info *start, unsigned long size, int nid,
             unsigned long zone, unsigned long start_pfn)
{
        if (!vmem_map)
                memmap_init_zone(start, size, nid, zone, start_pfn);
        else {
                struct memmap_init_callback_data args;

                args.start = start;
                args.end = start + size;
                args.nid = nid;
                args.zone = zone;

                efi_memmap_walk(virtual_memmap_init, &args);
        }
}
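
/*
 * With a virtual mem_map there may be holes, so a frame number is validated
 * by probing its page_info entry with __get_user(): the first byte of the
 * entry must be readable, and if the entry straddles a page boundary in
 * vmem_map, its last byte must be readable as well.
 */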
int
ia64_mfn_valid (unsigned long pfn)
{
        char byte;
        struct page_info *pg = mfn_to_page(pfn);

        return     (__get_user(byte, (char *) pg) == 0)
                && ((((u64)pg & PAGE_MASK) == (((u64)(pg + 1) - 1) & PAGE_MASK))
                        ||  (__get_user(byte, (char *) (pg + 1) - 1) == 0));
}
EXPORT_SYMBOL(ia64_mfn_valid);

int
find_largest_hole (u64 start, u64 end, void *arg)
{
        u64 *max_gap = arg;

        static u64 last_end = PAGE_OFFSET;

        /* NOTE: this algorithm assumes efi memmap table is ordered */

#ifdef XEN
//printf("find_largest_hole: start=%lx,end=%lx,max_gap=%lx\n",start,end,*(unsigned long *)arg);
#endif
        if (*max_gap < (start - last_end))
                *max_gap = start - last_end;
        last_end = end;
#ifdef XEN
//printf("find_largest_hole2: max_gap=%lx,last_end=%lx\n",*max_gap,last_end);
#endif
        return 0;
}
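
/*
 * find_largest_hole() is another efi_memmap_walk() callback; a caller would
 * accumulate the result roughly as in
 *
 *      u64 max_gap = 0;
 *      efi_memmap_walk(find_largest_hole, &max_gap);
 *
 * and then decide whether memory is sparse enough to justify the virtual
 * mem_map.  (Sketch only -- the actual call site is in the platform setup.)
 */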
#endif /* CONFIG_VIRTUAL_MEM_MAP */

#ifndef XEN
static int
count_reserved_pages (u64 start, u64 end, void *arg)
{
        unsigned long num_reserved = 0;
        unsigned long *count = arg;

        for (; start < end; start += PAGE_SIZE)
                if (PageReserved(virt_to_page(start)))
                        ++num_reserved;
        *count += num_reserved;
        return 0;
}
#endif

/*
 * Boot command-line option "nolwsys" can be used to disable the use of any light-weight
 * system call handler.  When this option is in effect, all fsyscalls will end up bubbling
 * down into the kernel and calling the normal (heavy-weight) syscall handler.  This is
 * useful for performance testing, but conceivably could also come in handy for debugging
 * purposes.
 */

static int nolwsys;

static int __init
nolwsys_setup (char *s)
{
        nolwsys = 1;
        return 1;
}

__setup("nolwsys", nolwsys_setup);
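
/*
 * Usage example (hypothetical boot entry): append "nolwsys" to the command
 * line that this image parses, e.g. "... console=ttyS0 nolwsys", to force
 * every fsyscall through the heavy-weight path.
 */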

void
mem_init (void)
{
#ifdef CONFIG_PCI
        /*
         * This needs to be called _after_ the command line has been parsed but _before_
         * any drivers that may need the PCI DMA interface are initialized or bootmem has
         * been freed.
         */
        platform_dma_init();
#endif

}