ia64/xen-unstable

view xen/arch/ia64/xen/mm_init.c @ 16785:af3550f53874

[IA64] domheap: Don't pin xenheap down. Now it's unnecessary.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author Alex Williamson <alex.williamson@hp.com>
date Thu Jan 17 12:05:43 2008 -0700 (2008-01-17)
parents dc227a849d02
children 34a84a5306f7
/*
 * Initialize MMU support.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>

#include <xen/sched.h>
#include <asm/vhpt.h>
#include <asm/xenmca.h>
#include <asm/meminit.h>
#include <asm/page.h>

struct ia64_mca_tlb_info ia64_mca_tlb_list[NR_CPUS];

extern void ia64_tlb_init (void);

void __devinit
ia64_mmu_init (void *my_cpu_data)
{
	unsigned long psr, impl_va_bits;
	extern void __devinit tlb_init (void);
	int cpu;
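
	/*
	 * Note: the insert below runs with psr.ic cleared; ia64_itr() with a
	 * target mask of 0x2 installs a data translation register, and the
	 * psr restore followed by srlz.i makes the new mapping visible.
	 */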
	/* Pin mapping for percpu area into TLB */
	psr = ia64_clear_ic();
	ia64_itr(0x2, IA64_TR_PERCPU_DATA, PERCPU_ADDR,
		 pte_val(pfn_pte(__pa(my_cpu_data) >> PAGE_SHIFT, PAGE_KERNEL)),
		 PERCPU_PAGE_SHIFT);

	ia64_set_psr(psr);
	ia64_srlz_i();

	/*
	 * Check if the virtually mapped linear page table (VMLPT) overlaps with a mapped
	 * address space.  The IA-64 architecture guarantees that at least 50 bits of
	 * virtual address space are implemented but if we pick a large enough page size
	 * (e.g., 64KB), the mapped address space is big enough that it will overlap with
	 * VMLPT.  I assume that once we run on machines big enough to warrant 64KB pages,
	 * IMPL_VA_MSB will be significantly bigger, so this is unlikely to become a
	 * problem in practice.  Alternatively, we could truncate the top of the mapped
	 * address space to not permit mappings that would overlap with the VMLPT.
	 * --davidm 00/12/06
	 */
#	define pte_bits			3
#	define mapped_space_bits	(3*(PAGE_SHIFT - pte_bits) + PAGE_SHIFT)
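	/*
	 * With a three-level page table each level resolves PAGE_SHIFT - pte_bits
	 * bits of the virtual address (a page holds 2^(PAGE_SHIFT - 3) eight-byte
	 * PTEs), plus PAGE_SHIFT bits of page offset: 3*(14 - 3) + 14 = 47 mappable
	 * bits with 16KB pages, but 3*(16 - 3) + 16 = 55 with 64KB pages, which is
	 * the overlap the comment above worries about.
	 */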
	/*
	 * The virtual page table has to cover the entire implemented address space within
	 * a region even though not all of this space may be mappable.  The reason for
	 * this is that the Access bit and Dirty bit fault handlers perform
	 * non-speculative accesses to the virtual page table, so the address range of the
	 * virtual page table itself needs to be covered by the virtual page table.
	 */
#	define vmlpt_bits		(impl_va_bits - PAGE_SHIFT + pte_bits)
#	define POW2(n)			(1ULL << (n))
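	/*
	 * The VMLPT holds one 2^pte_bits-byte PTE per implemented page, so it
	 * spans 2^vmlpt_bits = 2^(impl_va_bits - PAGE_SHIFT + pte_bits) bytes.
	 */
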
	impl_va_bits = ffz(~(local_cpu_data->unimpl_va_mask | (7UL << 61)));
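
	/*
	 * unimpl_va_mask has a 1 for every unimplemented virtual-address bit, and
	 * OR-ing in (7UL << 61) also excludes the three region-number bits, so
	 * ffz() of the complement yields the number of implemented VA bits per
	 * region.  IMPL_VA_MSB is impl_va_bits - 1, which the check below expects
	 * to lie in the range 50..60.
	 */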
	if (impl_va_bits < 51 || impl_va_bits > 61)
		panic("CPU has bogus IMPL_VA_MSB value of %lu!\n", impl_va_bits - 1);

#ifdef XEN
	vhpt_init();
#endif
	ia64_tlb_init();

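	/*
	 * A region register's preferred page-size field occupies bits 7:2, hence
	 * the shift by 2 when programming the huge-page region below;
	 * ia64_srlz_d() makes the new value visible to subsequent data references.
	 */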
#ifdef CONFIG_HUGETLB_PAGE
	ia64_set_rr(HPAGE_REGION_BASE, HPAGE_SHIFT << 2);
	ia64_srlz_d();
#endif

	cpu = smp_processor_id();

	/* mca handler uses cr.lid as key to pick the right entry */
	ia64_mca_tlb_list[cpu].cr_lid = ia64_getreg(_IA64_REG_CR_LID);

	/* insert this percpu data information into our list for MCA recovery purposes */
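	/*
	 * Under XEN only the physical address of the per-cpu area is recorded;
	 * the native path instead stores a ready-made PTE value and also saves
	 * the local ptc.e parameters so the physical-mode MCA handler can purge
	 * the TLB.
	 */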
#ifdef XEN
	ia64_mca_tlb_list[cpu].percpu_paddr = __pa(my_cpu_data);
#else
	ia64_mca_tlb_list[cpu].percpu_paddr = pte_val(mk_pte_phys(__pa(my_cpu_data), PAGE_KERNEL));
	/* Also save per-cpu tlb flush recipe for use in physical mode mca handler */
	ia64_mca_tlb_list[cpu].ptce_base = local_cpu_data->ptce_base;
	ia64_mca_tlb_list[cpu].ptce_count[0] = local_cpu_data->ptce_count[0];
	ia64_mca_tlb_list[cpu].ptce_count[1] = local_cpu_data->ptce_count[1];
	ia64_mca_tlb_list[cpu].ptce_stride[0] = local_cpu_data->ptce_stride[0];
	ia64_mca_tlb_list[cpu].ptce_stride[1] = local_cpu_data->ptce_stride[1];
#endif
}

void __init
mem_init (void)
{
#ifdef CONFIG_PCI
	/*
	 * This needs to be called _after_ the command line has been parsed but _before_
	 * any drivers that may need the PCI DMA interface are initialized or bootmem has
	 * been freed.
	 */
	platform_dma_init();
#endif

}