ia64/xen-unstable

xen/arch/ia64/xen/mm_init.c @ 9768:63af1c14fa18

[IA64] missed chunk of Kevin's hypercall cleanup patch

Missed this chunk of Kevin's patch when merging with dom0vp changes

Signed-off-by: Kevin Tian <kevin.tian@intel.com>
author    awilliam@xenbuild.aw
date      Tue Apr 25 22:30:07 2006 -0600 (2006-04-25)
parents   4ed269e73e95
children  e4213aa1b98d
/*
 * Initialize MMU support.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *        David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>

#include <xen/sched.h>
#include <asm/vhpt.h>

extern void ia64_tlb_init (void);
void __devinit
ia64_mmu_init (void *my_cpu_data)
{
        unsigned long psr, impl_va_bits;
#if 0
        unsigned long pta;
#endif
        extern void __devinit tlb_init (void);
        int cpu;

#ifdef CONFIG_DISABLE_VHPT
#       define VHPT_ENABLE_BIT  0
#else
#       define VHPT_ENABLE_BIT  1
#endif
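        /*
         * ia64_clear_ic() disables interrupt delivery and collection
         * (PSR.i/PSR.ic) and returns the previous PSR, so the translation
         * register insert below cannot be disturbed by an interruption.
         * ia64_itr() with target mask 0x2 inserts a *data* translation
         * register (slot IA64_TR_PERCPU_DATA), pinning the per-CPU page
         * at PERCPU_ADDR so it stays mapped even when the VHPT misses.
         */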
        /* Pin mapping for percpu area into TLB */
        psr = ia64_clear_ic();
        ia64_itr(0x2, IA64_TR_PERCPU_DATA, PERCPU_ADDR,
                 pte_val(pfn_pte(__pa(my_cpu_data) >> PAGE_SHIFT, PAGE_KERNEL)),
                 PERCPU_PAGE_SHIFT);

        ia64_set_psr(psr);
        ia64_srlz_i();
        /*
         * Check if the virtually mapped linear page table (VMLPT) overlaps with
         * a mapped address space.  The IA-64 architecture guarantees that at
         * least 50 bits of virtual address space are implemented but if we pick
         * a large enough page size (e.g., 64KB), the mapped address space is big
         * enough that it will overlap with VMLPT.  I assume that once we run on
         * machines big enough to warrant 64KB pages, IMPL_VA_MSB will be
         * significantly bigger, so this is unlikely to become a problem in
         * practice.  Alternatively, we could truncate the top of the mapped
         * address space to not permit mappings that would overlap with the
         * VMLPT.  --davidm 00/12/06
         */
#       define pte_bits                 3
#       define mapped_space_bits        (3*(PAGE_SHIFT - pte_bits) + PAGE_SHIFT)
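        /*
         * Worked example (illustration only): with 16KB pages (PAGE_SHIFT = 14)
         * and 8-byte PTEs (pte_bits = 3), each of the three page-table levels
         * resolves 14 - 3 = 11 bits, so mapped_space_bits = 3*11 + 14 = 47,
         * i.e. the three-level page table can map 2^47 bytes of a region.
         */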
        /*
         * The virtual page table has to cover the entire implemented address
         * space within a region even though not all of this space may be
         * mappable.  The reason for this is that the Access bit and Dirty bit
         * fault handlers perform non-speculative accesses to the virtual page
         * table, so the address range of the virtual page table itself needs to
         * be covered by the virtual page table.
         */
#       define vmlpt_bits               (impl_va_bits - PAGE_SHIFT + pte_bits)
#       define POW2(n)                  (1ULL << (n))
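        /*
         * unimpl_va_mask has a 1 in each unimplemented virtual-address bit
         * position; OR-ing in 7UL << 61 treats the three region-number bits as
         * unimplemented as well.  ffz(~mask) yields the lowest set bit of the
         * mask, i.e. the number of implemented VA bits.  For example, with
         * IMPL_VA_MSB = 50, impl_va_bits = 51, and with PAGE_SHIFT = 14 the
         * VMLPT then needs vmlpt_bits = 51 - 14 + 3 = 40, i.e. 2^40 bytes of
         * virtual space per region.
         */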
        impl_va_bits = ffz(~(local_cpu_data->unimpl_va_mask | (7UL << 61)));

        if (impl_va_bits < 51 || impl_va_bits > 61)
                panic("CPU has bogus IMPL_VA_MSB value of %lu!\n", impl_va_bits - 1);
#ifdef XEN
        vhpt_init();
#endif
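        /*
         * The original Linux short-format VMLPT setup below is compiled out;
         * under Xen the PTA is presumably programmed by vhpt_init() above
         * instead.
         */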
#if 0
        /* place the VMLPT at the end of each page-table mapped region: */
        pta = POW2(61) - POW2(vmlpt_bits);

        if (POW2(mapped_space_bits) >= pta)
                panic("mm/init: overlap between virtually mapped linear page table and "
                      "mapped kernel space!");
        /*
         * Set the (virtually mapped linear) page table address.  Bit
         * 8 selects between the short and long format, bits 2-7 the
         * size of the table, and bit 0 whether the VHPT walker is
         * enabled.
         */
        ia64_set_pta(pta | (0 << 8) | (vmlpt_bits << 2) | VHPT_ENABLE_BIT);
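        /*
         * E.g. with vmlpt_bits = 40: pta = 2^61 - 2^40 places the table at the
         * top of the region, and the value written has bit 8 clear (short
         * format), 40 in the size field (bits 2-7), and bit 0 set when the
         * VHPT walker is enabled.
         */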
#endif
        ia64_tlb_init();
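        /*
         * Program the region register covering the huge-page region: the
         * << 2 places HPAGE_SHIFT in the page-size field (rr.ps, bits 2-7),
         * and ia64_srlz_d() serializes so subsequent data accesses see the
         * new translation parameters.
         */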
#ifdef CONFIG_HUGETLB_PAGE
        ia64_set_rr(HPAGE_REGION_BASE, HPAGE_SHIFT << 2);
        ia64_srlz_d();
#endif
        cpu = smp_processor_id();

#ifndef XEN
        /* mca handler uses cr.lid as key to pick the right entry */
        ia64_mca_tlb_list[cpu].cr_lid = ia64_getreg(_IA64_REG_CR_LID);

        /* insert this percpu data information into our list for MCA recovery purposes */
        ia64_mca_tlb_list[cpu].percpu_paddr = pte_val(mk_pte_phys(__pa(my_cpu_data), PAGE_KERNEL));
        /* Also save per-cpu tlb flush recipe for use in physical mode mca handler */
        ia64_mca_tlb_list[cpu].ptce_base = local_cpu_data->ptce_base;
        ia64_mca_tlb_list[cpu].ptce_count[0] = local_cpu_data->ptce_count[0];
        ia64_mca_tlb_list[cpu].ptce_count[1] = local_cpu_data->ptce_count[1];
        ia64_mca_tlb_list[cpu].ptce_stride[0] = local_cpu_data->ptce_stride[0];
        ia64_mca_tlb_list[cpu].ptce_stride[1] = local_cpu_data->ptce_stride[1];
#endif
}
void
mem_init (void)
{
#ifdef CONFIG_PCI
        /*
         * This needs to be called _after_ the command line has been parsed but
         * _before_ any drivers that may need the PCI DMA interface are
         * initialized or bootmem has been freed.
         */
        platform_dma_init();
#endif
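        /*
         * Note: under Xen this is the entire body of mem_init(); the bootmem
         * accounting and memory statistics that the Linux version of this file
         * performs here appear to have been dropped.
         */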
}