ia64/linux-2.6.18-xen.hg

include/asm-m68k/tlbflush.h @ 452:c7ed6fe5dca0

kexec: don't initialise regions in reserve_memory()

There is no need to initialise efi_memmap_res and boot_param_res in
reserve_memory() for the initial xen domain, as machine_kexec_setup_resources()
already initialises them using values from the kexec hypercall.

Signed-off-by: Simon Horman <horms@verge.net.au>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Feb 28 10:55:18 2008 +0000 (2008-02-28)
parents 831230e53067
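For context, a minimal sketch of the pattern the description refers to: machine_kexec_setup_resources() can fill in the resources from machine-address ranges the hypervisor reports, so reserve_memory() need not pre-initialise them. This is illustrative only, not the changeset itself; the helper setup_kexec_resource is invented here, and the type and constant names are assumed to follow the Xen kexec interface of this tree.

/* Illustrative sketch only -- not the actual patch. */
static void __init setup_kexec_resource(struct resource *res, int range_id)
{
	xen_kexec_range_t range = { .range = range_id };

	/* Ask the hypervisor for the machine-address range. */
	if (HYPERVISOR_kexec_op(KEXEC_CMD_kexec_get_range, &range))
		return;		/* hypercall failed: leave the resource as-is */

	res->start = range.start;
	res->end = range.start + range.size - 1;
}

/* e.g. from machine_kexec_setup_resources():
 *	setup_kexec_resource(&efi_memmap_res, KEXEC_RANGE_MA_EFI_MEMMAP);
 *	setup_kexec_resource(&boot_param_res, KEXEC_RANGE_MA_BOOT_PARAM);
 */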
line source
#ifndef _M68K_TLBFLUSH_H
#define _M68K_TLBFLUSH_H

#ifndef CONFIG_SUN3

#include <asm/current.h>
/* Flush the ATC entry for a single kernel page. */
static inline void flush_tlb_kernel_page(void *addr)
{
	if (CPU_IS_040_OR_060) {
		/* On '040/'060, pflush takes its function code from DFC,
		   which set_fs() loads, so switch to kernel space first. */
		mm_segment_t old_fs = get_fs();
		set_fs(KERNEL_DS);
		__asm__ __volatile__(".chip 68040\n\t"
				     "pflush (%0)\n\t"
				     ".chip 68k"
				     : : "a" (addr));
		set_fs(old_fs);
	} else
		/* '020/'030: fc #4, mask #4 selects supervisor entries. */
		__asm__ __volatile__("pflush #4,#4,(%0)" : : "a" (addr));
}
/*
 * flush all user-space atc entries.
 */
static inline void __flush_tlb(void)
{
	if (CPU_IS_040_OR_060)
		__asm__ __volatile__(".chip 68040\n\t"
				     "pflushan\n\t"
				     ".chip 68k");
	else
		__asm__ __volatile__("pflush #0,#4");
}
/* Flush the ATC entry for a single address ('040/'060 only). */
static inline void __flush_tlb040_one(unsigned long addr)
{
	__asm__ __volatile__(".chip 68040\n\t"
			     "pflush (%0)\n\t"
			     ".chip 68k"
			     : : "a" (addr));
}
/* Flush the user-space ATC entry for a single address. */
static inline void __flush_tlb_one(unsigned long addr)
{
	if (CPU_IS_040_OR_060)
		__flush_tlb040_one(addr);
	else
		__asm__ __volatile__("pflush #0,#4,(%0)" : : "a" (addr));
}

#define flush_tlb() __flush_tlb()
/*
 * flush all atc entries (both kernel and user-space entries).
 */
static inline void flush_tlb_all(void)
{
	if (CPU_IS_040_OR_060)
		__asm__ __volatile__(".chip 68040\n\t"
				     "pflusha\n\t"
				     ".chip 68k");
	else
		__asm__ __volatile__("pflusha");
}
/* Flush all user entries, but only if mm is the active address space. */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->active_mm)
		__flush_tlb();
}

/* Flush one user page; fs is switched to user space for the pflush. */
static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	if (vma->vm_mm == current->active_mm) {
		mm_segment_t old_fs = get_fs();
		set_fs(USER_DS);
		__flush_tlb_one(addr);
		set_fs(old_fs);
	}
}

/* There is no ranged flush in hardware; flush all user entries instead. */
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb();
}

/* Likewise for kernel ranges: flush everything. */
static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	flush_tlb_all();
}

/* The ATC does not cache page-table entries themselves; nothing to do. */
static inline void flush_tlb_pgtables(struct mm_struct *mm,
				      unsigned long start, unsigned long end)
{
}
#else

/* Reserved PMEGs. */
extern char sun3_reserved_pmeg[SUN3_PMEGS_NUM];
extern unsigned long pmeg_vaddr[SUN3_PMEGS_NUM];
extern unsigned char pmeg_alloc[SUN3_PMEGS_NUM];
extern unsigned char pmeg_ctx[SUN3_PMEGS_NUM];
/* Flush all userspace mappings one by one...  (why no flush command,
   sun?) */
static inline void flush_tlb_all(void)
{
	unsigned long addr;
	unsigned char ctx, oldctx;

	oldctx = sun3_get_context();
	/* Invalidate every user segment mapping in each of the 8 contexts. */
	for (addr = 0x00000000; addr < TASK_SIZE; addr += SUN3_PMEG_SIZE) {
		for (ctx = 0; ctx < 8; ctx++) {
			sun3_put_context(ctx);
			sun3_put_segmap(addr, SUN3_INVALID_PMEG);
		}
	}

	sun3_put_context(oldctx);
	/* erase all of the userspace pmeg maps, we've clobbered them
	   all anyway */
	for (addr = 0; addr < SUN3_INVALID_PMEG; addr++) {
		if (pmeg_alloc[addr] == 1) {
			pmeg_alloc[addr] = 0;
			pmeg_ctx[addr] = 0;
			pmeg_vaddr[addr] = 0;
		}
	}
}
/* Clear user TLB entries within the context named in mm */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned char oldctx;
	unsigned char seg;
	unsigned long i;

	oldctx = sun3_get_context();
	sun3_put_context(mm->context);

	for (i = 0; i < TASK_SIZE; i += SUN3_PMEG_SIZE) {
		seg = sun3_get_segmap(i);
		if (seg == SUN3_INVALID_PMEG)
			continue;

		sun3_put_segmap(i, SUN3_INVALID_PMEG);
		pmeg_alloc[seg] = 0;
		pmeg_ctx[seg] = 0;
		pmeg_vaddr[seg] = 0;
	}

	sun3_put_context(oldctx);
}
/* Flush a single TLB page. In this case, we're limited to flushing a
   single PMEG */
static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long addr)
{
	unsigned char oldctx;
	unsigned char i;

	oldctx = sun3_get_context();
	sun3_put_context(vma->vm_mm->context);
	addr &= ~SUN3_PMEG_MASK;
	if ((i = sun3_get_segmap(addr)) != SUN3_INVALID_PMEG) {
		pmeg_alloc[i] = 0;
		pmeg_ctx[i] = 0;
		pmeg_vaddr[i] = 0;
		sun3_put_segmap(addr, SUN3_INVALID_PMEG);
	}
	sun3_put_context(oldctx);
}
/* Flush a range of pages from TLB. */
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned char seg, oldctx;

	start &= ~SUN3_PMEG_MASK;

	oldctx = sun3_get_context();
	sun3_put_context(mm->context);

	while (start < end) {
		if ((seg = sun3_get_segmap(start)) == SUN3_INVALID_PMEG)
			goto next;
		/* Only release the PMEG bookkeeping if it belongs to mm. */
		if (pmeg_ctx[seg] == mm->context) {
			pmeg_alloc[seg] = 0;
			pmeg_ctx[seg] = 0;
			pmeg_vaddr[seg] = 0;
		}
		sun3_put_segmap(start, SUN3_INVALID_PMEG);
next:
		start += SUN3_PMEG_SIZE;
	}
	sun3_put_context(oldctx);	/* restore the saved context */
}
static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	flush_tlb_all();
}

/* Flush kernel page from TLB. */
static inline void flush_tlb_kernel_page(unsigned long addr)
{
	sun3_put_segmap(addr & ~(SUN3_PMEG_SIZE - 1), SUN3_INVALID_PMEG);
}

static inline void flush_tlb_pgtables(struct mm_struct *mm,
				      unsigned long start, unsigned long end)
{
}

#endif

#endif /* _M68K_TLBFLUSH_H */
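As a usage note, a minimal sketch (not part of this header) of how generic mm code drives these hooks after page-table updates; the function below is hypothetical, for illustration only.

/* Hypothetical caller, for illustration only. */
static void example_unmap_region(struct vm_area_struct *vma,
				 unsigned long start, unsigned long end)
{
	/* ... page table entries for [start, end) have been cleared ... */
	flush_tlb_range(vma, start, end);	/* drop stale TLB/ATC entries */
}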