ia64/linux-2.6.18-xen.hg

include/asm-arm/tlbflush.h @ 452:c7ed6fe5dca0

kexec: don't initialise regions in reserve_memory()

There is no need to initialise efi_memmap_res and boot_param_res in
reserve_memory() for the initial xen domain, as it is done in
machine_kexec_setup_resources() using values from the kexec hypercall.

Signed-off-by: Simon Horman <horms@verge.net.au>
author		Keir Fraser <keir.fraser@citrix.com>
date		Thu Feb 28 10:55:18 2008 +0000
parents		831230e53067
/*
 *  linux/include/asm-arm/tlbflush.h
 *
 *  Copyright (C) 1999-2003 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_TLBFLUSH_H
#define _ASMARM_TLBFLUSH_H

#ifndef CONFIG_MMU

#define tlb_flush(tlb)	((void) tlb)

#else /* CONFIG_MMU */

#include <asm/glue.h>

#define TLB_V3_PAGE	(1 << 0)
#define TLB_V4_U_PAGE	(1 << 1)
#define TLB_V4_D_PAGE	(1 << 2)
#define TLB_V4_I_PAGE	(1 << 3)
#define TLB_V6_U_PAGE	(1 << 4)
#define TLB_V6_D_PAGE	(1 << 5)
#define TLB_V6_I_PAGE	(1 << 6)

#define TLB_V3_FULL	(1 << 8)
#define TLB_V4_U_FULL	(1 << 9)
#define TLB_V4_D_FULL	(1 << 10)
#define TLB_V4_I_FULL	(1 << 11)
#define TLB_V6_U_FULL	(1 << 12)
#define TLB_V6_D_FULL	(1 << 13)
#define TLB_V6_I_FULL	(1 << 14)

#define TLB_V6_U_ASID	(1 << 16)
#define TLB_V6_D_ASID	(1 << 17)
#define TLB_V6_I_ASID	(1 << 18)

#define TLB_DCLEAN	(1 << 30)
#define TLB_WB		(1 << 31)
/*
 *	MMU TLB Model
 *	=============
 *
 *	We have the following to choose from:
 *	  v3    - ARMv3
 *	  v4    - ARMv4 without write buffer
 *	  v4wb  - ARMv4 with write buffer without I TLB flush entry instruction
 *	  v4wbi - ARMv4 with write buffer with I TLB flush entry instruction
 *	  v6wbi - ARMv6 with write buffer with I TLB flush entry instruction
 */
#undef _TLB
#undef MULTI_TLB

#define v3_tlb_flags	(TLB_V3_FULL | TLB_V3_PAGE)

#ifdef CONFIG_CPU_TLB_V3
# define v3_possible_flags	v3_tlb_flags
# define v3_always_flags	v3_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v3
# endif
#else
# define v3_possible_flags	0
# define v3_always_flags	(-1UL)
#endif

#define v4_tlb_flags	(TLB_V4_U_FULL | TLB_V4_U_PAGE)

#ifdef CONFIG_CPU_TLB_V4WT
# define v4_possible_flags	v4_tlb_flags
# define v4_always_flags	v4_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v4
# endif
#else
# define v4_possible_flags	0
# define v4_always_flags	(-1UL)
#endif

#define v4wbi_tlb_flags	(TLB_WB | TLB_DCLEAN | \
			 TLB_V4_I_FULL | TLB_V4_D_FULL | \
			 TLB_V4_I_PAGE | TLB_V4_D_PAGE)

#ifdef CONFIG_CPU_TLB_V4WBI
# define v4wbi_possible_flags	v4wbi_tlb_flags
# define v4wbi_always_flags	v4wbi_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v4wbi
# endif
#else
# define v4wbi_possible_flags	0
# define v4wbi_always_flags	(-1UL)
#endif

#define v4wb_tlb_flags	(TLB_WB | TLB_DCLEAN | \
			 TLB_V4_I_FULL | TLB_V4_D_FULL | \
			 TLB_V4_D_PAGE)

#ifdef CONFIG_CPU_TLB_V4WB
# define v4wb_possible_flags	v4wb_tlb_flags
# define v4wb_always_flags	v4wb_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v4wb
# endif
#else
# define v4wb_possible_flags	0
# define v4wb_always_flags	(-1UL)
#endif

#define v6wbi_tlb_flags	(TLB_WB | TLB_DCLEAN | \
			 TLB_V6_I_FULL | TLB_V6_D_FULL | \
			 TLB_V6_I_PAGE | TLB_V6_D_PAGE | \
			 TLB_V6_I_ASID | TLB_V6_D_ASID)

#ifdef CONFIG_CPU_TLB_V6
# define v6wbi_possible_flags	v6wbi_tlb_flags
# define v6wbi_always_flags	v6wbi_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v6wbi
# endif
#else
# define v6wbi_possible_flags	0
# define v6wbi_always_flags	(-1UL)
#endif

#ifndef _TLB
#error Unknown TLB model
#endif
#ifndef __ASSEMBLY__

struct cpu_tlb_fns {
	void (*flush_user_range)(unsigned long, unsigned long, struct vm_area_struct *);
	void (*flush_kern_range)(unsigned long, unsigned long);
	unsigned long tlb_flags;
};

/*
 * Select the calling method
 */
#ifdef MULTI_TLB

#define __cpu_flush_user_tlb_range	cpu_tlb.flush_user_range
#define __cpu_flush_kern_tlb_range	cpu_tlb.flush_kern_range

#else

#define __cpu_flush_user_tlb_range	__glue(_TLB,_flush_user_tlb_range)
#define __cpu_flush_kern_tlb_range	__glue(_TLB,_flush_kern_tlb_range)

extern void __cpu_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
extern void __cpu_flush_kern_tlb_range(unsigned long, unsigned long);

#endif

extern struct cpu_tlb_fns cpu_tlb;

#define __cpu_tlb_flags			cpu_tlb.tlb_flags
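
/*
 * Dispatch sketch (illustrative, not part of the original header):
 * without MULTI_TLB, __glue() pastes the model name onto the operation,
 * so a call compiles to a direct branch:
 *
 *	__cpu_flush_user_tlb_range(start, end, vma)
 *		=> __glue(v4wbi, _flush_user_tlb_range)(start, end, vma)
 *		=> v4wbi_flush_user_tlb_range(start, end, vma)
 *
 * With MULTI_TLB defined, the same call is an indirect call through the
 * cpu_tlb function table, which the boot-time processor probe fills in.
 */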
/*
 *	TLB Management
 *	==============
 *
 *	The arch/arm/mm/tlb-*.S files implement these methods.
 *
 *	The TLB specific code is expected to perform whatever tests it
 *	needs to determine if it should invalidate the TLB for each
 *	call.  Start addresses are inclusive and end addresses are
 *	exclusive; it is safe to round these addresses down.
 *
 *	flush_tlb_all()
 *
 *		Invalidate the entire TLB.
 *
 *	flush_tlb_mm(mm)
 *
 *		Invalidate all TLB entries in a particular address
 *		space.
 *		- mm	- mm_struct describing address space
 *
 *	flush_tlb_range(vma,start,end)
 *
 *		Invalidate a range of TLB entries in the specified
 *		address space.
 *		- vma	- vma_struct describing the address range
 *		- start	- start address (may not be aligned)
 *		- end	- end address (exclusive, may not be aligned)
 *
 *	flush_tlb_page(vma,vaddr)
 *
 *		Invalidate the specified page in the specified address space.
 *		- vma	- vma_struct describing the address range
 *		- vaddr	- virtual address (may not be aligned)
 *
 *	flush_tlb_kernel_page(kaddr)
 *
 *		Invalidate the TLB entry for the specified page.  The address
 *		will be in the kernel's virtual memory space.  Current uses
 *		only require the D-TLB to be invalidated.
 *		- kaddr	- kernel virtual memory address
 */
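
/*
 * Usage sketch (illustrative only, not part of the original header):
 * a typical caller replaces a user PTE and then drops the stale
 * translation.  example_replace_pte() is a hypothetical helper;
 * set_pte_at() is assumed from the generic pgtable API of this era.
 */
#if 0
static void example_replace_pte(struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep, pte_t pte)
{
	set_pte_at(vma->vm_mm, addr, ptep, pte);	/* install new PTE */
	flush_tlb_page(vma, addr);	/* invalidate the stale TLB entry */
}
#endif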
/*
 * We optimise the code below by:
 *  - building a set of TLB flags that might be set in __cpu_tlb_flags
 *  - building a set of TLB flags that will always be set in __cpu_tlb_flags
 *  - if we're going to need __cpu_tlb_flags, access it once and only once
 *
 * This allows us to build optimal assembly for the single-CPU type case,
 * and as close to optimal as the compiler constraints allow for the
 * multi-CPU case.  We could do better for the multi-CPU case if the
 * compiler implemented the "%?" method, but this has been discontinued
 * due to too many people getting it wrong.
 */
#define possible_tlb_flags	(v3_possible_flags | \
				 v4_possible_flags | \
				 v4wbi_possible_flags | \
				 v4wb_possible_flags | \
				 v6wbi_possible_flags)

#define always_tlb_flags	(v3_always_flags & \
				 v4_always_flags & \
				 v4wbi_always_flags & \
				 v4wb_always_flags & \
				 v6wbi_always_flags)

#define tlb_flag(f)	((always_tlb_flags & (f)) || (__tlb_flag & possible_tlb_flags & (f)))
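
/*
 * Illustrative folding (not part of the original header): on a
 * V4WBI-only build, always_tlb_flags == possible_tlb_flags ==
 * v4wbi_tlb_flags, so tlb_flag() folds to a compile-time constant:
 *
 *	tlb_flag(TLB_V3_FULL)	-> 0    : the guarded mcr is dead code
 *	tlb_flag(TLB_WB)	-> true : the mcr is emitted unconditionally
 *
 * Only a MULTI_TLB kernel ever reads __cpu_tlb_flags at run time.
 */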
static inline void local_flush_tlb_all(void)
{
	const int zero = 0;
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	/* drain the write buffer before invalidating */
	if (tlb_flag(TLB_WB))
		asm("mcr%? p15, 0, %0, c7, c10, 4" : : "r" (zero));

	if (tlb_flag(TLB_V3_FULL))
		asm("mcr%? p15, 0, %0, c6, c0, 0" : : "r" (zero));
	/* invalidate entire unified TLB */
	if (tlb_flag(TLB_V4_U_FULL | TLB_V6_U_FULL))
		asm("mcr%? p15, 0, %0, c8, c7, 0" : : "r" (zero));
	/* invalidate entire data TLB */
	if (tlb_flag(TLB_V4_D_FULL | TLB_V6_D_FULL))
		asm("mcr%? p15, 0, %0, c8, c6, 0" : : "r" (zero));
	/* invalidate entire instruction TLB */
	if (tlb_flag(TLB_V4_I_FULL | TLB_V6_I_FULL))
		asm("mcr%? p15, 0, %0, c8, c5, 0" : : "r" (zero));
}
static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
	const int zero = 0;
	const int asid = ASID(mm);
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	if (tlb_flag(TLB_WB))
		asm("mcr%? p15, 0, %0, c7, c10, 4" : : "r" (zero));

	if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask)) {
		if (tlb_flag(TLB_V3_FULL))
			asm("mcr%? p15, 0, %0, c6, c0, 0" : : "r" (zero));
		if (tlb_flag(TLB_V4_U_FULL))
			asm("mcr%? p15, 0, %0, c8, c7, 0" : : "r" (zero));
		if (tlb_flag(TLB_V4_D_FULL))
			asm("mcr%? p15, 0, %0, c8, c6, 0" : : "r" (zero));
		if (tlb_flag(TLB_V4_I_FULL))
			asm("mcr%? p15, 0, %0, c8, c5, 0" : : "r" (zero));
	}

	/* ARMv6: invalidate only the entries tagged with this mm's ASID */
	if (tlb_flag(TLB_V6_U_ASID))
		asm("mcr%? p15, 0, %0, c8, c7, 2" : : "r" (asid));
	if (tlb_flag(TLB_V6_D_ASID))
		asm("mcr%? p15, 0, %0, c8, c6, 2" : : "r" (asid));
	if (tlb_flag(TLB_V6_I_ASID))
		asm("mcr%? p15, 0, %0, c8, c5, 2" : : "r" (asid));
}
static inline void
local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	const int zero = 0;
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);

	if (tlb_flag(TLB_WB))
		asm("mcr%? p15, 0, %0, c7, c10, 4" : : "r" (zero));

	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
		if (tlb_flag(TLB_V3_PAGE))
			asm("mcr%? p15, 0, %0, c6, c0, 0" : : "r" (uaddr));
		if (tlb_flag(TLB_V4_U_PAGE))
			asm("mcr%? p15, 0, %0, c8, c7, 1" : : "r" (uaddr));
		if (tlb_flag(TLB_V4_D_PAGE))
			asm("mcr%? p15, 0, %0, c8, c6, 1" : : "r" (uaddr));
		if (tlb_flag(TLB_V4_I_PAGE))
			asm("mcr%? p15, 0, %0, c8, c5, 1" : : "r" (uaddr));
		if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL))
			asm("mcr%? p15, 0, %0, c8, c5, 0" : : "r" (zero));
	}

	if (tlb_flag(TLB_V6_U_PAGE))
		asm("mcr%? p15, 0, %0, c8, c7, 1" : : "r" (uaddr));
	if (tlb_flag(TLB_V6_D_PAGE))
		asm("mcr%? p15, 0, %0, c8, c6, 1" : : "r" (uaddr));
	if (tlb_flag(TLB_V6_I_PAGE))
		asm("mcr%? p15, 0, %0, c8, c5, 1" : : "r" (uaddr));
}
static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
{
	const int zero = 0;
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	kaddr &= PAGE_MASK;

	if (tlb_flag(TLB_WB))
		asm("mcr%? p15, 0, %0, c7, c10, 4" : : "r" (zero));

	if (tlb_flag(TLB_V3_PAGE))
		asm("mcr%? p15, 0, %0, c6, c0, 0" : : "r" (kaddr));
	if (tlb_flag(TLB_V4_U_PAGE))
		asm("mcr%? p15, 0, %0, c8, c7, 1" : : "r" (kaddr));
	if (tlb_flag(TLB_V4_D_PAGE))
		asm("mcr%? p15, 0, %0, c8, c6, 1" : : "r" (kaddr));
	if (tlb_flag(TLB_V4_I_PAGE))
		asm("mcr%? p15, 0, %0, c8, c5, 1" : : "r" (kaddr));
	if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL))
		asm("mcr%? p15, 0, %0, c8, c5, 0" : : "r" (zero));

	if (tlb_flag(TLB_V6_U_PAGE))
		asm("mcr%? p15, 0, %0, c8, c7, 1" : : "r" (kaddr));
	if (tlb_flag(TLB_V6_D_PAGE))
		asm("mcr%? p15, 0, %0, c8, c6, 1" : : "r" (kaddr));
	if (tlb_flag(TLB_V6_I_PAGE))
		asm("mcr%? p15, 0, %0, c8, c5, 1" : : "r" (kaddr));

	/*
	 * The ARM ARM states that the completion of a TLB maintenance
	 * operation is only guaranteed by a DSB instruction.
	 */
	if (tlb_flag(TLB_V6_U_PAGE | TLB_V6_D_PAGE | TLB_V6_I_PAGE))
		asm("mcr%? p15, 0, %0, c7, c10, 4" : : "r" (zero));
}
/*
 *	flush_pmd_entry
 *
 *	Flush a PMD entry (word aligned, or double-word aligned) to
 *	RAM if the TLB for the CPU we are running on requires this.
 *	This is typically used when we are creating PMD entries.
 *
 *	clean_pmd_entry
 *
 *	Clean (but don't drain the write buffer) if the CPU requires
 *	these operations.  This is typically used when we are removing
 *	PMD entries.
 */
static inline void flush_pmd_entry(pmd_t *pmd)
{
	const unsigned int zero = 0;
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	if (tlb_flag(TLB_DCLEAN))
		asm("mcr%? p15, 0, %0, c7, c10, 1	@ flush_pmd"
			: : "r" (pmd));
	if (tlb_flag(TLB_WB))
		asm("mcr%? p15, 0, %0, c7, c10, 4	@ flush_pmd"
			: : "r" (zero));
}

static inline void clean_pmd_entry(pmd_t *pmd)
{
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	if (tlb_flag(TLB_DCLEAN))
		asm("mcr%? p15, 0, %0, c7, c10, 1	@ flush_pmd"
			: : "r" (pmd));
}
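
/*
 * Usage sketch (illustrative only): when a new page table is hooked
 * into a PMD, the entry must be cleaned to RAM before the MMU's table
 * walk can see it on write-back CPUs.  example_set_pmd() is a
 * hypothetical helper, loosely modelled on arch/arm/mm code.
 */
#if 0
static void example_set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
	*pmdp = pmdval;		/* write the hardware page-table entry */
	flush_pmd_entry(pmdp);	/* clean to RAM and drain write buffer */
}
#endif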
#undef tlb_flag
#undef always_tlb_flags
#undef possible_tlb_flags

/*
 * Convert calls to our calling convention.
 */
#define local_flush_tlb_range(vma,start,end)	__cpu_flush_user_tlb_range(start,end,vma)
#define local_flush_tlb_kernel_range(s,e)	__cpu_flush_kern_tlb_range(s,e)
#ifndef CONFIG_SMP

#define flush_tlb_all		local_flush_tlb_all
#define flush_tlb_mm		local_flush_tlb_mm
#define flush_tlb_page		local_flush_tlb_page
#define flush_tlb_kernel_page	local_flush_tlb_kernel_page
#define flush_tlb_range		local_flush_tlb_range
#define flush_tlb_kernel_range	local_flush_tlb_kernel_range

#else

extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr);
extern void flush_tlb_kernel_page(unsigned long kaddr);
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

#endif
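
/*
 * SMP sketch (illustrative only): this header does not define the SMP
 * variants; they are assumed to broadcast the local operation to all
 * CPUs, roughly as below.  The IPI helper and the 2.6.18-era
 * on_each_cpu(func, info, retry, wait) signature are assumptions, not
 * taken from this file.
 */
#if 0
#include <linux/smp.h>

static void ipi_flush_tlb_all(void *ignored)
{
	local_flush_tlb_all();
}

void example_flush_tlb_all(void)
{
	on_each_cpu(ipi_flush_tlb_all, NULL, 1, 1);
}
#endif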
/*
 * If PG_dcache_dirty is set for the page, we need to ensure that any
 * cache entries for the kernel's virtual memory range are written
 * back to the page.
 */
extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte);
/*
 * ARM processors do not cache TLB tables in RAM.
 */
#define flush_tlb_pgtables(mm,start,end)	do { } while (0)

#endif /* __ASSEMBLY__ */

#endif /* CONFIG_MMU */

#endif /* _ASMARM_TLBFLUSH_H */