linux-2.6-xen-sparse/include/asm-x86_64/mach-xen/asm/tlbflush.h
(ia64/xen-unstable, changeset 11221:7c9d7fc3dce5, 2006-08-19)
#ifndef _X8664_TLBFLUSH_H
#define _X8664_TLBFLUSH_H

#include <linux/config.h>
#include <linux/mm.h>
#include <asm/processor.h>

#define __flush_tlb()	xen_tlb_flush()

/*
 * Global pages have to be flushed a bit differently. Not a real
 * performance problem because this does not happen often.
 */
#define __flush_tlb_global()	xen_tlb_flush()
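/*
 * Note: native x86-64 implements __flush_tlb() as a CR3 reload and
 * __flush_tlb_global() as a CR4.PGE toggle to also drop global entries.
 * A Xen guest does not control CR3/CR4 directly, so both variants
 * funnel into xen_tlb_flush(), which (in this tree) asks the hypervisor
 * for a full local TLB flush via an mmuext_op hypercall
 * (MMUEXT_TLB_FLUSH_LOCAL).
 */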
extern unsigned long pgkern_mask;

#define __flush_tlb_all()	__flush_tlb_global()

#define __flush_tlb_one(addr)	xen_invlpg((unsigned long)addr)
/*
 * TLB flushing:
 *
 *  - flush_tlb() flushes the current mm struct TLBs
 *  - flush_tlb_all() flushes all processes' TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
 *
 * x86-64 can only flush individual pages or full VMs. For a range flush
 * we always do the full VM. It might be worth testing whether a few
 * INVLPGs in a row are a win for small ranges.
 */
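/*
 * Illustrative sketch (not part of the original header): a typical
 * caller updates the page table first and only then invalidates the
 * stale translation, e.g. when changing one user PTE. The helper name
 * below is hypothetical; the ordering is the point.
 *
 *	static void update_one_pte(struct vm_area_struct *vma,
 *				   unsigned long addr, pte_t *ptep,
 *				   pte_t newval)
 *	{
 *		set_pte(ptep, newval);		// write the new mapping
 *		flush_tlb_page(vma, addr);	// then drop the stale entry
 *	}
 */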
#ifndef CONFIG_SMP

#define flush_tlb()		__flush_tlb()
#define flush_tlb_all()		__flush_tlb_all()
#define local_flush_tlb()	__flush_tlb()

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->active_mm)
		__flush_tlb();
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long addr)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb_one(addr);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb();
}
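/*
 * Note: on UP, a flush for any mm other than current->active_mm can be
 * skipped entirely; with a single CPU, a non-active mm has no live TLB
 * entries (its user mappings were flushed when it was switched out),
 * so its page tables are simply re-walked the next time that mm is
 * switched back in.
 */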
#else

#include <asm/smp.h>

#define local_flush_tlb() \
	__flush_tlb()

extern void flush_tlb_all(void);
extern void flush_tlb_current_task(void);
extern void flush_tlb_mm(struct mm_struct *);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);

#define flush_tlb()	flush_tlb_current_task()

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	flush_tlb_mm(vma->vm_mm);
}
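/*
 * Per the comment above: there is no ranged flush primitive, so the SMP
 * flush_tlb_range() deliberately ignores [start, end) and flushes the
 * whole mm on every CPU that may hold entries for it.
 */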
#define TLBSTATE_OK	1
#define TLBSTATE_LAZY	2

/* Roughly an IPI every 20MB with 4k pages for freeing page table
   ranges. Cost is about 42k of memory for each CPU. */
#define ARCH_FREE_PTE_NR 5350
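/*
 * The numbers above, worked out (assuming 4 KB pages and 8-byte
 * pointers, as on x86-64):
 *	5350 pages x 4 KB   = ~20.9 MB of address space freed per IPI
 *	5350 slots x 8 bytes = 42800 bytes, i.e. ~42 KB of batch storage
 *	per CPU
 */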
#endif

#define flush_tlb_kernel_range(start, end) flush_tlb_all()

static inline void flush_tlb_pgtables(struct mm_struct *mm,
				      unsigned long start, unsigned long end)
{
	/* x86_64 does not keep any page table caches in a software TLB.
	   The CPUs do keep them in their hardware TLBs, but those are
	   handled by the normal TLB flushing algorithms. */
}

#endif /* _X8664_TLBFLUSH_H */