ia64/xen-unstable

view xen/include/asm-ia64/linux-xen/asm/tlbflush.h @ 7922:0ee00faf332d

Adapt to removal of #ifdef ia64 in xmalloc (handle SMP_CACHE_SHIFT)
author djm@kirby.fc.hp.com
date Wed Nov 23 15:23:28 2005 -0600 (2005-11-23)
parents d34925e4144b
children b417cb20f1db
line source
1 #ifndef _ASM_IA64_TLBFLUSH_H
2 #define _ASM_IA64_TLBFLUSH_H
4 /*
5 * Copyright (C) 2002 Hewlett-Packard Co
6 * David Mosberger-Tang <davidm@hpl.hp.com>
7 */
9 #include <linux/config.h>
11 #include <linux/mm.h>
13 #include <asm/intrinsics.h>
14 #include <asm/mmu_context.h>
15 #include <asm/page.h>
17 /*
18 * Now for some TLB flushing routines. This is the kind of stuff that
19 * can be very expensive, so try to avoid them whenever possible.
20 */
22 /*
23 * Flush everything (kernel mapping may also have changed due to
24 * vmalloc/vfree).
25 */
26 extern void local_flush_tlb_all (void);
28 #ifdef CONFIG_SMP
29 extern void smp_flush_tlb_all (void);
30 extern void smp_flush_tlb_mm (struct mm_struct *mm);
31 # define flush_tlb_all() smp_flush_tlb_all()
32 #else
33 # define flush_tlb_all() local_flush_tlb_all()
34 #endif
/*
 * Complete a per-mm TLB flush on the local CPU.  Under native Linux this
 * reloads the region registers (via activate_context()) if MM is the mm
 * currently active on this CPU, so the new context takes effect at once.
 * When built for Xen (XEN defined) the whole body is compiled out and
 * this is a no-op.
 */
static inline void
local_finish_flush_tlb_mm (struct mm_struct *mm)
{
#ifndef XEN
	// FIXME SMP?
	if (mm == current->active_mm)
		activate_context(mm);
#endif
}
/*
 * Flush a specified user mapping.  This is called, e.g., as a result of fork() and
 * exit().  fork() ends up here because the copy-on-write mechanism needs to write-protect
 * the PTEs of the parent task.
 */
static inline void
flush_tlb_mm (struct mm_struct *mm)
{
	if (!mm)		/* no mm: nothing to flush */
		return;

#ifndef XEN
	// FIXME SMP?
	/* Zap the context so a fresh region ID is allocated the next time
	   this mm is activated; compiled out under Xen. */
	mm->context = 0;
#endif

	if (atomic_read(&mm->mm_users) == 0)
		return;		/* happens as a result of exit_mmap() */

#ifdef CONFIG_SMP
	/* Broadcast the flush to all CPUs that may cache this mm. */
	smp_flush_tlb_mm(mm);
#else
	/* UP: only the local CPU needs its context refreshed. */
	local_finish_flush_tlb_mm(mm);
#endif
}
72 extern void flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long end);
/*
 * Page-granular tlb flush: purge the translation for the single page
 * containing ADDR from the TLB(s) relevant to VMA's mm.
 */
static inline void
flush_tlb_page (struct vm_area_struct *vma, unsigned long addr)
{
#ifdef CONFIG_SMP
	/* SMP: delegate to the range flush for the one page spanning ADDR. */
	flush_tlb_range(vma, (addr & PAGE_MASK), (addr & PAGE_MASK) + PAGE_SIZE);
#else
	if (vma->vm_mm == current->active_mm)
		/* ptc.l purges the local TLB entry; the second operand
		   encodes the page size (log2, shifted into bits 7:2). */
		ia64_ptcl(addr, (PAGE_SHIFT << 2));
#ifndef XEN
	// FIXME SMP?
	else
		/* mm not currently active: invalidate its context instead,
		   forcing a full refresh on next activation.  Compiled out
		   under Xen, leaving only the active-mm purge above. */
		vma->vm_mm->context = 0;
#endif
#endif
}
/*
 * Flush the TLB entries mapping the virtually mapped linear page
 * table corresponding to address range [START-END).
 *
 * Deprecated: the virtual page table is now flushed via the normal
 * gather/flush interface (see tlb.h), so this is an intentional no-op
 * retained only for interface compatibility.
 */
static inline void
flush_tlb_pgtables (struct mm_struct *mm, unsigned long start, unsigned long end)
{
	/* deliberately empty -- see comment above */
}
106 #define flush_tlb_kernel_range(start, end) flush_tlb_all() /* XXX fix me */
108 #endif /* _ASM_IA64_TLBFLUSH_H */