ia64/xen-unstable

xen/include/asm-ia64/linux-xen/asm/tlbflush.h @ 9770:ced37bea0647

[IA64] FPH enabling + cleanup

Move contents of switch_to macro from xensystem.h to context_switch function.
Initialize FPU on all processors. FPH is always enabled in Xen.
Speed up context-switch (a little bit!) by not enabling/disabling FPH.
Cleanup (unused functions/variables/fields, debug printfs...)
vmx_ia64_switch_to removed (was unused).

Signed-off-by: Tristan Gingold <tristan.gingold@bull.net>
author awilliam@xenbuild.aw
date Tue Apr 25 22:35:41 2006 -0600 (2006-04-25)
parents 212eb6a2d8cd
#ifndef _ASM_IA64_TLBFLUSH_H
#define _ASM_IA64_TLBFLUSH_H

/*
 * Copyright (C) 2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

#include <linux/config.h>

#include <linux/mm.h>

#include <asm/intrinsics.h>
#include <asm/mmu_context.h>
#include <asm/page.h>

/*
 * Now for some TLB flushing routines. This is the kind of stuff that
 * can be very expensive, so try to avoid them whenever possible.
 */

/*
 * Flush everything (kernel mapping may also have changed due to
 * vmalloc/vfree).
 */
extern void local_flush_tlb_all (void);

#ifdef CONFIG_SMP
extern void smp_flush_tlb_all (void);
extern void smp_flush_tlb_mm (struct mm_struct *mm);
# define flush_tlb_all()	smp_flush_tlb_all()
#else
# define flush_tlb_all()	local_flush_tlb_all()
#endif

#ifndef XEN
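/*
 * The Linux definitions below (local_finish_flush_tlb_mm, flush_tlb_mm,
 * flush_tlb_range, flush_tlb_page, flush_tlb_pgtables,
 * flush_tlb_kernel_range) are compiled out when this header is built as
 * part of Xen, i.e. when XEN is defined.
 */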
static inline void
local_finish_flush_tlb_mm (struct mm_struct *mm)
{
#ifndef XEN
// FIXME SMP?
	if (mm == current->active_mm)
		activate_context(mm);
#endif
}

/*
 * Flush a specified user mapping. This is called, e.g., as a result of fork() and
 * exit(). fork() ends up here because the copy-on-write mechanism needs to write-protect
 * the PTEs of the parent task.
 */
static inline void
flush_tlb_mm (struct mm_struct *mm)
{
	if (!mm)
		return;

#ifndef XEN
// FIXME SMP?
	mm->context = 0;
#endif

	if (atomic_read(&mm->mm_users) == 0)
		return;		/* happens as a result of exit_mmap() */

#ifdef CONFIG_SMP
	smp_flush_tlb_mm(mm);
#else
	local_finish_flush_tlb_mm(mm);
#endif
}

extern void flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long end);

/*
 * Page-granular tlb flush.
 */
static inline void
flush_tlb_page (struct vm_area_struct *vma, unsigned long addr)
{
#ifdef CONFIG_SMP
	flush_tlb_range(vma, (addr & PAGE_MASK), (addr & PAGE_MASK) + PAGE_SIZE);
#else
#ifdef XEN
	if (vma->vm_mm == current->domain->arch.mm)
#else
	if (vma->vm_mm == current->active_mm)
#endif
		ia64_ptcl(addr, (PAGE_SHIFT << 2));
#ifndef XEN
// FIXME SMP?
	else
		vma->vm_mm->context = 0;
#endif
#endif
}

/*
 * Flush the TLB entries mapping the virtually mapped linear page
 * table corresponding to address range [START-END).
 */
static inline void
flush_tlb_pgtables (struct mm_struct *mm, unsigned long start, unsigned long end)
{
	/*
	 * Deprecated. The virtual page table is now flushed via the normal gather/flush
	 * interface (see tlb.h).
	 */
}

#define flush_tlb_kernel_range(start, end)	flush_tlb_all()	/* XXX fix me */
#endif /* XEN */

#ifdef XEN
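/* Xen-only: flush the TLB on every CPU set in 'mask'. */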
extern void flush_tlb_mask(cpumask_t mask);
#endif

#endif /* _ASM_IA64_TLBFLUSH_H */
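
As an illustrative aside, not part of the header above: a minimal sketch of how the page-granular flush declared here is typically paired with a PTE change on the non-Xen path. The wrprotect_and_flush() helper and its calling context are hypothetical; set_pte() and pte_wrprotect() are assumed to be the usual Linux pte helpers of this era, and locking is assumed to be handled by the caller.

#include <asm/pgtable.h>
#include <asm/tlbflush.h>

/* Hypothetical helper: write-protect one user page, then drop the stale
 * translation so no access can keep using the old, writable mapping. */
static void wrprotect_and_flush(struct vm_area_struct *vma,
                                unsigned long addr, pte_t *ptep)
{
	/* Update the PTE first... */
	set_pte(ptep, pte_wrprotect(*ptep));
	/* ...then purge the TLB entry covering this page. */
	flush_tlb_page(vma, addr);
}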