ia64/xen-unstable

view linux-2.4.29-xen-sparse/include/asm-xen/mmu_context.h @ 3887:4385894c52ae

bitkeeper revision 1.1230.2.4 (421a95cepOZORm0EbZfqBeZ6PZ8MwA)

Merge freefall.cl.cam.ac.uk:/auto/groups/xeno/users/cl349/BK/xen-unstable.bk
into freefall.cl.cam.ac.uk:/auto/groups/xeno-xenod/BK/xen-unstable.bk
author iap10@freefall.cl.cam.ac.uk
date Tue Feb 22 02:15:42 2005 +0000 (2005-02-22)
parents 0a4b76b6b5a0
children 6244d80c59e5 445b12a7221a
line source
1 #ifndef __I386_MMU_CONTEXT_H
2 #define __I386_MMU_CONTEXT_H
4 #include <linux/config.h>
5 #include <asm/desc.h>
6 #include <asm/atomic.h>
7 #include <asm/pgalloc.h>
9 /*
10 * hooks to add arch specific data into the mm struct.
11 * Note that destroy_context is called even if init_new_context
12 * fails.
13 */
14 int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
15 void destroy_context(struct mm_struct *mm);
#ifdef CONFIG_SMP

/*
 * Called when a task starts running with an mm it does not own (e.g. a
 * kernel thread borrowing the previous task's mm).  Downgrading this
 * CPU's TLB state from OK to LAZY allows TLB-flush IPIs for the mm to
 * be suppressed until a real mm switch happens (see switch_mm below).
 */
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
{
	if (cpu_tlbstate[cpu].state == TLBSTATE_OK)
		cpu_tlbstate[cpu].state = TLBSTATE_LAZY;
}

#else

/* UP build: there are no cross-CPU flush IPIs to suppress, so this is a no-op. */
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
{
}

#endif
30 extern pgd_t *cur_pgd;
32 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk, unsigned cpu)
33 {
34 if (prev != next) {
35 /* stop flush ipis for the previous mm */
36 clear_bit(cpu, &prev->cpu_vm_mask);
37 #ifdef CONFIG_SMP
38 cpu_tlbstate[cpu].state = TLBSTATE_OK;
39 cpu_tlbstate[cpu].active_mm = next;
40 #endif
42 /* Re-load page tables */
43 cur_pgd = next->pgd;
44 queue_pt_switch(__pa(cur_pgd));
45 /* load_LDT, if either the previous or next thread
46 * has a non-default LDT.
47 */
48 if (next->context.size+prev->context.size)
49 load_LDT(&next->context);
50 }
51 #ifdef CONFIG_SMP
52 else {
53 cpu_tlbstate[cpu].state = TLBSTATE_OK;
54 if(cpu_tlbstate[cpu].active_mm != next)
55 out_of_line_bug();
56 if(!test_and_set_bit(cpu, &next->cpu_vm_mask)) {
57 /* We were in lazy tlb mode and leave_mm disabled
58 * tlb flush IPI delivery. We must reload %cr3.
59 */
60 cur_pgd = next->pgd;
61 queue_pt_switch(__pa(cur_pgd));
62 load_LDT(next);
63 }
64 }
65 #endif
66 }
/*
 * Activate 'next' as the current address space: perform the mm switch on
 * this CPU, then push any queued Xen page-table updates to the hypervisor.
 */
#define activate_mm(prev, next)                                 \
	do {                                                    \
		switch_mm((prev), (next), NULL, smp_processor_id()); \
		flush_page_update_queue();                      \
	} while (0)
74 #endif