ia64/xen-unstable: linux-2.6-xen-sparse/include/asm-xen/asm-i386/mmu_context.h @ 8534:da7873110bbb

Tiny bootstrap cleanup.

Signed-off-by: Keir Fraser <keir@xensource.com>

author:  kaf24@firebug.cl.cam.ac.uk
date:    Mon Jan 09 19:46:46 2006 +0100 (2006-01-09)
parents: 25599e222c33

#ifndef __I386_SCHED_H
#define __I386_SCHED_H

#include <linux/config.h>
#include <asm/desc.h>
#include <asm/atomic.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/*
 * Used for LDT copy/destruction.
 */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context(struct mm_struct *mm);

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#if 0 /* XEN: no lazy tlb */
        unsigned cpu = smp_processor_id();
        if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
                per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_LAZY;
#endif
}

/* Scheduler hooks: save segment state before the mm/LDT switch below. */
#define prepare_arch_switch(rq,next)    __prepare_arch_switch()
#define finish_arch_switch(rq, next)    spin_unlock_irq(&(rq)->lock)
#define task_running(rq, p)             ((rq)->curr == (p))

static inline void __prepare_arch_switch(void)
{
        /*
         * Save away %fs and %gs. No need to save %es and %ds, as those
         * are always kernel segments while inside the kernel. Must
         * happen before reload of cr3/ldt (i.e., not in __switch_to).
         */
        asm volatile ( "mov %%fs,%0 ; mov %%gs,%1"
                       : "=m" (current->thread.fs),
                         "=m" (current->thread.gs) );
        /*
         * Then load null selectors, so that no stale (possibly
         * LDT-relative) selectors remain loaded while the page tables
         * and LDT are switched underneath us.
         */
        asm volatile ( "movl %0,%%fs ; movl %0,%%gs"
                       : : "r" (0) );
}

extern void mm_pin(struct mm_struct *mm);
extern void mm_unpin(struct mm_struct *mm);
void mm_pin_all(void);
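
/*
 * In brief, "pinning" an mm hands its page directory to Xen as a
 * pagetable: the hypervisor validates it, and its frames may no longer
 * be written directly by the guest.  mm_pin()/mm_unpin()/mm_pin_all()
 * are defined elsewhere in the sparse tree; switch_mm() below pins an
 * mm lazily the first time it is switched to.
 */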
static inline void switch_mm(struct mm_struct *prev,
                             struct mm_struct *next,
                             struct task_struct *tsk)
{
        int cpu = smp_processor_id();
        struct mmuext_op _op[2], *op = _op;

        if (likely(prev != next)) {
                if (!test_bit(PG_pinned, &virt_to_page(next->pgd)->flags))
                        mm_pin(next);

                /* stop flush ipis for the previous mm */
                cpu_clear(cpu, prev->cpu_vm_mask);
#if 0 /* XEN: no lazy tlb */
                per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
                per_cpu(cpu_tlbstate, cpu).active_mm = next;
#endif
                cpu_set(cpu, next->cpu_vm_mask);

                /* Re-load page tables: load_cr3(next->pgd) */
                per_cpu(cur_pgd, cpu) = next->pgd;
                op->cmd = MMUEXT_NEW_BASEPTR;
                op->arg1.mfn = pfn_to_mfn(__pa(next->pgd) >> PAGE_SHIFT);
                op++;

                /*
                 * load the LDT, if the LDT is different:
                 */
                if (unlikely(prev->context.ldt != next->context.ldt)) {
                        /* load_LDT_nolock(&next->context, cpu) */
                        op->cmd = MMUEXT_SET_LDT;
                        op->arg1.linear_addr = (unsigned long)next->context.ldt;
                        op->arg2.nr_ents = next->context.size;
                        op++;
                }

                /* Issue the queued mmuext operations in a single hypercall. */
                BUG_ON(HYPERVISOR_mmuext_op(_op, op-_op, NULL, DOMID_SELF));
        }
#if 0 /* XEN: no lazy tlb */
        else {
                per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
                BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next);

                if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
                        /* We were in lazy tlb mode and leave_mm disabled
                         * tlb flush IPI delivery. We must reload %cr3.
                         */
                        load_cr3(next->pgd);
                        load_LDT_nolock(&next->context, cpu);
                }
        }
#endif
}
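
/*
 * deactivate_mm() is called when a task drops its mm (e.g. on exec);
 * clearing %fs and %gs here presumably ensures that no selectors
 * referencing the old mm's LDT remain loaded once that LDT goes away.
 */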
#define deactivate_mm(tsk, mm) \
        asm("movl %0,%%fs ; movl %0,%%gs": :"r" (0))

#define activate_mm(prev, next) \
        switch_mm((prev),(next),NULL)

#endif