ia64/xen-unstable

view linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/mmu_context.h @ 6885:2f11c5b3c586

More build fixes for latest binutils. No 'movl' between
a segment register and memory location.

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Thu Sep 15 07:21:31 2005 +0000 (2005-09-15)
parents e2f0a6fdb7d9
children 3233e7ecfa9f
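
Background for the commit message above: newer binutils rejects an explicit 'movl' when one operand is a segment register and the other is a memory location, so the segment-saving asm in the listing below drops the size suffix and lets the assembler pick the operand size. A minimal stand-alone sketch of the change (the function and variable names here are illustrative, not from the tree):

/* Illustration only -- not part of the changeset. */
static inline void save_es_example(unsigned short *slot)
{
        /*
         * Old form: explicit 32-bit suffix with a memory destination.
         * Newer binutils rejects this segment-register/memory pairing:
         *
         *     __asm__ __volatile__ ( "movl %%es,%0" : "=m" (*slot) );
         */

        /* New form: no size suffix, so the assembler derives the
         * operand size from the segment-register/memory operands. */
        __asm__ __volatile__ ( "mov %%es,%0" : "=m" (*slot) );
}

Note that the register-to-segment moves later in the file (e.g. "movl %0,%%ds" with an "r" constraint) keep the 'movl' spelling; the restriction described in the commit message only concerns a memory operand.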
line source
#ifndef __X86_64_MMU_CONTEXT_H
#define __X86_64_MMU_CONTEXT_H

#include <linux/config.h>
#include <asm/desc.h>
#include <asm/atomic.h>
#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/pda.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

/*
 * possibly do the LDT unload here?
 */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context(struct mm_struct *mm);

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#if 0 /* XEN: no lazy tlb */
        if (read_pda(mmu_state) == TLBSTATE_OK)
                write_pda(mmu_state, TLBSTATE_LAZY);
#endif
}

#define prepare_arch_switch(rq,next)    __prepare_arch_switch()
#define finish_arch_switch(rq, next)    spin_unlock_irq(&(rq)->lock)
#define task_running(rq, p)             ((rq)->curr == (p))

static inline void __prepare_arch_switch(void)
{
        /*
         * Save away %es, %ds, %fs and %gs. Must happen before reload
         * of cr3/ldt (i.e., not in __switch_to).
         */
        __asm__ __volatile__ (
                "mov %%es,%0 ; mov %%ds,%1 ; mov %%fs,%2 ; mov %%gs,%3"
                : "=m" (current->thread.es),
                  "=m" (current->thread.ds),
                  "=m" (current->thread.fsindex),
                  "=m" (current->thread.gsindex) );

        if (current->thread.ds)
                __asm__ __volatile__ ( "movl %0,%%ds" : : "r" (0) );

        if (current->thread.es)
                __asm__ __volatile__ ( "movl %0,%%es" : : "r" (0) );

        if (current->thread.fsindex) {
                __asm__ __volatile__ ( "movl %0,%%fs" : : "r" (0) );
                current->thread.fs = 0;
        }

        if (current->thread.gsindex) {
                load_gs_index(0);
                current->thread.gs = 0;
        }
}

extern void mm_pin(struct mm_struct *mm);
extern void mm_unpin(struct mm_struct *mm);
void mm_pin_all(void);

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk)
{
        unsigned cpu = smp_processor_id();
        struct mmuext_op _op[3], *op = _op;

        if (likely(prev != next)) {
                if (!next->context.pinned)
                        mm_pin(next);

                /* stop flush ipis for the previous mm */
                clear_bit(cpu, &prev->cpu_vm_mask);
#if 0 /* XEN: no lazy tlb */
                write_pda(mmu_state, TLBSTATE_OK);
                write_pda(active_mm, next);
#endif
                set_bit(cpu, &next->cpu_vm_mask);

                /* load_cr3(next->pgd) */
                per_cpu(cur_pgd, smp_processor_id()) = next->pgd;
                op->cmd = MMUEXT_NEW_BASEPTR;
                op->arg1.mfn = pfn_to_mfn(__pa(next->pgd) >> PAGE_SHIFT);
                op++;

                /* xen_new_user_pt(__pa(__user_pgd(next->pgd))) */
                op->cmd = MMUEXT_NEW_USER_BASEPTR;
                op->arg1.mfn = pfn_to_mfn(__pa(__user_pgd(next->pgd)) >> PAGE_SHIFT);
                op++;

                if (unlikely(next->context.ldt != prev->context.ldt)) {
                        /* load_LDT_nolock(&next->context, cpu) */
                        op->cmd = MMUEXT_SET_LDT;
                        op->arg1.linear_addr = (unsigned long)next->context.ldt;
                        op->arg2.nr_ents = next->context.size;
                        op++;
                }

                BUG_ON(HYPERVISOR_mmuext_op(_op, op-_op, NULL, DOMID_SELF));
        }
#if 0 /* XEN: no lazy tlb */
        else {
                write_pda(mmu_state, TLBSTATE_OK);
                if (read_pda(active_mm) != next)
                        out_of_line_bug();
                if (!test_and_set_bit(cpu, &next->cpu_vm_mask)) {
                        /* We were in lazy tlb mode and leave_mm disabled
                         * tlb flush IPI delivery. We must reload CR3
                         * to make sure to use no freed page tables.
                         */
                        load_cr3(next->pgd);
                        xen_new_user_pt(__pa(__user_pgd(next->pgd)));
                        load_LDT_nolock(&next->context, cpu);
                }
        }
#endif
}

#define deactivate_mm(tsk,mm) do {                      \
        load_gs_index(0);                               \
        asm volatile("movl %0,%%fs"::"r"(0));           \
} while(0)

#define activate_mm(prev, next) do {                    \
        switch_mm((prev),(next),NULL);                  \
} while (0)

#endif
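
For comparison, a rough sketch of what the three MMUEXT operations built in switch_mm() stand in for, using the native helpers named in the comments (load_cr3, load_LDT_nolock) and the xen_new_user_pt() wrapper referenced in the disabled lazy-tlb path; treat it as an illustration of the batching trade-off, not code from the tree. Folding the operations into a single HYPERVISOR_mmuext_op() call means one hypervisor entry per context switch instead of up to three.

/*
 * Illustration only: the unbatched equivalent of the mmuext_op array
 * assembled in switch_mm() above.  Each call corresponds to one batched
 * operation; the real code submits them in a single hypercall.
 */
static inline void switch_mm_unbatched_sketch(struct mm_struct *next, unsigned cpu)
{
        load_cr3(next->pgd);                            /* MMUEXT_NEW_BASEPTR      */
        xen_new_user_pt(__pa(__user_pgd(next->pgd)));   /* MMUEXT_NEW_USER_BASEPTR */
        load_LDT_nolock(&next->context, cpu);           /* MMUEXT_SET_LDT          */
}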