ia64/xen-unstable
changeset 4426:5396c1c1d634
bitkeeper revision 1.1236.1.186 (424d5359kAhRjA4t3oq3aye_393FaQ)
Merge firebug.cl.cam.ac.uk:/auto/groups/xeno-xenod/BK/xen-unstable.bk
into firebug.cl.cam.ac.uk:/local/scratch/kaf24/xen-unstable.bk
author   | kaf24@firebug.cl.cam.ac.uk
date     | Fri Apr 01 13:57:45 2005 +0000 (2005-04-01)
parents  | a6d955deec8e bc90a74f8571
children | 3b7f9e76f29a
files    | linux-2.6.11-xen-sparse/arch/xen/i386/kernel/process.c linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/mmu_context.h
line diff
--- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/process.c	Fri Apr 01 13:37:55 2005 +0000
+++ b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/process.c	Fri Apr 01 13:57:45 2005 +0000
@@ -445,22 +445,7 @@ struct task_struct fastcall * __switch_t
 	physdev_op_t iopl_op, iobmp_op;
 	multicall_entry_t _mcl[8], *mcl = _mcl;
 
-	/*
-	 * Save away %fs and %gs. No need to save %es and %ds, as
-	 * those are always kernel segments while inside the kernel.
-	 */
-	asm volatile("movl %%fs,%0":"=m" (*(int *)&prev->fs));
-	asm volatile("movl %%gs,%0":"=m" (*(int *)&prev->gs));
-
-	/*
-	 * We clobber FS and GS here so that we avoid a GPF when restoring
-	 * previous task's FS/GS values in Xen when the LDT is switched.
-	 */
-	__asm__ __volatile__ (
-		"xorl %%eax,%%eax; movl %%eax,%%fs; movl %%eax,%%gs" : : :
-		"eax" );
-
-	/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
+	/* XEN NOTE: FS/GS saved in switch_mm(), not here. */
 
 	/*
 	 * This is basically '__unlazy_fpu', except that we queue a
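The deleted block shows why the move is needed: once switch_mm() has asked Xen to reload cr3 and the LDT, restoring a stale LDT-based selector into %fs or %gs raises a general protection fault, so the save (and the clobber that protects the hypervisor's own save/restore path) must happen earlier. The clobber itself is safe because, unlike %cs and %ss, a data segment register accepts the null selector without faulting. Below is a minimal standalone sketch of that save-and-clear idiom; the helper name is hypothetical and this is plain i386 inline asm for illustration, not the patched kernel source:

    /* Hypothetical illustration of the save-and-clear idiom the patch
     * relies on.  Loading the null selector into %fs/%gs never faults;
     * only a later memory reference through the nulled register would.
     * Loading a stale LDT-based selector after the LDT has changed
     * raises #GP instead. */
    static inline void save_and_clear_fs_gs(unsigned int *saved_fs,
                                            unsigned int *saved_gs)
    {
            /* Save the current selector values... */
            __asm__ __volatile__ ("movl %%fs,%0" : "=r" (*saved_fs));
            __asm__ __volatile__ ("movl %%gs,%0" : "=r" (*saved_gs));
            /* ...then load the null selector, which the CPU always
             * accepts in a data segment register. */
            __asm__ __volatile__ (
                    "xorl %%eax,%%eax\n\t"
                    "movl %%eax,%%fs\n\t"
                    "movl %%eax,%%gs"
                    : : : "eax");
    }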
--- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/mmu_context.h	Fri Apr 01 13:37:55 2005 +0000
+++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/mmu_context.h	Fri Apr 01 13:57:45 2005 +0000
@@ -16,13 +16,31 @@ void destroy_context(struct mm_struct *m
 
 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
-#ifdef CONFIG_SMP
+#if 0 /* XEN */
 	unsigned cpu = smp_processor_id();
 	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
 		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_LAZY;
 #endif
 }
 
+#define prepare_arch_switch(rq,next)	__prepare_arch_switch()
+#define finish_arch_switch(rq, next)	spin_unlock_irq(&(rq)->lock)
+#define task_running(rq, p)		((rq)->curr == (p))
+
+static inline void __prepare_arch_switch(void)
+{
+	/*
+	 * Save away %fs and %gs. No need to save %es and %ds, as those
+	 * are always kernel segments while inside the kernel. Must
+	 * happen before reload of cr3/ldt (i.e., not in __switch_to).
+	 */
+	__asm__ __volatile__ ( "movl %%fs,%0 ; movl %%gs,%1"
+		: "=m" (*(int *)&current->thread.fs),
+		  "=m" (*(int *)&current->thread.gs));
+	__asm__ __volatile__ ( "movl %0,%%fs ; movl %0,%%gs"
+		: : "r" (0) );
+}
+
 static inline void switch_mm(struct mm_struct *prev,
                              struct mm_struct *next,
                              struct task_struct *tsk)
@@ -32,7 +50,7 @@ static inline void switch_mm(struct mm_s
 	if (likely(prev != next)) {
 		/* stop flush ipis for the previous mm */
 		cpu_clear(cpu, prev->cpu_vm_mask);
-#ifdef CONFIG_SMP
+#if 0 /* XEN */
 		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
 		per_cpu(cpu_tlbstate, cpu).active_mm = next;
 #endif
@@ -47,7 +65,7 @@ static inline void switch_mm(struct mm_s
 		if (unlikely(prev->context.ldt != next->context.ldt))
 			load_LDT_nolock(&next->context, cpu);
 	}
-#ifdef CONFIG_SMP
+#if 0 /* XEN */
 	else {
 		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
 		BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next);
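The three macro overrides make sense in light of the call ordering in the 2.6 scheduler: prepare_arch_switch() runs before switch_mm() reloads cr3 and the LDT, and before __switch_to(), where Xen restores the incoming task's selectors. The sketch below is a condensed paraphrase of that ordering, not the actual kernel/sched.c source; types and locking details are elided:

    /*
     * Condensed sketch (paraphrase, not real kernel source) of the
     * 2.6 context-switch sequence that the macros above hook into.
     */
    void context_switch_sketch(struct rq *rq, struct task_struct *prev,
                               struct task_struct *next)
    {
            prepare_arch_switch(rq, next);  /* Xen: save %fs/%gs, then
                                             * load the null selector */
            switch_mm(prev->active_mm, next->mm, next);
                                            /* hypervisor reloads cr3
                                             * and the LDT here */
            switch_to(prev, next, prev);    /* __switch_to(): restoring
                                             * next's selectors can no
                                             * longer fault */
            finish_arch_switch(rq, next);   /* spin_unlock_irq(&rq->lock) */
    }

The #ifdef CONFIG_SMP to #if 0 /* XEN */ substitutions in the same hunks simply stub out the native per-CPU cpu_tlbstate bookkeeping for this port.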