direct-io.hg
changeset 6172:c42c85c6f590
Small change to remove the difference between the Xen and mainline
deactivate_mm. While there, clean up prepare_arch_switch as well. This
generates identical code.
Signed-off-by: Chris Wright <chrisw@osdl.org>
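
The "identical code" claim rests on `asm volatile` being simply the non-underscored spelling of `__asm__ __volatile__` in GCC's extended asm syntax. Below is a minimal userspace sketch of that equivalence; it is a hypothetical demo, not kernel code, and it only reads %fs/%gs (clearing them outside the kernel would break TLS).

```c
/*
 * Demo only: GCC parses "__asm__ __volatile__" and "asm volatile" as
 * the same extended-asm construct, so switching spellings cannot
 * change the generated instructions.  Assumes an x86/x86-64 target
 * built with GCC.
 */
#include <stdio.h>

int main(void)
{
	unsigned short fs_a, gs_a, fs_b, gs_b;

	/* Old spelling, as used before the patch. */
	__asm__ __volatile__ ("mov %%fs,%0 ; mov %%gs,%1"
			      : "=m" (fs_a), "=m" (gs_a));

	/* New spelling from the patch: same constraints, same effect. */
	asm volatile ("mov %%fs,%0 ; mov %%gs,%1"
		      : "=m" (fs_b), "=m" (gs_b));

	printf("fs=%#hx gs=%#hx\n", fs_b, gs_b);
	return (fs_a == fs_b && gs_a == gs_b) ? 0 : 1;
}
```

Compiling this with `gcc -S` should emit the same instruction sequence for both statements, which is the sense in which the cleanup "generates identical code".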
author | vh249@arcadians.cl.cam.ac.uk |
---|---|
date | Mon Aug 15 12:26:20 2005 +0000 (2005-08-15) |
parents | a42bf05b188c |
children | 750b2a013d45 |
files | linux-2.6-xen-sparse/include/asm-xen/asm-i386/mmu_context.h |
line diff
```diff
--- a/linux-2.6-xen-sparse/include/asm-xen/asm-i386/mmu_context.h	Mon Aug 15 07:52:34 2005 +0000
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/mmu_context.h	Mon Aug 15 12:26:20 2005 +0000
@@ -34,10 +34,10 @@ static inline void __prepare_arch_switch
 	 * are always kernel segments while inside the kernel. Must
 	 * happen before reload of cr3/ldt (i.e., not in __switch_to).
 	 */
-	__asm__ __volatile__ ( "mov %%fs,%0 ; mov %%gs,%1"
+	asm volatile ( "mov %%fs,%0 ; mov %%gs,%1"
 		: "=m" (*(int *)&current->thread.fs),
 		  "=m" (*(int *)&current->thread.gs));
-	__asm__ __volatile__ ( "mov %0,%%fs ; mov %0,%%gs"
+	asm volatile ( "mov %0,%%fs ; mov %0,%%gs"
 		: : "r" (0) );
 }
 
@@ -100,7 +100,7 @@ static inline void switch_mm(struct mm_s
 }
 
 #define deactivate_mm(tsk, mm) \
-	asm("mov %0,%%fs ; mov %0,%%gs": :"r" (0))
+	asm("movl %0,%%fs ; movl %0,%%gs": :"r" (0))
 
 #define activate_mm(prev, next) \
 	switch_mm((prev),(next),NULL)
```
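
For reference, here is a compile-only sketch of how the touched definitions read after this changeset, reconstructed from the hunks above. The `(void)` parameter list and the stubbed `current` pointer are assumptions made so the fragment builds standalone (e.g. with `gcc -m32 -c`); in the real header, `current` is the kernel's per-task pointer, and neither piece should ever be executed from userspace, since loading 0 into %fs/%gs clobbers TLS.

```c
/* Hypothetical stand-ins for kernel types so this sketch compiles alone. */
struct stub_thread { int fs, gs; };
struct stub_task   { struct stub_thread thread; };
static struct stub_task stub_task;
#define current (&stub_task)	/* stand-in for the kernel's current task pointer */

/* Post-patch __prepare_arch_switch(): saves %fs/%gs into the thread
 * struct, then clears them so only kernel segments are live; per the
 * comment in the hunk above, this must happen before the cr3/ldt
 * reload (i.e., not in __switch_to). */
static inline void __prepare_arch_switch(void)
{
	asm volatile ( "mov %%fs,%0 ; mov %%gs,%1"
		: "=m" (*(int *)&current->thread.fs),
		  "=m" (*(int *)&current->thread.gs));
	asm volatile ( "mov %0,%%fs ; mov %0,%%gs"
		: : "r" (0) );
}

/* Post-patch deactivate_mm(): now spelled with "movl", matching how the
 * mainline i386 header spells it, which is the difference this
 * changeset removes. */
#define deactivate_mm(tsk, mm) \
	asm("movl %0,%%fs ; movl %0,%%gs": :"r" (0))
```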