ia64/xen-unstable
changeset 18669:7989e3999e83
x86, spinlock: Get rid of .text.lock out-of-line section.
We don't care about code bloat now that spinlock operations are not
inlined into callers. This will make backtraces easier to read.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author    Keir Fraser <keir.fraser@citrix.com>
date      Mon Oct 20 17:17:55 2008 +0100 (2008-10-20)
parents   54d74fc0037c
children  1eba0c88655f
files     xen/arch/x86/x86_32/xen.lds.S xen/arch/x86/x86_64/xen.lds.S xen/include/asm-x86/rwlock.h xen/include/asm-x86/spinlock.h
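
Background for the diffs below: the old inline asm emitted its contended path into a separate .text.lock section, which the linker-script rule removed here collected alongside .text. A CPU stuck spinning would therefore backtrace into that anonymous out-of-line blob rather than into the locking function itself. A minimal before/after sketch of the spin-lock layout, condensed from the spinlock.h hunk below (the typedef names are illustrative, not Xen's):

    /* Illustrative sketch only -- condensed from the spinlock.h hunk. */
    typedef struct { volatile signed char  lock; } old_spinlock_t;  /* 1 = free */
    typedef struct { volatile signed short lock; } new_spinlock_t;  /* 1 = free */

    /* Old: the spin loop is assembled into .text.lock, away from the caller. */
    static inline void old_spin_lock(old_spinlock_t *lock)
    {
        asm volatile (
            "1:  lock; decb %0         \n"
            "    js 2f                 \n"   /* contended: jump out of line */
            ".section .text.lock,\"ax\"\n"
            "2:  rep; nop              \n"   /* spin inside the .text.lock blob */
            "    cmpb $0,%0            \n"
            "    jle 2b                \n"
            "    jmp 1b                \n"
            ".previous"
            : "=m" (lock->lock) : : "memory" );
    }

    /* New: the same loop stays inline, so backtraces land in the caller. */
    static inline void new_spin_lock(new_spinlock_t *lock)
    {
        asm volatile (
            "1:  lock; decw %0         \n"
            "    jns 3f                \n"   /* uncontended fast path */
            "2:  rep; nop              \n"
            "    cmpw $0,%0            \n"
            "    jle 2b                \n"
            "    jmp 1b                \n"
            "3:"
            : "=m" (lock->lock) : : "memory" );
    }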
line diff
--- a/xen/arch/x86/x86_32/xen.lds.S	Mon Oct 20 17:16:45 2008 +0100
+++ b/xen/arch/x86/x86_32/xen.lds.S	Mon Oct 20 17:17:55 2008 +0100
@@ -26,7 +26,6 @@ SECTIONS
 	*(.fixup)
 	*(.gnu.warning)
 	} :text =0x9090
-  .text.lock : { *(.text.lock) } :text	/* out-of-line lock text */
 
   _etext = .;			/* End of text section */
 
--- a/xen/arch/x86/x86_64/xen.lds.S	Mon Oct 20 17:16:45 2008 +0100
+++ b/xen/arch/x86/x86_64/xen.lds.S	Mon Oct 20 17:17:55 2008 +0100
@@ -24,7 +24,6 @@ SECTIONS
 	*(.fixup)
 	*(.gnu.warning)
 	} :text = 0x9090
-  .text.lock : { *(.text.lock) } :text	/* out-of-line lock text */
 
   _etext = .;			/* End of text section */
 
--- a/xen/include/asm-x86/rwlock.h	Mon Oct 20 17:16:45 2008 +0100
+++ b/xen/include/asm-x86/rwlock.h	Mon Oct 20 17:17:55 2008 +0100
@@ -22,25 +22,19 @@
 
 #define __build_read_lock_ptr(rw, helper) \
     asm volatile(LOCK "subl $1,(%0)\n\t" \
-                 "js 2f\n" \
+                 "jns 1f\n\t" \
+                 "call " helper "\n\t" \
                  "1:\n" \
-                 ".section .text.lock,\"ax\"\n" \
-                 "2:\tcall " helper "\n\t" \
-                 "jmp 1b\n" \
-                 ".previous" \
                  ::"a" (rw) : "memory")
 
 #define __build_read_lock_const(rw, helper) \
     asm volatile(LOCK "subl $1,%0\n\t" \
-                 "js 2f\n" \
-                 "1:\n" \
-                 ".section .text.lock,\"ax\"\n" \
-                 "2:\tpush %%"__OP"ax\n\t" \
+                 "jns 1f\n\t" \
+                 "push %%"__OP"ax\n\t" \
                  "lea %0,%%"__OP"ax\n\t" \
                  "call " helper "\n\t" \
                  "pop %%"__OP"ax\n\t" \
-                 "jmp 1b\n" \
-                 ".previous" \
+                 "1:\n" \
                  :"=m" (*(volatile int *)rw) : : "memory")
 
 #define __build_read_lock(rw, helper) do { \
@@ -52,25 +46,19 @@
 
 #define __build_write_lock_ptr(rw, helper) \
     asm volatile(LOCK "subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" \
-                 "jnz 2f\n" \
+                 "jz 1f\n\t" \
+                 "call " helper "\n\t" \
                  "1:\n" \
-                 ".section .text.lock,\"ax\"\n" \
-                 "2:\tcall " helper "\n\t" \
-                 "jmp 1b\n" \
-                 ".previous" \
                  ::"a" (rw) : "memory")
 
 #define __build_write_lock_const(rw, helper) \
     asm volatile(LOCK "subl $" RW_LOCK_BIAS_STR ",(%0)\n\t" \
-                 "jnz 2f\n" \
-                 "1:\n" \
-                 ".section .text.lock,\"ax\"\n" \
-                 "2:\tpush %%"__OP"ax\n\t" \
+                 "jz 1f\n\t" \
+                 "push %%"__OP"ax\n\t" \
                  "lea %0,%%"__OP"ax\n\t" \
                  "call " helper "\n\t" \
                  "pop %%"__OP"ax\n\t" \
-                 "jmp 1b\n" \
-                 ".previous" \
+                 "1:\n" \
                  :"=m" (*(volatile int *)rw) : : "memory")
 
 #define __build_write_lock(rw, helper) do { \
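A usage sketch for the reworked read-lock macro. The helper name __read_lock_failed and its calling convention (lock pointer in %eax/%rax, helper returns only once the lock is taken) are the Linux-derived convention these macros were written for, not something defined in this patch:

    /* Hypothetical caller; assumes __read_lock_failed exists at link time. */
    static void read_lock_example(volatile int *rw)
    {
        /* Now expands to, roughly:
         *     lock subl $1,(%rax)      ; take a reader slot
         *     jns  1f                  ; no writer held it: done
         *     call __read_lock_failed  ; slow path, inline in this function
         *  1:
         * Before this change the call sat in .text.lock with a jmp back.
         */
        __build_read_lock_ptr(rw, "__read_lock_failed");
    }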
--- a/xen/include/asm-x86/spinlock.h	Mon Oct 20 17:16:45 2008 +0100
+++ b/xen/include/asm-x86/spinlock.h	Mon Oct 20 17:17:55 2008 +0100
@@ -18,14 +18,13 @@ typedef struct {
 static inline void _raw_spin_lock(raw_spinlock_t *lock)
 {
     asm volatile (
-        "1:  lock; decb %0         \n"
-        "    js 2f                 \n"
-        ".section .text.lock,\"ax\"\n"
+        "1:  lock; decw %0         \n"
+        "    jns 3f                \n"
         "2:  rep; nop              \n"
-        "    cmpb $0,%0            \n"
+        "    cmpw $0,%0            \n"
         "    jle 2b                \n"
         "    jmp 1b                \n"
-        ".previous"
+        "3:"
         : "=m" (lock->lock) : : "memory" );
 }
 
@@ -33,16 +32,16 @@ static inline void _raw_spin_unlock(raw_
 {
     ASSERT(_raw_spin_is_locked(lock));
     asm volatile (
-        "movb $1,%0"
+        "movw $1,%0"
         : "=m" (lock->lock) : : "memory" );
 }
 
 static inline int _raw_spin_trylock(raw_spinlock_t *lock)
 {
-    char oldval;
+    s16 oldval;
     asm volatile (
-        "xchgb %b0,%1"
-        :"=q" (oldval), "=m" (lock->lock)
+        "xchgw %w0,%1"
+        :"=r" (oldval), "=m" (lock->lock)
         :"0" (0) : "memory" );
     return (oldval > 0);
 }
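Finally, a usage sketch for the word-sized spinlock primitives; try_then_lock() is a hypothetical caller, not part of the patch:

    /* Hypothetical caller showing the fast/slow split after this change. */
    static int try_then_lock(raw_spinlock_t *lock)
    {
        if ( _raw_spin_trylock(lock) )   /* xchgw: old value > 0 => acquired */
            return 1;
        _raw_spin_lock(lock);            /* spin loop is now inline here, so a
                                            wedged CPU backtraces into this
                                            caller rather than .text.lock */
        return 0;
    }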