ia64/xen-unstable

changeset 19480:d669f5d1f876

Simplify spinlock code and re-enable IRQs where possible when spinning.

Based on a patch by Juergen Gross.

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author      Keir Fraser <keir.fraser@citrix.com>
date        Tue Mar 31 15:03:59 2009 +0100
parents     909bb1245930
children    f6fd1c2e4da6
files       xen/common/spinlock.c xen/include/asm-ia64/linux-xen/asm/spinlock.h xen/include/asm-x86/spinlock.h
line diff
--- a/xen/common/spinlock.c	Tue Mar 31 14:04:50 2009 +0100
+++ b/xen/common/spinlock.c	Tue Mar 31 15:03:59 2009 +0100
@@ -2,6 +2,7 @@
 #include <xen/irq.h>
 #include <xen/smp.h>
 #include <xen/spinlock.h>
+#include <asm/processor.h>
 
 #ifndef NDEBUG
 
@@ -43,7 +44,9 @@ void spin_debug_disable(void)
 void _spin_lock(spinlock_t *lock)
 {
     check_lock(&lock->debug);
-    _raw_spin_lock(&lock->raw);
+    while ( unlikely(!_raw_spin_trylock(&lock->raw)) )
+        while ( likely(_raw_spin_is_locked(&lock->raw)) )
+            cpu_relax();
 }
 
 void _spin_lock_irq(spinlock_t *lock)
@@ -51,7 +54,13 @@ void _spin_lock_irq(spinlock_t *lock)
     ASSERT(local_irq_is_enabled());
     local_irq_disable();
     check_lock(&lock->debug);
-    _raw_spin_lock(&lock->raw);
+    while ( unlikely(!_raw_spin_trylock(&lock->raw)) )
+    {
+        local_irq_enable();
+        while ( likely(_raw_spin_is_locked(&lock->raw)) )
+            cpu_relax();
+        local_irq_disable();
+    }
 }
 
 unsigned long _spin_lock_irqsave(spinlock_t *lock)
@@ -59,7 +68,13 @@ unsigned long _spin_lock_irqsave(spinloc
     unsigned long flags;
     local_irq_save(flags);
     check_lock(&lock->debug);
-    _raw_spin_lock(&lock->raw);
+    while ( unlikely(!_raw_spin_trylock(&lock->raw)) )
+    {
+        local_irq_restore(flags);
+        while ( likely(_raw_spin_is_locked(&lock->raw)) )
+            cpu_relax();
+        local_irq_save(flags);
+    }
     return flags;
 }
 
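The generic lock paths above now follow a test-and-test-and-set pattern: one atomic trylock attempt, then a read-only spin with cpu_relax() until the lock looks free before trying again. In the IRQ-disabling variants, interrupts are re-enabled (or the saved flags restored) for the duration of the read-only spin, so IRQ latency under contention is bounded by the trylock attempt rather than the whole wait. A minimal standalone sketch of the same pattern, using GCC builtins in place of Xen's _raw_spin_* layer (the demo_* names are illustrative, not Xen's):

    /* Test-and-test-and-set spinlock sketch (GCC builtins, not Xen's
     * _raw_spin_* layer).  The atomic exchange is attempted only when a
     * plain read suggests the lock is free, so waiters keep the lock's
     * cache line in a shared state instead of bouncing it. */
    typedef struct { volatile int lock; } demo_spinlock_t;

    static int demo_trylock(demo_spinlock_t *l)
    {
        /* Atomically set lock to 1; old value 0 means we acquired it. */
        return __sync_lock_test_and_set(&l->lock, 1) == 0;
    }

    static void demo_lock(demo_spinlock_t *l)
    {
        while (!demo_trylock(l))         /* one atomic attempt...        */
            while (l->lock)              /* ...then spin on plain reads  */
                __builtin_ia32_pause();  /* x86 'pause', like cpu_relax() */
    }

    static void demo_unlock(demo_spinlock_t *l)
    {
        __sync_lock_release(&l->lock);   /* store 0 with release semantics */
    }

An IRQ-safe variant would wrap the inner read-only loop with local_irq_enable()/local_irq_disable(), exactly as _spin_lock_irq() does above.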
--- a/xen/include/asm-ia64/linux-xen/asm/spinlock.h	Tue Mar 31 14:04:50 2009 +0100
+++ b/xen/include/asm-ia64/linux-xen/asm/spinlock.h	Tue Mar 31 15:03:59 2009 +0100
@@ -21,111 +21,9 @@
 
 typedef struct {
 	volatile unsigned int lock;
-#ifdef CONFIG_PREEMPT
-	unsigned int break_lock;
-#endif
-#ifdef DEBUG_SPINLOCK
-	void *locker;
-#endif
 } raw_spinlock_t;
 
-#ifdef XEN
-#ifdef DEBUG_SPINLOCK
-#define _RAW_SPIN_LOCK_UNLOCKED	/*(raw_spinlock_t)*/ { 0, NULL }
-#else
-#define _RAW_SPIN_LOCK_UNLOCKED	/*(raw_spinlock_t)*/ { 0 }
-#endif
-#else
 #define _RAW_SPIN_LOCK_UNLOCKED	/*(raw_spinlock_t)*/ { 0 }
-#endif
-
-#ifdef ASM_SUPPORTED
-/*
- * Try to get the lock.  If we fail to get the lock, make a non-standard call to
- * ia64_spinlock_contention().  We do not use a normal call because that would force all
- * callers of spin_lock() to be non-leaf routines.  Instead, ia64_spinlock_contention() is
- * carefully coded to touch only those registers that spin_lock() marks "clobbered".
- */
-
-#define IA64_SPINLOCK_CLOBBERS "ar.ccv", "ar.pfs", "p14", "p15", "r27", "r28", "r29", "r30", "b6", "memory"
-
-static inline void
-_raw_spin_lock_flags (raw_spinlock_t *lock, unsigned long flags)
-{
-	register volatile unsigned int *ptr asm ("r31") = &lock->lock;
-
-#if __GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
-# ifdef CONFIG_ITANIUM
-	/* don't use brl on Itanium... */
-	asm volatile ("{\n\t"
-		      "  mov ar.ccv = r0\n\t"
-		      "  mov r28 = ip\n\t"
-		      "  mov r30 = 1;;\n\t"
-		      "}\n\t"
-		      "cmpxchg4.acq r30 = [%1], r30, ar.ccv\n\t"
-		      "movl r29 = ia64_spinlock_contention_pre3_4;;\n\t"
-		      "cmp4.ne p14, p0 = r30, r0\n\t"
-		      "mov b6 = r29;;\n\t"
-		      "mov r27=%2\n\t"
-		      "(p14) br.cond.spnt.many b6"
-		      : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
-# else
-	asm volatile ("{\n\t"
-		      "  mov ar.ccv = r0\n\t"
-		      "  mov r28 = ip\n\t"
-		      "  mov r30 = 1;;\n\t"
-		      "}\n\t"
-		      "cmpxchg4.acq r30 = [%1], r30, ar.ccv;;\n\t"
-		      "cmp4.ne p14, p0 = r30, r0\n\t"
-		      "mov r27=%2\n\t"
-		      "(p14) brl.cond.spnt.many ia64_spinlock_contention_pre3_4;;"
-		      : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
-# endif /* CONFIG_MCKINLEY */
-#else
-# ifdef CONFIG_ITANIUM
-	/* don't use brl on Itanium... */
-	/* mis-declare, so we get the entry-point, not it's function descriptor: */
-	asm volatile ("mov r30 = 1\n\t"
-		      "mov r27=%2\n\t"
-		      "mov ar.ccv = r0;;\n\t"
-		      "cmpxchg4.acq r30 = [%0], r30, ar.ccv\n\t"
-		      "movl r29 = ia64_spinlock_contention;;\n\t"
-		      "cmp4.ne p14, p0 = r30, r0\n\t"
-		      "mov b6 = r29;;\n\t"
-		      "(p14) br.call.spnt.many b6 = b6"
-		      : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
-# else
-	asm volatile ("mov r30 = 1\n\t"
-		      "mov r27=%2\n\t"
-		      "mov ar.ccv = r0;;\n\t"
-		      "cmpxchg4.acq r30 = [%0], r30, ar.ccv;;\n\t"
-		      "cmp4.ne p14, p0 = r30, r0\n\t"
-		      "(p14) brl.call.spnt.many b6=ia64_spinlock_contention;;"
-		      : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
-# endif /* CONFIG_MCKINLEY */
-#endif
-
-#ifdef DEBUG_SPINLOCK
-	asm volatile ("mov %0=ip" : "=r" (lock->locker));
-#endif
-}
-#define _raw_spin_lock(lock) _raw_spin_lock_flags(lock, 0)
-#else /* !ASM_SUPPORTED */
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
-# define _raw_spin_lock(x)								\
-do {											\
-	__u32 *ia64_spinlock_ptr = (__u32 *) (x);					\
-	__u64 ia64_spinlock_val;							\
-	ia64_spinlock_val = ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0);			\
-	if (unlikely(ia64_spinlock_val)) {						\
-		do {									\
-			while (*ia64_spinlock_ptr)					\
-				ia64_barrier();						\
-			ia64_spinlock_val = ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0);	\
-		} while (ia64_spinlock_val);						\
-	}										\
-} while (0)
-#endif /* !ASM_SUPPORTED */
 
 #define _raw_spin_is_locked(x)	((x)->lock != 0)
 #define _raw_spin_unlock(x)	do { barrier(); (x)->lock = 0; } while (0)
@@ -134,9 +32,6 @@ do {											\
 typedef struct {
 	volatile unsigned int read_counter	: 31;
 	volatile unsigned int write_lock	:  1;
-#ifdef CONFIG_PREEMPT
-	unsigned int break_lock;
-#endif
 } raw_rwlock_t;
 #define _RAW_RW_LOCK_UNLOCKED /*(raw_rwlock_t)*/ { 0, 0 }
 
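With the hand-coded ia64 lock paths gone, the generic code needs only a trylock plus the _raw_spin_is_locked()/_raw_spin_unlock() macros retained above. The header's actual _raw_spin_trylock is outside this hunk, but the removed ASM_SUPPORTED fallback already shows the primitive involved; as a hedged sketch, a trylock on ia64 could be built from the same ia64_cmpxchg4_acq:

    /* Hypothetical sketch only -- the real _raw_spin_trylock in this
     * header is not part of the hunk shown.  cmpxchg4.acq stores 1 only
     * if the lock word is currently 0 and returns the old value, so a
     * result of 0 means the lock was taken with acquire semantics. */
    static inline int demo_raw_spin_trylock(raw_spinlock_t *x)
    {
    	return ia64_cmpxchg4_acq((__u32 *)&x->lock, 1, 0) == 0;
    }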
--- a/xen/include/asm-x86/spinlock.h	Tue Mar 31 14:04:50 2009 +0100
+++ b/xen/include/asm-x86/spinlock.h	Tue Mar 31 15:03:59 2009 +0100
@@ -13,19 +13,6 @@ typedef struct {
 
 #define _raw_spin_is_locked(x) ((x)->lock <= 0)
 
-static always_inline void _raw_spin_lock(raw_spinlock_t *lock)
-{
-    asm volatile (
-        "1:  lock; decw %0         \n"
-        "    jns 3f                \n"
-        "2:  rep; nop              \n"
-        "    cmpw $0,%0            \n"
-        "    jle 2b                \n"
-        "    jmp 1b                \n"
-        "3:"
-        : "=m" (lock->lock) : : "memory" );
-}
-
 static always_inline void _raw_spin_unlock(raw_spinlock_t *lock)
 {
     ASSERT(_raw_spin_is_locked(lock));
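The x86 asm fast path is removed for the same reason: the generic _spin_lock() family now builds on _raw_spin_trylock(), which stays in this header. Judging from the retained _raw_spin_is_locked() (lock word <= 0 means held) and the decw/cmpw in the deleted code (a 16-bit lock word, positive when free), a trylock consistent with those semantics might look like the following sketch (illustrative, not the header's actual code):

    /* Illustrative only -- the header's real _raw_spin_trylock is not
     * part of this hunk.  xchg atomically drives the 16-bit lock word
     * to 0 (held); a positive old value means the lock was free and we
     * now own it.  xchg with a memory operand is implicitly locked. */
    static inline int demo_raw_spin_trylock(raw_spinlock_t *lock)
    {
        short oldval;
        asm volatile (
            "xchgw %w0, %1"
            : "=r" (oldval), "+m" (lock->lock)
            : "0" ((short)0)
            : "memory" );
        return oldval > 0;
    }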