ia64/xen-unstable

changeset 2993:0a6a455dfbd3

bitkeeper revision 1.1159.1.420 (419a49bbZ4HbvA39Bu6mtFy6qOsqZA)

Don't use sti/cli in SMP spinlock implementation.
author cl349@freefall.cl.cam.ac.uk
date Tue Nov 16 18:40:59 2004 +0000 (2004-11-16)
parents afd6ea676808
children 1f505eb80787
files .rootkeys linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/spinlock.h
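A note on the change: the native i386 spin_lock_string_flags loop briefly re-enables interrupts while it spins on a contended lock (when the caller's saved EFLAGS had the IF bit, 0x200, set) and disables them again before retrying. A paravirtualized Xen guest kernel runs unprivileged and must not execute sti/cli; Xen exposes a virtual interrupt-enable flag through its event-channel interface instead. In the copy of spinlock.h added below, the two instructions are therefore written as "#sti" and "#cli" -- '#' starts a comment in AT&T assembler syntax, so they assemble to nothing and the loop spins without ever toggling the interrupt flag. The sketch below restates the contended path in C to make the control flow easier to follow; it is illustrative only, with made-up demo_* names, and GCC __sync builtins plus __builtin_ia32_pause() standing in for the "lock ; decb" and "rep;nop" instructions:

	typedef struct { volatile signed char lock; } demo_spinlock_t;	/* 1 == free */

	static void demo_spin_lock_flags(demo_spinlock_t *l, unsigned long flags)
	{
		for (;;) {
			/* "lock ; decb %0": the old value was positive only
			 * if the lock was free, in which case the decrement
			 * took it ("jns 4f"). */
			if (__sync_fetch_and_sub(&l->lock, 1) > 0)
				return;
			if (flags & 0x200) {
				/* "#sti": with IF set on entry, native code
				 * would re-enable interrupts here; under Xen
				 * the instruction is commented out. */
			}
			/* "cmpb $0,%0 ; jle 3b": wait until the holder
			 * releases before trying the decrement again. */
			while (l->lock <= 0)
				__builtin_ia32_pause();	/* "rep;nop" */
			/* "#cli": native code would disable interrupts again
			 * before retrying; likewise commented out. */
		}
	}

The cost of commenting the instructions out is that a CPU may now spin on a contended lock with interrupts disabled for the whole wait, where native Linux would have opened an interrupt window; the alternative, executing the privileged sti/cli from the guest, is simply not available here.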
line diff
--- a/.rootkeys	Mon Nov 15 10:20:24 2004 +0000
+++ b/.rootkeys	Tue Nov 16 18:40:59 2004 +0000
@@ -234,6 +234,7 @@ 40f5623aPCkQQfPtJSooGdhcatrvnQ linux-2.6
 412ea0afQL2CAI-f522TbLjLPMibPQ linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/ptrace.h
 40f5623bzLvxr7WoJIxVf2OH4rCBJg linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/segment.h
 40f5623bG_LzgG6-qwk292nTc5Wabw linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/setup.h
+4198c32a8NzmcKVOzKaEJfaQxxiA0A linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/spinlock.h
 40f5623bgzm_9vwxpzJswlAxg298Gg linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/synch_bitops.h
 40f5623bVdKP7Dt7qm8twu3NcnGNbA linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/system.h
 40f5623bc8LKPRO09wY5dGDnY_YCpw linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/tlbflush.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/linux-2.6.9-xen-sparse/include/asm-xen/asm-i386/spinlock.h	Tue Nov 16 18:40:59 2004 +0000
@@ -0,0 +1,224 @@
+#ifndef __ASM_SPINLOCK_H
+#define __ASM_SPINLOCK_H
+
+#include <asm/atomic.h>
+#include <asm/rwlock.h>
+#include <asm/page.h>
+#include <linux/config.h>
+#include <linux/compiler.h>
+
+asmlinkage int printk(const char * fmt, ...)
+	__attribute__ ((format (printf, 1, 2)));
+
+/*
+ * Your basic SMP spinlocks, allowing only a single CPU anywhere
+ */
+
+typedef struct {
+	volatile unsigned int lock;
+#ifdef CONFIG_DEBUG_SPINLOCK
+	unsigned magic;
+#endif
+} spinlock_t;
+
+#define SPINLOCK_MAGIC	0xdead4ead
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+#define SPINLOCK_MAGIC_INIT	, SPINLOCK_MAGIC
+#else
+#define SPINLOCK_MAGIC_INIT	/* */
+#endif
+
+#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 SPINLOCK_MAGIC_INIT }
+
+#define spin_lock_init(x)	do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
+
+/*
+ * Simple spin lock operations.  There are two variants, one clears IRQ's
+ * on the local processor, one does not.
+ *
+ * We make no fairness assumptions. They have a cost.
+ */
+
+#define spin_is_locked(x)	(*(volatile signed char *)(&(x)->lock) <= 0)
+#define spin_unlock_wait(x)	do { barrier(); } while(spin_is_locked(x))
+
+#define spin_lock_string \
+	"\n1:\t" \
+	"lock ; decb %0\n\t" \
+	"jns 3f\n" \
+	"2:\t" \
+	"rep;nop\n\t" \
+	"cmpb $0,%0\n\t" \
+	"jle 2b\n\t" \
+	"jmp 1b\n" \
+	"3:\n\t"
+
+#define spin_lock_string_flags \
+	"\n1:\t" \
+	"lock ; decb %0\n\t" \
+	"jns 4f\n\t" \
+	"2:\t" \
+	"testl $0x200, %1\n\t" \
+	"jz 3f\n\t" \
+	"#sti\n\t" \
+	"3:\t" \
+	"rep;nop\n\t" \
+	"cmpb $0, %0\n\t" \
+	"jle 3b\n\t" \
+	"#cli\n\t" \
+	"jmp 1b\n" \
+	"4:\n\t"
+
+/*
+ * This works. Despite all the confusion.
+ * (except on PPro SMP or if we are using OOSTORE)
+ * (PPro errata 66, 92)
+ */
+
+#if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)
+
+#define spin_unlock_string \
+	"movb $1,%0" \
+		:"=m" (lock->lock) : : "memory"
+
+
+static inline void _raw_spin_unlock(spinlock_t *lock)
+{
+#ifdef CONFIG_DEBUG_SPINLOCK
+	BUG_ON(lock->magic != SPINLOCK_MAGIC);
+	BUG_ON(!spin_is_locked(lock));
+#endif
+	__asm__ __volatile__(
+		spin_unlock_string
+	);
+}
+
+#else
+
+#define spin_unlock_string \
+	"xchgb %b0, %1" \
+		:"=q" (oldval), "=m" (lock->lock) \
+		:"0" (oldval) : "memory"
+
+static inline void _raw_spin_unlock(spinlock_t *lock)
+{
+	char oldval = 1;
+#ifdef CONFIG_DEBUG_SPINLOCK
+	BUG_ON(lock->magic != SPINLOCK_MAGIC);
+	BUG_ON(!spin_is_locked(lock));
+#endif
+	__asm__ __volatile__(
+		spin_unlock_string
+	);
+}
+
+#endif
+
+static inline int _raw_spin_trylock(spinlock_t *lock)
+{
+	char oldval;
+	__asm__ __volatile__(
+		"xchgb %b0,%1"
+		:"=q" (oldval), "=m" (lock->lock)
+		:"0" (0) : "memory");
+	return oldval > 0;
+}
+
+static inline void _raw_spin_lock(spinlock_t *lock)
+{
+#ifdef CONFIG_DEBUG_SPINLOCK
+	if (unlikely(lock->magic != SPINLOCK_MAGIC)) {
+		printk("eip: %p\n", __builtin_return_address(0));
+		BUG();
+	}
+#endif
+	__asm__ __volatile__(
+		spin_lock_string
+		:"=m" (lock->lock) : : "memory");
+}
+
+static inline void _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
+{
+#ifdef CONFIG_DEBUG_SPINLOCK
+	if (unlikely(lock->magic != SPINLOCK_MAGIC)) {
+		printk("eip: %p\n", __builtin_return_address(0));
+		BUG();
+	}
+#endif
+	__asm__ __volatile__(
+		spin_lock_string_flags
+		:"=m" (lock->lock) : "r" (flags) : "memory");
+}
+
+/*
+ * Read-write spinlocks, allowing multiple readers
+ * but only one writer.
+ *
+ * NOTE! it is quite common to have readers in interrupts
+ * but no interrupt writers. For those circumstances we
+ * can "mix" irq-safe locks - any writer needs to get a
+ * irq-safe write-lock, but readers can get non-irqsafe
+ * read-locks.
+ */
+typedef struct {
+	volatile unsigned int lock;
+#ifdef CONFIG_DEBUG_SPINLOCK
+	unsigned magic;
+#endif
+} rwlock_t;
+
+#define RWLOCK_MAGIC	0xdeaf1eed
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+#define RWLOCK_MAGIC_INIT	, RWLOCK_MAGIC
+#else
+#define RWLOCK_MAGIC_INIT	/* */
+#endif
+
+#define RW_LOCK_UNLOCKED (rwlock_t) { RW_LOCK_BIAS RWLOCK_MAGIC_INIT }
+
+#define rwlock_init(x)	do { *(x) = RW_LOCK_UNLOCKED; } while(0)
+
+#define rwlock_is_locked(x) ((x)->lock != RW_LOCK_BIAS)
+
+/*
+ * On x86, we implement read-write locks as a 32-bit counter
+ * with the high bit (sign) being the "contended" bit.
+ *
+ * The inline assembly is non-obvious. Think about it.
+ *
+ * Changed to use the same technique as rw semaphores.  See
+ * semaphore.h for details.  -ben
+ */
+/* the spinlock helpers are in arch/i386/kernel/semaphore.c */
+
+static inline void _raw_read_lock(rwlock_t *rw)
+{
+#ifdef CONFIG_DEBUG_SPINLOCK
+	BUG_ON(rw->magic != RWLOCK_MAGIC);
+#endif
+	__build_read_lock(rw, "__read_lock_failed");
+}
+
+static inline void _raw_write_lock(rwlock_t *rw)
+{
+#ifdef CONFIG_DEBUG_SPINLOCK
+	BUG_ON(rw->magic != RWLOCK_MAGIC);
+#endif
+	__build_write_lock(rw, "__write_lock_failed");
+}
+
+#define _raw_read_unlock(rw)		asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory")
+#define _raw_write_unlock(rw)	asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")
+
+static inline int _raw_write_trylock(rwlock_t *lock)
+{
+	atomic_t *count = (atomic_t *)lock;
+	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
+		return 1;
+	atomic_add(RW_LOCK_BIAS, count);
+	return 0;
+}
+
+#endif /* __ASM_SPINLOCK_H */
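
The trylock and unlock fast paths above are unchanged from native Linux, since neither touches the interrupt flag. Two details, both drawn from the file's own comments: trylock grabs the lock with xchgb, which is implicitly locked on x86, and unlock is a plain "movb $1" store, which needs no lock prefix because x86 does not reorder stores with earlier stores, except on PPro SMP (errata 66, 92) or CONFIG_X86_OOSTORE builds, where the xchgb variant of unlock is compiled in instead. A C model under the same assumptions (and demo_* naming) as the sketch near the top of this page:

	static int demo_spin_trylock(demo_spinlock_t *l)
	{
		/* "xchgb %b0,%1": atomically swap in 0 (locked); the old
		 * value says whether the lock was free (1) or not (<= 0). */
		signed char old = __sync_lock_test_and_set(&l->lock, 0);
		return old > 0;
	}

	static void demo_spin_unlock(demo_spinlock_t *l)
	{
		__asm__ __volatile__("" ::: "memory");	/* compiler barrier */
		l->lock = 1;	/* "movb $1,%0": plain store; no lock prefix
				 * needed where stores stay ordered */
	}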
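
The read-write locks use the biased-counter trick the header comment calls "non-obvious": the lock word starts at RW_LOCK_BIAS, each reader subtracts 1 and each writer subtracts the whole bias, so the counter is exactly 0 only when one writer and no readers hold the lock, and its sign bit serves as the "contended" flag. The sketch below models _raw_write_trylock with made-up demo_* names and GCC __sync builtins standing in for the lock-prefixed instructions; it assumes RW_LOCK_BIAS is 0x01000000, its value in the stock i386 headers of this era:

	#define DEMO_RW_BIAS 0x01000000	/* stand-in for RW_LOCK_BIAS */

	typedef struct { volatile int count; } demo_rwlock_t;	/* starts at DEMO_RW_BIAS */

	static int demo_write_trylock(demo_rwlock_t *rw)
	{
		/* "lock ; subl": subtracting the whole bias leaves 0 only
		 * when no reader (each holds 1) and no other writer
		 * (holding the bias) is present. */
		if (__sync_sub_and_fetch(&rw->count, DEMO_RW_BIAS) == 0)
			return 1;
		/* Contended: restore the bias and fail, just as the
		 * atomic_add() above undoes atomic_sub_and_test(). */
		__sync_add_and_fetch(&rw->count, DEMO_RW_BIAS);
		return 0;
	}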