ia64/xen-unstable

changeset 2373:6c947ee660be

bitkeeper revision 1.1159.59.1 (412f52e5pzGsSRKxWkXlLmoWzjYc7g)

Add debugging for locked critical regions. This allows us to assert that
certain things do not happen while inside a critical region: currently
these include taking page faults and general protection faults (GPFs),
and we also disallow use of the user-space access macros (uaccess.h).
author kaf24@freefall.cl.cam.ac.uk
date Fri Aug 27 15:27:33 2004 +0000 (2004-08-27)
parents ac03da8a0f29
children 1d22184ac072
files xen/arch/x86/nmi.c xen/arch/x86/traps.c xen/common/kernel.c xen/include/asm-x86/spinlock.h xen/include/asm-x86/x86_32/uaccess.h xen/include/xen/spinlock.h
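
As a rough, stand-alone illustration of the mechanism this changeset introduces: the names mirror the hypervisor code in the diff below, but the per-CPU array is collapsed to a single counter and Xen's ASSERT is replaced by assert(3), so this is a sketch of the idea, not Xen code.

    #include <assert.h>
    #include <stdio.h>

    /* Models crit_count[smp_processor_id()] and crit_checking from kernel.c. */
    static int crit_count;
    static int crit_checking = 1;

    static void criticalregion_enter(void) { assert(crit_count >= 0); crit_count++; }
    static void criticalregion_exit(void)  { crit_count--; assert(crit_count >= 0); }

    static void ASSERT_no_criticalregion(void)
    {
        if ( (crit_count == 0) || !crit_checking )
            return;
        fprintf(stderr, "fault/uaccess attempted with %d lock(s) held\n", crit_count);
        assert(crit_count == 0);   /* mirrors the ASSERT that fires in the hypervisor */
    }

    int main(void)
    {
        ASSERT_no_criticalregion();  /* fine: no lock held */
        criticalregion_enter();      /* what the debug spin_lock() wrapper does */
        ASSERT_no_criticalregion();  /* aborts: a page fault or uaccess under a lock is caught here */
        criticalregion_exit();       /* not reached */
        return 0;
    }

Running the sketch aborts on the second check, which is exactly the class of bug (faulting or touching user-space memory while a spinlock is held) that the new assertions are meant to flag.
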
line diff
     1.1 --- a/xen/arch/x86/nmi.c	Fri Aug 27 13:41:23 2004 +0000
     1.2 +++ b/xen/arch/x86/nmi.c	Fri Aug 27 15:27:33 2004 +0000
     1.3 @@ -288,6 +288,7 @@ void nmi_watchdog_tick (struct pt_regs *
     1.4          if ( alert_counter[cpu] == 5*nmi_hz )
     1.5          {
     1.6              console_force_unlock();
     1.7 +            disable_criticalregion_checking();
     1.8              die("NMI Watchdog detected LOCKUP on CPU", regs, cpu);
     1.9          }
    1.10      } 
     2.1 --- a/xen/arch/x86/traps.c	Fri Aug 27 13:41:23 2004 +0000
     2.2 +++ b/xen/arch/x86/traps.c	Fri Aug 27 15:27:33 2004 +0000
     2.3 @@ -315,6 +315,8 @@ asmlinkage void do_page_fault(struct pt_
     2.4  
     2.5      perfc_incrc(page_faults);
     2.6  
     2.7 +    ASSERT_no_criticalregion();
     2.8 +
     2.9      if ( unlikely(addr >= LDT_VIRT_START) && 
    2.10           (addr < (LDT_VIRT_START + (d->mm.ldt_ents*LDT_ENTRY_SIZE))) )
    2.11      {
    2.12 @@ -413,6 +415,8 @@ asmlinkage void do_general_protection(st
    2.13      trap_info_t *ti;
    2.14      unsigned long fixup;
    2.15  
    2.16 +    ASSERT_no_criticalregion();
    2.17 +
    2.18      /* Badness if error in ring 0, or result of an interrupt. */
    2.19      if ( !(regs->xcs & 3) || (error_code & 1) )
    2.20          goto gp_in_kernel;
    2.21 @@ -493,6 +497,7 @@ asmlinkage void do_general_protection(st
    2.22  asmlinkage void mem_parity_error(struct pt_regs *regs)
    2.23  {
    2.24      console_force_unlock();
    2.25 +    disable_criticalregion_checking();
    2.26  
    2.27      printk("\n\n");
    2.28  
    2.29 @@ -513,6 +518,7 @@ asmlinkage void mem_parity_error(struct 
    2.30  asmlinkage void io_check_error(struct pt_regs *regs)
    2.31  {
    2.32      console_force_unlock();
    2.33 +    disable_criticalregion_checking();
    2.34  
    2.35      printk("\n\n");
    2.36  
     3.1 --- a/xen/common/kernel.c	Fri Aug 27 13:41:23 2004 +0000
     3.2 +++ b/xen/common/kernel.c	Fri Aug 27 15:27:33 2004 +0000
     3.3 @@ -389,3 +389,44 @@ long do_ni_hypercall(void)
     3.4      /* No-op hypercall. */
     3.5      return -ENOSYS;
     3.6  }
     3.7 +
     3.8 +/*
     3.9 + * Lock debugging
    3.10 + */
    3.11 +
    3.12 +#ifndef NDEBUG
    3.13 +
    3.14 +static int crit_count[NR_CPUS];
    3.15 +static int crit_checking = 1;
    3.16 +
    3.17 +void disable_criticalregion_checking(void)
    3.18 +{
    3.19 +    crit_checking = 0;
    3.20 +}
    3.21 +
    3.22 +void criticalregion_enter(void)
    3.23 +{
    3.24 +    int cpu = smp_processor_id();
    3.25 +    ASSERT(crit_count[cpu] >= 0);
    3.26 +    crit_count[cpu]++;
    3.27 +}
    3.28 +
    3.29 +void criticalregion_exit(void)
    3.30 +{
    3.31 +    int cpu = smp_processor_id();
    3.32 +    crit_count[cpu]--;
    3.33 +    ASSERT(crit_count[cpu] >= 0);
    3.34 +}
    3.35 +
    3.36 +void ASSERT_no_criticalregion(void)
    3.37 +{
    3.38 +    int cpu = smp_processor_id();
    3.39 +    if ( (crit_count[cpu] == 0) || !crit_checking )
    3.40 +        return;
    3.41 +    disable_criticalregion_checking();
    3.42 +    ASSERT(crit_count[cpu] >= 0); /* -ve count is a special kind of bogus! */
    3.43 +    ASSERT(crit_count[cpu] == 0); /* we should definitely take this path   */
     3.44 +    ASSERT(0); /* NEVER GET HERE! */
    3.45 +}
    3.46 +
    3.47 +#endif /* !NDEBUG */
     4.1 --- a/xen/include/asm-x86/spinlock.h	Fri Aug 27 13:41:23 2004 +0000
     4.2 +++ b/xen/include/asm-x86/spinlock.h	Fri Aug 27 15:27:33 2004 +0000
     4.3 @@ -17,7 +17,7 @@ typedef struct {
     4.4  #define spin_lock_init(x)	do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
     4.5  #define spin_is_locked(x)	(*(volatile char *)(&(x)->lock) <= 0)
     4.6  
     4.7 -static inline void spin_lock(spinlock_t *lock)
     4.8 +static inline void _raw_spin_lock(spinlock_t *lock)
     4.9  {
    4.10      __asm__ __volatile__ (
    4.11          "1:  lock; decb %0         \n"
    4.12 @@ -31,7 +31,7 @@ static inline void spin_lock(spinlock_t 
    4.13          : "=m" (lock->lock) : : "memory" );
    4.14  }
    4.15  
    4.16 -static inline void spin_unlock(spinlock_t *lock)
    4.17 +static inline void _raw_spin_unlock(spinlock_t *lock)
    4.18  {
    4.19  #if !defined(CONFIG_X86_OOSTORE)
    4.20      ASSERT(spin_is_locked(lock));
    4.21 @@ -47,7 +47,7 @@ static inline void spin_unlock(spinlock_
    4.22  #endif
    4.23  }
    4.24  
    4.25 -static inline int spin_trylock(spinlock_t *lock)
    4.26 +static inline int _raw_spin_trylock(spinlock_t *lock)
    4.27  {
    4.28      char oldval;
    4.29      __asm__ __volatile__(
    4.30 @@ -64,7 +64,7 @@ static inline int spin_trylock(spinlock_
    4.31   * are any critical regions that cannot form part of such a set, they can use
    4.32   * standard spin_[un]lock().
    4.33   */
    4.34 -#define spin_lock_recursive(_lock)                 \
    4.35 +#define _raw_spin_lock_recursive(_lock)            \
    4.36      do {                                           \
    4.37          int cpu = smp_processor_id();              \
    4.38          if ( likely((_lock)->recurse_cpu != cpu) ) \
    4.39 @@ -75,7 +75,7 @@ static inline int spin_trylock(spinlock_
    4.40          (_lock)->recurse_cnt++;                    \
    4.41      } while ( 0 )
    4.42  
    4.43 -#define spin_unlock_recursive(_lock)               \
    4.44 +#define _raw_spin_unlock_recursive(_lock)          \
    4.45      do {                                           \
    4.46          if ( likely(--(_lock)->recurse_cnt == 0) ) \
    4.47          {                                          \
    4.48 @@ -97,32 +97,23 @@ typedef struct {
    4.49   * On x86, we implement read-write locks as a 32-bit counter
    4.50   * with the high bit (sign) being the "contended" bit.
    4.51   */
    4.52 -static inline void read_lock(rwlock_t *rw)
    4.53 +static inline void _raw_read_lock(rwlock_t *rw)
    4.54  {
    4.55      __build_read_lock(rw, "__read_lock_failed");
    4.56  }
    4.57  
    4.58 -static inline void write_lock(rwlock_t *rw)
    4.59 +static inline void _raw_write_lock(rwlock_t *rw)
    4.60  {
    4.61      __build_write_lock(rw, "__write_lock_failed");
    4.62  }
    4.63  
    4.64 -#define read_unlock(rw)                            \
    4.65 +#define _raw_read_unlock(rw)                       \
    4.66      __asm__ __volatile__ (                         \
    4.67          "lock ; incl %0" :                         \
    4.68          "=m" ((rw)->lock) : : "memory" )
    4.69 -#define write_unlock(rw)                           \
    4.70 +#define _raw_write_unlock(rw)                      \
    4.71      __asm__ __volatile__ (                         \
    4.72          "lock ; addl $" RW_LOCK_BIAS_STR ",%0" :   \
    4.73          "=m" ((rw)->lock) : : "memory" )
    4.74  
    4.75 -static inline int write_trylock(rwlock_t *lock)
    4.76 -{
    4.77 -    atomic_t *count = (atomic_t *)lock;
    4.78 -    if ( atomic_sub_and_test(RW_LOCK_BIAS, count) )
    4.79 -        return 1;
    4.80 -    atomic_add(RW_LOCK_BIAS, count);
    4.81 -    return 0;
    4.82 -}
    4.83 -
    4.84  #endif /* __ASM_SPINLOCK_H */
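
The read-write lock comment in the hunk above ("a 32-bit counter with the high bit (sign) being the 'contended' bit") is terse. The following stand-alone sketch models the biased-counter idea behind __build_read_lock/__build_write_lock and the removed write_trylock(): the counter starts at RW_LOCK_BIAS, readers subtract 1 and fail if the result goes negative, and a writer subtracts the whole bias and fails unless the result is exactly zero. The model_* names and the single-threaded trylock structure are illustrative only, not the Xen assembly.

    #include <assert.h>

    #define RW_LOCK_BIAS 0x01000000   /* the standard x86 rwlock bias value */

    static int counter = RW_LOCK_BIAS;

    /* Reader: take one unit; fail (and put it back) if a writer holds the lock. */
    static int model_read_trylock(void)  { if (--counter < 0) { counter++; return 0; } return 1; }
    static void model_read_unlock(void)  { counter++; }

    /* Writer: take the whole bias; succeeds only if no readers or writer were present. */
    static int model_write_trylock(void)
    {
        counter -= RW_LOCK_BIAS;
        if (counter != 0) { counter += RW_LOCK_BIAS; return 0; }
        return 1;
    }
    static void model_write_unlock(void) { counter += RW_LOCK_BIAS; }

    int main(void)
    {
        assert(model_read_trylock());     /* one reader in */
        assert(!model_write_trylock());   /* writer fails while a reader holds it */
        model_read_unlock();
        assert(model_write_trylock());    /* writer now gets it exclusively */
        assert(!model_read_trylock());    /* reader fails while the writer holds it */
        model_write_unlock();
        return 0;
    }
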
     5.1 --- a/xen/include/asm-x86/x86_32/uaccess.h	Fri Aug 27 13:41:23 2004 +0000
     5.2 +++ b/xen/include/asm-x86/x86_32/uaccess.h	Fri Aug 27 15:27:33 2004 +0000
     5.3 @@ -243,6 +243,7 @@ struct __large_struct { unsigned long bu
     5.4   * aliasing issues.
     5.5   */
     5.6  #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
     5.7 +	ASSERT_no_criticalregion();					\
     5.8  	__asm__ __volatile__(						\
     5.9  		"1:	mov"itype" %"rtype"1,%2\n"			\
    5.10  		"2:\n"							\
    5.11 @@ -291,6 +292,7 @@ do {									\
    5.12  } while (0)
    5.13  
    5.14  #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
    5.15 +	ASSERT_no_criticalregion();					\
    5.16  	__asm__ __volatile__(						\
    5.17  		"1:	mov"itype" %2,%"rtype"1\n"			\
    5.18  		"2:\n"							\
    5.19 @@ -334,6 +336,7 @@ unsigned long __copy_from_user_ll(void *
    5.20  static always_inline unsigned long
    5.21  __copy_to_user(void __user *to, const void *from, unsigned long n)
    5.22  {
    5.23 +	ASSERT_no_criticalregion();
    5.24  	if (__builtin_constant_p(n)) {
    5.25  		unsigned long ret;
    5.26  
    5.27 @@ -372,6 +375,7 @@ static always_inline unsigned long
    5.28  static always_inline unsigned long
    5.29  __copy_from_user(void *to, const void __user *from, unsigned long n)
    5.30  {
    5.31 +	ASSERT_no_criticalregion();
    5.32  	if (__builtin_constant_p(n)) {
    5.33  		unsigned long ret;
    5.34  
     6.1 --- a/xen/include/xen/spinlock.h	Fri Aug 27 13:41:23 2004 +0000
     6.2 +++ b/xen/include/xen/spinlock.h	Fri Aug 27 15:27:33 2004 +0000
     6.3 @@ -48,14 +48,13 @@ typedef struct { int gcc_is_buggy; } spi
     6.4  #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
     6.5  #endif
     6.6  
     6.7 -#define spin_lock_init(lock)    do { } while(0)
     6.8 -#define spin_lock(lock)         (void)(lock) /* Not "unused variable". */
     6.9 -#define spin_is_locked(lock)    (0)
    6.10 -#define spin_trylock(lock)      ({1; })
    6.11 -#define spin_unlock_wait(lock)  do { } while(0)
    6.12 -#define spin_unlock(lock)       do { } while(0)
    6.13 -#define spin_lock_recursive(lock)   do { } while(0)
    6.14 -#define spin_unlock_recursive(lock) do { } while(0)
    6.15 +#define spin_lock_init(lock)             do { } while(0)
    6.16 +#define spin_is_locked(lock)             (0)
    6.17 +#define _raw_spin_lock(lock)             (void)(lock)
    6.18 +#define _raw_spin_trylock(lock)          ({1; })
    6.19 +#define _raw_spin_unlock(lock)           do { } while(0)
    6.20 +#define _raw_spin_lock_recursive(lock)   do { } while(0)
    6.21 +#define _raw_spin_unlock_recursive(lock) do { } while(0)
    6.22  
    6.23  #if (__GNUC__ > 2)
    6.24  typedef struct { } rwlock_t;
    6.25 @@ -65,11 +64,63 @@ typedef struct { int gcc_is_buggy; } rwl
    6.26  #define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
    6.27  #endif
    6.28  
    6.29 -#define rwlock_init(lock)       do { } while(0)
    6.30 -#define read_lock(lock)         (void)(lock) /* Not "unused variable". */
    6.31 -#define read_unlock(lock)       do { } while(0)
    6.32 -#define write_lock(lock)        (void)(lock) /* Not "unused variable". */
    6.33 -#define write_unlock(lock)      do { } while(0)
    6.34 +#define rwlock_init(lock)            do { } while(0)
    6.35 +#define _raw_read_lock(lock)         (void)(lock) /* Not "unused variable". */
    6.36 +#define _raw_read_unlock(lock)       do { } while(0)
    6.37 +#define _raw_write_lock(lock)        (void)(lock) /* Not "unused variable". */
    6.38 +#define _raw_write_unlock(lock)      do { } while(0)
    6.39 +
    6.40 +#endif
    6.41 +
    6.42 +#ifndef NDEBUG
    6.43 +
    6.44 +extern void criticalregion_enter(void);
    6.45 +extern void criticalregion_exit(void);
    6.46 +extern void ASSERT_no_criticalregion(void);
    6.47 +extern void disable_criticalregion_checking(void);
    6.48 +
    6.49 +#define spin_lock(_lock) \
    6.50 +    do { criticalregion_enter(); _raw_spin_lock(_lock); } while (0)
    6.51 +#define spin_unlock(_lock) \
    6.52 +    do { _raw_spin_unlock(_lock); criticalregion_exit(); } while (0)
    6.53 +#define spin_lock_recursive(_lock) \
    6.54 +    do { criticalregion_enter(); _raw_spin_lock_recursive(_lock); } while (0)
    6.55 +#define spin_unlock_recursive(_lock) \
    6.56 +    do { _raw_spin_unlock_recursive(_lock); criticalregion_exit(); } while (0)
    6.57 +#define read_lock(_lock) \
    6.58 +    do { criticalregion_enter(); _raw_read_lock(_lock); } while (0)
    6.59 +#define read_unlock(_lock) \
    6.60 +    do { _raw_read_unlock(_lock); criticalregion_exit(); } while (0)
    6.61 +#define write_lock(_lock) \
    6.62 +    do { criticalregion_enter(); _raw_write_lock(_lock); } while (0)
    6.63 +#define write_unlock(_lock) \
    6.64 +    do { _raw_write_unlock(_lock); criticalregion_exit(); } while (0)
    6.65 +
    6.66 +static inline int spin_trylock(spinlock_t *lock)
    6.67 +{
    6.68 +    criticalregion_enter();
    6.69 +    if ( !_raw_spin_trylock(lock) )
    6.70 +    {
    6.71 +        criticalregion_exit();
    6.72 +        return 0;
    6.73 +    }
    6.74 +    return 1;
    6.75 +}
    6.76 +
    6.77 +#else
    6.78 +
    6.79 +#define ASSERT_no_criticalregion()        ((void)0)
    6.80 +#define disable_criticalregion_checking() ((void)0)
    6.81 +
    6.82 +#define spin_lock(_lock)             _raw_spin_lock(_lock)
    6.83 +#define spin_trylock(_lock)          _raw_spin_trylock(_lock)
    6.84 +#define spin_unlock(_lock)           _raw_spin_unlock(_lock)
    6.85 +#define spin_lock_recursive(_lock)   _raw_spin_lock_recursive(_lock)
    6.86 +#define spin_unlock_recursive(_lock) _raw_spin_unlock_recursive(_lock)
    6.87 +#define read_lock(_lock)             _raw_read_lock(_lock)
    6.88 +#define read_unlock(_lock)           _raw_read_unlock(_lock)
    6.89 +#define write_lock(_lock)            _raw_write_lock(_lock)
    6.90 +#define write_unlock(_lock)          _raw_write_unlock(_lock)
    6.91  
    6.92  #endif
    6.93
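
One detail of the debug wrappers above worth calling out: spin_trylock() is the only one with a branch, because a failed acquisition must undo the criticalregion_enter() so the per-CPU depth stays balanced. A stand-alone model of that pairing, using a single counter and a dummy lock (illustrative names, not Xen code):

    #include <assert.h>
    #include <stdbool.h>

    static int  crit_count;   /* models the per-CPU critical-region depth */
    static bool lock_held;    /* models the raw spinlock state */

    static bool raw_trylock(void) { if (lock_held) return false; lock_held = true; return true; }
    static void raw_unlock(void)  { lock_held = false; }

    static bool model_spin_trylock(void)
    {
        crit_count++;                 /* criticalregion_enter() */
        if ( !raw_trylock() )
        {
            crit_count--;             /* criticalregion_exit(): acquisition failed */
            return false;
        }
        return true;
    }

    static void model_spin_unlock(void)
    {
        raw_unlock();
        crit_count--;                 /* criticalregion_exit() */
    }

    int main(void)
    {
        assert(model_spin_trylock() && crit_count == 1);
        assert(!model_spin_trylock() && crit_count == 1);  /* failed attempt leaves the count balanced */
        model_spin_unlock();
        assert(crit_count == 0);
        return 0;
    }
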