static atomic_t spin_debug __read_mostly = ATOMIC_INIT(0);
-static void check_lock(struct lock_debug *debug)
+static void check_lock(union lock_debug *debug)
{
- int irq_safe = !local_irq_is_enabled();
+ bool irq_safe = !local_irq_is_enabled();
+
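+ /*
+  * The anonymous padding bitfield in union lock_debug must have a
+  * positive width: a zero-width bitfield would start a new storage
+  * unit and break the 16-bit layout overlaying val.
+  */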
+ BUILD_BUG_ON(LOCK_DEBUG_PAD_BITS <= 0);
if ( unlikely(atomic_read(&spin_debug) <= 0) )
return;
*/
if ( unlikely(debug->irq_safe != irq_safe) )
{
- int seen = cmpxchg(&debug->irq_safe, -1, irq_safe);
+ union lock_debug seen, new = { 0 };
+
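+ /*
+  * The recorded IRQ-safety differs from the current one: either the
+  * lock is still in its initial "unseen" state, or callers genuinely
+  * disagree.  Atomically replace the initial value with the state
+  * observed now; seen holds whatever was recorded before.
+  */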
+ new.irq_safe = irq_safe;
+ seen.val = cmpxchg(&debug->val, LOCK_DEBUG_INITVAL, new.val);
- if ( seen == !irq_safe )
+ if ( !seen.unseen && seen.irq_safe == !irq_safe )
{
printk("CHECKLOCK FAILURE: prev irqsafe: %d, curr irqsafe %d\n",
- seen, irq_safe);
+ seen.irq_safe, irq_safe);
BUG();
}
}
}
-static void check_barrier(struct lock_debug *debug)
+static void check_barrier(union lock_debug *debug)
{
if ( unlikely(atomic_read(&spin_debug) <= 0) )
return;
* However, if we spin on an IRQ-unsafe lock with IRQs disabled then that
* is clearly wrong, for the same reason outlined in check_lock() above.
*/
- BUG_ON(!local_irq_is_enabled() && (debug->irq_safe == 0));
+ BUG_ON(!local_irq_is_enabled() && !debug->irq_safe);
+}
+
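+ /* Record the CPU now holding the lock. */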
+static void got_lock(union lock_debug *debug)
+{
+ debug->cpu = smp_processor_id();
+}
+
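+ /* Only the CPU recorded as holder may release the lock. */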
+static void rel_lock(union lock_debug *debug)
+{
+ ASSERT(debug->cpu == smp_processor_id());
+ debug->cpu = SPINLOCK_NO_CPU;
}
void spin_debug_enable(void)
#define check_lock(l) ((void)0)
#define check_barrier(l) ((void)0)
+#define got_lock(l) ((void)0)
+#define rel_lock(l) ((void)0)
#endif
cb(data);
arch_lock_relax();
}
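+ /* Lock acquired: record this CPU as the holder for the debug checks. */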
+ got_lock(&lock->debug);
LOCK_PROFILE_GOT;
preempt_disable();
arch_lock_acquire_barrier();
arch_lock_release_barrier();
preempt_enable();
LOCK_PROFILE_REL;
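+ /* Drop the holder record before the ticket head is advanced. */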
+ rel_lock(&lock->debug);
add_sized(&lock->tickets.head, 1);
arch_lock_signal();
}
if ( cmpxchg(&lock->tickets.head_tail,
old.head_tail, new.head_tail) != old.head_tail )
return 0;
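+ /* The cmpxchg succeeded: the lock is held, record the holder. */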
+ got_lock(&lock->debug);
#ifdef CONFIG_LOCK_PROFILE
if (lock->profile)
lock->profile->time_locked = NOW();
/* Don't allow overflow of recurse_cpu field. */
BUILD_BUG_ON(NR_CPUS > SPINLOCK_NO_CPU);
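+ /* And require a recursion counter of at least 3 bits. */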
+ BUILD_BUG_ON(SPINLOCK_RECURSE_BITS < 3);
check_lock(&lock->debug);
#include <asm/spinlock.h>
#include <asm/types.h>
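+/* Width of the CPU-id bitfields below (recurse_cpu, debug cpu). */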
+#define SPINLOCK_CPU_BITS 12
+
#ifndef NDEBUG
-struct lock_debug {
- s16 irq_safe; /* +1: IRQ-safe; 0: not IRQ-safe; -1: don't know yet */
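+/*
+ * Lock debug state, accessible as a whole via val so it can be updated
+ * atomically.  The initial value flags the lock as "unseen": no owning
+ * cpu and the IRQ-safety not yet established.
+ */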
+union lock_debug {
+ uint16_t val;
+#define LOCK_DEBUG_INITVAL 0xffff
+ struct {
+ uint16_t cpu:SPINLOCK_CPU_BITS;
+#define LOCK_DEBUG_PAD_BITS (14 - SPINLOCK_CPU_BITS)
+ uint16_t :LOCK_DEBUG_PAD_BITS;
+ bool irq_safe:1;
+ bool unseen:1;
+ };
};
-#define _LOCK_DEBUG { -1 }
+#define _LOCK_DEBUG { LOCK_DEBUG_INITVAL }
void spin_debug_enable(void);
void spin_debug_disable(void);
#else
-struct lock_debug { };
+union lock_debug { };
#define _LOCK_DEBUG { }
#define spin_debug_enable() ((void)0)
#define spin_debug_disable() ((void)0)
typedef struct spinlock {
spinlock_tickets_t tickets;
- u16 recurse_cpu:12;
-#define SPINLOCK_NO_CPU 0xfffu
- u16 recurse_cnt:4;
-#define SPINLOCK_MAX_RECURSE 0xfu
- struct lock_debug debug;
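+ /* CPU holding the lock via _spin_lock_recursive(), or SPINLOCK_NO_CPU. */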
+ u16 recurse_cpu:SPINLOCK_CPU_BITS;
+#define SPINLOCK_NO_CPU ((1u << SPINLOCK_CPU_BITS) - 1)
+#define SPINLOCK_RECURSE_BITS (16 - SPINLOCK_CPU_BITS)
+ u16 recurse_cnt:SPINLOCK_RECURSE_BITS;
+#define SPINLOCK_MAX_RECURSE ((1u << SPINLOCK_RECURSE_BITS) - 1)
+ union lock_debug debug;
#ifdef CONFIG_LOCK_PROFILE
struct lock_profile *profile;
#endif