/* After this barrier no new PoD activities can happen. */
BUG_ON(!d->is_dying);
- spin_barrier(&p2m->pod.lock.lock);
+ rspin_barrier(&p2m->pod.lock.lock);
lock_page_alloc(p2m);
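For context: pod.lock is an mm_lock_t wrapping what is by now a recursive rspinlock_t, which is where the double ".lock.lock" dereference comes from and why the drain has to go through the rspin_ variant. A rough sketch of the shape involved; only the inner `lock` member is confirmed by the hunk above, the other fields are assumptions:

typedef struct mm_lock {
    rspinlock_t lock;          /* the recursive spinlock itself */
    int unlock_level;          /* assumption: lock-ordering bookkeeping */
    /* ... further debug fields elided ... */
} mm_lock_t;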
case DOMDYING_alive:
domain_pause(d);
d->is_dying = DOMDYING_dying;
- spin_barrier(&d->domain_lock);
+ rspin_barrier(&d->domain_lock);
argo_destroy(d);
vnuma_destroy(d->vnuma);
domain_set_outstanding_pages(d, 0);
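Both this hunk and the PoD one above rely on the same publish-then-drain idiom: write the new state first, then let the barrier wait until no CPU still sits inside a critical section of the (recursive) lock, so every later acquirer is guaranteed to see the domain as dying. A minimal sketch of the idiom, not a verbatim tree excerpt:

d->is_dying = DOMDYING_dying;      /* publish the new state */
rspin_barrier(&d->domain_lock);    /* drain current lock holders */

/* Any path taking the lock afterwards observes the flag: */
rspin_lock(&d->domain_lock);
if ( !d->is_dying )
{
    /* Only start new work while the domain is still alive. */
}
rspin_unlock(&d->domain_lock);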
{
long dom_before, dom_after, dom_claimed, sys_before, sys_after;
- ASSERT(spin_is_locked(&d->page_alloc_lock));
+ ASSERT(rspin_is_locked(&d->page_alloc_lock));
d->tot_pages += pages;
/*
int _spin_is_locked(const spinlock_t *lock)
{
/*
- * Recursive locks may be locked by another CPU, yet we return
- * "false" here, making this function suitable only for use in
- * ASSERT()s and alike.
+ * This function is suitable only for use in ASSERT()s and the like, as it
+ * doesn't tell _who_ is holding the lock.
*/
- return lock->recurse_cpu == SPINLOCK_NO_CPU
- ? spin_is_locked_common(&lock->tickets)
- : lock->recurse_cpu == smp_processor_id();
+ return spin_is_locked_common(&lock->tickets);
}
static bool always_inline spin_trylock_common(spinlock_tickets_t *t,
spin_barrier_common(&lock->tickets, &lock->debug, LOCK_PROFILE_PAR);
}
+bool _rspin_is_locked(const rspinlock_t *lock)
+{
+ /*
+ * Recursive locks may be locked by another CPU, yet we return
+ * "false" here, making this function suitable only for use in
+ * ASSERT()s and the like.
+ */
+ return lock->recurse_cpu == SPINLOCK_NO_CPU
+ ? spin_is_locked_common(&lock->tickets)
+ : lock->recurse_cpu == smp_processor_id();
+}
+
+void _rspin_barrier(rspinlock_t *lock)
+{
+ spin_barrier_common(&lock->tickets, &lock->debug, LOCK_PROFILE_PAR);
+}
+
bool _rspin_trylock(rspinlock_t *lock)
{
unsigned int cpu = smp_processor_id();
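The comment carried over from _spin_is_locked() is the crux: for a lock taken via rspin_lock(), _rspin_is_locked() answers relative to the asking CPU, so a "false" result does not mean the lock is free. A small illustration (hypothetical sequence; rlock is assumed to be an initialised rspinlock_t):

/* CPU 1 */
rspin_lock(&rlock);                /* recurse_cpu now records CPU 1 */
ASSERT(rspin_is_locked(&rlock));   /* true: we are the recorded owner */

/* CPU 2, while CPU 1 still holds rlock */
if ( !rspin_is_locked(&rlock) )    /* taken: the answer here is "false" */
{
    /* Treating the held lock as free would be wrong, hence ASSERT()s only. */
}

/* CPU 1 */
rspin_unlock(&rlock);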
static void conring_puts(const char *str, size_t len)
{
- ASSERT(spin_is_locked(&console_lock));
+ ASSERT(rspin_is_locked(&console_lock));
while ( len-- )
conring[CONRING_IDX_MASK(conringp++)] = *str++;
{
size_t len = strlen(str);
- ASSERT(spin_is_locked(&console_lock));
+ ASSERT(rspin_is_locked(&console_lock));
console_serial_puts(str, len);
video_puts(str, len);
bool pcidevs_locked(void)
{
- return !!spin_is_locked(&_pcidevs_lock);
+ return rspin_is_locked(&_pcidevs_lock);
}
static struct radix_tree_root pci_segments;
unsigned long _rspin_lock_irqsave(rspinlock_t *lock);
void _rspin_unlock(rspinlock_t *lock);
void _rspin_unlock_irqrestore(rspinlock_t *lock, unsigned long flags);
+bool _rspin_is_locked(const rspinlock_t *lock);
+void _rspin_barrier(rspinlock_t *lock);
static always_inline void rspin_lock(rspinlock_t *lock)
{
#define rspin_trylock(l) lock_evaluate_nospec(_rspin_trylock(l))
#define rspin_unlock(l) _rspin_unlock(l)
#define rspin_unlock_irqrestore(l, f) _rspin_unlock_irqrestore(l, f)
+#define rspin_barrier(l) _rspin_barrier(l)
+#define rspin_is_locked(l) _rspin_is_locked(l)
#define nrspin_trylock(l) spin_trylock(l)
#define nrspin_lock(l) spin_lock(l)
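With the declarations and wrappers in place, converted call sites such as the console and PCI ones above follow one pattern: leaf helpers assert ownership via rspin_is_locked(), while callers may re-acquire the same lock on the same CPU without deadlocking. A hedged usage sketch with hypothetical helpers:

static void update_locked(rspinlock_t *lock, unsigned int *counter)
{
    ASSERT(rspin_is_locked(lock));     /* caller must hold the lock */
    ++*counter;                        /* state protected by the lock */
}

void update(rspinlock_t *lock, unsigned int *counter)
{
    rspin_lock(lock);                  /* may nest on the same CPU */
    update_locked(lock, counter);
    rspin_unlock(lock);
}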