/* Per-CPU variable for enforcing the lock ordering */
DECLARE_PER_CPU(int, mm_lock_level);
-#define __get_lock_level() (this_cpu(mm_lock_level))
DECLARE_PERCPU_RWLOCK_GLOBAL(p2m_percpu_rwlock);
static inline int mm_locked_by_me(mm_lock_t *l)
{
    return (l->lock.recurse_cpu == current->processor);
}
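+/* Return the lock ordering level recorded for the current CPU. */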
+static inline int _get_lock_level(void)
+{
+ return this_cpu(mm_lock_level);
+}
+
/*
* If you see this crash, the numbers printed are order levels defined
* in this file.
*/
-#define __check_lock_level(l) \
-do { \
- if ( unlikely(__get_lock_level() > (l)) ) \
- { \
- printk("mm locking order violation: %i > %i\n", \
- __get_lock_level(), (l)); \
- BUG(); \
- } \
-} while(0)
-
-#define __set_lock_level(l) \
-do { \
- __get_lock_level() = (l); \
-} while(0)
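+/* BUG() if taking a lock at level 'l' would violate the lock ordering. */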
+static inline void _check_lock_level(int l)
+{
+ if ( unlikely(_get_lock_level() > l) )
+ {
+ printk("mm locking order violation: %i > %i\n", _get_lock_level(), l);
+ BUG();
+ }
+}
+
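+/* Record level 'l' as the current CPU's lock ordering level. */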
+static inline void _set_lock_level(int l)
+{
+ this_cpu(mm_lock_level) = l;
+}
static inline void _mm_lock(mm_lock_t *l, const char *func, int level, int rec)
{
if ( !((mm_locked_by_me(l)) && rec) )
- __check_lock_level(level);
+ _check_lock_level(level);
spin_lock_recursive(&l->lock);
if ( l->lock.recurse_cnt == 1 )
{
l->locker_function = func;
- l->unlock_level = __get_lock_level();
+ l->unlock_level = _get_lock_level();
}
else if ( (unlikely(!rec)) )
panic("mm lock already held by %s\n", l->locker_function);
- __set_lock_level(level);
+ _set_lock_level(level);
}
static inline void _mm_enforce_order_lock_pre(int level)
{
- __check_lock_level(level);
+ _check_lock_level(level);
}
static inline void _mm_enforce_order_lock_post(int level, int *unlock_level,
                                               unsigned short *recurse_count)
{
    if ( recurse_count )
    {
if ( (*recurse_count)++ == 0 )
{
- *unlock_level = __get_lock_level();
+ *unlock_level = _get_lock_level();
}
} else {
- *unlock_level = __get_lock_level();
+ *unlock_level = _get_lock_level();
}
- __set_lock_level(level);
+ _set_lock_level(level);
}
static inline void _mm_write_lock(mm_rwlock_t *l, const char *func, int level)
{
if ( !mm_write_locked_by_me(l) )
{
- __check_lock_level(level);
+ _check_lock_level(level);
percpu_write_lock(p2m_percpu_rwlock, &l->lock);
l->locker = get_processor_id();
l->locker_function = func;
- l->unlock_level = __get_lock_level();
- __set_lock_level(level);
+ l->unlock_level = _get_lock_level();
+ _set_lock_level(level);
}
l->recurse_count++;
}
static inline void mm_write_unlock(mm_rwlock_t *l)
{
    if ( --(l->recurse_count) != 0 )
        return;
l->locker = -1;
l->locker_function = "nobody";
- __set_lock_level(l->unlock_level);
+ _set_lock_level(l->unlock_level);
percpu_write_unlock(p2m_percpu_rwlock, &l->lock);
}
static inline void _mm_read_lock(mm_rwlock_t *l, int level)
{
- __check_lock_level(level);
+ _check_lock_level(level);
percpu_read_lock(p2m_percpu_rwlock, &l->lock);
/* There's nowhere to store the per-CPU unlock level so we can't
* set the lock level. */
}

static inline void mm_unlock(mm_lock_t *l)
{
    if ( l->lock.recurse_cnt == 1 )
{
l->locker_function = "nobody";
- __set_lock_level(l->unlock_level);
+ _set_lock_level(l->unlock_level);
}
spin_unlock_recursive(&l->lock);
}
static inline void mm_enforce_order_unlock(int unlock_level,
                                           unsigned short *recurse_count)
{
    if ( recurse_count )
    {
        BUG_ON(*recurse_count == 0);
if ( (*recurse_count)-- == 1 )
{
- __set_lock_level(unlock_level);
+ _set_lock_level(unlock_level);
}
} else {
- __set_lock_level(unlock_level);
+ _set_lock_level(unlock_level);
}
}
#define MM_LOCK_ORDER_altp2m 40
declare_mm_rwlock(altp2m);
-#define p2m_lock(p) \
- do { \
- if ( p2m_is_altp2m(p) ) \
- mm_write_lock(altp2m, &(p)->lock); \
- else \
- mm_write_lock(p2m, &(p)->lock); \
- (p)->defer_flush++; \
- } while (0)
-#define p2m_unlock(p) \
- do { \
- if ( --(p)->defer_flush == 0 ) \
- p2m_unlock_and_tlb_flush(p); \
- else \
- mm_write_unlock(&(p)->lock); \
- } while (0)
+
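+/*
+ * Write-lock the altp2m or host p2m and take a defer_flush reference,
+ * so that TLB flushes are deferred until the matching p2m_unlock().
+ */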
+static inline void p2m_lock(struct p2m_domain *p)
+{
+ if ( p2m_is_altp2m(p) )
+ mm_write_lock(altp2m, &p->lock);
+ else
+ mm_write_lock(p2m, &p->lock);
+ p->defer_flush++;
+}
+
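+/*
+ * Drop the defer_flush reference taken by p2m_lock(); the last caller
+ * out performs the pending TLB flush while releasing the lock.
+ */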
+static inline void p2m_unlock(struct p2m_domain *p)
+{
+ if ( --p->defer_flush == 0 )
+ p2m_unlock_and_tlb_flush(p);
+ else
+ mm_write_unlock(&p->lock);
+}
+
#define gfn_lock(p,g,o) p2m_lock(p)
#define gfn_unlock(p,g,o) p2m_unlock(p)
#define p2m_read_lock(p) mm_read_lock(p2m, &(p)->lock)