return this_cpu(mm_lock_level);
}
+#define MM_LOCK_ORDER_MAX 64
+/*
+ * Return the lock level taking the domain bias into account. If the domain is
+ * privileged a bias of MM_LOCK_ORDER_MAX is applied to the lock level, so that
+ * mm locks that belong to a control domain can be acquired after having
+ * acquired mm locks of an unprivileged domain.
+ *
+ * This is required in order to use some hypercalls from a control domain that
+ * take locks of a subject domain and then attempt to copy data to/from the
+ * caller domain.
+ */
+static inline int _lock_level(const struct domain *d, int l)
+{
+ ASSERT(l <= MM_LOCK_ORDER_MAX);
+
+ return l + (d && is_control_domain(d) ? MM_LOCK_ORDER_MAX : 0);
+}
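
To make the bias arithmetic concrete, consider a minimal hypothetical sequence (illustration only; `unprivileged_d` and `control_d` are placeholder domains, and the order values are the MM_LOCK_ORDER_* constants defined further down in this file):

    /* Illustration only: placeholder domains, contrived lock sequence. */
    paging_lock(unprivileged_d);            /* order 64, no bias for a plain guest       */
    pod_lock(p2m_get_hostp2m(control_d));   /* order 48 + 64 == 112 >= 64, check passes  */

Without the bias the second acquisition would be checked at level 48 while the per-CPU level is already 64, so _check_lock_level() below would hit the BUG().
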
+
/*
* If you see this crash, the numbers printed are order levels defined
* in this file.
*/
-static inline void _check_lock_level(int l)
+static inline void _check_lock_level(const struct domain *d, int l)
{
- if ( unlikely(_get_lock_level() > l) )
+ int lvl = _lock_level(d, l);
+
+ if ( unlikely(_get_lock_level() > lvl) )
{
- printk("mm locking order violation: %i > %i\n", _get_lock_level(), l);
+ printk("mm locking order violation: %i > %i\n", _get_lock_level(), lvl);
BUG();
}
}
this_cpu(mm_lock_level) = l;
}
-static inline void _mm_lock(mm_lock_t *l, const char *func, int level, int rec)
+static inline void _mm_lock(const struct domain *d, mm_lock_t *l,
+ const char *func, int level, int rec)
{
if ( !((mm_locked_by_me(l)) && rec) )
- _check_lock_level(level);
+ _check_lock_level(d, level);
spin_lock_recursive(&l->lock);
if ( l->lock.recurse_cnt == 1 )
{
}
else if ( (unlikely(!rec)) )
panic("mm lock already held by %s\n", l->locker_function);
- _set_lock_level(level);
+ _set_lock_level(_lock_level(d, level));
}
-static inline void _mm_enforce_order_lock_pre(int level)
+static inline void _mm_enforce_order_lock_pre(const struct domain *d, int level)
{
- _check_lock_level(level);
+ _check_lock_level(d, level);
}
-static inline void _mm_enforce_order_lock_post(int level, int *unlock_level,
- unsigned short *recurse_count)
+static inline void _mm_enforce_order_lock_post(const struct domain *d, int level,
+ int *unlock_level,
+ unsigned short *recurse_count)
{
if ( recurse_count )
{
} else {
*unlock_level = _get_lock_level();
}
- _set_lock_level(level);
+ _set_lock_level(_lock_level(d, level));
}
return (l->locker == get_processor_id());
}
-static inline void _mm_write_lock(mm_rwlock_t *l, const char *func, int level)
+static inline void _mm_write_lock(const struct domain *d, mm_rwlock_t *l,
+ const char *func, int level)
{
if ( !mm_write_locked_by_me(l) )
{
- _check_lock_level(level);
+ _check_lock_level(d, level);
percpu_write_lock(p2m_percpu_rwlock, &l->lock);
l->locker = get_processor_id();
l->locker_function = func;
l->unlock_level = _get_lock_level();
- _set_lock_level(level);
+ _set_lock_level(_lock_level(d, level));
}
l->recurse_count++;
}
percpu_write_unlock(p2m_percpu_rwlock, &l->lock);
}
-static inline void _mm_read_lock(mm_rwlock_t *l, int level)
+static inline void _mm_read_lock(const struct domain *d, mm_rwlock_t *l,
+ int level)
{
- _check_lock_level(level);
+ _check_lock_level(d, level);
percpu_read_lock(p2m_percpu_rwlock, &l->lock);
/* There's nowhere to store the per-CPU unlock level so we can't
* set the lock level. */
/* This wrapper uses the line number to express the locking order below */
#define declare_mm_lock(name) \
- static inline void mm_lock_##name(mm_lock_t *l, const char *func, int rec)\
- { _mm_lock(l, func, MM_LOCK_ORDER_##name, rec); }
+ static inline void mm_lock_##name(const struct domain *d, mm_lock_t *l, \
+ const char *func, int rec) \
+ { _mm_lock(d, l, func, MM_LOCK_ORDER_##name, rec); }
#define declare_mm_rwlock(name) \
- static inline void mm_write_lock_##name(mm_rwlock_t *l, const char *func) \
- { _mm_write_lock(l, func, MM_LOCK_ORDER_##name); } \
- static inline void mm_read_lock_##name(mm_rwlock_t *l) \
- { _mm_read_lock(l, MM_LOCK_ORDER_##name); }
+ static inline void mm_write_lock_##name(const struct domain *d, \
+ mm_rwlock_t *l, const char *func) \
+ { _mm_write_lock(d, l, func, MM_LOCK_ORDER_##name); } \
+ static inline void mm_read_lock_##name(const struct domain *d, \
+ mm_rwlock_t *l) \
+ { _mm_read_lock(d, l, MM_LOCK_ORDER_##name); }
/* These capture the name of the calling function */
-#define mm_lock(name, l) mm_lock_##name(l, __func__, 0)
-#define mm_lock_recursive(name, l) mm_lock_##name(l, __func__, 1)
-#define mm_write_lock(name, l) mm_write_lock_##name(l, __func__)
-#define mm_read_lock(name, l) mm_read_lock_##name(l)
+#define mm_lock(name, d, l) mm_lock_##name(d, l, __func__, 0)
+#define mm_lock_recursive(name, d, l) mm_lock_##name(d, l, __func__, 1)
+#define mm_write_lock(name, d, l) mm_write_lock_##name(d, l, __func__)
+#define mm_read_lock(name, d, l) mm_read_lock_##name(d, l)
/* This wrapper is intended for "external" locks which do not use
* the mm_lock_t types. Such locks inside the mm code are also subject
* to ordering constraints. */
-#define declare_mm_order_constraint(name) \
- static inline void mm_enforce_order_lock_pre_##name(void) \
- { _mm_enforce_order_lock_pre(MM_LOCK_ORDER_##name); } \
- static inline void mm_enforce_order_lock_post_##name( \
- int *unlock_level, unsigned short *recurse_count) \
- { _mm_enforce_order_lock_post(MM_LOCK_ORDER_##name, unlock_level, recurse_count); } \
+#define declare_mm_order_constraint(name) \
+ static inline void mm_enforce_order_lock_pre_##name(const struct domain *d) \
+ { _mm_enforce_order_lock_pre(d, MM_LOCK_ORDER_##name); } \
+ static inline void mm_enforce_order_lock_post_##name(const struct domain *d,\
+ int *unlock_level, unsigned short *recurse_count) \
+ { _mm_enforce_order_lock_post(d, MM_LOCK_ORDER_##name, unlock_level, \
+ recurse_count); }
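
The calling pattern these wrappers impose is unchanged apart from the extra domain argument; a rough sketch for a hypothetical external lock (the constraint name `foo` and the field `foo_lock` are illustrative, not from the tree):

    int unlock_level;

    mm_enforce_order_lock_pre_foo(d);             /* order check before taking the lock */
    spin_lock(&d->foo_lock);                      /* the external, non-mm_lock_t lock   */
    mm_enforce_order_lock_post_foo(d, &unlock_level, NULL); /* record the old level     */
    /* ... critical section ... */
    mm_enforce_order_unlock(unlock_level, NULL);  /* restore the previous lock level    */
    spin_unlock(&d->foo_lock);
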
static inline void mm_unlock(mm_lock_t *l)
{
#define MM_LOCK_ORDER_nestedp2m 8
declare_mm_lock(nestedp2m)
-#define nestedp2m_lock(d) mm_lock(nestedp2m, &(d)->arch.nested_p2m_lock)
+#define nestedp2m_lock(d) mm_lock(nestedp2m, d, &(d)->arch.nested_p2m_lock)
#define nestedp2m_unlock(d) mm_unlock(&(d)->arch.nested_p2m_lock)
/* P2M lock (per-non-alt-p2m-table)
#define MM_LOCK_ORDER_per_page_sharing 24
declare_mm_order_constraint(per_page_sharing)
-#define page_sharing_mm_pre_lock() mm_enforce_order_lock_pre_per_page_sharing()
+#define page_sharing_mm_pre_lock() \
+ mm_enforce_order_lock_pre_per_page_sharing(NULL)
#define page_sharing_mm_post_lock(l, r) \
- mm_enforce_order_lock_post_per_page_sharing((l), (r))
+ mm_enforce_order_lock_post_per_page_sharing(NULL, (l), (r))
#define page_sharing_mm_unlock(l, r) mm_enforce_order_unlock((l), (r))
/* Alternate P2M list lock (per-domain)
#define MM_LOCK_ORDER_altp2mlist 32
declare_mm_lock(altp2mlist)
-#define altp2m_list_lock(d) mm_lock(altp2mlist, &(d)->arch.altp2m_list_lock)
+#define altp2m_list_lock(d) mm_lock(altp2mlist, d, \
+ &(d)->arch.altp2m_list_lock)
#define altp2m_list_unlock(d) mm_unlock(&(d)->arch.altp2m_list_lock)
/* P2M lock (per-altp2m-table)
static inline void p2m_lock(struct p2m_domain *p)
{
if ( p2m_is_altp2m(p) )
- mm_write_lock(altp2m, &p->lock);
+ mm_write_lock(altp2m, p->domain, &p->lock);
else
- mm_write_lock(p2m, &p->lock);
+ mm_write_lock(p2m, p->domain, &p->lock);
p->defer_flush++;
}
#define gfn_lock(p,g,o) p2m_lock(p)
#define gfn_unlock(p,g,o) p2m_unlock(p)
-#define p2m_read_lock(p) mm_read_lock(p2m, &(p)->lock)
+#define p2m_read_lock(p) mm_read_lock(p2m, (p)->domain, &(p)->lock)
#define p2m_read_unlock(p) mm_read_unlock(&(p)->lock)
#define p2m_locked_by_me(p) mm_write_locked_by_me(&(p)->lock)
#define gfn_locked_by_me(p,g) p2m_locked_by_me(p)
#define MM_LOCK_ORDER_pod 48
declare_mm_lock(pod)
-#define pod_lock(p) mm_lock(pod, &(p)->pod.lock)
+#define pod_lock(p) mm_lock(pod, (p)->domain, &(p)->pod.lock)
#define pod_unlock(p) mm_unlock(&(p)->pod.lock)
#define pod_locked_by_me(p) mm_locked_by_me(&(p)->pod.lock)
#define MM_LOCK_ORDER_page_alloc 56
declare_mm_order_constraint(page_alloc)
-#define page_alloc_mm_pre_lock() mm_enforce_order_lock_pre_page_alloc()
-#define page_alloc_mm_post_lock(l) mm_enforce_order_lock_post_page_alloc(&(l), NULL)
+#define page_alloc_mm_pre_lock(d) mm_enforce_order_lock_pre_page_alloc(d)
+#define page_alloc_mm_post_lock(d, l) \
+ mm_enforce_order_lock_post_page_alloc(d, &(l), NULL)
#define page_alloc_mm_unlock(l) mm_enforce_order_unlock((l), NULL)
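
The caller of these wrappers, lock_page_alloc() in p2m-pod.c, is outside this hunk; a sketch of how it would look once updated to pass the domain along (based on the existing helper, shown only to illustrate where the domain argument comes from):

    static inline void lock_page_alloc(struct p2m_domain *p2m)
    {
        page_alloc_mm_pre_lock(p2m->domain);
        spin_lock(&(p2m->domain->page_alloc_lock));
        page_alloc_mm_post_lock(p2m->domain,
                                p2m->domain->arch.page_alloc_unlock_level);
    }
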
/* Paging lock (per-domain)
#define MM_LOCK_ORDER_paging 64
declare_mm_lock(paging)
-#define paging_lock(d) mm_lock(paging, &(d)->arch.paging.lock)
+#define paging_lock(d) mm_lock(paging, d, &(d)->arch.paging.lock)
#define paging_lock_recursive(d) \
- mm_lock_recursive(paging, &(d)->arch.paging.lock)
+ mm_lock_recursive(paging, d, &(d)->arch.paging.lock)
#define paging_unlock(d) mm_unlock(&(d)->arch.paging.lock)
#define paging_locked_by_me(d) mm_locked_by_me(&(d)->arch.paging.lock)