     /*
      * First take the write lock to protect against other writers or slow
      * path readers.
+     *
+     * Note we use the speculation unsafe variant of write_lock(), as the
+     * calling wrapper already adds a speculation barrier after the lock has
+     * been taken.
      */
-    write_lock(&percpu_rwlock->rwlock);
+    _write_lock(&percpu_rwlock->rwlock);
     /* Now set the global variable so that readers start using read_lock. */
     percpu_rwlock->writer_activating = 1;
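
The wrapper referred to in the new comment is the caller-facing write_lock() helper, which pairs the raw acquisition with a speculation barrier so that accesses inside the critical section cannot be executed speculatively before the lock is architecturally held. A minimal sketch of that pairing, assuming the wrapper follows the same _write_lock() plus block_lock_speculation() pattern used by the percpu_write_lock() change further below (the wrapper's real definition is not part of this excerpt):

    /* Sketch only: speculation-safe wrapper built on the unsafe primitive. */
    static always_inline void write_lock(rwlock_t *l)
    {
        _write_lock(l);           /* architectural acquisition of the lock */
        block_lock_speculation(); /* stop speculation past the acquisition */
    }
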
 #define percpu_rwlock_resource_init(l, owner) \
     (*(l) = (percpu_rwlock_t)PERCPU_RW_LOCK_UNLOCKED(&get_per_cpu_var(owner)))
-static inline void _percpu_read_lock(percpu_rwlock_t **per_cpudata,
-                                     percpu_rwlock_t *percpu_rwlock)
+static always_inline void _percpu_read_lock(percpu_rwlock_t **per_cpudata,
+                                            percpu_rwlock_t *percpu_rwlock)
 {
     /* Validate the correct per_cpudata variable has been provided. */
     _percpu_rwlock_owner_check(per_cpudata, percpu_rwlock);
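
Switching the helper from inline to always_inline removes the compiler's discretion to emit it out of line, so the lock-taking fast path and the speculation barrier added below stay inlined in the caller rather than sitting behind an optional call boundary. In Xen, always_inline is a thin wrapper over the GCC attribute; roughly (the exact definition lives in the compiler headers and may differ slightly):

    /* Approximate expansion of the always_inline keyword used above. */
    #define always_inline __inline__ __attribute__((__always_inline__))
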
     }
     else
     {
+        /* Other branch already has a speculation barrier in read_lock(). */
+        block_lock_speculation();
         /* All other paths have implicit check_lock() calls via read_lock(). */
         check_lock(&percpu_rwlock->rwlock.lock.debug, false);
     }
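
Only this branch needs an explicit barrier: the other branch of the conditional falls back to read_lock(), which already issues one. A simplified sketch of the relevant conditional inside _percpu_read_lock() after the change, with the per-CPU bookkeeping omitted since it is not part of this hunk:

    if ( unlikely(percpu_rwlock->writer_activating) )
    {
        /* Slow path: waits via read_lock(), which already has a barrier. */
        read_lock(&percpu_rwlock->rwlock);
    }
    else
    {
        /* Fast path: no lock call is made, so issue the barrier explicitly. */
        block_lock_speculation();
        check_lock(&percpu_rwlock->rwlock.lock.debug, false);
    }
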
 #define percpu_read_lock(percpu, lock) \
     _percpu_read_lock(&get_per_cpu_var(percpu), lock)
 #define percpu_read_unlock(percpu, lock) \
     _percpu_read_unlock(&get_per_cpu_var(percpu), lock)
-#define percpu_write_lock(percpu, lock) \
-    _percpu_write_lock(&get_per_cpu_var(percpu), lock)
+
+#define percpu_write_lock(percpu, lock)                 \
+({                                                      \
+    _percpu_write_lock(&get_per_cpu_var(percpu), lock); \
+    block_lock_speculation();                           \
+})
 #define percpu_write_unlock(percpu, lock) \
     _percpu_write_unlock(&get_per_cpu_var(percpu), lock)
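
Because the replacement macro is a GNU statement expression, existing callers keep the same interface and automatically get the barrier once the write lock is held. A usage sketch follows; the owner variable, lock object and function names are illustrative only and do not come from this patch:

    /* Per-CPU owner variable and the lock it protects (hypothetical names). */
    DEFINE_PERCPU_RWLOCK_GLOBAL(example_rwlock);
    static percpu_rwlock_t example_lock;

    static void example_init(void)
    {
        percpu_rwlock_resource_init(&example_lock, example_rwlock);
    }

    static void example_writer(void)
    {
        percpu_write_lock(example_rwlock, &example_lock); /* lock + barrier */
        /* ... update state protected by the lock ... */
        percpu_write_unlock(example_rwlock, &example_lock);
    }
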