author     Brian Behlendorf <[email protected]>   2012-07-13 12:49:40 -0700
committer  Brian Behlendorf <[email protected]>   2012-07-13 13:07:39 -0700
commit     d503b971f4848f1b9d654684f1b3c864baad5d88
tree       c7f7f6d99b6f800f944012161d5620b00953cd8f
parent     d801db1487205365b268efab4700e3541adab493
Optimize spl_rwsem_is_locked()
The spl_rwsem_is_locked() compatibility function has been observed
to be a hot spot. The root cause is that we must check the rwsem
activity under the rwsem->wait_lock to avoid a race. When the lock
is busy, significant contention can occur on that spin lock.
The upstream kernel fix for this race is built on the insight that
the contention can be avoided by using spin_trylock_irqsave(): when
the wait_lock is contended, the semaphore is in use and it is
reasonable to simply report it as locked.
This change updates the SPL's implementation to match the upstream
kernel. Since the equivalent kernel code has been in use for years,
this is a low-risk change.
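To make the change concrete, the before/after behavior is distilled
in the sketch below. It is illustrative only: it covers just the
plain-spinlock path (the raw-spinlock and
RWSEM_IS_LOCKED_TAKES_WAIT_LOCK variants are handled by the compat
macros in the patch itself), and the _old/_new names are not part of
the patch.

#include <linux/rwsem.h>
#include <linux/spinlock.h>

/*
 * Old behavior: always spin for the wait_lock before sampling the
 * semaphore state.  Under load the wait_lock itself becomes a hot spot.
 */
static inline int
spl_rwsem_is_locked_old(struct rw_semaphore *rwsem)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&rwsem->wait_lock, flags);
	rc = rwsem_is_locked(rwsem);
	spin_unlock_irqrestore(&rwsem->wait_lock, flags);

	return (rc);
}

/*
 * New behavior: sample the state only if the wait_lock is immediately
 * available.  If the trylock fails another task is manipulating the
 * semaphore, so reporting it as locked (rc = 1) is a safe answer.
 */
static inline int
spl_rwsem_is_locked_new(struct rw_semaphore *rwsem)
{
	unsigned long flags;
	int rc = 1;

	if (spin_trylock_irqsave(&rwsem->wait_lock, flags)) {
		rc = rwsem_is_locked(rwsem);
		spin_unlock_irqrestore(&rwsem->wait_lock, flags);
	}

	return (rc);
}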
Signed-off-by: Brian Behlendorf <[email protected]>
-rw-r--r--  include/linux/rwsem_compat.h | 67
1 file changed, 25 insertions, 42 deletions
diff --git a/include/linux/rwsem_compat.h b/include/linux/rwsem_compat.h
index fe69f0154..757bb42af 100644
--- a/include/linux/rwsem_compat.h
+++ b/include/linux/rwsem_compat.h
@@ -27,57 +27,40 @@
 
 #include <linux/rwsem.h>
 
-#ifdef RWSEM_SPINLOCK_IS_RAW
-#define spl_rwsem_lock_irqsave(lock, flags) \
-({ \
-	raw_spin_lock_irqsave(lock, flags); \
-})
-#define spl_rwsem_unlock_irqrestore(lock, flags) \
-({ \
-	raw_spin_unlock_irqrestore(lock, flags); \
-})
+#if defined(RWSEM_SPINLOCK_IS_RAW)
+#define spl_rwsem_lock_irqsave(lk, fl)		raw_spin_lock_irqsave(lk, fl)
+#define spl_rwsem_unlock_irqrestore(lk, fl)	raw_spin_unlock_irqrestore(lk, fl)
+#define spl_rwsem_trylock_irqsave(lk, fl)	raw_spin_trylock_irqsave(lk, fl)
 #else
-#define spl_rwsem_lock_irqsave(lock, flags) \
-({ \
-	spin_lock_irqsave(lock, flags); \
-})
-#define spl_rwsem_unlock_irqrestore(lock, flags) \
-({ \
-	spin_unlock_irqrestore(lock, flags); \
-})
+#define spl_rwsem_lock_irqsave(lk, fl)		spin_lock_irqsave(lk, fl)
+#define spl_rwsem_unlock_irqrestore(lk, fl)	spin_unlock_irqrestore(lk, fl)
+#define spl_rwsem_trylock_irqsave(lk, fl)	spin_trylock_irqsave(lk, fl)
 #endif /* RWSEM_SPINLOCK_IS_RAW */
 
-#ifdef RWSEM_IS_LOCKED_TAKES_WAIT_LOCK
 /*
- * A race condition in rwsem_is_locked() was fixed in Linux 2.6.33 and the fix
- * was backported to RHEL5 as of kernel 2.6.18-190.el5.  Details can be found
- * here:
+ * Prior to Linux 2.6.33 there existed a race condition in rwsem_is_locked().
+ * The semaphore's activity was checked outside of the wait_lock which
+ * could result in some readers getting the incorrect activity value.
  *
- * https://bugzilla.redhat.com/show_bug.cgi?id=526092
-
- * The race condition was fixed in the kernel by acquiring the semaphore's
- * wait_lock inside rwsem_is_locked().  The SPL worked around the race
- * condition by acquiring the wait_lock before calling that function, but
- * with the fix in place we must not do that.
+ * When a kernel without this fix is detected the SPL takes responsibility
+ * for acquiring the wait_lock to avoid this race.
  */
-
-#define spl_rwsem_is_locked(rwsem) \
-({ \
-	rwsem_is_locked(rwsem); \
-})
-
+#if defined(RWSEM_IS_LOCKED_TAKES_WAIT_LOCK)
+#define spl_rwsem_is_locked(rwsem)	rwsem_is_locked(rwsem)
 #else
+static inline int
+spl_rwsem_is_locked(struct rw_semaphore *rwsem)
+{
+	unsigned long flags;
+	int rc = 1;
 
-#define spl_rwsem_is_locked(rwsem) \
-({ \
-	unsigned long _flags_; \
-	int _rc_; \
-	spl_rwsem_lock_irqsave(&rwsem->wait_lock, _flags_); \
-	_rc_ = rwsem_is_locked(rwsem); \
-	spl_rwsem_unlock_irqrestore(&rwsem->wait_lock, _flags_); \
-	_rc_; \
-})
+	if (spl_rwsem_trylock_irqsave(&rwsem->wait_lock, flags)) {
+		rc = rwsem_is_locked(rwsem);
+		spl_rwsem_unlock_irqrestore(&rwsem->wait_lock, flags);
+	}
 
+	return (rc);
+}
 #endif /* RWSEM_IS_LOCKED_TAKES_WAIT_LOCK */
 
 #endif /* _SPL_RWSEM_COMPAT_H */
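A note on the semantics (not part of the commit): the trylock variant
may conservatively report "locked" while another CPU merely holds the
wait_lock, so it suits callers that only need a one-sided guarantee,
such as assertion-style checks. A hypothetical example, where sem is
a struct rw_semaphore:

/*
 * Hypothetical caller: the semaphore is held across the check, so a
 * spurious "locked" answer from a contended wait_lock can never trip
 * the assertion.
 */
down_read(&sem);
BUG_ON(!spl_rwsem_is_locked(&sem));
up_read(&sem);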