From d503b971f4848f1b9d654684f1b3c864baad5d88 Mon Sep 17 00:00:00 2001 From: Brian Behlendorf Date: Fri, 13 Jul 2012 12:49:40 -0700 Subject: Optimize spl_rwsem_is_locked() The spl_rwsem_is_locked() compatibility function has been observed to be a hot spot. The root cause of this is that we must check the rwsem activity under the rwsem->wait_lock to avoid a race. When the lock is busy significant contention can occur. The upstream kernel fix for this race had the insight that by using spin_trylock_irqsave() this contention could be avoided. When the lock is contended it's reasonable to return that it is locked. This change updates the SPL's implementation to be like the upstream kernel. Since the kernel code has been in use for years now this is a low-risk change. Signed-off-by: Brian Behlendorf --- include/linux/rwsem_compat.h | 67 +++++++++++++++++--------------------------- 1 file changed, 25 insertions(+), 42 deletions(-) diff --git a/include/linux/rwsem_compat.h b/include/linux/rwsem_compat.h index fe69f0154..757bb42af 100644 --- a/include/linux/rwsem_compat.h +++ b/include/linux/rwsem_compat.h @@ -27,57 +27,40 @@ #include -#ifdef RWSEM_SPINLOCK_IS_RAW -#define spl_rwsem_lock_irqsave(lock, flags) \ -({ \ - raw_spin_lock_irqsave(lock, flags); \ -}) -#define spl_rwsem_unlock_irqrestore(lock, flags) \ -({ \ - raw_spin_unlock_irqrestore(lock, flags); \ -}) +#if defined(RWSEM_SPINLOCK_IS_RAW) +#define spl_rwsem_lock_irqsave(lk, fl) raw_spin_lock_irqsave(lk, fl) +#define spl_rwsem_unlock_irqrestore(lk, fl) raw_spin_unlock_irqrestore(lk, fl) +#define spl_rwsem_trylock_irqsave(lk, fl) raw_spin_trylock_irqsave(lk, fl) #else -#define spl_rwsem_lock_irqsave(lock, flags) \ -({ \ - spin_lock_irqsave(lock, flags); \ -}) -#define spl_rwsem_unlock_irqrestore(lock, flags) \ -({ \ - spin_unlock_irqrestore(lock, flags); \ -}) +#define spl_rwsem_lock_irqsave(lk, fl) spin_lock_irqsave(lk, fl) +#define spl_rwsem_unlock_irqrestore(lk, fl) spin_unlock_irqrestore(lk, fl) +#define 
spl_rwsem_trylock_irqsave(lk, fl) spin_trylock_irqsave(lk, fl) #endif /* RWSEM_SPINLOCK_IS_RAW */ -#ifdef RWSEM_IS_LOCKED_TAKES_WAIT_LOCK /* - * A race condition in rwsem_is_locked() was fixed in Linux 2.6.33 and the fix - * was backported to RHEL5 as of kernel 2.6.18-190.el5. Details can be found - * here: + * Prior to Linux 2.6.33 there existed a race condition in rwsem_is_locked(). + * The semaphore's activity was checked outside of the wait_lock which + * could result in some readers getting the incorrect activity value. * - * https://bugzilla.redhat.com/show_bug.cgi?id=526092 - - * The race condition was fixed in the kernel by acquiring the semaphore's - * wait_lock inside rwsem_is_locked(). The SPL worked around the race - * condition by acquiring the wait_lock before calling that function, but - * with the fix in place we must not do that. + * When a kernel without this fix is detected the SPL takes responsibility + * for acquiring the wait_lock to avoid this race. */ - -#define spl_rwsem_is_locked(rwsem) \ -({ \ - rwsem_is_locked(rwsem); \ -}) - +#if defined(RWSEM_IS_LOCKED_TAKES_WAIT_LOCK) +#define spl_rwsem_is_locked(rwsem) rwsem_is_locked(rwsem) #else +static inline int +spl_rwsem_is_locked(struct rw_semaphore *rwsem) +{ + unsigned long flags; + int rc = 1; -#define spl_rwsem_is_locked(rwsem) \ -({ \ - unsigned long _flags_; \ - int _rc_; \ - spl_rwsem_lock_irqsave(&rwsem->wait_lock, _flags_); \ - _rc_ = rwsem_is_locked(rwsem); \ - spl_rwsem_unlock_irqrestore(&rwsem->wait_lock, _flags_); \ - _rc_; \ -}) + if (spl_rwsem_trylock_irqsave(&rwsem->wait_lock, flags)) { + rc = rwsem_is_locked(rwsem); + spl_rwsem_unlock_irqrestore(&rwsem->wait_lock, flags); + } + return (rc); +} #endif /* RWSEM_IS_LOCKED_TAKES_WAIT_LOCK */ #endif /* _SPL_RWSEM_COMPAT_H */ -- cgit v1.2.3