author     Brian Behlendorf <[email protected]>  2018-07-30 09:22:30 -0700
committer  GitHub <[email protected]>             2018-07-30 09:22:30 -0700
commit     11d0525cbb15a3519df683ec0248b6ac55f9d090 (patch)
tree       14fdc0c33c8972cdf79644c5306716ec4d95cb03
parent     fb7307b8922e9eb7da430733d6665e06d2cb0bf5 (diff)
Add rwsem_tryupgrade for 4.9.20-rt16 kernel
The RT rwsem implementation was changed to allow multiple readers as of
the 4.9.20-rt16 patch set. This results in a build failure because the
existing implementation was forced to directly access the rwsem
structure, which has changed.

While this could be accommodated by adding additional compatibility
code, this patch resolves the build issue by simply assuming the rwsem
can never be upgraded. This functionality is a performance optimization
and all callers must already handle this case.

Converting the last remaining use of __SPIN_LOCK_UNLOCKED to
spin_lock_init() was additionally required to get a clean build.

Signed-off-by: Brian Behlendorf <[email protected]>
Closes #7589
-rw-r--r--  include/spl/sys/rwlock.h   10
-rw-r--r--  module/spl/spl-rwlock.c    19
-rw-r--r--  module/spl/spl-vnode.c      2
3 files changed, 20 insertions, 11 deletions
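The message's point that every caller already tolerates a refused upgrade is what makes the simplification safe. A minimal sketch of that caller pattern, using the SPL krwlock_t API from <sys/rwlock.h>; the function and lock names are illustrative and not taken from this change:

#include <sys/rwlock.h>

static void
example_update(krwlock_t *lock)
{
	rw_enter(lock, RW_READER);
	/* ... read-side work ... */

	if (!rw_tryupgrade(lock)) {
		/*
		 * Upgrade refused; after this patch that is always the
		 * case on 4.9.20-rt16 and later RT kernels.  Drop the
		 * read lock, take the write lock, and revalidate any
		 * state observed under the read lock.
		 */
		rw_exit(lock);
		rw_enter(lock, RW_WRITER);
		/* ... revalidate ... */
	}
	/* ... write-side work ... */
	rw_exit(lock);
}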
diff --git a/include/spl/sys/rwlock.h b/include/spl/sys/rwlock.h
index 088e28b44..0ac528e16 100644
--- a/include/spl/sys/rwlock.h
+++ b/include/spl/sys/rwlock.h
@@ -172,7 +172,7 @@ RW_LOCK_HELD(krwlock_t *rwp)
}
/*
- * The following functions must be a #define and not static inline.
+ * The following functions must be a #define and not static inline.
* This ensures that the native linux semaphore functions (down/up)
* will be correctly located in the users code which is important
* for the built in kernel lock analysis tools
@@ -188,10 +188,10 @@ RW_LOCK_HELD(krwlock_t *rwp)
spl_rw_set_type(rwp, type); \
})
-#define rw_destroy(rwp) \
-({ \
- VERIFY(!RW_LOCK_HELD(rwp)); \
-})
+/*
+ * The Linux rwsem implementation does not require a matching destroy.
+ */
+#define rw_destroy(rwp) ((void) 0)
#define rw_tryenter(rwp, rw) \
({ \
diff --git a/module/spl/spl-rwlock.c b/module/spl/spl-rwlock.c
index cf03bc593..4ffebc8ea 100644
--- a/module/spl/spl-rwlock.c
+++ b/module/spl/spl-rwlock.c
@@ -35,16 +35,24 @@
static int
__rwsem_tryupgrade(struct rw_semaphore *rwsem)
{
-
+#if defined(READER_BIAS) && defined(WRITER_BIAS)
+ /*
+ * As of the 4.9.20-rt16 kernel the realtime patch series lifted the
+ * single reader restriction. While this could be accommodated by
+ * adding additional compatibility code, assume the rwsem can never
+ * be upgraded. All callers must already cleanly handle this case.
+ */
+ return (0);
+#else
ASSERT((struct task_struct *)
((unsigned long)rwsem->lock.owner & ~RT_MUTEX_OWNER_MASKALL) ==
current);
/*
- * Under the realtime patch series, rwsem is implemented as a
- * single mutex held by readers and writers alike. However,
- * this implementation would prevent a thread from taking a
- * read lock twice, as the mutex would already be locked on
+ * Prior to the 4.9.20-rt16 kernel, the realtime patch series
+ * implemented rwsem as a single mutex held by readers and writers
+ * alike. However, this implementation would prevent a thread from
+ * taking a read lock twice, as the mutex would already be locked on
* the second attempt. Therefore the implementation allows a
* single thread to take a rwsem as read lock multiple times
* tracking that nesting as read_depth counter.
@@ -60,6 +68,7 @@ __rwsem_tryupgrade(struct rw_semaphore *rwsem)
return (1);
}
return (0);
+#endif
}
#elif defined(CONFIG_RWSEM_GENERIC_SPINLOCK)
static int
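The new #if branch above keys off READER_BIAS and WRITER_BIAS rather than a kernel version test; those macros appear to be introduced by the same RT rework that lifted the single reader restriction, so their presence doubles as a feature check. A stripped-down sketch of that detection idea (the SPL_RWSEM_MULTI_READER name is hypothetical):

#include <linux/rwsem.h>

/*
 * Feature test rather than version test: READER_BIAS/WRITER_BIAS are
 * only visible once the RT rwsem supports multiple readers, which is
 * exactly the configuration where the upgrade shortcut must be skipped.
 */
#if defined(READER_BIAS) && defined(WRITER_BIAS)
#define	SPL_RWSEM_MULTI_READER	1	/* rw_tryupgrade() always fails */
#else
#define	SPL_RWSEM_MULTI_READER	0	/* legacy single-owner rt_mutex */
#endif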
diff --git a/module/spl/spl-vnode.c b/module/spl/spl-vnode.c
index aebee0a82..b72c4896a 100644
--- a/module/spl/spl-vnode.c
+++ b/module/spl/spl-vnode.c
@@ -744,7 +744,7 @@ vn_file_cache_destructor(void *buf, void *cdrarg)
int
spl_vn_init(void)
{
- vn_file_lock = __SPIN_LOCK_UNLOCKED(vn_file_lock);
+ spin_lock_init(&vn_file_lock);
vn_cache = kmem_cache_create("spl_vn_cache",
sizeof (struct vnode), 64, vn_cache_constructor,
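Finally, the spl-vnode.c hunk swaps a run-time assignment of __SPIN_LOCK_UNLOCKED for spin_lock_init(). The commit message only says this was required for a clean build; presumably the RT-patched spinlock_t no longer accepts the static initializer as an rvalue. A minimal sketch contrasting the two initialization styles, with illustrative lock names:

#include <linux/module.h>
#include <linux/spinlock.h>

/* Compile-time form: the initializer is only valid at definition time. */
static DEFINE_SPINLOCK(example_static_lock);

/* Run-time form, as spl_vn_init() now uses for vn_file_lock. */
static spinlock_t example_dynamic_lock;

static int __init
example_init(void)
{
	spin_lock_init(&example_dynamic_lock);

	/* Both locks behave identically once initialized. */
	spin_lock(&example_static_lock);
	spin_unlock(&example_static_lock);
	spin_lock(&example_dynamic_lock);
	spin_unlock(&example_dynamic_lock);

	return (0);
}

static void __exit
example_exit(void)
{
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");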