author    Mark Johnston <[email protected]>    2020-06-30 15:35:29 -0400
committer Brian Behlendorf <[email protected]>    2020-07-06 11:53:31 -0700
commit    6e0056171234b84450af2afbb6594bd3b09422b5 (patch)
tree      89ea4c7e98ddc130bc23004ad944c54ba78a3bc2 /module
parent    a4b0a74c7f346cab0a9ae3f8f55bd1a372e14336 (diff)
Add a "try" operation for range locks
zfs_rangelock_tryenter() bails immediately instead of waiting for the lock to
become available. This will be used to resolve a deadlock in the FreeBSD
page-in code.

No functional change intended.

Reviewed-by: Brian Behlendorf <[email protected]>
Reviewed-by: Ryan Moeller <[email protected]>
Signed-off-by: Mark Johnston <[email protected]>
Closes #10519
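As a rough usage sketch (not part of this commit), a caller might try the range
lock and back off when it is contended instead of sleeping. The znode "zp", the
off/len values, and the EAGAIN handling below are illustrative assumptions;
zfs_rangelock_tryenter(), zfs_rangelock_exit(), and the NULL return on
contention are what this change provides:

	zfs_locked_range_t *lr;

	/* Attempt to take the range lock without sleeping. */
	lr = zfs_rangelock_tryenter(&zp->z_rangelock, off, len, RL_READER);
	if (lr == NULL) {
		/* Range is held by someone else; back off and retry later. */
		return (SET_ERROR(EAGAIN));
	}

	/* ... perform the I/O covered by [off, off + len) ... */

	zfs_rangelock_exit(lr);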
Diffstat (limited to 'module')
-rw-r--r--    module/zfs/zfs_rlock.c    65
1 file changed, 47 insertions(+), 18 deletions(-)
diff --git a/module/zfs/zfs_rlock.c b/module/zfs/zfs_rlock.c
index 091b37f9f..06a5e031a 100644
--- a/module/zfs/zfs_rlock.c
+++ b/module/zfs/zfs_rlock.c
@@ -150,10 +150,12 @@ zfs_rangelock_fini(zfs_rangelock_t *rl)
}
/*
- * Check if a write lock can be grabbed, or wait and recheck until available.
+ * Check if a write lock can be grabbed. If not, fail immediately or sleep and
+ * recheck until available, depending on the value of the "nonblock" parameter.
*/
-static void
-zfs_rangelock_enter_writer(zfs_rangelock_t *rl, zfs_locked_range_t *new)
+static boolean_t
+zfs_rangelock_enter_writer(zfs_rangelock_t *rl, zfs_locked_range_t *new,
+ boolean_t nonblock)
{
avl_tree_t *tree = &rl->rl_tree;
zfs_locked_range_t *lr;
@@ -183,7 +185,7 @@ zfs_rangelock_enter_writer(zfs_rangelock_t *rl, zfs_locked_range_t *new)
*/
if (avl_numnodes(tree) == 0) {
avl_add(tree, new);
- return;
+ return (B_TRUE);
}
/*
@@ -204,8 +206,10 @@ zfs_rangelock_enter_writer(zfs_rangelock_t *rl, zfs_locked_range_t *new)
goto wait;
avl_insert(tree, new, where);
- return;
+ return (B_TRUE);
wait:
+ if (nonblock)
+ return (B_FALSE);
if (!lr->lr_write_wanted) {
cv_init(&lr->lr_write_cv, NULL, CV_DEFAULT, NULL);
lr->lr_write_wanted = B_TRUE;
@@ -391,10 +395,12 @@ zfs_rangelock_add_reader(avl_tree_t *tree, zfs_locked_range_t *new,
}
/*
- * Check if a reader lock can be grabbed, or wait and recheck until available.
+ * Check if a reader lock can be grabbed. If not, fail immediately or sleep and
+ * recheck until available, depending on the value of the "nonblock" parameter.
*/
-static void
-zfs_rangelock_enter_reader(zfs_rangelock_t *rl, zfs_locked_range_t *new)
+static boolean_t
+zfs_rangelock_enter_reader(zfs_rangelock_t *rl, zfs_locked_range_t *new,
+ boolean_t nonblock)
{
avl_tree_t *tree = &rl->rl_tree;
zfs_locked_range_t *prev, *next;
@@ -415,6 +421,8 @@ retry:
*/
if (prev && (off < prev->lr_offset + prev->lr_length)) {
if ((prev->lr_type == RL_WRITER) || (prev->lr_write_wanted)) {
+ if (nonblock)
+ return (B_FALSE);
if (!prev->lr_read_wanted) {
cv_init(&prev->lr_read_cv,
NULL, CV_DEFAULT, NULL);
@@ -439,6 +447,8 @@ retry:
if (off + len <= next->lr_offset)
goto got_lock;
if ((next->lr_type == RL_WRITER) || (next->lr_write_wanted)) {
+ if (nonblock)
+ return (B_FALSE);
if (!next->lr_read_wanted) {
cv_init(&next->lr_read_cv,
NULL, CV_DEFAULT, NULL);
@@ -457,6 +467,7 @@ got_lock:
* locks and bumping ref counts (r_count).
*/
zfs_rangelock_add_reader(tree, new, prev, where);
+ return (B_TRUE);
}
/*
@@ -464,11 +475,12 @@ got_lock:
* (RL_WRITER or RL_APPEND). If RL_APPEND is specified, rl_cb() will convert
* it to a RL_WRITER lock (with the offset at the end of the file). Returns
* the range lock structure for later unlocking (or reduce range if the
- * entire file is locked as RL_WRITER).
+ * entire file is locked as RL_WRITER), or NULL if nonblock is true and the
+ * lock could not be acquired immediately.
*/
-zfs_locked_range_t *
-zfs_rangelock_enter(zfs_rangelock_t *rl, uint64_t off, uint64_t len,
- zfs_rangelock_type_t type)
+static zfs_locked_range_t *
+zfs_rangelock_enter_impl(zfs_rangelock_t *rl, uint64_t off, uint64_t len,
+ zfs_rangelock_type_t type, boolean_t nonblock)
{
zfs_locked_range_t *new;
@@ -491,18 +503,34 @@ zfs_rangelock_enter(zfs_rangelock_t *rl, uint64_t off, uint64_t len,
/*
* First check for the usual case of no locks
*/
- if (avl_numnodes(&rl->rl_tree) == 0)
+ if (avl_numnodes(&rl->rl_tree) == 0) {
avl_add(&rl->rl_tree, new);
- else
- zfs_rangelock_enter_reader(rl, new);
- } else {
- /* RL_WRITER or RL_APPEND */
- zfs_rangelock_enter_writer(rl, new);
+ } else if (!zfs_rangelock_enter_reader(rl, new, nonblock)) {
+ kmem_free(new, sizeof (*new));
+ new = NULL;
+ }
+ } else if (!zfs_rangelock_enter_writer(rl, new, nonblock)) {
+ kmem_free(new, sizeof (*new));
+ new = NULL;
}
mutex_exit(&rl->rl_lock);
return (new);
}
+zfs_locked_range_t *
+zfs_rangelock_enter(zfs_rangelock_t *rl, uint64_t off, uint64_t len,
+ zfs_rangelock_type_t type)
+{
+ return (zfs_rangelock_enter_impl(rl, off, len, type, B_FALSE));
+}
+
+zfs_locked_range_t *
+zfs_rangelock_tryenter(zfs_rangelock_t *rl, uint64_t off, uint64_t len,
+ zfs_rangelock_type_t type)
+{
+ return (zfs_rangelock_enter_impl(rl, off, len, type, B_TRUE));
+}
+
/*
* Safely free the zfs_locked_range_t.
*/
@@ -657,6 +685,7 @@ zfs_rangelock_reduce(zfs_locked_range_t *lr, uint64_t off, uint64_t len)
EXPORT_SYMBOL(zfs_rangelock_init);
EXPORT_SYMBOL(zfs_rangelock_fini);
EXPORT_SYMBOL(zfs_rangelock_enter);
+EXPORT_SYMBOL(zfs_rangelock_tryenter);
EXPORT_SYMBOL(zfs_rangelock_exit);
EXPORT_SYMBOL(zfs_rangelock_reduce);
#endif