Diffstat (limited to 'module')
 -rw-r--r--  module/zfs/dmu_objset.c  10
 -rw-r--r--  module/zfs/dsl_pool.c     7
 -rw-r--r--  module/zfs/rrwlock.c     25
3 files changed, 38 insertions(+), 4 deletions(-)
diff --git a/module/zfs/dmu_objset.c b/module/zfs/dmu_objset.c
index bc1aa1286..823a15677 100644
--- a/module/zfs/dmu_objset.c
+++ b/module/zfs/dmu_objset.c
@@ -1784,7 +1784,15 @@ dmu_objset_find_dp_cb(void *arg)
 	dmu_objset_find_ctx_t *dcp = arg;
 	dsl_pool_t *dp = dcp->dc_dp;
 
-	dsl_pool_config_enter(dp, FTAG);
+	/*
+	 * We need to take the pool_config_lock here, as there are several
+	 * ASSERT(dsl_pool_config_held()) checks down the stack. Taking the
+	 * lock via dsl_pool_config_enter() is risky, as it might stall
+	 * behind a pending writer. This would deadlock, as the write lock
+	 * can only be granted once our parent thread gives up the lock.
+	 * The _prio interface gives us priority over a pending writer.
+	 */
+	dsl_pool_config_enter_prio(dp, FTAG);
 
 	dmu_objset_find_dp_impl(dcp);
 
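The comment above describes a read-write-read cycle: the parent thread already holds the pool config lock as reader and waits for its child tasks, a writer queues behind that read hold, and a child taking the plain reader path then queues behind the writer. A standalone toy model of the gate (plain pthreads, hypothetical names, not ZFS code) shows why the prio hint breaks the cycle:

/*
 * Toy model of the read gate (hypothetical; not ZFS code).
 */
#include <pthread.h>
#include <stdbool.h>

typedef struct {
	pthread_mutex_t mtx;
	pthread_cond_t  cv;
	int             readers;        /* active read holds */
	bool            writer_wanted;  /* a writer is queued */
} toy_rrwlock_t;

/*
 * A plain reader yields to a queued writer; a prio reader does not.
 * The "!prio" mirrors the term added to rrw_enter_read_impl() below.
 */
static void
toy_enter_read(toy_rrwlock_t *l, bool prio)
{
	pthread_mutex_lock(&l->mtx);
	while (l->writer_wanted && !prio)
		pthread_cond_wait(&l->cv, &l->mtx);
	l->readers++;
	pthread_mutex_unlock(&l->mtx);
}

static void
toy_exit_read(toy_rrwlock_t *l)
{
	pthread_mutex_lock(&l->mtx);
	if (--l->readers == 0)
		pthread_cond_broadcast(&l->cv);	/* let a queued writer in */
	pthread_mutex_unlock(&l->mtx);
}

With prio false, the child blocks in the while loop and the parent never returns from waiting on it; with prio true, the child gets its hold and finishes, so the parent can drop the last read hold and the writer proceeds.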
diff --git a/module/zfs/dsl_pool.c b/module/zfs/dsl_pool.c
index 5d804352d..23cf43862 100644
--- a/module/zfs/dsl_pool.c
+++ b/module/zfs/dsl_pool.c
@@ -1051,6 +1051,13 @@ dsl_pool_config_enter(dsl_pool_t *dp, void *tag)
 }
 
 void
+dsl_pool_config_enter_prio(dsl_pool_t *dp, void *tag)
+{
+	ASSERT(!rrw_held(&dp->dp_config_rwlock, RW_READER));
+	rrw_enter_read_prio(&dp->dp_config_rwlock, tag);
+}
+
+void
 dsl_pool_config_exit(dsl_pool_t *dp, void *tag)
 {
 	rrw_exit(&dp->dp_config_rwlock, tag);
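No matching _prio exit is needed: rrw_exit() drops a read hold the same way regardless of how it was acquired, so callers pair the new entry point with the existing dsl_pool_config_exit(). A minimal caller sketch (hypothetical function name, modeled on dmu_objset_find_dp_cb above):

/* Hypothetical caller, modeled on dmu_objset_find_dp_cb() above. */
static void
walk_datasets(dsl_pool_t *dp)
{
	dsl_pool_config_enter_prio(dp, FTAG);	/* won't stall behind a queued writer */

	/* ... code that ASSERTs dsl_pool_config_held(dp) ... */

	dsl_pool_config_exit(dp, FTAG);		/* ordinary exit path */
}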
diff --git a/module/zfs/rrwlock.c b/module/zfs/rrwlock.c
index 8e80166c7..29a22534e 100644
--- a/module/zfs/rrwlock.c
+++ b/module/zfs/rrwlock.c
@@ -159,8 +159,8 @@ rrw_destroy(rrwlock_t *rrl)
 	refcount_destroy(&rrl->rr_linked_rcount);
 }
 
-void
-rrw_enter_read(rrwlock_t *rrl, void *tag)
+static void
+rrw_enter_read_impl(rrwlock_t *rrl, boolean_t prio, void *tag)
 {
 	mutex_enter(&rrl->rr_lock);
 #if !defined(DEBUG) && defined(_KERNEL)
@@ -176,7 +176,7 @@ rrw_enter_read(rrwlock_t *rrl, void *tag)
 	ASSERT(refcount_count(&rrl->rr_anon_rcount) >= 0);
 
 	while (rrl->rr_writer != NULL || (rrl->rr_writer_wanted &&
-	    refcount_is_zero(&rrl->rr_anon_rcount) &&
+	    refcount_is_zero(&rrl->rr_anon_rcount) && !prio &&
 	    rrn_find(rrl) == NULL))
 		cv_wait(&rrl->rr_cv, &rrl->rr_lock);
 
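Restated as a predicate (a sketch for illustration, not part of the patch), the modified gate reads:

/* Sketch, not part of the patch: the while-condition above, restated. */
static boolean_t
reader_must_wait(rrwlock_t *rrl, boolean_t prio)
{
	if (rrl->rr_writer != NULL)
		return (B_TRUE);	/* a writer holds the lock */
	return (rrl->rr_writer_wanted &&		/* a writer is queued, and */
	    refcount_is_zero(&rrl->rr_anon_rcount) &&	/* no anonymous readers are in, and */
	    !prio &&					/* no priority was requested, and */
	    rrn_find(rrl) == NULL);			/* this thread holds no read hold yet */
}

Each negated term is an independent reason to admit a reader past a queued writer; the patch adds !prio as one more such reason.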
@@ -192,6 +192,25 @@ rrw_enter_read(rrwlock_t *rrl, void *tag)
 }
 
 void
+rrw_enter_read(rrwlock_t *rrl, void *tag)
+{
+	rrw_enter_read_impl(rrl, B_FALSE, tag);
+}
+
+/*
+ * Take a read lock even if there are pending write lock requests. When
+ * the lock is taken reentrantly from different (but related) threads,
+ * the normal re-entrance detection cannot overrule the pending writer,
+ * so the caller must give an explicit hint here.
+ */
+void
+rrw_enter_read_prio(rrwlock_t *rrl, void *tag)
+{
+	rrw_enter_read_impl(rrl, B_TRUE, tag);
+}
+
+
+void
 rrw_enter_write(rrwlock_t *rrl)
 {
 	mutex_enter(&rrl->rr_lock);
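Taken together, the scenario this patch resolves plays out as below (a sketch using the functions added above; thread setup elided):

/*
 * Timeline (sketch):
 *
 *   parent:  rrw_enter_read(&l, FTAG);        read hold granted
 *   writer:  rrw_enter_write(&l);             blocks, sets rr_writer_wanted
 *   child:   rrw_enter_read(&l, FTAG);        would block behind the writer
 *                                             while the parent waits on the
 *                                             child -> deadlock
 *   child:   rrw_enter_read_prio(&l, FTAG);   bypasses rr_writer_wanted
 *   child:   rrw_exit(&l, FTAG);
 *   parent:  rrw_exit(&l, FTAG);              writer proceeds
 */
static void
child_side(rrwlock_t *l)
{
	rrw_enter_read_prio(l, FTAG);	/* parent already holds a read hold */
	/* ... work done on behalf of the parent ... */
	rrw_exit(l, FTAG);
}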