aboutsummaryrefslogtreecommitdiffstats
path: root/module/zfs/dmu_tx.c
diff options
context:
space:
mode:
authorMatthew Ahrens <[email protected]>2019-06-22 16:48:54 -0700
committerBrian Behlendorf <[email protected]>2019-06-22 16:48:54 -0700
commitcb9e5b7e84654a8c7dba0f9a0d1227f3c8fa1012 (patch)
tree27a4c56bed02bf6590a083d1a676ad8620564609 /module/zfs/dmu_tx.c
parentca4e5a785f844eaace4bf80cb70a3a02f1b587f6 (diff)
dn_struct_rwlock cannot be held in dmu_tx_try_assign()
The thread calling dmu_tx_try_assign() can't hold the dn_struct_rwlock while assigning the tx, because this can lead to deadlock. Specifically, if this dnode is already assigned to an earlier txg, this thread may need to wait for that txg to sync (the ERESTART case below). The other thread that has assigned this dnode to an earlier txg prevents this txg from syncing until its tx can complete (calling dmu_tx_commit()), but it may need to acquire the dn_struct_rwlock to do so (e.g. via dmu_buf_hold*()). This commit adds an assertion to dmu_tx_try_assign() to ensure that this deadlock is not inadvertently introduced. Reviewed-by: Brian Behlendorf <[email protected]> Signed-off-by: Matthew Ahrens <[email protected]> Closes #8929
Diffstat (limited to 'module/zfs/dmu_tx.c')
-rw-r--r--module/zfs/dmu_tx.c19
1 file changed, 19 insertions, 0 deletions
diff --git a/module/zfs/dmu_tx.c b/module/zfs/dmu_tx.c
index cbadcc86f..7d65e842f 100644
--- a/module/zfs/dmu_tx.c
+++ b/module/zfs/dmu_tx.c
@@ -925,6 +925,25 @@ dmu_tx_try_assign(dmu_tx_t *tx, uint64_t txg_how)
txh = list_next(&tx->tx_holds, txh)) {
dnode_t *dn = txh->txh_dnode;
if (dn != NULL) {
+ /*
+ * This thread can't hold the dn_struct_rwlock
+ * while assigning the tx, because this can lead to
+ * deadlock. Specifically, if this dnode is already
+ * assigned to an earlier txg, this thread may need
+ * to wait for that txg to sync (the ERESTART case
+ * below). The other thread that has assigned this
+ * dnode to an earlier txg prevents this txg from
+ * syncing until its tx can complete (calling
+ * dmu_tx_commit()), but it may need to acquire the
+ * dn_struct_rwlock to do so (e.g. via
+ * dmu_buf_hold*()).
+ *
+ * Note that this thread can't hold the lock for
+ * read either, but the rwlock doesn't record
+ * enough information to make that assertion.
+ */
+ ASSERT(!RW_WRITE_HELD(&dn->dn_struct_rwlock));
+
mutex_enter(&dn->dn_mtx);
if (dn->dn_assigned_txg == tx->tx_txg - 1) {
mutex_exit(&dn->dn_mtx);