author     Brian Behlendorf <[email protected]>  2011-07-22 13:55:27 -0700
committer  Brian Behlendorf <[email protected]>  2011-07-22 15:24:57 -0700
commit     beb982690293310b14a4835d3ffeb883ba03cc0a
tree       4a9cbed83508e2802e137c0af366c47c263d56c5  /module/zfs/dbuf.c
parent     f7ef846ea19809b2f373c0542741ac04c65a07cb
Fix txg_sync_thread deadlock
Update two kmem_alloc() calls in dbuf_dirty() to use KM_PUSHPAGE. Because dbuf_dirty() is called from txg_sync_thread, these allocations must not reenter the ZFS filesystem code via the .writepage callback; doing so would result in a deadlock. This deadlock is rare and has only been observed once, under an abusive mmap() write workload.

Signed-off-by: Brian Behlendorf <[email protected]>
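As background (not part of the original commit), the sketch below illustrates the failure mode this patch avoids: under memory pressure a KM_SLEEP allocation may enter direct reclaim, and reclaim may invoke the filesystem's .writepage callback. When the allocating thread is txg_sync_thread itself, that write path ends up waiting on the very txg sync this thread is responsible for. The helper name sync_context_alloc() is hypothetical; kmem_alloc() and the KM_SLEEP/KM_PUSHPAGE flags are the SPL interfaces the patch actually uses.

#include <sys/kmem.h>	/* SPL: kmem_alloc(), KM_SLEEP, KM_PUSHPAGE */

/*
 * Hypothetical helper, for illustration only: allocate from a context
 * that must never initiate filesystem writeback, such as the
 * txg_sync_thread.
 */
static void *
sync_context_alloc(size_t size)
{
	/*
	 * Unsafe here: KM_SLEEP may block in direct reclaim, which can
	 * reenter ZFS via .writepage.  The write path then waits on the
	 * txg sync that this thread performs, and the system deadlocks.
	 *
	 *	return (kmem_alloc(size, KM_SLEEP));
	 */

	/*
	 * Safe: KM_PUSHPAGE may still sleep, but it tells the allocator
	 * not to initiate filesystem writeback during reclaim, so
	 * .writepage is never reentered from this allocation.
	 */
	return (kmem_alloc(size, KM_PUSHPAGE));
}

This mirrors the two changes in the diff below, which swap KM_SLEEP for KM_PUSHPAGE at both allocation sites in dbuf_dirty().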
Diffstat (limited to 'module/zfs/dbuf.c')
-rw-r--r--  module/zfs/dbuf.c  |  4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/module/zfs/dbuf.c b/module/zfs/dbuf.c
index 1ebac4ee6..e166c81df 100644
--- a/module/zfs/dbuf.c
+++ b/module/zfs/dbuf.c
@@ -1095,7 +1095,7 @@ dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
 		dn->dn_dirtyctx =
 		    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN);
 		ASSERT(dn->dn_dirtyctx_firstset == NULL);
-		dn->dn_dirtyctx_firstset = kmem_alloc(1, KM_SLEEP);
+		dn->dn_dirtyctx_firstset = kmem_alloc(1, KM_PUSHPAGE);
 	}
 	mutex_exit(&dn->dn_mtx);
@@ -1172,7 +1172,7 @@ dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
 	 * to make a copy of it so that the changes we make in this
 	 * transaction group won't leak out when we sync the older txg.
 	 */
-	dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_SLEEP);
+	dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_PUSHPAGE);
 	list_link_init(&dr->dr_dirty_node);
 	if (db->db_level == 0) {
 		void *data_old = db->db_buf;