From 74328ee18f94d27f9c802d29fdd311018dab2adf Mon Sep 17 00:00:00 2001
From: Brian Behlendorf
Date: Fri, 19 Dec 2014 12:57:54 -0800
Subject: Fix zfs_putpage() lock inversion

There exists a lock inversion involving the zfs range lock and the
individual page writeback bits which can result in a deadlock.  To
prevent this we must always manipulate the writeback bit while holding
the range lock.

The exact deadlock is as follows:

------ Process A ------             ------ Process B ------
zpl_writepages                      zpl_fallocate
write_cache_pages                   zpl_fallocate_common
zpl_putpage                         zfs_space
zfs_putpage (set bit)               zfs_freesp
zfs_range_lock (wait on lock)       zfs_free_range (take lock)
[has not yet initiated I/O,         truncate_inode_pages_range
the bit will not be cleared]        wait_on_page_writeback (wait on bit)

Signed-off-by: Brian Behlendorf
Signed-off-by: Tim Chase
Signed-off-by: Richard Yao
Issue #2976
---
 module/zfs/zfs_vnops.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/module/zfs/zfs_vnops.c b/module/zfs/zfs_vnops.c
index d05ccef39..a048aeb36 100644
--- a/module/zfs/zfs_vnops.c
+++ b/module/zfs/zfs_vnops.c
@@ -3899,14 +3899,13 @@ zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc)
 	}
 #endif
 
+	rl = zfs_range_lock(zp, pgoff, pglen, RL_WRITER);
+
 	set_page_writeback(pp);
 	unlock_page(pp);
 
-	rl = zfs_range_lock(zp, pgoff, pglen, RL_WRITER);
 	tx = dmu_tx_create(zsb->z_os);
-
 	dmu_tx_hold_write(tx, zp->z_id, pgoff, pglen);
-
 	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
 	zfs_sa_upgrade_txholds(tx, zp);
 	err = dmu_tx_assign(tx, TXG_NOWAIT);
--
cgit v1.2.3
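
For context beyond the patch itself, below is a minimal standalone C sketch of the rule the change enforces: the writeback bit may only be manipulated while the range lock is held.  This is not ZFS code; the mutexes, the condition variable, and the writeback_path/truncate_path threads are stand-ins for zfs_range_lock(), the per-page writeback bit, zfs_putpage(), and the zfs_free_range()/truncate path, and the I/O is modelled as completing synchronously for simplicity.

/* Simplified model, not ZFS code: range_lock stands in for the zfs range
 * lock, `writeback' for the per-page writeback bit.  Build with -pthread. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t range_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t bit_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t bit_cv = PTHREAD_COND_INITIALIZER;
static bool writeback = false;

/* Writeback path (Process A): take the range lock first, then set the
 * bit, do the "I/O", and clear the bit.  This mirrors the ordering the
 * patch establishes in zfs_putpage(). */
static void *writeback_path(void *arg)
{
	(void) arg;
	pthread_mutex_lock(&range_lock);
	pthread_mutex_lock(&bit_lock);
	writeback = true;		/* set_page_writeback() analogue */
	pthread_mutex_unlock(&bit_lock);

	/* ... write out the dirty page here ... */

	pthread_mutex_lock(&bit_lock);
	writeback = false;		/* end_page_writeback() analogue */
	pthread_cond_broadcast(&bit_cv);
	pthread_mutex_unlock(&bit_lock);
	pthread_mutex_unlock(&range_lock);
	return (NULL);
}

/* Truncate path (Process B): take the range lock, then wait for any
 * in-flight writeback to finish. */
static void *truncate_path(void *arg)
{
	(void) arg;
	pthread_mutex_lock(&range_lock);
	pthread_mutex_lock(&bit_lock);
	while (writeback)		/* wait_on_page_writeback() analogue */
		pthread_cond_wait(&bit_cv, &bit_lock);
	pthread_mutex_unlock(&bit_lock);
	pthread_mutex_unlock(&range_lock);
	return (NULL);
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, writeback_path, NULL);
	pthread_create(&b, NULL, truncate_path, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("completed without deadlock\n");
	return (0);
}

Because a thread can only set the bit after it already holds the range lock, the truncate path can never hold the range lock while waiting on a bit whose owner is still queued behind that same lock, which is exactly the Process A / Process B cycle shown in the commit message.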