author	Brian Behlendorf <[email protected]>	2012-10-31 10:06:34 -0700
committer	Brian Behlendorf <[email protected]>	2012-11-09 19:01:09 -0800
commit	4c837f0d931546e656b832caf11b8d4c2063d905 (patch)
tree	a5a19bfb82f907861f24bf2946e42917509a2a21
parent	e26ade5101ba1d8e8350ff1270bfca4258e1ffe3 (diff)
Fix "allocating allocated segment" panic
Gunnar Beutner did all the hard work on this one by correctly identifying that this issue is a race between dmu_sync() and dbuf_dirty().

Now in all cases the caller is responsible for preventing this race by making sure the zfs_range_lock() is held when dirtying a buffer which may be referenced in a log record. The mmap case, which relies on zfs_putpage(), was not taking the range lock. This code was accidentally dropped when the function was rewritten for the Linux VFS.

This patch adds the required range locking to zfs_putpage(). It also adds the missing ZFS_ENTER()/ZFS_EXIT() macros, which aren't strictly required due to the VFS holding a reference. However, this makes the code more consistent with the upstream code and there's no harm in being extra careful here.

Original-patch-by: Gunnar Beutner <[email protected]>
Signed-off-by: Brian Behlendorf <[email protected]>
Closes #541
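The ordering the patch enforces in zfs_putpage() generalizes to any path that dirties a buffer which may later be referenced by a log record: take zfs_range_lock() over the affected range before the DMU transaction is created, and release it only after zfs_log_write()/dmu_tx_commit(). Below is a minimal sketch of that pattern using the same in-tree primitives the diff touches (zfs_range_lock(), dmu_tx_*(), zfs_log_write()); the wrapper name write_locked_range(), the include list, and the abbreviated error handling are illustrative only and not part of the patch.

#include <sys/dmu.h>
#include <sys/zil.h>
#include <sys/zfs_rlock.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_vfsops.h>

/*
 * Illustrative sketch only: dirty a file range while holding the
 * range lock, mirroring the order the patch establishes in
 * zfs_putpage().
 */
static int
write_locked_range(zfs_sb_t *zsb, znode_t *zp, uint64_t off, uint64_t len,
    const void *buf)
{
	rl_t *rl;
	dmu_tx_t *tx;
	int err;

	/* 1. Lock the range first so dmu_sync() cannot race with us. */
	rl = zfs_range_lock(zp, off, len, RL_WRITER);

	/* 2. Only then create and assign the transaction that dirties it. */
	tx = dmu_tx_create(zsb->z_os);
	dmu_tx_hold_write(tx, zp->z_id, off, len);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		zfs_range_unlock(rl);
		return (err);
	}

	dmu_write(zsb->z_os, zp->z_id, off, len, buf, tx);
	zfs_log_write(zsb->z_log, tx, TX_WRITE, zp, off, len, 0);
	dmu_tx_commit(tx);

	/* 3. Drop the range lock only after the log record is in place. */
	zfs_range_unlock(rl);

	return (err);
}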
-rw-r--r--	module/zfs/zfs_vnops.c	11
1 files changed, 10 insertions, 1 deletions
diff --git a/module/zfs/zfs_vnops.c b/module/zfs/zfs_vnops.c
index 89f0f608a..5765c9aa6 100644
--- a/module/zfs/zfs_vnops.c
+++ b/module/zfs/zfs_vnops.c
@@ -3790,7 +3790,8 @@ zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc)
zfs_sb_t *zsb = ITOZSB(ip);
loff_t offset;
loff_t pgoff;
- unsigned int pglen;
+ unsigned int pglen;
+ rl_t *rl;
dmu_tx_t *tx;
caddr_t va;
int err = 0;
@@ -3799,6 +3800,8 @@ zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc)
int cnt = 0;
int sync;
+ ZFS_ENTER(zsb);
+ ZFS_VERIFY_ZP(zp);
ASSERT(PageLocked(pp));
@@ -3810,6 +3813,7 @@ zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc)
/* Page is beyond end of file */
if (pgoff >= offset) {
unlock_page(pp);
+ ZFS_EXIT(zsb);
return (0);
}
@@ -3832,6 +3836,7 @@ zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc)
set_page_writeback(pp);
unlock_page(pp);
+ rl = zfs_range_lock(zp, pgoff, pglen, RL_WRITER);
tx = dmu_tx_create(zsb->z_os);
sync = ((zsb->z_os->os_sync == ZFS_SYNC_ALWAYS) ||
@@ -3858,6 +3863,8 @@ zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc)
if (sync)
zfs_putpage_commit_cb(pp, ECANCELED);
+ zfs_range_unlock(rl);
+ ZFS_EXIT(zsb);
return (err);
}
@@ -3873,6 +3880,7 @@ zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc)
zfs_log_write(zsb->z_log, tx, TX_WRITE, zp, pgoff, pglen, 0);
dmu_tx_commit(tx);
+ zfs_range_unlock(rl);
ASSERT3S(err, ==, 0);
if (sync) {
@@ -3880,6 +3888,7 @@ zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc)
zfs_putpage_commit_cb(pp, err);
}
+ ZFS_EXIT(zsb);
return (err);
}