author    | Tim Chase <[email protected]>        | 2014-09-25 23:40:41 -0500
committer | Brian Behlendorf <[email protected]> | 2014-09-29 09:22:03 -0700
commit    | cb08f063074a5363810a39775fd90a4d214c5d87
tree      | b0a665c80be2a77ac86b250df56e0efd1f93a6ee
parent    | dcca723acee9803561dd7ef456a28c4684c28e86
Perform whole-page page truncation for hole-punching under a range lock
As an attempt to perform the page truncation more optimally, the
hole-punching support added in 223df0161fad50f53a8fa5ffeea8cc4f8137d522
performed the operation in two steps: first, sub-page "stubs"
were zeroed under the range lock in zfs_free_range() using the new
zfs_zero_partial_page() function and then the whole pages were truncated
within zfs_freesp(). This left a window of opportunity during which
the full pages could be touched.
This patch closes the window by moving the whole-page truncation into
zfs_free_range() under the range lock.
Signed-off-by: Tim Chase <[email protected]>
Signed-off-by: Brian Behlendorf <[email protected]>
Closes #2733
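
Purely as an illustration (this sketch is hypothetical and not part of the patch; it hard-codes a 4 KiB page size in place of the kernel's PAGE_CACHE_SIZE/PAGE_CACHE_SHIFT macros), the boundary arithmetic the patch relies on splits a punched range [off, off + len) into whole pages, which can simply be dropped from the page cache, and partial head/tail stubs, which must be zeroed in place:

```c
/*
 * Standalone illustration (not ZFS code) of the page-boundary math used
 * when punching a hole over [off, off + len).  Assumes 4 KiB pages.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1ULL << PAGE_SHIFT)

static void
punch_layout(uint64_t off, uint64_t len)
{
	/* first page that lies entirely inside the hole */
	uint64_t first_page = (off + PAGE_SIZE - 1) >> PAGE_SHIFT;
	/* page containing the first byte just past the hole */
	uint64_t last_page = (off + len) >> PAGE_SHIFT;

	uint64_t first_page_offset = first_page << PAGE_SHIFT;
	uint64_t last_page_offset = last_page << PAGE_SHIFT;

	/* whole pages: these would be dropped from the page cache */
	if (last_page_offset > first_page_offset) {
		printf("whole pages:    [%llu, %llu)\n",
		    (unsigned long long)first_page_offset,
		    (unsigned long long)last_page_offset);
	}

	/* entire punched area within a single page: zero just [off, off+len) */
	if (first_page > last_page) {
		printf("sub-page range: [%llu, %llu)\n",
		    (unsigned long long)off,
		    (unsigned long long)(off + len));
		return;
	}

	/* partial page at the start of the hole */
	if (off & (PAGE_SIZE - 1))
		printf("head stub:      [%llu, %llu)\n",
		    (unsigned long long)off,
		    (unsigned long long)first_page_offset);

	/* partial page at the end of the hole */
	if ((off + len) & (PAGE_SIZE - 1))
		printf("tail stub:      [%llu, %llu)\n",
		    (unsigned long long)last_page_offset,
		    (unsigned long long)(off + len));
}

int
main(void)
{
	/* e.g. punch 10000 bytes at offset 3000 */
	punch_layout(3000, 10000);
	return (0);
}
```

For example, punching 10000 bytes at offset 3000 yields whole pages [4096, 12288) plus a head stub [3000, 4096) and a tail stub [12288, 13000); the patch drops the whole pages with truncate_inode_pages_range() and zeroes the stubs with zfs_zero_partial_page(), all while the range lock is held.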
-rw-r--r-- | module/zfs/zfs_znode.c | 30
1 file changed, 8 insertions(+), 22 deletions(-)
diff --git a/module/zfs/zfs_znode.c b/module/zfs/zfs_znode.c
index 08faf0838..90dbfd315 100644
--- a/module/zfs/zfs_znode.c
+++ b/module/zfs/zfs_znode.c
@@ -1440,6 +1440,13 @@ zfs_free_range(znode_t *zp, uint64_t off, uint64_t len)
 	/* offset of last_page */
 	last_page_offset = last_page << PAGE_CACHE_SHIFT;
 
+	/* truncate whole pages */
+	if (last_page_offset > first_page_offset) {
+		truncate_inode_pages_range(ZTOI(zp)->i_mapping,
+		    first_page_offset, last_page_offset - 1);
+	}
+
+	/* truncate sub-page ranges */
 	if (first_page > last_page) {
 		/* entire punched area within a single page */
 		zfs_zero_partial_page(zp, off, len);
@@ -1607,31 +1614,10 @@ out:
 	/*
 	 * Truncate the page cache - for file truncate operations, use
 	 * the purpose-built API for truncations.  For punching operations,
-	 * truncate only whole pages within the region; partial pages are
-	 * zeroed under a range lock in zfs_free_range().
+	 * the truncation is handled under a range lock in zfs_free_range.
	 */
 	if (len == 0)
 		truncate_setsize(ZTOI(zp), off);
-	else if (zp->z_is_mapped) {
-		loff_t first_page, last_page;
-		loff_t first_page_offset, last_page_offset;
-
-		/* first possible full page in hole */
-		first_page = (off + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-		/* last page of hole */
-		last_page = (off + len) >> PAGE_CACHE_SHIFT;
-
-		/* offset of first_page */
-		first_page_offset = first_page << PAGE_CACHE_SHIFT;
-		/* offset of last_page */
-		last_page_offset = last_page << PAGE_CACHE_SHIFT;
-
-		/* truncate whole pages */
-		if (last_page_offset > first_page_offset) {
-			truncate_inode_pages_range(ZTOI(zp)->i_mapping,
-			    first_page_offset, last_page_offset - 1);
-		}
-	}
 
 	return (error);
 }
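
One detail worth noting in the first hunk: truncate_inode_pages_range() takes an inclusive end offset, so the half-open whole-page region [first_page_offset, last_page_offset) is passed as first_page_offset and last_page_offset - 1.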