author     Brian Behlendorf <[email protected]>    2016-04-05 12:39:37 -0700
committer  Brian Behlendorf <[email protected]>    2016-04-05 17:26:56 -0700
commit     8b1899d3fbab354dfe57b2293216a76579e5d805 (patch)
tree       91d4b3ec5b26ab958150da9a372c928ea95c35c0 /module/zfs
parent     63e0828c174e84ec583d34327b3fc1c5f53fa39c (diff)
Linux 4.6 compat: PAGE_CACHE_SIZE removal
As described in torvalds/linux@4a2d057e, the PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} macros were originally introduced to make it possible to add bigger chunks to the page cache. This never panned out, and the macros have therefore been removed from the kernel. ZFS has been updated to use the PAGE_{SIZE,SHIFT,MASK,ALIGN} macros instead, and calls to page_cache_release() have been replaced with put_page(). There was no need to introduce a configure check for this because these interfaces have existed for a very long time.

Signed-off-by: Brian Behlendorf <[email protected]>
Signed-off-by: Chunwei Chen <[email protected]>
Closes #4489
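For reference, the removed names were plain aliases in older kernels, which is why the conversion is behavior-preserving on every kernel. A paraphrased sketch of the pre-4.6 definitions (from include/linux/pagemap.h; illustrative only, not part of this patch):

#define PAGE_CACHE_SHIFT	PAGE_SHIFT
#define PAGE_CACHE_SIZE		PAGE_SIZE
#define PAGE_CACHE_MASK		PAGE_MASK
#define page_cache_release(page) put_page(page)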
Diffstat (limited to 'module/zfs')
-rw-r--r--  module/zfs/zfs_vnops.c  28
-rw-r--r--  module/zfs/zfs_znode.c  19
2 files changed, 23 insertions(+), 24 deletions(-)
diff --git a/module/zfs/zfs_vnops.c b/module/zfs/zfs_vnops.c
index 21b5d1ca4..4fa4fe074 100644
--- a/module/zfs/zfs_vnops.c
+++ b/module/zfs/zfs_vnops.c
@@ -333,11 +333,11 @@ update_pages(struct inode *ip, int64_t start, int len,
int64_t off;
void *pb;
- off = start & (PAGE_CACHE_SIZE-1);
- for (start &= PAGE_CACHE_MASK; len > 0; start += PAGE_CACHE_SIZE) {
- nbytes = MIN(PAGE_CACHE_SIZE - off, len);
+ off = start & (PAGE_SIZE-1);
+ for (start &= PAGE_MASK; len > 0; start += PAGE_SIZE) {
+ nbytes = MIN(PAGE_SIZE - off, len);
- pp = find_lock_page(mp, start >> PAGE_CACHE_SHIFT);
+ pp = find_lock_page(mp, start >> PAGE_SHIFT);
if (pp) {
if (mapping_writably_mapped(mp))
flush_dcache_page(pp);
@@ -354,7 +354,7 @@ update_pages(struct inode *ip, int64_t start, int len,
SetPageUptodate(pp);
ClearPageError(pp);
unlock_page(pp);
- page_cache_release(pp);
+ put_page(pp);
}
len -= nbytes;
@@ -385,11 +385,11 @@ mappedread(struct inode *ip, int nbytes, uio_t *uio)
void *pb;
start = uio->uio_loffset;
- off = start & (PAGE_CACHE_SIZE-1);
- for (start &= PAGE_CACHE_MASK; len > 0; start += PAGE_CACHE_SIZE) {
- bytes = MIN(PAGE_CACHE_SIZE - off, len);
+ off = start & (PAGE_SIZE-1);
+ for (start &= PAGE_MASK; len > 0; start += PAGE_SIZE) {
+ bytes = MIN(PAGE_SIZE - off, len);
- pp = find_lock_page(mp, start >> PAGE_CACHE_SHIFT);
+ pp = find_lock_page(mp, start >> PAGE_SHIFT);
if (pp) {
ASSERT(PageUptodate(pp));
@@ -402,7 +402,7 @@ mappedread(struct inode *ip, int nbytes, uio_t *uio)
mark_page_accessed(pp);
unlock_page(pp);
- page_cache_release(pp);
+ put_page(pp);
} else {
error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
uio, bytes);
@@ -3976,8 +3976,8 @@ zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc)
pgoff = page_offset(pp); /* Page byte-offset in file */
offset = i_size_read(ip); /* File length in bytes */
- pglen = MIN(PAGE_CACHE_SIZE, /* Page length in bytes */
- P2ROUNDUP(offset, PAGE_CACHE_SIZE)-pgoff);
+ pglen = MIN(PAGE_SIZE, /* Page length in bytes */
+ P2ROUNDUP(offset, PAGE_SIZE)-pgoff);
/* Page is beyond end of file */
if (pgoff >= offset) {
@@ -4088,7 +4088,7 @@ zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc)
}
va = kmap(pp);
- ASSERT3U(pglen, <=, PAGE_CACHE_SIZE);
+ ASSERT3U(pglen, <=, PAGE_SIZE);
dmu_write(zsb->z_os, zp->z_id, pgoff, pglen, va, tx);
kunmap(pp);
@@ -4263,7 +4263,7 @@ zfs_fillpage(struct inode *ip, struct page *pl[], int nr_pages)
int err;
os = zsb->z_os;
- io_len = nr_pages << PAGE_CACHE_SHIFT;
+ io_len = nr_pages << PAGE_SHIFT;
i_size = i_size_read(ip);
io_off = page_offset(pl[0]);
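The update_pages() and mappedread() hunks above only rename macros; the per-page walk itself is untouched. A standalone sketch of that arithmetic with hypothetical values (userspace stand-ins for the kernel macros, 4 KiB pages assumed):

#include <stdio.h>
#include <stdint.h>

/* Userspace stand-ins for the kernel macros; 4 KiB pages assumed. */
#define PAGE_SHIFT	12
#define PAGE_SIZE	(1L << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define MIN(a, b)	((a) < (b) ? (a) : (b))

int
main(void)
{
	int64_t start = 5000;	/* hypothetical byte offset in the file */
	int64_t len = 9000;	/* hypothetical byte count */
	int64_t off, nbytes;

	/* Offset within the first page, exactly as in update_pages(). */
	off = start & (PAGE_SIZE - 1);

	/* Round start down to a page boundary and walk page by page. */
	for (start &= PAGE_MASK; len > 0; start += PAGE_SIZE) {
		nbytes = MIN(PAGE_SIZE - off, len);
		printf("page %lld: %lld bytes at in-page offset %lld\n",
		    (long long)(start >> PAGE_SHIFT),
		    (long long)nbytes, (long long)off);
		len -= nbytes;
		off = 0;
	}
	return (0);
}

For start = 5000 and len = 9000 this visits pages 1, 2, and 3, copying 3192, 4096, and 1712 bytes respectively; only the in-page offset of the first iteration is nonzero.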
diff --git a/module/zfs/zfs_znode.c b/module/zfs/zfs_znode.c
index 7ebe54384..f32482247 100644
--- a/module/zfs/zfs_znode.c
+++ b/module/zfs/zfs_znode.c
@@ -1512,13 +1512,12 @@ zfs_zero_partial_page(znode_t *zp, uint64_t start, uint64_t len)
int64_t off;
void *pb;
- ASSERT((start & PAGE_CACHE_MASK) ==
- ((start + len - 1) & PAGE_CACHE_MASK));
+ ASSERT((start & PAGE_MASK) == ((start + len - 1) & PAGE_MASK));
- off = start & (PAGE_CACHE_SIZE - 1);
- start &= PAGE_CACHE_MASK;
+ off = start & (PAGE_SIZE - 1);
+ start &= PAGE_MASK;
- pp = find_lock_page(mp, start >> PAGE_CACHE_SHIFT);
+ pp = find_lock_page(mp, start >> PAGE_SHIFT);
if (pp) {
if (mapping_writably_mapped(mp))
flush_dcache_page(pp);
@@ -1534,7 +1533,7 @@ zfs_zero_partial_page(znode_t *zp, uint64_t start, uint64_t len)
SetPageUptodate(pp);
ClearPageError(pp);
unlock_page(pp);
- page_cache_release(pp);
+ put_page(pp);
}
}
@@ -1581,14 +1580,14 @@ zfs_free_range(znode_t *zp, uint64_t off, uint64_t len)
loff_t first_page_offset, last_page_offset;
/* first possible full page in hole */
- first_page = (off + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ first_page = (off + PAGE_SIZE - 1) >> PAGE_SHIFT;
/* last page of hole */
- last_page = (off + len) >> PAGE_CACHE_SHIFT;
+ last_page = (off + len) >> PAGE_SHIFT;
/* offset of first_page */
- first_page_offset = first_page << PAGE_CACHE_SHIFT;
+ first_page_offset = first_page << PAGE_SHIFT;
/* offset of last_page */
- last_page_offset = last_page << PAGE_CACHE_SHIFT;
+ last_page_offset = last_page << PAGE_SHIFT;
/* truncate whole pages */
if (last_page_offset > first_page_offset) {
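The rounding in the zfs_free_range() hunk picks out the whole pages inside the punched hole: first_page rounds the start up, and last_page rounds the end down, serving as an exclusive bound on the full pages. A standalone sketch with hypothetical values, again assuming 4 KiB pages:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1L << PAGE_SHIFT)

int
main(void)
{
	uint64_t off = 5000, len = 20000;	/* hypothetical hole */
	uint64_t first_page, last_page;
	uint64_t first_page_offset, last_page_offset;

	/* first possible full page in hole: round the start up */
	first_page = (off + PAGE_SIZE - 1) >> PAGE_SHIFT;	/* 2 */
	/* last page of hole (exclusive bound): round the end down */
	last_page = (off + len) >> PAGE_SHIFT;			/* 6 */

	first_page_offset = first_page << PAGE_SHIFT;		/* 8192 */
	last_page_offset = last_page << PAGE_SHIFT;		/* 24576 */

	/* whole pages [first_page, last_page) can be truncated outright */
	if (last_page_offset > first_page_offset)
		printf("truncate bytes %llu..%llu\n",
		    (unsigned long long)first_page_offset,
		    (unsigned long long)(last_page_offset - 1));
	return (0);
}

Here pages 2 through 5 (bytes 8192..24575) lie entirely within the hole and are truncated in one step; the partial pages at either end are zeroed separately via zfs_zero_partial_page().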