Diffstat (limited to 'module/zfs')
-rw-r--r-- | module/zfs/zfs_vnops.c | 28
-rw-r--r-- | module/zfs/zfs_znode.c | 19
2 files changed, 23 insertions, 24 deletions
diff --git a/module/zfs/zfs_vnops.c b/module/zfs/zfs_vnops.c
index 21b5d1ca4..4fa4fe074 100644
--- a/module/zfs/zfs_vnops.c
+++ b/module/zfs/zfs_vnops.c
@@ -333,11 +333,11 @@ update_pages(struct inode *ip, int64_t start, int len,
 	int64_t	off;
 	void *pb;
 
-	off = start & (PAGE_CACHE_SIZE-1);
-	for (start &= PAGE_CACHE_MASK; len > 0; start += PAGE_CACHE_SIZE) {
-		nbytes = MIN(PAGE_CACHE_SIZE - off, len);
+	off = start & (PAGE_SIZE-1);
+	for (start &= PAGE_MASK; len > 0; start += PAGE_SIZE) {
+		nbytes = MIN(PAGE_SIZE - off, len);
 
-		pp = find_lock_page(mp, start >> PAGE_CACHE_SHIFT);
+		pp = find_lock_page(mp, start >> PAGE_SHIFT);
 		if (pp) {
 			if (mapping_writably_mapped(mp))
 				flush_dcache_page(pp);
@@ -354,7 +354,7 @@ update_pages(struct inode *ip, int64_t start, int len,
 			SetPageUptodate(pp);
 			ClearPageError(pp);
 			unlock_page(pp);
-			page_cache_release(pp);
+			put_page(pp);
 		}
 
 		len -= nbytes;
@@ -385,11 +385,11 @@ mappedread(struct inode *ip, int nbytes, uio_t *uio)
 	void *pb;
 
 	start = uio->uio_loffset;
-	off = start & (PAGE_CACHE_SIZE-1);
-	for (start &= PAGE_CACHE_MASK; len > 0; start += PAGE_CACHE_SIZE) {
-		bytes = MIN(PAGE_CACHE_SIZE - off, len);
+	off = start & (PAGE_SIZE-1);
+	for (start &= PAGE_MASK; len > 0; start += PAGE_SIZE) {
+		bytes = MIN(PAGE_SIZE - off, len);
 
-		pp = find_lock_page(mp, start >> PAGE_CACHE_SHIFT);
+		pp = find_lock_page(mp, start >> PAGE_SHIFT);
 		if (pp) {
 			ASSERT(PageUptodate(pp));
 
@@ -402,7 +402,7 @@ mappedread(struct inode *ip, int nbytes, uio_t *uio)
 
 			mark_page_accessed(pp);
 			unlock_page(pp);
-			page_cache_release(pp);
+			put_page(pp);
 		} else {
 			error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
 			    uio, bytes);
@@ -3976,8 +3976,8 @@ zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc)
 
 	pgoff = page_offset(pp);	/* Page byte-offset in file */
 	offset = i_size_read(ip);	/* File length in bytes */
-	pglen = MIN(PAGE_CACHE_SIZE,	/* Page length in bytes */
-	    P2ROUNDUP(offset, PAGE_CACHE_SIZE)-pgoff);
+	pglen = MIN(PAGE_SIZE,		/* Page length in bytes */
+	    P2ROUNDUP(offset, PAGE_SIZE)-pgoff);
 
 	/* Page is beyond end of file */
 	if (pgoff >= offset) {
@@ -4088,7 +4088,7 @@ zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc)
 	}
 
 	va = kmap(pp);
-	ASSERT3U(pglen, <=, PAGE_CACHE_SIZE);
+	ASSERT3U(pglen, <=, PAGE_SIZE);
 	dmu_write(zsb->z_os, zp->z_id, pgoff, pglen, va, tx);
 	kunmap(pp);
 
@@ -4263,7 +4263,7 @@ zfs_fillpage(struct inode *ip, struct page *pl[], int nr_pages)
 	int err;
 
 	os = zsb->z_os;
-	io_len = nr_pages << PAGE_CACHE_SHIFT;
+	io_len = nr_pages << PAGE_SHIFT;
 	i_size = i_size_read(ip);
 	io_off = page_offset(pl[0]);
 
diff --git a/module/zfs/zfs_znode.c b/module/zfs/zfs_znode.c
index 7ebe54384..f32482247 100644
--- a/module/zfs/zfs_znode.c
+++ b/module/zfs/zfs_znode.c
@@ -1512,13 +1512,12 @@ zfs_zero_partial_page(znode_t *zp, uint64_t start, uint64_t len)
 	int64_t	off;
 	void *pb;
 
-	ASSERT((start & PAGE_CACHE_MASK) ==
-	    ((start + len - 1) & PAGE_CACHE_MASK));
+	ASSERT((start & PAGE_MASK) == ((start + len - 1) & PAGE_MASK));
 
-	off = start & (PAGE_CACHE_SIZE - 1);
-	start &= PAGE_CACHE_MASK;
+	off = start & (PAGE_SIZE - 1);
+	start &= PAGE_MASK;
 
-	pp = find_lock_page(mp, start >> PAGE_CACHE_SHIFT);
+	pp = find_lock_page(mp, start >> PAGE_SHIFT);
 	if (pp) {
 		if (mapping_writably_mapped(mp))
 			flush_dcache_page(pp);
@@ -1534,7 +1533,7 @@ zfs_zero_partial_page(znode_t *zp, uint64_t start, uint64_t len)
 		SetPageUptodate(pp);
 		ClearPageError(pp);
 		unlock_page(pp);
-		page_cache_release(pp);
+		put_page(pp);
 	}
 }
 
@@ -1581,14 +1580,14 @@ zfs_free_range(znode_t *zp, uint64_t off, uint64_t len)
 		loff_t first_page, last_page, page_len;
 		loff_t first_page_offset, last_page_offset;
 
 		/* first possible full page in hole */
-		first_page = (off + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+		first_page = (off + PAGE_SIZE - 1) >> PAGE_SHIFT;
 		/* last page of hole */
-		last_page = (off + len) >> PAGE_CACHE_SHIFT;
+		last_page = (off + len) >> PAGE_SHIFT;
 		/* offset of first_page */
-		first_page_offset = first_page << PAGE_CACHE_SHIFT;
+		first_page_offset = first_page << PAGE_SHIFT;
 		/* offset of last_page */
-		last_page_offset = last_page << PAGE_CACHE_SHIFT;
+		last_page_offset = last_page << PAGE_SHIFT;
 
 		/* truncate whole pages */
 		if (last_page_offset > first_page_offset) {