| author | Prasad Joshi <[email protected]> | 2011-05-28 02:53:07 +0100 |
| --- | --- | --- |
| committer | Brian Behlendorf <[email protected]> | 2011-07-01 12:22:52 -0700 |
| commit | dde471ef5a07bd569deeadd3e9a88655db3e10ab (patch) | |
| tree | a6fc4f05594420aa7b4a1337c7db1da7bae61113 /module/zfs/zpl_file.c | |
| parent | 2a005961a48e748632e96272915192dab6ce9401 (diff) | |
MMAP Optimization
Enable the zfs_getpage, zfs_fillpage, zfs_putpage, and zfs_putapage
functions. These functions have been modified to make them Linux
friendly. ZFS uses them to read and write mmapped pages; calling them
from readpage/writepage results in cleaner code. The patch also adds
readpages and writepages interface functions to read or write a list
of pages in one function call.
The code change implements the first mmap optimization described at
https://github.com/behlendorf/zfs/issues/225
Signed-off-by: Prasad Joshi <[email protected]>
Signed-off-by: Brian Behlendorf <[email protected]>
Issue #255
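
As background (not part of the patch), here is a minimal userspace sketch of the mmap(2) path these hooks service: a read fault pulls a page in through .readpage/.readpages, and dirtying the mapping followed by msync(2) pushes it back out through .writepage/.writepages. The file path below is a placeholder for any file on a mounted ZFS dataset.

/*
 * Illustrative only: exercise the mmap(2) paths handled by the zpl_*
 * address space operations.  No write(2) is ever issued; the dirty
 * page reaches ZFS purely through the writeback hooks.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int
main(void)
{
        const char *path = "/tank/fs/testfile";     /* placeholder path */
        size_t len = 4096;
        int fd;
        char *p;

        fd = open(path, O_RDWR | O_CREAT, 0644);
        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (ftruncate(fd, len) < 0) {
                perror("ftruncate");
                return 1;
        }

        p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (p == MAP_FAILED) {
                perror("mmap");
                return 1;
        }

        /* Read fault: the page is filled via .readpage/.readpages. */
        printf("first byte: %d\n", p[0]);

        /* Dirty the mapped page directly in memory. */
        memcpy(p, "mmap write", 10);

        /* msync pushes the dirty page out via .writepage/.writepages. */
        if (msync(p, len, MS_SYNC) < 0)
                perror("msync");

        munmap(p, len);
        close(fd);
        return 0;
}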
Diffstat (limited to 'module/zfs/zpl_file.c')
-rw-r--r-- | module/zfs/zpl_file.c | 141 |
1 file changed, 82 insertions, 59 deletions
diff --git a/module/zfs/zpl_file.c b/module/zfs/zpl_file.c
index de66ff4b4..0e90b7803 100644
--- a/module/zfs/zpl_file.c
+++ b/module/zfs/zpl_file.c
@@ -254,6 +254,60 @@ zpl_mmap(struct file *filp, struct vm_area_struct *vma)
 	return (error);
 }
 
+static struct page **
+pages_vector_from_list(struct list_head *pages, unsigned nr_pages)
+{
+	struct page **pl;
+	struct page *t;
+	unsigned page_idx;
+
+	pl = kmalloc(sizeof(*pl) * nr_pages, GFP_NOFS);
+	if (!pl)
+		return ERR_PTR(-ENOMEM);
+
+	page_idx = 0;
+	list_for_each_entry_reverse(t, pages, lru) {
+		pl[page_idx] = t;
+		page_idx++;
+	}
+
+	return pl;
+}
+
+static int
+zpl_readpages(struct file *file, struct address_space *mapping,
+	struct list_head *pages, unsigned nr_pages)
+{
+	struct inode *ip;
+	struct page **pl;
+	struct page *p, *n;
+	int error;
+
+	ip = mapping->host;
+
+	pl = pages_vector_from_list(pages, nr_pages);
+	if (IS_ERR(pl))
+		return PTR_ERR(pl);
+
+	error = -zfs_getpage(ip, pl, nr_pages);
+	if (error)
+		goto error;
+
+	list_for_each_entry_safe_reverse(p, n, pages, lru) {
+
+		list_del(&p->lru);
+
+		flush_dcache_page(p);
+		SetPageUptodate(p);
+		unlock_page(p);
+		page_cache_release(p);
+	}
+
+error:
+	kfree(pl);
+	return error;
+}
+
 /*
  * Populate a page with data for the Linux page cache. This function is
  * only used to support mmap(2). There will be an identical copy of the
@@ -267,33 +321,34 @@ static int
 zpl_readpage(struct file *filp, struct page *pp)
 {
 	struct inode *ip;
-	loff_t off, i_size;
-	size_t len, wrote;
-	cred_t *cr = CRED();
-	void *pb;
+	struct page *pl[1];
 	int error = 0;
 
 	ASSERT(PageLocked(pp));
 	ip = pp->mapping->host;
-	off = page_offset(pp);
-	i_size = i_size_read(ip);
-	ASSERT3S(off, <, i_size);
+	pl[0] = pp;
 
-	crhold(cr);
-	len = MIN(PAGE_CACHE_SIZE, i_size - off);
+	error = -zfs_getpage(ip, pl, 1);
 
-	pb = kmap(pp);
+	if (error) {
+		SetPageError(pp);
+		ClearPageUptodate(pp);
+	} else {
+		ClearPageError(pp);
+		SetPageUptodate(pp);
+		flush_dcache_page(pp);
+	}
 
-	/* O_DIRECT is passed to bypass the page cache and avoid deadlock. */
-	wrote = zpl_read_common(ip, pb, len, off, UIO_SYSSPACE, O_DIRECT, cr);
-	if (wrote != len)
-		error = -EIO;
+	unlock_page(pp);
+	return error;
+}
 
-	if (!error && (len < PAGE_CACHE_SIZE))
-		memset(pb + len, 0, PAGE_CACHE_SIZE - len);
+int
+zpl_putpage(struct page *pp, struct writeback_control *wbc, void *data)
+{
+	int error;
 
-	kunmap(pp);
-	crfree(cr);
+	error = -zfs_putpage(pp, wbc, data);
 
 	if (error) {
 		SetPageError(pp);
@@ -305,8 +360,13 @@ zpl_readpage(struct file *filp, struct page *pp)
 	}
 
 	unlock_page(pp);
+	return error;
+}
 
-	return (error);
+static int
+zpl_writepages(struct address_space *mapping, struct writeback_control *wbc)
+{
+	return write_cache_pages(mapping, wbc, zpl_putpage, mapping);
 }
 
 /*
@@ -314,55 +374,18 @@ zpl_readpage(struct file *filp, struct page *pp)
  * support mmap(2). Mapped pages may be dirtied by memory operations
  * which never call .write(). These dirty pages are kept in sync with
  * the ARC buffers via this hook.
- *
- * Currently this function relies on zpl_write_common() and the O_DIRECT
- * flag to push out the page. This works but the more correct way is
- * to update zfs_putapage() to be Linux friendly and use that interface.
  */
 static int
 zpl_writepage(struct page *pp, struct writeback_control *wbc)
 {
-	struct inode *ip;
-	loff_t off, i_size;
-	size_t len, read;
-	cred_t *cr = CRED();
-	void *pb;
-	int error = 0;
-
-	ASSERT(PageLocked(pp));
-	ip = pp->mapping->host;
-	off = page_offset(pp);
-	i_size = i_size_read(ip);
-
-	crhold(cr);
-	len = MIN(PAGE_CACHE_SIZE, i_size - off);
-
-	pb = kmap(pp);
-
-	/* O_DIRECT is passed to bypass the page cache and avoid deadlock. */
-	read = zpl_write_common(ip, pb, len, off, UIO_SYSSPACE, O_DIRECT, cr);
-	if (read != len)
-		error = -EIO;
-
-	kunmap(pp);
-	crfree(cr);
-
-	if (error) {
-		SetPageError(pp);
-		ClearPageUptodate(pp);
-	} else {
-		ClearPageError(pp);
-		SetPageUptodate(pp);
-	}
-
-	unlock_page(pp);
-
-	return (error);
+	return zpl_putpage(pp, wbc, pp->mapping);
 }
 
 const struct address_space_operations zpl_address_space_operations = {
+	.readpages	= zpl_readpages,
 	.readpage	= zpl_readpage,
 	.writepage	= zpl_writepage,
+	.writepages	= zpl_writepages,
 };
 
 const struct file_operations zpl_file_operations = {
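
For orientation, the writeback interface that the new zpl_writepages() builds on, as declared in include/linux/writeback.h of kernels from this era; shown here for reference only, not part of the patch. write_cache_pages() walks the mapping's dirty pages and invokes the writepage_t callback once per page, passing the opaque data pointer straight through, which is why zpl_writepages() hands it the mapping and zpl_writepage() passes pp->mapping.

/* Reference declarations (include/linux/writeback.h, ~2.6.32 era). */
typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
                void *data);

int write_cache_pages(struct address_space *mapping,
                struct writeback_control *wbc, writepage_t writepage,
                void *data);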