-rw-r--r-- | config/kernel-vfs-iov_iter.m4 | 206
-rw-r--r-- | config/kernel.m4 | 2
-rw-r--r-- | include/os/linux/spl/sys/uio.h | 70
-rw-r--r-- | include/os/linux/zfs/sys/zpl.h | 9
-rw-r--r-- | lib/libspl/include/sys/uio.h | 1
-rw-r--r-- | lib/libzfs/Makefile.am | 1
-rw-r--r-- | lib/libzpool/Makefile.am | 1
-rw-r--r-- | module/os/linux/zfs/Makefile.in | 1
-rw-r--r-- | module/os/linux/zfs/zfs_uio.c (renamed from module/zcommon/zfs_uio.c) | 173
-rw-r--r-- | module/os/linux/zfs/zfs_vnops_os.c | 35
-rw-r--r-- | module/os/linux/zfs/zpl_file.c | 332
-rw-r--r-- | module/os/linux/zfs/zpl_inode.c | 10
-rw-r--r-- | module/os/linux/zfs/zpl_xattr.c | 24
-rw-r--r-- | module/os/linux/zfs/zvol_os.c | 27
-rw-r--r-- | module/zcommon/Makefile.in | 1
-rw-r--r-- | module/zfs/zfs_vnops.c | 4
16 files changed, 576 insertions, 321 deletions
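The new uiomove_iter() path added below is built on the kernel's copy_to_iter()/copy_from_iter() primitives: a UIO_READ copies kernel data out to the caller's iov_iter, a UIO_WRITE pulls data in from it, and both return the number of bytes actually copied, which may be zero (for example against a full pipe). A minimal sketch of that convention, not part of this commit; example_copy() is a hypothetical helper name:

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/uio.h>

/* Copy 'len' bytes between a kernel buffer and an iov_iter. */
static ssize_t
example_copy(void *kbuf, size_t len, struct iov_iter *iter, bool is_read)
{
	size_t done;

	if (is_read)
		done = copy_to_iter(kbuf, len, iter);	/* kernel -> iter */
	else
		done = copy_from_iter(kbuf, len, iter);	/* iter -> kernel */

	/* Zero bytes copied (e.g. a full pipe) is reported as EFAULT. */
	return (done == 0 ? -EFAULT : (ssize_t)done);
}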
diff --git a/config/kernel-vfs-iov_iter.m4 b/config/kernel-vfs-iov_iter.m4
new file mode 100644
index 000000000..69db11b68
--- /dev/null
+++ b/config/kernel-vfs-iov_iter.m4
@@ -0,0 +1,206 @@
+dnl #
+dnl # Check for available iov_iter functionality.
+dnl #
+AC_DEFUN([ZFS_AC_KERNEL_SRC_VFS_IOV_ITER], [
+	ZFS_LINUX_TEST_SRC([iov_iter_types], [
+		#include <linux/fs.h>
+		#include <linux/uio.h>
+	],[
+		int type __attribute__ ((unused)) =
+		    ITER_IOVEC | ITER_KVEC | ITER_BVEC | ITER_PIPE;
+	])
+
+	ZFS_LINUX_TEST_SRC([iov_iter_init], [
+		#include <linux/fs.h>
+		#include <linux/uio.h>
+	],[
+		struct iov_iter iter = { 0 };
+		struct iovec iov;
+		unsigned long nr_segs = 1;
+		size_t count = 1024;
+
+		iov_iter_init(&iter, WRITE, &iov, nr_segs, count);
+	])
+
+	ZFS_LINUX_TEST_SRC([iov_iter_init_legacy], [
+		#include <linux/fs.h>
+		#include <linux/uio.h>
+	],[
+		struct iov_iter iter = { 0 };
+		struct iovec iov;
+		unsigned long nr_segs = 1;
+		size_t count = 1024;
+		size_t written = 0;
+
+		iov_iter_init(&iter, &iov, nr_segs, count, written);
+	])
+
+	ZFS_LINUX_TEST_SRC([iov_iter_advance], [
+		#include <linux/fs.h>
+		#include <linux/uio.h>
+	],[
+		struct iov_iter iter = { 0 };
+		size_t advance = 512;
+
+		iov_iter_advance(&iter, advance);
+	])
+
+	ZFS_LINUX_TEST_SRC([iov_iter_revert], [
+		#include <linux/fs.h>
+		#include <linux/uio.h>
+	],[
+		struct iov_iter iter = { 0 };
+		size_t revert = 512;
+
+		iov_iter_revert(&iter, revert);
+	])
+
+	ZFS_LINUX_TEST_SRC([iov_iter_fault_in_readable], [
+		#include <linux/fs.h>
+		#include <linux/uio.h>
+	],[
+		struct iov_iter iter = { 0 };
+		size_t size = 512;
+		int error __attribute__ ((unused));
+
+		error = iov_iter_fault_in_readable(&iter, size);
+	])
+
+	ZFS_LINUX_TEST_SRC([iov_iter_count], [
+		#include <linux/fs.h>
+		#include <linux/uio.h>
+	],[
+		struct iov_iter iter = { 0 };
+		size_t bytes __attribute__ ((unused));
+
+		bytes = iov_iter_count(&iter);
+	])
+
+	ZFS_LINUX_TEST_SRC([copy_to_iter], [
+		#include <linux/fs.h>
+		#include <linux/uio.h>
+	],[
+		struct iov_iter iter = { 0 };
+		char buf[512] = { 0 };
+		size_t size = 512;
+		size_t bytes __attribute__ ((unused));
+
+		bytes = copy_to_iter((const void *)&buf, size, &iter);
+	])
+
+	ZFS_LINUX_TEST_SRC([copy_from_iter], [
+		#include <linux/fs.h>
+		#include <linux/uio.h>
+	],[
+		struct iov_iter iter = { 0 };
+		char buf[512] = { 0 };
+		size_t size = 512;
+		size_t bytes __attribute__ ((unused));
+
+		bytes = copy_from_iter((void *)&buf, size, &iter);
+	])
+])
+
+AC_DEFUN([ZFS_AC_KERNEL_VFS_IOV_ITER], [
+	enable_vfs_iov_iter="yes"
+
+	AC_MSG_CHECKING([whether iov_iter types are available])
+	ZFS_LINUX_TEST_RESULT([iov_iter_types], [
+		AC_MSG_RESULT(yes)
+		AC_DEFINE(HAVE_IOV_ITER_TYPES, 1,
+		    [iov_iter types are available])
+	],[
+		AC_MSG_RESULT(no)
+		enable_vfs_iov_iter="no"
+	])
+
+	dnl #
+	dnl # 'iov_iter_init' available in Linux 3.16 and newer.
+	dnl # 'iov_iter_init_legacy' available in Linux 3.15 and older.
+	dnl #
+	AC_MSG_CHECKING([whether iov_iter_init() is available])
+	ZFS_LINUX_TEST_RESULT([iov_iter_init], [
+		AC_MSG_RESULT(yes)
+		AC_DEFINE(HAVE_IOV_ITER_INIT, 1,
+		    [iov_iter_init() is available])
+	],[
+		ZFS_LINUX_TEST_RESULT([iov_iter_init_legacy], [
+			AC_MSG_RESULT(yes)
+			AC_DEFINE(HAVE_IOV_ITER_INIT_LEGACY, 1,
+			    [iov_iter_init() is available])
+		],[
+			ZFS_LINUX_TEST_ERROR([iov_iter_init()])
+		])
+	])
+
+	AC_MSG_CHECKING([whether iov_iter_advance() is available])
+	ZFS_LINUX_TEST_RESULT([iov_iter_advance], [
+		AC_MSG_RESULT(yes)
+		AC_DEFINE(HAVE_IOV_ITER_ADVANCE, 1,
+		    [iov_iter_advance() is available])
+	],[
+		AC_MSG_RESULT(no)
+		enable_vfs_iov_iter="no"
+	])
+
+	AC_MSG_CHECKING([whether iov_iter_revert() is available])
+	ZFS_LINUX_TEST_RESULT([iov_iter_revert], [
+		AC_MSG_RESULT(yes)
+		AC_DEFINE(HAVE_IOV_ITER_REVERT, 1,
+		    [iov_iter_revert() is available])
+	],[
+		AC_MSG_RESULT(no)
+		enable_vfs_iov_iter="no"
+	])
+
+	AC_MSG_CHECKING([whether iov_iter_fault_in_readable() is available])
+	ZFS_LINUX_TEST_RESULT([iov_iter_fault_in_readable], [
+		AC_MSG_RESULT(yes)
+		AC_DEFINE(HAVE_IOV_ITER_FAULT_IN_READABLE, 1,
+		    [iov_iter_fault_in_readable() is available])
+	],[
+		AC_MSG_RESULT(no)
+		enable_vfs_iov_iter="no"
+	])
+
+	AC_MSG_CHECKING([whether iov_iter_count() is available])
+	ZFS_LINUX_TEST_RESULT([iov_iter_count], [
+		AC_MSG_RESULT(yes)
+		AC_DEFINE(HAVE_IOV_ITER_COUNT, 1,
+		    [iov_iter_count() is available])
+	],[
+		AC_MSG_RESULT(no)
+		enable_vfs_iov_iter="no"
+	])
+
+	AC_MSG_CHECKING([whether copy_to_iter() is available])
+	ZFS_LINUX_TEST_RESULT([copy_to_iter], [
+		AC_MSG_RESULT(yes)
+		AC_DEFINE(HAVE_COPY_TO_ITER, 1,
+		    [copy_to_iter() is available])
+	],[
+		AC_MSG_RESULT(no)
+		enable_vfs_iov_iter="no"
+	])
+
+	AC_MSG_CHECKING([whether copy_from_iter() is available])
+	ZFS_LINUX_TEST_RESULT([copy_from_iter], [
+		AC_MSG_RESULT(yes)
+		AC_DEFINE(HAVE_COPY_FROM_ITER, 1,
+		    [copy_from_iter() is available])
+	],[
+		AC_MSG_RESULT(no)
+		enable_vfs_iov_iter="no"
+	])
+
+	dnl #
+	dnl # As of the 4.9 kernel support is provided for iovecs, kvecs,
+	dnl # bvecs and pipes in the iov_iter structure. As long as the
+	dnl # other support interfaces are all available the iov_iter can
+	dnl # be correctly used in the uio structure.
+ dnl # + AS_IF([test "x$enable_vfs_iov_iter" = "xyes"], [ + AC_DEFINE(HAVE_VFS_IOV_ITER, 1, + [All required iov_iter interfaces are available]) + ]) +]) diff --git a/config/kernel.m4 b/config/kernel.m4 index 14a8d4c58..5377bcadd 100644 --- a/config/kernel.m4 +++ b/config/kernel.m4 @@ -106,6 +106,7 @@ AC_DEFUN([ZFS_AC_KERNEL_TEST_SRC], [ ZFS_AC_KERNEL_SRC_VFS_DIRECT_IO ZFS_AC_KERNEL_SRC_VFS_RW_ITERATE ZFS_AC_KERNEL_SRC_VFS_GENERIC_WRITE_CHECKS + ZFS_AC_KERNEL_SRC_VFS_IOV_ITER ZFS_AC_KERNEL_SRC_KMAP_ATOMIC_ARGS ZFS_AC_KERNEL_SRC_FOLLOW_DOWN_ONE ZFS_AC_KERNEL_SRC_MAKE_REQUEST_FN @@ -204,6 +205,7 @@ AC_DEFUN([ZFS_AC_KERNEL_TEST_RESULT], [ ZFS_AC_KERNEL_VFS_DIRECT_IO ZFS_AC_KERNEL_VFS_RW_ITERATE ZFS_AC_KERNEL_VFS_GENERIC_WRITE_CHECKS + ZFS_AC_KERNEL_VFS_IOV_ITER ZFS_AC_KERNEL_KMAP_ATOMIC_ARGS ZFS_AC_KERNEL_FOLLOW_DOWN_ONE ZFS_AC_KERNEL_MAKE_REQUEST_FN diff --git a/include/os/linux/spl/sys/uio.h b/include/os/linux/spl/sys/uio.h index 9bd358dc3..6e850c5fe 100644 --- a/include/os/linux/spl/sys/uio.h +++ b/include/os/linux/spl/sys/uio.h @@ -44,14 +44,19 @@ typedef enum uio_rw { typedef enum uio_seg { UIO_USERSPACE = 0, UIO_SYSSPACE = 1, - UIO_USERISPACE = 2, - UIO_BVEC = 3, + UIO_BVEC = 2, +#if defined(HAVE_VFS_IOV_ITER) + UIO_ITER = 3, +#endif } uio_seg_t; typedef struct uio { union { const struct iovec *uio_iov; const struct bio_vec *uio_bvec; +#if defined(HAVE_VFS_IOV_ITER) + struct iov_iter *uio_iter; +#endif }; int uio_iovcnt; offset_t uio_loffset; @@ -97,4 +102,65 @@ uio_index_at_offset(uio_t *uio, offset_t off, uint_t *vec_idx) return (off); } +static inline void +iov_iter_init_compat(struct iov_iter *iter, unsigned int dir, + const struct iovec *iov, unsigned long nr_segs, size_t count) +{ +#if defined(HAVE_IOV_ITER_INIT) + iov_iter_init(iter, dir, iov, nr_segs, count); +#elif defined(HAVE_IOV_ITER_INIT_LEGACY) + iov_iter_init(iter, iov, nr_segs, count, 0); +#else +#error "Unsupported kernel" +#endif +} + +static inline void +uio_iovec_init(uio_t *uio, const struct iovec *iov, unsigned long nr_segs, + offset_t offset, uio_seg_t seg, ssize_t resid, size_t skip) +{ + ASSERT(seg == UIO_USERSPACE || seg == UIO_SYSSPACE); + + uio->uio_iov = iov; + uio->uio_iovcnt = nr_segs; + uio->uio_loffset = offset; + uio->uio_segflg = seg; + uio->uio_fault_disable = B_FALSE; + uio->uio_fmode = 0; + uio->uio_extflg = 0; + uio->uio_resid = resid; + uio->uio_skip = skip; +} + +static inline void +uio_bvec_init(uio_t *uio, struct bio *bio) +{ + uio->uio_bvec = &bio->bi_io_vec[BIO_BI_IDX(bio)]; + uio->uio_iovcnt = bio->bi_vcnt - BIO_BI_IDX(bio); + uio->uio_loffset = BIO_BI_SECTOR(bio) << 9; + uio->uio_segflg = UIO_BVEC; + uio->uio_fault_disable = B_FALSE; + uio->uio_fmode = 0; + uio->uio_extflg = 0; + uio->uio_resid = BIO_BI_SIZE(bio); + uio->uio_skip = BIO_BI_SKIP(bio); +} + +#if defined(HAVE_VFS_IOV_ITER) +static inline void +uio_iov_iter_init(uio_t *uio, struct iov_iter *iter, offset_t offset, + ssize_t resid, size_t skip) +{ + uio->uio_iter = iter; + uio->uio_iovcnt = iter->nr_segs; + uio->uio_loffset = offset; + uio->uio_segflg = UIO_ITER; + uio->uio_fault_disable = B_FALSE; + uio->uio_fmode = 0; + uio->uio_extflg = 0; + uio->uio_resid = resid; + uio->uio_skip = skip; +} +#endif + #endif /* SPL_UIO_H */ diff --git a/include/os/linux/zfs/sys/zpl.h b/include/os/linux/zfs/sys/zpl.h index ef5a0b842..b0bb9c29c 100644 --- a/include/os/linux/zfs/sys/zpl.h +++ b/include/os/linux/zfs/sys/zpl.h @@ -46,15 +46,6 @@ extern const struct inode_operations zpl_dir_inode_operations; extern const struct inode_operations 
zpl_symlink_inode_operations; extern const struct inode_operations zpl_special_inode_operations; extern dentry_operations_t zpl_dentry_operations; - -/* zpl_file.c */ -extern ssize_t zpl_read_common(struct inode *ip, const char *buf, - size_t len, loff_t *ppos, uio_seg_t segment, int flags, - cred_t *cr); -extern ssize_t zpl_write_common(struct inode *ip, const char *buf, - size_t len, loff_t *ppos, uio_seg_t segment, int flags, - cred_t *cr); - extern const struct address_space_operations zpl_address_space_operations; extern const struct file_operations zpl_file_operations; extern const struct file_operations zpl_dir_file_operations; diff --git a/lib/libspl/include/sys/uio.h b/lib/libspl/include/sys/uio.h index 99a5a4d2a..1d56b5b18 100644 --- a/lib/libspl/include/sys/uio.h +++ b/lib/libspl/include/sys/uio.h @@ -59,7 +59,6 @@ typedef enum uio_rw { typedef enum uio_seg { UIO_USERSPACE = 0, UIO_SYSSPACE = 1, - UIO_USERISPACE = 2, } uio_seg_t; #elif defined(__FreeBSD__) diff --git a/lib/libzfs/Makefile.am b/lib/libzfs/Makefile.am index abaef2d15..cd80ef719 100644 --- a/lib/libzfs/Makefile.am +++ b/lib/libzfs/Makefile.am @@ -61,7 +61,6 @@ KERNEL_C = \ zfs_fletcher_superscalar4.c \ zfs_namecheck.c \ zfs_prop.c \ - zfs_uio.c \ zpool_prop.c \ zprop_common.c diff --git a/lib/libzpool/Makefile.am b/lib/libzpool/Makefile.am index 5b938bd4a..7aa7e8098 100644 --- a/lib/libzpool/Makefile.am +++ b/lib/libzpool/Makefile.am @@ -47,7 +47,6 @@ KERNEL_C = \ zfs_fletcher_superscalar4.c \ zfs_namecheck.c \ zfs_prop.c \ - zfs_uio.c \ zpool_prop.c \ zprop_common.c \ abd.c \ diff --git a/module/os/linux/zfs/Makefile.in b/module/os/linux/zfs/Makefile.in index 6f653f9ae..75bec52c9 100644 --- a/module/os/linux/zfs/Makefile.in +++ b/module/os/linux/zfs/Makefile.in @@ -23,6 +23,7 @@ $(MODULE)-objs += ../os/linux/zfs/zfs_dir.o $(MODULE)-objs += ../os/linux/zfs/zfs_file_os.o $(MODULE)-objs += ../os/linux/zfs/zfs_ioctl_os.o $(MODULE)-objs += ../os/linux/zfs/zfs_sysfs.o +$(MODULE)-objs += ../os/linux/zfs/zfs_uio.o $(MODULE)-objs += ../os/linux/zfs/zfs_vfsops.o $(MODULE)-objs += ../os/linux/zfs/zfs_vnops_os.o $(MODULE)-objs += ../os/linux/zfs/zfs_znode.o diff --git a/module/zcommon/zfs_uio.c b/module/os/linux/zfs/zfs_uio.c index d586e0a12..e435e1a9f 100644 --- a/module/zcommon/zfs_uio.c +++ b/module/os/linux/zfs/zfs_uio.c @@ -39,12 +39,6 @@ * Copyright (c) 2015 by Chunwei Chen. All rights reserved. */ -/* - * The uio support from OpenSolaris has been added as a short term - * work around. The hope is to adopt native Linux type and drop the - * use of uio's entirely. Under Linux they only add overhead and - * when possible we want to use native APIs for the ZPL layer. - */ #ifdef _KERNEL #include <sys/types.h> @@ -71,7 +65,6 @@ uiomove_iov(void *p, size_t n, enum uio_rw rw, struct uio *uio) cnt = MIN(iov->iov_len - skip, n); switch (uio->uio_segflg) { case UIO_USERSPACE: - case UIO_USERISPACE: /* * p = kernel data pointer * iov->iov_base = user data pointer @@ -165,81 +158,82 @@ uiomove_bvec(void *p, size_t n, enum uio_rw rw, struct uio *uio) return (0); } +#if defined(HAVE_VFS_IOV_ITER) +static int +uiomove_iter(void *p, size_t n, enum uio_rw rw, struct uio *uio, + boolean_t revert) +{ + size_t cnt = MIN(n, uio->uio_resid); + + if (uio->uio_skip) + iov_iter_advance(uio->uio_iter, uio->uio_skip); + + if (rw == UIO_READ) + cnt = copy_to_iter(p, cnt, uio->uio_iter); + else + cnt = copy_from_iter(p, cnt, uio->uio_iter); + + /* + * When operating on a full pipe no bytes are processed. 
+ * In which case return EFAULT which is converted to EAGAIN + * by the kernel's generic_file_splice_read() function. + */ + if (cnt == 0) + return (EFAULT); + + /* + * Revert advancing the uio_iter. This is set by uiocopy() + * to avoid consuming the uio and its iov_iter structure. + */ + if (revert) + iov_iter_revert(uio->uio_iter, cnt); + + uio->uio_resid -= cnt; + uio->uio_loffset += cnt; + + return (0); +} +#endif + int uiomove(void *p, size_t n, enum uio_rw rw, struct uio *uio) { - if (uio->uio_segflg != UIO_BVEC) - return (uiomove_iov(p, n, rw, uio)); - else + if (uio->uio_segflg == UIO_BVEC) return (uiomove_bvec(p, n, rw, uio)); +#if defined(HAVE_VFS_IOV_ITER) + else if (uio->uio_segflg == UIO_ITER) + return (uiomove_iter(p, n, rw, uio, B_FALSE)); +#endif + else + return (uiomove_iov(p, n, rw, uio)); } EXPORT_SYMBOL(uiomove); -#define fuword8(uptr, vptr) get_user((*vptr), (uptr)) - -/* - * Fault in the pages of the first n bytes specified by the uio structure. - * 1 byte in each page is touched and the uio struct is unmodified. Any - * error will terminate the process as this is only a best attempt to get - * the pages resident. - */ int uio_prefaultpages(ssize_t n, struct uio *uio) { - const struct iovec *iov; - ulong_t cnt, incr; - caddr_t p; - uint8_t tmp; - int iovcnt; - size_t skip; + struct iov_iter iter, *iterp = NULL; - /* no need to fault in kernel pages */ - switch (uio->uio_segflg) { - case UIO_SYSSPACE: - case UIO_BVEC: - return (0); - case UIO_USERSPACE: - case UIO_USERISPACE: - break; - default: - ASSERT(0); - } - - iov = uio->uio_iov; - iovcnt = uio->uio_iovcnt; - skip = uio->uio_skip; - - for (; n > 0 && iovcnt > 0; iov++, iovcnt--, skip = 0) { - cnt = MIN(iov->iov_len - skip, n); - /* empty iov */ - if (cnt == 0) - continue; - n -= cnt; - /* - * touch each page in this segment. - */ - p = iov->iov_base + skip; - while (cnt) { - if (fuword8((uint8_t *)p, &tmp)) - return (EFAULT); - incr = MIN(cnt, PAGESIZE); - p += incr; - cnt -= incr; - } - /* - * touch the last byte in case it straddles a page. - */ - p--; - if (fuword8((uint8_t *)p, &tmp)) - return (EFAULT); +#if defined(HAVE_IOV_ITER_FAULT_IN_READABLE) + if (uio->uio_segflg == UIO_USERSPACE) { + iterp = &iter; + iov_iter_init_compat(iterp, READ, uio->uio_iov, + uio->uio_iovcnt, uio->uio_resid); +#if defined(HAVE_VFS_IOV_ITER) + } else if (uio->uio_segflg == UIO_ITER) { + iterp = uio->uio_iter; +#endif } + if (iterp && iov_iter_fault_in_readable(iterp, n)) + return (EFAULT); +#endif return (0); } EXPORT_SYMBOL(uio_prefaultpages); /* - * same as uiomove() but doesn't modify uio structure. + * The same as uiomove() but doesn't modify uio structure. * return in cbytes how many bytes were copied. */ int @@ -249,39 +243,54 @@ uiocopy(void *p, size_t n, enum uio_rw rw, struct uio *uio, size_t *cbytes) int ret; bcopy(uio, &uio_copy, sizeof (struct uio)); - ret = uiomove(p, n, rw, &uio_copy); + + if (uio->uio_segflg == UIO_BVEC) + ret = uiomove_bvec(p, n, rw, &uio_copy); +#if defined(HAVE_VFS_IOV_ITER) + else if (uio->uio_segflg == UIO_ITER) + ret = uiomove_iter(p, n, rw, &uio_copy, B_TRUE); +#endif + else + ret = uiomove_iov(p, n, rw, &uio_copy); + *cbytes = uio->uio_resid - uio_copy.uio_resid; + return (ret); } EXPORT_SYMBOL(uiocopy); /* - * Drop the next n chars out of *uiop. + * Drop the next n chars out of *uio. 
*/ void -uioskip(uio_t *uiop, size_t n) +uioskip(uio_t *uio, size_t n) { - if (n > uiop->uio_resid) + if (n > uio->uio_resid) return; - uiop->uio_skip += n; - if (uiop->uio_segflg != UIO_BVEC) { - while (uiop->uio_iovcnt && - uiop->uio_skip >= uiop->uio_iov->iov_len) { - uiop->uio_skip -= uiop->uio_iov->iov_len; - uiop->uio_iov++; - uiop->uio_iovcnt--; + if (uio->uio_segflg == UIO_BVEC) { + uio->uio_skip += n; + while (uio->uio_iovcnt && + uio->uio_skip >= uio->uio_bvec->bv_len) { + uio->uio_skip -= uio->uio_bvec->bv_len; + uio->uio_bvec++; + uio->uio_iovcnt--; } +#if defined(HAVE_VFS_IOV_ITER) + } else if (uio->uio_segflg == UIO_ITER) { + iov_iter_advance(uio->uio_iter, n); +#endif } else { - while (uiop->uio_iovcnt && - uiop->uio_skip >= uiop->uio_bvec->bv_len) { - uiop->uio_skip -= uiop->uio_bvec->bv_len; - uiop->uio_bvec++; - uiop->uio_iovcnt--; + uio->uio_skip += n; + while (uio->uio_iovcnt && + uio->uio_skip >= uio->uio_iov->iov_len) { + uio->uio_skip -= uio->uio_iov->iov_len; + uio->uio_iov++; + uio->uio_iovcnt--; } } - uiop->uio_loffset += n; - uiop->uio_resid -= n; + uio->uio_loffset += n; + uio->uio_resid -= n; } EXPORT_SYMBOL(uioskip); #endif /* _KERNEL */ diff --git a/module/os/linux/zfs/zfs_vnops_os.c b/module/os/linux/zfs/zfs_vnops_os.c index 8e5208000..3be387a30 100644 --- a/module/os/linux/zfs/zfs_vnops_os.c +++ b/module/os/linux/zfs/zfs_vnops_os.c @@ -355,28 +355,37 @@ unsigned long zfs_delete_blocks = DMU_MAX_DELETEBLKCNT; * OUT: resid - remaining bytes to write * * RETURN: 0 if success - * positive error code if failure + * positive error code if failure. EIO is returned + * for a short write when residp isn't provided. * * Timestamps: * zp - ctime|mtime updated if byte count > 0 */ int zfs_write_simple(znode_t *zp, const void *data, size_t len, - loff_t pos, size_t *resid) + loff_t pos, size_t *residp) { - ssize_t written; - int error = 0; + fstrans_cookie_t cookie; + int error; - written = zpl_write_common(ZTOI(zp), data, len, &pos, - UIO_SYSSPACE, 0, kcred); - if (written < 0) { - error = -written; - } else if (resid == NULL) { - if (written < len) - error = SET_ERROR(EIO); /* short write */ - } else { - *resid = len - written; + struct iovec iov; + iov.iov_base = (void *)data; + iov.iov_len = len; + + uio_t uio; + uio_iovec_init(&uio, &iov, 1, pos, UIO_SYSSPACE, len, 0); + + cookie = spl_fstrans_mark(); + error = zfs_write(zp, &uio, 0, kcred); + spl_fstrans_unmark(cookie); + + if (error == 0) { + if (residp != NULL) + *residp = uio_resid(&uio); + else if (uio_resid(&uio) != 0) + error = SET_ERROR(EIO); } + return (error); } diff --git a/module/os/linux/zfs/zpl_file.c b/module/os/linux/zfs/zpl_file.c index 616e1b942..d6bc84009 100644 --- a/module/os/linux/zfs/zpl_file.c +++ b/module/os/linux/zfs/zpl_file.c @@ -212,242 +212,224 @@ zfs_io_flags(struct kiocb *kiocb) return (flags); } -static ssize_t -zpl_read_common_iovec(struct inode *ip, const struct iovec *iovp, size_t count, - unsigned long nr_segs, loff_t *ppos, uio_seg_t segment, int flags, - cred_t *cr, size_t skip) +/* + * If relatime is enabled, call file_accessed() if zfs_relatime_need_update() + * is true. This is needed since datasets with inherited "relatime" property + * aren't necessarily mounted with the MNT_RELATIME flag (e.g. after + * `zfs set relatime=...`), which is what relatime test in VFS by + * relatime_need_update() is based on. 
+ */ +static inline void +zpl_file_accessed(struct file *filp) { - ssize_t read; - uio_t uio = { { 0 }, 0 }; - int error; - fstrans_cookie_t cookie; - - uio.uio_iov = iovp; - uio.uio_iovcnt = nr_segs; - uio.uio_loffset = *ppos; - uio.uio_segflg = segment; - uio.uio_resid = count; - uio.uio_skip = skip; - - cookie = spl_fstrans_mark(); - error = -zfs_read(ITOZ(ip), &uio, flags, cr); - spl_fstrans_unmark(cookie); - if (error < 0) - return (error); - - read = count - uio.uio_resid; - *ppos += read; + struct inode *ip = filp->f_mapping->host; - return (read); + if (!IS_NOATIME(ip) && ITOZSB(ip)->z_relatime) { + if (zfs_relatime_need_update(ip)) + file_accessed(filp); + } else { + file_accessed(filp); + } } -inline ssize_t -zpl_read_common(struct inode *ip, const char *buf, size_t len, loff_t *ppos, - uio_seg_t segment, int flags, cred_t *cr) -{ - struct iovec iov; - - iov.iov_base = (void *)buf; - iov.iov_len = len; +#if defined(HAVE_VFS_RW_ITERATE) - return (zpl_read_common_iovec(ip, &iov, len, 1, ppos, segment, - flags, cr, 0)); +/* + * When HAVE_VFS_IOV_ITER is defined the iov_iter structure supports + * iovecs, kvevs, bvecs and pipes, plus all the required interfaces to + * manipulate the iov_iter are available. In which case the full iov_iter + * can be attached to the uio and correctly handled in the lower layers. + * Otherwise, for older kernels extract the iovec and pass it instead. + */ +static void +zpl_uio_init(uio_t *uio, struct kiocb *kiocb, struct iov_iter *to, + loff_t pos, ssize_t count, size_t skip) +{ +#if defined(HAVE_VFS_IOV_ITER) + uio_iov_iter_init(uio, to, pos, count, skip); +#else + uio_iovec_init(uio, to->iov, to->nr_segs, pos, + to->type & ITER_KVEC ? UIO_SYSSPACE : UIO_USERSPACE, + count, skip); +#endif } static ssize_t -zpl_iter_read_common(struct kiocb *kiocb, const struct iovec *iovp, - unsigned long nr_segs, size_t count, uio_seg_t seg, size_t skip) +zpl_iter_read(struct kiocb *kiocb, struct iov_iter *to) { cred_t *cr = CRED(); + fstrans_cookie_t cookie; struct file *filp = kiocb->ki_filp; - struct inode *ip = filp->f_mapping->host; - zfsvfs_t *zfsvfs = ZTOZSB(ITOZ(ip)); - ssize_t read; - unsigned int f_flags = filp->f_flags; + ssize_t count = iov_iter_count(to); + uio_t uio; + + zpl_uio_init(&uio, kiocb, to, kiocb->ki_pos, count, 0); - f_flags |= zfs_io_flags(kiocb); crhold(cr); - read = zpl_read_common_iovec(filp->f_mapping->host, iovp, count, - nr_segs, &kiocb->ki_pos, seg, f_flags, cr, skip); + cookie = spl_fstrans_mark(); + + int error = -zfs_read(ITOZ(filp->f_mapping->host), &uio, + filp->f_flags | zfs_io_flags(kiocb), cr); + + spl_fstrans_unmark(cookie); crfree(cr); - /* - * If relatime is enabled, call file_accessed() only if - * zfs_relatime_need_update() is true. This is needed since datasets - * with inherited "relatime" property aren't necessarily mounted with - * MNT_RELATIME flag (e.g. after `zfs set relatime=...`), which is what - * relatime test in VFS by relatime_need_update() is based on. 
- */ - if (!IS_NOATIME(ip) && zfsvfs->z_relatime) { - if (zfs_relatime_need_update(ip)) - file_accessed(filp); - } else { - file_accessed(filp); - } + if (error < 0) + return (error); + + ssize_t read = count - uio.uio_resid; + kiocb->ki_pos += read; + + zpl_file_accessed(filp); + + if (read > 0) + iov_iter_advance(to, read); return (read); } -#if defined(HAVE_VFS_RW_ITERATE) -static ssize_t -zpl_iter_read(struct kiocb *kiocb, struct iov_iter *to) +static inline ssize_t +zpl_generic_write_checks(struct kiocb *kiocb, struct iov_iter *from, + size_t *countp) { - ssize_t ret; - uio_seg_t seg = UIO_USERSPACE; - if (to->type & ITER_KVEC) - seg = UIO_SYSSPACE; - if (to->type & ITER_BVEC) - seg = UIO_BVEC; - ret = zpl_iter_read_common(kiocb, to->iov, to->nr_segs, - iov_iter_count(to), seg, to->iov_offset); - if (ret > 0) - iov_iter_advance(to, ret); - return (ret); -} +#ifdef HAVE_GENERIC_WRITE_CHECKS_KIOCB + ssize_t ret = generic_write_checks(kiocb, from); + if (ret <= 0) + return (ret); + + *countp = ret; #else -static ssize_t -zpl_aio_read(struct kiocb *kiocb, const struct iovec *iovp, - unsigned long nr_segs, loff_t pos) -{ - ssize_t ret; - size_t count; + struct file *file = kiocb->ki_filp; + struct address_space *mapping = file->f_mapping; + struct inode *ip = mapping->host; + int isblk = S_ISBLK(ip->i_mode); - ret = generic_segment_checks(iovp, &nr_segs, &count, VERIFY_WRITE); + *countp = iov_iter_count(from); + ssize_t ret = generic_write_checks(file, &kiocb->ki_pos, countp, isblk); if (ret) return (ret); +#endif - return (zpl_iter_read_common(kiocb, iovp, nr_segs, count, - UIO_USERSPACE, 0)); + return (0); } -#endif /* HAVE_VFS_RW_ITERATE */ static ssize_t -zpl_write_common_iovec(struct inode *ip, const struct iovec *iovp, size_t count, - unsigned long nr_segs, loff_t *ppos, uio_seg_t segment, int flags, - cred_t *cr, size_t skip) +zpl_iter_write(struct kiocb *kiocb, struct iov_iter *from) { - ssize_t wrote; - uio_t uio = { { 0 }, 0 }; - int error; + cred_t *cr = CRED(); fstrans_cookie_t cookie; + struct file *filp = kiocb->ki_filp; + struct inode *ip = filp->f_mapping->host; + uio_t uio; + size_t count; + ssize_t ret; - if (flags & O_APPEND) - *ppos = i_size_read(ip); + ret = zpl_generic_write_checks(kiocb, from, &count); + if (ret) + return (ret); - uio.uio_iov = iovp; - uio.uio_iovcnt = nr_segs; - uio.uio_loffset = *ppos; - uio.uio_segflg = segment; - uio.uio_resid = count; - uio.uio_skip = skip; + zpl_uio_init(&uio, kiocb, from, kiocb->ki_pos, count, from->iov_offset); + crhold(cr); cookie = spl_fstrans_mark(); - error = -zfs_write(ITOZ(ip), &uio, flags, cr); + + int error = -zfs_write(ITOZ(ip), &uio, + filp->f_flags | zfs_io_flags(kiocb), cr); + spl_fstrans_unmark(cookie); + crfree(cr); + if (error < 0) return (error); - wrote = count - uio.uio_resid; - *ppos += wrote; + ssize_t wrote = count - uio.uio_resid; + kiocb->ki_pos += wrote; + + if (wrote > 0) + iov_iter_advance(from, wrote); return (wrote); } -inline ssize_t -zpl_write_common(struct inode *ip, const char *buf, size_t len, loff_t *ppos, - uio_seg_t segment, int flags, cred_t *cr) -{ - struct iovec iov; - - iov.iov_base = (void *)buf; - iov.iov_len = len; - - return (zpl_write_common_iovec(ip, &iov, len, 1, ppos, segment, - flags, cr, 0)); -} +#else /* !HAVE_VFS_RW_ITERATE */ static ssize_t -zpl_iter_write_common(struct kiocb *kiocb, const struct iovec *iovp, - unsigned long nr_segs, size_t count, uio_seg_t seg, size_t skip) +zpl_aio_read(struct kiocb *kiocb, const struct iovec *iov, + unsigned long nr_segs, loff_t pos) { 
cred_t *cr = CRED(); + fstrans_cookie_t cookie; struct file *filp = kiocb->ki_filp; - ssize_t wrote; - unsigned int f_flags = filp->f_flags; - - f_flags |= zfs_io_flags(kiocb); - crhold(cr); - wrote = zpl_write_common_iovec(filp->f_mapping->host, iovp, count, - nr_segs, &kiocb->ki_pos, seg, f_flags, cr, skip); - crfree(cr); - - return (wrote); -} - -#if defined(HAVE_VFS_RW_ITERATE) -static ssize_t -zpl_iter_write(struct kiocb *kiocb, struct iov_iter *from) -{ size_t count; ssize_t ret; - uio_seg_t seg = UIO_USERSPACE; - -#ifndef HAVE_GENERIC_WRITE_CHECKS_KIOCB - struct file *file = kiocb->ki_filp; - struct address_space *mapping = file->f_mapping; - struct inode *ip = mapping->host; - int isblk = S_ISBLK(ip->i_mode); - count = iov_iter_count(from); - ret = generic_write_checks(file, &kiocb->ki_pos, &count, isblk); + ret = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE); if (ret) return (ret); -#else - /* - * XXX - ideally this check should be in the same lock region with - * write operations, so that there's no TOCTTOU race when doing - * append and someone else grow the file. - */ - ret = generic_write_checks(kiocb, from); - if (ret <= 0) - return (ret); - count = ret; -#endif - if (from->type & ITER_KVEC) - seg = UIO_SYSSPACE; - if (from->type & ITER_BVEC) - seg = UIO_BVEC; + uio_t uio; + uio_iovec_init(&uio, iov, nr_segs, kiocb->ki_pos, UIO_USERSPACE, + count, 0); - ret = zpl_iter_write_common(kiocb, from->iov, from->nr_segs, - count, seg, from->iov_offset); - if (ret > 0) - iov_iter_advance(from, ret); + crhold(cr); + cookie = spl_fstrans_mark(); + + int error = -zfs_read(ITOZ(filp->f_mapping->host), &uio, + filp->f_flags | zfs_io_flags(kiocb), cr); + + spl_fstrans_unmark(cookie); + crfree(cr); + + if (error < 0) + return (error); + + ssize_t read = count - uio.uio_resid; + kiocb->ki_pos += read; - return (ret); + zpl_file_accessed(filp); + + return (read); } -#else + static ssize_t -zpl_aio_write(struct kiocb *kiocb, const struct iovec *iovp, +zpl_aio_write(struct kiocb *kiocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos) { - struct file *file = kiocb->ki_filp; - struct address_space *mapping = file->f_mapping; - struct inode *ip = mapping->host; - int isblk = S_ISBLK(ip->i_mode); + cred_t *cr = CRED(); + fstrans_cookie_t cookie; + struct file *filp = kiocb->ki_filp; + struct inode *ip = filp->f_mapping->host; size_t count; ssize_t ret; - ret = generic_segment_checks(iovp, &nr_segs, &count, VERIFY_READ); + ret = generic_segment_checks(iov, &nr_segs, &count, VERIFY_READ); if (ret) return (ret); - ret = generic_write_checks(file, &pos, &count, isblk); + ret = generic_write_checks(filp, &pos, &count, S_ISBLK(ip->i_mode)); if (ret) return (ret); - return (zpl_iter_write_common(kiocb, iovp, nr_segs, count, - UIO_USERSPACE, 0)); + uio_t uio; + uio_iovec_init(&uio, iov, nr_segs, kiocb->ki_pos, UIO_USERSPACE, + count, 0); + + crhold(cr); + cookie = spl_fstrans_mark(); + + int error = -zfs_write(ITOZ(ip), &uio, + filp->f_flags | zfs_io_flags(kiocb), cr); + + spl_fstrans_unmark(cookie); + crfree(cr); + + if (error < 0) + return (error); + + ssize_t wrote = count - uio.uio_resid; + kiocb->ki_pos += wrote; + + return (wrote); } #endif /* HAVE_VFS_RW_ITERATE */ @@ -488,13 +470,13 @@ zpl_direct_IO(int rw, struct kiocb *kiocb, struct iov_iter *iter, loff_t pos) #if defined(HAVE_VFS_DIRECT_IO_IOVEC) static ssize_t -zpl_direct_IO(int rw, struct kiocb *kiocb, const struct iovec *iovp, +zpl_direct_IO(int rw, struct kiocb *kiocb, const struct iovec *iov, loff_t pos, unsigned 
long nr_segs) { if (rw == WRITE) - return (zpl_aio_write(kiocb, iovp, nr_segs, pos)); + return (zpl_aio_write(kiocb, iov, nr_segs, pos)); else - return (zpl_aio_read(kiocb, iovp, nr_segs, pos)); + return (zpl_aio_read(kiocb, iov, nr_segs, pos)); } #else #error "Unknown direct IO interface" @@ -601,10 +583,6 @@ zpl_mmap(struct file *filp, struct vm_area_struct *vma) * Populate a page with data for the Linux page cache. This function is * only used to support mmap(2). There will be an identical copy of the * data in the ARC which is kept up to date via .write() and .writepage(). - * - * Current this function relies on zpl_read_common() and the O_DIRECT - * flag to read in a page. This works but the more correct way is to - * update zfs_fillpage() to be Linux friendly and use that interface. */ static int zpl_readpage(struct file *filp, struct page *pp) @@ -1035,6 +1013,10 @@ const struct file_operations zpl_file_operations = { #endif .read_iter = zpl_iter_read, .write_iter = zpl_iter_write, +#ifdef HAVE_VFS_IOV_ITER + .splice_read = generic_file_splice_read, + .splice_write = iter_file_splice_write, +#endif #else .read = do_sync_read, .write = do_sync_write, diff --git a/module/os/linux/zfs/zpl_inode.c b/module/os/linux/zfs/zpl_inode.c index f3b97a220..f336fbb12 100644 --- a/module/os/linux/zfs/zpl_inode.c +++ b/module/os/linux/zfs/zpl_inode.c @@ -490,19 +490,17 @@ zpl_get_link_common(struct dentry *dentry, struct inode *ip, char **link) { fstrans_cookie_t cookie; cred_t *cr = CRED(); - struct iovec iov; - uio_t uio = { { 0 }, 0 }; int error; crhold(cr); *link = NULL; + + struct iovec iov; iov.iov_len = MAXPATHLEN; iov.iov_base = kmem_zalloc(MAXPATHLEN, KM_SLEEP); - uio.uio_iov = &iov; - uio.uio_iovcnt = 1; - uio.uio_segflg = UIO_SYSSPACE; - uio.uio_resid = (MAXPATHLEN - 1); + uio_t uio; + uio_iovec_init(&uio, &iov, 1, 0, UIO_SYSSPACE, MAXPATHLEN - 1, 0); cookie = spl_fstrans_mark(); error = -zfs_readlink(ip, &uio, cr); diff --git a/module/os/linux/zfs/zpl_xattr.c b/module/os/linux/zfs/zpl_xattr.c index 9b5fd0fd3..1ec3dae2b 100644 --- a/module/os/linux/zfs/zpl_xattr.c +++ b/module/os/linux/zfs/zpl_xattr.c @@ -274,10 +274,10 @@ static int zpl_xattr_get_dir(struct inode *ip, const char *name, void *value, size_t size, cred_t *cr) { + fstrans_cookie_t cookie; struct inode *xip = NULL; znode_t *dxzp = NULL; znode_t *xzp = NULL; - loff_t pos = 0; int error; /* Lookup the xattr directory */ @@ -302,7 +302,19 @@ zpl_xattr_get_dir(struct inode *ip, const char *name, void *value, goto out; } - error = zpl_read_common(xip, value, size, &pos, UIO_SYSSPACE, 0, cr); + struct iovec iov; + iov.iov_base = (void *)value; + iov.iov_len = size; + + uio_t uio; + uio_iovec_init(&uio, &iov, 1, 0, UIO_SYSSPACE, size, 0); + + cookie = spl_fstrans_mark(); + error = -zfs_read(ITOZ(xip), &uio, 0, cr); + spl_fstrans_unmark(cookie); + + if (error == 0) + error = size - uio_resid(&uio); out: if (xzp) zrele(xzp); @@ -441,7 +453,6 @@ zpl_xattr_set_dir(struct inode *ip, const char *name, const void *value, znode_t *dxzp = NULL; znode_t *xzp = NULL; vattr_t *vap = NULL; - ssize_t wrote; int lookup_flags, error; const int xattr_mode = S_IFREG | 0644; loff_t pos = 0; @@ -496,13 +507,8 @@ zpl_xattr_set_dir(struct inode *ip, const char *name, const void *value, if (error) goto out; - wrote = zpl_write_common(ZTOI(xzp), value, size, &pos, - UIO_SYSSPACE, 0, cr); - if (wrote < 0) - error = wrote; - + error = -zfs_write_simple(xzp, value, size, pos, NULL); out: - if (error == 0) { ip->i_ctime = current_time(ip); 
zfs_mark_inode_dirty(ip); diff --git a/module/os/linux/zfs/zvol_os.c b/module/os/linux/zfs/zvol_os.c index 32737a18e..62fe6cd7f 100644 --- a/module/os/linux/zfs/zvol_os.c +++ b/module/os/linux/zfs/zvol_os.c @@ -85,25 +85,14 @@ zvol_is_zvol_impl(const char *device) } static void -uio_from_bio(uio_t *uio, struct bio *bio) -{ - uio->uio_bvec = &bio->bi_io_vec[BIO_BI_IDX(bio)]; - uio->uio_iovcnt = bio->bi_vcnt - BIO_BI_IDX(bio); - uio->uio_loffset = BIO_BI_SECTOR(bio) << 9; - uio->uio_segflg = UIO_BVEC; - uio->uio_resid = BIO_BI_SIZE(bio); - uio->uio_skip = BIO_BI_SKIP(bio); -} - -static void zvol_write(void *arg) { - int error = 0; - zv_request_t *zvr = arg; struct bio *bio = zvr->bio; - uio_t uio = { { 0 }, 0 }; - uio_from_bio(&uio, bio); + int error = 0; + uio_t uio; + + uio_bvec_init(&uio, bio); zvol_state_t *zv = zvr->zv; ASSERT3P(zv, !=, NULL); @@ -249,12 +238,12 @@ unlock: static void zvol_read(void *arg) { - int error = 0; - zv_request_t *zvr = arg; struct bio *bio = zvr->bio; - uio_t uio = { { 0 }, 0 }; - uio_from_bio(&uio, bio); + int error = 0; + uio_t uio; + + uio_bvec_init(&uio, bio); zvol_state_t *zv = zvr->zv; ASSERT3P(zv, !=, NULL); diff --git a/module/zcommon/Makefile.in b/module/zcommon/Makefile.in index b5cdf4c0c..ebc538440 100644 --- a/module/zcommon/Makefile.in +++ b/module/zcommon/Makefile.in @@ -19,7 +19,6 @@ $(MODULE)-objs += zfs_fletcher_superscalar.o $(MODULE)-objs += zfs_fletcher_superscalar4.o $(MODULE)-objs += zfs_namecheck.o $(MODULE)-objs += zfs_prop.o -$(MODULE)-objs += zfs_uio.o $(MODULE)-objs += zpool_prop.o $(MODULE)-objs += zprop_common.o diff --git a/module/zfs/zfs_vnops.c b/module/zfs/zfs_vnops.c index 1f78b8cee..3b7c52b8d 100644 --- a/module/zfs/zfs_vnops.c +++ b/module/zfs/zfs_vnops.c @@ -472,7 +472,7 @@ zfs_write(znode_t *zp, uio_t *uio, int ioflag, cred_t *cr) dmu_return_arcbuf(abuf); break; } - ASSERT(cbytes == max_blksz); + ASSERT3S(cbytes, ==, max_blksz); } /* @@ -652,7 +652,7 @@ zfs_write(znode_t *zp, uio_t *uio, int ioflag, cred_t *cr) if (error != 0) break; - ASSERT(tx_bytes == nbytes); + ASSERT3S(tx_bytes, ==, nbytes); n -= nbytes; if (n > 0) { |
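With HAVE_VFS_IOV_ITER set by the new configure checks, the ->read_iter/->write_iter handlers attach the caller's iov_iter directly to the uio instead of extracting its iovec array. A minimal sketch of that flow, not taken verbatim from the commit: example_write_iter() is a hypothetical wrapper, the include list is abbreviated, and the CRED()/crhold bookkeeping, fstrans marking, and trailing iov_iter_advance() done by the real zpl_iter_write() are omitted.

#include <linux/fs.h>
#include <linux/uio.h>
#include <sys/uio.h>		/* uio_t, uio_iov_iter_init() */
#include <sys/zfs_vnops.h>	/* zfs_write() */

static ssize_t
example_write_iter(struct kiocb *kiocb, struct iov_iter *from,
    znode_t *zp, cred_t *cr)
{
	uio_t uio;
	size_t count = iov_iter_count(from);

	/* Attach the whole iov_iter; the segment type becomes UIO_ITER. */
	uio_iov_iter_init(&uio, from, kiocb->ki_pos, count, 0);

	int error = -zfs_write(zp, &uio, 0, cr);
	if (error < 0)
		return (error);

	/* A short write leaves the unwritten byte count in uio_resid. */
	ssize_t wrote = count - uio.uio_resid;
	kiocb->ki_pos += wrote;

	return (wrote);
}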