about · summary · refs · log · tree · commit · diff · stats
path: root/module/zfs/zfs_vnops.c
diff options
context:
space:
mode:
author    Serapheim Dimitropoulos <[email protected]>  2018-08-20 09:52:37 -0700
committer Brian Behlendorf <[email protected]>  2018-08-20 09:52:37 -0700
commit    a448a2557ec4938ed6944c7766fe0b8e6e5f6456 (patch)
tree      1d622c6c40aeb9c34d233ad562b2920ab2ef651c /module/zfs/zfs_vnops.c
parent    fa84714abbb9316208bef7188009ee74204d532e (diff)
Introduce read/write kstats per dataset
The following patch introduces a few statistics on reads and writes grouped
by dataset. These statistics are implemented as kstats (backed by aggregate
sums for performance) and can be retrieved by using the dataset objset ID
number. The motivation for this change is to provide some preliminary
analytics on dataset usage/performance.

Reviewed-by: Richard Elling <[email protected]>
Reviewed-by: Brian Behlendorf <[email protected]>
Reviewed by: Matthew Ahrens <[email protected]>
Signed-off-by: Serapheim Dimitropoulos <[email protected]>
Closes #7705
Diffstat (limited to 'module/zfs/zfs_vnops.c')
-rw-r--r--  module/zfs/zfs_vnops.c | 105
1 file changed, 51 insertions(+), 54 deletions(-)
diff --git a/module/zfs/zfs_vnops.c b/module/zfs/zfs_vnops.c
index 35bbd884b..4e163e2e3 100644
--- a/module/zfs/zfs_vnops.c
+++ b/module/zfs/zfs_vnops.c
@@ -21,7 +21,7 @@
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
+ * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
* Copyright (c) 2015 by Chunwei Chen. All rights reserved.
* Copyright 2017 Nexenta Systems, Inc.
*/
@@ -438,15 +438,10 @@ unsigned long zfs_delete_blocks = DMU_MAX_DELETEBLKCNT;
int
zfs_read(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
{
- znode_t *zp = ITOZ(ip);
- zfsvfs_t *zfsvfs = ITOZSB(ip);
- ssize_t n, nbytes;
- int error = 0;
- rl_t *rl;
-#ifdef HAVE_UIO_ZEROCOPY
- xuio_t *xuio = NULL;
-#endif /* HAVE_UIO_ZEROCOPY */
+ int error = 0;
+ znode_t *zp = ITOZ(ip);
+ zfsvfs_t *zfsvfs = ITOZSB(ip);
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
@@ -482,8 +477,8 @@ zfs_read(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
/*
* Lock the range against changes.
*/
- rl = zfs_range_lock(&zp->z_range_lock, uio->uio_loffset, uio->uio_resid,
- RL_READER);
+ rl_t *rl = zfs_range_lock(&zp->z_range_lock,
+ uio->uio_loffset, uio->uio_resid, RL_READER);
/*
* If we are reading past end-of-file we can skip
@@ -495,9 +490,11 @@ zfs_read(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
}
ASSERT(uio->uio_loffset < zp->z_size);
- n = MIN(uio->uio_resid, zp->z_size - uio->uio_loffset);
+ ssize_t n = MIN(uio->uio_resid, zp->z_size - uio->uio_loffset);
+ ssize_t start_resid = n;
#ifdef HAVE_UIO_ZEROCOPY
+ xuio_t *xuio = NULL;
if ((uio->uio_extflg == UIO_XUIO) &&
(((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY)) {
int nblk;
@@ -529,7 +526,7 @@ zfs_read(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
#endif /* HAVE_UIO_ZEROCOPY */
while (n > 0) {
- nbytes = MIN(n, zfs_read_chunk_size -
+ ssize_t nbytes = MIN(n, zfs_read_chunk_size -
P2PHASE(uio->uio_loffset, zfs_read_chunk_size));
if (zp->z_is_mapped && !(ioflag & O_DIRECT)) {
@@ -548,6 +545,10 @@ zfs_read(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
n -= nbytes;
}
+
+ int64_t nread = start_resid - n;
+ dataset_kstats_update_read_kstats(&zfsvfs->z_kstat, nread);
+ task_io_account_read(nread);
out:
zfs_range_unlock(rl);
@@ -578,46 +579,28 @@ out:
int
zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
{
- znode_t *zp = ITOZ(ip);
- rlim64_t limit = uio->uio_limit;
- ssize_t start_resid = uio->uio_resid;
- ssize_t tx_bytes;
- uint64_t end_size;
- dmu_tx_t *tx;
- zfsvfs_t *zfsvfs = ZTOZSB(zp);
- zilog_t *zilog;
- offset_t woff;
- ssize_t n, nbytes;
- rl_t *rl;
- int max_blksz = zfsvfs->z_max_blksz;
- int error = 0;
- arc_buf_t *abuf;
- const iovec_t *aiov = NULL;
- xuio_t *xuio = NULL;
- int write_eof;
- int count = 0;
- sa_bulk_attr_t bulk[4];
- uint64_t mtime[2], ctime[2];
- uint32_t uid;
-#ifdef HAVE_UIO_ZEROCOPY
- int i_iov = 0;
- const iovec_t *iovp = uio->uio_iov;
- ASSERTV(int iovcnt = uio->uio_iovcnt);
-#endif
+ int error = 0;
+ ssize_t start_resid = uio->uio_resid;
/*
* Fasttrack empty write
*/
- n = start_resid;
+ ssize_t n = start_resid;
if (n == 0)
return (0);
+ rlim64_t limit = uio->uio_limit;
if (limit == RLIM64_INFINITY || limit > MAXOFFSET_T)
limit = MAXOFFSET_T;
+ znode_t *zp = ITOZ(ip);
+ zfsvfs_t *zfsvfs = ZTOZSB(zp);
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
+ sa_bulk_attr_t bulk[4];
+ int count = 0;
+ uint64_t mtime[2], ctime[2];
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
@@ -644,17 +627,18 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
return (SET_ERROR(EPERM));
}
- zilog = zfsvfs->z_log;
-
/*
* Validate file offset
*/
- woff = ioflag & FAPPEND ? zp->z_size : uio->uio_loffset;
+ offset_t woff = ioflag & FAPPEND ? zp->z_size : uio->uio_loffset;
if (woff < 0) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EINVAL));
}
+ int max_blksz = zfsvfs->z_max_blksz;
+ xuio_t *xuio = NULL;
+
/*
* Pre-fault the pages to ensure slow (eg NFS) pages
* don't hold up txg.
@@ -668,6 +652,8 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
#endif
uio_prefaultpages(MIN(n, max_blksz), uio);
+ rl_t *rl;
+
/*
* If in append mode, set the io offset pointer to eof.
*/
@@ -706,9 +692,16 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
n = limit - woff;
/* Will this write extend the file length? */
- write_eof = (woff + n > zp->z_size);
+ int write_eof = (woff + n > zp->z_size);
+
+ uint64_t end_size = MAX(zp->z_size, woff + n);
+ zilog_t *zilog = zfsvfs->z_log;
+#ifdef HAVE_UIO_ZEROCOPY
+ int i_iov = 0;
+ const iovec_t *iovp = uio->uio_iov;
+ ASSERTV(int iovcnt = uio->uio_iovcnt);
+#endif
- end_size = MAX(zp->z_size, woff + n);
/*
* Write the file in reasonable size chunks. Each chunk is written
@@ -716,8 +709,8 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
* and allows us to do more fine-grained space accounting.
*/
while (n > 0) {
- abuf = NULL;
woff = uio->uio_loffset;
+
if (zfs_id_overblockquota(zfsvfs, DMU_USERUSED_OBJECT,
KUID_TO_SUID(ip->i_uid)) ||
zfs_id_overblockquota(zfsvfs, DMU_GROUPUSED_OBJECT,
@@ -725,13 +718,13 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
(zp->z_projid != ZFS_DEFAULT_PROJID &&
zfs_id_overblockquota(zfsvfs, DMU_PROJECTUSED_OBJECT,
zp->z_projid))) {
- if (abuf != NULL)
- dmu_return_arcbuf(abuf);
error = SET_ERROR(EDQUOT);
break;
}
- if (xuio && abuf == NULL) {
+ arc_buf_t *abuf = NULL;
+ const iovec_t *aiov = NULL;
+ if (xuio) {
#ifdef HAVE_UIO_ZEROCOPY
ASSERT(i_iov < iovcnt);
ASSERT3U(uio->uio_segflg, !=, UIO_BVEC);
@@ -743,8 +736,7 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
aiov->iov_len == arc_buf_size(abuf)));
i_iov++;
#endif
- } else if (abuf == NULL && n >= max_blksz &&
- woff >= zp->z_size &&
+ } else if (n >= max_blksz && woff >= zp->z_size &&
P2PHASE(woff, max_blksz) == 0 &&
zp->z_blksz == max_blksz) {
/*
@@ -771,7 +763,7 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
/*
* Start a transaction.
*/
- tx = dmu_tx_create(zfsvfs->z_os);
+ dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
dmu_tx_hold_write(tx, zp->z_id, woff, MIN(n, max_blksz));
zfs_sa_upgrade_txholds(tx, zp);
@@ -812,8 +804,9 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
* XXX - should we really limit each write to z_max_blksz?
* Perhaps we should use SPA_MAXBLOCKSIZE chunks?
*/
- nbytes = MIN(n, max_blksz - P2PHASE(woff, max_blksz));
+ ssize_t nbytes = MIN(n, max_blksz - P2PHASE(woff, max_blksz));
+ ssize_t tx_bytes;
if (abuf == NULL) {
tx_bytes = uio->uio_resid;
error = dmu_write_uio_dbuf(sa_get_db(zp->z_sa_hdl),
@@ -873,7 +866,7 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
* user 0 is not an ephemeral uid.
*/
mutex_enter(&zp->z_acl_lock);
- uid = KUID_TO_SUID(ip->i_uid);
+ uint32_t uid = KUID_TO_SUID(ip->i_uid);
if ((zp->z_mode & (S_IXUSR | (S_IXUSR >> 3) |
(S_IXUSR >> 6))) != 0 &&
(zp->z_mode & (S_ISUID | S_ISGID)) != 0 &&
@@ -937,6 +930,10 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, zp->z_id);
+ int64_t nwritten = start_resid - uio->uio_resid;
+ dataset_kstats_update_write_kstats(&zfsvfs->z_kstat, nwritten);
+ task_io_account_write(nwritten);
+
ZFS_EXIT(zfsvfs);
return (0);
}