author      Richard Yao <[email protected]>            2017-06-13 12:18:08 -0400
committer   Brian Behlendorf <[email protected]>       2017-06-13 09:18:08 -0700
commit      5228cf011626081ee5dd4b6ce6fd31857d494e93 (patch)
tree        cf5b578fa63b763ec7482146325a92bc90199dd3
parent      1fbfcf115979c707e636167fac6c10fec4ff5657 (diff)
Make zvol operations use _by_dnode routines
This continues what was started in
0eef1bde31d67091d3deed23fe2394f5a8bf2276 by fully converting zvols
to avoid unnecessary dnode_hold() calls. This saves a small amount
of CPU time and slightly improves latencies of operations on zvols.
Reviewed-by: Brian Behlendorf <[email protected]>
Signed-off-by: Richard Yao <[email protected]>
Closes #6058
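In rough outline, the conversion follows the pattern sketched below. This is a simplified illustration using only the interfaces visible in this diff (error handling, range locking, and transaction setup are omitted): instead of each dmu_read()/dmu_*_uio_dbuf() call resolving ZVOL_OBJ through its own internal dnode lookup, the zvol takes a single dnode hold at setup, reuses it for every I/O via the _by_dnode/_uio_dnode variants, and drops it at shutdown.

    /* Before: each I/O looks up the dnode for ZVOL_OBJ internally. */
    error = dmu_read(zv->zv_objset, ZVOL_OBJ, offset, size, buf,
        DMU_READ_NO_PREFETCH);

    /* After: hold the dnode once in zvol_setup_zv() ... */
    error = dnode_hold(os, ZVOL_OBJ, FTAG, &zv->zv_dn);

    /* ... reuse that hold for every read and write ... */
    error = dmu_read_by_dnode(zv->zv_dn, offset, size, buf,
        DMU_READ_NO_PREFETCH);
    error = dmu_write_uio_dnode(zv->zv_dn, &uio, bytes, tx);

    /* ... and release it once in zvol_shutdown_zv(). */
    dnode_rele(zv->zv_dn, FTAG);
    zv->zv_dn = NULL;

Making dmu_read_uio_dnode() and dmu_write_uio_dnode() non-static in dmu.c (and declaring them in dmu.h) is what allows zvol.c to call them directly.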
-rw-r--r--   include/sys/dmu.h    3
-rw-r--r--   module/zfs/dmu.c     4
-rw-r--r--   module/zfs/zvol.c   22
3 files changed, 15 insertions, 14 deletions
diff --git a/include/sys/dmu.h b/include/sys/dmu.h
index ea1b116be..d24615262 100644
--- a/include/sys/dmu.h
+++ b/include/sys/dmu.h
@@ -751,10 +751,13 @@ void dmu_prealloc(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
 #include <linux/blkdev_compat.h>
 int dmu_read_uio(objset_t *os, uint64_t object, struct uio *uio, uint64_t size);
 int dmu_read_uio_dbuf(dmu_buf_t *zdb, struct uio *uio, uint64_t size);
+int dmu_read_uio_dnode(dnode_t *dn, struct uio *uio, uint64_t size);
 int dmu_write_uio(objset_t *os, uint64_t object, struct uio *uio, uint64_t size,
     dmu_tx_t *tx);
 int dmu_write_uio_dbuf(dmu_buf_t *zdb, struct uio *uio, uint64_t size,
     dmu_tx_t *tx);
+int dmu_write_uio_dnode(dnode_t *dn, struct uio *uio, uint64_t size,
+    dmu_tx_t *tx);
 #endif
 struct arc_buf *dmu_request_arcbuf(dmu_buf_t *handle, int size);
 void dmu_return_arcbuf(struct arc_buf *buf);
diff --git a/module/zfs/dmu.c b/module/zfs/dmu.c
index 5cf09412f..48e89eef4 100644
--- a/module/zfs/dmu.c
+++ b/module/zfs/dmu.c
@@ -1225,7 +1225,7 @@ xuio_stat_wbuf_nocopy(void)
 }
 
 #ifdef _KERNEL
-static int
+int
 dmu_read_uio_dnode(dnode_t *dn, uio_t *uio, uint64_t size)
 {
 	dmu_buf_t **dbp;
@@ -1334,7 +1334,7 @@ dmu_read_uio(objset_t *os, uint64_t object, uio_t *uio, uint64_t size)
 	return (err);
 }
 
-static int
+int
 dmu_write_uio_dnode(dnode_t *dn, uio_t *uio, uint64_t size, dmu_tx_t *tx)
 {
 	dmu_buf_t **dbp;
diff --git a/module/zfs/zvol.c b/module/zfs/zvol.c
index bf9f48adb..72ea99af6 100644
--- a/module/zfs/zvol.c
+++ b/module/zfs/zvol.c
@@ -111,7 +111,7 @@ struct zvol_state {
 	uint32_t zv_changed;	/* disk changed */
 	zilog_t *zv_zilog;	/* ZIL handle */
 	zfs_rlock_t zv_range_lock;	/* range lock */
-	dmu_buf_t *zv_dbuf;	/* bonus handle */
+	dnode_t *zv_dn;		/* dnode hold */
 	dev_t zv_dev;		/* device id */
 	struct gendisk *zv_disk;	/* generic disk */
 	struct request_queue *zv_queue;	/* request queue */
@@ -640,8 +640,8 @@ zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, uint64_t offset,
 		itx = zil_itx_create(TX_WRITE, sizeof (*lr) +
 		    (wr_state == WR_COPIED ? len : 0));
 		lr = (lr_write_t *)&itx->itx_lr;
-		if (wr_state == WR_COPIED && dmu_read(zv->zv_objset,
-		    ZVOL_OBJ, offset, len, lr+1, DMU_READ_NO_PREFETCH) != 0) {
+		if (wr_state == WR_COPIED && dmu_read_by_dnode(zv->zv_dn,
+		    offset, len, lr+1, DMU_READ_NO_PREFETCH) != 0) {
 			zil_itx_destroy(itx);
 			itx = zil_itx_create(TX_WRITE, sizeof (*lr));
 			lr = (lr_write_t *)&itx->itx_lr;
@@ -720,7 +720,7 @@ zvol_write(void *arg)
 			dmu_tx_abort(tx);
 			break;
 		}
-		error = dmu_write_uio_dbuf(zv->zv_dbuf, &uio, bytes, tx);
+		error = dmu_write_uio_dnode(zv->zv_dn, &uio, bytes, tx);
 		if (error == 0)
 			zvol_log_write(zv, tx, off, bytes, sync);
 		dmu_tx_commit(tx);
@@ -845,7 +845,7 @@ zvol_read(void *arg)
 		if (bytes > volsize - uio.uio_loffset)
 			bytes = volsize - uio.uio_loffset;
 
-		error = dmu_read_uio_dbuf(zv->zv_dbuf, &uio, bytes);
+		error = dmu_read_uio_dnode(zv->zv_dn, &uio, bytes);
 		if (error) {
 			/* convert checksum errors into IO errors */
 			if (error == ECKSUM)
@@ -969,8 +969,6 @@ static int
 zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
 {
 	zvol_state_t *zv = arg;
-	objset_t *os = zv->zv_objset;
-	uint64_t object = ZVOL_OBJ;
 	uint64_t offset = lr->lr_offset;
 	uint64_t size = lr->lr_length;
 	blkptr_t *bp = &lr->lr_blkptr;
@@ -994,12 +992,12 @@ zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
 	 * we don't have to write the data twice.
 	 */
 	if (buf != NULL) {	/* immediate write */
-		error = dmu_read(os, object, offset, size, buf,
+		error = dmu_read_by_dnode(zv->zv_dn, offset, size, buf,
 		    DMU_READ_NO_PREFETCH);
 	} else {
 		size = zv->zv_volblocksize;
 		offset = P2ALIGN_TYPED(offset, size, uint64_t);
-		error = dmu_buf_hold(os, object, offset, zgd, &db,
+		error = dmu_buf_hold_by_dnode(zv->zv_dn, offset, zgd, &db,
 		    DMU_READ_NO_PREFETCH);
 		if (error == 0) {
 			blkptr_t *obp = dmu_buf_get_blkptr(db);
@@ -1070,7 +1068,7 @@ zvol_setup_zv(zvol_state_t *zv)
 	if (error)
 		return (SET_ERROR(error));
 
-	error = dmu_bonus_hold(os, ZVOL_OBJ, zv, &zv->zv_dbuf);
+	error = dnode_hold(os, ZVOL_OBJ, FTAG, &zv->zv_dn);
 	if (error)
 		return (SET_ERROR(error));
 
@@ -1099,8 +1097,8 @@ zvol_shutdown_zv(zvol_state_t *zv)
 	zil_close(zv->zv_zilog);
 	zv->zv_zilog = NULL;
 
-	dmu_buf_rele(zv->zv_dbuf, zv);
-	zv->zv_dbuf = NULL;
+	dnode_rele(zv->zv_dn, FTAG);
+	zv->zv_dn = NULL;
 
 	/*
 	 * Evict cached data