author    Richard Yao <[email protected]>    2017-06-13 12:18:08 -0400
committer Brian Behlendorf <[email protected]>    2017-06-13 09:18:08 -0700
commit    5228cf011626081ee5dd4b6ce6fd31857d494e93 (patch)
tree      cf5b578fa63b763ec7482146325a92bc90199dd3 /module/zfs/zvol.c
parent    1fbfcf115979c707e636167fac6c10fec4ff5657 (diff)
Make zvol operations use _by_dnode routines
This continues what was started in 0eef1bde31d67091d3deed23fe2394f5a8bf2276
by fully converting zvols to avoid unnecessary dnode_hold() calls. This saves
a small amount of CPU time and slightly improves latencies of operations on
zvols.

Reviewed-by: Brian Behlendorf <[email protected]>
Signed-off-by: Richard Yao <[email protected]>
Closes #6058
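A minimal C sketch of the before/after pattern, using only the DMU calls
visible in the diff below; it assumes the zvol_state_t, ZVOL_OBJ, and FTAG
declarations from the ZFS tree, and the *_sketch helper names are
hypothetical:

	#include <sys/dmu.h>	/* dmu_read(), dmu_read_by_dnode() */
	#include <sys/dnode.h>	/* dnode_hold(), dnode_rele() */

	/*
	 * Before: every I/O named the object and paid for an internal
	 * dnode_hold()/dnode_rele() pair, e.g.:
	 *
	 *	error = dmu_read(zv->zv_objset, ZVOL_OBJ, off, len, buf,
	 *	    DMU_READ_NO_PREFETCH);
	 *
	 * After: the dnode is held once at setup and reused on every I/O.
	 */
	static int
	zvol_setup_sketch(zvol_state_t *zv)
	{
		/* Hold the dnode once; zv->zv_dn stays valid until shutdown. */
		return (dnode_hold(zv->zv_objset, ZVOL_OBJ, FTAG, &zv->zv_dn));
	}

	static int
	zvol_read_sketch(zvol_state_t *zv, uint64_t off, uint64_t len, void *buf)
	{
		/* Hot path: reuse the held dnode, no per-call lookup. */
		return (dmu_read_by_dnode(zv->zv_dn, off, len, buf,
		    DMU_READ_NO_PREFETCH));
	}

	static void
	zvol_shutdown_sketch(zvol_state_t *zv)
	{
		/* Drop the hold taken at setup. */
		dnode_rele(zv->zv_dn, FTAG);
		zv->zv_dn = NULL;
	}

Trading the per-I/O dnode_hold()/dnode_rele() pair for a single long-lived
hold is where the small CPU and latency win comes from.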
Diffstat (limited to 'module/zfs/zvol.c')
-rw-r--r--	module/zfs/zvol.c	22
1 file changed, 10 insertions(+), 12 deletions(-)
diff --git a/module/zfs/zvol.c b/module/zfs/zvol.c
index bf9f48adb..72ea99af6 100644
--- a/module/zfs/zvol.c
+++ b/module/zfs/zvol.c
@@ -111,7 +111,7 @@ struct zvol_state {
 	uint32_t		zv_changed;	/* disk changed */
 	zilog_t			*zv_zilog;	/* ZIL handle */
 	zfs_rlock_t		zv_range_lock;	/* range lock */
-	dmu_buf_t		*zv_dbuf;	/* bonus handle */
+	dnode_t			*zv_dn;		/* dnode hold */
 	dev_t			zv_dev;		/* device id */
 	struct gendisk		*zv_disk;	/* generic disk */
 	struct request_queue	*zv_queue;	/* request queue */
@@ -640,8 +640,8 @@ zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, uint64_t offset,
 		itx = zil_itx_create(TX_WRITE, sizeof (*lr) +
 		    (wr_state == WR_COPIED ? len : 0));
 		lr = (lr_write_t *)&itx->itx_lr;
-		if (wr_state == WR_COPIED && dmu_read(zv->zv_objset,
-		    ZVOL_OBJ, offset, len, lr+1, DMU_READ_NO_PREFETCH) != 0) {
+		if (wr_state == WR_COPIED && dmu_read_by_dnode(zv->zv_dn,
+		    offset, len, lr+1, DMU_READ_NO_PREFETCH) != 0) {
 			zil_itx_destroy(itx);
 			itx = zil_itx_create(TX_WRITE, sizeof (*lr));
 			lr = (lr_write_t *)&itx->itx_lr;
@@ -720,7 +720,7 @@ zvol_write(void *arg)
 			dmu_tx_abort(tx);
 			break;
 		}
-		error = dmu_write_uio_dbuf(zv->zv_dbuf, &uio, bytes, tx);
+		error = dmu_write_uio_dnode(zv->zv_dn, &uio, bytes, tx);
 		if (error == 0)
 			zvol_log_write(zv, tx, off, bytes, sync);
 		dmu_tx_commit(tx);
@@ -845,7 +845,7 @@ zvol_read(void *arg)
 		if (bytes > volsize - uio.uio_loffset)
 			bytes = volsize - uio.uio_loffset;
 
-		error = dmu_read_uio_dbuf(zv->zv_dbuf, &uio, bytes);
+		error = dmu_read_uio_dnode(zv->zv_dn, &uio, bytes);
 		if (error) {
 			/* convert checksum errors into IO errors */
 			if (error == ECKSUM)
@@ -969,8 +969,6 @@ static int
 zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
 {
 	zvol_state_t *zv = arg;
-	objset_t *os = zv->zv_objset;
-	uint64_t object = ZVOL_OBJ;
 	uint64_t offset = lr->lr_offset;
 	uint64_t size = lr->lr_length;
 	blkptr_t *bp = &lr->lr_blkptr;
@@ -994,12 +992,12 @@ zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
 	 * we don't have to write the data twice.
 	 */
 	if (buf != NULL) { /* immediate write */
-		error = dmu_read(os, object, offset, size, buf,
+		error = dmu_read_by_dnode(zv->zv_dn, offset, size, buf,
 		    DMU_READ_NO_PREFETCH);
 	} else {
 		size = zv->zv_volblocksize;
 		offset = P2ALIGN_TYPED(offset, size, uint64_t);
-		error = dmu_buf_hold(os, object, offset, zgd, &db,
+		error = dmu_buf_hold_by_dnode(zv->zv_dn, offset, zgd, &db,
 		    DMU_READ_NO_PREFETCH);
 		if (error == 0) {
 			blkptr_t *obp = dmu_buf_get_blkptr(db);
@@ -1070,7 +1068,7 @@ zvol_setup_zv(zvol_state_t *zv)
 	if (error)
 		return (SET_ERROR(error));
 
-	error = dmu_bonus_hold(os, ZVOL_OBJ, zv, &zv->zv_dbuf);
+	error = dnode_hold(os, ZVOL_OBJ, FTAG, &zv->zv_dn);
 	if (error)
 		return (SET_ERROR(error));
 
@@ -1099,8 +1097,8 @@ zvol_shutdown_zv(zvol_state_t *zv)
 	zil_close(zv->zv_zilog);
 	zv->zv_zilog = NULL;
 
-	dmu_buf_rele(zv->zv_dbuf, zv);
-	zv->zv_dbuf = NULL;
+	dnode_rele(zv->zv_dn, FTAG);
+	zv->zv_dn = NULL;
 
 	/*
 	 * Evict cached data