author	Chunwei Chen <[email protected]>	2016-04-11 14:53:48 -0700
committer	Brian Behlendorf <[email protected]>	2016-05-17 10:29:02 -0700
commit	d88895a069765bc3c6119ac1a3c8ea9edec7a370 (patch)
tree	4124ee3bfcf5d5ecb3f7c5e84cc95824a063faf7 /module/zfs/zvol.c
parent	61a3d06f8414ae1eb8b278be8776a6b30b351549 (diff)
Remove dummy znode from zvol_state
struct zvol_state contains a dummy znode, which is around 1KB on x64 and is used only
for zfs_range_lock. In reality, apart from z_range_lock and z_range_avl, zfs_range_lock
needs a znode only for regular files, so the dummy znode adds 1KB to the structure and
gains nothing.

This patch removes the dummy znode from zvol_state. Doing so requires a small refactor
of zfs_range_lock: the z_range_lock and z_range_avl pair is moved out of znode_t into a
new zfs_rlock_t, which replaces znode_t as the main handle inside the range lock
functions. Pointers to z_size, z_blksz, and z_max_blksz are also added so the range
lock code no longer depends on znode_t. This allows non-ZPL consumers such as Lustre to
use the range locks with their own znode_t-equivalent structure.

Signed-off-by: Chunwei Chen <[email protected]>
Signed-off-by: Boris Protopopov <[email protected]>
Signed-off-by: Brian Behlendorf <[email protected]>
Closes #4510
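The zfs_rlock_t handle itself is defined outside this file, so it does not appear in
the diff below. As a rough sketch of what the commit message describes, namely the
z_range_lock/z_range_avl pair plus pointers into the owner's size fields, the struct
plausibly looks like the following (field names are illustrative, not the actual ones
from sys/zfs_rlock.h):

    #include <sys/zfs_context.h>	/* kmutex_t */
    #include <sys/avl.h>		/* avl_tree_t */

    typedef struct zfs_rlock {
    	kmutex_t	zr_mutex;	/* was znode_t's z_range_lock */
    	avl_tree_t	zr_avl;		/* was znode_t's z_range_avl */
    	uint64_t	*zr_size;	/* points at owner's object size (z_size) */
    	uint_t		*zr_blksz;	/* points at owner's block size (z_blksz) */
    	uint64_t	*zr_max_blksz;	/* points at owner's max block size */
    } zfs_rlock_t;

For a zvol these pointers would be wired to the volume's own size and block size,
which is why the 1KB dummy znode is no longer needed.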
Diffstat (limited to 'module/zfs/zvol.c')
-rw-r--r--	module/zfs/zvol.c	23
1 file changed, 10 insertions, 13 deletions
diff --git a/module/zfs/zvol.c b/module/zfs/zvol.c
index ba482a474..be6aea879 100644
--- a/module/zfs/zvol.c
+++ b/module/zfs/zvol.c
@@ -75,7 +75,7 @@ typedef struct zvol_state {
uint32_t zv_open_count; /* open counts */
uint32_t zv_changed; /* disk changed */
zilog_t *zv_zilog; /* ZIL handle */
- znode_t zv_znode; /* for range locking */
+ zfs_rlock_t zv_range_lock; /* range lock */
dmu_buf_t *zv_dbuf; /* bonus handle */
dev_t zv_dev; /* device id */
struct gendisk *zv_disk; /* generic disk */
@@ -633,8 +633,8 @@ zvol_write(zvol_state_t *zv, uio_t *uio, boolean_t sync)
ASSERT(zv && zv->zv_open_count > 0);
- rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
- RL_WRITER);
+ rl = zfs_range_lock(&zv->zv_range_lock, uio->uio_loffset,
+ uio->uio_resid, RL_WRITER);
while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
@@ -725,7 +725,7 @@ zvol_discard(struct bio *bio)
if (start >= end)
return (0);
- rl = zfs_range_lock(&zv->zv_znode, start, size, RL_WRITER);
+ rl = zfs_range_lock(&zv->zv_range_lock, start, size, RL_WRITER);
tx = dmu_tx_create(zv->zv_objset);
dmu_tx_mark_netfree(tx);
error = dmu_tx_assign(tx, TXG_WAIT);
@@ -752,8 +752,8 @@ zvol_read(zvol_state_t *zv, uio_t *uio)
ASSERT(zv && zv->zv_open_count > 0);
- rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
- RL_READER);
+ rl = zfs_range_lock(&zv->zv_range_lock, uio->uio_loffset,
+ uio->uio_resid, RL_READER);
while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
@@ -879,7 +879,8 @@ zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
zgd->zgd_zilog = zv->zv_zilog;
- zgd->zgd_rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);
+ zgd->zgd_rl = zfs_range_lock(&zv->zv_range_lock, offset, size,
+ RL_READER);
/*
* Write records come in two flavors: immediate and indirect.
@@ -1305,10 +1306,7 @@ zvol_alloc(dev_t dev, const char *name)
zv->zv_open_count = 0;
strlcpy(zv->zv_name, name, MAXNAMELEN);
- mutex_init(&zv->zv_znode.z_range_lock, NULL, MUTEX_DEFAULT, NULL);
- avl_create(&zv->zv_znode.z_range_avl, zfs_range_compare,
- sizeof (rl_t), offsetof(rl_t, r_node));
- zv->zv_znode.z_is_zvol = TRUE;
+ zfs_rlock_init(&zv->zv_range_lock);
zv->zv_disk->major = zvol_major;
zv->zv_disk->first_minor = (dev & MINORMASK);
@@ -1337,8 +1335,7 @@ zvol_free(zvol_state_t *zv)
ASSERT(MUTEX_HELD(&zvol_state_lock));
ASSERT(zv->zv_open_count == 0);
- avl_destroy(&zv->zv_znode.z_range_avl);
- mutex_destroy(&zv->zv_znode.z_range_lock);
+ zfs_rlock_destroy(&zv->zv_range_lock);
zv->zv_disk->private_data = NULL;
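
Taken together, the hunks above replace the open-coded mutex/AVL setup with a small
init/lock/destroy API around the embedded zfs_rlock_t. Below is a condensed sketch of
the resulting lifecycle, collapsing calls that the patch spreads across zvol_alloc(),
zvol_read()/zvol_write(), and zvol_free() into one illustrative function; the function
name is made up for the example, and zfs_range_unlock() is the pre-existing release
call that this diff does not touch:

    #include <sys/zfs_rlock.h>

    static void
    zvol_range_lock_lifecycle(zvol_state_t *zv, uint64_t off, uint64_t len)
    {
    	rl_t *rl;

    	/* zvol_alloc(): initialize the embedded range-lock state */
    	zfs_rlock_init(&zv->zv_range_lock);

    	/* zvol_read()/zvol_write(): serialize I/O against [off, off+len) */
    	rl = zfs_range_lock(&zv->zv_range_lock, off, len, RL_WRITER);
    	/* ... the actual DMU read/write would happen here ... */
    	zfs_range_unlock(rl);

    	/* zvol_free(): tear down the range-lock state */
    	zfs_rlock_destroy(&zv->zv_range_lock);
    }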