Diffstat (limited to 'module')

 module/zfs/zfs_rlock.c | 4 ++--
 module/zfs/zvol.c      | 2 ++
 2 files changed, 4 insertions(+), 2 deletions(-)
diff --git a/module/zfs/zfs_rlock.c b/module/zfs/zfs_rlock.c
index 6709ce80b..9362fb4e8 100644
--- a/module/zfs/zfs_rlock.c
+++ b/module/zfs/zfs_rlock.c
@@ -112,14 +112,14 @@ zfs_range_lock_writer(znode_t *zp, rl_t *new)
* Range locking is also used by zvol and uses a
* dummied up znode. However, for zvol, we don't need to
* append or grow blocksize, and besides we don't have
- * a "sa" data or z_zfsvfs - so skip that processing.
+ * a "sa" data or zfs_sb_t - so skip that processing.
*
* Yes, this is ugly, and would be solved by not handling
* grow or append in range lock code. If that was done then
* we could make the range locking code generically available
* to other non-zfs consumers.
*/
- if (zp->z_vnode) { /* caller is ZPL */
+ if (!zp->z_is_zvol) { /* caller is ZPL */
/*
* If in append mode pick up the current end of file.
* This is done under z_range_lock to avoid races.
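
The gist of the hunk above: the range-lock writer path used to infer "caller is ZPL" from a non-NULL z_vnode on the znode, and now tests an explicit z_is_zvol flag instead. A minimal sketch of that dispatch for readers outside the ZFS tree; the stripped-down znode_t layout and the function name here are illustrative stand-ins, not the real ZFS definitions:

/*
 * Illustrative sketch only: a reduced znode_t carrying just the flag
 * this change introduces, plus the branch from the hunk above.
 */
typedef int boolean_t;
#define	TRUE	1
#define	FALSE	0

typedef struct znode {
	boolean_t z_is_zvol;	/* TRUE only on zvol's dummied-up znode */
} znode_t;

static void
range_lock_writer_sketch(znode_t *zp)
{
	if (!zp->z_is_zvol) {
		/*
		 * ZPL caller: pick up the current end of file for append
		 * and grow the blocksize if needed (elided here).
		 */
	}
	/* zvol caller: no "sa" data or zfs_sb_t, so that work is skipped. */
}

Switching from the implicit z_vnode test to a dedicated flag makes the ZPL-vs-zvol distinction explicit rather than inferred from a pointer that happens to be NULL for zvols.
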
diff --git a/module/zfs/zvol.c b/module/zfs/zvol.c
index b2a08fb43..3d829a3d4 100644
--- a/module/zfs/zvol.c
+++ b/module/zfs/zvol.c
@@ -1062,6 +1062,8 @@ zvol_alloc(dev_t dev, const char *name)
mutex_init(&zv->zv_znode.z_range_lock, NULL, MUTEX_DEFAULT, NULL);
avl_create(&zv->zv_znode.z_range_avl, zfs_range_compare,
sizeof (rl_t), offsetof(rl_t, r_node));
+ zv->zv_znode.z_is_zvol = TRUE;
+
spin_lock_init(&zv->zv_lock);
list_link_init(&zv->zv_next);
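
The zvol side is a matching one-line initialization: zvol_alloc() sets the flag on the dummied-up znode it builds for range locking, right after the range-lock mutex and AVL tree are set up. A hedged sketch of that pattern follows; zvol_state_t here is a stand-in with only the field this hunk touches, and only the z_is_zvol assignment mirrors the actual change:

typedef int boolean_t;
#define	TRUE	1

typedef struct znode {
	boolean_t z_is_zvol;
} znode_t;

typedef struct zvol_state {
	znode_t zv_znode;	/* dummied-up znode used only for range locking */
} zvol_state_t;

static void
zvol_alloc_sketch(zvol_state_t *zv)
{
	/*
	 * Tag the znode explicitly; before this change the range-lock
	 * code inferred a zvol caller from z_vnode being NULL.
	 */
	zv->zv_znode.z_is_zvol = TRUE;
}
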