author     Etienne Dechamps <[email protected]>    2011-09-02 15:23:12 +0200
committer  Brian Behlendorf <[email protected]>    2012-02-09 16:19:38 -0800
commit     30930fba219642cb1dadf1c8ef60ff799e3dc424 (patch)
tree       093f2c5c254768e859f8307d77d393e9668b5e1f /module/zfs/zvol.c
parent     cb2d19010d8fbcf6c22585cd8763fad3ba7db724 (diff)
Add support for DISCARD to ZVOLs.
DISCARD (REQ_DISCARD, BLKDISCARD) is useful for thin provisioning.
It allows ZVOL clients to discard (unmap, trim) block ranges from
a ZVOL, thus optimizing disk space usage by allowing a ZVOL to
shrink instead of just grow.
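For illustration, a userspace client would typically issue such a request through the BLKDISCARD ioctl. The following is a minimal sketch, not part of this patch; the device path /dev/zd0 is a placeholder for whatever device node the ZVOL is exposed as:

/* Sketch: discard a byte range on a block device via BLKDISCARD. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <unistd.h>

int
main(void)
{
	/* Placeholder path; use the ZVOL's actual device node. */
	int fd = open("/dev/zd0", O_WRONLY);
	/* range[0] = offset in bytes, range[1] = length in bytes. */
	uint64_t range[2] = { 0, 1ULL << 20 };

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, BLKDISCARD, &range) != 0)
		perror("BLKDISCARD");
	close(fd);
	return 0;
}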
We can't use zfs_space() or zfs_freesp() here, since these functions
only work on regular files, not volumes. Fortunately we can use the
low-level function dmu_free_long_range(), which does exactly what we
want.
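Condensed from the zvol_discard() added in the diff below, the core of the operation is a range-locked call into the DMU. This is a sketch only; the helper name zvol_free_range is hypothetical, and the bounds and zero-length checks present in the actual patch are omitted:

/*
 * Sketch only: free (discard) a byte range from a ZVOL's backing DMU
 * object. Condensed from the zvol_discard() added below; the bounds
 * and zero-length checks from the actual patch are omitted.
 */
static int
zvol_free_range(zvol_state_t *zv, uint64_t offset, uint64_t size)
{
	rl_t *rl;
	int error;

	/* Serialize against concurrent I/O to this range. */
	rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_WRITER);

	/*
	 * Punch the hole directly in the DMU object; this works on
	 * volumes, unlike zfs_space()/zfs_freesp(), which expect
	 * regular files.
	 */
	error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, offset, size);

	zfs_range_unlock(rl);

	return (error);
}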
Currently the discard operation is not added to the log. That is not
a big deal, since losing discard requests cannot result in data
corruption; it would, however, leave disk space usage higher than it
should be. Adding log support to zvol_discard() is therefore a good
candidate for a future improvement.
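One possible shape for that improvement, sketched here purely for illustration: record each freed range as a TX_TRUNCATE entry in the ZVOL's ZIL. Nothing below is part of this patch; the helper name is hypothetical, and it assumes the caller opens and assigns a dmu_tx around the free:

/*
 * Sketch only (not part of this patch): log a discarded range to the
 * ZIL as a TX_TRUNCATE record so it is replayed after a crash.
 * Assumes the caller wraps the free in an assigned dmu_tx; the name
 * zvol_log_truncate is a hypothetical helper here.
 */
static void
zvol_log_truncate(zvol_state_t *zv, dmu_tx_t *tx, uint64_t off, uint64_t len)
{
	itx_t *itx;
	lr_truncate_t *lr;
	zilog_t *zilog = zv->zv_zilog;

	/* Don't generate new records while replaying the log. */
	if (zil_replaying(zilog, tx))
		return;

	itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr));
	lr = (lr_truncate_t *)&itx->itx_lr;
	lr->lr_foid = ZVOL_OBJ;
	lr->lr_offset = off;
	lr->lr_length = len;

	zil_itx_assign(zilog, itx, tx);
}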
Signed-off-by: Brian Behlendorf <[email protected]>
Diffstat (limited to 'module/zfs/zvol.c')
-rw-r--r--   module/zfs/zvol.c   47
1 file changed, 47 insertions, 0 deletions
diff --git a/module/zfs/zvol.c b/module/zfs/zvol.c
index 19888ea96..9b1313402 100644
--- a/module/zfs/zvol.c
+++ b/module/zfs/zvol.c
@@ -574,6 +574,42 @@ zvol_write(void *arg)
 	blk_end_request(req, -error, size);
 }
 
+#ifdef HAVE_BLK_QUEUE_DISCARD
+static void
+zvol_discard(void *arg)
+{
+	struct request *req = (struct request *)arg;
+	struct request_queue *q = req->q;
+	zvol_state_t *zv = q->queuedata;
+	uint64_t offset = blk_rq_pos(req) << 9;
+	uint64_t size = blk_rq_bytes(req);
+	int error;
+	rl_t *rl;
+
+	if (offset + size > zv->zv_volsize) {
+		blk_end_request(req, -EIO, size);
+		return;
+	}
+
+	if (size == 0) {
+		blk_end_request(req, 0, size);
+		return;
+	}
+
+	rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_WRITER);
+
+	error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, offset, size);
+
+	/*
+	 * TODO: maybe we should add the operation to the log.
+	 */
+
+	zfs_range_unlock(rl);
+
+	blk_end_request(req, -error, size);
+}
+#endif /* HAVE_BLK_QUEUE_DISCARD */
+
 /*
  * Common read path running under the zvol taskq context.  This function
  * is responsible for copying the requested data out of the DMU and in to
@@ -674,6 +710,13 @@ zvol_request(struct request_queue *q)
 			break;
 		}
 
+#ifdef HAVE_BLK_QUEUE_DISCARD
+		if (req->cmd_flags & VDEV_REQ_DISCARD) {
+			zvol_dispatch(zvol_discard, req);
+			break;
+		}
+#endif /* HAVE_BLK_QUEUE_DISCARD */
+
 		zvol_dispatch(zvol_write, req);
 		break;
 	default:
@@ -1193,6 +1236,10 @@ __zvol_create_minor(const char *name)
 	blk_queue_max_segment_size(zv->zv_queue, UINT_MAX);
 	blk_queue_physical_block_size(zv->zv_queue, zv->zv_volblocksize);
 	blk_queue_io_opt(zv->zv_queue, zv->zv_volblocksize);
+#ifdef HAVE_BLK_QUEUE_DISCARD
+	blk_queue_max_discard_sectors(zv->zv_queue, UINT_MAX);
+	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zv->zv_queue);
+#endif
 #ifdef HAVE_BLK_QUEUE_NONROT
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zv->zv_queue);
 #endif