Diffstat (limited to 'module/zfs/zvol.c')
-rw-r--r-- | module/zfs/zvol.c | 31
1 file changed, 10 insertions, 21 deletions
diff --git a/module/zfs/zvol.c b/module/zfs/zvol.c
index c25b243db..bf6d59fba 100644
--- a/module/zfs/zvol.c
+++ b/module/zfs/zvol.c
@@ -707,20 +707,16 @@ zvol_discard(struct bio *bio)
 		return (SET_ERROR(EIO));
 
 	/*
-	 * Align the request to volume block boundaries when REQ_SECURE is
-	 * available, but not requested. If we don't, then this will force
-	 * dnode_free_range() to zero out the unaligned parts, which is slow
-	 * (read-modify-write) and useless since we are not freeing any space
-	 * by doing so. Kernels that do not support REQ_SECURE (2.6.32 through
-	 * 2.6.35) will not receive this optimization.
+	 * Align the request to volume block boundaries when a secure erase is
+	 * not required. This will prevent dnode_free_range() from zeroing out
+	 * the unaligned parts which is slow (read-modify-write) and useless
+	 * since we are not freeing any space by doing so.
 	 */
-#ifdef REQ_SECURE
-	if (!(bio->bi_rw & REQ_SECURE)) {
+	if (!bio_is_secure_erase(bio)) {
 		start = P2ROUNDUP(start, zv->zv_volblocksize);
 		end = P2ALIGN(end, zv->zv_volblocksize);
 		size = end - start;
 	}
-#endif
 
 	if (start >= end)
 		return (0);
@@ -812,7 +808,7 @@ zvol_request(struct request_queue *q, struct bio *bio)
 			goto out2;
 		}
 
-		if (bio_is_discard(bio)) {
+		if (bio_is_discard(bio) || bio_is_secure_erase(bio)) {
 			error = zvol_discard(bio);
 			goto out2;
 		}
@@ -821,14 +817,14 @@ zvol_request(struct request_queue *q, struct bio *bio)
 		/*
 		 * Some requests are just for flush and nothing else.
 		 */
 		if (uio.uio_resid == 0) {
-			if (bio->bi_rw & VDEV_REQ_FLUSH)
+			if (bio_is_flush(bio))
 				zil_commit(zv->zv_zilog, ZVOL_OBJ);
 			goto out2;
 		}
 
 		error = zvol_write(zv, &uio,
-		    ((bio->bi_rw & (VDEV_REQ_FUA|VDEV_REQ_FLUSH)) ||
-		    zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS));
+		    bio_is_flush(bio) || bio_is_fua(bio) ||
+		    zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS);
 	} else
 		error = zvol_read(zv, &uio);
@@ -1290,14 +1286,7 @@ zvol_alloc(dev_t dev, const char *name)
 		goto out_kmem;
 
 	blk_queue_make_request(zv->zv_queue, zvol_request);
-
-#ifdef HAVE_BLK_QUEUE_WRITE_CACHE
-	blk_queue_write_cache(zv->zv_queue, B_TRUE, B_TRUE);
-#elif defined(HAVE_BLK_QUEUE_FLUSH)
-	blk_queue_flush(zv->zv_queue, VDEV_REQ_FLUSH | VDEV_REQ_FUA);
-#else
-	blk_queue_ordered(zv->zv_queue, QUEUE_ORDERED_DRAIN, NULL);
-#endif /* HAVE_BLK_QUEUE_FLUSH */
+	blk_queue_set_write_cache(zv->zv_queue, B_TRUE, B_TRUE);
 
 	zv->zv_disk = alloc_disk(ZVOL_MINORS);
 	if (zv->zv_disk == NULL)
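As a side note (not part of the commit), the effect of the alignment step in zvol_discard() can be illustrated with a small standalone C program. The P2ROUNDUP()/P2ALIGN() definitions below are equivalent to the ZFS sysmacros.h power-of-2 macros; the block size and offsets are made-up example values showing how an unaligned trim is shrunk to whole volume blocks, and why a secure erase cannot take this shortcut (it must also cover the partial blocks).

/*
 * Standalone sketch (not from the commit): how an unaligned discard
 * range is trimmed to whole volume blocks.  The macro definitions are
 * equivalent to the ZFS sysmacros.h versions; values are examples only.
 */
#include <stdint.h>
#include <stdio.h>

#define	P2ALIGN(x, align)	((x) & -(align))	/* round down to a multiple */
#define	P2ROUNDUP(x, align)	(-(-(x) & -(align)))	/* round up to a multiple */

int
main(void)
{
	uint64_t volblocksize = 8192;	/* hypothetical zvol block size */
	uint64_t start = 4096;		/* unaligned discard start */
	uint64_t end = 20480;		/* unaligned discard end */

	/*
	 * Without a secure erase only whole blocks are freed; the partial
	 * head and tail are left alone instead of being zeroed by a slow
	 * read-modify-write in dnode_free_range().
	 */
	uint64_t astart = P2ROUNDUP(start, volblocksize);	/* 8192 */
	uint64_t aend = P2ALIGN(end, volblocksize);		/* 16384 */

	if (astart >= aend)
		printf("range covers no whole block, nothing to free\n");
	else
		printf("free [%llu, %llu), %llu bytes\n",
		    (unsigned long long)astart,
		    (unsigned long long)aend,
		    (unsigned long long)(aend - astart));
	return (0);
}

With these example values the program prints "free [8192, 16384), 8192 bytes"; a request smaller than one block, say [4096, 8192), rounds to an empty range and frees nothing, which is exactly what the "if (start >= end)" check in zvol_discard() short-circuits.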