author		Alexander Motin <[email protected]>	2023-10-11 19:37:21 -0400
committer	GitHub <[email protected]>	2023-10-11 16:37:21 -0700
commit		1b310dfb1d5aa90db7d8c4ec8d6a92b4430b0496 (patch)
tree		0e0c795f387a877db323dfe6ffde0cf388d0aa16
parent		9facf2d1ad0c4857de1d47a2a14e882a3fb78b89 (diff)
DMU: Do not pre-read holes during write
dmu_tx_check_ioerr() pre-reads blocks that are going to be dirtied as part of a transaction, both to prefetch them and to check for errors. But it makes no sense to do this for holes: there are no disk reads to prefetch, and there can be no errors. Worse, the resulting dbufs are anonymous and are freed immediately by dbuf_rele() without even being put into the dbuf cache, so we just burn CPU time on decompression and other overhead and get absolutely no result at the end.

Using dbuf_hold_impl() with the fail_sparse parameter allows us to skip this extra work. In my tests, sequential 8KB writes to an empty ZVOL with 32KB blocks show a throughput increase from 1.7 to 2GB/s.

Reviewed-by: Brian Atkinson <[email protected]>
Reviewed-by: Brian Behlendorf <[email protected]>
Signed-off-by: Alexander Motin <[email protected]>
Sponsored by: iXsystems, Inc.
Closes #15371
-rw-r--r--	module/zfs/dmu_tx.c	8
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/module/zfs/dmu_tx.c b/module/zfs/dmu_tx.c
index 0eb8c17e3..8451b5082 100644
--- a/module/zfs/dmu_tx.c
+++ b/module/zfs/dmu_tx.c
@@ -210,10 +210,12 @@ dmu_tx_check_ioerr(zio_t *zio, dnode_t *dn, int level, uint64_t blkid)
 	dmu_buf_impl_t *db;
 
 	rw_enter(&dn->dn_struct_rwlock, RW_READER);
-	db = dbuf_hold_level(dn, level, blkid, FTAG);
+	err = dbuf_hold_impl(dn, level, blkid, TRUE, FALSE, FTAG, &db);
 	rw_exit(&dn->dn_struct_rwlock);
-	if (db == NULL)
-		return (SET_ERROR(EIO));
+	if (err == ENOENT)
+		return (0);
+	if (err != 0)
+		return (err);
 	/*
 	 * PARTIAL_FIRST allows caching for uncacheable blocks. It will
 	 * be cleared after dmu_buf_will_dirty() call dbuf_read() again.
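For context, here is a sketch of how dmu_tx_check_ioerr() reads after this change, assembled from the hunk above. The trailing dbuf_read() call is not part of the hunk and is reconstructed from the surrounding upstream code, so treat it as an assumption. The key point is that dbuf_hold_impl() with fail_sparse set to TRUE fails with ENOENT when the block is a hole, and the caller now maps that to success instead of holding, decompressing, and immediately releasing an anonymous dbuf.

	/*
	 * Sketch of dmu_tx_check_ioerr() after this change. The
	 * dbuf_read() tail below is an assumption based on the
	 * surrounding upstream code and may differ in detail.
	 */
	static int
	dmu_tx_check_ioerr(zio_t *zio, dnode_t *dn, int level, uint64_t blkid)
	{
		int err;
		dmu_buf_impl_t *db;

		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		/* fail_sparse=TRUE: fail with ENOENT instead of an anonymous dbuf. */
		err = dbuf_hold_impl(dn, level, blkid, TRUE, FALSE, FTAG, &db);
		rw_exit(&dn->dn_struct_rwlock);
		/* A hole cannot produce an I/O error and needs no prefetch. */
		if (err == ENOENT)
			return (0);
		if (err != 0)
			return (err);
		/*
		 * PARTIAL_FIRST allows caching for uncacheable blocks. It will
		 * be cleared after dmu_buf_will_dirty() calls dbuf_read() again.
		 */
		err = dbuf_read(db, zio, DB_RF_CANFAIL | DB_RF_NOPREFETCH |
		    (level == 0 ? DB_RF_PARTIAL_FIRST : 0));
		dbuf_rele(db, FTAG);
		return (err);
	}

Mapping ENOENT to 0 keeps every caller unchanged: a hole is simply "nothing to pre-read, no error possible", which is exactly what the old code spent CPU time discovering the hard way.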