path: root/module/zfs/zio.c
author:    Alexander Motin <[email protected]>  2024-08-08 18:25:10 -0400
committer: GitHub <[email protected]>  2024-08-08 15:25:10 -0700
commit:    aef452f108fd4a598199cb1b2c73d6b5e8c011b0 (patch)
tree:      078c9066452d5d9d09a84abfa70e402802cf2d68  /module/zfs/zio.c
parent:    24e6585e761db4c929e192d364a5554f81518c33 (diff)
Improve zfs_blkptr_verify()
- Skip config lock enter/exit for embedded blocks.  They have no DVAs,
  so there is nothing to check under the lock.
- Skip the CHECKSUM check and properly check PSIZE for embedded blocks.
- Add static branch predictions for unlikely conditions.
- Do not verify DVAs for blocks already in ARC.  The ARC hit already
  "verified" the first (often the only) DVA, and it is not worth
  entering/exiting the config lock for nothing.

Some profiles show up to a 3% CPU saving from this change.

Reviewed-by: Brian Behlendorf <[email protected]>
Reviewed-by: Allan Jude <[email protected]>
Signed-off-by: Alexander Motin <[email protected]>
Sponsored by: iXsystems, Inc.
Closes #16387
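A note on the "static branch predictions" item above: in kernel-style C, unlikely() is conventionally a thin wrapper around the compiler builtin __builtin_expect(), hinting that the annotated condition is expected to be false so the error-handling branch is placed off the hot path. Below is a minimal, self-contained sketch of that pattern; the macro definitions and the check_type() helper are illustrative stand-ins, not the exact OpenZFS/SPL code.

#include <stdint.h>

/* Illustrative kernel-style hint macros (assumed definitions, not copied
 * from the SPL headers). */
#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

/* Hypothetical validation helper showing the pattern used in this diff:
 * the error branch is marked unlikely, so the compiler lays out the
 * fall-through (valid) path as the hot path. */
static int
check_type(uint64_t type, uint64_t num_valid_types)
{
	if (unlikely(type >= num_valid_types))
		return (-1);	/* cold path: invalid TYPE */
	return (0);		/* hot path: valid TYPE */
}

int
main(void)
{
	/* The bound here is an arbitrary example value. */
	return (check_type(3, 54) == 0 ? 0 : 1);
}

Used this way, the hint affects only code layout and branch weighting, not behavior, which is why it can be added to every error check in the diff without functional risk.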
Diffstat (limited to 'module/zfs/zio.c')
-rw-r--r--  module/zfs/zio.c | 45
1 file changed, 25 insertions(+), 20 deletions(-)
diff --git a/module/zfs/zio.c b/module/zfs/zio.c
index 26ffc597f..fd69136f7 100644
--- a/module/zfs/zio.c
+++ b/module/zfs/zio.c
@@ -1105,45 +1105,50 @@ zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp,
 {
 	int errors = 0;
-	if (!DMU_OT_IS_VALID(BP_GET_TYPE(bp))) {
+	if (unlikely(!DMU_OT_IS_VALID(BP_GET_TYPE(bp)))) {
 		errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
 		    "blkptr at %px has invalid TYPE %llu",
 		    bp, (longlong_t)BP_GET_TYPE(bp));
 	}
-	if (BP_GET_CHECKSUM(bp) >= ZIO_CHECKSUM_FUNCTIONS) {
-		errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
-		    "blkptr at %px has invalid CHECKSUM %llu",
-		    bp, (longlong_t)BP_GET_CHECKSUM(bp));
-	}
-	if (BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_FUNCTIONS) {
+	if (unlikely(BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_FUNCTIONS)) {
 		errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
 		    "blkptr at %px has invalid COMPRESS %llu",
 		    bp, (longlong_t)BP_GET_COMPRESS(bp));
 	}
-	if (BP_GET_LSIZE(bp) > SPA_MAXBLOCKSIZE) {
+	if (unlikely(BP_GET_LSIZE(bp) > SPA_MAXBLOCKSIZE)) {
 		errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
 		    "blkptr at %px has invalid LSIZE %llu",
 		    bp, (longlong_t)BP_GET_LSIZE(bp));
 	}
-	if (BP_GET_PSIZE(bp) > SPA_MAXBLOCKSIZE) {
-		errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
-		    "blkptr at %px has invalid PSIZE %llu",
-		    bp, (longlong_t)BP_GET_PSIZE(bp));
-	}
-
 	if (BP_IS_EMBEDDED(bp)) {
-		if (BPE_GET_ETYPE(bp) >= NUM_BP_EMBEDDED_TYPES) {
+		if (unlikely(BPE_GET_ETYPE(bp) >= NUM_BP_EMBEDDED_TYPES)) {
 			errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
 			    "blkptr at %px has invalid ETYPE %llu",
 			    bp, (longlong_t)BPE_GET_ETYPE(bp));
 		}
+		if (unlikely(BPE_GET_PSIZE(bp) > BPE_PAYLOAD_SIZE)) {
+			errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
+			    "blkptr at %px has invalid PSIZE %llu",
+			    bp, (longlong_t)BPE_GET_PSIZE(bp));
+		}
+		return (errors == 0);
+	}
+	if (unlikely(BP_GET_CHECKSUM(bp) >= ZIO_CHECKSUM_FUNCTIONS)) {
+		errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
+		    "blkptr at %px has invalid CHECKSUM %llu",
+		    bp, (longlong_t)BP_GET_CHECKSUM(bp));
+	}
+	if (unlikely(BP_GET_PSIZE(bp) > SPA_MAXBLOCKSIZE)) {
+		errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
+		    "blkptr at %px has invalid PSIZE %llu",
+		    bp, (longlong_t)BP_GET_PSIZE(bp));
 	}
 	/*
 	 * Do not verify individual DVAs if the config is not trusted. This
 	 * will be done once the zio is executed in vdev_mirror_map_alloc.
 	 */
-	if (!spa->spa_trust_config)
+	if (unlikely(!spa->spa_trust_config))
 		return (errors == 0);
 	switch (blk_config) {
@@ -1172,20 +1177,20 @@ zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp,
 		const dva_t *dva = &bp->blk_dva[i];
 		uint64_t vdevid = DVA_GET_VDEV(dva);
-		if (vdevid >= spa->spa_root_vdev->vdev_children) {
+		if (unlikely(vdevid >= spa->spa_root_vdev->vdev_children)) {
 			errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
 			    "blkptr at %px DVA %u has invalid VDEV %llu",
 			    bp, i, (longlong_t)vdevid);
 			continue;
 		}
 		vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
-		if (vd == NULL) {
+		if (unlikely(vd == NULL)) {
 			errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
 			    "blkptr at %px DVA %u has invalid VDEV %llu",
 			    bp, i, (longlong_t)vdevid);
 			continue;
 		}
-		if (vd->vdev_ops == &vdev_hole_ops) {
+		if (unlikely(vd->vdev_ops == &vdev_hole_ops)) {
 			errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
 			    "blkptr at %px DVA %u has hole VDEV %llu",
 			    bp, i, (longlong_t)vdevid);
@@ -1203,7 +1208,7 @@ zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp,
 		uint64_t asize = DVA_GET_ASIZE(dva);
 		if (DVA_GET_GANG(dva))
 			asize = vdev_gang_header_asize(vd);
-		if (offset + asize > vd->vdev_asize) {
+		if (unlikely(offset + asize > vd->vdev_asize)) {
 			errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
 			    "blkptr at %px DVA %u has invalid OFFSET %llu",
 			    bp, i, (longlong_t)offset);