author		Richard Yao <[email protected]>	2022-09-22 14:28:33 -0400
committer	GitHub <[email protected]>		2022-09-22 11:28:33 -0700
commit		e506a0ce40bd777a84ba1de8ed40df2154f7afb1 (patch)
tree		24556ac73ceacee93fbba742f205eac50172007e /module/zfs/metaslab.c
parent		c629f0bf62e351355716f9870d6c2e377584b016 (diff)
Cleanup: Change 1 used in bitshifts to 1ULL
Coverity complains about this. It is not a bug as long as we never
shift by more than 31, but changing the constants from 1 to 1ULL is
a harmless cleanup.
Reviewed-by: Ryan Moeller <[email protected]>
Reviewed-by: Brian Behlendorf <[email protected]>
Signed-off-by: Richard Yao <[email protected]>
Closes #13914
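For context, here is a minimal standalone sketch (not taken from ZFS) of the
hazard Coverity flags: the literal 1 has type int, so `1 << shift` is
evaluated in 32-bit arithmetic and only widened to 64 bits afterwards, which
is undefined behavior once the shift count reaches 32. The `shift` value
below is a hypothetical stand-in for fields like mra_floor_shift or
vdev_ashift.

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	unsigned shift = 40;	/* hypothetical count > 31 */

	/*
	 * With a plain int literal, `1 << shift` would be evaluated in
	 * 32 bits, which is undefined behavior for shift >= 32; on x86
	 * the hardware masks the count to 5 bits, so the widened result
	 * would typically be 1 << (40 & 31) == 256 rather than 2^40.
	 */
	uint64_t blocksize = 1ULL << shift;	/* well defined: 2^40 */

	printf("1ULL << %u = %llu\n", shift, (unsigned long long)blocksize);
	return (0);
}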
Diffstat (limited to 'module/zfs/metaslab.c')
-rw-r--r--	module/zfs/metaslab.c	6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/module/zfs/metaslab.c b/module/zfs/metaslab.c
index 02cf121d8..4234f8ebf 100644
--- a/module/zfs/metaslab.c
+++ b/module/zfs/metaslab.c
@@ -1449,7 +1449,7 @@ metaslab_rt_add(range_tree_t *rt, range_seg_t *rs, void *arg)
 	zfs_btree_t *size_tree = mrap->mra_bt;
 
 	if (rs_get_end(rs, rt) - rs_get_start(rs, rt) <
-	    (1 << mrap->mra_floor_shift))
+	    (1ULL << mrap->mra_floor_shift))
 		return;
 
 	zfs_btree_add(size_tree, rs);
@@ -1461,7 +1461,7 @@ metaslab_rt_remove(range_tree_t *rt, range_seg_t *rs, void *arg)
 	metaslab_rt_arg_t *mrap = arg;
 	zfs_btree_t *size_tree = mrap->mra_bt;
 
-	if (rs_get_end(rs, rt) - rs_get_start(rs, rt) < (1 <<
+	if (rs_get_end(rs, rt) - rs_get_start(rs, rt) < (1ULL <<
 	    mrap->mra_floor_shift))
 		return;
 
@@ -3552,7 +3552,7 @@ metaslab_should_condense(metaslab_t *msp)
 {
 	space_map_t *sm = msp->ms_sm;
 	vdev_t *vd = msp->ms_group->mg_vd;
-	uint64_t vdev_blocksize = 1 << vd->vdev_ashift;
+	uint64_t vdev_blocksize = 1ULL << vd->vdev_ashift;
 
 	ASSERT(MUTEX_HELD(&msp->ms_lock));
 	ASSERT(msp->ms_loaded);
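The floor-shift comparisons above follow the same pattern; below is a hedged
sketch of the check in metaslab_rt_add()/metaslab_rt_remove(), with `start`,
`end`, and `floor_shift` standing in for the range_seg_t accessors and
mra_floor_shift (they are illustrative names, not the real ZFS interfaces).

#include <stdbool.h>
#include <stdint.h>

/*
 * Sketch of the size-tree floor check: segments shorter than
 * 1 << floor_shift bytes are not tracked in the size tree.
 */
static bool
segment_below_floor(uint64_t start, uint64_t end, unsigned floor_shift)
{
	/*
	 * 1ULL keeps the shift in 64-bit arithmetic.  A plain 1 would
	 * be shifted as a 32-bit int and widened afterwards, which is
	 * only safe while floor_shift <= 31; hence the cleanup, even
	 * though current callers never exceed that.
	 */
	return (end - start < (1ULL << floor_shift));
}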