From 63e3a8616b200dc36fe9d298a466bb5c25b58132 Mon Sep 17 00:00:00 2001
From: Matthew Ahrens
Date: Wed, 26 Nov 2014 09:57:30 -0800
Subject: Illumos 5349 - verify that block pointer is plausible before reading

5349 verify that block pointer is plausible before reading
Reviewed by: Alex Reece
Reviewed by: Christopher Siden
Reviewed by: Dan McDonald
Reviewed by: George Wilson
Reviewed by: Richard Lowe
Reviewed by: Xin Li
Reviewed by: Josef 'Jeff' Sipek
Approved by: Gordon Ross

References:
  https://www.illumos.org/issues/5349
  https://github.com/illumos/illumos-gate/commit/f63ab3d5

Porting notes:

* Several variable declarations were moved due to C style needs

Ported-by: DHE
Signed-off-by: Brian Behlendorf
Closes #3373
---
 module/zfs/zio.c | 94 +++++++++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 90 insertions(+), 4 deletions(-)

(limited to 'module/zfs/zio.c')

diff --git a/module/zfs/zio.c b/module/zfs/zio.c
index c89a2f99e..9204df2b2 100644
--- a/module/zfs/zio.c
+++ b/module/zfs/zio.c
@@ -212,7 +212,7 @@ zio_buf_alloc(size_t size)
 {
 	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

-	ASSERT3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
+	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

 	return (kmem_cache_alloc(zio_buf_cache[c], KM_PUSHPAGE));
 }
@@ -228,7 +228,7 @@ zio_data_buf_alloc(size_t size)
 {
 	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

-	ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
+	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

 	return (kmem_cache_alloc(zio_data_buf_cache[c], KM_PUSHPAGE));
 }
@@ -238,7 +238,7 @@ zio_buf_free(void *buf, size_t size)
 {
 	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

-	ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
+	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

 	kmem_cache_free(zio_buf_cache[c], buf);
 }
@@ -248,7 +248,7 @@ zio_data_buf_free(void *buf, size_t size)
 {
 	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

-	ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
+	VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

 	kmem_cache_free(zio_data_buf_cache[c], buf);
 }
@@ -596,6 +596,90 @@ zio_root(spa_t *spa, zio_done_func_t *done, void *private, enum zio_flag flags)
 	return (zio_null(NULL, spa, NULL, done, private, flags));
 }

+void
+zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp)
+{
+	int i;
+
+	if (!DMU_OT_IS_VALID(BP_GET_TYPE(bp))) {
+		zfs_panic_recover("blkptr at %p has invalid TYPE %llu",
+		    bp, (longlong_t)BP_GET_TYPE(bp));
+	}
+	if (BP_GET_CHECKSUM(bp) >= ZIO_CHECKSUM_FUNCTIONS ||
+	    BP_GET_CHECKSUM(bp) <= ZIO_CHECKSUM_ON) {
+		zfs_panic_recover("blkptr at %p has invalid CHECKSUM %llu",
+		    bp, (longlong_t)BP_GET_CHECKSUM(bp));
+	}
+	if (BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_FUNCTIONS ||
+	    BP_GET_COMPRESS(bp) <= ZIO_COMPRESS_ON) {
+		zfs_panic_recover("blkptr at %p has invalid COMPRESS %llu",
+		    bp, (longlong_t)BP_GET_COMPRESS(bp));
+	}
+	if (BP_GET_LSIZE(bp) > SPA_MAXBLOCKSIZE) {
+		zfs_panic_recover("blkptr at %p has invalid LSIZE %llu",
+		    bp, (longlong_t)BP_GET_LSIZE(bp));
+	}
+	if (BP_GET_PSIZE(bp) > SPA_MAXBLOCKSIZE) {
+		zfs_panic_recover("blkptr at %p has invalid PSIZE %llu",
+		    bp, (longlong_t)BP_GET_PSIZE(bp));
+	}
+
+	if (BP_IS_EMBEDDED(bp)) {
+		if (BPE_GET_ETYPE(bp) > NUM_BP_EMBEDDED_TYPES) {
+			zfs_panic_recover("blkptr at %p has invalid ETYPE %llu",
+			    bp, (longlong_t)BPE_GET_ETYPE(bp));
+		}
+	}
+
+	/*
+	 * Pool-specific checks.
+	 *
+	 * Note: it would be nice to verify that the blk_birth and
+	 * BP_PHYSICAL_BIRTH() are not too large.  However, spa_freeze()
+	 * allows the birth time of log blocks (and dmu_sync()-ed blocks
+	 * that are in the log) to be arbitrarily large.
+	 */
+	for (i = 0; i < BP_GET_NDVAS(bp); i++) {
+		uint64_t vdevid = DVA_GET_VDEV(&bp->blk_dva[i]);
+		vdev_t *vd;
+		uint64_t offset, asize;
+		if (vdevid >= spa->spa_root_vdev->vdev_children) {
+			zfs_panic_recover("blkptr at %p DVA %u has invalid "
+			    "VDEV %llu",
+			    bp, i, (longlong_t)vdevid);
+		}
+		vd = spa->spa_root_vdev->vdev_child[vdevid];
+		if (vd == NULL) {
+			zfs_panic_recover("blkptr at %p DVA %u has invalid "
+			    "VDEV %llu",
+			    bp, i, (longlong_t)vdevid);
+		}
+		if (vd->vdev_ops == &vdev_hole_ops) {
+			zfs_panic_recover("blkptr at %p DVA %u has hole "
+			    "VDEV %llu",
+			    bp, i, (longlong_t)vdevid);
+
+		}
+		if (vd->vdev_ops == &vdev_missing_ops) {
+			/*
+			 * "missing" vdevs are valid during import, but we
+			 * don't have their detailed info (e.g. asize), so
+			 * we can't perform any more checks on them.
+			 */
+			continue;
+		}
+		offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
+		asize = DVA_GET_ASIZE(&bp->blk_dva[i]);
+		if (BP_IS_GANG(bp))
+			asize = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
+		if (offset + asize > vd->vdev_asize) {
+			zfs_panic_recover("blkptr at %p DVA %u has invalid "
+			    "OFFSET %llu",
+			    bp, i, (longlong_t)offset);
+		}
+	}
+}
+
 zio_t *
 zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
     void *data, uint64_t size, zio_done_func_t *done, void *private,
@@ -603,6 +687,8 @@ zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
 {
 	zio_t *zio;

+	zfs_blkptr_verify(spa, bp);
+
 	zio = zio_create(pio, spa, BP_PHYSICAL_BIRTH(bp), bp,
 	    data, size, done, private,
 	    ZIO_TYPE_READ, priority, flags, NULL, 0, zb,
-- 
cgit v1.2.3