diff options
author | Tom Caputi <[email protected]> | 2019-01-17 18:47:08 -0500 |
---|---|---|
committer | Brian Behlendorf <[email protected]> | 2019-01-17 15:47:08 -0800 |
commit | 305781da4bbe11acef8707894d7e33f8aef3ca8e (patch) | |
tree | 3ac436d2d8c1fe88daee63964e838bdab9f63cd2 /module | |
parent | 75058f33034d23801818582996d3dc941c545910 (diff) |
Fix error handling in callers of dbuf_hold_level()
Currently, the functions dbuf_prefetch_indirect_done() and
dmu_assign_arcbuf_by_dnode() assume that dbuf_hold_level() cannot
fail. In the event of an error the former will cause a NULL pointer
dereference and the latter will trigger a VERIFY. This patch adds
error handling to these functions and their callers where necessary.
Reviewed by: Matt Ahrens <[email protected]>
Reviewed-by: Brian Behlendorf <[email protected]>
Signed-off-by: Tom Caputi <[email protected]>
Closes #8291
Diffstat (limited to 'module')
-rw-r--r-- | module/zfs/dbuf.c | 6 | ||||
-rw-r--r-- | module/zfs/dmu.c | 15 | ||||
-rw-r--r-- | module/zfs/dmu_recv.c | 7 | ||||
-rw-r--r-- | module/zfs/zfs_vnops.c | 7 |
4 files changed, 29 insertions, 6 deletions
diff --git a/module/zfs/dbuf.c b/module/zfs/dbuf.c index 826934b36..9f3c9bfd5 100644 --- a/module/zfs/dbuf.c +++ b/module/zfs/dbuf.c @@ -2886,6 +2886,12 @@ dbuf_prefetch_indirect_done(zio_t *zio, const zbookmark_phys_t *zb, dpa->dpa_zb.zb_level)); dmu_buf_impl_t *db = dbuf_hold_level(dpa->dpa_dnode, dpa->dpa_curlevel, curblkid, FTAG); + if (db == NULL) { + kmem_free(dpa, sizeof (*dpa)); + arc_buf_destroy(abuf, private); + return; + } + (void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_HAVESTRUCT); dbuf_rele(db, FTAG); diff --git a/module/zfs/dmu.c b/module/zfs/dmu.c index 5b79eb907..231ed3053 100644 --- a/module/zfs/dmu.c +++ b/module/zfs/dmu.c @@ -1688,7 +1688,7 @@ dmu_copy_from_buf(objset_t *os, uint64_t object, uint64_t offset, * If this is not possible copy the contents of passed arc buf via * dmu_write(). */ -void +int dmu_assign_arcbuf_by_dnode(dnode_t *dn, uint64_t offset, arc_buf_t *buf, dmu_tx_t *tx) { @@ -1700,7 +1700,9 @@ dmu_assign_arcbuf_by_dnode(dnode_t *dn, uint64_t offset, arc_buf_t *buf, rw_enter(&dn->dn_struct_rwlock, RW_READER); blkid = dbuf_whichblock(dn, 0, offset); - VERIFY((db = dbuf_hold(dn, blkid, FTAG)) != NULL); + db = dbuf_hold(dn, blkid, FTAG); + if (db == NULL) + return (SET_ERROR(EIO)); rw_exit(&dn->dn_struct_rwlock); /* @@ -1720,17 +1722,22 @@ dmu_assign_arcbuf_by_dnode(dnode_t *dn, uint64_t offset, arc_buf_t *buf, dmu_return_arcbuf(buf); XUIOSTAT_BUMP(xuiostat_wbuf_copied); } + + return (0); } -void +int dmu_assign_arcbuf_by_dbuf(dmu_buf_t *handle, uint64_t offset, arc_buf_t *buf, dmu_tx_t *tx) { + int err; dmu_buf_impl_t *dbuf = (dmu_buf_impl_t *)handle; DB_DNODE_ENTER(dbuf); - dmu_assign_arcbuf_by_dnode(DB_DNODE(dbuf), offset, buf, tx); + err = dmu_assign_arcbuf_by_dnode(DB_DNODE(dbuf), offset, buf, tx); DB_DNODE_EXIT(dbuf); + + return (err); } typedef struct { diff --git a/module/zfs/dmu_recv.c b/module/zfs/dmu_recv.c index a448bc148..257f157fd 100644 --- a/module/zfs/dmu_recv.c +++ b/module/zfs/dmu_recv.c 
@@ -1439,7 +1439,12 @@ receive_write(struct receive_writer_arg *rwa, struct drr_write *drrw, } VERIFY0(dnode_hold(rwa->os, drrw->drr_object, FTAG, &dn)); - dmu_assign_arcbuf_by_dnode(dn, drrw->drr_offset, abuf, tx); + err = dmu_assign_arcbuf_by_dnode(dn, drrw->drr_offset, abuf, tx); + if (err != 0) { + dnode_rele(dn, FTAG); + dmu_tx_commit(tx); + return (err); + } dnode_rele(dn, FTAG); /* diff --git a/module/zfs/zfs_vnops.c b/module/zfs/zfs_vnops.c index 06d93bad0..bbc171bd2 100644 --- a/module/zfs/zfs_vnops.c +++ b/module/zfs/zfs_vnops.c @@ -844,8 +844,13 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr) xuio_stat_wbuf_copied(); } else { ASSERT(xuio || tx_bytes == max_blksz); - dmu_assign_arcbuf_by_dbuf( + error = dmu_assign_arcbuf_by_dbuf( sa_get_db(zp->z_sa_hdl), woff, abuf, tx); + if (error != 0) { + dmu_return_arcbuf(abuf); + dmu_tx_commit(tx); + break; + } } ASSERT(tx_bytes <= uio->uio_resid); uioskip(uio, tx_bytes); |