about summary refs log tree commit diff stats
path: root/module
diff options
context:
space:
mode:
Diffstat (limited to 'module')
-rw-r--r--module/os/freebsd/zfs/dmu_os.c4
-rw-r--r--module/zfs/dbuf.c33
-rw-r--r--module/zfs/dmu.c21
-rw-r--r--module/zfs/dmu_recv.c2
4 files changed, 36 insertions(+), 24 deletions(-)
diff --git a/module/os/freebsd/zfs/dmu_os.c b/module/os/freebsd/zfs/dmu_os.c
index a5f486b95..c33ce01ab 100644
--- a/module/os/freebsd/zfs/dmu_os.c
+++ b/module/os/freebsd/zfs/dmu_os.c
@@ -110,7 +110,7 @@ dmu_write_pages(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);
if (tocpy == db->db_size)
- dmu_buf_will_fill(db, tx);
+ dmu_buf_will_fill(db, tx, B_FALSE);
else
dmu_buf_will_dirty(db, tx);
@@ -126,7 +126,7 @@ dmu_write_pages(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
}
if (tocpy == db->db_size)
- dmu_buf_fill_done(db, tx);
+ dmu_buf_fill_done(db, tx, B_FALSE);
offset += tocpy;
size -= tocpy;
diff --git a/module/zfs/dbuf.c b/module/zfs/dbuf.c
index 255add6cd..280001bc3 100644
--- a/module/zfs/dbuf.c
+++ b/module/zfs/dbuf.c
@@ -2734,7 +2734,7 @@ dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
}
void
-dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
+dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx, boolean_t canfail)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
@@ -2752,8 +2752,14 @@ dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
* Block cloning: We will be completely overwriting a block
* cloned in this transaction group, so let's undirty the
* pending clone and mark the block as uncached. This will be
- * as if the clone was never done.
+ * as if the clone was never done. But if the fill can fail
+ * we should have a way to return back to the cloned data.
*/
+ if (canfail && dbuf_find_dirty_eq(db, tx->tx_txg) != NULL) {
+ mutex_exit(&db->db_mtx);
+ dmu_buf_will_dirty(db_fake, tx);
+ return;
+ }
VERIFY(!dbuf_undirty(db, tx));
db->db_state = DB_UNCACHED;
}
@@ -2814,32 +2820,41 @@ dbuf_override_impl(dmu_buf_impl_t *db, const blkptr_t *bp, dmu_tx_t *tx)
dl->dr_overridden_by.blk_birth = dr->dr_txg;
}
-void
-dmu_buf_fill_done(dmu_buf_t *dbuf, dmu_tx_t *tx)
+boolean_t
+dmu_buf_fill_done(dmu_buf_t *dbuf, dmu_tx_t *tx, boolean_t failed)
{
(void) tx;
dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
- dbuf_states_t old_state;
mutex_enter(&db->db_mtx);
DBUF_VERIFY(db);
- old_state = db->db_state;
- db->db_state = DB_CACHED;
- if (old_state == DB_FILL) {
+ if (db->db_state == DB_FILL) {
if (db->db_level == 0 && db->db_freed_in_flight) {
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
/* we were freed while filling */
/* XXX dbuf_undirty? */
memset(db->db.db_data, 0, db->db.db_size);
db->db_freed_in_flight = FALSE;
+ db->db_state = DB_CACHED;
DTRACE_SET_STATE(db,
"fill done handling freed in flight");
+ failed = B_FALSE;
+ } else if (failed) {
+ VERIFY(!dbuf_undirty(db, tx));
+ db->db_buf = NULL;
+ dbuf_clear_data(db);
+ DTRACE_SET_STATE(db, "fill failed");
} else {
+ db->db_state = DB_CACHED;
DTRACE_SET_STATE(db, "fill done");
}
cv_broadcast(&db->db_changed);
+ } else {
+ db->db_state = DB_CACHED;
+ failed = B_FALSE;
}
mutex_exit(&db->db_mtx);
+ return (failed);
}
void
@@ -2984,7 +2999,7 @@ dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
DTRACE_SET_STATE(db, "filling assigned arcbuf");
mutex_exit(&db->db_mtx);
(void) dbuf_dirty(db, tx);
- dmu_buf_fill_done(&db->db, tx);
+ dmu_buf_fill_done(&db->db, tx, B_FALSE);
}
void
diff --git a/module/zfs/dmu.c b/module/zfs/dmu.c
index 909605aa2..3215ab1c2 100644
--- a/module/zfs/dmu.c
+++ b/module/zfs/dmu.c
@@ -1115,14 +1115,14 @@ dmu_write_impl(dmu_buf_t **dbp, int numbufs, uint64_t offset, uint64_t size,
ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);
if (tocpy == db->db_size)
- dmu_buf_will_fill(db, tx);
+ dmu_buf_will_fill(db, tx, B_FALSE);
else
dmu_buf_will_dirty(db, tx);
(void) memcpy((char *)db->db_data + bufoff, buf, tocpy);
if (tocpy == db->db_size)
- dmu_buf_fill_done(db, tx);
+ dmu_buf_fill_done(db, tx, B_FALSE);
offset += tocpy;
size -= tocpy;
@@ -1330,27 +1330,24 @@ dmu_write_uio_dnode(dnode_t *dn, zfs_uio_t *uio, uint64_t size, dmu_tx_t *tx)
ASSERT(size > 0);
- bufoff = zfs_uio_offset(uio) - db->db_offset;
+ offset_t off = zfs_uio_offset(uio);
+ bufoff = off - db->db_offset;
tocpy = MIN(db->db_size - bufoff, size);
ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);
if (tocpy == db->db_size)
- dmu_buf_will_fill(db, tx);
+ dmu_buf_will_fill(db, tx, B_TRUE);
else
dmu_buf_will_dirty(db, tx);
- /*
- * XXX zfs_uiomove could block forever (eg.nfs-backed
- * pages). There needs to be a uiolockdown() function
- * to lock the pages in memory, so that zfs_uiomove won't
- * block.
- */
err = zfs_uio_fault_move((char *)db->db_data + bufoff,
tocpy, UIO_WRITE, uio);
- if (tocpy == db->db_size)
- dmu_buf_fill_done(db, tx);
+ if (tocpy == db->db_size && dmu_buf_fill_done(db, tx, err)) {
+ /* The fill was reverted. Undo any uio progress. */
+ zfs_uio_advance(uio, off - zfs_uio_offset(uio));
+ }
if (err)
break;
diff --git a/module/zfs/dmu_recv.c b/module/zfs/dmu_recv.c
index 05ca91717..54aa60259 100644
--- a/module/zfs/dmu_recv.c
+++ b/module/zfs/dmu_recv.c
@@ -2532,7 +2532,7 @@ receive_spill(struct receive_writer_arg *rwa, struct drr_spill *drrs,
* size of the provided arc_buf_t.
*/
if (db_spill->db_size != drrs->drr_length) {
- dmu_buf_will_fill(db_spill, tx);
+ dmu_buf_will_fill(db_spill, tx, B_FALSE);
VERIFY0(dbuf_spill_set_blksz(db_spill,
drrs->drr_length, tx));
}