Diffstat (limited to 'module/zfs/dmu.c')
 -rw-r--r--  module/zfs/dmu.c | 138
 1 file changed, 0 insertions(+), 138 deletions(-)
diff --git a/module/zfs/dmu.c b/module/zfs/dmu.c
index 4af2a13e7..0722f5c5e 100644
--- a/module/zfs/dmu.c
+++ b/module/zfs/dmu.c
@@ -75,13 +75,6 @@ unsigned long zfs_per_txg_dirty_frees_percent = 5;
int zfs_dmu_offset_next_sync = 0;
/*
- * This can be used for testing, to ensure that certain actions happen
- * while in the middle of a remap (which might otherwise complete too
- * quickly). Used by ztest(8).
- */
-int zfs_object_remap_one_indirect_delay_ms = 0;
-
-/*
* Limit the amount we can prefetch with one call to this amount. This
* helps to limit the amount of memory that can be used by prefetching.
* Larger objects should be prefetched a bit at a time.
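To illustrate the cap this comment describes, a hypothetical caller that wants a whole large object warmed up would issue several bounded dmu_prefetch() calls rather than one huge request. This is a minimal sketch only; prefetch_object_chunked() and its 8 * SPA_MAXBLOCKSIZE chunk size are illustrative assumptions, not code from this file.

/*
 * Illustrative sketch (not part of this change): prefetch a large
 * object a bounded chunk at a time, so no single call can queue an
 * unbounded amount of prefetch I/O.
 */
static void
prefetch_object_chunked(objset_t *os, uint64_t object, uint64_t objsize)
{
	const uint64_t chunk = 8 * SPA_MAXBLOCKSIZE;	/* assumed cap */
	uint64_t off;

	for (off = 0; off < objsize; off += chunk) {
		uint64_t len = MIN(chunk, objsize - off);
		/* level 0 = data blocks; low-priority read-ahead */
		dmu_prefetch(os, object, 0, off, len,
		    ZIO_PRIORITY_ASYNC_READ);
	}
}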
@@ -1114,137 +1107,6 @@ dmu_write_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size,
dmu_buf_rele_array(dbp, numbufs, FTAG);
}
-static int
-dmu_object_remap_one_indirect(objset_t *os, dnode_t *dn,
- uint64_t last_removal_txg, uint64_t offset)
-{
- uint64_t l1blkid = dbuf_whichblock(dn, 1, offset);
- dnode_t *dn_tx;
- int err = 0;
-
- rw_enter(&dn->dn_struct_rwlock, RW_READER);
- dmu_buf_impl_t *dbuf = dbuf_hold_level(dn, 1, l1blkid, FTAG);
- ASSERT3P(dbuf, !=, NULL);
-
- /*
- * If the block hasn't been written yet, this default will ensure
- * we don't try to remap it.
- */
- uint64_t birth = UINT64_MAX;
- ASSERT3U(last_removal_txg, !=, UINT64_MAX);
- if (dbuf->db_blkptr != NULL)
- birth = dbuf->db_blkptr->blk_birth;
- rw_exit(&dn->dn_struct_rwlock);
-
- /*
- * If this L1 was already written after the last removal, then we've
- * already tried to remap it. An additional hold is taken after the
- * dmu_tx_assign() to handle the case where the dnode is freed while
- * waiting for the next open txg.
- */
- if (birth <= last_removal_txg &&
- dbuf_read(dbuf, NULL, DB_RF_MUST_SUCCEED) == 0 &&
- dbuf_can_remap(dbuf)) {
- dmu_tx_t *tx = dmu_tx_create(os);
- dmu_tx_hold_remap_l1indirect(tx, dn->dn_object);
- err = dmu_tx_assign(tx, TXG_WAIT);
- if (err == 0) {
- err = dnode_hold(os, dn->dn_object, FTAG, &dn_tx);
- if (err == 0) {
- (void) dbuf_dirty(dbuf, tx);
- dnode_rele(dn_tx, FTAG);
- }
- dmu_tx_commit(tx);
- } else {
- dmu_tx_abort(tx);
- }
- }
-
- dbuf_rele(dbuf, FTAG);
-
- delay(MSEC_TO_TICK(zfs_object_remap_one_indirect_delay_ms));
-
- return (err);
-}
-
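The comment in the function above ("An additional hold is taken after the dmu_tx_assign() ...") describes a pattern that also recurs below. Distilled into a standalone sketch (redirty_object() is a hypothetical helper, not a function in the tree), it looks like this:

/*
 * Hypothetical distillation of the pattern used above: TXG_WAIT may
 * sleep until the next open txg, and the dnode can be freed while we
 * sleep, so a fresh hold is taken after the assign before dirtying.
 */
static int
redirty_object(objset_t *os, uint64_t object)
{
	dmu_tx_t *tx = dmu_tx_create(os);
	dnode_t *dn;
	int err;

	dmu_tx_hold_bonus(tx, object);		/* declare what we dirty */
	err = dmu_tx_assign(tx, TXG_WAIT);	/* may block for open txg */
	if (err != 0) {
		dmu_tx_abort(tx);		/* unassigned txs are aborted */
		return (err);
	}
	err = dnode_hold(os, object, FTAG, &dn);
	if (err == 0) {
		dnode_setdirty(dn, tx);
		dnode_rele(dn, FTAG);
	}
	dmu_tx_commit(tx);			/* assigned txs are committed */
	return (err);
}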
-/*
- * Remap all blockpointers in the object, if possible, so that they reference
- * only concrete vdevs.
- *
- * To do this, iterate over the L0 blockpointers and remap any that reference
- * an indirect vdev. Note that we only examine L0 blockpointers; since we
- * cannot guarantee that we can remap all blockpointers anyway (due to split
- * blocks), we do not want to make the code unnecessarily complicated to
- * catch the unlikely case that there is an L1 block on an indirect vdev that
- * contains no indirect blockpointers.
- */
-int
-dmu_object_remap_indirects(objset_t *os, uint64_t object,
- uint64_t last_removal_txg)
-{
- uint64_t offset, l1span;
- int err;
- dnode_t *dn, *dn_tx;
-
- err = dnode_hold(os, object, FTAG, &dn);
- if (err != 0) {
- return (err);
- }
-
- if (dn->dn_nlevels <= 1) {
- if (issig(JUSTLOOKING) && issig(FORREAL)) {
- err = SET_ERROR(EINTR);
- }
-
- /*
- * If the dnode has no indirect blocks, we cannot dirty them.
- * We still want to remap the blkptr(s) in the dnode if
- * appropriate, so mark it as dirty. An additional hold is
- * taken after the dmu_tx_assign() to handle the case where
- * the dnode is freed while waiting for the next open txg.
- */
- if (err == 0 && dnode_needs_remap(dn)) {
- dmu_tx_t *tx = dmu_tx_create(os);
- dmu_tx_hold_bonus(tx, object);
- err = dmu_tx_assign(tx, TXG_WAIT);
- if (err == 0) {
- err = dnode_hold(os, object, FTAG, &dn_tx);
- if (err == 0) {
- dnode_setdirty(dn_tx, tx);
- dnode_rele(dn_tx, FTAG);
- }
- dmu_tx_commit(tx);
- } else {
- dmu_tx_abort(tx);
- }
- }
-
- dnode_rele(dn, FTAG);
- return (err);
- }
-
- offset = 0;
- l1span = 1ULL << (dn->dn_indblkshift - SPA_BLKPTRSHIFT +
- dn->dn_datablkshift);
- /*
- * Find the next L1 indirect that is not a hole.
- */
- while (dnode_next_offset(dn, 0, &offset, 2, 1, 0) == 0) {
- if (issig(JUSTLOOKING) && issig(FORREAL)) {
- err = SET_ERROR(EINTR);
- break;
- }
- if ((err = dmu_object_remap_one_indirect(os, dn,
- last_removal_txg, offset)) != 0) {
- break;
- }
- offset += l1span;
- }
-
- dnode_rele(dn, FTAG);
- return (err);
-}
-
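For a sense of the stride the loop above takes, here is a worked instance of the l1span computation with common, purely illustrative block sizes:

/*
 * Worked example (illustrative values): with 128K data blocks
 * (dn_datablkshift = 17), 128K indirect blocks (dn_indblkshift = 17),
 * and 128-byte block pointers (SPA_BLKPTRSHIFT = 7), one L1 holds
 * 2^(17 - 7) = 1024 blkptrs, each covering one 128K data block, so
 *
 *	l1span = 1ULL << (17 - 7 + 17) = 128M
 *
 * of object offset. The dnode_next_offset(dn, 0, &offset, 2, 1, 0)
 * call searches at the L1 level (minlvl = 2), so regions whose L1 is
 * entirely a hole are skipped 128M at a time without being visited.
 */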
void
dmu_prealloc(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
dmu_tx_t *tx)