Diffstat (limited to 'module/zfs')
-rw-r--r--  module/zfs/dbuf.c       |  54 -
-rw-r--r--  module/zfs/dmu.c        | 138 -
-rw-r--r--  module/zfs/dmu_objset.c |  95 -
-rw-r--r--  module/zfs/dmu_tx.c     |  17 -
-rw-r--r--  module/zfs/dsl_dir.c    |  46 -
-rw-r--r--  module/zfs/zfs_ioctl.c  |  17 +-
6 files changed, 3 insertions(+), 364 deletions(-)
diff --git a/module/zfs/dbuf.c b/module/zfs/dbuf.c
index 8afc3df37..abfae29ad 100644
--- a/module/zfs/dbuf.c
+++ b/module/zfs/dbuf.c
@@ -4445,60 +4445,6 @@ dbuf_remap_impl(dnode_t *dn, blkptr_t *bp, dmu_tx_t *tx)
}
/*
- * Returns true if a dbuf_remap would modify the dbuf. We do this by attempting
- * to remap a copy of every bp in the dbuf.
- */
-boolean_t
-dbuf_can_remap(const dmu_buf_impl_t *db)
-{
- spa_t *spa = dmu_objset_spa(db->db_objset);
- blkptr_t *bp = db->db.db_data;
- boolean_t ret = B_FALSE;
-
- ASSERT3U(db->db_level, >, 0);
- ASSERT3S(db->db_state, ==, DB_CACHED);
-
- ASSERT(spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL));
-
- spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
- for (int i = 0; i < db->db.db_size >> SPA_BLKPTRSHIFT; i++) {
- blkptr_t bp_copy = bp[i];
- if (spa_remap_blkptr(spa, &bp_copy, NULL, NULL)) {
- ret = B_TRUE;
- break;
- }
- }
- spa_config_exit(spa, SCL_VDEV, FTAG);
-
- return (ret);
-}
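
Both removed predicates, dbuf_can_remap() above and dnode_needs_remap() below, rely on the same probe-by-copy idiom: spa_remap_blkptr() rewrites the block pointer it is handed when that bp references a removed (indirect) vdev, and returns B_TRUE if it did, so probing a stack copy under the SCL_VDEV config lock answers "would this remap?" without side effects. A minimal sketch of the idiom for a single block pointer, using only calls that appear above (bp_would_remap() itself is a hypothetical name, not a ZFS interface):

    static boolean_t
    bp_would_remap(spa_t *spa, const blkptr_t *bp)
    {
        blkptr_t bp_copy = *bp;  /* probe a copy; leave the original untouched */
        boolean_t modified;

        spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
        modified = spa_remap_blkptr(spa, &bp_copy, NULL, NULL);
        spa_config_exit(spa, SCL_VDEV, FTAG);

        return (modified);
    }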
-
-boolean_t
-dnode_needs_remap(const dnode_t *dn)
-{
- spa_t *spa = dmu_objset_spa(dn->dn_objset);
- boolean_t ret = B_FALSE;
-
- if (dn->dn_phys->dn_nlevels == 0) {
- return (B_FALSE);
- }
-
- ASSERT(spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL));
-
- spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
- for (int j = 0; j < dn->dn_phys->dn_nblkptr; j++) {
- blkptr_t bp_copy = dn->dn_phys->dn_blkptr[j];
- if (spa_remap_blkptr(spa, &bp_copy, NULL, NULL)) {
- ret = B_TRUE;
- break;
- }
- }
- spa_config_exit(spa, SCL_VDEV, FTAG);
-
- return (ret);
-}
-
-/*
 * Remap any existing BPs to concrete vdevs, if possible.
*/
static void
diff --git a/module/zfs/dmu.c b/module/zfs/dmu.c
index 4af2a13e7..0722f5c5e 100644
--- a/module/zfs/dmu.c
+++ b/module/zfs/dmu.c
@@ -75,13 +75,6 @@ unsigned long zfs_per_txg_dirty_frees_percent = 5;
int zfs_dmu_offset_next_sync = 0;
/*
- * This can be used for testing, to ensure that certain actions happen
- * while in the middle of a remap (which might otherwise complete too
- * quickly). Used by ztest(8).
- */
-int zfs_object_remap_one_indirect_delay_ms = 0;
-
-/*
* Limit the amount we can prefetch with one call to this amount. This
* helps to limit the amount of memory that can be used by prefetching.
* Larger objects should be prefetched a bit at a time.
@@ -1114,137 +1107,6 @@ dmu_write_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size,
dmu_buf_rele_array(dbp, numbufs, FTAG);
}
-static int
-dmu_object_remap_one_indirect(objset_t *os, dnode_t *dn,
- uint64_t last_removal_txg, uint64_t offset)
-{
- uint64_t l1blkid = dbuf_whichblock(dn, 1, offset);
- dnode_t *dn_tx;
- int err = 0;
-
- rw_enter(&dn->dn_struct_rwlock, RW_READER);
- dmu_buf_impl_t *dbuf = dbuf_hold_level(dn, 1, l1blkid, FTAG);
- ASSERT3P(dbuf, !=, NULL);
-
- /*
- * If the block hasn't been written yet, this default will ensure
- * we don't try to remap it.
- */
- uint64_t birth = UINT64_MAX;
- ASSERT3U(last_removal_txg, !=, UINT64_MAX);
- if (dbuf->db_blkptr != NULL)
- birth = dbuf->db_blkptr->blk_birth;
- rw_exit(&dn->dn_struct_rwlock);
-
- /*
- * If this L1 was already written after the last removal, then we've
- * already tried to remap it. An additional hold is taken after the
- * dmu_tx_assign() to handle the case where the dnode is freed while
- * waiting for the next open txg.
- */
- if (birth <= last_removal_txg &&
- dbuf_read(dbuf, NULL, DB_RF_MUST_SUCCEED) == 0 &&
- dbuf_can_remap(dbuf)) {
- dmu_tx_t *tx = dmu_tx_create(os);
- dmu_tx_hold_remap_l1indirect(tx, dn->dn_object);
- err = dmu_tx_assign(tx, TXG_WAIT);
- if (err == 0) {
- err = dnode_hold(os, dn->dn_object, FTAG, &dn_tx);
- if (err == 0) {
- (void) dbuf_dirty(dbuf, tx);
- dnode_rele(dn_tx, FTAG);
- }
- dmu_tx_commit(tx);
- } else {
- dmu_tx_abort(tx);
- }
- }
-
- dbuf_rele(dbuf, FTAG);
-
- delay(MSEC_TO_TICK(zfs_object_remap_one_indirect_delay_ms));
-
- return (err);
-}
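
The comment above calls out a subtle liveness issue: dmu_tx_assign(tx, TXG_WAIT) may sleep until the next open txg, and the object can be freed in the meantime, so the dnode must be re-held inside the assigned tx before it is dirtied. A condensed sketch of that create/hold/assign/commit-or-abort lifecycle, built only from calls used in this file (dirty_object_in_tx() is a hypothetical wrapper name):

    static int
    dirty_object_in_tx(objset_t *os, uint64_t object)
    {
        dmu_tx_t *tx = dmu_tx_create(os);
        dnode_t *dn;
        int err;

        dmu_tx_hold_bonus(tx, object);
        err = dmu_tx_assign(tx, TXG_WAIT);
        if (err != 0) {
            dmu_tx_abort(tx);  /* a tx that failed assignment must be aborted */
            return (err);
        }
        /* re-hold: the object may have been freed while we waited */
        err = dnode_hold(os, object, FTAG, &dn);
        if (err == 0) {
            dnode_setdirty(dn, tx);
            dnode_rele(dn, FTAG);
        }
        dmu_tx_commit(tx);  /* an assigned tx is committed regardless */
        return (err);
    }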
-
-/*
- * Remap all blockpointers in the object, if possible, so that they reference
- * only concrete vdevs.
- *
- * To do this, iterate over the L0 blockpointers and remap any that reference
- * an indirect vdev. Note that we only examine L0 blockpointers; since we
- * cannot guarantee that we can remap all blockpointers anyway (due to split
- * blocks), we do not want to make the code unnecessarily complicated to
- * catch the unlikely case that there is an L1 block on an indirect vdev that
- * contains no indirect blockpointers.
- */
-int
-dmu_object_remap_indirects(objset_t *os, uint64_t object,
- uint64_t last_removal_txg)
-{
- uint64_t offset, l1span;
- int err;
- dnode_t *dn, *dn_tx;
-
- err = dnode_hold(os, object, FTAG, &dn);
- if (err != 0) {
- return (err);
- }
-
- if (dn->dn_nlevels <= 1) {
- if (issig(JUSTLOOKING) && issig(FORREAL)) {
- err = SET_ERROR(EINTR);
- }
-
- /*
- * If the dnode has no indirect blocks, we cannot dirty them.
- * We still want to remap the blkptr(s) in the dnode if
- * appropriate, so mark it as dirty. An additional hold is
- * taken after the dmu_tx_assign() to handle the case where
- * the dnode is freed while waiting for the next open txg.
- */
- if (err == 0 && dnode_needs_remap(dn)) {
- dmu_tx_t *tx = dmu_tx_create(os);
- dmu_tx_hold_bonus(tx, object);
- err = dmu_tx_assign(tx, TXG_WAIT);
- if (err == 0) {
- err = dnode_hold(os, object, FTAG, &dn_tx);
- if (err == 0) {
- dnode_setdirty(dn_tx, tx);
- dnode_rele(dn_tx, FTAG);
- }
- dmu_tx_commit(tx);
- } else {
- dmu_tx_abort(tx);
- }
- }
-
- dnode_rele(dn, FTAG);
- return (err);
- }
-
- offset = 0;
- l1span = 1ULL << (dn->dn_indblkshift - SPA_BLKPTRSHIFT +
- dn->dn_datablkshift);
- /*
- * Find the next L1 indirect that is not a hole.
- */
- while (dnode_next_offset(dn, 0, &offset, 2, 1, 0) == 0) {
- if (issig(JUSTLOOKING) && issig(FORREAL)) {
- err = SET_ERROR(EINTR);
- break;
- }
- if ((err = dmu_object_remap_one_indirect(os, dn,
- last_removal_txg, offset)) != 0) {
- break;
- }
- offset += l1span;
- }
-
- dnode_rele(dn, FTAG);
- return (err);
-}
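
Two details of the walk above are easy to miss. First, dnode_next_offset(dn, 0, &offset, 2, 1, 0) is the "find the next L1 indirect that is not a hole" step: minlvl = 2 makes it skip any range with no L1 block, and it returns non-zero (ESRCH) once the object is exhausted. Second, l1span is the number of file-offset bytes one L1 block covers, so adding it advances past the block just handled. A hedged sketch of the walk as a generic helper (walk_l1_blocks() and its visit callback are hypothetical):

    static int
    walk_l1_blocks(dnode_t *dn, int (*visit)(dnode_t *, uint64_t))
    {
        /* bytes of file offset addressed by one L1 indirect block */
        uint64_t l1span = 1ULL << (dn->dn_indblkshift - SPA_BLKPTRSHIFT +
            dn->dn_datablkshift);
        uint64_t offset = 0;
        int err = 0;

        while (dnode_next_offset(dn, 0, &offset, 2, 1, 0) == 0) {
            if ((err = visit(dn, offset)) != 0)
                break;
            offset += l1span;
        }
        return (err);
    }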
-
void
dmu_prealloc(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
dmu_tx_t *tx)
diff --git a/module/zfs/dmu_objset.c b/module/zfs/dmu_objset.c
index 6b8c380e5..4091ac355 100644
--- a/module/zfs/dmu_objset.c
+++ b/module/zfs/dmu_objset.c
@@ -1396,101 +1396,6 @@ dmu_objset_clone(const char *clone, const char *origin)
6, ZFS_SPACE_CHECK_NORMAL));
}
-static int
-dmu_objset_remap_indirects_impl(objset_t *os, uint64_t last_removed_txg)
-{
- int error = 0;
- uint64_t object = 0;
- while ((error = dmu_object_next(os, &object, B_FALSE, 0)) == 0) {
- error = dmu_object_remap_indirects(os, object,
- last_removed_txg);
- /*
- * If the ZPL removed the object before we managed to dnode_hold
- * it, we would get an ENOENT. If the ZPL declared its intent
- * to remove the object (dnode_free) before we managed to
- * dnode_hold it, we would get an EEXIST. In either case, we
- * want to continue remapping the other objects in the objset;
- * in all other cases, we want to break early.
- */
- if (error != 0 && error != ENOENT && error != EEXIST) {
- break;
- }
- }
- if (error == ESRCH) {
- error = 0;
- }
- return (error);
-}
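
For reference, the shape of an objset-wide walk like the one above: dmu_object_next() stores the next allocated object number in place and returns ESRCH once there are none left, which is why ESRCH is rewritten to 0 on the way out. A minimal sketch, with a hypothetical visit_object() standing in for the per-object work:

    uint64_t object = 0;
    int error;

    while ((error = dmu_object_next(os, &object, B_FALSE, 0)) == 0) {
        error = visit_object(os, object);
        /* tolerate objects racing with removal, per the comment above */
        if (error != 0 && error != ENOENT && error != EEXIST)
            break;
    }
    if (error == ESRCH)
        error = 0;  /* end of objset, not a failure */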
-
-int
-dmu_objset_remap_indirects(const char *fsname)
-{
- int error = 0;
- objset_t *os = NULL;
- uint64_t last_removed_txg;
- uint64_t remap_start_txg;
- dsl_dir_t *dd;
-
- error = dmu_objset_hold(fsname, FTAG, &os);
- if (error != 0) {
- return (error);
- }
- dd = dmu_objset_ds(os)->ds_dir;
-
- if (!spa_feature_is_enabled(dmu_objset_spa(os),
- SPA_FEATURE_OBSOLETE_COUNTS)) {
- dmu_objset_rele(os, FTAG);
- return (SET_ERROR(ENOTSUP));
- }
-
- if (dsl_dataset_is_snapshot(dmu_objset_ds(os))) {
- dmu_objset_rele(os, FTAG);
- return (SET_ERROR(EINVAL));
- }
-
- /*
- * If there has not been a removal, we're done.
- */
- last_removed_txg = spa_get_last_removal_txg(dmu_objset_spa(os));
- if (last_removed_txg == -1ULL) {
- dmu_objset_rele(os, FTAG);
- return (0);
- }
-
- /*
- * If we have remapped since the last removal, we're done.
- */
- if (dsl_dir_is_zapified(dd)) {
- uint64_t last_remap_txg;
- if (zap_lookup(spa_meta_objset(dmu_objset_spa(os)),
- dd->dd_object, DD_FIELD_LAST_REMAP_TXG,
- sizeof (last_remap_txg), 1, &last_remap_txg) == 0 &&
- last_remap_txg > last_removed_txg) {
- dmu_objset_rele(os, FTAG);
- return (0);
- }
- }
-
- dsl_dataset_long_hold(dmu_objset_ds(os), FTAG);
- dsl_pool_rele(dmu_objset_pool(os), FTAG);
-
- remap_start_txg = spa_last_synced_txg(dmu_objset_spa(os));
- error = dmu_objset_remap_indirects_impl(os, last_removed_txg);
- if (error == 0) {
- /*
- * We update the last_remap_txg to be the start txg so that
- * we can guarantee that every block older than last_remap_txg
- * that can be remapped has been remapped.
- */
- error = dsl_dir_update_last_remap_txg(dd, remap_start_txg);
- }
-
- dsl_dataset_long_rele(dmu_objset_ds(os), FTAG);
- dsl_dataset_rele(dmu_objset_ds(os), FTAG);
-
- return (error);
-}
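
Note the hold conversion in the removed function: dmu_objset_hold() returns with the pool configuration held, which should not stay held across a traversal that can span many txgs. The code therefore takes a long hold on the dataset (preventing its destruction) and releases the pool before the walk, then unwinds both afterwards. Condensed, assuming os was obtained with dmu_objset_hold():

    /* keep the dataset alive, but let go of the pool config */
    dsl_dataset_long_hold(dmu_objset_ds(os), FTAG);
    dsl_pool_rele(dmu_objset_pool(os), FTAG);

    /* ... long-running traversal of os ... */

    dsl_dataset_long_rele(dmu_objset_ds(os), FTAG);
    dsl_dataset_rele(dmu_objset_ds(os), FTAG);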
-
int
dmu_objset_snapshot_one(const char *fsname, const char *snapname)
{
diff --git a/module/zfs/dmu_tx.c b/module/zfs/dmu_tx.c
index 7d65e842f..4f489de5f 100644
--- a/module/zfs/dmu_tx.c
+++ b/module/zfs/dmu_tx.c
@@ -317,23 +317,6 @@ dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len)
}
void
-dmu_tx_hold_remap_l1indirect(dmu_tx_t *tx, uint64_t object)
-{
- dmu_tx_hold_t *txh;
-
- ASSERT(tx->tx_txg == 0);
- txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
- object, THT_WRITE, 0, 0);
- if (txh == NULL)
- return;
-
- dnode_t *dn = txh->txh_dnode;
- (void) zfs_refcount_add_many(&txh->txh_space_towrite,
- 1ULL << dn->dn_indblkshift, FTAG);
- dmu_tx_count_dnode(txh);
-}
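
The removed hold reserved one full indirect block of to-write space (1ULL << dn_indblkshift bytes) plus dnode overhead via dmu_tx_count_dnode(), i.e. what dirtying a single L1 dbuf can cost. Its one caller, removed from dmu.c above, paired it with a TXG_WAIT assignment; a condensed usage sketch:

    dmu_tx_t *tx = dmu_tx_create(os);
    int err;

    dmu_tx_hold_remap_l1indirect(tx, object);
    err = dmu_tx_assign(tx, TXG_WAIT);
    if (err == 0) {
        /* ... dbuf_dirty() the L1 dbuf here ... */
        dmu_tx_commit(tx);
    } else {
        dmu_tx_abort(tx);
    }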
-
-void
dmu_tx_hold_write_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, int len)
{
dmu_tx_hold_t *txh;
diff --git a/module/zfs/dsl_dir.c b/module/zfs/dsl_dir.c
index 6fb711f59..741ca232e 100644
--- a/module/zfs/dsl_dir.c
+++ b/module/zfs/dsl_dir.c
@@ -757,35 +757,6 @@ dsl_enforce_ds_ss_limits(dsl_dir_t *dd, zfs_prop_t prop, cred_t *cr)
return (enforce);
}
-static void
-dsl_dir_update_last_remap_txg_sync(void *varg, dmu_tx_t *tx)
-{
- ddulrt_arg_t *arg = varg;
- uint64_t last_remap_txg;
- dsl_dir_t *dd = arg->ddulrta_dd;
- objset_t *mos = dd->dd_pool->dp_meta_objset;
-
- dsl_dir_zapify(dd, tx);
- if (zap_lookup(mos, dd->dd_object, DD_FIELD_LAST_REMAP_TXG,
- sizeof (last_remap_txg), 1, &last_remap_txg) != 0 ||
- last_remap_txg < arg->ddlrta_txg) {
- VERIFY0(zap_update(mos, dd->dd_object, DD_FIELD_LAST_REMAP_TXG,
- sizeof (arg->ddlrta_txg), 1, &arg->ddlrta_txg, tx));
- }
-}
-
-int
-dsl_dir_update_last_remap_txg(dsl_dir_t *dd, uint64_t txg)
-{
- ddulrt_arg_t arg;
- arg.ddulrta_dd = dd;
- arg.ddlrta_txg = txg;
-
- return (dsl_sync_task(spa_name(dd->dd_pool->dp_spa),
- NULL, dsl_dir_update_last_remap_txg_sync, &arg,
- 1, ZFS_SPACE_CHECK_RESERVED));
-}
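
This is the standard two-piece dsl_sync_task() idiom: the open-context entry point only packages arguments, and all metadata mutation happens in the sync callback, where the less-than check keeps DD_FIELD_LAST_REMAP_TXG monotonic even if a caller races in with a stale txg. A generic, hypothetical sketch of the idiom (the my_* names are illustrative, not ZFS interfaces):

    typedef struct {
        uint64_t msa_val;
    } my_sync_arg_t;

    static void
    my_field_update_sync(void *varg, dmu_tx_t *tx)
    {
        my_sync_arg_t *arg = varg;

        /* mutate pool metadata here, e.g. via zap_update(..., tx) */
        (void) arg;
        (void) tx;
    }

    static int
    my_field_update(spa_t *spa, uint64_t val)
    {
        my_sync_arg_t arg = { .msa_val = val };

        /* NULL check function: no open-context validation needed */
        return (dsl_sync_task(spa_name(spa), NULL,
            my_field_update_sync, &arg, 1, ZFS_SPACE_CHECK_RESERVED));
    }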
-
/*
* Check if adding additional child filesystem(s) would exceed any filesystem
* limits or adding additional snapshot(s) would exceed any snapshot limits.
@@ -1083,19 +1054,6 @@ dsl_dir_get_snapshot_count(dsl_dir_t *dd, uint64_t *count)
}
}
-int
-dsl_dir_get_remaptxg(dsl_dir_t *dd, uint64_t *count)
-{
- if (dsl_dir_is_zapified(dd)) {
- objset_t *os = dd->dd_pool->dp_meta_objset;
- return (zap_lookup(os, dd->dd_object, DD_FIELD_LAST_REMAP_TXG,
- sizeof (*count), 1, count));
- } else {
- return (ENOENT);
- }
-
-}
-
void
dsl_dir_stats(dsl_dir_t *dd, nvlist_t *nv)
{
@@ -1127,10 +1085,6 @@ dsl_dir_stats(dsl_dir_t *dd, nvlist_t *nv)
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_SNAPSHOT_COUNT,
count);
}
- if (dsl_dir_get_remaptxg(dd, &count) == 0) {
- dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REMAPTXG,
- count);
- }
if (dsl_dir_is_clone(dd)) {
char buf[ZFS_MAX_DATASET_NAME_LEN];
diff --git a/module/zfs/zfs_ioctl.c b/module/zfs/zfs_ioctl.c
index 2b67761fd..c2b75cc98 100644
--- a/module/zfs/zfs_ioctl.c
+++ b/module/zfs/zfs_ioctl.c
@@ -1047,14 +1047,6 @@ zfs_secpolicy_bookmark(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
/* ARGSUSED */
static int
-zfs_secpolicy_remap(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
-{
- return (zfs_secpolicy_write_perms(zc->zc_name,
- ZFS_DELEG_PERM_REMAP, cr));
-}
-
-/* ARGSUSED */
-static int
zfs_secpolicy_destroy_bookmarks(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
nvpair_t *pair, *nextpair;
@@ -3447,11 +3439,8 @@ static const zfs_ioc_key_t zfs_keys_remap[] = {
static int
zfs_ioc_remap(const char *fsname, nvlist_t *innvl, nvlist_t *outnvl)
{
- if (strchr(fsname, '@') ||
- strchr(fsname, '%'))
- return (SET_ERROR(EINVAL));
-
- return (dmu_objset_remap_indirects(fsname));
+ /* This IOCTL is no longer supported. */
+ return (0);
}
/*
@@ -6790,7 +6779,7 @@ zfs_ioctl_init(void)
zfs_keys_clone, ARRAY_SIZE(zfs_keys_clone));
zfs_ioctl_register("remap", ZFS_IOC_REMAP,
- zfs_ioc_remap, zfs_secpolicy_remap, DATASET_NAME,
+ zfs_ioc_remap, zfs_secpolicy_none, DATASET_NAME,
POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_FALSE, B_TRUE,
zfs_keys_remap, ARRAY_SIZE(zfs_keys_remap));