Diffstat (limited to 'module/zfs/spa_misc.c')
 module/zfs/spa_misc.c | 82 ++++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 77 insertions(+), 5 deletions(-)
diff --git a/module/zfs/spa_misc.c b/module/zfs/spa_misc.c
index 17f8c1638..5c6e82567 100644
--- a/module/zfs/spa_misc.c
+++ b/module/zfs/spa_misc.c
@@ -243,7 +243,12 @@ kmem_cache_t *spa_buffer_pool;
int spa_mode_global;
#ifdef ZFS_DEBUG
-int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SET_ERROR | ZFS_DEBUG_SPA);
+/*
+ * Everything except dprintf, set_error, spa, and indirect_remap is on
+ * by default in debug builds.
+ */
+int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SET_ERROR |
+ ZFS_DEBUG_SPA | ZFS_DEBUG_INDIRECT_REMAP);
#else
int zfs_flags = 0;
#endif
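The new default mask above is just a bitwise complement: in debug builds every ZFS_DEBUG_* class is enabled except the ones explicitly listed. A minimal userland sketch of the same idiom, using made-up flag values rather than the real ones from zfs_debug.h:

	#include <stdio.h>

	/* Stand-in bits for illustration; the real values live in zfs_debug.h. */
	#define	DBG_DPRINTF		(1 << 0)
	#define	DBG_SET_ERROR		(1 << 1)
	#define	DBG_SPA			(1 << 2)
	#define	DBG_INDIRECT_REMAP	(1 << 3)
	#define	DBG_MODIFY		(1 << 4)	/* example of a class left on */

	static int dbg_flags = ~(DBG_DPRINTF | DBG_SET_ERROR |
	    DBG_SPA | DBG_INDIRECT_REMAP);

	int
	main(void)
	{
		/* Everything not named in the complement is on by default. */
		printf("MODIFY on:  %d\n", (dbg_flags & DBG_MODIFY) != 0);	/* 1 */
		printf("DPRINTF on: %d\n", (dbg_flags & DBG_DPRINTF) != 0);	/* 0 */
		return (0);
	}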
@@ -460,7 +465,7 @@ spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw)
(void) refcount_add(&scl->scl_count, tag);
mutex_exit(&scl->scl_lock);
}
- ASSERT(wlocks_held <= locks);
+ ASSERT3U(wlocks_held, <=, locks);
}
void
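The ASSERT3U change above is purely diagnostic: the three-operand form evaluates both operands, so a failure reports the actual values rather than just the expression text. A simplified userland stand-in (not the real SPL/ZFS macro) showing the idea:

	#include <stdio.h>
	#include <stdlib.h>
	#include <stdint.h>

	/* Sketch of a value-printing assert; not the SPL implementation. */
	#define	MY_ASSERT3U(left, op, right)					\
		do {								\
			uint64_t _l = (left), _r = (right);			\
			if (!(_l op _r)) {					\
				fprintf(stderr, "%s %s %s failed "		\
				    "(%llu %s %llu)\n", #left, #op, #right,	\
				    (unsigned long long)_l, #op,		\
				    (unsigned long long)_r);			\
				abort();					\
			}							\
		} while (0)

	int
	main(void)
	{
		uint64_t wlocks_held = 0x3, locks = 0x7;

		/* Passes; on failure both values would be printed. */
		MY_ASSERT3U(wlocks_held, <=, locks);
		return (0);
	}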
@@ -1136,7 +1141,7 @@ spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error, char *tag)
* If the config changed, update the config cache.
*/
if (config_changed)
- spa_config_sync(spa, B_FALSE, B_TRUE);
+ spa_write_cachefile(spa, B_FALSE, B_TRUE);
}
/*
@@ -1228,7 +1233,7 @@ spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error)
*/
if (config_changed) {
mutex_enter(&spa_namespace_lock);
- spa_config_sync(spa, B_FALSE, B_TRUE);
+ spa_write_cachefile(spa, B_FALSE, B_TRUE);
mutex_exit(&spa_namespace_lock);
}
@@ -1306,7 +1311,7 @@ spa_rename(const char *name, const char *newname)
/*
* Sync the updated config cache.
*/
- spa_config_sync(spa, B_FALSE, B_TRUE);
+ spa_write_cachefile(spa, B_FALSE, B_TRUE);
spa_close(spa, FTAG);
@@ -1525,6 +1530,12 @@ spa_is_initializing(spa_t *spa)
return (spa->spa_is_initializing);
}
+boolean_t
+spa_indirect_vdevs_loaded(spa_t *spa)
+{
+ return (spa->spa_indirect_vdevs_loaded);
+}
+
blkptr_t *
spa_get_rootblkptr(spa_t *spa)
{
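spa_indirect_vdevs_loaded() added above is a thin accessor for spa->spa_indirect_vdevs_loaded. A hypothetical caller (not part of this change; assumes the in-tree sys/spa.h) might gate remap-related work on it like this:

	#include <sys/spa.h>

	/* Hypothetical helper, for illustration only. */
	static boolean_t
	example_can_consult_mappings(spa_t *spa)
	{
		/*
		 * Indirect mapping state is not usable until the
		 * indirect vdevs have finished loading.
		 */
		return (spa_indirect_vdevs_loaded(spa));
	}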
@@ -1683,6 +1694,24 @@ spa_update_dspace(spa_t *spa)
{
spa->spa_dspace = metaslab_class_get_dspace(spa_normal_class(spa)) +
ddt_get_dedup_dspace(spa);
+ if (spa->spa_vdev_removal != NULL) {
+ /*
+ * We can't allocate from the removing device, so
+ * subtract its size. This prevents the DMU/DSL from
+ * filling up the (now smaller) pool while we are in the
+ * middle of removing the device.
+ *
+ * Note that the DMU/DSL doesn't actually know or care
+ * how much space is allocated (it does its own tracking
+ * of how much space has been logically used). So it
+ * doesn't matter that the data we are moving may be
+ * allocated twice (on the old device and the new
+ * device).
+ */
+ vdev_t *vd = spa->spa_vdev_removal->svr_vdev;
+ spa->spa_dspace -= spa_deflate(spa) ?
+ vd->vdev_stat.vs_dspace : vd->vdev_stat.vs_space;
+ }
}
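A quick worked example of the subtraction above, with made-up numbers: when deflated accounting is in effect, it is the removing vdev's vs_dspace, not its raw vs_space, that comes out of spa_dspace.

	#include <stdio.h>
	#include <stdint.h>

	int
	main(void)
	{
		uint64_t dspace = 900ULL << 30;		/* pool dspace: 900 GiB */
		uint64_t vs_space = 100ULL << 30;	/* removing vdev, raw */
		uint64_t vs_dspace = 97ULL << 30;	/* removing vdev, deflated */
		int deflate = 1;			/* pretend spa_deflate() != 0 */

		dspace -= deflate ? vs_dspace : vs_space;
		printf("%llu GiB usable during removal\n",
		    (unsigned long long)(dspace >> 30));	/* prints 803 */
		return (0);
	}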
/*
@@ -2105,6 +2134,49 @@ spa_maxblocksize(spa_t *spa)
return (SPA_OLD_MAXBLOCKSIZE);
}
+
+/*
+ * Returns the txg that the last device removal completed. No indirect mappings
+ * have been added since this txg.
+ */
+uint64_t
+spa_get_last_removal_txg(spa_t *spa)
+{
+ uint64_t vdevid;
+ uint64_t ret = -1ULL;
+
+ spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
+ /*
+ * sr_prev_indirect_vdev is only modified while holding all the
+ * config locks, so it is sufficient to hold SCL_VDEV as reader when
+ * examining it.
+ */
+ vdevid = spa->spa_removing_phys.sr_prev_indirect_vdev;
+
+ while (vdevid != -1ULL) {
+ vdev_t *vd = vdev_lookup_top(spa, vdevid);
+ vdev_indirect_births_t *vib = vd->vdev_indirect_births;
+
+ ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
+
+ /*
+ * If the removal did not remap any data, we don't care.
+ */
+ if (vdev_indirect_births_count(vib) != 0) {
+ ret = vdev_indirect_births_last_entry_txg(vib);
+ break;
+ }
+
+ vdevid = vd->vdev_indirect_config.vic_prev_indirect_vdev;
+ }
+ spa_config_exit(spa, SCL_VDEV, FTAG);
+
+ IMPLY(ret != -1ULL,
+ spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL));
+
+ return (ret);
+}
+
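A hypothetical caller sketch for the new helper (not part of this change; assumes the in-tree headers): since no indirect mappings have been added after the returned txg, a block born later cannot be affected by remapping, so remap handling could be skipped for it.

	#include <sys/spa.h>

	/* Hypothetical caller, for illustration only. */
	static boolean_t
	example_bp_may_be_remapped(spa_t *spa, const blkptr_t *bp)
	{
		uint64_t last_removal_txg = spa_get_last_removal_txg(spa);

		/* -1ULL means no removal has ever added indirect mappings. */
		if (last_removal_txg == -1ULL)
			return (B_FALSE);

		/* Only blocks written no later than that txg can be affected. */
		return (bp->blk_birth <= last_removal_txg);
	}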
int
spa_maxdnodesize(spa_t *spa)
{