author      George Wilson <[email protected]>        2014-03-31 17:22:55 -0700
committer   Brian Behlendorf <[email protected]>    2014-05-06 09:46:04 -0700
commit      aa7d06a98a2b3b0d864aadd310113c52b946b842 (patch)
tree        e49591df2df73594f9f0ac0a497514d6d6ce0bb8 /module/zfs/metaslab.c
parent      cc79a5c263802b58de62b190e264c7f61b6235c9 (diff)
Illumos #4101 finer-grained control of metaslab_debug
Today the metaslab_debug logic performs two tasks:

  - load all metaslabs on import/open
  - don't unload metaslabs at the end of spa_sync

This change provides knobs for each of these independently.

References:
  https://illumos.org/issues/4101
  https://github.com/illumos/illumos-gate/commit/0713e23

Notes:
1) This is a small piece of the metaslab improvement patch from
   Illumos. It was worth bringing over before the rest, since it's
   low risk and it can be useful on fragmented pools (e.g. Lustre
   MDTs). metaslab_debug_unload would give the performance benefit
   of the old metaslab_debug option without causing unwanted delay
   during pool import.

Ported-by: Ned Bass <[email protected]>
Signed-off-by: Brian Behlendorf <[email protected]>
Closes #2227
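As a usage sketch (not part of the commit itself): because both knobs are
registered with module_param(..., 0644), they become writable sysfs entries
once the module is loaded, following the standard Linux convention
/sys/module/zfs/parameters/<name>. The small userspace helper below assumes
that layout; the paths and the helper itself are illustrative, not shipped
with ZFS.

/*
 * Hypothetical userspace helper: toggles the two tunables via sysfs.
 * Assumes the standard /sys/module/zfs/parameters/ layout created by
 * module_param(); paths may differ on non-standard builds.
 */
#include <stdio.h>
#include <stdlib.h>

static int
write_param(const char *name, int val)
{
	char path[256];
	FILE *f;

	(void) snprintf(path, sizeof (path),
	    "/sys/module/zfs/parameters/%s", name);
	f = fopen(path, "w");
	if (f == NULL) {
		perror(path);
		return (-1);
	}
	(void) fprintf(f, "%d\n", val);
	(void) fclose(f);
	return (0);
}

int
main(void)
{
	/*
	 * Keep loaded metaslabs in core (the performance benefit of the
	 * old metaslab_debug) without forcing a full load at import time.
	 */
	if (write_param("metaslab_debug_unload", 1) != 0)
		return (EXIT_FAILURE);
	if (write_param("metaslab_debug_load", 0) != 0)
		return (EXIT_FAILURE);
	return (EXIT_SUCCESS);
}

This combination is exactly the scenario called out in the notes above:
metaslabs stay cached once touched, but pool import is not delayed.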
Diffstat (limited to 'module/zfs/metaslab.c')
-rw-r--r--  module/zfs/metaslab.c | 20 ++++++++++++++------
1 file changed, 14 insertions(+), 6 deletions(-)
diff --git a/module/zfs/metaslab.c b/module/zfs/metaslab.c
index 6356f7950..32ffdfb98 100644
--- a/module/zfs/metaslab.c
+++ b/module/zfs/metaslab.c
@@ -81,9 +81,14 @@ int zfs_mg_alloc_failures = 0;
int zfs_mg_noalloc_threshold = 0;
/*
- * Metaslab debugging: when set, keeps all space maps in core to verify frees.
+ * When set will load all metaslabs when pool is first opened.
*/
-int metaslab_debug = 0;
+int metaslab_debug_load = 0;
+
+/*
+ * When set will prevent metaslabs from being unloaded.
+ */
+int metaslab_debug_unload = 0;
/*
* Minimum size which forces the dynamic allocator to change
@@ -846,7 +851,7 @@ metaslab_init(metaslab_group_t *mg, space_map_obj_t *smo,
metaslab_group_add(mg, msp);
- if (metaslab_debug && smo->smo_object != 0) {
+ if (metaslab_debug_load && smo->smo_object != 0) {
mutex_enter(&msp->ms_lock);
VERIFY(space_map_load(msp->ms_map, mg->mg_class->mc_ops,
SM_FREE, smo, spa_meta_objset(vd->vdev_spa)) == 0);
@@ -1407,7 +1412,7 @@ metaslab_sync_done(metaslab_t *msp, uint64_t txg)
if (msp->ms_allocmap[(txg + t) & TXG_MASK]->sm_space)
evictable = 0;
- if (evictable && !metaslab_debug)
+ if (evictable && !metaslab_debug_unload)
space_map_unload(sm);
}
@@ -2109,6 +2114,9 @@ metaslab_check_free(spa_t *spa, const blkptr_t *bp)
}
#if defined(_KERNEL) && defined(HAVE_SPL)
-module_param(metaslab_debug, int, 0644);
-MODULE_PARM_DESC(metaslab_debug, "keep space maps in core to verify frees");
+module_param(metaslab_debug_load, int, 0644);
+MODULE_PARM_DESC(metaslab_debug_load, "load all metaslabs during pool import");
+
+module_param(metaslab_debug_unload, int, 0644);
+MODULE_PARM_DESC(metaslab_debug_unload, "prevent metaslabs from being unloaded");
#endif /* _KERNEL && HAVE_SPL */
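To make the split concrete, here is a minimal, self-contained sketch of the
pattern the patch establishes: one knob gates eager loading at pool open, the
other gates unloading at the end of sync. Only the two tunable names come
from the patch; the metaslab type, the stub functions, and main() are
invented for illustration and are not the real ZFS implementation.

/*
 * Sketch of the two-knob pattern from this patch. The metaslab type
 * and load/unload stubs are invented for illustration only.
 */
#include <stdio.h>

static int metaslab_debug_load = 0;	/* load everything at open */
static int metaslab_debug_unload = 0;	/* never unload after sync */

typedef struct metaslab {
	int ms_loaded;
	int ms_in_use;	/* stand-in for "allocmap still has space" */
} metaslab_t;

static void
metaslab_load_stub(metaslab_t *msp)
{
	msp->ms_loaded = 1;
	(void) printf("loaded\n");
}

static void
metaslab_open_stub(metaslab_t *msp)
{
	/* The old metaslab_debug=1 load-at-open task, gated separately. */
	if (metaslab_debug_load)
		metaslab_load_stub(msp);
}

static void
metaslab_sync_done_stub(metaslab_t *msp)
{
	/* Unload only when idle and unloading hasn't been pinned off. */
	if (msp->ms_loaded && !msp->ms_in_use && !metaslab_debug_unload) {
		msp->ms_loaded = 0;
		(void) printf("unloaded\n");
	}
}

int
main(void)
{
	metaslab_t ms = { 0, 0 };

	metaslab_debug_unload = 1;	/* keep metaslabs in core */
	metaslab_open_stub(&ms);	/* no eager load: debug_load=0 */
	ms.ms_loaded = 1;		/* loaded on demand elsewhere */
	metaslab_sync_done_stub(&ms);	/* stays loaded */
	return (0);
}

The design point is that the old single flag coupled two unrelated costs:
paying the full load up front at import, and holding everything in memory
afterward. Splitting it lets each cost be opted into independently.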