author	George Wilson <[email protected]>	2013-06-14 22:30:35 -0500
committer	Brian Behlendorf <[email protected]>	2013-06-19 16:22:39 -0700
commit	e51be06697762215dc3b679f8668987034a5a048 (patch)
tree	84446c5dbac3de2f8f9bee1fe67bb41096a6dfc8 /include/sys
parent	c99c90015ece64746e20b74245caca41d1dbefe1 (diff)
Illumos #3552, #3564
3552 condensing one space map burns 3 seconds of CPU in spa_sync() thread
3564 spa_sync() spends 5-10% of its time in metaslab_sync() (when not condensing)
Reviewed by: Adam Leventhal <[email protected]>
Reviewed by: Dan Kimmel <[email protected]>
Reviewed by: Matthew Ahrens <[email protected]>
Approved by: Richard Lowe <[email protected]>
References:
  illumos/illumos-gate@16a4a8074274d2d7cc408589cf6359f4a378c861
  https://www.illumos.org/issues/3552
  https://www.illumos.org/issues/3564
Ported-by: Tim Chase <[email protected]>
Signed-off-by: Brian Behlendorf <[email protected]>
Closes #1513
Diffstat (limited to 'include/sys')
-rw-r--r--	include/sys/metaslab_impl.h	32
-rw-r--r--	include/sys/space_map.h	7
2 files changed, 29 insertions(+), 10 deletions(-)
diff --git a/include/sys/metaslab_impl.h b/include/sys/metaslab_impl.h
index 386f6240c..a36baedd4 100644
--- a/include/sys/metaslab_impl.h
+++ b/include/sys/metaslab_impl.h
@@ -67,20 +67,38 @@ struct metaslab_group {
};
/*
- * Each metaslab's free space is tracked in space map object in the MOS,
- * which is only updated in syncing context. Each time we sync a txg,
+ * Each metaslab maintains an in-core free map (ms_map) that contains the
+ * current list of free segments. As blocks are allocated, the allocated
+ * segment is removed from the ms_map and added to a per txg allocation map.
+ * As blocks are freed, they are added to the per txg free map. These per
+ * txg maps allow us to process all allocations and frees in syncing context
+ * where it is safe to update the on-disk space maps.
+ *
+ * Each metaslab's free space is tracked in a space map object in the MOS,
+ * which is only updated in syncing context. Each time we sync a txg,
* we append the allocs and frees from that txg to the space map object.
* When the txg is done syncing, metaslab_sync_done() updates ms_smo
- * to ms_smo_syncing. Everything in ms_smo is always safe to allocate.
+ * to ms_smo_syncing. Everything in ms_smo is always safe to allocate.
+ *
+ * To load the in-core free map we read the space map object from disk.
+ * This object contains a series of alloc and free records that are
+ * combined to make up the list of all free segments in this metaslab. These
+ * segments are represented in-core by the ms_map and are stored in an
+ * AVL tree.
+ *
+ * As the space map object grows (as a result of the appends) it will
+ * eventually become space-inefficient. When the space map object is
+ * zfs_condense_pct/100 times the size of the minimal on-disk representation,
+ * we rewrite it in its minimized form.
*/
struct metaslab {
kmutex_t ms_lock; /* metaslab lock */
space_map_obj_t ms_smo; /* synced space map object */
space_map_obj_t ms_smo_syncing; /* syncing space map object */
- space_map_t ms_allocmap[TXG_SIZE]; /* allocated this txg */
- space_map_t ms_freemap[TXG_SIZE]; /* freed this txg */
- space_map_t ms_defermap[TXG_DEFER_SIZE]; /* deferred frees */
- space_map_t ms_map; /* in-core free space map */
+ space_map_t *ms_allocmap[TXG_SIZE]; /* allocated this txg */
+ space_map_t *ms_freemap[TXG_SIZE]; /* freed this txg */
+ space_map_t *ms_defermap[TXG_DEFER_SIZE]; /* deferred frees */
+ space_map_t *ms_map; /* in-core free space map */
int64_t ms_deferspace; /* sum of ms_defermap[] space */
uint64_t ms_weight; /* weight vs. others in group */
metaslab_group_t *ms_group; /* metaslab group */
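
[Editor's note] The rewritten comment above describes two mechanisms: per-txg
bookkeeping (an allocated segment moves from ms_map into
ms_allocmap[txg & TXG_MASK], while frees land in ms_freemap[txg & TXG_MASK])
and condensing once the on-disk object reaches zfs_condense_pct/100 times its
minimal size. Below is a hedged C sketch of both. The helper names, the extern
declaration of zfs_condense_pct, and the one-64-bit-word-per-segment
minimal-size estimate are illustrative assumptions, not code from this commit;
the real logic lives outside this header diff, in metaslab.c.

#include <sys/metaslab_impl.h>
#include <sys/space_map.h>

extern int zfs_condense_pct;	/* assumed extern; percent tunable */

/*
 * Sketch: record an allocation in syncing-safe per-txg state, as the
 * comment describes: remove the segment from the in-core free map and
 * add it to this txg's allocation map.
 */
static void
metaslab_alloc_sketch(metaslab_t *msp, uint64_t start, uint64_t size,
    uint64_t txg)
{
	ASSERT(MUTEX_HELD(&msp->ms_lock));

	space_map_remove(msp->ms_map, start, size);
	space_map_add(msp->ms_allocmap[txg & TXG_MASK], start, size);
}

/*
 * Sketch: the condense heuristic. Estimate the minimal on-disk size as
 * one 64-bit record per in-core free segment, and rewrite once the
 * object is zfs_condense_pct/100 times that size.
 */
static boolean_t
metaslab_should_condense_sketch(space_map_obj_t *smo, space_map_t *sm)
{
	uint64_t minimal = avl_numnodes(&sm->sm_root) * sizeof (uint64_t);

	return (smo->smo_objsize >= zfs_condense_pct * minimal / 100);
}
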
diff --git a/include/sys/space_map.h b/include/sys/space_map.h
index cbe75afd1..2da80d29b 100644
--- a/include/sys/space_map.h
+++ b/include/sys/space_map.h
@@ -40,17 +40,17 @@ extern "C" {
typedef const struct space_map_ops space_map_ops_t;
typedef struct space_map {
- avl_tree_t sm_root; /* AVL tree of map segments */
+ avl_tree_t sm_root; /* offset-ordered segment AVL tree */
uint64_t sm_space; /* sum of all segments in the map */
uint64_t sm_start; /* start of map */
uint64_t sm_size; /* size of map */
uint8_t sm_shift; /* unit shift */
- uint8_t sm_pad[3]; /* unused */
uint8_t sm_loaded; /* map loaded? */
uint8_t sm_loading; /* map loading? */
+ uint8_t sm_condensing; /* map condensing? */
kcondvar_t sm_load_cv; /* map load completion */
space_map_ops_t *sm_ops; /* space map block picker ops vector */
- avl_tree_t *sm_pp_root; /* picker-private AVL tree */
+ avl_tree_t *sm_pp_root; /* size-ordered, picker-private tree */
void *sm_ppd; /* picker-private data */
kmutex_t *sm_lock; /* pointer to lock that protects map */
} space_map_t;
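
[Editor's note] The new sm_condensing flag lets callers detect that a map is
being rewritten, alongside the existing sm_loading/sm_loaded flags and the
sm_load_cv condvar that coordinate a single loader with concurrent waiters. A
minimal sketch of that load-wait pattern, modeled on the fields above; the
real helper lives in space_map.c and may differ:

/*
 * Sketch: block until any in-flight load of this map completes.
 * Callers hold *sm_lock; the loader signals sm_load_cv when done.
 */
static void
space_map_load_wait_sketch(space_map_t *sm)
{
	ASSERT(MUTEX_HELD(sm->sm_lock));

	while (sm->sm_loading) {
		ASSERT(!sm->sm_loaded);	/* can't be loaded mid-load */
		cv_wait(&sm->sm_load_cv, sm->sm_lock);
	}
}
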
@@ -149,6 +149,7 @@ extern void space_map_add(space_map_t *sm, uint64_t start, uint64_t size);
extern void space_map_remove(space_map_t *sm, uint64_t start, uint64_t size);
extern boolean_t space_map_contains(space_map_t *sm,
uint64_t start, uint64_t size);
+extern void space_map_swap(space_map_t **msrc, space_map_t **mdest);
extern void space_map_vacate(space_map_t *sm,
space_map_func_t *func, space_map_t *mdest);
extern void space_map_walk(space_map_t *sm,
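
[Editor's note] space_map_swap() is new in this hunk. Its prototype (double
pointers, no return value) suggests a constant-time exchange of two in-core
maps, which condensing can use to install a freshly built map without copying
segments. A hedged sketch consistent with that prototype; the actual
implementation is in space_map.c and may carry stricter assertions:

/*
 * Sketch: exchange two in-core space maps by swapping the pointers;
 * no segments are copied. Assumes both maps share the same lock.
 */
static void
space_map_swap_sketch(space_map_t **msrc, space_map_t **mdest)
{
	space_map_t *tmp;

	ASSERT(MUTEX_HELD((*msrc)->sm_lock));
	ASSERT3P((*msrc)->sm_lock, ==, (*mdest)->sm_lock);

	tmp = *msrc;
	*msrc = *mdest;
	*mdest = tmp;
}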