author     Serapheim Dimitropoulos <[email protected]>  2016-12-16 14:11:29 -0800
committer  Brian Behlendorf <[email protected]>           2018-06-26 10:07:42 -0700
commit     d2734cce68cf740e015312314415f9034c67851c (patch)
tree       b7a140a3cf2a19bb7c88f2d277f3b5a33c121cea /include/sys/metaslab_impl.h
parent     88eaf610d9c7056f0946e5090cba1e6288ff2b70 (diff)
OpenZFS 9166 - zfs storage pool checkpoint
Details about the motivation of this feature and its usage can be found
in this blog post:

    https://sdimitro.github.io/post/zpool-checkpoint/

A lightning talk on this feature can be found here:

    https://www.youtube.com/watch?v=fPQA8K40jAM

Implementation details can be found in the big block comment of
spa_checkpoint.c.

Side changes that are relevant to this commit but not explained
elsewhere:

* Renames members of "struct metaslab" trees to be shorter without
  losing meaning.

* space_map_{alloc,truncate}() accept a block size as a parameter.
  The reason is that in the current state all space maps that we
  allocate through the DMU use a global tunable (space_map_blksz),
  which defaults to 4KB. This is fine for metaslab space maps in terms
  of bandwidth, since they are scattered all over the disk. But for
  other space maps this default is probably not what we want. Examples
  are device removal's vdev_obsolete_sm or vdev_checkpoint_sm from this
  review. Both of these have a 1:1 relationship with each vdev and
  could benefit from a bigger block size.

Porting notes:

* The part of dsl_scan_sync() which handles async destroys has been
  moved into the new dsl_process_async_destroys() function.

* Remove "VERIFY(!(flags & FWRITE))" in "kernel.c" so zhack can write
  to block-device-backed pools.

* ZTS:
  * Fix get_txg() in zpool_sync_001_pos due to "checkpoint_txg".
  * Don't use large dd block sizes on /dev/urandom under Linux in
    checkpoint_capacity.
  * Adopt Delphix-OS's setting of spa_asize_inflation = 4
    (SPA_DVAS_PER_BP + 1) for the checkpoint_capacity test to speed
    its attempts to fill the pool.
  * Create the base and nested pools with sync=disabled to speed up
    the "setup" phase.
  * Clear labels in the test pool between checkpoint tests to avoid
    duplicate pool issues.
  * The import_rewind_device_replaced test has been marked as "known
    to fail" for the reasons listed in its DISCLAIMER.

* New module parameters:
  * zfs_spa_discard_memory_limit
  * zfs_remove_max_bytes_pause (not documented - debugging only)
  * vdev_max_ms_count (formerly metaslabs_per_vdev)
  * vdev_min_ms_count

Authored by: Serapheim Dimitropoulos <[email protected]>
Reviewed by: Matthew Ahrens <[email protected]>
Reviewed by: John Kennedy <[email protected]>
Reviewed by: Dan Kimmel <[email protected]>
Reviewed by: Brian Behlendorf <[email protected]>
Approved by: Richard Lowe <[email protected]>
Ported-by: Tim Chase <[email protected]>
Signed-off-by: Tim Chase <[email protected]>
OpenZFS-issue: https://illumos.org/issues/9166
OpenZFS-commit: https://github.com/openzfs/openzfs/commit/7159fdb8
Closes #7570
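As a rough sketch of the space_map_{alloc,truncate}() change described
above (the exact OpenZFS prototypes may differ slightly, and the helper
function and the 128KB value below are hypothetical):

    /* Both routines now take an explicit block size instead of relying
     * solely on the global space_map_blksz tunable (default 4KB). */
    uint64_t space_map_alloc(objset_t *os, int blocksize, dmu_tx_t *tx);
    void space_map_truncate(space_map_t *sm, int blocksize, dmu_tx_t *tx);

    /* Hypothetical caller: a per-vdev space map such as vdev_checkpoint_sm
     * has a 1:1 relationship with its vdev and can afford a bigger block
     * size than the scattered metaslab space maps. */
    static uint64_t
    alloc_vdev_checkpoint_sm_obj(objset_t *os, dmu_tx_t *tx)
    {
            return (space_map_alloc(os, 128 * 1024, tx));
    }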
Diffstat (limited to 'include/sys/metaslab_impl.h')
-rw-r--r--  include/sys/metaslab_impl.h  53
1 file changed, 28 insertions(+), 25 deletions(-)
diff --git a/include/sys/metaslab_impl.h b/include/sys/metaslab_impl.h
index 76f670a4d..dafd2b231 100644
--- a/include/sys/metaslab_impl.h
+++ b/include/sys/metaslab_impl.h
@@ -24,7 +24,7 @@
  */
 
 /*
- * Copyright (c) 2011, 2016 by Delphix. All rights reserved.
+ * Copyright (c) 2011, 2017 by Delphix. All rights reserved.
  */
 
 #ifndef _SYS_METASLAB_IMPL_H
@@ -255,16 +255,16 @@ struct metaslab_group {
 
 /*
  * Each metaslab maintains a set of in-core trees to track metaslab
- * operations. The in-core free tree (ms_tree) contains the list of
+ * operations. The in-core free tree (ms_allocatable) contains the list of
  * free segments which are eligible for allocation. As blocks are
- * allocated, the allocated segments are removed from the ms_tree and
- * added to a per txg allocation tree (ms_alloctree). As blocks are
- * freed, they are added to the free tree (ms_freeingtree). These trees
+ * allocated, the allocated segments are removed from the ms_allocatable and
+ * added to a per txg allocation tree (ms_allocating). As blocks are
+ * freed, they are added to the free tree (ms_freeing). These trees
  * allow us to process all allocations and frees in syncing context
  * where it is safe to update the on-disk space maps. An additional set
  * of in-core trees is maintained to track deferred frees
- * (ms_defertree). Once a block is freed it will move from the
- * ms_freedtree to the ms_defertree. A deferred free means that a block
+ * (ms_defer). Once a block is freed it will move from the
+ * ms_freed to the ms_defer tree. A deferred free means that a block
  * has been freed but cannot be used by the pool until TXG_DEFER_SIZE
  * transaction groups later. For example, a block that is freed in txg
  * 50 will not be available for reallocation until txg 52 (50 +
@@ -278,14 +278,14 @@ struct metaslab_group {
  *      ALLOCATE
  *         |
  *         V
- * free segment (ms_tree) -----> ms_alloctree[4] ----> (write to space map)
+ * free segment (ms_allocatable) -> ms_allocating[4] -> (write to space map)
  *                                       ^
- *                                       |       ms_freeingtree <--- FREE
- *                                       |              |
- *                                       |              v
- *                                       |       ms_freedtree
- *                                       |              |
- *                                       +-------- ms_defertree[2] <-------+---------> (write to space map)
+ *                                       |       ms_freeing <--- FREE
+ *                                       |           |
+ *                                       |           v
+ *                                       |       ms_freed
+ *                                       |           |
+ *                                       +-------- ms_defer[2] <-------+-------> (write to space map)
  *
  *
  * Each metaslab's space is tracked in a single space map in the MOS,
@@ -296,8 +296,8 @@ struct metaslab_group {
  * To load the in-core free tree we read the space map from disk. This
  * object contains a series of alloc and free records that are combined
  * to make up the list of all free segments in this metaslab. These
- * segments are represented in-core by the ms_tree and are stored in an
- * AVL tree.
+ * segments are represented in-core by the ms_allocatable and are stored
+ * in an AVL tree.
  *
  * As the space map grows (as a result of the appends) it will
  * eventually become space-inefficient. When the metaslab's in-core
@@ -317,20 +317,22 @@ struct metaslab {
 	uint64_t	ms_size;
 	uint64_t	ms_fragmentation;
 
-	range_tree_t	*ms_alloctree[TXG_SIZE];
-	range_tree_t	*ms_tree;
+	range_tree_t	*ms_allocating[TXG_SIZE];
+	range_tree_t	*ms_allocatable;
 
 	/*
 	 * The following range trees are accessed only from syncing context.
 	 * ms_free*tree only have entries while syncing, and are empty
 	 * between syncs.
 	 */
-	range_tree_t	*ms_freeingtree;	/* to free this syncing txg */
-	range_tree_t	*ms_freedtree;	/* already freed this syncing txg */
-	range_tree_t	*ms_defertree[TXG_DEFER_SIZE];
+	range_tree_t	*ms_freeing;	/* to free this syncing txg */
+	range_tree_t	*ms_freed;	/* already freed this syncing txg */
+	range_tree_t	*ms_defer[TXG_DEFER_SIZE];
+	range_tree_t	*ms_checkpointing;	/* to add to the checkpoint */
 
 	boolean_t	ms_condensing;	/* condensing? */
 	boolean_t	ms_condense_wanted;
+	uint64_t	ms_condense_checked_txg;
 
 	/*
 	 * We must hold both ms_lock and ms_group->mg_lock in order to
@@ -356,11 +358,12 @@ struct metaslab {
 	/*
 	 * The metaslab block allocators can optionally use a size-ordered
 	 * range tree and/or an array of LBAs. Not all allocators use
-	 * this functionality. The ms_size_tree should always contain the
-	 * same number of segments as the ms_tree. The only difference
-	 * is that the ms_size_tree is ordered by segment sizes.
+	 * this functionality. The ms_allocatable_by_size should always
+	 * contain the same number of segments as the ms_allocatable. The
+	 * only difference is that the ms_allocatable_by_size is ordered by
+	 * segment sizes.
 	 */
-	avl_tree_t	ms_size_tree;
+	avl_tree_t	ms_allocatable_by_size;
 	uint64_t	ms_lbas[MAX_LBAS];
 
 	metaslab_group_t *ms_group;	/* metaslab group */
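
As a rough illustration of the segment lifecycle documented in the
comment block above, a minimal sketch of how segments move between the
renamed trees (the example_* helpers are hypothetical; range_tree_add(),
range_tree_remove(), range_tree_vacate() and the TXG_MASK/TXG_DEFER_SIZE
macros are the real primitives, but this is not the actual OpenZFS
allocation path):

    /* Allocation: the segment leaves the free tree and joins this
     * txg's allocation tree, to be written out in syncing context. */
    static void
    example_allocate(metaslab_t *ms, uint64_t off, uint64_t size, uint64_t txg)
    {
            range_tree_remove(ms->ms_allocatable, off, size);
            range_tree_add(ms->ms_allocating[txg & TXG_MASK], off, size);
    }

    /* Free: segments accumulate in ms_freeing until syncing context
     * writes them to the space map and moves them to ms_freed. */
    static void
    example_free(metaslab_t *ms, uint64_t off, uint64_t size)
    {
            range_tree_add(ms->ms_freeing, off, size);
    }

    /* End of sync: freed segments are deferred rather than reused
     * immediately; they rejoin ms_allocatable TXG_DEFER_SIZE txgs
     * later (e.g. a block freed in txg 50 is allocatable in txg 52). */
    static void
    example_sync_done(metaslab_t *ms, uint64_t txg)
    {
            range_tree_vacate(ms->ms_freed, range_tree_add,
                ms->ms_defer[txg % TXG_DEFER_SIZE]);
    }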