| author | George Wilson <[email protected]> | 2013-10-01 13:25:53 -0800 |
| committer | Brian Behlendorf <[email protected]> | 2014-07-22 09:39:16 -0700 |
| commit | 93cf20764a1be64a603020f54b45200e37b3877e (patch) | |
| tree | b0db8d60368de34cdbd4eccc9ee98d1110beb15e /include | |
| parent | 1be627f5c28a355bcd49e4e097114c13fae7731b (diff) | |
Illumos #4101, #4102, #4103, #4105, #4106
4101 metaslab_debug should allow for fine-grained control
4102 space_maps should store more information about themselves
4103 space map object blocksize should be increased
4105 removing a mirrored log device results in a leaked object
4106 asynchronously load metaslab
Reviewed by: Matthew Ahrens <[email protected]>
Reviewed by: Adam Leventhal <[email protected]>
Reviewed by: Sebastien Roy <[email protected]>
Approved by: Garrett D'Amore <[email protected]>
Prior to this patch, space_maps were preferred solely based on the
amount of free space left in each. Unfortunately, this heuristic didn't
contain any information about the make-up of that free space, which
meant we could keep preferring and loading a highly fragmented space
map that didn't actually have enough contiguous space to satisfy the
allocation, then unload that space_map and repeat the process.
This change modifies space_maps to store additional information about
the contiguous space they contain, so that we can use this information
to make a better decision about which space_map to load. This requires
reallocating all space_map objects to increase their bonus buffer sizes
enough to fit the new metadata.
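To make the fragmentation problem concrete, here is a minimal sketch
(not the actual ZFS code; the candidate_t type and helper below are
invented for illustration) of how a free-space histogram lets the
allocator rule out a fragmented metaslab before paying the cost of
loading its space map:

```c
#include <stdint.h>

#define	HIST_SIZE	32	/* mirrors smp_histogram[32] in the new header */

typedef struct candidate {
	uint64_t	free_space;		/* total free bytes */
	uint64_t	histogram[HIST_SIZE];	/* free-segment size buckets */
	uint8_t		shift;	/* bucket i: 2^(i+shift) <= len < 2^(i+shift+1) */
} candidate_t;

/*
 * Conservatively decide whether this candidate can satisfy an allocation
 * of 'size' bytes, using only the bucket lower bounds. Returning 0 here
 * is exactly the case where the old free-space-only heuristic would have
 * loaded the space map for nothing.
 */
static int
candidate_can_satisfy(const candidate_t *c, uint64_t size)
{
	int i;

	/* Scan from the largest bucket down to the first populated one. */
	for (i = HIST_SIZE - 1; i >= 0; i--) {
		if (c->histogram[i] == 0)
			continue;
		return ((1ULL << (i + c->shift)) >= size);
	}
	return (0);	/* no free segments at all */
}
```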
The above feature can be enabled via a new feature flag introduced by
this change: com.delphix:spacemap_histogram
In addition to the above, this patch allows the space_map block size
to be increased. Currently the block size is fixed at 4K, which has
several implications:
* 4K sector devices will not see any compression benefit
* large space_maps require more metadata on-disk
* large space_maps require more time to load (typically random reads)
Now the space_map block size can adjust as needed up to the maximum size
set via the space_map_max_blksz variable.
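The commit text only states the policy, but the growth logic can be
sketched roughly as follows (illustrative only; sm_pick_blksz and its
parameters are invented, with max_blksz standing in for the
space_map_max_blksz tunable):

```c
#include <stdint.h>

#define	SM_INITIAL_BLKSZ	(1ULL << 12)	/* 4K starting point, per the header */

/*
 * Double the block size until a single block covers the object,
 * capping the result at the configured maximum.
 */
static uint64_t
sm_pick_blksz(uint64_t cur_blksz, uint64_t obj_size, uint64_t max_blksz)
{
	uint64_t blksz = (cur_blksz != 0) ? cur_blksz : SM_INITIAL_BLKSZ;

	while (blksz < max_blksz && blksz < obj_size)
		blksz <<= 1;

	return (blksz < max_blksz ? blksz : max_blksz);
}
```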
This patch also fixes a bug that could leak an object when removing a
mirrored log device. The previous vdev_remove() logic did not correctly
handle the removal of top-level vdevs that are interior vdevs (i.e. a
mirror). Removing a mirrored log device would leak the DTL space map
object, because top-level vdevs don't have DTL space map objects
associated with them.
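The shape of the fix can be sketched as follows (names invented for
illustration, not the actual vdev code): because only leaf vdevs own
DTL space map objects, the removal path must recurse through an
interior top-level vdev, such as a mirror, to release the objects held
by its leaves.

```c
#include <stdint.h>

typedef struct toy_vdev {
	struct toy_vdev	**vd_child;
	uint64_t	vd_children;	/* 0 for a leaf vdev */
	uint64_t	vd_dtl_object;	/* DTL space map object; leaves only */
} toy_vdev_t;

static void
toy_vdev_release_dtl(toy_vdev_t *vd)
{
	uint64_t c;

	/* Interior vdevs (e.g. a mirror) carry no DTL object themselves. */
	for (c = 0; c < vd->vd_children; c++)
		toy_vdev_release_dtl(vd->vd_child[c]);

	if (vd->vd_children == 0 && vd->vd_dtl_object != 0) {
		/* free the on-disk object here, then drop the handle */
		vd->vd_dtl_object = 0;
	}
}
```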
References:
https://www.illumos.org/issues/4101
https://www.illumos.org/issues/4102
https://www.illumos.org/issues/4103
https://www.illumos.org/issues/4105
https://www.illumos.org/issues/4106
https://github.com/illumos/illumos-gate/commit/0713e23
Porting notes:
A handful of kmem_alloc() calls were converted to kmem_zalloc(). Also,
the KM_PUSHPAGE and TQ_PUSHPAGE flags were used as necessary.
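For context, the conversion pattern looks like this (an illustrative
fragment using the SPL allocation API; range_tree_t is one of the types
introduced by this change). KM_PUSHPAGE matters because KM_SLEEP
allocations can recurse into the filesystem under memory pressure,
which is unsafe in the txg sync path:

```c
/* Before: returns uninitialized memory; may recurse into the fs. */
range_tree_t *rt = kmem_alloc(sizeof (range_tree_t), KM_SLEEP);

/* After: zero-filled, and safe to call from the sync path. */
range_tree_t *rt = kmem_zalloc(sizeof (range_tree_t), KM_PUSHPAGE);
```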
Ported-by: Tim Chase <[email protected]>
Signed-off-by: Prakash Surya <[email protected]>
Signed-off-by: Brian Behlendorf <[email protected]>
Closes #2488
Diffstat (limited to 'include')
| -rw-r--r-- | include/sys/Makefile.am | 2 |
| -rw-r--r-- | include/sys/metaslab.h | 66 |
| -rw-r--r-- | include/sys/metaslab_impl.h | 107 |
| -rw-r--r-- | include/sys/range_tree.h | 96 |
| -rw-r--r-- | include/sys/space_map.h | 166 |
| -rw-r--r-- | include/sys/space_reftree.h | 57 |
| -rw-r--r-- | include/sys/vdev_impl.h | 26 |
| -rw-r--r-- | include/sys/zfeature.h | 3 |
| -rw-r--r-- | include/zfeature_common.h | 3 |
9 files changed, 370 insertions, 156 deletions
diff --git a/include/sys/Makefile.am b/include/sys/Makefile.am
index 9d7756627..4115c6d8f 100644
--- a/include/sys/Makefile.am
+++ b/include/sys/Makefile.am
@@ -32,12 +32,14 @@ COMMON_H = \
 	$(top_srcdir)/include/sys/metaslab_impl.h \
 	$(top_srcdir)/include/sys/nvpair.h \
 	$(top_srcdir)/include/sys/nvpair_impl.h \
+	$(top_srcdir)/include/sys/range_tree.h \
 	$(top_srcdir)/include/sys/refcount.h \
 	$(top_srcdir)/include/sys/rrwlock.h \
 	$(top_srcdir)/include/sys/sa.h \
 	$(top_srcdir)/include/sys/sa_impl.h \
 	$(top_srcdir)/include/sys/spa_boot.h \
 	$(top_srcdir)/include/sys/space_map.h \
+	$(top_srcdir)/include/sys/space_reftree.h \
 	$(top_srcdir)/include/sys/spa.h \
 	$(top_srcdir)/include/sys/spa_impl.h \
 	$(top_srcdir)/include/sys/txg.h \
diff --git a/include/sys/metaslab.h b/include/sys/metaslab.h
index 70f7af0a5..a3bbc25f7 100644
--- a/include/sys/metaslab.h
+++ b/include/sys/metaslab.h
@@ -20,7 +20,7 @@
  */
 /*
  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012 by Delphix. All rights reserved.
+ * Copyright (c) 2013 by Delphix. All rights reserved.
  */

 #ifndef _SYS_METASLAB_H
@@ -36,14 +36,25 @@ extern "C" {
 #endif

-extern space_map_ops_t *zfs_metaslab_ops;
+typedef struct metaslab_ops {
+	uint64_t (*msop_alloc)(metaslab_t *msp, uint64_t size);
+	boolean_t (*msop_fragmented)(metaslab_t *msp);
+} metaslab_ops_t;

-extern metaslab_t *metaslab_init(metaslab_group_t *mg, space_map_obj_t *smo,
-    uint64_t start, uint64_t size, uint64_t txg);
-extern void metaslab_fini(metaslab_t *msp);
-extern void metaslab_sync(metaslab_t *msp, uint64_t txg);
-extern void metaslab_sync_done(metaslab_t *msp, uint64_t txg);
-extern void metaslab_sync_reassess(metaslab_group_t *mg);
+extern metaslab_ops_t *zfs_metaslab_ops;
+
+metaslab_t *metaslab_init(metaslab_group_t *mg, uint64_t id,
+    uint64_t object, uint64_t txg);
+void metaslab_fini(metaslab_t *msp);
+
+void metaslab_load_wait(metaslab_t *msp);
+int metaslab_load(metaslab_t *msp);
+void metaslab_unload(metaslab_t *msp);
+
+void metaslab_sync(metaslab_t *msp, uint64_t txg);
+void metaslab_sync_done(metaslab_t *msp, uint64_t txg);
+void metaslab_sync_reassess(metaslab_group_t *mg);
+uint64_t metaslab_block_maxsize(metaslab_t *msp);

 #define	METASLAB_HINTBP_FAVOR	0x0
 #define	METASLAB_HINTBP_AVOID	0x1
@@ -52,33 +63,30 @@ extern void metaslab_sync_reassess(metaslab_group_t *mg);
 #define	METASLAB_GANG_AVOID	0x8
 #define	METASLAB_FASTWRITE	0x10

-extern int metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
+int metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
     blkptr_t *bp, int ncopies, uint64_t txg, blkptr_t *hintbp, int flags);
-extern void metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg,
-    boolean_t now);
-extern int metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg);
-extern void metaslab_check_free(spa_t *spa, const blkptr_t *bp);
-extern void metaslab_fastwrite_mark(spa_t *spa, const blkptr_t *bp);
-extern void metaslab_fastwrite_unmark(spa_t *spa, const blkptr_t *bp);
+void metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now);
+int metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg);
+void metaslab_check_free(spa_t *spa, const blkptr_t *bp);
+void metaslab_fastwrite_mark(spa_t *spa, const blkptr_t *bp);
+void metaslab_fastwrite_unmark(spa_t *spa, const blkptr_t *bp);

-extern metaslab_class_t *metaslab_class_create(spa_t *spa,
-    space_map_ops_t *ops);
-extern void metaslab_class_destroy(metaslab_class_t *mc);
-extern int metaslab_class_validate(metaslab_class_t *mc);
+metaslab_class_t *metaslab_class_create(spa_t *spa, metaslab_ops_t *ops);
+void metaslab_class_destroy(metaslab_class_t *mc);
+int metaslab_class_validate(metaslab_class_t *mc);

-extern void metaslab_class_space_update(metaslab_class_t *mc,
+void metaslab_class_space_update(metaslab_class_t *mc,
     int64_t alloc_delta, int64_t defer_delta, int64_t space_delta,
     int64_t dspace_delta);
-extern uint64_t metaslab_class_get_alloc(metaslab_class_t *mc);
-extern uint64_t metaslab_class_get_space(metaslab_class_t *mc);
-extern uint64_t metaslab_class_get_dspace(metaslab_class_t *mc);
-extern uint64_t metaslab_class_get_deferred(metaslab_class_t *mc);
+uint64_t metaslab_class_get_alloc(metaslab_class_t *mc);
+uint64_t metaslab_class_get_space(metaslab_class_t *mc);
+uint64_t metaslab_class_get_dspace(metaslab_class_t *mc);
+uint64_t metaslab_class_get_deferred(metaslab_class_t *mc);

-extern metaslab_group_t *metaslab_group_create(metaslab_class_t *mc,
-    vdev_t *vd);
-extern void metaslab_group_destroy(metaslab_group_t *mg);
-extern void metaslab_group_activate(metaslab_group_t *mg);
-extern void metaslab_group_passivate(metaslab_group_t *mg);
+metaslab_group_t *metaslab_group_create(metaslab_class_t *mc, vdev_t *vd);
+void metaslab_group_destroy(metaslab_group_t *mg);
+void metaslab_group_activate(metaslab_group_t *mg);
+void metaslab_group_passivate(metaslab_group_t *mg);

 #ifdef __cplusplus
 }
diff --git a/include/sys/metaslab_impl.h b/include/sys/metaslab_impl.h
index 36aa60d4c..3e9f32e1c 100644
--- a/include/sys/metaslab_impl.h
+++ b/include/sys/metaslab_impl.h
@@ -32,6 +32,7 @@
 #include <sys/metaslab.h>
 #include <sys/space_map.h>
+#include <sys/range_tree.h>
 #include <sys/vdev.h>
 #include <sys/txg.h>
 #include <sys/avl.h>
@@ -43,7 +44,7 @@ extern "C" {
 struct metaslab_class {
 	spa_t			*mc_spa;
 	metaslab_group_t	*mc_rotor;
-	space_map_ops_t		*mc_ops;
+	metaslab_ops_t		*mc_ops;
 	uint64_t		mc_aliquot;
 	uint64_t		mc_alloc_groups; /* # of allocatable groups */
 	uint64_t		mc_alloc;	/* total allocated space */
@@ -57,7 +58,6 @@ struct metaslab_group {
 	kmutex_t		mg_lock;
 	avl_tree_t		mg_metaslab_tree;
 	uint64_t		mg_aliquot;
-	uint64_t		mg_bonus_area;
 	uint64_t		mg_alloc_failures;
 	boolean_t		mg_allocatable;	/* can we allocate? */
 	uint64_t		mg_free_capacity; /* percentage free */
@@ -65,45 +65,102 @@ struct metaslab_group {
 	int64_t			mg_activation_count;
 	metaslab_class_t	*mg_class;
 	vdev_t			*mg_vd;
+	taskq_t			*mg_taskq;
 	metaslab_group_t	*mg_prev;
 	metaslab_group_t	*mg_next;
 };

 /*
- * Each metaslab maintains an in-core free map (ms_map) that contains the
- * current list of free segments. As blocks are allocated, the allocated
- * segment is removed from the ms_map and added to a per txg allocation map.
- * As blocks are freed, they are added to the per txg free map. These per
- * txg maps allow us to process all allocations and frees in syncing context
- * where it is safe to update the on-disk space maps.
+ * This value defines the number of elements in the ms_lbas array. The value
+ * of 64 was chosen to cover all power of 2 buckets up to UINT64_MAX.
+ * This is the equivalent of highbit(UINT64_MAX).
+ */
+#define	MAX_LBAS	64
+
+/*
+ * Each metaslab maintains a set of in-core trees to track metaslab operations.
+ * The in-core free tree (ms_tree) contains the current list of free segments.
+ * As blocks are allocated, the allocated segments are removed from the ms_tree
+ * and added to a per txg allocation tree (ms_alloctree). As blocks are freed,
+ * they are added to the per txg free tree (ms_freetree). These per txg
+ * trees allow us to process all allocations and frees in syncing context
+ * where it is safe to update the on-disk space maps. One additional in-core
+ * tree is maintained to track deferred frees (ms_defertree). Once a block
+ * is freed it will move from the ms_freetree to the ms_defertree. A deferred
+ * free means that a block has been freed but cannot be used by the pool
+ * until TXG_DEFER_SIZE transaction groups later. For example, a block
+ * that is freed in txg 50 will not be available for reallocation until
+ * txg 52 (50 + TXG_DEFER_SIZE). This provides a safety net for uberblock
+ * rollback. A pool could be safely rolled back TXG_DEFER_SIZE
+ * transaction groups and ensure that no block has been reallocated.
+ *
+ * The simplified transition diagram looks like this:
+ *
+ *
+ *                      ALLOCATE
+ *                         |
+ *                         V
+ *    free segment (ms_tree) --------> ms_alloctree ----> (write to space map)
+ *         ^
+ *         |
+ *         |                          ms_freetree <--- FREE
+ *         |                               |
+ *         |                               |
+ *         |                               |
+ *         +----------- ms_defertree <-------+---------> (write to space map)
 *
- * Each metaslab's free space is tracked in a space map object in the MOS,
+ *
+ * Each metaslab's space is tracked in a single space map in the MOS,
  * which is only updated in syncing context. Each time we sync a txg,
- * we append the allocs and frees from that txg to the space map object.
- * When the txg is done syncing, metaslab_sync_done() updates ms_smo
- * to ms_smo_syncing. Everything in ms_smo is always safe to allocate.
+ * we append the allocs and frees from that txg to the space map.
+ * The pool space is only updated once all metaslabs have finished syncing.
 *
- * To load the in-core free map we read the space map object from disk.
+ * To load the in-core free tree we read the space map from disk.
  * This object contains a series of alloc and free records that are
  * combined to make up the list of all free segments in this metaslab. These
- * segments are represented in-core by the ms_map and are stored in an
+ * segments are represented in-core by the ms_tree and are stored in an
  * AVL tree.
 *
- * As the space map objects grows (as a result of the appends) it will
- * eventually become space-inefficient. When the space map object is
- * zfs_condense_pct/100 times the size of the minimal on-disk representation,
- * we rewrite it in its minimized form.
+ * As the space map grows (as a result of the appends) it will
+ * eventually become space-inefficient. When the metaslab's in-core free tree
+ * is zfs_condense_pct/100 times the size of the minimal on-disk
+ * representation, we rewrite it in its minimized form. If a metaslab
+ * needs to condense then we must set the ms_condensing flag to ensure
+ * that allocations are not performed on the metaslab that is being written.
 */
 struct metaslab {
-	kmutex_t	ms_lock;	/* metaslab lock */
-	space_map_obj_t	ms_smo;		/* synced space map object */
-	space_map_obj_t	ms_smo_syncing;	/* syncing space map object */
-	space_map_t	*ms_allocmap[TXG_SIZE];	/* allocated this txg */
-	space_map_t	*ms_freemap[TXG_SIZE];	/* freed this txg */
-	space_map_t	*ms_defermap[TXG_DEFER_SIZE]; /* deferred frees */
-	space_map_t	*ms_map;	/* in-core free space map */
+	kmutex_t	ms_lock;
+	kcondvar_t	ms_load_cv;
+	space_map_t	*ms_sm;
+	metaslab_ops_t	*ms_ops;
+	uint64_t	ms_id;
+	uint64_t	ms_start;
+	uint64_t	ms_size;
+
+	range_tree_t	*ms_alloctree[TXG_SIZE];
+	range_tree_t	*ms_freetree[TXG_SIZE];
+	range_tree_t	*ms_defertree[TXG_DEFER_SIZE];
+	range_tree_t	*ms_tree;
+
+	boolean_t	ms_condensing;	/* condensing? */
+	boolean_t	ms_loaded;
+	boolean_t	ms_loading;
+
 	int64_t		ms_deferspace;	/* sum of ms_defermap[] space */
 	uint64_t	ms_weight;	/* weight vs. others in group */
+	uint64_t	ms_factor;
+	uint64_t	ms_access_txg;
+
+	/*
+	 * The metaslab block allocators can optionally use a size-ordered
+	 * range tree and/or an array of LBAs. Not all allocators use
+	 * this functionality. The ms_size_tree should always contain the
+	 * same number of segments as the ms_tree. The only difference
+	 * is that the ms_size_tree is ordered by segment sizes.
+	 */
+	avl_tree_t	ms_size_tree;
+	uint64_t	ms_lbas[MAX_LBAS];
+
 	metaslab_group_t *ms_group;	/* metaslab group */
 	avl_node_t	ms_group_node;	/* node in metaslab group tree */
 	txg_node_t	ms_txg_node;	/* per-txg dirty metaslab links */
diff --git a/include/sys/range_tree.h b/include/sys/range_tree.h
new file mode 100644
index 000000000..a41effe4e
--- /dev/null
+++ b/include/sys/range_tree.h
@@ -0,0 +1,96 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+/*
+ * Copyright (c) 2013 by Delphix. All rights reserved.
+ */
+
+#ifndef _SYS_RANGE_TREE_H
+#define	_SYS_RANGE_TREE_H
+
+#include <sys/avl.h>
+#include <sys/dmu.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define	RANGE_TREE_HISTOGRAM_SIZE	64
+
+typedef struct range_tree_ops range_tree_ops_t;
+
+typedef struct range_tree {
+	avl_tree_t	rt_root;	/* offset-ordered segment AVL tree */
+	uint64_t	rt_space;	/* sum of all segments in the map */
+	range_tree_ops_t *rt_ops;
+	void		*rt_arg;
+
+	/*
+	 * The rt_histogram maintains a histogram of ranges. Each bucket,
+	 * rt_histogram[i], contains the number of ranges whose size is:
+	 * 2^i <= size of range in bytes < 2^(i+1)
+	 */
+	uint64_t	rt_histogram[RANGE_TREE_HISTOGRAM_SIZE];
+	kmutex_t	*rt_lock;	/* pointer to lock that protects map */
+} range_tree_t;
+
+typedef struct range_seg {
+	avl_node_t	rs_node;	/* AVL node */
+	avl_node_t	rs_pp_node;	/* AVL picker-private node */
+	uint64_t	rs_start;	/* starting offset of this segment */
+	uint64_t	rs_end;		/* ending offset (non-inclusive) */
+} range_seg_t;
+
+struct range_tree_ops {
+	void	(*rtop_create)(range_tree_t *rt, void *arg);
+	void	(*rtop_destroy)(range_tree_t *rt, void *arg);
+	void	(*rtop_add)(range_tree_t *rt, range_seg_t *rs, void *arg);
+	void	(*rtop_remove)(range_tree_t *rt, range_seg_t *rs, void *arg);
+	void	(*rtop_vacate)(range_tree_t *rt, void *arg);
+};
+
+typedef void range_tree_func_t(void *arg, uint64_t start, uint64_t size);
+
+void range_tree_init(void);
+void range_tree_fini(void);
+range_tree_t *range_tree_create(range_tree_ops_t *ops, void *arg, kmutex_t *lp);
+void range_tree_destroy(range_tree_t *rt);
+boolean_t range_tree_contains(range_tree_t *rt, uint64_t start, uint64_t size);
+uint64_t range_tree_space(range_tree_t *rt);
+void range_tree_verify(range_tree_t *rt, uint64_t start, uint64_t size);
+void range_tree_swap(range_tree_t **rtsrc, range_tree_t **rtdst);
+void range_tree_stat_verify(range_tree_t *rt);
+
+void range_tree_add(void *arg, uint64_t start, uint64_t size);
+void range_tree_remove(void *arg, uint64_t start, uint64_t size);
+
+void range_tree_vacate(range_tree_t *rt, range_tree_func_t *func, void *arg);
+void range_tree_walk(range_tree_t *rt, range_tree_func_t *func, void *arg);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_RANGE_TREE_H */
diff --git a/include/sys/space_map.h b/include/sys/space_map.h
index 588feb8e8..369180330 100644
--- a/include/sys/space_map.h
+++ b/include/sys/space_map.h
@@ -24,66 +24,72 @@
 */

 /*
- * Copyright (c) 2012 by Delphix. All rights reserved.
+ * Copyright (c) 2013 by Delphix. All rights reserved.
 */

 #ifndef _SYS_SPACE_MAP_H
 #define	_SYS_SPACE_MAP_H

 #include <sys/avl.h>
+#include <sys/range_tree.h>
 #include <sys/dmu.h>

 #ifdef __cplusplus
 extern "C" {
 #endif

-typedef const struct space_map_ops space_map_ops_t;
+/*
+ * The size of the space map object has increased to include a histogram.
+ * The SPACE_MAP_SIZE_V0 designates the original size and is used to
+ * maintain backward compatibility.
+ */
+#define	SPACE_MAP_SIZE_V0	(3 * sizeof (uint64_t))
+#define	SPACE_MAP_HISTOGRAM_SIZE(sm) \
+	(sizeof ((sm)->sm_phys->smp_histogram) / \
+	sizeof ((sm)->sm_phys->smp_histogram[0]))
+
+/*
+ * The space_map_phys is the on-disk representation of the space map.
+ * Consumers of space maps should never reference any of the members of this
+ * structure directly. These members may only be updated in syncing context.
+ *
+ * Note the smp_object is no longer used but remains in the structure
+ * for backward compatibility.
+ */
+typedef struct space_map_phys {
+	uint64_t	smp_object;	/* on-disk space map object */
+	uint64_t	smp_objsize;	/* size of the object */
+	uint64_t	smp_alloc;	/* space allocated from the map */
+	uint64_t	smp_pad[5];	/* reserved */
+
+	/*
+	 * The smp_histogram maintains a histogram of free regions. Each
+	 * bucket, smp_histogram[i], contains the number of free regions
+	 * whose size is:
+	 * 2^(i+sm_shift) <= size of free region in bytes < 2^(i+sm_shift+1)
+	 */
+	uint64_t	smp_histogram[32]; /* histogram of free space */
+} space_map_phys_t;

+/*
+ * The space map object defines a region of space, its size, how much is
+ * allocated, and the on-disk object that stores this information.
+ * Consumers of space maps may only access the members of this structure.
+ */
 typedef struct space_map {
-	avl_tree_t	sm_root;	/* offset-ordered segment AVL tree */
-	uint64_t	sm_space;	/* sum of all segments in the map */
 	uint64_t	sm_start;	/* start of map */
 	uint64_t	sm_size;	/* size of map */
 	uint8_t		sm_shift;	/* unit shift */
-	uint8_t		sm_loaded;	/* map loaded? */
-	uint8_t		sm_loading;	/* map loading? */
-	uint8_t		sm_condensing;	/* map condensing? */
-	kcondvar_t	sm_load_cv;	/* map load completion */
-	space_map_ops_t	*sm_ops;	/* space map block picker ops vector */
-	avl_tree_t	*sm_pp_root;	/* size-ordered, picker-private tree */
-	void		*sm_ppd;	/* picker-private data */
+	uint64_t	sm_length;	/* synced length */
+	uint64_t	sm_alloc;	/* synced space allocated */
+	objset_t	*sm_os;		/* objset for this map */
+	uint64_t	sm_object;	/* object id for this map */
+	uint32_t	sm_blksz;	/* block size for space map */
+	dmu_buf_t	*sm_dbuf;	/* space_map_phys_t dbuf */
+	space_map_phys_t *sm_phys;	/* on-disk space map */
 	kmutex_t	*sm_lock;	/* pointer to lock that protects map */
 } space_map_t;

-typedef struct space_seg {
-	avl_node_t	ss_node;	/* AVL node */
-	avl_node_t	ss_pp_node;	/* AVL picker-private node */
-	uint64_t	ss_start;	/* starting offset of this segment */
-	uint64_t	ss_end;		/* ending offset (non-inclusive) */
-} space_seg_t;
-
-typedef struct space_ref {
-	avl_node_t	sr_node;	/* AVL node */
-	uint64_t	sr_offset;	/* offset (start or end) */
-	int64_t		sr_refcnt;	/* associated reference count */
-} space_ref_t;
-
-typedef struct space_map_obj {
-	uint64_t	smo_object;	/* on-disk space map object */
-	uint64_t	smo_objsize;	/* size of the object */
-	uint64_t	smo_alloc;	/* space allocated from the map */
-} space_map_obj_t;
-
-struct space_map_ops {
-	void	(*smop_load)(space_map_t *sm);
-	void	(*smop_unload)(space_map_t *sm);
-	uint64_t (*smop_alloc)(space_map_t *sm, uint64_t size);
-	void	(*smop_claim)(space_map_t *sm, uint64_t start, uint64_t size);
-	void	(*smop_free)(space_map_t *sm, uint64_t start, uint64_t size);
-	uint64_t (*smop_max)(space_map_t *sm);
-	boolean_t (*smop_fragmented)(space_map_t *sm);
-};
-
 /*
  * debug entry
 *
@@ -124,61 +130,45 @@ struct space_map_ops {

 #define	SM_RUN_MAX	SM_RUN_DECODE(~0ULL)

-#define	SM_ALLOC	0x0
-#define	SM_FREE		0x1
+typedef enum {
+	SM_ALLOC,
+	SM_FREE
+} maptype_t;

 /*
  * The data for a given space map can be kept on blocks of any size.
  * Larger blocks entail fewer i/o operations, but they also cause the
  * DMU to keep more data in-core, and also to waste more i/o bandwidth
  * when only a few blocks have changed since the last transaction group.
- * This could use a lot more research, but for now, set the freelist
- * block size to 4k (2^12).
+ * Rather than having a fixed block size for all space maps the block size
+ * can adjust as needed (see space_map_max_blksz). Set the initial block
+ * size for the space map to 4k.
 */
-#define	SPACE_MAP_BLOCKSHIFT	12
-
-typedef void space_map_func_t(space_map_t *sm, uint64_t start, uint64_t size);
-
-extern void space_map_init(void);
-extern void space_map_fini(void);
-extern void space_map_create(space_map_t *sm, uint64_t start, uint64_t size,
-    uint8_t shift, kmutex_t *lp);
-extern void space_map_destroy(space_map_t *sm);
-extern void space_map_add(space_map_t *sm, uint64_t start, uint64_t size);
-extern void space_map_remove(space_map_t *sm, uint64_t start, uint64_t size);
-extern boolean_t space_map_contains(space_map_t *sm,
-    uint64_t start, uint64_t size);
-extern space_seg_t *space_map_find(space_map_t *sm, uint64_t start,
-    uint64_t size, avl_index_t *wherep);
-extern void space_map_swap(space_map_t **msrc, space_map_t **mdest);
-extern void space_map_vacate(space_map_t *sm,
-    space_map_func_t *func, space_map_t *mdest);
-extern void space_map_walk(space_map_t *sm,
-    space_map_func_t *func, space_map_t *mdest);
-
-extern void space_map_load_wait(space_map_t *sm);
-extern int space_map_load(space_map_t *sm, space_map_ops_t *ops,
-    uint8_t maptype, space_map_obj_t *smo, objset_t *os);
-extern void space_map_unload(space_map_t *sm);
-
-extern uint64_t space_map_alloc(space_map_t *sm, uint64_t size);
-extern void space_map_claim(space_map_t *sm, uint64_t start, uint64_t size);
-extern void space_map_free(space_map_t *sm, uint64_t start, uint64_t size);
-extern uint64_t space_map_maxsize(space_map_t *sm);
-
-extern void space_map_sync(space_map_t *sm, uint8_t maptype,
-    space_map_obj_t *smo, objset_t *os, dmu_tx_t *tx);
-extern void space_map_truncate(space_map_obj_t *smo,
-    objset_t *os, dmu_tx_t *tx);
-
-extern void space_map_ref_create(avl_tree_t *t);
-extern void space_map_ref_destroy(avl_tree_t *t);
-extern void space_map_ref_add_seg(avl_tree_t *t,
-    uint64_t start, uint64_t end, int64_t refcnt);
-extern void space_map_ref_add_map(avl_tree_t *t,
-    space_map_t *sm, int64_t refcnt);
-extern void space_map_ref_generate_map(avl_tree_t *t,
-    space_map_t *sm, int64_t minref);
+#define	SPACE_MAP_INITIAL_BLOCKSIZE	(1ULL << 12)
+
+int space_map_load(space_map_t *sm, range_tree_t *rt, maptype_t maptype);
+
+void space_map_histogram_clear(space_map_t *sm);
+void space_map_histogram_add(space_map_t *sm, range_tree_t *rt,
+    dmu_tx_t *tx);
+
+void space_map_update(space_map_t *sm);
+
+uint64_t space_map_object(space_map_t *sm);
+uint64_t space_map_allocated(space_map_t *sm);
+uint64_t space_map_length(space_map_t *sm);
+
+void space_map_write(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
+    dmu_tx_t *tx);
+void space_map_truncate(space_map_t *sm, dmu_tx_t *tx);
+uint64_t space_map_alloc(objset_t *os, dmu_tx_t *tx);
+void space_map_free(space_map_t *sm, dmu_tx_t *tx);
+
+int space_map_open(space_map_t **smp, objset_t *os, uint64_t object,
+    uint64_t start, uint64_t size, uint8_t shift, kmutex_t *lp);
+void space_map_close(space_map_t *sm);
+
+int64_t space_map_alloc_delta(space_map_t *sm);

 #ifdef __cplusplus
 }
diff --git a/include/sys/space_reftree.h b/include/sys/space_reftree.h
new file mode 100644
index 000000000..249b15be6
--- /dev/null
+++ b/include/sys/space_reftree.h
@@ -0,0 +1,57 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+/*
+ * Copyright (c) 2013 by Delphix. All rights reserved.
+ */
+
+#ifndef _SYS_SPACE_REFTREE_H
+#define	_SYS_SPACE_REFTREE_H
+
+#include <sys/range_tree.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct space_ref {
+	avl_node_t	sr_node;	/* AVL node */
+	uint64_t	sr_offset;	/* range offset (start or end) */
+	int64_t		sr_refcnt;	/* associated reference count */
+} space_ref_t;
+
+void space_reftree_create(avl_tree_t *t);
+void space_reftree_destroy(avl_tree_t *t);
+void space_reftree_add_seg(avl_tree_t *t, uint64_t start, uint64_t end,
+    int64_t refcnt);
+void space_reftree_add_map(avl_tree_t *t, range_tree_t *rt, int64_t refcnt);
+void space_reftree_generate_map(avl_tree_t *t, range_tree_t *rt,
+    int64_t minref);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_SPACE_REFTREE_H */
diff --git a/include/sys/vdev_impl.h b/include/sys/vdev_impl.h
index 703dd65da..16cf4d770 100644
--- a/include/sys/vdev_impl.h
+++ b/include/sys/vdev_impl.h
@@ -152,7 +152,6 @@ struct vdev {
 	vdev_t		*vdev_parent;	/* parent vdev */
 	vdev_t		**vdev_child;	/* array of children */
 	uint64_t	vdev_children;	/* number of children */
-	space_map_t	vdev_dtl[DTL_TYPES]; /* in-core dirty time logs */
 	vdev_stat_t	vdev_stat;	/* virtual device statistics */
 	boolean_t	vdev_expanding;	/* expand the vdev? */
 	boolean_t	vdev_reopening;	/* reopen in progress? */
@@ -174,19 +173,21 @@ struct vdev {
 	txg_node_t	vdev_txg_node;	/* per-txg dirty vdev linkage */
 	boolean_t	vdev_remove_wanted; /* async remove wanted? */
 	boolean_t	vdev_probe_wanted; /* async probe wanted? */
-	uint64_t	vdev_removing;	/* device is being removed? */
 	list_node_t	vdev_config_dirty_node; /* config dirty list */
 	list_node_t	vdev_state_dirty_node; /* state dirty list */
 	uint64_t	vdev_deflate_ratio; /* deflation ratio (x512) */
 	uint64_t	vdev_islog;	/* is an intent log device */
-	uint64_t	vdev_ishole;	/* is a hole in the namespace */
+	uint64_t	vdev_removing;	/* device is being removed? */
+	boolean_t	vdev_ishole;	/* is a hole in the namespace */

 	/*
	 * Leaf vdev state.
	 */
-	uint64_t	vdev_psize;	/* physical device capacity */
-	space_map_obj_t	vdev_dtl_smo;	/* dirty time log space map obj */
+	range_tree_t	*vdev_dtl[DTL_TYPES]; /* dirty time logs */
+	space_map_t	*vdev_dtl_sm;	/* dirty time log space map */
 	txg_node_t	vdev_dtl_node;	/* per-txg dirty DTL linkage */
+	uint64_t	vdev_dtl_object; /* DTL object */
+	uint64_t	vdev_psize;	/* physical device capacity */
 	uint64_t	vdev_wholedisk;	/* true if this is a whole disk */
 	uint64_t	vdev_offline;	/* persistent offline state */
 	uint64_t	vdev_faulted;	/* persistent faulted state */
@@ -200,18 +201,17 @@ struct vdev {
 	char		*vdev_fru;	/* physical FRU location */
 	uint64_t	vdev_not_present; /* not present during import */
 	uint64_t	vdev_unspare;	/* unspare when resilvering done */
-	hrtime_t	vdev_last_try;	/* last reopen time */
 	boolean_t	vdev_nowritecache; /* true if flushwritecache failed */
 	boolean_t	vdev_checkremove; /* temporary online test */
 	boolean_t	vdev_forcefault; /* force online fault */
 	boolean_t	vdev_splitting;	/* split or repair in progress */
 	boolean_t	vdev_delayed_close; /* delayed device close? */
-	uint8_t		vdev_tmpoffline; /* device taken offline temporarily? */
-	uint8_t		vdev_detached;	/* device detached? */
-	uint8_t		vdev_cant_read;	/* vdev is failing all reads */
-	uint8_t		vdev_cant_write; /* vdev is failing all writes */
-	uint64_t	vdev_isspare;	/* was a hot spare */
-	uint64_t	vdev_isl2cache;	/* was a l2cache device */
+	boolean_t	vdev_tmpoffline; /* device taken offline temporarily? */
+	boolean_t	vdev_detached;	/* device detached? */
+	boolean_t	vdev_cant_read;	/* vdev is failing all reads */
+	boolean_t	vdev_cant_write; /* vdev is failing all writes */
+	boolean_t	vdev_isspare;	/* was a hot spare */
+	boolean_t	vdev_isl2cache;	/* was a l2cache device */
 	vdev_queue_t	vdev_queue;	/* I/O deadline schedule queue */
 	vdev_cache_t	vdev_cache;	/* physical block cache */
 	spa_aux_vdev_t	*vdev_aux;	/* for l2cache vdevs */
@@ -312,9 +312,11 @@ extern void vdev_remove_parent(vdev_t *cvd);
 extern void vdev_load_log_state(vdev_t *nvd, vdev_t *ovd);
 extern boolean_t vdev_log_state_valid(vdev_t *vd);
 extern void vdev_load(vdev_t *vd);
+extern int vdev_dtl_load(vdev_t *vd);
 extern void vdev_sync(vdev_t *vd, uint64_t txg);
 extern void vdev_sync_done(vdev_t *vd, uint64_t txg);
 extern void vdev_dirty(vdev_t *vd, int flags, void *arg, uint64_t txg);
+extern void vdev_dirty_leaves(vdev_t *vd, int flags, uint64_t txg);

 /*
  * Available vdev types.
diff --git a/include/sys/zfeature.h b/include/sys/zfeature.h
index 1a081e422..9df7e4973 100644
--- a/include/sys/zfeature.h
+++ b/include/sys/zfeature.h
@@ -20,7 +20,7 @@
 */

 /*
- * Copyright (c) 2012 by Delphix. All rights reserved.
+ * Copyright (c) 2013 by Delphix. All rights reserved.
 */

 #ifndef _SYS_ZFEATURE_H
@@ -47,6 +47,7 @@ extern void spa_feature_incr(struct spa *, zfeature_info_t *, struct dmu_tx *);
 extern void spa_feature_decr(struct spa *, zfeature_info_t *, struct dmu_tx *);
 extern boolean_t spa_feature_is_enabled(struct spa *, zfeature_info_t *);
 extern boolean_t spa_feature_is_active(struct spa *, zfeature_info_t *);
+extern int spa_feature_get_refcount(struct spa *, zfeature_info_t *);

 #ifdef __cplusplus
 }
diff --git a/include/zfeature_common.h b/include/zfeature_common.h
index 8c104e47f..f4c1088db 100644
--- a/include/zfeature_common.h
+++ b/include/zfeature_common.h
@@ -20,7 +20,7 @@
 */

 /*
- * Copyright (c) 2012 by Delphix. All rights reserved.
+ * Copyright (c) 2013 by Delphix. All rights reserved.
  * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 */

@@ -54,6 +54,7 @@ typedef enum spa_feature {
 	SPA_FEATURE_ASYNC_DESTROY,
 	SPA_FEATURE_EMPTY_BPOBJ,
 	SPA_FEATURE_LZ4_COMPRESS,
+	SPA_FEATURE_SPACEMAP_HISTOGRAM,
 	SPA_FEATURES
 } spa_feature_t;