author    Paul Dagnelie <[email protected]>      2019-08-05 14:34:27 -0700
committer Matthew Ahrens <[email protected]>     2019-08-05 14:34:27 -0700
commit    c81f1790e28a547769ce9d8e2d25391cf4317cc0
tree      4bad0800ec2e06ff51bb73eaaeabe2c85ca18cbe /include/sys
parent    99e755d653aec351c43a636b5734d1de3476d233
Metaslab max_size should be persisted while unloaded
When we unload metaslabs today in ZFS, the cached max_size value is
discarded. We instead use the histogram to determine whether or not we
think we can satisfy an allocation from the metaslab. This can result in
situations where, if we're doing I/Os of a size not aligned to a
histogram bucket, a metaslab is loaded even though it cannot satisfy the
allocation we think it can. For example, a metaslab with 16 entries in
the 16k-32k bucket may have entirely 16kB entries. If we try to allocate
a 24kB buffer, we will load that metaslab because we think it should be
able to handle the allocation. Doing so costs CPU time and disk reads, and
increases average I/O latency. This is exacerbated if the write being
attempted is a sync write.
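
As a concrete illustration of the over-estimate, here is a small, self-contained
toy model (not the ZFS code; the histogram layout, HIST_SHIFT, and the fit check
are simplified assumptions) showing how a bucket count can claim a 24kB
allocation fits when only 16kB segments exist:

#include <stdint.h>
#include <stdio.h>

/*
 * Toy model of a metaslab free-segment size histogram: bucket i counts
 * free segments whose size falls in [2^(i+SHIFT), 2^(i+SHIFT+1)).  The
 * real ZFS histogram and allocator checks are more involved; this only
 * illustrates why a bucket count cannot prove an allocation will fit.
 */
#define	HIST_SHIFT	9	/* bucket 0 covers 512B-1KB */
#define	HIST_BUCKETS	32

static int
histogram_says_fits(const uint64_t *hist, uint64_t asize)
{
	/*
	 * If any non-empty bucket could contain a segment >= asize, assume
	 * the metaslab can satisfy the allocation.  A 24kB request "fits"
	 * in the 16k-32k bucket even when every segment there is 16kB.
	 */
	for (int i = 0; i < HIST_BUCKETS; i++) {
		uint64_t bucket_max = (1ULL << (i + HIST_SHIFT + 1)) - 1;
		if (hist[i] != 0 && bucket_max >= asize)
			return (1);
	}
	return (0);
}

int
main(void)
{
	uint64_t hist[HIST_BUCKETS] = { 0 };

	hist[5] = 16;	/* 16 free segments in 16k-32k, all exactly 16kB */

	/* Prints "fits"; loading the metaslab would still fail a 24kB alloc. */
	printf("24kB %s\n",
	    histogram_says_fits(hist, 24 * 1024) ? "fits" : "does not fit");
	return (0);
}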
This change makes ZFS cache the max_size after the metaslab is
unloaded. If we ever get a free (or a coalesced group of frees) larger
than the max_size, we will update it. Otherwise, we leave it as is. When
attempting to allocate, we use the max_size as a lower bound, and
respect it unless we are in try_hard. However, we do age the max_size
out at some point, since we expect the actual max_size to increase as we
do more frees. A more sophisticated algorithm here might be helpful, but
this works reasonably well.
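
A minimal sketch of the caching policy just described, using stand-in names
(toy_metaslab_t, ms_cached_max_size, max_age, and so on) rather than the actual
fields and functions added by this commit:

#include <stdint.h>

/* Stand-in structure; the real state lives in struct metaslab. */
typedef struct toy_metaslab {
	uint64_t	ms_cached_max_size;	/* survives unload */
	uint64_t	ms_unload_time;		/* when the value was cached */
} toy_metaslab_t;

/* A free (or coalesced group of frees) can only raise the cached value. */
static void
toy_note_free(toy_metaslab_t *ms, uint64_t seg_size)
{
	if (seg_size > ms->ms_cached_max_size)
		ms->ms_cached_max_size = seg_size;
}

/*
 * Decide whether loading an unloaded metaslab is worthwhile for an
 * allocation of asize.  The cached value is treated as a lower bound on
 * the largest free segment and respected unless we are in try_hard, or
 * unless it has aged out (later frees may have created larger segments
 * than the cached value reflects).
 */
static int
toy_worth_loading(const toy_metaslab_t *ms, uint64_t asize, int try_hard,
    uint64_t now, uint64_t max_age)
{
	if (try_hard)
		return (1);
	if (now - ms->ms_unload_time > max_age)
		return (1);	/* cached bound is stale; don't trust it */
	return (asize <= ms->ms_cached_max_size);
}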
Reviewed-by: Brian Behlendorf <[email protected]>
Reviewed-by: Matt Ahrens <[email protected]>
Signed-off-by: Paul Dagnelie <[email protected]>
Closes #9055
Diffstat (limited to 'include/sys')
-rw-r--r--	include/sys/metaslab.h		2
-rw-r--r--	include/sys/metaslab_impl.h	7
-rw-r--r--	include/sys/range_tree.h	2
3 files changed, 10 insertions, 1 deletion
diff --git a/include/sys/metaslab.h b/include/sys/metaslab.h
index 973f15d75..7dd5fe2b5 100644
--- a/include/sys/metaslab.h
+++ b/include/sys/metaslab.h
@@ -66,7 +66,7 @@ uint64_t metaslab_allocated_space(metaslab_t *);
 void metaslab_sync(metaslab_t *, uint64_t);
 void metaslab_sync_done(metaslab_t *, uint64_t);
 void metaslab_sync_reassess(metaslab_group_t *);
-uint64_t metaslab_block_maxsize(metaslab_t *);
+uint64_t metaslab_largest_allocatable(metaslab_t *);
 
 /*
  * metaslab alloc flags
diff --git a/include/sys/metaslab_impl.h b/include/sys/metaslab_impl.h
index 29bc8cd5e..08ee8d279 100644
--- a/include/sys/metaslab_impl.h
+++ b/include/sys/metaslab_impl.h
@@ -475,6 +475,12 @@ struct metaslab {
 	 * stay cached.
 	 */
 	uint64_t	ms_selected_txg;
+	/*
+	 * ms_load/unload_time can be used for performance monitoring
+	 * (e.g. by dtrace or mdb).
+	 */
+	hrtime_t	ms_load_time;	/* time last loaded */
+	hrtime_t	ms_unload_time;	/* time last unloaded */
 
 	uint64_t	ms_alloc_txg;	/* last successful alloc (debug only) */
 	uint64_t	ms_max_size;	/* maximum allocatable size */
@@ -495,6 +501,7 @@ struct metaslab {
 	 * segment sizes.
 	 */
 	avl_tree_t	ms_allocatable_by_size;
+	avl_tree_t	ms_unflushed_frees_by_size;
 	uint64_t	ms_lbas[MAX_LBAS];
 
 	metaslab_group_t *ms_group;	/* metaslab group */
diff --git a/include/sys/range_tree.h b/include/sys/range_tree.h
index fce1df68d..e299613b8 100644
--- a/include/sys/range_tree.h
+++ b/include/sys/range_tree.h
@@ -89,6 +89,8 @@ range_tree_t *range_tree_create_impl(range_tree_ops_t *ops, void *arg,
 range_tree_t *range_tree_create(range_tree_ops_t *ops, void *arg);
 void range_tree_destroy(range_tree_t *rt);
 boolean_t range_tree_contains(range_tree_t *rt, uint64_t start, uint64_t size);
+boolean_t range_tree_find_in(range_tree_t *rt, uint64_t start, uint64_t size,
+    uint64_t *ostart, uint64_t *osize);
 void range_tree_verify_not_present(range_tree_t *rt, uint64_t start,
     uint64_t size);
 range_seg_t *range_tree_find(range_tree_t *rt, uint64_t start, uint64_t size);
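
For orientation, a hedged sketch of callers for the renamed and new prototypes
above. The behavior assumed here (metaslab_largest_allocatable() reporting the
largest contiguous allocatable segment, and range_tree_find_in() returning
B_TRUE and filling *ostart/*osize when a segment overlapping the queried range
exists) is inferred from the names and signatures in this diff, not from the
implementation:

#include <sys/zfs_context.h>
#include <sys/metaslab.h>
#include <sys/range_tree.h>

/* Hypothetical helper, not part of this commit. */
static boolean_t
toy_metaslab_can_fit(metaslab_t *msp, uint64_t asize)
{
	/* Assumed: returns the largest contiguous allocatable segment size. */
	return (metaslab_largest_allocatable(msp) >= asize);
}

/* Hypothetical helper, not part of this commit. */
static boolean_t
toy_range_overlaps(range_tree_t *rt, uint64_t start, uint64_t size)
{
	uint64_t ostart, osize;

	/* Assumed: B_TRUE when a segment overlaps [start, start + size). */
	return (range_tree_find_in(rt, start, size, &ostart, &osize));
}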