From f01eaed4556623b63c414fee9085ae27d457fe46 Mon Sep 17 00:00:00 2001
From: Prakash Surya
Date: Wed, 24 Feb 2021 09:52:43 -0800
Subject: Add upper bound for slop space calculation

This change modifies the behavior of how we determine how much slop
space to use in the pool, such that now it has an upper limit. The
default upper limit is 128G, but is configurable via a tunable.

Reviewed-by: Matthew Ahrens
Reviewed-by: Brian Behlendorf
Signed-off-by: Prakash Surya
Closes #11023
---
 module/zfs/spa_misc.c | 27 +++++++++++++++++----------
 1 file changed, 17 insertions(+), 10 deletions(-)

diff --git a/module/zfs/spa_misc.c b/module/zfs/spa_misc.c
index b4c73f58d..e6e9b0e0b 100644
--- a/module/zfs/spa_misc.c
+++ b/module/zfs/spa_misc.c
@@ -347,13 +347,14 @@ int spa_asize_inflation = 24;
 
 /*
  * Normally, we don't allow the last 3.2% (1/(2^spa_slop_shift)) of space in
- * the pool to be consumed. This ensures that we don't run the pool
- * completely out of space, due to unaccounted changes (e.g. to the MOS).
- * It also limits the worst-case time to allocate space. If we have less than
- * this amount of free space, most ZPL operations (e.g. write, create) will
- * return ENOSPC. The ZIL metaslabs (spa_embedded_log_class) are also part of
- * this 3.2% of space which can't be consumed by normal writes; the slop space
- * "proper" (spa_get_slop_space()) is decreased by the embedded log space.
+ * the pool to be consumed (bounded by spa_max_slop). This ensures that we
+ * don't run the pool completely out of space, due to unaccounted changes (e.g.
+ * to the MOS). It also limits the worst-case time to allocate space. If we
+ * have less than this amount of free space, most ZPL operations (e.g. write,
+ * create) will return ENOSPC. The ZIL metaslabs (spa_embedded_log_class) are
+ * also part of this 3.2% of space which can't be consumed by normal writes;
+ * the slop space "proper" (spa_get_slop_space()) is decreased by the embedded
+ * log space.
  *
  * Certain operations (e.g. file removal, most administrative actions) can
  * use half the slop space. They will only return ENOSPC if less than half
@@ -376,10 +377,15 @@ int spa_asize_inflation = 24;
  * 3.2%, in an effort to have it be at least spa_min_slop (128MB),
  * but we never allow it to be more than half the pool size.
  *
+ * Further, on very large pools, the slop space will be smaller than
+ * 3.2%, to avoid reserving much more space than we actually need; bounded
+ * by spa_max_slop (128GB).
+ *
  * See also the comments in zfs_space_check_t.
  */
 int spa_slop_shift = 5;
-uint64_t spa_min_slop = 128 * 1024 * 1024;
+uint64_t spa_min_slop = 128ULL * 1024 * 1024;
+uint64_t spa_max_slop = 128ULL * 1024 * 1024 * 1024;
 int spa_allocators = 4;
 
 
@@ -1781,7 +1787,8 @@ spa_get_worst_case_asize(spa_t *spa, uint64_t lsize)
 /*
  * Return the amount of slop space in bytes. It is typically 1/32 of the pool
  * (3.2%), minus the embedded log space. On very small pools, it may be
- * slightly larger than this. The embedded log space is not included in
+ * slightly larger than this. On very large pools, it will be capped to
+ * the value of spa_max_slop. The embedded log space is not included in
  * spa_dspace. By subtracting it, the usable space (per "zfs list") is a
  * constant 97% of the total space, regardless of metaslab size (assuming the
  * default spa_slop_shift=5 and a non-tiny pool).
@@ -1792,7 +1799,7 @@ uint64_t
 spa_get_slop_space(spa_t *spa)
 {
 	uint64_t space = spa_get_dspace(spa);
-	uint64_t slop = space >> spa_slop_shift;
+	uint64_t slop = MIN(space >> spa_slop_shift, spa_max_slop);
 
 	/*
 	 * Subtract the embedded log space, but no more than half the (3.2%)
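
For context, the effective calculation after this patch can be modeled with a
small standalone sketch. This is not the actual spa_get_slop_space() (it omits
the embedded-log subtraction visible in the hunk above), and the helper name
slop_space() plus the sample pool sizes are invented purely for illustration:

/* slop_sketch.c - illustrative model of the bounded slop calculation. */
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

#define	MIN(a, b)	((a) < (b) ? (a) : (b))
#define	MAX(a, b)	((a) > (b) ? (a) : (b))

static int	spa_slop_shift = 5;			/* 1/32 ~= 3.2% */
static uint64_t	spa_min_slop = 128ULL * 1024 * 1024;	/* 128M floor */
static uint64_t	spa_max_slop = 128ULL * 1024 * 1024 * 1024; /* 128G cap */

/* Effective slop for a pool with "space" bytes of deflated capacity. */
static uint64_t
slop_space(uint64_t space)
{
	/* Typically 1/32 of the pool, now capped at spa_max_slop ... */
	uint64_t slop = MIN(space >> spa_slop_shift, spa_max_slop);

	/* ... but at least spa_min_slop, and never more than half the pool. */
	return (MAX(slop, MIN(space >> 1, spa_min_slop)));
}

int
main(void)
{
	const uint64_t GiB = 1ULL << 30;
	uint64_t sizes[] = { 1 * GiB, 100 * GiB, 1024 * GiB, 102400 * GiB };

	for (size_t i = 0; i < sizeof (sizes) / sizeof (sizes[0]); i++) {
		printf("pool %8" PRIu64 " GiB -> slop %6" PRIu64 " MiB\n",
		    sizes[i] / GiB, slop_space(sizes[i]) / (1024 * 1024));
	}
	return (0);
}

Built with a plain C compiler, this should print the 128 MiB floor for a
1 GiB pool, roughly 3.2% for the mid-sized pools, and a constant 128 GiB
(131072 MiB) once the new spa_max_slop cap takes effect.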