Diffstat (limited to 'module/zfs/spa_misc.c')
-rw-r--r--  module/zfs/spa_misc.c  67
1 file changed, 47 insertions(+), 20 deletions(-)
diff --git a/module/zfs/spa_misc.c b/module/zfs/spa_misc.c
index f49be8eec..b4c73f58d 100644
--- a/module/zfs/spa_misc.c
+++ b/module/zfs/spa_misc.c
@@ -349,9 +349,11 @@ int spa_asize_inflation = 24;
* Normally, we don't allow the last 3.2% (1/(2^spa_slop_shift)) of space in
* the pool to be consumed. This ensures that we don't run the pool
* completely out of space, due to unaccounted changes (e.g. to the MOS).
- * It also limits the worst-case time to allocate space. If we have
- * less than this amount of free space, most ZPL operations (e.g. write,
- * create) will return ENOSPC.
+ * It also limits the worst-case time to allocate space. If we have less than
+ * this amount of free space, most ZPL operations (e.g. write, create) will
+ * return ENOSPC. The ZIL metaslabs (spa_embedded_log_class) are also part of
+ * this 3.2% of space which can't be consumed by normal writes; the slop space
+ * "proper" (spa_get_slop_space()) is decreased by the embedded log space.
*
* Certain operations (e.g. file removal, most administrative actions) can
* use half the slop space. They will only return ENOSPC if less than half
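As a worked example of the arithmetic this comment describes (a sketch with
illustrative values, not part of the change; spa_slop_shift defaults to 5):

	/* Illustrative only: spa_slop_shift = 5 means 1/32, i.e. ~3.2%. */
	uint64_t pool_size = 1ULL << 40;	/* hypothetical 1 TiB pool */
	uint64_t slop = pool_size >> 5;		/* 32 GiB held in reserve */
	uint64_t admin_floor = slop >> 1;	/* removals etc. proceed until
						   only 16 GiB remains free */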
@@ -1026,10 +1028,10 @@ spa_aux_activate(vdev_t *vd, avl_tree_t *avl)
/*
* Spares are tracked globally due to the following constraints:
*
- * - A spare may be part of multiple pools.
- * - A spare may be added to a pool even if it's actively in use within
+ * - A spare may be part of multiple pools.
+ * - A spare may be added to a pool even if it's actively in use within
* another pool.
- * - A spare in use in any pool can only be the source of a replacement if
+ * - A spare in use in any pool can only be the source of a replacement if
* the target is a spare in the same pool.
*
* We keep track of all spares on the system through the use of a reference
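For context, the reference-counted node this comment describes is spa_aux_t,
which (per spa_misc.c in this tree) looks roughly like the following; the
global AVL tree maps a device GUID to the number of pools referencing it:

	typedef struct spa_aux {
		uint64_t	aux_guid;	/* device GUID */
		uint64_t	aux_pool;	/* pool actively using it, if any */
		avl_node_t	aux_avl;	/* node in the global spares tree */
		int		aux_count;	/* pools referencing this device */
	} spa_aux_t;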
@@ -1236,6 +1238,7 @@ spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error, char *tag)
*/
ASSERT(metaslab_class_validate(spa_normal_class(spa)) == 0);
ASSERT(metaslab_class_validate(spa_log_class(spa)) == 0);
+ ASSERT(metaslab_class_validate(spa_embedded_log_class(spa)) == 0);
ASSERT(metaslab_class_validate(spa_special_class(spa)) == 0);
ASSERT(metaslab_class_validate(spa_dedup_class(spa)) == 0);
@@ -1776,17 +1779,37 @@ spa_get_worst_case_asize(spa_t *spa, uint64_t lsize)
}
/*
- * Return the amount of slop space in bytes. It is 1/32 of the pool (3.2%),
- * or at least 128MB, unless that would cause it to be more than half the
- * pool size.
- *
- * See the comment above spa_slop_shift for details.
+ * Return the amount of slop space in bytes. It is typically 1/32 of the pool
+ * (3.2%), minus the embedded log space. On very small pools, it may be
+ * slightly larger than this. The embedded log space is not included in
+ * spa_dspace. By subtracting it, the usable space (per "zfs list") is a
+ * constant 97% of the total space, regardless of metaslab size (assuming the
+ * default spa_slop_shift=5 and a non-tiny pool).
+ *
+ * See the comment above spa_slop_shift for more details.
*/
uint64_t
spa_get_slop_space(spa_t *spa)
{
uint64_t space = spa_get_dspace(spa);
- return (MAX(space >> spa_slop_shift, MIN(space >> 1, spa_min_slop)));
+ uint64_t slop = space >> spa_slop_shift;
+
+ /*
+ * Subtract the embedded log space, but no more than half the (3.2%)
+ * unusable space. Note, the "no more than half" is only relevant if
+ * zfs_embedded_slog_min_ms >> spa_slop_shift < 2, which is not true by
+ * default.
+ */
+ uint64_t embedded_log =
+ metaslab_class_get_dspace(spa_embedded_log_class(spa));
+ slop -= MIN(embedded_log, slop >> 1);
+
+ /*
+ * Slop space should be at least spa_min_slop, but no more than half
+ * the entire pool.
+ */
+ slop = MAX(slop, MIN(space >> 1, spa_min_slop));
+ return (slop);
}
uint64_t
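To make the new computation concrete, a worked example with hypothetical
sizes (illustrative only, not part of the change):

	/* Hypothetical 10 TiB pool with 2 GiB of embedded log metaslabs. */
	uint64_t space = 10ULL << 40;		/* spa_get_dspace() */
	uint64_t slop = space >> 5;		/* 320 GiB gross slop (3.2%) */
	uint64_t embedded_log = 2ULL << 30;	/* embedded log class dspace */
	slop -= MIN(embedded_log, slop >> 1);	/* 318 GiB of slop "proper" */
	/*
	 * Since the embedded log space is also excluded from spa_dspace,
	 * "zfs list" still reports ~96.8% of the raw space as usable,
	 * the same fraction as a pool with no embedded log class.
	 */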
@@ -1873,6 +1896,12 @@ spa_log_class(spa_t *spa)
}
metaslab_class_t *
+spa_embedded_log_class(spa_t *spa)
+{
+ return (spa->spa_embedded_log_class);
+}
+
+metaslab_class_t *
spa_special_class(spa_t *spa)
{
return (spa->spa_special_class);
@@ -1891,12 +1920,10 @@ metaslab_class_t *
spa_preferred_class(spa_t *spa, uint64_t size, dmu_object_type_t objtype,
uint_t level, uint_t special_smallblk)
{
- if (DMU_OT_IS_ZIL(objtype)) {
- if (spa->spa_log_class->mc_groups != 0)
- return (spa_log_class(spa));
- else
- return (spa_normal_class(spa));
- }
+ /*
+ * ZIL allocations determine their class in zio_alloc_zil().
+ */
+ ASSERT(objtype != DMU_OT_INTENT_LOG);
boolean_t has_special_class = spa->spa_special_class->mc_groups != 0;
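The ZIL class selection removed above now happens at allocation time. A
simplified sketch of the fallback order zio_alloc_zil() applies after this
change (see zio.c for the actual allocation and retry logic):

	/* Prefer a dedicated slog; otherwise use the embedded log
	 * metaslabs set aside on the normal vdevs. */
	metaslab_class_t *mc;
	if (spa_log_class(spa)->mc_groups != 0)
		mc = spa_log_class(spa);
	else
		mc = spa_embedded_log_class(spa);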
@@ -2432,9 +2459,9 @@ spa_fini(void)
}
/*
- * Return whether this pool has slogs. No locking needed.
+ * Return whether this pool has a dedicated slog device. No locking needed.
* It's not a problem if the wrong answer is returned as it's only for
- * performance and not correctness
+ * performance and not correctness.
*/
boolean_t
spa_has_slogs(spa_t *spa)