Diffstat (limited to 'module')
-rw-r--r--  module/zcommon/zpool_prop.c  |    3
-rw-r--r--  module/zfs/Makefile.in       |    1
-rw-r--r--  module/zfs/dmu.c             |    2
-rw-r--r--  module/zfs/metaslab.c        |  141
-rw-r--r--  module/zfs/spa.c             |  206
-rw-r--r--  module/zfs/spa_misc.c        |   21
-rw-r--r--  module/zfs/spa_stats.c       |  101
-rw-r--r--  module/zfs/txg.c             |    8
-rw-r--r--  module/zfs/vdev.c            |  106
-rw-r--r--  module/zfs/vdev_disk.c       |   29
-rw-r--r--  module/zfs/vdev_file.c       |   35
-rw-r--r--  module/zfs/vdev_initialize.c |  145
-rw-r--r--  module/zfs/vdev_label.c      |   18
-rw-r--r--  module/zfs/vdev_queue.c      |   67
-rw-r--r--  module/zfs/vdev_raidz.c      |    2
-rw-r--r--  module/zfs/vdev_removal.c    |   17
-rw-r--r--  module/zfs/vdev_trim.c       | 1460
-rw-r--r--  module/zfs/zfs_ioctl.c       |   95
-rw-r--r--  module/zfs/zfs_sysfs.c       |    3
-rw-r--r--  module/zfs/zio.c             |   39
20 files changed, 2284 insertions, 215 deletions
diff --git a/module/zcommon/zpool_prop.c b/module/zcommon/zpool_prop.c
index 2d5777937..ac1c42b3f 100644
--- a/module/zcommon/zpool_prop.c
+++ b/module/zcommon/zpool_prop.c
@@ -130,6 +130,9 @@ zpool_prop_init(void)
zprop_register_index(ZPOOL_PROP_FAILUREMODE, "failmode",
ZIO_FAILURE_MODE_WAIT, PROP_DEFAULT, ZFS_TYPE_POOL,
"wait | continue | panic", "FAILMODE", failuremode_table);
+ zprop_register_index(ZPOOL_PROP_AUTOTRIM, "autotrim",
+ SPA_AUTOTRIM_OFF, PROP_DEFAULT, ZFS_TYPE_POOL,
+ "on | off", "AUTOTRIM", boolean_table);
/* hidden properties */
zprop_register_hidden(ZPOOL_PROP_NAME, "name", PROP_TYPE_STRING,
diff --git a/module/zfs/Makefile.in b/module/zfs/Makefile.in
index 193bdc510..b2460f0d6 100644
--- a/module/zfs/Makefile.in
+++ b/module/zfs/Makefile.in
@@ -99,6 +99,7 @@ $(MODULE)-objs += vdev_raidz_math.o
$(MODULE)-objs += vdev_raidz_math_scalar.o
$(MODULE)-objs += vdev_removal.o
$(MODULE)-objs += vdev_root.o
+$(MODULE)-objs += vdev_trim.o
$(MODULE)-objs += zap.o
$(MODULE)-objs += zap_leaf.o
$(MODULE)-objs += zap_micro.o
diff --git a/module/zfs/dmu.c b/module/zfs/dmu.c
index 219703231..18328042c 100644
--- a/module/zfs/dmu.c
+++ b/module/zfs/dmu.c
@@ -842,7 +842,7 @@ dmu_free_long_range_impl(objset_t *os, dnode_t *dn, uint64_t offset,
if (dirty_frees_threshold != 0 &&
long_free_dirty_all_txgs >= dirty_frees_threshold) {
DMU_TX_STAT_BUMP(dmu_tx_dirty_frees_delay);
- txg_wait_open(dp, 0);
+ txg_wait_open(dp, 0, B_TRUE);
continue;
}
diff --git a/module/zfs/metaslab.c b/module/zfs/metaslab.c
index 8380897a9..06d8383f0 100644
--- a/module/zfs/metaslab.c
+++ b/module/zfs/metaslab.c
@@ -181,7 +181,6 @@ int metaslab_lba_weighting_enabled = B_TRUE;
*/
int metaslab_bias_enabled = B_TRUE;
-
/*
* Enable/disable remapping of indirect DVAs to their concrete vdevs.
*/
@@ -219,6 +218,12 @@ boolean_t metaslab_trace_enabled = B_TRUE;
uint64_t metaslab_trace_max_entries = 5000;
#endif
+/*
+ * Maximum number of metaslabs per group that can be disabled
+ * simultaneously.
+ */
+int max_disabled_ms = 3;
+
static uint64_t metaslab_weight(metaslab_t *);
static void metaslab_set_fragmentation(metaslab_t *);
static void metaslab_free_impl(vdev_t *, uint64_t, uint64_t, boolean_t);
@@ -652,8 +657,8 @@ metaslab_group_create(metaslab_class_t *mc, vdev_t *vd, int allocators)
mg = kmem_zalloc(sizeof (metaslab_group_t), KM_SLEEP);
mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
- mutex_init(&mg->mg_ms_initialize_lock, NULL, MUTEX_DEFAULT, NULL);
- cv_init(&mg->mg_ms_initialize_cv, NULL, CV_DEFAULT, NULL);
+ mutex_init(&mg->mg_ms_disabled_lock, NULL, MUTEX_DEFAULT, NULL);
+ cv_init(&mg->mg_ms_disabled_cv, NULL, CV_DEFAULT, NULL);
mg->mg_primaries = kmem_zalloc(allocators * sizeof (metaslab_t *),
KM_SLEEP);
mg->mg_secondaries = kmem_zalloc(allocators * sizeof (metaslab_t *),
@@ -700,8 +705,8 @@ metaslab_group_destroy(metaslab_group_t *mg)
kmem_free(mg->mg_secondaries, mg->mg_allocators *
sizeof (metaslab_t *));
mutex_destroy(&mg->mg_lock);
- mutex_destroy(&mg->mg_ms_initialize_lock);
- cv_destroy(&mg->mg_ms_initialize_cv);
+ mutex_destroy(&mg->mg_ms_disabled_lock);
+ cv_destroy(&mg->mg_ms_disabled_cv);
for (int i = 0; i < mg->mg_allocators; i++) {
zfs_refcount_destroy(&mg->mg_alloc_queue_depth[i]);
@@ -1846,8 +1851,10 @@ metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object, uint64_t txg,
*/
ms->ms_allocatable = range_tree_create_impl(&rt_avl_ops,
&ms->ms_allocatable_by_size, metaslab_rangesize_compare, 0);
- metaslab_group_add(mg, ms);
+ ms->ms_trim = range_tree_create(NULL, NULL);
+
+ metaslab_group_add(mg, ms);
metaslab_set_fragmentation(ms);
/*
@@ -1921,6 +1928,9 @@ metaslab_fini(metaslab_t *msp)
for (int t = 0; t < TXG_SIZE; t++)
ASSERT(!txg_list_member(&vd->vdev_ms_list, msp, t));
+ range_tree_vacate(msp->ms_trim, NULL, NULL);
+ range_tree_destroy(msp->ms_trim);
+
mutex_exit(&msp->ms_lock);
cv_destroy(&msp->ms_load_cv);
mutex_destroy(&msp->ms_lock);
@@ -2727,6 +2737,7 @@ metaslab_sync(metaslab_t *msp, uint64_t txg)
ASSERT3P(msp->ms_freeing, !=, NULL);
ASSERT3P(msp->ms_freed, !=, NULL);
ASSERT3P(msp->ms_checkpointing, !=, NULL);
+ ASSERT3P(msp->ms_trim, !=, NULL);
/*
* Normally, we don't want to process a metaslab if there are no
@@ -3000,6 +3011,24 @@ metaslab_sync_done(metaslab_t *msp, uint64_t txg)
metaslab_load_wait(msp);
/*
+ * When auto-trimming is enabled, free ranges which are added to
+ * ms_allocatable are also added to ms_trim. The ms_trim tree is
+ * periodically consumed by the vdev_autotrim_thread() which issues
+ * trims for all ranges and then vacates the tree. The ms_trim tree
+ * can be discarded at any time with the sole consequence of recent
+ * frees not being trimmed.
+ */
+ if (spa_get_autotrim(spa) == SPA_AUTOTRIM_ON) {
+ range_tree_walk(*defer_tree, range_tree_add, msp->ms_trim);
+ if (!defer_allowed) {
+ range_tree_walk(msp->ms_freed, range_tree_add,
+ msp->ms_trim);
+ }
+ } else {
+ range_tree_vacate(msp->ms_trim, NULL, NULL);
+ }
+
+ /*
* Move the frees from the defer_tree back to the free
* range tree (if it's loaded). Swap the freed_tree and
* the defer_tree -- this is safe to do because we've
@@ -3047,7 +3076,7 @@ metaslab_sync_done(metaslab_t *msp, uint64_t txg)
* from it in 'metaslab_unload_delay' txgs, then unload it.
*/
if (msp->ms_loaded &&
- msp->ms_initializing == 0 &&
+ msp->ms_disabled == 0 &&
msp->ms_selected_txg + metaslab_unload_delay < txg) {
for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
@@ -3330,7 +3359,7 @@ metaslab_block_alloc(metaslab_t *msp, uint64_t size, uint64_t txg)
metaslab_class_t *mc = msp->ms_group->mg_class;
VERIFY(!msp->ms_condensing);
- VERIFY0(msp->ms_initializing);
+ VERIFY0(msp->ms_disabled);
start = mc->mc_ops->msop_alloc(msp, size);
if (start != -1ULL) {
@@ -3341,6 +3370,7 @@ metaslab_block_alloc(metaslab_t *msp, uint64_t size, uint64_t txg)
VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
VERIFY3U(range_tree_space(rt) - size, <=, msp->ms_size);
range_tree_remove(rt, start, size);
+ range_tree_clear(msp->ms_trim, start, size);
if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK]))
vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);
@@ -3391,10 +3421,10 @@ find_valid_metaslab(metaslab_group_t *mg, uint64_t activation_weight,
}
/*
- * If the selected metaslab is condensing or being
- * initialized, skip it.
+ * If the selected metaslab is condensing or disabled,
+ * skip it.
*/
- if (msp->ms_condensing || msp->ms_initializing > 0)
+ if (msp->ms_condensing || msp->ms_disabled > 0)
continue;
*was_active = msp->ms_allocator != -1;
@@ -3566,9 +3596,9 @@ metaslab_group_alloc_normal(metaslab_group_t *mg, zio_alloc_list_t *zal,
~METASLAB_ACTIVE_MASK);
mutex_exit(&msp->ms_lock);
continue;
- } else if (msp->ms_initializing > 0) {
+ } else if (msp->ms_disabled > 0) {
metaslab_trace_add(zal, mg, msp, asize, d,
- TRACE_INITIALIZING, allocator);
+ TRACE_DISABLED, allocator);
metaslab_passivate(msp, msp->ms_weight &
~METASLAB_ACTIVE_MASK);
mutex_exit(&msp->ms_lock);
@@ -4294,6 +4324,7 @@ metaslab_claim_concrete(vdev_t *vd, uint64_t offset, uint64_t size,
VERIFY3U(range_tree_space(msp->ms_allocatable) - size, <=,
msp->ms_size);
range_tree_remove(msp->ms_allocatable, offset, size);
+ range_tree_clear(msp->ms_trim, offset, size);
if (spa_writeable(spa)) { /* don't dirty if we're zdb(1M) */
if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK]))
@@ -4606,6 +4637,7 @@ metaslab_check_free_impl(vdev_t *vd, uint64_t offset, uint64_t size)
offset, size);
}
+ range_tree_verify_not_present(msp->ms_trim, offset, size);
range_tree_verify_not_present(msp->ms_freeing, offset, size);
range_tree_verify_not_present(msp->ms_checkpointing, offset, size);
range_tree_verify_not_present(msp->ms_freed, offset, size);
@@ -4637,6 +4669,89 @@ metaslab_check_free(spa_t *spa, const blkptr_t *bp)
spa_config_exit(spa, SCL_VDEV, FTAG);
}
+static void
+metaslab_group_disable_wait(metaslab_group_t *mg)
+{
+ ASSERT(MUTEX_HELD(&mg->mg_ms_disabled_lock));
+ while (mg->mg_disabled_updating) {
+ cv_wait(&mg->mg_ms_disabled_cv, &mg->mg_ms_disabled_lock);
+ }
+}
+
+static void
+metaslab_group_disabled_increment(metaslab_group_t *mg)
+{
+ ASSERT(MUTEX_HELD(&mg->mg_ms_disabled_lock));
+ ASSERT(mg->mg_disabled_updating);
+
+ while (mg->mg_ms_disabled >= max_disabled_ms) {
+ cv_wait(&mg->mg_ms_disabled_cv, &mg->mg_ms_disabled_lock);
+ }
+ mg->mg_ms_disabled++;
+ ASSERT3U(mg->mg_ms_disabled, <=, max_disabled_ms);
+}
+
+/*
+ * Mark the metaslab as disabled to prevent any allocations on this metaslab.
+ * We must also track how many metaslabs are currently disabled within a
+ * metaslab group and limit them to prevent allocation failures from
+ * occurring because all metaslabs are disabled.
+ */
+void
+metaslab_disable(metaslab_t *msp)
+{
+ ASSERT(!MUTEX_HELD(&msp->ms_lock));
+ metaslab_group_t *mg = msp->ms_group;
+
+ mutex_enter(&mg->mg_ms_disabled_lock);
+
+ /*
+ * To keep an accurate count of how many threads have disabled
+ * a specific metaslab group, we only allow one thread to mark
+ * the metaslab group at a time. This ensures that the value of
+ * ms_disabled will be accurate when we decide to mark a metaslab
+ * group as disabled. To do this we force all other threads
+ * to wait until the metaslab group's mg_disabled_updating flag is no
+ * longer set.
+ */
+ metaslab_group_disable_wait(mg);
+ mg->mg_disabled_updating = B_TRUE;
+ if (msp->ms_disabled == 0) {
+ metaslab_group_disabled_increment(mg);
+ }
+ mutex_enter(&msp->ms_lock);
+ msp->ms_disabled++;
+ mutex_exit(&msp->ms_lock);
+
+ mg->mg_disabled_updating = B_FALSE;
+ cv_broadcast(&mg->mg_ms_disabled_cv);
+ mutex_exit(&mg->mg_ms_disabled_lock);
+}
+
+void
+metaslab_enable(metaslab_t *msp, boolean_t sync)
+{
+ metaslab_group_t *mg = msp->ms_group;
+ spa_t *spa = mg->mg_vd->vdev_spa;
+
+ /*
+ * Wait for the outstanding IO to be synced to prevent newly
+ * allocated blocks from being overwritten. This is used by
+ * initialize and TRIM, which are modifying unallocated space.
+ */
+ if (sync)
+ txg_wait_synced(spa_get_dsl(spa), 0);
+
+ mutex_enter(&mg->mg_ms_disabled_lock);
+ mutex_enter(&msp->ms_lock);
+ if (--msp->ms_disabled == 0) {
+ mg->mg_ms_disabled--;
+ cv_broadcast(&mg->mg_ms_disabled_cv);
+ }
+ mutex_exit(&msp->ms_lock);
+ mutex_exit(&mg->mg_ms_disabled_lock);
+}
+
#if defined(_KERNEL)
/* BEGIN CSTYLED */
module_param(metaslab_aliquot, ulong, 0644);
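
For orientation, the disable/enable primitives added above are consumed in a disable, load, work, enable pattern (the vdev_initialize.c hunk later in this diff switches to exactly this). The sketch below is illustrative only; the helper name is hypothetical and it assumes the in-tree ZFS headers.

/*
 * Illustrative sketch only (hypothetical helper, not part of this
 * patch): at most max_disabled_ms metaslabs per group may be disabled
 * at once, so metaslab_disable() may block until a slot frees up.
 */
static void
example_work_on_unallocated_space(metaslab_t *msp)
{
        metaslab_disable(msp);          /* block new allocations on msp */

        mutex_enter(&msp->ms_lock);
        VERIFY0(metaslab_load(msp));
        /* ... walk ms_allocatable / ms_trim and queue the I/O ... */
        mutex_exit(&msp->ms_lock);

        /*
         * B_TRUE waits for the outstanding I/O to be synced before
         * allocations are allowed again, as initialize and manual TRIM
         * require when modifying unallocated space.
         */
        metaslab_enable(msp, B_TRUE);
}
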
diff --git a/module/zfs/spa.c b/module/zfs/spa.c
index 71744139e..5392e3547 100644
--- a/module/zfs/spa.c
+++ b/module/zfs/spa.c
@@ -57,6 +57,7 @@
#include <sys/vdev_indirect_mapping.h>
#include <sys/vdev_indirect_births.h>
#include <sys/vdev_initialize.h>
+#include <sys/vdev_trim.h>
#include <sys/vdev_disk.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
@@ -132,7 +133,7 @@ static const char *const zio_taskq_types[ZIO_TASKQ_TYPES] = {
* number of threads assigned to their taskqs using the ZTI_N(#) or ZTI_ONE
* macros. Other operations process a large amount of data; the ZTI_BATCH
* macro causes us to create a taskq oriented for throughput. Some operations
- * are so high frequency and short-lived that the taskq itself can become a a
+ * are so high frequency and short-lived that the taskq itself can become a
* point of lock contention. The ZTI_P(#, #) macro indicates that we need an
* additional degree of parallelism specified by the number of threads per-
* taskq and the number of taskqs; when dispatching an event in this case, the
@@ -150,6 +151,7 @@ const zio_taskq_info_t zio_taskqs[ZIO_TYPES][ZIO_TASKQ_TYPES] = {
{ ZTI_P(12, 8), ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* FREE */
{ ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* CLAIM */
{ ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* IOCTL */
+ { ZTI_N(4), ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* TRIM */
};
static void spa_sync_version(void *arg, dmu_tx_t *tx);
@@ -554,6 +556,7 @@ spa_prop_validate(spa_t *spa, nvlist_t *props)
case ZPOOL_PROP_AUTOREPLACE:
case ZPOOL_PROP_LISTSNAPS:
case ZPOOL_PROP_AUTOEXPAND:
+ case ZPOOL_PROP_AUTOTRIM:
error = nvpair_value_uint64(elem, &intval);
if (!error && intval > 1)
error = SET_ERROR(EINVAL);
@@ -1442,8 +1445,10 @@ spa_unload(spa_t *spa)
spa_async_suspend(spa);
if (spa->spa_root_vdev) {
- vdev_initialize_stop_all(spa->spa_root_vdev,
- VDEV_INITIALIZE_ACTIVE);
+ vdev_t *root_vdev = spa->spa_root_vdev;
+ vdev_initialize_stop_all(root_vdev, VDEV_INITIALIZE_ACTIVE);
+ vdev_trim_stop_all(root_vdev, VDEV_TRIM_ACTIVE);
+ vdev_autotrim_stop_all(spa);
}
/*
@@ -3585,7 +3590,7 @@ spa_ld_get_props(spa_t *spa)
spa_prop_find(spa, ZPOOL_PROP_MULTIHOST, &spa->spa_multihost);
spa_prop_find(spa, ZPOOL_PROP_DEDUPDITTO,
&spa->spa_dedup_ditto);
-
+ spa_prop_find(spa, ZPOOL_PROP_AUTOTRIM, &spa->spa_autotrim);
spa->spa_autoreplace = (autoreplace != 0);
}
@@ -4336,6 +4341,8 @@ spa_load_impl(spa_t *spa, spa_import_type_t type, char **ereport)
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
vdev_initialize_restart(spa->spa_root_vdev);
+ vdev_trim_restart(spa->spa_root_vdev);
+ vdev_autotrim_restart(spa);
spa_config_exit(spa, SCL_CONFIG, FTAG);
}
@@ -5338,6 +5345,7 @@ spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE);
spa->spa_autoexpand = zpool_prop_default_numeric(ZPOOL_PROP_AUTOEXPAND);
spa->spa_multihost = zpool_prop_default_numeric(ZPOOL_PROP_MULTIHOST);
+ spa->spa_autotrim = zpool_prop_default_numeric(ZPOOL_PROP_AUTOTRIM);
if (props != NULL) {
spa_configfile_set(spa, props, B_FALSE);
@@ -5746,14 +5754,16 @@ spa_export_common(char *pool, int new_state, nvlist_t **oldconfig,
/*
* We're about to export or destroy this pool. Make sure
- * we stop all initializtion activity here before we
- * set the spa_final_txg. This will ensure that all
+ * we stop all initialization and trim activity here before
+ * we set the spa_final_txg. This will ensure that all
* dirty data resulting from the initialization is
* committed to disk before we unload the pool.
*/
if (spa->spa_root_vdev != NULL) {
- vdev_initialize_stop_all(spa->spa_root_vdev,
- VDEV_INITIALIZE_ACTIVE);
+ vdev_t *rvd = spa->spa_root_vdev;
+ vdev_initialize_stop_all(rvd, VDEV_INITIALIZE_ACTIVE);
+ vdev_trim_stop_all(rvd, VDEV_TRIM_ACTIVE);
+ vdev_autotrim_stop_all(spa);
}
/*
@@ -6376,7 +6386,6 @@ spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done)
vdev_remove_parent(cvd);
}
-
/*
* We don't set tvd until now because the parent we just removed
* may have been the previous top-level vdev.
@@ -6490,7 +6499,7 @@ spa_vdev_initialize_impl(spa_t *spa, uint64_t guid, uint64_t cmd_type,
* a previous initialization process which has completed but
* the thread is not exited.
*/
- if (cmd_type == POOL_INITIALIZE_DO &&
+ if (cmd_type == POOL_INITIALIZE_START &&
(vd->vdev_initialize_thread != NULL ||
vd->vdev_top->vdev_removing)) {
mutex_exit(&vd->vdev_initialize_lock);
@@ -6507,7 +6516,7 @@ spa_vdev_initialize_impl(spa_t *spa, uint64_t guid, uint64_t cmd_type,
}
switch (cmd_type) {
- case POOL_INITIALIZE_DO:
+ case POOL_INITIALIZE_START:
vdev_initialize(vd);
break;
case POOL_INITIALIZE_CANCEL:
@@ -6571,6 +6580,126 @@ spa_vdev_initialize(spa_t *spa, nvlist_t *nv, uint64_t cmd_type,
return (total_errors);
}
+static int
+spa_vdev_trim_impl(spa_t *spa, uint64_t guid, uint64_t cmd_type,
+ uint64_t rate, boolean_t partial, boolean_t secure, list_t *vd_list)
+{
+ ASSERT(MUTEX_HELD(&spa_namespace_lock));
+
+ spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
+
+ /* Look up vdev and ensure it's a leaf. */
+ vdev_t *vd = spa_lookup_by_guid(spa, guid, B_FALSE);
+ if (vd == NULL || vd->vdev_detached) {
+ spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
+ return (SET_ERROR(ENODEV));
+ } else if (!vd->vdev_ops->vdev_op_leaf || !vdev_is_concrete(vd)) {
+ spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
+ return (SET_ERROR(EINVAL));
+ } else if (!vdev_writeable(vd)) {
+ spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
+ return (SET_ERROR(EROFS));
+ } else if (!vd->vdev_has_trim) {
+ spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
+ return (SET_ERROR(EOPNOTSUPP));
+ } else if (secure && !vd->vdev_has_securetrim) {
+ spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
+ return (SET_ERROR(EOPNOTSUPP));
+ }
+ mutex_enter(&vd->vdev_trim_lock);
+ spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
+
+ /*
+ * When we activate a TRIM action we check to see if the
+ * vdev_trim_thread is NULL. We do this instead of using the
+ * vdev_trim_state since there might be a previous TRIM process
+ * which has completed but the thread has not yet exited.
+ */
+ if (cmd_type == POOL_TRIM_START &&
+ (vd->vdev_trim_thread != NULL || vd->vdev_top->vdev_removing)) {
+ mutex_exit(&vd->vdev_trim_lock);
+ return (SET_ERROR(EBUSY));
+ } else if (cmd_type == POOL_TRIM_CANCEL &&
+ (vd->vdev_trim_state != VDEV_TRIM_ACTIVE &&
+ vd->vdev_trim_state != VDEV_TRIM_SUSPENDED)) {
+ mutex_exit(&vd->vdev_trim_lock);
+ return (SET_ERROR(ESRCH));
+ } else if (cmd_type == POOL_TRIM_SUSPEND &&
+ vd->vdev_trim_state != VDEV_TRIM_ACTIVE) {
+ mutex_exit(&vd->vdev_trim_lock);
+ return (SET_ERROR(ESRCH));
+ }
+
+ switch (cmd_type) {
+ case POOL_TRIM_START:
+ vdev_trim(vd, rate, partial, secure);
+ break;
+ case POOL_TRIM_CANCEL:
+ vdev_trim_stop(vd, VDEV_TRIM_CANCELED, vd_list);
+ break;
+ case POOL_TRIM_SUSPEND:
+ vdev_trim_stop(vd, VDEV_TRIM_SUSPENDED, vd_list);
+ break;
+ default:
+ panic("invalid cmd_type %llu", (unsigned long long)cmd_type);
+ }
+ mutex_exit(&vd->vdev_trim_lock);
+
+ return (0);
+}
+
+/*
+ * Initiates a manual TRIM for the requested vdevs. This kicks off individual
+ * TRIM threads for each child vdev. These threads pass over all of the free
+ * space in the vdev's metaslabs and issue TRIM commands for that space.
+ */
+int
+spa_vdev_trim(spa_t *spa, nvlist_t *nv, uint64_t cmd_type, uint64_t rate,
+ boolean_t partial, boolean_t secure, nvlist_t *vdev_errlist)
+{
+ int total_errors = 0;
+ list_t vd_list;
+
+ list_create(&vd_list, sizeof (vdev_t),
+ offsetof(vdev_t, vdev_trim_node));
+
+ /*
+ * We hold the namespace lock through the whole function
+ * to prevent any changes to the pool while we're starting or
+ * stopping TRIM. The config and state locks are held so that
+ * we can properly assess the vdev state before we commit to
+ * the TRIM operation.
+ */
+ mutex_enter(&spa_namespace_lock);
+
+ for (nvpair_t *pair = nvlist_next_nvpair(nv, NULL);
+ pair != NULL; pair = nvlist_next_nvpair(nv, pair)) {
+ uint64_t vdev_guid = fnvpair_value_uint64(pair);
+
+ int error = spa_vdev_trim_impl(spa, vdev_guid, cmd_type,
+ rate, partial, secure, &vd_list);
+ if (error != 0) {
+ char guid_as_str[MAXNAMELEN];
+
+ (void) snprintf(guid_as_str, sizeof (guid_as_str),
+ "%llu", (unsigned long long)vdev_guid);
+ fnvlist_add_int64(vdev_errlist, guid_as_str, error);
+ total_errors++;
+ }
+ }
+
+ /* Wait for all TRIM threads to stop. */
+ vdev_trim_stop_wait(spa, &vd_list);
+
+ /* Sync out the TRIM state */
+ txg_wait_synced(spa->spa_dsl_pool, 0);
+ mutex_exit(&spa_namespace_lock);
+
+ list_destroy(&vd_list);
+
+ return (total_errors);
+}
+
/*
* Split a set of devices from their mirrors, and create a new pool from them.
*/
@@ -6780,24 +6909,36 @@ spa_vdev_split_mirror(spa_t *spa, char *newname, nvlist_t *config,
spa_async_suspend(newspa);
/*
- * Temporarily stop the initializing activity. We set the state to
- * ACTIVE so that we know to resume the initializing once the split
- * has completed.
+ * Temporarily stop the initializing and TRIM activity. We set the
+ * state to ACTIVE so that we know to resume initializing or TRIM
+ * once the split has completed.
*/
- list_t vd_list;
- list_create(&vd_list, sizeof (vdev_t),
+ list_t vd_initialize_list;
+ list_create(&vd_initialize_list, sizeof (vdev_t),
offsetof(vdev_t, vdev_initialize_node));
+ list_t vd_trim_list;
+ list_create(&vd_trim_list, sizeof (vdev_t),
+ offsetof(vdev_t, vdev_trim_node));
+
for (c = 0; c < children; c++) {
if (vml[c] != NULL) {
mutex_enter(&vml[c]->vdev_initialize_lock);
- vdev_initialize_stop(vml[c], VDEV_INITIALIZE_ACTIVE,
- &vd_list);
+ vdev_initialize_stop(vml[c],
+ VDEV_INITIALIZE_ACTIVE, &vd_initialize_list);
mutex_exit(&vml[c]->vdev_initialize_lock);
+
+ mutex_enter(&vml[c]->vdev_trim_lock);
+ vdev_trim_stop(vml[c], VDEV_TRIM_ACTIVE, &vd_trim_list);
+ mutex_exit(&vml[c]->vdev_trim_lock);
}
}
- vdev_initialize_stop_wait(spa, &vd_list);
- list_destroy(&vd_list);
+
+ vdev_initialize_stop_wait(spa, &vd_initialize_list);
+ vdev_trim_stop_wait(spa, &vd_trim_list);
+
+ list_destroy(&vd_initialize_list);
+ list_destroy(&vd_trim_list);
newspa->spa_config_source = SPA_CONFIG_SRC_SPLIT;
@@ -6899,8 +7040,10 @@ out:
vml[c]->vdev_offline = B_FALSE;
}
- /* restart initializing disks as necessary */
+ /* restart initializing or trimming disks as necessary */
spa_async_request(spa, SPA_ASYNC_INITIALIZE_RESTART);
+ spa_async_request(spa, SPA_ASYNC_TRIM_RESTART);
+ spa_async_request(spa, SPA_ASYNC_AUTOTRIM_RESTART);
vdev_reopen(spa->spa_root_vdev);
@@ -7283,6 +7426,22 @@ spa_async_thread(void *arg)
mutex_exit(&spa_namespace_lock);
}
+ if (tasks & SPA_ASYNC_TRIM_RESTART) {
+ mutex_enter(&spa_namespace_lock);
+ spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
+ vdev_trim_restart(spa->spa_root_vdev);
+ spa_config_exit(spa, SCL_CONFIG, FTAG);
+ mutex_exit(&spa_namespace_lock);
+ }
+
+ if (tasks & SPA_ASYNC_AUTOTRIM_RESTART) {
+ mutex_enter(&spa_namespace_lock);
+ spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
+ vdev_autotrim_restart(spa);
+ spa_config_exit(spa, SCL_CONFIG, FTAG);
+ mutex_exit(&spa_namespace_lock);
+ }
+
/*
* Let the world know that we're done.
*/
@@ -7782,6 +7941,11 @@ spa_sync_props(void *arg, dmu_tx_t *tx)
case ZPOOL_PROP_FAILUREMODE:
spa->spa_failmode = intval;
break;
+ case ZPOOL_PROP_AUTOTRIM:
+ spa->spa_autotrim = intval;
+ spa_async_request(spa,
+ SPA_ASYNC_AUTOTRIM_RESTART);
+ break;
case ZPOOL_PROP_AUTOEXPAND:
spa->spa_autoexpand = intval;
if (tx->tx_txg != TXG_INITIAL)
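
The ioctl plumbing that drives this new entry point lives in zfs_ioctl.c, which is part of this change but not shown on this page. As a rough illustration of the calling convention, the hypothetical helper below builds a one-entry guid nvlist and starts a manual TRIM; the nvpair name is illustrative, since only the uint64 guid values are consumed.

/*
 * Illustrative sketch only (not part of this patch): start an
 * unthrottled (rate == 0), full-device (partial == B_FALSE),
 * non-secure manual TRIM on one leaf vdev.  Per-vdev failures are
 * collected in vdev_errlist, keyed by the vdev guid.
 */
static int
example_start_manual_trim(spa_t *spa, uint64_t vdev_guid)
{
        nvlist_t *vds = fnvlist_alloc();
        nvlist_t *vdev_errlist = fnvlist_alloc();
        int total_errors;

        fnvlist_add_uint64(vds, "vdev", vdev_guid);

        total_errors = spa_vdev_trim(spa, vds, POOL_TRIM_START,
            0, B_FALSE, B_FALSE, vdev_errlist);

        fnvlist_free(vdev_errlist);
        fnvlist_free(vds);

        return (total_errors);
}
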
diff --git a/module/zfs/spa_misc.c b/module/zfs/spa_misc.c
index 71221b21b..b3a4a7b12 100644
--- a/module/zfs/spa_misc.c
+++ b/module/zfs/spa_misc.c
@@ -39,6 +39,7 @@
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_initialize.h>
+#include <sys/vdev_trim.h>
#include <sys/vdev_file.h>
#include <sys/vdev_raidz.h>
#include <sys/metaslab.h>
@@ -1128,6 +1129,9 @@ spa_vdev_enter(spa_t *spa)
{
mutex_enter(&spa->spa_vdev_top_lock);
mutex_enter(&spa_namespace_lock);
+
+ vdev_autotrim_stop_all(spa);
+
return (spa_vdev_config_enter(spa));
}
@@ -1204,8 +1208,17 @@ spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error, char *tag)
vdev_initialize_stop(vd, VDEV_INITIALIZE_CANCELED,
NULL);
mutex_exit(&vd->vdev_initialize_lock);
+
+ mutex_enter(&vd->vdev_trim_lock);
+ vdev_trim_stop(vd, VDEV_TRIM_CANCELED, NULL);
+ mutex_exit(&vd->vdev_trim_lock);
}
+ /*
+ * The vdev may be both a leaf and top-level device.
+ */
+ vdev_autotrim_stop_wait(vd);
+
spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
vdev_free(vd);
spa_config_exit(spa, SCL_ALL, spa);
@@ -1227,6 +1240,8 @@ spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error, char *tag)
int
spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
{
+ vdev_autotrim_restart(spa);
+
spa_vdev_config_exit(spa, vd, txg, error, FTAG);
mutex_exit(&spa_namespace_lock);
mutex_exit(&spa->spa_vdev_top_lock);
@@ -1923,6 +1938,12 @@ spa_deadman_synctime(spa_t *spa)
return (spa->spa_deadman_synctime);
}
+spa_autotrim_t
+spa_get_autotrim(spa_t *spa)
+{
+ return (spa->spa_autotrim);
+}
+
uint64_t
spa_deadman_ziotime(spa_t *spa)
{
diff --git a/module/zfs/spa_stats.c b/module/zfs/spa_stats.c
index e01d2d198..3b51250c6 100644
--- a/module/zfs/spa_stats.c
+++ b/module/zfs/spa_stats.c
@@ -887,6 +887,105 @@ spa_health_destroy(spa_t *spa)
mutex_destroy(&shk->lock);
}
+static spa_iostats_t spa_iostats_template = {
+ { "trim_extents_written", KSTAT_DATA_UINT64 },
+ { "trim_bytes_written", KSTAT_DATA_UINT64 },
+ { "trim_extents_skipped", KSTAT_DATA_UINT64 },
+ { "trim_bytes_skipped", KSTAT_DATA_UINT64 },
+ { "trim_extents_failed", KSTAT_DATA_UINT64 },
+ { "trim_bytes_failed", KSTAT_DATA_UINT64 },
+ { "autotrim_extents_written", KSTAT_DATA_UINT64 },
+ { "autotrim_bytes_written", KSTAT_DATA_UINT64 },
+ { "autotrim_extents_skipped", KSTAT_DATA_UINT64 },
+ { "autotrim_bytes_skipped", KSTAT_DATA_UINT64 },
+ { "autotrim_extents_failed", KSTAT_DATA_UINT64 },
+ { "autotrim_bytes_failed", KSTAT_DATA_UINT64 },
+};
+
+#define SPA_IOSTATS_ADD(stat, val) \
+ atomic_add_64(&iostats->stat.value.ui64, (val));
+
+void
+spa_iostats_trim_add(spa_t *spa, trim_type_t type,
+ uint64_t extents_written, uint64_t bytes_written,
+ uint64_t extents_skipped, uint64_t bytes_skipped,
+ uint64_t extents_failed, uint64_t bytes_failed)
+{
+ spa_history_kstat_t *shk = &spa->spa_stats.iostats;
+ kstat_t *ksp = shk->kstat;
+ spa_iostats_t *iostats;
+
+ if (ksp == NULL)
+ return;
+
+ iostats = ksp->ks_data;
+ if (type == TRIM_TYPE_MANUAL) {
+ SPA_IOSTATS_ADD(trim_extents_written, extents_written);
+ SPA_IOSTATS_ADD(trim_bytes_written, bytes_written);
+ SPA_IOSTATS_ADD(trim_extents_skipped, extents_skipped);
+ SPA_IOSTATS_ADD(trim_bytes_skipped, bytes_skipped);
+ SPA_IOSTATS_ADD(trim_extents_failed, extents_failed);
+ SPA_IOSTATS_ADD(trim_bytes_failed, bytes_failed);
+ } else {
+ SPA_IOSTATS_ADD(autotrim_extents_written, extents_written);
+ SPA_IOSTATS_ADD(autotrim_bytes_written, bytes_written);
+ SPA_IOSTATS_ADD(autotrim_extents_skipped, extents_skipped);
+ SPA_IOSTATS_ADD(autotrim_bytes_skipped, bytes_skipped);
+ SPA_IOSTATS_ADD(autotrim_extents_failed, extents_failed);
+ SPA_IOSTATS_ADD(autotrim_bytes_failed, bytes_failed);
+ }
+}
+
+int
+spa_iostats_update(kstat_t *ksp, int rw)
+{
+ if (rw == KSTAT_WRITE) {
+ memcpy(ksp->ks_data, &spa_iostats_template,
+ sizeof (spa_iostats_t));
+ }
+
+ return (0);
+}
+
+static void
+spa_iostats_init(spa_t *spa)
+{
+ spa_history_kstat_t *shk = &spa->spa_stats.iostats;
+
+ mutex_init(&shk->lock, NULL, MUTEX_DEFAULT, NULL);
+
+ char *name = kmem_asprintf("zfs/%s", spa_name(spa));
+ kstat_t *ksp = kstat_create(name, 0, "iostats", "misc",
+ KSTAT_TYPE_NAMED, sizeof (spa_iostats_t) / sizeof (kstat_named_t),
+ KSTAT_FLAG_VIRTUAL);
+
+ shk->kstat = ksp;
+ if (ksp) {
+ int size = sizeof (spa_iostats_t);
+ ksp->ks_lock = &shk->lock;
+ ksp->ks_private = spa;
+ ksp->ks_update = spa_iostats_update;
+ ksp->ks_data = kmem_alloc(size, KM_SLEEP);
+ memcpy(ksp->ks_data, &spa_iostats_template, size);
+ kstat_install(ksp);
+ }
+
+ strfree(name);
+}
+
+static void
+spa_iostats_destroy(spa_t *spa)
+{
+ spa_history_kstat_t *shk = &spa->spa_stats.iostats;
+ kstat_t *ksp = shk->kstat;
+ if (ksp) {
+ kmem_free(ksp->ks_data, sizeof (spa_iostats_t));
+ kstat_delete(ksp);
+ }
+
+ mutex_destroy(&shk->lock);
+}
+
void
spa_stats_init(spa_t *spa)
{
@@ -896,11 +995,13 @@ spa_stats_init(spa_t *spa)
spa_io_history_init(spa);
spa_mmp_history_init(spa);
spa_state_init(spa);
+ spa_iostats_init(spa);
}
void
spa_stats_destroy(spa_t *spa)
{
+ spa_iostats_destroy(spa);
spa_health_destroy(spa);
spa_tx_assign_destroy(spa);
spa_txg_history_destroy(spa);
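
As a hedged illustration of how the new accumulator is expected to be fed from the TRIM completion path, the hypothetical helper below records one successfully trimmed extent; the automatic TRIM counters are updated the same way by passing the non-manual trim_type_t value.

/*
 * Illustrative sketch only: account one successfully trimmed extent
 * against the manual TRIM counters.
 */
static void
example_account_trimmed_extent(spa_t *spa, uint64_t bytes)
{
        spa_iostats_trim_add(spa, TRIM_TYPE_MANUAL,
            1, bytes,   /* extents / bytes written */
            0, 0,       /* extents / bytes skipped */
            0, 0);      /* extents / bytes failed */
}
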
diff --git a/module/zfs/txg.c b/module/zfs/txg.c
index db0f60cd1..b3f895302 100644
--- a/module/zfs/txg.c
+++ b/module/zfs/txg.c
@@ -694,8 +694,12 @@ txg_wait_synced(dsl_pool_t *dp, uint64_t txg)
mutex_exit(&tx->tx_sync_lock);
}
+/*
+ * Wait for the specified open transaction group. Set should_quiesce
+ * when the current open txg should be quiesced immediately.
+ */
void
-txg_wait_open(dsl_pool_t *dp, uint64_t txg)
+txg_wait_open(dsl_pool_t *dp, uint64_t txg, boolean_t should_quiesce)
{
tx_state_t *tx = &dp->dp_tx;
@@ -705,7 +709,7 @@ txg_wait_open(dsl_pool_t *dp, uint64_t txg)
ASSERT3U(tx->tx_threads, ==, 2);
if (txg == 0)
txg = tx->tx_open_txg + 1;
- if (tx->tx_quiesce_txg_waiting < txg)
+ if (tx->tx_quiesce_txg_waiting < txg && should_quiesce)
tx->tx_quiesce_txg_waiting = txg;
dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
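
A hedged sketch of the two call patterns the new should_quiesce argument allows; the B_TRUE case mirrors the dmu.c hunk earlier in this diff, while the B_FALSE case is an assumption about how a background consumer such as the autotrim thread would use it.

/*
 * Illustrative sketch only; dp is the pool's dsl_pool_t.
 */
static void
example_txg_wait_open_patterns(dsl_pool_t *dp)
{
        /*
         * A throttled writer (see the dmu.c hunk earlier in this diff)
         * wants the currently open txg quiesced promptly, so it passes
         * B_TRUE and behaves exactly as the old two-argument call did.
         */
        txg_wait_open(dp, 0, B_TRUE);

        /*
         * A background consumer that only needs to observe the next txg
         * opening, without pushing the quiesce machinery forward, passes
         * B_FALSE (assumed here to be the pattern intended for the new
         * autotrim thread).
         */
        txg_wait_open(dp, 0, B_FALSE);
}
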
diff --git a/module/zfs/vdev.c b/module/zfs/vdev.c
index ae1c2bcec..085ae6873 100644
--- a/module/zfs/vdev.c
+++ b/module/zfs/vdev.c
@@ -51,6 +51,7 @@
#include <sys/dsl_scan.h>
#include <sys/abd.h>
#include <sys/vdev_initialize.h>
+#include <sys/vdev_trim.h>
#include <sys/zvol.h>
#include <sys/zfs_ratelimit.h>
@@ -543,6 +544,7 @@ vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
list_link_init(&vd->vdev_state_dirty_node);
list_link_init(&vd->vdev_initialize_node);
list_link_init(&vd->vdev_leaf_node);
+ list_link_init(&vd->vdev_trim_node);
mutex_init(&vd->vdev_dtl_lock, NULL, MUTEX_NOLOCKDEP, NULL);
mutex_init(&vd->vdev_stat_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&vd->vdev_probe_lock, NULL, MUTEX_DEFAULT, NULL);
@@ -551,6 +553,12 @@ vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
mutex_init(&vd->vdev_initialize_io_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&vd->vdev_initialize_cv, NULL, CV_DEFAULT, NULL);
cv_init(&vd->vdev_initialize_io_cv, NULL, CV_DEFAULT, NULL);
+ mutex_init(&vd->vdev_trim_lock, NULL, MUTEX_DEFAULT, NULL);
+ mutex_init(&vd->vdev_autotrim_lock, NULL, MUTEX_DEFAULT, NULL);
+ mutex_init(&vd->vdev_trim_io_lock, NULL, MUTEX_DEFAULT, NULL);
+ cv_init(&vd->vdev_trim_cv, NULL, CV_DEFAULT, NULL);
+ cv_init(&vd->vdev_autotrim_cv, NULL, CV_DEFAULT, NULL);
+ cv_init(&vd->vdev_trim_io_cv, NULL, CV_DEFAULT, NULL);
for (int t = 0; t < DTL_TYPES; t++) {
vd->vdev_dtl[t] = range_tree_create(NULL, NULL);
@@ -875,7 +883,10 @@ void
vdev_free(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
+
ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
+ ASSERT3P(vd->vdev_trim_thread, ==, NULL);
+ ASSERT3P(vd->vdev_autotrim_thread, ==, NULL);
/*
* Scan queues are normally destroyed at the end of a scan. If the
@@ -906,7 +917,6 @@ vdev_free(vdev_t *vd)
ASSERT(vd->vdev_child == NULL);
ASSERT(vd->vdev_guid_sum == vd->vdev_guid);
- ASSERT(vd->vdev_initialize_thread == NULL);
/*
* Discard allocation state.
@@ -988,6 +998,12 @@ vdev_free(vdev_t *vd)
mutex_destroy(&vd->vdev_initialize_io_lock);
cv_destroy(&vd->vdev_initialize_io_cv);
cv_destroy(&vd->vdev_initialize_cv);
+ mutex_destroy(&vd->vdev_trim_lock);
+ mutex_destroy(&vd->vdev_autotrim_lock);
+ mutex_destroy(&vd->vdev_trim_io_lock);
+ cv_destroy(&vd->vdev_trim_cv);
+ cv_destroy(&vd->vdev_autotrim_cv);
+ cv_destroy(&vd->vdev_trim_io_cv);
zfs_ratelimit_fini(&vd->vdev_delay_rl);
zfs_ratelimit_fini(&vd->vdev_checksum_rl);
@@ -3475,6 +3491,16 @@ vdev_online(spa_t *spa, uint64_t guid, uint64_t flags, vdev_state_t *newstate)
}
mutex_exit(&vd->vdev_initialize_lock);
+ /* Restart trimming if necessary */
+ mutex_enter(&vd->vdev_trim_lock);
+ if (vdev_writeable(vd) &&
+ vd->vdev_trim_thread == NULL &&
+ vd->vdev_trim_state == VDEV_TRIM_ACTIVE) {
+ (void) vdev_trim(vd, vd->vdev_trim_rate, vd->vdev_trim_partial,
+ vd->vdev_trim_secure);
+ }
+ mutex_exit(&vd->vdev_trim_lock);
+
if (wasoffline ||
(oldstate < VDEV_STATE_DEGRADED &&
vd->vdev_state >= VDEV_STATE_DEGRADED))
@@ -3745,8 +3771,7 @@ vdev_accessible(vdev_t *vd, zio_t *zio)
static void
vdev_get_child_stat(vdev_t *cvd, vdev_stat_t *vs, vdev_stat_t *cvs)
{
- int t;
- for (t = 0; t < ZIO_TYPES; t++) {
+ for (int t = 0; t < VS_ZIO_TYPES; t++) {
vs->vs_ops[t] += cvs->vs_ops[t];
vs->vs_bytes[t] += cvs->vs_bytes[t];
}
@@ -3873,7 +3898,7 @@ vdev_get_stats_ex(vdev_t *vd, vdev_stat_t *vs, vdev_stat_ex_t *vsx)
vs->vs_rsize += VDEV_LABEL_START_SIZE +
VDEV_LABEL_END_SIZE;
/*
- * Report intializing progress. Since we don't
+ * Report initializing progress. Since we don't
* have the initializing locks held, this is only
* an estimate (although a fairly accurate one).
*/
@@ -3884,9 +3909,20 @@ vdev_get_stats_ex(vdev_t *vd, vdev_stat_t *vs, vdev_stat_ex_t *vsx)
vs->vs_initialize_state = vd->vdev_initialize_state;
vs->vs_initialize_action_time =
vd->vdev_initialize_action_time;
+
+ /*
+ * Report manual TRIM progress. Since we don't have
+ * the manual TRIM locks held, this is only an
+ * estimate (although a fairly accurate one).
+ */
+ vs->vs_trim_notsup = !vd->vdev_has_trim;
+ vs->vs_trim_bytes_done = vd->vdev_trim_bytes_done;
+ vs->vs_trim_bytes_est = vd->vdev_trim_bytes_est;
+ vs->vs_trim_state = vd->vdev_trim_state;
+ vs->vs_trim_action_time = vd->vdev_trim_action_time;
}
/*
- * Report expandable space on top-level, non-auxillary devices
+ * Report expandable space on top-level, non-auxiliary devices
* only. The expandable space is reported in terms of metaslab
* sized units since that determines how much space the pool
* can expand.
@@ -4004,9 +4040,18 @@ vdev_stat_update(zio_t *zio, uint64_t psize)
*/
if (vd->vdev_ops->vdev_op_leaf &&
(zio->io_priority < ZIO_PRIORITY_NUM_QUEUEABLE)) {
+ zio_type_t vs_type = type;
+
+ /*
+ * TRIM ops and bytes are reported to user space as
+ * ZIO_TYPE_IOCTL. This is done to preserve the
+ * vdev_stat_t structure layout for user space.
+ */
+ if (type == ZIO_TYPE_TRIM)
+ vs_type = ZIO_TYPE_IOCTL;
- vs->vs_ops[type]++;
- vs->vs_bytes[type] += psize;
+ vs->vs_ops[vs_type]++;
+ vs->vs_bytes[vs_type] += psize;
if (flags & ZIO_FLAG_DELEGATED) {
vsx->vsx_agg_histo[zio->io_priority]
@@ -4104,7 +4149,8 @@ vdev_deflated_space(vdev_t *vd, int64_t space)
}
/*
- * Update the in-core space usage stats for this vdev and the root vdev.
+ * Update the in-core space usage stats for this vdev, its metaslab class,
+ * and the root vdev.
*/
void
vdev_space_update(vdev_t *vd, int64_t alloc_delta, int64_t defer_delta,
@@ -4650,12 +4696,56 @@ vdev_set_deferred_resilver(spa_t *spa, vdev_t *vd)
spa->spa_resilver_deferred = B_TRUE;
}
+/*
+ * Translate a logical range to the physical range for the specified vdev_t.
+ * This function is initially called with a leaf vdev and will walk each
+ * parent vdev until it reaches a top-level vdev. Once the top-level is
+ * reached the physical range is initialized and the recursive function
+ * begins to unwind. As it unwinds it calls the parent's vdev specific
+ * translation function to do the real conversion.
+ */
+void
+vdev_xlate(vdev_t *vd, const range_seg_t *logical_rs, range_seg_t *physical_rs)
+{
+ /*
+ * Walk up the vdev tree
+ */
+ if (vd != vd->vdev_top) {
+ vdev_xlate(vd->vdev_parent, logical_rs, physical_rs);
+ } else {
+ /*
+ * We've reached the top-level vdev, initialize the
+ * physical range to the logical range and start to
+ * unwind.
+ */
+ physical_rs->rs_start = logical_rs->rs_start;
+ physical_rs->rs_end = logical_rs->rs_end;
+ return;
+ }
+
+ vdev_t *pvd = vd->vdev_parent;
+ ASSERT3P(pvd, !=, NULL);
+ ASSERT3P(pvd->vdev_ops->vdev_op_xlate, !=, NULL);
+
+ /*
+ * As this recursive function unwinds, translate the logical
+ * range into its physical components by calling the
+ * vdev specific translate function.
+ */
+ range_seg_t intermediate = { { { 0, 0 } } };
+ pvd->vdev_ops->vdev_op_xlate(vd, physical_rs, &intermediate);
+
+ physical_rs->rs_start = intermediate.rs_start;
+ physical_rs->rs_end = intermediate.rs_end;
+}
+
#if defined(_KERNEL)
EXPORT_SYMBOL(vdev_fault);
EXPORT_SYMBOL(vdev_degrade);
EXPORT_SYMBOL(vdev_online);
EXPORT_SYMBOL(vdev_offline);
EXPORT_SYMBOL(vdev_clear);
+
/* BEGIN CSTYLED */
module_param(zfs_vdev_default_ms_count, int, 0644);
MODULE_PARM_DESC(zfs_vdev_default_ms_count,
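
With vdev_xlate() promoted into vdev.c (it is removed from vdev_initialize.c later in this diff), a caller translates a leaf vdev's logical range as sketched below. This is illustrative only and mirrors the pattern used by the initialize and TRIM code; the helper name is hypothetical.

/*
 * Illustrative sketch only: translate a logical range on a leaf vdev
 * (e.g. before issuing initialize or TRIM I/O) into the corresponding
 * physical range.  The caller is assumed to hold SCL_CONFIG as reader.
 */
static void
example_translate_range(vdev_t *vd, uint64_t start, uint64_t size)
{
        range_seg_t logical_rs, physical_rs;

        logical_rs.rs_start = start;
        logical_rs.rs_end = start + size;

        vdev_xlate(vd, &logical_rs, &physical_rs);

        /* physical_rs.rs_start/rs_end now describe on-disk offsets. */
}
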
diff --git a/module/zfs/vdev_disk.c b/module/zfs/vdev_disk.c
index 4ac08c861..c2312e6fa 100644
--- a/module/zfs/vdev_disk.c
+++ b/module/zfs/vdev_disk.c
@@ -30,6 +30,7 @@
#include <sys/spa_impl.h>
#include <sys/vdev_disk.h>
#include <sys/vdev_impl.h>
+#include <sys/vdev_trim.h>
#include <sys/abd.h>
#include <sys/fs/zfs.h>
#include <sys/zio.h>
@@ -223,7 +224,7 @@ vdev_elevator_switch(vdev_t *v, char *elevator)
strfree(argv[2]);
#endif /* HAVE_ELEVATOR_CHANGE */
if (error) {
- zfs_dbgmsg("Unable to set \"%s\" scheduler for %s (%s): %d\n",
+ zfs_dbgmsg("Unable to set \"%s\" scheduler for %s (%s): %d",
elevator, v->vdev_path, device, error);
}
}
@@ -322,7 +323,7 @@ vdev_disk_open(vdev_t *v, uint64_t *psize, uint64_t *max_psize,
if (IS_ERR(bdev)) {
int error = -PTR_ERR(bdev);
- vdev_dbgmsg(v, "open error=%d count=%d\n", error, count);
+ vdev_dbgmsg(v, "open error=%d count=%d", error, count);
vd->vd_bdev = NULL;
v->vdev_tsd = vd;
rw_exit(&vd->vd_lock);
@@ -333,14 +334,22 @@ vdev_disk_open(vdev_t *v, uint64_t *psize, uint64_t *max_psize,
rw_exit(&vd->vd_lock);
}
+ struct request_queue *q = bdev_get_queue(vd->vd_bdev);
+
/* Determine the physical block size */
block_size = vdev_bdev_block_size(vd->vd_bdev);
/* Clear the nowritecache bit, causes vdev_reopen() to try again. */
v->vdev_nowritecache = B_FALSE;
+ /* Set when device reports it supports TRIM. */
+ v->vdev_has_trim = !!blk_queue_discard(q);
+
+ /* Set when device reports it supports secure TRIM. */
+ v->vdev_has_securetrim = !!blk_queue_discard_secure(q);
+
/* Inform the ZIO pipeline that we are non-rotational */
- v->vdev_nonrot = blk_queue_nonrot(bdev_get_queue(vd->vd_bdev));
+ v->vdev_nonrot = blk_queue_nonrot(q);
/* Physical volume size in bytes for the partition */
*psize = bdev_capacity(vd->vd_bdev);
@@ -728,6 +737,7 @@ vdev_disk_io_start(zio_t *zio)
{
vdev_t *v = zio->io_vd;
vdev_disk_t *vd = v->vdev_tsd;
+ unsigned long trim_flags = 0;
int rw, flags, error;
/*
@@ -813,6 +823,19 @@ vdev_disk_io_start(zio_t *zio)
#endif
break;
+ case ZIO_TYPE_TRIM:
+#if defined(BLKDEV_DISCARD_SECURE)
+ if (zio->io_trim_flags & ZIO_TRIM_SECURE)
+ trim_flags |= BLKDEV_DISCARD_SECURE;
+#endif
+ zio->io_error = -blkdev_issue_discard(vd->vd_bdev,
+ zio->io_offset >> 9, zio->io_size >> 9, GFP_NOFS,
+ trim_flags);
+
+ rw_exit(&vd->vd_lock);
+ zio_interrupt(zio);
+ return;
+
default:
rw_exit(&vd->vd_lock);
zio->io_error = SET_ERROR(ENOTSUP);
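
For clarity, the byte-to-sector conversion used in the discard call above is spelled out by this hypothetical helper; it is not part of the patch.

/*
 * Illustrative only: blkdev_issue_discard() takes 512-byte sectors,
 * hence the >> 9 above.  For example, a 1 MiB extent at byte offset
 * 8 MiB becomes 2048 sectors starting at sector 16384.
 */
static inline sector_t
example_bytes_to_sectors(uint64_t bytes)
{
        return (bytes >> 9);
}
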
diff --git a/module/zfs/vdev_file.c b/module/zfs/vdev_file.c
index 3551898e0..c04f40ca4 100644
--- a/module/zfs/vdev_file.c
+++ b/module/zfs/vdev_file.c
@@ -28,10 +28,13 @@
#include <sys/spa_impl.h>
#include <sys/vdev_file.h>
#include <sys/vdev_impl.h>
+#include <sys/vdev_trim.h>
#include <sys/zio.h>
#include <sys/fs/zfs.h>
#include <sys/fm/fs/zfs.h>
#include <sys/abd.h>
+#include <sys/fcntl.h>
+#include <sys/vnode.h>
/*
* Virtual device vector for files.
@@ -60,10 +63,25 @@ vdev_file_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
vattr_t vattr;
int error;
- /* Rotational optimizations only make sense on block devices */
+ /*
+ * Rotational optimizations only make sense on block devices.
+ */
vd->vdev_nonrot = B_TRUE;
/*
+ * Allow TRIM on file based vdevs. This may not always be supported,
+ * since it depends on your kernel version and underlying filesystem
+ * type but it is always safe to attempt.
+ */
+ vd->vdev_has_trim = B_TRUE;
+
+ /*
+ * Disable secure TRIM on file based vdevs. There is no way to
+ * request this behavior from the underlying filesystem.
+ */
+ vd->vdev_has_securetrim = B_FALSE;
+
+ /*
* We must have a pathname, and it must be absolute.
*/
if (vd->vdev_path == NULL || vd->vdev_path[0] != '/') {
@@ -229,6 +247,21 @@ vdev_file_io_start(zio_t *zio)
zio_execute(zio);
return;
+ } else if (zio->io_type == ZIO_TYPE_TRIM) {
+ struct flock flck;
+
+ ASSERT3U(zio->io_size, !=, 0);
+ bzero(&flck, sizeof (flck));
+ flck.l_type = F_FREESP;
+ flck.l_start = zio->io_offset;
+ flck.l_len = zio->io_size;
+ flck.l_whence = 0;
+
+ zio->io_error = VOP_SPACE(vf->vf_vnode, F_FREESP, &flck,
+ 0, 0, kcred, NULL);
+
+ zio_execute(zio);
+ return;
}
zio->io_target_timestamp = zio_handle_io_delay(zio);
diff --git a/module/zfs/vdev_initialize.c b/module/zfs/vdev_initialize.c
index bca2db7a4..b15901326 100644
--- a/module/zfs/vdev_initialize.c
+++ b/module/zfs/vdev_initialize.c
@@ -34,12 +34,6 @@
#include <sys/dmu_tx.h>
/*
- * Maximum number of metaslabs per group that can be initialized
- * simultaneously.
- */
-int max_initialize_ms = 3;
-
-/*
* Value that is written to disk during initialization.
*/
#ifdef _ILP32
@@ -132,7 +126,7 @@ vdev_initialize_change_state(vdev_t *vd, vdev_initializing_state_t new_state)
dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
dsl_sync_task_nowait(spa_get_dsl(spa), vdev_initialize_zap_update_sync,
- guid, 2, ZFS_SPACE_CHECK_RESERVED, tx);
+ guid, 2, ZFS_SPACE_CHECK_NONE, tx);
switch (new_state) {
case VDEV_INITIALIZE_ACTIVE:
@@ -251,49 +245,6 @@ vdev_initialize_write(vdev_t *vd, uint64_t start, uint64_t size, abd_t *data)
}
/*
- * Translate a logical range to the physical range for the specified vdev_t.
- * This function is initially called with a leaf vdev and will walk each
- * parent vdev until it reaches a top-level vdev. Once the top-level is
- * reached the physical range is initialized and the recursive function
- * begins to unwind. As it unwinds it calls the parent's vdev specific
- * translation function to do the real conversion.
- */
-void
-vdev_xlate(vdev_t *vd, const range_seg_t *logical_rs, range_seg_t *physical_rs)
-{
- /*
- * Walk up the vdev tree
- */
- if (vd != vd->vdev_top) {
- vdev_xlate(vd->vdev_parent, logical_rs, physical_rs);
- } else {
- /*
- * We've reached the top-level vdev, initialize the
- * physical range to the logical range and start to
- * unwind.
- */
- physical_rs->rs_start = logical_rs->rs_start;
- physical_rs->rs_end = logical_rs->rs_end;
- return;
- }
-
- vdev_t *pvd = vd->vdev_parent;
- ASSERT3P(pvd, !=, NULL);
- ASSERT3P(pvd->vdev_ops->vdev_op_xlate, !=, NULL);
-
- /*
- * As this recursive function unwinds, translate the logical
- * range into its physical components by calling the
- * vdev specific translate function.
- */
- range_seg_t intermediate = { { { 0, 0 } } };
- pvd->vdev_ops->vdev_op_xlate(vd, physical_rs, &intermediate);
-
- physical_rs->rs_start = intermediate.rs_start;
- physical_rs->rs_end = intermediate.rs_end;
-}
-
-/*
* Callback to fill each ABD chunk with zfs_initialize_value. len must be
* divisible by sizeof (uint64_t), and buf must be 8-byte aligned. The ABD
* allocation will guarantee these for us.
@@ -363,81 +314,6 @@ vdev_initialize_ranges(vdev_t *vd, abd_t *data)
}
static void
-vdev_initialize_mg_wait(metaslab_group_t *mg)
-{
- ASSERT(MUTEX_HELD(&mg->mg_ms_initialize_lock));
- while (mg->mg_initialize_updating) {
- cv_wait(&mg->mg_ms_initialize_cv, &mg->mg_ms_initialize_lock);
- }
-}
-
-static void
-vdev_initialize_mg_mark(metaslab_group_t *mg)
-{
- ASSERT(MUTEX_HELD(&mg->mg_ms_initialize_lock));
- ASSERT(mg->mg_initialize_updating);
-
- while (mg->mg_ms_initializing >= max_initialize_ms) {
- cv_wait(&mg->mg_ms_initialize_cv, &mg->mg_ms_initialize_lock);
- }
- mg->mg_ms_initializing++;
- ASSERT3U(mg->mg_ms_initializing, <=, max_initialize_ms);
-}
-
-/*
- * Mark the metaslab as being initialized to prevent any allocations
- * on this metaslab. We must also track how many metaslabs are currently
- * being initialized within a metaslab group and limit them to prevent
- * allocation failures from occurring because all metaslabs are being
- * initialized.
- */
-static void
-vdev_initialize_ms_mark(metaslab_t *msp)
-{
- ASSERT(!MUTEX_HELD(&msp->ms_lock));
- metaslab_group_t *mg = msp->ms_group;
-
- mutex_enter(&mg->mg_ms_initialize_lock);
-
- /*
- * To keep an accurate count of how many threads are initializing
- * a specific metaslab group, we only allow one thread to mark
- * the metaslab group at a time. This ensures that the value of
- * ms_initializing will be accurate when we decide to mark a metaslab
- * group as being initialized. To do this we force all other threads
- * to wait till the metaslab's mg_initialize_updating flag is no
- * longer set.
- */
- vdev_initialize_mg_wait(mg);
- mg->mg_initialize_updating = B_TRUE;
- if (msp->ms_initializing == 0) {
- vdev_initialize_mg_mark(mg);
- }
- mutex_enter(&msp->ms_lock);
- msp->ms_initializing++;
- mutex_exit(&msp->ms_lock);
-
- mg->mg_initialize_updating = B_FALSE;
- cv_broadcast(&mg->mg_ms_initialize_cv);
- mutex_exit(&mg->mg_ms_initialize_lock);
-}
-
-static void
-vdev_initialize_ms_unmark(metaslab_t *msp)
-{
- ASSERT(!MUTEX_HELD(&msp->ms_lock));
- metaslab_group_t *mg = msp->ms_group;
- mutex_enter(&mg->mg_ms_initialize_lock);
- mutex_enter(&msp->ms_lock);
- if (--msp->ms_initializing == 0) {
- mg->mg_ms_initializing--;
- cv_broadcast(&mg->mg_ms_initialize_cv);
- }
- mutex_exit(&msp->ms_lock);
- mutex_exit(&mg->mg_ms_initialize_lock);
-}
-
-static void
vdev_initialize_calculate_progress(vdev_t *vd)
{
ASSERT(spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_READER) ||
@@ -535,9 +411,8 @@ vdev_initialize_load(vdev_t *vd)
return (err);
}
-
/*
- * Convert the logical range into a physcial range and add it to our
+ * Convert the logical range into a physical range and add it to our
* avl tree.
*/
void
@@ -618,7 +493,8 @@ vdev_initialize_thread(void *arg)
ms_count = vd->vdev_top->vdev_ms_count;
}
- vdev_initialize_ms_mark(msp);
+ spa_config_exit(spa, SCL_CONFIG, FTAG);
+ metaslab_disable(msp);
mutex_enter(&msp->ms_lock);
VERIFY0(metaslab_load(msp));
@@ -626,16 +502,8 @@ vdev_initialize_thread(void *arg)
vd);
mutex_exit(&msp->ms_lock);
- spa_config_exit(spa, SCL_CONFIG, FTAG);
error = vdev_initialize_ranges(vd, deadbeef);
-
- /*
- * Wait for the outstanding IO to be synced to prevent
- * newly allocated blocks from being overwritten.
- */
- txg_wait_synced(spa_get_dsl(spa), 0);
-
- vdev_initialize_ms_unmark(msp);
+ metaslab_enable(msp, B_TRUE);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
range_tree_vacate(vd->vdev_initialize_tree, NULL, NULL);
@@ -853,12 +721,11 @@ vdev_initialize_restart(vdev_t *vd)
}
#if defined(_KERNEL)
-EXPORT_SYMBOL(vdev_initialize_restart);
-EXPORT_SYMBOL(vdev_xlate);
EXPORT_SYMBOL(vdev_initialize);
EXPORT_SYMBOL(vdev_initialize_stop);
EXPORT_SYMBOL(vdev_initialize_stop_all);
EXPORT_SYMBOL(vdev_initialize_stop_wait);
+EXPORT_SYMBOL(vdev_initialize_restart);
/* CSTYLED */
module_param(zfs_initialize_value, ulong, 0644);
diff --git a/module/zfs/vdev_label.c b/module/zfs/vdev_label.c
index a03722d05..a0e373b3d 100644
--- a/module/zfs/vdev_label.c
+++ b/module/zfs/vdev_label.c
@@ -251,6 +251,9 @@ vdev_config_generate_stats(vdev_t *vd, nvlist_t *nv)
fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE,
vsx->vsx_active_queue[ZIO_PRIORITY_SCRUB]);
+ fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE,
+ vsx->vsx_active_queue[ZIO_PRIORITY_TRIM]);
+
/* ZIOs pending */
fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SYNC_R_PEND_QUEUE,
vsx->vsx_pend_queue[ZIO_PRIORITY_SYNC_READ]);
@@ -267,6 +270,9 @@ vdev_config_generate_stats(vdev_t *vd, nvlist_t *nv)
fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SCRUB_PEND_QUEUE,
vsx->vsx_pend_queue[ZIO_PRIORITY_SCRUB]);
+ fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_TRIM_PEND_QUEUE,
+ vsx->vsx_pend_queue[ZIO_PRIORITY_TRIM]);
+
/* Histograms */
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
vsx->vsx_total_histo[ZIO_TYPE_READ],
@@ -304,6 +310,10 @@ vdev_config_generate_stats(vdev_t *vd, nvlist_t *nv)
vsx->vsx_queue_histo[ZIO_PRIORITY_SCRUB],
ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_SCRUB]));
+ fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
+ vsx->vsx_queue_histo[ZIO_PRIORITY_TRIM],
+ ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_TRIM]));
+
/* Request sizes */
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SYNC_IND_R_HISTO,
vsx->vsx_ind_histo[ZIO_PRIORITY_SYNC_READ],
@@ -325,6 +335,10 @@ vdev_config_generate_stats(vdev_t *vd, nvlist_t *nv)
vsx->vsx_ind_histo[ZIO_PRIORITY_SCRUB],
ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_SCRUB]));
+ fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_IND_TRIM_HISTO,
+ vsx->vsx_ind_histo[ZIO_PRIORITY_TRIM],
+ ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_TRIM]));
+
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SYNC_AGG_R_HISTO,
vsx->vsx_agg_histo[ZIO_PRIORITY_SYNC_READ],
ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_SYNC_READ]));
@@ -345,6 +359,10 @@ vdev_config_generate_stats(vdev_t *vd, nvlist_t *nv)
vsx->vsx_agg_histo[ZIO_PRIORITY_SCRUB],
ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_SCRUB]));
+ fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_AGG_TRIM_HISTO,
+ vsx->vsx_agg_histo[ZIO_PRIORITY_TRIM],
+ ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_TRIM]));
+
/* IO delays */
fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SLOW_IOS, vs->vs_slow_ios);
diff --git a/module/zfs/vdev_queue.c b/module/zfs/vdev_queue.c
index a1861d5f0..e74df76b7 100644
--- a/module/zfs/vdev_queue.c
+++ b/module/zfs/vdev_queue.c
@@ -156,6 +156,8 @@ uint32_t zfs_vdev_removal_min_active = 1;
uint32_t zfs_vdev_removal_max_active = 2;
uint32_t zfs_vdev_initializing_min_active = 1;
uint32_t zfs_vdev_initializing_max_active = 1;
+uint32_t zfs_vdev_trim_min_active = 1;
+uint32_t zfs_vdev_trim_max_active = 2;
/*
* When the pool has less than zfs_vdev_async_write_active_min_dirty_percent
@@ -203,6 +205,12 @@ int zfs_vdev_queue_depth_pct = 300;
*/
int zfs_vdev_def_queue_depth = 32;
+/*
+ * Allow TRIM I/Os to be aggregated. This should normally not be needed since
+ * TRIM I/O for extents up to zfs_trim_extent_bytes_max (128M) can be submitted
+ * by the TRIM code in vdev_trim.c.
+ */
+int zfs_vdev_aggregate_trim = 0;
int
vdev_queue_offset_compare(const void *x1, const void *x2)
@@ -227,11 +235,13 @@ vdev_queue_class_tree(vdev_queue_t *vq, zio_priority_t p)
static inline avl_tree_t *
vdev_queue_type_tree(vdev_queue_t *vq, zio_type_t t)
{
- ASSERT(t == ZIO_TYPE_READ || t == ZIO_TYPE_WRITE);
+ ASSERT(t == ZIO_TYPE_READ || t == ZIO_TYPE_WRITE || t == ZIO_TYPE_TRIM);
if (t == ZIO_TYPE_READ)
return (&vq->vq_read_offset_tree);
- else
+ else if (t == ZIO_TYPE_WRITE)
return (&vq->vq_write_offset_tree);
+ else
+ return (&vq->vq_trim_offset_tree);
}
int
@@ -266,6 +276,8 @@ vdev_queue_class_min_active(zio_priority_t p)
return (zfs_vdev_removal_min_active);
case ZIO_PRIORITY_INITIALIZING:
return (zfs_vdev_initializing_min_active);
+ case ZIO_PRIORITY_TRIM:
+ return (zfs_vdev_trim_min_active);
default:
panic("invalid priority %u", p);
return (0);
@@ -338,6 +350,8 @@ vdev_queue_class_max_active(spa_t *spa, zio_priority_t p)
return (zfs_vdev_removal_max_active);
case ZIO_PRIORITY_INITIALIZING:
return (zfs_vdev_initializing_max_active);
+ case ZIO_PRIORITY_TRIM:
+ return (zfs_vdev_trim_max_active);
default:
panic("invalid priority %u", p);
return (0);
@@ -398,19 +412,25 @@ vdev_queue_init(vdev_t *vd)
avl_create(vdev_queue_type_tree(vq, ZIO_TYPE_WRITE),
vdev_queue_offset_compare, sizeof (zio_t),
offsetof(struct zio, io_offset_node));
+ avl_create(vdev_queue_type_tree(vq, ZIO_TYPE_TRIM),
+ vdev_queue_offset_compare, sizeof (zio_t),
+ offsetof(struct zio, io_offset_node));
for (p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++) {
int (*compfn) (const void *, const void *);
/*
- * The synchronous i/o queues are dispatched in FIFO rather
+ * The synchronous/trim i/o queues are dispatched in FIFO rather
* than LBA order. This provides more consistent latency for
* these i/os.
*/
- if (p == ZIO_PRIORITY_SYNC_READ || p == ZIO_PRIORITY_SYNC_WRITE)
+ if (p == ZIO_PRIORITY_SYNC_READ ||
+ p == ZIO_PRIORITY_SYNC_WRITE ||
+ p == ZIO_PRIORITY_TRIM) {
compfn = vdev_queue_timestamp_compare;
- else
+ } else {
compfn = vdev_queue_offset_compare;
+ }
avl_create(vdev_queue_class_tree(vq, p), compfn,
sizeof (zio_t), offsetof(struct zio, io_queue_node));
}
@@ -428,6 +448,7 @@ vdev_queue_fini(vdev_t *vd)
avl_destroy(&vq->vq_active_tree);
avl_destroy(vdev_queue_type_tree(vq, ZIO_TYPE_READ));
avl_destroy(vdev_queue_type_tree(vq, ZIO_TYPE_WRITE));
+ avl_destroy(vdev_queue_type_tree(vq, ZIO_TYPE_TRIM));
mutex_destroy(&vq->vq_lock);
}
@@ -559,6 +580,13 @@ vdev_queue_aggregate(vdev_queue_t *vq, zio_t *zio)
if (zio->io_flags & ZIO_FLAG_DONT_AGGREGATE || limit == 0)
return (NULL);
+ /*
+ * While TRIM commands could be aggregated based on offset, this
+ * behavior is disabled until it's determined to be beneficial.
+ */
+ if (zio->io_type == ZIO_TYPE_TRIM && !zfs_vdev_aggregate_trim)
+ return (NULL);
+
first = last = zio;
if (zio->io_type == ZIO_TYPE_READ)
@@ -732,7 +760,7 @@ again:
* For LBA-ordered queues (async / scrub / initializing), issue the
* i/o which follows the most recently issued i/o in LBA (offset) order.
*
- * For FIFO queues (sync), issue the i/o with the lowest timestamp.
+ * For FIFO queues (sync/trim), issue the i/o with the lowest timestamp.
*/
tree = vdev_queue_class_tree(vq, p);
vq->vq_io_search.io_timestamp = 0;
@@ -783,19 +811,27 @@ vdev_queue_io(zio_t *zio)
* not match the child's i/o type. Fix it up here.
*/
if (zio->io_type == ZIO_TYPE_READ) {
+ ASSERT(zio->io_priority != ZIO_PRIORITY_TRIM);
+
if (zio->io_priority != ZIO_PRIORITY_SYNC_READ &&
zio->io_priority != ZIO_PRIORITY_ASYNC_READ &&
zio->io_priority != ZIO_PRIORITY_SCRUB &&
zio->io_priority != ZIO_PRIORITY_REMOVAL &&
- zio->io_priority != ZIO_PRIORITY_INITIALIZING)
+ zio->io_priority != ZIO_PRIORITY_INITIALIZING) {
zio->io_priority = ZIO_PRIORITY_ASYNC_READ;
- } else {
- ASSERT(zio->io_type == ZIO_TYPE_WRITE);
+ }
+ } else if (zio->io_type == ZIO_TYPE_WRITE) {
+ ASSERT(zio->io_priority != ZIO_PRIORITY_TRIM);
+
if (zio->io_priority != ZIO_PRIORITY_SYNC_WRITE &&
zio->io_priority != ZIO_PRIORITY_ASYNC_WRITE &&
zio->io_priority != ZIO_PRIORITY_REMOVAL &&
- zio->io_priority != ZIO_PRIORITY_INITIALIZING)
+ zio->io_priority != ZIO_PRIORITY_INITIALIZING) {
zio->io_priority = ZIO_PRIORITY_ASYNC_WRITE;
+ }
+ } else {
+ ASSERT(zio->io_type == ZIO_TYPE_TRIM);
+ ASSERT(zio->io_priority == ZIO_PRIORITY_TRIM);
}
zio->io_flags |= ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE;
@@ -922,6 +958,9 @@ module_param(zfs_vdev_aggregation_limit_non_rotating, int, 0644);
MODULE_PARM_DESC(zfs_vdev_aggregation_limit_non_rotating,
"Max vdev I/O aggregation size for non-rotating media");
+module_param(zfs_vdev_aggregate_trim, int, 0644);
+MODULE_PARM_DESC(zfs_vdev_aggregate_trim, "Allow TRIM I/O to be aggregated");
+
module_param(zfs_vdev_read_gap_limit, int, 0644);
MODULE_PARM_DESC(zfs_vdev_read_gap_limit, "Aggregate read I/O over gap");
@@ -995,6 +1034,14 @@ module_param(zfs_vdev_sync_write_min_active, int, 0644);
MODULE_PARM_DESC(zfs_vdev_sync_write_min_active,
"Min active sync write I/Os per vdev");
+module_param(zfs_vdev_trim_max_active, int, 0644);
+MODULE_PARM_DESC(zfs_vdev_trim_max_active,
+ "Max active trim/discard I/Os per vdev");
+
+module_param(zfs_vdev_trim_min_active, int, 0644);
+MODULE_PARM_DESC(zfs_vdev_trim_min_active,
+ "Min active trim/discard I/Os per vdev");
+
module_param(zfs_vdev_queue_depth_pct, int, 0644);
MODULE_PARM_DESC(zfs_vdev_queue_depth_pct,
"Queue depth percentage for each top-level vdev");
diff --git a/module/zfs/vdev_raidz.c b/module/zfs/vdev_raidz.c
index d11287bdc..215cd1c12 100644
--- a/module/zfs/vdev_raidz.c
+++ b/module/zfs/vdev_raidz.c
@@ -37,7 +37,7 @@
#include <sys/vdev_raidz_impl.h>
#ifdef ZFS_DEBUG
-#include <sys/vdev_initialize.h> /* vdev_xlate testing */
+#include <sys/vdev.h> /* For vdev_xlate() in vdev_raidz_io_verify() */
#endif
/*
diff --git a/module/zfs/vdev_removal.c b/module/zfs/vdev_removal.c
index 98bf2194a..99d67b7be 100644
--- a/module/zfs/vdev_removal.c
+++ b/module/zfs/vdev_removal.c
@@ -45,6 +45,7 @@
#include <sys/vdev_indirect_mapping.h>
#include <sys/abd.h>
#include <sys/vdev_initialize.h>
+#include <sys/vdev_trim.h>
#include <sys/trace_vdev.h>
/*
@@ -1181,6 +1182,8 @@ vdev_remove_complete(spa_t *spa)
txg = spa_vdev_enter(spa);
vdev_t *vd = vdev_lookup_top(spa, spa->spa_vdev_removal->svr_vdev_id);
ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
+ ASSERT3P(vd->vdev_trim_thread, ==, NULL);
+ ASSERT3P(vd->vdev_autotrim_thread, ==, NULL);
sysevent_t *ev = spa_event_create(spa, vd, NULL,
ESC_ZFS_VDEV_REMOVE_DEV);
@@ -1869,8 +1872,10 @@ spa_vdev_remove_log(vdev_t *vd, uint64_t *txg)
spa_vdev_config_exit(spa, NULL, *txg, 0, FTAG);
- /* Stop initializing */
+ /* Stop initializing and TRIM */
vdev_initialize_stop_all(vd, VDEV_INITIALIZE_CANCELED);
+ vdev_trim_stop_all(vd, VDEV_TRIM_CANCELED);
+ vdev_autotrim_stop_wait(vd);
*txg = spa_vdev_config_enter(spa);
@@ -2051,11 +2056,13 @@ spa_vdev_remove_top(vdev_t *vd, uint64_t *txg)
error = spa_reset_logs(spa);
/*
- * We stop any initializing that is currently in progress but leave
- * the state as "active". This will allow the initializing to resume
- * if the removal is canceled sometime later.
+ * We stop any initializing and TRIM that is currently in progress
+ * but leave the state as "active". This will allow the process to
+ * resume if the removal is canceled sometime later.
*/
vdev_initialize_stop_all(vd, VDEV_INITIALIZE_ACTIVE);
+ vdev_trim_stop_all(vd, VDEV_TRIM_ACTIVE);
+ vdev_autotrim_stop_wait(vd);
*txg = spa_vdev_config_enter(spa);
@@ -2069,6 +2076,8 @@ spa_vdev_remove_top(vdev_t *vd, uint64_t *txg)
if (error != 0) {
metaslab_group_activate(mg);
spa_async_request(spa, SPA_ASYNC_INITIALIZE_RESTART);
+ spa_async_request(spa, SPA_ASYNC_TRIM_RESTART);
+ spa_async_request(spa, SPA_ASYNC_AUTOTRIM_RESTART);
return (error);
}
diff --git a/module/zfs/vdev_trim.c b/module/zfs/vdev_trim.c
new file mode 100644
index 000000000..5ad47cccd
--- /dev/null
+++ b/module/zfs/vdev_trim.c
@@ -0,0 +1,1460 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright (c) 2016 by Delphix. All rights reserved.
+ * Copyright (c) 2019 by Lawrence Livermore National Security, LLC.
+ */
+
+#include <sys/spa.h>
+#include <sys/spa_impl.h>
+#include <sys/txg.h>
+#include <sys/vdev_impl.h>
+#include <sys/vdev_trim.h>
+#include <sys/refcount.h>
+#include <sys/metaslab_impl.h>
+#include <sys/dsl_synctask.h>
+#include <sys/zap.h>
+#include <sys/dmu_tx.h>
+
+/*
+ * TRIM is a feature which is used to notify an SSD that some previously
+ * written space is no longer allocated by the pool. This is useful because
+ * writes to an SSD must be performed to blocks which have first been erased.
+ * Ensuring the SSD always has a supply of erased blocks for new writes
+ * helps prevent its performance from deteriorating.
+ *
+ * There are two supported TRIM methods: manual and automatic.
+ *
+ * Manual TRIM:
+ *
+ * A manual TRIM is initiated by running the 'zpool trim' command. A single
+ * 'vdev_trim' thread is created for each leaf vdev, and it is responsible for
+ * managing that vdev's TRIM process. This involves iterating over all the
+ * metaslabs, calculating the unallocated space ranges, and then issuing the
+ * required TRIM I/Os.
+ *
+ * While a metaslab is being actively trimmed it is not eligible to perform
+ * new allocations. After traversing all of the metaslabs the thread is
+ * terminated. Finally, both the requested options and current progress of
+ * the TRIM are regularly written to the pool. This allows the TRIM to be
+ * suspended and resumed as needed.
+ *
+ * Automatic TRIM:
+ *
+ * An automatic TRIM is enabled by setting the 'autotrim' pool property
+ * to 'on'. When enabled, a 'vdev_autotrim' thread is created for each
+ * top-level (not leaf) vdev in the pool. These threads perform the same
+ * core TRIM process as a manual TRIM, but with a few key differences.
+ *
+ * 1) Automatic TRIM happens continuously in the background and operates
+ * solely on recently freed blocks (ms_trim not ms_allocatable).
+ *
+ * 2) Each thread is associated with a top-level (not leaf) vdev. This has
+ * the benefit of simplifying the threading model, it makes it easier
+ * to coordinate administrative commands, and it ensures only a single
+ * metaslab is disabled at a time. Unlike manual TRIM, this means each
+ * 'vdev_autotrim' thread is responsible for issuing TRIM I/Os for its
+ * children.
+ *
+ * 3) There is no automatic TRIM progress information stored on disk, nor
+ * is it reported by 'zpool status'.
+ *
+ * While the automatic TRIM process is highly effective it is more likely
+ * than a manual TRIM to encounter tiny ranges. Ranges less than or equal to
+ * 'zfs_trim_extent_bytes_min' (32k) are considered too small to efficiently
+ * TRIM and are skipped. This means small amounts of freed space may not
+ * be automatically trimmed.
+ *
+ * Furthermore, devices with attached hot spares and devices being actively
+ * replaced are skipped. This is done to avoid adding additional stress to
+ * a potentially unhealthy device and to minimize the required rebuild time.
+ *
+ * For this reason it may be beneficial to occasionally manually TRIM a pool
+ * even when automatic TRIM is enabled.
+ */
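+
+/*
+ * For example, from user space these operations are typically driven with
+ * commands of the following form (illustrative; see zpool(8) for the
+ * authoritative syntax):
+ *
+ *	zpool trim tank			# start a manual TRIM
+ *	zpool trim -s tank		# suspend an in-progress manual TRIM
+ *	zpool trim -c tank		# cancel an in-progress manual TRIM
+ *	zpool set autotrim=on tank	# enable automatic TRIM
+ */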
+
+/*
+ * Maximum size of TRIM I/O; larger ranges are chunked into 128MiB lengths.
+ */
+unsigned int zfs_trim_extent_bytes_max = 128 * 1024 * 1024;
+
+/*
+ * Minimum size of TRIM I/O; extents smaller than 32KiB will be skipped.
+ */
+unsigned int zfs_trim_extent_bytes_min = 32 * 1024;
+
+/*
+ * Skip uninitialized metaslabs during the TRIM process. This option is
+ * useful for pools constructed from large thinly-provisioned devices where
+ * TRIM operations are slow. As a pool ages, an increasing fraction of
+ * the pool's metaslabs will be initialized, progressively degrading the
+ * usefulness of this option. This setting is stored when starting a
+ * manual TRIM and will persist for the duration of the requested TRIM.
+ */
+unsigned int zfs_trim_metaslab_skip = 0;
+
+/*
+ * Maximum number of queued TRIM I/Os per leaf vdev. The number of
+ * concurrent TRIM I/Os issued to the device is controlled by the
+ * zfs_vdev_trim_min_active and zfs_vdev_trim_max_active module options.
+ */
+unsigned int zfs_trim_queue_limit = 10;
+
+/*
+ * The minimum number of transaction groups between automatic trims of a
+ * metaslab. This setting represents a trade-off between issuing more
+ * efficient TRIM operations, by allowing them to be aggregated longer,
+ * and issuing them promptly so the trimmed space is available. Note
+ * that this value is a minimum; metaslabs can be trimmed less frequently
+ * when there are a large number of ranges which need to be trimmed.
+ *
+ * Increasing this value will allow frees to be aggregated for a longer
+ * time. This can result in larger TRIM operations, and increased memory
+ * usage in order to track the ranges to be trimmed. Decreasing this value
+ * has the opposite effect. The default value of 32 was determined through
+ * testing to be a reasonable compromise.
+ */
+unsigned int zfs_trim_txg_batch = 32;
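+
+/*
+ * For example, to favor fewer but larger TRIM operations the batch size can
+ * typically be raised at runtime through the module parameter interface
+ * (path assumed from the module_param() registration at the end of this
+ * file):
+ *
+ *	echo 64 > /sys/module/zfs/parameters/zfs_trim_txg_batch
+ */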
+
+/*
+ * The trim_args are a control structure which describe how a leaf vdev
+ * should be trimmed. The core elements are the vdev, the metaslab being
+ * trimmed and a range tree containing the extents to TRIM. All provided
+ * ranges must be within the metaslab.
+ */
+typedef struct trim_args {
+ /*
+ * These fields are set by the caller of vdev_trim_ranges().
+ */
+ vdev_t *trim_vdev; /* Leaf vdev to TRIM */
+ metaslab_t *trim_msp; /* Disabled metaslab */
+ range_tree_t *trim_tree; /* TRIM ranges (in metaslab) */
+ trim_type_t trim_type; /* Manual or auto TRIM */
+ uint64_t trim_extent_bytes_max; /* Maximum TRIM I/O size */
+ uint64_t trim_extent_bytes_min; /* Minimum TRIM I/O size */
+ enum trim_flag trim_flags; /* TRIM flags (secure) */
+
+ /*
+ * These fields are updated by vdev_trim_ranges().
+ */
+ hrtime_t trim_start_time; /* Start time */
+ uint64_t trim_bytes_done; /* Bytes trimmed */
+} trim_args_t;
+
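+/*
+ * For example, vdev_trim_thread() below prepares a manual TRIM of a leaf
+ * vdev roughly as follows:
+ *
+ *	ta.trim_vdev = vd;
+ *	ta.trim_tree = range_tree_create(NULL, NULL);
+ *	ta.trim_type = TRIM_TYPE_MANUAL;
+ *	ta.trim_extent_bytes_max = zfs_trim_extent_bytes_max;
+ *	ta.trim_extent_bytes_min = zfs_trim_extent_bytes_min;
+ */
+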
+/*
+ * Determines whether a vdev_trim_thread() should be stopped.
+ */
+static boolean_t
+vdev_trim_should_stop(vdev_t *vd)
+{
+ return (vd->vdev_trim_exit_wanted || !vdev_writeable(vd) ||
+ vd->vdev_detached || vd->vdev_top->vdev_removing);
+}
+
+/*
+ * Determines whether a vdev_autotrim_thread() should be stopped.
+ */
+static boolean_t
+vdev_autotrim_should_stop(vdev_t *tvd)
+{
+ return (tvd->vdev_autotrim_exit_wanted ||
+ !vdev_writeable(tvd) || tvd->vdev_removing ||
+ spa_get_autotrim(tvd->vdev_spa) == SPA_AUTOTRIM_OFF);
+}
+
+/*
+ * The sync task for updating the on-disk state of a manual TRIM. This
+ * is scheduled by vdev_trim_change_state().
+ */
+static void
+vdev_trim_zap_update_sync(void *arg, dmu_tx_t *tx)
+{
+ /*
+ * We pass in the guid instead of the vdev_t since the vdev may
+ * have been freed prior to the sync task being processed. This
+ * happens when a vdev is detached as we call spa_config_vdev_exit(),
+ * stop the trimming thread, schedule the sync task, and free
+ * the vdev. Later when the scheduled sync task is invoked, it would
+ * find that the vdev has been freed.
+ */
+ uint64_t guid = *(uint64_t *)arg;
+ uint64_t txg = dmu_tx_get_txg(tx);
+ kmem_free(arg, sizeof (uint64_t));
+
+ vdev_t *vd = spa_lookup_by_guid(tx->tx_pool->dp_spa, guid, B_FALSE);
+ if (vd == NULL || vd->vdev_top->vdev_removing || !vdev_is_concrete(vd))
+ return;
+
+ uint64_t last_offset = vd->vdev_trim_offset[txg & TXG_MASK];
+ vd->vdev_trim_offset[txg & TXG_MASK] = 0;
+
+ VERIFY3U(vd->vdev_leaf_zap, !=, 0);
+
+ objset_t *mos = vd->vdev_spa->spa_meta_objset;
+
+ if (last_offset > 0 || vd->vdev_trim_last_offset == UINT64_MAX) {
+
+ if (vd->vdev_trim_last_offset == UINT64_MAX)
+ last_offset = 0;
+
+ vd->vdev_trim_last_offset = last_offset;
+ VERIFY0(zap_update(mos, vd->vdev_leaf_zap,
+ VDEV_LEAF_ZAP_TRIM_LAST_OFFSET,
+ sizeof (last_offset), 1, &last_offset, tx));
+ }
+
+ if (vd->vdev_trim_action_time > 0) {
+ uint64_t val = (uint64_t)vd->vdev_trim_action_time;
+ VERIFY0(zap_update(mos, vd->vdev_leaf_zap,
+ VDEV_LEAF_ZAP_TRIM_ACTION_TIME, sizeof (val),
+ 1, &val, tx));
+ }
+
+ if (vd->vdev_trim_rate > 0) {
+ uint64_t rate = (uint64_t)vd->vdev_trim_rate;
+
+ if (rate == UINT64_MAX)
+ rate = 0;
+
+ VERIFY0(zap_update(mos, vd->vdev_leaf_zap,
+ VDEV_LEAF_ZAP_TRIM_RATE, sizeof (rate), 1, &rate, tx));
+ }
+
+ uint64_t partial = vd->vdev_trim_partial;
+ if (partial == UINT64_MAX)
+ partial = 0;
+
+ VERIFY0(zap_update(mos, vd->vdev_leaf_zap, VDEV_LEAF_ZAP_TRIM_PARTIAL,
+ sizeof (partial), 1, &partial, tx));
+
+ uint64_t secure = vd->vdev_trim_secure;
+ if (secure == UINT64_MAX)
+ secure = 0;
+
+ VERIFY0(zap_update(mos, vd->vdev_leaf_zap, VDEV_LEAF_ZAP_TRIM_SECURE,
+ sizeof (secure), 1, &secure, tx));
+
+ uint64_t trim_state = vd->vdev_trim_state;
+ VERIFY0(zap_update(mos, vd->vdev_leaf_zap, VDEV_LEAF_ZAP_TRIM_STATE,
+ sizeof (trim_state), 1, &trim_state, tx));
+}
+
+/*
+ * Update the on-disk state of a manual TRIM. This is called to request
+ * that a TRIM be started/suspended/canceled, or to change one of the
+ * TRIM options (partial, secure, rate).
+ */
+static void
+vdev_trim_change_state(vdev_t *vd, vdev_trim_state_t new_state,
+ uint64_t rate, boolean_t partial, boolean_t secure)
+{
+ ASSERT(MUTEX_HELD(&vd->vdev_trim_lock));
+ spa_t *spa = vd->vdev_spa;
+
+ if (new_state == vd->vdev_trim_state)
+ return;
+
+ /*
+ * Copy the vd's guid, this will be freed by the sync task.
+ */
+ uint64_t *guid = kmem_zalloc(sizeof (uint64_t), KM_SLEEP);
+ *guid = vd->vdev_guid;
+
+ /*
+	 * If the TRIM is currently suspended, preserve its original action
+	 * (start) time; otherwise record a new one for this request.
+ */
+ if (vd->vdev_trim_state != VDEV_TRIM_SUSPENDED) {
+ vd->vdev_trim_action_time = gethrestime_sec();
+ }
+
+ /*
+	 * If we're activating, then record the requested rate, partial,
+	 * and secure settings. Setting the last offset and rate to
+	 * UINT64_MAX is used as a sentinel to indicate they should be
+	 * reset to their default values.
+ */
+ if (new_state == VDEV_TRIM_ACTIVE) {
+ if (vd->vdev_trim_state == VDEV_TRIM_COMPLETE ||
+ vd->vdev_trim_state == VDEV_TRIM_CANCELED) {
+ vd->vdev_trim_last_offset = UINT64_MAX;
+ vd->vdev_trim_rate = UINT64_MAX;
+ vd->vdev_trim_partial = UINT64_MAX;
+ vd->vdev_trim_secure = UINT64_MAX;
+ }
+
+ if (rate != 0)
+ vd->vdev_trim_rate = rate;
+
+ if (partial != 0)
+ vd->vdev_trim_partial = partial;
+
+ if (secure != 0)
+ vd->vdev_trim_secure = secure;
+ }
+
+ boolean_t resumed = !!(vd->vdev_trim_state == VDEV_TRIM_SUSPENDED);
+ vd->vdev_trim_state = new_state;
+
+ dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
+ VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
+ dsl_sync_task_nowait(spa_get_dsl(spa), vdev_trim_zap_update_sync,
+ guid, 2, ZFS_SPACE_CHECK_NONE, tx);
+
+ switch (new_state) {
+ case VDEV_TRIM_ACTIVE:
+ spa_event_notify(spa, vd, NULL,
+ resumed ? ESC_ZFS_TRIM_RESUME : ESC_ZFS_TRIM_START);
+ spa_history_log_internal(spa, "trim", tx,
+ "vdev=%s activated", vd->vdev_path);
+ break;
+ case VDEV_TRIM_SUSPENDED:
+ spa_event_notify(spa, vd, NULL, ESC_ZFS_TRIM_SUSPEND);
+ spa_history_log_internal(spa, "trim", tx,
+ "vdev=%s suspended", vd->vdev_path);
+ break;
+ case VDEV_TRIM_CANCELED:
+ spa_event_notify(spa, vd, NULL, ESC_ZFS_TRIM_CANCEL);
+ spa_history_log_internal(spa, "trim", tx,
+ "vdev=%s canceled", vd->vdev_path);
+ break;
+ case VDEV_TRIM_COMPLETE:
+ spa_event_notify(spa, vd, NULL, ESC_ZFS_TRIM_FINISH);
+ spa_history_log_internal(spa, "trim", tx,
+ "vdev=%s complete", vd->vdev_path);
+ break;
+ default:
+ panic("invalid state %llu", (unsigned long long)new_state);
+ }
+
+ dmu_tx_commit(tx);
+}
+
+/*
+ * The zio_done_func_t done callback for each manual TRIM issued. It is
+ * responsible for updating the TRIM stats, reissuing failed TRIM I/Os,
+ * and limiting the number of in flight TRIM I/Os.
+ */
+static void
+vdev_trim_cb(zio_t *zio)
+{
+ vdev_t *vd = zio->io_vd;
+
+ mutex_enter(&vd->vdev_trim_io_lock);
+ if (zio->io_error == ENXIO && !vdev_writeable(vd)) {
+ /*
+ * The I/O failed because the vdev was unavailable; roll the
+ * last offset back. (This works because spa_sync waits on
+ * spa_txg_zio before it runs sync tasks.)
+ */
+ uint64_t *offset =
+ &vd->vdev_trim_offset[zio->io_txg & TXG_MASK];
+ *offset = MIN(*offset, zio->io_offset);
+ } else {
+ if (zio->io_error != 0) {
+ vd->vdev_stat.vs_trim_errors++;
+ spa_iostats_trim_add(vd->vdev_spa, TRIM_TYPE_MANUAL,
+ 0, 0, 0, 0, 1, zio->io_orig_size);
+ } else {
+ spa_iostats_trim_add(vd->vdev_spa, TRIM_TYPE_MANUAL,
+ 1, zio->io_orig_size, 0, 0, 0, 0);
+ }
+
+ vd->vdev_trim_bytes_done += zio->io_orig_size;
+ }
+
+ ASSERT3U(vd->vdev_trim_inflight[TRIM_TYPE_MANUAL], >, 0);
+ vd->vdev_trim_inflight[TRIM_TYPE_MANUAL]--;
+ cv_broadcast(&vd->vdev_trim_io_cv);
+ mutex_exit(&vd->vdev_trim_io_lock);
+
+ spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd);
+}
+
+/*
+ * The zio_done_func_t done callback for each automatic TRIM issued. It
+ * is responsible for updating the TRIM stats and limiting the number of
+ * in flight TRIM I/Os. Automatic TRIM I/Os are best effort and are
+ * never reissued on failure.
+ */
+static void
+vdev_autotrim_cb(zio_t *zio)
+{
+ vdev_t *vd = zio->io_vd;
+
+ mutex_enter(&vd->vdev_trim_io_lock);
+
+ if (zio->io_error != 0) {
+ vd->vdev_stat.vs_trim_errors++;
+ spa_iostats_trim_add(vd->vdev_spa, TRIM_TYPE_AUTO,
+ 0, 0, 0, 0, 1, zio->io_orig_size);
+ } else {
+ spa_iostats_trim_add(vd->vdev_spa, TRIM_TYPE_AUTO,
+ 1, zio->io_orig_size, 0, 0, 0, 0);
+ }
+
+ ASSERT3U(vd->vdev_trim_inflight[TRIM_TYPE_AUTO], >, 0);
+ vd->vdev_trim_inflight[TRIM_TYPE_AUTO]--;
+ cv_broadcast(&vd->vdev_trim_io_cv);
+ mutex_exit(&vd->vdev_trim_io_lock);
+
+ spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd);
+}
+
+/*
+ * Returns the average trim rate in bytes/sec for the ta->trim_vdev.
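+ * For example, 512MiB trimmed over two seconds is reported as roughly
+ * 256MiB/s; the +1 ms in the divisor only guards against division by
+ * zero when no time has elapsed.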
+ */
+static uint64_t
+vdev_trim_calculate_rate(trim_args_t *ta)
+{
+ return (ta->trim_bytes_done * 1000 /
+ (NSEC2MSEC(gethrtime() - ta->trim_start_time) + 1));
+}
+
+/*
+ * Issues a physical TRIM and takes care of rate limiting (bytes/sec)
+ * and number of concurrent TRIM I/Os.
+ */
+static int
+vdev_trim_range(trim_args_t *ta, uint64_t start, uint64_t size)
+{
+ vdev_t *vd = ta->trim_vdev;
+ spa_t *spa = vd->vdev_spa;
+
+ mutex_enter(&vd->vdev_trim_io_lock);
+
+ /*
+ * Limit manual TRIM I/Os to the requested rate. This does not
+	 * apply to automatic TRIM since no per-vdev rate can be specified.
+ */
+ if (ta->trim_type == TRIM_TYPE_MANUAL) {
+ while (vd->vdev_trim_rate != 0 && !vdev_trim_should_stop(vd) &&
+ vdev_trim_calculate_rate(ta) > vd->vdev_trim_rate) {
+ cv_timedwait_sig(&vd->vdev_trim_io_cv,
+ &vd->vdev_trim_io_lock, ddi_get_lbolt() +
+ MSEC_TO_TICK(10));
+ }
+ }
+ ta->trim_bytes_done += size;
+
+ /* Limit in flight trimming I/Os */
+ while (vd->vdev_trim_inflight[0] + vd->vdev_trim_inflight[1] >=
+ zfs_trim_queue_limit) {
+ cv_wait(&vd->vdev_trim_io_cv, &vd->vdev_trim_io_lock);
+ }
+ vd->vdev_trim_inflight[ta->trim_type]++;
+ mutex_exit(&vd->vdev_trim_io_lock);
+
+ dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
+ VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
+ uint64_t txg = dmu_tx_get_txg(tx);
+
+ spa_config_enter(spa, SCL_STATE_ALL, vd, RW_READER);
+ mutex_enter(&vd->vdev_trim_lock);
+
+ if (ta->trim_type == TRIM_TYPE_MANUAL &&
+ vd->vdev_trim_offset[txg & TXG_MASK] == 0) {
+ uint64_t *guid = kmem_zalloc(sizeof (uint64_t), KM_SLEEP);
+ *guid = vd->vdev_guid;
+
+ /* This is the first write of this txg. */
+ dsl_sync_task_nowait(spa_get_dsl(spa),
+ vdev_trim_zap_update_sync, guid, 2,
+ ZFS_SPACE_CHECK_RESERVED, tx);
+ }
+
+ /*
+ * We know the vdev_t will still be around since all consumers of
+ * vdev_free must stop the trimming first.
+ */
+ if ((ta->trim_type == TRIM_TYPE_MANUAL &&
+ vdev_trim_should_stop(vd)) ||
+ (ta->trim_type == TRIM_TYPE_AUTO &&
+ vdev_autotrim_should_stop(vd->vdev_top))) {
+ mutex_enter(&vd->vdev_trim_io_lock);
+ vd->vdev_trim_inflight[ta->trim_type]--;
+ mutex_exit(&vd->vdev_trim_io_lock);
+ spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd);
+ mutex_exit(&vd->vdev_trim_lock);
+ dmu_tx_commit(tx);
+ return (SET_ERROR(EINTR));
+ }
+ mutex_exit(&vd->vdev_trim_lock);
+
+ if (ta->trim_type == TRIM_TYPE_MANUAL)
+ vd->vdev_trim_offset[txg & TXG_MASK] = start + size;
+
+ zio_nowait(zio_trim(spa->spa_txg_zio[txg & TXG_MASK], vd,
+ start, size, ta->trim_type == TRIM_TYPE_MANUAL ?
+ vdev_trim_cb : vdev_autotrim_cb, NULL,
+ ZIO_PRIORITY_TRIM, ZIO_FLAG_CANFAIL, ta->trim_flags));
+ /* vdev_trim_cb and vdev_autotrim_cb release SCL_STATE_ALL */
+
+ dmu_tx_commit(tx);
+
+ return (0);
+}
+
+/*
+ * Issues TRIM I/Os for all ranges in the provided ta->trim_tree range tree.
+ * Additional parameters describing how the TRIM should be performed must
+ * be set in the trim_args structure. See the trim_args definition for
+ * additional information.
+ */
+static int
+vdev_trim_ranges(trim_args_t *ta)
+{
+ vdev_t *vd = ta->trim_vdev;
+ avl_tree_t *rt = &ta->trim_tree->rt_root;
+ uint64_t extent_bytes_max = ta->trim_extent_bytes_max;
+ uint64_t extent_bytes_min = ta->trim_extent_bytes_min;
+ spa_t *spa = vd->vdev_spa;
+
+ ta->trim_start_time = gethrtime();
+ ta->trim_bytes_done = 0;
+
+ for (range_seg_t *rs = avl_first(rt); rs != NULL;
+ rs = AVL_NEXT(rt, rs)) {
+ uint64_t size = rs->rs_end - rs->rs_start;
+
+ if (extent_bytes_min && size < extent_bytes_min) {
+ spa_iostats_trim_add(spa, ta->trim_type,
+ 0, 0, 1, size, 0, 0);
+ continue;
+ }
+
+ /* Split range into legally-sized physical chunks */
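+		/*
+		 * For example, with the default 128MiB extent_bytes_max a
+		 * 300MiB range is issued as three TRIMs of 128MiB, 128MiB,
+		 * and 44MiB.
+		 */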
+ uint64_t writes_required = ((size - 1) / extent_bytes_max) + 1;
+
+ for (uint64_t w = 0; w < writes_required; w++) {
+ int error;
+
+ error = vdev_trim_range(ta, VDEV_LABEL_START_SIZE +
+ rs->rs_start + (w * extent_bytes_max),
+ MIN(size - (w * extent_bytes_max),
+ extent_bytes_max));
+ if (error != 0) {
+ return (error);
+ }
+ }
+ }
+
+ return (0);
+}
+
+/*
+ * Calculates the completion percentage of a manual TRIM.
+ */
+static void
+vdev_trim_calculate_progress(vdev_t *vd)
+{
+ ASSERT(spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_READER) ||
+ spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_WRITER));
+ ASSERT(vd->vdev_leaf_zap != 0);
+
+ vd->vdev_trim_bytes_est = 0;
+ vd->vdev_trim_bytes_done = 0;
+
+ for (uint64_t i = 0; i < vd->vdev_top->vdev_ms_count; i++) {
+ metaslab_t *msp = vd->vdev_top->vdev_ms[i];
+ mutex_enter(&msp->ms_lock);
+
+ uint64_t ms_free = msp->ms_size -
+ metaslab_allocated_space(msp);
+
+ if (vd->vdev_top->vdev_ops == &vdev_raidz_ops)
+ ms_free /= vd->vdev_top->vdev_children;
+
+ /*
+ * Convert the metaslab range to a physical range
+ * on our vdev. We use this to determine if we are
+ * in the middle of this metaslab range.
+ */
+ range_seg_t logical_rs, physical_rs;
+ logical_rs.rs_start = msp->ms_start;
+ logical_rs.rs_end = msp->ms_start + msp->ms_size;
+ vdev_xlate(vd, &logical_rs, &physical_rs);
+
+ if (vd->vdev_trim_last_offset <= physical_rs.rs_start) {
+ vd->vdev_trim_bytes_est += ms_free;
+ mutex_exit(&msp->ms_lock);
+ continue;
+ } else if (vd->vdev_trim_last_offset > physical_rs.rs_end) {
+ vd->vdev_trim_bytes_done += ms_free;
+ vd->vdev_trim_bytes_est += ms_free;
+ mutex_exit(&msp->ms_lock);
+ continue;
+ }
+
+ /*
+ * If we get here, we're in the middle of trimming this
+ * metaslab. Load it and walk the free tree for more
+ * accurate progress estimation.
+ */
+ VERIFY0(metaslab_load(msp));
+
+ for (range_seg_t *rs = avl_first(&msp->ms_allocatable->rt_root);
+ rs; rs = AVL_NEXT(&msp->ms_allocatable->rt_root, rs)) {
+ logical_rs.rs_start = rs->rs_start;
+ logical_rs.rs_end = rs->rs_end;
+ vdev_xlate(vd, &logical_rs, &physical_rs);
+
+ uint64_t size = physical_rs.rs_end -
+ physical_rs.rs_start;
+ vd->vdev_trim_bytes_est += size;
+ if (vd->vdev_trim_last_offset >= physical_rs.rs_end) {
+ vd->vdev_trim_bytes_done += size;
+ } else if (vd->vdev_trim_last_offset >
+ physical_rs.rs_start &&
+ vd->vdev_trim_last_offset <=
+ physical_rs.rs_end) {
+ vd->vdev_trim_bytes_done +=
+ vd->vdev_trim_last_offset -
+ physical_rs.rs_start;
+ }
+ }
+ mutex_exit(&msp->ms_lock);
+ }
+}
+
+/*
+ * Load from disk the vdev's manual TRIM information. This includes the
+ * state, progress, and options provided when initiating the manual TRIM.
+ */
+static int
+vdev_trim_load(vdev_t *vd)
+{
+ int err = 0;
+ ASSERT(spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_READER) ||
+ spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_WRITER));
+ ASSERT(vd->vdev_leaf_zap != 0);
+
+ if (vd->vdev_trim_state == VDEV_TRIM_ACTIVE ||
+ vd->vdev_trim_state == VDEV_TRIM_SUSPENDED) {
+ err = zap_lookup(vd->vdev_spa->spa_meta_objset,
+ vd->vdev_leaf_zap, VDEV_LEAF_ZAP_TRIM_LAST_OFFSET,
+ sizeof (vd->vdev_trim_last_offset), 1,
+ &vd->vdev_trim_last_offset);
+ if (err == ENOENT) {
+ vd->vdev_trim_last_offset = 0;
+ err = 0;
+ }
+
+ if (err == 0) {
+ err = zap_lookup(vd->vdev_spa->spa_meta_objset,
+ vd->vdev_leaf_zap, VDEV_LEAF_ZAP_TRIM_RATE,
+ sizeof (vd->vdev_trim_rate), 1,
+ &vd->vdev_trim_rate);
+ if (err == ENOENT) {
+ vd->vdev_trim_rate = 0;
+ err = 0;
+ }
+ }
+
+ if (err == 0) {
+ err = zap_lookup(vd->vdev_spa->spa_meta_objset,
+ vd->vdev_leaf_zap, VDEV_LEAF_ZAP_TRIM_PARTIAL,
+ sizeof (vd->vdev_trim_partial), 1,
+ &vd->vdev_trim_partial);
+ if (err == ENOENT) {
+ vd->vdev_trim_partial = 0;
+ err = 0;
+ }
+ }
+
+ if (err == 0) {
+ err = zap_lookup(vd->vdev_spa->spa_meta_objset,
+ vd->vdev_leaf_zap, VDEV_LEAF_ZAP_TRIM_SECURE,
+ sizeof (vd->vdev_trim_secure), 1,
+ &vd->vdev_trim_secure);
+ if (err == ENOENT) {
+ vd->vdev_trim_secure = 0;
+ err = 0;
+ }
+ }
+ }
+
+ vdev_trim_calculate_progress(vd);
+
+ return (err);
+}
+
+/*
+ * Convert the logical range into a physical range and add it to the
+ * range tree passed in the trim_args_t.
+ */
+static void
+vdev_trim_range_add(void *arg, uint64_t start, uint64_t size)
+{
+ trim_args_t *ta = arg;
+ vdev_t *vd = ta->trim_vdev;
+ range_seg_t logical_rs, physical_rs;
+ logical_rs.rs_start = start;
+ logical_rs.rs_end = start + size;
+
+ /*
+ * Every range to be trimmed must be part of ms_allocatable.
+	 * When ZFS_DEBUG_TRIM is set, load the metaslab to verify this
+ * is always the case.
+ */
+ if (zfs_flags & ZFS_DEBUG_TRIM) {
+ metaslab_t *msp = ta->trim_msp;
+ VERIFY0(metaslab_load(msp));
+ VERIFY3B(msp->ms_loaded, ==, B_TRUE);
+ VERIFY(range_tree_find(msp->ms_allocatable, start, size));
+ }
+
+ ASSERT(vd->vdev_ops->vdev_op_leaf);
+ vdev_xlate(vd, &logical_rs, &physical_rs);
+
+ IMPLY(vd->vdev_top == vd,
+ logical_rs.rs_start == physical_rs.rs_start);
+ IMPLY(vd->vdev_top == vd,
+ logical_rs.rs_end == physical_rs.rs_end);
+
+ /*
+ * Only a manual trim will be traversing the vdev sequentially.
+ * For an auto trim all valid ranges should be added.
+ */
+ if (ta->trim_type == TRIM_TYPE_MANUAL) {
+
+ /* Only add segments that we have not visited yet */
+ if (physical_rs.rs_end <= vd->vdev_trim_last_offset)
+ return;
+
+ /* Pick up where we left off mid-range. */
+ if (vd->vdev_trim_last_offset > physical_rs.rs_start) {
+ ASSERT3U(physical_rs.rs_end, >,
+ vd->vdev_trim_last_offset);
+ physical_rs.rs_start = vd->vdev_trim_last_offset;
+ }
+ }
+
+ ASSERT3U(physical_rs.rs_end, >=, physical_rs.rs_start);
+
+ /*
+ * With raidz, it's possible that the logical range does not live on
+	 * this leaf vdev. We only add the physical range to this vdev's
+	 * trim tree if it has a length greater than 0.
+ */
+ if (physical_rs.rs_end > physical_rs.rs_start) {
+ range_tree_add(ta->trim_tree, physical_rs.rs_start,
+ physical_rs.rs_end - physical_rs.rs_start);
+ } else {
+ ASSERT3U(physical_rs.rs_end, ==, physical_rs.rs_start);
+ }
+}
+
+/*
+ * Each manual TRIM thread is responsible for trimming the unallocated
+ * space of a single leaf vdev. This is accomplished by sequentially
+ * iterating over the metaslabs of its top-level vdev and issuing TRIM
+ * I/O for the space described by each metaslab's ms_allocatable. While
+ * a metaslab is undergoing trimming, it is not eligible for new
+ * allocations.
+ */
+static void
+vdev_trim_thread(void *arg)
+{
+ vdev_t *vd = arg;
+ spa_t *spa = vd->vdev_spa;
+ trim_args_t ta;
+ int error = 0;
+
+ /*
+ * The VDEV_LEAF_ZAP_TRIM_* entries may have been updated by
+ * vdev_trim(). Wait for the updated values to be reflected
+ * in the zap in order to start with the requested settings.
+ */
+ txg_wait_synced(spa_get_dsl(vd->vdev_spa), 0);
+
+ ASSERT(vdev_is_concrete(vd));
+ spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
+
+ vd->vdev_trim_last_offset = 0;
+ vd->vdev_trim_rate = 0;
+ vd->vdev_trim_partial = 0;
+ vd->vdev_trim_secure = 0;
+
+ VERIFY0(vdev_trim_load(vd));
+
+ ta.trim_vdev = vd;
+ ta.trim_extent_bytes_max = zfs_trim_extent_bytes_max;
+ ta.trim_extent_bytes_min = zfs_trim_extent_bytes_min;
+ ta.trim_tree = range_tree_create(NULL, NULL);
+ ta.trim_type = TRIM_TYPE_MANUAL;
+ ta.trim_flags = 0;
+
+ /*
+	 * When a secure TRIM has been requested, infer that the intent
+ * is that everything must be trimmed. Override the default
+ * minimum TRIM size to prevent ranges from being skipped.
+ */
+ if (vd->vdev_trim_secure) {
+ ta.trim_flags |= ZIO_TRIM_SECURE;
+ ta.trim_extent_bytes_min = SPA_MINBLOCKSIZE;
+ }
+
+ uint64_t ms_count = 0;
+ for (uint64_t i = 0; !vd->vdev_detached &&
+ i < vd->vdev_top->vdev_ms_count; i++) {
+ metaslab_t *msp = vd->vdev_top->vdev_ms[i];
+
+ /*
+ * If we've expanded the top-level vdev or it's our
+ * first pass, calculate our progress.
+ */
+ if (vd->vdev_top->vdev_ms_count != ms_count) {
+ vdev_trim_calculate_progress(vd);
+ ms_count = vd->vdev_top->vdev_ms_count;
+ }
+
+ spa_config_exit(spa, SCL_CONFIG, FTAG);
+ metaslab_disable(msp);
+ mutex_enter(&msp->ms_lock);
+ VERIFY0(metaslab_load(msp));
+
+ /*
+		 * If a partial TRIM was requested, skip metaslabs which have
+ * never been initialized and thus have never been written.
+ */
+ if (msp->ms_sm == NULL && vd->vdev_trim_partial) {
+ mutex_exit(&msp->ms_lock);
+ metaslab_enable(msp, B_FALSE);
+ spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
+ vdev_trim_calculate_progress(vd);
+ continue;
+ }
+
+ ta.trim_msp = msp;
+ range_tree_walk(msp->ms_allocatable, vdev_trim_range_add, &ta);
+ range_tree_vacate(msp->ms_trim, NULL, NULL);
+ mutex_exit(&msp->ms_lock);
+
+ error = vdev_trim_ranges(&ta);
+ metaslab_enable(msp, B_TRUE);
+ spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
+
+ range_tree_vacate(ta.trim_tree, NULL, NULL);
+ if (error != 0)
+ break;
+ }
+
+ spa_config_exit(spa, SCL_CONFIG, FTAG);
+ mutex_enter(&vd->vdev_trim_io_lock);
+ while (vd->vdev_trim_inflight[0] > 0) {
+ cv_wait(&vd->vdev_trim_io_cv, &vd->vdev_trim_io_lock);
+ }
+ mutex_exit(&vd->vdev_trim_io_lock);
+
+ range_tree_destroy(ta.trim_tree);
+
+ mutex_enter(&vd->vdev_trim_lock);
+ if (!vd->vdev_trim_exit_wanted && vdev_writeable(vd)) {
+ vdev_trim_change_state(vd, VDEV_TRIM_COMPLETE,
+ vd->vdev_trim_rate, vd->vdev_trim_partial,
+ vd->vdev_trim_secure);
+ }
+ ASSERT(vd->vdev_trim_thread != NULL || vd->vdev_trim_inflight[0] == 0);
+
+ /*
+ * Drop the vdev_trim_lock while we sync out the txg since it's
+ * possible that a device might be trying to come online and must
+ * check to see if it needs to restart a trim. That thread will be
+ * holding the spa_config_lock which would prevent the txg_wait_synced
+ * from completing.
+ */
+ mutex_exit(&vd->vdev_trim_lock);
+ txg_wait_synced(spa_get_dsl(spa), 0);
+ mutex_enter(&vd->vdev_trim_lock);
+
+ vd->vdev_trim_thread = NULL;
+ cv_broadcast(&vd->vdev_trim_cv);
+ mutex_exit(&vd->vdev_trim_lock);
+}
+
+/*
+ * Initiates a manual TRIM for the vdev_t. Callers must hold vdev_trim_lock,
+ * the vdev_t must be a leaf, and it cannot already be manually trimming.
+ */
+void
+vdev_trim(vdev_t *vd, uint64_t rate, boolean_t partial, boolean_t secure)
+{
+ ASSERT(MUTEX_HELD(&vd->vdev_trim_lock));
+ ASSERT(vd->vdev_ops->vdev_op_leaf);
+ ASSERT(vdev_is_concrete(vd));
+ ASSERT3P(vd->vdev_trim_thread, ==, NULL);
+ ASSERT(!vd->vdev_detached);
+ ASSERT(!vd->vdev_trim_exit_wanted);
+ ASSERT(!vd->vdev_top->vdev_removing);
+
+ vdev_trim_change_state(vd, VDEV_TRIM_ACTIVE, rate, partial, secure);
+ vd->vdev_trim_thread = thread_create(NULL, 0,
+ vdev_trim_thread, vd, 0, &p0, TS_RUN, maxclsyspri);
+}
+
+/*
+ * Wait for the trimming thread to be terminated (canceled or stopped).
+ */
+static void
+vdev_trim_stop_wait_impl(vdev_t *vd)
+{
+ ASSERT(MUTEX_HELD(&vd->vdev_trim_lock));
+
+ while (vd->vdev_trim_thread != NULL)
+ cv_wait(&vd->vdev_trim_cv, &vd->vdev_trim_lock);
+
+ ASSERT3P(vd->vdev_trim_thread, ==, NULL);
+ vd->vdev_trim_exit_wanted = B_FALSE;
+}
+
+/*
+ * Wait for vdev trim threads which were listed to cleanly exit.
+ */
+void
+vdev_trim_stop_wait(spa_t *spa, list_t *vd_list)
+{
+ vdev_t *vd;
+
+ ASSERT(MUTEX_HELD(&spa_namespace_lock));
+
+ while ((vd = list_remove_head(vd_list)) != NULL) {
+ mutex_enter(&vd->vdev_trim_lock);
+ vdev_trim_stop_wait_impl(vd);
+ mutex_exit(&vd->vdev_trim_lock);
+ }
+}
+
+/*
+ * Stop trimming a device, with the resultant trimming state being tgt_state.
+ * For blocking behavior pass NULL for vd_list. Otherwise, when a list_t is
+ * provided the stopping vdev is inserted into the list. Callers are then
+ * required to call vdev_trim_stop_wait() to block for all the trim threads
+ * to exit. The caller must hold vdev_trim_lock and must not be writing to
+ * the spa config, as the trimming thread may try to enter the config as a
+ * reader before exiting.
+ */
+void
+vdev_trim_stop(vdev_t *vd, vdev_trim_state_t tgt_state, list_t *vd_list)
+{
+ ASSERT(!spa_config_held(vd->vdev_spa, SCL_CONFIG|SCL_STATE, RW_WRITER));
+ ASSERT(MUTEX_HELD(&vd->vdev_trim_lock));
+ ASSERT(vd->vdev_ops->vdev_op_leaf);
+ ASSERT(vdev_is_concrete(vd));
+
+ /*
+ * Allow cancel requests to proceed even if the trim thread has
+ * stopped.
+ */
+ if (vd->vdev_trim_thread == NULL && tgt_state != VDEV_TRIM_CANCELED)
+ return;
+
+ vdev_trim_change_state(vd, tgt_state, 0, 0, 0);
+ vd->vdev_trim_exit_wanted = B_TRUE;
+
+ if (vd_list == NULL) {
+ vdev_trim_stop_wait_impl(vd);
+ } else {
+ ASSERT(MUTEX_HELD(&spa_namespace_lock));
+ list_insert_tail(vd_list, vd);
+ }
+}
+
+/*
+ * Requests that each leaf vdev in the tree rooted at 'vd' stop trimming
+ * and appends it to vd_list for the caller to wait on.
+ */
+static void
+vdev_trim_stop_all_impl(vdev_t *vd, vdev_trim_state_t tgt_state,
+ list_t *vd_list)
+{
+ if (vd->vdev_ops->vdev_op_leaf && vdev_is_concrete(vd)) {
+ mutex_enter(&vd->vdev_trim_lock);
+ vdev_trim_stop(vd, tgt_state, vd_list);
+ mutex_exit(&vd->vdev_trim_lock);
+ return;
+ }
+
+ for (uint64_t i = 0; i < vd->vdev_children; i++) {
+ vdev_trim_stop_all_impl(vd->vdev_child[i], tgt_state,
+ vd_list);
+ }
+}
+
+/*
+ * Convenience function to stop trimming of a vdev tree and set all trim
+ * thread pointers to NULL.
+ */
+void
+vdev_trim_stop_all(vdev_t *vd, vdev_trim_state_t tgt_state)
+{
+ spa_t *spa = vd->vdev_spa;
+ list_t vd_list;
+
+ ASSERT(MUTEX_HELD(&spa_namespace_lock));
+
+ list_create(&vd_list, sizeof (vdev_t),
+ offsetof(vdev_t, vdev_trim_node));
+
+ vdev_trim_stop_all_impl(vd, tgt_state, &vd_list);
+ vdev_trim_stop_wait(spa, &vd_list);
+
+ if (vd->vdev_spa->spa_sync_on) {
+ /* Make sure that our state has been synced to disk */
+ txg_wait_synced(spa_get_dsl(vd->vdev_spa), 0);
+ }
+
+ list_destroy(&vd_list);
+}
+
+/*
+ * Conditionally restarts a manual TRIM given its on-disk state.
+ */
+void
+vdev_trim_restart(vdev_t *vd)
+{
+ ASSERT(MUTEX_HELD(&spa_namespace_lock));
+ ASSERT(!spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));
+
+ if (vd->vdev_leaf_zap != 0) {
+ mutex_enter(&vd->vdev_trim_lock);
+ uint64_t trim_state = VDEV_TRIM_NONE;
+ int err = zap_lookup(vd->vdev_spa->spa_meta_objset,
+ vd->vdev_leaf_zap, VDEV_LEAF_ZAP_TRIM_STATE,
+ sizeof (trim_state), 1, &trim_state);
+ ASSERT(err == 0 || err == ENOENT);
+ vd->vdev_trim_state = trim_state;
+
+ uint64_t timestamp = 0;
+ err = zap_lookup(vd->vdev_spa->spa_meta_objset,
+ vd->vdev_leaf_zap, VDEV_LEAF_ZAP_TRIM_ACTION_TIME,
+ sizeof (timestamp), 1, &timestamp);
+ ASSERT(err == 0 || err == ENOENT);
+ vd->vdev_trim_action_time = (time_t)timestamp;
+
+ if (vd->vdev_trim_state == VDEV_TRIM_SUSPENDED ||
+ vd->vdev_offline) {
+ /* load progress for reporting, but don't resume */
+ VERIFY0(vdev_trim_load(vd));
+ } else if (vd->vdev_trim_state == VDEV_TRIM_ACTIVE &&
+ vdev_writeable(vd) && !vd->vdev_top->vdev_removing &&
+ vd->vdev_trim_thread == NULL) {
+ VERIFY0(vdev_trim_load(vd));
+ vdev_trim(vd, vd->vdev_trim_rate,
+ vd->vdev_trim_partial, vd->vdev_trim_secure);
+ }
+
+ mutex_exit(&vd->vdev_trim_lock);
+ }
+
+ for (uint64_t i = 0; i < vd->vdev_children; i++) {
+ vdev_trim_restart(vd->vdev_child[i]);
+ }
+}
+
+/*
+ * Used by the automatic TRIM when ZFS_DEBUG_TRIM is set to verify that
+ * every TRIM range is contained within ms_allocatable.
+ */
+static void
+vdev_trim_range_verify(void *arg, uint64_t start, uint64_t size)
+{
+ trim_args_t *ta = arg;
+ metaslab_t *msp = ta->trim_msp;
+
+ VERIFY3B(msp->ms_loaded, ==, B_TRUE);
+ VERIFY3U(msp->ms_disabled, >, 0);
+ VERIFY(range_tree_find(msp->ms_allocatable, start, size) != NULL);
+}
+
+/*
+ * Each automatic TRIM thread is responsible for managing the trimming of a
+ * top-level vdev in the pool. No automatic TRIM state is maintained on-disk.
+ *
+ * N.B. This behavior is different from a manual TRIM where a thread
+ * is created for each leaf vdev, instead of each top-level vdev.
+ */
+static void
+vdev_autotrim_thread(void *arg)
+{
+ vdev_t *vd = arg;
+ spa_t *spa = vd->vdev_spa;
+ int shift = 0;
+
+ mutex_enter(&vd->vdev_autotrim_lock);
+ ASSERT3P(vd->vdev_top, ==, vd);
+ ASSERT3P(vd->vdev_autotrim_thread, !=, NULL);
+ mutex_exit(&vd->vdev_autotrim_lock);
+ spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
+
+ uint64_t extent_bytes_max = zfs_trim_extent_bytes_max;
+ uint64_t extent_bytes_min = zfs_trim_extent_bytes_min;
+
+ while (!vdev_autotrim_should_stop(vd)) {
+ int txgs_per_trim = MAX(zfs_trim_txg_batch, 1);
+ boolean_t issued_trim = B_FALSE;
+
+ /*
+		 * All of the metaslabs are divided into groups of size
+ * num_metaslabs / zfs_trim_txg_batch. Each of these groups
+ * is composed of metaslabs which are spread evenly over the
+ * device.
+ *
+ * For example, when zfs_trim_txg_batch = 32 (default) then
+ * group 0 will contain metaslabs 0, 32, 64, ...;
+ * group 1 will contain metaslabs 1, 33, 65, ...;
+ * group 2 will contain metaslabs 2, 34, 66, ...; and so on.
+ *
+ * On each pass through the while() loop one of these groups
+ * is selected. This is accomplished by using a shift value
+ * to select the starting metaslab, then striding over the
+ * metaslabs using the zfs_trim_txg_batch size. This is
+ * done to accomplish two things.
+ *
+		 * 1) By dividing the metaslabs into groups, and making sure
+		 *    that each group takes a minimum of one txg to process,
+		 *    zfs_trim_txg_batch controls the minimum number of
+		 *    txgs which must occur before a metaslab is revisited.
+ *
+ * 2) Selecting non-consecutive metaslabs distributes the
+ * TRIM commands for a group evenly over the entire device.
+ * This can be advantageous for certain types of devices.
+ */
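+		/*
+		 * For example, with 200 metaslabs and the default batch of
+		 * 32, each pass visits at most 7 metaslabs, and any given
+		 * metaslab is revisited only after all 32 groups, and hence
+		 * at least 32 open txgs, have gone by.
+		 */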
+ for (uint64_t i = shift % txgs_per_trim; i < vd->vdev_ms_count;
+ i += txgs_per_trim) {
+ metaslab_t *msp = vd->vdev_ms[i];
+ range_tree_t *trim_tree;
+
+ spa_config_exit(spa, SCL_CONFIG, FTAG);
+ metaslab_disable(msp);
+ spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
+
+ mutex_enter(&msp->ms_lock);
+
+ /*
+ * Skip the metaslab when it has never been allocated
+ * or when there are no recent frees to trim.
+ */
+ if (msp->ms_sm == NULL ||
+ range_tree_is_empty(msp->ms_trim)) {
+ mutex_exit(&msp->ms_lock);
+ metaslab_enable(msp, B_FALSE);
+ continue;
+ }
+
+ /*
+ * Skip the metaslab when it has already been disabled.
+ * This may happen when a manual TRIM or initialize
+ * operation is running concurrently. In the case
+ * of a manual TRIM, the ms_trim tree will have been
+ * vacated. Only ranges added after the manual TRIM
+ * disabled the metaslab will be included in the tree.
+ * These will be processed when the automatic TRIM
+ * next revisits this metaslab.
+ */
+ if (msp->ms_disabled > 1) {
+ mutex_exit(&msp->ms_lock);
+ metaslab_enable(msp, B_FALSE);
+ continue;
+ }
+
+ /*
+ * Allocate an empty range tree which is swapped in
+ * for the existing ms_trim tree while it is processed.
+ */
+ trim_tree = range_tree_create(NULL, NULL);
+ range_tree_swap(&msp->ms_trim, &trim_tree);
+ ASSERT(range_tree_is_empty(msp->ms_trim));
+
+ /*
+ * There are two cases when constructing the per-vdev
+ * trim trees for a metaslab. If the top-level vdev
+ * has no children then it is also a leaf and should
+ * be trimmed. Otherwise our children are the leaves
+ * and a trim tree should be constructed for each.
+ */
+ trim_args_t *tap;
+ uint64_t children = vd->vdev_children;
+ if (children == 0) {
+ children = 1;
+ tap = kmem_zalloc(sizeof (trim_args_t) *
+ children, KM_SLEEP);
+ tap[0].trim_vdev = vd;
+ } else {
+ tap = kmem_zalloc(sizeof (trim_args_t) *
+ children, KM_SLEEP);
+
+ for (uint64_t c = 0; c < children; c++) {
+ tap[c].trim_vdev = vd->vdev_child[c];
+ }
+ }
+
+ for (uint64_t c = 0; c < children; c++) {
+ trim_args_t *ta = &tap[c];
+ vdev_t *cvd = ta->trim_vdev;
+
+ ta->trim_msp = msp;
+ ta->trim_extent_bytes_max = extent_bytes_max;
+ ta->trim_extent_bytes_min = extent_bytes_min;
+ ta->trim_type = TRIM_TYPE_AUTO;
+ ta->trim_flags = 0;
+
+ if (cvd->vdev_detached ||
+ !vdev_writeable(cvd) ||
+ !cvd->vdev_has_trim ||
+ cvd->vdev_trim_thread != NULL) {
+ continue;
+ }
+
+ /*
+ * When a device has an attached hot spare, or
+				 * is being replaced, it will not be trimmed.
+ * This is done to avoid adding additional
+ * stress to a potentially unhealthy device,
+ * and to minimize the required rebuild time.
+ */
+ if (!cvd->vdev_ops->vdev_op_leaf)
+ continue;
+
+ ta->trim_tree = range_tree_create(NULL, NULL);
+ range_tree_walk(trim_tree,
+ vdev_trim_range_add, ta);
+ }
+
+ mutex_exit(&msp->ms_lock);
+ spa_config_exit(spa, SCL_CONFIG, FTAG);
+
+ /*
+ * Issue the TRIM I/Os for all ranges covered by the
+ * TRIM trees. These ranges are safe to TRIM because
+ * no new allocations will be performed until the call
+			 * to metaslab_enable() below.
+ */
+ for (uint64_t c = 0; c < children; c++) {
+ trim_args_t *ta = &tap[c];
+
+ /*
+ * Always yield to a manual TRIM if one has
+ * been started for the child vdev.
+ */
+ if (ta->trim_tree == NULL ||
+ ta->trim_vdev->vdev_trim_thread != NULL) {
+ continue;
+ }
+
+ /*
+ * After this point metaslab_enable() must be
+ * called with the sync flag set. This is done
+ * here because vdev_trim_ranges() is allowed
+ * to be interrupted (EINTR) before issuing all
+ * of the required TRIM I/Os.
+ */
+ issued_trim = B_TRUE;
+
+ int error = vdev_trim_ranges(ta);
+ if (error)
+ break;
+ }
+
+ /*
+ * Verify every range which was trimmed is still
+ * contained within the ms_allocatable tree.
+ */
+ if (zfs_flags & ZFS_DEBUG_TRIM) {
+ mutex_enter(&msp->ms_lock);
+ VERIFY0(metaslab_load(msp));
+ VERIFY3P(tap[0].trim_msp, ==, msp);
+ range_tree_walk(trim_tree,
+ vdev_trim_range_verify, &tap[0]);
+ mutex_exit(&msp->ms_lock);
+ }
+
+ range_tree_vacate(trim_tree, NULL, NULL);
+ range_tree_destroy(trim_tree);
+
+ metaslab_enable(msp, issued_trim);
+ spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
+
+ for (uint64_t c = 0; c < children; c++) {
+ trim_args_t *ta = &tap[c];
+
+ if (ta->trim_tree == NULL)
+ continue;
+
+ range_tree_vacate(ta->trim_tree, NULL, NULL);
+ range_tree_destroy(ta->trim_tree);
+ }
+
+ kmem_free(tap, sizeof (trim_args_t) * children);
+ }
+
+ spa_config_exit(spa, SCL_CONFIG, FTAG);
+
+ /*
+		 * After completing the group of metaslabs, wait for the next
+ * open txg. This is done to make sure that a minimum of
+ * zfs_trim_txg_batch txgs will occur before these metaslabs
+ * are trimmed again.
+ */
+ txg_wait_open(spa_get_dsl(spa), 0, issued_trim);
+
+ shift++;
+ spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
+ }
+
+ for (uint64_t c = 0; c < vd->vdev_children; c++) {
+ vdev_t *cvd = vd->vdev_child[c];
+ mutex_enter(&cvd->vdev_trim_io_lock);
+
+ while (cvd->vdev_trim_inflight[1] > 0) {
+ cv_wait(&cvd->vdev_trim_io_cv,
+ &cvd->vdev_trim_io_lock);
+ }
+ mutex_exit(&cvd->vdev_trim_io_lock);
+ }
+
+ spa_config_exit(spa, SCL_CONFIG, FTAG);
+
+ /*
+	 * When exiting because the autotrim property was set to off,
+	 * abandon any unprocessed ms_trim ranges to reclaim the memory.
+ */
+ if (spa_get_autotrim(spa) == SPA_AUTOTRIM_OFF) {
+ for (uint64_t i = 0; i < vd->vdev_ms_count; i++) {
+ metaslab_t *msp = vd->vdev_ms[i];
+
+ mutex_enter(&msp->ms_lock);
+ range_tree_vacate(msp->ms_trim, NULL, NULL);
+ mutex_exit(&msp->ms_lock);
+ }
+ }
+
+ mutex_enter(&vd->vdev_autotrim_lock);
+ ASSERT(vd->vdev_autotrim_thread != NULL);
+ vd->vdev_autotrim_thread = NULL;
+ cv_broadcast(&vd->vdev_autotrim_cv);
+ mutex_exit(&vd->vdev_autotrim_lock);
+}
+
+/*
+ * Starts an autotrim thread, if needed, for each top-level vdev which can be
+ * trimmed. A top-level vdev which has been evacuated will never be trimmed.
+ */
+void
+vdev_autotrim(spa_t *spa)
+{
+ vdev_t *root_vd = spa->spa_root_vdev;
+
+ for (uint64_t i = 0; i < root_vd->vdev_children; i++) {
+ vdev_t *tvd = root_vd->vdev_child[i];
+
+ mutex_enter(&tvd->vdev_autotrim_lock);
+ if (vdev_writeable(tvd) && !tvd->vdev_removing &&
+ tvd->vdev_autotrim_thread == NULL) {
+ ASSERT3P(tvd->vdev_top, ==, tvd);
+
+ tvd->vdev_autotrim_thread = thread_create(NULL, 0,
+ vdev_autotrim_thread, tvd, 0, &p0, TS_RUN,
+ maxclsyspri);
+ ASSERT(tvd->vdev_autotrim_thread != NULL);
+ }
+ mutex_exit(&tvd->vdev_autotrim_lock);
+ }
+}
+
+/*
+ * Wait for the vdev_autotrim_thread associated with the passed top-level
+ * vdev to be terminated (canceled or stopped).
+ */
+void
+vdev_autotrim_stop_wait(vdev_t *tvd)
+{
+ mutex_enter(&tvd->vdev_autotrim_lock);
+ if (tvd->vdev_autotrim_thread != NULL) {
+ tvd->vdev_autotrim_exit_wanted = B_TRUE;
+
+ while (tvd->vdev_autotrim_thread != NULL) {
+ cv_wait(&tvd->vdev_autotrim_cv,
+ &tvd->vdev_autotrim_lock);
+ }
+
+ ASSERT3P(tvd->vdev_autotrim_thread, ==, NULL);
+ tvd->vdev_autotrim_exit_wanted = B_FALSE;
+ }
+ mutex_exit(&tvd->vdev_autotrim_lock);
+}
+
+/*
+ * Wait for all of the vdev_autotrim_threads associated with the pool to
+ * be terminated (canceled or stopped).
+ */
+void
+vdev_autotrim_stop_all(spa_t *spa)
+{
+ vdev_t *root_vd = spa->spa_root_vdev;
+
+ for (uint64_t i = 0; i < root_vd->vdev_children; i++)
+ vdev_autotrim_stop_wait(root_vd->vdev_child[i]);
+}
+
+/*
+ * Conditionally restart all of the vdev_autotrim_threads for the pool.
+ */
+void
+vdev_autotrim_restart(spa_t *spa)
+{
+ ASSERT(MUTEX_HELD(&spa_namespace_lock));
+
+ if (spa->spa_autotrim)
+ vdev_autotrim(spa);
+}
+
+#if defined(_KERNEL)
+EXPORT_SYMBOL(vdev_trim);
+EXPORT_SYMBOL(vdev_trim_stop);
+EXPORT_SYMBOL(vdev_trim_stop_all);
+EXPORT_SYMBOL(vdev_trim_stop_wait);
+EXPORT_SYMBOL(vdev_trim_restart);
+EXPORT_SYMBOL(vdev_autotrim);
+EXPORT_SYMBOL(vdev_autotrim_stop_all);
+EXPORT_SYMBOL(vdev_autotrim_stop_wait);
+EXPORT_SYMBOL(vdev_autotrim_restart);
+
+/* BEGIN CSTYLED */
+module_param(zfs_trim_extent_bytes_max, uint, 0644);
+MODULE_PARM_DESC(zfs_trim_extent_bytes_max,
+ "Max size of TRIM commands, larger will be split");
+
+module_param(zfs_trim_extent_bytes_min, uint, 0644);
+MODULE_PARM_DESC(zfs_trim_extent_bytes_min,
+ "Min size of TRIM commands, smaller will be skipped");
+
+module_param(zfs_trim_metaslab_skip, uint, 0644);
+MODULE_PARM_DESC(zfs_trim_metaslab_skip,
+ "Skip metaslabs which have never been initialized");
+
+module_param(zfs_trim_txg_batch, uint, 0644);
+MODULE_PARM_DESC(zfs_trim_txg_batch,
+ "Min number of txgs to aggregate frees before issuing TRIM");
+
+module_param(zfs_trim_queue_limit, uint, 0644);
+MODULE_PARM_DESC(zfs_trim_queue_limit,
+ "Max queued TRIMs outstanding per leaf vdev");
+/* END CSTYLED */
+#endif
diff --git a/module/zfs/zfs_ioctl.c b/module/zfs/zfs_ioctl.c
index 047193c61..debe733da 100644
--- a/module/zfs/zfs_ioctl.c
+++ b/module/zfs/zfs_ioctl.c
@@ -204,6 +204,7 @@
#include <sys/zfs_sysfs.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_initialize.h>
+#include <sys/vdev_trim.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
@@ -3885,7 +3886,7 @@ zfs_ioc_destroy(zfs_cmd_t *zc)
/*
* innvl: {
- * "initialize_command" -> POOL_INITIALIZE_{CANCEL|DO|SUSPEND} (uint64)
+ * "initialize_command" -> POOL_INITIALIZE_{CANCEL|START|SUSPEND} (uint64)
* "initialize_vdevs": { -> guids to initialize (nvlist)
* "vdev_path_1": vdev_guid_1, (uint64),
* "vdev_path_2": vdev_guid_2, (uint64),
@@ -3919,7 +3920,7 @@ zfs_ioc_pool_initialize(const char *poolname, nvlist_t *innvl, nvlist_t *outnvl)
}
if (!(cmd_type == POOL_INITIALIZE_CANCEL ||
- cmd_type == POOL_INITIALIZE_DO ||
+ cmd_type == POOL_INITIALIZE_START ||
cmd_type == POOL_INITIALIZE_SUSPEND)) {
return (SET_ERROR(EINVAL));
}
@@ -3958,6 +3959,91 @@ zfs_ioc_pool_initialize(const char *poolname, nvlist_t *innvl, nvlist_t *outnvl)
}
/*
+ * innvl: {
+ * "trim_command" -> POOL_TRIM_{CANCEL|START|SUSPEND} (uint64)
+ * "trim_vdevs": { -> guids to TRIM (nvlist)
+ * "vdev_path_1": vdev_guid_1, (uint64),
+ * "vdev_path_2": vdev_guid_2, (uint64),
+ * ...
+ * },
+ * "trim_rate" -> Target TRIM rate in bytes/sec.
+ * "trim_secure" -> Set to request a secure TRIM.
+ * }
+ *
+ * outnvl: {
+ * "trim_vdevs": { -> TRIM errors (nvlist)
+ * "vdev_path_1": errno, see function body for possible errnos (uint64)
+ * "vdev_path_2": errno, ... (uint64)
+ * ...
+ * }
+ * }
+ *
+ * EINVAL is returned for an unknown command or if any of the provided vdev
+ * guids has been specified with a type other than uint64.
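+ *
+ * For example (illustrative only), user space would construct innvl along
+ * these lines before invoking the ioctl:
+ *
+ *	nvlist_t *vdevs = fnvlist_alloc();
+ *	fnvlist_add_uint64(vdevs, "/dev/sda1", vdev_guid);
+ *
+ *	nvlist_t *innvl = fnvlist_alloc();
+ *	fnvlist_add_uint64(innvl, ZPOOL_TRIM_COMMAND, POOL_TRIM_START);
+ *	fnvlist_add_nvlist(innvl, ZPOOL_TRIM_VDEVS, vdevs);
+ *	fnvlist_add_boolean_value(innvl, ZPOOL_TRIM_SECURE, B_TRUE);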
+ */
+static const zfs_ioc_key_t zfs_keys_pool_trim[] = {
+ {ZPOOL_TRIM_COMMAND, DATA_TYPE_UINT64, 0},
+ {ZPOOL_TRIM_VDEVS, DATA_TYPE_NVLIST, 0},
+ {ZPOOL_TRIM_RATE, DATA_TYPE_UINT64, ZK_OPTIONAL},
+ {ZPOOL_TRIM_SECURE, DATA_TYPE_BOOLEAN_VALUE, ZK_OPTIONAL},
+};
+
+static int
+zfs_ioc_pool_trim(const char *poolname, nvlist_t *innvl, nvlist_t *outnvl)
+{
+ uint64_t cmd_type;
+ if (nvlist_lookup_uint64(innvl, ZPOOL_TRIM_COMMAND, &cmd_type) != 0)
+ return (SET_ERROR(EINVAL));
+
+ if (!(cmd_type == POOL_TRIM_CANCEL ||
+ cmd_type == POOL_TRIM_START ||
+ cmd_type == POOL_TRIM_SUSPEND)) {
+ return (SET_ERROR(EINVAL));
+ }
+
+ nvlist_t *vdev_guids;
+ if (nvlist_lookup_nvlist(innvl, ZPOOL_TRIM_VDEVS, &vdev_guids) != 0)
+ return (SET_ERROR(EINVAL));
+
+ for (nvpair_t *pair = nvlist_next_nvpair(vdev_guids, NULL);
+ pair != NULL; pair = nvlist_next_nvpair(vdev_guids, pair)) {
+ uint64_t vdev_guid;
+ if (nvpair_value_uint64(pair, &vdev_guid) != 0) {
+ return (SET_ERROR(EINVAL));
+ }
+ }
+
+ /* Optional, defaults to maximum rate when not provided */
+ uint64_t rate;
+ if (nvlist_lookup_uint64(innvl, ZPOOL_TRIM_RATE, &rate) != 0)
+ rate = 0;
+
+ /* Optional, defaults to standard TRIM when not provided */
+ boolean_t secure;
+ if (nvlist_lookup_boolean_value(innvl, ZPOOL_TRIM_SECURE,
+ &secure) != 0) {
+ secure = B_FALSE;
+ }
+
+ spa_t *spa;
+ int error = spa_open(poolname, &spa, FTAG);
+ if (error != 0)
+ return (error);
+
+ nvlist_t *vdev_errlist = fnvlist_alloc();
+ int total_errors = spa_vdev_trim(spa, vdev_guids, cmd_type,
+ rate, !!zfs_trim_metaslab_skip, secure, vdev_errlist);
+
+ if (fnvlist_size(vdev_errlist) > 0)
+ fnvlist_add_nvlist(outnvl, ZPOOL_TRIM_VDEVS, vdev_errlist);
+
+ fnvlist_free(vdev_errlist);
+
+ spa_close(spa, FTAG);
+ return (total_errors > 0 ? EINVAL : 0);
+}
+
+/*
* fsname is name of dataset to rollback (to most recent snapshot)
*
* innvl may contain name of expected target snapshot
@@ -6580,6 +6666,11 @@ zfs_ioctl_init(void)
POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_TRUE, B_TRUE,
zfs_keys_pool_initialize, ARRAY_SIZE(zfs_keys_pool_initialize));
+ zfs_ioctl_register("trim", ZFS_IOC_POOL_TRIM,
+ zfs_ioc_pool_trim, zfs_secpolicy_config, POOL_NAME,
+ POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_TRUE, B_TRUE,
+ zfs_keys_pool_trim, ARRAY_SIZE(zfs_keys_pool_trim));
+
/* IOCTLS that use the legacy function signature */
zfs_ioctl_register_legacy(ZFS_IOC_POOL_FREEZE, zfs_ioc_pool_freeze,
diff --git a/module/zfs/zfs_sysfs.c b/module/zfs/zfs_sysfs.c
index 87c4ac117..ec8ae4216 100644
--- a/module/zfs/zfs_sysfs.c
+++ b/module/zfs/zfs_sysfs.c
@@ -358,7 +358,8 @@ pool_property_show(struct kobject *kobj, struct attribute *attr, char *buf)
*/
static const char *zfs_features[] = {
/* --> Add new kernel features here (post ZoL 0.8.0) */
- "vdev_initialize"
+ "initialize",
+ "trim",
};
#define ZFS_FEATURE_COUNT ARRAY_SIZE(zfs_features)
diff --git a/module/zfs/zio.c b/module/zfs/zio.c
index 0912f607f..1915de417 100644
--- a/module/zfs/zio.c
+++ b/module/zfs/zio.c
@@ -32,6 +32,7 @@
#include <sys/txg.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
+#include <sys/vdev_trim.h>
#include <sys/zio_impl.h>
#include <sys/zio_compress.h>
#include <sys/zio_checksum.h>
@@ -58,7 +59,7 @@ const char *zio_type_name[ZIO_TYPES] = {
* Note: Linux kernel thread name length is limited
* so these names will differ from upstream open zfs.
*/
- "z_null", "z_rd", "z_wr", "z_fr", "z_cl", "z_ioctl"
+ "z_null", "z_rd", "z_wr", "z_fr", "z_cl", "z_ioctl", "z_trim"
};
int zio_dva_throttle_enabled = B_TRUE;
@@ -761,7 +762,7 @@ zio_create(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
{
zio_t *zio;
- ASSERT3U(psize, <=, SPA_MAXBLOCKSIZE);
+ IMPLY(type != ZIO_TYPE_TRIM, psize <= SPA_MAXBLOCKSIZE);
ASSERT(P2PHASE(psize, SPA_MINBLOCKSIZE) == 0);
ASSERT(P2PHASE(offset, SPA_MINBLOCKSIZE) == 0);
@@ -1212,6 +1213,26 @@ zio_ioctl(zio_t *pio, spa_t *spa, vdev_t *vd, int cmd,
}
zio_t *
+zio_trim(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
+ zio_done_func_t *done, void *private, zio_priority_t priority,
+ enum zio_flag flags, enum trim_flag trim_flags)
+{
+ zio_t *zio;
+
+ ASSERT0(vd->vdev_children);
+ ASSERT0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
+ ASSERT0(P2PHASE(size, 1ULL << vd->vdev_ashift));
+ ASSERT3U(size, !=, 0);
+
+ zio = zio_create(pio, vd->vdev_spa, 0, NULL, NULL, size, size, done,
+ private, ZIO_TYPE_TRIM, priority, flags | ZIO_FLAG_PHYSICAL,
+ vd, offset, NULL, ZIO_STAGE_OPEN, ZIO_TRIM_PIPELINE);
+ zio->io_trim_flags = trim_flags;
+
+ return (zio);
+}
+
+zio_t *
zio_read_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
abd_t *data, int checksum, zio_done_func_t *done, void *private,
zio_priority_t priority, enum zio_flag flags, boolean_t labels)
@@ -3562,7 +3583,6 @@ zio_alloc_zil(spa_t *spa, objset_t *os, uint64_t txg, blkptr_t *new_bp,
* ==========================================================================
*/
-
/*
* Issue an I/O to the underlying vdev. Typically the issue pipeline
* stops after this stage and will resume upon I/O completion.
@@ -3685,8 +3705,8 @@ zio_vdev_io_start(zio_t *zio)
return (zio);
}
- if (vd->vdev_ops->vdev_op_leaf &&
- (zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE)) {
+ if (vd->vdev_ops->vdev_op_leaf && (zio->io_type == ZIO_TYPE_READ ||
+ zio->io_type == ZIO_TYPE_WRITE || zio->io_type == ZIO_TYPE_TRIM)) {
if (zio->io_type == ZIO_TYPE_READ && vdev_cache_read(zio))
return (zio);
@@ -3717,7 +3737,8 @@ zio_vdev_io_done(zio_t *zio)
return (NULL);
}
- ASSERT(zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE);
+ ASSERT(zio->io_type == ZIO_TYPE_READ ||
+ zio->io_type == ZIO_TYPE_WRITE || zio->io_type == ZIO_TYPE_TRIM);
if (zio->io_delay)
zio->io_delay = gethrtime() - zio->io_delay;
@@ -3736,7 +3757,7 @@ zio_vdev_io_done(zio_t *zio)
if (zio_injection_enabled && zio->io_error == 0)
zio->io_error = zio_handle_label_injection(zio, EIO);
- if (zio->io_error) {
+ if (zio->io_error && zio->io_type != ZIO_TYPE_TRIM) {
if (!vdev_accessible(vd, zio)) {
zio->io_error = SET_ERROR(ENXIO);
} else {
@@ -3866,8 +3887,8 @@ zio_vdev_io_assess(zio_t *zio)
/*
* If a cache flush returns ENOTSUP or ENOTTY, we know that no future
- * attempts will ever succeed. In this case we set a persistent bit so
- * that we don't bother with it in the future.
+ * attempts will ever succeed. In this case we set a persistent
+ * boolean flag so that we don't bother with it in the future.
*/
if ((zio->io_error == ENOTSUP || zio->io_error == ENOTTY) &&
zio->io_type == ZIO_TYPE_IOCTL &&