path: root/module/zfs/dsl_dataset.c
author	Paul Dagnelie <[email protected]>	2020-04-01 10:02:06 -0700
committer	GitHub <[email protected]>	2020-04-01 10:02:06 -0700
commit	5a42ef04fd390dc96fbbf31bc9f3d05695998211 (patch)
tree	ee4aec968084618faa92988b08a3c41c9b904327	/module/zfs/dsl_dataset.c
parent	c9e3efdb3a6111b9795becc6594b3c52ba004522 (diff)
Add 'zfs wait' command
Add a mechanism to wait for delete queue to drain. When doing redacted
send/recv, many workflows involve deleting files that contain sensitive
data. Because of the way zfs handles file deletions, snapshots taken
quickly after a rm operation can sometimes still contain the file in
question, especially if the file is very large. This can result in
issues for redacted send/recv users who expect the deleted files to be
redacted in the send streams, and not appear in their clones.

This change duplicates much of the zpool wait related logic into a
zfs wait command, which can be used to wait until the internal deleteq
has been drained. Additional wait activities may be added in the future.

Reviewed-by: Matthew Ahrens <[email protected]>
Reviewed-by: Brian Behlendorf <[email protected]>
Reviewed-by: John Gallagher <[email protected]>
Signed-off-by: Paul Dagnelie <[email protected]>
Closes #9707
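As a rough illustration of the workflow the commit message describes, usage might look something like the sketch below (the -t deleteq activity name is the one introduced by this change; the pool, dataset, and file names are made up for the example):

    # delete the sensitive file, then wait for the internal delete queue to drain
    rm /tank/fs/secret.dat
    zfs wait -t deleteq tank/fs
    # a snapshot taken now should no longer reference the deleted file's data
    zfs snapshot tank/fs@redacted-source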
Diffstat (limited to 'module/zfs/dsl_dataset.c')
-rw-r--r--	module/zfs/dsl_dataset.c	28
1 files changed, 18 insertions, 10 deletions
diff --git a/module/zfs/dsl_dataset.c b/module/zfs/dsl_dataset.c
index 3e5a67bdb..2d6e95e31 100644
--- a/module/zfs/dsl_dataset.c
+++ b/module/zfs/dsl_dataset.c
@@ -3077,20 +3077,26 @@ dsl_dataset_rename_snapshot(const char *fsname,
static int
dsl_dataset_handoff_check(dsl_dataset_t *ds, void *owner, dmu_tx_t *tx)
{
- boolean_t held;
+ boolean_t held = B_FALSE;
if (!dmu_tx_is_syncing(tx))
return (0);
- if (owner != NULL) {
- VERIFY3P(ds->ds_owner, ==, owner);
- dsl_dataset_long_rele(ds, owner);
- }
-
- held = dsl_dataset_long_held(ds);
-
- if (owner != NULL)
- dsl_dataset_long_hold(ds, owner);
+ dsl_dir_t *dd = ds->ds_dir;
+ mutex_enter(&dd->dd_activity_lock);
+ uint64_t holds = zfs_refcount_count(&ds->ds_longholds) -
+ (owner != NULL ? 1 : 0);
+ /*
+ * The value of dd_activity_waiters can change as soon as we drop the
+ * lock, but we're fine with that; new waiters coming in or old
+ * waiters leaving doesn't cause problems, since we're going to cancel
+ * waiters later anyway. The goal of this check is to verify that no
+ * non-waiters have long-holds, and all new long-holds will be
+ * prevented because we're holding the pool config as writer.
+ */
+ if (holds != dd->dd_activity_waiters)
+ held = B_TRUE;
+ mutex_exit(&dd->dd_activity_lock);
if (held)
return (SET_ERROR(EBUSY));
@@ -4036,6 +4042,8 @@ dsl_dataset_clone_swap_sync_impl(dsl_dataset_t *clone,
DMU_MAX_ACCESS * spa_asize_inflation);
ASSERT3P(clone->ds_prev, ==, origin_head->ds_prev);
+ dsl_dir_cancel_waiters(origin_head->ds_dir);
+
/*
* Swap per-dataset feature flags.
*/