Diffstat (limited to 'module/zfs')
-rw-r--r--  module/zfs/dsl_dataset.c   6
-rw-r--r--  module/zfs/dsl_dir.c      21
2 files changed, 14 insertions, 13 deletions
diff --git a/module/zfs/dsl_dataset.c b/module/zfs/dsl_dataset.c
index 9a9e24684..7c8995eba 100644
--- a/module/zfs/dsl_dataset.c
+++ b/module/zfs/dsl_dataset.c
@@ -102,9 +102,8 @@ dsl_dataset_block_born(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx)
used, compressed, uncompressed);
return;
}
- dmu_buf_will_dirty(ds->ds_dbuf, tx);
- mutex_enter(&ds->ds_dir->dd_lock);
+ dmu_buf_will_dirty(ds->ds_dbuf, tx);
mutex_enter(&ds->ds_lock);
delta = parent_delta(ds, used);
ds->ds_phys->ds_referenced_bytes += used;
@@ -116,7 +115,6 @@ dsl_dataset_block_born(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx)
compressed, uncompressed, tx);
dsl_dir_transfer_space(ds->ds_dir, used - delta,
DD_USED_REFRSRV, DD_USED_HEAD, tx);
- mutex_exit(&ds->ds_dir->dd_lock);
}
int
@@ -153,7 +151,6 @@ dsl_dataset_block_kill(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx,
dprintf_bp(bp, "freeing ds=%llu", ds->ds_object);
dsl_free(tx->tx_pool, tx->tx_txg, bp);
- mutex_enter(&ds->ds_dir->dd_lock);
mutex_enter(&ds->ds_lock);
ASSERT(ds->ds_phys->ds_unique_bytes >= used ||
!DS_UNIQUE_IS_ACCURATE(ds));
@@ -164,7 +161,6 @@ dsl_dataset_block_kill(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx,
delta, -compressed, -uncompressed, tx);
dsl_dir_transfer_space(ds->ds_dir, -used - delta,
DD_USED_REFRSRV, DD_USED_HEAD, tx);
- mutex_exit(&ds->ds_dir->dd_lock);
} else {
dprintf_bp(bp, "putting on dead list: %s", "");
if (async) {
diff --git a/module/zfs/dsl_dir.c b/module/zfs/dsl_dir.c
index c1b518241..44795f344 100644
--- a/module/zfs/dsl_dir.c
+++ b/module/zfs/dsl_dir.c
@@ -846,11 +846,21 @@ dsl_dir_diduse_space(dsl_dir_t *dd, dd_used_t type,
int64_t used, int64_t compressed, int64_t uncompressed, dmu_tx_t *tx)
{
int64_t accounted_delta;
+
+ /*
+ * dsl_dataset_set_refreservation_sync_impl() calls this with
+ * dd_lock held, so that it can atomically update
+ * ds->ds_reserved and the dsl_dir accounting, so that
+ * dsl_dataset_check_quota() can see dataset and dir accounting
+ * consistently.
+ */
boolean_t needlock = !MUTEX_HELD(&dd->dd_lock);
ASSERT(dmu_tx_is_syncing(tx));
ASSERT(type < DD_USED_NUM);
+ dmu_buf_will_dirty(dd->dd_dbuf, tx);
+
if (needlock)
mutex_enter(&dd->dd_lock);
accounted_delta = parent_delta(dd, dd->dd_phys->dd_used_bytes, used);
@@ -859,7 +869,6 @@ dsl_dir_diduse_space(dsl_dir_t *dd, dd_used_t type,
dd->dd_phys->dd_compressed_bytes >= -compressed);
ASSERT(uncompressed >= 0 ||
dd->dd_phys->dd_uncompressed_bytes >= -uncompressed);
- dmu_buf_will_dirty(dd->dd_dbuf, tx);
dd->dd_phys->dd_used_bytes += used;
dd->dd_phys->dd_uncompressed_bytes += uncompressed;
dd->dd_phys->dd_compressed_bytes += compressed;
@@ -894,8 +903,6 @@ void
dsl_dir_transfer_space(dsl_dir_t *dd, int64_t delta,
dd_used_t oldtype, dd_used_t newtype, dmu_tx_t *tx)
{
- boolean_t needlock = !MUTEX_HELD(&dd->dd_lock);
-
ASSERT(dmu_tx_is_syncing(tx));
ASSERT(oldtype < DD_USED_NUM);
ASSERT(newtype < DD_USED_NUM);
@@ -903,17 +910,15 @@ dsl_dir_transfer_space(dsl_dir_t *dd, int64_t delta,
if (delta == 0 || !(dd->dd_phys->dd_flags & DD_FLAG_USED_BREAKDOWN))
return;
- if (needlock)
- mutex_enter(&dd->dd_lock);
+ dmu_buf_will_dirty(dd->dd_dbuf, tx);
+ mutex_enter(&dd->dd_lock);
ASSERT(delta > 0 ?
dd->dd_phys->dd_used_breakdown[oldtype] >= delta :
dd->dd_phys->dd_used_breakdown[newtype] >= -delta);
ASSERT(dd->dd_phys->dd_used_bytes >= ABS(delta));
- dmu_buf_will_dirty(dd->dd_dbuf, tx);
dd->dd_phys->dd_used_breakdown[oldtype] -= delta;
dd->dd_phys->dd_used_breakdown[newtype] += delta;
- if (needlock)
- mutex_exit(&dd->dd_lock);
+ mutex_exit(&dd->dd_lock);
}
typedef struct dsl_dir_set_qr_arg {
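
The comment added to dsl_dir_diduse_space() above describes a conditional-locking idiom: the function takes dd_lock itself unless the caller already holds it, so a caller such as dsl_dataset_set_refreservation_sync_impl() can hold dd_lock across both the refreservation update and the dsl_dir accounting update, keeping them consistent for dsl_dataset_check_quota(). The user-space sketch below only illustrates that idiom; it is not ZFS code, and the names held_mutex_t, diduse_space() and update_refreservation() are made up for the example (the small mutex wrapper merely emulates the kernel's MUTEX_HELD() check).

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Tiny wrapper so the example can emulate the kernel's MUTEX_HELD(). */
typedef struct {
	pthread_mutex_t lock;
	pthread_t owner;
	bool held;
} held_mutex_t;

static void
mutex_enter(held_mutex_t *m)
{
	pthread_mutex_lock(&m->lock);
	m->owner = pthread_self();
	m->held = true;
}

static void
mutex_exit(held_mutex_t *m)
{
	m->held = false;
	pthread_mutex_unlock(&m->lock);
}

/* Rough analogue of MUTEX_HELD(): true only for the owning thread. */
static bool
mutex_held(held_mutex_t *m)
{
	return (m->held && pthread_equal(m->owner, pthread_self()));
}

typedef struct {
	held_mutex_t dd_lock;
	int64_t dd_used_bytes;
	int64_t dd_reserved;
} dir_t;

/* Same needlock pattern as dsl_dir_diduse_space(): lock only if needed. */
static void
diduse_space(dir_t *dd, int64_t used)
{
	bool needlock = !mutex_held(&dd->dd_lock);

	if (needlock)
		mutex_enter(&dd->dd_lock);
	dd->dd_used_bytes += used;
	if (needlock)
		mutex_exit(&dd->dd_lock);
}

/*
 * Analogue of dsl_dataset_set_refreservation_sync_impl(): holds dd_lock
 * across both updates so other threads never see them half-applied.
 */
static void
update_refreservation(dir_t *dd, int64_t delta)
{
	mutex_enter(&dd->dd_lock);
	dd->dd_reserved += delta;
	diduse_space(dd, delta);	/* sees the lock held, does not retake it */
	mutex_exit(&dd->dd_lock);
}

int
main(void)
{
	dir_t dd = { 0 };

	pthread_mutex_init(&dd.dd_lock.lock, NULL);
	diduse_space(&dd, 4096);	/* takes and drops dd_lock itself */
	update_refreservation(&dd, 1024);
	printf("used=%lld reserved=%lld\n",
	    (long long)dd.dd_used_bytes, (long long)dd.dd_reserved);
	pthread_mutex_destroy(&dd.dd_lock.lock);
	return (0);
}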