Diffstat (limited to 'module')
-rw-r--r--  module/zfs/dmu_tx.c |  7
-rw-r--r--  module/zfs/txg.c    | 37
2 files changed, 37 insertions(+), 7 deletions(-)
diff --git a/module/zfs/dmu_tx.c b/module/zfs/dmu_tx.c
index 374bb710c..ab6ec29e7 100644
--- a/module/zfs/dmu_tx.c
+++ b/module/zfs/dmu_tx.c
@@ -1097,10 +1097,11 @@ dmu_tx_wait(dmu_tx_t *tx)
tx->tx_needassign_txh = NULL;
} else {
/*
- * A dnode is assigned to the quiescing txg. Wait for its
- * transaction to complete.
+ * If we have a lot of dirty data just wait until we sync
+ * out a TXG at which point we'll hopefully have synced
+ * a portion of the changes.
*/
- txg_wait_open(tx->tx_pool, tx->tx_lasttried_txg + 1);
+ txg_wait_synced(dp, spa_last_synced_txg(spa) + 1);
}
spa_tx_assign_add_nsecs(spa, gethrtime() - before);
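
The replacement call above blocks until the pool has synced one more txg past spa_last_synced_txg(), rather than merely waiting for the next txg to open, so some of the accumulated dirty data is actually on disk before the transaction is retried. The underlying wait is the usual condition-variable loop over a monotonically increasing "last synced" counter. A minimal sketch of that shape, assuming plain pthreads instead of the SPL mutex/cv primitives (sync_state_t, ss_wait_synced(), and ss_note_synced() are hypothetical names, not ZFS API):

/* Illustrative sketch only -- not ZFS code. */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

typedef struct sync_state {
	pthread_mutex_t	ss_lock;	/* guards ss_synced_txg */
	pthread_cond_t	ss_cv;		/* broadcast when ss_synced_txg advances */
	uint64_t	ss_synced_txg;	/* highest fully synced txg */
} sync_state_t;

/* Block until txg 'target' (or a later one) has been synced. */
static void
ss_wait_synced(sync_state_t *ss, uint64_t target)
{
	pthread_mutex_lock(&ss->ss_lock);
	while (ss->ss_synced_txg < target)
		pthread_cond_wait(&ss->ss_cv, &ss->ss_lock);
	pthread_mutex_unlock(&ss->ss_lock);
}

/* Called by the (hypothetical) sync thread once a txg is on disk. */
static void
ss_note_synced(sync_state_t *ss, uint64_t txg)
{
	pthread_mutex_lock(&ss->ss_lock);
	if (txg > ss->ss_synced_txg)
		ss->ss_synced_txg = txg;
	pthread_cond_broadcast(&ss->ss_cv);
	pthread_mutex_unlock(&ss->ss_lock);
}

int
main(void)
{
	sync_state_t ss = {
		.ss_lock = PTHREAD_MUTEX_INITIALIZER,
		.ss_cv = PTHREAD_COND_INITIALIZER,
		.ss_synced_txg = 41,
	};

	ss_note_synced(&ss, 42);	/* sync thread finishes txg 42 */
	ss_wait_synced(&ss, 42);	/* returns immediately: 42 is synced */
	printf("synced through txg %llu\n",
	    (unsigned long long)ss.ss_synced_txg);
	return (0);
}

In the real pool the counter is tx_synced_txg, advanced by the sync thread shown in the txg.c diff below.
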
diff --git a/module/zfs/txg.c b/module/zfs/txg.c
index 30f251573..16b2f845b 100644
--- a/module/zfs/txg.c
+++ b/module/zfs/txg.c
@@ -476,6 +476,30 @@ txg_wait_callbacks(dsl_pool_t *dp)
taskq_wait_outstanding(tx->tx_commit_cb_taskq, 0);
}
+static boolean_t
+txg_is_syncing(dsl_pool_t *dp)
+{
+ tx_state_t *tx = &dp->dp_tx;
+ ASSERT(MUTEX_HELD(&tx->tx_sync_lock));
+ return (tx->tx_syncing_txg != 0);
+}
+
+static boolean_t
+txg_is_quiescing(dsl_pool_t *dp)
+{
+ tx_state_t *tx = &dp->dp_tx;
+ ASSERT(MUTEX_HELD(&tx->tx_sync_lock));
+ return (tx->tx_quiescing_txg != 0);
+}
+
+static boolean_t
+txg_has_quiesced_to_sync(dsl_pool_t *dp)
+{
+ tx_state_t *tx = &dp->dp_tx;
+ ASSERT(MUTEX_HELD(&tx->tx_sync_lock));
+ return (tx->tx_quiesced_txg != 0);
+}
+
static void
txg_sync_thread(void *arg)
{
@@ -504,7 +528,7 @@ txg_sync_thread(void *arg)
while (!dsl_scan_active(dp->dp_scan) &&
!tx->tx_exiting && timer > 0 &&
tx->tx_synced_txg >= tx->tx_sync_txg_waiting &&
- tx->tx_quiesced_txg == 0 &&
+ !txg_has_quiesced_to_sync(dp) &&
dp->dp_dirty_total < zfs_dirty_data_sync) {
dprintf("waiting; tx_synced=%llu waiting=%llu dp=%p\n",
tx->tx_synced_txg, tx->tx_sync_txg_waiting, dp);
@@ -517,7 +541,7 @@ txg_sync_thread(void *arg)
* Wait until the quiesce thread hands off a txg to us,
* prompting it to do so if necessary.
*/
- while (!tx->tx_exiting && tx->tx_quiesced_txg == 0) {
+ while (!tx->tx_exiting && !txg_has_quiesced_to_sync(dp)) {
if (tx->tx_quiesce_txg_waiting < tx->tx_open_txg+1)
tx->tx_quiesce_txg_waiting = tx->tx_open_txg+1;
cv_broadcast(&tx->tx_quiesce_more_cv);
@@ -532,6 +556,7 @@ txg_sync_thread(void *arg)
* us. This may cause the quiescing thread to now be
* able to quiesce another txg, so we must signal it.
*/
+ ASSERT(tx->tx_quiesced_txg != 0);
txg = tx->tx_quiesced_txg;
tx->tx_quiesced_txg = 0;
tx->tx_syncing_txg = txg;
@@ -582,7 +607,7 @@ txg_quiesce_thread(void *arg)
*/
while (!tx->tx_exiting &&
(tx->tx_open_txg >= tx->tx_quiesce_txg_waiting ||
- tx->tx_quiesced_txg != 0))
+ txg_has_quiesced_to_sync(dp)))
txg_thread_wait(tx, &cpr, &tx->tx_quiesce_more_cv, 0);
if (tx->tx_exiting)
@@ -592,6 +617,8 @@ txg_quiesce_thread(void *arg)
dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
txg, tx->tx_quiesce_txg_waiting,
tx->tx_sync_txg_waiting);
+ tx->tx_quiescing_txg = txg;
+
mutex_exit(&tx->tx_sync_lock);
txg_quiesce(dp, txg);
mutex_enter(&tx->tx_sync_lock);
@@ -600,6 +627,7 @@ txg_quiesce_thread(void *arg)
* Hand this txg off to the sync thread.
*/
dprintf("quiesce done, handing off txg %llu\n", txg);
+ tx->tx_quiescing_txg = 0;
tx->tx_quiesced_txg = txg;
DTRACE_PROBE2(txg__quiesced, dsl_pool_t *, dp, uint64_t, txg);
cv_broadcast(&tx->tx_sync_more_cv);
@@ -699,7 +727,8 @@ txg_kick(dsl_pool_t *dp)
ASSERT(!dsl_pool_config_held(dp));
mutex_enter(&tx->tx_sync_lock);
- if (tx->tx_syncing_txg == 0 &&
+ if (!txg_is_syncing(dp) &&
+ !txg_is_quiescing(dp) &&
tx->tx_quiesce_txg_waiting <= tx->tx_open_txg &&
tx->tx_sync_txg_waiting <= tx->tx_synced_txg &&
tx->tx_quiesced_txg <= tx->tx_synced_txg) {
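
The three helpers added at the top of txg.c all follow one pattern: a predicate that must be called with tx_sync_lock held and that tests a single state field. Together with the new tx_quiescing_txg field, they make the "currently quiescing" window visible, so txg_kick() will not push another txg while a quiesce or sync is already in flight. Below is a minimal, self-contained sketch of that pattern, assuming plain pthreads rather than the SPL primitives; txstate_t, txs_kick(), and the other names are illustrative stand-ins, not ZFS API:

/*
 * Illustrative only -- not ZFS code.  Models the "is quiescing / has
 * quiesced / is syncing" predicates and the txg_kick() gate.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct txstate {
	pthread_mutex_t	txs_lock;	   /* guards all fields below */
	uint64_t	txs_open_txg;	   /* currently open txg number */
	uint64_t	txs_quiescing_txg; /* txg being quiesced, 0 if none */
	uint64_t	txs_quiesced_txg;  /* quiesced txg awaiting sync, 0 if none */
	uint64_t	txs_syncing_txg;   /* txg being synced, 0 if none */
} txstate_t;

/* Callers of the three predicates must hold txs_lock. */
static bool
txs_is_quiescing(txstate_t *txs)
{
	return (txs->txs_quiescing_txg != 0);
}

static bool
txs_has_quiesced_to_sync(txstate_t *txs)
{
	return (txs->txs_quiesced_txg != 0);
}

static bool
txs_is_syncing(txstate_t *txs)
{
	return (txs->txs_syncing_txg != 0);
}

/*
 * Analogue of the txg_kick() change: only start quiescing the open txg
 * if no quiesce, handoff, or sync is already in flight.
 */
static bool
txs_kick(txstate_t *txs)
{
	bool kicked = false;

	pthread_mutex_lock(&txs->txs_lock);
	if (!txs_is_syncing(txs) && !txs_is_quiescing(txs) &&
	    !txs_has_quiesced_to_sync(txs)) {
		/* Quiesce the open txg and open the next one. */
		txs->txs_quiescing_txg = txs->txs_open_txg++;
		kicked = true;
	}
	pthread_mutex_unlock(&txs->txs_lock);
	return (kicked);
}

int
main(void)
{
	txstate_t txs = {
		.txs_lock = PTHREAD_MUTEX_INITIALIZER,
		.txs_open_txg = 42,
	};

	printf("first kick:  %d\n", txs_kick(&txs)); /* 1: nothing in flight */
	printf("second kick: %d\n", txs_kick(&txs)); /* 0: quiesce in flight */
	return (0);
}

The second kick returns 0 because the first one is still "quiescing" -- precisely the window that the new tx_quiescing_txg field exposes to txg_kick() in the patch above.
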