author		Ned Bass <[email protected]>	2014-02-28 15:07:00 -0800
committer	Brian Behlendorf <[email protected]>	2014-03-04 12:22:24 -0800
commit		a77c4c8332ea17293bb6565409f8c04a6dd2135b (patch)
tree		e3621773ddabddab846af18c14056fe4ac811e71 /module/zfs/dmu_tx.c
parent		3ccab25205cc8836ceb79bbd164208021468233a (diff)
Improve reporting of tx assignment wait times
Some callers of dmu_tx_assign() use the TXG_NOWAIT flag and call dmu_tx_wait() themselves before retrying if the assignment fails. The wait times for such callers are not accounted for in the dmu_tx_assign kstat histogram, because the histogram only records time spent in dmu_tx_assign(). This change moves the histogram update to dmu_tx_wait() to properly account for all time spent there.

One downside of this approach is that it is possible to call dmu_tx_wait() multiple times before successfully assigning a transaction, in which case the cumulative wait time would not be recorded. However, this case should not often arise in practice, because most callers currently use one of these forms:

    dmu_tx_assign(tx, TXG_WAIT);
    dmu_tx_assign(tx, waited ? TXG_WAITED : TXG_NOWAIT);

The first form should make just one call to dmu_tx_delay() inside of dmu_tx_assign(). The second form retries with TXG_WAITED if the first assignment fails and incurs a delay, in which case no further waiting is performed. Therefore transaction delays normally occur in a single call to dmu_tx_wait(), so the histogram should be fairly accurate.

Another possible downside of this approach is that the histogram will no longer record overhead outside of dmu_tx_wait(), such as in dmu_tx_try_assign(). While I'm not aware of any reason for concern on this point, it is conceivable that lock contention, long list traversal, etc. could cause assignment delays that would not be reflected in the histogram. The histogram should therefore be used strictly for visibility into the normal delay mechanisms, and not as a profiling tool for code performance.

Signed-off-by: Ned Bass <[email protected]>
Signed-off-by: Brian Behlendorf <[email protected]>
Closes #1915
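To illustrate the TXG_NOWAIT retry pattern described above, here is a minimal sketch of a typical caller. This is illustrative only, not code from this commit: example_assign_tx is a placeholder name, the dmu_tx_hold_*() calls are elided, and it assumes the ZFS kernel context (e.g. <sys/dmu.h>).

    /*
     * Hedged sketch of a TXG_NOWAIT caller, not taken from this diff.
     * On ERESTART the caller waits itself via dmu_tx_wait(), aborts the
     * failed tx, and retries the assignment with TXG_WAITED.
     */
    static int
    example_assign_tx(objset_t *os)
    {
        dmu_tx_t *tx;
        boolean_t waited = B_FALSE;
        int error;

    top:
        tx = dmu_tx_create(os);
        /* ... dmu_tx_hold_*() calls describing the change would go here ... */

        error = dmu_tx_assign(tx, waited ? TXG_WAITED : TXG_NOWAIT);
        if (error != 0) {
            if (error == ERESTART) {
                /*
                 * With this patch, the time spent here in dmu_tx_wait()
                 * is recorded in the dmu_tx_assign kstat histogram.
                 */
                waited = B_TRUE;
                dmu_tx_wait(tx);
                dmu_tx_abort(tx);
                goto top;
            }
            dmu_tx_abort(tx);	/* hard failure, e.g. ENOSPC */
            return (error);
        }

        /* ... modify data under the assigned tx ... */
        dmu_tx_commit(tx);
        return (0);
    }

With the histogram update moved into dmu_tx_wait(), the wait at the ERESTART branch above is counted alongside the waits incurred internally by dmu_tx_assign(tx, TXG_WAIT). On Linux builds the histogram is typically exposed as a per-pool dmu_tx_assign kstat under /proc/spl/kstat/zfs/.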
Diffstat (limited to 'module/zfs/dmu_tx.c')
-rw-r--r--	module/zfs/dmu_tx.c	10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/module/zfs/dmu_tx.c b/module/zfs/dmu_tx.c
index 4159ba740..4f2fae2fc 100644
--- a/module/zfs/dmu_tx.c
+++ b/module/zfs/dmu_tx.c
@@ -1251,7 +1251,6 @@ dmu_tx_unassign(dmu_tx_t *tx)
 int
 dmu_tx_assign(dmu_tx_t *tx, txg_how_t txg_how)
 {
-	hrtime_t before;
 	int err;
 
 	ASSERT(tx->tx_txg == 0);
@@ -1259,8 +1258,6 @@ dmu_tx_assign(dmu_tx_t *tx, txg_how_t txg_how)
 	    txg_how == TXG_WAITED);
 	ASSERT(!dsl_pool_sync_context(tx->tx_pool));
 
-	before = gethrtime();
-
 	if (txg_how == TXG_WAITED)
 		tx->tx_waited = B_TRUE;
 
@@ -1278,8 +1275,6 @@ dmu_tx_assign(dmu_tx_t *tx, txg_how_t txg_how)
 
 	txg_rele_to_quiesce(&tx->tx_txgh);
 
-	spa_tx_assign_add_nsecs(tx->tx_pool->dp_spa, gethrtime() - before);
-
 	return (0);
 }
 
@@ -1288,10 +1283,13 @@ dmu_tx_wait(dmu_tx_t *tx)
 {
 	spa_t *spa = tx->tx_pool->dp_spa;
 	dsl_pool_t *dp = tx->tx_pool;
+	hrtime_t before;
 
 	ASSERT(tx->tx_txg == 0);
 	ASSERT(!dsl_pool_config_held(tx->tx_pool));
 
+	before = gethrtime();
+
 	if (tx->tx_wait_dirty) {
 		uint64_t dirty;
 
@@ -1343,6 +1341,8 @@ dmu_tx_wait(dmu_tx_t *tx)
 		 */
 		txg_wait_open(tx->tx_pool, tx->tx_lasttried_txg + 1);
 	}
+
+	spa_tx_assign_add_nsecs(spa, gethrtime() - before);
 }
 
 void