author     Prakash Surya <[email protected]>  2013-07-10 15:53:48 -0700
committer  Brian Behlendorf <[email protected]>  2013-07-11 13:53:44 -0700
commit     92334b14ec378b1693573b52c09816bbade9cf3e (patch)
tree       2ea0e48d7f7bef661756633ae34d68acc514c71b /module/zfs
parent     bf89c19914c66b030333460952dbdfabf9ff527e (diff)
Add new kstat for monitoring time in dmu_tx_assign
This change adds a new kstat to gain some visibility into the amount of
time spent in each call to dmu_tx_assign. A histogram is exported via a
new dmu_tx_assign_histogram-$POOLNAME file. Each bucket in the histogram
counts how many calls to dmu_tx_assign completed within that bucket's
latency range.

For example, given the histogram file below:

$ cat /proc/spl/kstat/zfs/dmu_tx_assign_histogram-tank
12 1 0x01 32 1536 19792068076691 20516481514522
name             type data
1 us             4    859
2 us             4    252
4 us             4    171
8 us             4    2
16 us            4    0
32 us            4    2
64 us            4    0
128 us           4    0
256 us           4    0
512 us           4    0
1024 us          4    0
2048 us          4    0
4096 us          4    0
8192 us          4    0
16384 us         4    0
32768 us         4    1
65536 us         4    1
131072 us        4    1
262144 us        4    4
524288 us        4    0
1048576 us       4    0
2097152 us       4    0
4194304 us       4    0
8388608 us       4    0
16777216 us      4    0
33554432 us      4    0
67108864 us      4    0
134217728 us     4    0
268435456 us     4    0
536870912 us     4    0
1073741824 us    4    0
2147483648 us    4    0

one can see that most calls to dmu_tx_assign completed in 32us or less,
but a few outliers did not. Specifically, 4 of the calls took between
131072us and 262144us. This information is difficult, if not impossible,
to gather without this change.

Signed-off-by: Prakash Surya <[email protected]>
Signed-off-by: Brian Behlendorf <[email protected]>
Closes #1584
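As an aside (not part of the commit): a minimal user-space sketch of how a
histogram file in the layout above could be summarized. The pool name "tank",
the two header lines, and the per-bucket line format are taken from the
example; everything else is illustrative. Each bucket's label is the upper
bound of its latency range and the previous label is the lower bound, which
is how the "262144 us ... 4" line reads as four calls between 131072us and
262144us.

/*
 * Illustrative only: print the non-zero buckets of a dmu_tx_assign
 * histogram kstat, assuming the file layout shown in the commit
 * message above (one kstat header line, one "name type data" line,
 * then one "<bound> us <type> <count>" line per bucket).
 */
#include <stdio.h>
#include <inttypes.h>

int
main(void)
{
	const char *path =
	    "/proc/spl/kstat/zfs/dmu_tx_assign_histogram-tank";
	char line[256];
	uint64_t lower = 0, upper, count;
	int type;
	FILE *fp = fopen(path, "r");

	if (fp == NULL) {
		perror("fopen");
		return (1);
	}

	/* Skip the kstat header and the "name type data" column header. */
	(void) fgets(line, sizeof (line), fp);
	(void) fgets(line, sizeof (line), fp);

	while (fgets(line, sizeof (line), fp) != NULL) {
		if (sscanf(line, "%" SCNu64 " us %d %" SCNu64,
		    &upper, &type, &count) != 3)
			continue;

		if (count != 0)
			printf("%" PRIu64 " call(s) took between %" PRIu64
			    "us and %" PRIu64 "us\n", count, lower, upper);

		lower = upper;
	}

	(void) fclose(fp);
	return (0);
}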
Diffstat (limited to 'module/zfs')
-rw-r--r--  module/zfs/dmu_tx.c    8
-rw-r--r--  module/zfs/dsl_pool.c  59
2 files changed, 67 insertions, 0 deletions
diff --git a/module/zfs/dmu_tx.c b/module/zfs/dmu_tx.c
index 31c88c9d6..b0dc64f06 100644
--- a/module/zfs/dmu_tx.c
+++ b/module/zfs/dmu_tx.c
@@ -1089,12 +1089,15 @@ dmu_tx_unassign(dmu_tx_t *tx)
 int
 dmu_tx_assign(dmu_tx_t *tx, uint64_t txg_how)
 {
+	hrtime_t before, after;
 	int err;
 
 	ASSERT(tx->tx_txg == 0);
 	ASSERT(txg_how != 0);
 	ASSERT(!dsl_pool_sync_context(tx->tx_pool));
 
+	before = gethrtime();
+
 	while ((err = dmu_tx_try_assign(tx, txg_how)) != 0) {
 		dmu_tx_unassign(tx);
 
@@ -1106,6 +1109,11 @@ dmu_tx_assign(dmu_tx_t *tx, uint64_t txg_how)
 
 	txg_rele_to_quiesce(&tx->tx_txgh);
 
+	after = gethrtime();
+
+	dsl_pool_tx_assign_add_usecs(tx->tx_pool,
+	    (after - before) / NSEC_PER_USEC);
+
 	return (0);
 }
 
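The dmu_tx.c hunks above bracket the assign path with gethrtime() calls and
hand the elapsed time, converted to microseconds, to the new histogram
helper. A rough user-space analogue of that measure-and-convert pattern,
using clock_gettime(CLOCK_MONOTONIC) in place of the kernel's gethrtime()
and a hypothetical record_usecs() in place of dsl_pool_tx_assign_add_usecs(),
might look like this:

/* Illustrative only: time a call and record the latency in microseconds. */
#define _POSIX_C_SOURCE 199309L	/* for clock_gettime() and nanosleep() */
#include <stdio.h>
#include <stdint.h>
#include <time.h>

#define NSEC_PER_USEC 1000ULL

static uint64_t
now_nsecs(void)
{
	struct timespec ts;

	(void) clock_gettime(CLOCK_MONOTONIC, &ts);
	return ((uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec);
}

/* Hypothetical stand-in for dsl_pool_tx_assign_add_usecs(). */
static void
record_usecs(uint64_t usecs)
{
	printf("call took %llu us\n", (unsigned long long)usecs);
}

/* Placeholder for the operation being timed. */
static void
work(void)
{
	struct timespec delay = { 0, 2000000 };	/* 2 ms */

	(void) nanosleep(&delay, NULL);
}

int
main(void)
{
	uint64_t before, after;

	before = now_nsecs();
	work();
	after = now_nsecs();

	record_usecs((after - before) / NSEC_PER_USEC);
	return (0);
}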
diff --git a/module/zfs/dsl_pool.c b/module/zfs/dsl_pool.c
index 6d25771a7..7795d8045 100644
--- a/module/zfs/dsl_pool.c
+++ b/module/zfs/dsl_pool.c
@@ -58,6 +58,63 @@ kmutex_t zfs_write_limit_lock;
 
 static pgcnt_t old_physmem = 0;
 
+static void
+dsl_pool_tx_assign_init(dsl_pool_t *dp, unsigned int ndata)
+{
+	kstat_named_t *ks;
+	char name[KSTAT_STRLEN];
+	int i, data_size = ndata * sizeof(kstat_named_t);
+
+	(void) snprintf(name, KSTAT_STRLEN, "dmu_tx_assign-%s",
+	    spa_name(dp->dp_spa));
+
+	dp->dp_tx_assign_size = ndata;
+
+	if (data_size)
+		dp->dp_tx_assign_buckets = kmem_alloc(data_size, KM_SLEEP);
+	else
+		dp->dp_tx_assign_buckets = NULL;
+
+	for (i = 0; i < dp->dp_tx_assign_size; i++) {
+		ks = &dp->dp_tx_assign_buckets[i];
+		ks->data_type = KSTAT_DATA_UINT64;
+		ks->value.ui64 = 0;
+		(void) snprintf(ks->name, KSTAT_STRLEN, "%u us", 1 << i);
+	}
+
+	dp->dp_tx_assign_kstat = kstat_create("zfs", 0, name, "misc",
+	    KSTAT_TYPE_NAMED, 0, KSTAT_FLAG_VIRTUAL);
+
+	if (dp->dp_tx_assign_kstat) {
+		dp->dp_tx_assign_kstat->ks_data = dp->dp_tx_assign_buckets;
+		dp->dp_tx_assign_kstat->ks_ndata = dp->dp_tx_assign_size;
+		dp->dp_tx_assign_kstat->ks_data_size = data_size;
+		kstat_install(dp->dp_tx_assign_kstat);
+	}
+}
+
+static void
+dsl_pool_tx_assign_destroy(dsl_pool_t *dp)
+{
+	if (dp->dp_tx_assign_buckets)
+		kmem_free(dp->dp_tx_assign_buckets,
+		    dp->dp_tx_assign_size * sizeof(kstat_named_t));
+
+	if (dp->dp_tx_assign_kstat)
+		kstat_delete(dp->dp_tx_assign_kstat);
+}
+
+void
+dsl_pool_tx_assign_add_usecs(dsl_pool_t *dp, uint64_t usecs)
+{
+	uint64_t idx = 0;
+
+	while (((1 << idx) < usecs) && (idx < dp->dp_tx_assign_size - 1))
+		idx++;
+
+	atomic_inc_64(&dp->dp_tx_assign_buckets[idx].value.ui64);
+}
+
 static int
 dsl_pool_txg_history_update(kstat_t *ksp, int rw)
 {
@@ -238,6 +295,7 @@ dsl_pool_open_impl(spa_t *spa, uint64_t txg)
 	    1, 4, 0);
 
 	dsl_pool_txg_history_init(dp, txg);
+	dsl_pool_tx_assign_init(dp, 32);
 
 	return (dp);
 }
@@ -379,6 +437,7 @@ dsl_pool_close(dsl_pool_t *dp)
 	arc_flush(dp->dp_spa);
 	txg_fini(dp);
 	dsl_scan_fini(dp);
+	dsl_pool_tx_assign_destroy(dp);
 	dsl_pool_txg_history_destroy(dp);
 	rw_destroy(&dp->dp_config_rwlock);
 	mutex_destroy(&dp->dp_lock);
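For reference, the bucket selection in dsl_pool_tx_assign_add_usecs() walks
power-of-two upper bounds until one covers the measured latency, clamping to
the last bucket. Below is a standalone sketch of that indexing logic; the
bucket count matches the 32 passed to dsl_pool_tx_assign_init(), the sample
latencies are invented, and 1ULL is used for the shift so the top buckets
stay well-defined in this user-space model.

/* Illustrative only: model the power-of-two latency bucketing. */
#include <stdio.h>
#include <stdint.h>

#define NBUCKETS 32

static uint64_t buckets[NBUCKETS];

static void
add_usecs(uint64_t usecs)
{
	uint64_t idx = 0;

	/* Find the first bucket whose bound (1 << idx) covers this latency. */
	while (((1ULL << idx) < usecs) && (idx < NBUCKETS - 1))
		idx++;

	buckets[idx]++;
}

int
main(void)
{
	/* Example latencies in microseconds. */
	uint64_t samples[] = { 1, 3, 27, 200000 };
	uint64_t i;

	for (i = 0; i < sizeof (samples) / sizeof (samples[0]); i++)
		add_usecs(samples[i]);

	for (i = 0; i < NBUCKETS; i++) {
		if (buckets[i] != 0)
			printf("%llu us\t%llu\n",
			    (unsigned long long)(1ULL << i),
			    (unsigned long long)buckets[i]);
	}
	return (0);
}

With these samples, one count lands in each of the 1 us, 4 us, 32 us, and
262144 us buckets, which matches how the example histogram in the commit
message should be read.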