author | Brian Behlendorf <[email protected]> | 2012-10-23 13:48:22 -0700
---|---|---
committer | Brian Behlendorf <[email protected]> | 2012-11-02 15:45:56 -0700
commit | e95853a331529a6cb96fdf10476c53441e59f4e1 |
tree | 30f0809324bb570d1e97ccbee054f1f0484d4668 /module/zfs |
parent | e8fd45a0f975c6b8ae8cd644714fc21f14fac2bf |
Add txgs-<pool> kstat file
Create a kstat file which contains useful statistics about the
last N txgs processed. This can be helpful when analyzing pool
performance. The new KSTAT_TYPE_TXG type was added for this
purpose, and it tracks the following statistics per txg:
txg - Unique txg number
state - State (O)pen/(Q)uiescing/(S)yncing/(C)ommitted
birth - Creation time
nread - Bytes read
nwritten - Bytes written
reads - Read IOPs
writes - Write IOPs
open_time - Length in nanoseconds the txg was open
quiesce_time - Length in nanoseconds the txg was quiescing
sync_time - Length in nanoseconds the txg was syncing
Signed-off-by: Brian Behlendorf <[email protected]>
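For context, this kstat surfaces through the SPL's procfs bridge, and the depth of the history is adjustable at runtime because the module parameter is registered with 0644 permissions (see the diff below). A minimal usage sketch, assuming a hypothetical pool named `tank` and the usual `/proc/spl/kstat` layout; the exact column formatting comes from the SPL's KSTAT_TYPE_TXG support, not from this patch:

```
# Dump the history: one row per txg with the fields listed above,
# oldest txg first ("tank" is a placeholder pool name).
cat /proc/spl/kstat/zfs/txgs-tank

# Keep statistics for the last 120 txgs instead of the default 60.
echo 120 > /sys/module/zfs/parameters/zfs_txg_history
```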
Diffstat (limited to 'module/zfs')
-rw-r--r-- | module/zfs/dsl_pool.c | 144 |
-rw-r--r-- | module/zfs/txg.c | 46 |
2 files changed, 190 insertions, 0 deletions
```diff
diff --git a/module/zfs/dsl_pool.c b/module/zfs/dsl_pool.c
index 53419778a..7e0fba589 100644
--- a/module/zfs/dsl_pool.c
+++ b/module/zfs/dsl_pool.c
@@ -44,6 +44,7 @@
 int zfs_no_write_throttle = 0;
 int zfs_write_limit_shift = 3;          /* 1/8th of physical memory */
 int zfs_txg_synctime_ms = 1000;         /* target millisecs to sync a txg */
+int zfs_txg_history = 60;               /* statistics for the last N txgs */
 
 unsigned long zfs_write_limit_min = 32 << 20;   /* min write limit is 32MB */
 unsigned long zfs_write_limit_max = 0;          /* max data payload per txg */
@@ -54,6 +55,143 @@ kmutex_t zfs_write_limit_lock;
 
 static pgcnt_t old_physmem = 0;
 
+static int
+dsl_pool_txg_history_update(kstat_t *ksp, int rw)
+{
+        dsl_pool_t *dp = ksp->ks_private;
+        txg_history_t *th;
+        int i = 0;
+
+        if (rw == KSTAT_WRITE)
+                return (EACCES);
+
+        if (ksp->ks_data)
+                kmem_free(ksp->ks_data, ksp->ks_data_size);
+
+        mutex_enter(&dp->dp_lock);
+
+        ksp->ks_ndata = dp->dp_txg_history_size;
+        ksp->ks_data_size = dp->dp_txg_history_size * sizeof(kstat_txg_t);
+        if (ksp->ks_data_size > 0)
+                ksp->ks_data = kmem_alloc(ksp->ks_data_size, KM_PUSHPAGE);
+
+        /* Traversed oldest to youngest for the most readable kstat output */
+        for (th = list_tail(&dp->dp_txg_history); th != NULL;
+             th = list_prev(&dp->dp_txg_history, th)) {
+                mutex_enter(&th->th_lock);
+                ASSERT3S(i + sizeof(kstat_txg_t), <=, ksp->ks_data_size);
+                memcpy(ksp->ks_data + i, &th->th_kstat, sizeof(kstat_txg_t));
+                i += sizeof(kstat_txg_t);
+                mutex_exit(&th->th_lock);
+        }
+
+        mutex_exit(&dp->dp_lock);
+
+        return (0);
+}
+
+static void
+dsl_pool_txg_history_init(dsl_pool_t *dp, uint64_t txg)
+{
+        char name[KSTAT_STRLEN];
+
+        list_create(&dp->dp_txg_history, sizeof (txg_history_t),
+            offsetof(txg_history_t, th_link));
+        dsl_pool_txg_history_add(dp, txg);
+
+        (void) snprintf(name, KSTAT_STRLEN, "txgs-%s", spa_name(dp->dp_spa));
+        dp->dp_txg_kstat = kstat_create("zfs", 0, name, "misc",
+            KSTAT_TYPE_TXG, 0, KSTAT_FLAG_VIRTUAL);
+        if (dp->dp_txg_kstat) {
+                dp->dp_txg_kstat->ks_data = NULL;
+                dp->dp_txg_kstat->ks_private = dp;
+                dp->dp_txg_kstat->ks_update = dsl_pool_txg_history_update;
+                kstat_install(dp->dp_txg_kstat);
+        }
+}
+
+static void
+dsl_pool_txg_history_destroy(dsl_pool_t *dp)
+{
+        txg_history_t *th;
+
+        if (dp->dp_txg_kstat) {
+                if (dp->dp_txg_kstat->ks_data)
+                        kmem_free(dp->dp_txg_kstat->ks_data,
+                            dp->dp_txg_kstat->ks_data_size);
+
+                kstat_delete(dp->dp_txg_kstat);
+        }
+
+        mutex_enter(&dp->dp_lock);
+        while ((th = list_remove_head(&dp->dp_txg_history))) {
+                dp->dp_txg_history_size--;
+                mutex_destroy(&th->th_lock);
+                kmem_free(th, sizeof(txg_history_t));
+        }
+
+        ASSERT3U(dp->dp_txg_history_size, ==, 0);
+        list_destroy(&dp->dp_txg_history);
+        mutex_exit(&dp->dp_lock);
+}
+
+txg_history_t *
+dsl_pool_txg_history_add(dsl_pool_t *dp, uint64_t txg)
+{
+        txg_history_t *th, *rm;
+
+        th = kmem_zalloc(sizeof(txg_history_t), KM_SLEEP);
+        mutex_init(&th->th_lock, NULL, MUTEX_DEFAULT, NULL);
+        th->th_kstat.txg = txg;
+        th->th_kstat.state = TXG_STATE_OPEN;
+        th->th_kstat.birth = gethrtime();
+
+        mutex_enter(&dp->dp_lock);
+
+        list_insert_head(&dp->dp_txg_history, th);
+        dp->dp_txg_history_size++;
+
+        while (dp->dp_txg_history_size > zfs_txg_history) {
+                dp->dp_txg_history_size--;
+                rm = list_remove_tail(&dp->dp_txg_history);
+                mutex_destroy(&rm->th_lock);
+                kmem_free(rm, sizeof(txg_history_t));
+        }
+
+        mutex_exit(&dp->dp_lock);
+
+        return (th);
+}
+
+/*
+ * Traversed youngest to oldest because lookups are only done for open
+ * or syncing txgs which are guaranteed to be at the head of the list.
+ * The txg_history_t structure will be returned locked.
+ */
+txg_history_t *
+dsl_pool_txg_history_get(dsl_pool_t *dp, uint64_t txg)
+{
+        txg_history_t *th;
+
+        mutex_enter(&dp->dp_lock);
+        for (th = list_head(&dp->dp_txg_history); th != NULL;
+             th = list_next(&dp->dp_txg_history, th)) {
+                if (th->th_kstat.txg == txg) {
+                        mutex_enter(&th->th_lock);
+                        break;
+                }
+        }
+        mutex_exit(&dp->dp_lock);
+
+        return (th);
+}
+
+void
+dsl_pool_txg_history_put(txg_history_t *th)
+{
+        mutex_exit(&th->th_lock);
+}
+
 int
 dsl_pool_open_special_dir(dsl_pool_t *dp, const char *name, dsl_dir_t **ddp)
 {
@@ -96,6 +234,8 @@ dsl_pool_open_impl(spa_t *spa, uint64_t txg)
         dp->dp_iput_taskq = taskq_create("zfs_iput_taskq", 1, minclsyspri,
             1, 4, 0);
 
+        dsl_pool_txg_history_init(dp, txg);
+
         return (dp);
 }
 
@@ -213,6 +353,7 @@ dsl_pool_close(dsl_pool_t *dp)
         arc_flush(dp->dp_spa);
         txg_fini(dp);
         dsl_scan_fini(dp);
+        dsl_pool_txg_history_destroy(dp);
         rw_destroy(&dp->dp_config_rwlock);
         mutex_destroy(&dp->dp_lock);
         taskq_destroy(dp->dp_iput_taskq);
@@ -864,6 +1005,9 @@ MODULE_PARM_DESC(zfs_write_limit_shift, "log2(fraction of memory) per txg");
 module_param(zfs_txg_synctime_ms, int, 0644);
 MODULE_PARM_DESC(zfs_txg_synctime_ms, "Target milliseconds between txg sync");
 
+module_param(zfs_txg_history, int, 0644);
+MODULE_PARM_DESC(zfs_txg_history, "Historic statistics for the last N txgs");
+
 module_param(zfs_write_limit_min, ulong, 0444);
 MODULE_PARM_DESC(zfs_write_limit_min, "Min txg write limit");
diff --git a/module/zfs/txg.c b/module/zfs/txg.c
index 5702a61f0..17494bcda 100644
--- a/module/zfs/txg.c
+++ b/module/zfs/txg.c
@@ -29,6 +29,7 @@
 #include <sys/dsl_pool.h>
 #include <sys/dsl_scan.h>
 #include <sys/callb.h>
+#include <sys/spa_impl.h>
 
 /*
  * Pool-wide transaction groups.
@@ -279,6 +280,8 @@ txg_rele_to_sync(txg_handle_t *th)
 static void
 txg_quiesce(dsl_pool_t *dp, uint64_t txg)
 {
+        hrtime_t start;
+        txg_history_t *th;
         tx_state_t *tx = &dp->dp_tx;
         int g = txg & TXG_MASK;
         int c;
@@ -293,6 +296,15 @@ txg_quiesce(dsl_pool_t *dp, uint64_t txg)
         tx->tx_open_txg++;
 
         /*
+         * Measure how long the txg was open and replace the kstat.
+         */
+        th = dsl_pool_txg_history_get(dp, txg);
+        th->th_kstat.open_time = gethrtime() - th->th_kstat.birth;
+        th->th_kstat.state = TXG_STATE_QUIESCING;
+        dsl_pool_txg_history_put(th);
+        dsl_pool_txg_history_add(dp, tx->tx_open_txg);
+
+        /*
          * Now that we've incremented tx_open_txg, we can let threads
          * enter the next transaction group.
          */
@@ -302,6 +314,8 @@ txg_quiesce(dsl_pool_t *dp, uint64_t txg)
         /*
          * Quiesce the transaction group by waiting for everyone to txg_exit().
         */
+        start = gethrtime();
+
         for (c = 0; c < max_ncpus; c++) {
                 tx_cpu_t *tc = &tx->tx_cpu[c];
                 mutex_enter(&tc->tc_lock);
@@ -309,6 +323,13 @@ txg_quiesce(dsl_pool_t *dp, uint64_t txg)
                 while (tc->tc_count[g] != 0)
                         cv_wait(&tc->tc_cv[g], &tc->tc_lock);
                 mutex_exit(&tc->tc_lock);
         }
+
+        /*
+         * Measure how long the txg took to quiesce.
+         */
+        th = dsl_pool_txg_history_get(dp, txg);
+        th->th_kstat.quiesce_time = gethrtime() - start;
+        dsl_pool_txg_history_put(th);
 }
 
 static void
@@ -395,6 +416,8 @@ txg_sync_thread(dsl_pool_t *dp)
         start = delta = 0;
 
         for (;;) {
+                hrtime_t hrstart;
+                txg_history_t *th;
                 uint64_t timer, timeout;
                 uint64_t txg;
@@ -441,11 +464,17 @@ txg_sync_thread(dsl_pool_t *dp)
                 tx->tx_syncing_txg = txg;
                 cv_broadcast(&tx->tx_quiesce_more_cv);
 
+                th = dsl_pool_txg_history_get(dp, txg);
+                th->th_kstat.state = TXG_STATE_SYNCING;
+                vdev_get_stats(spa->spa_root_vdev, &th->th_vs1);
+                dsl_pool_txg_history_put(th);
+
                 dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
                     txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
                 mutex_exit(&tx->tx_sync_lock);
 
                 start = ddi_get_lbolt();
+                hrstart = gethrtime();
                 spa_sync(spa, txg);
                 delta = ddi_get_lbolt() - start;
 
@@ -458,6 +487,23 @@ txg_sync_thread(dsl_pool_t *dp)
                  * Dispatch commit callbacks to worker threads.
                  */
                 txg_dispatch_callbacks(dp, txg);
+
+                /*
+                 * Measure the txg sync time determine the amount of I/O done.
+                 */
+                th = dsl_pool_txg_history_get(dp, txg);
+                vdev_get_stats(spa->spa_root_vdev, &th->th_vs2);
+                th->th_kstat.sync_time = gethrtime() - hrstart;
+                th->th_kstat.nread = th->th_vs2.vs_bytes[ZIO_TYPE_READ] -
+                    th->th_vs1.vs_bytes[ZIO_TYPE_READ];
+                th->th_kstat.nwritten = th->th_vs2.vs_bytes[ZIO_TYPE_WRITE] -
+                    th->th_vs1.vs_bytes[ZIO_TYPE_WRITE];
+                th->th_kstat.reads = th->th_vs2.vs_ops[ZIO_TYPE_READ] -
+                    th->th_vs1.vs_ops[ZIO_TYPE_READ];
+                th->th_kstat.writes = th->th_vs2.vs_ops[ZIO_TYPE_WRITE] -
+                    th->th_vs1.vs_ops[ZIO_TYPE_WRITE];
+                th->th_kstat.state = TXG_STATE_COMMITTED;
+                dsl_pool_txg_history_put(th);
         }
 }
```
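A note on the locking idiom above: dp_lock only guards membership of the dp_txg_history list, while each entry carries its own th_lock, which dsl_pool_txg_history_get() acquires before dp_lock is dropped, so the entry handed back to the caller cannot change underneath an update. The scheme also leans on the invariant called out in the comment: lookups only target open or syncing txgs near the head of the list, while eviction trims from the tail. A standalone userspace sketch of the same get/put pattern, with pthreads standing in for the kernel primitives and hypothetical hist_* names:

```c
/*
 * Standalone sketch (userspace, pthreads) of the get/put idiom above:
 * a list lock protects membership, each entry has its own lock, and
 * hist_get() returns the entry locked so the caller can update it
 * race-free until hist_put(). The hist_* names are illustrative only.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>

typedef struct hist {
	struct hist *next;          /* singly linked, newest at head */
	pthread_mutex_t lock;       /* protects the per-entry fields */
	uint64_t txg;
	uint64_t open_time;
} hist_t;

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static hist_t *head;                /* protected by list_lock */

/* Insert a new entry for txg at the head of the list. */
static hist_t *
hist_add(uint64_t txg)
{
	hist_t *h = calloc(1, sizeof (*h));

	pthread_mutex_init(&h->lock, NULL);
	h->txg = txg;

	pthread_mutex_lock(&list_lock);
	h->next = head;
	head = h;
	pthread_mutex_unlock(&list_lock);

	return (h);
}

/*
 * Look up txg and return the entry with its own lock held. Taking
 * h->lock while list_lock is still held means the entry cannot be
 * torn down between the lookup and the caller's update.
 */
static hist_t *
hist_get(uint64_t txg)
{
	hist_t *h;

	pthread_mutex_lock(&list_lock);
	for (h = head; h != NULL; h = h->next) {
		if (h->txg == txg) {
			pthread_mutex_lock(&h->lock);
			break;
		}
	}
	pthread_mutex_unlock(&list_lock);

	return (h);                 /* NULL if the entry aged out */
}

/* Release an entry returned by hist_get(). */
static void
hist_put(hist_t *h)
{
	pthread_mutex_unlock(&h->lock);
}

int
main(void)
{
	hist_t *h;

	hist_add(100);

	h = hist_get(100);          /* returned with h->lock held */
	if (h != NULL) {
		h->open_time = 42;  /* safe: h->lock is held */
		hist_put(h);        /* drops h->lock */
	}

	return (0);
}
```

Returning the entry locked keeps the critical section on the shared list short while still letting the caller perform a multi-field update atomically with respect to the kstat reader, which takes the same per-entry lock when it snapshots the history.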