author      Brian Behlendorf <[email protected]>    2012-01-20 10:58:57 -0800
committer   Brian Behlendorf <[email protected]>    2012-02-27 08:59:10 -0800
commit      570827e129ed81e066e894530bbe24642f473154 (patch)
tree        90e9128a73735df10392a24280ade08031051b68 /include/sys
parent      13be560d89e9de63bdf63e8187af2ceb90cf094d (diff)
Add 'dmu_tx' kstats entry
Keep counters for the various reasons that a thread may end up
in txg_wait_open() waiting on a new txg. This can be useful
when attempting to determine why a particular workload is
underperforming.
Signed-off-by: Brian Behlendorf <[email protected]>
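
The counters are declared in the header change below as a dmu_tx_stats structure of
kstat_named_t entries, together with DMU_TX_STAT_INCR()/DMU_TX_STAT_BUMP() helper
macros. A minimal sketch of how a call site might account for a wait reason with
these macros follows; the example_tx_account() helper and the mapping of ERESTART
to the dmu_tx_suspended counter are assumptions for illustration only, since the
real accounting lives in the dmu_tx.c half of this change, outside the include/sys
diffstat.

#include <sys/dmu.h>
#include <sys/dmu_tx.h>

/*
 * Hypothetical helper: try to assign a tx without blocking and bump the
 * matching dmu_tx kstat counter for the outcome.  Illustration only.
 */
static int
example_tx_account(dmu_tx_t *tx)
{
	int err = dmu_tx_assign(tx, TXG_NOWAIT);

	if (err == ERESTART) {
		/* Assumed mapping: assignment must back off and wait on a new txg. */
		DMU_TX_STAT_BUMP(dmu_tx_suspended);
		return (err);
	}

	if (err == 0)
		DMU_TX_STAT_BUMP(dmu_tx_assigned);

	return (err);
}

Because DMU_TX_STAT_INCR() expands to an atomic_add_64() on the counter's ui64
value, no extra locking is needed at the call sites, and the aggregated counts can
later be read back through the kstat interface to see which wait path a slow
workload is hitting.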
Diffstat (limited to 'include/sys')
-rw-r--r--    include/sys/dmu_tx.h    28
1 file changed, 28 insertions, 0 deletions
diff --git a/include/sys/dmu_tx.h b/include/sys/dmu_tx.h
index c5ea50fa8..d87a09bec 100644
--- a/include/sys/dmu_tx.h
+++ b/include/sys/dmu_tx.h
@@ -105,6 +105,31 @@ typedef struct dmu_tx_callback {
 } dmu_tx_callback_t;
 
 /*
+ * Used for dmu tx kstat.
+ */
+typedef struct dmu_tx_stats {
+	kstat_named_t dmu_tx_assigned;
+	kstat_named_t dmu_tx_delay;
+	kstat_named_t dmu_tx_error;
+	kstat_named_t dmu_tx_suspended;
+	kstat_named_t dmu_tx_group;
+	kstat_named_t dmu_tx_how;
+	kstat_named_t dmu_tx_memory_reserve;
+	kstat_named_t dmu_tx_memory_reclaim;
+	kstat_named_t dmu_tx_memory_inflight;
+	kstat_named_t dmu_tx_dirty_throttle;
+	kstat_named_t dmu_tx_write_limit;
+	kstat_named_t dmu_tx_quota;
+} dmu_tx_stats_t;
+
+extern dmu_tx_stats_t dmu_tx_stats;
+
+#define DMU_TX_STAT_INCR(stat, val) \
+	atomic_add_64(&dmu_tx_stats.stat.value.ui64, (val));
+#define DMU_TX_STAT_BUMP(stat) \
+	DMU_TX_STAT_INCR(stat, 1);
+
+/*
  * These routines are defined in dmu.h, and are called by the user.
  */
 dmu_tx_t *dmu_tx_create(objset_t *dd);
@@ -141,6 +166,9 @@ void dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space);
 #define DMU_TX_DIRTY_BUF(tx, db)
 #endif
 
+void dmu_tx_init(void);
+void dmu_tx_fini(void);
+
 #ifdef __cplusplus
 }
 #endif
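
Only the declarations for dmu_tx_init() and dmu_tx_fini() appear in this header;
the kstat registration itself belongs to the dmu_tx.c side of the change, which is
filtered out of this view. A minimal sketch of what that registration could look
like with the generic named-kstat API follows; the dmu_tx_ksp handle, the
"zfs"/"dmu_tx" module and name strings, and the initializer are assumptions here,
not taken from this diff.

#include <sys/kstat.h>
#include <sys/dmu_tx.h>

/* Assumed definition of the counters declared 'extern' in dmu_tx.h. */
dmu_tx_stats_t dmu_tx_stats = {
	{ "dmu_tx_assigned",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_delay",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_error",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_suspended",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_group",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_how",			KSTAT_DATA_UINT64 },
	{ "dmu_tx_memory_reserve",	KSTAT_DATA_UINT64 },
	{ "dmu_tx_memory_reclaim",	KSTAT_DATA_UINT64 },
	{ "dmu_tx_memory_inflight",	KSTAT_DATA_UINT64 },
	{ "dmu_tx_dirty_throttle",	KSTAT_DATA_UINT64 },
	{ "dmu_tx_write_limit",		KSTAT_DATA_UINT64 },
	{ "dmu_tx_quota",		KSTAT_DATA_UINT64 },
};

static kstat_t *dmu_tx_ksp;	/* assumed module-local handle */

void
dmu_tx_init(void)
{
	/* Publish dmu_tx_stats as a virtual named kstat. */
	dmu_tx_ksp = kstat_create("zfs", 0, "dmu_tx", "misc",
	    KSTAT_TYPE_NAMED,
	    sizeof (dmu_tx_stats) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);

	if (dmu_tx_ksp != NULL) {
		dmu_tx_ksp->ks_data = &dmu_tx_stats;
		kstat_install(dmu_tx_ksp);
	}
}

void
dmu_tx_fini(void)
{
	if (dmu_tx_ksp != NULL) {
		kstat_delete(dmu_tx_ksp);
		dmu_tx_ksp = NULL;
	}
}

With KSTAT_FLAG_VIRTUAL the kstat framework reads the live counters directly out of
dmu_tx_stats, so the DMU_TX_STAT_INCR() call sites only ever touch that structure;
on Linux the installed kstat would typically be readable under the SPL kstat proc
hierarchy (e.g. /proc/spl/kstat/zfs/dmu_tx).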