author     Tom Caputi <[email protected]>          2018-08-20 16:41:53 -0400
committer  Brian Behlendorf <[email protected]>   2018-08-27 10:16:01 -0700
commit     8c4fb36a24d4fd88382e454b13751a5adfea0806 (patch)
tree       3ae994cb68fb807d391688c4fee13fa24656cc4c
parent     a584ef26053065f486d46a7335bea222cb03eeea (diff)
Small rework of txg_list code
This patch simply adds some missing locking to the txg_list functions
and refactors txg_verify() so that it is only compiled in for debug
builds.

Reviewed-by: Brian Behlendorf <[email protected]>
Signed-off-by: Tom Caputi <[email protected]>
Closes #7795
-rw-r--r--  include/sys/txg.h    |  7
-rw-r--r--  module/zfs/dmu_tx.c  |  2
-rw-r--r--  module/zfs/txg.c     | 64
3 files changed, 52 insertions, 21 deletions
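
Two things change below: txg_verify() is wrapped in a TXG_VERIFY() macro and an #ifdef ZFS_DEBUG guard, so its assertions are compiled only into debug builds, and txg_list_empty() is split into a locked public function plus a lock-holding txg_list_empty_impl() helper so the list checks in txg.c can take tl_lock consistently. As a rough standalone illustration of the first pattern (the DEMO_* names are hypothetical and not part of the patch):

/*
 * Editorial sketch, not part of the patch: a debug-only verification
 * macro in the style of TXG_VERIFY().
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#ifdef DEMO_DEBUG
#define DEMO_VERIFY(txg, open_txg)      demo_verify(txg, open_txg)
#else
#define DEMO_VERIFY(txg, open_txg)
#endif

#ifdef DEMO_DEBUG
static void
demo_verify(uint64_t txg, uint64_t open_txg)
{
        /* Stand-in for txg_verify()'s "txg must be active" assertions. */
        assert(txg <= open_txg);
}
#endif

int
main(void)
{
        uint64_t open_txg = 100;

        DEMO_VERIFY(99, open_txg);      /* compiled in only with -DDEMO_DEBUG */
        printf("open txg %llu\n", (unsigned long long)open_txg);
        return (0);
}

Built without -DDEMO_DEBUG, both the helper and its call sites compile away entirely, which is the effect ZFS_DEBUG now has on txg_verify() and TXG_VERIFY().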
diff --git a/include/sys/txg.h b/include/sys/txg.h
index f52197781..ed0e7297c 100644
--- a/include/sys/txg.h
+++ b/include/sys/txg.h
@@ -133,6 +133,13 @@ extern void *txg_list_next(txg_list_t *tl, void *p, uint64_t txg);
/* Global tuning */
extern int zfs_txg_timeout;
+
+#ifdef ZFS_DEBUG
+#define TXG_VERIFY(spa, txg) txg_verify(spa, txg)
+#else
+#define TXG_VERIFY(spa, txg)
+#endif
+
#ifdef __cplusplus
}
#endif
diff --git a/module/zfs/dmu_tx.c b/module/zfs/dmu_tx.c
index c63ba6405..c268f3c40 100644
--- a/module/zfs/dmu_tx.c
+++ b/module/zfs/dmu_tx.c
@@ -87,7 +87,7 @@ dmu_tx_create_assigned(struct dsl_pool *dp, uint64_t txg)
{
dmu_tx_t *tx = dmu_tx_create_dd(NULL);
- txg_verify(dp->dp_spa, txg);
+ TXG_VERIFY(dp->dp_spa, txg);
tx->tx_pool = dp;
tx->tx_txg = txg;
tx->tx_anyobj = TRUE;
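
For reference, the two possible expansions of the TXG_VERIFY() call above (illustrative only, not part of the patch):

        /* ZFS_DEBUG defined: forwards to the real check, as before */
        txg_verify(dp->dp_spa, txg);

        /*
         * ZFS_DEBUG undefined: the arguments are discarded by the
         * preprocessor and the statement reduces to a bare semicolon.
         */
        ;

So dmu_tx_create_assigned() pays for the check only in debug builds.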
diff --git a/module/zfs/txg.c b/module/zfs/txg.c
index 16b2f845b..cfc1d0a35 100644
--- a/module/zfs/txg.c
+++ b/module/zfs/txg.c
@@ -758,6 +758,7 @@ txg_sync_waiting(dsl_pool_t *dp)
* Verify that this txg is active (open, quiescing, syncing). Non-active
* txg's should not be manipulated.
*/
+#ifdef ZFS_DEBUG
void
txg_verify(spa_t *spa, uint64_t txg)
{
@@ -768,6 +769,7 @@ txg_verify(spa_t *spa, uint64_t txg)
ASSERT3U(txg, >=, dp->dp_tx.tx_synced_txg);
ASSERT3U(txg, >=, dp->dp_tx.tx_open_txg - TXG_CONCURRENT_STATES);
}
+#endif
/*
* Per-txg object lists.
@@ -786,39 +788,54 @@ txg_list_create(txg_list_t *tl, spa_t *spa, size_t offset)
tl->tl_head[t] = NULL;
}
+static boolean_t
+txg_list_empty_impl(txg_list_t *tl, uint64_t txg)
+{
+ ASSERT(MUTEX_HELD(&tl->tl_lock));
+ TXG_VERIFY(tl->tl_spa, txg);
+ return (tl->tl_head[txg & TXG_MASK] == NULL);
+}
+
+boolean_t
+txg_list_empty(txg_list_t *tl, uint64_t txg)
+{
+ mutex_enter(&tl->tl_lock);
+ boolean_t ret = txg_list_empty_impl(tl, txg);
+ mutex_exit(&tl->tl_lock);
+
+ return (ret);
+}
+
void
txg_list_destroy(txg_list_t *tl)
{
int t;
+ mutex_enter(&tl->tl_lock);
for (t = 0; t < TXG_SIZE; t++)
- ASSERT(txg_list_empty(tl, t));
+ ASSERT(txg_list_empty_impl(tl, t));
+ mutex_exit(&tl->tl_lock);
mutex_destroy(&tl->tl_lock);
}
-boolean_t
-txg_list_empty(txg_list_t *tl, uint64_t txg)
-{
- txg_verify(tl->tl_spa, txg);
- return (tl->tl_head[txg & TXG_MASK] == NULL);
-}
-
/*
* Returns true if all txg lists are empty.
*
* Warning: this is inherently racy (an item could be added immediately
- * after this function returns). We don't bother with the lock because
- * it wouldn't change the semantics.
+ * after this function returns).
*/
boolean_t
txg_all_lists_empty(txg_list_t *tl)
{
+ mutex_enter(&tl->tl_lock);
for (int i = 0; i < TXG_SIZE; i++) {
- if (!txg_list_empty(tl, i)) {
+ if (!txg_list_empty_impl(tl, i)) {
+ mutex_exit(&tl->tl_lock);
return (B_FALSE);
}
}
+ mutex_exit(&tl->tl_lock);
return (B_TRUE);
}
@@ -833,7 +850,7 @@ txg_list_add(txg_list_t *tl, void *p, uint64_t txg)
txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);
boolean_t add;
- txg_verify(tl->tl_spa, txg);
+ TXG_VERIFY(tl->tl_spa, txg);
mutex_enter(&tl->tl_lock);
add = (tn->tn_member[t] == 0);
if (add) {
@@ -858,7 +875,7 @@ txg_list_add_tail(txg_list_t *tl, void *p, uint64_t txg)
txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);
boolean_t add;
- txg_verify(tl->tl_spa, txg);
+ TXG_VERIFY(tl->tl_spa, txg);
mutex_enter(&tl->tl_lock);
add = (tn->tn_member[t] == 0);
if (add) {
@@ -886,7 +903,7 @@ txg_list_remove(txg_list_t *tl, uint64_t txg)
txg_node_t *tn;
void *p = NULL;
- txg_verify(tl->tl_spa, txg);
+ TXG_VERIFY(tl->tl_spa, txg);
mutex_enter(&tl->tl_lock);
if ((tn = tl->tl_head[t]) != NULL) {
ASSERT(tn->tn_member[t]);
@@ -910,7 +927,7 @@ txg_list_remove_this(txg_list_t *tl, void *p, uint64_t txg)
int t = txg & TXG_MASK;
txg_node_t *tn, **tp;
- txg_verify(tl->tl_spa, txg);
+ TXG_VERIFY(tl->tl_spa, txg);
mutex_enter(&tl->tl_lock);
for (tp = &tl->tl_head[t]; (tn = *tp) != NULL; tp = &tn->tn_next[t]) {
@@ -934,20 +951,24 @@ txg_list_member(txg_list_t *tl, void *p, uint64_t txg)
int t = txg & TXG_MASK;
txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);
- txg_verify(tl->tl_spa, txg);
+ TXG_VERIFY(tl->tl_spa, txg);
return (tn->tn_member[t] != 0);
}
/*
- * Walk a txg list -- only safe if you know it's not changing.
+ * Walk a txg list
*/
void *
txg_list_head(txg_list_t *tl, uint64_t txg)
{
int t = txg & TXG_MASK;
- txg_node_t *tn = tl->tl_head[t];
+ txg_node_t *tn;
+
+ mutex_enter(&tl->tl_lock);
+ tn = tl->tl_head[t];
+ mutex_exit(&tl->tl_lock);
- txg_verify(tl->tl_spa, txg);
+ TXG_VERIFY(tl->tl_spa, txg);
return (tn == NULL ? NULL : (char *)tn - tl->tl_offset);
}
@@ -957,8 +978,11 @@ txg_list_next(txg_list_t *tl, void *p, uint64_t txg)
int t = txg & TXG_MASK;
txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);
- txg_verify(tl->tl_spa, txg);
+ TXG_VERIFY(tl->tl_spa, txg);
+
+ mutex_enter(&tl->tl_lock);
tn = tn->tn_next[t];
+ mutex_exit(&tl->tl_lock);
return (tn == NULL ? NULL : (char *)tn - tl->tl_offset);
}
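
The pattern applied throughout txg.c is a foo()/foo_impl() split: txg_list_empty() takes tl_lock and delegates to txg_list_empty_impl(), which only asserts that the lock is held, so callers that already own the lock (txg_list_destroy(), txg_all_lists_empty()) can reuse the check without recursive locking. A rough userspace analogue with pthreads, using hypothetical demo_* names rather than anything from the ZFS tree:

/* Editorial sketch only: a userspace analogue of the *_impl locking split. */
#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

typedef struct demo_list {
        pthread_mutex_t dl_lock;
        void            *dl_head;
} demo_list_t;

/* Internal variant: the caller must already hold dl_lock. */
static bool
demo_list_empty_impl(demo_list_t *dl)
{
        /* The kernel version asserts MUTEX_HELD(&tl->tl_lock) here. */
        return (dl->dl_head == NULL);
}

/* Public variant: takes the lock and delegates to the _impl helper. */
bool
demo_list_empty(demo_list_t *dl)
{
        pthread_mutex_lock(&dl->dl_lock);
        bool ret = demo_list_empty_impl(dl);
        pthread_mutex_unlock(&dl->dl_lock);

        return (ret);
}

/* A caller that needs the lock anyway uses the _impl helper directly. */
void
demo_list_destroy(demo_list_t *dl)
{
        pthread_mutex_lock(&dl->dl_lock);
        assert(demo_list_empty_impl(dl));
        pthread_mutex_unlock(&dl->dl_lock);
        pthread_mutex_destroy(&dl->dl_lock);
}

Note that, as the retained comment on txg_all_lists_empty() says, taking the lock does not make the answer durable; it only guarantees the check itself sees a consistent list, and the result can be stale as soon as the lock is dropped.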