path: root/module/zfs/zfs_vnops.c
author    Matthew Ahrens <[email protected]>    2013-11-22 15:13:18 -0800
committer Brian Behlendorf <[email protected]>    2013-12-06 09:30:51 -0800
commit    384f8a09f8423d951bb81d9ca945e588de14f95f (patch)
tree      ecc453b3d1561ac9952a418e5d4fc8db9002568f /module/zfs/zfs_vnops.c
parent    729210564a5325e190fc4fba22bf17bacf957ace (diff)
Illumos #4347 ZPL can use dmu_tx_assign(TXG_WAIT)
Fix a lock contention issue by allowing threads not holding ZPL locks to
block when waiting to assign a transaction.

Porting Notes: zfs_putpage() still uses TXG_NOWAIT, unlike the upstream
version. This case may be a contention point just like zfs_write(), however
it is not safe to block here since it may be called during memory reclaim.

Reviewed by: George Wilson <[email protected]>
Reviewed by: Adam Leventhal <[email protected]>
Reviewed by: Dan McDonald <[email protected]>
Reviewed by: Boris Protopopov <[email protected]>
Approved by: Dan McDonald <[email protected]>

References:
  https://www.illumos.org/issues/4347
  illumos/illumos-gate@e722410c49fe67cbf0f639cbcc288bd6cbcf7dd1

Ported-by: Ned Bass <[email protected]>
Signed-off-by: Brian Behlendorf <[email protected]>
Diffstat (limited to 'module/zfs/zfs_vnops.c')
-rw-r--r--    module/zfs/zfs_vnops.c    32
1 file changed, 15 insertions(+), 17 deletions(-)
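
As a hedged sketch of the simplification this commit enables: a caller that
holds no ZPL locks (beyond ZFS_ENTER()) may pass TXG_WAIT and drop the
ERESTART retry loop entirely. The dmu_tx_*() and zfs_sa_upgrade_txholds()
calls below are the same ones visible in the diff; the helper name
example_sync_attr() and its arguments are hypothetical placeholders, not
code from this commit.

    /*
     * Sketch only.  With TXG_WAIT, dmu_tx_assign() blocks internally
     * until a transaction group is available, so no retry loop is needed.
     */
    static int
    example_sync_attr(objset_t *os, znode_t *zp)    /* hypothetical helper */
    {
            dmu_tx_t *tx;
            int error;

            tx = dmu_tx_create(os);
            dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
            zfs_sa_upgrade_txholds(tx, zp);

            /* Safe to block: no ZPL locks are held at this point. */
            error = dmu_tx_assign(tx, TXG_WAIT);
            if (error) {
                    dmu_tx_abort(tx);
                    return (error);
            }

            /* ... update the dnode / SA attributes here ... */

            dmu_tx_commit(tx);
            return (0);
    }

With TXG_WAIT the assign never returns ERESTART, so the only failures left
are real ones (such as ENOSPC or quota errors), which is why the hunks below
can simply abort the tx and bail out.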
diff --git a/module/zfs/zfs_vnops.c b/module/zfs/zfs_vnops.c
index abf3747db..1552b61e0 100644
--- a/module/zfs/zfs_vnops.c
+++ b/module/zfs/zfs_vnops.c
@@ -106,11 +106,18 @@
* (3) All range locks must be grabbed before calling dmu_tx_assign(),
* as they can span dmu_tx_assign() calls.
*
- * (4) Always pass TXG_NOWAIT as the second argument to dmu_tx_assign().
- * This is critical because we don't want to block while holding locks.
- * Note, in particular, that if a lock is sometimes acquired before
- * the tx assigns, and sometimes after (e.g. z_lock), then failing to
- * use a non-blocking assign can deadlock the system. The scenario:
+ * (4) If ZPL locks are held, pass TXG_NOWAIT as the second argument to
+ * dmu_tx_assign(). This is critical because we don't want to block
+ * while holding locks.
+ *
+ * If no ZPL locks are held (aside from ZFS_ENTER()), use TXG_WAIT. This
+ * reduces lock contention and CPU usage when we must wait (note that if
+ * throughput is constrained by the storage, nearly every transaction
+ * must wait).
+ *
+ * Note, in particular, that if a lock is sometimes acquired before
+ * the tx assigns, and sometimes after (e.g. z_lock), then failing
+ * to use a non-blocking assign can deadlock the system. The scenario:
*
* Thread A has grabbed a lock before calling dmu_tx_assign().
* Thread B is in an already-assigned tx, and blocks for this lock.
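
For contrast, here is a minimal sketch of the non-blocking idiom that rule
(4) still requires while a ZPL lock is held. The wait-abort-retry sequence
mirrors the pattern being removed from zfs_write() in the next hunk;
example_update_locked() is again a hypothetical name, not code from this
file.

    /*
     * Sketch only.  While a ZPL lock is held, the assign must not block:
     * on ERESTART the thread waits for the next open txg with no tx
     * assigned, drops the tx, and retries from scratch.
     */
    static int
    example_update_locked(objset_t *os, znode_t *zp)    /* hypothetical */
    {
            dmu_tx_t *tx;
            int error;
    top:
            tx = dmu_tx_create(os);
            dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
            zfs_sa_upgrade_txholds(tx, zp);

            error = dmu_tx_assign(tx, TXG_NOWAIT);
            if (error) {
                    if (error == ERESTART) {
                            dmu_tx_wait(tx);        /* wait outside any tx */
                            dmu_tx_abort(tx);
                            goto top;
                    }
                    dmu_tx_abort(tx);
                    return (error);
            }

            /* ... perform the update while the txg is held open ... */

            dmu_tx_commit(tx);
            return (0);
    }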
@@ -712,7 +719,6 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
while (n > 0) {
abuf = NULL;
woff = uio->uio_loffset;
-again:
if (zfs_owner_overquota(zsb, zp, B_FALSE) ||
zfs_owner_overquota(zsb, zp, B_TRUE)) {
if (abuf != NULL)
@@ -762,13 +768,8 @@ again:
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
dmu_tx_hold_write(tx, zp->z_id, woff, MIN(n, max_blksz));
zfs_sa_upgrade_txholds(tx, zp);
- error = dmu_tx_assign(tx, TXG_NOWAIT);
+ error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
- if (error == ERESTART) {
- dmu_tx_wait(tx);
- dmu_tx_abort(tx);
- goto again;
- }
dmu_tx_abort(tx);
if (abuf != NULL)
dmu_return_arcbuf(abuf);
@@ -2833,12 +2834,9 @@ top:
zfs_sa_upgrade_txholds(tx, zp);
- err = dmu_tx_assign(tx, TXG_NOWAIT);
- if (err) {
- if (err == ERESTART)
- dmu_tx_wait(tx);
+ err = dmu_tx_assign(tx, TXG_WAIT);
+ if (err)
goto out;
- }
count = 0;
/*