author     lidongyang <[email protected]>         2019-02-23 04:48:37 +1100
committer  Brian Behlendorf <[email protected]>   2019-02-22 09:48:37 -0800
commit     8d9e51c084805237a36420e6cdcd8e2e9801a7cf (patch)
tree       250c85d7ccbcf69cbb09b0ce1cddc2c188be4a68 /module
parent     f8bb2a7e0c889d6c5cf7655d698869b3a32c2bb0 (diff)
Fix dnode_hold_impl() soft lockup
Soft lockups could happen when multiple threads try
to get the zrl on the same dnode handle in order to allocate
and initialize the dnode marked as DN_SLOT_ALLOCATED.

Don't loop from the beginning when we can't get the zrl;
otherwise we keep increasing the zrl refcount and nobody
can actually lock it.
Reviewed-by: Tom Caputi <[email protected]>
Reviewed-by: Brian Behlendorf <[email protected]>
Signed-off-by: Li Dongyang <[email protected]>
Closes #8433
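
The failure mode is easier to see with the zrl ("zero reference lock") semantics in mind: exclusive entry on a dnode handle can only succeed while the handle's reference count is zero. The sketch below models that with toy names (toy_zrl_t, toy_zrl_add(), toy_zrl_tryenter()); it is illustrative only, not the real zrl_* API.

/*
 * Simplified model of the locking pattern (toy names, not the real
 * zrl_* API): a "zero reference lock" can be entered exclusively only
 * while its reference count is zero.
 */
#include <stdio.h>
#include <stdbool.h>

typedef struct {
    int refcount;   /* readers currently holding the slot */
    bool locked;    /* exclusive owner present */
} toy_zrl_t;

/* Take a reader hold; always succeeds in this model. */
static void
toy_zrl_add(toy_zrl_t *z)
{
    z->refcount++;
}

static void
toy_zrl_remove(toy_zrl_t *z)
{
    z->refcount--;
}

/* Exclusive entry succeeds only when nobody holds a reference. */
static bool
toy_zrl_tryenter(toy_zrl_t *z)
{
    if (z->refcount == 0 && !z->locked) {
        z->locked = true;
        return (true);
    }
    return (false);
}

int
main(void)
{
    toy_zrl_t slot = { 0, false };

    /*
     * Old pattern: on a failed tryenter the code went back to the top
     * of the loop and took another hold before checking again.  With
     * several threads doing that at once, somebody's hold is almost
     * always outstanding, so the refcount never drains to zero and
     * every tryenter fails, which is the reported soft lockup.
     */
    toy_zrl_add(&slot);                     /* peek at the slot */
    printf("tryenter with a hold outstanding: %d\n",
        toy_zrl_tryenter(&slot));           /* prints 0, cannot lock */
    toy_zrl_remove(&slot);

    /*
     * Fixed pattern: release the hold once, then retry only the
     * tryenter (yielding the CPU between attempts) so the count is
     * allowed to reach zero.
     */
    printf("tryenter after releasing the hold: %d\n",
        toy_zrl_tryenter(&slot));           /* prints 1, lock acquired */
    return (0);
}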
Diffstat (limited to 'module')
-rw-r--r--  module/zfs/dnode.c  108
1 file changed, 52 insertions(+), 56 deletions(-)
diff --git a/module/zfs/dnode.c b/module/zfs/dnode.c
index b7a7f90cf..9a19ddabb 100644
--- a/module/zfs/dnode.c
+++ b/module/zfs/dnode.c
@@ -1153,8 +1153,10 @@ dnode_free_interior_slots(dnode_t *dn)
     ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK);
 
-    while (!dnode_slots_tryenter(children, idx, slots))
+    while (!dnode_slots_tryenter(children, idx, slots)) {
         DNODE_STAT_BUMP(dnode_free_interior_lock_retry);
+        cond_resched();
+    }
 
     dnode_set_slots(children, idx, slots, DN_SLOT_FREE);
     dnode_slots_rele(children, idx, slots);
@@ -1401,34 +1403,30 @@ dnode_hold_impl(objset_t *os, uint64_t object, int flag, int slots,
     }
 
     ASSERT(dnc->dnc_count == epb);
-    dn = DN_SLOT_UNINIT;
 
     if (flag & DNODE_MUST_BE_ALLOCATED) {
         slots = 1;
 
-        while (dn == DN_SLOT_UNINIT) {
-            dnode_slots_hold(dnc, idx, slots);
-            dnh = &dnc->dnc_children[idx];
-
-            if (DN_SLOT_IS_PTR(dnh->dnh_dnode)) {
-                dn = dnh->dnh_dnode;
-                break;
-            } else if (dnh->dnh_dnode == DN_SLOT_INTERIOR) {
-                DNODE_STAT_BUMP(dnode_hold_alloc_interior);
-                dnode_slots_rele(dnc, idx, slots);
-                dbuf_rele(db, FTAG);
-                return (SET_ERROR(EEXIST));
-            } else if (dnh->dnh_dnode != DN_SLOT_ALLOCATED) {
-                DNODE_STAT_BUMP(dnode_hold_alloc_misses);
-                dnode_slots_rele(dnc, idx, slots);
-                dbuf_rele(db, FTAG);
-                return (SET_ERROR(ENOENT));
-            }
+        dnode_slots_hold(dnc, idx, slots);
+        dnh = &dnc->dnc_children[idx];
 
+        if (DN_SLOT_IS_PTR(dnh->dnh_dnode)) {
+            dn = dnh->dnh_dnode;
+        } else if (dnh->dnh_dnode == DN_SLOT_INTERIOR) {
+            DNODE_STAT_BUMP(dnode_hold_alloc_interior);
             dnode_slots_rele(dnc, idx, slots);
-            if (!dnode_slots_tryenter(dnc, idx, slots)) {
+            dbuf_rele(db, FTAG);
+            return (SET_ERROR(EEXIST));
+        } else if (dnh->dnh_dnode != DN_SLOT_ALLOCATED) {
+            DNODE_STAT_BUMP(dnode_hold_alloc_misses);
+            dnode_slots_rele(dnc, idx, slots);
+            dbuf_rele(db, FTAG);
+            return (SET_ERROR(ENOENT));
+        } else {
+            dnode_slots_rele(dnc, idx, slots);
+            while (!dnode_slots_tryenter(dnc, idx, slots)) {
                 DNODE_STAT_BUMP(dnode_hold_alloc_lock_retry);
-                continue;
+                cond_resched();
             }
 
             /*
@@ -1463,45 +1461,43 @@ dnode_hold_impl(objset_t *os, uint64_t object, int flag, int slots,
             return (SET_ERROR(ENOSPC));
         }
 
-        while (dn == DN_SLOT_UNINIT) {
-            dnode_slots_hold(dnc, idx, slots);
-
-            if (!dnode_check_slots_free(dnc, idx, slots)) {
-                DNODE_STAT_BUMP(dnode_hold_free_misses);
-                dnode_slots_rele(dnc, idx, slots);
-                dbuf_rele(db, FTAG);
-                return (SET_ERROR(ENOSPC));
-            }
+        dnode_slots_hold(dnc, idx, slots);
+        if (!dnode_check_slots_free(dnc, idx, slots)) {
+            DNODE_STAT_BUMP(dnode_hold_free_misses);
             dnode_slots_rele(dnc, idx, slots);
-            if (!dnode_slots_tryenter(dnc, idx, slots)) {
-                DNODE_STAT_BUMP(dnode_hold_free_lock_retry);
-                continue;
-            }
+            dbuf_rele(db, FTAG);
+            return (SET_ERROR(ENOSPC));
+        }
 
-            if (!dnode_check_slots_free(dnc, idx, slots)) {
-                DNODE_STAT_BUMP(dnode_hold_free_lock_misses);
-                dnode_slots_rele(dnc, idx, slots);
-                dbuf_rele(db, FTAG);
-                return (SET_ERROR(ENOSPC));
-            }
+        dnode_slots_rele(dnc, idx, slots);
+        while (!dnode_slots_tryenter(dnc, idx, slots)) {
+            DNODE_STAT_BUMP(dnode_hold_free_lock_retry);
+            cond_resched();
+        }
 
-            /*
-             * Allocated but otherwise free dnodes which would
-             * be in the interior of a multi-slot dnodes need
-             * to be freed. Single slot dnodes can be safely
-             * re-purposed as a performance optimization.
-             */
-            if (slots > 1)
-                dnode_reclaim_slots(dnc, idx + 1, slots - 1);
+        if (!dnode_check_slots_free(dnc, idx, slots)) {
+            DNODE_STAT_BUMP(dnode_hold_free_lock_misses);
+            dnode_slots_rele(dnc, idx, slots);
+            dbuf_rele(db, FTAG);
+            return (SET_ERROR(ENOSPC));
+        }
 
-            dnh = &dnc->dnc_children[idx];
-            if (DN_SLOT_IS_PTR(dnh->dnh_dnode)) {
-                dn = dnh->dnh_dnode;
-            } else {
-                dn = dnode_create(os, dn_block + idx, db,
-                    object, dnh);
-            }
+        /*
+         * Allocated but otherwise free dnodes which would
+         * be in the interior of a multi-slot dnodes need
+         * to be freed. Single slot dnodes can be safely
+         * re-purposed as a performance optimization.
+         */
+        if (slots > 1)
+            dnode_reclaim_slots(dnc, idx + 1, slots - 1);
+
+        dnh = &dnc->dnc_children[idx];
+        if (DN_SLOT_IS_PTR(dnh->dnh_dnode)) {
+            dn = dnh->dnh_dnode;
+        } else {
+            dn = dnode_create(os, dn_block + idx, db,
+                object, dnh);
         }
     }
 
     mutex_enter(&dn->dn_mtx);
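
For a runnable illustration of the retry idiom the patch settles on (retry only the tryenter and yield between attempts), the standalone userspace sketch below uses sched_yield() in place of the kernel's cond_resched(); slot_tryenter(), reader(), and the toy reference count are made-up stand-ins, not ZFS code.

/*
 * Standalone sketch of the fixed retry idiom: keep retrying the
 * exclusive entry itself and yield the CPU between attempts so the
 * holders can drain their references.
 */
#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <stdbool.h>

static pthread_mutex_t slot_lock = PTHREAD_MUTEX_INITIALIZER;
static int slot_refcount;       /* readers peeking at the slot */
static bool slot_owned;         /* exclusive owner present */

/* Succeeds only when no references are outstanding. */
static bool
slot_tryenter(void)
{
    bool ok = false;

    pthread_mutex_lock(&slot_lock);
    if (slot_refcount == 0 && !slot_owned) {
        slot_owned = true;
        ok = true;
    }
    pthread_mutex_unlock(&slot_lock);
    return (ok);
}

/* Briefly take and drop a reference many times, like peeking threads. */
static void *
reader(void *arg)
{
    (void) arg;
    for (int i = 0; i < 1000; i++) {
        pthread_mutex_lock(&slot_lock);
        slot_refcount++;
        pthread_mutex_unlock(&slot_lock);
        sched_yield();
        pthread_mutex_lock(&slot_lock);
        slot_refcount--;
        pthread_mutex_unlock(&slot_lock);
    }
    return (NULL);
}

int
main(void)
{
    pthread_t tid;
    long retries = 0;

    pthread_create(&tid, NULL, reader, NULL);

    /* Fixed pattern: retry the tryenter itself, yielding between tries. */
    while (!slot_tryenter()) {
        retries++;
        sched_yield();          /* stands in for cond_resched() */
    }
    printf("acquired exclusively after %ld retries\n", retries);

    pthread_join(tid, NULL);
    return (0);
}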