| author | Tim Chase <[email protected]> | 2015-05-10 18:34:05 -0500 |
|---|---|---|
| committer | Brian Behlendorf <[email protected]> | 2015-06-11 10:27:24 -0700 |
| commit | 7807028ccd0a36038aa7b996daacd79d4c9c49cb | |
| tree | a0e15f304807aa9a690c495be7bafb502c57cb65 | |
| parent | 44de2f02d6b1365240f66fd5f3794ccbebd61a60 | |
Revert "arc_evict, arc_evict_ghost: reduce stack usage using kmem_zalloc"
This reverts commit 16fcdea36340c658b4557fd34a74915fd618f7a6 in preparation
for the illumos 5497 "lock contention on arcs_mtx" patch which eliminates
"marker" within the ARC code.
Signed-off-by: Tim Chase <[email protected]>
Signed-off-by: Brian Behlendorf <[email protected]>
-rw-r--r-- | module/zfs/arc.c | 32 |
1 file changed, 12 insertions(+), 20 deletions(-)
```diff
diff --git a/module/zfs/arc.c b/module/zfs/arc.c
index 1699ea7e7..9882295b9 100644
--- a/module/zfs/arc.c
+++ b/module/zfs/arc.c
@@ -1825,15 +1825,13 @@ arc_evict(arc_state_t *state, uint64_t spa, int64_t bytes, boolean_t recycle,
 	kmutex_t *hash_lock;
 	boolean_t have_lock;
 	void *stolen = NULL;
-	arc_buf_hdr_t *marker;
+	arc_buf_hdr_t marker = {{{ 0 }}};
 	int count = 0;
 
 	ASSERT(state == arc_mru || state == arc_mfu);
 
 	evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;
 
-	marker = kmem_zalloc(sizeof (arc_buf_hdr_t), KM_SLEEP);
-
 top:
 	mutex_enter(&state->arcs_mtx);
 	mutex_enter(&evicted_state->arcs_mtx);
@@ -1868,14 +1866,14 @@ top:
 		 * the hot code path, so don't sleep.
 		 */
 		if (!recycle && count++ > arc_evict_iterations) {
-			list_insert_after(list, ab, marker);
+			list_insert_after(list, ab, &marker);
 			mutex_exit(&evicted_state->arcs_mtx);
 			mutex_exit(&state->arcs_mtx);
 			kpreempt(KPREEMPT_SYNC);
 			mutex_enter(&state->arcs_mtx);
 			mutex_enter(&evicted_state->arcs_mtx);
-			ab_prev = list_prev(list, marker);
-			list_remove(list, marker);
+			ab_prev = list_prev(list, &marker);
+			list_remove(list, &marker);
 			count = 0;
 			continue;
 		}
@@ -1959,8 +1957,6 @@ top:
 		goto top;
 	}
 
-	kmem_free(marker, sizeof (arc_buf_hdr_t));
-
 	if (bytes_evicted < bytes)
 		dprintf("only evicted %lld bytes from %x\n",
 		    (longlong_t)bytes_evicted, state->arcs_state);
@@ -1990,7 +1986,7 @@ arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes,
     arc_buf_contents_t type)
 {
 	arc_buf_hdr_t *ab, *ab_prev;
-	arc_buf_hdr_t *marker;
+	arc_buf_hdr_t marker;
 	list_t *list = &state->arcs_list[type];
 	kmutex_t *hash_lock;
 	uint64_t bytes_deleted = 0;
@@ -1998,9 +1994,7 @@ arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes,
 	int count = 0;
 
 	ASSERT(GHOST_STATE(state));
-
-	marker = kmem_zalloc(sizeof (arc_buf_hdr_t), KM_SLEEP);
-
+	bzero(&marker, sizeof (marker));
 top:
 	mutex_enter(&state->arcs_mtx);
 	for (ab = list_tail(list); ab; ab = ab_prev) {
@@ -2026,12 +2020,12 @@ top:
 		 * before reacquiring the lock.
 		 */
 		if (count++ > arc_evict_iterations) {
-			list_insert_after(list, ab, marker);
+			list_insert_after(list, ab, &marker);
 			mutex_exit(&state->arcs_mtx);
 			kpreempt(KPREEMPT_SYNC);
 			mutex_enter(&state->arcs_mtx);
-			ab_prev = list_prev(list, marker);
-			list_remove(list, marker);
+			ab_prev = list_prev(list, &marker);
+			list_remove(list, &marker);
 			count = 0;
 			continue;
 		}
@@ -2063,13 +2057,13 @@ top:
 			 * hash lock to become available. Once its
 			 * available, restart from where we left off.
 			 */
-			list_insert_after(list, ab, marker);
+			list_insert_after(list, ab, &marker);
 			mutex_exit(&state->arcs_mtx);
 			mutex_enter(hash_lock);
 			mutex_exit(hash_lock);
 			mutex_enter(&state->arcs_mtx);
-			ab_prev = list_prev(list, marker);
-			list_remove(list, marker);
+			ab_prev = list_prev(list, &marker);
+			list_remove(list, &marker);
 		} else {
 			bufs_skipped += 1;
 		}
@@ -2082,8 +2076,6 @@ top:
 		goto top;
 	}
 
-	kmem_free(marker, sizeof (arc_buf_hdr_t));
-
 	if (bufs_skipped) {
 		ARCSTAT_INCR(arcstat_mutex_miss, bufs_skipped);
 		ASSERT(bytes >= 0);
```
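For readers unfamiliar with the pattern being restored: both hunks go back to keeping the list "marker" on the stack instead of allocating it with kmem_zalloc()/kmem_free(). The marker is a dummy header inserted after the current element so the ARC mutex can be dropped (to yield the CPU or to wait on a hash lock) and the walk can then resume exactly where it left off. Below is a minimal user-space sketch of that technique; node_t, the list_*() helpers, walk_list() and main() are hypothetical stand-ins modelled loosely on the illumos list(9F) interfaces, not ZFS code.

```c
/*
 * Hypothetical user-space sketch of the on-stack list-marker pattern
 * restored by this revert.  Not ZFS code: node_t, the list_*() helpers
 * and walk_list() are illustrative stand-ins only.
 */
#include <pthread.h>
#include <stdio.h>
#include <strings.h>

typedef struct node {
	struct node *prev, *next;
	int data;
} node_t;

typedef struct list {
	node_t head;			/* sentinel: head.prev is the tail */
	pthread_mutex_t lock;
} list_t;

static void list_init(list_t *l)
{
	l->head.prev = l->head.next = &l->head;
	(void) pthread_mutex_init(&l->lock, NULL);
}

/* Insert 'n' immediately after 'at' (toward the tail). */
static void list_insert_after(list_t *l, node_t *at, node_t *n)
{
	(void) l;
	n->prev = at;
	n->next = at->next;
	at->next->prev = n;
	at->next = n;
}

static void list_remove(list_t *l, node_t *n)
{
	(void) l;
	n->prev->next = n->next;
	n->next->prev = n->prev;
	n->prev = n->next = NULL;
}

static node_t *list_prev(list_t *l, node_t *n)
{
	return (n->prev == &l->head ? NULL : n->prev);
}

static node_t *list_tail(list_t *l)
{
	return (list_prev(l, &l->head));
}

/* Walk tail-to-head, dropping the lock every 'batch' visited nodes. */
static void walk_list(list_t *l, int batch)
{
	node_t marker;			/* lives on the stack, as in the revert */
	node_t *n, *n_prev;
	int count = 0;

	bzero(&marker, sizeof (marker));

	pthread_mutex_lock(&l->lock);
	for (n = list_tail(l); n != NULL; n = n_prev) {
		n_prev = list_prev(l, n);

		if (count++ > batch) {
			/* Hold our place with the marker, then yield the lock. */
			list_insert_after(l, n, &marker);
			pthread_mutex_unlock(&l->lock);
			/* ... other threads may update the list here ... */
			pthread_mutex_lock(&l->lock);
			n_prev = list_prev(l, &marker);
			list_remove(l, &marker);
			count = 0;
			continue;	/* revisit 'n' on the next iteration */
		}
		printf("visiting %d\n", n->data);
	}
	pthread_mutex_unlock(&l->lock);
}

int main(void)
{
	list_t l;
	node_t nodes[8];

	list_init(&l);
	for (int i = 0; i < 8; i++) {
		nodes[i].data = i + 1;
		list_insert_after(&l, &l.head, &nodes[i]);
	}
	walk_list(&l, 3);		/* visits nodes 1..8 in tail-to-head order */
	return (0);
}
```

Because the marker's lifetime is strictly bounded by the walk, a stack allocation (zeroed with bzero() or the {{{ 0 }}} initializer seen in the diff) is sufficient. The reverted commit had moved the marker to the heap to reduce stack usage; this revert restores the simpler stack-allocated form ahead of the illumos 5497 rework.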