author     Brian Behlendorf <[email protected]>    2018-01-08 09:52:36 -0800
committer  GitHub <[email protected]>               2018-01-08 09:52:36 -0800
commit     0873bb6337452e3e028e40f5dad945b30deab185 (patch)
tree       13f3528a48e2b1dcdbb9804689217019e8d44cda /module/zfs/arc.c
parent     390d679acdfa6a2498280a4dcd33b7600ace27ce (diff)
Fix ARC hit rate
When the compressed ARC feature was added in commit d3c2ae1
the method of reference counting in the ARC was modified. As
part of this accounting change the arc_buf_add_ref() function
was removed entirely.
This would have been fine, but the arc_buf_add_ref() function
served a second undocumented purpose of updating the ARC access
information when taking a hold on a dbuf. Without this logic
in place a cached dbuf would not migrate its associated
arc_buf_hdr_t to the MFU list. This would negatively impact
the ARC hit rate, particularly on systems with a small ARC.
This change reinstates the missing call to arc_access() from
dbuf_hold() by implementing a new arc_buf_access() function.
Reviewed-by: Giuseppe Di Natale <[email protected]>
Reviewed-by: Tony Hutter <[email protected]>
Reviewed-by: Tim Chase <[email protected]>
Reviewed-by: George Wilson <[email protected]>
Reviewed-by: George Melikov <[email protected]>
Signed-off-by: Brian Behlendorf <[email protected]>
Closes #6171
Closes #6852
Closes #6989
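For context, the fix only takes effect if dbuf_hold() actually invokes the new function; the companion dbuf.c change is not shown below, since the diffstat is limited to module/zfs/arc.c. The following is a minimal sketch of what that call site could look like. The surrounding dbuf_hold_impl() structure and the variable name are assumptions for illustration, not copied from the actual change.

/*
 * Hypothetical call site in the dbuf hold path (dbuf.c, outside this
 * diffstat).  When a hold is satisfied from the dbuf cache, the ARC
 * no longer observes the access, so the hold path notifies it
 * explicitly.  "db" is an assumed dmu_buf_impl_t pointer.
 */
if (db->db_buf != NULL) {
	/* Record the hit so the header can migrate from MRU to MFU. */
	arc_buf_access(db->db_buf);
}

The corresponding prototype, void arc_buf_access(arc_buf_t *buf);, would also need to be visible to dbuf.c, presumably via include/sys/arc.h.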
Diffstat (limited to 'module/zfs/arc.c')
-rw-r--r--   module/zfs/arc.c   52
1 file changed, 51 insertions, 1 deletion
diff --git a/module/zfs/arc.c b/module/zfs/arc.c
index 476351eb4..45b0abe7f 100644
--- a/module/zfs/arc.c
+++ b/module/zfs/arc.c
@@ -449,8 +449,13 @@ typedef struct arc_stats {
 	 */
 	kstat_named_t arcstat_mutex_miss;
 	/*
+	 * Number of buffers skipped when updating the access state due to the
+	 * header having already been released after acquiring the hash lock.
+	 */
+	kstat_named_t arcstat_access_skip;
+	/*
 	 * Number of buffers skipped because they have I/O in progress, are
-	 * indrect prefetch buffers that have not lived long enough, or are
+	 * indirect prefetch buffers that have not lived long enough, or are
 	 * not from the spa we're trying to evict from.
 	 */
 	kstat_named_t arcstat_evict_skip;
@@ -688,6 +693,7 @@ static arc_stats_t arc_stats = {
 	{ "mfu_ghost_hits",		KSTAT_DATA_UINT64 },
 	{ "deleted",			KSTAT_DATA_UINT64 },
 	{ "mutex_miss",			KSTAT_DATA_UINT64 },
+	{ "access_skip",		KSTAT_DATA_UINT64 },
 	{ "evict_skip",			KSTAT_DATA_UINT64 },
 	{ "evict_not_enough",		KSTAT_DATA_UINT64 },
 	{ "evict_l2_cached",		KSTAT_DATA_UINT64 },
@@ -5611,6 +5617,50 @@ arc_access(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
 	}
 }
 
+/*
+ * This routine is called by dbuf_hold() to update the arc_access() state
+ * which otherwise would be skipped for entries in the dbuf cache.
+ */
+void
+arc_buf_access(arc_buf_t *buf)
+{
+	mutex_enter(&buf->b_evict_lock);
+	arc_buf_hdr_t *hdr = buf->b_hdr;
+
+	/*
+	 * Avoid taking the hash_lock when possible as an optimization.
+	 * The header must be checked again under the hash_lock in order
+	 * to handle the case where it is concurrently being released.
+	 */
+	if (hdr->b_l1hdr.b_state == arc_anon || HDR_EMPTY(hdr)) {
+		mutex_exit(&buf->b_evict_lock);
+		return;
+	}
+
+	kmutex_t *hash_lock = HDR_LOCK(hdr);
+	mutex_enter(hash_lock);
+
+	if (hdr->b_l1hdr.b_state == arc_anon || HDR_EMPTY(hdr)) {
+		mutex_exit(hash_lock);
+		mutex_exit(&buf->b_evict_lock);
+		ARCSTAT_BUMP(arcstat_access_skip);
+		return;
+	}
+
+	mutex_exit(&buf->b_evict_lock);
+
+	ASSERT(hdr->b_l1hdr.b_state == arc_mru ||
+	    hdr->b_l1hdr.b_state == arc_mfu);
+
+	DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
+	arc_access(hdr, hash_lock);
+	mutex_exit(hash_lock);
+
+	ARCSTAT_BUMP(arcstat_hits);
+	ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr) && !HDR_PRESCIENT_PREFETCH(hdr),
+	    demand, prefetch, !HDR_ISTYPE_METADATA(hdr), data, metadata, hits);
+}
+
 /* a generic arc_read_done_func_t which you can use */
 /* ARGSUSED */
 void
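As a usage note, the new access_skip counter (and the hits counter it complements) is exported through the arcstats kstat, which ZFS on Linux typically exposes at /proc/spl/kstat/zfs/arcstats. The small reader below is illustrative only; the /proc path is an assumption about the runtime environment, not part of this commit.

/*
 * Illustrative reader: print the "access_skip" and "hits" lines from
 * the arcstats kstat.  Assumes the ZFS on Linux kstat path
 * /proc/spl/kstat/zfs/arcstats is present on the running system.
 */
#include <stdio.h>
#include <string.h>

int
main(void)
{
	FILE *fp = fopen("/proc/spl/kstat/zfs/arcstats", "r");
	char line[256];

	if (fp == NULL) {
		perror("fopen");
		return (1);
	}
	while (fgets(line, sizeof (line), fp) != NULL) {
		/* Match the counter names at the start of each line. */
		if (strncmp(line, "access_skip", 11) == 0 ||
		    strncmp(line, "hits", 4) == 0)
			fputs(line, stdout);
	}
	fclose(fp);
	return (0);
}

A rising hits count relative to access_skip after this change is consistent with cached dbufs once again promoting their headers toward the MFU list.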