author     Prakash Surya <[email protected]>    2015-01-12 19:52:19 -0800
committer  Brian Behlendorf <[email protected]>  2015-06-11 10:27:25 -0700
commit     ca0bf58d65f77e944b9905571df9a2eae647aeca (patch)
tree       1914cd88ab7e44edb7ad920239b0bd0e4338e26c /module/zfs
parent     b9541d6b7d765883f8a5fe7c1bde74df5c256ff6 (diff)
Illumos 5497 - lock contention on arcs_mtx
Reviewed by: George Wilson <[email protected]>
Reviewed by: Matthew Ahrens <[email protected]>
Reviewed by: Richard Elling <[email protected]>
Approved by: Dan McDonald <[email protected]>

Porting notes and other significant code changes:

The illumos 5368 patch (ARC should cache more metadata), which was never
picked up by ZoL, is mostly reverted by this patch. Since ZoL relies on the
kernel asynchronously calling the shrinker to actually reap memory, the
shrinker wakes up arc_reclaim_waiters_cv every time it runs.

The arc_adapt_thread() function no longer calls arc_do_user_evicts() since
the newly-added arc_user_evicts_thread() calls it periodically.

Notable ZoL commits which conflicted with this patch, or whose effects are
either duplicated or undone by it:

302f753 - Integrate ARC more tightly with Linux
39e055c - Adjust arc_p based on "bytes" in arc_shrink
f521ce1 - Allow "arc_p" to drop to zero or grow to "arc_c"
77765b5 - Remove "arc_meta_used" from arc_adjust calculation
94520ca - Prune metadata from ghost lists in arc_adjust_meta

Trace support for multilist_insert() and multilist_remove() has been added
and produces the following output:

fio-12498 [077] .... 112936.448324: zfs_multilist__insert: ml { offset 240 numsublists 80 sublistidx 63 }
fio-12498 [077] .... 112936.448347: zfs_multilist__remove: ml { offset 240 numsublists 80 sublistidx 29 }

The following arcstats have been removed:

recycle_miss - Used by arcstat.py and arc_summary.py, both of which have
been updated appropriately.

l2_writes_hdr_miss

The following arcstats have been added:

evict_not_enough - Number of times arc_evict_state() was unable to evict
enough buffers to reach its target amount.

evict_l2_skip - Number of times arc_evict_hdr() skipped eviction because
the header was being written to the l2arc.

l2_writes_lock_retry - Replaces l2_writes_hdr_miss. Number of times
l2arc_write_done() failed to acquire hash_lock (and retries).

arc_meta_min - Shows the value of the zfs_arc_meta_min module parameter
(see below).

The "index" column of the "dbuf" kstat has been removed since it doesn't
have a direct analog in the new multilist scheme. Additional
multilist-related stats could be added in the future but would likely
require extensions to the multilist API.

The following module parameters have been added:

zfs_arc_evict_batch_limit - Number of ARC headers to free per sub-list
before moving on to the next sub-list.

zfs_arc_meta_min - Enforce a floor on the amount of metadata in the ARC.

zfs_arc_num_sublists_per_state - Number of multilist sub-lists per ARC
state.

zfs_arc_overflow_shift - Controls the amount by which the ARC must exceed
the target size to be considered "overflowing".

Ported-by: Tim Chase <[email protected]>
Signed-off-by: Brian Behlendorf <[email protected]>
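Editor's note: the central change behind this patch is replacing the single
arcs_mtx protecting each ARC state list with a multilist, i.e. the list is
split into zfs_arc_num_sublists_per_state sublists, each guarded by its own
lock, so concurrent inserts and removes rarely contend. The sketch below
only illustrates that idea; the structure and function names are simplified
stand-ins, not the multilist.c API added by this commit.

/*
 * Simplified illustration of the multilist idea (NOT the multilist.c
 * interface): one logical list is split across several sublists, each
 * with its own lock, so concurrent threads usually lock different
 * sublists instead of serializing on a single arcs_mtx.
 */
#include <pthread.h>

struct node {
	struct node	*n_next;
	unsigned long	n_key;		/* e.g. derived from the buffer header */
};

struct sublist {
	pthread_mutex_t	sl_lock;	/* per-sublist lock */
	struct node	*sl_head;
};

struct multilist {
	unsigned int	ml_num_sublists;
	struct sublist	*ml_sublists;	/* array of ml_num_sublists entries */
};

/* Choose a sublist for an object, e.g. by hashing its key. */
static struct sublist *
sublist_for(struct multilist *ml, struct node *n)
{
	return (&ml->ml_sublists[n->n_key % ml->ml_num_sublists]);
}

static void
multilist_insert_sketch(struct multilist *ml, struct node *n)
{
	struct sublist *sl = sublist_for(ml, n);

	pthread_mutex_lock(&sl->sl_lock);	/* only this sublist is held */
	n->n_next = sl->sl_head;
	sl->sl_head = n;
	pthread_mutex_unlock(&sl->sl_lock);
}

Eviction then works one sublist at a time (up to zfs_arc_evict_batch_limit
headers per sublist) rather than holding one global list lock throughout.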
Diffstat (limited to 'module/zfs')

-rw-r--r--  module/zfs/Makefile.in        1
-rw-r--r--  module/zfs/arc.c           1859
-rw-r--r--  module/zfs/dbuf_stats.c       7
-rw-r--r--  module/zfs/dsl_pool.c         9
-rw-r--r--  module/zfs/multilist.c      375
-rw-r--r--  module/zfs/trace.c            3
-rw-r--r--  module/zfs/zio_inject.c       6

7 files changed, 1593 insertions, 667 deletions
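Editor's note: for a sense of how zfs_arc_overflow_shift is consumed, the
patch adds arc_is_overflowing(), which arc_get_data_buf() uses to decide
whether an allocating thread must wait on arc_reclaim_waiters_cv for the
reclaim thread to evict enough. Below is a minimal sketch assuming the
overflow margin is simply arc_c shifted right by the tunable; the in-tree
function adds further details (such as a floor on the margin), and the
function name and extern declarations here are illustrative only.

/*
 * Minimal sketch of the overflow test gated by zfs_arc_overflow_shift.
 * The larger the shift, the smaller the margin by which arc_size may
 * exceed arc_c before allocating threads are made to wait.
 */
#include <stdint.h>
#include <stdbool.h>

extern uint64_t arc_size;		/* current ARC size */
extern uint64_t arc_c;			/* target ARC size */
extern int zfs_arc_overflow_shift;	/* default 8 in this patch */

static bool
arc_is_overflowing_sketch(void)
{
	uint64_t overflow = arc_c >> zfs_arc_overflow_shift;

	return (arc_size >= arc_c + overflow);
}

While this condition holds, allocating threads sleep; arc_evict_state_impl()
signals one waiter per evicted header once the ARC is no longer
overflowing (see the arc_reclaim_waiters_cv hunks in arc.c below).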
diff --git a/module/zfs/Makefile.in b/module/zfs/Makefile.in
index 954841f33..e5753ae81 100644
--- a/module/zfs/Makefile.in
+++ b/module/zfs/Makefile.in
@@ -37,6 +37,7 @@ $(MODULE)-objs += @top_srcdir@/module/zfs/gzip.o
$(MODULE)-objs += @top_srcdir@/module/zfs/lzjb.o
$(MODULE)-objs += @top_srcdir@/module/zfs/lz4.o
$(MODULE)-objs += @top_srcdir@/module/zfs/metaslab.o
+$(MODULE)-objs += @top_srcdir@/module/zfs/multilist.o
$(MODULE)-objs += @top_srcdir@/module/zfs/range_tree.o
$(MODULE)-objs += @top_srcdir@/module/zfs/refcount.o
$(MODULE)-objs += @top_srcdir@/module/zfs/rrwlock.o
diff --git a/module/zfs/arc.c b/module/zfs/arc.c
index e69889ab5..67ef87daf 100644
--- a/module/zfs/arc.c
+++ b/module/zfs/arc.c
@@ -135,6 +135,7 @@
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#include <sys/dsl_pool.h>
+#include <sys/multilist.h>
#ifdef _KERNEL
#include <sys/vmsystm.h>
#include <vm/anon.h>
@@ -154,9 +155,14 @@
boolean_t arc_watch = B_FALSE;
#endif
-static kmutex_t arc_reclaim_thr_lock;
-static kcondvar_t arc_reclaim_thr_cv; /* used to signal reclaim thr */
-static uint8_t arc_thread_exit;
+static kmutex_t arc_reclaim_lock;
+static kcondvar_t arc_reclaim_thread_cv;
+static boolean_t arc_reclaim_thread_exit;
+static kcondvar_t arc_reclaim_waiters_cv;
+
+static kmutex_t arc_user_evicts_lock;
+static kcondvar_t arc_user_evicts_cv;
+static boolean_t arc_user_evicts_thread_exit;
/* number of objects to prune from caches when arc_meta_limit is reached */
int zfs_arc_meta_prune = 10000;
@@ -167,14 +173,27 @@ typedef enum arc_reclaim_strategy {
} arc_reclaim_strategy_t;
/*
- * The number of iterations through arc_evict_*() before we
- * drop & reacquire the lock.
+ * The number of headers to evict in arc_evict_state_impl() before
+ * dropping the sublist lock and evicting from another sublist. A lower
+ * value means we're more likely to evict the "correct" header (i.e. the
+ * oldest header in the arc state), but comes with higher overhead
+ * (i.e. more invocations of arc_evict_state_impl()).
*/
-int arc_evict_iterations = 100;
+int zfs_arc_evict_batch_limit = 10;
+
+/*
+ * The number of sublists used for each of the arc state lists. If this
+ * is not set to a suitable value by the user, it will be configured to
+ * the number of CPUs on the system in arc_init().
+ */
+int zfs_arc_num_sublists_per_state = 0;
/* number of seconds before growing cache again */
int zfs_arc_grow_retry = 5;
+/* shift of arc_c for calculating overflow limit in arc_get_data_buf */
+int zfs_arc_overflow_shift = 8;
+
/* disable anon data aggressively growing arc_p */
int zfs_arc_p_aggressive_disable = 1;
@@ -200,6 +219,12 @@ int zfs_disable_dup_eviction = 0;
int zfs_arc_average_blocksize = 8 * 1024; /* 8KB */
/*
+ * minimum lifespan of a prefetch block in clock ticks
+ * (initialized in arc_init())
+ */
+static int arc_min_prefetch_lifespan;
+
+/*
* If this percent of memory is free, don't throttle.
*/
int arc_lotsfree_percent = 10;
@@ -220,6 +245,7 @@ static boolean_t arc_warm;
unsigned long zfs_arc_max = 0;
unsigned long zfs_arc_min = 0;
unsigned long zfs_arc_meta_limit = 0;
+unsigned long zfs_arc_meta_min = 0;
/*
* Limit the number of restarts in arc_adjust_meta()
@@ -250,7 +276,6 @@ typedef struct arc_stats {
kstat_named_t arcstat_mfu_hits;
kstat_named_t arcstat_mfu_ghost_hits;
kstat_named_t arcstat_deleted;
- kstat_named_t arcstat_recycle_miss;
/*
* Number of buffers that could not be evicted because the hash lock
* was held by another thread. The lock may not necessarily be held
@@ -264,9 +289,15 @@ typedef struct arc_stats {
* not from the spa we're trying to evict from.
*/
kstat_named_t arcstat_evict_skip;
+ /*
+ * Number of times arc_evict_state() was unable to evict enough
+ * buffers to reach its target amount.
+ */
+ kstat_named_t arcstat_evict_not_enough;
kstat_named_t arcstat_evict_l2_cached;
kstat_named_t arcstat_evict_l2_eligible;
kstat_named_t arcstat_evict_l2_ineligible;
+ kstat_named_t arcstat_evict_l2_skip;
kstat_named_t arcstat_hash_elements;
kstat_named_t arcstat_hash_elements_max;
kstat_named_t arcstat_hash_collisions;
@@ -305,11 +336,12 @@ typedef struct arc_stats {
kstat_named_t arcstat_l2_writes_sent;
kstat_named_t arcstat_l2_writes_done;
kstat_named_t arcstat_l2_writes_error;
- kstat_named_t arcstat_l2_writes_hdr_miss;
+ kstat_named_t arcstat_l2_writes_lock_retry;
kstat_named_t arcstat_l2_evict_lock_retry;
kstat_named_t arcstat_l2_evict_reading;
kstat_named_t arcstat_l2_evict_l1cached;
kstat_named_t arcstat_l2_free_on_write;
+ kstat_named_t arcstat_l2_cdata_free_on_write;
kstat_named_t arcstat_l2_abort_lowmem;
kstat_named_t arcstat_l2_cksum_bad;
kstat_named_t arcstat_l2_io_error;
@@ -332,6 +364,7 @@ typedef struct arc_stats {
kstat_named_t arcstat_meta_used;
kstat_named_t arcstat_meta_limit;
kstat_named_t arcstat_meta_max;
+ kstat_named_t arcstat_meta_min;
} arc_stats_t;
static arc_stats_t arc_stats = {
@@ -350,12 +383,13 @@ static arc_stats_t arc_stats = {
{ "mfu_hits", KSTAT_DATA_UINT64 },
{ "mfu_ghost_hits", KSTAT_DATA_UINT64 },
{ "deleted", KSTAT_DATA_UINT64 },
- { "recycle_miss", KSTAT_DATA_UINT64 },
{ "mutex_miss", KSTAT_DATA_UINT64 },
{ "evict_skip", KSTAT_DATA_UINT64 },
+ { "evict_not_enough", KSTAT_DATA_UINT64 },
{ "evict_l2_cached", KSTAT_DATA_UINT64 },
{ "evict_l2_eligible", KSTAT_DATA_UINT64 },
{ "evict_l2_ineligible", KSTAT_DATA_UINT64 },
+ { "evict_l2_skip", KSTAT_DATA_UINT64 },
{ "hash_elements", KSTAT_DATA_UINT64 },
{ "hash_elements_max", KSTAT_DATA_UINT64 },
{ "hash_collisions", KSTAT_DATA_UINT64 },
@@ -394,11 +428,12 @@ static arc_stats_t arc_stats = {
{ "l2_writes_sent", KSTAT_DATA_UINT64 },
{ "l2_writes_done", KSTAT_DATA_UINT64 },
{ "l2_writes_error", KSTAT_DATA_UINT64 },
- { "l2_writes_hdr_miss", KSTAT_DATA_UINT64 },
+ { "l2_writes_lock_retry", KSTAT_DATA_UINT64 },
{ "l2_evict_lock_retry", KSTAT_DATA_UINT64 },
{ "l2_evict_reading", KSTAT_DATA_UINT64 },
{ "l2_evict_l1cached", KSTAT_DATA_UINT64 },
{ "l2_free_on_write", KSTAT_DATA_UINT64 },
+ { "l2_cdata_free_on_write", KSTAT_DATA_UINT64 },
{ "l2_abort_lowmem", KSTAT_DATA_UINT64 },
{ "l2_cksum_bad", KSTAT_DATA_UINT64 },
{ "l2_io_error", KSTAT_DATA_UINT64 },
@@ -421,6 +456,7 @@ static arc_stats_t arc_stats = {
{ "arc_meta_used", KSTAT_DATA_UINT64 },
{ "arc_meta_limit", KSTAT_DATA_UINT64 },
{ "arc_meta_max", KSTAT_DATA_UINT64 },
+ { "arc_meta_min", KSTAT_DATA_UINT64 },
};
#define ARCSTAT(stat) (arc_stats.stat.value.ui64)
@@ -486,6 +522,7 @@ static arc_state_t *arc_l2c_only;
#define arc_tempreserve ARCSTAT(arcstat_tempreserve)
#define arc_loaned_bytes ARCSTAT(arcstat_loaned_bytes)
#define arc_meta_limit ARCSTAT(arcstat_meta_limit) /* max size for metadata */
+#define arc_meta_min ARCSTAT(arcstat_meta_min) /* min size for metadata */
#define arc_meta_used ARCSTAT(arcstat_meta_used) /* size of metadata */
#define arc_meta_max ARCSTAT(arcstat_meta_max) /* max size of metadata */
@@ -495,7 +532,6 @@ static arc_state_t *arc_l2c_only;
static list_t arc_prune_list;
static kmutex_t arc_prune_mtx;
static arc_buf_t *arc_eviction_list;
-static kmutex_t arc_eviction_mtx;
static arc_buf_hdr_t arc_eviction_hdr;
#define GHOST_STATE(state) \
@@ -637,8 +673,7 @@ static uint8_t l2arc_thread_exit;
static void arc_get_data_buf(arc_buf_t *);
static void arc_access(arc_buf_hdr_t *, kmutex_t *);
-static int arc_evict_needed(arc_buf_contents_t);
-static void arc_evict_ghost(arc_state_t *, uint64_t, int64_t);
+static boolean_t arc_is_overflowing(void);
static void arc_buf_watch(arc_buf_t *);
static arc_buf_contents_t arc_buf_type(arc_buf_hdr_t *);
@@ -830,6 +865,7 @@ hdr_full_cons(void *vbuf, void *unused, int kmflag)
mutex_init(&hdr->b_l1hdr.b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);
list_link_init(&hdr->b_l1hdr.b_arc_node);
list_link_init(&hdr->b_l2hdr.b_l2node);
+ multilist_link_init(&hdr->b_l1hdr.b_arc_node);
arc_space_consume(HDR_FULL_SIZE, ARC_SPACE_HDRS);
return (0);
@@ -874,6 +910,7 @@ hdr_full_dest(void *vbuf, void *unused)
cv_destroy(&hdr->b_l1hdr.b_cv);
refcount_destroy(&hdr->b_l1hdr.b_refcnt);
mutex_destroy(&hdr->b_l1hdr.b_freeze_lock);
+ ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
arc_space_return(HDR_FULL_SIZE, ARC_SPACE_HDRS);
}
@@ -981,18 +1018,31 @@ arc_hdr_realloc(arc_buf_hdr_t *hdr, kmem_cache_t *old, kmem_cache_t *new)
* l2c_only even though it's about to change.
*/
nhdr->b_l1hdr.b_state = arc_l2c_only;
+
+ /* Verify previous threads set to NULL before freeing */
+ ASSERT3P(nhdr->b_l1hdr.b_tmp_cdata, ==, NULL);
} else {
ASSERT(hdr->b_l1hdr.b_buf == NULL);
ASSERT0(hdr->b_l1hdr.b_datacnt);
- ASSERT(!list_link_active(&hdr->b_l1hdr.b_arc_node));
+
/*
- * We might be removing the L1hdr of a buffer which was just
- * written out to L2ARC. If such a buffer is compressed then we
- * need to free its b_tmp_cdata before destroying the header.
+ * If we've reached here, We must have been called from
+ * arc_evict_hdr(), as such we should have already been
+ * removed from any ghost list we were previously on
+ * (which protects us from racing with arc_evict_state),
+ * thus no locking is needed during this check.
*/
- if (hdr->b_l1hdr.b_tmp_cdata != NULL &&
- HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF)
- l2arc_release_cdata_buf(hdr);
+ ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
+
+ /*
+ * A buffer must not be moved into the arc_l2c_only
+ * state if it's not finished being written out to the
+ * l2arc device. Otherwise, the b_l1hdr.b_tmp_cdata field
+ * might try to be accessed, even though it was removed.
+ */
+ VERIFY(!HDR_L2_WRITING(hdr));
+ VERIFY3P(hdr->b_l1hdr.b_tmp_cdata, ==, NULL);
+
nhdr->b_flags &= ~ARC_FLAG_HAS_L1HDR;
}
/*
@@ -1188,14 +1238,13 @@ add_reference(arc_buf_hdr_t *hdr, kmutex_t *hash_lock, void *tag)
(state != arc_anon)) {
/* We don't use the L2-only state list. */
if (state != arc_l2c_only) {
+ arc_buf_contents_t type = arc_buf_type(hdr);
uint64_t delta = hdr->b_size * hdr->b_l1hdr.b_datacnt;
- list_t *list = &state->arcs_list[arc_buf_type(hdr)];
- uint64_t *size = &state->arcs_lsize[arc_buf_type(hdr)];
+ multilist_t *list = &state->arcs_list[type];
+ uint64_t *size = &state->arcs_lsize[type];
+
+ multilist_remove(list, hdr);
- ASSERT(!MUTEX_HELD(&state->arcs_mtx));
- mutex_enter(&state->arcs_mtx);
- ASSERT(list_link_active(&hdr->b_l1hdr.b_arc_node));
- list_remove(list, hdr);
if (GHOST_STATE(state)) {
ASSERT0(hdr->b_l1hdr.b_datacnt);
ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
@@ -1204,7 +1253,6 @@ add_reference(arc_buf_hdr_t *hdr, kmutex_t *hash_lock, void *tag)
ASSERT(delta > 0);
ASSERT3U(*size, >=, delta);
atomic_add_64(size, -delta);
- mutex_exit(&state->arcs_mtx);
}
/* remove the prefetch flag if we get a reference */
hdr->b_flags &= ~ARC_FLAG_PREFETCH;
@@ -1227,16 +1275,15 @@ remove_reference(arc_buf_hdr_t *hdr, kmutex_t *hash_lock, void *tag)
*/
if (((cnt = refcount_remove(&hdr->b_l1hdr.b_refcnt, tag)) == 0) &&
(state != arc_anon)) {
- uint64_t *size = &state->arcs_lsize[arc_buf_type(hdr)];
+ arc_buf_contents_t type = arc_buf_type(hdr);
+ multilist_t *list = &state->arcs_list[type];
+ uint64_t *size = &state->arcs_lsize[type];
+
+ multilist_insert(list, hdr);
- ASSERT(!MUTEX_HELD(&state->arcs_mtx));
- mutex_enter(&state->arcs_mtx);
- ASSERT(!list_link_active(&hdr->b_l1hdr.b_arc_node));
- list_insert_head(&state->arcs_list[arc_buf_type(hdr)], hdr);
ASSERT(hdr->b_l1hdr.b_datacnt > 0);
atomic_add_64(size, hdr->b_size *
hdr->b_l1hdr.b_datacnt);
- mutex_exit(&state->arcs_mtx);
}
return (cnt);
}
@@ -1285,26 +1332,11 @@ arc_buf_info(arc_buf_t *ab, arc_buf_info_t *abi, int state_index)
abi->abi_state_type = state ? state->arcs_state : ARC_STATE_ANON;
abi->abi_state_contents = arc_buf_type(hdr);
- abi->abi_state_index = -1;
abi->abi_size = hdr->b_size;
-
- if (l1hdr && state && state_index &&
- list_link_active(&l1hdr->b_arc_node)) {
- list_t *list = &state->arcs_list[arc_buf_type(hdr)];
- arc_buf_hdr_t *h;
-
- mutex_enter(&state->arcs_mtx);
- for (h = list_head(list); h != NULL; h = list_next(list, h)) {
- abi->abi_state_index++;
- if (h == hdr)
- break;
- }
- mutex_exit(&state->arcs_mtx);
- }
}
/*
- * Move the supplied buffer to the indicated state. The mutex
+ * Move the supplied buffer to the indicated state. The hash lock
* for the buffer must be held by the caller.
*/
static void
@@ -1348,15 +1380,10 @@ arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *hdr,
*/
if (refcnt == 0) {
if (old_state != arc_anon && old_state != arc_l2c_only) {
- int use_mutex = !MUTEX_HELD(&old_state->arcs_mtx);
uint64_t *size = &old_state->arcs_lsize[buftype];
- if (use_mutex)
- mutex_enter(&old_state->arcs_mtx);
-
ASSERT(HDR_HAS_L1HDR(hdr));
- ASSERT(list_link_active(&hdr->b_l1hdr.b_arc_node));
- list_remove(&old_state->arcs_list[buftype], hdr);
+ multilist_remove(&old_state->arcs_list[buftype], hdr);
/*
* If prefetching out of the ghost cache,
@@ -1369,12 +1396,8 @@ arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *hdr,
}
ASSERT3U(*size, >=, from_delta);
atomic_add_64(size, -from_delta);
-
- if (use_mutex)
- mutex_exit(&old_state->arcs_mtx);
}
if (new_state != arc_anon && new_state != arc_l2c_only) {
- int use_mutex = !MUTEX_HELD(&new_state->arcs_mtx);
uint64_t *size = &new_state->arcs_lsize[buftype];
/*
@@ -1384,10 +1407,7 @@ arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *hdr,
* beforehand.
*/
ASSERT(HDR_HAS_L1HDR(hdr));
- if (use_mutex)
- mutex_enter(&new_state->arcs_mtx);
-
- list_insert_head(&new_state->arcs_list[buftype], hdr);
+ multilist_insert(&new_state->arcs_list[buftype], hdr);
/* ghost elements have a ghost size */
if (GHOST_STATE(new_state)) {
@@ -1396,9 +1416,6 @@ arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *hdr,
to_delta = hdr->b_size;
}
atomic_add_64(size, to_delta);
-
- if (use_mutex)
- mutex_exit(&new_state->arcs_mtx);
}
}
@@ -1420,8 +1437,8 @@ arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *hdr,
* L2 headers should never be on the L2 state list since they don't
* have L1 headers allocated.
*/
- ASSERT(list_is_empty(&arc_l2c_only->arcs_list[ARC_BUFC_DATA]) &&
- list_is_empty(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA]));
+ ASSERT(multilist_is_empty(&arc_l2c_only->arcs_list[ARC_BUFC_DATA]) &&
+ multilist_is_empty(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA]));
}
void
@@ -1524,6 +1541,7 @@ arc_buf_alloc(spa_t *spa, uint64_t size, void *tag, arc_buf_contents_t type)
hdr->b_l1hdr.b_state = arc_anon;
hdr->b_l1hdr.b_arc_access = 0;
hdr->b_l1hdr.b_datacnt = 1;
+ hdr->b_l1hdr.b_tmp_cdata = NULL;
arc_get_data_buf(buf);
@@ -1654,6 +1672,21 @@ arc_buf_add_ref(arc_buf_t *buf, void* tag)
data, metadata, hits);
}
+static void
+arc_buf_free_on_write(void *data, size_t size,
+ void (*free_func)(void *, size_t))
+{
+ l2arc_data_free_t *df;
+
+ df = kmem_alloc(sizeof (*df), KM_SLEEP);
+ df->l2df_data = data;
+ df->l2df_size = size;
+ df->l2df_func = free_func;
+ mutex_enter(&l2arc_free_on_write_mtx);
+ list_insert_head(l2arc_free_on_write, df);
+ mutex_exit(&l2arc_free_on_write_mtx);
+}
+
/*
* Free the arc data buffer. If it is an l2arc write in progress,
* the buffer is placed on l2arc_free_on_write to be freed later.
@@ -1664,26 +1697,74 @@ arc_buf_data_free(arc_buf_t *buf, void (*free_func)(void *, size_t))
arc_buf_hdr_t *hdr = buf->b_hdr;
if (HDR_L2_WRITING(hdr)) {
- l2arc_data_free_t *df;
- df = kmem_alloc(sizeof (l2arc_data_free_t), KM_SLEEP);
- df->l2df_data = buf->b_data;
- df->l2df_size = hdr->b_size;
- df->l2df_func = free_func;
- mutex_enter(&l2arc_free_on_write_mtx);
- list_insert_head(l2arc_free_on_write, df);
- mutex_exit(&l2arc_free_on_write_mtx);
+ arc_buf_free_on_write(buf->b_data, hdr->b_size, free_func);
ARCSTAT_BUMP(arcstat_l2_free_on_write);
} else {
free_func(buf->b_data, hdr->b_size);
}
}
+static void
+arc_buf_l2_cdata_free(arc_buf_hdr_t *hdr)
+{
+ ASSERT(HDR_HAS_L2HDR(hdr));
+ ASSERT(MUTEX_HELD(&hdr->b_l2hdr.b_dev->l2ad_mtx));
+
+ /*
+ * The b_tmp_cdata field is linked off of the b_l1hdr, so if
+ * that doesn't exist, the header is in the arc_l2c_only state,
+ * and there isn't anything to free (it's already been freed).
+ */
+ if (!HDR_HAS_L1HDR(hdr))
+ return;
+
+ /*
+ * The header isn't being written to the l2arc device, thus it
+ * shouldn't have a b_tmp_cdata to free.
+ */
+ if (!HDR_L2_WRITING(hdr)) {
+ ASSERT3P(hdr->b_l1hdr.b_tmp_cdata, ==, NULL);
+ return;
+ }
+
+ /*
+ * The header does not have compression enabled. This can be due
+ * to the buffer not being compressible, or because we're
+ * freeing the buffer before the second phase of
+ * l2arc_write_buffer() has started (which does the compression
+ * step). In either case, b_tmp_cdata does not point to a
+ * separately compressed buffer, so there's nothing to free (it
+ * points to the same buffer as the arc_buf_t's b_data field).
+ */
+ if (HDR_GET_COMPRESS(hdr) == ZIO_COMPRESS_OFF) {
+ hdr->b_l1hdr.b_tmp_cdata = NULL;
+ return;
+ }
+
+ /*
+ * There's nothing to free since the buffer was all zero's and
+ * compressed to a zero length buffer.
+ */
+ if (HDR_GET_COMPRESS(hdr) == ZIO_COMPRESS_EMPTY) {
+ ASSERT3P(hdr->b_l1hdr.b_tmp_cdata, ==, NULL);
+ return;
+ }
+
+ ASSERT(L2ARC_IS_VALID_COMPRESS(HDR_GET_COMPRESS(hdr)));
+
+ arc_buf_free_on_write(hdr->b_l1hdr.b_tmp_cdata,
+ hdr->b_size, zio_data_buf_free);
+
+ ARCSTAT_BUMP(arcstat_l2_cdata_free_on_write);
+ hdr->b_l1hdr.b_tmp_cdata = NULL;
+}
+
/*
* Free up buf->b_data and if 'remove' is set, then pull the
* arc_buf_t off of the the arc_buf_hdr_t's list and free it.
*/
static void
-arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t remove)
+arc_buf_destroy(arc_buf_t *buf, boolean_t remove)
{
arc_buf_t **bufp;
@@ -1696,17 +1777,17 @@ arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t remove)
arc_cksum_verify(buf);
arc_buf_unwatch(buf);
- if (!recycle) {
- if (type == ARC_BUFC_METADATA) {
- arc_buf_data_free(buf, zio_buf_free);
- arc_space_return(size, ARC_SPACE_META);
- } else {
- ASSERT(type == ARC_BUFC_DATA);
- arc_buf_data_free(buf, zio_data_buf_free);
- arc_space_return(size, ARC_SPACE_DATA);
- }
+ if (type == ARC_BUFC_METADATA) {
+ arc_buf_data_free(buf, zio_buf_free);
+ arc_space_return(size, ARC_SPACE_META);
+ } else {
+ ASSERT(type == ARC_BUFC_DATA);
+ arc_buf_data_free(buf, zio_data_buf_free);
+ arc_space_return(size, ARC_SPACE_DATA);
}
- if (list_link_active(&buf->b_hdr->b_l1hdr.b_arc_node)) {
+
+ /* protected by hash lock, if in the hash table */
+ if (multilist_link_active(&buf->b_hdr->b_l1hdr.b_arc_node)) {
uint64_t *cnt = &state->arcs_lsize[type];
ASSERT(refcount_is_zero(
@@ -1774,6 +1855,12 @@ arc_hdr_destroy(arc_buf_hdr_t *hdr)
list_remove(&l2hdr->b_dev->l2ad_buflist, hdr);
+ /*
+ * We don't want to leak the b_tmp_cdata buffer that was
+ * allocated in l2arc_write_buffers()
+ */
+ arc_buf_l2_cdata_free(hdr);
+
arc_space_return(HDR_L2ONLY_SIZE, ARC_SPACE_L2HDRS);
ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size);
ARCSTAT_INCR(arcstat_l2_asize, -l2hdr->b_asize);
@@ -1797,27 +1884,26 @@ arc_hdr_destroy(arc_buf_hdr_t *hdr)
arc_buf_t *buf = hdr->b_l1hdr.b_buf;
if (buf->b_efunc != NULL) {
- mutex_enter(&arc_eviction_mtx);
+ mutex_enter(&arc_user_evicts_lock);
mutex_enter(&buf->b_evict_lock);
ASSERT(buf->b_hdr != NULL);
- arc_buf_destroy(hdr->b_l1hdr.b_buf, FALSE,
- FALSE);
+ arc_buf_destroy(hdr->b_l1hdr.b_buf, FALSE);
hdr->b_l1hdr.b_buf = buf->b_next;
buf->b_hdr = &arc_eviction_hdr;
buf->b_next = arc_eviction_list;
arc_eviction_list = buf;
mutex_exit(&buf->b_evict_lock);
- mutex_exit(&arc_eviction_mtx);
+ cv_signal(&arc_user_evicts_cv);
+ mutex_exit(&arc_user_evicts_lock);
} else {
- arc_buf_destroy(hdr->b_l1hdr.b_buf, FALSE,
- TRUE);
+ arc_buf_destroy(hdr->b_l1hdr.b_buf, TRUE);
}
}
}
ASSERT3P(hdr->b_hash_next, ==, NULL);
if (HDR_HAS_L1HDR(hdr)) {
- ASSERT(!list_link_active(&hdr->b_l1hdr.b_arc_node));
+ ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
kmem_cache_free(hdr_full_cache, hdr);
} else {
@@ -1843,7 +1929,7 @@ arc_buf_free(arc_buf_t *buf, void *tag)
(void) remove_reference(hdr, hash_lock, tag);
if (hdr->b_l1hdr.b_datacnt > 1) {
- arc_buf_destroy(buf, FALSE, TRUE);
+ arc_buf_destroy(buf, TRUE);
} else {
ASSERT(buf == hdr->b_l1hdr.b_buf);
ASSERT(buf->b_efunc == NULL);
@@ -1857,16 +1943,16 @@ arc_buf_free(arc_buf_t *buf, void *tag)
* this buffer unless the write completes before we finish
* decrementing the reference count.
*/
- mutex_enter(&arc_eviction_mtx);
+ mutex_enter(&arc_user_evicts_lock);
(void) remove_reference(hdr, NULL, tag);
ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
destroy_hdr = !HDR_IO_IN_PROGRESS(hdr);
- mutex_exit(&arc_eviction_mtx);
+ mutex_exit(&arc_user_evicts_lock);
if (destroy_hdr)
arc_hdr_destroy(hdr);
} else {
if (remove_reference(hdr, NULL, tag) > 0)
- arc_buf_destroy(buf, FALSE, TRUE);
+ arc_buf_destroy(buf, TRUE);
else
arc_hdr_destroy(hdr);
}
@@ -1896,7 +1982,7 @@ arc_buf_remove_ref(arc_buf_t *buf, void* tag)
(void) remove_reference(hdr, hash_lock, tag);
if (hdr->b_l1hdr.b_datacnt > 1) {
if (no_callback)
- arc_buf_destroy(buf, FALSE, TRUE);
+ arc_buf_destroy(buf, TRUE);
} else if (no_callback) {
ASSERT(hdr->b_l1hdr.b_buf == buf && buf->b_next == NULL);
ASSERT(buf->b_efunc == NULL);
@@ -1957,353 +2043,390 @@ arc_buf_eviction_needed(arc_buf_t *buf)
}
/*
- * Evict buffers from list until we've removed the specified number of
- * bytes. Move the removed buffers to the appropriate evict state.
- * If the recycle flag is set, then attempt to "recycle" a buffer:
- * - look for a buffer to evict that is `bytes' long.
- * - return the data block from this buffer rather than freeing it.
- * This flag is used by callers that are trying to make space for a
- * new buffer in a full arc cache.
+ * Evict the arc_buf_hdr that is provided as a parameter. The resultant
+ * state of the header is dependent on its state prior to entering this
+ * function. The following transitions are possible:
*
- * This function makes a "best effort". It skips over any buffers
- * it can't get a hash_lock on, and so may not catch all candidates.
- * It may also return without evicting as much space as requested.
+ * - arc_mru -> arc_mru_ghost
+ * - arc_mfu -> arc_mfu_ghost
+ * - arc_mru_ghost -> arc_l2c_only
+ * - arc_mru_ghost -> deleted
+ * - arc_mfu_ghost -> arc_l2c_only
+ * - arc_mfu_ghost -> deleted
*/
-static void *
-arc_evict(arc_state_t *state, uint64_t spa, int64_t bytes, boolean_t recycle,
- arc_buf_contents_t type)
+static int64_t
+arc_evict_hdr(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
{
- arc_state_t *evicted_state;
- uint64_t bytes_evicted = 0, skipped = 0, missed = 0;
- arc_buf_hdr_t *hdr, *hdr_prev = NULL;
- list_t *list = &state->arcs_list[type];
- kmutex_t *hash_lock;
- boolean_t have_lock;
- void *stolen = NULL;
- arc_buf_hdr_t marker = {{{ 0 }}};
- int count = 0;
-
- ASSERT(state == arc_mru || state == arc_mfu);
-
- evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;
+ arc_state_t *evicted_state, *state;
+ int64_t bytes_evicted = 0;
-top:
- /*
- * The ghost list lock must be acquired first in order to prevent
- * a 3 party deadlock:
- *
- * - arc_evict_ghost acquires arc_*_ghost->arcs_mtx, followed by
- * l2ad_mtx in arc_hdr_realloc
- * - l2arc_write_buffers acquires l2ad_mtx, followed by arc_*->arcs_mtx
- * - arc_evict acquires arc_*_ghost->arcs_mtx, followed by
- * arc_*_ghost->arcs_mtx and forms a deadlock cycle.
- *
- * This situation is avoided by acquiring the ghost list lock first.
- */
- mutex_enter(&evicted_state->arcs_mtx);
- mutex_enter(&state->arcs_mtx);
-
- for (hdr = list_tail(list); hdr; hdr = hdr_prev) {
- hdr_prev = list_prev(list, hdr);
- /* prefetch buffers have a minimum lifespan */
- if (HDR_IO_IN_PROGRESS(hdr) ||
- ((hdr->b_flags & (ARC_FLAG_PREFETCH | ARC_FLAG_INDIRECT)) &&
- ddi_get_lbolt() - hdr->b_l1hdr.b_arc_access <
- zfs_arc_min_prefetch_lifespan)) {
- skipped++;
- continue;
- }
- /* "lookahead" for better eviction candidate */
- if (recycle && hdr->b_size != bytes &&
- hdr_prev && hdr_prev->b_size == bytes)
- continue;
+ ASSERT(MUTEX_HELD(hash_lock));
+ ASSERT(HDR_HAS_L1HDR(hdr));
- /* ignore markers */
- if (hdr->b_spa == 0)
- continue;
+ state = hdr->b_l1hdr.b_state;
+ if (GHOST_STATE(state)) {
+ ASSERT(!HDR_IO_IN_PROGRESS(hdr));
+ ASSERT(hdr->b_l1hdr.b_buf == NULL);
/*
- * It may take a long time to evict all the bufs requested.
- * To avoid blocking all arc activity, periodically drop
- * the arcs_mtx and give other threads a chance to run
- * before reacquiring the lock.
- *
- * If we are looking for a buffer to recycle, we are in
- * the hot code path, so don't sleep.
+ * l2arc_write_buffers() relies on a header's L1 portion
+ * (i.e. its b_tmp_cdata field) during its write phase.
+ * Thus, we cannot push a header onto the arc_l2c_only
+ * state (removing its L1 piece) until the header is
+ * done being written to the l2arc.
*/
- if (!recycle && count++ > arc_evict_iterations) {
- list_insert_after(list, hdr, &marker);
- mutex_exit(&state->arcs_mtx);
- mutex_exit(&evicted_state->arcs_mtx);
- kpreempt(KPREEMPT_SYNC);
- mutex_enter(&evicted_state->arcs_mtx);
- mutex_enter(&state->arcs_mtx);
- hdr_prev = list_prev(list, &marker);
- list_remove(list, &marker);
- count = 0;
- continue;
+ if (HDR_HAS_L2HDR(hdr) && HDR_L2_WRITING(hdr)) {
+ ARCSTAT_BUMP(arcstat_evict_l2_skip);
+ return (bytes_evicted);
}
- hash_lock = HDR_LOCK(hdr);
- have_lock = MUTEX_HELD(hash_lock);
- if (have_lock || mutex_tryenter(hash_lock)) {
- ASSERT0(refcount_count(&hdr->b_l1hdr.b_refcnt));
- ASSERT3U(hdr->b_l1hdr.b_datacnt, >, 0);
- while (hdr->b_l1hdr.b_buf) {
- arc_buf_t *buf = hdr->b_l1hdr.b_buf;
- if (!mutex_tryenter(&buf->b_evict_lock)) {
- missed += 1;
- break;
- }
- if (buf->b_data != NULL) {
- bytes_evicted += hdr->b_size;
- if (recycle &&
- arc_buf_type(hdr) == type &&
- hdr->b_size == bytes &&
- !HDR_L2_WRITING(hdr)) {
- stolen = buf->b_data;
- recycle = FALSE;
- }
- }
- if (buf->b_efunc != NULL) {
- mutex_enter(&arc_eviction_mtx);
- arc_buf_destroy(buf,
- buf->b_data == stolen, FALSE);
- hdr->b_l1hdr.b_buf = buf->b_next;
- buf->b_hdr = &arc_eviction_hdr;
- buf->b_next = arc_eviction_list;
- arc_eviction_list = buf;
- mutex_exit(&arc_eviction_mtx);
- mutex_exit(&buf->b_evict_lock);
- } else {
- mutex_exit(&buf->b_evict_lock);
- arc_buf_destroy(buf,
- buf->b_data == stolen, TRUE);
- }
- }
+ ARCSTAT_BUMP(arcstat_deleted);
+ bytes_evicted += hdr->b_size;
- if (HDR_HAS_L2HDR(hdr)) {
- ARCSTAT_INCR(arcstat_evict_l2_cached,
- hdr->b_size);
- } else {
- if (l2arc_write_eligible(hdr->b_spa, hdr)) {
- ARCSTAT_INCR(arcstat_evict_l2_eligible,
- hdr->b_size);
- } else {
- ARCSTAT_INCR(
- arcstat_evict_l2_ineligible,
- hdr->b_size);
- }
- }
+ DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, hdr);
- if (hdr->b_l1hdr.b_datacnt == 0) {
- arc_change_state(evicted_state, hdr, hash_lock);
- ASSERT(HDR_IN_HASH_TABLE(hdr));
- hdr->b_flags |= ARC_FLAG_IN_HASH_TABLE;
- hdr->b_flags &= ~ARC_FLAG_BUF_AVAILABLE;
- DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, hdr);
- }
- if (!have_lock)
- mutex_exit(hash_lock);
- if (bytes >= 0 && bytes_evicted >= bytes)
- break;
+ if (HDR_HAS_L2HDR(hdr)) {
+ /*
+ * This buffer is cached on the 2nd Level ARC;
+ * don't destroy the header.
+ */
+ arc_change_state(arc_l2c_only, hdr, hash_lock);
+ /*
+ * dropping from L1+L2 cached to L2-only,
+ * realloc to remove the L1 header.
+ */
+ hdr = arc_hdr_realloc(hdr, hdr_full_cache,
+ hdr_l2only_cache);
} else {
- missed += 1;
+ arc_change_state(arc_anon, hdr, hash_lock);
+ arc_hdr_destroy(hdr);
}
+ return (bytes_evicted);
}
- mutex_exit(&state->arcs_mtx);
- mutex_exit(&evicted_state->arcs_mtx);
+ ASSERT(state == arc_mru || state == arc_mfu);
+ evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;
- if (list == &state->arcs_list[ARC_BUFC_DATA] &&
- (bytes < 0 || bytes_evicted < bytes)) {
- /* Prevent second pass from recycling metadata into data */
- recycle = FALSE;
- type = ARC_BUFC_METADATA;
- list = &state->arcs_list[type];
- goto top;
+ /* prefetch buffers have a minimum lifespan */
+ if (HDR_IO_IN_PROGRESS(hdr) ||
+ ((hdr->b_flags & (ARC_FLAG_PREFETCH | ARC_FLAG_INDIRECT)) &&
+ ddi_get_lbolt() - hdr->b_l1hdr.b_arc_access <
+ arc_min_prefetch_lifespan)) {
+ ARCSTAT_BUMP(arcstat_evict_skip);
+ return (bytes_evicted);
}
- if (bytes_evicted < bytes)
- dprintf("only evicted %lld bytes from %x\n",
- (longlong_t)bytes_evicted, state->arcs_state);
+ ASSERT0(refcount_count(&hdr->b_l1hdr.b_refcnt));
+ ASSERT3U(hdr->b_l1hdr.b_datacnt, >, 0);
+ while (hdr->b_l1hdr.b_buf) {
+ arc_buf_t *buf = hdr->b_l1hdr.b_buf;
+ if (!mutex_tryenter(&buf->b_evict_lock)) {
+ ARCSTAT_BUMP(arcstat_mutex_miss);
+ break;
+ }
+ if (buf->b_data != NULL)
+ bytes_evicted += hdr->b_size;
+ if (buf->b_efunc != NULL) {
+ mutex_enter(&arc_user_evicts_lock);
+ arc_buf_destroy(buf, FALSE);
+ hdr->b_l1hdr.b_buf = buf->b_next;
+ buf->b_hdr = &arc_eviction_hdr;
+ buf->b_next = arc_eviction_list;
+ arc_eviction_list = buf;
+ cv_signal(&arc_user_evicts_cv);
+ mutex_exit(&arc_user_evicts_lock);
+ mutex_exit(&buf->b_evict_lock);
+ } else {
+ mutex_exit(&buf->b_evict_lock);
+ arc_buf_destroy(buf, TRUE);
+ }
+ }
- if (skipped)
- ARCSTAT_INCR(arcstat_evict_skip, skipped);
+ if (HDR_HAS_L2HDR(hdr)) {
+ ARCSTAT_INCR(arcstat_evict_l2_cached, hdr->b_size);
+ } else {
+ if (l2arc_write_eligible(hdr->b_spa, hdr))
+ ARCSTAT_INCR(arcstat_evict_l2_eligible, hdr->b_size);
+ else
+ ARCSTAT_INCR(arcstat_evict_l2_ineligible, hdr->b_size);
+ }
- if (missed)
- ARCSTAT_INCR(arcstat_mutex_miss, missed);
+ if (hdr->b_l1hdr.b_datacnt == 0) {
+ arc_change_state(evicted_state, hdr, hash_lock);
+ ASSERT(HDR_IN_HASH_TABLE(hdr));
+ hdr->b_flags |= ARC_FLAG_IN_HASH_TABLE;
+ hdr->b_flags &= ~ARC_FLAG_BUF_AVAILABLE;
+ DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, hdr);
+ }
- /*
- * Note: we have just evicted some data into the ghost state,
- * potentially putting the ghost size over the desired size. Rather
- * that evicting from the ghost list in this hot code path, leave
- * this chore to the arc_reclaim_thread().
- */
- return (stolen);
+ return (bytes_evicted);
}
-/*
- * Remove buffers from list until we've removed the specified number of
- * bytes. Destroy the buffers that are removed.
- */
-static void
-arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes)
+static uint64_t
+arc_evict_state_impl(multilist_t *ml, int idx, arc_buf_hdr_t *marker,
+ uint64_t spa, int64_t bytes)
{
- arc_buf_hdr_t *hdr, *hdr_prev;
- arc_buf_hdr_t marker;
- list_t *list = &state->arcs_list[ARC_BUFC_DATA];
+ multilist_sublist_t *mls;
+ uint64_t bytes_evicted = 0;
+ arc_buf_hdr_t *hdr;
kmutex_t *hash_lock;
- uint64_t bytes_deleted = 0;
- uint64_t bufs_skipped = 0;
- int count = 0;
+ int evict_count = 0;
- ASSERT(GHOST_STATE(state));
- bzero(&marker, sizeof (marker));
-top:
- mutex_enter(&state->arcs_mtx);
- for (hdr = list_tail(list); hdr; hdr = hdr_prev) {
- hdr_prev = list_prev(list, hdr);
- if (arc_buf_type(hdr) >= ARC_BUFC_NUMTYPES)
- panic("invalid hdr=%p", (void *)hdr);
- if (spa && hdr->b_spa != spa)
- continue;
+ ASSERT3P(marker, !=, NULL);
+ ASSERTV(if (bytes < 0) ASSERT(bytes == ARC_EVICT_ALL));
+
+ mls = multilist_sublist_lock(ml, idx);
+
+ for (hdr = multilist_sublist_prev(mls, marker); hdr != NULL;
+ hdr = multilist_sublist_prev(mls, marker)) {
+ if ((bytes != ARC_EVICT_ALL && bytes_evicted >= bytes) ||
+ (evict_count >= zfs_arc_evict_batch_limit))
+ break;
- /* ignore markers */
+ /*
+ * To keep our iteration location, move the marker
+ * forward. Since we're not holding hdr's hash lock, we
+ * must be very careful and not remove 'hdr' from the
+ * sublist. Otherwise, other consumers might mistake the
+ * 'hdr' as not being on a sublist when they call the
+ * multilist_link_active() function (they all rely on
+ * the hash lock protecting concurrent insertions and
+ * removals). multilist_sublist_move_forward() was
+ * specifically implemented to ensure this is the case
+ * (only 'marker' will be removed and re-inserted).
+ */
+ multilist_sublist_move_forward(mls, marker);
+
+ /*
+ * The only case where the b_spa field should ever be
+ * zero, is the marker headers inserted by
+ * arc_evict_state(). It's possible for multiple threads
+ * to be calling arc_evict_state() concurrently (e.g.
+ * dsl_pool_close() and zio_inject_fault()), so we must
+ * skip any markers we see from these other threads.
+ */
if (hdr->b_spa == 0)
continue;
- hash_lock = HDR_LOCK(hdr);
- /* caller may be trying to modify this buffer, skip it */
- if (MUTEX_HELD(hash_lock))
+ /* we're only interested in evicting buffers of a certain spa */
+ if (spa != 0 && hdr->b_spa != spa) {
+ ARCSTAT_BUMP(arcstat_evict_skip);
continue;
+ }
+
+ hash_lock = HDR_LOCK(hdr);
/*
- * It may take a long time to evict all the bufs requested.
- * To avoid blocking all arc activity, periodically drop
- * the arcs_mtx and give other threads a chance to run
- * before reacquiring the lock.
+ * We aren't calling this function from any code path
+ * that would already be holding a hash lock, so we're
+ * asserting on this assumption to be defensive in case
+ * this ever changes. Without this check, it would be
+ * possible to incorrectly increment arcstat_mutex_miss
+ * below (e.g. if the code changed such that we called
+ * this function with a hash lock held).
*/
- if (count++ > arc_evict_iterations) {
- list_insert_after(list, hdr, &marker);
- mutex_exit(&state->arcs_mtx);
- kpreempt(KPREEMPT_SYNC);
- mutex_enter(&state->arcs_mtx);
- hdr_prev = list_prev(list, &marker);
- list_remove(list, &marker);
- count = 0;
- continue;
- }
+ ASSERT(!MUTEX_HELD(hash_lock));
+
if (mutex_tryenter(hash_lock)) {
- ASSERT(!HDR_IO_IN_PROGRESS(hdr));
- ASSERT(!HDR_HAS_L1HDR(hdr) ||
- hdr->b_l1hdr.b_buf == NULL);
- ARCSTAT_BUMP(arcstat_deleted);
- bytes_deleted += hdr->b_size;
+ uint64_t evicted = arc_evict_hdr(hdr, hash_lock);
+ mutex_exit(hash_lock);
- if (HDR_HAS_L2HDR(hdr)) {
- /*
- * This buffer is cached on the 2nd Level ARC;
- * don't destroy the header.
- */
- arc_change_state(arc_l2c_only, hdr, hash_lock);
- /*
- * dropping from L1+L2 cached to L2-only,
- * realloc to remove the L1 header.
- */
- hdr = arc_hdr_realloc(hdr, hdr_full_cache,
- hdr_l2only_cache);
- mutex_exit(hash_lock);
- } else {
- arc_change_state(arc_anon, hdr, hash_lock);
- mutex_exit(hash_lock);
- arc_hdr_destroy(hdr);
- }
+ bytes_evicted += evicted;
- DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, hdr);
- if (bytes >= 0 && bytes_deleted >= bytes)
- break;
- } else if (bytes < 0) {
/*
- * Insert a list marker and then wait for the
- * hash lock to become available. Once its
- * available, restart from where we left off.
+ * If evicted is zero, arc_evict_hdr() must have
+ * decided to skip this header, don't increment
+ * evict_count in this case.
*/
- list_insert_after(list, hdr, &marker);
- mutex_exit(&state->arcs_mtx);
- mutex_enter(hash_lock);
- mutex_exit(hash_lock);
- mutex_enter(&state->arcs_mtx);
- hdr_prev = list_prev(list, &marker);
- list_remove(list, &marker);
+ if (evicted != 0)
+ evict_count++;
+
+ /*
+ * If arc_size isn't overflowing, signal any
+ * threads that might happen to be waiting.
+ *
+ * For each header evicted, we wake up a single
+ * thread. If we used cv_broadcast, we could
+ * wake up "too many" threads causing arc_size
+ * to significantly overflow arc_c; since
+ * arc_get_data_buf() doesn't check for overflow
+ * when it's woken up (it doesn't because it's
+ * possible for the ARC to be overflowing while
+ * full of un-evictable buffers, and the
+ * function should proceed in this case).
+ *
+ * If threads are left sleeping, due to not
+ * using cv_broadcast, they will be woken up
+ * just before arc_reclaim_thread() sleeps.
+ */
+ mutex_enter(&arc_reclaim_lock);
+ if (!arc_is_overflowing())
+ cv_signal(&arc_reclaim_waiters_cv);
+ mutex_exit(&arc_reclaim_lock);
} else {
- bufs_skipped += 1;
+ ARCSTAT_BUMP(arcstat_mutex_miss);
}
}
- mutex_exit(&state->arcs_mtx);
- if (list == &state->arcs_list[ARC_BUFC_DATA] &&
- (bytes < 0 || bytes_deleted < bytes)) {
- list = &state->arcs_list[ARC_BUFC_METADATA];
- goto top;
- }
+ multilist_sublist_unlock(mls);
- if (bufs_skipped) {
- ARCSTAT_INCR(arcstat_mutex_miss, bufs_skipped);
- ASSERT(bytes >= 0);
- }
-
- if (bytes_deleted < bytes)
- dprintf("only deleted %lld bytes from %p\n",
- (longlong_t)bytes_deleted, state);
+ return (bytes_evicted);
}
-static void
-arc_adjust(void)
+/*
+ * Evict buffers from the given arc state, until we've removed the
+ * specified number of bytes. Move the removed buffers to the
+ * appropriate evict state.
+ *
+ * This function makes a "best effort". It skips over any buffers
+ * it can't get a hash_lock on, and so, may not catch all candidates.
+ * It may also return without evicting as much space as requested.
+ *
+ * If bytes is specified using the special value ARC_EVICT_ALL, this
+ * will evict all available (i.e. unlocked and evictable) buffers from
+ * the given arc state; which is used by arc_flush().
+ */
+static uint64_t
+arc_evict_state(arc_state_t *state, uint64_t spa, int64_t bytes,
+ arc_buf_contents_t type)
{
- int64_t adjustment, delta;
+ uint64_t total_evicted = 0;
+ multilist_t *ml = &state->arcs_list[type];
+ int num_sublists;
+ arc_buf_hdr_t **markers;
+ int i;
+
+ ASSERTV(if (bytes < 0) ASSERT(bytes == ARC_EVICT_ALL));
+
+ num_sublists = multilist_get_num_sublists(ml);
/*
- * Adjust MRU size
+ * If we've tried to evict from each sublist, made some
+ * progress, but still have not hit the target number of bytes
+ * to evict, we want to keep trying. The markers allow us to
+ * pick up where we left off for each individual sublist, rather
+ * than starting from the tail each time.
*/
+ markers = kmem_zalloc(sizeof (*markers) * num_sublists, KM_SLEEP);
+ for (i = 0; i < num_sublists; i++) {
+ multilist_sublist_t *mls;
- adjustment = MIN((int64_t)(arc_size - arc_c),
- (int64_t)(arc_anon->arcs_size + arc_mru->arcs_size - arc_p));
+ markers[i] = kmem_cache_alloc(hdr_full_cache, KM_SLEEP);
- if (adjustment > 0 && arc_mru->arcs_size > 0) {
- delta = MIN(arc_mru->arcs_size, adjustment);
- (void) arc_evict(arc_mru, 0, delta, FALSE, ARC_BUFC_DATA);
+ /*
+ * A b_spa of 0 is used to indicate that this header is
+ * a marker. This fact is used in arc_adjust_type() and
+ * arc_evict_state_impl().
+ */
+ markers[i]->b_spa = 0;
+
+ mls = multilist_sublist_lock(ml, i);
+ multilist_sublist_insert_tail(mls, markers[i]);
+ multilist_sublist_unlock(mls);
}
/*
- * Adjust MFU size
+ * While we haven't hit our target number of bytes to evict, or
+ * we're evicting all available buffers.
*/
+ while (total_evicted < bytes || bytes == ARC_EVICT_ALL) {
+ /*
+ * Start eviction using a randomly selected sublist,
+ * this is to try and evenly balance eviction across all
+ * sublists. Always starting at the same sublist
+ * (e.g. index 0) would cause evictions to favor certain
+ * sublists over others.
+ */
+ int sublist_idx = multilist_get_random_index(ml);
+ uint64_t scan_evicted = 0;
- adjustment = arc_size - arc_c;
+ for (i = 0; i < num_sublists; i++) {
+ uint64_t bytes_remaining;
+ uint64_t bytes_evicted;
- if (adjustment > 0 && arc_mfu->arcs_size > 0) {
- delta = MIN(arc_mfu->arcs_size, adjustment);
- (void) arc_evict(arc_mfu, 0, delta, FALSE, ARC_BUFC_DATA);
- }
+ if (bytes == ARC_EVICT_ALL)
+ bytes_remaining = ARC_EVICT_ALL;
+ else if (total_evicted < bytes)
+ bytes_remaining = bytes - total_evicted;
+ else
+ break;
- /*
- * Adjust ghost lists
- */
+ bytes_evicted = arc_evict_state_impl(ml, sublist_idx,
+ markers[sublist_idx], spa, bytes_remaining);
+
+ scan_evicted += bytes_evicted;
+ total_evicted += bytes_evicted;
+
+ /* we've reached the end, wrap to the beginning */
+ if (++sublist_idx >= num_sublists)
+ sublist_idx = 0;
+ }
- adjustment = arc_mru->arcs_size + arc_mru_ghost->arcs_size - arc_c;
+ /*
+ * If we didn't evict anything during this scan, we have
+ * no reason to believe we'll evict more during another
+ * scan, so break the loop.
+ */
+ if (scan_evicted == 0) {
+ /* This isn't possible, let's make that obvious */
+ ASSERT3S(bytes, !=, 0);
- if (adjustment > 0 && arc_mru_ghost->arcs_size > 0) {
- delta = MIN(arc_mru_ghost->arcs_size, adjustment);
- arc_evict_ghost(arc_mru_ghost, 0, delta);
+ /*
+ * When bytes is ARC_EVICT_ALL, the only way to
+ * break the loop is when scan_evicted is zero.
+ * In that case, we actually have evicted enough,
+ * so we don't want to increment the kstat.
+ */
+ if (bytes != ARC_EVICT_ALL) {
+ ASSERT3S(total_evicted, <, bytes);
+ ARCSTAT_BUMP(arcstat_evict_not_enough);
+ }
+
+ break;
+ }
}
- adjustment =
- arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size - arc_c;
+ for (i = 0; i < num_sublists; i++) {
+ multilist_sublist_t *mls = multilist_sublist_lock(ml, i);
+ multilist_sublist_remove(mls, markers[i]);
+ multilist_sublist_unlock(mls);
- if (adjustment > 0 && arc_mfu_ghost->arcs_size > 0) {
- delta = MIN(arc_mfu_ghost->arcs_size, adjustment);
- arc_evict_ghost(arc_mfu_ghost, 0, delta);
+ kmem_cache_free(hdr_full_cache, markers[i]);
}
+ kmem_free(markers, sizeof (*markers) * num_sublists);
+
+ return (total_evicted);
+}
+
+/*
+ * Flush all "evictable" data of the given type from the arc state
+ * specified. This will not evict any "active" buffers (i.e. referenced).
+ *
+ * When 'retry' is set to FALSE, the function will make a single pass
+ * over the state and evict any buffers that it can. Since it doesn't
+ * continually retry the eviction, it might end up leaving some buffers
+ * in the ARC due to lock misses.
+ *
+ * When 'retry' is set to TRUE, the function will continually retry the
+ * eviction until *all* evictable buffers have been removed from the
+ * state. As a result, if concurrent insertions into the state are
+ * allowed (e.g. if the ARC isn't shutting down), this function might
+ * wind up in an infinite loop, continually trying to evict buffers.
+ */
+static uint64_t
+arc_flush_state(arc_state_t *state, uint64_t spa, arc_buf_contents_t type,
+ boolean_t retry)
+{
+ uint64_t evicted = 0;
+
+ while (state->arcs_lsize[type] != 0) {
+ evicted += arc_evict_state(state, spa, ARC_EVICT_ALL, type);
+
+ if (!retry)
+ break;
+ }
+
+ return (evicted);
}
/*
@@ -2348,27 +2471,26 @@ arc_do_user_prune(int64_t adjustment)
mutex_exit(&arc_prune_mtx);
}
-static void
-arc_do_user_evicts(void)
+/*
+ * Evict the specified number of bytes from the state specified,
+ * restricting eviction to the spa and type given. This function
+ * prevents us from trying to evict more from a state's list than
+ * is "evictable", and to skip evicting altogether when passed a
+ * negative value for "bytes". In contrast, arc_evict_state() will
+ * evict everything it can, when passed a negative value for "bytes".
+ */
+static uint64_t
+arc_adjust_impl(arc_state_t *state, uint64_t spa, int64_t bytes,
+ arc_buf_contents_t type)
{
- mutex_enter(&arc_eviction_mtx);
- while (arc_eviction_list != NULL) {
- arc_buf_t *buf = arc_eviction_list;
- arc_eviction_list = buf->b_next;
- mutex_enter(&buf->b_evict_lock);
- buf->b_hdr = NULL;
- mutex_exit(&buf->b_evict_lock);
- mutex_exit(&arc_eviction_mtx);
-
- if (buf->b_efunc != NULL)
- VERIFY0(buf->b_efunc(buf->b_private));
+ int64_t delta;
- buf->b_efunc = NULL;
- buf->b_private = NULL;
- kmem_cache_free(buf_cache, buf);
- mutex_enter(&arc_eviction_mtx);
+ if (bytes > 0 && state->arcs_lsize[type] > 0) {
+ delta = MIN(state->arcs_lsize[type], bytes);
+ return (arc_evict_state(state, spa, delta, type));
}
- mutex_exit(&arc_eviction_mtx);
+
+ return (0);
}
/*
@@ -2388,10 +2510,11 @@ arc_do_user_evicts(void)
* be dropped from the VFS cache. This will make dnode meta data buffers
* available for reclaim.
*/
-static void
+static uint64_t
arc_adjust_meta(void)
{
int64_t adjustmnt, delta, prune = 0;
+ uint64_t total_evicted = 0;
arc_buf_contents_t type = ARC_BUFC_DATA;
unsigned long restarts = zfs_arc_meta_adjust_restarts;
@@ -2408,7 +2531,7 @@ restart:
if (adjustmnt > 0 && arc_mru->arcs_lsize[type] > 0) {
delta = MIN(arc_mru->arcs_lsize[type], adjustmnt);
- arc_evict(arc_mru, 0, delta, FALSE, type);
+ total_evicted += arc_adjust_impl(arc_mru, 0, delta, type);
adjustmnt -= delta;
}
@@ -2424,21 +2547,22 @@ restart:
if (adjustmnt > 0 && arc_mfu->arcs_lsize[type] > 0) {
delta = MIN(arc_mfu->arcs_lsize[type], adjustmnt);
- arc_evict(arc_mfu, 0, delta, FALSE, type);
+ total_evicted += arc_adjust_impl(arc_mfu, 0, delta, type);
}
adjustmnt = arc_meta_used - arc_meta_limit;
if (adjustmnt > 0 && arc_mru_ghost->arcs_lsize[type] > 0) {
delta = MIN(adjustmnt,
- arc_mru_ghost->arcs_lsize[ARC_BUFC_METADATA]);
- arc_evict_ghost(arc_mru_ghost, 0, delta);
+ arc_mru_ghost->arcs_lsize[type]);
+ total_evicted += arc_adjust_impl(arc_mru_ghost, 0, delta, type);
+ adjustmnt -= delta;
}
if (adjustmnt > 0 && arc_mfu_ghost->arcs_lsize[type] > 0) {
delta = MIN(adjustmnt,
- arc_mfu_ghost->arcs_lsize[ARC_BUFC_METADATA]);
- arc_evict_ghost(arc_mfu_ghost, 0, delta);
+ arc_mfu_ghost->arcs_lsize[type]);
+ total_evicted += arc_adjust_impl(arc_mfu_ghost, 0, delta, type);
}
/*
@@ -2465,47 +2589,278 @@ restart:
goto restart;
}
}
+ return (total_evicted);
}
/*
- * Flush all *evictable* data from the cache for the given spa.
- * NOTE: this will not touch "active" (i.e. referenced) data.
+ * Return the type of the oldest buffer in the given arc state
+ *
+ * This function will select a random sublist of type ARC_BUFC_DATA and
+ * a random sublist of type ARC_BUFC_METADATA. The tail of each sublist
+ * is compared, and the type which contains the "older" buffer will be
+ * returned.
*/
-void
-arc_flush(spa_t *spa)
+static arc_buf_contents_t
+arc_adjust_type(arc_state_t *state)
{
- uint64_t guid = 0;
+ multilist_t *data_ml = &state->arcs_list[ARC_BUFC_DATA];
+ multilist_t *meta_ml = &state->arcs_list[ARC_BUFC_METADATA];
+ int data_idx = multilist_get_random_index(data_ml);
+ int meta_idx = multilist_get_random_index(meta_ml);
+ multilist_sublist_t *data_mls;
+ multilist_sublist_t *meta_mls;
+ arc_buf_contents_t type;
+ arc_buf_hdr_t *data_hdr;
+ arc_buf_hdr_t *meta_hdr;
- if (spa != NULL)
- guid = spa_load_guid(spa);
+ /*
+ * We keep the sublist lock until we're finished, to prevent
+ * the headers from being destroyed via arc_evict_state().
+ */
+ data_mls = multilist_sublist_lock(data_ml, data_idx);
+ meta_mls = multilist_sublist_lock(meta_ml, meta_idx);
+
+ /*
+ * These two loops are to ensure we skip any markers that
+ * might be at the tail of the lists due to arc_evict_state().
+ */
- while (list_head(&arc_mru->arcs_list[ARC_BUFC_DATA])) {
- (void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_DATA);
- if (spa != NULL)
+ for (data_hdr = multilist_sublist_tail(data_mls); data_hdr != NULL;
+ data_hdr = multilist_sublist_prev(data_mls, data_hdr)) {
+ if (data_hdr->b_spa != 0)
break;
}
- while (list_head(&arc_mru->arcs_list[ARC_BUFC_METADATA])) {
- (void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_METADATA);
- if (spa != NULL)
+
+ for (meta_hdr = multilist_sublist_tail(meta_mls); meta_hdr != NULL;
+ meta_hdr = multilist_sublist_prev(meta_mls, meta_hdr)) {
+ if (meta_hdr->b_spa != 0)
break;
}
- while (list_head(&arc_mfu->arcs_list[ARC_BUFC_DATA])) {
- (void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_DATA);
- if (spa != NULL)
- break;
+
+ if (data_hdr == NULL && meta_hdr == NULL) {
+ type = ARC_BUFC_DATA;
+ } else if (data_hdr == NULL) {
+ ASSERT3P(meta_hdr, !=, NULL);
+ type = ARC_BUFC_METADATA;
+ } else if (meta_hdr == NULL) {
+ ASSERT3P(data_hdr, !=, NULL);
+ type = ARC_BUFC_DATA;
+ } else {
+ ASSERT3P(data_hdr, !=, NULL);
+ ASSERT3P(meta_hdr, !=, NULL);
+
+ /* The headers can't be on the sublist without an L1 header */
+ ASSERT(HDR_HAS_L1HDR(data_hdr));
+ ASSERT(HDR_HAS_L1HDR(meta_hdr));
+
+ if (data_hdr->b_l1hdr.b_arc_access <
+ meta_hdr->b_l1hdr.b_arc_access) {
+ type = ARC_BUFC_DATA;
+ } else {
+ type = ARC_BUFC_METADATA;
+ }
}
- while (list_head(&arc_mfu->arcs_list[ARC_BUFC_METADATA])) {
- (void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_METADATA);
- if (spa != NULL)
- break;
+
+ multilist_sublist_unlock(meta_mls);
+ multilist_sublist_unlock(data_mls);
+
+ return (type);
+}
+
+/*
+ * Evict buffers from the cache, such that arc_size is capped by arc_c.
+ */
+static uint64_t
+arc_adjust(void)
+{
+ uint64_t total_evicted = 0;
+ uint64_t bytes;
+ int64_t target;
+
+ /*
+ * If we're over arc_meta_limit, we want to correct that before
+ * potentially evicting data buffers below.
+ */
+ total_evicted += arc_adjust_meta();
+
+ /*
+ * Adjust MRU size
+ *
+ * If we're over the target cache size, we want to evict enough
+ * from the list to get back to our target size. We don't want
+ * to evict too much from the MRU, such that it drops below
+ * arc_p. So, if we're over our target cache size more than
+ * the MRU is over arc_p, we'll evict enough to get back to
+ * arc_p here, and then evict more from the MFU below.
+ */
+ target = MIN((int64_t)(arc_size - arc_c),
+ (int64_t)(arc_anon->arcs_size + arc_mru->arcs_size + arc_meta_used -
+ arc_p));
+
+ /*
+ * If we're below arc_meta_min, always prefer to evict data.
+ * Otherwise, try to satisfy the requested number of bytes to
+ * evict from the type which contains older buffers; in an
+ * effort to keep newer buffers in the cache regardless of their
+ * type. If we cannot satisfy the number of bytes from this
+ * type, spill over into the next type.
+ */
+ if (arc_adjust_type(arc_mru) == ARC_BUFC_METADATA &&
+ arc_meta_used > arc_meta_min) {
+ bytes = arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_METADATA);
+ total_evicted += bytes;
+
+ /*
+ * If we couldn't evict our target number of bytes from
+ * metadata, we try to get the rest from data.
+ */
+ target -= bytes;
+
+ total_evicted +=
+ arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_DATA);
+ } else {
+ bytes = arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_DATA);
+ total_evicted += bytes;
+
+ /*
+ * If we couldn't evict our target number of bytes from
+ * data, we try to get the rest from metadata.
+ */
+ target -= bytes;
+
+ total_evicted +=
+ arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_METADATA);
+ }
+
+ /*
+ * Adjust MFU size
+ *
+ * Now that we've tried to evict enough from the MRU to get its
+ * size back to arc_p, if we're still above the target cache
+ * size, we evict the rest from the MFU.
+ */
+ target = arc_size - arc_c;
+
+ if (arc_adjust_type(arc_mru) == ARC_BUFC_METADATA &&
+ arc_meta_used > arc_meta_min) {
+ bytes = arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_METADATA);
+ total_evicted += bytes;
+
+ /*
+ * If we couldn't evict our target number of bytes from
+ * metadata, we try to get the rest from data.
+ */
+ target -= bytes;
+
+ total_evicted +=
+ arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_DATA);
+ } else {
+ bytes = arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_DATA);
+ total_evicted += bytes;
+
+ /*
+ * If we couldn't evict our target number of bytes from
+ * data, we try to get the rest from metadata.
+ */
+ target -= bytes;
+
+ total_evicted +=
+ arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_METADATA);
+ }
+
+ /*
+ * Adjust ghost lists
+ *
+ * In addition to the above, the ARC also defines target values
+ * for the ghost lists. The sum of the mru list and mru ghost
+ * list should never exceed the target size of the cache, and
+ * the sum of the mru list, mfu list, mru ghost list, and mfu
+ * ghost list should never exceed twice the target size of the
+ * cache. The following logic enforces these limits on the ghost
+ * caches, and evicts from them as needed.
+ */
+ target = arc_mru->arcs_size + arc_mru_ghost->arcs_size - arc_c;
+
+ bytes = arc_adjust_impl(arc_mru_ghost, 0, target, ARC_BUFC_DATA);
+ total_evicted += bytes;
+
+ target -= bytes;
+
+ total_evicted +=
+ arc_adjust_impl(arc_mru_ghost, 0, target, ARC_BUFC_METADATA);
+
+ /*
+ * We assume the sum of the mru list and mfu list is less than
+ * or equal to arc_c (we enforced this above), which means we
+ * can use the simpler of the two equations below:
+ *
+ * mru + mfu + mru ghost + mfu ghost <= 2 * arc_c
+ * mru ghost + mfu ghost <= arc_c
+ */
+ target = arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size - arc_c;
+
+ bytes = arc_adjust_impl(arc_mfu_ghost, 0, target, ARC_BUFC_DATA);
+ total_evicted += bytes;
+
+ target -= bytes;
+
+ total_evicted +=
+ arc_adjust_impl(arc_mfu_ghost, 0, target, ARC_BUFC_METADATA);
+
+ return (total_evicted);
+}
+
+static void
+arc_do_user_evicts(void)
+{
+ mutex_enter(&arc_user_evicts_lock);
+ while (arc_eviction_list != NULL) {
+ arc_buf_t *buf = arc_eviction_list;
+ arc_eviction_list = buf->b_next;
+ mutex_enter(&buf->b_evict_lock);
+ buf->b_hdr = NULL;
+ mutex_exit(&buf->b_evict_lock);
+ mutex_exit(&arc_user_evicts_lock);
+
+ if (buf->b_efunc != NULL)
+ VERIFY0(buf->b_efunc(buf->b_private));
+
+ buf->b_efunc = NULL;
+ buf->b_private = NULL;
+ kmem_cache_free(buf_cache, buf);
+ mutex_enter(&arc_user_evicts_lock);
}
+ mutex_exit(&arc_user_evicts_lock);
+}
+
+void
+arc_flush(spa_t *spa, boolean_t retry)
+{
+ uint64_t guid = 0;
+
+ /*
+ * If retry is TRUE, a spa must not be specified since we have
+ * no good way to determine if all of a spa's buffers have been
+ * evicted from an arc state.
+ */
+ ASSERT(!retry || spa == 0);
+
+ if (spa != NULL)
+ guid = spa_load_guid(spa);
- arc_evict_ghost(arc_mru_ghost, guid, -1);
- arc_evict_ghost(arc_mfu_ghost, guid, -1);
+ (void) arc_flush_state(arc_mru, guid, ARC_BUFC_DATA, retry);
+ (void) arc_flush_state(arc_mru, guid, ARC_BUFC_METADATA, retry);
+
+ (void) arc_flush_state(arc_mfu, guid, ARC_BUFC_DATA, retry);
+ (void) arc_flush_state(arc_mfu, guid, ARC_BUFC_METADATA, retry);
+
+ (void) arc_flush_state(arc_mru_ghost, guid, ARC_BUFC_DATA, retry);
+ (void) arc_flush_state(arc_mru_ghost, guid, ARC_BUFC_METADATA, retry);
+
+ (void) arc_flush_state(arc_mfu_ghost, guid, ARC_BUFC_DATA, retry);
+ (void) arc_flush_state(arc_mfu_ghost, guid, ARC_BUFC_METADATA, retry);
- mutex_enter(&arc_reclaim_thr_lock);
arc_do_user_evicts();
- mutex_exit(&arc_reclaim_thr_lock);
ASSERT(spa || arc_eviction_list == NULL);
}
@@ -2538,7 +2893,7 @@ arc_shrink(uint64_t bytes)
}
if (arc_size > arc_c)
- arc_adjust();
+ (void) arc_adjust();
}
static void
@@ -2568,6 +2923,7 @@ arc_kmem_reap_now(arc_reclaim_strategy_t strat, uint64_t bytes)
}
}
+ kmem_cache_reap_now(buf_cache);
kmem_cache_reap_now(hdr_full_cache);
kmem_cache_reap_now(hdr_l2only_cache);
}
@@ -2578,21 +2934,41 @@ arc_kmem_reap_now(arc_reclaim_strategy_t strat, uint64_t bytes)
* reclamation has been entirely delegated to the arc_shrinker_func()
* which is registered with the VM. To reflect this change in behavior
* the arc_reclaim thread has been renamed to arc_adapt.
+ *
+ * The following comment from arc_reclaim_thread() in illumos is still
+ * applicable:
+ *
+ * Threads can block in arc_get_data_buf() waiting for this thread to evict
+ * enough data and signal them to proceed. When this happens, the threads in
+ * arc_get_data_buf() are sleeping while holding the hash lock for their
+ * particular arc header. Thus, we must be careful to never sleep on a
+ * hash lock in this thread. This is to prevent the following deadlock:
+ *
+ * - Thread A sleeps on CV in arc_get_data_buf() holding hash lock "L",
+ * waiting for the reclaim thread to signal it.
+ *
+ * - arc_reclaim_thread() tries to acquire hash lock "L" using mutex_enter,
+ * fails, and goes to sleep forever.
+ *
+ * This possible deadlock is avoided by always acquiring a hash lock
+ * using mutex_tryenter() from arc_reclaim_thread().
*/
static void
arc_adapt_thread(void)
{
callb_cpr_t cpr;
fstrans_cookie_t cookie;
+ uint64_t arc_evicted;
- CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG);
+ CALLB_CPR_INIT(&cpr, &arc_reclaim_lock, callb_generic_cpr, FTAG);
cookie = spl_fstrans_mark();
- mutex_enter(&arc_reclaim_thr_lock);
- while (arc_thread_exit == 0) {
+ mutex_enter(&arc_reclaim_lock);
+ while (arc_reclaim_thread_exit == 0) {
#ifndef _KERNEL
arc_reclaim_strategy_t last_reclaim = ARC_RECLAIM_CONS;
+ mutex_exit(&arc_reclaim_lock);
if (spa_get_random(100) == 0) {
if (arc_no_grow) {
@@ -2614,6 +2990,8 @@ arc_adapt_thread(void)
arc_kmem_reap_now(last_reclaim, 0);
arc_warm = B_TRUE;
}
+#else /* _KERNEL */
+ mutex_exit(&arc_reclaim_lock);
#endif /* !_KERNEL */
/* No recent memory pressure allow the ARC to grow. */
@@ -2621,18 +2999,23 @@ arc_adapt_thread(void)
ddi_time_after_eq(ddi_get_lbolt(), arc_grow_time))
arc_no_grow = FALSE;
- arc_adjust_meta();
+ arc_evicted = arc_adjust();
- arc_adjust();
+ /*
+ * We're either no longer overflowing, or we
+ * can't evict anything more, so we should wake
+ * up any threads before we go to sleep.
+ */
+ if (arc_size <= arc_c || arc_evicted == 0)
+ cv_broadcast(&arc_reclaim_waiters_cv);
- if (arc_eviction_list != NULL)
- arc_do_user_evicts();
+ mutex_enter(&arc_reclaim_lock);
/* block until needed, or one second, whichever is shorter */
CALLB_CPR_SAFE_BEGIN(&cpr);
- (void) cv_timedwait_interruptible(&arc_reclaim_thr_cv,
- &arc_reclaim_thr_lock, (ddi_get_lbolt() + hz));
- CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock);
+ (void) cv_timedwait_interruptible(&arc_reclaim_thread_cv,
+ &arc_reclaim_lock, (ddi_get_lbolt() + hz));
+ CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_lock);
/* Allow the module options to be changed */
@@ -2650,14 +3033,59 @@ arc_adapt_thread(void)
zfs_arc_meta_limit <= arc_c_max &&
zfs_arc_meta_limit != arc_meta_limit)
arc_meta_limit = zfs_arc_meta_limit;
+ }
+
+ arc_reclaim_thread_exit = 0;
+ cv_broadcast(&arc_reclaim_thread_cv);
+ CALLB_CPR_EXIT(&cpr); /* drops arc_reclaim_lock */
+ spl_fstrans_unmark(cookie);
+ thread_exit();
+}
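
Illustrative sketch (not part of the patch): the deadlock described in the block comment before arc_adapt_thread() is avoided by only ever *trying* for a hash lock from the reclaim side. In user-space terms, with a stand-in hash_lock:

    #include <pthread.h>
    #include <stdbool.h>

    /*
     * Reclaim-side rule: never block on a hash lock, because the thread
     * holding it may itself be asleep waiting for reclaim to make progress.
     */
    static bool
    try_evict_header(pthread_mutex_t *hash_lock)
    {
        if (pthread_mutex_trylock(hash_lock) != 0)
            return (false);     /* busy: skip this header and move on */

        /* ... evict the header here ... */

        pthread_mutex_unlock(hash_lock);
        return (true);
    }
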
+
+static void
+arc_user_evicts_thread(void)
+{
+ callb_cpr_t cpr;
+ fstrans_cookie_t cookie;
+ CALLB_CPR_INIT(&cpr, &arc_user_evicts_lock, callb_generic_cpr, FTAG);
+ cookie = spl_fstrans_mark();
+ mutex_enter(&arc_user_evicts_lock);
+ while (!arc_user_evicts_thread_exit) {
+ mutex_exit(&arc_user_evicts_lock);
+
+ arc_do_user_evicts();
+ /*
+ * This is necessary in order for the mdb ::arc dcmd to
+ * show up-to-date information. Since the ::arc command
+ * does not call the kstat's update function, without
+ * this call, the command may show stale stats for the
+ * anon, mru, mru_ghost, mfu, and mfu_ghost lists. Even
+ * with this change, the data might be up to 1 second
+ * out of date; but that should suffice. The arc_state_t
+ * structures can be queried directly if more accurate
+ * information is needed.
+ */
+ if (arc_ksp != NULL)
+ arc_ksp->ks_update(arc_ksp, KSTAT_READ);
+
+ mutex_enter(&arc_user_evicts_lock);
+
+ /*
+ * Block until signaled, or after one second (we need to
+ * call the arc's kstat update function regularly).
+ */
+ CALLB_CPR_SAFE_BEGIN(&cpr);
+ (void) cv_timedwait_interruptible(&arc_user_evicts_cv,
+ &arc_user_evicts_lock, ddi_get_lbolt() + hz);
+ CALLB_CPR_SAFE_END(&cpr, &arc_user_evicts_lock);
}
- arc_thread_exit = 0;
- cv_broadcast(&arc_reclaim_thr_cv);
- CALLB_CPR_EXIT(&cpr); /* drops arc_reclaim_thr_lock */
+ arc_user_evicts_thread_exit = FALSE;
+ cv_broadcast(&arc_user_evicts_cv);
+ CALLB_CPR_EXIT(&cpr); /* drops arc_user_evicts_lock */
spl_fstrans_unmark(cookie);
thread_exit();
}
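
Illustrative sketch (not part of the patch): both arc_adapt_thread() and arc_user_evicts_thread() share the same shape: do the work with the lock dropped, then cv_timedwait so periodic work (the kstat update here) still happens at least once a second even without a signal. A condensed pthreads equivalent, not the kernel primitives:

    #include <pthread.h>
    #include <stdbool.h>
    #include <time.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
    static bool exit_requested;

    static void *
    worker(void *arg)
    {
        (void) arg;
        pthread_mutex_lock(&lock);
        while (!exit_requested) {
            pthread_mutex_unlock(&lock);

            /* ... periodic work runs with the lock dropped ... */

            pthread_mutex_lock(&lock);

            /* Sleep until signalled, or for one second at most. */
            struct timespec deadline;
            clock_gettime(CLOCK_REALTIME, &deadline);
            deadline.tv_sec += 1;
            (void) pthread_cond_timedwait(&cv, &lock, &deadline);
        }
        pthread_mutex_unlock(&lock);
        return (NULL);
    }
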
@@ -2759,9 +3187,11 @@ __arc_shrinker_func(struct shrinker *shrink, struct shrink_control *sc)
return (SHRINK_STOP);
/* Reclaim in progress */
- if (mutex_tryenter(&arc_reclaim_thr_lock) == 0)
+ if (mutex_tryenter(&arc_reclaim_lock) == 0)
return (SHRINK_STOP);
+ mutex_exit(&arc_reclaim_lock);
+
/*
* Evict the requested number of pages by shrinking arc_c the
* requested amount. If there is nothing left to evict just
@@ -2781,6 +3211,11 @@ __arc_shrinker_func(struct shrinker *shrink, struct shrink_control *sc)
}
/*
+ * We've reaped what we can, wake up threads.
+ */
+ cv_broadcast(&arc_reclaim_waiters_cv);
+
+ /*
* When direct reclaim is observed it usually indicates a rapid
* increase in memory pressure. This occurs because the kswapd
* threads were unable to asynchronously keep enough free memory
@@ -2795,8 +3230,6 @@ __arc_shrinker_func(struct shrinker *shrink, struct shrink_control *sc)
ARCSTAT_BUMP(arcstat_memory_direct_count);
}
- mutex_exit(&arc_reclaim_thr_lock);
-
return (pages);
}
SPL_SHRINKER_CALLBACK_WRAPPER(arc_shrinker_func);
@@ -2871,43 +3304,25 @@ arc_adapt(int bytes, arc_state_t *state)
}
/*
- * Check if the cache has reached its limits and eviction is required
- * prior to insert.
+ * Check if arc_size has grown past our upper threshold, determined by
+ * zfs_arc_overflow_shift.
*/
-static int
-arc_evict_needed(arc_buf_contents_t type)
+static boolean_t
+arc_is_overflowing(void)
{
- if (type == ARC_BUFC_METADATA && arc_meta_used >= arc_meta_limit)
- return (1);
-
- if (arc_no_grow)
- return (1);
+ /* Always allow at least one block of overflow */
+ uint64_t overflow = MAX(SPA_MAXBLOCKSIZE,
+ arc_c >> zfs_arc_overflow_shift);
- return (arc_size > arc_c);
+ return (arc_size >= arc_c + overflow);
}
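
Illustrative sketch (not part of the patch): arc_is_overflowing() only trips once arc_size exceeds arc_c by MAX(one maximum-sized block, arc_c >> zfs_arc_overflow_shift). Plugging in example numbers, and taking the shift as 8 purely for illustration (see the zfs_arc_overflow_shift module parameter for the real value):

    #include <stdio.h>
    #include <stdint.h>

    #define MAX(a, b)   ((a) > (b) ? (a) : (b))

    int
    main(void)
    {
        uint64_t max_block = 16ULL << 20;   /* stand-in for SPA_MAXBLOCKSIZE */
        uint64_t arc_c = 4ULL << 30;        /* 4 GiB target size */
        int overflow_shift = 8;             /* assumed for illustration */

        uint64_t overflow = MAX(max_block, arc_c >> overflow_shift);
        uint64_t arc_size = arc_c + (20ULL << 20);  /* 20 MiB over target */

        printf("slack = %llu MiB, overflowing = %d\n",
            (unsigned long long)(overflow >> 20),
            arc_size >= arc_c + overflow);
        return (0);
    }
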
/*
- * The buffer, supplied as the first argument, needs a data block.
- * So, if we are at cache max, determine which cache should be victimized.
- * We have the following cases:
- *
- * 1. Insert for MRU, p > sizeof(arc_anon + arc_mru) ->
- * In this situation if we're out of space, but the resident size of the MFU is
- * under the limit, victimize the MFU cache to satisfy this insertion request.
- *
- * 2. Insert for MRU, p <= sizeof(arc_anon + arc_mru) ->
- * Here, we've used up all of the available space for the MRU, so we need to
- * evict from our own cache instead. Evict from the set of resident MRU
- * entries.
- *
- * 3. Insert for MFU (c - p) > sizeof(arc_mfu) ->
- * c minus p represents the MFU space in the cache, since p is the size of the
- * cache that is dedicated to the MRU. In this situation there's still space on
- * the MFU side, so the MRU side needs to be victimized.
- *
- * 4. Insert for MFU (c - p) < sizeof(arc_mfu) ->
- * MFU's resident set is consuming more space than it has been allotted. In
- * this situation, we must victimize our own cache, the MFU, for this insertion.
+ * The buffer, supplied as the first argument, needs a data block. If we
+ * are hitting the hard limit for the cache size, we must sleep, waiting
+ * for the eviction thread to catch up. If we're past the target size
+ * but below the hard limit, we'll only signal the reclaim thread and
+ * continue on.
*/
static void
arc_get_data_buf(arc_buf_t *buf)
@@ -2915,91 +3330,54 @@ arc_get_data_buf(arc_buf_t *buf)
arc_state_t *state = buf->b_hdr->b_l1hdr.b_state;
uint64_t size = buf->b_hdr->b_size;
arc_buf_contents_t type = arc_buf_type(buf->b_hdr);
- arc_buf_contents_t evict = ARC_BUFC_DATA;
- boolean_t recycle = TRUE;
arc_adapt(size, state);
/*
- * We have not yet reached cache maximum size,
- * just allocate a new buffer.
+ * If arc_size is currently overflowing, and has grown past our
+ * upper limit, we must be adding data faster than the evict
+ * thread can evict. Thus, to ensure we don't compound the
+ * problem by adding more data and forcing arc_size to grow even
+ * further past its target size, we halt and wait for the
+ * eviction thread to catch up.
+ *
+ * It's also possible that the reclaim thread is unable to evict
+ * enough buffers to get arc_size below the overflow limit (e.g.
+ * due to buffers being un-evictable, or hash lock collisions).
+ * In this case, we want to proceed regardless if we're
+ * overflowing; thus we don't use a while loop here.
*/
- if (!arc_evict_needed(type)) {
- if (type == ARC_BUFC_METADATA) {
- buf->b_data = zio_buf_alloc(size);
- arc_space_consume(size, ARC_SPACE_META);
- } else {
- ASSERT(type == ARC_BUFC_DATA);
- buf->b_data = zio_data_buf_alloc(size);
- arc_space_consume(size, ARC_SPACE_DATA);
+ if (arc_is_overflowing()) {
+ mutex_enter(&arc_reclaim_lock);
+
+ /*
+ * Now that we've acquired the lock, we may no longer be
+ * over the overflow limit, so let's check.
+ *
+ * We're ignoring the case of spurious wake ups. If that
+ * were to happen, it'd let this thread consume an ARC
+ * buffer before it should have (i.e. before we're under
+ * the overflow limit and were signalled by the reclaim
+ * thread). As long as that is a rare occurrence, it
+ * shouldn't cause any harm.
+ */
+ if (arc_is_overflowing()) {
+ cv_signal(&arc_reclaim_thread_cv);
+ cv_wait(&arc_reclaim_waiters_cv, &arc_reclaim_lock);
}
- goto out;
- }
- /*
- * If we are prefetching from the mfu ghost list, this buffer
- * will end up on the mru list; so steal space from there.
- */
- if (state == arc_mfu_ghost)
- state = HDR_PREFETCH(buf->b_hdr) ? arc_mru : arc_mfu;
- else if (state == arc_mru_ghost)
- state = arc_mru;
-
- if (state == arc_mru || state == arc_anon) {
- uint64_t mru_used = arc_anon->arcs_size + arc_mru->arcs_size;
- state = (arc_mfu->arcs_lsize[type] >= size &&
- arc_p > mru_used) ? arc_mfu : arc_mru;
- } else {
- /* MFU cases */
- uint64_t mfu_space = arc_c - arc_p;
- state = (arc_mru->arcs_lsize[type] >= size &&
- mfu_space > arc_mfu->arcs_size) ? arc_mru : arc_mfu;
+ mutex_exit(&arc_reclaim_lock);
}
- /*
- * Evict data buffers prior to metadata buffers, unless we're
- * over the metadata limit and adding a metadata buffer.
- */
if (type == ARC_BUFC_METADATA) {
- if (arc_meta_used >= arc_meta_limit)
- evict = ARC_BUFC_METADATA;
- else
- /*
- * In this case, we're evicting data while
- * adding metadata. Thus, to prevent recycling a
- * data buffer into a metadata buffer, recycling
- * is disabled in the following arc_evict call.
- */
- recycle = FALSE;
+ buf->b_data = zio_buf_alloc(size);
+ arc_space_consume(size, ARC_SPACE_META);
+ } else {
+ ASSERT(type == ARC_BUFC_DATA);
+ buf->b_data = zio_data_buf_alloc(size);
+ arc_space_consume(size, ARC_SPACE_DATA);
}
- if ((buf->b_data = arc_evict(state, 0, size, recycle, evict)) == NULL) {
- if (type == ARC_BUFC_METADATA) {
- buf->b_data = zio_buf_alloc(size);
- arc_space_consume(size, ARC_SPACE_META);
-
- /*
- * If we are unable to recycle an existing meta buffer
- * signal the reclaim thread. It will notify users
- * via the prune callback to drop references. The
- * prune callback in run in the context of the reclaim
- * thread to avoid deadlocking on the hash_lock.
- * Of course, only do this when recycle is true.
- */
- if (recycle)
- cv_signal(&arc_reclaim_thr_cv);
- } else {
- ASSERT(type == ARC_BUFC_DATA);
- buf->b_data = zio_data_buf_alloc(size);
- arc_space_consume(size, ARC_SPACE_DATA);
- }
-
- /* Only bump this if we tried to recycle and failed */
- if (recycle)
- ARCSTAT_BUMP(arcstat_recycle_miss);
- }
- ASSERT(buf->b_data != NULL);
-out:
/*
* Update the state size. Note that ghost states have a
* "ghost size" and so don't need to be updated.
@@ -3008,7 +3386,17 @@ out:
arc_buf_hdr_t *hdr = buf->b_hdr;
atomic_add_64(&hdr->b_l1hdr.b_state->arcs_size, size);
- if (list_link_active(&hdr->b_l1hdr.b_arc_node)) {
+
+ /*
+ * If this is reached via arc_read, the link is
+ * protected by the hash lock. If reached via
+ * arc_buf_alloc, the header should not be accessed by
+ * any other thread. And, if reached via arc_read_done,
+ * the hash lock will protect it if it's found in the
+ * hash table; otherwise no other thread should be
+ * trying to [add|remove]_reference it.
+ */
+ if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
atomic_add_64(&hdr->b_l1hdr.b_state->arcs_lsize[type],
size);
@@ -3017,8 +3405,7 @@ out:
* If we are growing the cache, and we are adding anonymous
* data, and we have outgrown arc_p, update arc_p
*/
- if (!zfs_arc_p_aggressive_disable &&
- arc_size < arc_c && hdr->b_l1hdr.b_state == arc_anon &&
+ if (arc_size < arc_c && hdr->b_l1hdr.b_state == arc_anon &&
arc_anon->arcs_size + arc_mru->arcs_size > arc_p)
arc_p = MIN(arc_c, arc_p + size);
}
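
Illustrative sketch (not part of the patch): the throttle added to arc_get_data_buf() above is a double-check: test arc_is_overflowing() locklessly, and only if it still holds under arc_reclaim_lock, signal the reclaim thread and wait once (deliberately not in a loop) on the waiters CV. The handshake in pthreads form, with stand-in names:

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t reclaim_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t reclaim_thread_cv = PTHREAD_COND_INITIALIZER;
    static pthread_cond_t reclaim_waiters_cv = PTHREAD_COND_INITIALIZER;
    static bool overflowing;            /* set/cleared by the reclaim side */

    static bool
    cache_is_overflowing(void)
    {
        return (overflowing);           /* stand-in for arc_is_overflowing() */
    }

    static void
    throttle_allocation(void)
    {
        if (!cache_is_overflowing())
            return;

        pthread_mutex_lock(&reclaim_lock);

        /* Re-check under the lock; reclaim may have caught up already. */
        if (cache_is_overflowing()) {
            pthread_cond_signal(&reclaim_thread_cv);
            /*
             * Wait once, not in a loop: if reclaim cannot get below
             * the limit we still want to make forward progress.
             */
            pthread_cond_wait(&reclaim_waiters_cv, &reclaim_lock);
        }
        pthread_mutex_unlock(&reclaim_lock);
    }
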
@@ -3061,7 +3448,8 @@ arc_access(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
*/
if (HDR_PREFETCH(hdr)) {
if (refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) {
- ASSERT(list_link_active(
+ /* link protected by hash lock */
+ ASSERT(multilist_link_active(
&hdr->b_l1hdr.b_arc_node));
} else {
hdr->b_flags &= ~ARC_FLAG_PREFETCH;
@@ -3125,7 +3513,8 @@ arc_access(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
*/
if ((HDR_PREFETCH(hdr)) != 0) {
ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
- ASSERT(list_link_active(&hdr->b_l1hdr.b_arc_node));
+ /* link protected by hash_lock */
+ ASSERT(multilist_link_active(&hdr->b_l1hdr.b_arc_node));
}
atomic_inc_32(&hdr->b_l1hdr.b_mfu_hits);
ARCSTAT_BUMP(arcstat_mfu_hits);
@@ -3517,7 +3906,7 @@ top:
ASSERT(GHOST_STATE(hdr->b_l1hdr.b_state));
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
- ASSERT(hdr->b_l1hdr.b_buf == NULL);
+ ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
/* if this is a prefetch, we don't have a reference */
if (*arc_flags & ARC_FLAG_PREFETCH)
@@ -3813,7 +4202,7 @@ arc_clear_callback(arc_buf_t *buf)
if (hdr->b_l1hdr.b_datacnt > 1) {
mutex_exit(&buf->b_evict_lock);
- arc_buf_destroy(buf, FALSE, TRUE);
+ arc_buf_destroy(buf, TRUE);
} else {
ASSERT(buf == hdr->b_l1hdr.b_buf);
hdr->b_flags |= ARC_FLAG_BUF_AVAILABLE;
@@ -3839,13 +4228,15 @@ arc_release(arc_buf_t *buf, void *tag)
arc_buf_hdr_t *hdr = buf->b_hdr;
/*
- * It would be nice to assert that if it's DMU metadata (level >
+ * It would be nice to assert that if its DMU metadata (level >
* 0 || it's the dnode file), then it must be syncing context.
* But we don't know that information at this level.
*/
mutex_enter(&buf->b_evict_lock);
+ ASSERT(HDR_HAS_L1HDR(hdr));
+
/*
* We don't grab the hash lock prior to this check, because if
* the buffer's header is in the arc_anon state, it won't be
@@ -3892,6 +4283,13 @@ arc_release(arc_buf_t *buf, void *tag)
mutex_enter(&hdr->b_l2hdr.b_dev->l2ad_mtx);
list_remove(&hdr->b_l2hdr.b_dev->l2ad_buflist, hdr);
+
+ /*
+ * We don't want to leak the b_tmp_cdata buffer that was
+ * allocated in l2arc_write_buffers()
+ */
+ arc_buf_l2_cdata_free(hdr);
+
mutex_exit(&hdr->b_l2hdr.b_dev->l2ad_mtx);
hdr->b_flags &= ~ARC_FLAG_HAS_L2HDR;
@@ -3964,6 +4362,7 @@ arc_release(arc_buf_t *buf, void *tag)
nhdr->b_l1hdr.b_datacnt = 1;
nhdr->b_l1hdr.b_state = arc_anon;
nhdr->b_l1hdr.b_arc_access = 0;
+ nhdr->b_l1hdr.b_tmp_cdata = NULL;
nhdr->b_freeze_cksum = NULL;
(void) refcount_add(&nhdr->b_l1hdr.b_refcnt, tag);
@@ -3973,8 +4372,8 @@ arc_release(arc_buf_t *buf, void *tag)
} else {
mutex_exit(&buf->b_evict_lock);
ASSERT(refcount_count(&hdr->b_l1hdr.b_refcnt) == 1);
- /* protected by hash lock */
- ASSERT(!list_link_active(&hdr->b_l1hdr.b_arc_node));
+ /* protected by hash lock, or hdr is on arc_anon */
+ ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
hdr->b_l1hdr.b_mru_hits = 0;
hdr->b_l1hdr.b_mru_ghost_hits = 0;
@@ -4297,11 +4696,50 @@ arc_kstat_update(kstat_t *ksp, int rw)
return (0);
}
+/*
+ * This function *must* return indices evenly distributed between all
+ * sublists of the multilist. This is needed due to how the ARC eviction
+ * code is laid out; arc_evict_state() assumes ARC buffers are evenly
+ * distributed between all sublists and uses this assumption when
+ * deciding which sublist to evict from and how much to evict from it.
+ */
+unsigned int
+arc_state_multilist_index_func(multilist_t *ml, void *obj)
+{
+ arc_buf_hdr_t *hdr = obj;
+
+ /*
+ * We rely on b_dva to generate evenly distributed index
+ * numbers using buf_hash below. So, as an added precaution,
+ * let's make sure we never add empty buffers to the arc lists.
+ */
+ ASSERT(!BUF_EMPTY(hdr));
+
+ /*
+ * The assumption here is that the hash value for a given
+ * arc_buf_hdr_t will remain constant throughout its lifetime
+ * (i.e. its b_spa, b_dva, and b_birth fields don't change).
+ * Thus, we don't need to store the header's sublist index
+ * on insertion, as this index can be recalculated on removal.
+ *
+ * Also, the low order bits of the hash value are thought to be
+ * distributed evenly. Otherwise, in the case that the multilist
+ * has a power-of-two number of sublists, each sublist's usage
+ * would not be evenly distributed.
+ */
+ return (buf_hash(hdr->b_spa, &hdr->b_dva, hdr->b_birth) %
+ multilist_get_num_sublists(ml));
+}
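
Illustrative sketch (not part of the patch): the even-distribution requirement on arc_state_multilist_index_func() is easy to sanity-check by histogramming the index function. A toy example with a stand-in 64-bit mixer in place of buf_hash():

    #include <stdio.h>
    #include <stdint.h>

    #define NUM_SUBLISTS    8

    /* Stand-in mixer; the real code uses buf_hash(spa, dva, birth). */
    static uint64_t
    mix64(uint64_t x)
    {
        x ^= x >> 33; x *= 0xff51afd7ed558ccdULL;
        x ^= x >> 33; x *= 0xc4ceb9fe1a85ec53ULL;
        return (x ^ (x >> 33));
    }

    int
    main(void)
    {
        unsigned histogram[NUM_SUBLISTS] = { 0 };

        for (uint64_t i = 0; i < 100000; i++)
            histogram[mix64(i) % NUM_SUBLISTS]++;

        /* Counts should come out roughly equal across sublists. */
        for (int s = 0; s < NUM_SUBLISTS; s++)
            printf("sublist %d: %u entries\n", s, histogram[s]);
        return (0);
    }
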
+
void
arc_init(void)
{
- mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL);
- cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL);
+ mutex_init(&arc_reclaim_lock, NULL, MUTEX_DEFAULT, NULL);
+ cv_init(&arc_reclaim_thread_cv, NULL, CV_DEFAULT, NULL);
+ cv_init(&arc_reclaim_waiters_cv, NULL, CV_DEFAULT, NULL);
+
+ mutex_init(&arc_user_evicts_lock, NULL, MUTEX_DEFAULT, NULL);
+ cv_init(&arc_user_evicts_cv, NULL, CV_DEFAULT, NULL);
/* Convert seconds to clock ticks */
zfs_arc_min_prefetch_lifespan = 1 * hz;
@@ -4349,6 +4787,9 @@ arc_init(void)
if (zfs_arc_meta_limit > 0 && zfs_arc_meta_limit <= arc_c_max)
arc_meta_limit = zfs_arc_meta_limit;
+ if (zfs_arc_num_sublists_per_state < 1)
+ zfs_arc_num_sublists_per_state = num_online_cpus();
+
/* if kmem_flags are set, lets try to use less memory */
if (kmem_debugging())
arc_c = arc_c / 2;
@@ -4363,43 +4804,46 @@ arc_init(void)
arc_l2c_only = &ARC_l2c_only;
arc_size = 0;
- mutex_init(&arc_anon->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
- mutex_init(&arc_mru->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
- mutex_init(&arc_mru_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
- mutex_init(&arc_mfu->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
- mutex_init(&arc_mfu_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
- mutex_init(&arc_l2c_only->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
-
- list_create(&arc_mru->arcs_list[ARC_BUFC_METADATA],
+ multilist_create(&arc_mru->arcs_list[ARC_BUFC_METADATA],
sizeof (arc_buf_hdr_t),
- offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node));
- list_create(&arc_mru->arcs_list[ARC_BUFC_DATA],
+ offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
+ zfs_arc_num_sublists_per_state, arc_state_multilist_index_func);
+ multilist_create(&arc_mru->arcs_list[ARC_BUFC_DATA],
sizeof (arc_buf_hdr_t),
- offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node));
- list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA],
+ offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
+ zfs_arc_num_sublists_per_state, arc_state_multilist_index_func);
+ multilist_create(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA],
sizeof (arc_buf_hdr_t),
- offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node));
- list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA],
+ offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
+ zfs_arc_num_sublists_per_state, arc_state_multilist_index_func);
+ multilist_create(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA],
sizeof (arc_buf_hdr_t),
- offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node));
- list_create(&arc_mfu->arcs_list[ARC_BUFC_METADATA],
+ offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
+ zfs_arc_num_sublists_per_state, arc_state_multilist_index_func);
+ multilist_create(&arc_mfu->arcs_list[ARC_BUFC_METADATA],
sizeof (arc_buf_hdr_t),
- offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node));
- list_create(&arc_mfu->arcs_list[ARC_BUFC_DATA],
+ offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
+ zfs_arc_num_sublists_per_state, arc_state_multilist_index_func);
+ multilist_create(&arc_mfu->arcs_list[ARC_BUFC_DATA],
sizeof (arc_buf_hdr_t),
- offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node));
- list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA],
+ offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
+ zfs_arc_num_sublists_per_state, arc_state_multilist_index_func);
+ multilist_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA],
sizeof (arc_buf_hdr_t),
- offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node));
- list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA],
+ offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
+ zfs_arc_num_sublists_per_state, arc_state_multilist_index_func);
+ multilist_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA],
sizeof (arc_buf_hdr_t),
- offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node));
- list_create(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA],
+ offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
+ zfs_arc_num_sublists_per_state, arc_state_multilist_index_func);
+ multilist_create(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA],
sizeof (arc_buf_hdr_t),
- offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node));
- list_create(&arc_l2c_only->arcs_list[ARC_BUFC_DATA],
+ offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
+ zfs_arc_num_sublists_per_state, arc_state_multilist_index_func);
+ multilist_create(&arc_l2c_only->arcs_list[ARC_BUFC_DATA],
sizeof (arc_buf_hdr_t),
- offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node));
+ offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
+ zfs_arc_num_sublists_per_state, arc_state_multilist_index_func);
arc_anon->arcs_state = ARC_STATE_ANON;
arc_mru->arcs_state = ARC_STATE_MRU;
@@ -4410,12 +4854,12 @@ arc_init(void)
buf_init();
- arc_thread_exit = 0;
+ arc_reclaim_thread_exit = FALSE;
+ arc_user_evicts_thread_exit = FALSE;
list_create(&arc_prune_list, sizeof (arc_prune_t),
offsetof(arc_prune_t, p_node));
arc_eviction_list = NULL;
mutex_init(&arc_prune_mtx, NULL, MUTEX_DEFAULT, NULL);
- mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL);
bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t));
arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED,
@@ -4430,6 +4874,9 @@ arc_init(void)
(void) thread_create(NULL, 0, arc_adapt_thread, NULL, 0, &p0,
TS_RUN, minclsyspri);
+ (void) thread_create(NULL, 0, arc_user_evicts_thread, NULL, 0, &p0,
+ TS_RUN, minclsyspri);
+
arc_dead = FALSE;
arc_warm = B_FALSE;
@@ -4458,17 +4905,36 @@ arc_fini(void)
{
arc_prune_t *p;
- mutex_enter(&arc_reclaim_thr_lock);
#ifdef _KERNEL
spl_unregister_shrinker(&arc_shrinker);
#endif /* _KERNEL */
- arc_thread_exit = 1;
- while (arc_thread_exit != 0)
- cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock);
- mutex_exit(&arc_reclaim_thr_lock);
+ mutex_enter(&arc_reclaim_lock);
+ arc_reclaim_thread_exit = TRUE;
+ /*
+ * The reclaim thread will set arc_reclaim_thread_exit back to
+ * FALSE when it is finished exiting; we're waiting for that.
+ */
+ while (arc_reclaim_thread_exit) {
+ cv_signal(&arc_reclaim_thread_cv);
+ cv_wait(&arc_reclaim_thread_cv, &arc_reclaim_lock);
+ }
+ mutex_exit(&arc_reclaim_lock);
+
+ mutex_enter(&arc_user_evicts_lock);
+ arc_user_evicts_thread_exit = TRUE;
+ /*
+ * The user evicts thread will set arc_user_evicts_thread_exit
+ * to FALSE when it is finished exiting; we're waiting for that.
+ */
+ while (arc_user_evicts_thread_exit) {
+ cv_signal(&arc_user_evicts_cv);
+ cv_wait(&arc_user_evicts_cv, &arc_user_evicts_lock);
+ }
+ mutex_exit(&arc_user_evicts_lock);
- arc_flush(NULL);
+ /* Use TRUE to ensure *all* buffers are evicted */
+ arc_flush(NULL, TRUE);
arc_dead = TRUE;
@@ -4488,25 +4954,23 @@ arc_fini(void)
list_destroy(&arc_prune_list);
mutex_destroy(&arc_prune_mtx);
- mutex_destroy(&arc_eviction_mtx);
- mutex_destroy(&arc_reclaim_thr_lock);
- cv_destroy(&arc_reclaim_thr_cv);
-
- list_destroy(&arc_mru->arcs_list[ARC_BUFC_METADATA]);
- list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]);
- list_destroy(&arc_mfu->arcs_list[ARC_BUFC_METADATA]);
- list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA]);
- list_destroy(&arc_mru->arcs_list[ARC_BUFC_DATA]);
- list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA]);
- list_destroy(&arc_mfu->arcs_list[ARC_BUFC_DATA]);
- list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA]);
-
- mutex_destroy(&arc_anon->arcs_mtx);
- mutex_destroy(&arc_mru->arcs_mtx);
- mutex_destroy(&arc_mru_ghost->arcs_mtx);
- mutex_destroy(&arc_mfu->arcs_mtx);
- mutex_destroy(&arc_mfu_ghost->arcs_mtx);
- mutex_destroy(&arc_l2c_only->arcs_mtx);
+ mutex_destroy(&arc_reclaim_lock);
+ cv_destroy(&arc_reclaim_thread_cv);
+ cv_destroy(&arc_reclaim_waiters_cv);
+
+ mutex_destroy(&arc_user_evicts_lock);
+ cv_destroy(&arc_user_evicts_cv);
+
+ multilist_destroy(&arc_mru->arcs_list[ARC_BUFC_METADATA]);
+ multilist_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]);
+ multilist_destroy(&arc_mfu->arcs_list[ARC_BUFC_METADATA]);
+ multilist_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA]);
+ multilist_destroy(&arc_mru->arcs_list[ARC_BUFC_DATA]);
+ multilist_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA]);
+ multilist_destroy(&arc_mfu->arcs_list[ARC_BUFC_DATA]);
+ multilist_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA]);
+ multilist_destroy(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA]);
+ multilist_destroy(&arc_l2c_only->arcs_list[ARC_BUFC_DATA]);
buf_fini();
@@ -4835,34 +5299,62 @@ l2arc_write_done(zio_t *zio)
if (zio->io_error != 0)
ARCSTAT_BUMP(arcstat_l2_writes_error);
- mutex_enter(&dev->l2ad_mtx);
-
/*
* All writes completed, or an error was hit.
*/
+top:
+ mutex_enter(&dev->l2ad_mtx);
for (hdr = list_prev(buflist, head); hdr; hdr = hdr_prev) {
hdr_prev = list_prev(buflist, hdr);
hash_lock = HDR_LOCK(hdr);
+
+ /*
+ * We cannot use mutex_enter or else we can deadlock
+ * with l2arc_write_buffers (due to swapping the order in
+ * which the hash lock and l2ad_mtx are taken).
+ */
if (!mutex_tryenter(hash_lock)) {
/*
- * This buffer misses out. It may be in a stage
- * of eviction. Its ARC_FLAG_L2_WRITING flag will be
- * left set, denying reads to this buffer.
+ * Missed the hash lock. We must retry so we
+ * don't leave the ARC_FLAG_L2_WRITING bit set.
*/
- ARCSTAT_BUMP(arcstat_l2_writes_hdr_miss);
- continue;
+ ARCSTAT_BUMP(arcstat_l2_writes_lock_retry);
+
+ /*
+ * We don't want to rescan the headers we've
+ * already marked as having been written out, so
+ * we reinsert the head node so we can pick up
+ * where we left off.
+ */
+ list_remove(buflist, head);
+ list_insert_after(buflist, hdr, head);
+
+ mutex_exit(&dev->l2ad_mtx);
+
+ /*
+ * We wait for the hash lock to become available
+ * to try and prevent busy waiting, and increase
+ * the chance we'll be able to acquire the lock
+ * the next time around.
+ */
+ mutex_enter(hash_lock);
+ mutex_exit(hash_lock);
+ goto top;
}
/*
- * It's possible that this buffer got evicted from the L1 cache
- * before we grabbed the vdev + hash locks, in which case
- * arc_hdr_realloc freed b_tmp_cdata for us if it was allocated.
- * Only free the buffer if we still have an L1 hdr.
+ * We could not have been moved into the arc_l2c_only
+ * state while in-flight due to our ARC_FLAG_L2_WRITING
+ * bit being set. Let's just ensure that's being enforced.
+ */
+ ASSERT(HDR_HAS_L1HDR(hdr));
+
+ /*
+ * We may have allocated a buffer for L2ARC compression,
+ * we must release it to avoid leaking this data.
*/
- if (HDR_HAS_L1HDR(hdr) && hdr->b_l1hdr.b_tmp_cdata != NULL &&
- HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF)
- l2arc_release_cdata_buf(hdr);
+ l2arc_release_cdata_buf(hdr);
if (zio->io_error != 0) {
/*
@@ -4876,7 +5368,8 @@ l2arc_write_done(zio_t *zio)
}
/*
- * Allow ARC to begin reads to this L2ARC entry.
+ * Allow ARC to begin reads and ghost list evictions to
+ * this L2ARC entry.
*/
hdr->b_flags &= ~ARC_FLAG_L2_WRITING;
@@ -4984,35 +5477,37 @@ l2arc_read_done(zio_t *zio)
* the data lists. This function returns a locked list, and also returns
* the lock pointer.
*/
-static list_t *
-l2arc_list_locked(int list_num, kmutex_t **lock)
+static multilist_sublist_t *
+l2arc_sublist_lock(int list_num)
{
- list_t *list = NULL;
+ multilist_t *ml = NULL;
+ unsigned int idx;
ASSERT(list_num >= 0 && list_num <= 3);
switch (list_num) {
case 0:
- list = &arc_mfu->arcs_list[ARC_BUFC_METADATA];
- *lock = &arc_mfu->arcs_mtx;
+ ml = &arc_mfu->arcs_list[ARC_BUFC_METADATA];
break;
case 1:
- list = &arc_mru->arcs_list[ARC_BUFC_METADATA];
- *lock = &arc_mru->arcs_mtx;
+ ml = &arc_mru->arcs_list[ARC_BUFC_METADATA];
break;
case 2:
- list = &arc_mfu->arcs_list[ARC_BUFC_DATA];
- *lock = &arc_mfu->arcs_mtx;
+ ml = &arc_mfu->arcs_list[ARC_BUFC_DATA];
break;
case 3:
- list = &arc_mru->arcs_list[ARC_BUFC_DATA];
- *lock = &arc_mru->arcs_mtx;
+ ml = &arc_mru->arcs_list[ARC_BUFC_DATA];
break;
}
- ASSERT(!(MUTEX_HELD(*lock)));
- mutex_enter(*lock);
- return (list);
+ /*
+ * Return a randomly-selected sublist. This is acceptable
+ * because the caller feeds only a little bit of data for each
+ * call (8MB). Subsequent calls will result in different
+ * sublists being selected.
+ */
+ idx = multilist_get_random_index(ml);
+ return (multilist_sublist_lock(ml, idx));
}
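
Illustrative sketch (not part of the patch): picking a random sublist per feed pass spreads the L2ARC writer across the per-sublist locks instead of contending on one state-wide mutex. The idea in miniature, with hypothetical names:

    #include <pthread.h>
    #include <stdlib.h>

    #define NUM_SUBLISTS    8

    static pthread_mutex_t sublist_lock[NUM_SUBLISTS];

    static void
    sublists_init(void)
    {
        for (int i = 0; i < NUM_SUBLISTS; i++)
            pthread_mutex_init(&sublist_lock[i], NULL);
    }

    /* Lock one randomly chosen sublist, as each l2arc feed pass does. */
    static int
    lock_random_sublist(void)
    {
        int idx = rand() % NUM_SUBLISTS;

        pthread_mutex_lock(&sublist_lock[idx]);
        return (idx);       /* caller unlocks sublist_lock[idx] */
    }
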
/*
@@ -5058,6 +5553,12 @@ top:
hdr_prev = list_prev(buflist, hdr);
hash_lock = HDR_LOCK(hdr);
+
+ /*
+ * We cannot use mutex_enter or else we can deadlock
+ * with l2arc_write_buffers (due to swapping the order in
+ * which the hash lock and l2ad_mtx are taken).
+ */
if (!mutex_tryenter(hash_lock)) {
/*
* Missed the hash lock. Retry.
@@ -5122,8 +5623,9 @@ top:
hdr->b_flags &= ~ARC_FLAG_HAS_L2HDR;
list_remove(buflist, hdr);
- /* This may have been leftover after a failed write. */
- hdr->b_flags &= ~ARC_FLAG_L2_WRITING;
+ /* Ensure this header has finished being written */
+ ASSERT(!HDR_L2_WRITING(hdr));
+ ASSERT3P(hdr->b_l1hdr.b_tmp_cdata, ==, NULL);
}
mutex_exit(hash_lock);
}
@@ -5149,11 +5651,9 @@ l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz,
boolean_t *headroom_boost)
{
arc_buf_hdr_t *hdr, *hdr_prev, *head;
- list_t *list;
uint64_t write_asize, write_psize, write_sz, headroom,
buf_compress_minsz;
void *buf_data;
- kmutex_t *list_lock = NULL;
boolean_t full;
l2arc_write_callback_t *cb;
zio_t *pio, *wzio;
@@ -5182,12 +5682,10 @@ l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz,
/*
* Copy buffers for L2ARC writing.
*/
- mutex_enter(&dev->l2ad_mtx);
for (try = 0; try <= 3; try++) {
+ multilist_sublist_t *mls = l2arc_sublist_lock(try);
uint64_t passed_sz = 0;
- list = l2arc_list_locked(try, &list_lock);
-
/*
* L2ARC fast warmup.
*
@@ -5195,9 +5693,9 @@ l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz,
* head of the ARC lists rather than the tail.
*/
if (arc_warm == B_FALSE)
- hdr = list_head(list);
+ hdr = multilist_sublist_head(mls);
else
- hdr = list_tail(list);
+ hdr = multilist_sublist_tail(mls);
headroom = target_sz * l2arc_headroom;
if (do_headroom_boost)
@@ -5208,9 +5706,9 @@ l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz,
uint64_t buf_sz;
if (arc_warm == B_FALSE)
- hdr_prev = list_next(list, hdr);
+ hdr_prev = multilist_sublist_next(mls, hdr);
else
- hdr_prev = list_prev(list, hdr);
+ hdr_prev = multilist_sublist_prev(mls, hdr);
hash_lock = HDR_LOCK(hdr);
if (!mutex_tryenter(hash_lock)) {
@@ -5246,7 +5744,9 @@ l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz,
* l2arc_write_done() can find where the
* write buffers begin without searching.
*/
+ mutex_enter(&dev->l2ad_mtx);
list_insert_head(&dev->l2ad_buflist, head);
+ mutex_exit(&dev->l2ad_mtx);
cb = kmem_alloc(sizeof (l2arc_write_callback_t),
KM_SLEEP);
@@ -5278,7 +5778,9 @@ l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz,
buf_sz = hdr->b_size;
hdr->b_flags |= ARC_FLAG_HAS_L2HDR;
+ mutex_enter(&dev->l2ad_mtx);
list_insert_head(&dev->l2ad_buflist, hdr);
+ mutex_exit(&dev->l2ad_mtx);
/*
* Compute and store the buffer cksum before
@@ -5292,7 +5794,7 @@ l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz,
write_sz += buf_sz;
}
- mutex_exit(list_lock);
+ multilist_sublist_unlock(mls);
if (full == B_TRUE)
break;
@@ -5301,12 +5803,13 @@ l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz,
/* No buffers selected for writing? */
if (pio == NULL) {
ASSERT0(write_sz);
- mutex_exit(&dev->l2ad_mtx);
ASSERT(!HDR_HAS_L1HDR(head));
kmem_cache_free(hdr_l2only_cache, head);
return (0);
}
+ mutex_enter(&dev->l2ad_mtx);
+
/*
* Now start writing the buffers. We're starting at the write head
* and work backwards, retracing the course of the buffer selector
@@ -5317,6 +5820,14 @@ l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz,
uint64_t buf_sz;
/*
+ * We rely on the L1 portion of the header below, so
+ * it's invalid for this header to have been evicted out
+ * of the ghost cache, prior to being written out. The
+ * ARC_FLAG_L2_WRITING bit ensures this won't happen.
+ */
+ ASSERT(HDR_HAS_L1HDR(hdr));
+
+ /*
* We shouldn't need to lock the buffer here, since we flagged
* it as ARC_FLAG_L2_WRITING in the previous step, but we must
* take care to only access its L2 cache parameters. In
@@ -5538,8 +6049,26 @@ l2arc_decompress_zio(zio_t *zio, arc_buf_hdr_t *hdr, enum zio_compress c)
static void
l2arc_release_cdata_buf(arc_buf_hdr_t *hdr)
{
+ enum zio_compress comp = HDR_GET_COMPRESS(hdr);
+
ASSERT(HDR_HAS_L1HDR(hdr));
- if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_EMPTY) {
+ ASSERT(comp == ZIO_COMPRESS_OFF || L2ARC_IS_VALID_COMPRESS(comp));
+
+ if (comp == ZIO_COMPRESS_OFF) {
+ /*
+ * In this case, b_tmp_cdata points to the same buffer
+ * as the arc_buf_t's b_data field. We don't want to
+ * free it, since the arc_buf_t will handle that.
+ */
+ hdr->b_l1hdr.b_tmp_cdata = NULL;
+ } else if (comp == ZIO_COMPRESS_EMPTY) {
+ /*
+ * In this case, b_tmp_cdata was compressed to an empty
+ * buffer, thus there's nothing to free and b_tmp_cdata
+ * should have been set to NULL in l2arc_write_buffers().
+ */
+ ASSERT3P(hdr->b_l1hdr.b_tmp_cdata, ==, NULL);
+ } else {
/*
* If the data was compressed, then we've allocated a
* temporary buffer for it, so now we need to release it.
@@ -5547,8 +6076,9 @@ l2arc_release_cdata_buf(arc_buf_hdr_t *hdr)
ASSERT(hdr->b_l1hdr.b_tmp_cdata != NULL);
zio_data_buf_free(hdr->b_l1hdr.b_tmp_cdata,
hdr->b_size);
+ hdr->b_l1hdr.b_tmp_cdata = NULL;
}
- hdr->b_l1hdr.b_tmp_cdata = NULL;
+
}
/*
@@ -5834,6 +6364,9 @@ MODULE_PARM_DESC(zfs_arc_max, "Max arc size");
module_param(zfs_arc_meta_limit, ulong, 0644);
MODULE_PARM_DESC(zfs_arc_meta_limit, "Meta limit for arc size");
+module_param(zfs_arc_meta_min, ulong, 0644);
+MODULE_PARM_DESC(zfs_arc_meta_min, "Min arc metadata");
+
module_param(zfs_arc_meta_prune, int, 0644);
MODULE_PARM_DESC(zfs_arc_meta_prune, "Meta objects to scan for prune");
@@ -5865,6 +6398,10 @@ MODULE_PARM_DESC(zfs_arc_memory_throttle_disable, "disable memory throttle");
module_param(zfs_arc_min_prefetch_lifespan, int, 0644);
MODULE_PARM_DESC(zfs_arc_min_prefetch_lifespan, "Min life of prefetch block");
+module_param(zfs_arc_num_sublists_per_state, int, 0644);
+MODULE_PARM_DESC(zfs_arc_num_sublists_per_state,
+ "Number of sublists used in each of the ARC state lists");
+
module_param(l2arc_write_max, ulong, 0644);
MODULE_PARM_DESC(l2arc_write_max, "Max write bytes per interval");
diff --git a/module/zfs/dbuf_stats.c b/module/zfs/dbuf_stats.c
index 5e7eaf1ac..afdf828ed 100644
--- a/module/zfs/dbuf_stats.c
+++ b/module/zfs/dbuf_stats.c
@@ -48,12 +48,12 @@ dbuf_stats_hash_table_headers(char *buf, size_t size)
(void) snprintf(buf, size,
"%-88s | %-124s | %s\n"
"%-16s %-8s %-8s %-8s %-8s %-8s %-8s %-5s %-5s %5s | "
- "%-5s %-5s %-6s %-8s %-6s %-8s %-12s "
+ "%-5s %-5s %-8s %-6s %-8s %-12s "
"%-6s %-6s %-6s %-6s %-6s %-8s %-8s %-8s %-5s | "
"%-6s %-6s %-8s %-8s %-6s %-6s %-5s %-8s %-8s\n",
"dbuf", "arcbuf", "dnode", "pool", "objset", "object", "level",
"blkid", "offset", "dbsize", "meta", "state", "dbholds", "list",
- "atype", "index", "flags", "count", "asize", "access",
+ "atype", "flags", "count", "asize", "access",
"mru", "gmru", "mfu", "gmfu", "l2", "l2_dattr", "l2_asize",
"l2_comp", "aholds", "dtype", "btype", "data_bs", "meta_bs",
"bsize", "lvls", "dholds", "blocks", "dsize");
@@ -77,7 +77,7 @@ __dbuf_stats_hash_table_data(char *buf, size_t size, dmu_buf_impl_t *db)
nwritten = snprintf(buf, size,
"%-16s %-8llu %-8lld %-8lld %-8lld %-8llu %-8llu %-5d %-5d %-5lu | "
- "%-5d %-5d %-6lld 0x%-6x %-6lu %-8llu %-12llu "
+ "%-5d %-5d 0x%-6x %-6lu %-8llu %-12llu "
"%-6lu %-6lu %-6lu %-6lu %-6lu %-8llu %-8llu %-8d %-5lu | "
"%-6d %-6d %-8lu %-8lu %-6llu %-6lu %-5lu %-8llu %-8llu\n",
/* dmu_buf_impl_t */
@@ -94,7 +94,6 @@ __dbuf_stats_hash_table_data(char *buf, size_t size, dmu_buf_impl_t *db)
/* arc_buf_info_t */
abi.abi_state_type,
abi.abi_state_contents,
- (longlong_t)abi.abi_state_index,
abi.abi_flags,
(ulong_t)abi.abi_datacnt,
(u_longlong_t)abi.abi_size,
diff --git a/module/zfs/dsl_pool.c b/module/zfs/dsl_pool.c
index 7d84d8bf4..108fc5299 100644
--- a/module/zfs/dsl_pool.c
+++ b/module/zfs/dsl_pool.c
@@ -317,7 +317,14 @@ dsl_pool_close(dsl_pool_t *dp)
txg_list_destroy(&dp->dp_sync_tasks);
txg_list_destroy(&dp->dp_dirty_dirs);
- arc_flush(dp->dp_spa);
+ /*
+ * We can't set retry to TRUE since we're explicitly specifying
+ * a spa to flush. This is good enough; any missed buffers for
+ * this spa won't cause trouble, and they'll eventually fall
+ * out of the ARC just like any other unused buffer.
+ */
+ arc_flush(dp->dp_spa, FALSE);
+
txg_fini(dp);
dsl_scan_fini(dp);
dmu_buf_user_evict_wait();
diff --git a/module/zfs/multilist.c b/module/zfs/multilist.c
new file mode 100644
index 000000000..e4446ded2
--- /dev/null
+++ b/module/zfs/multilist.c
@@ -0,0 +1,375 @@
+/*
+ * CDDL HEADER START
+ *
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright (c) 2013, 2014 by Delphix. All rights reserved.
+ */
+
+#include <sys/zfs_context.h>
+#include <sys/multilist.h>
+#include <sys/trace_multilist.h>
+
+/* needed for spa_get_random() */
+#include <sys/spa.h>
+
+/*
+ * Given the object contained on the list, return a pointer to the
+ * object's multilist_node_t structure it contains.
+ */
+#ifdef DEBUG
+static multilist_node_t *
+multilist_d2l(multilist_t *ml, void *obj)
+{
+ return ((multilist_node_t *)((char *)obj + ml->ml_offset));
+}
+#endif
+
+/*
+ * Initialize a new multilist using the parameters specified.
+ *
+ * - 'size' denotes the size of the structure containing the
+ * multilist_node_t.
+ * - 'offset' denotes the byte offset of the multilist_node_t within
+ * the structure that contains it.
+ * - 'num' specifies the number of internal sublists to create.
+ * - 'index_func' is used to determine which sublist to insert into
+ * when the multilist_insert() function is called; as well as which
+ * sublist to remove from when multilist_remove() is called. The
+ *   requirements this function must meet are the following:
+ *
+ * - It must always return the same value when called on the same
+ * object (to ensure the object is removed from the list it was
+ * inserted into).
+ *
+ * - It must return a value in the range [0, number of sublists).
+ * The multilist_get_num_sublists() function may be used to
+ * determine the number of sublists in the multilist.
+ *
+ * Also, in order to reduce internal contention between the sublists
+ * during insertion and removal, this function should choose evenly
+ * between all available sublists when inserting. This isn't a hard
+ * requirement, but a general rule of thumb in order to garner the
+ * best multi-threaded performance out of the data structure.
+ */
+void
+multilist_create(multilist_t *ml, size_t size, size_t offset, unsigned int num,
+ multilist_sublist_index_func_t *index_func)
+{
+ int i;
+
+ ASSERT3P(ml, !=, NULL);
+ ASSERT3U(size, >, 0);
+ ASSERT3U(size, >=, offset + sizeof (multilist_node_t));
+ ASSERT3U(num, >, 0);
+ ASSERT3P(index_func, !=, NULL);
+
+ ml->ml_offset = offset;
+ ml->ml_num_sublists = num;
+ ml->ml_index_func = index_func;
+
+ ml->ml_sublists = kmem_zalloc(sizeof (multilist_sublist_t) *
+ ml->ml_num_sublists, KM_SLEEP);
+
+ ASSERT3P(ml->ml_sublists, !=, NULL);
+
+ for (i = 0; i < ml->ml_num_sublists; i++) {
+ multilist_sublist_t *mls = &ml->ml_sublists[i];
+ mutex_init(&mls->mls_lock, NULL, MUTEX_DEFAULT, NULL);
+ list_create(&mls->mls_list, size, offset);
+ }
+}
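
Illustrative sketch (not part of the patch): a minimal consumer of this API, showing the embedded node, a stable index function, and the create/insert/remove/destroy lifecycle. The my_entry_t type, my_index_func(), and my_usage() are hypothetical, and this compiles only inside the module build, not standalone:

    #include <sys/zfs_context.h>
    #include <sys/multilist.h>

    typedef struct my_entry {
        uint64_t me_key;
        multilist_node_t me_node;       /* embedded list linkage */
    } my_entry_t;

    /* Must return the same sublist for a given object every time. */
    static unsigned int
    my_index_func(multilist_t *ml, void *obj)
    {
        my_entry_t *e = obj;

        return ((unsigned int)(e->me_key % multilist_get_num_sublists(ml)));
    }

    static void
    my_usage(multilist_t *ml, my_entry_t *e)
    {
        multilist_create(ml, sizeof (my_entry_t),
            offsetof(my_entry_t, me_node), 4, my_index_func);

        multilist_link_init(&e->me_node);
        multilist_insert(ml, e);        /* lands in sublist me_key % 4 */
        multilist_remove(ml, e);

        multilist_destroy(ml);
    }
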
+
+/*
+ * Destroy the given multilist object, and free up any memory it holds.
+ */
+void
+multilist_destroy(multilist_t *ml)
+{
+ int i;
+
+ ASSERT(multilist_is_empty(ml));
+
+ for (i = 0; i < ml->ml_num_sublists; i++) {
+ multilist_sublist_t *mls = &ml->ml_sublists[i];
+
+ ASSERT(list_is_empty(&mls->mls_list));
+
+ list_destroy(&mls->mls_list);
+ mutex_destroy(&mls->mls_lock);
+ }
+
+ ASSERT3P(ml->ml_sublists, !=, NULL);
+ kmem_free(ml->ml_sublists,
+ sizeof (multilist_sublist_t) * ml->ml_num_sublists);
+
+ ml->ml_num_sublists = 0;
+ ml->ml_offset = 0;
+}
+
+/*
+ * Insert the given object into the multilist.
+ *
+ * This function will insert the object specified into the sublist
+ * determined using the function given at multilist creation time.
+ *
+ * The sublist locks are automatically acquired if not already held, to
+ * ensure consistency when inserting and removing from multiple threads.
+ */
+void
+multilist_insert(multilist_t *ml, void *obj)
+{
+ unsigned int sublist_idx = ml->ml_index_func(ml, obj);
+ multilist_sublist_t *mls;
+ boolean_t need_lock;
+
+ DTRACE_PROBE3(multilist__insert, multilist_t *, ml,
+ unsigned int, sublist_idx, void *, obj);
+
+ ASSERT3U(sublist_idx, <, ml->ml_num_sublists);
+
+ mls = &ml->ml_sublists[sublist_idx];
+
+ /*
+ * Note: Callers may already hold the sublist lock by calling
+ * multilist_sublist_lock(). Here we rely on MUTEX_HELD()
+ * returning TRUE if and only if the current thread holds the
+ * lock. While it's a little ugly to make the lock recursive in
+ * this way, it works and allows the calling code to be much
+ * simpler -- otherwise it would have to pass around a flag
+ * indicating that it already has the lock.
+ */
+ need_lock = !MUTEX_HELD(&mls->mls_lock);
+
+ if (need_lock)
+ mutex_enter(&mls->mls_lock);
+
+ ASSERT(!multilist_link_active(multilist_d2l(ml, obj)));
+
+ multilist_sublist_insert_head(mls, obj);
+
+ if (need_lock)
+ mutex_exit(&mls->mls_lock);
+}
+
+/*
+ * Remove the given object from the multilist.
+ *
+ * This function will remove the object specified from the sublist
+ * determined using the function given at multilist creation time.
+ *
+ * The necessary sublist locks are automatically acquired, to ensure
+ * consistency when inserting and removing from multiple threads.
+ */
+void
+multilist_remove(multilist_t *ml, void *obj)
+{
+ unsigned int sublist_idx = ml->ml_index_func(ml, obj);
+ multilist_sublist_t *mls;
+ boolean_t need_lock;
+
+ DTRACE_PROBE3(multilist__remove, multilist_t *, ml,
+ unsigned int, sublist_idx, void *, obj);
+
+ ASSERT3U(sublist_idx, <, ml->ml_num_sublists);
+
+ mls = &ml->ml_sublists[sublist_idx];
+ /* See comment in multilist_insert(). */
+ need_lock = !MUTEX_HELD(&mls->mls_lock);
+
+ if (need_lock)
+ mutex_enter(&mls->mls_lock);
+
+ ASSERT(multilist_link_active(multilist_d2l(ml, obj)));
+
+ multilist_sublist_remove(mls, obj);
+
+ if (need_lock)
+ mutex_exit(&mls->mls_lock);
+}
+
+/*
+ * Check to see if this multilist object is empty.
+ *
+ * This will return TRUE if it finds all of the sublists of this
+ * multilist to be empty, and FALSE otherwise. Each sublist lock will be
+ * automatically acquired as necessary.
+ *
+ * If concurrent insertions and removals are occurring, the semantics
+ * of this function become a little fuzzy. Instead of locking all
+ * sublists for the duration of the call, each sublist is
+ * only locked as it is individually checked for emptiness. Thus, it's
+ * possible for this function to return TRUE with non-empty sublists at
+ * the time the function returns. This would be due to another thread
+ * inserting into a given sublist, after that specific sublist was checked
+ * and deemed empty, but before all sublists have been checked.
+ */
+int
+multilist_is_empty(multilist_t *ml)
+{
+ int i;
+
+ for (i = 0; i < ml->ml_num_sublists; i++) {
+ multilist_sublist_t *mls = &ml->ml_sublists[i];
+ /* See comment in multilist_insert(). */
+ boolean_t need_lock = !MUTEX_HELD(&mls->mls_lock);
+
+ if (need_lock)
+ mutex_enter(&mls->mls_lock);
+
+ if (!list_is_empty(&mls->mls_list)) {
+ if (need_lock)
+ mutex_exit(&mls->mls_lock);
+
+ return (FALSE);
+ }
+
+ if (need_lock)
+ mutex_exit(&mls->mls_lock);
+ }
+
+ return (TRUE);
+}
+
+/* Return the number of sublists composing this multilist */
+unsigned int
+multilist_get_num_sublists(multilist_t *ml)
+{
+ return (ml->ml_num_sublists);
+}
+
+/* Return a randomly selected, valid sublist index for this multilist */
+unsigned int
+multilist_get_random_index(multilist_t *ml)
+{
+ return (spa_get_random(ml->ml_num_sublists));
+}
+
+/* Lock and return the sublist specified at the given index */
+multilist_sublist_t *
+multilist_sublist_lock(multilist_t *ml, unsigned int sublist_idx)
+{
+ multilist_sublist_t *mls;
+
+ ASSERT3U(sublist_idx, <, ml->ml_num_sublists);
+ mls = &ml->ml_sublists[sublist_idx];
+ mutex_enter(&mls->mls_lock);
+
+ return (mls);
+}
+
+void
+multilist_sublist_unlock(multilist_sublist_t *mls)
+{
+ mutex_exit(&mls->mls_lock);
+}
+
+/*
+ * We're allowing any object to be inserted into this specific sublist,
+ * but this can lead to trouble if multilist_remove() is called to
+ * remove this object. Specifically, if calling ml_index_func on this
+ * object returns a sublist index different from the one passed as
+ * a parameter here, any call to multilist_remove() with this newly
+ * inserted object is undefined! (the call to multilist_remove() will
+ * remove the object from a list that it isn't contained in)
+ */
+void
+multilist_sublist_insert_head(multilist_sublist_t *mls, void *obj)
+{
+ ASSERT(MUTEX_HELD(&mls->mls_lock));
+ list_insert_head(&mls->mls_list, obj);
+}
+
+/* please see comment above multilist_sublist_insert_head */
+void
+multilist_sublist_insert_tail(multilist_sublist_t *mls, void *obj)
+{
+ ASSERT(MUTEX_HELD(&mls->mls_lock));
+ list_insert_tail(&mls->mls_list, obj);
+}
+
+/*
+ * Move the object one element forward in the list.
+ *
+ * This function will move the given object forward in the list (towards
+ * the head) by one object. So, in essence, it will swap its position in
+ * the list with its "prev" pointer. If the given object is already at the
+ * head of the list, it cannot be moved forward any more than it already
+ * is, so no action is taken.
+ *
+ * NOTE: This function **must not** remove any object from the list other
+ * than the object given as the parameter. This is relied upon in
+ * arc_evict_state_impl().
+ */
+void
+multilist_sublist_move_forward(multilist_sublist_t *mls, void *obj)
+{
+ void *prev = list_prev(&mls->mls_list, obj);
+
+ ASSERT(MUTEX_HELD(&mls->mls_lock));
+ ASSERT(!list_is_empty(&mls->mls_list));
+
+ /* 'obj' must be at the head of the list, nothing to do */
+ if (prev == NULL)
+ return;
+
+ list_remove(&mls->mls_list, obj);
+ list_insert_before(&mls->mls_list, prev, obj);
+}
+
+void
+multilist_sublist_remove(multilist_sublist_t *mls, void *obj)
+{
+ ASSERT(MUTEX_HELD(&mls->mls_lock));
+ list_remove(&mls->mls_list, obj);
+}
+
+void *
+multilist_sublist_head(multilist_sublist_t *mls)
+{
+ ASSERT(MUTEX_HELD(&mls->mls_lock));
+ return (list_head(&mls->mls_list));
+}
+
+void *
+multilist_sublist_tail(multilist_sublist_t *mls)
+{
+ ASSERT(MUTEX_HELD(&mls->mls_lock));
+ return (list_tail(&mls->mls_list));
+}
+
+void *
+multilist_sublist_next(multilist_sublist_t *mls, void *obj)
+{
+ ASSERT(MUTEX_HELD(&mls->mls_lock));
+ return (list_next(&mls->mls_list, obj));
+}
+
+void *
+multilist_sublist_prev(multilist_sublist_t *mls, void *obj)
+{
+ ASSERT(MUTEX_HELD(&mls->mls_lock));
+ return (list_prev(&mls->mls_list, obj));
+}
+
+void
+multilist_link_init(multilist_node_t *link)
+{
+ list_link_init(link);
+}
+
+int
+multilist_link_active(multilist_node_t *link)
+{
+ return (list_link_active(link));
+}
diff --git a/module/zfs/trace.c b/module/zfs/trace.c
index 470cf18bf..0c9990e85 100644
--- a/module/zfs/trace.c
+++ b/module/zfs/trace.c
@@ -23,6 +23,7 @@
* (and only one) C file, so this dummy file exists for that purpose.
*/
+#include <sys/multilist.h>
#include <sys/arc_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
@@ -31,6 +32,7 @@
#include <sys/dsl_dataset.h>
#include <sys/dmu_tx.h>
#include <sys/dnode.h>
+#include <sys/multilist.h>
#include <sys/zfs_znode.h>
#include <sys/zil_impl.h>
#include <sys/zrlock.h>
@@ -42,6 +44,7 @@
#include <sys/trace_dbuf.h>
#include <sys/trace_dmu.h>
#include <sys/trace_dnode.h>
+#include <sys/trace_multilist.h>
#include <sys/trace_txg.h>
#include <sys/trace_zil.h>
#include <sys/trace_zrlock.h>
diff --git a/module/zfs/zio_inject.c b/module/zfs/zio_inject.c
index 5afb23c59..40b507a0b 100644
--- a/module/zfs/zio_inject.c
+++ b/module/zfs/zio_inject.c
@@ -439,7 +439,11 @@ zio_inject_fault(char *name, int flags, int *id, zinject_record_t *record)
* fault injection isn't a performance critical path.
*/
if (flags & ZINJECT_FLUSH_ARC)
- arc_flush(NULL);
+ /*
+ * We must use FALSE to ensure arc_flush returns, since
+ * we're not preventing concurrent ARC insertions.
+ */
+ arc_flush(NULL, FALSE);
return (0);
}