author		Prakash Surya <[email protected]>	2014-01-03 11:40:52 -0800
committer	Brian Behlendorf <[email protected]>	2014-02-21 16:10:49 -0800
commit		94520ca4626c7b01340473bccdaa3ed038a85a8f (patch)
tree		d5e2d58a193a19cdc12a14d15dc8f57d8e244c1f /module/zfs
parent		1e3cb67b53fba067fd7bf9a13d21b53de4626dc1 (diff)
Prune metadata from ghost lists in arc_adjust_meta
To maintain a strict limit on the metadata contained in the arc, while
preventing the arc buffer headers from completely consuming the
"arc_meta_used" space, we need to evict metadata buffers from the arc's
ghost lists along with the regular lists.

This change modifies arc_adjust_meta such that it more closely models the
adjustments made in arc_adjust. "arc_meta_used" is used similarly to
"arc_size", and "arc_meta_limit" is used similarly to "arc_c".

Testing metadata intensive workloads (e.g. creating, copying, and removing
millions of small files and/or directories) has shown this change to make
a dramatic improvement to the hit rate maintained in the arc. While I
think there is still room for improvement, this is a big step in the right
direction.

In addition, zpl_free_cached_objects was made into a no-op as I'm not yet
sure how to properly implement that function.

Signed-off-by: Prakash Surya <[email protected]>
Signed-off-by: Brian Behlendorf <[email protected]>
Issue #2110
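[Editor's illustration] Below is a minimal, self-contained C sketch of the eviction ordering the patched arc_adjust_meta() follows: trim MRU metadata against the overage, trim MFU metadata against the same original overage, give each ghost list its own target, and only fall back to the prune callback if the limit is still exceeded. The struct, its fields, and the evict_meta() helper are hypothetical stand-ins for the real ARC state and for arc_evict()/arc_evict_ghost(); this is not code from the patch itself.

#include <inttypes.h>
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

struct meta_state {			/* hypothetical stand-in for ARC state */
	int64_t meta_used;		/* plays the role of arc_meta_used */
	int64_t meta_limit;		/* plays the role of arc_meta_limit */
	int64_t mru, mfu;		/* evictable metadata on the resident lists */
	int64_t mru_ghost, mfu_ghost;	/* metadata tracked on the ghost lists */
};

/* Stand-in for arc_evict()/arc_evict_ghost(): just subtract the bytes. */
static void
evict_meta(int64_t *list, int64_t *used, int64_t bytes)
{
	*list -= bytes;
	if (used != NULL)
		*used -= bytes;
}

static void
adjust_meta(struct meta_state *s)
{
	int64_t adjustmnt, delta;

	/* Resident lists are trimmed against the original overage. */
	adjustmnt = s->meta_used - s->meta_limit;
	if (adjustmnt > 0 && s->mru > 0) {
		delta = MIN(s->mru, adjustmnt);
		evict_meta(&s->mru, &s->meta_used, delta);
		adjustmnt -= delta;
	}
	if (adjustmnt > 0 && s->mfu > 0) {
		delta = MIN(s->mfu, adjustmnt);
		evict_meta(&s->mfu, &s->meta_used, delta);
	}

	/* Each ghost list is then trimmed against its own target. */
	adjustmnt = s->mru + s->mru_ghost - s->meta_limit;
	if (adjustmnt > 0 && s->mru_ghost > 0)
		evict_meta(&s->mru_ghost, NULL, MIN(adjustmnt, s->mru_ghost));

	adjustmnt = s->mru_ghost + s->mfu_ghost - s->meta_limit;
	if (adjustmnt > 0 && s->mfu_ghost > 0)
		evict_meta(&s->mfu_ghost, NULL, MIN(adjustmnt, s->mfu_ghost));

	/* Still over the limit: this is where the prune callbacks would run. */
	if (s->meta_used > s->meta_limit)
		printf("would invoke the registered prune callbacks\n");
}

int
main(void)
{
	struct meta_state s = {
		.meta_used = 900, .meta_limit = 600,
		.mru = 200, .mfu = 500,
		.mru_ghost = 700, .mfu_ghost = 300,
	};

	adjust_meta(&s);
	printf("meta_used=%" PRId64 " mru_ghost=%" PRId64 " mfu_ghost=%" PRId64 "\n",
	    s.meta_used, s.mru_ghost, s.mfu_ghost);
	return (0);
}

Reusing the original overage for the MFU pass, rather than recomputing it, mirrors the in-patch comment about not letting metadata that sneaks into the MRU or ANON lists penalize the MFU.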
Diffstat (limited to 'module/zfs')
-rw-r--r--	module/zfs/arc.c	67
-rw-r--r--	module/zfs/zpl_super.c	2
2 files changed, 49 insertions(+), 20 deletions(-)
diff --git a/module/zfs/arc.c b/module/zfs/arc.c
index ad2e8a92d..9c2d0eaab 100644
--- a/module/zfs/arc.c
+++ b/module/zfs/arc.c
@@ -2268,24 +2268,61 @@ arc_do_user_evicts(void)
  * This is only used to enforce the tunable arc_meta_limit, if we are
  * unable to evict enough buffers notify the user via the prune callback.
  */
-void
-arc_adjust_meta(int64_t adjustment, boolean_t may_prune)
+static void
+arc_adjust_meta(void)
 {
-	int64_t delta;
+	int64_t adjustmnt, delta;
 
-	if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_METADATA] > 0) {
-		delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_METADATA], adjustment);
+	/*
+	 * This slightly differs than the way we evict from the mru in
+	 * arc_adjust because we don't have a "target" value (i.e. no
+	 * "meta" arc_p). As a result, I think we can completely
+	 * cannibalize the metadata in the MRU before we evict the
+	 * metadata from the MFU. I think we probably need to implement a
+	 * "metadata arc_p" value to do this properly.
+	 */
+	adjustmnt = arc_meta_used - arc_meta_limit;
+
+	if (adjustmnt > 0 && arc_mru->arcs_lsize[ARC_BUFC_METADATA] > 0) {
+		delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_METADATA], adjustmnt);
 		arc_evict(arc_mru, 0, delta, FALSE, ARC_BUFC_METADATA);
-		adjustment -= delta;
+		adjustmnt -= delta;
 	}
 
-	if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_METADATA] > 0) {
-		delta = MIN(arc_mfu->arcs_lsize[ARC_BUFC_METADATA], adjustment);
+	/*
+	 * We can't afford to recalculate adjustmnt here. If we do,
+	 * new metadata buffers can sneak into the MRU or ANON lists,
+	 * thus penalize the MFU metadata. Although the fudge factor is
+	 * small, it has been empirically shown to be significant for
+	 * certain workloads (e.g. creating many empty directories). As
+	 * such, we use the original calculation for adjustmnt, and
+	 * simply decrement the amount of data evicted from the MRU.
+	 */
+
+	if (adjustmnt > 0 && arc_mfu->arcs_lsize[ARC_BUFC_METADATA] > 0) {
+		delta = MIN(arc_mfu->arcs_lsize[ARC_BUFC_METADATA], adjustmnt);
 		arc_evict(arc_mfu, 0, delta, FALSE, ARC_BUFC_METADATA);
-		adjustment -= delta;
 	}
 
-	if (may_prune && (adjustment > 0) && (arc_meta_used > arc_meta_limit))
+	adjustmnt = arc_mru->arcs_lsize[ARC_BUFC_METADATA] +
+	    arc_mru_ghost->arcs_lsize[ARC_BUFC_METADATA] - arc_meta_limit;
+
+	if (adjustmnt > 0 && arc_mru_ghost->arcs_lsize[ARC_BUFC_METADATA] > 0) {
+		delta = MIN(adjustmnt,
+		    arc_mru_ghost->arcs_lsize[ARC_BUFC_METADATA]);
+		arc_evict_ghost(arc_mru_ghost, 0, delta, ARC_BUFC_METADATA);
+	}
+
+	adjustmnt = arc_mru_ghost->arcs_lsize[ARC_BUFC_METADATA] +
+	    arc_mfu_ghost->arcs_lsize[ARC_BUFC_METADATA] - arc_meta_limit;
+
+	if (adjustmnt > 0 && arc_mfu_ghost->arcs_lsize[ARC_BUFC_METADATA] > 0) {
+		delta = MIN(adjustmnt,
+		    arc_mfu_ghost->arcs_lsize[ARC_BUFC_METADATA]);
+		arc_evict_ghost(arc_mfu_ghost, 0, delta, ARC_BUFC_METADATA);
+	}
+
+	if (arc_meta_used > arc_meta_limit)
 		arc_do_user_prune(zfs_arc_meta_prune);
 }
 
@@ -2405,7 +2442,6 @@ static void
 arc_adapt_thread(void)
 {
 	callb_cpr_t cpr;
-	int64_t prune;
 
 	CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG);
 
@@ -2441,14 +2477,7 @@ arc_adapt_thread(void)
 		if (arc_no_grow && ddi_get_lbolt() >= arc_grow_time)
 			arc_no_grow = FALSE;
 
-		/*
-		 * Keep meta data usage within limits, arc_shrink() is not
-		 * used to avoid collapsing the arc_c value when only the
-		 * arc_meta_limit is being exceeded.
-		 */
-		prune = (int64_t)arc_meta_used - (int64_t)arc_meta_limit;
-		if (prune > 0)
-			arc_adjust_meta(prune, B_TRUE);
+		arc_adjust_meta();
 
 		arc_adjust();
 
diff --git a/module/zfs/zpl_super.c b/module/zfs/zpl_super.c
index b4e7b6ed0..45639a6dd 100644
--- a/module/zfs/zpl_super.c
+++ b/module/zfs/zpl_super.c
@@ -342,7 +342,7 @@ zpl_nr_cached_objects(struct super_block *sb)
 static void
 zpl_free_cached_objects(struct super_block *sb, int nr_to_scan)
 {
-	arc_adjust_meta(nr_to_scan * sizeof (znode_t), B_FALSE);
+	/* noop */
 }
 
 #endif /* HAVE_FREE_CACHED_OBJECTS */