author     Prakash Surya <[email protected]>     2014-01-03 10:20:21 -0800
committer  Brian Behlendorf <[email protected]>  2014-02-21 16:10:27 -0800
commit     f521ce1b9c6102f9175f26548d4c521e115f8d60 (patch)
tree       f96e0ab3f4190b7c4273c3a8bea1a45afd7843f0 /module/zfs/arc.c
parent     89c8cac493687875eecc80a4a03f667d98dd82d0 (diff)
Allow "arc_p" to drop to zero or grow to "arc_c"
Setting a limit on the minimum value of "arc_p" has been shown to have detrimental effects on the arc hit rate for certain "metadata" intensive workloads. Specifically, this has been exhibited with a workload that constantly dirties new "metadata" but also frequently touches a "small" amount of mfu data (e.g. mkdirs).

What is seen is that the new anon data throttles the mfu list to a negligible size (because arc_p > anon + mru in arc_get_data_buf), even though the mfu ghost list receives a constant stream of hits. To remedy this, arc_p is now allowed to drop to zero if the algorithm deems it necessary.

Signed-off-by: Prakash Surya <[email protected]>
Signed-off-by: Brian Behlendorf <[email protected]>
Issue #2110
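To illustrate the behavioral change, the following is a standalone sketch, not the actual arc.c code: the helper names, plain-integer arguments, and sample values are invented for this example, and the real code updates the arc_p global with atomic_add_64(). It contrasts the old arc_shrink() path, which floored arc_p at arc_c >> zfs_arc_p_min_shift, with the patched path, which lets arc_p fall to zero.

/*
 * Hypothetical, simplified sketch of the arc_p clamping change in
 * arc_shrink().  Only the clamping logic mirrors the patch.
 */
#include <stdint.h>
#include <stdio.h>

/* Old behavior: arc_p is floored at arc_c >> zfs_arc_p_min_shift. */
static uint64_t
shrink_arc_p_old(uint64_t arc_p, uint64_t arc_c, uint64_t to_free,
    int p_min_shift)
{
        uint64_t arc_p_min = (arc_c >> p_min_shift);

        if (arc_p > arc_p_min + to_free)
                return (arc_p - to_free);
        return (arc_p_min);
}

/* New behavior: arc_p may drop all the way to zero. */
static uint64_t
shrink_arc_p_new(uint64_t arc_p, uint64_t to_free)
{
        if (arc_p > to_free)
                return (arc_p - to_free);
        return (0);
}

int
main(void)
{
        uint64_t arc_c = 1ULL << 30;    /* 1 GiB overall cache target */
        uint64_t arc_p = 1ULL << 26;    /* 64 MiB mru target */
        uint64_t to_free = arc_p;       /* shrink request of the same size */

        printf("old floor: %llu\n", (unsigned long long)
            shrink_arc_p_old(arc_p, arc_c, to_free, 4));
        printf("new value: %llu\n", (unsigned long long)
            shrink_arc_p_new(arc_p, to_free));
        return (0);
}

With these sample values the old variant stays pinned at the 64 MiB floor (arc_c >> 4) while the new variant reaches zero, which is exactly the freedom the workload described above needs to stop starving the mfu list.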
Diffstat (limited to 'module/zfs/arc.c')
-rw-r--r--  module/zfs/arc.c  17  ++++-------------
1 file changed, 4 insertions(+), 13 deletions(-)
diff --git a/module/zfs/arc.c b/module/zfs/arc.c
index eac6ea448..f66eaa407 100644
--- a/module/zfs/arc.c
+++ b/module/zfs/arc.c
@@ -172,9 +172,6 @@ int arc_evict_iterations = 100;
 /* number of seconds before growing cache again */
 int zfs_arc_grow_retry = 5;
 
-/* shift of arc_c for calculating both min and max arc_p */
-int zfs_arc_p_min_shift = 4;
-
 /* disable anon data aggressively growing arc_p */
 int zfs_arc_p_aggressive_disable = 1;
 
@@ -2335,7 +2332,6 @@ void
 arc_shrink(uint64_t bytes)
 {
         if (arc_c > arc_c_min) {
-                uint64_t arc_p_min;
                 uint64_t to_free;
 
                 to_free = bytes ? bytes : arc_c >> zfs_arc_shrink_shift;
@@ -2345,13 +2341,12 @@ arc_shrink(uint64_t bytes)
                 else
                         arc_c = arc_c_min;
 
-                arc_p_min = (arc_c >> zfs_arc_p_min_shift);
                 to_free = bytes ? bytes : arc_p >> zfs_arc_shrink_shift;
 
-                if (arc_p > arc_p_min + to_free)
+                if (arc_p > to_free)
                         atomic_add_64(&arc_p, -to_free);
                 else
-                        arc_p = arc_p_min;
+                        arc_p = 0;
 
                 if (arc_c > arc_size)
                         arc_c = MAX(arc_size, arc_c_min);
@@ -2622,7 +2617,6 @@ static void
 arc_adapt(int bytes, arc_state_t *state)
 {
         int mult;
-        uint64_t arc_p_min = (arc_c >> zfs_arc_p_min_shift);
 
         if (state == arc_l2c_only)
                 return;
@@ -2641,7 +2635,7 @@ arc_adapt(int bytes, arc_state_t *state)
                     1 : (arc_mfu_ghost->arcs_size/arc_mru_ghost->arcs_size));
                 mult = MIN(mult, 10); /* avoid wild arc_p adjustment */
 
-                arc_p = MIN(arc_c - arc_p_min, arc_p + bytes * mult);
+                arc_p = MIN(arc_c, arc_p + bytes * mult);
         } else if (state == arc_mfu_ghost) {
                 uint64_t delta;
 
@@ -2650,7 +2644,7 @@ arc_adapt(int bytes, arc_state_t *state)
                 mult = MIN(mult, 10);
 
                 delta = MIN(bytes * mult, arc_p);
-                arc_p = MAX(arc_p_min, arc_p - delta);
+                arc_p = MAX(0, arc_p - delta);
         }
         ASSERT((int64_t)arc_p >= 0);
 
@@ -5563,9 +5557,6 @@ MODULE_PARM_DESC(zfs_arc_p_aggressive_disable, "disable aggressive arc_p grow");
 
 module_param(zfs_arc_shrink_shift, int, 0644);
 MODULE_PARM_DESC(zfs_arc_shrink_shift, "log2(fraction of arc to reclaim)");
 
-module_param(zfs_arc_p_min_shift, int, 0644);
-MODULE_PARM_DESC(zfs_arc_p_min_shift, "arc_c shift to calc min/max arc_p");
-
 module_param(zfs_disable_dup_eviction, int, 0644);
 MODULE_PARM_DESC(zfs_disable_dup_eviction, "disable duplicate buffer eviction");
 
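The arc_adapt() hunks make the complementary change for ghost-list hits: arc_p may now grow all the way to arc_c on mru ghost hits and decay all the way to zero on mfu ghost hits, instead of being held inside the band defined by arc_c >> zfs_arc_p_min_shift. The following standalone model is hypothetical (the function names, argument passing, and sample sizes are invented); only the "mult" damping and the new [0, arc_c] bounds mirror the patch, and the ghost list sizes are assumed non-zero, as they are when arc_adapt() sees a ghost hit.

/*
 * Hypothetical standalone model of the arc_p adaptation bounds after
 * this patch.
 */
#include <stdint.h>
#include <stdio.h>

#define MIN(a, b)       ((a) < (b) ? (a) : (b))

/* mru ghost hit: grow arc_p, now allowed to reach arc_c. */
static uint64_t
grow_arc_p(uint64_t arc_p, uint64_t arc_c, uint64_t bytes,
    uint64_t mru_ghost, uint64_t mfu_ghost)
{
        uint64_t mult;

        mult = (mru_ghost >= mfu_ghost) ? 1 : (mfu_ghost / mru_ghost);
        mult = MIN(mult, 10);   /* avoid wild arc_p adjustment */

        return (MIN(arc_c, arc_p + bytes * mult));
}

/* mfu ghost hit: shrink arc_p, now allowed to reach zero. */
static uint64_t
decay_arc_p(uint64_t arc_p, uint64_t bytes,
    uint64_t mru_ghost, uint64_t mfu_ghost)
{
        uint64_t mult, delta;

        mult = (mfu_ghost >= mru_ghost) ? 1 : (mru_ghost / mfu_ghost);
        mult = MIN(mult, 10);

        /* delta <= arc_p, so this matches the patch's MAX(0, arc_p - delta) */
        delta = MIN(bytes * mult, arc_p);
        return (arc_p - delta);
}

int
main(void)
{
        uint64_t arc_c = 1ULL << 30;    /* 1 GiB overall cache target */
        uint64_t arc_p = arc_c / 2;     /* start with an even mru/mfu split */
        int i;

        /* A stream of mfu ghost hits can now drive arc_p all the way to 0. */
        for (i = 0; i < 8; i++)
                arc_p = decay_arc_p(arc_p, 128ULL << 20, 4096, 1ULL << 28);
        printf("after mfu ghost hits: %llu\n", (unsigned long long)arc_p);

        /* A stream of mru ghost hits can grow it back up to arc_c. */
        for (i = 0; i < 8; i++)
                arc_p = grow_arc_p(arc_p, arc_c, 128ULL << 20, 1ULL << 28, 4096);
        printf("after mru ghost hits: %llu\n", (unsigned long long)arc_p);
        return (0);
}

With zfs_arc_p_min_shift removed, the last hunk also drops the corresponding module parameter, so there is no longer a tunable floor to configure.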