author     Matthew Ahrens <[email protected]>    2020-07-19 09:58:30 -0700
committer  GitHub <[email protected]>             2020-07-19 09:58:30 -0700
commit     026e529cb336d1d656eaa33f58ac911a70f273cd (patch)
tree       a6bf5ca27f8063de8d068cc3e818ab84ba87524f /module/zfs/arc.c
parent     e862b7ecfc6049df19cf0d439510f385a7707b8b (diff)
Remove skc_reclaim, hdr_recl, kmem_cache shrinker
The SPL kmem_cache implementation provides a mechanism, `skc_reclaim`, whereby individual caches can register a callback to be invoked when there is memory pressure. This mechanism is used in only one place: the ARC registers the `hdr_recl()` reclaim function. This function wakes up the `arc_reap_zthr`, whose job is to call `kmem_cache_reap()` and `arc_reduce_target_size()`.

The `skc_reclaim` callbacks are invoked only by shrinker callbacks and `arc_reap_zthr`, and the only registered callback, `hdr_recl()`, merely wakes up `arc_reap_zthr`. When called from `arc_reap_zthr`, waking `arc_reap_zthr` is a no-op. When called from shrinker callbacks, we are already aware of memory pressure and responding to it. Therefore there is little benefit to ever calling the `hdr_recl()` `skc_reclaim` callback.

The `arc_reap_zthr` also wakes once a second, and is woken up if memory is low when allocating an ARC buffer. Therefore, additionally waking it from the shrinker callbacks has little benefit.

The shrinker callbacks can be invoked very frequently, e.g. 10,000 times per second. Additionally, for each invocation of the shrinker callback, `skc_reclaim` is invoked many times. Therefore, this mechanism consumes significant amounts of CPU time.

The kmem_cache shrinker calls `spl_kmem_cache_reap_now()`, which, in addition to invoking `skc_reclaim()`, does two things to attempt to free pages for use by the system:
 1. Return free objects from the magazine layer to the slab layer
 2. Return entirely-free slabs to the page layer (i.e. free pages)

These actions apply only to caches implemented by the SPL, not those that use the underlying kernel SLAB/SLUB caches. The SPL caches are used for objects >=32KB, which are primarily linear ABDs cached in the DBUF cache.

These actions (freeing objects from the magazine layer and returning entirely-free slabs) are also taken whenever a `kmem_cache_free()` call finds a full magazine. So there would typically be zero entirely-free slabs, and the number of objects in magazines is limited (typically no more than 64 objects per magazine, and there's one magazine per CPU). Therefore the benefit of `spl_kmem_cache_reap_now()`, while nonzero, is modest.

We also call `spl_kmem_cache_reap_now()` from the `arc_reap_zthr`, when memory pressure is detected. Therefore, calling `spl_kmem_cache_reap_now()` from the kmem_cache shrinker is not needed.

This commit removes the `skc_reclaim` mechanism, its only callback `hdr_recl()`, and the kmem_cache shrinker callback.

Reviewed-By: Brian Behlendorf <[email protected]>
Reviewed-by: George Wilson <[email protected]>
Reviewed-by: Pavel Zakharov <[email protected]>
Signed-off-by: Matthew Ahrens <[email protected]>
Closes #10576
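For context, a minimal sketch of the `skc_reclaim` registration pattern being removed, pieced together from the hunks below (`hdr_recl()` and the `hdr_full_cache` registration are quoted from the removed lines; the other caches and surrounding declarations are elided):

    /*
     * Reclaim callback -- the only skc_reclaim user. It did nothing but
     * wake the reap thread, which calls kmem_cache_reap() and
     * arc_reduce_target_size().
     */
    static void
    hdr_recl(void *unused)
    {
            if (arc_initialized)
                    zthr_wakeup(arc_reap_zthr);
    }

    /* Before: hdr_recl was registered as the cache's skc_reclaim callback. */
    hdr_full_cache = kmem_cache_create("arc_buf_hdr_t_full", HDR_FULL_SIZE,
        0, hdr_full_cons, hdr_full_dest, hdr_recl, NULL, NULL, 0);

    /* After: no reclaim callback is registered for the cache. */
    hdr_full_cache = kmem_cache_create("arc_buf_hdr_t_full", HDR_FULL_SIZE,
        0, hdr_full_cons, hdr_full_dest, NULL, NULL, NULL, 0);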
Diffstat (limited to 'module/zfs/arc.c')
-rw-r--r--  module/zfs/arc.c  42
1 files changed, 3 insertions, 39 deletions
diff --git a/module/zfs/arc.c b/module/zfs/arc.c
index ea22686cc..5b9df43d2 100644
--- a/module/zfs/arc.c
+++ b/module/zfs/arc.c
@@ -381,11 +381,6 @@ static int arc_min_prescient_prefetch_ms;
int arc_lotsfree_percent = 10;
/*
- * hdr_recl() uses this to determine if the arc is up and running.
- */
-static boolean_t arc_initialized;
-
-/*
* The arc has filled available memory and has now warmed up.
*/
boolean_t arc_warm;
@@ -1198,22 +1193,6 @@ buf_dest(void *vbuf, void *unused)
arc_space_return(sizeof (arc_buf_t), ARC_SPACE_HDRS);
}
-/*
- * Reclaim callback -- invoked when memory is low.
- */
-/* ARGSUSED */
-static void
-hdr_recl(void *unused)
-{
- dprintf("hdr_recl called\n");
- /*
- * umem calls the reclaim func when we destroy the buf cache,
- * which is after we do arc_fini().
- */
- if (arc_initialized)
- zthr_wakeup(arc_reap_zthr);
-}
-
static void
buf_init(void)
{
@@ -1249,12 +1228,12 @@ retry:
}
hdr_full_cache = kmem_cache_create("arc_buf_hdr_t_full", HDR_FULL_SIZE,
- 0, hdr_full_cons, hdr_full_dest, hdr_recl, NULL, NULL, 0);
+ 0, hdr_full_cons, hdr_full_dest, NULL, NULL, NULL, 0);
hdr_full_crypt_cache = kmem_cache_create("arc_buf_hdr_t_full_crypt",
HDR_FULL_CRYPT_SIZE, 0, hdr_full_crypt_cons, hdr_full_crypt_dest,
- hdr_recl, NULL, NULL, 0);
+ NULL, NULL, NULL, 0);
hdr_l2only_cache = kmem_cache_create("arc_buf_hdr_t_l2only",
- HDR_L2ONLY_SIZE, 0, hdr_l2only_cons, hdr_l2only_dest, hdr_recl,
+ HDR_L2ONLY_SIZE, 0, hdr_l2only_cons, hdr_l2only_dest, NULL,
NULL, NULL, 0);
buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
0, buf_cons, buf_dest, NULL, NULL, NULL, 0);
@@ -4688,9 +4667,6 @@ arc_kmem_reap_soon(void)
static boolean_t
arc_adjust_cb_check(void *arg, zthr_t *zthr)
{
- if (!arc_initialized)
- return (B_FALSE);
-
/*
* This is necessary so that any changes which may have been made to
* many of the zfs_arc_* module parameters will be propagated to
@@ -4778,9 +4754,6 @@ arc_adjust_cb(void *arg, zthr_t *zthr)
static boolean_t
arc_reap_cb_check(void *arg, zthr_t *zthr)
{
- if (!arc_initialized)
- return (B_FALSE);
-
int64_t free_memory = arc_available_memory();
/*
@@ -7348,12 +7321,6 @@ arc_init(void)
arc_state_init();
- /*
- * The arc must be "uninitialized", so that hdr_recl() (which is
- * registered by buf_init()) will not access arc_reap_zthr before
- * it is created.
- */
- ASSERT(!arc_initialized);
buf_init();
list_create(&arc_prune_list, sizeof (arc_prune_t),
@@ -7377,7 +7344,6 @@ arc_init(void)
arc_reap_zthr = zthr_create_timer(arc_reap_cb_check,
arc_reap_cb, NULL, SEC2NSEC(1));
- arc_initialized = B_TRUE;
arc_warm = B_FALSE;
/*
@@ -7412,8 +7378,6 @@ arc_fini(void)
/* Use B_TRUE to ensure *all* buffers are evicted */
arc_flush(NULL, B_TRUE);
- arc_initialized = B_FALSE;
-
if (arc_ksp != NULL) {
kstat_delete(arc_ksp);
arc_ksp = NULL;