summaryrefslogtreecommitdiffstats
path: root/module/os
diff options
context:
space:
mode:
authorMatthew Ahrens <[email protected]>2020-06-27 10:27:02 -0700
committerGitHub <[email protected]>2020-06-27 10:27:02 -0700
commit270ece24b6f90c649a6c8880adec161119b60e18 (patch)
treeee9c52f01494d5d6b060c547bace9162789d460f /module/os
parentec1fea4516ac2f0c08d31d6308929298d1b281d0 (diff)
Revise SPL wrapper for shrinker callbacks
The SPL provides a wrapper for the kernel's shrinker callbacks, which enables the ZFS code to interface with multiple versions of the shrinker API from different kernel versions. Specifically, Linux kernels 3.0 - 3.11 have a single "combined" callback, and Linux kernels 3.12 and later have two "split" callbacks. The SPL provides a wrapper function so that the ZFS code only needs to implement one version of the callbacks. Currently the SPL's wrappers are designed such that the ZFS code implements the older, "combined" callback. There are a few downsides to this approach: * The general design within ZFS is for the latest Linux kernel to be considered the "first class" API. * The newer, "split" callback API is easier to understand, because each callback has one purpose. * The current wrappers do not completely abstract out the differing APIs, so ZFS code needs `#ifdef` code to handle the differing return values required for different kernel versions. This commit addresses these drawbacks by having the ZFS code provide the latest, "split" callbacks, and the SPL provides a wrapping function for the older, "combined" API. Reviewed-by: Pavel Zakharov <[email protected]> Reviewed-by: Brian Behlendorf <[email protected]> Signed-off-by: Matthew Ahrens <[email protected]> Closes #10502
Diffstat (limited to 'module/os')
-rw-r--r--module/os/linux/spl/spl-kmem-cache.c71
-rw-r--r--module/os/linux/zfs/arc_os.c32
2 files changed, 43 insertions, 60 deletions
diff --git a/module/os/linux/spl/spl-kmem-cache.c b/module/os/linux/spl/spl-kmem-cache.c
index 3fab184c8..9506eda36 100644
--- a/module/os/linux/spl/spl-kmem-cache.c
+++ b/module/os/linux/spl/spl-kmem-cache.c
@@ -190,10 +190,6 @@ taskq_t *spl_kmem_cache_taskq; /* Task queue for aging / reclaim */
static void spl_cache_shrink(spl_kmem_cache_t *skc, void *obj);
-SPL_SHRINKER_CALLBACK_FWD_DECLARE(spl_kmem_cache_generic_shrinker);
-SPL_SHRINKER_DECLARE(spl_kmem_cache_shrinker,
- spl_kmem_cache_generic_shrinker, KMC_DEFAULT_SEEKS);
-
static void *
kv_alloc(spl_kmem_cache_t *skc, int size, int flags)
{
@@ -1619,23 +1615,27 @@ EXPORT_SYMBOL(spl_kmem_cache_free);
* We always attempt to shrink all caches when this generic shrinker
* is called.
*
- * If sc->nr_to_scan is zero, the caller is requesting a query of the
- * number of objects which can potentially be freed. If it is nonzero,
- * the request is to free that many objects.
- *
- * Linux kernels >= 3.12 have the count_objects and scan_objects callbacks
- * in struct shrinker and also require the shrinker to return the number
- * of objects freed.
- *
- * Older kernels require the shrinker to return the number of freeable
- * objects following the freeing of nr_to_free.
- *
- * Linux semantics differ from those under Solaris, which are to
- * free all available objects which may (and probably will) be more
- * objects than the requested nr_to_scan.
+ * The _count() function returns the number of free-able objects.
+ * The _scan() function returns the number of objects that were freed.
*/
-static spl_shrinker_t
-__spl_kmem_cache_generic_shrinker(struct shrinker *shrink,
+static unsigned long
+spl_kmem_cache_shrinker_count(struct shrinker *shrink,
+ struct shrink_control *sc)
+{
+ spl_kmem_cache_t *skc = NULL;
+ int alloc = 0;
+
+ down_read(&spl_kmem_cache_sem);
+ list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) {
+ alloc += skc->skc_obj_alloc;
+ }
+ up_read(&spl_kmem_cache_sem);
+
+ return (MAX(alloc, 0));
+}
+
+static unsigned long
+spl_kmem_cache_shrinker_scan(struct shrinker *shrink,
struct shrink_control *sc)
{
spl_kmem_cache_t *skc = NULL;
@@ -1644,27 +1644,16 @@ __spl_kmem_cache_generic_shrinker(struct shrinker *shrink,
/*
* No shrinking in a transaction context. Can cause deadlocks.
*/
- if (sc->nr_to_scan && spl_fstrans_check())
+ if (spl_fstrans_check())
return (SHRINK_STOP);
down_read(&spl_kmem_cache_sem);
list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) {
- if (sc->nr_to_scan) {
-#ifdef HAVE_SPLIT_SHRINKER_CALLBACK
- uint64_t oldalloc = skc->skc_obj_alloc;
- spl_kmem_cache_reap_now(skc,
- MAX(sc->nr_to_scan>>fls64(skc->skc_slab_objs), 1));
- if (oldalloc > skc->skc_obj_alloc)
- alloc += oldalloc - skc->skc_obj_alloc;
-#else
- spl_kmem_cache_reap_now(skc,
- MAX(sc->nr_to_scan>>fls64(skc->skc_slab_objs), 1));
- alloc += skc->skc_obj_alloc;
-#endif /* HAVE_SPLIT_SHRINKER_CALLBACK */
- } else {
- /* Request to query number of freeable objects */
- alloc += skc->skc_obj_alloc;
- }
+ uint64_t oldalloc = skc->skc_obj_alloc;
+ spl_kmem_cache_reap_now(skc,
+ MAX(sc->nr_to_scan>>fls64(skc->skc_slab_objs), 1));
+ if (oldalloc > skc->skc_obj_alloc)
+ alloc += oldalloc - skc->skc_obj_alloc;
}
up_read(&spl_kmem_cache_sem);
@@ -1674,13 +1663,15 @@ __spl_kmem_cache_generic_shrinker(struct shrinker *shrink,
* shrink_slabs() is repeatedly invoked by many cores causing the
* system to thrash.
*/
- if ((spl_kmem_cache_reclaim & KMC_RECLAIM_ONCE) && sc->nr_to_scan)
+ if (spl_kmem_cache_reclaim & KMC_RECLAIM_ONCE)
return (SHRINK_STOP);
return (MAX(alloc, 0));
}
-SPL_SHRINKER_CALLBACK_WRAPPER(spl_kmem_cache_generic_shrinker);
+SPL_SHRINKER_DECLARE(spl_kmem_cache_shrinker,
+ spl_kmem_cache_shrinker_count, spl_kmem_cache_shrinker_scan,
+ KMC_DEFAULT_SEEKS);
/*
* Call the registered reclaim function for a cache. Depending on how
@@ -1789,7 +1780,7 @@ spl_kmem_reap(void)
sc.nr_to_scan = KMC_REAP_CHUNK;
sc.gfp_mask = GFP_KERNEL;
- (void) __spl_kmem_cache_generic_shrinker(NULL, &sc);
+ (void) spl_kmem_cache_shrinker_scan(NULL, &sc);
}
EXPORT_SYMBOL(spl_kmem_reap);
diff --git a/module/os/linux/zfs/arc_os.c b/module/os/linux/zfs/arc_os.c
index 9ac4e3221..0c0289db6 100644
--- a/module/os/linux/zfs/arc_os.c
+++ b/module/os/linux/zfs/arc_os.c
@@ -225,19 +225,17 @@ arc_evictable_memory(void)
}
/*
- * If sc->nr_to_scan is zero, the caller is requesting a query of the
- * number of objects which can potentially be freed. If it is nonzero,
- * the request is to free that many objects.
- *
- * Linux kernels >= 3.12 have the count_objects and scan_objects callbacks
- * in struct shrinker and also require the shrinker to return the number
- * of objects freed.
- *
- * Older kernels require the shrinker to return the number of freeable
- * objects following the freeing of nr_to_free.
+ * The _count() function returns the number of free-able objects.
+ * The _scan() function returns the number of objects that were freed.
*/
-static spl_shrinker_t
-__arc_shrinker_func(struct shrinker *shrink, struct shrink_control *sc)
+static unsigned long
+arc_shrinker_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+ return (btop((int64_t)arc_evictable_memory()));
+}
+
+static unsigned long
+arc_shrinker_scan(struct shrinker *shrink, struct shrink_control *sc)
{
int64_t pages;
@@ -247,8 +245,6 @@ __arc_shrinker_func(struct shrinker *shrink, struct shrink_control *sc)
/* Return the potential number of reclaimable pages */
pages = btop((int64_t)arc_evictable_memory());
- if (sc->nr_to_scan == 0)
- return (pages);
/* Not allowed to perform filesystem reclaim */
if (!(sc->gfp_mask & __GFP_FS))
@@ -288,12 +284,8 @@ __arc_shrinker_func(struct shrinker *shrink, struct shrink_control *sc)
if (current_is_kswapd())
arc_kmem_reap_soon();
-#ifdef HAVE_SPLIT_SHRINKER_CALLBACK
pages = MAX((int64_t)pages -
(int64_t)btop(arc_evictable_memory()), 0);
-#else
- pages = btop(arc_evictable_memory());
-#endif
/*
* We've shrunk what we can, wake up threads.
*/
@@ -318,9 +310,9 @@ __arc_shrinker_func(struct shrinker *shrink, struct shrink_control *sc)
return (pages);
}
-SPL_SHRINKER_CALLBACK_WRAPPER(arc_shrinker_func);
-SPL_SHRINKER_DECLARE(arc_shrinker, arc_shrinker_func, DEFAULT_SEEKS);
+SPL_SHRINKER_DECLARE(arc_shrinker,
+ arc_shrinker_count, arc_shrinker_scan, DEFAULT_SEEKS);
int
arc_memory_throttle(spa_t *spa, uint64_t reserve, uint64_t txg)