author	Matthew Ahrens <[email protected]>	2020-06-29 09:01:07 -0700
committer	GitHub <[email protected]>	2020-06-29 09:01:07 -0700
commit	3c42c9ed84f1755ee8898b76e0264b8ebab19fd8 (patch)
tree	fd05c656b380baf2e720d27957fbd088e3776548 /module
parent	94a2dca6a0b826a25ddc43c80297cbad02b48547 (diff)
Clean up OS-specific ARC and kmem code
OS-specific code (e.g. under `module/os/linux`) does not need to share its code structure with any other operating system. In particular, the ARC and kmem code need not be similar to the code in illumos, because we won't be syncing this OS-specific code between operating systems. For example, if/when illumos support is added to the common repo, we would add a file `module/os/illumos/zfs/arc_os.c` for the illumos versions of this code.

Therefore, we can simplify the code in the OS-specific ARC and kmem routines. These changes do not impact system behavior; they are purely code cleanup. The changes are:

Arenas are not used on Linux or FreeBSD (they are always `NULL`), so `heap_arena`, `zio_arena`, and `zio_alloc_arena` can be removed, along with the code that uses them.

In `arc_available_memory()`:
* `desfree` is unused; remove it
* rename `freemem` to avoid a conflict with a pre-existing `#define`
* remove the checks related to arenas
* use units of bytes, rather than converting from bytes to pages and then back to bytes

`SPL_KMEM_CACHE_REAP` is unused; remove it.

`skc_reap` is unused; remove it.

The `count` argument to `spl_kmem_cache_reap_now()` is unused; remove it.

`vmem_size()` and its associated type and macros are unused; remove them.

In `arc_memory_throttle()`, use a less confusing variable name to store the result of `arc_free_memory()`.

Reviewed-by: George Wilson <[email protected]>
Reviewed-by: Pavel Zakharov <[email protected]>
Reviewed-by: Brian Behlendorf <[email protected]>
Reviewed-by: Ryan Moeller <[email protected]>
Signed-off-by: Matthew Ahrens <[email protected]>
Closes #10499
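To make the byte-based simplification concrete, here is a condensed sketch of the Linux `arc_available_memory()` after this change, assembled from the `module/os/linux/zfs/arc_os.c` hunk below. The DTRACE probe and the surrounding declarations from the real file are elided, and the trailing return is assumed to be unchanged from the existing function:

```c
/*
 * Condensed sketch of the simplified Linux arc_available_memory().
 * All quantities are in bytes, so there is no bytes -> pages -> bytes
 * round trip, and the arena checks are gone entirely.
 */
int64_t
arc_available_memory(void)
{
	int64_t lowest = INT64_MAX;
	free_memory_reason_t r = FMR_UNKNOWN;
	int64_t n;

	/* Memory the ARC has been explicitly asked to free. */
	if (arc_need_free > 0) {
		lowest = -arc_need_free;
		r = FMR_NEEDFREE;
	}

	/* Headroom above the system reserve, less any pending request. */
	n = arc_free_memory() - arc_sys_free - arc_need_free;
	if (n < lowest) {
		lowest = n;
		r = FMR_LOTSFREE;
	}

	last_free_memory = lowest;
	last_free_reason = r;
	return (lowest);
}
```

Keeping everything in bytes removes the old `btop()`/`PAGESIZE` conversions and the `desfree`, `lotsfree`, and arena bookkeeping that the original page-based version carried along.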
Diffstat (limited to 'module')
-rw-r--r--	module/os/freebsd/zfs/arc_os.c	26
-rw-r--r--	module/os/linux/spl/spl-kmem-cache.c	6
-rw-r--r--	module/os/linux/spl/spl-vmem.c	45
-rw-r--r--	module/os/linux/zfs/arc_os.c	77
-rw-r--r--	module/zfs/arc.c	13
5 files changed, 10 insertions, 157 deletions
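The kmem-cache reap interface is simplified in the same spirit: the unused `count` argument is dropped, so callers pass only the cache. A minimal sketch of the post-change prototype and a call site, following the spl-kmem-cache.c hunk below (the surrounding shrinker bookkeeping is omitted):

```c
/* Post-change prototype: the unused count argument is gone. */
void spl_kmem_cache_reap_now(spl_kmem_cache_t *skc);

/* A caller, such as the SPL shrinker scan loop, now reaps a cache with
 * just the cache pointer: */
spl_kmem_cache_reap_now(skc);
```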
diff --git a/module/os/freebsd/zfs/arc_os.c b/module/os/freebsd/zfs/arc_os.c
index d7b842f84..23b580c43 100644
--- a/module/os/freebsd/zfs/arc_os.c
+++ b/module/os/freebsd/zfs/arc_os.c
@@ -48,13 +48,6 @@
extern struct vfsops zfs_vfsops;
-/* vmem_size typemask */
-#define VMEM_ALLOC 0x01
-#define VMEM_FREE 0x02
-#define VMEM_MAXFREE 0x10
-typedef size_t vmem_size_t;
-extern vmem_size_t vmem_size(vmem_t *vm, int typemask);
-
uint_t zfs_arc_free_target = 0;
int64_t last_free_memory;
@@ -135,25 +128,6 @@ arc_available_memory(void)
}
#endif
- /*
- * If zio data pages are being allocated out of a separate heap segment,
- * then enforce that the size of available vmem for this arena remains
- * above about 1/4th (1/(2^arc_zio_arena_free_shift)) free.
- *
- * Note that reducing the arc_zio_arena_free_shift keeps more virtual
- * memory (in the zio_arena) free, which can avoid memory
- * fragmentation issues.
- */
- if (zio_arena != NULL) {
- n = (int64_t)vmem_size(zio_arena, VMEM_FREE) -
- (vmem_size(zio_arena, VMEM_ALLOC) >>
- arc_zio_arena_free_shift);
- if (n < lowest) {
- lowest = n;
- r = FMR_ZIO_ARENA;
- }
- }
-
last_free_memory = lowest;
last_free_reason = r;
DTRACE_PROBE2(arc__available_memory, int64_t, lowest, int, r);
diff --git a/module/os/linux/spl/spl-kmem-cache.c b/module/os/linux/spl/spl-kmem-cache.c
index 9506eda36..a59b559f5 100644
--- a/module/os/linux/spl/spl-kmem-cache.c
+++ b/module/os/linux/spl/spl-kmem-cache.c
@@ -923,7 +923,6 @@ spl_kmem_cache_create(char *name, size_t size, size_t align,
skc->skc_obj_size = size;
skc->skc_obj_align = SPL_KMEM_CACHE_ALIGN;
skc->skc_delay = SPL_KMEM_CACHE_DELAY;
- skc->skc_reap = SPL_KMEM_CACHE_REAP;
atomic_set(&skc->skc_ref, 0);
INIT_LIST_HEAD(&skc->skc_list);
@@ -1650,8 +1649,7 @@ spl_kmem_cache_shrinker_scan(struct shrinker *shrink,
down_read(&spl_kmem_cache_sem);
list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) {
uint64_t oldalloc = skc->skc_obj_alloc;
- spl_kmem_cache_reap_now(skc,
- MAX(sc->nr_to_scan>>fls64(skc->skc_slab_objs), 1));
+ spl_kmem_cache_reap_now(skc);
if (oldalloc > skc->skc_obj_alloc)
alloc += oldalloc - skc->skc_obj_alloc;
}
@@ -1682,7 +1680,7 @@ SPL_SHRINKER_DECLARE(spl_kmem_cache_shrinker,
* effort and we do not want to thrash creating and destroying slabs.
*/
void
-spl_kmem_cache_reap_now(spl_kmem_cache_t *skc, int count)
+spl_kmem_cache_reap_now(spl_kmem_cache_t *skc)
{
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
diff --git a/module/os/linux/spl/spl-vmem.c b/module/os/linux/spl/spl-vmem.c
index a2630ecdd..103a04901 100644
--- a/module/os/linux/spl/spl-vmem.c
+++ b/module/os/linux/spl/spl-vmem.c
@@ -28,51 +28,6 @@
#include <sys/shrinker.h>
#include <linux/module.h>
-vmem_t *heap_arena = NULL;
-EXPORT_SYMBOL(heap_arena);
-
-vmem_t *zio_alloc_arena = NULL;
-EXPORT_SYMBOL(zio_alloc_arena);
-
-vmem_t *zio_arena = NULL;
-EXPORT_SYMBOL(zio_arena);
-
-#define VMEM_FLOOR_SIZE (4 * 1024 * 1024) /* 4MB floor */
-
-/*
- * Return approximate virtual memory usage based on these assumptions:
- *
- * 1) The major SPL consumer of virtual memory is the kmem cache.
- * 2) Memory allocated with vmem_alloc() is short lived and can be ignored.
- * 3) Allow a 4MB floor as a generous pad given normal consumption.
- * 4) The spl_kmem_cache_sem only contends with cache create/destroy.
- */
-size_t
-vmem_size(vmem_t *vmp, int typemask)
-{
- spl_kmem_cache_t *skc = NULL;
- size_t alloc = VMEM_FLOOR_SIZE;
-
- if ((typemask & VMEM_ALLOC) && (typemask & VMEM_FREE))
- return (VMALLOC_TOTAL);
-
-
- down_read(&spl_kmem_cache_sem);
- list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) {
- if (skc->skc_flags & KMC_VMEM)
- alloc += skc->skc_slab_size * skc->skc_slab_total;
- }
- up_read(&spl_kmem_cache_sem);
-
- if (typemask & VMEM_ALLOC)
- return (MIN(alloc, VMALLOC_TOTAL));
- else if (typemask & VMEM_FREE)
- return (MAX(VMALLOC_TOTAL - alloc, 0));
- else
- return (0);
-}
-EXPORT_SYMBOL(vmem_size);
-
/*
* Public vmem_alloc(), vmem_zalloc() and vmem_free() interfaces.
*/
diff --git a/module/os/linux/zfs/arc_os.c b/module/os/linux/zfs/arc_os.c
index 0c0289db6..b7a471c2f 100644
--- a/module/os/linux/zfs/arc_os.c
+++ b/module/os/linux/zfs/arc_os.c
@@ -126,72 +126,16 @@ arc_available_memory(void)
int64_t lowest = INT64_MAX;
free_memory_reason_t r = FMR_UNKNOWN;
int64_t n;
-#ifdef freemem
-#undef freemem
-#endif
- pgcnt_t needfree = btop(arc_need_free);
- pgcnt_t lotsfree = btop(arc_sys_free);
- pgcnt_t desfree = 0;
- pgcnt_t freemem = btop(arc_free_memory());
-
- if (needfree > 0) {
- n = PAGESIZE * (-needfree);
- if (n < lowest) {
- lowest = n;
- r = FMR_NEEDFREE;
- }
- }
- /*
- * check that we're out of range of the pageout scanner. It starts to
- * schedule paging if freemem is less than lotsfree and needfree.
- * lotsfree is the high-water mark for pageout, and needfree is the
- * number of needed free pages. We add extra pages here to make sure
- * the scanner doesn't start up while we're freeing memory.
- */
- n = PAGESIZE * (freemem - lotsfree - needfree - desfree);
- if (n < lowest) {
- lowest = n;
- r = FMR_LOTSFREE;
+ if (arc_need_free > 0) {
+ lowest = -arc_need_free;
+ r = FMR_NEEDFREE;
}
-#if defined(_ILP32)
- /*
- * If we're on a 32-bit platform, it's possible that we'll exhaust the
- * kernel heap space before we ever run out of available physical
- * memory. Most checks of the size of the heap_area compare against
- * tune.t_minarmem, which is the minimum available real memory that we
- * can have in the system. However, this is generally fixed at 25 pages
- * which is so low that it's useless. In this comparison, we seek to
- * calculate the total heap-size, and reclaim if more than 3/4ths of the
- * heap is allocated. (Or, in the calculation, if less than 1/4th is
- * free)
- */
- n = vmem_size(heap_arena, VMEM_FREE) -
- (vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC) >> 2);
+ n = arc_free_memory() - arc_sys_free - arc_need_free;
if (n < lowest) {
lowest = n;
- r = FMR_HEAP_ARENA;
- }
-#endif
-
- /*
- * If zio data pages are being allocated out of a separate heap segment,
- * then enforce that the size of available vmem for this arena remains
- * above about 1/4th (1/(2^arc_zio_arena_free_shift)) free.
- *
- * Note that reducing the arc_zio_arena_free_shift keeps more virtual
- * memory (in the zio_arena) free, which can avoid memory
- * fragmentation issues.
- */
- if (zio_arena != NULL) {
- n = (int64_t)vmem_size(zio_arena, VMEM_FREE) -
- (vmem_size(zio_arena, VMEM_ALLOC) >>
- arc_zio_arena_free_shift);
- if (n < lowest) {
- lowest = n;
- r = FMR_ZIO_ARENA;
- }
+ r = FMR_LOTSFREE;
}
last_free_memory = lowest;
@@ -317,14 +261,9 @@ SPL_SHRINKER_DECLARE(arc_shrinker,
int
arc_memory_throttle(spa_t *spa, uint64_t reserve, uint64_t txg)
{
- uint64_t available_memory = arc_free_memory();
-
-#if defined(_ILP32)
- available_memory =
- MIN(available_memory, vmem_size(heap_arena, VMEM_FREE));
-#endif
+ uint64_t free_memory = arc_free_memory();
- if (available_memory > arc_all_memory() * arc_lotsfree_percent / 100)
+ if (free_memory > arc_all_memory() * arc_lotsfree_percent / 100)
return (0);
if (txg > spa->spa_lowmem_last_txg) {
@@ -338,7 +277,7 @@ arc_memory_throttle(spa_t *spa, uint64_t reserve, uint64_t txg)
*/
if (current_is_kswapd()) {
if (spa->spa_lowmem_page_load >
- MAX(arc_sys_free / 4, available_memory) / 4) {
+ MAX(arc_sys_free / 4, free_memory) / 4) {
DMU_TX_STAT_BUMP(dmu_tx_memory_reclaim);
return (SET_ERROR(ERESTART));
}
diff --git a/module/zfs/arc.c b/module/zfs/arc.c
index e75c1e453..2048df467 100644
--- a/module/zfs/arc.c
+++ b/module/zfs/arc.c
@@ -391,11 +391,6 @@ static boolean_t arc_initialized;
boolean_t arc_warm;
/*
- * log2 fraction of the zio arena to keep free.
- */
-int arc_zio_arena_free_shift = 2;
-
-/*
* These tunables are for performance analysis.
*/
unsigned long zfs_arc_max = 0;
@@ -4687,14 +4682,6 @@ arc_kmem_reap_soon(void)
kmem_cache_reap_now(hdr_l2only_cache);
kmem_cache_reap_now(zfs_btree_leaf_cache);
abd_cache_reap_now();
-
- if (zio_arena != NULL) {
- /*
- * Ask the vmem arena to reclaim unused memory from its
- * quantum caches.
- */
- vmem_qcache_reap(zio_arena);
- }
}
/* ARGSUSED */