Diffstat (limited to 'module')
-rw-r--r--  module/os/linux/zfs/Makefile.in |   1
-rw-r--r--  module/os/linux/zfs/arc_os.c    | 445
-rw-r--r--  module/zfs/arc.c                | 460
3 files changed, 468 insertions, 438 deletions
diff --git a/module/os/linux/zfs/Makefile.in b/module/os/linux/zfs/Makefile.in
index e6cad2da4..e8ea81db3 100644
--- a/module/os/linux/zfs/Makefile.in
+++ b/module/os/linux/zfs/Makefile.in
@@ -10,6 +10,7 @@ endif
 ccflags-y += -I@abs_top_srcdir@/module/os/linux/zfs
 
 $(MODULE)-objs += ../os/linux/zfs/abd.o
+$(MODULE)-objs += ../os/linux/zfs/arc_os.o
 $(MODULE)-objs += ../os/linux/zfs/mmp_os.o
 $(MODULE)-objs += ../os/linux/zfs/policy.o
 $(MODULE)-objs += ../os/linux/zfs/trace.o
diff --git a/module/os/linux/zfs/arc_os.c b/module/os/linux/zfs/arc_os.c
new file mode 100644
index 000000000..696f671ab
--- /dev/null
+++ b/module/os/linux/zfs/arc_os.c
@@ -0,0 +1,445 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, Joyent, Inc.
+ * Copyright (c) 2011, 2019 by Delphix. All rights reserved.
+ * Copyright (c) 2014 by Saso Kiselkov. All rights reserved.
+ * Copyright 2017 Nexenta Systems, Inc. All rights reserved.
+ */
+
+#include <sys/spa.h>
+#include <sys/zio.h>
+#include <sys/spa_impl.h>
+#include <sys/zio_compress.h>
+#include <sys/zio_checksum.h>
+#include <sys/zfs_context.h>
+#include <sys/arc.h>
+#include <sys/refcount.h>
+#include <sys/vdev.h>
+#include <sys/vdev_trim.h>
+#include <sys/vdev_impl.h>
+#include <sys/dsl_pool.h>
+#include <sys/zio_checksum.h>
+#include <sys/multilist.h>
+#include <sys/abd.h>
+#include <sys/zil.h>
+#include <sys/fm/fs/zfs.h>
+#ifdef _KERNEL
+#include <sys/shrinker.h>
+#include <sys/vmsystm.h>
+#include <sys/zpl.h>
+#include <linux/page_compat.h>
+#endif
+#include <sys/callb.h>
+#include <sys/kstat.h>
+#include <sys/zthr.h>
+#include <zfs_fletcher.h>
+#include <sys/arc_impl.h>
+#include <sys/trace_defs.h>
+#include <sys/aggsum.h>
+
+int64_t last_free_memory;
+free_memory_reason_t last_free_reason;
+
+#ifdef _KERNEL
+/*
+ * Return maximum amount of memory that we could possibly use. Reduced
+ * to half of all memory in user space which is primarily used for testing.
+ */
+uint64_t
+arc_all_memory(void)
+{
+#ifdef CONFIG_HIGHMEM
+	return (ptob(zfs_totalram_pages - zfs_totalhigh_pages));
+#else
+	return (ptob(zfs_totalram_pages));
+#endif /* CONFIG_HIGHMEM */
+}
+
+/*
+ * Return the amount of memory that is considered free. In user space
+ * which is primarily used for testing we pretend that free memory ranges
+ * from 0-20% of all memory.
+ */
+uint64_t
+arc_free_memory(void)
+{
+#ifdef CONFIG_HIGHMEM
+	struct sysinfo si;
+	si_meminfo(&si);
+	return (ptob(si.freeram - si.freehigh));
+#else
+	return (ptob(nr_free_pages() +
+	    nr_inactive_file_pages() +
+	    nr_inactive_anon_pages() +
+	    nr_slab_reclaimable_pages()));
+#endif /* CONFIG_HIGHMEM */
+}
+
+/*
+ * Additional reserve of pages for pp_reserve.
+ */
+int64_t arc_pages_pp_reserve = 64;
+
+/*
+ * Additional reserve of pages for swapfs.
+ */
+int64_t arc_swapfs_reserve = 64;
+
+/*
+ * Return the amount of memory that can be consumed before reclaim will be
+ * needed. Positive if there is sufficient free memory, negative indicates
+ * the amount of memory that needs to be freed up.
+ */
+int64_t
+arc_available_memory(void)
+{
+	int64_t lowest = INT64_MAX;
+	free_memory_reason_t r = FMR_UNKNOWN;
+	int64_t n;
+#ifdef freemem
+#undef freemem
+#endif
+	pgcnt_t needfree = btop(arc_need_free);
+	pgcnt_t lotsfree = btop(arc_sys_free);
+	pgcnt_t desfree = 0;
+	pgcnt_t freemem = btop(arc_free_memory());
+
+	if (needfree > 0) {
+		n = PAGESIZE * (-needfree);
+		if (n < lowest) {
+			lowest = n;
+			r = FMR_NEEDFREE;
+		}
+	}
+
+	/*
+	 * check that we're out of range of the pageout scanner. It starts to
+	 * schedule paging if freemem is less than lotsfree and needfree.
+	 * lotsfree is the high-water mark for pageout, and needfree is the
+	 * number of needed free pages. We add extra pages here to make sure
+	 * the scanner doesn't start up while we're freeing memory.
+	 */
+	n = PAGESIZE * (freemem - lotsfree - needfree - desfree);
+	if (n < lowest) {
+		lowest = n;
+		r = FMR_LOTSFREE;
+	}
+
+#if defined(_ILP32)
+	/*
+	 * If we're on a 32-bit platform, it's possible that we'll exhaust the
+	 * kernel heap space before we ever run out of available physical
+	 * memory. Most checks of the size of the heap_area compare against
+	 * tune.t_minarmem, which is the minimum available real memory that we
+	 * can have in the system. However, this is generally fixed at 25 pages
+	 * which is so low that it's useless. In this comparison, we seek to
+	 * calculate the total heap-size, and reclaim if more than 3/4ths of the
+	 * heap is allocated. (Or, in the calculation, if less than 1/4th is
+	 * free)
+	 */
+	n = vmem_size(heap_arena, VMEM_FREE) -
+	    (vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC) >> 2);
+	if (n < lowest) {
+		lowest = n;
+		r = FMR_HEAP_ARENA;
+	}
+#endif
+
+	/*
+	 * If zio data pages are being allocated out of a separate heap segment,
+	 * then enforce that the size of available vmem for this arena remains
+	 * above about 1/4th (1/(2^arc_zio_arena_free_shift)) free.
+	 *
+	 * Note that reducing the arc_zio_arena_free_shift keeps more virtual
+	 * memory (in the zio_arena) free, which can avoid memory
+	 * fragmentation issues.
+	 */
+	if (zio_arena != NULL) {
+		n = (int64_t)vmem_size(zio_arena, VMEM_FREE) -
+		    (vmem_size(zio_arena, VMEM_ALLOC) >>
+		    arc_zio_arena_free_shift);
+		if (n < lowest) {
+			lowest = n;
+			r = FMR_ZIO_ARENA;
+		}
+	}
+
+	last_free_memory = lowest;
+	last_free_reason = r;
+
+	return (lowest);
+}
+
+static uint64_t
+arc_evictable_memory(void)
+{
+	int64_t asize = aggsum_value(&arc_size);
+	uint64_t arc_clean =
+	    zfs_refcount_count(&arc_mru->arcs_esize[ARC_BUFC_DATA]) +
+	    zfs_refcount_count(&arc_mru->arcs_esize[ARC_BUFC_METADATA]) +
+	    zfs_refcount_count(&arc_mfu->arcs_esize[ARC_BUFC_DATA]) +
+	    zfs_refcount_count(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
+	uint64_t arc_dirty = MAX((int64_t)asize - (int64_t)arc_clean, 0);
+
+	/*
+	 * Scale reported evictable memory in proportion to page cache, cap
+	 * at specified min/max.
+	 */
+	uint64_t min = (ptob(nr_file_pages()) / 100) * zfs_arc_pc_percent;
+	min = MAX(arc_c_min, MIN(arc_c_max, min));
+
+	if (arc_dirty >= min)
+		return (arc_clean);
+
+	return (MAX((int64_t)asize - (int64_t)min, 0));
+}
+
+/*
+ * If sc->nr_to_scan is zero, the caller is requesting a query of the
+ * number of objects which can potentially be freed. If it is nonzero,
+ * the request is to free that many objects.
+ *
+ * Linux kernels >= 3.12 have the count_objects and scan_objects callbacks
+ * in struct shrinker and also require the shrinker to return the number
+ * of objects freed.
+ *
+ * Older kernels require the shrinker to return the number of freeable
+ * objects following the freeing of nr_to_free.
+ */
+static spl_shrinker_t
+__arc_shrinker_func(struct shrinker *shrink, struct shrink_control *sc)
+{
+	int64_t pages;
+
+	/* The arc is considered warm once reclaim has occurred */
+	if (unlikely(arc_warm == B_FALSE))
+		arc_warm = B_TRUE;
+
+	/* Return the potential number of reclaimable pages */
+	pages = btop((int64_t)arc_evictable_memory());
+	if (sc->nr_to_scan == 0)
+		return (pages);
+
+	/* Not allowed to perform filesystem reclaim */
+	if (!(sc->gfp_mask & __GFP_FS))
+		return (SHRINK_STOP);
+
+	/* Reclaim in progress */
+	if (mutex_tryenter(&arc_adjust_lock) == 0) {
+		ARCSTAT_INCR(arcstat_need_free, ptob(sc->nr_to_scan));
+		return (0);
+	}
+
+	mutex_exit(&arc_adjust_lock);
+
+	/*
+	 * Evict the requested number of pages by shrinking arc_c the
+	 * requested amount.
+	 */
+	if (pages > 0) {
+		arc_reduce_target_size(ptob(sc->nr_to_scan));
+		if (current_is_kswapd())
+			arc_kmem_reap_soon();
+#ifdef HAVE_SPLIT_SHRINKER_CALLBACK
+		pages = MAX((int64_t)pages -
+		    (int64_t)btop(arc_evictable_memory()), 0);
+#else
+		pages = btop(arc_evictable_memory());
+#endif
+		/*
+		 * We've shrunk what we can, wake up threads.
+		 */
+		cv_broadcast(&arc_adjust_waiters_cv);
+	} else
+		pages = SHRINK_STOP;
+
+	/*
+	 * When direct reclaim is observed it usually indicates a rapid
+	 * increase in memory pressure. This occurs because the kswapd
+	 * threads were unable to asynchronously keep enough free memory
+	 * available. In this case set arc_no_grow to briefly pause arc
+	 * growth to avoid compounding the memory pressure.
+	 */
+	if (current_is_kswapd()) {
+		ARCSTAT_BUMP(arcstat_memory_indirect_count);
+	} else {
+		arc_no_grow = B_TRUE;
+		arc_kmem_reap_soon();
+		ARCSTAT_BUMP(arcstat_memory_direct_count);
+	}
+
+	return (pages);
+}
+SPL_SHRINKER_CALLBACK_WRAPPER(arc_shrinker_func);
+
+SPL_SHRINKER_DECLARE(arc_shrinker, arc_shrinker_func, DEFAULT_SEEKS);
+
+int
+arc_memory_throttle(spa_t *spa, uint64_t reserve, uint64_t txg)
+{
+	uint64_t available_memory = arc_free_memory();
+
+#if defined(_ILP32)
+	available_memory =
+	    MIN(available_memory, vmem_size(heap_arena, VMEM_FREE));
+#endif
+
+	if (available_memory > arc_all_memory() * arc_lotsfree_percent / 100)
+		return (0);
+
+	if (txg > spa->spa_lowmem_last_txg) {
+		spa->spa_lowmem_last_txg = txg;
+		spa->spa_lowmem_page_load = 0;
+	}
+	/*
+	 * If we are in pageout, we know that memory is already tight,
+	 * the arc is already going to be evicting, so we just want to
+	 * continue to let page writes occur as quickly as possible.
+	 */
+	if (current_is_kswapd()) {
+		if (spa->spa_lowmem_page_load >
+		    MAX(arc_sys_free / 4, available_memory) / 4) {
+			DMU_TX_STAT_BUMP(dmu_tx_memory_reclaim);
+			return (SET_ERROR(ERESTART));
+		}
+		/* Note: reserve is inflated, so we deflate */
+		atomic_add_64(&spa->spa_lowmem_page_load, reserve / 8);
+		return (0);
+	} else if (spa->spa_lowmem_page_load > 0 && arc_reclaim_needed()) {
+		/* memory is low, delay before restarting */
+		ARCSTAT_INCR(arcstat_memory_throttle_count, 1);
+		DMU_TX_STAT_BUMP(dmu_tx_memory_reclaim);
+		return (SET_ERROR(EAGAIN));
+	}
+	spa->spa_lowmem_page_load = 0;
+	return (0);
+}
+
+void
+arc_lowmem_init(void)
+{
+	uint64_t allmem = arc_all_memory();
+
+	/*
+	 * Register a shrinker to support synchronous (direct) memory
+	 * reclaim from the arc. This is done to prevent kswapd from
+	 * swapping out pages when it is preferable to shrink the arc.
+	 */
+	spl_register_shrinker(&arc_shrinker);
+
+	/* Set to 1/64 of all memory or a minimum of 512K */
+	arc_sys_free = MAX(allmem / 64, (512 * 1024));
+	arc_need_free = 0;
+}
+
+void
+arc_lowmem_fini(void)
+{
+	spl_unregister_shrinker(&arc_shrinker);
+}
+#else /* _KERNEL */
+int64_t
+arc_available_memory(void)
+{
+	int64_t lowest = INT64_MAX;
+	free_memory_reason_t r = FMR_UNKNOWN;
+
+	/* Every 100 calls, free a small amount */
+	if (spa_get_random(100) == 0)
+		lowest = -1024;
+
+	last_free_memory = lowest;
+	last_free_reason = r;
+
+	return (lowest);
+}
+
+int
+arc_memory_throttle(spa_t *spa, uint64_t reserve, uint64_t txg)
+{
+	return (0);
+}
+
+uint64_t
+arc_all_memory(void)
+{
+	return (ptob(physmem) / 2);
+}
+
+uint64_t
+arc_free_memory(void)
+{
+	return (spa_get_random(arc_all_memory() * 20 / 100));
+}
+#endif /* _KERNEL */
+
+/*
+ * Helper function for arc_prune_async(); it is responsible for safely
+ * handling the execution of a registered arc_prune_func_t.
+ */
+static void
+arc_prune_task(void *ptr)
+{
+	arc_prune_t *ap = (arc_prune_t *)ptr;
+	arc_prune_func_t *func = ap->p_pfunc;
+
+	if (func != NULL)
+		func(ap->p_adjust, ap->p_private);
+
+	zfs_refcount_remove(&ap->p_refcnt, func);
+}
+
+/*
+ * Notify registered consumers they must drop holds on a portion of the ARC
+ * buffers they reference. This provides a mechanism to ensure the ARC can
+ * honor the arc_meta_limit and reclaim otherwise pinned ARC buffers. This
+ * is analogous to dnlc_reduce_cache() but more generic.
+ *
+ * This operation is performed asynchronously so it may be safely called
+ * in the context of the arc_reclaim_thread(). A reference is taken here
+ * for each registered arc_prune_t and the arc_prune_task() is responsible
+ * for releasing it once the registered arc_prune_func_t has completed.
+ */
+void
+arc_prune_async(int64_t adjust)
+{
+	arc_prune_t *ap;
+
+	mutex_enter(&arc_prune_mtx);
+	for (ap = list_head(&arc_prune_list); ap != NULL;
+	    ap = list_next(&arc_prune_list, ap)) {
+
+		if (zfs_refcount_count(&ap->p_refcnt) >= 2)
+			continue;
+
+		zfs_refcount_add(&ap->p_refcnt, ap->p_pfunc);
+		ap->p_adjust = adjust;
+		if (taskq_dispatch(arc_prune_taskq, arc_prune_task,
+		    ap, TQ_SLEEP) == TASKQID_INVALID) {
+			zfs_refcount_remove(&ap->p_refcnt, ap->p_pfunc);
+			continue;
+		}
+		ARCSTAT_BUMP(arcstat_prune);
+	}
+	mutex_exit(&arc_prune_mtx);
+}
diff --git a/module/zfs/arc.c b/module/zfs/arc.c
index c1ad8785d..07c52689b 100644
--- a/module/zfs/arc.c
+++ b/module/zfs/arc.c
@@ -291,12 +291,6 @@
 #include <sys/abd.h>
 #include <sys/zil.h>
 #include <sys/fm/fs/zfs.h>
-#ifdef _KERNEL
-#include <sys/shrinker.h>
-#include <sys/vmsystm.h>
-#include <sys/zpl.h>
-#include <linux/page_compat.h>
-#endif
 #include <sys/callb.h>
 #include <sys/kstat.h>
 #include <sys/zthr.h>
@@ -322,11 +316,11 @@ static zthr_t *arc_reap_zthr;
  * This thread's job is to keep arc_size under arc_c, by calling
  * arc_adjust(), which improves arc_is_overflowing().
  */
-static zthr_t *arc_adjust_zthr;
+zthr_t *arc_adjust_zthr;
 
-static kmutex_t arc_adjust_lock;
-static kcondvar_t arc_adjust_waiters_cv;
-static boolean_t arc_adjust_needed = B_FALSE;
+kmutex_t arc_adjust_lock;
+kcondvar_t arc_adjust_waiters_cv;
+boolean_t arc_adjust_needed = B_FALSE;
 
 /*
  * The number of headers to evict in arc_evict_state_impl() before
@@ -338,7 +332,7 @@ static boolean_t arc_adjust_needed = B_FALSE;
 int zfs_arc_evict_batch_limit = 10;
 
 /* number of seconds before growing cache again */
-static int arc_grow_retry = 5;
+int arc_grow_retry = 5;
 
 /*
  * Minimum time between calls to arc_kmem_reap_soon().
@@ -352,11 +346,11 @@ int zfs_arc_overflow_shift = 8;
 int arc_p_min_shift = 4;
 
 /* log2(fraction of arc to reclaim) */
-static int arc_shrink_shift = 7;
+int arc_shrink_shift = 7;
 
 /* percent of pagecache to reclaim arc to */
 #ifdef _KERNEL
-static uint_t zfs_arc_pc_percent = 0;
+uint_t zfs_arc_pc_percent = 0;
 #endif
 
 /*
@@ -391,7 +385,7 @@ static boolean_t arc_initialized;
 /*
  * The arc has filled available memory and has now warmed up.
  */
-static boolean_t arc_warm;
+boolean_t arc_warm;
 
 /*
  * log2 fraction of the zio arena to keep free.
@@ -455,7 +449,7 @@ arc_state_t ARC_mfu;
 arc_state_t ARC_mfu_ghost;
 arc_state_t ARC_l2c_only;
 
-static arc_stats_t arc_stats = {
+arc_stats_t arc_stats = {
 	{ "hits",			KSTAT_DATA_UINT64 },
 	{ "misses",			KSTAT_DATA_UINT64 },
 	{ "demand_data_hits",		KSTAT_DATA_UINT64 },
@@ -556,14 +550,6 @@ static arc_stats_t arc_stats = {
 	{ "arc_raw_size",		KSTAT_DATA_UINT64 }
 };
 
-#define	ARCSTAT(stat)	(arc_stats.stat.value.ui64)
-
-#define	ARCSTAT_INCR(stat, val) \
-	atomic_add_64(&arc_stats.stat.value.ui64, (val))
-
-#define	ARCSTAT_BUMP(stat)	ARCSTAT_INCR(stat, 1)
-#define	ARCSTAT_BUMPDOWN(stat)	ARCSTAT_INCR(stat, -1)
-
 #define	ARCSTAT_MAX(stat, val) { \
 	uint64_t m; \
 	while ((val) > (m = arc_stats.stat.value.ui64) && \
@@ -596,12 +582,13 @@ static arc_stats_t arc_stats = {
 kstat_t			*arc_ksp;
 
 static arc_state_t	*arc_anon;
-static arc_state_t	*arc_mru;
 static arc_state_t	*arc_mru_ghost;
-static arc_state_t	*arc_mfu;
 static arc_state_t	*arc_mfu_ghost;
 static arc_state_t	*arc_l2c_only;
 
+arc_state_t	*arc_mru;
+arc_state_t	*arc_mfu;
+
 /*
  * There are several ARC variables that are critical to export as kstats --
  * but we don't want to have to grovel around in the kstat whenever we wish to
@@ -610,11 +597,6 @@ static arc_state_t *arc_l2c_only;
  * the possibility of inconsistency by having shadow copies of the variables,
  * while still allowing the code to be readable.
  */
-#define	arc_p		ARCSTAT(arcstat_p)	/* target size of MRU */
-#define	arc_c		ARCSTAT(arcstat_c)	/* target size of cache */
-#define	arc_c_min	ARCSTAT(arcstat_c_min)	/* min target cache size */
-#define	arc_c_max	ARCSTAT(arcstat_c_max)	/* max target cache size */
-#define	arc_no_grow	ARCSTAT(arcstat_no_grow) /* do not grow cache size */
 #define	arc_tempreserve	ARCSTAT(arcstat_tempreserve)
 #define	arc_loaned_bytes	ARCSTAT(arcstat_loaned_bytes)
 #define	arc_meta_limit	ARCSTAT(arcstat_meta_limit) /* max size for metadata */
@@ -622,8 +604,6 @@ static arc_state_t *arc_l2c_only;
 #define	arc_dnode_size_limit	ARCSTAT(arcstat_dnode_limit)
 #define	arc_meta_min	ARCSTAT(arcstat_meta_min) /* min size for metadata */
 #define	arc_meta_max	ARCSTAT(arcstat_meta_max) /* max size of metadata */
-#define	arc_need_free	ARCSTAT(arcstat_need_free) /* bytes to be freed */
-#define	arc_sys_free	ARCSTAT(arcstat_sys_free) /* target system free bytes */
 
 /* size of all b_rabd's in entire arc */
 #define	arc_raw_size	ARCSTAT(arcstat_raw_size)
@@ -652,10 +632,10 @@ aggsum_t astat_bonus_size;
 aggsum_t astat_hdr_size;
 aggsum_t astat_l2_hdr_size;
 
-static hrtime_t arc_growtime;
-static list_t arc_prune_list;
-static kmutex_t arc_prune_mtx;
-static taskq_t *arc_prune_taskq;
+hrtime_t arc_growtime;
+list_t arc_prune_list;
+kmutex_t arc_prune_mtx;
+taskq_t *arc_prune_taskq;
 
 #define	GHOST_STATE(state)	\
 	((state) == arc_mru_ghost || (state) == arc_mfu_ghost || \
@@ -835,7 +815,6 @@ static void arc_access(arc_buf_hdr_t *, kmutex_t *);
 static boolean_t arc_is_overflowing(void);
 static void arc_buf_watch(arc_buf_t *);
 static void arc_tuning_update(void);
-static void arc_prune_async(int64_t);
 
 static arc_buf_contents_t arc_buf_type(arc_buf_hdr_t *);
 static uint32_t arc_bufc_to_flags(arc_buf_contents_t);
@@ -4043,57 +4022,6 @@ arc_flush_state(arc_state_t *state, uint64_t spa, arc_buf_contents_t type,
 }
 
 /*
- * Helper function for arc_prune_async() it is responsible for safely
- * handling the execution of a registered arc_prune_func_t.
- */
-static void
-arc_prune_task(void *ptr)
-{
-	arc_prune_t *ap = (arc_prune_t *)ptr;
-	arc_prune_func_t *func = ap->p_pfunc;
-
-	if (func != NULL)
-		func(ap->p_adjust, ap->p_private);
-
-	zfs_refcount_remove(&ap->p_refcnt, func);
-}
-
-/*
- * Notify registered consumers they must drop holds on a portion of the ARC
- * buffered they reference. This provides a mechanism to ensure the ARC can
- * honor the arc_meta_limit and reclaim otherwise pinned ARC buffers. This
- * is analogous to dnlc_reduce_cache() but more generic.
- *
- * This operation is performed asynchronously so it may be safely called
- * in the context of the arc_reclaim_thread(). A reference is taken here
- * for each registered arc_prune_t and the arc_prune_task() is responsible
- * for releasing it once the registered arc_prune_func_t has completed.
- */
-static void
-arc_prune_async(int64_t adjust)
-{
-	arc_prune_t *ap;
-
-	mutex_enter(&arc_prune_mtx);
-	for (ap = list_head(&arc_prune_list); ap != NULL;
-	    ap = list_next(&arc_prune_list, ap)) {
-
-		if (zfs_refcount_count(&ap->p_refcnt) >= 2)
-			continue;
-
-		zfs_refcount_add(&ap->p_refcnt, ap->p_pfunc);
-		ap->p_adjust = adjust;
-		if (taskq_dispatch(arc_prune_taskq, arc_prune_task,
-		    ap, TQ_SLEEP) == TASKQID_INVALID) {
-			zfs_refcount_remove(&ap->p_refcnt, ap->p_pfunc);
-			continue;
-		}
-		ARCSTAT_BUMP(arcstat_prune);
-	}
-	mutex_exit(&arc_prune_mtx);
-}
-
-/*
  * Evict the specified number of bytes from the state specified,
  * restricting eviction to the spa and type given. This function
  * prevents us from trying to evict more from a state's list than
@@ -4524,7 +4452,7 @@ arc_flush(spa_t *spa, boolean_t retry)
 	(void) arc_flush_state(arc_mfu_ghost, guid, ARC_BUFC_METADATA, retry);
 }
 
-static void
+void
 arc_reduce_target_size(int64_t to_free)
 {
 	uint64_t asize = aggsum_value(&arc_size);
@@ -4549,209 +4477,19 @@ arc_reduce_target_size(int64_t to_free)
 		zthr_wakeup(arc_adjust_zthr);
 	}
 }
-/*
- * Return maximum amount of memory that we could possibly use. Reduced
- * to half of all memory in user space which is primarily used for testing.
- */
-uint64_t
-arc_all_memory(void)
-{
-#ifdef _KERNEL
-#ifdef CONFIG_HIGHMEM
-	return (ptob(zfs_totalram_pages - zfs_totalhigh_pages));
-#else
-	return (ptob(zfs_totalram_pages));
-#endif /* CONFIG_HIGHMEM */
-#else
-	return (ptob(physmem) / 2);
-#endif /* _KERNEL */
-}
-
-/*
- * Return the amount of memory that is considered free. In user space
- * which is primarily used for testing we pretend that free memory ranges
- * from 0-20% of all memory.
- */
-static uint64_t
-arc_free_memory(void)
-{
-#ifdef _KERNEL
-#ifdef CONFIG_HIGHMEM
-	struct sysinfo si;
-	si_meminfo(&si);
-	return (ptob(si.freeram - si.freehigh));
-#else
-	return (ptob(nr_free_pages() +
-	    nr_inactive_file_pages() +
-	    nr_inactive_anon_pages() +
-	    nr_slab_reclaimable_pages()));
-
-#endif /* CONFIG_HIGHMEM */
-#else
-	return (spa_get_random(arc_all_memory() * 20 / 100));
-#endif /* _KERNEL */
-}
-
-typedef enum free_memory_reason_t {
-	FMR_UNKNOWN,
-	FMR_NEEDFREE,
-	FMR_LOTSFREE,
-	FMR_SWAPFS_MINFREE,
-	FMR_PAGES_PP_MAXIMUM,
-	FMR_HEAP_ARENA,
-	FMR_ZIO_ARENA,
-} free_memory_reason_t;
-
-int64_t last_free_memory;
-free_memory_reason_t last_free_reason;
-
-#ifdef _KERNEL
-/*
- * Additional reserve of pages for pp_reserve.
- */
-int64_t arc_pages_pp_reserve = 64;
-
-/*
- * Additional reserve of pages for swapfs.
- */
-int64_t arc_swapfs_reserve = 64;
-#endif /* _KERNEL */
-
-/*
- * Return the amount of memory that can be consumed before reclaim will be
- * needed. Positive if there is sufficient free memory, negative indicates
- * the amount of memory that needs to be freed up.
- */
-static int64_t
-arc_available_memory(void)
-{
-	int64_t lowest = INT64_MAX;
-	free_memory_reason_t r = FMR_UNKNOWN;
-#ifdef _KERNEL
-	int64_t n;
-#ifdef __linux__
-#ifdef freemem
-#undef freemem
-#endif
-	pgcnt_t needfree = btop(arc_need_free);
-	pgcnt_t lotsfree = btop(arc_sys_free);
-	pgcnt_t desfree = 0;
-	pgcnt_t freemem = btop(arc_free_memory());
-#endif
-
-	if (needfree > 0) {
-		n = PAGESIZE * (-needfree);
-		if (n < lowest) {
-			lowest = n;
-			r = FMR_NEEDFREE;
-		}
-	}
-
-	/*
-	 * check that we're out of range of the pageout scanner. It starts to
-	 * schedule paging if freemem is less than lotsfree and needfree.
-	 * lotsfree is the high-water mark for pageout, and needfree is the
-	 * number of needed free pages. We add extra pages here to make sure
-	 * the scanner doesn't start up while we're freeing memory.
-	 */
-	n = PAGESIZE * (freemem - lotsfree - needfree - desfree);
-	if (n < lowest) {
-		lowest = n;
-		r = FMR_LOTSFREE;
-	}
-
-#ifndef __linux__
-	/*
-	 * check to make sure that swapfs has enough space so that anon
-	 * reservations can still succeed. anon_resvmem() checks that the
-	 * availrmem is greater than swapfs_minfree, and the number of reserved
-	 * swap pages. We also add a bit of extra here just to prevent
-	 * circumstances from getting really dire.
-	 */
-	n = PAGESIZE * (availrmem - swapfs_minfree - swapfs_reserve -
-	    desfree - arc_swapfs_reserve);
-	if (n < lowest) {
-		lowest = n;
-		r = FMR_SWAPFS_MINFREE;
-	}
-
-	/*
-	 * Check that we have enough availrmem that memory locking (e.g., via
-	 * mlock(3C) or memcntl(2)) can still succeed. (pages_pp_maximum
-	 * stores the number of pages that cannot be locked; when availrmem
-	 * drops below pages_pp_maximum, page locking mechanisms such as
-	 * page_pp_lock() will fail.)
-	 */
-	n = PAGESIZE * (availrmem - pages_pp_maximum -
-	    arc_pages_pp_reserve);
-	if (n < lowest) {
-		lowest = n;
-		r = FMR_PAGES_PP_MAXIMUM;
-	}
-#endif
-
-#if defined(_ILP32)
-	/*
-	 * If we're on a 32-bit platform, it's possible that we'll exhaust the
-	 * kernel heap space before we ever run out of available physical
-	 * memory. Most checks of the size of the heap_area compare against
-	 * tune.t_minarmem, which is the minimum available real memory that we
-	 * can have in the system. However, this is generally fixed at 25 pages
-	 * which is so low that it's useless. In this comparison, we seek to
-	 * calculate the total heap-size, and reclaim if more than 3/4ths of the
-	 * heap is allocated. (Or, in the calculation, if less than 1/4th is
-	 * free)
-	 */
-	n = vmem_size(heap_arena, VMEM_FREE) -
-	    (vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC) >> 2);
-	if (n < lowest) {
-		lowest = n;
-		r = FMR_HEAP_ARENA;
-	}
-#endif
-
-	/*
-	 * If zio data pages are being allocated out of a separate heap segment,
-	 * then enforce that the size of available vmem for this arena remains
-	 * above about 1/4th (1/(2^arc_zio_arena_free_shift)) free.
-	 *
-	 * Note that reducing the arc_zio_arena_free_shift keeps more virtual
-	 * memory (in the zio_arena) free, which can avoid memory
-	 * fragmentation issues.
-	 */
-	if (zio_arena != NULL) {
-		n = (int64_t)vmem_size(zio_arena, VMEM_FREE) -
-		    (vmem_size(zio_arena, VMEM_ALLOC) >>
-		    arc_zio_arena_free_shift);
-		if (n < lowest) {
-			lowest = n;
-			r = FMR_ZIO_ARENA;
-		}
-	}
-#else /* _KERNEL */
-	/* Every 100 calls, free a small amount */
-	if (spa_get_random(100) == 0)
-		lowest = -1024;
-#endif /* _KERNEL */
-
-	last_free_memory = lowest;
-	last_free_reason = r;
-
-	return (lowest);
-}
 
 /*
  * Determine if the system is under memory pressure and is asking
  * to reclaim memory. A return value of B_TRUE indicates that the system
  * is under memory pressure and that the arc should adjust accordingly.
  */
-static boolean_t
+boolean_t
 arc_reclaim_needed(void)
 {
 	return (arc_available_memory() < 0);
 }
 
-static void
+void
 arc_kmem_reap_soon(void)
 {
 	size_t i;
@@ -5027,109 +4765,7 @@ arc_reap_cb(void *arg, zthr_t *zthr)
  * already below arc_c_min, evicting any more would only
  * increase this negative difference.
  */
-static uint64_t
-arc_evictable_memory(void)
-{
-	int64_t asize = aggsum_value(&arc_size);
-	uint64_t arc_clean =
-	    zfs_refcount_count(&arc_mru->arcs_esize[ARC_BUFC_DATA]) +
-	    zfs_refcount_count(&arc_mru->arcs_esize[ARC_BUFC_METADATA]) +
-	    zfs_refcount_count(&arc_mfu->arcs_esize[ARC_BUFC_DATA]) +
-	    zfs_refcount_count(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
-	uint64_t arc_dirty = MAX((int64_t)asize - (int64_t)arc_clean, 0);
-
-	/*
-	 * Scale reported evictable memory in proportion to page cache, cap
-	 * at specified min/max.
-	 */
-	uint64_t min = (ptob(nr_file_pages()) / 100) * zfs_arc_pc_percent;
-	min = MAX(arc_c_min, MIN(arc_c_max, min));
-
-	if (arc_dirty >= min)
-		return (arc_clean);
-
-	return (MAX((int64_t)asize - (int64_t)min, 0));
-}
-
-/*
- * If sc->nr_to_scan is zero, the caller is requesting a query of the
- * number of objects which can potentially be freed. If it is nonzero,
- * the request is to free that many objects.
- *
- * Linux kernels >= 3.12 have the count_objects and scan_objects callbacks
- * in struct shrinker and also require the shrinker to return the number
- * of objects freed.
- *
- * Older kernels require the shrinker to return the number of freeable
- * objects following the freeing of nr_to_free.
- */
-static spl_shrinker_t
-__arc_shrinker_func(struct shrinker *shrink, struct shrink_control *sc)
-{
-	int64_t pages;
-
-	/* The arc is considered warm once reclaim has occurred */
-	if (unlikely(arc_warm == B_FALSE))
-		arc_warm = B_TRUE;
-
-	/* Return the potential number of reclaimable pages */
-	pages = btop((int64_t)arc_evictable_memory());
-	if (sc->nr_to_scan == 0)
-		return (pages);
-
-	/* Not allowed to perform filesystem reclaim */
-	if (!(sc->gfp_mask & __GFP_FS))
-		return (SHRINK_STOP);
-
-	/* Reclaim in progress */
-	if (mutex_tryenter(&arc_adjust_lock) == 0) {
-		ARCSTAT_INCR(arcstat_need_free, ptob(sc->nr_to_scan));
-		return (0);
-	}
-	mutex_exit(&arc_adjust_lock);
-
-	/*
-	 * Evict the requested number of pages by shrinking arc_c the
-	 * requested amount.
-	 */
-	if (pages > 0) {
-		arc_reduce_target_size(ptob(sc->nr_to_scan));
-		if (current_is_kswapd())
-			arc_kmem_reap_soon();
-#ifdef HAVE_SPLIT_SHRINKER_CALLBACK
-		pages = MAX((int64_t)pages -
-		    (int64_t)btop(arc_evictable_memory()), 0);
-#else
-		pages = btop(arc_evictable_memory());
-#endif
-		/*
-		 * We've shrunk what we can, wake up threads.
-		 */
-		cv_broadcast(&arc_adjust_waiters_cv);
-	} else
-		pages = SHRINK_STOP;
-
-	/*
-	 * When direct reclaim is observed it usually indicates a rapid
-	 * increase in memory pressure. This occurs because the kswapd
-	 * threads were unable to asynchronously keep enough free memory
-	 * available. In this case set arc_no_grow to briefly pause arc
-	 * growth to avoid compounding the memory pressure.
-	 */
-	if (current_is_kswapd()) {
-		ARCSTAT_BUMP(arcstat_memory_indirect_count);
-	} else {
-		arc_no_grow = B_TRUE;
-		arc_kmem_reap_soon();
-		ARCSTAT_BUMP(arcstat_memory_direct_count);
-	}
-
-	return (pages);
-}
-SPL_SHRINKER_CALLBACK_WRAPPER(arc_shrinker_func);
-
-SPL_SHRINKER_DECLARE(arc_shrinker, arc_shrinker_func, DEFAULT_SEEKS);
 #endif /* _KERNEL */
 
 /*
@@ -7051,49 +6687,6 @@ arc_write(zio_t *pio, spa_t *spa, uint64_t txg,
 	return (zio);
 }
 
-static int
-arc_memory_throttle(spa_t *spa, uint64_t reserve, uint64_t txg)
-{
-#ifdef _KERNEL
-	uint64_t available_memory = arc_free_memory();
-
-#if defined(_ILP32)
-	available_memory =
-	    MIN(available_memory, vmem_size(heap_arena, VMEM_FREE));
-#endif
-
-	if (available_memory > arc_all_memory() * arc_lotsfree_percent / 100)
-		return (0);
-
-	if (txg > spa->spa_lowmem_last_txg) {
-		spa->spa_lowmem_last_txg = txg;
-		spa->spa_lowmem_page_load = 0;
-	}
-	/*
-	 * If we are in pageout, we know that memory is already tight,
-	 * the arc is already going to be evicting, so we just want to
-	 * continue to let page writes occur as quickly as possible.
-	 */
-	if (current_is_kswapd()) {
-		if (spa->spa_lowmem_page_load >
-		    MAX(arc_sys_free / 4, available_memory) / 4) {
-			DMU_TX_STAT_BUMP(dmu_tx_memory_reclaim);
-			return (SET_ERROR(ERESTART));
-		}
-		/* Note: reserve is inflated, so we deflate */
-		atomic_add_64(&spa->spa_lowmem_page_load, reserve / 8);
-		return (0);
-	} else if (spa->spa_lowmem_page_load > 0 && arc_reclaim_needed()) {
-		/* memory is low, delay before restarting */
-		ARCSTAT_INCR(arcstat_memory_throttle_count, 1);
-		DMU_TX_STAT_BUMP(dmu_tx_memory_reclaim);
-		return (SET_ERROR(EAGAIN));
-	}
-	spa->spa_lowmem_page_load = 0;
-#endif /* _KERNEL */
-	return (0);
-}
-
 void
 arc_tempreserve_clear(uint64_t reserve)
 {
@@ -7522,17 +7115,8 @@ arc_init(void)
 	arc_min_prefetch_ms = 1000;
 	arc_min_prescient_prefetch_ms = 6000;
 
-#ifdef _KERNEL
-	/*
-	 * Register a shrinker to support synchronous (direct) memory
-	 * reclaim from the arc. This is done to prevent kswapd from
-	 * swapping out pages when it is preferable to shrink the arc.
-	 */
-	spl_register_shrinker(&arc_shrinker);
-
-	/* Set to 1/64 of all memory or a minimum of 512K */
-	arc_sys_free = MAX(allmem / 64, (512 * 1024));
-	arc_need_free = 0;
+#if defined(_KERNEL)
+	arc_lowmem_init();
 #endif
 
 	/* Set max to 1/2 of all memory */
@@ -7636,7 +7220,7 @@ arc_fini(void)
 	arc_prune_t *p;
 
 #ifdef _KERNEL
-	spl_unregister_shrinker(&arc_shrinker);
+	arc_lowmem_fini();
 #endif /* _KERNEL */
 
 	/* Use B_TRUE to ensure *all* buffers are evicted */
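
For readers unfamiliar with the Linux shrinker interface that SPL_SHRINKER_DECLARE() and __arc_shrinker_func() plug into, here is a minimal sketch of the split count/scan callback API (kernels >= 3.12, the range this change targets, before the 6.x API rework). Everything named demo_* is hypothetical; struct shrinker, struct shrink_control, SHRINK_STOP, DEFAULT_SEEKS, and register_shrinker()/unregister_shrinker() come from the kernel.

#include <linux/mm.h>
#include <linux/shrinker.h>

static unsigned long demo_cached_pages = 1024;	/* hypothetical cache size */

/* count_objects: report how much could be freed; must not free anything. */
static unsigned long
demo_count(struct shrinker *s, struct shrink_control *sc)
{
	return (demo_cached_pages);
}

/* scan_objects: free up to sc->nr_to_scan objects, return the number freed. */
static unsigned long
demo_scan(struct shrinker *s, struct shrink_control *sc)
{
	unsigned long freed;

	/* Refuse to recurse into filesystem reclaim, as arc_os.c does. */
	if (!(sc->gfp_mask & __GFP_FS))
		return (SHRINK_STOP);

	freed = min(demo_cached_pages, sc->nr_to_scan);
	demo_cached_pages -= freed;
	return (freed);
}

static struct shrinker demo_shrinker = {
	.count_objects	= demo_count,
	.scan_objects	= demo_scan,
	.seeks		= DEFAULT_SEEKS,
};

/* register_shrinker(&demo_shrinker) at module init;
 * unregister_shrinker(&demo_shrinker) at module exit. */

The SPL wrappers in the diff exist precisely to paper over the pre-3.12 single-callback variant, which is why __arc_shrinker_func() handles both the HAVE_SPLIT_SHRINKER_CALLBACK and legacy return conventions.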
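
The lowest-wins bookkeeping in arc_available_memory() reduces to simple page arithmetic. Below is a self-contained userspace sketch of just the FMR_LOTSFREE branch; the page counts are made up and PAGESIZE is hardcoded to 4096, but the formula matches the kernel code above.

#include <stdint.h>
#include <stdio.h>

#define PAGESIZE 4096

int
main(void)
{
	int64_t lowest = INT64_MAX;
	const char *reason = "FMR_UNKNOWN";

	/* Hypothetical page counts standing in for btop(arc_free_memory())
	 * and btop(arc_sys_free). */
	int64_t freemem = 20000, lotsfree = 16384, needfree = 0, desfree = 0;

	/* Headroom above the pageout scanner's start threshold, in bytes. */
	int64_t n = PAGESIZE * (freemem - lotsfree - needfree - desfree);
	if (n < lowest) {
		lowest = n;
		reason = "FMR_LOTSFREE";
	}

	/* 3616 pages of headroom -> positive: no reclaim needed yet. */
	printf("lowest = %lld bytes (%s)\n", (long long)lowest, reason);
	return (0);
}

A negative result here is exactly what makes arc_reclaim_needed() return B_TRUE, since that function is just arc_available_memory() < 0.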
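
Likewise, the kswapd branch of arc_memory_throttle() deflates each inflated reservation by 8x and starts returning ERESTART once the accumulated page load exceeds MAX(arc_sys_free / 4, available_memory) / 4. A userspace sketch with hypothetical sizes (all values in bytes, chosen only to make the arithmetic visible):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t arc_sys_free = 512ULL << 20;		/* assumed: 512 MiB */
	uint64_t available_memory = 64ULL << 20;	/* assumed: 64 MiB */
	uint64_t reserve = 8ULL << 20;			/* inflated reservation */
	uint64_t page_load = 0;
	int writers = 0;

	/* Threshold: MAX(512/4, 64) MiB / 4 = 32 MiB. */
	uint64_t limit = ((arc_sys_free / 4 > available_memory) ?
	    arc_sys_free / 4 : available_memory) / 4;

	/* Each reservation is deflated by 8x before being accounted. */
	while (page_load <= limit) {
		page_load += reserve / 8;
		writers++;
	}

	printf("throttle trips after %d reservations "
	    "(page_load %llu > limit %llu)\n", writers,
	    (unsigned long long)page_load, (unsigned long long)limit);
	return (0);
}

With these numbers the 33rd reservation trips the throttle, after which callers see ERESTART and retry the transaction, giving kswapd time to catch up; the counter resets on the next txg.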