-rw-r--r--  include/sys/arc_impl.h        |  2
-rw-r--r--  module/os/linux/zfs/arc_os.c  |  4
-rw-r--r--  module/zfs/arc.c              | 40
3 files changed, 31 insertions(+), 15 deletions(-)
diff --git a/include/sys/arc_impl.h b/include/sys/arc_impl.h
index d4167945f..2468a4aac 100644
--- a/include/sys/arc_impl.h
+++ b/include/sys/arc_impl.h
@@ -612,7 +612,7 @@ extern void arc_prune_async(int64_t);
extern int arc_memory_throttle(spa_t *spa, uint64_t reserve, uint64_t txg);
extern uint64_t arc_free_memory(void);
extern int64_t arc_available_memory(void);
-extern void arc_tuning_update(void);
+extern void arc_tuning_update(boolean_t);
extern int param_set_arc_long(ZFS_MODULE_PARAM_ARGS);
extern int param_set_arc_int(ZFS_MODULE_PARAM_ARGS);
diff --git a/module/os/linux/zfs/arc_os.c b/module/os/linux/zfs/arc_os.c
index bff0f0517..e34d4ae08 100644
--- a/module/os/linux/zfs/arc_os.c
+++ b/module/os/linux/zfs/arc_os.c
@@ -377,7 +377,7 @@ param_set_arc_long(const char *buf, zfs_kernel_param_t *kp)
if (error < 0)
return (SET_ERROR(error));
- arc_tuning_update();
+ arc_tuning_update(B_TRUE);
return (0);
}
@@ -391,7 +391,7 @@ param_set_arc_int(const char *buf, zfs_kernel_param_t *kp)
if (error < 0)
return (SET_ERROR(error));
- arc_tuning_update();
+ arc_tuning_update(B_TRUE);
return (0);
}
diff --git a/module/zfs/arc.c b/module/zfs/arc.c
index 9c5ee9829..b5d17431c 100644
--- a/module/zfs/arc.c
+++ b/module/zfs/arc.c
@@ -4558,7 +4558,7 @@ arc_adjust_cb_check(void *arg, zthr_t *zthr)
* their actual internal variable counterparts. Without this,
* changing those module params at runtime would have no effect.
*/
- arc_tuning_update();
+ arc_tuning_update(B_FALSE);
/*
* This is necessary in order to keep the kstat information
@@ -6900,6 +6900,14 @@ arc_state_multilist_index_func(multilist_t *ml, void *obj)
multilist_get_num_sublists(ml));
}
+#define WARN_IF_TUNING_IGNORED(tuning, value, do_warn) do { \
+ if ((do_warn) && (tuning) && ((tuning) != (value))) { \
+ cmn_err(CE_WARN, \
+ "ignoring tunable %s (using %llu instead)", \
+ (#tuning), (value)); \
+ } \
+} while (0)
+
/*
* Called during module initialization and periodically thereafter to
* apply reasonable changes to the exposed performance tunings. Can also be
@@ -6908,11 +6916,20 @@ arc_state_multilist_index_func(multilist_t *ml, void *obj)
* values will be applied.
*/
void
-arc_tuning_update(void)
+arc_tuning_update(boolean_t verbose)
{
uint64_t allmem = arc_all_memory();
unsigned long limit;
+ /* Valid range: 32M - <arc_c_max> */
+ if ((zfs_arc_min) && (zfs_arc_min != arc_c_min) &&
+ (zfs_arc_min >= 2ULL << SPA_MAXBLOCKSHIFT) &&
+ (zfs_arc_min <= arc_c_max)) {
+ arc_c_min = zfs_arc_min;
+ arc_c = MAX(arc_c, arc_c_min);
+ }
+ WARN_IF_TUNING_IGNORED(zfs_arc_min, arc_c_min, verbose);
+
/* Valid range: 64M - <all physical memory> */
if ((zfs_arc_max) && (zfs_arc_max != arc_c_max) &&
(zfs_arc_max >= 64 << 20) && (zfs_arc_max < allmem) &&
@@ -6925,14 +6942,7 @@ arc_tuning_update(void)
if (arc_dnode_size_limit > arc_meta_limit)
arc_dnode_size_limit = arc_meta_limit;
}
-
- /* Valid range: 32M - <arc_c_max> */
- if ((zfs_arc_min) && (zfs_arc_min != arc_c_min) &&
- (zfs_arc_min >= 2ULL << SPA_MAXBLOCKSHIFT) &&
- (zfs_arc_min <= arc_c_max)) {
- arc_c_min = zfs_arc_min;
- arc_c = MAX(arc_c, arc_c_min);
- }
+ WARN_IF_TUNING_IGNORED(zfs_arc_max, arc_c_max, verbose);
/* Valid range: 16M - <arc_c_max> */
if ((zfs_arc_meta_min) && (zfs_arc_meta_min != arc_meta_min) &&
@@ -6944,6 +6954,7 @@ arc_tuning_update(void)
if (arc_dnode_size_limit < arc_meta_min)
arc_dnode_size_limit = arc_meta_min;
}
+ WARN_IF_TUNING_IGNORED(zfs_arc_meta_min, arc_meta_min, verbose);
/* Valid range: <arc_meta_min> - <arc_c_max> */
limit = zfs_arc_meta_limit ? zfs_arc_meta_limit :
@@ -6952,6 +6963,7 @@ arc_tuning_update(void)
(limit >= arc_meta_min) &&
(limit <= arc_c_max))
arc_meta_limit = limit;
+ WARN_IF_TUNING_IGNORED(zfs_arc_meta_limit, arc_meta_limit, verbose);
/* Valid range: <arc_meta_min> - <arc_meta_limit> */
limit = zfs_arc_dnode_limit ? zfs_arc_dnode_limit :
@@ -6960,6 +6972,8 @@ arc_tuning_update(void)
(limit >= arc_meta_min) &&
(limit <= arc_meta_limit))
arc_dnode_size_limit = limit;
+ WARN_IF_TUNING_IGNORED(zfs_arc_dnode_limit, arc_dnode_size_limit,
+ verbose);
/* Valid range: 1 - N */
if (zfs_arc_grow_retry)
@@ -6989,11 +7003,13 @@ arc_tuning_update(void)
if ((zfs_arc_lotsfree_percent >= 0) &&
(zfs_arc_lotsfree_percent <= 100))
arc_lotsfree_percent = zfs_arc_lotsfree_percent;
+ WARN_IF_TUNING_IGNORED(zfs_arc_lotsfree_percent, arc_lotsfree_percent,
+ verbose);
/* Valid range: 0 - <all physical memory> */
if ((zfs_arc_sys_free) && (zfs_arc_sys_free != arc_sys_free))
arc_sys_free = MIN(MAX(zfs_arc_sys_free, 0), allmem);
-
+ WARN_IF_TUNING_IGNORED(zfs_arc_sys_free, arc_sys_free, verbose);
}
static void
@@ -7183,7 +7199,7 @@ arc_init(void)
arc_dnode_size_limit = (percent * arc_meta_limit) / 100;
/* Apply user specified tunings */
- arc_tuning_update();
+ arc_tuning_update(B_TRUE);
/* if kmem_flags are set, lets try to use less memory */
if (kmem_debugging())
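
Below is a minimal standalone sketch of the WARN_IF_TUNING_IGNORED() semantics introduced by this diff: the warning fires only when the caller passes a "verbose" flag (the param_set_arc_* write paths, which pass B_TRUE), the tunable is nonzero, and the tunable differs from the value actually applied; the periodic arc_adjust_cb_check() refresh passes B_FALSE and stays quiet. Outside the kernel, fprintf(stderr, ...) stands in for cmn_err(CE_WARN, ...), and the variable names are illustrative stand-ins, not the module's own.

/*
 * Sketch of the warning behavior added by this change. Assumes a userland
 * build; fprintf() replaces cmn_err(), and requested_min/applied_min are
 * hypothetical stand-ins for zfs_arc_min and arc_c_min.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define	WARN_IF_TUNING_IGNORED(tuning, value, do_warn) do {		\
	if ((do_warn) && (tuning) && ((tuning) != (value))) {		\
		fprintf(stderr,						\
		    "ignoring tunable %s (using %llu instead)\n",	\
		    (#tuning), (unsigned long long)(value));		\
	}								\
} while (0)

int
main(void)
{
	/* Requested value fails validation (below the 32M floor), so it is not applied. */
	uint64_t requested_min = 16ULL << 20;	/* stand-in for zfs_arc_min */
	uint64_t applied_min = 32ULL << 20;	/* stand-in for arc_c_min, the value kept */

	/* verbose path (module parameter written by the user): warn that the tunable was ignored. */
	WARN_IF_TUNING_IGNORED(requested_min, applied_min, true);

	/* quiet path (periodic refresh from the adjust thread): no output. */
	WARN_IF_TUNING_IGNORED(requested_min, applied_min, false);

	return (0);
}

Compiled and run, the first invocation prints "ignoring tunable requested_min (using 33554432 instead)" and the second prints nothing, which mirrors why arc_init() and the param_set_arc_int()/param_set_arc_long() handlers pass B_TRUE while arc_adjust_cb_check() passes B_FALSE.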