aboutsummaryrefslogtreecommitdiffstats
path: root/tests
diff options
context:
space:
mode:
authorRyan Moeller <[email protected]>2020-01-14 17:57:28 -0500
committerBrian Behlendorf <[email protected]>2020-01-14 14:57:28 -0800
commit2476f103069e83e17d2c9cec4191af34a7996885 (patch)
tree567a82eafe27b98501c564b24e0f5c3ed221f629 /tests
parent61152d1069595db08f9b53ee518683382caf313e (diff)
ZTS: Catalog tunable names for tests in tunables.cfg
Update tests to use the variables for tunable names.

Reviewed-by: John Kennedy <[email protected]>
Reviewed-by: Kjeld Schouten <[email protected]>
Reviewed-by: George Melikov <[email protected]>
Reviewed-by: Brian Behlendorf <[email protected]>
Signed-off-by: Ryan Moeller <[email protected]>
Closes #9831
Diffstat (limited to 'tests')
-rw-r--r--tests/zfs-tests/include/Makefile.am1
-rw-r--r--tests/zfs-tests/include/libtest.shlib37
-rw-r--r--tests/zfs-tests/include/tunables.cfg81
-rwxr-xr-xtests/zfs-tests/tests/functional/arc/arcstats_runtime_tuning.ksh16
-rw-r--r--tests/zfs-tests/tests/functional/cli_root/zfs_copies/zfs_copies.kshlib6
-rwxr-xr-xtests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_clone_livelist_condense_and_disable.ksh16
-rwxr-xr-xtests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_clone_livelist_condense_races.ksh28
-rw-r--r--tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy.cfg17
-rwxr-xr-xtests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_clone_livelist.ksh6
-rwxr-xr-xtests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_dev_removal_condense.ksh14
-rwxr-xr-xtests/zfs-tests/tests/functional/cli_root/zfs_rename/zfs_rename_014_neg.ksh6
-rwxr-xr-xtests/zfs-tests/tests/functional/cli_root/zfs_send/zfs_send_006_pos.ksh4
-rwxr-xr-xtests/zfs-tests/tests/functional/cli_root/zpool_events/zpool_events_errors.ksh12
-rwxr-xr-xtests/zfs-tests/tests/functional/cli_root/zpool_import/cleanup.ksh2
-rwxr-xr-xtests/zfs-tests/tests/functional/cli_root/zpool_import/import_cachefile_device_replaced.ksh6
-rwxr-xr-xtests/zfs-tests/tests/functional/cli_root/zpool_import/import_rewind_config_changed.ksh6
-rwxr-xr-xtests/zfs-tests/tests/functional/cli_root/zpool_import/import_rewind_device_replaced.ksh8
-rw-r--r--tests/zfs-tests/tests/functional/cli_root/zpool_import/zpool_import.kshlib14
-rwxr-xr-xtests/zfs-tests/tests/functional/cli_root/zpool_import/zpool_import_errata4.ksh4
-rwxr-xr-xtests/zfs-tests/tests/functional/cli_root/zpool_initialize/zpool_initialize_verify_initialized.ksh6
-rwxr-xr-xtests/zfs-tests/tests/functional/cli_root/zpool_resilver/zpool_resilver_restart.ksh6
-rwxr-xr-xtests/zfs-tests/tests/functional/cli_root/zpool_scrub/cleanup.ksh2
-rwxr-xr-xtests/zfs-tests/tests/functional/cli_root/zpool_scrub/zpool_scrub_002_pos.ksh4
-rwxr-xr-xtests/zfs-tests/tests/functional/cli_root/zpool_scrub/zpool_scrub_003_pos.ksh4
-rwxr-xr-xtests/zfs-tests/tests/functional/cli_root/zpool_scrub/zpool_scrub_004_pos.ksh6
-rwxr-xr-xtests/zfs-tests/tests/functional/cli_root/zpool_scrub/zpool_scrub_print_repairing.ksh4
-rwxr-xr-xtests/zfs-tests/tests/functional/cli_root/zpool_split/zpool_split_resilver.ksh6
-rwxr-xr-xtests/zfs-tests/tests/functional/cli_root/zpool_trim/zpool_trim_partial.ksh18
-rwxr-xr-xtests/zfs-tests/tests/functional/cli_root/zpool_trim/zpool_trim_verify_trimmed.ksh6
-rwxr-xr-xtests/zfs-tests/tests/functional/cli_root/zpool_wait/scan/zpool_wait_replace_cancel.ksh4
-rwxr-xr-xtests/zfs-tests/tests/functional/cli_root/zpool_wait/scan/zpool_wait_scrub_cancel.ksh4
-rwxr-xr-xtests/zfs-tests/tests/functional/cli_root/zpool_wait/scan/zpool_wait_scrub_flag.ksh4
-rwxr-xr-xtests/zfs-tests/tests/functional/cli_root/zpool_wait/zpool_wait_discard.ksh6
-rwxr-xr-xtests/zfs-tests/tests/functional/cli_root/zpool_wait/zpool_wait_freeing.ksh18
-rwxr-xr-xtests/zfs-tests/tests/functional/cli_root/zpool_wait/zpool_wait_initialize_basic.ksh6
-rwxr-xr-xtests/zfs-tests/tests/functional/cli_root/zpool_wait/zpool_wait_initialize_cancel.ksh6
-rwxr-xr-xtests/zfs-tests/tests/functional/cli_root/zpool_wait/zpool_wait_initialize_flag.ksh6
-rwxr-xr-xtests/zfs-tests/tests/functional/cli_root/zpool_wait/zpool_wait_multiple.ksh10
-rwxr-xr-xtests/zfs-tests/tests/functional/cli_root/zpool_wait/zpool_wait_remove.ksh6
-rwxr-xr-xtests/zfs-tests/tests/functional/cli_root/zpool_wait/zpool_wait_remove_cancel.ksh4
-rwxr-xr-xtests/zfs-tests/tests/functional/deadman/deadman_sync.ksh12
-rwxr-xr-xtests/zfs-tests/tests/functional/deadman/deadman_zio.ksh12
-rwxr-xr-xtests/zfs-tests/tests/functional/delegate/cleanup.ksh2
-rwxr-xr-xtests/zfs-tests/tests/functional/delegate/setup.ksh2
-rwxr-xr-xtests/zfs-tests/tests/functional/fault/decompress_fault.ksh10
-rwxr-xr-xtests/zfs-tests/tests/functional/fault/zpool_status_-s.ksh12
-rwxr-xr-xtests/zfs-tests/tests/functional/features/async_destroy/async_destroy_001_pos.ksh6
-rwxr-xr-xtests/zfs-tests/tests/functional/log_spacemap/log_spacemap_import_logs.ksh8
-rwxr-xr-xtests/zfs-tests/tests/functional/mmp/cleanup.ksh2
-rw-r--r--tests/zfs-tests/tests/functional/mmp/mmp.kshlib8
-rwxr-xr-xtests/zfs-tests/tests/functional/mmp/mmp_inactive_import.ksh4
-rwxr-xr-xtests/zfs-tests/tests/functional/mmp/mmp_interval.ksh18
-rwxr-xr-xtests/zfs-tests/tests/functional/mmp/mmp_on_off.ksh10
-rwxr-xr-xtests/zfs-tests/tests/functional/mmp/mmp_on_thread.ksh4
-rwxr-xr-xtests/zfs-tests/tests/functional/mmp/mmp_on_uberblocks.ksh10
-rwxr-xr-xtests/zfs-tests/tests/functional/mmp/mmp_reset_interval.ksh36
-rwxr-xr-xtests/zfs-tests/tests/functional/mmp/mmp_write_distribution.ksh4
-rwxr-xr-xtests/zfs-tests/tests/functional/mmp/setup.ksh6
-rwxr-xr-xtests/zfs-tests/tests/functional/mount/umount_unlinked_drain.ksh9
-rwxr-xr-xtests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_capacity.ksh4
-rwxr-xr-xtests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_discard_busy.ksh6
-rwxr-xr-xtests/zfs-tests/tests/functional/redacted_send/cleanup.ksh2
-rw-r--r--tests/zfs-tests/tests/functional/redacted_send/redacted.kshlib6
-rwxr-xr-xtests/zfs-tests/tests/functional/redacted_send/redacted_mounts.ksh6
-rwxr-xr-xtests/zfs-tests/tests/functional/redacted_send/redacted_resume.ksh2
-rwxr-xr-xtests/zfs-tests/tests/functional/redacted_send/redacted_volume.ksh2
-rwxr-xr-xtests/zfs-tests/tests/functional/refquota/refquota_008_neg.ksh6
-rw-r--r--tests/zfs-tests/tests/functional/removal/removal.kshlib4
-rwxr-xr-xtests/zfs-tests/tests/functional/removal/removal_cancel.ksh4
-rwxr-xr-xtests/zfs-tests/tests/functional/removal/removal_condense_export.ksh8
-rwxr-xr-xtests/zfs-tests/tests/functional/removal/removal_multiple_indirection.ksh8
-rwxr-xr-xtests/zfs-tests/tests/functional/removal/removal_with_ganging.ksh4
-rwxr-xr-xtests/zfs-tests/tests/functional/resilver/resilver_restart_001.ksh22
-rwxr-xr-xtests/zfs-tests/tests/functional/rsend/send_hole_birth.ksh4
-rwxr-xr-xtests/zfs-tests/tests/functional/slog/slog_015_neg.ksh11
-rwxr-xr-xtests/zfs-tests/tests/functional/snapshot/cleanup.ksh2
-rwxr-xr-xtests/zfs-tests/tests/functional/snapshot/clone_001_pos.ksh6
-rwxr-xr-xtests/zfs-tests/tests/functional/snapshot/setup.ksh2
-rwxr-xr-xtests/zfs-tests/tests/functional/trim/autotrim_config.ksh20
-rwxr-xr-xtests/zfs-tests/tests/functional/trim/autotrim_integrity.ksh14
-rwxr-xr-xtests/zfs-tests/tests/functional/trim/autotrim_trim_integrity.ksh14
-rwxr-xr-xtests/zfs-tests/tests/functional/trim/trim_config.ksh20
-rwxr-xr-xtests/zfs-tests/tests/functional/trim/trim_integrity.ksh14
-rwxr-xr-xtests/zfs-tests/tests/functional/zvol/zvol_misc/zvol_misc_volmode.ksh12
-rw-r--r--tests/zfs-tests/tests/perf/perf.shlib2
85 files changed, 434 insertions, 366 deletions
diff --git a/tests/zfs-tests/include/Makefile.am b/tests/zfs-tests/include/Makefile.am
index 86c387c67..929adc98d 100644
--- a/tests/zfs-tests/include/Makefile.am
+++ b/tests/zfs-tests/include/Makefile.am
@@ -5,6 +5,7 @@ dist_pkgdata_DATA = \
libtest.shlib \
math.shlib \
properties.shlib \
+ tunables.cfg \
zpool_script.shlib
EXTRA_DIST = default.cfg.in
diff --git a/tests/zfs-tests/include/libtest.shlib b/tests/zfs-tests/include/libtest.shlib
index 3ba7a8cbe..d9ba374f2 100644
--- a/tests/zfs-tests/include/libtest.shlib
+++ b/tests/zfs-tests/include/libtest.shlib
@@ -34,6 +34,8 @@
. ${STF_SUITE}/include/math.shlib
. ${STF_SUITE}/include/blkdev.shlib
+. ${STF_SUITE}/include/tunables.cfg
+
#
# Apply constrained path when available. This is required since the
# PATH may have been modified by sudo's secure_path behavior.
@@ -3713,7 +3715,7 @@ function swap_cleanup
#
# Set a global system tunable (64-bit value)
#
-# $1 tunable name
+# $1 tunable name (use a NAME defined in tunables.cfg)
# $2 tunable values
#
function set_tunable64
@@ -3724,7 +3726,7 @@ function set_tunable64
#
# Set a global system tunable (32-bit value)
#
-# $1 tunable name
+# $1 tunable name (use a NAME defined in tunables.cfg)
# $2 tunable values
#
function set_tunable32
@@ -3734,12 +3736,23 @@ function set_tunable32
function set_tunable_impl
{
- typeset tunable="$1"
+ typeset name="$1"
typeset value="$2"
typeset mdb_cmd="$3"
typeset module="${4:-zfs}"
- [[ -z "$tunable" ]] && return 1
+ eval "typeset tunable=\$$name"
+ case "$tunable" in
+ UNSUPPORTED)
+ log_unsupported "Tunable '$name' is unsupported on $(uname)"
+ ;;
+ "")
+ log_fail "Tunable '$name' must be added to tunables.cfg"
+ ;;
+ *)
+ ;;
+ esac
+
[[ -z "$value" ]] && return 1
[[ -z "$mdb_cmd" ]] && return 1
@@ -3765,7 +3778,7 @@ function set_tunable_impl
#
# Get a global system tunable
#
-# $1 tunable name
+# $1 tunable name (use a NAME defined in tunables.cfg)
#
function get_tunable
{
@@ -3774,10 +3787,20 @@ function get_tunable
function get_tunable_impl
{
- typeset tunable="$1"
+ typeset name="$1"
typeset module="${2:-zfs}"
- [[ -z "$tunable" ]] && return 1
+ eval "typeset tunable=\$$name"
+ case "$tunable" in
+ UNSUPPORTED)
+ log_unsupported "Tunable '$name' is unsupported on $(uname)"
+ ;;
+ "")
+ log_fail "Tunable '$name' must be added to tunables.cfg"
+ ;;
+ *)
+ ;;
+ esac
case "$(uname)" in
Linux)
diff --git a/tests/zfs-tests/include/tunables.cfg b/tests/zfs-tests/include/tunables.cfg
new file mode 100644
index 000000000..8e24df5da
--- /dev/null
+++ b/tests/zfs-tests/include/tunables.cfg
@@ -0,0 +1,81 @@
+# This file exports variables for each tunable used in the test suite.
+#
+# Different platforms use different names for most tunables. To avoid littering
+# the tests with conditional logic for deciding how to set each tunable, the
+# logic is instead consolidated to this one file.
+#
+# Any use of tunables in tests must use a name defined here. New entries
+# should be added to the table as needed. Please keep the table sorted
+# alphabetically for ease of maintenance.
+#
+# Platform-specific tunables should still use a NAME from this table for
+# consistency. Enter UNSUPPORTED in the column for platforms on which the
+# tunable is not implemented.
+
+UNAME=$(uname)
+
+# NAME FreeBSD tunable Linux tunable
+cat <<%%%% |
+ADMIN_SNAPSHOT UNSUPPORTED zfs_admin_snapshot
+ALLOW_REDACTED_DATASET_MOUNT allow_redacted_dataset_mount zfs_allow_redacted_dataset_mount
+ARC_MAX arc.max zfs_arc_max
+ARC_MIN arc.min zfs_arc_min
+ASYNC_BLOCK_MAX_BLOCKS async_block_max_blocks zfs_async_block_max_blocks
+CHECKSUM_EVENTS_PER_SECOND checksum_events_per_second zfs_checksum_events_per_second
+COMMIT_TIMEOUT_PCT commit_timeout_pct zfs_commit_timeout_pct
+COMPRESSED_ARC_ENABLED compressed_arc_enabled zfs_compressed_arc_enabled
+CONDENSE_INDIRECT_COMMIT_ENTRY_DELAY_MS condense.indirect_commit_entry_delay_ms zfs_condense_indirect_commit_entry_delay_ms
+CONDENSE_MIN_MAPPING_BYTES condense.min_mapping_bytes zfs_condense_min_mapping_bytes
+DBUF_CACHE_MAX_BYTES dbuf_cache.max_bytes dbuf_cache_max_bytes
+DEADMAN_CHECKTIME_MS deadman_checktime_ms zfs_deadman_checktime_ms
+DEADMAN_FAILMODE deadman_failmode zfs_deadman_failmode
+DEADMAN_SYNCTIME_MS deadman_synctime_ms zfs_deadman_synctime_ms
+DEADMAN_ZIOTIME_MS deadman_ziotime_ms zfs_deadman_ziotime_ms
+DISABLE_IVSET_GUID_CHECK UNSUPPORTED zfs_disable_ivset_guid_check
+INITIALIZE_CHUNK_SIZE initialize_chunk_size zfs_initialize_chunk_size
+INITIALIZE_VALUE initialize_value zfs_initialize_value
+KEEP_LOG_SPACEMAPS_AT_EXPORT keep_log_spacemaps_at_export zfs_keep_log_spacemaps_at_export
+LIVELIST_CONDENSE_NEW_ALLOC livelist.condense.new_alloc zfs_livelist_condense_new_alloc
+LIVELIST_CONDENSE_SYNC_CANCEL livelist.condense.sync_cancel zfs_livelist_condense_sync_cancel
+LIVELIST_CONDENSE_SYNC_PAUSE livelist.condense.sync_pause zfs_livelist_condense_sync_pause
+LIVELIST_CONDENSE_ZTHR_CANCEL livelist.condense.zthr_cancel zfs_livelist_condense_zthr_cancel
+LIVELIST_CONDENSE_ZTHR_PAUSE livelist.condense.zthr_pause zfs_livelist_condense_zthr_pause
+LIVELIST_MAX_ENTRIES livelist.max_entries zfs_livelist_max_entries
+LIVELIST_MIN_PERCENT_SHARED livelist.min_percent_shared zfs_livelist_min_percent_shared
+MAX_DATASET_NESTING max_dataset_nesting zfs_max_dataset_nesting
+MAX_MISSING_TVDS max_missing_tvds zfs_max_missing_tvds
+METASLAB_DEBUG_LOAD metaslab.debug_load metaslab_debug_load
+METASLAB_FORCE_GANGING metaslab.force_ganging metaslab_force_ganging
+MULTIHOST_FAIL_INTERVALS multihost.fail_intervals zfs_multihost_fail_intervals
+MULTIHOST_HISTORY UNSUPPORTED zfs_multihost_history
+MULTIHOST_IMPORT_INTERVALS multihost.import_intervals zfs_multihost_import_intervals
+MULTIHOST_INTERVAL UNSUPPORTED zfs_multihost_interval
+OVERRIDE_ESTIMATE_RECORDSIZE send.override_estimate_recordsize zfs_override_estimate_recordsize
+REMOVAL_SUSPEND_PROGRESS removal_suspend_progress zfs_removal_suspend_progress
+REMOVE_MAX_SEGMENT remove_max_segment zfs_remove_max_segment
+RESILVER_MIN_TIME_MS resilver_min_time_ms zfs_resilver_min_time_ms
+SCAN_SUSPEND_PROGRESS scan_suspend_progress zfs_scan_suspend_progress
+SCAN_VDEV_LIMIT scan_vdev_limit zfs_scan_vdev_limit
+SEND_HOLES_WITHOUT_BIRTH_TIME send_holes_without_birth_time send_holes_without_birth_time
+SLOW_IO_EVENTS_PER_SECOND slow_io_events_per_second zfs_slow_io_events_per_second
+SPA_ASIZE_INFLATION spa.asize_inflation spa_asize_inflation
+SPA_DISCARD_MEMORY_LIMIT spa.discard_memory_limit zfs_spa_discard_memory_limit
+SPA_LOAD_VERIFY_DATA spa.load_verify_data spa_load_verify_data
+SPA_LOAD_VERIFY_METADATA spa.load_verify_metadata spa_load_verify_metadata
+TRIM_EXTENT_BYTES_MIN trim.extent_bytes_min zfs_trim_extent_bytes_min
+TRIM_METASLAB_SKIP trim.metaslab_skip zfs_trim_metaslab_skip
+TRIM_TXG_BATCH trim.txg_batch zfs_trim_txg_batch
+TXG_HISTORY UNSUPPORTED zfs_txg_history
+TXG_TIMEOUT txg_timeout zfs_txg_timeout
+UNLINK_SUSPEND_PROGRESS UNSUPPORTED zfs_unlink_suspend_progress
+VDEV_MIN_MS_COUNT vdev.min_ms_count zfs_vdev_min_ms_count
+VDEV_VALIDATE_SKIP vdev.validate_skip vdev_validate_skip
+VOL_INHIBIT_DEV UNSUPPORTED zvol_inhibit_dev
+VOL_MODE vol.mode zvol_volmode
+VOL_RECURSIVE vol.recursive UNSUPPORTED
+ZEVENT_LEN_MAX UNSUPPORTED zfs_zevent_len_max
+ZIO_SLOW_IO_MS zio.slow_io_ms zio_slow_io_ms
+%%%%
+while read name FreeBSD Linux; do
+ eval "export ${name}=\$${UNAME}"
+done
diff --git a/tests/zfs-tests/tests/functional/arc/arcstats_runtime_tuning.ksh b/tests/zfs-tests/tests/functional/arc/arcstats_runtime_tuning.ksh
index 6d007aecf..6650b2e1a 100755
--- a/tests/zfs-tests/tests/functional/arc/arcstats_runtime_tuning.ksh
+++ b/tests/zfs-tests/tests/functional/arc/arcstats_runtime_tuning.ksh
@@ -21,25 +21,25 @@ function cleanup
{
# Set tunables to their recorded actual size and then to their original
# value: this works for previously unconfigured tunables.
- log_must set_tunable64 zfs_arc_min "$MINSIZE"
- log_must set_tunable64 zfs_arc_min "$ZFS_ARC_MIN"
- log_must set_tunable64 zfs_arc_max "$MAXSIZE"
- log_must set_tunable64 zfs_arc_max "$ZFS_ARC_MAX"
+ log_must set_tunable64 ARC_MIN "$MINSIZE"
+ log_must set_tunable64 ARC_MIN "$ZFS_ARC_MIN"
+ log_must set_tunable64 ARC_MAX "$MAXSIZE"
+ log_must set_tunable64 ARC_MAX "$ZFS_ARC_MAX"
}
log_onexit cleanup
-ZFS_ARC_MAX="$(get_tunable zfs_arc_max)"
-ZFS_ARC_MIN="$(get_tunable zfs_arc_min)"
+ZFS_ARC_MAX="$(get_tunable ARC_MAX)"
+ZFS_ARC_MIN="$(get_tunable ARC_MIN)"
MINSIZE="$(get_min_arc_size)"
MAXSIZE="$(get_max_arc_size)"
log_assert "ARC tunables should be updated dynamically"
for size in $((MAXSIZE/4)) $((MAXSIZE/3)) $((MAXSIZE/2)) $MAXSIZE; do
- log_must set_tunable64 zfs_arc_max "$size"
+ log_must set_tunable64 ARC_MAX "$size"
log_must test "$(get_max_arc_size)" == "$size"
- log_must set_tunable64 zfs_arc_min "$size"
+ log_must set_tunable64 ARC_MIN "$size"
log_must test "$(get_min_arc_size)" == "$size"
done
diff --git a/tests/zfs-tests/tests/functional/cli_root/zfs_copies/zfs_copies.kshlib b/tests/zfs-tests/tests/functional/cli_root/zfs_copies/zfs_copies.kshlib
index c75318e5c..e886de432 100644
--- a/tests/zfs-tests/tests/functional/cli_root/zfs_copies/zfs_copies.kshlib
+++ b/tests/zfs-tests/tests/functional/cli_root/zfs_copies/zfs_copies.kshlib
@@ -108,11 +108,11 @@ function do_vol_test
if is_freebsd; then
# Pool creation on zvols is forbidden by default.
# Save and restore the current setting.
- typeset _saved=$(get_tunable vol.recursive)
- log_must set_tunable64 vol.recursive 1 # Allow
+ typeset _saved=$(get_tunable VOL_RECURSIVE)
+ log_must set_tunable64 VOL_RECURSIVE 1 # Allow
zpool create $TESTPOOL1 $vol_b_path
typeset _zpool_create_result=$?
- log_must set_tunable64 vol.recursive $_saved # Restore
+ log_must set_tunable64 VOL_RECURSIVE $_saved # Restore
log_must test $_zpool_create_result = 0
else
log_must zpool create $TESTPOOL1 $vol_b_path
diff --git a/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_clone_livelist_condense_and_disable.ksh b/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_clone_livelist_condense_and_disable.ksh
index 13976e7c4..ab506debe 100755
--- a/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_clone_livelist_condense_and_disable.ksh
+++ b/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_clone_livelist_condense_and_disable.ksh
@@ -36,9 +36,9 @@ function cleanup
{
log_must zfs destroy -Rf $TESTPOOL/$TESTFS1
# reset the livelist sublist size to the original value
- set_tunable64 $LIVELIST_MAX_ENTRIES $ORIGINAL_MAX
+ set_tunable64 LIVELIST_MAX_ENTRIES $ORIGINAL_MAX
# reset the minimum percent shared to 75
- set_tunable32 $LIVELIST_MIN_PERCENT_SHARED $ORIGINAL_MIN
+ set_tunable32 LIVELIST_MIN_PERCENT_SHARED $ORIGINAL_MIN
}
function check_ll_len
@@ -58,9 +58,9 @@ function test_condense
{
# set the max livelist entries to a small value to more easily
# trigger a condense
- set_tunable64 $LIVELIST_MAX_ENTRIES 20
+ set_tunable64 LIVELIST_MAX_ENTRIES 20
# set a small percent shared threshold so the livelist is not disabled
- set_tunable32 $LIVELIST_MIN_PERCENT_SHARED 10
+ set_tunable32 LIVELIST_MIN_PERCENT_SHARED 10
clone_dataset $TESTFS1 snap $TESTCLONE
# sync between each write to make sure a new entry is created
@@ -86,7 +86,7 @@ function test_condense
function test_deactivated
{
# Threshold set to 50 percent
- set_tunable32 $LIVELIST_MIN_PERCENT_SHARED 50
+ set_tunable32 LIVELIST_MIN_PERCENT_SHARED 50
clone_dataset $TESTFS1 snap $TESTCLONE
log_must mkfile 5m /$TESTPOOL/$TESTCLONE/$TESTFILE0
@@ -97,7 +97,7 @@ function test_deactivated
log_must zfs destroy -R $TESTPOOL/$TESTCLONE
# Threshold set to 20 percent
- set_tunable32 $LIVELIST_MIN_PERCENT_SHARED 20
+ set_tunable32 LIVELIST_MIN_PERCENT_SHARED 20
clone_dataset $TESTFS1 snap $TESTCLONE
log_must mkfile 5m /$TESTPOOL/$TESTCLONE/$TESTFILE0
@@ -112,8 +112,8 @@ function test_deactivated
log_must zfs destroy -R $TESTPOOL/$TESTCLONE
}
-ORIGINAL_MAX=$(get_tunable $LIVELIST_MAX_ENTRIES)
-ORIGINAL_MIN=$(get_tunable $LIVELIST_MIN_PERCENT_SHARED)
+ORIGINAL_MAX=$(get_tunable LIVELIST_MAX_ENTRIES)
+ORIGINAL_MIN=$(get_tunable LIVELIST_MIN_PERCENT_SHARED)
log_onexit cleanup
log_must zfs create $TESTPOOL/$TESTFS1
diff --git a/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_clone_livelist_condense_races.ksh b/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_clone_livelist_condense_races.ksh
index 4c99a05e0..453b50241 100755
--- a/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_clone_livelist_condense_races.ksh
+++ b/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_clone_livelist_condense_races.ksh
@@ -38,10 +38,10 @@ function cleanup
{
log_must zfs destroy -Rf $TESTPOOL/$TESTFS1
# reset the livelist sublist size to the original value
- set_tunable64 $LIVELIST_MAX_ENTRIES $ORIGINAL_MAX
+ set_tunable64 LIVELIST_MAX_ENTRIES $ORIGINAL_MAX
# reset the condense tests to 0
- set_tunable32 $LIVELIST_CONDENSE_ZTHR_PAUSE 0
- set_tunable32 $LIVELIST_CONDENSE_SYNC_PAUSE 0
+ set_tunable32 LIVELIST_CONDENSE_ZTHR_PAUSE 0
+ set_tunable32 LIVELIST_CONDENSE_SYNC_PAUSE 0
}
function delete_race
@@ -89,7 +89,7 @@ function disable_race
log_must zfs destroy $TESTPOOL/$TESTCLONE
}
-ORIGINAL_MAX=$(get_tunable $LIVELIST_MAX_ENTRIES)
+ORIGINAL_MAX=$(get_tunable LIVELIST_MAX_ENTRIES)
log_onexit cleanup
@@ -99,19 +99,19 @@ log_must zpool sync $TESTPOOL
log_must zfs snapshot $TESTPOOL/$TESTFS1@snap
# Reduce livelist size to trigger condense more easily
-set_tunable64 $LIVELIST_MAX_ENTRIES 20
+set_tunable64 LIVELIST_MAX_ENTRIES 20
# Test cancellation path in the zthr
-set_tunable32 $LIVELIST_CONDENSE_ZTHR_PAUSE 1
-set_tunable32 $LIVELIST_CONDENSE_SYNC_PAUSE 0
-disable_race $LIVELIST_CONDENSE_ZTHR_CANCEL
-delete_race $LIVELIST_CONDENSE_ZTHR_CANCEL
-export_race $LIVELIST_CONDENSE_ZTHR_CANCEL
+set_tunable32 LIVELIST_CONDENSE_ZTHR_PAUSE 1
+set_tunable32 LIVELIST_CONDENSE_SYNC_PAUSE 0
+disable_race LIVELIST_CONDENSE_ZTHR_CANCEL
+delete_race LIVELIST_CONDENSE_ZTHR_CANCEL
+export_race LIVELIST_CONDENSE_ZTHR_CANCEL
# Test cancellation path in the synctask
-set_tunable32 $LIVELIST_CONDENSE_ZTHR_PAUSE 0
-set_tunable32 $LIVELIST_CONDENSE_SYNC_PAUSE 1
-disable_race $LIVELIST_CONDENSE_SYNC_CANCEL
-delete_race $LIVELIST_CONDENSE_SYNC_CANCEL
+set_tunable32 LIVELIST_CONDENSE_ZTHR_PAUSE 0
+set_tunable32 LIVELIST_CONDENSE_SYNC_PAUSE 1
+disable_race LIVELIST_CONDENSE_SYNC_CANCEL
+delete_race LIVELIST_CONDENSE_SYNC_CANCEL
log_pass "Clone livelist condense race conditions passed."
diff --git a/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy.cfg b/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy.cfg
index 4a7b8967d..a62739b07 100644
--- a/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy.cfg
+++ b/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy.cfg
@@ -37,20 +37,3 @@ export FSSNAP=$FS@$TESTSNAP
export VOLSNAP=$VOL@$TESTSNAP
export FSCLONE=$TESTPOOL/$TESTFSCLONE
export VOLCLONE=$TESTPOOL/$TESTVOLCLONE
-if is_freebsd; then
- export LIVELIST_MAX_ENTRIES=livelist.max_entries
- export LIVELIST_MIN_PERCENT_SHARED=livelist.min_percent_shared
- export LIVELIST_CONDENSE_NEW_ALLOC=livelist.condense.new_alloc
- export LIVELIST_CONDENSE_ZTHR_CANCEL=livelist.condense.zthr_cancel
- export LIVELIST_CONDENSE_SYNC_CANCEL=livelist.condense.sync_cancel
- export LIVELIST_CONDENSE_ZTHR_PAUSE=livelist.condense.zthr_pause
- export LIVELIST_CONDENSE_SYNC_PAUSE=livelist.condense.sync_pause
-else
- export LIVELIST_MAX_ENTRIES=zfs_livelist_max_entries
- export LIVELIST_MIN_PERCENT_SHARED=zfs_livelist_min_percent_shared
- export LIVELIST_CONDENSE_NEW_ALLOC=zfs_livelist_condense_new_alloc
- export LIVELIST_CONDENSE_ZTHR_CANCEL=zfs_livelist_condense_zthr_cancel
- export LIVELIST_CONDENSE_SYNC_CANCEL=zfs_livelist_condense_sync_cancel
- export LIVELIST_CONDENSE_ZTHR_PAUSE=zfs_livelist_condense_zthr_pause
- export LIVELIST_CONDENSE_SYNC_PAUSE=zfs_livelist_condense_sync_pause
-fi
diff --git a/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_clone_livelist.ksh b/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_clone_livelist.ksh
index d1cc92633..6d8984fe9 100755
--- a/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_clone_livelist.ksh
+++ b/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_clone_livelist.ksh
@@ -40,7 +40,7 @@ function cleanup
{
datasetexists $TESTPOOL/$TESTFS1 && zfs destroy -R $TESTPOOL/$TESTFS1
# reset the livelist sublist size to its original value
- set_tunable64 $LIVELIST_MAX_ENTRIES $ORIGINAL_MAX
+ set_tunable64 LIVELIST_MAX_ENTRIES $ORIGINAL_MAX
}
function clone_write_file
@@ -120,7 +120,7 @@ function test_promote
log_must zfs destroy -R $TESTPOOL/$TESTCLONE
}
-ORIGINAL_MAX=$(get_tunable $LIVELIST_MAX_ENTRIES)
+ORIGINAL_MAX=$(get_tunable LIVELIST_MAX_ENTRIES)
log_onexit cleanup
log_must zfs create $TESTPOOL/$TESTFS1
@@ -128,7 +128,7 @@ log_must mkfile 20m /$TESTPOOL/$TESTFS1/atestfile
log_must zfs snapshot $TESTPOOL/$TESTFS1@snap
# set a small livelist entry size to more easily test multiple entry livelists
-set_tunable64 $LIVELIST_MAX_ENTRIES 20
+set_tunable64 LIVELIST_MAX_ENTRIES 20
test_one_empty
test_one
diff --git a/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_dev_removal_condense.ksh b/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_dev_removal_condense.ksh
index 677dcf511..ab646daec 100755
--- a/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_dev_removal_condense.ksh
+++ b/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_dev_removal_condense.ksh
@@ -38,15 +38,15 @@ function cleanup
{
poolexists $TESTPOOL2 && zpool destroy $TESTPOOL2
# reset livelist max size
- set_tunable64 $LIVELIST_MAX_ENTRIES $ORIGINAL_MAX
+ set_tunable64 LIVELIST_MAX_ENTRIES $ORIGINAL_MAX
[[ -f $VIRTUAL_DISK1 ]] && log_must rm $VIRTUAL_DISK1
[[ -f $VIRTUAL_DISK2 ]] && log_must rm $VIRTUAL_DISK2
}
log_onexit cleanup
-ORIGINAL_MAX=$(get_tunable $LIVELIST_MAX_ENTRIES)
-set_tunable64 $LIVELIST_MAX_ENTRIES 20
+ORIGINAL_MAX=$(get_tunable LIVELIST_MAX_ENTRIES)
+set_tunable64 LIVELIST_MAX_ENTRIES 20
VIRTUAL_DISK1=$TEST_BASE_DIR/disk1
VIRTUAL_DISK2=$TEST_BASE_DIR/disk2
@@ -66,14 +66,14 @@ log_must zfs clone $TESTPOOL2/$TESTFS@snap $TESTPOOL2/$TESTCLONE
log_must mkfile 10m /$TESTPOOL2/$TESTCLONE/A
log_must mkfile 1m /$TESTPOOL2/$TESTCLONE/B
log_must zpool sync $TESTPOOL2
-set_tunable32 $LIVELIST_CONDENSE_SYNC_PAUSE 1
+set_tunable32 LIVELIST_CONDENSE_SYNC_PAUSE 1
# Add a new dev and remove the old one
log_must zpool add $TESTPOOL2 $VIRTUAL_DISK2
log_must zpool remove $TESTPOOL2 $VIRTUAL_DISK1
wait_for_removal $TESTPOOL2
-set_tunable32 $LIVELIST_CONDENSE_NEW_ALLOC 0
+set_tunable32 LIVELIST_CONDENSE_NEW_ALLOC 0
# Trigger a condense
log_must mkfile 10m /$TESTPOOL2/$TESTCLONE/A
log_must zpool sync $TESTPOOL2
@@ -83,10 +83,10 @@ log_must zpool sync $TESTPOOL2
log_must mkfile 1m /$TESTPOOL2/$TESTCLONE/B
# Resume condense thr
-set_tunable32 $LIVELIST_CONDENSE_SYNC_PAUSE 0
+set_tunable32 LIVELIST_CONDENSE_SYNC_PAUSE 0
log_must zpool sync $TESTPOOL2
# Check that we've added new ALLOC blkptrs during the condense
-[[ "0" < "$(get_tunable $LIVELIST_CONDENSE_NEW_ALLOC)" ]] || \
+[[ "0" < "$(get_tunable LIVELIST_CONDENSE_NEW_ALLOC)" ]] || \
log_fail "removal/condense test failed"
log_must zfs destroy $TESTPOOL2/$TESTCLONE
diff --git a/tests/zfs-tests/tests/functional/cli_root/zfs_rename/zfs_rename_014_neg.ksh b/tests/zfs-tests/tests/functional/cli_root/zfs_rename/zfs_rename_014_neg.ksh
index 7d99e9f69..1c962608d 100755
--- a/tests/zfs-tests/tests/functional/cli_root/zfs_rename/zfs_rename_014_neg.ksh
+++ b/tests/zfs-tests/tests/functional/cli_root/zfs_rename/zfs_rename_014_neg.ksh
@@ -81,7 +81,7 @@ function nesting_cleanup
# before resetting it, it will be left at the modified
# value for the remaining tests. That's the reason
# we reset it again here just in case.
- log_must set_tunable_impl zfs_max_dataset_nesting 50 Z zcommon
+ log_must set_tunable_impl MAX_DATASET_NESTING 50 Z zcommon
}
log_onexit nesting_cleanup
@@ -93,13 +93,13 @@ log_must zfs create -p $TESTPOOL/$dsC16
log_mustnot zfs rename $TESTPOOL/$dsA02 $TESTPOOL/$dsB15A
# extend limit
-log_must set_tunable_impl zfs_max_dataset_nesting 64 Z zcommon
+log_must set_tunable_impl MAX_DATASET_NESTING 64 Z zcommon
log_mustnot zfs rename $TESTPOOL/$dsA02 $TESTPOOL/$dsB16A
log_must zfs rename $TESTPOOL/$dsA02 $TESTPOOL/$dsB15A
# bring back old limit
-log_must set_tunable_impl zfs_max_dataset_nesting 50 Z zcommon
+log_must set_tunable_impl MAX_DATASET_NESTING 50 Z zcommon
log_mustnot zfs rename $TESTPOOL/$dsC01 $TESTPOOL/$dsB15A47C
log_must zfs rename $TESTPOOL/$dsB15A47A $TESTPOOL/$dsB15A47B
diff --git a/tests/zfs-tests/tests/functional/cli_root/zfs_send/zfs_send_006_pos.ksh b/tests/zfs-tests/tests/functional/cli_root/zfs_send/zfs_send_006_pos.ksh
index 7dac6798d..42628a051 100755
--- a/tests/zfs-tests/tests/functional/cli_root/zfs_send/zfs_send_006_pos.ksh
+++ b/tests/zfs-tests/tests/functional/cli_root/zfs_send/zfs_send_006_pos.ksh
@@ -36,7 +36,7 @@ verify_runnable "both"
function cleanup
{
- log_must set_tunable32 zfs_override_estimate_recordsize 8192
+ log_must set_tunable32 OVERRIDE_ESTIMATE_RECORDSIZE 8192
for ds in $datasets; do
destroy_dataset $ds "-rf"
done
@@ -91,7 +91,7 @@ function verify_size_estimates
log_assert "Verify 'zfs send -nvP' generates valid stream estimates"
log_onexit cleanup
-log_must set_tunable32 zfs_override_estimate_recordsize 0
+log_must set_tunable32 OVERRIDE_ESTIMATE_RECORDSIZE 0
typeset -l block_count=0
typeset -l block_size
typeset -i PERCENT=1
diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_events/zpool_events_errors.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_events/zpool_events_errors.ksh
index 0dc551bbd..ba940210e 100755
--- a/tests/zfs-tests/tests/functional/cli_root/zpool_events/zpool_events_errors.ksh
+++ b/tests/zfs-tests/tests/functional/cli_root/zpool_events/zpool_events_errors.ksh
@@ -45,13 +45,13 @@ VDEV2=$TEST_BASE_DIR/file2
VDEV3=$TEST_BASE_DIR/file3
POOL=error_pool
FILESIZE=$((20 * 1024 * 1024))
-OLD_CHECKSUMS=$(get_tunable zfs_checksum_events_per_second)
-OLD_LEN_MAX=$(get_tunable zfs_zevent_len_max)
+OLD_CHECKSUMS=$(get_tunable CHECKSUM_EVENTS_PER_SECOND)
+OLD_LEN_MAX=$(get_tunable ZEVENT_LEN_MAX)
function cleanup
{
- log_must set_tunable64 zfs_checksum_events_per_second $OLD_CHECKSUMS
- log_must set_tunable64 zfs_zevent_len_max $OLD_LEN_MAX
+ log_must set_tunable64 CHECKSUM_EVENTS_PER_SECOND $OLD_CHECKSUMS
+ log_must set_tunable64 ZEVENT_LEN_MAX $OLD_LEN_MAX
log_must zinject -c all
log_must zpool events -c
@@ -66,8 +66,8 @@ log_assert "Check that the number of zpool errors match the number of events"
log_onexit cleanup
# Set our thresholds high so we never ratelimit or drop events.
-set_tunable64 zfs_checksum_events_per_second 20000
-set_tunable64 zfs_zevent_len_max 20000
+set_tunable64 CHECKSUM_EVENTS_PER_SECOND 20000
+set_tunable64 ZEVENT_LEN_MAX 20000
log_must truncate -s $MINVDEVSIZE $VDEV1 $VDEV2 $VDEV3
log_must mkdir -p $MOUNTDIR
diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_import/cleanup.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_import/cleanup.ksh
index 029fa6681..4af162bfd 100755
--- a/tests/zfs-tests/tests/functional/cli_root/zpool_import/cleanup.ksh
+++ b/tests/zfs-tests/tests/functional/cli_root/zpool_import/cleanup.ksh
@@ -34,7 +34,7 @@
verify_runnable "global"
-log_must set_tunable32 zfs_scan_suspend_progress 0
+log_must set_tunable32 SCAN_SUSPEND_PROGRESS 0
for pool in "$TESTPOOL" "$TESTPOOL1"; do
datasetexists $pool/$TESTFS && \
diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_import/import_cachefile_device_replaced.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_import/import_cachefile_device_replaced.ksh
index e7edb1a3b..a42c69747 100755
--- a/tests/zfs-tests/tests/functional/cli_root/zpool_import/import_cachefile_device_replaced.ksh
+++ b/tests/zfs-tests/tests/functional/cli_root/zpool_import/import_cachefile_device_replaced.ksh
@@ -59,7 +59,7 @@ function custom_cleanup
[[ -n ZFS_TXG_TIMEOUT ]] &&
log_must set_zfs_txg_timeout $ZFS_TXG_TIMEOUT
- log_must set_tunable32 zfs_scan_suspend_progress 0
+ log_must set_tunable32 SCAN_SUSPEND_PROGRESS 0
cleanup
}
@@ -87,7 +87,7 @@ function test_replacing_vdevs
log_must zpool export $TESTPOOL1
log_must cp $CPATHBKP $CPATH
log_must zpool import -c $CPATH -o cachefile=$CPATH $TESTPOOL1
- log_must set_tunable32 zfs_scan_suspend_progress 1
+ log_must set_tunable32 SCAN_SUSPEND_PROGRESS 1
log_must zpool replace $TESTPOOL1 $replacevdev $replaceby
# Cachefile: pool in resilvering state
@@ -96,7 +96,7 @@ function test_replacing_vdevs
# Confirm pool is still replacing
log_must pool_is_replacing $TESTPOOL1
log_must zpool export $TESTPOOL1
- log_must set_tunable32 zfs_scan_suspend_progress 0
+ log_must set_tunable32 SCAN_SUSPEND_PROGRESS 0
( $earlyremove ) && log_must rm $replacevdev
diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_import/import_rewind_config_changed.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_import/import_rewind_config_changed.ksh
index f42ba10d6..74d75b6cd 100755
--- a/tests/zfs-tests/tests/functional/cli_root/zpool_import/import_rewind_config_changed.ksh
+++ b/tests/zfs-tests/tests/functional/cli_root/zpool_import/import_rewind_config_changed.ksh
@@ -48,7 +48,7 @@ function custom_cleanup
{
set_vdev_validate_skip 0
cleanup
- log_must set_tunable64 zfs_vdev_min_ms_count 16
+ log_must set_tunable64 VDEV_MIN_MS_COUNT 16
}
log_onexit custom_cleanup
@@ -201,14 +201,14 @@ function test_remove_vdev
}
# Record txg history
-is_linux && log_must set_tunable32 zfs_txg_history 100
+is_linux && log_must set_tunable32 TXG_HISTORY 100
# Make the devices bigger to reduce chances of overwriting MOS metadata.
increase_device_sizes $(( FILE_SIZE * 4 ))
# Increase the number of metaslabs for small pools temporarily to
# reduce the chance of reusing a metaslab that holds old MOS metadata.
-log_must set_tunable64 zfs_vdev_min_ms_count 150
+log_must set_tunable64 VDEV_MIN_MS_COUNT 150
# Part of the rewind test is to see how it reacts to path changes
typeset pathstochange="$VDEV0 $VDEV1 $VDEV2 $VDEV3"
diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_import/import_rewind_device_replaced.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_import/import_rewind_device_replaced.ksh
index bc2c611ae..9a2f0c673 100755
--- a/tests/zfs-tests/tests/functional/cli_root/zpool_import/import_rewind_device_replaced.ksh
+++ b/tests/zfs-tests/tests/functional/cli_root/zpool_import/import_rewind_device_replaced.ksh
@@ -63,7 +63,7 @@ function custom_cleanup
[[ -n ZFS_TXG_TIMEOUT ]] &&
log_must set_zfs_txg_timeout $ZFS_TXG_TIMEOUT
log_must rm -rf $BACKUP_DEVICE_DIR
- log_must set_tunable32 zfs_scan_suspend_progress 0
+ log_must set_tunable32 SCAN_SUSPEND_PROGRESS 0
cleanup
}
@@ -102,13 +102,13 @@ function test_replace_vdev
log_must zpool import -d $DEVICE_DIR $TESTPOOL1
# Ensure resilvering doesn't complete.
- log_must set_tunable32 zfs_scan_suspend_progress 1
+ log_must set_tunable32 SCAN_SUSPEND_PROGRESS 1
log_must zpool replace $TESTPOOL1 $replacevdev $replaceby
# Confirm pool is still replacing
log_must pool_is_replacing $TESTPOOL1
log_must zpool export $TESTPOOL1
- log_must set_tunable32 zfs_scan_suspend_progress 0
+ log_must set_tunable32 SCAN_SUSPEND_PROGRESS 0
############################################################
# Test 1: rewind while device is resilvering.
@@ -151,7 +151,7 @@ function test_replace_vdev
}
# Record txg history
-is_linux && log_must set_tunable32 zfs_txg_history 100
+is_linux && log_must set_tunable32 TXG_HISTORY 100
log_must mkdir -p $BACKUP_DEVICE_DIR
# Make the devices bigger to reduce chances of overwriting MOS metadata.
diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_import/zpool_import.kshlib b/tests/zfs-tests/tests/functional/cli_root/zpool_import/zpool_import.kshlib
index c365ec4ad..201c3803a 100644
--- a/tests/zfs-tests/tests/functional/cli_root/zpool_import/zpool_import.kshlib
+++ b/tests/zfs-tests/tests/functional/cli_root/zpool_import/zpool_import.kshlib
@@ -34,7 +34,7 @@ function cleanup
log_must mkfile $FILE_SIZE ${DEVICE_DIR}/${DEVICE_FILE}$i
((i += 1))
done
- is_linux && set_tunable32 "zfs_txg_history" 0
+ is_linux && set_tunable32 TXG_HISTORY 0
}
#
@@ -317,32 +317,32 @@ function pool_is_replacing
function set_vdev_validate_skip
{
- set_tunable32 "vdev_validate_skip" "$1"
+ set_tunable32 VDEV_VALIDATE_SKIP "$1"
}
function get_zfs_txg_timeout
{
- get_tunable "zfs_txg_timeout"
+ get_tunable TXG_TIMEOUT
}
function set_zfs_txg_timeout
{
- set_tunable32 "zfs_txg_timeout" "$1"
+ set_tunable32 TXG_TIMEOUT "$1"
}
function set_spa_load_verify_metadata
{
- set_tunable32 "spa_load_verify_metadata" "$1"
+ set_tunable32 SPA_LOAD_VERIFY_METADATA "$1"
}
function set_spa_load_verify_data
{
- set_tunable32 "spa_load_verify_data" "$1"
+ set_tunable32 SPA_LOAD_VERIFY_DATA "$1"
}
function set_zfs_max_missing_tvds
{
- set_tunable32 "zfs_max_missing_tvds" "$1"
+ set_tunable32 MAX_MISSING_TVDS "$1"
}
#
diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_import/zpool_import_errata4.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_import/zpool_import_errata4.ksh
index d06a9cd75..a0f063a8d 100755
--- a/tests/zfs-tests/tests/functional/cli_root/zpool_import/zpool_import_errata4.ksh
+++ b/tests/zfs-tests/tests/functional/cli_root/zpool_import/zpool_import_errata4.ksh
@@ -51,7 +51,7 @@ function uncompress_pool
function cleanup
{
- log_must set_tunable32 zfs_disable_ivset_guid_check 0
+ log_must set_tunable32 DISABLE_IVSET_GUID_CHECK 0
poolexists $POOL_NAME && log_must zpool destroy $POOL_NAME
[[ -e /$TESTPOOL/$POOL_FILE ]] && rm /$TESTPOOL/$POOL_FILE
return 0
@@ -91,7 +91,7 @@ log_mustnot has_ivset_guid $POOL_NAME/testvol@snap3
# 2. Prepare pool to fix existing datasets
log_must zpool set feature@bookmark_v2=enabled $POOL_NAME
-log_must set_tunable32 zfs_disable_ivset_guid_check 1
+log_must set_tunable32 DISABLE_IVSET_GUID_CHECK 1
log_must zfs create $POOL_NAME/fixed
# 3. Use raw sends to fix datasets
diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_initialize/zpool_initialize_verify_initialized.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_initialize/zpool_initialize_verify_initialized.ksh
index 0fa6a0be9..6a8f7d49f 100755
--- a/tests/zfs-tests/tests/functional/cli_root/zpool_initialize/zpool_initialize_verify_initialized.ksh
+++ b/tests/zfs-tests/tests/functional/cli_root/zpool_initialize/zpool_initialize_verify_initialized.ksh
@@ -39,7 +39,7 @@
function cleanup
{
- set_tunable64 zfs_initialize_value $ORIG_PATTERN
+ set_tunable64 INITIALIZE_VALUE $ORIG_PATTERN
zpool import -d $TESTDIR $TESTPOOL
if datasetexists $TESTPOOL ; then
@@ -54,8 +54,8 @@ log_onexit cleanup
PATTERN="deadbeefdeadbeef"
SMALLFILE="$TESTDIR/smallfile"
-ORIG_PATTERN=$(get_tunable zfs_initialize_value)
-log_must set_tunable64 zfs_initialize_value $(printf %llu 0x$PATTERN)
+ORIG_PATTERN=$(get_tunable INITIALIZE_VALUE)
+log_must set_tunable64 INITIALIZE_VALUE $(printf %llu 0x$PATTERN)
log_must mkdir "$TESTDIR"
log_must mkfile $MINVDEVSIZE "$SMALLFILE"
diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_resilver/zpool_resilver_restart.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_resilver/zpool_resilver_restart.ksh
index cfafbb6b5..80fc16912 100755
--- a/tests/zfs-tests/tests/functional/cli_root/zpool_resilver/zpool_resilver_restart.ksh
+++ b/tests/zfs-tests/tests/functional/cli_root/zpool_resilver/zpool_resilver_restart.ksh
@@ -45,7 +45,7 @@ verify_runnable "global"
function cleanup
{
- log_must set_tunable32 zfs_scan_suspend_progress 0
+ log_must set_tunable32 SCAN_SUSPEND_PROGRESS 0
log_must rm -f $mntpnt/biggerfile1
log_must rm -f $mntpnt/biggerfile2
}
@@ -67,7 +67,7 @@ log_must sync
log_must zpool detach $TESTPOOL $DISK3
# 3. Reattach the drives, causing the second drive's resilver to be deferred
-log_must set_tunable32 zfs_scan_suspend_progress 1
+log_must set_tunable32 SCAN_SUSPEND_PROGRESS 1
log_must zpool attach $TESTPOOL $DISK1 $DISK2
log_must is_pool_resilvering $TESTPOOL true
@@ -78,7 +78,7 @@ log_must is_pool_resilvering $TESTPOOL true
# 4. Manually restart the resilver with all drives
log_must zpool resilver $TESTPOOL
log_must is_deferred_scan_started $TESTPOOL
-log_must set_tunable32 zfs_scan_suspend_progress 0
+log_must set_tunable32 SCAN_SUSPEND_PROGRESS 0
log_must wait_for_resilver_end $TESTPOOL $MAXTIMEOUT
log_must check_state $TESTPOOL "$DISK2" "online"
log_must check_state $TESTPOOL "$DISK3" "online"
diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_scrub/cleanup.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_scrub/cleanup.ksh
index b3cb58ceb..03eb9901c 100755
--- a/tests/zfs-tests/tests/functional/cli_root/zpool_scrub/cleanup.ksh
+++ b/tests/zfs-tests/tests/functional/cli_root/zpool_scrub/cleanup.ksh
@@ -30,5 +30,5 @@
verify_runnable "global"
-log_must set_tunable32 zfs_scan_suspend_progress 0
+log_must set_tunable32 SCAN_SUSPEND_PROGRESS 0
destroy_mirrors
diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_scrub/zpool_scrub_002_pos.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_scrub/zpool_scrub_002_pos.ksh
index 71a204060..449bb9a82 100755
--- a/tests/zfs-tests/tests/functional/cli_root/zpool_scrub/zpool_scrub_002_pos.ksh
+++ b/tests/zfs-tests/tests/functional/cli_root/zpool_scrub/zpool_scrub_002_pos.ksh
@@ -50,7 +50,7 @@ verify_runnable "global"
function cleanup
{
- log_must set_tunable32 zfs_scan_suspend_progress 0
+ log_must set_tunable32 SCAN_SUSPEND_PROGRESS 0
log_must rm -f $mntpnt/biggerfile
}
@@ -63,7 +63,7 @@ mntpnt=$(get_prop mountpoint $TESTPOOL/$TESTFS)
log_must file_write -b 1048576 -c 1024 -o create -d 0 -f $mntpnt/biggerfile
log_must sync
-log_must set_tunable32 zfs_scan_suspend_progress 1
+log_must set_tunable32 SCAN_SUSPEND_PROGRESS 1
log_must zpool scrub $TESTPOOL
log_must is_pool_scrubbing $TESTPOOL true
log_must zpool scrub -p $TESTPOOL
diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_scrub/zpool_scrub_003_pos.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_scrub/zpool_scrub_003_pos.ksh
index 56225456b..12dc044e9 100755
--- a/tests/zfs-tests/tests/functional/cli_root/zpool_scrub/zpool_scrub_003_pos.ksh
+++ b/tests/zfs-tests/tests/functional/cli_root/zpool_scrub/zpool_scrub_003_pos.ksh
@@ -47,14 +47,14 @@ verify_runnable "global"
function cleanup
{
- log_must set_tunable32 zfs_scan_suspend_progress 0
+ log_must set_tunable32 SCAN_SUSPEND_PROGRESS 0
}
log_onexit cleanup
log_assert "Scrub command fails when there is already a scrub in progress"
-log_must set_tunable32 zfs_scan_suspend_progress 1
+log_must set_tunable32 SCAN_SUSPEND_PROGRESS 1
log_must zpool scrub $TESTPOOL
log_must is_pool_scrubbing $TESTPOOL true
log_mustnot zpool scrub $TESTPOOL
diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_scrub/zpool_scrub_004_pos.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_scrub/zpool_scrub_004_pos.ksh
index 92450d3b9..a7ae7f16b 100755
--- a/tests/zfs-tests/tests/functional/cli_root/zpool_scrub/zpool_scrub_004_pos.ksh
+++ b/tests/zfs-tests/tests/functional/cli_root/zpool_scrub/zpool_scrub_004_pos.ksh
@@ -46,7 +46,7 @@
function cleanup
{
- log_must set_tunable32 zfs_scan_suspend_progress 0
+ log_must set_tunable32 SCAN_SUSPEND_PROGRESS 0
rm -f $mntpnt/extra
}
@@ -59,7 +59,7 @@ log_assert "Resilver prevent scrub from starting until the resilver completes"
mntpnt=$(get_prop mountpoint $TESTPOOL/$TESTFS)
# Temporarily prevent scan progress so our test doesn't race
-log_must set_tunable32 zfs_scan_suspend_progress 1
+log_must set_tunable32 SCAN_SUSPEND_PROGRESS 1
while ! is_pool_resilvering $TESTPOOL; do
log_must zpool detach $TESTPOOL $DISK2
@@ -72,7 +72,7 @@ done
log_must is_pool_resilvering $TESTPOOL
log_mustnot zpool scrub $TESTPOOL
-log_must set_tunable32 zfs_scan_suspend_progress 0
+log_must set_tunable32 SCAN_SUSPEND_PROGRESS 0
log_must zpool wait -t resilver $TESTPOOL
log_pass "Resilver prevent scrub from starting until the resilver completes"
diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_scrub/zpool_scrub_print_repairing.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_scrub/zpool_scrub_print_repairing.ksh
index e4cb2b51e..4b51cd962 100755
--- a/tests/zfs-tests/tests/functional/cli_root/zpool_scrub/zpool_scrub_print_repairing.ksh
+++ b/tests/zfs-tests/tests/functional/cli_root/zpool_scrub/zpool_scrub_print_repairing.ksh
@@ -43,7 +43,7 @@ log_assert "Verify we see '(repairing)' while scrubbing a bad vdev."
function cleanup
{
log_must zinject -c all
- log_must set_tunable64 zfs_scan_vdev_limit $ZFS_SCAN_VDEV_LIMIT_DEFAULT
+ log_must set_tunable64 SCAN_VDEV_LIMIT $ZFS_SCAN_VDEV_LIMIT_DEFAULT
zpool scrub -s $TESTPOOL || true
}
@@ -54,7 +54,7 @@ log_must zinject -d $DISK1 -e io -T read -f 100 $TESTPOOL
# Make the scrub slow
log_must zinject -d $DISK1 -D10:1 $TESTPOOL
-log_must set_tunable64 zfs_scan_vdev_limit $ZFS_SCAN_VDEV_LIMIT_SLOW
+log_must set_tunable64 SCAN_VDEV_LIMIT $ZFS_SCAN_VDEV_LIMIT_SLOW
log_must zpool scrub $TESTPOOL
diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_split/zpool_split_resilver.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_split/zpool_split_resilver.ksh
index 1a5c3198f..99a40ecf2 100755
--- a/tests/zfs-tests/tests/functional/cli_root/zpool_split/zpool_split_resilver.ksh
+++ b/tests/zfs-tests/tests/functional/cli_root/zpool_split/zpool_split_resilver.ksh
@@ -41,7 +41,7 @@ verify_runnable "both"
function cleanup
{
- log_must set_tunable32 zfs_scan_suspend_progress 0
+ log_must set_tunable32 SCAN_SUSPEND_PROGRESS 0
destroy_pool $TESTPOOL
destroy_pool $TESTPOOL2
rm -f $DEVICE1 $DEVICE2
@@ -69,7 +69,7 @@ function zpool_split #disk_to_be_offline/online
log_must sync
# temporarily prevent resilvering progress, so it will not finish too early
- log_must set_tunable32 zfs_scan_suspend_progress 1
+ log_must set_tunable32 SCAN_SUSPEND_PROGRESS 1
log_must zpool online $TESTPOOL $disk
@@ -84,7 +84,7 @@ function zpool_split #disk_to_be_offline/online
log_mustnot zpool split $TESTPOOL $TESTPOOL2
- log_must set_tunable32 zfs_scan_suspend_progress 0
+ log_must set_tunable32 SCAN_SUSPEND_PROGRESS 0
}
log_assert "Verify 'zpool split' will fail if resilver in progress for a disk"
diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_trim/zpool_trim_partial.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_trim/zpool_trim_partial.ksh
index 58e0ef77c..5d14b74ec 100755
--- a/tests/zfs-tests/tests/functional/cli_root/zpool_trim/zpool_trim_partial.ksh
+++ b/tests/zfs-tests/tests/functional/cli_root/zpool_trim/zpool_trim_partial.ksh
@@ -44,9 +44,9 @@ function cleanup
rm -rf "$TESTDIR"
fi
- log_must set_tunable64 zfs_trim_metaslab_skip 0
- log_must set_tunable64 zfs_trim_extent_bytes_min $trim_extent_bytes_min
- log_must set_tunable64 zfs_vdev_min_ms_count $vdev_min_ms_count
+ log_must set_tunable64 TRIM_METASLAB_SKIP 0
+ log_must set_tunable64 TRIM_EXTENT_BYTES_MIN $trim_extent_bytes_min
+ log_must set_tunable64 VDEV_MIN_MS_COUNT $vdev_min_ms_count
}
log_onexit cleanup
@@ -55,12 +55,12 @@ LARGEFILE="$TESTDIR/largefile"
# The minimum number of metaslabs is increased in order to simulate the
# behavior of partial trimming on a more typically sized 1TB disk.
-typeset vdev_min_ms_count=$(get_tunable zfs_vdev_min_ms_count)
-log_must set_tunable64 zfs_vdev_min_ms_count 64
+typeset vdev_min_ms_count=$(get_tunable VDEV_MIN_MS_COUNT)
+log_must set_tunable64 VDEV_MIN_MS_COUNT 64
# Minimum trim size is decreased to verify all trim sizes.
-typeset trim_extent_bytes_min=$(get_tunable zfs_trim_extent_bytes_min)
-log_must set_tunable64 zfs_trim_extent_bytes_min 4096
+typeset trim_extent_bytes_min=$(get_tunable TRIM_EXTENT_BYTES_MIN)
+log_must set_tunable64 TRIM_EXTENT_BYTES_MIN 4096
log_must mkdir "$TESTDIR"
log_must truncate -s $LARGESIZE "$LARGEFILE"
@@ -85,9 +85,9 @@ log_must test $new_size -gt $((4 * floor(LARGESIZE * 0.70) ))
# Perform a partial trim, we expect it to skip most of the new metaslabs
# which have never been used and therefore do not need be trimmed.
-log_must set_tunable64 zfs_trim_metaslab_skip 1
+log_must set_tunable64 TRIM_METASLAB_SKIP 1
log_must zpool trim $TESTPOOL
-log_must set_tunable64 zfs_trim_metaslab_skip 0
+log_must set_tunable64 TRIM_METASLAB_SKIP 0
log_must zpool sync
while [[ "$(trim_progress $TESTPOOL $LARGEFILE)" -lt "100" ]]; do
diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_trim/zpool_trim_verify_trimmed.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_trim/zpool_trim_verify_trimmed.ksh
index a216d132f..d5aaf49ae 100755
--- a/tests/zfs-tests/tests/functional/cli_root/zpool_trim/zpool_trim_verify_trimmed.ksh
+++ b/tests/zfs-tests/tests/functional/cli_root/zpool_trim/zpool_trim_verify_trimmed.ksh
@@ -43,7 +43,7 @@ function cleanup
rm -rf "$TESTDIR"
fi
- log_must set_tunable64 zfs_trim_extent_bytes_min $trim_extent_bytes_min
+ log_must set_tunable64 TRIM_EXTENT_BYTES_MIN $trim_extent_bytes_min
}
log_onexit cleanup
@@ -51,8 +51,8 @@ LARGESIZE=$((MINVDEVSIZE * 4))
LARGEFILE="$TESTDIR/largefile"
# Reduce trim size to allow for tighter tolerance below when checking.
-typeset trim_extent_bytes_min=$(get_tunable zfs_trim_extent_bytes_min)
-log_must set_tunable64 zfs_trim_extent_bytes_min 4096
+typeset trim_extent_bytes_min=$(get_tunable TRIM_EXTENT_BYTES_MIN)
+log_must set_tunable64 TRIM_EXTENT_BYTES_MIN 4096
log_must mkdir "$TESTDIR"
log_must truncate -s $LARGESIZE "$LARGEFILE"
diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_wait/scan/zpool_wait_replace_cancel.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_wait/scan/zpool_wait_replace_cancel.ksh
index b6c60b0c5..f135de4bc 100755
--- a/tests/zfs-tests/tests/functional/cli_root/zpool_wait/scan/zpool_wait_replace_cancel.ksh
+++ b/tests/zfs-tests/tests/functional/cli_root/zpool_wait/scan/zpool_wait_replace_cancel.ksh
@@ -35,7 +35,7 @@
function cleanup
{
- log_must set_tunable32 zfs_scan_suspend_progress 0
+ log_must set_tunable32 SCAN_SUSPEND_PROGRESS 0
kill_if_running $pid
get_disklist $TESTPOOL | grep $DISK2 >/dev/null && \
log_must zpool detach $TESTPOOL $DISK2
@@ -49,7 +49,7 @@ log_onexit cleanup
log_must zpool attach -w $TESTPOOL $DISK1 $DISK2
-log_must set_tunable32 zfs_scan_suspend_progress 1
+log_must set_tunable32 SCAN_SUSPEND_PROGRESS 1
log_must zpool replace $TESTPOOL $DISK2 $DISK3
log_bkgrnd zpool wait -t replace $TESTPOOL
diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_wait/scan/zpool_wait_scrub_cancel.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_wait/scan/zpool_wait_scrub_cancel.ksh
index 1f7f1e42b..7adb3b2b8 100755
--- a/tests/zfs-tests/tests/functional/cli_root/zpool_wait/scan/zpool_wait_scrub_cancel.ksh
+++ b/tests/zfs-tests/tests/functional/cli_root/zpool_wait/scan/zpool_wait_scrub_cancel.ksh
@@ -34,7 +34,7 @@
function cleanup
{
- log_must set_tunable32 zfs_scan_suspend_progress 0
+ log_must set_tunable32 SCAN_SUSPEND_PROGRESS 0
kill_if_running $pid
is_pool_scrubbing $TESTPOOL && log_must zpool scrub -s $TESTPOOL
}
@@ -58,7 +58,7 @@ typeset pid
log_onexit cleanup
-log_must set_tunable32 zfs_scan_suspend_progress 1
+log_must set_tunable32 SCAN_SUSPEND_PROGRESS 1
do_test "zpool scrub -p $TESTPOOL"
do_test "zpool scrub -s $TESTPOOL"
diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_wait/scan/zpool_wait_scrub_flag.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_wait/scan/zpool_wait_scrub_flag.ksh
index 9b0da29ad..aac62cf46 100755
--- a/tests/zfs-tests/tests/functional/cli_root/zpool_wait/scan/zpool_wait_scrub_flag.ksh
+++ b/tests/zfs-tests/tests/functional/cli_root/zpool_wait/scan/zpool_wait_scrub_flag.ksh
@@ -30,7 +30,7 @@
function cleanup
{
- log_must set_tunable32 zfs_scan_suspend_progress 0
+ log_must set_tunable32 SCAN_SUSPEND_PROGRESS 0
kill_if_running $pid
}
@@ -38,7 +38,7 @@ typeset pid
log_onexit cleanup
-log_must set_tunable32 zfs_scan_suspend_progress 1
+log_must set_tunable32 SCAN_SUSPEND_PROGRESS 1
log_bkgrnd zpool scrub -w $TESTPOOL
pid=$!
diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_wait/zpool_wait_discard.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_wait/zpool_wait_discard.ksh
index 47cf374d9..8d5747e09 100755
--- a/tests/zfs-tests/tests/functional/cli_root/zpool_wait/zpool_wait_discard.ksh
+++ b/tests/zfs-tests/tests/functional/cli_root/zpool_wait/zpool_wait_discard.ksh
@@ -39,7 +39,7 @@ function cleanup
kill_if_running $pid
[[ $default_mem_limit ]] && log_must set_tunable64 \
- zfs_spa_discard_memory_limit $default_mem_limit
+ SPA_DISCARD_MEMORY_LIMIT $default_mem_limit
}
function do_test
@@ -76,8 +76,8 @@ typeset pid default_mem_limit
log_onexit cleanup
-default_mem_limit=$(get_tunable zfs_spa_discard_memory_limit)
-log_must set_tunable64 zfs_spa_discard_memory_limit 32
+default_mem_limit=$(get_tunable SPA_DISCARD_MEMORY_LIMIT)
+log_must set_tunable64 SPA_DISCARD_MEMORY_LIMIT 32
log_must zpool create $TESTPOOL $DISK1
diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_wait/zpool_wait_freeing.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_wait/zpool_wait_freeing.ksh
index 88dbfb8cc..7f5a9e6a8 100755
--- a/tests/zfs-tests/tests/functional/cli_root/zpool_wait/zpool_wait_freeing.ksh
+++ b/tests/zfs-tests/tests/functional/cli_root/zpool_wait/zpool_wait_freeing.ksh
@@ -34,9 +34,9 @@
function cleanup
{
- log_must set_tunable64 zfs_async_block_max_blocks $default_async_block_max_blocks
- log_must set_tunable64 zfs_livelist_max_entries $default_max_livelist_entries
- log_must set_tunable64 zfs_livelist_min_percent_shared $default_min_pct_shared
+ log_must set_tunable64 ASYNC_BLOCK_MAX_BLOCKS $default_async_block_max_blocks
+ log_must set_tunable64 LIVELIST_MAX_ENTRIES $default_max_livelist_entries
+ log_must set_tunable64 LIVELIST_MIN_PERCENT_SHARED $default_min_pct_shared
poolexists $TESTPOOL && destroy_pool $TESTPOOL
kill_if_running $pid
@@ -63,18 +63,18 @@ log_must zpool create $TESTPOOL $DISK1
# Limit the number of blocks that can be freed in a single txg. This slows down
# freeing so that we actually have something to wait for.
#
-default_async_block_max_blocks=$(get_tunable zfs_async_block_max_blocks)
-log_must set_tunable64 zfs_async_block_max_blocks 8
+default_async_block_max_blocks=$(get_tunable ASYNC_BLOCK_MAX_BLOCKS)
+log_must set_tunable64 ASYNC_BLOCK_MAX_BLOCKS 8
#
# Space from clones gets freed one livelist per txg instead of being controlled
# by zfs_async_block_max_blocks. Limit the rate at which space is freed by
# limiting the size of livelists so that we end up with a number of them.
#
-default_max_livelist_entries=$(get_tunable zfs_livelist_max_entries)
-log_must set_tunable64 zfs_livelist_max_entries 16
+default_max_livelist_entries=$(get_tunable LIVELIST_MAX_ENTRIES)
+log_must set_tunable64 LIVELIST_MAX_ENTRIES 16
# Don't disable livelists, no matter how much clone diverges from snapshot
-default_min_pct_shared=$(get_tunable zfs_livelist_min_percent_shared)
-log_must set_tunable64 zfs_livelist_min_percent_shared -1
+default_min_pct_shared=$(get_tunable LIVELIST_MIN_PERCENT_SHARED)
+log_must set_tunable64 LIVELIST_MIN_PERCENT_SHARED -1
#
# Test waiting for space from destroyed filesystem to be freed
diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_wait/zpool_wait_initialize_basic.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_wait/zpool_wait_initialize_basic.ksh
index e19360e85..924ae5f0d 100755
--- a/tests/zfs-tests/tests/functional/cli_root/zpool_wait/zpool_wait_initialize_basic.ksh
+++ b/tests/zfs-tests/tests/functional/cli_root/zpool_wait/zpool_wait_initialize_basic.ksh
@@ -38,7 +38,7 @@ function cleanup
[[ -d "$TESTDIR" ]] && log_must rm -r "$TESTDIR"
[[ "$default_chunk_sz" ]] && \
- log_must set_tunable64 zfs_initialize_chunk_size $default_chunk_sz
+ log_must set_tunable64 INITIALIZE_CHUNK_SIZE $default_chunk_sz
}
typeset -r FILE_VDEV="$TESTDIR/file_vdev"
@@ -46,8 +46,8 @@ typeset pid default_chunk_sz
log_onexit cleanup
-default_chunk_sz=$(get_tunable zfs_initialize_chunk_size)
-log_must set_tunable64 zfs_initialize_chunk_size 2048
+default_chunk_sz=$(get_tunable INITIALIZE_CHUNK_SIZE)
+log_must set_tunable64 INITIALIZE_CHUNK_SIZE 2048
log_must mkdir "$TESTDIR"
log_must mkfile 256M "$FILE_VDEV"
diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_wait/zpool_wait_initialize_cancel.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_wait/zpool_wait_initialize_cancel.ksh
index ced0a482f..8b19ee62a 100755
--- a/tests/zfs-tests/tests/functional/cli_root/zpool_wait/zpool_wait_initialize_cancel.ksh
+++ b/tests/zfs-tests/tests/functional/cli_root/zpool_wait/zpool_wait_initialize_cancel.ksh
@@ -40,7 +40,7 @@ function cleanup
poolexists $TESTPOOL && destroy_pool $TESTPOOL
[[ "$default_chunk_sz" ]] &&
- log_must set_tunable64 zfs_initialize_chunk_size $default_chunk_sz
+ log_must set_tunable64 INITIALIZE_CHUNK_SIZE $default_chunk_sz
}
function do_test
@@ -66,8 +66,8 @@ typeset pid default_chunk_sz
log_onexit cleanup
# Make sure the initialization takes a while
-default_chunk_sz=$(get_tunable zfs_initialize_chunk_size)
-log_must set_tunable64 zfs_initialize_chunk_size 512
+default_chunk_sz=$(get_tunable INITIALIZE_CHUNK_SIZE)
+log_must set_tunable64 INITIALIZE_CHUNK_SIZE 512
log_must zpool create $TESTPOOL $DISK1
diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_wait/zpool_wait_initialize_flag.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_wait/zpool_wait_initialize_flag.ksh
index c95e8661b..8c8c45a51 100755
--- a/tests/zfs-tests/tests/functional/cli_root/zpool_wait/zpool_wait_initialize_flag.ksh
+++ b/tests/zfs-tests/tests/functional/cli_root/zpool_wait/zpool_wait_initialize_flag.ksh
@@ -42,7 +42,7 @@ function cleanup
poolexists $TESTPOOL && destroy_pool $TESTPOOL
[[ "$default_chunk_sz" ]] &&
- log_must set_tunable64 zfs_initialize_chunk_size $default_chunk_sz
+ log_must set_tunable64 INITIALIZE_CHUNK_SIZE $default_chunk_sz
}
typeset init12_pid init3_pid default_chunk_sz
@@ -52,8 +52,8 @@ log_onexit cleanup
log_must zpool create -f $TESTPOOL $DISK1 $DISK2 $DISK3
# Make sure the initialization takes a while
-default_chunk_sz=$(get_tunable zfs_initialize_chunk_size)
-log_must set_tunable64 zfs_initialize_chunk_size 512
+default_chunk_sz=$(get_tunable INITIALIZE_CHUNK_SIZE)
+log_must set_tunable64 INITIALIZE_CHUNK_SIZE 512
log_bkgrnd zpool initialize -w $TESTPOOL $DISK1 $DISK2
init12_pid=$!
diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_wait/zpool_wait_multiple.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_wait/zpool_wait_multiple.ksh
index b17ea7ff5..4a3b61c52 100755
--- a/tests/zfs-tests/tests/functional/cli_root/zpool_wait/zpool_wait_multiple.ksh
+++ b/tests/zfs-tests/tests/functional/cli_root/zpool_wait/zpool_wait_multiple.ksh
@@ -37,8 +37,8 @@ function cleanup
poolexists $TESTPOOL && destroy_pool $TESTPOOL
[[ "$default_chunk_sz" ]] && log_must set_tunable64 \
- zfs_initialize_chunk_size $default_chunk_sz
- log_must set_tunable32 zfs_scan_suspend_progress 0
+ INITIALIZE_CHUNK_SIZE $default_chunk_sz
+ log_must set_tunable32 SCAN_SUSPEND_PROGRESS 0
}
typeset pid default_chunk_sz
@@ -48,9 +48,9 @@ log_onexit cleanup
log_must zpool create -f $TESTPOOL $DISK1
log_must dd if=/dev/urandom of="/$TESTPOOL/testfile" bs=64k count=1k
-default_chunk_sz=$(get_tunable zfs_initialize_chunk_size)
-log_must set_tunable64 zfs_initialize_chunk_size 512
-log_must set_tunable32 zfs_scan_suspend_progress 1
+default_chunk_sz=$(get_tunable INITIALIZE_CHUNK_SIZE)
+log_must set_tunable64 INITIALIZE_CHUNK_SIZE 512
+log_must set_tunable32 SCAN_SUSPEND_PROGRESS 1
log_must zpool scrub $TESTPOOL
diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_wait/zpool_wait_remove.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_wait/zpool_wait_remove.ksh
index 7d089aee3..19298d193 100755
--- a/tests/zfs-tests/tests/functional/cli_root/zpool_wait/zpool_wait_remove.ksh
+++ b/tests/zfs-tests/tests/functional/cli_root/zpool_wait/zpool_wait_remove.ksh
@@ -37,7 +37,7 @@
function cleanup
{
kill_if_running $pid
- log_must set_tunable32 zfs_removal_suspend_progress 0
+ log_must set_tunable32 REMOVAL_SUSPEND_PROGRESS 0
poolexists $TESTPOOL && destroy_pool $TESTPOOL
}
@@ -49,7 +49,7 @@ function do_test
log_must dd if=/dev/urandom of="/$TESTPOOL/testfile" bs=1k count=16k
# Start removal, but don't allow it to make any progress at first
- log_must set_tunable32 zfs_removal_suspend_progress 1
+ log_must set_tunable32 REMOVAL_SUSPEND_PROGRESS 1
if $use_flag; then
log_bkgrnd zpool remove -w $TESTPOOL $DISK1
@@ -69,7 +69,7 @@ function do_test
proc_must_exist $pid
# Unpause removal, and wait for it to finish
- log_must set_tunable32 zfs_removal_suspend_progress 0
+ log_must set_tunable32 REMOVAL_SUSPEND_PROGRESS 0
check_while_waiting $pid "is_pool_removing $TESTPOOL"
log_must zpool destroy $TESTPOOL
diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_wait/zpool_wait_remove_cancel.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_wait/zpool_wait_remove_cancel.ksh
index 42bef8b9f..4373b5777 100755
--- a/tests/zfs-tests/tests/functional/cli_root/zpool_wait/zpool_wait_remove_cancel.ksh
+++ b/tests/zfs-tests/tests/functional/cli_root/zpool_wait/zpool_wait_remove_cancel.ksh
@@ -34,7 +34,7 @@
function cleanup
{
kill_if_running $pid
- log_must set_tunable32 zfs_removal_suspend_progress 0
+ log_must set_tunable32 REMOVAL_SUSPEND_PROGRESS 0
poolexists $TESTPOOL && destroy_pool $TESTPOOL
}
@@ -47,7 +47,7 @@ log_must zpool create -f $TESTPOOL $DISK1 $DISK2
log_must dd if=/dev/urandom of="/$TESTPOOL/testfile" bs=1k count=16k
# Start removal, but don't allow it to make any progress
-log_must set_tunable32 zfs_removal_suspend_progress 1
+log_must set_tunable32 REMOVAL_SUSPEND_PROGRESS 1
log_must zpool remove $TESTPOOL $DISK1
log_bkgrnd zpool wait -t remove $TESTPOOL
diff --git a/tests/zfs-tests/tests/functional/deadman/deadman_sync.ksh b/tests/zfs-tests/tests/functional/deadman/deadman_sync.ksh
index a5537c435..5d803af85 100755
--- a/tests/zfs-tests/tests/functional/deadman/deadman_sync.ksh
+++ b/tests/zfs-tests/tests/functional/deadman/deadman_sync.ksh
@@ -46,17 +46,17 @@ function cleanup
log_must zinject -c all
default_cleanup_noexit
- log_must set_tunable64 zfs_deadman_synctime_ms $SYNCTIME_DEFAULT
- log_must set_tunable64 zfs_deadman_checktime_ms $CHECKTIME_DEFAULT
- log_must set_tunable64 zfs_deadman_failmode $FAILMODE_DEFAULT
+ log_must set_tunable64 DEADMAN_SYNCTIME_MS $SYNCTIME_DEFAULT
+ log_must set_tunable64 DEADMAN_CHECKTIME_MS $CHECKTIME_DEFAULT
+ log_must set_tunable64 DEADMAN_FAILMODE $FAILMODE_DEFAULT
}
log_assert "Verify spa deadman detects a hung txg"
log_onexit cleanup
-log_must set_tunable64 zfs_deadman_synctime_ms 5000
-log_must set_tunable64 zfs_deadman_checktime_ms 1000
-log_must set_tunable64 zfs_deadman_failmode "wait"
+log_must set_tunable64 DEADMAN_SYNCTIME_MS 5000
+log_must set_tunable64 DEADMAN_CHECKTIME_MS 1000
+log_must set_tunable64 DEADMAN_FAILMODE "wait"
# Create a new pool in order to use the updated deadman settings.
default_setup_noexit $DISK1
diff --git a/tests/zfs-tests/tests/functional/deadman/deadman_zio.ksh b/tests/zfs-tests/tests/functional/deadman/deadman_zio.ksh
index a61be995a..c1cfc1151 100755
--- a/tests/zfs-tests/tests/functional/deadman/deadman_zio.ksh
+++ b/tests/zfs-tests/tests/functional/deadman/deadman_zio.ksh
@@ -49,19 +49,19 @@ function cleanup
log_must zinject -c all
default_cleanup_noexit
- log_must set_tunable64 zfs_deadman_ziotime_ms $ZIOTIME_DEFAULT
- log_must set_tunable64 zfs_deadman_checktime_ms $CHECKTIME_DEFAULT
- log_must set_tunable64 zfs_deadman_failmode $FAILMODE_DEFAULT
+ log_must set_tunable64 DEADMAN_ZIOTIME_MS $ZIOTIME_DEFAULT
+ log_must set_tunable64 DEADMAN_CHECKTIME_MS $CHECKTIME_DEFAULT
+ log_must set_tunable64 DEADMAN_FAILMODE $FAILMODE_DEFAULT
}
log_assert "Verify zio deadman detects a hung zio"
log_onexit cleanup
# 1. Reduce the zfs_deadman_ziotime_ms to 5s.
-log_must set_tunable64 zfs_deadman_ziotime_ms 5000
+log_must set_tunable64 DEADMAN_ZIOTIME_MS 5000
# 2. Reduce the zfs_deadman_checktime_ms to 1s.
-log_must set_tunable64 zfs_deadman_checktime_ms 1000
-log_must set_tunable64 zfs_deadman_failmode "wait"
+log_must set_tunable64 DEADMAN_CHECKTIME_MS 1000
+log_must set_tunable64 DEADMAN_FAILMODE "wait"
# Create a new pool in order to use the updated deadman settings.
default_setup_noexit $DISK1
diff --git a/tests/zfs-tests/tests/functional/delegate/cleanup.ksh b/tests/zfs-tests/tests/functional/delegate/cleanup.ksh
index 2d129fbc1..1951c00e2 100755
--- a/tests/zfs-tests/tests/functional/delegate/cleanup.ksh
+++ b/tests/zfs-tests/tests/functional/delegate/cleanup.ksh
@@ -48,7 +48,7 @@ if is_freebsd; then
fi
if is_linux; then
- log_must set_tunable64 zfs_admin_snapshot 0
+ log_must set_tunable64 ADMIN_SNAPSHOT 0
fi
default_cleanup
diff --git a/tests/zfs-tests/tests/functional/delegate/setup.ksh b/tests/zfs-tests/tests/functional/delegate/setup.ksh
index d28e16375..13d0f3bfb 100755
--- a/tests/zfs-tests/tests/functional/delegate/setup.ksh
+++ b/tests/zfs-tests/tests/functional/delegate/setup.ksh
@@ -76,7 +76,7 @@ fi
DISK=${DISKS%% *}
if is_linux; then
- log_must set_tunable64 zfs_admin_snapshot 1
+ log_must set_tunable64 ADMIN_SNAPSHOT 1
fi
default_volume_setup $DISK
diff --git a/tests/zfs-tests/tests/functional/fault/decompress_fault.ksh b/tests/zfs-tests/tests/functional/fault/decompress_fault.ksh
index 2b3998049..81eab5666 100755
--- a/tests/zfs-tests/tests/functional/fault/decompress_fault.ksh
+++ b/tests/zfs-tests/tests/functional/fault/decompress_fault.ksh
@@ -31,15 +31,9 @@
log_assert "Testing that injected decompression errors are handled correctly"
-if is_freebsd; then
- COMPRESSION=compressed_arc_enabled
-else
- COMPRESSION=zfs_compressed_arc_enabled
-fi
-
function cleanup
{
- log_must set_tunable64 $COMPRESSION 1
+ log_must set_tunable64 COMPRESSED_ARC_ENABLED 1
log_must zinject -c all
default_cleanup_noexit
}
@@ -47,7 +41,7 @@ function cleanup
log_onexit cleanup
default_mirror_setup_noexit $DISK1 $DISK2
-log_must set_tunable64 $COMPRESSION 0
+log_must set_tunable64 COMPRESSED_ARC_ENABLED 0
log_must zfs create -o compression=on $TESTPOOL/fs
mntpt=$(get_prop mountpoint $TESTPOOL/fs)
write_compressible $mntpt 32m 1 1024k "testfile"
diff --git a/tests/zfs-tests/tests/functional/fault/zpool_status_-s.ksh b/tests/zfs-tests/tests/functional/fault/zpool_status_-s.ksh
index c919ae608..85f0083a0 100755
--- a/tests/zfs-tests/tests/functional/fault/zpool_status_-s.ksh
+++ b/tests/zfs-tests/tests/functional/fault/zpool_status_-s.ksh
@@ -46,8 +46,8 @@ default_mirror_setup_noexit $DISKS
function cleanup
{
log_must zinject -c all
- log_must set_tunable64 zio_slow_io_ms $OLD_SLOW_IO
- log_must set_tunable64 zfs_slow_io_events_per_second $OLD_SLOW_IO_EVENTS
+ log_must set_tunable64 ZIO_SLOW_IO_MS $OLD_SLOW_IO
+ log_must set_tunable64 SLOW_IO_EVENTS_PER_SECOND $OLD_SLOW_IO_EVENTS
default_cleanup_noexit
}
@@ -56,10 +56,10 @@ log_onexit cleanup
log_must zpool events -c
# Mark any IOs greater than 10ms as slow IOs
-OLD_SLOW_IO=$(get_tunable zio_slow_io_ms)
-OLD_SLOW_IO_EVENTS=$(get_tunable zfs_slow_io_events_per_second)
-log_must set_tunable64 zio_slow_io_ms 10
-log_must set_tunable64 zfs_slow_io_events_per_second 1000
+OLD_SLOW_IO=$(get_tunable ZIO_SLOW_IO_MS)
+OLD_SLOW_IO_EVENTS=$(get_tunable SLOW_IO_EVENTS_PER_SECOND)
+log_must set_tunable64 ZIO_SLOW_IO_MS 10
+log_must set_tunable64 SLOW_IO_EVENTS_PER_SECOND 1000
# Create 20ms IOs
log_must zinject -d $DISK -D20:100 $TESTPOOL
diff --git a/tests/zfs-tests/tests/functional/features/async_destroy/async_destroy_001_pos.ksh b/tests/zfs-tests/tests/functional/features/async_destroy/async_destroy_001_pos.ksh
index 20b61da92..23fb16d6e 100755
--- a/tests/zfs-tests/tests/functional/features/async_destroy/async_destroy_001_pos.ksh
+++ b/tests/zfs-tests/tests/functional/features/async_destroy/async_destroy_001_pos.ksh
@@ -49,7 +49,7 @@ verify_runnable "both"
function cleanup
{
datasetexists $TEST_FS && log_must zfs destroy $TEST_FS
- log_must set_tunable64 zfs_async_block_max_blocks 100000
+ log_must set_tunable64 ASYNC_BLOCK_MAX_BLOCKS 100000
}
log_onexit cleanup
@@ -64,7 +64,7 @@ log_must dd bs=1024k count=128 if=/dev/zero of=/$TEST_FS/file
# Decrease the max blocks to free each txg, so that freeing takes
# long enough that we can observe it.
#
-log_must set_tunable64 zfs_async_block_max_blocks 100
+log_must set_tunable64 ASYNC_BLOCK_MAX_BLOCKS 100
log_must sync
log_must zfs destroy $TEST_FS
@@ -88,7 +88,7 @@ done
# per txg.
#
sleep 10
-log_must set_tunable64 zfs_async_block_max_blocks 100000
+log_must set_tunable64 ASYNC_BLOCK_MAX_BLOCKS 100000
# Wait for everything to be freed.
while [[ "0" != "$(zpool list -Ho freeing $TESTPOOL)" ]]; do
diff --git a/tests/zfs-tests/tests/functional/log_spacemap/log_spacemap_import_logs.ksh b/tests/zfs-tests/tests/functional/log_spacemap/log_spacemap_import_logs.ksh
index 702322a0c..fca0e8e4a 100755
--- a/tests/zfs-tests/tests/functional/log_spacemap/log_spacemap_import_logs.ksh
+++ b/tests/zfs-tests/tests/functional/log_spacemap/log_spacemap_import_logs.ksh
@@ -48,8 +48,8 @@ verify_runnable "global"
function cleanup
{
- log_must set_tunable64 zfs_keep_log_spacemaps_at_export 0
- log_must set_tunable64 metaslab_debug_load 0
+ log_must set_tunable64 KEEP_LOG_SPACEMAPS_AT_EXPORT 0
+ log_must set_tunable64 METASLAB_DEBUG_LOAD 0
if poolexists $LOGSM_POOL; then
log_must zpool destroy -f $LOGSM_POOL
fi
@@ -67,7 +67,7 @@ log_must sync
log_must dd if=/dev/urandom of=/$LOGSM_POOL/fs/00 bs=128k count=10
log_must sync
-log_must set_tunable64 zfs_keep_log_spacemaps_at_export 1
+log_must set_tunable64 KEEP_LOG_SPACEMAPS_AT_EXPORT 1
log_must zpool export $LOGSM_POOL
LOGSM_COUNT=$(zdb -m -e $LOGSM_POOL | grep "Log Spacemap object" | wc -l)
@@ -75,7 +75,7 @@ if (( LOGSM_COUNT == 0 )); then
log_fail "Pool does not have any log spacemaps after being exported"
fi
-log_must set_tunable64 metaslab_debug_load 1
+log_must set_tunable64 METASLAB_DEBUG_LOAD 1
log_must zpool import $LOGSM_POOL
log_pass "Log spacemaps imported with no errors"
diff --git a/tests/zfs-tests/tests/functional/mmp/cleanup.ksh b/tests/zfs-tests/tests/functional/mmp/cleanup.ksh
index 8146f773a..b41d6ccbe 100755
--- a/tests/zfs-tests/tests/functional/mmp/cleanup.ksh
+++ b/tests/zfs-tests/tests/functional/mmp/cleanup.ksh
@@ -23,6 +23,6 @@
verify_runnable "global"
-log_must set_tunable64 zfs_multihost_history $MMP_HISTORY_OFF
+log_must set_tunable64 MULTIHOST_HISTORY $MMP_HISTORY_OFF
log_pass "mmp cleanup passed"
diff --git a/tests/zfs-tests/tests/functional/mmp/mmp.kshlib b/tests/zfs-tests/tests/functional/mmp/mmp.kshlib
index fda57c002..661cbf3a5 100644
--- a/tests/zfs-tests/tests/functional/mmp/mmp.kshlib
+++ b/tests/zfs-tests/tests/functional/mmp/mmp.kshlib
@@ -173,8 +173,8 @@ function seconds_mmp_waits_for_activity
typeset seconds=0
typeset devices=${#DISK[@]}
- typeset import_intervals=$(get_tunable zfs_multihost_import_intervals)
- typeset import_interval=$(get_tunable zfs_multihost_interval)
+ typeset import_intervals=$(get_tunable MULTIHOST_IMPORT_INTERVALS)
+ typeset import_interval=$(get_tunable MULTIHOST_INTERVAL)
typeset tmpfile=$(mktemp)
typeset mmp_fail
typeset mmp_write
@@ -241,8 +241,8 @@ function import_activity_check # pool opts act_test_duration
function clear_mmp_history
{
- log_must set_tunable64 zfs_multihost_history $MMP_HISTORY_OFF
- log_must set_tunable64 zfs_multihost_history $MMP_HISTORY
+ log_must set_tunable64 MULTIHOST_HISTORY $MMP_HISTORY_OFF
+ log_must set_tunable64 MULTIHOST_HISTORY $MMP_HISTORY
}
function count_skipped_mmp_writes # pool duration
diff --git a/tests/zfs-tests/tests/functional/mmp/mmp_inactive_import.ksh b/tests/zfs-tests/tests/functional/mmp/mmp_inactive_import.ksh
index 64ed9bf97..6e7bb6375 100755
--- a/tests/zfs-tests/tests/functional/mmp/mmp_inactive_import.ksh
+++ b/tests/zfs-tests/tests/functional/mmp/mmp_inactive_import.ksh
@@ -43,7 +43,7 @@ function cleanup
{
default_cleanup_noexit
log_must mmp_clear_hostid
- log_must set_tunable64 zfs_multihost_interval $MMP_INTERVAL_DEFAULT
+ log_must set_tunable64 MULTIHOST_INTERVAL $MMP_INTERVAL_DEFAULT
}
log_assert "multihost=on|off inactive pool activity checks"
@@ -103,7 +103,7 @@ log_mustnot import_no_activity_check $TESTPOOL "-f"
# 9. Verify activity check duration based on mmp_write and mmp_fail
# Specify a short test via tunables but import pool imported while
# tunables set to default duration.
-log_must set_tunable64 zfs_multihost_interval $MMP_INTERVAL_MIN
+log_must set_tunable64 MULTIHOST_INTERVAL $MMP_INTERVAL_MIN
log_must mmp_clear_hostid
log_must mmp_set_hostid $HOSTID1
log_must import_activity_check $TESTPOOL "-f" $MMP_TEST_DURATION_DEFAULT
diff --git a/tests/zfs-tests/tests/functional/mmp/mmp_interval.ksh b/tests/zfs-tests/tests/functional/mmp/mmp_interval.ksh
index fb44d6191..0c080ab5d 100755
--- a/tests/zfs-tests/tests/functional/mmp/mmp_interval.ksh
+++ b/tests/zfs-tests/tests/functional/mmp/mmp_interval.ksh
@@ -19,11 +19,11 @@
#
# DESCRIPTION:
-# zfs_multihost_interval should only accept valid values.
+# MULTIHOST_INTERVAL should only accept valid values.
#
# STRATEGY:
-# 1. Set zfs_multihost_interval to invalid values (negative).
-# 2. Set zfs_multihost_interval to valid values.
+# 1. Set MULTIHOST_INTERVAL to invalid values (negative).
+# 2. Set MULTIHOST_INTERVAL to valid values.
#
. $STF_SUITE/include/libtest.shlib
@@ -34,14 +34,14 @@ verify_runnable "both"
function cleanup
{
- log_must set_tunable64 zfs_multihost_interval $MMP_INTERVAL_DEFAULT
+ log_must set_tunable64 MULTIHOST_INTERVAL $MMP_INTERVAL_DEFAULT
}
-log_assert "zfs_multihost_interval cannot be set to an invalid value"
+log_assert "MULTIHOST_INTERVAL cannot be set to an invalid value"
log_onexit cleanup
-log_mustnot set_tunable64 zfs_multihost_interval -1
-log_must set_tunable64 zfs_multihost_interval $MMP_INTERVAL_MIN
-log_must set_tunable64 zfs_multihost_interval $MMP_INTERVAL_DEFAULT
+log_mustnot set_tunable64 MULTIHOST_INTERVAL -1
+log_must set_tunable64 MULTIHOST_INTERVAL $MMP_INTERVAL_MIN
+log_must set_tunable64 MULTIHOST_INTERVAL $MMP_INTERVAL_DEFAULT
-log_pass "zfs_multihost_interval cannot be set to an invalid value"
+log_pass "MULTIHOST_INTERVAL cannot be set to an invalid value"
diff --git a/tests/zfs-tests/tests/functional/mmp/mmp_on_off.ksh b/tests/zfs-tests/tests/functional/mmp/mmp_on_off.ksh
index 8bef86a0f..29d771de8 100755
--- a/tests/zfs-tests/tests/functional/mmp/mmp_on_off.ksh
+++ b/tests/zfs-tests/tests/functional/mmp/mmp_on_off.ksh
@@ -23,7 +23,7 @@
#
# STRATEGY:
# 1. Set multihost=off (disables mmp)
-# 2. Set zfs_txg_timeout to large value
+# 2. Set TXG_TIMEOUT to large value
# 3. Create a zpool
# 4. Find the current "best" uberblock
# 5. Sleep for enough time for uberblocks to change
@@ -44,8 +44,8 @@ verify_runnable "both"
function cleanup
{
default_cleanup_noexit
- log_must set_tunable64 zfs_txg_timeout $TXG_TIMEOUT_DEFAULT
- log_must set_tunable64 zfs_multihost_interval $MMP_INTERVAL_DEFAULT
+ log_must set_tunable64 TXG_TIMEOUT $TXG_TIMEOUT_DEFAULT
+ log_must set_tunable64 MULTIHOST_INTERVAL $MMP_INTERVAL_DEFAULT
log_must rm -f $PREV_UBER $CURR_UBER
log_must mmp_clear_hostid
}
@@ -53,8 +53,8 @@ function cleanup
log_assert "mmp thread won't write uberblocks with multihost=off"
log_onexit cleanup
-log_must set_tunable64 zfs_multihost_interval $MMP_INTERVAL_MIN
-log_must set_tunable64 zfs_txg_timeout $TXG_TIMEOUT_LONG
+log_must set_tunable64 MULTIHOST_INTERVAL $MMP_INTERVAL_MIN
+log_must set_tunable64 TXG_TIMEOUT $TXG_TIMEOUT_LONG
log_must mmp_set_hostid $HOSTID1
default_setup_noexit $DISK
diff --git a/tests/zfs-tests/tests/functional/mmp/mmp_on_thread.ksh b/tests/zfs-tests/tests/functional/mmp/mmp_on_thread.ksh
index 07384c623..01cca61c3 100755
--- a/tests/zfs-tests/tests/functional/mmp/mmp_on_thread.ksh
+++ b/tests/zfs-tests/tests/functional/mmp/mmp_on_thread.ksh
@@ -39,7 +39,7 @@ verify_runnable "both"
function cleanup
{
default_cleanup_noexit
- log_must set_tunable64 zfs_txg_timeout $TXG_TIMEOUT_DEFAULT
+ log_must set_tunable64 TXG_TIMEOUT $TXG_TIMEOUT_DEFAULT
log_must rm -f $PREV_UBER $CURR_UBER
log_must mmp_clear_hostid
}
@@ -47,7 +47,7 @@ function cleanup
log_assert "mmp thread writes uberblocks (MMP)"
log_onexit cleanup
-log_must set_tunable64 zfs_txg_timeout $TXG_TIMEOUT_LONG
+log_must set_tunable64 TXG_TIMEOUT $TXG_TIMEOUT_LONG
log_must mmp_set_hostid $HOSTID1
default_setup_noexit $DISK
diff --git a/tests/zfs-tests/tests/functional/mmp/mmp_on_uberblocks.ksh b/tests/zfs-tests/tests/functional/mmp/mmp_on_uberblocks.ksh
index 9c4552b0c..007288a78 100755
--- a/tests/zfs-tests/tests/functional/mmp/mmp_on_uberblocks.ksh
+++ b/tests/zfs-tests/tests/functional/mmp/mmp_on_uberblocks.ksh
@@ -22,7 +22,7 @@
# Ensure that MMP updates uberblocks with MMP info at expected intervals.
#
# STRATEGY:
-# 1. Set zfs_txg_timeout to large value
+# 1. Set TXG_TIMEOUT to large value
# 2. Create a zpool
# 3. Clear multihost history
# 4. Sleep, then collect count of uberblocks written
@@ -47,15 +47,15 @@ MIN_SEQ_VALUES=7
function cleanup
{
default_cleanup_noexit
- log_must set_tunable64 zfs_multihost_interval $MMP_INTERVAL_DEFAULT
- set_tunable64 zfs_txg_timeout $TXG_TIMEOUT_DEFAULT
+ log_must set_tunable64 MULTIHOST_INTERVAL $MMP_INTERVAL_DEFAULT
+ set_tunable64 TXG_TIMEOUT $TXG_TIMEOUT_DEFAULT
log_must mmp_clear_hostid
}
log_assert "Ensure MMP uberblocks update at the correct interval"
log_onexit cleanup
-log_must set_tunable64 zfs_txg_timeout $TXG_TIMEOUT_LONG
+log_must set_tunable64 TXG_TIMEOUT $TXG_TIMEOUT_LONG
log_must mmp_set_hostid $HOSTID1
default_setup_noexit "$DISKS"
@@ -73,7 +73,7 @@ if [ $UBER_CHANGES -gt $MAX_UB_WRITES ]; then
log_fail "More uberblock writes occurred than expected ($EXPECTED)"
fi
-log_must set_tunable64 zfs_multihost_interval $MMP_INTERVAL_MIN
+log_must set_tunable64 MULTIHOST_INTERVAL $MMP_INTERVAL_MIN
SEQ_BEFORE=$(zdb -luuuu ${DISK[0]} | awk '/mmp_seq/ {if ($NF>max) max=$NF}; END {print max}')
sleep 1
SEQ_AFTER=$(zdb -luuuu ${DISK[0]} | awk '/mmp_seq/ {if ($NF>max) max=$NF}; END {print max}')
diff --git a/tests/zfs-tests/tests/functional/mmp/mmp_reset_interval.ksh b/tests/zfs-tests/tests/functional/mmp/mmp_reset_interval.ksh
index 842df284b..6e3d1fe34 100755
--- a/tests/zfs-tests/tests/functional/mmp/mmp_reset_interval.ksh
+++ b/tests/zfs-tests/tests/functional/mmp/mmp_reset_interval.ksh
@@ -19,15 +19,15 @@
#
# DESCRIPTION:
-# Ensure that the MMP thread is notified when zfs_multihost_interval is
-# reduced, and that changes to zfs_multihost_interval and
-# zfs_multihost_fail_intervals do not trigger pool suspensions.
+# Ensure that the MMP thread is notified when MULTIHOST_INTERVAL is
+# reduced, and that changes to MULTIHOST_INTERVAL and
+# MULTIHOST_FAIL_INTERVALS do not trigger pool suspensions.
#
# STRATEGY:
-# 1. Set zfs_multihost_interval to much longer than the test duration
+# 1. Set MULTIHOST_INTERVAL to much longer than the test duration
# 2. Create a zpool and enable multihost
# 3. Verify no MMP writes occurred
-# 4. Set zfs_multihost_interval to 1 second
+# 4. Set MULTIHOST_INTERVAL to 1 second
# 5. Sleep briefly
# 6. Verify MMP writes began
# 7. Verify mmp_fail and mmp_write in uberblock reflect tunables
@@ -43,34 +43,34 @@ verify_runnable "both"
function cleanup
{
default_cleanup_noexit
- log_must set_tunable64 zfs_multihost_interval $MMP_INTERVAL_DEFAULT
- log_must set_tunable64 zfs_multihost_fail_intervals \
+ log_must set_tunable64 MULTIHOST_INTERVAL $MMP_INTERVAL_DEFAULT
+ log_must set_tunable64 MULTIHOST_FAIL_INTERVALS \
$MMP_FAIL_INTERVALS_DEFAULT
log_must mmp_clear_hostid
}
-log_assert "mmp threads notified when zfs_multihost_interval reduced"
+log_assert "mmp threads notified when MULTIHOST_INTERVAL reduced"
log_onexit cleanup
-log_must set_tunable64 zfs_multihost_interval $MMP_INTERVAL_HOUR
+log_must set_tunable64 MULTIHOST_INTERVAL $MMP_INTERVAL_HOUR
log_must mmp_set_hostid $HOSTID1
default_setup_noexit $DISK
log_must zpool set multihost=on $TESTPOOL
clear_mmp_history
-log_must set_tunable64 zfs_multihost_interval $MMP_INTERVAL_DEFAULT
+log_must set_tunable64 MULTIHOST_INTERVAL $MMP_INTERVAL_DEFAULT
uber_count=$(count_mmp_writes $TESTPOOL 1)
if [ $uber_count -eq 0 ]; then
- log_fail "ERROR: mmp writes did not start when zfs_multihost_interval reduced"
+ log_fail "ERROR: mmp writes did not start when MULTIHOST_INTERVAL reduced"
fi
# 7. Verify mmp_write and mmp_fail are written
for fails in $(seq $MMP_FAIL_INTERVALS_MIN $((MMP_FAIL_INTERVALS_MIN*2))); do
for interval in $(seq $MMP_INTERVAL_MIN 200 $MMP_INTERVAL_DEFAULT); do
- log_must set_tunable64 zfs_multihost_fail_intervals $fails
- log_must set_tunable64 zfs_multihost_interval $interval
+ log_must set_tunable64 MULTIHOST_FAIL_INTERVALS $fails
+ log_must set_tunable64 MULTIHOST_INTERVAL $interval
log_must sync_pool $TESTPOOL
typeset mmp_fail=$(zdb $TESTPOOL 2>/dev/null |
awk '/mmp_fail/ {print $NF}')
@@ -86,10 +86,10 @@ for fails in $(seq $MMP_FAIL_INTERVALS_MIN $((MMP_FAIL_INTERVALS_MIN*2))); do
done
-# 8. Repeatedly change zfs_multihost_interval and fail_intervals
+# 8. Repeatedly change MULTIHOST_INTERVAL and fail_intervals
for x in $(seq 10); do
typeset new_interval=$(( (RANDOM % 20 + 1) * $MMP_INTERVAL_MIN ))
- log_must set_tunable64 zfs_multihost_interval $new_interval
+ log_must set_tunable64 MULTIHOST_INTERVAL $new_interval
typeset action=$((RANDOM %10))
if [ $action -eq 0 ]; then
log_must zpool export -a
@@ -106,14 +106,14 @@ for x in $(seq 10); do
log_must zpool import -f $TESTPOOL
elif [ $action -eq 3 ]; then
log_must zpool export -F $TESTPOOL
- log_must set_tunable64 zfs_multihost_interval $MMP_INTERVAL_MIN
+ log_must set_tunable64 MULTIHOST_INTERVAL $MMP_INTERVAL_MIN
log_must zpool import $TESTPOOL
elif [ $action -eq 4 ]; then
- log_must set_tunable64 zfs_multihost_fail_intervals \
+ log_must set_tunable64 MULTIHOST_FAIL_INTERVALS \
$((RANDOM % MMP_FAIL_INTERVALS_DEFAULT))
fi
sleep 5
done
-log_pass "mmp threads notified when zfs_multihost_interval reduced"
+log_pass "mmp threads notified when MULTIHOST_INTERVAL reduced"
diff --git a/tests/zfs-tests/tests/functional/mmp/mmp_write_distribution.ksh b/tests/zfs-tests/tests/functional/mmp/mmp_write_distribution.ksh
index 7504caa4d..b6bdc6811 100755
--- a/tests/zfs-tests/tests/functional/mmp/mmp_write_distribution.ksh
+++ b/tests/zfs-tests/tests/functional/mmp/mmp_write_distribution.ksh
@@ -57,8 +57,8 @@ log_must zpool create -f $MMP_POOL mirror $MMP_DIR/file.{0,1} mirror $MMP_DIR/fi
# Step 2
log_must mmp_set_hostid $HOSTID1
log_must zpool set multihost=on $MMP_POOL
-set_tunable64 zfs_multihost_history 0
-set_tunable64 zfs_multihost_history 40
+set_tunable64 MULTIHOST_HISTORY 0
+set_tunable64 MULTIHOST_HISTORY 40
# Step 3
# default settings, every leaf written once/second
diff --git a/tests/zfs-tests/tests/functional/mmp/setup.ksh b/tests/zfs-tests/tests/functional/mmp/setup.ksh
index c91f61979..b1e5431c8 100755
--- a/tests/zfs-tests/tests/functional/mmp/setup.ksh
+++ b/tests/zfs-tests/tests/functional/mmp/setup.ksh
@@ -27,8 +27,8 @@ if [ -e $HOSTID_FILE ]; then
log_unsupported "System has existing $HOSTID_FILE file"
fi
-log_must set_tunable64 zfs_multihost_history $MMP_HISTORY
-log_must set_tunable64 zfs_multihost_interval $MMP_INTERVAL_DEFAULT
-log_must set_tunable64 zfs_multihost_fail_intervals $MMP_FAIL_INTERVALS_DEFAULT
+log_must set_tunable64 MULTIHOST_HISTORY $MMP_HISTORY
+log_must set_tunable64 MULTIHOST_INTERVAL $MMP_INTERVAL_DEFAULT
+log_must set_tunable64 MULTIHOST_FAIL_INTERVALS $MMP_FAIL_INTERVALS_DEFAULT
log_pass "mmp setup pass"
diff --git a/tests/zfs-tests/tests/functional/mount/umount_unlinked_drain.ksh b/tests/zfs-tests/tests/functional/mount/umount_unlinked_drain.ksh
index 0d2628079..6130e2c82 100755
--- a/tests/zfs-tests/tests/functional/mount/umount_unlinked_drain.ksh
+++ b/tests/zfs-tests/tests/functional/mount/umount_unlinked_drain.ksh
@@ -32,7 +32,7 @@
function cleanup
{
- log_must set_tunable32 zfs_unlink_suspend_progress $default_unlink_sp
+ log_must set_tunable32 UNLINK_SUSPEND_PROGRESS $default_unlink_sp
for fs in $(seq 1 3); do
mounted $TESTDIR.$fs || zfs mount $TESTPOOL/$TESTFS.$fs
rm -f $TESTDIR.$fs/file-*
@@ -66,8 +66,7 @@ function unlinked_size_is
}
-UNLINK_SP_PARAM=/sys/module/zfs/parameters/zfs_unlink_suspend_progress
-default_unlink_sp=$(get_tunable zfs_unlink_suspend_progress)
+default_unlink_sp=$(get_tunable UNLINK_SUSPEND_PROGRESS)
log_onexit cleanup
@@ -89,7 +88,7 @@ for fs in 1 2 3; do
log_must xattrtest -f 175 -x 3 -r -k -p $TESTDIR.$fs
fi
- log_must set_tunable32 zfs_unlink_suspend_progress 1
+ log_must set_tunable32 UNLINK_SUSPEND_PROGRESS 1
log_must unlinked_size_is 0 $TESTPOOL $TESTPOOL/$TESTFS.$fs
# build up unlinked set
@@ -106,7 +105,7 @@ for fs in 1 2 3; do
log_must unlinked_size_is 100 $TESTPOOL $TESTPOOL/$TESTFS.$fs
# confirm we can drain and add to unlinked set at the same time
- log_must set_tunable32 zfs_unlink_suspend_progress 0
+ log_must set_tunable32 UNLINK_SUSPEND_PROGRESS 0
log_must zfs umount $TESTPOOL/$TESTFS.$fs
log_must zfs mount $TESTPOOL/$TESTFS.$fs
for fn in $(seq 101 175); do
diff --git a/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_capacity.ksh b/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_capacity.ksh
index c473451c2..6dd88f72d 100755
--- a/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_capacity.ksh
+++ b/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_capacity.ksh
@@ -46,7 +46,7 @@
function test_cleanup
{
poolexists $NESTEDPOOL && destroy_pool $NESTEDPOOL
- log_must set_tunable32 spa_asize_inflation 24
+ log_must set_tunable32 SPA_ASIZE_INFLATION 24
cleanup_test_pool
}
@@ -54,7 +54,7 @@ verify_runnable "global"
setup_test_pool
log_onexit test_cleanup
-log_must set_tunable32 spa_asize_inflation 4
+log_must set_tunable32 SPA_ASIZE_INFLATION 4
log_must zfs create $DISKFS
diff --git a/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_discard_busy.ksh b/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_discard_busy.ksh
index f1abad063..ae099ff27 100755
--- a/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_discard_busy.ksh
+++ b/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_discard_busy.ksh
@@ -41,7 +41,7 @@ verify_runnable "global"
function test_cleanup
{
# reset memory limit to 16M
- set_tunable64 zfs_spa_discard_memory_limit 1000000
+ set_tunable64 SPA_DISCARD_MEMORY_LIMIT 1000000
cleanup_nested_pools
}
@@ -67,7 +67,7 @@ log_onexit test_cleanup
# map, we should have even more time to
# verify this.
#
-set_tunable64 zfs_spa_discard_memory_limit 128
+set_tunable64 SPA_DISCARD_MEMORY_LIMIT 128
log_must zpool checkpoint $NESTEDPOOL
@@ -100,7 +100,7 @@ log_mustnot zpool remove $NESTEDPOOL $FILEDISK1
log_mustnot zpool reguid $NESTEDPOOL
# reset memory limit to 16M
-set_tunable64 zfs_spa_discard_memory_limit 16777216
+set_tunable64 SPA_DISCARD_MEMORY_LIMIT 16777216
nested_wait_discard_finish
diff --git a/tests/zfs-tests/tests/functional/redacted_send/cleanup.ksh b/tests/zfs-tests/tests/functional/redacted_send/cleanup.ksh
index 596c661ed..1a7c142b8 100755
--- a/tests/zfs-tests/tests/functional/redacted_send/cleanup.ksh
+++ b/tests/zfs-tests/tests/functional/redacted_send/cleanup.ksh
@@ -28,6 +28,6 @@
destroy_pool $POOL
destroy_pool $POOL2
-log_must set_tunable32 zfs_allow_redacted_dataset_mount 0
+log_must set_tunable32 ALLOW_REDACTED_DATASET_MOUNT 0
log_pass
diff --git a/tests/zfs-tests/tests/functional/redacted_send/redacted.kshlib b/tests/zfs-tests/tests/functional/redacted_send/redacted.kshlib
index 5150ec5d8..1a942a634 100644
--- a/tests/zfs-tests/tests/functional/redacted_send/redacted.kshlib
+++ b/tests/zfs-tests/tests/functional/redacted_send/redacted.kshlib
@@ -186,9 +186,9 @@ function mount_redacted
shift $(($OPTIND - 1))
typeset ds=$1
- log_must set_tunable32 zfs_allow_redacted_dataset_mount 1
+ log_must set_tunable32 ALLOW_REDACTED_DATASET_MOUNT 1
zfs mount $flag -oro $ds || return 1
- log_must set_tunable32 zfs_allow_redacted_dataset_mount 0
+ log_must set_tunable32 ALLOW_REDACTED_DATASET_MOUNT 0
return 0
}
@@ -245,7 +245,7 @@ function redacted_cleanup
datasetexists $ds && log_must zfs destroy -R $ds
done
- log_must set_tunable32 zfs_allow_redacted_dataset_mount 0
+ log_must set_tunable32 ALLOW_REDACTED_DATASET_MOUNT 0
rm -f $(get_prop mountpoint $POOL)/tmp/*
}
diff --git a/tests/zfs-tests/tests/functional/redacted_send/redacted_mounts.ksh b/tests/zfs-tests/tests/functional/redacted_send/redacted_mounts.ksh
index 205797184..0bc4bf461 100755
--- a/tests/zfs-tests/tests/functional/redacted_send/redacted_mounts.ksh
+++ b/tests/zfs-tests/tests/functional/redacted_send/redacted_mounts.ksh
@@ -20,11 +20,11 @@
#
# Description:
# Verify that received redacted datasets are not mounted by default, but
-# can still be mounted after setting zfs_allow_redacted_dataset_mount.
+# can still be mounted after setting ALLOW_REDACTED_DATASET_MOUNT.
#
# Strategy:
# 1. Verify a received redacted stream isn't mounted by default.
-# 2. Set zfs_allow_redacted_dataset_mount and verify it can't be mounted
+# 2. Set ALLOW_REDACTED_DATASET_MOUNT and verify it can't be mounted
# without the -f flag, but can with -f.
# 3. Receive a redacted volume.
# 4. Verify the device file isn't present until the kernel variable is set.
@@ -77,7 +77,7 @@ log_must zfs redact $sendvol@snap book2 $clonevol@snap
log_must eval "zfs send --redact book2 $sendvol@snap >$stream"
log_must eval "zfs receive $recvvol <$stream"
is_disk_device $recv_vol_file && log_fail "Volume device file should not exist."
-log_must set_tunable32 zfs_allow_redacted_dataset_mount 1
+log_must set_tunable32 ALLOW_REDACTED_DATASET_MOUNT 1
log_must zpool export $POOL2
log_must zpool import $POOL2
udevadm settle
diff --git a/tests/zfs-tests/tests/functional/redacted_send/redacted_resume.ksh b/tests/zfs-tests/tests/functional/redacted_send/redacted_resume.ksh
index 7e043e176..8118ea59e 100755
--- a/tests/zfs-tests/tests/functional/redacted_send/redacted_resume.ksh
+++ b/tests/zfs-tests/tests/functional/redacted_send/redacted_resume.ksh
@@ -50,7 +50,7 @@ log_must zfs snapshot $clone@snap1
log_must zfs redact $sendfs@snap book1 $clone@snap1
resume_test "zfs send --redact book1 $sendfs@snap" $tmpdir $recvfs
log_must mount_redacted -f $recvfs
-log_must set_tunable32 zfs_allow_redacted_dataset_mount 1
+log_must set_tunable32 ALLOW_REDACTED_DATASET_MOUNT 1
log_must diff $send_mnt/f1 $recv_mnt/f1
log_must eval "get_diff $send_mnt/f2 $recv_mnt/f2 >$tmpdir/get_diff.out"
typeset range=$(cat $tmpdir/get_diff.out)
diff --git a/tests/zfs-tests/tests/functional/redacted_send/redacted_volume.ksh b/tests/zfs-tests/tests/functional/redacted_send/redacted_volume.ksh
index 13453fa36..2ea10638c 100755
--- a/tests/zfs-tests/tests/functional/redacted_send/redacted_volume.ksh
+++ b/tests/zfs-tests/tests/functional/redacted_send/redacted_volume.ksh
@@ -58,7 +58,7 @@ log_must zfs snapshot $sendvol@snap
log_must zfs clone $sendvol@snap $clone
log_must zfs snapshot $clone@snap
-log_must set_tunable32 zfs_allow_redacted_dataset_mount 1
+log_must set_tunable32 ALLOW_REDACTED_DATASET_MOUNT 1
log_must zfs redact $sendvol@snap book1 $clone@snap
log_must eval "zfs send --redact book1 $sendvol@snap >$stream"
log_must eval "zfs recv $recvvol <$stream"
diff --git a/tests/zfs-tests/tests/functional/refquota/refquota_008_neg.ksh b/tests/zfs-tests/tests/functional/refquota/refquota_008_neg.ksh
index e7f40ec71..6e4da3621 100755
--- a/tests/zfs-tests/tests/functional/refquota/refquota_008_neg.ksh
+++ b/tests/zfs-tests/tests/functional/refquota/refquota_008_neg.ksh
@@ -39,10 +39,10 @@
verify_runnable "both"
-oldvalue=$(get_tunable spa_asize_inflation)
+oldvalue=$(get_tunable SPA_ASIZE_INFLATION)
function cleanup
{
- set_tunable32 spa_asize_inflation $oldvalue
+ set_tunable32 SPA_ASIZE_INFLATION $oldvalue
log_must zfs destroy -rf $TESTPOOL/$TESTFS
log_must zfs create $TESTPOOL/$TESTFS
log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
@@ -50,7 +50,7 @@ function cleanup
log_onexit cleanup
-set_tunable32 spa_asize_inflation 2
+set_tunable32 SPA_ASIZE_INFLATION 2
TESTFILE='testfile'
FS=$TESTPOOL/$TESTFS
diff --git a/tests/zfs-tests/tests/functional/removal/removal.kshlib b/tests/zfs-tests/tests/functional/removal/removal.kshlib
index 31e31ace6..e1f43fbe7 100644
--- a/tests/zfs-tests/tests/functional/removal/removal.kshlib
+++ b/tests/zfs-tests/tests/functional/removal/removal.kshlib
@@ -60,7 +60,7 @@ function attempt_during_removal # pool disk callback [args]
typeset callback=$3
shift 3
- set_tunable32 zfs_removal_suspend_progress 1
+ set_tunable32 REMOVAL_SUSPEND_PROGRESS 1
log_must zpool remove $pool $disk
@@ -79,7 +79,7 @@ function attempt_during_removal # pool disk callback [args]
#
log_must is_pool_removing $pool
- set_tunable32 zfs_removal_suspend_progress 0
+ set_tunable32 REMOVAL_SUSPEND_PROGRESS 0
log_must wait_for_removal $pool
log_mustnot vdevs_in_pool $pool $disk
diff --git a/tests/zfs-tests/tests/functional/removal/removal_cancel.ksh b/tests/zfs-tests/tests/functional/removal/removal_cancel.ksh
index afb318ef2..fdcaef635 100755
--- a/tests/zfs-tests/tests/functional/removal/removal_cancel.ksh
+++ b/tests/zfs-tests/tests/functional/removal/removal_cancel.ksh
@@ -42,7 +42,7 @@ function cleanup
#
# Reset tunable.
#
- log_must set_tunable32 zfs_removal_suspend_progress 0
+ log_must set_tunable32 REMOVAL_SUSPEND_PROGRESS 0
}
log_onexit cleanup
@@ -79,7 +79,7 @@ log_must sleep 1
#
# Block removal.
#
-log_must set_tunable32 zfs_removal_suspend_progress 1
+log_must set_tunable32 REMOVAL_SUSPEND_PROGRESS 1
#
# Only for debugging purposes in test logs.
diff --git a/tests/zfs-tests/tests/functional/removal/removal_condense_export.ksh b/tests/zfs-tests/tests/functional/removal/removal_condense_export.ksh
index b5c2ed0ba..7648900ac 100755
--- a/tests/zfs-tests/tests/functional/removal/removal_condense_export.ksh
+++ b/tests/zfs-tests/tests/functional/removal/removal_condense_export.ksh
@@ -23,15 +23,15 @@
function reset
{
- log_must set_tunable64 zfs_condense_indirect_commit_entry_delay_ms 0
- log_must set_tunable64 zfs_condense_min_mapping_bytes 131072
+ log_must set_tunable64 CONDENSE_INDIRECT_COMMIT_ENTRY_DELAY_MS 0
+ log_must set_tunable64 CONDENSE_MIN_MAPPING_BYTES 131072
default_cleanup_noexit
}
default_setup_noexit "$DISKS" "true"
log_onexit reset
-log_must set_tunable64 zfs_condense_indirect_commit_entry_delay_ms 5000
-log_must set_tunable64 zfs_condense_min_mapping_bytes 1
+log_must set_tunable64 CONDENSE_INDIRECT_COMMIT_ENTRY_DELAY_MS 5000
+log_must set_tunable64 CONDENSE_MIN_MAPPING_BYTES 1
log_must zfs set recordsize=512 $TESTPOOL/$TESTFS
diff --git a/tests/zfs-tests/tests/functional/removal/removal_multiple_indirection.ksh b/tests/zfs-tests/tests/functional/removal/removal_multiple_indirection.ksh
index 97b67a462..6c52fd781 100755
--- a/tests/zfs-tests/tests/functional/removal/removal_multiple_indirection.ksh
+++ b/tests/zfs-tests/tests/functional/removal/removal_multiple_indirection.ksh
@@ -57,14 +57,14 @@ function cleanup
default_cleanup_noexit
log_must rm -f $DISKS
- # reset zfs_remove_max_segment to 1M
- set_tunable32 zfs_remove_max_segment 1048576
+ # reset REMOVE_MAX_SEGMENT to 1M
+ set_tunable32 REMOVE_MAX_SEGMENT 1048576
}
log_onexit cleanup
-# set zfs_remove_max_segment to 32k
-log_must set_tunable32 zfs_remove_max_segment 32768
+# set REMOVE_MAX_SEGMENT to 32k
+log_must set_tunable32 REMOVE_MAX_SEGMENT 32768
log_must dd if=/dev/urandom of=$TESTDIR/$TESTFILE0 bs=128k count=1
FILE_CONTENTS=$(<$TESTDIR/$TESTFILE0)
diff --git a/tests/zfs-tests/tests/functional/removal/removal_with_ganging.ksh b/tests/zfs-tests/tests/functional/removal/removal_with_ganging.ksh
index 35c90e6a5..e3e635998 100755
--- a/tests/zfs-tests/tests/functional/removal/removal_with_ganging.ksh
+++ b/tests/zfs-tests/tests/functional/removal/removal_with_ganging.ksh
@@ -23,12 +23,12 @@
function cleanup
{
- log_must set_tunable64 metaslab_force_ganging $((2**17 + 1))
+ log_must set_tunable64 METASLAB_FORCE_GANGING $((2**17 + 1))
default_cleanup_noexit
}
default_setup_noexit "$DISKS"
-log_must set_tunable64 metaslab_force_ganging $((2**14))
+log_must set_tunable64 METASLAB_FORCE_GANGING $((2**14))
log_onexit cleanup
FILE_CONTENTS="Leeloo Dallas mul-ti-pass."
diff --git a/tests/zfs-tests/tests/functional/resilver/resilver_restart_001.ksh b/tests/zfs-tests/tests/functional/resilver/resilver_restart_001.ksh
index 876b28690..6c37dc4e5 100755
--- a/tests/zfs-tests/tests/functional/resilver/resilver_restart_001.ksh
+++ b/tests/zfs-tests/tests/functional/resilver/resilver_restart_001.ksh
@@ -45,10 +45,10 @@
function cleanup
{
- log_must set_tunable32 zfs_resilver_min_time_ms $ORIG_RESILVER_MIN_TIME
- log_must set_tunable32 zfs_scan_suspend_progress \
+ log_must set_tunable32 RESILVER_MIN_TIME_MS $ORIG_RESILVER_MIN_TIME
+ log_must set_tunable32 SCAN_SUSPEND_PROGRESS \
$ORIG_SCAN_SUSPEND_PROGRESS
- log_must set_tunable32 zfs_zevent_len_max $ORIG_ZFS_ZEVENT_LEN_MAX
+ log_must set_tunable32 ZEVENT_LEN_MAX $ORIG_ZFS_ZEVENT_LEN_MAX
log_must zinject -c all
destroy_pool $TESTPOOL
rm -f ${VDEV_FILES[@]} $SPARE_VDEV_FILE
@@ -87,9 +87,9 @@ function verify_restarts # <msg> <cnt> <defer>
log_assert "Check for unnecessary resilver restarts"
-ORIG_RESILVER_MIN_TIME=$(get_tunable zfs_resilver_min_time_ms)
-ORIG_SCAN_SUSPEND_PROGRESS=$(get_tunable zfs_scan_suspend_progress)
-ORIG_ZFS_ZEVENT_LEN_MAX=$(get_tunable zfs_zevent_len_max)
+ORIG_RESILVER_MIN_TIME=$(get_tunable RESILVER_MIN_TIME_MS)
+ORIG_SCAN_SUSPEND_PROGRESS=$(get_tunable SCAN_SUSPEND_PROGRESS)
+ORIG_ZFS_ZEVENT_LEN_MAX=$(get_tunable ZEVENT_LEN_MAX)
set -A RESTARTS -- '1' '2' '2' '2'
set -A VDEVS -- '' '' '' ''
@@ -101,7 +101,7 @@ VDEV_REPLACE="${VDEV_FILES[1]} $SPARE_VDEV_FILE"
log_onexit cleanup
# ensure that enough events will be saved
-log_must set_tunable32 zfs_zevent_len_max 512
+log_must set_tunable32 ZEVENT_LEN_MAX 512
log_must truncate -s $VDEV_FILE_SIZE ${VDEV_FILES[@]} $SPARE_VDEV_FILE
@@ -140,11 +140,11 @@ do
log_must zpool events -c
# limit scanning time
- log_must set_tunable32 zfs_resilver_min_time_ms 50
+ log_must set_tunable32 RESILVER_MIN_TIME_MS 50
# initiate a resilver and suspend the scan as soon as possible
log_must zpool replace $TESTPOOL $VDEV_REPLACE
- log_must set_tunable32 zfs_scan_suspend_progress 1
+ log_must set_tunable32 SCAN_SUSPEND_PROGRESS 1
# there should only be 1 resilver start
verify_restarts '' "${RESTARTS[0]}" "${VDEVS[0]}"
@@ -168,8 +168,8 @@ do
verify_restarts ' after zinject' "${RESTARTS[2]}" "${VDEVS[2]}"
# unsuspend resilver
- log_must set_tunable32 zfs_scan_suspend_progress 0
- log_must set_tunable32 zfs_resilver_min_time_ms 3000
+ log_must set_tunable32 SCAN_SUSPEND_PROGRESS 0
+ log_must set_tunable32 RESILVER_MIN_TIME_MS 3000
# wait for resilver to finish
for iter in {0..59}
diff --git a/tests/zfs-tests/tests/functional/rsend/send_hole_birth.ksh b/tests/zfs-tests/tests/functional/rsend/send_hole_birth.ksh
index c2b5ff7a0..cb2a982b6 100755
--- a/tests/zfs-tests/tests/functional/rsend/send_hole_birth.ksh
+++ b/tests/zfs-tests/tests/functional/rsend/send_hole_birth.ksh
@@ -53,7 +53,7 @@ function cleanup
{
cleanup_pool $sendpool
cleanup_pool $recvpool
- set_tunable64 send_holes_without_birth_time 1
+ set_tunable64 SEND_HOLES_WITHOUT_BIRTH_TIME 1
}
function send_and_verify
@@ -72,7 +72,7 @@ function send_and_verify
# to be re-enabled for this test case to verify correctness. Once we're
# comfortable that all hole_birth bugs has been resolved this behavior may
# be re-enabled by default.
-log_must set_tunable64 send_holes_without_birth_time 0
+log_must set_tunable64 SEND_HOLES_WITHOUT_BIRTH_TIME 0
# Incremental send truncating the file and adding new data.
log_must zfs create -o recordsize=4k $sendfs
diff --git a/tests/zfs-tests/tests/functional/slog/slog_015_neg.ksh b/tests/zfs-tests/tests/functional/slog/slog_015_neg.ksh
index ebca409a1..04fb225ed 100755
--- a/tests/zfs-tests/tests/functional/slog/slog_015_neg.ksh
+++ b/tests/zfs-tests/tests/functional/slog/slog_015_neg.ksh
@@ -40,22 +40,17 @@ function cleanup
#
wait
- set_tunable64 $COMMIT_TIMEOUT_PCT $ORIG_TIMEOUT
+ set_tunable64 COMMIT_TIMEOUT_PCT $ORIG_TIMEOUT
poolexists $TESTPOOL && zpool destroy -f $TESTPOOL
}
-if is_freebsd; then
- typeset COMMIT_TIMEOUT_PCT=commit_timeout_pct
-else
- typeset COMMIT_TIMEOUT_PCT=zfs_commit_timeout_pct
-fi
-typeset ORIG_TIMEOUT=$(get_tunable $COMMIT_TIMEOUT_PCT)
+typeset ORIG_TIMEOUT=$(get_tunable COMMIT_TIMEOUT_PCT)
log_onexit cleanup
log_must setup
for PCT in 0 1 2 4 8 16 32 64 128 256 512 1024; do
- log_must set_tunable64 $COMMIT_TIMEOUT_PCT $PCT
+ log_must set_tunable64 COMMIT_TIMEOUT_PCT $PCT
log_must zpool create $TESTPOOL $VDEV log $SDEV
diff --git a/tests/zfs-tests/tests/functional/snapshot/cleanup.ksh b/tests/zfs-tests/tests/functional/snapshot/cleanup.ksh
index 12d950999..530a78533 100755
--- a/tests/zfs-tests/tests/functional/snapshot/cleanup.ksh
+++ b/tests/zfs-tests/tests/functional/snapshot/cleanup.ksh
@@ -32,7 +32,7 @@
. $STF_SUITE/include/libtest.shlib
if is_linux; then
- log_must set_tunable64 zfs_admin_snapshot 0
+ log_must set_tunable64 ADMIN_SNAPSHOT 0
fi
default_container_cleanup
diff --git a/tests/zfs-tests/tests/functional/snapshot/clone_001_pos.ksh b/tests/zfs-tests/tests/functional/snapshot/clone_001_pos.ksh
index 63bd98147..3abdff8c3 100755
--- a/tests/zfs-tests/tests/functional/snapshot/clone_001_pos.ksh
+++ b/tests/zfs-tests/tests/functional/snapshot/clone_001_pos.ksh
@@ -64,13 +64,13 @@ function setup_all
if is_freebsd; then
# Pool creation on zvols is forbidden by default.
# Save and the current setting.
- typeset _saved=$(get_tunable vol.recursive)
- log_must set_tunable64 vol.recursive 1
+ typeset _saved=$(get_tunable VOL_RECURSIVE)
+ log_must set_tunable64 VOL_RECURSIVE 1
fi
create_pool $TESTPOOL1 ${ZVOL_DEVDIR}/$TESTPOOL/$TESTVOL
if is_freebsd; then
# Restore the previous setting.
- log_must set_tunable64 vol.recursive $_saved
+ log_must set_tunable64 VOL_RECURSIVE $_saved
fi
log_must zfs create $TESTPOOL1/$TESTFS
log_must zfs set mountpoint=$TESTDIR2 $TESTPOOL1/$TESTFS
diff --git a/tests/zfs-tests/tests/functional/snapshot/setup.ksh b/tests/zfs-tests/tests/functional/snapshot/setup.ksh
index 6f0646737..a73d1aff3 100755
--- a/tests/zfs-tests/tests/functional/snapshot/setup.ksh
+++ b/tests/zfs-tests/tests/functional/snapshot/setup.ksh
@@ -34,7 +34,7 @@
DISK=${DISKS%% *}
if is_linux; then
- log_must set_tunable64 zfs_admin_snapshot 1
+ log_must set_tunable64 ADMIN_SNAPSHOT 1
fi
default_container_volume_setup ${DISK}
diff --git a/tests/zfs-tests/tests/functional/trim/autotrim_config.ksh b/tests/zfs-tests/tests/functional/trim/autotrim_config.ksh
index e41e32568..d48ee45d0 100755
--- a/tests/zfs-tests/tests/functional/trim/autotrim_config.ksh
+++ b/tests/zfs-tests/tests/functional/trim/autotrim_config.ksh
@@ -49,23 +49,23 @@ function cleanup
log_must rm -f $TRIM_VDEVS
- log_must set_tunable64 zfs_trim_extent_bytes_min $trim_extent_bytes_min
- log_must set_tunable64 zfs_trim_txg_batch $trim_txg_batch
- log_must set_tunable64 zfs_vdev_min_ms_count $vdev_min_ms_count
+ log_must set_tunable64 TRIM_EXTENT_BYTES_MIN $trim_extent_bytes_min
+ log_must set_tunable64 TRIM_TXG_BATCH $trim_txg_batch
+ log_must set_tunable64 VDEV_MIN_MS_COUNT $vdev_min_ms_count
}
log_onexit cleanup
# Minimum trim size is decreased to verify all trim sizes.
-typeset trim_extent_bytes_min=$(get_tunable zfs_trim_extent_bytes_min)
-log_must set_tunable64 zfs_trim_extent_bytes_min 4096
+typeset trim_extent_bytes_min=$(get_tunable TRIM_EXTENT_BYTES_MIN)
+log_must set_tunable64 TRIM_EXTENT_BYTES_MIN 4096
-# Reduced zfs_trim_txg_batch to make trimming more frequent.
-typeset trim_txg_batch=$(get_tunable zfs_trim_txg_batch)
-log_must set_tunable64 zfs_trim_txg_batch 8
+# Reduced TRIM_TXG_BATCH to make trimming more frequent.
+typeset trim_txg_batch=$(get_tunable TRIM_TXG_BATCH)
+log_must set_tunable64 TRIM_TXG_BATCH 8
# Increased metaslabs to better simulate larger more realistic devices.
-typeset vdev_min_ms_count=$(get_tunable zfs_vdev_min_ms_count)
-log_must set_tunable64 zfs_vdev_min_ms_count 32
+typeset vdev_min_ms_count=$(get_tunable VDEV_MIN_MS_COUNT)
+log_must set_tunable64 VDEV_MIN_MS_COUNT 32
typeset VDEV_MAX_MB=$(( floor(4 * MINVDEVSIZE * 0.75 / 1024 / 1024) ))
typeset VDEV_MIN_MB=$(( floor(4 * MINVDEVSIZE * 0.30 / 1024 / 1024) ))
diff --git a/tests/zfs-tests/tests/functional/trim/autotrim_integrity.ksh b/tests/zfs-tests/tests/functional/trim/autotrim_integrity.ksh
index c7b3da7c0..6af877241 100755
--- a/tests/zfs-tests/tests/functional/trim/autotrim_integrity.ksh
+++ b/tests/zfs-tests/tests/functional/trim/autotrim_integrity.ksh
@@ -47,18 +47,18 @@ function cleanup
log_must rm -f $TRIM_VDEVS
- log_must set_tunable64 zfs_trim_extent_bytes_min $trim_extent_bytes_min
- log_must set_tunable64 zfs_trim_txg_batch $trim_txg_batch
+ log_must set_tunable64 TRIM_EXTENT_BYTES_MIN $trim_extent_bytes_min
+ log_must set_tunable64 TRIM_TXG_BATCH $trim_txg_batch
}
log_onexit cleanup
# Minimum trim size is decreased to verify all trim sizes.
-typeset trim_extent_bytes_min=$(get_tunable zfs_trim_extent_bytes_min)
-log_must set_tunable64 zfs_trim_extent_bytes_min 4096
+typeset trim_extent_bytes_min=$(get_tunable TRIM_EXTENT_BYTES_MIN)
+log_must set_tunable64 TRIM_EXTENT_BYTES_MIN 4096
-# Reduced zfs_trim_txg_batch to make trimming more frequent.
-typeset trim_txg_batch=$(get_tunable zfs_trim_txg_batch)
-log_must set_tunable64 zfs_trim_txg_batch 8
+# Reduced TRIM_TXG_BATCH to make trimming more frequent.
+typeset trim_txg_batch=$(get_tunable TRIM_TXG_BATCH)
+log_must set_tunable64 TRIM_TXG_BATCH 8
for type in "" "mirror" "raidz" "raidz2" "raidz3"; do
log_must truncate -s 1G $TRIM_VDEVS
diff --git a/tests/zfs-tests/tests/functional/trim/autotrim_trim_integrity.ksh b/tests/zfs-tests/tests/functional/trim/autotrim_trim_integrity.ksh
index c0e850c48..a355d0565 100755
--- a/tests/zfs-tests/tests/functional/trim/autotrim_trim_integrity.ksh
+++ b/tests/zfs-tests/tests/functional/trim/autotrim_trim_integrity.ksh
@@ -48,18 +48,18 @@ function cleanup
log_must rm -f $TRIM_VDEVS
- log_must set_tunable64 zfs_trim_extent_bytes_min $trim_extent_bytes_min
- log_must set_tunable64 zfs_trim_txg_batch $trim_txg_batch
+ log_must set_tunable64 TRIM_EXTENT_BYTES_MIN $trim_extent_bytes_min
+ log_must set_tunable64 TRIM_TXG_BATCH $trim_txg_batch
}
log_onexit cleanup
# Minimum trim size is decreased to verify all trim sizes.
-typeset trim_extent_bytes_min=$(get_tunable zfs_trim_extent_bytes_min)
-log_must set_tunable64 zfs_trim_extent_bytes_min 4096
+typeset trim_extent_bytes_min=$(get_tunable TRIM_EXTENT_BYTES_MIN)
+log_must set_tunable64 TRIM_EXTENT_BYTES_MIN 4096
-# Reduced zfs_trim_txg_batch to make trimming more frequent.
-typeset trim_txg_batch=$(get_tunable zfs_trim_txg_batch)
-log_must set_tunable64 zfs_trim_txg_batch 8
+# Reduced TRIM_TXG_BATCH to make trimming more frequent.
+typeset trim_txg_batch=$(get_tunable TRIM_TXG_BATCH)
+log_must set_tunable64 TRIM_TXG_BATCH 8
for type in "" "mirror" "raidz" "raidz2" "raidz3"; do
log_must truncate -s 1G $TRIM_VDEVS
diff --git a/tests/zfs-tests/tests/functional/trim/trim_config.ksh b/tests/zfs-tests/tests/functional/trim/trim_config.ksh
index 993072b10..ba36edabf 100755
--- a/tests/zfs-tests/tests/functional/trim/trim_config.ksh
+++ b/tests/zfs-tests/tests/functional/trim/trim_config.ksh
@@ -49,23 +49,23 @@ function cleanup
log_must rm -f $TRIM_VDEVS
- log_must set_tunable64 zfs_trim_extent_bytes_min $trim_extent_bytes_min
- log_must set_tunable64 zfs_trim_txg_batch $trim_txg_batch
- log_must set_tunable64 zfs_vdev_min_ms_count $vdev_min_ms_count
+ log_must set_tunable64 TRIM_EXTENT_BYTES_MIN $trim_extent_bytes_min
+ log_must set_tunable64 TRIM_TXG_BATCH $trim_txg_batch
+ log_must set_tunable64 VDEV_MIN_MS_COUNT $vdev_min_ms_count
}
log_onexit cleanup
# Minimum trim size is decreased to verify all trim sizes.
-typeset trim_extent_bytes_min=$(get_tunable zfs_trim_extent_bytes_min)
-log_must set_tunable64 zfs_trim_extent_bytes_min 4096
+typeset trim_extent_bytes_min=$(get_tunable TRIM_EXTENT_BYTES_MIN)
+log_must set_tunable64 TRIM_EXTENT_BYTES_MIN 4096
-# Reduced zfs_trim_txg_batch to make trimming more frequent.
-typeset trim_txg_batch=$(get_tunable zfs_trim_txg_batch)
-log_must set_tunable64 zfs_trim_txg_batch 8
+# Reduced TRIM_TXG_BATCH to make trimming more frequent.
+typeset trim_txg_batch=$(get_tunable TRIM_TXG_BATCH)
+log_must set_tunable64 TRIM_TXG_BATCH 8
# Increased metaslabs to better simulate larger more realistic devices.
-typeset vdev_min_ms_count=$(get_tunable zfs_vdev_min_ms_count)
-log_must set_tunable64 zfs_vdev_min_ms_count 32
+typeset vdev_min_ms_count=$(get_tunable VDEV_MIN_MS_COUNT)
+log_must set_tunable64 VDEV_MIN_MS_COUNT 32
typeset VDEV_MAX_MB=$(( floor(4 * MINVDEVSIZE * 0.75 / 1024 / 1024) ))
typeset VDEV_MIN_MB=$(( floor(4 * MINVDEVSIZE * 0.30 / 1024 / 1024) ))
diff --git a/tests/zfs-tests/tests/functional/trim/trim_integrity.ksh b/tests/zfs-tests/tests/functional/trim/trim_integrity.ksh
index 0bbc439ee..ba0e691b3 100755
--- a/tests/zfs-tests/tests/functional/trim/trim_integrity.ksh
+++ b/tests/zfs-tests/tests/functional/trim/trim_integrity.ksh
@@ -47,18 +47,18 @@ function cleanup
log_must rm -f $TRIM_VDEVS
- log_must set_tunable64 zfs_trim_extent_bytes_min $trim_extent_bytes_min
- log_must set_tunable64 zfs_trim_txg_batch $trim_txg_batch
+ log_must set_tunable64 TRIM_EXTENT_BYTES_MIN $trim_extent_bytes_min
+ log_must set_tunable64 TRIM_TXG_BATCH $trim_txg_batch
}
log_onexit cleanup
# Minimum trim size is decreased to verify all trim sizes.
-typeset trim_extent_bytes_min=$(get_tunable zfs_trim_extent_bytes_min)
-log_must set_tunable64 zfs_trim_extent_bytes_min 4096
+typeset trim_extent_bytes_min=$(get_tunable TRIM_EXTENT_BYTES_MIN)
+log_must set_tunable64 TRIM_EXTENT_BYTES_MIN 4096
-# Reduced zfs_trim_txg_batch to make trimming more frequent.
-typeset trim_txg_batch=$(get_tunable zfs_trim_txg_batch)
-log_must set_tunable64 zfs_trim_txg_batch 8
+# Reduced TRIM_TXG_BATCH to make trimming more frequent.
+typeset trim_txg_batch=$(get_tunable TRIM_TXG_BATCH)
+log_must set_tunable64 TRIM_TXG_BATCH 8
for type in "" "mirror" "raidz" "raidz2" "raidz3"; do
log_must truncate -s 1G $TRIM_VDEVS
diff --git a/tests/zfs-tests/tests/functional/zvol/zvol_misc/zvol_misc_volmode.ksh b/tests/zfs-tests/tests/functional/zvol/zvol_misc/zvol_misc_volmode.ksh
index 934d8942f..aabd746f3 100755
--- a/tests/zfs-tests/tests/functional/zvol/zvol_misc/zvol_misc_volmode.ksh
+++ b/tests/zfs-tests/tests/functional/zvol/zvol_misc/zvol_misc_volmode.ksh
@@ -68,8 +68,7 @@ function sysctl_inhibit_dev # value
if is_linux; then
log_note "Setting zvol_inhibit_dev tunable to $value"
- log_must eval "echo $value > "\
- "/sys/module/zfs/parameters/zvol_inhibit_dev"
+ log_must set_tunable32 VOL_INHIBIT_DEV $value
fi
}
@@ -81,14 +80,7 @@ function sysctl_volmode # value
typeset value="$1"
log_note "Setting volmode tunable to $value"
- if is_linux; then
- echo "$value" > '/sys/module/zfs/parameters/zvol_volmode'
- else
- sysctl 'vfs.zfs.vol.mode' "$value"
- fi
- if [[ $? -ne 0 ]]; then
- log_fail "Unable to set volmode tunable to $value"
- fi
+ log_must set_tunable32 VOL_MODE $value
}
log_assert "Verify that ZFS volume property 'volmode' works as intended"
diff --git a/tests/zfs-tests/tests/perf/perf.shlib b/tests/zfs-tests/tests/perf/perf.shlib
index 47abff09d..196db2497 100644
--- a/tests/zfs-tests/tests/perf/perf.shlib
+++ b/tests/zfs-tests/tests/perf/perf.shlib
@@ -415,7 +415,7 @@ function get_max_dbuf_cache_size
typeset -l max_dbuf_cache_size
if is_linux; then
- max_dbuf_cache_size=$(get_tunable dbuf_cache_max_bytes)
+ max_dbuf_cache_size=$(get_tunable DBUF_CACHE_MAX_BYTES)
else
max_dbuf_cache_size=$(dtrace -qn 'BEGIN {
printf("%u\n", `dbuf_cache_max_bytes);