author:    Richard Yao <[email protected]>   2022-09-27 19:42:41 -0400
committer: GitHub <[email protected]>        2022-09-27 16:42:41 -0700
commit:    fdc2d303710416868a05084e984fd8f231e948bd (patch)
tree:      058a29de2a9d3c998f008b626dd63951487f136e /module
parent:    7584fbe846b4127d5a16c5967a005e8f5c1e7b08 (diff)
Cleanup: Specify unsignedness on things that should not be signed
In #13871, the fact that zfs_vdev_aggregation_limit_non_rotating and
zfs_vdev_aggregation_limit are signed was pointed out as a possible
reason not to eliminate an unnecessary MAX(unsigned, 0), since the
unsigned value was assigned from them.
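To make that point concrete, here is a minimal standalone sketch (an
illustration only, not code from the patch): once a value is unsigned,
`MAX(value, 0)` can never change it, and clamping after the assignment never
protected against a negative source anyway.

```c
#include <stdio.h>

#define	MAX(a, b)	((a) > (b) ? (a) : (b))

int
main(void)
{
	/* Once the variable is unsigned, clamping at zero can never change it. */
	unsigned int limit = 5;
	printf("%u\n", MAX(limit, 0));		/* always prints 5 */

	/*
	 * The clamp only ever mattered while the source was signed, and even
	 * then it came too late: a negative int assigned to an unsigned
	 * variable wraps around instead of being clamped.
	 */
	int tunable = -1;
	unsigned int assigned = (unsigned int)tunable;
	printf("%u\n", assigned);		/* UINT_MAX, not 0 */
	return (0);
}
```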
There is no reason for these module parameters to be signed and upon
inspection, it was found that there are a number of other module
parameters that are signed, but should not be, so we make them unsigned.
Making them unsigned made it clear that some other variables in the code
should also be unsigned, so we also make those unsigned. This prevents
users from setting negative values that could potentially cause bad
behaviors. It also makes the code slightly easier to understand.
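The typical shape of the change, extracted from the zfs_dbgmsg_maxsize hunk
in the diff below and shown here as before/after fragments for readability:

```c
/* Before: a size tunable that must never be negative was declared signed. */
int zfs_dbgmsg_maxsize = 4<<20;	/* 4MB */
ZFS_MODULE_PARAM(zfs, zfs_, dbgmsg_maxsize, INT, ZMOD_RW,
	"Maximum ZFS debug log size");

/* After: both the variable and its parameter declaration become unsigned. */
uint_t zfs_dbgmsg_maxsize = 4<<20;	/* 4MB */
ZFS_MODULE_PARAM(zfs, zfs_, dbgmsg_maxsize, UINT, ZMOD_RW,
	"Maximum ZFS debug log size");
```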
The module parameters made unsigned here mostly deal with timeouts, limits,
bit shifts, and percentages. Any that are boolean are left signed, since it
does not matter whether booleans are treated as signed or unsigned.
Making zfs_arc_lotsfree_percent unsigned caused a
`zfs_arc_lotsfree_percent >= 0` check to become redundant, so it was
removed. Removing the check was also necessary to prevent a compiler
error from -Werror=type-limits.
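A minimal reproduction of that compiler error (a sketch assuming GCC/Clang
behavior for -Wtype-limits; arc_check() is a hypothetical stand-in, the real
code lives in arc_tuning_update()):

```c
/* cc -c -Werror=type-limits arc_check.c */
unsigned int zfs_arc_lotsfree_percent = 10;
unsigned int arc_lotsfree_percent = 10;

void
arc_check(void)	/* hypothetical stand-in for the arc_tuning_update() hunk */
{
	/*
	 * The first comparison is always true for an unsigned variable, so
	 * the compiler reports a diagnostic along the lines of "comparison
	 * of unsigned expression >= 0 is always true", which
	 * -Werror=type-limits turns into a hard error.
	 */
	if ((zfs_arc_lotsfree_percent >= 0) &&		/* now redundant */
	    (zfs_arc_lotsfree_percent <= 100))
		arc_lotsfree_percent = zfs_arc_lotsfree_percent;
}
```

Dropping the `>= 0` half, as the patch does, keeps the 0-100 range check
intact because an unsigned value already cannot go below zero.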
Several end-of-line comments had to be moved to their own lines because
replacing int with uint_t caused us to exceed the 80-character limit
enforced by cstyle.pl.
The following were kept signed because they are passed to
taskq_create(), which expects signed values (see the prototype sketch
after the list), and modifying the OpenSolaris/Illumos DDI is out of
scope for this patch:
* metaslab_load_pct
* zfs_sync_taskq_batch_pct
* zfs_zil_clean_taskq_nthr_pct
* zfs_zil_clean_taskq_minalloc
* zfs_zil_clean_taskq_maxalloc
* zfs_arc_prune_task_threads
Also, negative values in those parameters were found to be harmless.
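For context, the OpenSolaris/Illumos-derived interface those percentages feed
into looks roughly like the following (paraphrased prototype, not part of
this patch); since nthreads, minalloc, and maxalloc are plain int there,
converting the *_pct and *_alloc tunables would have rippled into the DDI:

```c
/*
 * Paraphrased prototype of the OpenSolaris/Illumos-style interface.
 * With the TASKQ_THREADS_CPU_PCT flag, nthreads is interpreted as a
 * percentage of online CPUs rather than an absolute thread count.
 */
taskq_t *taskq_create(const char *name, int nthreads, pri_t pri,
    int minalloc, int maxalloc, uint_t flags);
```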
The following were left signed because either negative values make
sense, or more analysis was needed to determine whether negative values
should be disallowed:
* zfs_metaslab_switch_threshold
* zfs_pd_bytes_max
* zfs_livelist_min_percent_shared
zfs_multihost_history was made static to be consistent with other
parameters.
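From the spa_stats.c hunk in the diff, as a before/after excerpt:

```c
/* Before: globally visible and signed. */
int zfs_multihost_history = B_FALSE;

/* After: file-local and unsigned, matching the neighbouring history tunables. */
static uint_t zfs_multihost_history = B_FALSE;
```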
A number of module parameters were marked as signed, but in reality
referenced unsigned variables; upgrade_errlog_limit is one of numerous
examples. In the case of zfs_vdev_async_read_max_active, it was
already uint32_t, but zdb had an extern int declaration for it.
Interestingly, the documentation in zfs.4 was right for
upgrade_errlog_limit despite the module parameter being wrongly marked,
while the documentation for zfs_vdev_async_read_max_active (and friends)
was wrong. It was also wrong for zstd_abort_size, which was unsigned,
but was documented as signed.
Also, the documentation in zfs.4 incorrectly described the following
parameters as ulong when they were int:
* zfs_arc_meta_adjust_restarts
* zfs_override_estimate_recordsize
They are now uint_t as of this patch and thus the man page has been
updated to describe them as uint.
dbuf_state_index was left alone since it does nothing and perhaps should
be removed in another patch.
If any module parameters were missed, it is because they were not found by
`grep -r 'ZFS_MODULE_PARAM' | grep ', INT'`. I did find a few that this grep
missed, but only because they were in files that already had other hits.
This patch intentionally did not attempt to address whether some of
these module parameters should be elevated to 64-bit parameters, because
a long is only 32 bits wide on 32-bit platforms.
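A standalone illustration of why (assuming the usual ILP32 vs. LP64 data
models; not code from the patch):

```c
#include <stdio.h>

int
main(void)
{
	/*
	 * On an ILP32 (32-bit) target, int and long are both 32 bits, so
	 * promoting an int tunable to long would not widen it there; a real
	 * 64-bit parameter needs an explicit 64-bit type, which is a
	 * separate discussion.
	 */
	printf("sizeof(int)  = %zu\n", sizeof(int));
	printf("sizeof(long) = %zu\n", sizeof(long));	/* 4 on ILP32, 8 on LP64 */
	return (0);
}
```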
Lastly, it was pointed out during review that uint_t is a better match
for these variables than uint32_t because FreeBSD kernel parameter
definitions are designed for uint_t, whose bit width can change in
future memory models. As a result, we change the existing parameters
that are uint32_t to use uint_t.
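The FreeBSD side of that argument is visible in the sysctl_os.c hunks below;
as a before/after excerpt, SYSCTL_UINT is wired to an unsigned int, which is
exactly what uint_t is on any memory model, whereas uint32_t only matches for
as long as unsigned int happens to be 32 bits:

```c
/* Before: signed variable exported through a signed sysctl. */
extern int zfs_condense_pct;
SYSCTL_INT(_vfs_zfs, OID_AUTO, condense_pct,
	CTLFLAG_RWTUN, &zfs_condense_pct, 0,
	"Condense on-disk spacemap when it is more than this many percents"
	" of in-memory counterpart");

/* After: uint_t variable exported through SYSCTL_UINT (unsigned int). */
extern uint_t zfs_condense_pct;
SYSCTL_UINT(_vfs_zfs, OID_AUTO, condense_pct,
	CTLFLAG_RWTUN, &zfs_condense_pct, 0,
	"Condense on-disk spacemap when it is more than this many percents"
	" of in-memory counterpart");
```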
Reviewed-by: Alexander Motin <[email protected]>
Reviewed-by: Brian Behlendorf <[email protected]>
Reviewed-by: Neal Gompa <[email protected]>
Signed-off-by: Richard Yao <[email protected]>
Closes #13875
Diffstat (limited to 'module')
34 files changed, 332 insertions, 307 deletions
diff --git a/module/os/freebsd/zfs/arc_os.c b/module/os/freebsd/zfs/arc_os.c index 30e96a889..dfe5c3d31 100644 --- a/module/os/freebsd/zfs/arc_os.c +++ b/module/os/freebsd/zfs/arc_os.c @@ -138,7 +138,7 @@ arc_default_max(uint64_t min, uint64_t allmem) static void arc_prune_task(void *arg) { - int64_t nr_scan = (intptr_t)arg; + uint64_t nr_scan = (uintptr_t)arg; arc_reduce_target_size(ptob(nr_scan)); @@ -168,12 +168,12 @@ arc_prune_task(void *arg) * for releasing it once the registered arc_prune_func_t has completed. */ void -arc_prune_async(int64_t adjust) +arc_prune_async(uint64_t adjust) { #ifndef __LP64__ - if (adjust > INTPTR_MAX) - adjust = INTPTR_MAX; + if (adjust > UINTPTR_MAX) + adjust = UINTPTR_MAX; #endif taskq_dispatch(arc_prune_taskq, arc_prune_task, (void *)(intptr_t)adjust, TQ_SLEEP); diff --git a/module/os/freebsd/zfs/sysctl_os.c b/module/os/freebsd/zfs/sysctl_os.c index 4d908381c..980bb1c0f 100644 --- a/module/os/freebsd/zfs/sysctl_os.c +++ b/module/os/freebsd/zfs/sysctl_os.c @@ -514,19 +514,19 @@ SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, sm_blksz_with_log, * space map representation must be before we compact it on-disk. * Values should be greater than or equal to 100. */ -extern int zfs_condense_pct; +extern uint_t zfs_condense_pct; /* BEGIN CSTYLED */ -SYSCTL_INT(_vfs_zfs, OID_AUTO, condense_pct, +SYSCTL_UINT(_vfs_zfs, OID_AUTO, condense_pct, CTLFLAG_RWTUN, &zfs_condense_pct, 0, "Condense on-disk spacemap when it is more than this many percents" " of in-memory counterpart"); /* END CSTYLED */ -extern int zfs_remove_max_segment; +extern uint_t zfs_remove_max_segment; /* BEGIN CSTYLED */ -SYSCTL_INT(_vfs_zfs, OID_AUTO, remove_max_segment, +SYSCTL_UINT(_vfs_zfs, OID_AUTO, remove_max_segment, CTLFLAG_RWTUN, &zfs_remove_max_segment, 0, "Largest contiguous segment ZFS will attempt to allocate when removing" " a device"); @@ -561,10 +561,10 @@ SYSCTL_QUAD(_vfs_zfs_metaslab, OID_AUTO, df_alloc_threshold, * Once the space map's free space drops below this level we dynamically * switch to using best-fit allocations. */ -extern int metaslab_df_free_pct; +extern uint_t metaslab_df_free_pct; /* BEGIN CSTYLED */ -SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, df_free_pct, +SYSCTL_UINT(_vfs_zfs_metaslab, OID_AUTO, df_free_pct, CTLFLAG_RWTUN, &metaslab_df_free_pct, 0, "The minimum free space, in percent, which must be available in a" " space map to continue allocations in a first-fit fashion"); @@ -584,10 +584,10 @@ SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, load_pct, /* * Max number of metaslabs per group to preload. 
*/ -extern int metaslab_preload_limit; +extern uint_t metaslab_preload_limit; /* BEGIN CSTYLED */ -SYSCTL_INT(_vfs_zfs_metaslab, OID_AUTO, preload_limit, +SYSCTL_UINT(_vfs_zfs_metaslab, OID_AUTO, preload_limit, CTLFLAG_RWTUN, &metaslab_preload_limit, 0, "Max number of metaslabs per group to preload"); /* END CSTYLED */ @@ -852,7 +852,7 @@ SYSCTL_INT(_vfs_zfs, OID_AUTO, validate_skip, /* vdev_queue.c */ -extern uint32_t zfs_vdev_max_active; +extern uint_t zfs_vdev_max_active; /* BEGIN CSTYLED */ SYSCTL_UINT(_vfs_zfs, OID_AUTO, top_maxinflight, @@ -861,10 +861,10 @@ SYSCTL_UINT(_vfs_zfs, OID_AUTO, top_maxinflight, " (LEGACY)"); /* END CSTYLED */ -extern int zfs_vdev_def_queue_depth; +extern uint_t zfs_vdev_def_queue_depth; /* BEGIN CSTYLED */ -SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, def_queue_depth, +SYSCTL_UINT(_vfs_zfs_vdev, OID_AUTO, def_queue_depth, CTLFLAG_RWTUN, &zfs_vdev_def_queue_depth, 0, "Default queue depth for each allocator"); /* END CSTYLED */ diff --git a/module/os/freebsd/zfs/zfs_debug.c b/module/os/freebsd/zfs/zfs_debug.c index 32fb5a872..abb3c0033 100644 --- a/module/os/freebsd/zfs/zfs_debug.c +++ b/module/os/freebsd/zfs/zfs_debug.c @@ -29,14 +29,14 @@ typedef struct zfs_dbgmsg { list_node_t zdm_node; time_t zdm_timestamp; - int zdm_size; + uint_t zdm_size; char zdm_msg[1]; /* variable length allocation */ } zfs_dbgmsg_t; static list_t zfs_dbgmsgs; -static int zfs_dbgmsg_size = 0; +static uint_t zfs_dbgmsg_size = 0; static kmutex_t zfs_dbgmsgs_lock; -int zfs_dbgmsg_maxsize = 4<<20; /* 4MB */ +uint_t zfs_dbgmsg_maxsize = 4<<20; /* 4MB */ static kstat_t *zfs_dbgmsg_kstat; /* @@ -88,10 +88,10 @@ zfs_dbgmsg_addr(kstat_t *ksp, loff_t n) } static void -zfs_dbgmsg_purge(int max_size) +zfs_dbgmsg_purge(uint_t max_size) { zfs_dbgmsg_t *zdm; - int size; + uint_t size; ASSERT(MUTEX_HELD(&zfs_dbgmsgs_lock)); @@ -155,7 +155,7 @@ void __zfs_dbgmsg(char *buf) { zfs_dbgmsg_t *zdm; - int size; + uint_t size; DTRACE_PROBE1(zfs__dbgmsg, char *, buf); @@ -168,7 +168,7 @@ __zfs_dbgmsg(char *buf) mutex_enter(&zfs_dbgmsgs_lock); list_insert_tail(&zfs_dbgmsgs, zdm); zfs_dbgmsg_size += size; - zfs_dbgmsg_purge(MAX(zfs_dbgmsg_maxsize, 0)); + zfs_dbgmsg_purge(zfs_dbgmsg_maxsize); mutex_exit(&zfs_dbgmsgs_lock); } @@ -248,5 +248,5 @@ zfs_dbgmsg_print(const char *tag) ZFS_MODULE_PARAM(zfs, zfs_, dbgmsg_enable, INT, ZMOD_RW, "Enable ZFS debug message log"); -ZFS_MODULE_PARAM(zfs, zfs_, dbgmsg_maxsize, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs, zfs_, dbgmsg_maxsize, UINT, ZMOD_RW, "Maximum ZFS debug log size"); diff --git a/module/os/linux/spl/spl-taskq.c b/module/os/linux/spl/spl-taskq.c index 3b0c29606..abf4dca58 100644 --- a/module/os/linux/spl/spl-taskq.c +++ b/module/os/linux/spl/spl-taskq.c @@ -46,8 +46,10 @@ module_param(spl_taskq_thread_priority, int, 0644); MODULE_PARM_DESC(spl_taskq_thread_priority, "Allow non-default priority for taskq threads"); -static int spl_taskq_thread_sequential = 4; -module_param(spl_taskq_thread_sequential, int, 0644); +static uint_t spl_taskq_thread_sequential = 4; +/* BEGIN CSTYLED */ +module_param(spl_taskq_thread_sequential, uint, 0644); +/* END CSTYLED */ MODULE_PARM_DESC(spl_taskq_thread_sequential, "Create new taskq threads after N sequential tasks"); diff --git a/module/os/linux/zfs/arc_os.c b/module/os/linux/zfs/arc_os.c index d2a176d77..eaaf7d0bb 100644 --- a/module/os/linux/zfs/arc_os.c +++ b/module/os/linux/zfs/arc_os.c @@ -513,7 +513,7 @@ arc_prune_task(void *ptr) * for releasing it once the registered arc_prune_func_t has completed. 
*/ void -arc_prune_async(int64_t adjust) +arc_prune_async(uint64_t adjust) { arc_prune_t *ap; diff --git a/module/os/linux/zfs/zfs_debug.c b/module/os/linux/zfs/zfs_debug.c index 2a4e3f378..819416b68 100644 --- a/module/os/linux/zfs/zfs_debug.c +++ b/module/os/linux/zfs/zfs_debug.c @@ -29,13 +29,13 @@ typedef struct zfs_dbgmsg { procfs_list_node_t zdm_node; uint64_t zdm_timestamp; - int zdm_size; + uint_t zdm_size; char zdm_msg[1]; /* variable length allocation */ } zfs_dbgmsg_t; static procfs_list_t zfs_dbgmsgs; -static int zfs_dbgmsg_size = 0; -int zfs_dbgmsg_maxsize = 4<<20; /* 4MB */ +static uint_t zfs_dbgmsg_size = 0; +uint_t zfs_dbgmsg_maxsize = 4<<20; /* 4MB */ /* * Internal ZFS debug messages are enabled by default. @@ -68,14 +68,14 @@ zfs_dbgmsg_show(struct seq_file *f, void *p) } static void -zfs_dbgmsg_purge(int max_size) +zfs_dbgmsg_purge(uint_t max_size) { while (zfs_dbgmsg_size > max_size) { zfs_dbgmsg_t *zdm = list_remove_head(&zfs_dbgmsgs.pl_list); if (zdm == NULL) return; - int size = zdm->zdm_size; + uint_t size = zdm->zdm_size; kmem_free(zdm, size); zfs_dbgmsg_size -= size; } @@ -135,7 +135,7 @@ __set_error(const char *file, const char *func, int line, int err) void __zfs_dbgmsg(char *buf) { - int size = sizeof (zfs_dbgmsg_t) + strlen(buf); + uint_t size = sizeof (zfs_dbgmsg_t) + strlen(buf); zfs_dbgmsg_t *zdm = kmem_zalloc(size, KM_SLEEP); zdm->zdm_size = size; zdm->zdm_timestamp = gethrestime_sec(); @@ -144,7 +144,7 @@ __zfs_dbgmsg(char *buf) mutex_enter(&zfs_dbgmsgs.pl_lock); procfs_list_add(&zfs_dbgmsgs, zdm); zfs_dbgmsg_size += size; - zfs_dbgmsg_purge(MAX(zfs_dbgmsg_maxsize, 0)); + zfs_dbgmsg_purge(zfs_dbgmsg_maxsize); mutex_exit(&zfs_dbgmsgs.pl_lock); } @@ -252,6 +252,8 @@ zfs_dbgmsg_print(const char *tag) module_param(zfs_dbgmsg_enable, int, 0644); MODULE_PARM_DESC(zfs_dbgmsg_enable, "Enable ZFS debug message log"); -module_param(zfs_dbgmsg_maxsize, int, 0644); +/* BEGIN CSTYLED */ +module_param(zfs_dbgmsg_maxsize, uint, 0644); +/* END CSTYLED */ MODULE_PARM_DESC(zfs_dbgmsg_maxsize, "Maximum ZFS debug log size"); #endif diff --git a/module/zfs/arc.c b/module/zfs/arc.c index 7957b1b56..33865f715 100644 --- a/module/zfs/arc.c +++ b/module/zfs/arc.c @@ -354,7 +354,7 @@ static list_t arc_evict_waiters; * can still happen, even during the potentially long time that arc_size is * more than arc_c. */ -static int zfs_arc_eviction_pct = 200; +static uint_t zfs_arc_eviction_pct = 200; /* * The number of headers to evict in arc_evict_state_impl() before @@ -363,10 +363,10 @@ static int zfs_arc_eviction_pct = 200; * oldest header in the arc state), but comes with higher overhead * (i.e. more invocations of arc_evict_state_impl()). */ -static int zfs_arc_evict_batch_limit = 10; +static uint_t zfs_arc_evict_batch_limit = 10; /* number of seconds before growing cache again */ -int arc_grow_retry = 5; +uint_t arc_grow_retry = 5; /* * Minimum time between calls to arc_kmem_reap_soon(). @@ -377,10 +377,10 @@ static const int arc_kmem_cache_reap_retry_ms = 1000; static int zfs_arc_overflow_shift = 8; /* shift of arc_c for calculating both min and max arc_p */ -static int arc_p_min_shift = 4; +static uint_t arc_p_min_shift = 4; /* log2(fraction of arc to reclaim) */ -int arc_shrink_shift = 7; +uint_t arc_shrink_shift = 7; /* percent of pagecache to reclaim arc to */ #ifdef _KERNEL @@ -396,20 +396,20 @@ uint_t zfs_arc_pc_percent = 0; * This must be less than arc_shrink_shift, so that when we shrink the ARC, * we will still not allow it to grow. 
*/ -int arc_no_grow_shift = 5; +uint_t arc_no_grow_shift = 5; /* * minimum lifespan of a prefetch block in clock ticks * (initialized in arc_init()) */ -static int arc_min_prefetch_ms; -static int arc_min_prescient_prefetch_ms; +static uint_t arc_min_prefetch_ms; +static uint_t arc_min_prescient_prefetch_ms; /* * If this percent of memory is free, don't throttle. */ -int arc_lotsfree_percent = 10; +uint_t arc_lotsfree_percent = 10; /* * The arc has filled available memory and has now warmed up. @@ -425,10 +425,10 @@ unsigned long zfs_arc_meta_limit = 0; unsigned long zfs_arc_meta_min = 0; static unsigned long zfs_arc_dnode_limit = 0; static unsigned long zfs_arc_dnode_reduce_percent = 10; -static int zfs_arc_grow_retry = 0; -static int zfs_arc_shrink_shift = 0; -static int zfs_arc_p_min_shift = 0; -int zfs_arc_average_blocksize = 8 * 1024; /* 8KB */ +static uint_t zfs_arc_grow_retry = 0; +static uint_t zfs_arc_shrink_shift = 0; +static uint_t zfs_arc_p_min_shift = 0; +uint_t zfs_arc_average_blocksize = 8 * 1024; /* 8KB */ /* * ARC dirty data constraints for arc_tempreserve_space() throttle: @@ -460,13 +460,13 @@ static unsigned long zfs_arc_dnode_limit_percent = 10; * These tunables are Linux-specific */ static unsigned long zfs_arc_sys_free = 0; -static int zfs_arc_min_prefetch_ms = 0; -static int zfs_arc_min_prescient_prefetch_ms = 0; +static uint_t zfs_arc_min_prefetch_ms = 0; +static uint_t zfs_arc_min_prescient_prefetch_ms = 0; static int zfs_arc_p_dampener_disable = 1; -static int zfs_arc_meta_prune = 10000; -static int zfs_arc_meta_strategy = ARC_STRATEGY_META_BALANCED; -static int zfs_arc_meta_adjust_restarts = 4096; -static int zfs_arc_lotsfree_percent = 10; +static uint_t zfs_arc_meta_prune = 10000; +static uint_t zfs_arc_meta_strategy = ARC_STRATEGY_META_BALANCED; +static uint_t zfs_arc_meta_adjust_restarts = 4096; +static uint_t zfs_arc_lotsfree_percent = 10; /* * Number of arc_prune threads @@ -790,7 +790,7 @@ unsigned long l2arc_feed_min_ms = L2ARC_FEED_MIN_MS; /* min interval msecs */ int l2arc_noprefetch = B_TRUE; /* don't cache prefetch bufs */ int l2arc_feed_again = B_TRUE; /* turbo warmup */ int l2arc_norw = B_FALSE; /* no reads during writes */ -static int l2arc_meta_percent = 33; /* limit on headers size */ +static uint_t l2arc_meta_percent = 33; /* limit on headers size */ /* * L2ARC Internals @@ -3898,7 +3898,7 @@ arc_evict_hdr(arc_buf_hdr_t *hdr, kmutex_t *hash_lock, uint64_t *real_evicted) { arc_state_t *evicted_state, *state; int64_t bytes_evicted = 0; - int min_lifetime = HDR_PRESCIENT_PREFETCH(hdr) ? + uint_t min_lifetime = HDR_PRESCIENT_PREFETCH(hdr) ? 
arc_min_prescient_prefetch_ms : arc_min_prefetch_ms; ASSERT(MUTEX_HELD(hash_lock)); @@ -4053,7 +4053,7 @@ arc_evict_state_impl(multilist_t *ml, int idx, arc_buf_hdr_t *marker, uint64_t bytes_evicted = 0, real_evicted = 0; arc_buf_hdr_t *hdr; kmutex_t *hash_lock; - int evict_count = zfs_arc_evict_batch_limit; + uint_t evict_count = zfs_arc_evict_batch_limit; ASSERT3P(marker, !=, NULL); @@ -4061,7 +4061,7 @@ arc_evict_state_impl(multilist_t *ml, int idx, arc_buf_hdr_t *marker, for (hdr = multilist_sublist_prev(mls, marker); likely(hdr != NULL); hdr = multilist_sublist_prev(mls, marker)) { - if ((evict_count <= 0) || (bytes_evicted >= bytes)) + if ((evict_count == 0) || (bytes_evicted >= bytes)) break; /* @@ -4404,10 +4404,10 @@ arc_evict_impl(arc_state_t *state, uint64_t spa, int64_t bytes, static uint64_t arc_evict_meta_balanced(uint64_t meta_used) { - int64_t delta, prune = 0, adjustmnt; - uint64_t total_evicted = 0; + int64_t delta, adjustmnt; + uint64_t total_evicted = 0, prune = 0; arc_buf_contents_t type = ARC_BUFC_DATA; - int restarts = MAX(zfs_arc_meta_adjust_restarts, 0); + uint_t restarts = zfs_arc_meta_adjust_restarts; restart: /* @@ -7656,8 +7656,7 @@ arc_tuning_update(boolean_t verbose) } /* Valid range: 0 - 100 */ - if ((zfs_arc_lotsfree_percent >= 0) && - (zfs_arc_lotsfree_percent <= 100)) + if (zfs_arc_lotsfree_percent <= 100) arc_lotsfree_percent = zfs_arc_lotsfree_percent; WARN_IF_TUNING_IGNORED(zfs_arc_lotsfree_percent, arc_lotsfree_percent, verbose); @@ -11077,56 +11076,56 @@ EXPORT_SYMBOL(arc_add_prune_callback); EXPORT_SYMBOL(arc_remove_prune_callback); ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, min, param_set_arc_min, - param_get_long, ZMOD_RW, "Minimum ARC size in bytes"); + param_get_ulong, ZMOD_RW, "Minimum ARC size in bytes"); ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, max, param_set_arc_max, - param_get_long, ZMOD_RW, "Maximum ARC size in bytes"); + param_get_ulong, ZMOD_RW, "Maximum ARC size in bytes"); ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, meta_limit, param_set_arc_long, - param_get_long, ZMOD_RW, "Metadata limit for ARC size in bytes"); + param_get_ulong, ZMOD_RW, "Metadata limit for ARC size in bytes"); ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, meta_limit_percent, - param_set_arc_long, param_get_long, ZMOD_RW, + param_set_arc_long, param_get_ulong, ZMOD_RW, "Percent of ARC size for ARC meta limit"); ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, meta_min, param_set_arc_long, - param_get_long, ZMOD_RW, "Minimum ARC metadata size in bytes"); + param_get_ulong, ZMOD_RW, "Minimum ARC metadata size in bytes"); ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, meta_prune, INT, ZMOD_RW, "Meta objects to scan for prune"); -ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, meta_adjust_restarts, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, meta_adjust_restarts, UINT, ZMOD_RW, "Limit number of restarts in arc_evict_meta"); -ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, meta_strategy, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, meta_strategy, UINT, ZMOD_RW, "Meta reclaim strategy"); ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, grow_retry, param_set_arc_int, - param_get_int, ZMOD_RW, "Seconds before growing ARC size"); + param_get_uint, ZMOD_RW, "Seconds before growing ARC size"); ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, p_dampener_disable, INT, ZMOD_RW, "Disable arc_p adapt dampener"); ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, shrink_shift, param_set_arc_int, - param_get_int, ZMOD_RW, "log2(fraction of ARC to reclaim)"); + param_get_uint, ZMOD_RW, "log2(fraction of ARC to reclaim)"); ZFS_MODULE_PARAM(zfs_arc, 
zfs_arc_, pc_percent, UINT, ZMOD_RW, "Percent of pagecache to reclaim ARC to"); ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, p_min_shift, param_set_arc_int, - param_get_int, ZMOD_RW, "arc_c shift to calc min/max arc_p"); + param_get_uint, ZMOD_RW, "arc_c shift to calc min/max arc_p"); -ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, average_blocksize, INT, ZMOD_RD, +ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, average_blocksize, UINT, ZMOD_RD, "Target average block size"); ZFS_MODULE_PARAM(zfs, zfs_, compressed_arc_enabled, INT, ZMOD_RW, "Disable compressed ARC buffers"); ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, min_prefetch_ms, param_set_arc_int, - param_get_int, ZMOD_RW, "Min life of prefetch block in ms"); + param_get_uint, ZMOD_RW, "Min life of prefetch block in ms"); ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, min_prescient_prefetch_ms, - param_set_arc_int, param_get_int, ZMOD_RW, + param_set_arc_int, param_get_uint, ZMOD_RW, "Min life of prescient prefetched block in ms"); ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, write_max, ULONG, ZMOD_RW, @@ -11159,7 +11158,7 @@ ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, feed_again, INT, ZMOD_RW, ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, norw, INT, ZMOD_RW, "No reads during writes"); -ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, meta_percent, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, meta_percent, UINT, ZMOD_RW, "Percent of ARC size allowed for L2ARC-only headers"); ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, rebuild_enabled, INT, ZMOD_RW, @@ -11175,25 +11174,25 @@ ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, exclude_special, INT, ZMOD_RW, "Exclude dbufs on special vdevs from being cached to L2ARC if set."); ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, lotsfree_percent, param_set_arc_int, - param_get_int, ZMOD_RW, "System free memory I/O throttle in bytes"); + param_get_uint, ZMOD_RW, "System free memory I/O throttle in bytes"); ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, sys_free, param_set_arc_long, - param_get_long, ZMOD_RW, "System free memory target size in bytes"); + param_get_ulong, ZMOD_RW, "System free memory target size in bytes"); ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, dnode_limit, param_set_arc_long, - param_get_long, ZMOD_RW, "Minimum bytes of dnodes in ARC"); + param_get_ulong, ZMOD_RW, "Minimum bytes of dnodes in ARC"); ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, dnode_limit_percent, - param_set_arc_long, param_get_long, ZMOD_RW, + param_set_arc_long, param_get_ulong, ZMOD_RW, "Percent of ARC meta buffers for dnodes"); ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, dnode_reduce_percent, ULONG, ZMOD_RW, "Percentage of excess dnodes to try to unpin"); -ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, eviction_pct, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, eviction_pct, UINT, ZMOD_RW, "When full, ARC allocation waits for eviction of this % of alloc size"); -ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, evict_batch_limit, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, evict_batch_limit, UINT, ZMOD_RW, "The number of headers to evict per sublist before moving to the next"); ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, prune_task_threads, INT, ZMOD_RW, diff --git a/module/zfs/dbuf.c b/module/zfs/dbuf.c index 85ba4e322..db1123d37 100644 --- a/module/zfs/dbuf.c +++ b/module/zfs/dbuf.c @@ -231,11 +231,11 @@ static unsigned long dbuf_cache_max_bytes = ULONG_MAX; static unsigned long dbuf_metadata_cache_max_bytes = ULONG_MAX; /* Set the default sizes of the caches to log2 fraction of arc size */ -static int dbuf_cache_shift = 5; -static int dbuf_metadata_cache_shift = 6; +static uint_t dbuf_cache_shift = 5; +static uint_t 
dbuf_metadata_cache_shift = 6; /* Set the dbuf hash mutex count as log2 shift (dynamic by default) */ -static uint32_t dbuf_mutex_cache_shift = 0; +static uint_t dbuf_mutex_cache_shift = 0; static unsigned long dbuf_cache_target_bytes(void); static unsigned long dbuf_metadata_cache_target_bytes(void); @@ -5132,10 +5132,10 @@ ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, lowater_pct, UINT, ZMOD_RW, ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, metadata_cache_max_bytes, ULONG, ZMOD_RW, "Maximum size in bytes of dbuf metadata cache."); -ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, cache_shift, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, cache_shift, UINT, ZMOD_RW, "Set size of dbuf cache to log2 fraction of arc size."); -ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, metadata_cache_shift, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, metadata_cache_shift, UINT, ZMOD_RW, "Set size of dbuf metadata cache to log2 fraction of arc size."); ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, mutex_cache_shift, UINT, ZMOD_RD, diff --git a/module/zfs/dmu.c b/module/zfs/dmu.c index 58c88c7d7..ba9224777 100644 --- a/module/zfs/dmu.c +++ b/module/zfs/dmu.c @@ -86,7 +86,7 @@ static int zfs_dmu_offset_next_sync = 1; * helps to limit the amount of memory that can be used by prefetching. * Larger objects should be prefetched a bit at a time. */ -int dmu_prefetch_max = 8 * SPA_MAXBLOCKSIZE; +uint_t dmu_prefetch_max = 8 * SPA_MAXBLOCKSIZE; const dmu_object_type_info_t dmu_ot[DMU_OT_NUMTYPES] = { {DMU_BSWAP_UINT8, TRUE, FALSE, FALSE, "unallocated" }, @@ -2362,5 +2362,5 @@ ZFS_MODULE_PARAM(zfs, zfs_, dmu_offset_next_sync, INT, ZMOD_RW, "Enable forcing txg sync to find holes"); /* CSTYLED */ -ZFS_MODULE_PARAM(zfs, , dmu_prefetch_max, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs, , dmu_prefetch_max, UINT, ZMOD_RW, "Limit one prefetch call to this size"); diff --git a/module/zfs/dmu_object.c b/module/zfs/dmu_object.c index 640af59be..6fe4480d6 100644 --- a/module/zfs/dmu_object.c +++ b/module/zfs/dmu_object.c @@ -41,7 +41,7 @@ * determined to be the lowest value that eliminates the measurable effect * of lock contention from this code path. 
*/ -int dmu_object_alloc_chunk_shift = 7; +uint_t dmu_object_alloc_chunk_shift = 7; static uint64_t dmu_object_alloc_impl(objset_t *os, dmu_object_type_t ot, int blocksize, @@ -55,7 +55,7 @@ dmu_object_alloc_impl(objset_t *os, dmu_object_type_t ot, int blocksize, int dn_slots = dnodesize >> DNODE_SHIFT; boolean_t restarted = B_FALSE; uint64_t *cpuobj = NULL; - int dnodes_per_chunk = 1 << dmu_object_alloc_chunk_shift; + uint_t dnodes_per_chunk = 1 << dmu_object_alloc_chunk_shift; int error; cpuobj = &os->os_obj_next_percpu[CPU_SEQID_UNSTABLE % @@ -518,6 +518,6 @@ EXPORT_SYMBOL(dmu_object_zapify); EXPORT_SYMBOL(dmu_object_free_zapified); /* BEGIN CSTYLED */ -ZFS_MODULE_PARAM(zfs, , dmu_object_alloc_chunk_shift, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs, , dmu_object_alloc_chunk_shift, UINT, ZMOD_RW, "CPU-specific allocator grabs 2^N objects at once"); /* END CSTYLED */ diff --git a/module/zfs/dmu_recv.c b/module/zfs/dmu_recv.c index e22b6b13a..a9e4a6745 100644 --- a/module/zfs/dmu_recv.c +++ b/module/zfs/dmu_recv.c @@ -67,9 +67,9 @@ #endif #include <sys/zfs_file.h> -static int zfs_recv_queue_length = SPA_MAXBLOCKSIZE; -static int zfs_recv_queue_ff = 20; -static int zfs_recv_write_batch_size = 1024 * 1024; +static uint_t zfs_recv_queue_length = SPA_MAXBLOCKSIZE; +static uint_t zfs_recv_queue_ff = 20; +static uint_t zfs_recv_write_batch_size = 1024 * 1024; static int zfs_recv_best_effort_corrective = 0; static const void *const dmu_recv_tag = "dmu_recv_tag"; @@ -3729,13 +3729,13 @@ dmu_objset_is_receiving(objset_t *os) os->os_dsl_dataset->ds_owner == dmu_recv_tag); } -ZFS_MODULE_PARAM(zfs_recv, zfs_recv_, queue_length, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs_recv, zfs_recv_, queue_length, UINT, ZMOD_RW, "Maximum receive queue length"); -ZFS_MODULE_PARAM(zfs_recv, zfs_recv_, queue_ff, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs_recv, zfs_recv_, queue_ff, UINT, ZMOD_RW, "Receive queue fill fraction"); -ZFS_MODULE_PARAM(zfs_recv, zfs_recv_, write_batch_size, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs_recv, zfs_recv_, write_batch_size, UINT, ZMOD_RW, "Maximum amount of writes to batch into one transaction"); ZFS_MODULE_PARAM(zfs_recv, zfs_recv_, best_effort_corrective, INT, ZMOD_RW, diff --git a/module/zfs/dmu_send.c b/module/zfs/dmu_send.c index 283e2d3b3..95377fa46 100644 --- a/module/zfs/dmu_send.c +++ b/module/zfs/dmu_send.c @@ -75,7 +75,7 @@ static int zfs_send_corrupt_data = B_FALSE; * thread is issuing new reads because the prefetches have fallen out of the * cache, this may need to be decreased. */ -static int zfs_send_queue_length = SPA_MAXBLOCKSIZE; +static uint_t zfs_send_queue_length = SPA_MAXBLOCKSIZE; /* * This tunable controls the length of the queues that zfs send worker threads * use to communicate. If the send_main_thread is blocking on these queues, @@ -83,7 +83,7 @@ static int zfs_send_queue_length = SPA_MAXBLOCKSIZE; * at the start of a send as these threads consume all the available IO * resources, this variable may need to be decreased. */ -static int zfs_send_no_prefetch_queue_length = 1024 * 1024; +static uint_t zfs_send_no_prefetch_queue_length = 1024 * 1024; /* * These tunables control the fill fraction of the queues by zfs send. The fill * fraction controls the frequency with which threads have to be cv_signaled. @@ -91,13 +91,13 @@ static int zfs_send_no_prefetch_queue_length = 1024 * 1024; * down. If the queues empty before the signalled thread can catch up, then * these should be tuned up. 
*/ -static int zfs_send_queue_ff = 20; -static int zfs_send_no_prefetch_queue_ff = 20; +static uint_t zfs_send_queue_ff = 20; +static uint_t zfs_send_no_prefetch_queue_ff = 20; /* * Use this to override the recordsize calculation for fast zfs send estimates. */ -static int zfs_override_estimate_recordsize = 0; +static uint_t zfs_override_estimate_recordsize = 0; /* Set this tunable to FALSE to disable setting of DRR_FLAG_FREERECORDS */ static const boolean_t zfs_send_set_freerecords_bit = B_TRUE; @@ -3089,20 +3089,20 @@ out: ZFS_MODULE_PARAM(zfs_send, zfs_send_, corrupt_data, INT, ZMOD_RW, "Allow sending corrupt data"); -ZFS_MODULE_PARAM(zfs_send, zfs_send_, queue_length, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs_send, zfs_send_, queue_length, UINT, ZMOD_RW, "Maximum send queue length"); ZFS_MODULE_PARAM(zfs_send, zfs_send_, unmodified_spill_blocks, INT, ZMOD_RW, "Send unmodified spill blocks"); -ZFS_MODULE_PARAM(zfs_send, zfs_send_, no_prefetch_queue_length, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs_send, zfs_send_, no_prefetch_queue_length, UINT, ZMOD_RW, "Maximum send queue length for non-prefetch queues"); -ZFS_MODULE_PARAM(zfs_send, zfs_send_, queue_ff, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs_send, zfs_send_, queue_ff, UINT, ZMOD_RW, "Send queue fill fraction"); -ZFS_MODULE_PARAM(zfs_send, zfs_send_, no_prefetch_queue_ff, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs_send, zfs_send_, no_prefetch_queue_ff, UINT, ZMOD_RW, "Send queue fill fraction for non-prefetch queues"); -ZFS_MODULE_PARAM(zfs_send, zfs_, override_estimate_recordsize, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs_send, zfs_, override_estimate_recordsize, UINT, ZMOD_RW, "Override block size estimate with fixed size"); diff --git a/module/zfs/dmu_traverse.c b/module/zfs/dmu_traverse.c index 44b3c9fc5..2ed75640f 100644 --- a/module/zfs/dmu_traverse.c +++ b/module/zfs/dmu_traverse.c @@ -41,7 +41,7 @@ static int32_t zfs_pd_bytes_max = 50 * 1024 * 1024; /* 50MB */ static int32_t send_holes_without_birth_time = 1; -static int32_t zfs_traverse_indirect_prefetch_limit = 32; +static uint_t zfs_traverse_indirect_prefetch_limit = 32; typedef struct prefetch_data { kmutex_t pd_mtx; @@ -812,7 +812,7 @@ EXPORT_SYMBOL(traverse_pool); ZFS_MODULE_PARAM(zfs, zfs_, pd_bytes_max, INT, ZMOD_RW, "Max number of bytes to prefetch"); -ZFS_MODULE_PARAM(zfs, zfs_, traverse_indirect_prefetch_limit, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs, zfs_, traverse_indirect_prefetch_limit, UINT, ZMOD_RW, "Traverse prefetch number of blocks pointed by indirect block"); #if defined(_KERNEL) diff --git a/module/zfs/dsl_dataset.c b/module/zfs/dsl_dataset.c index c9d0a9940..64bc70c24 100644 --- a/module/zfs/dsl_dataset.c +++ b/module/zfs/dsl_dataset.c @@ -82,9 +82,9 @@ * 3/1 memory split doesn't leave much room for 16M chunks. 
*/ #ifdef _ILP32 -int zfs_max_recordsize = 1 * 1024 * 1024; +uint_t zfs_max_recordsize = 1 * 1024 * 1024; #else -int zfs_max_recordsize = 16 * 1024 * 1024; +uint_t zfs_max_recordsize = 16 * 1024 * 1024; #endif static int zfs_allow_redacted_dataset_mount = 0; @@ -106,7 +106,7 @@ static void dsl_dataset_unset_remap_deadlist_object(dsl_dataset_t *ds, static void unload_zfeature(dsl_dataset_t *ds, spa_feature_t f); -extern int spa_asize_inflation; +extern uint_t spa_asize_inflation; static zil_header_t zero_zil; @@ -4971,7 +4971,7 @@ dsl_dataset_oldest_snapshot(spa_t *spa, uint64_t head_ds, uint64_t min_txg, return (0); } -ZFS_MODULE_PARAM(zfs, zfs_, max_recordsize, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs, zfs_, max_recordsize, UINT, ZMOD_RW, "Max allowed record size"); ZFS_MODULE_PARAM(zfs, zfs_, allow_redacted_dataset_mount, INT, ZMOD_RW, diff --git a/module/zfs/dsl_pool.c b/module/zfs/dsl_pool.c index 7a589cadb..4fd3722a0 100644 --- a/module/zfs/dsl_pool.c +++ b/module/zfs/dsl_pool.c @@ -101,8 +101,8 @@ */ unsigned long zfs_dirty_data_max = 0; unsigned long zfs_dirty_data_max_max = 0; -int zfs_dirty_data_max_percent = 10; -int zfs_dirty_data_max_max_percent = 25; +uint_t zfs_dirty_data_max_percent = 10; +uint_t zfs_dirty_data_max_max_percent = 25; /* * The upper limit of TX_WRITE log data. Write operations are throttled @@ -116,14 +116,14 @@ unsigned long zfs_wrlog_data_max = 0; * zfs_dirty_data_max), push out a txg. This should be less than * zfs_vdev_async_write_active_min_dirty_percent. */ -static int zfs_dirty_data_sync_percent = 20; +static uint_t zfs_dirty_data_sync_percent = 20; /* * Once there is this amount of dirty data, the dmu_tx_delay() will kick in * and delay each transaction. * This value should be >= zfs_vdev_async_write_active_max_dirty_percent. */ -int zfs_delay_min_dirty_percent = 60; +uint_t zfs_delay_min_dirty_percent = 60; /* * This controls how quickly the delay approaches infinity. @@ -1455,14 +1455,14 @@ EXPORT_SYMBOL(dsl_pool_config_enter); EXPORT_SYMBOL(dsl_pool_config_exit); /* zfs_dirty_data_max_percent only applied at module load in arc_init(). */ -ZFS_MODULE_PARAM(zfs, zfs_, dirty_data_max_percent, INT, ZMOD_RD, +ZFS_MODULE_PARAM(zfs, zfs_, dirty_data_max_percent, UINT, ZMOD_RD, "Max percent of RAM allowed to be dirty"); /* zfs_dirty_data_max_max_percent only applied at module load in arc_init(). 
*/ -ZFS_MODULE_PARAM(zfs, zfs_, dirty_data_max_max_percent, INT, ZMOD_RD, +ZFS_MODULE_PARAM(zfs, zfs_, dirty_data_max_max_percent, UINT, ZMOD_RD, "zfs_dirty_data_max upper bound as % of RAM"); -ZFS_MODULE_PARAM(zfs, zfs_, delay_min_dirty_percent, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs, zfs_, delay_min_dirty_percent, UINT, ZMOD_RW, "Transaction delay threshold"); ZFS_MODULE_PARAM(zfs, zfs_, dirty_data_max, ULONG, ZMOD_RW, @@ -1475,7 +1475,7 @@ ZFS_MODULE_PARAM(zfs, zfs_, wrlog_data_max, ULONG, ZMOD_RW, ZFS_MODULE_PARAM(zfs, zfs_, dirty_data_max_max, ULONG, ZMOD_RD, "zfs_dirty_data_max upper bound in bytes"); -ZFS_MODULE_PARAM(zfs, zfs_, dirty_data_sync_percent, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs, zfs_, dirty_data_sync_percent, UINT, ZMOD_RW, "Dirty data txg sync threshold as a percentage of zfs_dirty_data_max"); ZFS_MODULE_PARAM(zfs, zfs_, delay_scale, ULONG, ZMOD_RW, diff --git a/module/zfs/dsl_scan.c b/module/zfs/dsl_scan.c index 5ad8ff1f3..f0cd1feaf 100644 --- a/module/zfs/dsl_scan.c +++ b/module/zfs/dsl_scan.c @@ -128,7 +128,7 @@ static void scan_ds_queue_remove(dsl_scan_t *scn, uint64_t dsobj); static void scan_ds_queue_sync(dsl_scan_t *scn, dmu_tx_t *tx); static uint64_t dsl_scan_count_data_disks(vdev_t *vd); -extern int zfs_vdev_async_write_active_min_dirty_percent; +extern uint_t zfs_vdev_async_write_active_min_dirty_percent; static int zfs_scan_blkstats = 0; /* @@ -149,8 +149,10 @@ static int zfs_scan_strict_mem_lim = B_FALSE; */ static unsigned long zfs_scan_vdev_limit = 4 << 20; -static int zfs_scan_issue_strategy = 0; -static int zfs_scan_legacy = B_FALSE; /* don't queue & sort zios, go direct */ +static uint_t zfs_scan_issue_strategy = 0; + +/* don't queue & sort zios, go direct */ +static int zfs_scan_legacy = B_FALSE; static unsigned long zfs_scan_max_ext_gap = 2 << 20; /* in bytes */ /* @@ -158,20 +160,33 @@ static unsigned long zfs_scan_max_ext_gap = 2 << 20; /* in bytes */ * zfs_scan_fill_weight. Runtime adjustments to zfs_scan_fill_weight would * break queue sorting. 
*/ -static int zfs_scan_fill_weight = 3; +static uint_t zfs_scan_fill_weight = 3; static uint64_t fill_weight; /* See dsl_scan_should_clear() for details on the memory limit tunables */ static const uint64_t zfs_scan_mem_lim_min = 16 << 20; /* bytes */ static const uint64_t zfs_scan_mem_lim_soft_max = 128 << 20; /* bytes */ -static int zfs_scan_mem_lim_fact = 20; /* fraction of physmem */ -static int zfs_scan_mem_lim_soft_fact = 20; /* fraction of mem lim above */ - -static int zfs_scrub_min_time_ms = 1000; /* min millis to scrub per txg */ -static int zfs_obsolete_min_time_ms = 500; /* min millis to obsolete per txg */ -static int zfs_free_min_time_ms = 1000; /* min millis to free per txg */ -static int zfs_resilver_min_time_ms = 3000; /* min millis to resilver per txg */ -static int zfs_scan_checkpoint_intval = 7200; /* in seconds */ + + +/* fraction of physmem */ +static uint_t zfs_scan_mem_lim_fact = 20; + +/* fraction of mem lim above */ +static uint_t zfs_scan_mem_lim_soft_fact = 20; + +/* minimum milliseconds to scrub per txg */ +static uint_t zfs_scrub_min_time_ms = 1000; + +/* minimum milliseconds to obsolete per txg */ +static uint_t zfs_obsolete_min_time_ms = 500; + +/* minimum milliseconds to free per txg */ +static uint_t zfs_free_min_time_ms = 1000; + +/* minimum milliseconds to resilver per txg */ +static uint_t zfs_resilver_min_time_ms = 3000; + +static uint_t zfs_scan_checkpoint_intval = 7200; /* in seconds */ int zfs_scan_suspend_progress = 0; /* set to prevent scans from progressing */ static int zfs_no_scrub_io = B_FALSE; /* set to disable scrub i/o */ static int zfs_no_scrub_prefetch = B_FALSE; /* set to disable scrub prefetch */ @@ -1350,7 +1365,7 @@ dsl_scan_check_suspend(dsl_scan_t *scn, const zbookmark_phys_t *zb) scn->scn_dp->dp_spa->spa_sync_starttime; uint64_t dirty_min_bytes = zfs_dirty_data_max * zfs_vdev_async_write_active_min_dirty_percent / 100; - int mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ? + uint_t mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ? zfs_resilver_min_time_ms : zfs_scrub_min_time_ms; if ((NSEC2MSEC(scan_time_ns) > mintime && @@ -2840,7 +2855,7 @@ scan_io_queue_check_suspend(dsl_scan_t *scn) scn->scn_dp->dp_spa->spa_sync_starttime; uint64_t dirty_min_bytes = zfs_dirty_data_max * zfs_vdev_async_write_active_min_dirty_percent / 100; - int mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ? + uint_t mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ? zfs_resilver_min_time_ms : zfs_scrub_min_time_ms; return ((NSEC2MSEC(scan_time_ns) > mintime && @@ -3622,8 +3637,9 @@ dsl_scan_sync(dsl_pool_t *dp, dmu_tx_t *tx) */ if (zfs_scan_suspend_progress) { uint64_t scan_time_ns = gethrtime() - scn->scn_sync_start_time; - int mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ? - zfs_resilver_min_time_ms : zfs_scrub_min_time_ms; + uint_t mintime = (scn->scn_phys.scn_func == + POOL_SCAN_RESILVER) ? 
zfs_resilver_min_time_ms : + zfs_scrub_min_time_ms; while (zfs_scan_suspend_progress && !txg_sync_waiting(scn->scn_dp) && @@ -4433,16 +4449,16 @@ dsl_scan_assess_vdev(dsl_pool_t *dp, vdev_t *vd) ZFS_MODULE_PARAM(zfs, zfs_, scan_vdev_limit, ULONG, ZMOD_RW, "Max bytes in flight per leaf vdev for scrubs and resilvers"); -ZFS_MODULE_PARAM(zfs, zfs_, scrub_min_time_ms, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs, zfs_, scrub_min_time_ms, UINT, ZMOD_RW, "Min millisecs to scrub per txg"); -ZFS_MODULE_PARAM(zfs, zfs_, obsolete_min_time_ms, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs, zfs_, obsolete_min_time_ms, UINT, ZMOD_RW, "Min millisecs to obsolete per txg"); -ZFS_MODULE_PARAM(zfs, zfs_, free_min_time_ms, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs, zfs_, free_min_time_ms, UINT, ZMOD_RW, "Min millisecs to free per txg"); -ZFS_MODULE_PARAM(zfs, zfs_, resilver_min_time_ms, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs, zfs_, resilver_min_time_ms, UINT, ZMOD_RW, "Min millisecs to resilver per txg"); ZFS_MODULE_PARAM(zfs, zfs_, scan_suspend_progress, INT, ZMOD_RW, @@ -4466,28 +4482,28 @@ ZFS_MODULE_PARAM(zfs, zfs_, free_bpobj_enabled, INT, ZMOD_RW, ZFS_MODULE_PARAM(zfs, zfs_, scan_blkstats, INT, ZMOD_RW, "Enable block statistics calculation during scrub"); -ZFS_MODULE_PARAM(zfs, zfs_, scan_mem_lim_fact, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs, zfs_, scan_mem_lim_fact, UINT, ZMOD_RW, "Fraction of RAM for scan hard limit"); -ZFS_MODULE_PARAM(zfs, zfs_, scan_issue_strategy, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs, zfs_, scan_issue_strategy, UINT, ZMOD_RW, "IO issuing strategy during scrubbing. 0 = default, 1 = LBA, 2 = size"); ZFS_MODULE_PARAM(zfs, zfs_, scan_legacy, INT, ZMOD_RW, "Scrub using legacy non-sequential method"); -ZFS_MODULE_PARAM(zfs, zfs_, scan_checkpoint_intval, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs, zfs_, scan_checkpoint_intval, UINT, ZMOD_RW, "Scan progress on-disk checkpointing interval"); ZFS_MODULE_PARAM(zfs, zfs_, scan_max_ext_gap, ULONG, ZMOD_RW, "Max gap in bytes between sequential scrub / resilver I/Os"); -ZFS_MODULE_PARAM(zfs, zfs_, scan_mem_lim_soft_fact, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs, zfs_, scan_mem_lim_soft_fact, UINT, ZMOD_RW, "Fraction of hard limit used as soft limit"); ZFS_MODULE_PARAM(zfs, zfs_, scan_strict_mem_lim, INT, ZMOD_RW, "Tunable to attempt to reduce lock contention"); -ZFS_MODULE_PARAM(zfs, zfs_, scan_fill_weight, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs, zfs_, scan_fill_weight, UINT, ZMOD_RW, "Tunable to adjust bias towards more filled segments during scans"); ZFS_MODULE_PARAM(zfs, zfs_, resilver_disable_defer, INT, ZMOD_RW, diff --git a/module/zfs/fm.c b/module/zfs/fm.c index bc13b5517..32b5cf8fa 100644 --- a/module/zfs/fm.c +++ b/module/zfs/fm.c @@ -68,9 +68,9 @@ #include <sys/condvar.h> #include <sys/zfs_ioctl.h> -static int zfs_zevent_len_max = 512; +static uint_t zfs_zevent_len_max = 512; -static int zevent_len_cur = 0; +static uint_t zevent_len_cur = 0; static int zevent_waiters = 0; static int zevent_flags = 0; @@ -158,7 +158,7 @@ zfs_zevent_drain(zevent_t *ev) } void -zfs_zevent_drain_all(int *count) +zfs_zevent_drain_all(uint_t *count) { zevent_t *ev; @@ -1342,7 +1342,7 @@ fm_init(void) void fm_fini(void) { - int count; + uint_t count; zfs_ereport_fini(); @@ -1370,5 +1370,5 @@ fm_fini(void) } #endif /* _KERNEL */ -ZFS_MODULE_PARAM(zfs_zevent, zfs_zevent_, len_max, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs_zevent, zfs_zevent_, len_max, UINT, ZMOD_RW, "Max event queue length"); diff --git a/module/zfs/metaslab.c b/module/zfs/metaslab.c index 4234f8ebf..efcfeecd7 100644 --- a/module/zfs/metaslab.c 
+++ b/module/zfs/metaslab.c @@ -81,7 +81,7 @@ int zfs_metaslab_sm_blksz_with_log = (1 << 17); * space map representation must be before we compact it on-disk. * Values should be greater than or equal to 100. */ -int zfs_condense_pct = 200; +uint_t zfs_condense_pct = 200; /* * Condensing a metaslab is not guaranteed to actually reduce the amount of @@ -111,7 +111,7 @@ static const int zfs_metaslab_condense_block_threshold = 4; * eligible to allocate on any metaslab group. The default value of 0 means * no metaslab group will be excluded based on this criterion. */ -static int zfs_mg_noalloc_threshold = 0; +static uint_t zfs_mg_noalloc_threshold = 0; /* * Metaslab groups are considered eligible for allocations if their @@ -135,7 +135,7 @@ static int zfs_mg_noalloc_threshold = 0; * enough to avoid hitting the speed bump on pools that are being pushed * to the edge. */ -static int zfs_mg_fragmentation_threshold = 95; +static uint_t zfs_mg_fragmentation_threshold = 95; /* * Allow metaslabs to keep their active state as long as their fragmentation @@ -143,7 +143,7 @@ static int zfs_mg_fragmentation_threshold = 95; * active metaslab that exceeds this threshold will no longer keep its active * status allowing better metaslabs to be selected. */ -static int zfs_metaslab_fragmentation_threshold = 70; +static uint_t zfs_metaslab_fragmentation_threshold = 70; /* * When set will load all metaslabs when pool is first opened. @@ -169,7 +169,7 @@ uint64_t metaslab_df_alloc_threshold = SPA_OLD_MAXBLOCKSIZE; * Once the space map's free space drops below this level we dynamically * switch to using best-fit allocations. */ -int metaslab_df_free_pct = 4; +uint_t metaslab_df_free_pct = 4; /* * Maximum distance to search forward from the last offset. Without this @@ -184,7 +184,7 @@ int metaslab_df_free_pct = 4; * With the default setting of 16MB this is 16*1024 (with ashift=9) or * 2048 (with ashift=12). */ -static int metaslab_df_max_search = 16 * 1024 * 1024; +static uint_t metaslab_df_max_search = 16 * 1024 * 1024; /* * Forces the metaslab_block_picker function to search for at least this many @@ -215,13 +215,13 @@ int metaslab_load_pct = 50; * unloaded sooner. These settings are intended to be generous -- to keep * metaslabs loaded for a long time, reducing the rate of metaslab loading. */ -static int metaslab_unload_delay = 32; -static int metaslab_unload_delay_ms = 10 * 60 * 1000; /* ten minutes */ +static uint_t metaslab_unload_delay = 32; +static uint_t metaslab_unload_delay_ms = 10 * 60 * 1000; /* ten minutes */ /* * Max number of metaslabs per group to preload. */ -int metaslab_preload_limit = 10; +uint_t metaslab_preload_limit = 10; /* * Enable/disable preloading of metaslab. @@ -293,7 +293,7 @@ static unsigned long zfs_metaslab_max_size_cache_sec = 1 * 60 * 60; /* 1 hour */ * a metaslab would take it over this percentage, the oldest selected metaslab * is automatically unloaded. */ -static int zfs_metaslab_mem_limit = 25; +static uint_t zfs_metaslab_mem_limit = 25; /* * Force the per-metaslab range trees to use 64-bit integers to store @@ -337,7 +337,7 @@ static int zfs_metaslab_try_hard_before_gang = B_FALSE; * subsequent metaslab has ms_max_size >60KB (but fewer segments in this * bucket, and therefore a lower weight). 
*/ -static int zfs_metaslab_find_max_tries = 100; +static uint_t zfs_metaslab_find_max_tries = 100; static uint64_t metaslab_weight(metaslab_t *, boolean_t); static void metaslab_set_fragmentation(metaslab_t *, boolean_t); @@ -1672,7 +1672,7 @@ metaslab_df_alloc(metaslab_t *msp, uint64_t size) uint64_t align = size & -size; uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1]; range_tree_t *rt = msp->ms_allocatable; - int free_pct = range_tree_space(rt) * 100 / msp->ms_size; + uint_t free_pct = range_tree_space(rt) * 100 / msp->ms_size; uint64_t offset; ASSERT(MUTEX_HELD(&msp->ms_lock)); @@ -2169,7 +2169,7 @@ metaslab_potentially_evict(metaslab_class_t *mc) uint64_t allmem = arc_all_memory(); uint64_t inuse = spl_kmem_cache_inuse(zfs_btree_leaf_cache); uint64_t size = spl_kmem_cache_entry_size(zfs_btree_leaf_cache); - int tries = 0; + uint_t tries = 0; for (; allmem * zfs_metaslab_mem_limit / 100 < inuse * size && tries < multilist_get_num_sublists(&mc->mc_metaslab_txg_list) * 2; tries++) { @@ -4640,7 +4640,7 @@ find_valid_metaslab(metaslab_group_t *mg, uint64_t activation_weight, if (msp == NULL) msp = avl_nearest(t, idx, AVL_AFTER); - int tries = 0; + uint_t tries = 0; for (; msp != NULL; msp = AVL_NEXT(t, msp)) { int i; @@ -6215,18 +6215,18 @@ ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, debug_unload, INT, ZMOD_RW, ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, preload_enabled, INT, ZMOD_RW, "Preload potential metaslabs during reassessment"); -ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, unload_delay, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, unload_delay, UINT, ZMOD_RW, "Delay in txgs after metaslab was last used before unloading"); -ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, unload_delay_ms, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, unload_delay_ms, UINT, ZMOD_RW, "Delay in milliseconds after metaslab was last used before unloading"); /* BEGIN CSTYLED */ -ZFS_MODULE_PARAM(zfs_mg, zfs_mg_, noalloc_threshold, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs_mg, zfs_mg_, noalloc_threshold, UINT, ZMOD_RW, "Percentage of metaslab group size that should be free to make it " "eligible for allocation"); -ZFS_MODULE_PARAM(zfs_mg, zfs_mg_, fragmentation_threshold, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs_mg, zfs_mg_, fragmentation_threshold, UINT, ZMOD_RW, "Percentage of metaslab group size that should be considered eligible " "for allocations unless all metaslab groups within the metaslab class " "have also crossed this threshold"); @@ -6236,7 +6236,7 @@ ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, fragmentation_factor_enabled, INT, "Use the fragmentation metric to prefer less fragmented metaslabs"); /* END CSTYLED */ -ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, fragmentation_threshold, INT, +ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, fragmentation_threshold, UINT, ZMOD_RW, "Fragmentation for metaslab to allow allocation"); ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, lba_weighting_enabled, INT, ZMOD_RW, @@ -6254,7 +6254,7 @@ ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, switch_threshold, INT, ZMOD_RW, ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, force_ganging, ULONG, ZMOD_RW, "Blocks larger than this size are forced to be gang blocks"); -ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, df_max_search, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, df_max_search, UINT, ZMOD_RW, "Max distance (bytes) to search forward before using size tree"); ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, df_use_largest_segment, INT, ZMOD_RW, @@ -6263,11 +6263,11 @@ ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, 
df_use_largest_segment, INT, ZMOD_RW, ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, max_size_cache_sec, ULONG, ZMOD_RW, "How long to trust the cached max chunk size of a metaslab"); -ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, mem_limit, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, mem_limit, UINT, ZMOD_RW, "Percentage of memory that can be used to store metaslab range trees"); ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, try_hard_before_gang, INT, ZMOD_RW, "Try hard to allocate before ganging"); -ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, find_max_tries, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, find_max_tries, UINT, ZMOD_RW, "Normally only consider this many of the best metaslabs in each vdev"); diff --git a/module/zfs/multilist.c b/module/zfs/multilist.c index fdc5f07c4..b1cdf1c5c 100644 --- a/module/zfs/multilist.c +++ b/module/zfs/multilist.c @@ -24,7 +24,7 @@ * This overrides the number of sublists in each multilist_t, which defaults * to the number of CPUs in the system (see multilist_create()). */ -int zfs_multilist_num_sublists = 0; +uint_t zfs_multilist_num_sublists = 0; /* * Given the object contained on the list, return a pointer to the @@ -69,7 +69,7 @@ multilist_d2l(multilist_t *ml, void *obj) */ static void multilist_create_impl(multilist_t *ml, size_t size, size_t offset, - unsigned int num, multilist_sublist_index_func_t *index_func) + uint_t num, multilist_sublist_index_func_t *index_func) { ASSERT3U(size, >, 0); ASSERT3U(size, >=, offset + sizeof (multilist_node_t)); @@ -104,7 +104,7 @@ void multilist_create(multilist_t *ml, size_t size, size_t offset, multilist_sublist_index_func_t *index_func) { - int num_sublists; + uint_t num_sublists; if (zfs_multilist_num_sublists > 0) { num_sublists = zfs_multilist_num_sublists; @@ -425,5 +425,5 @@ multilist_link_active(multilist_node_t *link) return (list_link_active(link)); } -ZFS_MODULE_PARAM(zfs, zfs_, multilist_num_sublists, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs, zfs_, multilist_num_sublists, UINT, ZMOD_RW, "Number of sublists used in each multilist"); diff --git a/module/zfs/refcount.c b/module/zfs/refcount.c index c65228457..62ec03e10 100644 --- a/module/zfs/refcount.c +++ b/module/zfs/refcount.c @@ -33,7 +33,7 @@ * cpu time. Until its performance is improved it should be manually enabled. */ int reference_tracking_enable = B_FALSE; -static int reference_history = 3; /* tunable */ +static uint_t reference_history = 3; /* tunable */ static kmem_cache_t *reference_cache; static kmem_cache_t *reference_history_cache; @@ -329,7 +329,7 @@ EXPORT_SYMBOL(zfs_refcount_held); ZFS_MODULE_PARAM(zfs, , reference_tracking_enable, INT, ZMOD_RW, "Track reference holders to refcount_t objects"); -ZFS_MODULE_PARAM(zfs, , reference_history, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs, , reference_history, UINT, ZMOD_RW, "Maximum reference holders being tracked"); /* END CSTYLED */ #endif /* ZFS_DEBUG */ diff --git a/module/zfs/spa.c b/module/zfs/spa.c index eeec3b6be..10efb36f0 100644 --- a/module/zfs/spa.c +++ b/module/zfs/spa.c @@ -2301,7 +2301,7 @@ spa_load_verify_done(zio_t *zio) * Maximum number of inflight bytes is the log2 fraction of the arc size. * By default, we set it to 1/16th of the arc. 
*/ -static int spa_load_verify_shift = 4; +static uint_t spa_load_verify_shift = 4; static int spa_load_verify_metadata = B_TRUE; static int spa_load_verify_data = B_TRUE; @@ -9988,7 +9988,7 @@ EXPORT_SYMBOL(spa_prop_clear_bootfs); EXPORT_SYMBOL(spa_event_notify); /* BEGIN CSTYLED */ -ZFS_MODULE_PARAM(zfs_spa, spa_, load_verify_shift, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs_spa, spa_, load_verify_shift, UINT, ZMOD_RW, "log2 fraction of arc that can be used by inflight I/Os when " "verifying pool during import"); /* END CSTYLED */ diff --git a/module/zfs/spa_errlog.c b/module/zfs/spa_errlog.c index e682f6c69..30e1249dd 100644 --- a/module/zfs/spa_errlog.c +++ b/module/zfs/spa_errlog.c @@ -78,7 +78,7 @@ * format when enabling head_errlog. Defaults to 0 which converts * all log entries. */ -static uint32_t spa_upgrade_errlog_limit = 0; +static uint_t spa_upgrade_errlog_limit = 0; /* * Convert a bookmark to a string. @@ -1367,7 +1367,7 @@ EXPORT_SYMBOL(spa_upgrade_errlog); #endif /* BEGIN CSTYLED */ -ZFS_MODULE_PARAM(zfs_spa, spa_, upgrade_errlog_limit, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs_spa, spa_, upgrade_errlog_limit, UINT, ZMOD_RW, "Limit the number of errors which will be upgraded to the new " "on-disk error log when enabling head_errlog"); /* END CSTYLED */ diff --git a/module/zfs/spa_misc.c b/module/zfs/spa_misc.c index daab1d6fc..f7865bc49 100644 --- a/module/zfs/spa_misc.c +++ b/module/zfs/spa_misc.c @@ -343,7 +343,7 @@ const char *zfs_deadman_failmode = "wait"; * the worst case is: * (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 24 */ -int spa_asize_inflation = 24; +uint_t spa_asize_inflation = 24; /* * Normally, we don't allow the last 3.2% (1/(2^spa_slop_shift)) of space in @@ -383,7 +383,7 @@ int spa_asize_inflation = 24; * * See also the comments in zfs_space_check_t. */ -int spa_slop_shift = 5; +uint_t spa_slop_shift = 5; static const uint64_t spa_min_slop = 128ULL * 1024 * 1024; static const uint64_t spa_max_slop = 128ULL * 1024 * 1024 * 1024; static const int spa_allocators = 4; @@ -428,7 +428,7 @@ static int zfs_user_indirect_is_special = B_TRUE; * Once we allocate 100 - zfs_special_class_metadata_reserve_pct we only * let metadata into the class. 
*/ -static int zfs_special_class_metadata_reserve_pct = 25; +static uint_t zfs_special_class_metadata_reserve_pct = 25; /* * ========================================================================== @@ -1657,7 +1657,7 @@ spa_altroot(spa_t *spa, char *buf, size_t buflen) (void) strlcpy(buf, spa->spa_root, buflen); } -int +uint32_t spa_sync_pass(spa_t *spa) { return (spa->spa_sync_pass); @@ -2928,7 +2928,7 @@ ZFS_MODULE_PARAM(zfs_deadman, zfs_deadman_, checktime_ms, ULONG, ZMOD_RW, ZFS_MODULE_PARAM(zfs_deadman, zfs_deadman_, enabled, INT, ZMOD_RW, "Enable deadman timer"); -ZFS_MODULE_PARAM(zfs_spa, spa_, asize_inflation, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs_spa, spa_, asize_inflation, UINT, ZMOD_RW, "SPA size estimate multiplication factor"); ZFS_MODULE_PARAM(zfs, zfs_, ddt_data_is_special, INT, ZMOD_RW, @@ -2950,10 +2950,10 @@ ZFS_MODULE_PARAM_CALL(zfs_deadman, zfs_deadman_, ziotime_ms, param_set_deadman_ziotime, param_get_ulong, ZMOD_RW, "IO expiration time in milliseconds"); -ZFS_MODULE_PARAM(zfs, zfs_, special_class_metadata_reserve_pct, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs, zfs_, special_class_metadata_reserve_pct, UINT, ZMOD_RW, "Small file blocks in special vdevs depends on this much " "free space available"); /* END CSTYLED */ ZFS_MODULE_PARAM_CALL(zfs_spa, spa_, slop_shift, param_set_slop_shift, - param_get_int, ZMOD_RW, "Reserved free space in pool"); + param_get_uint, ZMOD_RW, "Reserved free space in pool"); diff --git a/module/zfs/spa_stats.c b/module/zfs/spa_stats.c index 59844d5d6..17ed2a620 100644 --- a/module/zfs/spa_stats.c +++ b/module/zfs/spa_stats.c @@ -28,7 +28,7 @@ /* * Keeps stats on last N reads per spa_t, disabled by default. */ -static int zfs_read_history = B_FALSE; +static uint_t zfs_read_history = B_FALSE; /* * Include cache hits in history, disabled by default. @@ -38,12 +38,12 @@ static int zfs_read_history_hits = B_FALSE; /* * Keeps stats on the last 100 txgs by default. */ -static int zfs_txg_history = 100; +static uint_t zfs_txg_history = 100; /* * Keeps stats on the last N MMP updates, disabled by default. */ -int zfs_multihost_history = B_FALSE; +static uint_t zfs_multihost_history = B_FALSE; /* * ========================================================================== @@ -1012,14 +1012,14 @@ spa_stats_destroy(spa_t *spa) spa_guid_destroy(spa); } -ZFS_MODULE_PARAM(zfs, zfs_, read_history, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs, zfs_, read_history, UINT, ZMOD_RW, "Historical statistics for the last N reads"); ZFS_MODULE_PARAM(zfs, zfs_, read_history_hits, INT, ZMOD_RW, "Include cache hits in read history"); -ZFS_MODULE_PARAM(zfs_txg, zfs_txg_, history, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs_txg, zfs_txg_, history, UINT, ZMOD_RW, "Historical statistics for the last N txgs"); -ZFS_MODULE_PARAM(zfs_multihost, zfs_multihost_, history, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs_multihost, zfs_multihost_, history, UINT, ZMOD_RW, "Historical statistics for last N multihost writes"); diff --git a/module/zfs/txg.c b/module/zfs/txg.c index 6e2b8b010..29eb9e8e8 100644 --- a/module/zfs/txg.c +++ b/module/zfs/txg.c @@ -111,7 +111,7 @@ static __attribute__((noreturn)) void txg_sync_thread(void *arg); static __attribute__((noreturn)) void txg_quiesce_thread(void *arg); -int zfs_txg_timeout = 5; /* max seconds worth of delta per txg */ +uint_t zfs_txg_timeout = 5; /* max seconds worth of delta per txg */ /* * Prepare the txg subsystem. 
@@ -1069,5 +1069,5 @@ EXPORT_SYMBOL(txg_wait_callbacks);
 EXPORT_SYMBOL(txg_stalled);
 EXPORT_SYMBOL(txg_sync_waiting);
 
-ZFS_MODULE_PARAM(zfs_txg, zfs_txg_, timeout, INT, ZMOD_RW,
+ZFS_MODULE_PARAM(zfs_txg, zfs_txg_, timeout, UINT, ZMOD_RW,
 	"Max seconds worth of delta per txg");
diff --git a/module/zfs/vdev.c b/module/zfs/vdev.c
index 048616c25..53c767e3b 100644
--- a/module/zfs/vdev.c
+++ b/module/zfs/vdev.c
@@ -81,22 +81,22 @@
  * 1 << (spa_slop_shift + 1), on small pools the usable space may be reduced
  * (by more than 1<<spa_slop_shift) due to the embedded slog metaslab.
  */
-static int zfs_embedded_slog_min_ms = 64;
+static uint_t zfs_embedded_slog_min_ms = 64;
 
 /* default target for number of metaslabs per top-level vdev */
-static int zfs_vdev_default_ms_count = 200;
+static uint_t zfs_vdev_default_ms_count = 200;
 
 /* minimum number of metaslabs per top-level vdev */
-static int zfs_vdev_min_ms_count = 16;
+static uint_t zfs_vdev_min_ms_count = 16;
 
 /* practical upper limit of total metaslabs per top-level vdev */
-static int zfs_vdev_ms_count_limit = 1ULL << 17;
+static uint_t zfs_vdev_ms_count_limit = 1ULL << 17;
 
 /* lower limit for metaslab size (512M) */
-static int zfs_vdev_default_ms_shift = 29;
+static uint_t zfs_vdev_default_ms_shift = 29;
 
 /* upper limit for metaslab size (16G) */
-static const int zfs_vdev_max_ms_shift = 34;
+static const uint_t zfs_vdev_max_ms_shift = 34;
 
 int vdev_validate_skip = B_FALSE;
 
@@ -6062,16 +6062,16 @@ EXPORT_SYMBOL(vdev_online);
 EXPORT_SYMBOL(vdev_offline);
 EXPORT_SYMBOL(vdev_clear);
 
-ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, default_ms_count, INT, ZMOD_RW,
+ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, default_ms_count, UINT, ZMOD_RW,
 	"Target number of metaslabs per top-level vdev");
 
-ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, default_ms_shift, INT, ZMOD_RW,
+ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, default_ms_shift, UINT, ZMOD_RW,
 	"Default limit for metaslab size");
 
-ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, min_ms_count, INT, ZMOD_RW,
+ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, min_ms_count, UINT, ZMOD_RW,
 	"Minimum number of metaslabs per top-level vdev");
 
-ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, ms_count_limit, INT, ZMOD_RW,
+ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, ms_count_limit, UINT, ZMOD_RW,
 	"Practical upper limit of total metaslabs per top-level vdev");
 
 ZFS_MODULE_PARAM(zfs, zfs_, slow_io_events_per_second, UINT, ZMOD_RW,
@@ -6092,7 +6092,7 @@ ZFS_MODULE_PARAM(zfs_vdev, vdev_, validate_skip, INT, ZMOD_RW,
 ZFS_MODULE_PARAM(zfs, zfs_, nocacheflush, INT, ZMOD_RW,
 	"Disable cache flushes");
 
-ZFS_MODULE_PARAM(zfs, zfs_, embedded_slog_min_ms, INT, ZMOD_RW,
+ZFS_MODULE_PARAM(zfs, zfs_, embedded_slog_min_ms, UINT, ZMOD_RW,
 	"Minimum number of metaslabs required to dedicate one for log blocks");
 
 /* BEGIN CSTYLED */
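The metaslab shift limits in the vdev.c hunk above encode sizes as powers of two (29 for the 512M lower bound, 34 for the 16G upper bound). A quick standalone check of that arithmetic, assuming nothing beyond standard C:

#include <stdio.h>

int main(void)
{
	unsigned int default_ms_shift = 29;	/* 512M per the comment */
	unsigned int max_ms_shift = 34;		/* 16G per the comment */

	printf("1 << %u = %llu MiB\n", default_ms_shift,
	    (1ULL << default_ms_shift) / (1024 * 1024));
	printf("1 << %u = %llu GiB\n", max_ms_shift,
	    (1ULL << max_ms_shift) / (1024 * 1024 * 1024));
	return (0);
}

A shift count is exactly the kind of value where a stray negative setting would be undefined behavior, which is part of why these tunables now reject one outright.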
diff --git a/module/zfs/vdev_cache.c b/module/zfs/vdev_cache.c
index d1b96b5e2..f0a17600d 100644
--- a/module/zfs/vdev_cache.c
+++ b/module/zfs/vdev_cache.c
@@ -83,9 +83,9 @@
  * it by setting the zfs_vdev_cache_size to zero.  Note that Solaris 11
  * has made these same changes.
  */
-static int zfs_vdev_cache_max = 1 << 14;	/* 16KB */
-static int zfs_vdev_cache_size = 0;
-static int zfs_vdev_cache_bshift = 16;
+static uint_t zfs_vdev_cache_max = 1 << 14;	/* 16KB */
+static uint_t zfs_vdev_cache_size = 0;
+static uint_t zfs_vdev_cache_bshift = 16;
 
 #define	VCBS (1 << zfs_vdev_cache_bshift)	/* 64KB */
 
@@ -426,11 +426,11 @@ vdev_cache_stat_fini(void)
 	}
 }
 
-ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, cache_max, INT, ZMOD_RW,
+ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, cache_max, UINT, ZMOD_RW,
 	"Inflate reads small than max");
 
-ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, cache_size, INT, ZMOD_RD,
+ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, cache_size, UINT, ZMOD_RD,
 	"Total size of the per-disk cache");
 
-ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, cache_bshift, INT, ZMOD_RW,
+ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, cache_bshift, UINT, ZMOD_RW,
 	"Shift size to inflate reads too");
diff --git a/module/zfs/vdev_indirect.c b/module/zfs/vdev_indirect.c
index 9189d3f31..0ca0c245e 100644
--- a/module/zfs/vdev_indirect.c
+++ b/module/zfs/vdev_indirect.c
@@ -181,7 +181,7 @@ static int zfs_condense_indirect_vdevs_enable = B_TRUE;
  * condenses. Higher values will condense less often (causing less
  * i/o); lower values will reduce the mapping size more quickly.
  */
-static int zfs_condense_indirect_obsolete_pct = 25;
+static uint_t zfs_condense_indirect_obsolete_pct = 25;
 
 /*
  * Condense if the obsolete space map takes up more than this amount of
@@ -204,7 +204,7 @@ static unsigned long zfs_condense_min_mapping_bytes = 128 * 1024;
  * complete too quickly). If used to reduce the performance impact of
 * condensing in production, a maximum value of 1 should be sufficient.
  */
-static int zfs_condense_indirect_commit_entry_delay_ms = 0;
+static uint_t zfs_condense_indirect_commit_entry_delay_ms = 0;
 
 /*
  * If an indirect split block contains more than this many possible unique
@@ -214,7 +214,7 @@ static int zfs_condense_indirect_commit_entry_delay_ms = 0;
  * copies to participate fairly in the reconstruction when all combinations
  * cannot be checked and prevents repeated use of one bad copy.
  */
-int zfs_reconstruct_indirect_combinations_max = 4096;
+uint_t zfs_reconstruct_indirect_combinations_max = 4096;
 
 /*
  * Enable to simulate damaged segments and validate reconstruction. This
@@ -1886,7 +1886,7 @@ EXPORT_SYMBOL(vdev_obsolete_sm_object);
 ZFS_MODULE_PARAM(zfs_condense, zfs_condense_, indirect_vdevs_enable, INT,
 	ZMOD_RW, "Whether to attempt condensing indirect vdev mappings");
 
-ZFS_MODULE_PARAM(zfs_condense, zfs_condense_, indirect_obsolete_pct, INT,
+ZFS_MODULE_PARAM(zfs_condense, zfs_condense_, indirect_obsolete_pct, UINT,
 	ZMOD_RW, "Minimum obsolete percent of bytes in the mapping "
 	"to attempt condensing");
 
@@ -1900,11 +1900,11 @@ ZFS_MODULE_PARAM(zfs_condense, zfs_condense_, max_obsolete_bytes, ULONG,
 	"Minimum size obsolete spacemap to attempt condensing");
 
 ZFS_MODULE_PARAM(zfs_condense, zfs_condense_, indirect_commit_entry_delay_ms,
-	INT, ZMOD_RW,
+	UINT, ZMOD_RW,
 	"Used by tests to ensure certain actions happen in the middle of a "
 	"condense. A maximum value of 1 should be sufficient.");
 
 ZFS_MODULE_PARAM(zfs_reconstruct, zfs_reconstruct_, indirect_combinations_max,
-	INT, ZMOD_RW,
+	UINT, ZMOD_RW,
 	"Maximum number of combinations when reconstructing split segments");
 /* END CSTYLED */
diff --git a/module/zfs/vdev_queue.c b/module/zfs/vdev_queue.c
index 7acb9915c..1acb89cea 100644
--- a/module/zfs/vdev_queue.c
+++ b/module/zfs/vdev_queue.c
@@ -121,7 +121,7 @@
  * The maximum number of i/os active to each device.  Ideally, this will be >=
 * the sum of each queue's max_active.
 */
-uint32_t zfs_vdev_max_active = 1000;
+uint_t zfs_vdev_max_active = 1000;
 
 /*
  * Per-queue limits on the number of i/os active to each device.  If the
@@ -141,24 +141,24 @@ uint32_t zfs_vdev_max_active = 1000;
  * more quickly, but reads and writes to have higher latency and lower
  * throughput.
  */
-static uint32_t zfs_vdev_sync_read_min_active = 10;
-static uint32_t zfs_vdev_sync_read_max_active = 10;
-static uint32_t zfs_vdev_sync_write_min_active = 10;
-static uint32_t zfs_vdev_sync_write_max_active = 10;
-static uint32_t zfs_vdev_async_read_min_active = 1;
-/* */ uint32_t zfs_vdev_async_read_max_active = 3;
-static uint32_t zfs_vdev_async_write_min_active = 2;
-/* */ uint32_t zfs_vdev_async_write_max_active = 10;
-static uint32_t zfs_vdev_scrub_min_active = 1;
-static uint32_t zfs_vdev_scrub_max_active = 3;
-static uint32_t zfs_vdev_removal_min_active = 1;
-static uint32_t zfs_vdev_removal_max_active = 2;
-static uint32_t zfs_vdev_initializing_min_active = 1;
-static uint32_t zfs_vdev_initializing_max_active = 1;
-static uint32_t zfs_vdev_trim_min_active = 1;
-static uint32_t zfs_vdev_trim_max_active = 2;
-static uint32_t zfs_vdev_rebuild_min_active = 1;
-static uint32_t zfs_vdev_rebuild_max_active = 3;
+static uint_t zfs_vdev_sync_read_min_active = 10;
+static uint_t zfs_vdev_sync_read_max_active = 10;
+static uint_t zfs_vdev_sync_write_min_active = 10;
+static uint_t zfs_vdev_sync_write_max_active = 10;
+static uint_t zfs_vdev_async_read_min_active = 1;
+/* */ uint_t zfs_vdev_async_read_max_active = 3;
+static uint_t zfs_vdev_async_write_min_active = 2;
+/* */ uint_t zfs_vdev_async_write_max_active = 10;
+static uint_t zfs_vdev_scrub_min_active = 1;
+static uint_t zfs_vdev_scrub_max_active = 3;
+static uint_t zfs_vdev_removal_min_active = 1;
+static uint_t zfs_vdev_removal_max_active = 2;
+static uint_t zfs_vdev_initializing_min_active = 1;
+static uint_t zfs_vdev_initializing_max_active = 1;
+static uint_t zfs_vdev_trim_min_active = 1;
+static uint_t zfs_vdev_trim_max_active = 2;
+static uint_t zfs_vdev_rebuild_min_active = 1;
+static uint_t zfs_vdev_rebuild_max_active = 3;
 
 /*
  * When the pool has less than zfs_vdev_async_write_active_min_dirty_percent
@@ -167,8 +167,8 @@ static uint32_t zfs_vdev_rebuild_max_active = 3;
  * zfs_vdev_async_write_max_active. The value is linearly interpolated
  * between min and max.
  */
-int zfs_vdev_async_write_active_min_dirty_percent = 30;
-int zfs_vdev_async_write_active_max_dirty_percent = 60;
+uint_t zfs_vdev_async_write_active_min_dirty_percent = 30;
+uint_t zfs_vdev_async_write_active_max_dirty_percent = 60;
 
 /*
  * For non-interactive I/O (scrub, resilver, removal, initialize and rebuild),
@@ -198,10 +198,10 @@ static uint_t zfs_vdev_nia_credit = 5;
  * we include spans of optional I/Os to aid aggregation at the disk even when
  * they aren't able to help us aggregate at this level.
  */
-static int zfs_vdev_aggregation_limit = 1 << 20;
-static int zfs_vdev_aggregation_limit_non_rotating = SPA_OLD_MAXBLOCKSIZE;
-static int zfs_vdev_read_gap_limit = 32 << 10;
-static int zfs_vdev_write_gap_limit = 4 << 10;
+static uint_t zfs_vdev_aggregation_limit = 1 << 20;
+static uint_t zfs_vdev_aggregation_limit_non_rotating = SPA_OLD_MAXBLOCKSIZE;
+static uint_t zfs_vdev_read_gap_limit = 32 << 10;
+static uint_t zfs_vdev_write_gap_limit = 4 << 10;
 
 /*
 * Define the queue depth percentage for each top-level.  This percentage is
@@ -214,9 +214,9 @@ static int zfs_vdev_write_gap_limit = 4 << 10;
  * to 30 allocations per device.
  */
 #ifdef _KERNEL
-int zfs_vdev_queue_depth_pct = 1000;
+uint_t zfs_vdev_queue_depth_pct = 1000;
 #else
-int zfs_vdev_queue_depth_pct = 300;
+uint_t zfs_vdev_queue_depth_pct = 300;
 #endif
 
 /*
@@ -226,14 +226,14 @@ int zfs_vdev_queue_depth_pct = 300;
  * we assume that the average allocation size is 4k, so we need the queue depth
  * to be 32 per allocator to get good aggregation of sequential writes.
  */
-int zfs_vdev_def_queue_depth = 32;
+uint_t zfs_vdev_def_queue_depth = 32;
 
 /*
  * Allow TRIM I/Os to be aggregated. This should normally not be needed since
  * TRIM I/O for extents up to zfs_trim_extent_bytes_max (128M) can be submitted
  * by the TRIM code in zfs_trim.c.
  */
-static int zfs_vdev_aggregate_trim = 0;
+static uint_t zfs_vdev_aggregate_trim = 0;
 
 static int
 vdev_queue_offset_compare(const void *x1, const void *x2)
@@ -281,7 +281,7 @@ vdev_queue_timestamp_compare(const void *x1, const void *x2)
 	return (TREE_PCMP(z1, z2));
 }
 
-static int
+static uint_t
 vdev_queue_class_min_active(vdev_queue_t *vq, zio_priority_t p)
 {
 	switch (p) {
@@ -313,10 +313,10 @@ vdev_queue_class_min_active(vdev_queue_t *vq, zio_priority_t p)
 	}
 }
 
-static int
+static uint_t
 vdev_queue_max_async_writes(spa_t *spa)
 {
-	int writes;
+	uint_t writes;
 	uint64_t dirty = 0;
 	dsl_pool_t *dp = spa_get_dsl(spa);
 	uint64_t min_bytes = zfs_dirty_data_max *
@@ -359,7 +359,7 @@ vdev_queue_max_async_writes(spa_t *spa)
 	return (writes);
 }
 
-static int
+static uint_t
 vdev_queue_class_max_active(spa_t *spa, vdev_queue_t *vq, zio_priority_t p)
 {
 	switch (p) {
@@ -1031,89 +1031,89 @@ vdev_queue_last_offset(vdev_t *vd)
 	return (vd->vdev_queue.vq_last_offset);
 }
 
-ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, aggregation_limit, INT, ZMOD_RW,
+ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, aggregation_limit, UINT, ZMOD_RW,
 	"Max vdev I/O aggregation size");
 
-ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, aggregation_limit_non_rotating, INT,
+ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, aggregation_limit_non_rotating, UINT,
 	ZMOD_RW, "Max vdev I/O aggregation size for non-rotating media");
 
-ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, aggregate_trim, INT, ZMOD_RW,
+ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, aggregate_trim, UINT, ZMOD_RW,
 	"Allow TRIM I/O to be aggregated");
 
-ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, read_gap_limit, INT, ZMOD_RW,
+ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, read_gap_limit, UINT, ZMOD_RW,
 	"Aggregate read I/O over gap");
 
-ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, write_gap_limit, INT, ZMOD_RW,
+ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, write_gap_limit, UINT, ZMOD_RW,
 	"Aggregate write I/O over gap");
 
-ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, max_active, INT, ZMOD_RW,
+ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, max_active, UINT, ZMOD_RW,
 	"Maximum number of active I/Os per vdev");
 
-ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, async_write_active_max_dirty_percent, INT,
-	ZMOD_RW, "Async write concurrency max threshold");
+ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, async_write_active_max_dirty_percent,
+	UINT, ZMOD_RW, "Async write concurrency max threshold");
 
-ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, async_write_active_min_dirty_percent, INT,
-	ZMOD_RW, "Async write concurrency min threshold");
+ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, async_write_active_min_dirty_percent,
+	UINT, ZMOD_RW, "Async write concurrency min threshold");
 
-ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, async_read_max_active, INT, ZMOD_RW,
+ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, async_read_max_active, UINT, ZMOD_RW,
 	"Max active async read I/Os per vdev");
read I/Os per vdev"); -ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, async_read_min_active, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, async_read_min_active, UINT, ZMOD_RW, "Min active async read I/Os per vdev"); -ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, async_write_max_active, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, async_write_max_active, UINT, ZMOD_RW, "Max active async write I/Os per vdev"); -ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, async_write_min_active, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, async_write_min_active, UINT, ZMOD_RW, "Min active async write I/Os per vdev"); -ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, initializing_max_active, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, initializing_max_active, UINT, ZMOD_RW, "Max active initializing I/Os per vdev"); -ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, initializing_min_active, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, initializing_min_active, UINT, ZMOD_RW, "Min active initializing I/Os per vdev"); -ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, removal_max_active, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, removal_max_active, UINT, ZMOD_RW, "Max active removal I/Os per vdev"); -ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, removal_min_active, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, removal_min_active, UINT, ZMOD_RW, "Min active removal I/Os per vdev"); -ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, scrub_max_active, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, scrub_max_active, UINT, ZMOD_RW, "Max active scrub I/Os per vdev"); -ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, scrub_min_active, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, scrub_min_active, UINT, ZMOD_RW, "Min active scrub I/Os per vdev"); -ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, sync_read_max_active, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, sync_read_max_active, UINT, ZMOD_RW, "Max active sync read I/Os per vdev"); -ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, sync_read_min_active, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, sync_read_min_active, UINT, ZMOD_RW, "Min active sync read I/Os per vdev"); -ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, sync_write_max_active, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, sync_write_max_active, UINT, ZMOD_RW, "Max active sync write I/Os per vdev"); -ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, sync_write_min_active, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, sync_write_min_active, UINT, ZMOD_RW, "Min active sync write I/Os per vdev"); -ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, trim_max_active, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, trim_max_active, UINT, ZMOD_RW, "Max active trim/discard I/Os per vdev"); -ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, trim_min_active, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, trim_min_active, UINT, ZMOD_RW, "Min active trim/discard I/Os per vdev"); -ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, rebuild_max_active, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, rebuild_max_active, UINT, ZMOD_RW, "Max active rebuild I/Os per vdev"); -ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, rebuild_min_active, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, rebuild_min_active, UINT, ZMOD_RW, "Min active rebuild I/Os per vdev"); -ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, nia_credit, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, nia_credit, UINT, ZMOD_RW, "Number of non-interactive I/Os to allow in sequence"); -ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, nia_delay, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, nia_delay, UINT, ZMOD_RW, "Number 
of non-interactive I/Os before _max_active"); -ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, queue_depth_pct, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, queue_depth_pct, UINT, ZMOD_RW, "Queue depth percentage for each top-level vdev"); diff --git a/module/zfs/vdev_removal.c b/module/zfs/vdev_removal.c index a48b47a50..53592dbfd 100644 --- a/module/zfs/vdev_removal.c +++ b/module/zfs/vdev_removal.c @@ -94,7 +94,7 @@ typedef struct vdev_copy_arg { * doing a device removal. This determines how much i/o we can have * in flight concurrently. */ -static const int zfs_remove_max_copy_bytes = 64 * 1024 * 1024; +static const uint_t zfs_remove_max_copy_bytes = 64 * 1024 * 1024; /* * The largest contiguous segment that we will attempt to allocate when @@ -104,7 +104,7 @@ static const int zfs_remove_max_copy_bytes = 64 * 1024 * 1024; * * See also the accessor function spa_remove_max_segment(). */ -int zfs_remove_max_segment = SPA_MAXBLOCKSIZE; +uint_t zfs_remove_max_segment = SPA_MAXBLOCKSIZE; /* * Ignore hard IO errors during device removal. When set if a device @@ -130,7 +130,7 @@ static int zfs_removal_ignore_errors = 0; * - we'll do larger allocations, which may fail and fall back on smaller * allocations */ -int vdev_removal_max_span = 32 * 1024; +uint_t vdev_removal_max_span = 32 * 1024; /* * This is used by the test suite so that it can ensure that certain @@ -2545,14 +2545,14 @@ spa_removal_get_stats(spa_t *spa, pool_removal_stat_t *prs) ZFS_MODULE_PARAM(zfs_vdev, zfs_, removal_ignore_errors, INT, ZMOD_RW, "Ignore hard IO errors when removing device"); -ZFS_MODULE_PARAM(zfs_vdev, zfs_, remove_max_segment, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs_vdev, zfs_, remove_max_segment, UINT, ZMOD_RW, "Largest contiguous segment to allocate when removing device"); -ZFS_MODULE_PARAM(zfs_vdev, vdev_, removal_max_span, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs_vdev, vdev_, removal_max_span, UINT, ZMOD_RW, "Largest span of free chunks a remap segment can span"); /* BEGIN CSTYLED */ -ZFS_MODULE_PARAM(zfs_vdev, zfs_, removal_suspend_progress, INT, ZMOD_RW, +ZFS_MODULE_PARAM(zfs_vdev, zfs_, removal_suspend_progress, UINT, ZMOD_RW, "Pause device removal after this many bytes are copied " "(debug use only - causes removal to hang)"); /* END CSTYLED */ diff --git a/module/zfs/zfs_ioctl.c b/module/zfs/zfs_ioctl.c index bafe6fe7d..620238b72 100644 --- a/module/zfs/zfs_ioctl.c +++ b/module/zfs/zfs_ioctl.c @@ -6355,7 +6355,7 @@ zfs_ioc_events_next(zfs_cmd_t *zc) static int zfs_ioc_events_clear(zfs_cmd_t *zc) { - int count; + uint_t count; zfs_zevent_drain_all(&count); zc->zc_cookie = count; diff --git a/module/zfs/zil.c b/module/zfs/zil.c index 4864e0cca..dc5b8018e 100644 --- a/module/zfs/zil.c +++ b/module/zfs/zil.c @@ -90,7 +90,7 @@ * committed to stable storage. Please refer to the zil_commit_waiter() * function (and the comments within it) for more details. */ -static int zfs_commit_timeout_pct = 5; +static uint_t zfs_commit_timeout_pct = 5; /* * See zil.h for more information about these fields. @@ -1642,7 +1642,7 @@ static const struct { * initialized. Otherwise this should not be used directly; see * zl_max_block_size instead. */ -static int zil_maxblocksize = SPA_OLD_MAXBLOCKSIZE; +static uint_t zil_maxblocksize = SPA_OLD_MAXBLOCKSIZE; /* * Start a log block write and advance to the next log block. 
@@ -3936,7 +3936,7 @@ EXPORT_SYMBOL(zil_sums_init);
 EXPORT_SYMBOL(zil_sums_fini);
 EXPORT_SYMBOL(zil_kstat_values_update);
 
-ZFS_MODULE_PARAM(zfs, zfs_, commit_timeout_pct, INT, ZMOD_RW,
+ZFS_MODULE_PARAM(zfs, zfs_, commit_timeout_pct, UINT, ZMOD_RW,
 	"ZIL block open timeout percentage");
 
 ZFS_MODULE_PARAM(zfs_zil, zil_, replay_disable, INT, ZMOD_RW,
@@ -3948,5 +3948,5 @@ ZFS_MODULE_PARAM(zfs_zil, zil_, nocacheflush, INT, ZMOD_RW,
 ZFS_MODULE_PARAM(zfs_zil, zil_, slog_bulk, ULONG, ZMOD_RW,
 	"Limit in bytes slog sync writes per commit");
 
-ZFS_MODULE_PARAM(zfs_zil, zil_, maxblocksize, INT, ZMOD_RW,
+ZFS_MODULE_PARAM(zfs_zil, zil_, maxblocksize, UINT, ZMOD_RW,
 	"Limit in bytes of ZIL log block size");
diff --git a/module/zfs/zio.c b/module/zfs/zio.c
index 7b55450ca..cc2b61f25 100644
--- a/module/zfs/zio.c
+++ b/module/zfs/zio.c
@@ -83,7 +83,7 @@ static uint64_t zio_buf_cache_frees[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
 #endif
 
 /* Mark IOs as "slow" if they take longer than 30 seconds */
-static int zio_slow_io_ms = (30 * MILLISEC);
+static uint_t zio_slow_io_ms = (30 * MILLISEC);
 
 #define	BP_SPANB(indblkshift, level) \
 	(((uint64_t)1) << ((level) * ((indblkshift) - SPA_BLKPTRSHIFT)))
@@ -114,9 +114,15 @@ static int zio_slow_io_ms = (30 * MILLISEC);
  * fragmented systems, which may have very few free segments of this size,
  * and may need to load new metaslabs to satisfy 128K allocations.
  */
-int zfs_sync_pass_deferred_free = 2; /* defer frees starting in this pass */
-static int zfs_sync_pass_dont_compress = 8; /* don't compress s. i. t. p. */
-static int zfs_sync_pass_rewrite = 2; /* rewrite new bps s. i. t. p. */
+
+/* defer frees starting in this pass */
+uint_t zfs_sync_pass_deferred_free = 2;
+
+/* don't compress starting in this pass */
+static uint_t zfs_sync_pass_dont_compress = 8;
+
+/* rewrite new bps starting in this pass */
+static uint_t zfs_sync_pass_rewrite = 2;
 
 /*
  * An allocating zio is one that either currently has the DVA allocate
@@ -1640,7 +1646,7 @@ zio_write_compress(zio_t *zio)
 	blkptr_t *bp = zio->io_bp;
 	uint64_t lsize = zio->io_lsize;
 	uint64_t psize = zio->io_size;
-	int pass = 1;
+	uint32_t pass = 1;
 
 	/*
 	 * If our children haven't all reached the ready stage,
@@ -5051,13 +5057,13 @@ ZFS_MODULE_PARAM(zfs_zio, zio_, slow_io_ms, INT, ZMOD_RW,
 ZFS_MODULE_PARAM(zfs_zio, zio_, requeue_io_start_cut_in_line, INT, ZMOD_RW,
 	"Prioritize requeued I/O");
 
-ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_deferred_free, INT, ZMOD_RW,
+ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_deferred_free, UINT, ZMOD_RW,
 	"Defer frees starting in this pass");
 
-ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_dont_compress, INT, ZMOD_RW,
+ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_dont_compress, UINT, ZMOD_RW,
 	"Don't compress starting in this pass");
 
-ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_rewrite, INT, ZMOD_RW,
+ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_rewrite, UINT, ZMOD_RW,
 	"Rewrite new bps starting in this pass");
 
 ZFS_MODULE_PARAM(zfs_zio, zio_, dva_throttle_enabled, INT, ZMOD_RW,
diff --git a/module/zstd/zfs_zstd.c b/module/zstd/zfs_zstd.c
index 413518989..1bb95e460 100644
--- a/module/zstd/zfs_zstd.c
+++ b/module/zstd/zfs_zstd.c
@@ -50,7 +50,7 @@
 #include "lib/zstd.h"
 #include "lib/common/zstd_errors.h"
 
-static int zstd_earlyabort_pass = 1;
+static uint_t zstd_earlyabort_pass = 1;
 static int zstd_cutoff_level = ZIO_ZSTD_LEVEL_3;
 static unsigned int zstd_abort_size = (128 * 1024);
 
@@ -897,7 +897,7 @@ module_init(zstd_init);
 module_exit(zstd_fini);
 #endif
 
-ZFS_MODULE_PARAM(zfs, zstd_, earlyabort_pass, INT, ZMOD_RW,
+ZFS_MODULE_PARAM(zfs, zstd_, earlyabort_pass, UINT, ZMOD_RW,
 	"Enable early abort attempts when using zstd");
 ZFS_MODULE_PARAM(zfs, zstd_, abort_size, UINT, ZMOD_RW,
 	"Minimal size of block to attempt early abort");
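A practical way to see the effect of these conversions, sketched under the assumption of a Linux host with the zfs module loaded and the standard /sys/module/zfs/parameters layout (run as root; zfs_txg_timeout is simply one of the tunables touched above): the unsigned parameter parser should now refuse a negative setting instead of storing it.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/module/zfs/parameters/zfs_txg_timeout";
	int fd = open(path, O_WRONLY);

	if (fd == -1) {
		perror("open");
		return (1);
	}
	/* An unsigned parameter is expected to reject "-1" with EINVAL. */
	if (write(fd, "-1", 2) == -1)
		printf("negative value rejected: %s\n", strerror(errno));
	else
		printf("unexpected: negative value was accepted\n");
	(void) close(fd);
	return (0);
}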