path: root/module/zfs/dmu_send.c
author     наб <[email protected]>          2022-01-15 00:37:55 +0100
committer  GitHub <[email protected]>   2022-01-14 15:37:55 -0800
commit     18168da727427e28914235137daebe06c23069cd (patch)
tree       71a8769a2a12dd4add4f7abfb5a1e4f51f09cf18 /module/zfs/dmu_send.c
parent     7adc19009817303af10c8b3b7617850994cfb9e2 (diff)
module/*.ko: prune .data, global .rodata
Evaluated every variable that lives in .data (and globals in .rodata) in the kernel modules, and constified/eliminated/localised them appropriately. This means that all read-only data is now actually read-only data, and, if possible, at file scope. A lot of previously-global symbols became inlinable (and inlined!) constants. Probably not in a big Wowee Performance Moment, but hey.

Reviewed-by: Brian Behlendorf <[email protected]>
Signed-off-by: Ahelenia Ziemiańska <[email protected]>
Closes #12899
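To illustrate the kind of change the commit message describes, here is a minimal sketch (not code from the patch; zfs_example_tunable and zfs_example_constant are hypothetical names):

    /* Before: external linkage and writable, so the variable lives in
     * .data, its symbol is exported from the module, and every use is a
     * memory load, since another translation unit could modify it. */
    int zfs_example_tunable = 1;

    /* After: static drops the exported symbol; for a variable that is
     * never written, const additionally moves it to .rodata and lets
     * the compiler fold its value into each use as an immediate, i.e.
     * the "inlinable (and inlined!) constants" of the commit message. */
    static const int zfs_example_constant = 1;

    int
    use_tunables(void)
    {
    	/* The second operand compiles down to the literal 1. */
    	return (zfs_example_tunable + zfs_example_constant);
    }

Section placement can be checked with objdump -t or readelf -s on the built object: the exported int lands in .data, while the static const lands in .rodata (or disappears entirely once every use has been folded).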
Diffstat (limited to 'module/zfs/dmu_send.c')
-rw-r--r--  module/zfs/dmu_send.c | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/module/zfs/dmu_send.c b/module/zfs/dmu_send.c
index 6cff7fd58..fbb1947a5 100644
--- a/module/zfs/dmu_send.c
+++ b/module/zfs/dmu_send.c
@@ -67,7 +67,7 @@
#endif
/* Set this tunable to TRUE to replace corrupt data with 0x2f5baddb10c */
-int zfs_send_corrupt_data = B_FALSE;
+static int zfs_send_corrupt_data = B_FALSE;
/*
* This tunable controls the amount of data (measured in bytes) that will be
* prefetched by zfs send. If the main thread is blocking on reads that haven't
@@ -75,7 +75,7 @@ int zfs_send_corrupt_data = B_FALSE;
* thread is issuing new reads because the prefetches have fallen out of the
* cache, this may need to be decreased.
*/
-int zfs_send_queue_length = SPA_MAXBLOCKSIZE;
+static int zfs_send_queue_length = SPA_MAXBLOCKSIZE;
/*
* This tunable controls the length of the queues that zfs send worker threads
* use to communicate. If the send_main_thread is blocking on these queues,
@@ -83,7 +83,7 @@ int zfs_send_queue_length = SPA_MAXBLOCKSIZE;
* at the start of a send as these threads consume all the available IO
* resources, this variable may need to be decreased.
*/
-int zfs_send_no_prefetch_queue_length = 1024 * 1024;
+static int zfs_send_no_prefetch_queue_length = 1024 * 1024;
/*
* These tunables control the fill fraction of the queues by zfs send. The fill
* fraction controls the frequency with which threads have to be cv_signaled.
@@ -91,19 +91,19 @@ int zfs_send_no_prefetch_queue_length = 1024 * 1024;
* down. If the queues empty before the signalled thread can catch up, then
* these should be tuned up.
*/
-int zfs_send_queue_ff = 20;
-int zfs_send_no_prefetch_queue_ff = 20;
+static int zfs_send_queue_ff = 20;
+static int zfs_send_no_prefetch_queue_ff = 20;
/*
* Use this to override the recordsize calculation for fast zfs send estimates.
*/
-int zfs_override_estimate_recordsize = 0;
+static int zfs_override_estimate_recordsize = 0;
/* Set this tunable to FALSE to disable setting of DRR_FLAG_FREERECORDS */
-int zfs_send_set_freerecords_bit = B_TRUE;
+static const boolean_t zfs_send_set_freerecords_bit = B_TRUE;
/* Set this tunable to FALSE to disable sending unmodified spill blocks. */
-int zfs_send_unmodified_spill_blocks = B_TRUE;
+static int zfs_send_unmodified_spill_blocks = B_TRUE;
static inline boolean_t
overflow_multiply(uint64_t a, uint64_t b, uint64_t *c)
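The hunk's trailing context stops at the overflow_multiply() signature. An overflow-checked multiply with that signature is conventionally written with a divide-back test; the following is a sketch under that assumption, using the boolean_t/B_TRUE/B_FALSE types from the surrounding ZFS headers (the actual body in dmu_send.c may differ):

    static inline boolean_t
    overflow_multiply(uint64_t a, uint64_t b, uint64_t *c)
    {
    	/* Unsigned multiplication wraps modulo 2^64 rather than trapping. */
    	uint64_t result = a * b;

    	/* Divide back: if b != 0 and result / b != a, the product wrapped. */
    	if (b != 0 && result / b != a)
    		return (B_FALSE);

    	*c = result;
    	return (B_TRUE);
    }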