Diffstat (limited to 'module')
-rw-r--r--  module/icp/algs/aes/aes_impl.c  2
-rw-r--r--  module/icp/algs/modes/gcm.c  2
-rw-r--r--  module/os/freebsd/spl/spl_kmem.c  2
-rw-r--r--  module/os/linux/spl/spl-kmem-cache.c  2
-rw-r--r--  module/zcommon/zfs_comutil.c  8
-rw-r--r--  module/zfs/arc.c  21
-rw-r--r--  module/zfs/dbuf.c  22
-rw-r--r--  module/zfs/dmu.c  29
-rw-r--r--  module/zfs/dmu_object.c  4
-rw-r--r--  module/zfs/dmu_objset.c  18
-rw-r--r--  module/zfs/dmu_recv.c  2
-rw-r--r--  module/zfs/dnode.c  12
-rw-r--r--  module/zfs/dsl_bookmark.c  4
-rw-r--r--  module/zfs/dsl_crypt.c  4
-rw-r--r--  module/zfs/dsl_dataset.c  43
-rw-r--r--  module/zfs/dsl_dir.c  2
-rw-r--r--  module/zfs/dsl_pool.c  10
-rw-r--r--  module/zfs/dsl_userhold.c  4
-rw-r--r--  module/zfs/mmp.c  2
-rw-r--r--  module/zfs/rrwlock.c  22
-rw-r--r--  module/zfs/spa.c  25
-rw-r--r--  module/zfs/spa_config.c  2
-rw-r--r--  module/zfs/spa_errlog.c  10
-rw-r--r--  module/zfs/spa_misc.c  12
-rw-r--r--  module/zfs/vdev.c  2
-rw-r--r--  module/zfs/vdev_raidz_math.c  2
-rw-r--r--  module/zfs/vdev_rebuild.c  2
-rw-r--r--  module/zfs/vdev_removal.c  7
-rw-r--r--  module/zfs/zfs_fuid.c  16
-rw-r--r--  module/zfs/zil.c  2
-rw-r--r--  module/zfs/zio_checksum.c  2
-rw-r--r--  module/zfs/zio_compress.c  2
32 files changed, 150 insertions, 149 deletions
diff --git a/module/icp/algs/aes/aes_impl.c b/module/icp/algs/aes/aes_impl.c
index f518a54a6..16afc2572 100644
--- a/module/icp/algs/aes/aes_impl.c
+++ b/module/icp/algs/aes/aes_impl.c
@@ -337,7 +337,7 @@ aes_impl_init(void)
}
static const struct {
- char *name;
+ const char *name;
uint32_t sel;
} aes_impl_opts[] = {
{ "cycle", IMPL_CYCLE },
diff --git a/module/icp/algs/modes/gcm.c b/module/icp/algs/modes/gcm.c
index ee2100b7f..e6a631c1a 100644
--- a/module/icp/algs/modes/gcm.c
+++ b/module/icp/algs/modes/gcm.c
@@ -907,7 +907,7 @@ gcm_impl_init(void)
}
static const struct {
- char *name;
+ const char *name;
uint32_t sel;
} gcm_impl_opts[] = {
{ "cycle", IMPL_CYCLE },
diff --git a/module/os/freebsd/spl/spl_kmem.c b/module/os/freebsd/spl/spl_kmem.c
index ee8f1d851..ca9a67756 100644
--- a/module/os/freebsd/spl/spl_kmem.c
+++ b/module/os/freebsd/spl/spl_kmem.c
@@ -156,7 +156,7 @@ kmem_std_destructor(void *mem, int size __unused, void *private)
}
kmem_cache_t *
-kmem_cache_create(char *name, size_t bufsize, size_t align,
+kmem_cache_create(const char *name, size_t bufsize, size_t align,
int (*constructor)(void *, void *, int), void (*destructor)(void *, void *),
void (*reclaim)(void *) __unused, void *private, vmem_t *vmp, int cflags)
{
diff --git a/module/os/linux/spl/spl-kmem-cache.c b/module/os/linux/spl/spl-kmem-cache.c
index 33aaad653..ba4ca49a2 100644
--- a/module/os/linux/spl/spl-kmem-cache.c
+++ b/module/os/linux/spl/spl-kmem-cache.c
@@ -679,7 +679,7 @@ spl_magazine_destroy(spl_kmem_cache_t *skc)
* KMC_NODEBUG Disable debugging (unsupported)
*/
spl_kmem_cache_t *
-spl_kmem_cache_create(char *name, size_t size, size_t align,
+spl_kmem_cache_create(const char *name, size_t size, size_t align,
spl_kmem_ctor_t ctor, spl_kmem_dtor_t dtor, void *reclaim,
void *priv, void *vmp, int flags)
{
diff --git a/module/zcommon/zfs_comutil.c b/module/zcommon/zfs_comutil.c
index 020e7e86c..c8f4dccd3 100644
--- a/module/zcommon/zfs_comutil.c
+++ b/module/zcommon/zfs_comutil.c
@@ -68,7 +68,7 @@ zfs_allocatable_devs(nvlist_t *nv)
* Are there special vdevs?
*/
boolean_t
-zfs_special_devs(nvlist_t *nv, char *type)
+zfs_special_devs(nvlist_t *nv, const char *type)
{
char *bias;
uint_t c;
@@ -84,11 +84,9 @@ zfs_special_devs(nvlist_t *nv, char *type)
&bias) == 0) {
if (strcmp(bias, VDEV_ALLOC_BIAS_SPECIAL) == 0 ||
strcmp(bias, VDEV_ALLOC_BIAS_DEDUP) == 0) {
- if (type != NULL && strcmp(bias, type) == 0) {
+ if (type == NULL ||
+ (type != NULL && strcmp(bias, type) == 0))
return (B_TRUE);
- } else if (type == NULL) {
- return (B_TRUE);
- }
}
}
}
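
The rewritten condition above collapses the old two-branch if/else-if into one short-circuit expression. A small standalone sketch of the equivalence, with a hypothetical helper (bias_matches is not a ZFS function): because type == NULL is evaluated first, the strcmp() only runs when type is non-NULL, so the extra type != NULL guard retained in the hunk is already implied.

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

static bool
bias_matches(const char *bias, const char *type)
{
	/*
	 * Same result as the old two-branch form:
	 *   if (type != NULL && strcmp(bias, type) == 0) return true;
	 *   else if (type == NULL) return true;
	 */
	return (type == NULL || strcmp(bias, type) == 0);
}

int
main(void)
{
	return (bias_matches("special", NULL) &&
	    bias_matches("special", "special") ? 0 : 1);
}
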
diff --git a/module/zfs/arc.c b/module/zfs/arc.c
index af42670cc..919081c25 100644
--- a/module/zfs/arc.c
+++ b/module/zfs/arc.c
@@ -2280,7 +2280,7 @@ arc_evictable_space_decrement(arc_buf_hdr_t *hdr, arc_state_t *state)
* it is not evictable.
*/
static void
-add_reference(arc_buf_hdr_t *hdr, void *tag)
+add_reference(arc_buf_hdr_t *hdr, const void *tag)
{
arc_state_t *state;
@@ -2740,8 +2740,8 @@ arc_can_share(arc_buf_hdr_t *hdr, arc_buf_t *buf)
*/
static int
arc_buf_alloc_impl(arc_buf_hdr_t *hdr, spa_t *spa, const zbookmark_phys_t *zb,
- void *tag, boolean_t encrypted, boolean_t compressed, boolean_t noauth,
- boolean_t fill, arc_buf_t **ret)
+ const void *tag, boolean_t encrypted, boolean_t compressed,
+ boolean_t noauth, boolean_t fill, arc_buf_t **ret)
{
arc_buf_t *buf;
arc_fill_flags_t flags = ARC_FILL_LOCKED;
@@ -2841,7 +2841,7 @@ arc_buf_alloc_impl(arc_buf_hdr_t *hdr, spa_t *spa, const zbookmark_phys_t *zb,
return (0);
}
-static char *arc_onloan_tag = "onloan";
+static const char *arc_onloan_tag = "onloan";
static inline void
arc_loaned_bytes_update(int64_t delta)
@@ -3589,7 +3589,8 @@ arc_convert_to_raw(arc_buf_t *buf, uint64_t dsobj, boolean_t byteorder,
* The buf is returned thawed since we expect the consumer to modify it.
*/
arc_buf_t *
-arc_alloc_buf(spa_t *spa, void *tag, arc_buf_contents_t type, int32_t size)
+arc_alloc_buf(spa_t *spa, const void *tag, arc_buf_contents_t type,
+ int32_t size)
{
arc_buf_hdr_t *hdr = arc_hdr_alloc(spa_load_guid(spa), size, size,
B_FALSE, ZIO_COMPRESS_OFF, 0, type);
@@ -3607,8 +3608,8 @@ arc_alloc_buf(spa_t *spa, void *tag, arc_buf_contents_t type, int32_t size)
* for bufs containing metadata.
*/
arc_buf_t *
-arc_alloc_compressed_buf(spa_t *spa, void *tag, uint64_t psize, uint64_t lsize,
- enum zio_compress compression_type, uint8_t complevel)
+arc_alloc_compressed_buf(spa_t *spa, const void *tag, uint64_t psize,
+ uint64_t lsize, enum zio_compress compression_type, uint8_t complevel)
{
ASSERT3U(lsize, >, 0);
ASSERT3U(lsize, >=, psize);
@@ -3635,9 +3636,9 @@ arc_alloc_compressed_buf(spa_t *spa, void *tag, uint64_t psize, uint64_t lsize,
}
arc_buf_t *
-arc_alloc_raw_buf(spa_t *spa, void *tag, uint64_t dsobj, boolean_t byteorder,
- const uint8_t *salt, const uint8_t *iv, const uint8_t *mac,
- dmu_object_type_t ot, uint64_t psize, uint64_t lsize,
+arc_alloc_raw_buf(spa_t *spa, const void *tag, uint64_t dsobj,
+ boolean_t byteorder, const uint8_t *salt, const uint8_t *iv,
+ const uint8_t *mac, dmu_object_type_t ot, uint64_t psize, uint64_t lsize,
enum zio_compress compression_type, uint8_t complevel)
{
arc_buf_hdr_t *hdr;
diff --git a/module/zfs/dbuf.c b/module/zfs/dbuf.c
index d10e87652..00b01611d 100644
--- a/module/zfs/dbuf.c
+++ b/module/zfs/dbuf.c
@@ -1297,7 +1297,7 @@ dbuf_whichblock(const dnode_t *dn, const int64_t level, const uint64_t offset)
* used when modifying or reading db_blkptr.
*/
db_lock_type_t
-dmu_buf_lock_parent(dmu_buf_impl_t *db, krw_t rw, void *tag)
+dmu_buf_lock_parent(dmu_buf_impl_t *db, krw_t rw, const void *tag)
{
enum db_lock_type ret = DLT_NONE;
if (db->db_parent != NULL) {
@@ -1322,7 +1322,7 @@ dmu_buf_lock_parent(dmu_buf_impl_t *db, krw_t rw, void *tag)
* panic if we didn't pass the lock type in.
*/
void
-dmu_buf_unlock_parent(dmu_buf_impl_t *db, db_lock_type_t type, void *tag)
+dmu_buf_unlock_parent(dmu_buf_impl_t *db, db_lock_type_t type, const void *tag)
{
if (type == DLT_PARENT)
rw_exit(&db->db_parent->db_rwlock);
@@ -1522,7 +1522,7 @@ dbuf_read_verify_dnode_crypt(dmu_buf_impl_t *db, uint32_t flags)
*/
static int
dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags,
- db_lock_type_t dblt, void *tag)
+ db_lock_type_t dblt, const void *tag)
{
dnode_t *dn;
zbookmark_phys_t zb;
@@ -3532,7 +3532,7 @@ dbuf_hold_copy(dnode_t *dn, dmu_buf_impl_t *db)
int
dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid,
boolean_t fail_sparse, boolean_t fail_uncached,
- void *tag, dmu_buf_impl_t **dbp)
+ const void *tag, dmu_buf_impl_t **dbp)
{
dmu_buf_impl_t *db, *parent = NULL;
@@ -3637,13 +3637,13 @@ dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid,
}
dmu_buf_impl_t *
-dbuf_hold(dnode_t *dn, uint64_t blkid, void *tag)
+dbuf_hold(dnode_t *dn, uint64_t blkid, const void *tag)
{
return (dbuf_hold_level(dn, 0, blkid, tag));
}
dmu_buf_impl_t *
-dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, void *tag)
+dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, const void *tag)
{
dmu_buf_impl_t *db;
int err = dbuf_hold_impl(dn, level, blkid, FALSE, FALSE, tag, &db);
@@ -3684,7 +3684,7 @@ dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx)
#pragma weak dmu_buf_add_ref = dbuf_add_ref
void
-dbuf_add_ref(dmu_buf_impl_t *db, void *tag)
+dbuf_add_ref(dmu_buf_impl_t *db, const void *tag)
{
int64_t holds = zfs_refcount_add(&db->db_holds, tag);
VERIFY3S(holds, >, 1);
@@ -3693,7 +3693,7 @@ dbuf_add_ref(dmu_buf_impl_t *db, void *tag)
#pragma weak dmu_buf_try_add_ref = dbuf_try_add_ref
boolean_t
dbuf_try_add_ref(dmu_buf_t *db_fake, objset_t *os, uint64_t obj, uint64_t blkid,
- void *tag)
+ const void *tag)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
dmu_buf_impl_t *found_db;
@@ -3722,14 +3722,14 @@ dbuf_try_add_ref(dmu_buf_t *db_fake, objset_t *os, uint64_t obj, uint64_t blkid,
* dnode's parent dbuf evicting its dnode handles.
*/
void
-dbuf_rele(dmu_buf_impl_t *db, void *tag)
+dbuf_rele(dmu_buf_impl_t *db, const void *tag)
{
mutex_enter(&db->db_mtx);
dbuf_rele_and_unlock(db, tag, B_FALSE);
}
void
-dmu_buf_rele(dmu_buf_t *db, void *tag)
+dmu_buf_rele(dmu_buf_t *db, const void *tag)
{
dbuf_rele((dmu_buf_impl_t *)db, tag);
}
@@ -3748,7 +3748,7 @@ dmu_buf_rele(dmu_buf_t *db, void *tag)
*
*/
void
-dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag, boolean_t evicting)
+dbuf_rele_and_unlock(dmu_buf_impl_t *db, const void *tag, boolean_t evicting)
{
int64_t holds;
uint64_t size;
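
All of the dbuf hold/release prototypes above take the holder tag as const void *. The tag is an opaque cookie: it is stored and compared for reference-count bookkeeping, never dereferenced or written through, which is why the const qualifier costs nothing. A minimal sketch of that convention with hypothetical names (toy_refcount_t is not the ZFS zfs_refcount_t API):

#include <assert.h>
#include <stddef.h>

typedef struct toy_refcount {
	long		rc_count;
	const void	*rc_last_tag;	/* recorded for debugging only */
} toy_refcount_t;

static long
toy_refcount_add(toy_refcount_t *rc, const void *tag)
{
	rc->rc_last_tag = tag;
	return (++rc->rc_count);
}

static long
toy_refcount_remove(toy_refcount_t *rc, const void *tag)
{
	(void) tag;		/* real code matches the tag to a tracked entry */
	assert(rc->rc_count > 0);
	return (--rc->rc_count);
}

/* A string literal works as a tag with no cast once the parameter is const. */
static const char *toy_tag = "toy_hold";

int
main(void)
{
	toy_refcount_t rc = { 0, NULL };
	toy_refcount_add(&rc, toy_tag);
	toy_refcount_remove(&rc, toy_tag);
	return ((int)rc.rc_count);
}
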
diff --git a/module/zfs/dmu.c b/module/zfs/dmu.c
index 7d8b2c96b..e6008b3bf 100644
--- a/module/zfs/dmu.c
+++ b/module/zfs/dmu.c
@@ -160,7 +160,7 @@ const dmu_object_byteswap_info_t dmu_ot_byteswap[DMU_BSWAP_NUMFUNCS] = {
static int
dmu_buf_hold_noread_by_dnode(dnode_t *dn, uint64_t offset,
- void *tag, dmu_buf_t **dbp)
+ const void *tag, dmu_buf_t **dbp)
{
uint64_t blkid;
dmu_buf_impl_t *db;
@@ -180,7 +180,7 @@ dmu_buf_hold_noread_by_dnode(dnode_t *dn, uint64_t offset,
}
int
dmu_buf_hold_noread(objset_t *os, uint64_t object, uint64_t offset,
- void *tag, dmu_buf_t **dbp)
+ const void *tag, dmu_buf_t **dbp)
{
dnode_t *dn;
uint64_t blkid;
@@ -207,7 +207,7 @@ dmu_buf_hold_noread(objset_t *os, uint64_t object, uint64_t offset,
int
dmu_buf_hold_by_dnode(dnode_t *dn, uint64_t offset,
- void *tag, dmu_buf_t **dbp, int flags)
+ const void *tag, dmu_buf_t **dbp, int flags)
{
int err;
int db_flags = DB_RF_CANFAIL;
@@ -232,7 +232,7 @@ dmu_buf_hold_by_dnode(dnode_t *dn, uint64_t offset,
int
dmu_buf_hold(objset_t *os, uint64_t object, uint64_t offset,
- void *tag, dmu_buf_t **dbp, int flags)
+ const void *tag, dmu_buf_t **dbp, int flags)
{
int err;
int db_flags = DB_RF_CANFAIL;
@@ -342,7 +342,7 @@ dmu_rm_spill(objset_t *os, uint64_t object, dmu_tx_t *tx)
* has not yet been allocated a new bonus dbuf a will be allocated.
* Returns ENOENT, EIO, or 0.
*/
-int dmu_bonus_hold_by_dnode(dnode_t *dn, void *tag, dmu_buf_t **dbp,
+int dmu_bonus_hold_by_dnode(dnode_t *dn, const void *tag, dmu_buf_t **dbp,
uint32_t flags)
{
dmu_buf_impl_t *db;
@@ -389,7 +389,7 @@ int dmu_bonus_hold_by_dnode(dnode_t *dn, void *tag, dmu_buf_t **dbp,
}
int
-dmu_bonus_hold(objset_t *os, uint64_t object, void *tag, dmu_buf_t **dbp)
+dmu_bonus_hold(objset_t *os, uint64_t object, const void *tag, dmu_buf_t **dbp)
{
dnode_t *dn;
int error;
@@ -414,7 +414,8 @@ dmu_bonus_hold(objset_t *os, uint64_t object, void *tag, dmu_buf_t **dbp)
* dmu_spill_hold_existing() should be used.
*/
int
-dmu_spill_hold_by_dnode(dnode_t *dn, uint32_t flags, void *tag, dmu_buf_t **dbp)
+dmu_spill_hold_by_dnode(dnode_t *dn, uint32_t flags, const void *tag,
+ dmu_buf_t **dbp)
{
dmu_buf_impl_t *db = NULL;
int err;
@@ -442,7 +443,7 @@ dmu_spill_hold_by_dnode(dnode_t *dn, uint32_t flags, void *tag, dmu_buf_t **dbp)
}
int
-dmu_spill_hold_existing(dmu_buf_t *bonus, void *tag, dmu_buf_t **dbp)
+dmu_spill_hold_existing(dmu_buf_t *bonus, const void *tag, dmu_buf_t **dbp)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)bonus;
dnode_t *dn;
@@ -471,7 +472,7 @@ dmu_spill_hold_existing(dmu_buf_t *bonus, void *tag, dmu_buf_t **dbp)
}
int
-dmu_spill_hold_by_bonus(dmu_buf_t *bonus, uint32_t flags, void *tag,
+dmu_spill_hold_by_bonus(dmu_buf_t *bonus, uint32_t flags, const void *tag,
dmu_buf_t **dbp)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)bonus;
@@ -498,7 +499,8 @@ dmu_spill_hold_by_bonus(dmu_buf_t *bonus, uint32_t flags, void *tag,
*/
int
dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset, uint64_t length,
- boolean_t read, void *tag, int *numbufsp, dmu_buf_t ***dbpp, uint32_t flags)
+ boolean_t read, const void *tag, int *numbufsp, dmu_buf_t ***dbpp,
+ uint32_t flags)
{
dmu_buf_t **dbp;
zstream_t *zs = NULL;
@@ -619,7 +621,8 @@ dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset, uint64_t length,
int
dmu_buf_hold_array(objset_t *os, uint64_t object, uint64_t offset,
- uint64_t length, int read, void *tag, int *numbufsp, dmu_buf_t ***dbpp)
+ uint64_t length, int read, const void *tag, int *numbufsp,
+ dmu_buf_t ***dbpp)
{
dnode_t *dn;
int err;
@@ -638,7 +641,7 @@ dmu_buf_hold_array(objset_t *os, uint64_t object, uint64_t offset,
int
dmu_buf_hold_array_by_bonus(dmu_buf_t *db_fake, uint64_t offset,
- uint64_t length, boolean_t read, void *tag, int *numbufsp,
+ uint64_t length, boolean_t read, const void *tag, int *numbufsp,
dmu_buf_t ***dbpp)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
@@ -655,7 +658,7 @@ dmu_buf_hold_array_by_bonus(dmu_buf_t *db_fake, uint64_t offset,
}
void
-dmu_buf_rele_array(dmu_buf_t **dbp_fake, int numbufs, void *tag)
+dmu_buf_rele_array(dmu_buf_t **dbp_fake, int numbufs, const void *tag)
{
int i;
dmu_buf_impl_t **dbp = (dmu_buf_impl_t **)dbp_fake;
diff --git a/module/zfs/dmu_object.c b/module/zfs/dmu_object.c
index 12cdbd68b..d954c7d9b 100644
--- a/module/zfs/dmu_object.c
+++ b/module/zfs/dmu_object.c
@@ -46,7 +46,7 @@ int dmu_object_alloc_chunk_shift = 7;
static uint64_t
dmu_object_alloc_impl(objset_t *os, dmu_object_type_t ot, int blocksize,
int indirect_blockshift, dmu_object_type_t bonustype, int bonuslen,
- int dnodesize, dnode_t **allocated_dnode, void *tag, dmu_tx_t *tx)
+ int dnodesize, dnode_t **allocated_dnode, const void *tag, dmu_tx_t *tx)
{
uint64_t object;
uint64_t L1_dnode_count = DNODES_PER_BLOCK <<
@@ -255,7 +255,7 @@ dmu_object_alloc_dnsize(objset_t *os, dmu_object_type_t ot, int blocksize,
uint64_t
dmu_object_alloc_hold(objset_t *os, dmu_object_type_t ot, int blocksize,
int indirect_blockshift, dmu_object_type_t bonustype, int bonuslen,
- int dnodesize, dnode_t **allocated_dnode, void *tag, dmu_tx_t *tx)
+ int dnodesize, dnode_t **allocated_dnode, const void *tag, dmu_tx_t *tx)
{
return (dmu_object_alloc_impl(os, ot, blocksize, indirect_blockshift,
bonustype, bonuslen, dnodesize, allocated_dnode, tag, tx));
diff --git a/module/zfs/dmu_objset.c b/module/zfs/dmu_objset.c
index 97a81ed8d..324ee8d83 100644
--- a/module/zfs/dmu_objset.c
+++ b/module/zfs/dmu_objset.c
@@ -714,7 +714,7 @@ dmu_objset_from_ds(dsl_dataset_t *ds, objset_t **osp)
* can be held at a time.
*/
int
-dmu_objset_hold_flags(const char *name, boolean_t decrypt, void *tag,
+dmu_objset_hold_flags(const char *name, boolean_t decrypt, const void *tag,
objset_t **osp)
{
dsl_pool_t *dp;
@@ -742,14 +742,14 @@ dmu_objset_hold_flags(const char *name, boolean_t decrypt, void *tag,
}
int
-dmu_objset_hold(const char *name, void *tag, objset_t **osp)
+dmu_objset_hold(const char *name, const void *tag, objset_t **osp)
{
return (dmu_objset_hold_flags(name, B_FALSE, tag, osp));
}
static int
dmu_objset_own_impl(dsl_dataset_t *ds, dmu_objset_type_t type,
- boolean_t readonly, boolean_t decrypt, void *tag, objset_t **osp)
+ boolean_t readonly, boolean_t decrypt, const void *tag, objset_t **osp)
{
(void) tag;
@@ -789,7 +789,7 @@ dmu_objset_own_impl(dsl_dataset_t *ds, dmu_objset_type_t type,
*/
int
dmu_objset_own(const char *name, dmu_objset_type_t type,
- boolean_t readonly, boolean_t decrypt, void *tag, objset_t **osp)
+ boolean_t readonly, boolean_t decrypt, const void *tag, objset_t **osp)
{
dsl_pool_t *dp;
dsl_dataset_t *ds;
@@ -834,7 +834,7 @@ dmu_objset_own(const char *name, dmu_objset_type_t type,
int
dmu_objset_own_obj(dsl_pool_t *dp, uint64_t obj, dmu_objset_type_t type,
- boolean_t readonly, boolean_t decrypt, void *tag, objset_t **osp)
+ boolean_t readonly, boolean_t decrypt, const void *tag, objset_t **osp)
{
dsl_dataset_t *ds;
int err;
@@ -855,7 +855,7 @@ dmu_objset_own_obj(dsl_pool_t *dp, uint64_t obj, dmu_objset_type_t type,
}
void
-dmu_objset_rele_flags(objset_t *os, boolean_t decrypt, void *tag)
+dmu_objset_rele_flags(objset_t *os, boolean_t decrypt, const void *tag)
{
ds_hold_flags_t flags;
dsl_pool_t *dp = dmu_objset_pool(os);
@@ -866,7 +866,7 @@ dmu_objset_rele_flags(objset_t *os, boolean_t decrypt, void *tag)
}
void
-dmu_objset_rele(objset_t *os, void *tag)
+dmu_objset_rele(objset_t *os, const void *tag)
{
dmu_objset_rele_flags(os, B_FALSE, tag);
}
@@ -884,7 +884,7 @@ dmu_objset_rele(objset_t *os, void *tag)
*/
void
dmu_objset_refresh_ownership(dsl_dataset_t *ds, dsl_dataset_t **newds,
- boolean_t decrypt, void *tag)
+ boolean_t decrypt, const void *tag)
{
dsl_pool_t *dp;
char name[ZFS_MAX_DATASET_NAME_LEN];
@@ -904,7 +904,7 @@ dmu_objset_refresh_ownership(dsl_dataset_t *ds, dsl_dataset_t **newds,
}
void
-dmu_objset_disown(objset_t *os, boolean_t decrypt, void *tag)
+dmu_objset_disown(objset_t *os, boolean_t decrypt, const void *tag)
{
ds_hold_flags_t flags;
diff --git a/module/zfs/dmu_recv.c b/module/zfs/dmu_recv.c
index c90df1eb4..5ac862519 100644
--- a/module/zfs/dmu_recv.c
+++ b/module/zfs/dmu_recv.c
@@ -68,7 +68,7 @@ static int zfs_recv_queue_length = SPA_MAXBLOCKSIZE;
static int zfs_recv_queue_ff = 20;
static int zfs_recv_write_batch_size = 1024 * 1024;
-static void *const dmu_recv_tag = "dmu_recv_tag";
+static const void *const dmu_recv_tag = "dmu_recv_tag";
const char *const recv_clone_name = "%recv";
static int receive_read_payload_and_next_header(dmu_recv_cookie_t *ra, int len,
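
A quick reading aid for the declaration above, stated as general C rather than anything ZFS-specific: in const void *const the first const qualifies the bytes being pointed at and the second qualifies the pointer itself, so neither the tag string nor the pointer can be modified after initialization. A tiny sketch with a hypothetical name:

static const void *const example_tag = "example_tag";
/* The pointed-to data is const, and example_tag cannot be reassigned. */
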
diff --git a/module/zfs/dnode.c b/module/zfs/dnode.c
index af0ee1b0f..39e9ca1b8 100644
--- a/module/zfs/dnode.c
+++ b/module/zfs/dnode.c
@@ -1268,7 +1268,7 @@ dnode_buf_evict_async(void *dbu)
*/
int
dnode_hold_impl(objset_t *os, uint64_t object, int flag, int slots,
- void *tag, dnode_t **dnp)
+ const void *tag, dnode_t **dnp)
{
int epb, idx, err;
int drop_struct_lock = FALSE;
@@ -1562,7 +1562,7 @@ dnode_hold_impl(objset_t *os, uint64_t object, int flag, int slots,
* Return held dnode if the object is allocated, NULL if not.
*/
int
-dnode_hold(objset_t *os, uint64_t object, void *tag, dnode_t **dnp)
+dnode_hold(objset_t *os, uint64_t object, const void *tag, dnode_t **dnp)
{
return (dnode_hold_impl(os, object, DNODE_MUST_BE_ALLOCATED, 0, tag,
dnp));
@@ -1574,7 +1574,7 @@ dnode_hold(objset_t *os, uint64_t object, void *tag, dnode_t **dnp)
* new reference.
*/
boolean_t
-dnode_add_ref(dnode_t *dn, void *tag)
+dnode_add_ref(dnode_t *dn, const void *tag)
{
mutex_enter(&dn->dn_mtx);
if (zfs_refcount_is_zero(&dn->dn_holds)) {
@@ -1587,14 +1587,14 @@ dnode_add_ref(dnode_t *dn, void *tag)
}
void
-dnode_rele(dnode_t *dn, void *tag)
+dnode_rele(dnode_t *dn, const void *tag)
{
mutex_enter(&dn->dn_mtx);
dnode_rele_and_unlock(dn, tag, B_FALSE);
}
void
-dnode_rele_and_unlock(dnode_t *dn, void *tag, boolean_t evicting)
+dnode_rele_and_unlock(dnode_t *dn, const void *tag, boolean_t evicting)
{
uint64_t refs;
/* Get while the hold prevents the dnode from moving. */
@@ -2029,7 +2029,7 @@ dnode_dirty_l1range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
}
void
-dnode_set_dirtyctx(dnode_t *dn, dmu_tx_t *tx, void *tag)
+dnode_set_dirtyctx(dnode_t *dn, dmu_tx_t *tx, const void *tag)
{
/*
* Don't set dirtyctx to SYNC if we're just modifying this as we
diff --git a/module/zfs/dsl_bookmark.c b/module/zfs/dsl_bookmark.c
index 5d9b674f9..4e44b167f 100644
--- a/module/zfs/dsl_bookmark.c
+++ b/module/zfs/dsl_bookmark.c
@@ -1291,7 +1291,7 @@ dsl_bookmark_ds_destroyed(dsl_dataset_t *ds, dmu_tx_t *tx)
* The empty-string name can't be in the AVL, and it compares
* before any entries with this TXG.
*/
- search.dbn_name = "";
+ search.dbn_name = (char *)"";
VERIFY3P(avl_find(&head->ds_bookmarks, &search, &idx), ==, NULL);
dsl_bookmark_node_t *dbn =
avl_nearest(&head->ds_bookmarks, idx, AVL_AFTER);
@@ -1418,7 +1418,7 @@ dsl_bookmark_next_changed(dsl_dataset_t *head, dsl_dataset_t *origin,
* The empty-string name can't be in the AVL, and it compares
* before any entries with this TXG.
*/
- search.dbn_name = "";
+ search.dbn_name = (char *)"";
VERIFY3P(avl_find(&head->ds_bookmarks, &search, &idx), ==, NULL);
dsl_bookmark_node_t *dbn =
avl_nearest(&head->ds_bookmarks, idx, AVL_AFTER);
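
The (char *)"" casts above are worth a note: assuming the dbn_name member of the search node remains a plain char * (it is only used as an AVL lookup key here), assigning a string literal directly would warn once the build enables -Wwrite-strings, so the explicit cast documents the intentional removal of const. A tiny standalone sketch with a hypothetical struct:

struct search_node {
	char *name;	/* member stays non-const */
};

int
main(void)
{
	struct search_node search;
	/* search.name = ""; would warn under -Wwrite-strings */
	search.name = (char *)"";	/* intentional, explicit cast */
	return (search.name[0] == '\0' ? 0 : 1);
}
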
diff --git a/module/zfs/dsl_crypt.c b/module/zfs/dsl_crypt.c
index d802eb6b6..44e0083d2 100644
--- a/module/zfs/dsl_crypt.c
+++ b/module/zfs/dsl_crypt.c
@@ -1506,7 +1506,7 @@ spa_keystore_change_key_sync(void *arg, dmu_tx_t *tx)
dsl_crypto_params_t *dcp = skcka->skcka_cp;
dsl_wrapping_key_t *wkey = NULL, *found_wkey;
dsl_wrapping_key_t wkey_search;
- char *keylocation = dcp->cp_keylocation;
+ const char *keylocation = dcp->cp_keylocation;
uint64_t rddobj, new_rddobj;
/* create and initialize the wrapping key */
@@ -2229,7 +2229,7 @@ dsl_crypto_recv_raw_key_sync(dsl_dataset_t *ds, nvlist_t *nvl, dmu_tx_t *tx)
uint8_t *keydata, *hmac_keydata, *iv, *mac;
uint64_t crypt, key_guid, keyformat, iters, salt;
uint64_t version = ZIO_CRYPT_KEY_CURRENT_VERSION;
- char *keylocation = "prompt";
+ const char *keylocation = "prompt";
/* lookup the values we need to create the DSL Crypto Key */
crypt = fnvlist_lookup_uint64(nvl, DSL_CRYPTO_KEY_CRYPTO_SUITE);
diff --git a/module/zfs/dsl_dataset.c b/module/zfs/dsl_dataset.c
index 805bae12d..106721894 100644
--- a/module/zfs/dsl_dataset.c
+++ b/module/zfs/dsl_dataset.c
@@ -551,7 +551,7 @@ dsl_dataset_snap_remove(dsl_dataset_t *ds, const char *name, dmu_tx_t *tx,
}
boolean_t
-dsl_dataset_try_add_ref(dsl_pool_t *dp, dsl_dataset_t *ds, void *tag)
+dsl_dataset_try_add_ref(dsl_pool_t *dp, dsl_dataset_t *ds, const void *tag)
{
dmu_buf_t *dbuf = ds->ds_dbuf;
boolean_t result = B_FALSE;
@@ -569,7 +569,7 @@ dsl_dataset_try_add_ref(dsl_pool_t *dp, dsl_dataset_t *ds, void *tag)
}
int
-dsl_dataset_hold_obj(dsl_pool_t *dp, uint64_t dsobj, void *tag,
+dsl_dataset_hold_obj(dsl_pool_t *dp, uint64_t dsobj, const void *tag,
dsl_dataset_t **dsp)
{
objset_t *mos = dp->dp_meta_objset;
@@ -758,7 +758,7 @@ dsl_dataset_create_key_mapping(dsl_dataset_t *ds)
int
dsl_dataset_hold_obj_flags(dsl_pool_t *dp, uint64_t dsobj,
- ds_hold_flags_t flags, void *tag, dsl_dataset_t **dsp)
+ ds_hold_flags_t flags, const void *tag, dsl_dataset_t **dsp)
{
int err;
@@ -779,7 +779,7 @@ dsl_dataset_hold_obj_flags(dsl_pool_t *dp, uint64_t dsobj,
int
dsl_dataset_hold_flags(dsl_pool_t *dp, const char *name, ds_hold_flags_t flags,
- void *tag, dsl_dataset_t **dsp)
+ const void *tag, dsl_dataset_t **dsp)
{
dsl_dir_t *dd;
const char *snapname;
@@ -832,7 +832,7 @@ dsl_dataset_hold_flags(dsl_pool_t *dp, const char *name, ds_hold_flags_t flags,
}
int
-dsl_dataset_hold(dsl_pool_t *dp, const char *name, void *tag,
+dsl_dataset_hold(dsl_pool_t *dp, const char *name, const void *tag,
dsl_dataset_t **dsp)
{
return (dsl_dataset_hold_flags(dp, name, 0, tag, dsp));
@@ -840,7 +840,7 @@ dsl_dataset_hold(dsl_pool_t *dp, const char *name, void *tag,
static int
dsl_dataset_own_obj_impl(dsl_pool_t *dp, uint64_t dsobj, ds_hold_flags_t flags,
- void *tag, boolean_t override, dsl_dataset_t **dsp)
+ const void *tag, boolean_t override, dsl_dataset_t **dsp)
{
int err = dsl_dataset_hold_obj_flags(dp, dsobj, flags, tag, dsp);
if (err != 0)
@@ -856,21 +856,21 @@ dsl_dataset_own_obj_impl(dsl_pool_t *dp, uint64_t dsobj, ds_hold_flags_t flags,
int
dsl_dataset_own_obj(dsl_pool_t *dp, uint64_t dsobj, ds_hold_flags_t flags,
- void *tag, dsl_dataset_t **dsp)
+ const void *tag, dsl_dataset_t **dsp)
{
return (dsl_dataset_own_obj_impl(dp, dsobj, flags, tag, B_FALSE, dsp));
}
int
dsl_dataset_own_obj_force(dsl_pool_t *dp, uint64_t dsobj,
- ds_hold_flags_t flags, void *tag, dsl_dataset_t **dsp)
+ ds_hold_flags_t flags, const void *tag, dsl_dataset_t **dsp)
{
return (dsl_dataset_own_obj_impl(dp, dsobj, flags, tag, B_TRUE, dsp));
}
static int
dsl_dataset_own_impl(dsl_pool_t *dp, const char *name, ds_hold_flags_t flags,
- void *tag, boolean_t override, dsl_dataset_t **dsp)
+ const void *tag, boolean_t override, dsl_dataset_t **dsp)
{
int err = dsl_dataset_hold_flags(dp, name, flags, tag, dsp);
if (err != 0)
@@ -884,14 +884,14 @@ dsl_dataset_own_impl(dsl_pool_t *dp, const char *name, ds_hold_flags_t flags,
int
dsl_dataset_own_force(dsl_pool_t *dp, const char *name, ds_hold_flags_t flags,
- void *tag, dsl_dataset_t **dsp)
+ const void *tag, dsl_dataset_t **dsp)
{
return (dsl_dataset_own_impl(dp, name, flags, tag, B_TRUE, dsp));
}
int
dsl_dataset_own(dsl_pool_t *dp, const char *name, ds_hold_flags_t flags,
- void *tag, dsl_dataset_t **dsp)
+ const void *tag, dsl_dataset_t **dsp)
{
return (dsl_dataset_own_impl(dp, name, flags, tag, B_FALSE, dsp));
}
@@ -970,7 +970,7 @@ dsl_dataset_namelen(dsl_dataset_t *ds)
}
void
-dsl_dataset_rele(dsl_dataset_t *ds, void *tag)
+dsl_dataset_rele(dsl_dataset_t *ds, const void *tag)
{
dmu_buf_rele(ds->ds_dbuf, tag);
}
@@ -988,7 +988,8 @@ dsl_dataset_remove_key_mapping(dsl_dataset_t *ds)
}
void
-dsl_dataset_rele_flags(dsl_dataset_t *ds, ds_hold_flags_t flags, void *tag)
+dsl_dataset_rele_flags(dsl_dataset_t *ds, ds_hold_flags_t flags,
+ const void *tag)
{
if (flags & DS_HOLD_FLAG_DECRYPT)
dsl_dataset_remove_key_mapping(ds);
@@ -997,7 +998,7 @@ dsl_dataset_rele_flags(dsl_dataset_t *ds, ds_hold_flags_t flags, void *tag)
}
void
-dsl_dataset_disown(dsl_dataset_t *ds, ds_hold_flags_t flags, void *tag)
+dsl_dataset_disown(dsl_dataset_t *ds, ds_hold_flags_t flags, const void *tag)
{
ASSERT3P(ds->ds_owner, ==, tag);
ASSERT(ds->ds_dbuf != NULL);
@@ -1010,7 +1011,7 @@ dsl_dataset_disown(dsl_dataset_t *ds, ds_hold_flags_t flags, void *tag)
}
boolean_t
-dsl_dataset_tryown(dsl_dataset_t *ds, void *tag, boolean_t override)
+dsl_dataset_tryown(dsl_dataset_t *ds, const void *tag, boolean_t override)
{
boolean_t gotit = FALSE;
@@ -3278,8 +3279,8 @@ struct promotenode {
static int snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep);
static int promote_hold(dsl_dataset_promote_arg_t *ddpa, dsl_pool_t *dp,
- void *tag);
-static void promote_rele(dsl_dataset_promote_arg_t *ddpa, void *tag);
+ const void *tag);
+static void promote_rele(dsl_dataset_promote_arg_t *ddpa, const void *tag);
int
dsl_dataset_promote_check(void *arg, dmu_tx_t *tx)
@@ -3739,7 +3740,7 @@ dsl_dataset_promote_sync(void *arg, dmu_tx_t *tx)
*/
static int
snaplist_make(dsl_pool_t *dp,
- uint64_t first_obj, uint64_t last_obj, list_t *l, void *tag)
+ uint64_t first_obj, uint64_t last_obj, list_t *l, const void *tag)
{
uint64_t obj = last_obj;
@@ -3784,7 +3785,7 @@ snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep)
}
static void
-snaplist_destroy(list_t *l, void *tag)
+snaplist_destroy(list_t *l, const void *tag)
{
struct promotenode *snap;
@@ -3800,7 +3801,7 @@ snaplist_destroy(list_t *l, void *tag)
}
static int
-promote_hold(dsl_dataset_promote_arg_t *ddpa, dsl_pool_t *dp, void *tag)
+promote_hold(dsl_dataset_promote_arg_t *ddpa, dsl_pool_t *dp, const void *tag)
{
int error;
dsl_dir_t *dd;
@@ -3850,7 +3851,7 @@ out:
}
static void
-promote_rele(dsl_dataset_promote_arg_t *ddpa, void *tag)
+promote_rele(dsl_dataset_promote_arg_t *ddpa, const void *tag)
{
snaplist_destroy(&ddpa->shared_snaps, tag);
snaplist_destroy(&ddpa->clone_snaps, tag);
diff --git a/module/zfs/dsl_dir.c b/module/zfs/dsl_dir.c
index aca32ff9b..252dd2912 100644
--- a/module/zfs/dsl_dir.c
+++ b/module/zfs/dsl_dir.c
@@ -801,7 +801,7 @@ dsl_fs_ss_limit_check(dsl_dir_t *dd, uint64_t delta, zfs_prop_t prop,
{
objset_t *os = dd->dd_pool->dp_meta_objset;
uint64_t limit, count;
- char *count_prop;
+ const char *count_prop;
enforce_res_t enforce;
int err = 0;
diff --git a/module/zfs/dsl_pool.c b/module/zfs/dsl_pool.c
index 4a6d4b623..3c2fefcfb 100644
--- a/module/zfs/dsl_pool.c
+++ b/module/zfs/dsl_pool.c
@@ -1387,7 +1387,7 @@ dsl_pool_user_release(dsl_pool_t *dp, uint64_t dsobj, const char *tag,
*/
int
-dsl_pool_hold(const char *name, void *tag, dsl_pool_t **dp)
+dsl_pool_hold(const char *name, const void *tag, dsl_pool_t **dp)
{
spa_t *spa;
int error;
@@ -1401,14 +1401,14 @@ dsl_pool_hold(const char *name, void *tag, dsl_pool_t **dp)
}
void
-dsl_pool_rele(dsl_pool_t *dp, void *tag)
+dsl_pool_rele(dsl_pool_t *dp, const void *tag)
{
dsl_pool_config_exit(dp, tag);
spa_close(dp->dp_spa, tag);
}
void
-dsl_pool_config_enter(dsl_pool_t *dp, void *tag)
+dsl_pool_config_enter(dsl_pool_t *dp, const void *tag)
{
/*
* We use a "reentrant" reader-writer lock, but not reentrantly.
@@ -1427,14 +1427,14 @@ dsl_pool_config_enter(dsl_pool_t *dp, void *tag)
}
void
-dsl_pool_config_enter_prio(dsl_pool_t *dp, void *tag)
+dsl_pool_config_enter_prio(dsl_pool_t *dp, const void *tag)
{
ASSERT(!rrw_held(&dp->dp_config_rwlock, RW_READER));
rrw_enter_read_prio(&dp->dp_config_rwlock, tag);
}
void
-dsl_pool_config_exit(dsl_pool_t *dp, void *tag)
+dsl_pool_config_exit(dsl_pool_t *dp, const void *tag)
{
rrw_exit(&dp->dp_config_rwlock, tag);
}
diff --git a/module/zfs/dsl_userhold.c b/module/zfs/dsl_userhold.c
index 75d153194..499ab09ec 100644
--- a/module/zfs/dsl_userhold.c
+++ b/module/zfs/dsl_userhold.c
@@ -346,7 +346,7 @@ dsl_dataset_user_hold(nvlist_t *holds, minor_t cleanup_minor, nvlist_t *errlist)
return (ret);
}
-typedef int (dsl_holdfunc_t)(dsl_pool_t *dp, const char *name, void *tag,
+typedef int (dsl_holdfunc_t)(dsl_pool_t *dp, const char *name, const void *tag,
dsl_dataset_t **dsp);
typedef struct dsl_dataset_user_release_arg {
@@ -359,7 +359,7 @@ typedef struct dsl_dataset_user_release_arg {
/* Place a dataset hold on the snapshot identified by passed dsobj string */
static int
-dsl_dataset_hold_obj_string(dsl_pool_t *dp, const char *dsobj, void *tag,
+dsl_dataset_hold_obj_string(dsl_pool_t *dp, const char *dsobj, const void *tag,
dsl_dataset_t **dsp)
{
return (dsl_dataset_hold_obj(dp, zfs_strtonum(dsobj, NULL), tag, dsp));
diff --git a/module/zfs/mmp.c b/module/zfs/mmp.c
index b03b90fdc..ab0f055ff 100644
--- a/module/zfs/mmp.c
+++ b/module/zfs/mmp.c
@@ -186,7 +186,7 @@ uint_t zfs_multihost_import_intervals = MMP_DEFAULT_IMPORT_INTERVALS;
*/
uint_t zfs_multihost_fail_intervals = MMP_DEFAULT_FAIL_INTERVALS;
-static void *const mmp_tag = "mmp_write_uberblock";
+static const void *const mmp_tag = "mmp_write_uberblock";
static __attribute__((noreturn)) void mmp_thread(void *arg);
void
diff --git a/module/zfs/rrwlock.c b/module/zfs/rrwlock.c
index d23fc3ad1..8e3ee5ddf 100644
--- a/module/zfs/rrwlock.c
+++ b/module/zfs/rrwlock.c
@@ -77,7 +77,7 @@ uint_t rrw_tsd_key;
typedef struct rrw_node {
struct rrw_node *rn_next;
rrwlock_t *rn_rrl;
- void *rn_tag;
+ const void *rn_tag;
} rrw_node_t;
static rrw_node_t *
@@ -99,7 +99,7 @@ rrn_find(rrwlock_t *rrl)
* Add a node to the head of the singly linked list.
*/
static void
-rrn_add(rrwlock_t *rrl, void *tag)
+rrn_add(rrwlock_t *rrl, const void *tag)
{
rrw_node_t *rn;
@@ -115,7 +115,7 @@ rrn_add(rrwlock_t *rrl, void *tag)
* thread's list and return TRUE; otherwise return FALSE.
*/
static boolean_t
-rrn_find_and_remove(rrwlock_t *rrl, void *tag)
+rrn_find_and_remove(rrwlock_t *rrl, const void *tag)
{
rrw_node_t *rn;
rrw_node_t *prev = NULL;
@@ -160,7 +160,7 @@ rrw_destroy(rrwlock_t *rrl)
}
static void
-rrw_enter_read_impl(rrwlock_t *rrl, boolean_t prio, void *tag)
+rrw_enter_read_impl(rrwlock_t *rrl, boolean_t prio, const void *tag)
{
mutex_enter(&rrl->rr_lock);
#if !defined(ZFS_DEBUG) && defined(_KERNEL)
@@ -192,7 +192,7 @@ rrw_enter_read_impl(rrwlock_t *rrl, boolean_t prio, void *tag)
}
void
-rrw_enter_read(rrwlock_t *rrl, void *tag)
+rrw_enter_read(rrwlock_t *rrl, const void *tag)
{
rrw_enter_read_impl(rrl, B_FALSE, tag);
}
@@ -204,7 +204,7 @@ rrw_enter_read(rrwlock_t *rrl, void *tag)
* the pending writer does not work, so we have to give an explicit hint here.
*/
void
-rrw_enter_read_prio(rrwlock_t *rrl, void *tag)
+rrw_enter_read_prio(rrwlock_t *rrl, const void *tag)
{
rrw_enter_read_impl(rrl, B_TRUE, tag);
}
@@ -228,7 +228,7 @@ rrw_enter_write(rrwlock_t *rrl)
}
void
-rrw_enter(rrwlock_t *rrl, krw_t rw, void *tag)
+rrw_enter(rrwlock_t *rrl, krw_t rw, const void *tag)
{
if (rw == RW_READER)
rrw_enter_read(rrl, tag);
@@ -237,7 +237,7 @@ rrw_enter(rrwlock_t *rrl, krw_t rw, void *tag)
}
void
-rrw_exit(rrwlock_t *rrl, void *tag)
+rrw_exit(rrwlock_t *rrl, const void *tag)
{
mutex_enter(&rrl->rr_lock);
#if !defined(ZFS_DEBUG) && defined(_KERNEL)
@@ -339,7 +339,7 @@ rrm_destroy(rrmlock_t *rrl)
}
void
-rrm_enter(rrmlock_t *rrl, krw_t rw, void *tag)
+rrm_enter(rrmlock_t *rrl, krw_t rw, const void *tag)
{
if (rw == RW_READER)
rrm_enter_read(rrl, tag);
@@ -358,7 +358,7 @@ rrm_enter(rrmlock_t *rrl, krw_t rw, void *tag)
#define RRM_TD_LOCK() (((uint32_t)(uintptr_t)(curthread)) % RRM_NUM_LOCKS)
void
-rrm_enter_read(rrmlock_t *rrl, void *tag)
+rrm_enter_read(rrmlock_t *rrl, const void *tag)
{
rrw_enter_read(&rrl->locks[RRM_TD_LOCK()], tag);
}
@@ -373,7 +373,7 @@ rrm_enter_write(rrmlock_t *rrl)
}
void
-rrm_exit(rrmlock_t *rrl, void *tag)
+rrm_exit(rrmlock_t *rrl, const void *tag)
{
int i;
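
The rrwlock changes above constify the tag stored in the per-thread rrw_node_t list. That list records which tag took a read hold so it can be matched again on release; the tag is only compared, never modified. A minimal standalone sketch of the add/find-and-remove pattern with hypothetical names (toy_node_t, toy_add, toy_find_and_remove) and no thread-specific data:

#include <stdbool.h>
#include <stdlib.h>

typedef struct toy_node {
	struct toy_node	*n_next;
	const void	*n_lock;
	const void	*n_tag;
} toy_node_t;

static void
toy_add(toy_node_t **head, const void *lock, const void *tag)
{
	toy_node_t *n = malloc(sizeof (*n));
	if (n == NULL)
		abort();
	n->n_next = *head;	/* push at the head of the singly linked list */
	n->n_lock = lock;
	n->n_tag = tag;
	*head = n;
}

static bool
toy_find_and_remove(toy_node_t **head, const void *lock, const void *tag)
{
	for (toy_node_t **pp = head; *pp != NULL; pp = &(*pp)->n_next) {
		if ((*pp)->n_lock == lock && (*pp)->n_tag == tag) {
			toy_node_t *n = *pp;
			*pp = n->n_next;	/* unlink the matching node */
			free(n);
			return (true);
		}
	}
	return (false);
}

int
main(void)
{
	toy_node_t *head = NULL;
	int lock;
	const char *tag = "reader";
	toy_add(&head, &lock, tag);
	return (toy_find_and_remove(&head, &lock, tag) ? 0 : 1);
}
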
diff --git a/module/zfs/spa.c b/module/zfs/spa.c
index baa5fc247..55f3a4de6 100644
--- a/module/zfs/spa.c
+++ b/module/zfs/spa.c
@@ -164,7 +164,8 @@ static const zio_taskq_info_t zio_taskqs[ZIO_TYPES][ZIO_TASKQ_TYPES] = {
static void spa_sync_version(void *arg, dmu_tx_t *tx);
static void spa_sync_props(void *arg, dmu_tx_t *tx);
static boolean_t spa_has_active_shared_spare(spa_t *spa);
-static int spa_load_impl(spa_t *spa, spa_import_type_t type, char **ereport);
+static int spa_load_impl(spa_t *spa, spa_import_type_t type,
+ const char **ereport);
static void spa_vdev_resilver_done(spa_t *spa);
static uint_t zio_taskq_batch_pct = 80; /* 1 thread per cpu in pset */
@@ -277,7 +278,7 @@ static int zfs_livelist_condense_new_alloc = 0;
* Add a (source=src, propname=propval) list to an nvlist.
*/
static void
-spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, char *strval,
+spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, const char *strval,
uint64_t intval, zprop_source_t src)
{
const char *propname = zpool_prop_to_name(prop);
@@ -2974,7 +2975,7 @@ spa_try_repair(spa_t *spa, nvlist_t *config)
static int
spa_load(spa_t *spa, spa_load_state_t state, spa_import_type_t type)
{
- char *ereport = FM_EREPORT_ZFS_POOL;
+ const char *ereport = FM_EREPORT_ZFS_POOL;
int error;
spa->spa_load_state = state;
@@ -3291,7 +3292,7 @@ out:
* ZPOOL_CONFIG_MMP_HOSTID - hostid from the active pool
*/
if (error == EREMOTEIO) {
- char *hostname = "<unknown>";
+ const char *hostname = "<unknown>";
uint64_t hostid = 0;
if (mmp_label) {
@@ -4399,7 +4400,7 @@ spa_ld_load_dedup_tables(spa_t *spa)
}
static int
-spa_ld_verify_logs(spa_t *spa, spa_import_type_t type, char **ereport)
+spa_ld_verify_logs(spa_t *spa, spa_import_type_t type, const char **ereport)
{
vdev_t *rvd = spa->spa_root_vdev;
@@ -4766,7 +4767,7 @@ spa_ld_mos_with_trusted_config(spa_t *spa, spa_import_type_t type,
* config stored in the MOS.
*/
static int
-spa_load_impl(spa_t *spa, spa_import_type_t type, char **ereport)
+spa_load_impl(spa_t *spa, spa_import_type_t type, const char **ereport)
{
int error = 0;
boolean_t missing_feat_write = B_FALSE;
@@ -5157,8 +5158,8 @@ spa_load_best(spa_t *spa, spa_load_state_t state, uint64_t max_request,
* ambiguous state.
*/
static int
-spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t *nvpolicy,
- nvlist_t **config)
+spa_open_common(const char *pool, spa_t **spapp, const void *tag,
+ nvlist_t *nvpolicy, nvlist_t **config)
{
spa_t *spa;
spa_load_state_t state = SPA_LOAD_OPEN;
@@ -5274,14 +5275,14 @@ spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t *nvpolicy,
}
int
-spa_open_rewind(const char *name, spa_t **spapp, void *tag, nvlist_t *policy,
- nvlist_t **config)
+spa_open_rewind(const char *name, spa_t **spapp, const void *tag,
+ nvlist_t *policy, nvlist_t **config)
{
return (spa_open_common(name, spapp, tag, policy, config));
}
int
-spa_open(const char *name, spa_t **spapp, void *tag)
+spa_open(const char *name, spa_t **spapp, const void *tag)
{
return (spa_open_common(name, spapp, tag, NULL, NULL));
}
@@ -7507,7 +7508,7 @@ spa_vdev_trim(spa_t *spa, nvlist_t *nv, uint64_t cmd_type, uint64_t rate,
* Split a set of devices from their mirrors, and create a new pool from them.
*/
int
-spa_vdev_split_mirror(spa_t *spa, char *newname, nvlist_t *config,
+spa_vdev_split_mirror(spa_t *spa, const char *newname, nvlist_t *config,
nvlist_t *props, boolean_t exp)
{
int error = 0;
diff --git a/module/zfs/spa_config.c b/module/zfs/spa_config.c
index 254031f31..4dcb9f153 100644
--- a/module/zfs/spa_config.c
+++ b/module/zfs/spa_config.c
@@ -67,7 +67,7 @@ static uint64_t spa_config_generation = 1;
* This can be overridden in userland to preserve an alternate namespace for
* userland pools when doing testing.
*/
-char *spa_config_path = ZPOOL_CACHE;
+char *spa_config_path = (char *)ZPOOL_CACHE;
#ifdef _KERNEL
static int zfs_autoimport_disable = B_TRUE;
#endif
diff --git a/module/zfs/spa_errlog.c b/module/zfs/spa_errlog.c
index 9e5d1de63..19a3cc814 100644
--- a/module/zfs/spa_errlog.c
+++ b/module/zfs/spa_errlog.c
@@ -659,9 +659,9 @@ sync_upgrade_errlog(spa_t *spa, uint64_t spa_err_obj, uint64_t *newobj,
}
char buf[64];
- char *name = "";
errphys_to_name(&zep, buf, sizeof (buf));
+ const char *name = "";
(void) zap_update(spa->spa_meta_objset, err_obj,
buf, 1, strlen(name) + 1, name, tx);
}
@@ -901,17 +901,14 @@ sync_error_list(spa_t *spa, avl_tree_t *t, uint64_t *obj, dmu_tx_t *tx)
/* add errors to the current log */
if (!spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) {
for (se = avl_first(t); se != NULL; se = AVL_NEXT(t, se)) {
- char *name = se->se_name ? se->se_name : "";
-
bookmark_to_name(&se->se_bookmark, buf, sizeof (buf));
+ const char *name = se->se_name ? se->se_name : "";
(void) zap_update(spa->spa_meta_objset, *obj, buf, 1,
strlen(name) + 1, name, tx);
}
} else {
for (se = avl_first(t); se != NULL; se = AVL_NEXT(t, se)) {
- char *name = se->se_name ? se->se_name : "";
-
zbookmark_err_phys_t zep;
zep.zb_object = se->se_bookmark.zb_object;
zep.zb_level = se->se_bookmark.zb_level;
@@ -943,6 +940,7 @@ sync_error_list(spa_t *spa, avl_tree_t *t, uint64_t *obj, dmu_tx_t *tx)
}
errphys_to_name(&zep, buf, sizeof (buf));
+ const char *name = se->se_name ? se->se_name : "";
(void) zap_update(spa->spa_meta_objset,
err_obj, buf, 1, strlen(name) + 1, name, tx);
}
@@ -1153,7 +1151,7 @@ swap_errlog(spa_t *spa, uint64_t spa_err_obj, uint64_t new_head, uint64_t
for (zap_cursor_init(&zc, spa->spa_meta_objset, old_head_errlog);
zap_cursor_retrieve(&zc, &za) == 0; zap_cursor_advance(&zc)) {
- char *name = "";
+ const char *name = "";
name_to_errphys(za.za_name, &err_block);
if (err_block.zb_birth < txg) {
(void) zap_update(spa->spa_meta_objset, new_head_errlog,
diff --git a/module/zfs/spa_misc.c b/module/zfs/spa_misc.c
index c57c69bd7..aaca2a8a5 100644
--- a/module/zfs/spa_misc.c
+++ b/module/zfs/spa_misc.c
@@ -462,7 +462,7 @@ spa_config_lock_destroy(spa_t *spa)
}
int
-spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw)
+spa_config_tryenter(spa_t *spa, int locks, const void *tag, krw_t rw)
{
for (int i = 0; i < SCL_LOCKS; i++) {
spa_config_lock_t *scl = &spa->spa_config_lock[i];
@@ -877,7 +877,7 @@ spa_next(spa_t *prev)
* have the namespace lock held.
*/
void
-spa_open_ref(spa_t *spa, void *tag)
+spa_open_ref(spa_t *spa, const void *tag)
{
ASSERT(zfs_refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
MUTEX_HELD(&spa_namespace_lock));
@@ -889,7 +889,7 @@ spa_open_ref(spa_t *spa, void *tag)
* have the namespace lock held.
*/
void
-spa_close(spa_t *spa, void *tag)
+spa_close(spa_t *spa, const void *tag)
{
ASSERT(zfs_refcount_count(&spa->spa_refcount) > spa->spa_minref ||
MUTEX_HELD(&spa_namespace_lock));
@@ -905,7 +905,7 @@ spa_close(spa_t *spa, void *tag)
* so the asserts in spa_close() do not apply.
*/
void
-spa_async_close(spa_t *spa, void *tag)
+spa_async_close(spa_t *spa, const void *tag)
{
(void) zfs_refcount_remove(&spa->spa_refcount, tag);
}
@@ -1513,8 +1513,8 @@ void
snprintf_blkptr(char *buf, size_t buflen, const blkptr_t *bp)
{
char type[256];
- char *checksum = NULL;
- char *compress = NULL;
+ const char *checksum = NULL;
+ const char *compress = NULL;
if (bp != NULL) {
if (BP_GET_TYPE(bp) & DMU_OT_NEWTYPE) {
diff --git a/module/zfs/vdev.c b/module/zfs/vdev.c
index 2dcb97b7f..dedbc1a23 100644
--- a/module/zfs/vdev.c
+++ b/module/zfs/vdev.c
@@ -5790,7 +5790,7 @@ vdev_prop_get(vdev_t *vd, nvlist_t *innvl, nvlist_t *outnvl)
KM_SLEEP);
for (uint64_t i = 0; i < vd->vdev_children;
i++) {
- char *vname;
+ const char *vname;
vname = vdev_name(vd->vdev_child[i],
namebuf, sizeof (namebuf));
diff --git a/module/zfs/vdev_raidz_math.c b/module/zfs/vdev_raidz_math.c
index 50b8dab74..c58895973 100644
--- a/module/zfs/vdev_raidz_math.c
+++ b/module/zfs/vdev_raidz_math.c
@@ -563,7 +563,7 @@ vdev_raidz_math_fini(void)
}
static const struct {
- char *name;
+ const char *name;
uint32_t sel;
} math_impl_opts[] = {
{ "cycle", IMPL_CYCLE },
diff --git a/module/zfs/vdev_rebuild.c b/module/zfs/vdev_rebuild.c
index a965912ac..aa7ed24de 100644
--- a/module/zfs/vdev_rebuild.c
+++ b/module/zfs/vdev_rebuild.c
@@ -260,7 +260,7 @@ vdev_rebuild_initiate_sync(void *arg, dmu_tx_t *tx)
}
static void
-vdev_rebuild_log_notify(spa_t *spa, vdev_t *vd, char *name)
+vdev_rebuild_log_notify(spa_t *spa, vdev_t *vd, const char *name)
{
nvlist_t *aux = fnvlist_alloc();
diff --git a/module/zfs/vdev_removal.c b/module/zfs/vdev_removal.c
index 7dfc4345f..7fbe38b42 100644
--- a/module/zfs/vdev_removal.c
+++ b/module/zfs/vdev_removal.c
@@ -338,8 +338,8 @@ spa_vdev_alloc(spa_t *spa, uint64_t guid)
}
static void
-spa_vdev_remove_aux(nvlist_t *config, char *name, nvlist_t **dev, int count,
- nvlist_t *dev_to_remove)
+spa_vdev_remove_aux(nvlist_t *config, const char *name, nvlist_t **dev,
+ int count, nvlist_t *dev_to_remove)
{
nvlist_t **newdev = NULL;
@@ -2384,7 +2384,8 @@ spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare)
int error = 0, error_log;
boolean_t locked = MUTEX_HELD(&spa_namespace_lock);
sysevent_t *ev = NULL;
- char *vd_type = NULL, *vd_path = NULL;
+ const char *vd_type = NULL;
+ char *vd_path = NULL;
ASSERT(spa_writeable(spa));
diff --git a/module/zfs/zfs_fuid.c b/module/zfs/zfs_fuid.c
index 3aa60034d..976333ee8 100644
--- a/module/zfs/zfs_fuid.c
+++ b/module/zfs/zfs_fuid.c
@@ -61,7 +61,7 @@ typedef struct fuid_domain {
uint64_t f_idx;
} fuid_domain_t;
-static char *nulldomain = "";
+static const char *const nulldomain = "";
/*
* Compare two indexes.
@@ -171,7 +171,7 @@ zfs_fuid_table_destroy(avl_tree_t *idx_tree, avl_tree_t *domain_tree)
avl_destroy(idx_tree);
}
-char *
+const char *
zfs_fuid_idx_domain(avl_tree_t *idx_tree, uint32_t idx)
{
fuid_domain_t searchnode, *findnode;
@@ -290,9 +290,9 @@ zfs_fuid_sync(zfsvfs_t *zfsvfs, dmu_tx_t *tx)
* necessary for the caller or another thread to detect the dirty table
* and sync out the changes.
*/
-int
+static int
zfs_fuid_find_by_domain(zfsvfs_t *zfsvfs, const char *domain,
- char **retdomain, boolean_t addok)
+ const char **retdomain, boolean_t addok)
{
fuid_domain_t searchnode, *findnode;
avl_index_t loc;
@@ -358,7 +358,7 @@ retry:
const char *
zfs_fuid_find_by_idx(zfsvfs_t *zfsvfs, uint32_t idx)
{
- char *domain;
+ const char *domain;
if (idx == 0 || !zfsvfs->z_use_fuids)
return (NULL);
@@ -518,8 +518,7 @@ zfs_fuid_create_cred(zfsvfs_t *zfsvfs, zfs_fuid_type_t type,
uint64_t idx;
ksid_t *ksid;
uint32_t rid;
- char *kdomain;
- const char *domain;
+ const char *kdomain, *domain;
uid_t id;
VERIFY(type == ZFS_OWNER || type == ZFS_GROUP);
@@ -574,8 +573,7 @@ zfs_fuid_create(zfsvfs_t *zfsvfs, uint64_t id, cred_t *cr,
zfs_fuid_type_t type, zfs_fuid_info_t **fuidpp)
{
#ifdef HAVE_KSID
- const char *domain;
- char *kdomain;
+ const char *domain, *kdomain;
uint32_t fuid_idx = FUID_INDEX(id);
uint32_t rid = 0;
idmap_stat status;
diff --git a/module/zfs/zil.c b/module/zfs/zil.c
index 9adf81551..9de60896d 100644
--- a/module/zfs/zil.c
+++ b/module/zfs/zil.c
@@ -3470,7 +3470,7 @@ zil_close(zilog_t *zilog)
mutex_exit(&zilog->zl_lock);
}
-static char *suspend_tag = "zil suspending";
+static const char *suspend_tag = "zil suspending";
/*
* Suspend an intent log. While in suspended mode, we still honor
diff --git a/module/zfs/zio_checksum.c b/module/zfs/zio_checksum.c
index 3c5cdf604..c7368ac26 100644
--- a/module/zfs/zio_checksum.c
+++ b/module/zfs/zio_checksum.c
@@ -160,7 +160,7 @@ abd_fletcher_4_byteswap(abd_t *abd, uint64_t size,
abd_fletcher_4_impl(abd, size, &acd);
}
-zio_checksum_info_t zio_checksum_table[ZIO_CHECKSUM_FUNCTIONS] = {
+const zio_checksum_info_t zio_checksum_table[ZIO_CHECKSUM_FUNCTIONS] = {
{{NULL, NULL}, NULL, NULL, 0, "inherit"},
{{NULL, NULL}, NULL, NULL, 0, "on"},
{{abd_checksum_off, abd_checksum_off},
diff --git a/module/zfs/zio_compress.c b/module/zfs/zio_compress.c
index 38020ce22..717395dcf 100644
--- a/module/zfs/zio_compress.c
+++ b/module/zfs/zio_compress.c
@@ -49,7 +49,7 @@ unsigned long zio_decompress_fail_fraction = 0;
/*
* Compression vectors.
*/
-zio_compress_info_t zio_compress_table[ZIO_COMPRESS_FUNCTIONS] = {
+const zio_compress_info_t zio_compress_table[ZIO_COMPRESS_FUNCTIONS] = {
{"inherit", 0, NULL, NULL, NULL},
{"on", 0, NULL, NULL, NULL},
{"uncompressed", 0, NULL, NULL, NULL},