Diffstat (limited to 'module')
-rw-r--r--  module/nvpair/nvpair.c             |  4
-rw-r--r--  module/zcommon/zfs_deleg.c         |  4
-rw-r--r--  module/zcommon/zfs_prop.c          |  8
-rw-r--r--  module/zcommon/zpool_prop.c        |  8
-rw-r--r--  module/zcommon/zprop_common.c      | 12
-rw-r--r--  module/zfs/arc.c                   | 15
-rw-r--r--  module/zfs/dbuf.c                  | 43
-rw-r--r--  module/zfs/dmu.c                   | 14
-rw-r--r--  module/zfs/dmu_objset.c            | 16
-rw-r--r--  module/zfs/dmu_send.c              | 39
-rw-r--r--  module/zfs/dmu_traverse.c          |  5
-rw-r--r--  module/zfs/dmu_tx.c                |  4
-rw-r--r--  module/zfs/dmu_zfetch.c            |  4
-rw-r--r--  module/zfs/dnode.c                 | 15
-rw-r--r--  module/zfs/dnode_sync.c            |  6
-rw-r--r--  module/zfs/dsl_dataset.c           | 16
-rw-r--r--  module/zfs/dsl_deleg.c             | 16
-rw-r--r--  module/zfs/dsl_dir.c               | 21
-rw-r--r--  module/zfs/dsl_pool.c              |  4
-rw-r--r--  module/zfs/dsl_prop.c              |  2
-rw-r--r--  module/zfs/dsl_synctask.c          |  2
-rw-r--r--  module/zfs/include/sys/spa.h       |  2
-rw-r--r--  module/zfs/include/sys/zap.h       |  5
-rw-r--r--  module/zfs/include/sys/zap_impl.h  |  1
-rw-r--r--  module/zfs/include/sys/zfs_znode.h |  4
-rw-r--r--  module/zfs/include/sys/zil.h       |  4
-rw-r--r--  module/zfs/lzjb.c                  |  4
-rw-r--r--  module/zfs/metaslab.c              | 12
-rw-r--r--  module/zfs/refcount.c              |  6
-rw-r--r--  module/zfs/rrwlock.c               |  2
-rw-r--r--  module/zfs/spa.c                   | 47
-rw-r--r--  module/zfs/spa_history.c           |  2
-rw-r--r--  module/zfs/spa_misc.c              | 26
-rw-r--r--  module/zfs/txg.c                   |  6
-rw-r--r--  module/zfs/vdev.c                  | 16
-rw-r--r--  module/zfs/vdev_cache.c            |  3
-rw-r--r--  module/zfs/vdev_label.c            | 39
-rw-r--r--  module/zfs/vdev_mirror.c           |  4
-rw-r--r--  module/zfs/vdev_queue.c            |  3
-rw-r--r--  module/zfs/vdev_raidz.c            |  6
-rw-r--r--  module/zfs/zap.c                   | 25
-rw-r--r--  module/zfs/zap_leaf.c              |  2
-rw-r--r--  module/zfs/zap_micro.c             | 41
-rw-r--r--  module/zfs/zfs_byteswap.c          |  2
-rw-r--r--  module/zfs/zfs_ioctl.c             | 12
-rw-r--r--  module/zfs/zio.c                   | 35
46 files changed, 350 insertions, 217 deletions
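
Most of the hunks below apply one of a few mechanical cleanups, the usual shape of a compiler-warning and C90-compatibility pass: assignments used as truth values gain an extra pair of parentheses, C99-style "for (int i = ...)" declarations are hoisted to the top of the enclosing block, empty parameter lists become (void), locals the compiler cannot prove are assigned before use are zero-initialized, and debug-only variables are fenced with #ifndef NDEBUG or #ifdef DEBUG. The fragment below is a standalone sketch of the two most common patterns; item_t and list_head() are illustrative stand-ins, not ZFS interfaces.

    /*
     * Sketch of the recurring cleanup patterns in this diff; not ZFS code.
     */
    typedef struct item { struct item *next; int value; } item_t;

    static item_t *
    list_head(item_t *list)
    {
        return (list);  /* first element of a NULL-terminated list */
    }

    static int
    sum_items(item_t *list)
    {
        item_t *it;
        int i, sum = 0; /* initialized up front, as done for sepp, buf_size, etc. */

        /*
         * Assignment used as the loop condition: the extra parentheses make
         * the intent explicit to compiler and reader alike, matching the
         * "while ((x = f()))" changes throughout this diff.
         */
        while ((it = list_head(list)) != NULL) {
            sum += it->value;
            list = it->next;
        }

        /*
         * Loop counter declared at block scope rather than inside the
         * for-statement, matching the arc.c, spa.c, and vdev*.c changes.
         */
        for (i = 0; i < 4; i++)
            sum += i;

        return (sum);
    }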
diff --git a/module/nvpair/nvpair.c b/module/nvpair/nvpair.c
index 77891bf77..81f22deee 100644
--- a/module/nvpair/nvpair.c
+++ b/module/nvpair/nvpair.c
@@ -1560,7 +1560,7 @@ nvlist_lookup_nvpair_ei_sep(nvlist_t *nvl, const char *name, const char sep,
{
nvpair_t *nvp;
const char *np;
- char *sepp;
+ char *sepp = NULL;
char *idxp, *idxep;
nvlist_t **nva;
long idx;
@@ -2331,7 +2331,7 @@ nvlist_xpack(nvlist_t *nvl, char **bufp, size_t *buflen, int encoding,
*/
nv_priv_init(&nvpriv, nva, 0);
- if (err = nvlist_size(nvl, &alloc_size, encoding))
+ if ((err = nvlist_size(nvl, &alloc_size, encoding)))
return (err);
if ((buf = nv_mem_zalloc(&nvpriv, alloc_size)) == NULL)
diff --git a/module/zcommon/zfs_deleg.c b/module/zcommon/zfs_deleg.c
index 0fd5800a8..1b94a2856 100644
--- a/module/zcommon/zfs_deleg.c
+++ b/module/zcommon/zfs_deleg.c
@@ -179,8 +179,8 @@ zfs_deleg_verify_nvlist(nvlist_t *nvp)
nvpair_name(perm_name));
if (error)
return (-1);
- } while (perm_name = nvlist_next_nvpair(perms, perm_name));
- } while (who = nvlist_next_nvpair(nvp, who));
+ } while ((perm_name = nvlist_next_nvpair(perms, perm_name)));
+ } while ((who = nvlist_next_nvpair(nvp, who)));
return (0);
}
diff --git a/module/zcommon/zfs_prop.c b/module/zcommon/zfs_prop.c
index effd2dba7..b273c6ef9 100644
--- a/module/zcommon/zfs_prop.c
+++ b/module/zcommon/zfs_prop.c
@@ -372,15 +372,15 @@ zfs_prop_user(const char *name)
* (strings) and internal representation (uint64_t).
*/
int
-zfs_prop_string_to_index(zfs_prop_t prop, const char *string, uint64_t *index)
+zfs_prop_string_to_index(zfs_prop_t prop, const char *string, uint64_t *idx)
{
- return (zprop_string_to_index(prop, string, index, ZFS_TYPE_DATASET));
+ return (zprop_string_to_index(prop, string, idx, ZFS_TYPE_DATASET));
}
int
-zfs_prop_index_to_string(zfs_prop_t prop, uint64_t index, const char **string)
+zfs_prop_index_to_string(zfs_prop_t prop, uint64_t idx, const char **string)
{
- return (zprop_index_to_string(prop, index, string, ZFS_TYPE_DATASET));
+ return (zprop_index_to_string(prop, idx, string, ZFS_TYPE_DATASET));
}
/*
diff --git a/module/zcommon/zpool_prop.c b/module/zcommon/zpool_prop.c
index f5efe18d2..562165b7a 100644
--- a/module/zcommon/zpool_prop.c
+++ b/module/zcommon/zpool_prop.c
@@ -152,16 +152,16 @@ zpool_prop_default_numeric(zpool_prop_t prop)
int
zpool_prop_string_to_index(zpool_prop_t prop, const char *string,
- uint64_t *index)
+ uint64_t *idx)
{
- return (zprop_string_to_index(prop, string, index, ZFS_TYPE_POOL));
+ return (zprop_string_to_index(prop, string, idx, ZFS_TYPE_POOL));
}
int
-zpool_prop_index_to_string(zpool_prop_t prop, uint64_t index,
+zpool_prop_index_to_string(zpool_prop_t prop, uint64_t idx,
const char **string)
{
- return (zprop_index_to_string(prop, index, string, ZFS_TYPE_POOL));
+ return (zprop_index_to_string(prop, idx, string, ZFS_TYPE_POOL));
}
#ifndef _KERNEL
diff --git a/module/zcommon/zprop_common.c b/module/zcommon/zprop_common.c
index bd267e2e6..a9f2ab315 100644
--- a/module/zcommon/zprop_common.c
+++ b/module/zcommon/zprop_common.c
@@ -158,7 +158,7 @@ int
zprop_iter_common(zprop_func func, void *cb, boolean_t show_all,
boolean_t ordered, zfs_type_t type)
{
- int i, num_props, size, prop;
+ int i, j, num_props, size, prop;
zprop_desc_t *prop_tbl;
zprop_desc_t **order;
@@ -173,7 +173,7 @@ zprop_iter_common(zprop_func func, void *cb, boolean_t show_all,
return (ZPROP_CONT);
#endif
- for (int j = 0; j < num_props; j++)
+ for (j = 0; j < num_props; j++)
order[j] = &prop_tbl[j];
if (ordered) {
@@ -261,7 +261,7 @@ zprop_name_to_prop(const char *propname, zfs_type_t type)
}
int
-zprop_string_to_index(int prop, const char *string, uint64_t *index,
+zprop_string_to_index(int prop, const char *string, uint64_t *idx,
zfs_type_t type)
{
zprop_desc_t *prop_tbl;
@@ -278,7 +278,7 @@ zprop_string_to_index(int prop, const char *string, uint64_t *index,
for (i = 0; idx_tbl[i].pi_name != NULL; i++) {
if (strcmp(string, idx_tbl[i].pi_name) == 0) {
- *index = idx_tbl[i].pi_value;
+ *idx = idx_tbl[i].pi_value;
return (0);
}
}
@@ -287,7 +287,7 @@ zprop_string_to_index(int prop, const char *string, uint64_t *index,
}
int
-zprop_index_to_string(int prop, uint64_t index, const char **string,
+zprop_index_to_string(int prop, uint64_t idx, const char **string,
zfs_type_t type)
{
zprop_desc_t *prop_tbl;
@@ -303,7 +303,7 @@ zprop_index_to_string(int prop, uint64_t index, const char **string,
return (-1);
for (i = 0; idx_tbl[i].pi_name != NULL; i++) {
- if (idx_tbl[i].pi_value == index) {
+ if (idx_tbl[i].pi_value == idx) {
*string = idx_tbl[i].pi_name;
return (0);
}
diff --git a/module/zfs/arc.c b/module/zfs/arc.c
index 45ddfbfb6..f13b65e5b 100644
--- a/module/zfs/arc.c
+++ b/module/zfs/arc.c
@@ -2455,7 +2455,7 @@ arc_read_nolock(zio_t *pio, spa_t *spa, blkptr_t *bp,
uint32_t *arc_flags, const zbookmark_t *zb)
{
arc_buf_hdr_t *hdr;
- arc_buf_t *buf;
+ arc_buf_t *buf = NULL;
kmutex_t *hash_lock;
zio_t *rzio;
@@ -2838,7 +2838,7 @@ arc_release(arc_buf_t *buf, void *tag)
arc_buf_hdr_t *hdr;
kmutex_t *hash_lock;
l2arc_buf_hdr_t *l2hdr;
- uint64_t buf_size;
+ uint64_t buf_size = 0;
rw_enter(&buf->b_lock, RW_WRITER);
hdr = buf->b_hdr;
@@ -3311,7 +3311,7 @@ arc_tempreserve_space(uint64_t reserve, uint64_t txg)
* in order to compress/encrypt/etc the data. We therefor need to
* make sure that there is sufficient available memory for this.
*/
- if (error = arc_memory_throttle(reserve, txg))
+ if ((error = arc_memory_throttle(reserve, txg)))
return (error);
/*
@@ -3706,7 +3706,7 @@ out:
* Free buffers that were tagged for destruction.
*/
static void
-l2arc_do_free_on_write()
+l2arc_do_free_on_write(void)
{
list_t *buflist;
l2arc_data_free_t *df, *df_prev;
@@ -3882,7 +3882,7 @@ l2arc_read_done(zio_t *zio)
static list_t *
l2arc_list_locked(int list_num, kmutex_t **lock)
{
- list_t *list;
+ list_t *list = NULL;
ASSERT(list_num >= 0 && list_num <= 3);
@@ -4055,10 +4055,11 @@ l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz)
list_t *list;
uint64_t passed_sz, write_sz, buf_sz, headroom;
void *buf_data;
- kmutex_t *hash_lock, *list_lock;
+ kmutex_t *hash_lock, *list_lock = NULL;
boolean_t have_lock, full;
l2arc_write_callback_t *cb;
zio_t *pio, *wzio;
+ int try;
ASSERT(dev->l2ad_vdev != NULL);
@@ -4072,7 +4073,7 @@ l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz)
* Copy buffers for L2ARC writing.
*/
mutex_enter(&l2arc_buflist_mtx);
- for (int try = 0; try <= 3; try++) {
+ for (try = 0; try <= 3; try++) {
list = l2arc_list_locked(try, &list_lock);
passed_sz = 0;
diff --git a/module/zfs/dbuf.c b/module/zfs/dbuf.c
index a29b4a33b..1fd66d964 100644
--- a/module/zfs/dbuf.c
+++ b/module/zfs/dbuf.c
@@ -111,11 +111,13 @@ dbuf_find(dnode_t *dn, uint8_t level, uint64_t blkid)
{
dbuf_hash_table_t *h = &dbuf_hash_table;
objset_impl_t *os = dn->dn_objset;
- uint64_t obj = dn->dn_object;
- uint64_t hv = DBUF_HASH(os, obj, level, blkid);
- uint64_t idx = hv & h->hash_table_mask;
+ uint64_t obj, hv, idx;
dmu_buf_impl_t *db;
+ obj = dn->dn_object;
+ hv = DBUF_HASH(os, obj, level, blkid);
+ idx = hv & h->hash_table_mask;
+
mutex_enter(DBUF_HASH_MUTEX(h, idx));
for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
if (DBUF_EQUAL(db, os, obj, level, blkid)) {
@@ -144,11 +146,13 @@ dbuf_hash_insert(dmu_buf_impl_t *db)
objset_impl_t *os = db->db_objset;
uint64_t obj = db->db.db_object;
int level = db->db_level;
- uint64_t blkid = db->db_blkid;
- uint64_t hv = DBUF_HASH(os, obj, level, blkid);
- uint64_t idx = hv & h->hash_table_mask;
+ uint64_t blkid, hv, idx;
dmu_buf_impl_t *dbf;
+ blkid = db->db_blkid;
+ hv = DBUF_HASH(os, obj, level, blkid);
+ idx = hv & h->hash_table_mask;
+
mutex_enter(DBUF_HASH_MUTEX(h, idx));
for (dbf = h->hash_table[idx]; dbf != NULL; dbf = dbf->db_hash_next) {
if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
@@ -178,11 +182,13 @@ static void
dbuf_hash_remove(dmu_buf_impl_t *db)
{
dbuf_hash_table_t *h = &dbuf_hash_table;
- uint64_t hv = DBUF_HASH(db->db_objset, db->db.db_object,
- db->db_level, db->db_blkid);
- uint64_t idx = hv & h->hash_table_mask;
+ uint64_t hv, idx;
dmu_buf_impl_t *dbf, **dbp;
+ hv = DBUF_HASH(db->db_objset, db->db.db_object,
+ db->db_level, db->db_blkid);
+ idx = hv & h->hash_table_mask;
+
/*
* We musn't hold db_mtx to maintin lock ordering:
* DBUF_HASH_MUTEX > db_mtx.
@@ -1577,7 +1583,7 @@ dbuf_prefetch(dnode_t *dn, uint64_t blkid)
return;
/* dbuf_find() returns with db_mtx held */
- if (db = dbuf_find(dn, 0, blkid)) {
+ if ((db = dbuf_find(dn, 0, blkid))) {
if (refcount_count(&db->db_holds) > 0) {
/*
* This dbuf is active. We assume that it is
@@ -1738,8 +1744,7 @@ dbuf_create_bonus(dnode_t *dn)
void
dbuf_add_ref(dmu_buf_impl_t *db, void *tag)
{
- int64_t holds = refcount_add(&db->db_holds, tag);
- ASSERT(holds > 1);
+ VERIFY(refcount_add(&db->db_holds, tag) > 1);
}
#pragma weak dmu_buf_rele = dbuf_rele
@@ -2108,7 +2113,7 @@ dbuf_sync_list(list_t *list, dmu_tx_t *tx)
{
dbuf_dirty_record_t *dr;
- while (dr = list_head(list)) {
+ while ((dr = list_head(list))) {
if (dr->dr_zio != NULL) {
/*
* If we find an already initialized zio then we
@@ -2340,17 +2345,15 @@ dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
ASSERT(arc_released(db->db_buf));
}
} else {
- dnode_t *dn = db->db_dnode;
-
ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
- ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
+ ASSERT3U(db->db.db_size, ==,
+ 1<<db->db_dnode->dn_phys->dn_indblkshift);
if (!BP_IS_HOLE(db->db_blkptr)) {
- int epbs =
- dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
db->db.db_size);
- ASSERT3U(dn->dn_phys->dn_maxblkid
- >> (db->db_level * epbs), >=, db->db_blkid);
+ ASSERT3U(db->db_dnode->dn_phys->dn_maxblkid >> (db->db_level *
+ (db->db_dnode->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT)),
+ >=, db->db_blkid);
arc_set_callback(db->db_buf, dbuf_do_evict, db);
}
mutex_destroy(&dr->dt.di.dr_mtx);
diff --git a/module/zfs/dmu.c b/module/zfs/dmu.c
index b6205bd50..ea2bcb940 100644
--- a/module/zfs/dmu.c
+++ b/module/zfs/dmu.c
@@ -181,7 +181,7 @@ dmu_bonus_hold(objset_t *os, uint64_t object, void *tag, dmu_buf_t **dbp)
*/
static int
dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset,
- uint64_t length, int read, void *tag, int *numbufsp, dmu_buf_t ***dbpp)
+ uint64_t length, int rd, void *tag, int *numbufsp, dmu_buf_t ***dbpp)
{
dsl_pool_t *dp = NULL;
dmu_buf_t **dbp;
@@ -231,7 +231,7 @@ dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset,
return (EIO);
}
/* initiate async i/o */
- if (read) {
+ if (rd) {
rw_exit(&dn->dn_struct_rwlock);
(void) dbuf_read(db, zio, flags);
rw_enter(&dn->dn_struct_rwlock, RW_READER);
@@ -251,7 +251,7 @@ dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset,
}
/* wait for other io to complete */
- if (read) {
+ if (rd) {
for (i = 0; i < nblks; i++) {
dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbp[i];
mutex_enter(&db->db_mtx);
@@ -275,7 +275,7 @@ dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset,
static int
dmu_buf_hold_array(objset_t *os, uint64_t object, uint64_t offset,
- uint64_t length, int read, void *tag, int *numbufsp, dmu_buf_t ***dbpp)
+ uint64_t length, int rd, void *tag, int *numbufsp, dmu_buf_t ***dbpp)
{
dnode_t *dn;
int err;
@@ -284,7 +284,7 @@ dmu_buf_hold_array(objset_t *os, uint64_t object, uint64_t offset,
if (err)
return (err);
- err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag,
+ err = dmu_buf_hold_array_by_dnode(dn, offset, length, rd, tag,
numbufsp, dbpp);
dnode_rele(dn, FTAG);
@@ -294,12 +294,12 @@ dmu_buf_hold_array(objset_t *os, uint64_t object, uint64_t offset,
int
dmu_buf_hold_array_by_bonus(dmu_buf_t *db, uint64_t offset,
- uint64_t length, int read, void *tag, int *numbufsp, dmu_buf_t ***dbpp)
+ uint64_t length, int rd, void *tag, int *numbufsp, dmu_buf_t ***dbpp)
{
dnode_t *dn = ((dmu_buf_impl_t *)db)->db_dnode;
int err;
- err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag,
+ err = dmu_buf_hold_array_by_dnode(dn, offset, length, rd, tag,
numbufsp, dbpp);
return (err);
diff --git a/module/zfs/dmu_objset.c b/module/zfs/dmu_objset.c
index 7981e0682..5ad3208db 100644
--- a/module/zfs/dmu_objset.c
+++ b/module/zfs/dmu_objset.c
@@ -803,7 +803,7 @@ dmu_objset_snapshot(char *fsname, char *snapname, boolean_t recursive)
}
out:
- while (osn = list_head(&sn.objsets)) {
+ while ((osn = list_head(&sn.objsets))) {
list_remove(&sn.objsets, osn);
zil_resume(dmu_objset_zil(osn->os));
dmu_objset_close(osn->os);
@@ -823,7 +823,7 @@ dmu_objset_sync_dnodes(list_t *list, dmu_tx_t *tx)
{
dnode_t *dn;
- while (dn = list_head(list)) {
+ while ((dn = list_head(list))) {
ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
ASSERT(dn->dn_dbuf->db_data_pending);
/*
@@ -843,6 +843,8 @@ dmu_objset_sync_dnodes(list_t *list, dmu_tx_t *tx)
static void
ready(zio_t *zio, arc_buf_t *abuf, void *arg)
{
+ int i;
+
blkptr_t *bp = zio->io_bp;
blkptr_t *bp_orig = &zio->io_bp_orig;
objset_impl_t *os = arg;
@@ -856,7 +858,7 @@ ready(zio_t *zio, arc_buf_t *abuf, void *arg)
* Update rootbp fill count.
*/
bp->blk_fill = 1; /* count the meta-dnode */
- for (int i = 0; i < dnp->dn_nblkptr; i++)
+ for (i = 0; i < dnp->dn_nblkptr; i++)
bp->blk_fill += dnp->dn_blkptr[i].blk_fill;
if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
@@ -931,7 +933,7 @@ dmu_objset_sync(objset_impl_t *os, zio_t *pio, dmu_tx_t *tx)
dmu_objset_sync_dnodes(&os->os_dirty_dnodes[txgoff], tx);
list = &os->os_meta_dnode->dn_dirty_records[txgoff];
- while (dr = list_head(list)) {
+ while ((dr = list_head(list))) {
ASSERT(dr->dr_dbuf->db_level == 0);
list_remove(list, dr);
if (dr->dr_zio)
@@ -960,11 +962,11 @@ dmu_objset_fsid_guid(objset_t *os)
}
void
-dmu_objset_fast_stat(objset_t *os, dmu_objset_stats_t *stat)
+dmu_objset_fast_stat(objset_t *os, dmu_objset_stats_t *st)
{
- stat->dds_type = os->os->os_phys->os_type;
+ st->dds_type = os->os->os_phys->os_type;
if (os->os->os_dsl_dataset)
- dsl_dataset_fast_stat(os->os->os_dsl_dataset, stat);
+ dsl_dataset_fast_stat(os->os->os_dsl_dataset, st);
}
void
diff --git a/module/zfs/dmu_send.c b/module/zfs/dmu_send.c
index 857b9a343..65c60c816 100644
--- a/module/zfs/dmu_send.c
+++ b/module/zfs/dmu_send.c
@@ -957,6 +957,28 @@ dmu_recv_abort_cleanup(dmu_recv_cookie_t *drc)
}
/*
+ * Compute checksum of drr_begin record
+ */
+static void
+dmu_recv_stream_cksum(dmu_recv_cookie_t *drc, struct restorearg *ra)
+{
+ dmu_replay_record_t *drr;
+
+ drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
+
+ drr->drr_type = DRR_BEGIN;
+ drr->drr_u.drr_begin = *drc->drc_drrb;
+ if (ra->byteswap) {
+ fletcher_4_incremental_byteswap(drr,
+ sizeof (dmu_replay_record_t), &(ra->cksum));
+ } else {
+ fletcher_4_incremental_native(drr,
+ sizeof (dmu_replay_record_t), &(ra->cksum));
+ }
+ kmem_free(drr, sizeof (dmu_replay_record_t));
+}
+
+/*
* NB: callers *must* call dmu_recv_end() if this succeeds.
*/
int
@@ -970,22 +992,7 @@ dmu_recv_stream(dmu_recv_cookie_t *drc, vnode_t *vp, offset_t *voffp)
if (drc->drc_drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC))
ra.byteswap = TRUE;
- {
- /* compute checksum of drr_begin record */
- dmu_replay_record_t *drr;
- drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
-
- drr->drr_type = DRR_BEGIN;
- drr->drr_u.drr_begin = *drc->drc_drrb;
- if (ra.byteswap) {
- fletcher_4_incremental_byteswap(drr,
- sizeof (dmu_replay_record_t), &ra.cksum);
- } else {
- fletcher_4_incremental_native(drr,
- sizeof (dmu_replay_record_t), &ra.cksum);
- }
- kmem_free(drr, sizeof (dmu_replay_record_t));
- }
+ dmu_recv_stream_cksum(drc, &ra);
if (ra.byteswap) {
struct drr_begin *drrb = drc->drc_drrb;
diff --git a/module/zfs/dmu_traverse.c b/module/zfs/dmu_traverse.c
index 512401470..9044039b2 100644
--- a/module/zfs/dmu_traverse.c
+++ b/module/zfs/dmu_traverse.c
@@ -309,7 +309,7 @@ traverse_impl(spa_t *spa, uint64_t objset, blkptr_t *rootbp,
uint64_t txg_start, int flags, blkptr_cb_t func, void *arg)
{
struct traverse_data td;
- struct prefetch_data pd = { 0 };
+ struct prefetch_data pd;
zbookmark_t czb;
int err;
@@ -323,7 +323,10 @@ traverse_impl(spa_t *spa, uint64_t objset, blkptr_t *rootbp,
td.td_flags = flags;
pd.pd_blks_max = 100;
+ pd.pd_blks_fetched = 0;
pd.pd_flags = flags;
+ pd.pd_cancel = B_FALSE;
+ pd.pd_exited = B_FALSE;
mutex_init(&pd.pd_mtx, NULL, MUTEX_DEFAULT, NULL);
cv_init(&pd.pd_cv, NULL, CV_DEFAULT, NULL);
diff --git a/module/zfs/dmu_tx.c b/module/zfs/dmu_tx.c
index bf560e565..18a640d6d 100644
--- a/module/zfs/dmu_tx.c
+++ b/module/zfs/dmu_tx.c
@@ -999,7 +999,7 @@ dmu_tx_commit(dmu_tx_t *tx)
ASSERT(tx->tx_txg != 0);
- while (txh = list_head(&tx->tx_holds)) {
+ while ((txh = list_head(&tx->tx_holds))) {
dnode_t *dn = txh->txh_dnode;
list_remove(&tx->tx_holds, txh);
@@ -1042,7 +1042,7 @@ dmu_tx_abort(dmu_tx_t *tx)
ASSERT(tx->tx_txg == 0);
- while (txh = list_head(&tx->tx_holds)) {
+ while ((txh = list_head(&tx->tx_holds))) {
dnode_t *dn = txh->txh_dnode;
list_remove(&tx->tx_holds, txh);
diff --git a/module/zfs/dmu_zfetch.c b/module/zfs/dmu_zfetch.c
index 4d79fe98e..d39b4d4b3 100644
--- a/module/zfs/dmu_zfetch.c
+++ b/module/zfs/dmu_zfetch.c
@@ -411,7 +411,7 @@ top:
if (zs) {
if (reset) {
- zstream_t *remove = zs;
+ zstream_t *rm = zs;
rc = 0;
mutex_exit(&zs->zst_lock);
@@ -423,7 +423,7 @@ top:
*/
for (zs = list_head(&zf->zf_stream); zs;
zs = list_next(&zf->zf_stream, zs)) {
- if (zs == remove) {
+ if (zs == rm) {
dmu_zfetch_stream_remove(zf, zs);
mutex_destroy(&zs->zst_lock);
kmem_free(zs, sizeof (zstream_t));
diff --git a/module/zfs/dnode.c b/module/zfs/dnode.c
index 7f7f9592a..0cc67db59 100644
--- a/module/zfs/dnode.c
+++ b/module/zfs/dnode.c
@@ -40,7 +40,9 @@ static int free_range_compar(const void *node1, const void *node2);
static kmem_cache_t *dnode_cache;
+#ifndef NDEBUG
static dnode_phys_t dnode_phys_zero;
+#endif
int zfs_default_bs = SPA_MINBLOCKSHIFT;
int zfs_default_ibs = DN_MAX_INDBLKSHIFT;
@@ -133,7 +135,6 @@ dnode_verify(dnode_t *dn)
}
if (dn->dn_phys->dn_type != DMU_OT_NONE || dn->dn_allocated_txg != 0) {
int i;
- ASSERT3U(dn->dn_indblkshift, >=, 0);
ASSERT3U(dn->dn_indblkshift, <=, SPA_MAXBLOCKSHIFT);
if (dn->dn_datablkshift) {
ASSERT3U(dn->dn_datablkshift, >=, SPA_MINBLOCKSHIFT);
@@ -527,11 +528,12 @@ dnode_buf_pageout(dmu_buf_t *db, void *arg)
for (i = 0; i < epb; i++) {
dnode_t *dn = children_dnodes[i];
- int n;
if (dn == NULL)
continue;
#ifdef ZFS_DEBUG
+ {
+ int n;
/*
* If there are holds on this dnode, then there should
* be holds on the dnode's containing dbuf as well; thus
@@ -544,6 +546,7 @@ dnode_buf_pageout(dmu_buf_t *db, void *arg)
for (n = 0; n < TXG_SIZE; n++)
ASSERT(!list_link_active(&dn->dn_dirty_link[n]));
+ }
#endif
children_dnodes[i] = NULL;
dnode_destroy(dn);
@@ -610,18 +613,18 @@ dnode_hold_impl(objset_impl_t *os, uint64_t object, int flag,
dnode_t **winner;
children_dnodes = kmem_zalloc(epb * sizeof (dnode_t *),
KM_SLEEP);
- if (winner = dmu_buf_set_user(&db->db, children_dnodes, NULL,
- dnode_buf_pageout)) {
+ if ((winner = dmu_buf_set_user(&db->db, children_dnodes, NULL,
+ dnode_buf_pageout))) {
kmem_free(children_dnodes, epb * sizeof (dnode_t *));
children_dnodes = winner;
}
}
if ((dn = children_dnodes[idx]) == NULL) {
- dnode_phys_t *dnp = (dnode_phys_t *)db->db.db_data+idx;
+ dnode_phys_t *dnpp = (dnode_phys_t *)db->db.db_data+idx;
dnode_t *winner;
- dn = dnode_create(os, dnp, db, object);
+ dn = dnode_create(os, dnpp, db, object);
winner = atomic_cas_ptr(&children_dnodes[idx], NULL, dn);
if (winner != NULL) {
dnode_destroy(dn);
diff --git a/module/zfs/dnode_sync.c b/module/zfs/dnode_sync.c
index cabd5b6ed..f4154bd91 100644
--- a/module/zfs/dnode_sync.c
+++ b/module/zfs/dnode_sync.c
@@ -319,8 +319,10 @@ dnode_sync_free_range(dnode_t *dn, uint64_t blkid, uint64_t nblks, dmu_tx_t *tx)
ASSERT3U(blkid + nblks, <=, dn->dn_phys->dn_nblkptr);
(void) free_blocks(dn, bp + blkid, nblks, tx);
if (trunc) {
+#ifndef NDEBUG
uint64_t off = (dn->dn_phys->dn_maxblkid + 1) *
(dn->dn_phys->dn_datablkszsec << SPA_MINBLOCKSHIFT);
+#endif
dn->dn_phys->dn_maxblkid = (blkid ? blkid - 1 : 0);
ASSERT(off < dn->dn_phys->dn_maxblkid ||
dn->dn_phys->dn_maxblkid == 0 ||
@@ -349,8 +351,10 @@ dnode_sync_free_range(dnode_t *dn, uint64_t blkid, uint64_t nblks, dmu_tx_t *tx)
dbuf_rele(db, FTAG);
}
if (trunc) {
+#ifndef NDEBUG
uint64_t off = (dn->dn_phys->dn_maxblkid + 1) *
(dn->dn_phys->dn_datablkszsec << SPA_MINBLOCKSHIFT);
+#endif
dn->dn_phys->dn_maxblkid = (blkid ? blkid - 1 : 0);
ASSERT(off < dn->dn_phys->dn_maxblkid ||
dn->dn_phys->dn_maxblkid == 0 ||
@@ -426,7 +430,7 @@ dnode_undirty_dbufs(list_t *list)
{
dbuf_dirty_record_t *dr;
- while (dr = list_head(list)) {
+ while ((dr = list_head(list))) {
dmu_buf_impl_t *db = dr->dr_dbuf;
uint64_t txg = dr->dr_txg;
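
The #ifndef NDEBUG guards added in dnode.c and dnode_sync.c above wrap variables that are read only by ASSERT()s, which are compiled out in non-debug builds; without the guard those builds would warn about a variable that is set but never used. A minimal sketch of the same idiom using the standard assert() macro (the ZFS ASSERT family is compiled out under the same condition here):

    #include <assert.h>

    static void
    record_max(unsigned long *maxp, unsigned long candidate)
    {
    #ifndef NDEBUG
        unsigned long old = *maxp;  /* consumed only by the assertion below */
    #endif

        if (candidate > *maxp)
            *maxp = candidate;

        /* Compiled out when NDEBUG is defined, just like the guard above. */
        assert(*maxp >= old && *maxp >= candidate);
    }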
diff --git a/module/zfs/dsl_dataset.c b/module/zfs/dsl_dataset.c
index c0dbda195..ad9b717a6 100644
--- a/module/zfs/dsl_dataset.c
+++ b/module/zfs/dsl_dataset.c
@@ -78,11 +78,13 @@ parent_delta(dsl_dataset_t *ds, int64_t delta)
void
dsl_dataset_block_born(dsl_dataset_t *ds, blkptr_t *bp, dmu_tx_t *tx)
{
- int used = bp_get_dasize(tx->tx_pool->dp_spa, bp);
- int compressed = BP_GET_PSIZE(bp);
- int uncompressed = BP_GET_UCSIZE(bp);
+ int used, compressed, uncompressed;
int64_t delta;
+ used = bp_get_dasize(tx->tx_pool->dp_spa, bp);
+ compressed = BP_GET_PSIZE(bp);
+ uncompressed = BP_GET_UCSIZE(bp);
+
dprintf_bp(bp, "born, ds=%p\n", ds);
ASSERT(dmu_tx_is_syncing(tx));
@@ -351,7 +353,7 @@ dsl_dataset_get_ref(dsl_pool_t *dp, uint64_t dsobj, void *tag,
return (err);
ds = dmu_buf_get_user(dbuf);
if (ds == NULL) {
- dsl_dataset_t *winner;
+ dsl_dataset_t *winner = NULL;
ds = kmem_zalloc(sizeof (dsl_dataset_t), KM_SLEEP);
ds->ds_dbuf = dbuf;
@@ -1996,10 +1998,8 @@ dsl_dataset_space(dsl_dataset_t *ds,
boolean_t
dsl_dataset_modified_since_lastsnap(dsl_dataset_t *ds)
{
- dsl_pool_t *dp = ds->ds_dir->dd_pool;
-
- ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
- dsl_pool_sync_context(dp));
+ ASSERT(RW_LOCK_HELD(&(ds->ds_dir->dd_pool)->dp_config_rwlock) ||
+ dsl_pool_sync_context(ds->ds_dir->dd_pool));
if (ds->ds_prev == NULL)
return (B_FALSE);
if (ds->ds_phys->ds_bp.blk_birth >
diff --git a/module/zfs/dsl_deleg.c b/module/zfs/dsl_deleg.c
index da5d15787..24f68b89d 100644
--- a/module/zfs/dsl_deleg.c
+++ b/module/zfs/dsl_deleg.c
@@ -101,13 +101,13 @@ dsl_deleg_can_allow(char *ddname, nvlist_t *nvp, cred_t *cr)
if ((error = dsl_deleg_access(ddname, ZFS_DELEG_PERM_ALLOW, cr)) != 0)
return (error);
- while (whopair = nvlist_next_nvpair(nvp, whopair)) {
+ while ((whopair = nvlist_next_nvpair(nvp, whopair))) {
nvlist_t *perms;
nvpair_t *permpair = NULL;
VERIFY(nvpair_value_nvlist(whopair, &perms) == 0);
- while (permpair = nvlist_next_nvpair(perms, permpair)) {
+ while ((permpair = nvlist_next_nvpair(perms, permpair))) {
const char *perm = nvpair_name(permpair);
if (strcmp(perm, ZFS_DELEG_PERM_ALLOW) == 0)
@@ -138,7 +138,7 @@ dsl_deleg_can_unallow(char *ddname, nvlist_t *nvp, cred_t *cr)
(void) snprintf(idstr, sizeof (idstr), "%lld",
(longlong_t)crgetuid(cr));
- while (whopair = nvlist_next_nvpair(nvp, whopair)) {
+ while ((whopair = nvlist_next_nvpair(nvp, whopair))) {
zfs_deleg_who_type_t type = nvpair_name(whopair)[0];
if (type != ZFS_DELEG_USER &&
@@ -166,7 +166,7 @@ dsl_deleg_set_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
DMU_OT_DSL_PERMS, DMU_OT_NONE, 0, tx);
}
- while (whopair = nvlist_next_nvpair(nvp, whopair)) {
+ while ((whopair = nvlist_next_nvpair(nvp, whopair))) {
const char *whokey = nvpair_name(whopair);
nvlist_t *perms;
nvpair_t *permpair = NULL;
@@ -181,7 +181,7 @@ dsl_deleg_set_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
whokey, 8, 1, &jumpobj, tx) == 0);
}
- while (permpair = nvlist_next_nvpair(perms, permpair)) {
+ while ((permpair = nvlist_next_nvpair(perms, permpair))) {
const char *perm = nvpair_name(permpair);
uint64_t n = 0;
@@ -207,7 +207,7 @@ dsl_deleg_unset_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
if (zapobj == 0)
return;
- while (whopair = nvlist_next_nvpair(nvp, whopair)) {
+ while ((whopair = nvlist_next_nvpair(nvp, whopair))) {
const char *whokey = nvpair_name(whopair);
nvlist_t *perms;
nvpair_t *permpair = NULL;
@@ -229,7 +229,7 @@ dsl_deleg_unset_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
if (zap_lookup(mos, zapobj, whokey, 8, 1, &jumpobj) != 0)
continue;
- while (permpair = nvlist_next_nvpair(perms, permpair)) {
+ while ((permpair = nvlist_next_nvpair(perms, permpair))) {
const char *perm = nvpair_name(permpair);
uint64_t n = 0;
@@ -266,7 +266,7 @@ dsl_deleg_set(const char *ddname, nvlist_t *nvp, boolean_t unset)
return (ENOTSUP);
}
- while (whopair = nvlist_next_nvpair(nvp, whopair))
+ while ((whopair = nvlist_next_nvpair(nvp, whopair)))
blocks_modified++;
error = dsl_sync_task_do(dd->dd_pool, NULL,
diff --git a/module/zfs/dsl_dir.c b/module/zfs/dsl_dir.c
index 48d87f97f..7c46092ab 100644
--- a/module/zfs/dsl_dir.c
+++ b/module/zfs/dsl_dir.c
@@ -48,11 +48,10 @@ static void
dsl_dir_evict(dmu_buf_t *db, void *arg)
{
dsl_dir_t *dd = arg;
- dsl_pool_t *dp = dd->dd_pool;
int t;
for (t = 0; t < TXG_SIZE; t++) {
- ASSERT(!txg_list_member(&dp->dp_dirty_dirs, dd, t));
+ ASSERT(!txg_list_member(&dd->dd_pool->dp_dirty_dirs, dd, t));
ASSERT(dd->dd_tempreserved[t] == 0);
ASSERT(dd->dd_space_towrite[t] == 0);
}
@@ -870,7 +869,7 @@ dsl_dir_tempreserve_clear(void *tr_cookie, dmu_tx_t *tx)
if (tr_cookie == NULL)
return;
- while (tr = list_head(tr_list)) {
+ while ((tr = list_head(tr_list))) {
if (tr->tr_dp) {
dsl_pool_tempreserve_clear(tr->tr_dp, tr->tr_size, tx);
} else if (tr->tr_ds) {
@@ -953,11 +952,13 @@ dsl_dir_diduse_space(dsl_dir_t *dd, dd_used_t type,
dd->dd_phys->dd_used_breakdown[type] >= -used);
dd->dd_phys->dd_used_breakdown[type] += used;
#ifdef DEBUG
- dd_used_t t;
- uint64_t u = 0;
- for (t = 0; t < DD_USED_NUM; t++)
- u += dd->dd_phys->dd_used_breakdown[t];
- ASSERT3U(u, ==, dd->dd_phys->dd_used_bytes);
+ {
+ dd_used_t t;
+ uint64_t u = 0;
+ for (t = 0; t < DD_USED_NUM; t++)
+ u += dd->dd_phys->dd_used_breakdown[t];
+ ASSERT3U(u, ==, dd->dd_phys->dd_used_bytes);
+ }
#endif
}
if (needlock)
@@ -1221,8 +1222,8 @@ dsl_dir_rename_check(void *arg1, void *arg2, dmu_tx_t *tx)
if (closest_common_ancestor(dd, ra->newparent) == dd)
return (EINVAL);
- if (err = dsl_dir_transfer_possible(dd->dd_parent,
- ra->newparent, myspace))
+ if ((err = dsl_dir_transfer_possible(dd->dd_parent,
+ ra->newparent, myspace)))
return (err);
}
diff --git a/module/zfs/dsl_pool.c b/module/zfs/dsl_pool.c
index 41386b269..f4275ea06 100644
--- a/module/zfs/dsl_pool.c
+++ b/module/zfs/dsl_pool.c
@@ -298,7 +298,7 @@ dsl_pool_sync(dsl_pool_t *dp, uint64_t txg)
dp->dp_read_overhead = 0;
zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
- while (ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) {
+ while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg))) {
if (!list_link_active(&ds->ds_synced_link))
list_insert_tail(&dp->dp_synced_datasets, ds);
else
@@ -387,7 +387,7 @@ dsl_pool_zil_clean(dsl_pool_t *dp)
{
dsl_dataset_t *ds;
- while (ds = list_head(&dp->dp_synced_datasets)) {
+ while ((ds = list_head(&dp->dp_synced_datasets))) {
list_remove(&dp->dp_synced_datasets, ds);
ASSERT(ds->ds_user_ptr != NULL);
zil_clean(((objset_impl_t *)ds->ds_user_ptr)->os_zil);
diff --git a/module/zfs/dsl_prop.c b/module/zfs/dsl_prop.c
index 212acbbc5..bb19f3e9e 100644
--- a/module/zfs/dsl_prop.c
+++ b/module/zfs/dsl_prop.c
@@ -370,7 +370,7 @@ dsl_prop_set_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
if (psa->numints == 0) {
int err = zap_remove(mos, zapobj, psa->name, tx);
- ASSERT(err == 0 || err == ENOENT);
+ VERIFY(0 == err || ENOENT == err);
if (isint) {
VERIFY(0 == dsl_prop_get_ds(ds,
psa->name, 8, 1, &intval, NULL));
diff --git a/module/zfs/dsl_synctask.c b/module/zfs/dsl_synctask.c
index 21100225a..9bb9c4580 100644
--- a/module/zfs/dsl_synctask.c
+++ b/module/zfs/dsl_synctask.c
@@ -139,7 +139,7 @@ dsl_sync_task_group_destroy(dsl_sync_task_group_t *dstg)
{
dsl_sync_task_t *dst;
- while (dst = list_head(&dstg->dstg_tasks)) {
+ while ((dst = list_head(&dstg->dstg_tasks))) {
list_remove(&dstg->dstg_tasks, dst);
kmem_free(dst, sizeof (dsl_sync_task_t));
}
diff --git a/module/zfs/include/sys/spa.h b/module/zfs/include/sys/spa.h
index 24b3ca447..1029b3829 100644
--- a/module/zfs/include/sys/spa.h
+++ b/module/zfs/include/sys/spa.h
@@ -522,7 +522,7 @@ extern void vdev_cache_stat_fini(void);
/* Initialization and termination */
extern void spa_init(int flags);
extern void spa_fini(void);
-extern void spa_boot_init();
+extern void spa_boot_init(void);
/* properties */
extern int spa_prop_set(spa_t *spa, nvlist_t *nvp);
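
The spa_boot_init() to spa_boot_init(void) change above (and the matching ones for zfs_remove_op_tables, zfs_create_op_tables, spa_history_zone, and l2arc_do_free_on_write elsewhere in this diff) is more than cosmetic: in C, an empty parameter list declares a function taking an unspecified number of arguments, while (void) declares a true prototype that lets the compiler reject calls with arguments. A two-line illustration, not taken from the ZFS tree:

    void takes_anything();     /* old style: arguments are unchecked at call sites */
    void takes_nothing(void);  /* prototype: takes_nothing(42) is a compile-time error */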
diff --git a/module/zfs/include/sys/zap.h b/module/zfs/include/sys/zap.h
index f88cc068b..687f7fcd7 100644
--- a/module/zfs/include/sys/zap.h
+++ b/module/zfs/include/sys/zap.h
@@ -317,6 +317,11 @@ void zap_cursor_advance(zap_cursor_t *zc);
uint64_t zap_cursor_serialize(zap_cursor_t *zc);
/*
+ * Advance the cursor to the attribute having the key.
+ */
+int zap_cursor_move_to_key(zap_cursor_t *zc, const char *name, matchtype_t mt);
+
+/*
* Initialize a zap cursor pointing to the position recorded by
* zap_cursor_serialize (in the "serialized" argument). You can also
* use a "serialized" argument of 0 to start at the beginning of the
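
The new zap_cursor_move_to_key() declared above (and implemented in zap.c and zap_micro.c later in this diff) positions an already-initialized cursor on the entry with the given name, so iteration can resume from a known attribute rather than from a serialized offset. A rough usage sketch, assuming the existing cursor helpers from this header (zap_cursor_init(), zap_cursor_retrieve(), zap_cursor_advance(), zap_cursor_fini()) and an exact-match lookup:

    /* Sketch only: iterate a ZAP object starting at the entry "start_name". */
    static int
    walk_from_key(objset_t *os, uint64_t zapobj, const char *start_name)
    {
        zap_cursor_t zc;
        zap_attribute_t za;
        int err;

        zap_cursor_init(&zc, os, zapobj);

        err = zap_cursor_move_to_key(&zc, start_name, MT_EXACT);
        if (err != 0) {
            zap_cursor_fini(&zc);
            return (err);
        }

        /* The cursor now points at "start_name"; walk forward from there. */
        while (zap_cursor_retrieve(&zc, &za) == 0) {
            /* ... process za.za_name / za.za_first_integer ... */
            zap_cursor_advance(&zc);
        }

        zap_cursor_fini(&zc);
        return (0);
    }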
diff --git a/module/zfs/include/sys/zap_impl.h b/module/zfs/include/sys/zap_impl.h
index 0dc02ab6b..159ffaf8b 100644
--- a/module/zfs/include/sys/zap_impl.h
+++ b/module/zfs/include/sys/zap_impl.h
@@ -210,6 +210,7 @@ int fzap_add_cd(zap_name_t *zn,
uint64_t integer_size, uint64_t num_integers,
const void *val, uint32_t cd, dmu_tx_t *tx);
void fzap_upgrade(zap_t *zap, dmu_tx_t *tx);
+int fzap_cursor_move_to_key(zap_cursor_t *zc, zap_name_t *zn);
#ifdef __cplusplus
}
diff --git a/module/zfs/include/sys/zfs_znode.h b/module/zfs/include/sys/zfs_znode.h
index a5416525c..db40968fa 100644
--- a/module/zfs/include/sys/zfs_znode.h
+++ b/module/zfs/include/sys/zfs_znode.h
@@ -305,8 +305,8 @@ extern int zfs_rezget(znode_t *);
extern void zfs_zinactive(znode_t *);
extern void zfs_znode_delete(znode_t *, dmu_tx_t *);
extern void zfs_znode_free(znode_t *);
-extern void zfs_remove_op_tables();
-extern int zfs_create_op_tables();
+extern void zfs_remove_op_tables(void);
+extern int zfs_create_op_tables(void);
extern int zfs_sync(vfs_t *vfsp, short flag, cred_t *cr);
extern dev_t zfs_cmpldev(uint64_t);
extern int zfs_get_zplprop(objset_t *os, zfs_prop_t prop, uint64_t *value);
diff --git a/module/zfs/include/sys/zil.h b/module/zfs/include/sys/zil.h
index 4d02d14f7..402a16cb2 100644
--- a/module/zfs/include/sys/zil.h
+++ b/module/zfs/include/sys/zil.h
@@ -334,8 +334,8 @@ typedef void zil_parse_blk_func_t(zilog_t *zilog, blkptr_t *bp, void *arg,
uint64_t txg);
typedef void zil_parse_lr_func_t(zilog_t *zilog, lr_t *lr, void *arg,
uint64_t txg);
-typedef int zil_replay_func_t();
-typedef void zil_replay_cleaner_t();
+typedef int zil_replay_func_t(void *, char *, boolean_t);
+typedef void zil_replay_cleaner_t(void *);
typedef int zil_get_data_t(void *arg, lr_write_t *lr, char *dbuf, zio_t *zio);
extern uint64_t zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func,
diff --git a/module/zfs/lzjb.c b/module/zfs/lzjb.c
index 7fcde8475..4132406bd 100644
--- a/module/zfs/lzjb.c
+++ b/module/zfs/lzjb.c
@@ -51,7 +51,7 @@ lzjb_compress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n)
{
uchar_t *src = s_start;
uchar_t *dst = d_start;
- uchar_t *cpy, *copymap;
+ uchar_t *cpy, *copymap = NULL;
int copymask = 1 << (NBBY - 1);
int mlen, offset;
uint16_t *hp;
@@ -104,7 +104,7 @@ lzjb_decompress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n)
uchar_t *src = s_start;
uchar_t *dst = d_start;
uchar_t *d_end = (uchar_t *)d_start + d_len;
- uchar_t *cpy, copymap;
+ uchar_t *cpy, copymap = 0;
int copymask = 1 << (NBBY - 1);
while (dst < d_end) {
diff --git a/module/zfs/metaslab.c b/module/zfs/metaslab.c
index 87727fac2..9a4b43665 100644
--- a/module/zfs/metaslab.c
+++ b/module/zfs/metaslab.c
@@ -963,7 +963,7 @@ metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
{
dva_t *dva = bp->blk_dva;
dva_t *hintdva = hintbp->blk_dva;
- int error = 0;
+ int d, error = 0;
ASSERT(bp->blk_birth == 0);
@@ -978,7 +978,7 @@ metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
ASSERT(BP_GET_NDVAS(bp) == 0);
ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));
- for (int d = 0; d < ndvas; d++) {
+ for (d = 0; d < ndvas; d++) {
error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva,
txg, flags);
if (error) {
@@ -1004,14 +1004,14 @@ void
metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
{
const dva_t *dva = bp->blk_dva;
- int ndvas = BP_GET_NDVAS(bp);
+ int d, ndvas = BP_GET_NDVAS(bp);
ASSERT(!BP_IS_HOLE(bp));
ASSERT(!now || bp->blk_birth >= spa->spa_syncing_txg);
spa_config_enter(spa, SCL_FREE, FTAG, RW_READER);
- for (int d = 0; d < ndvas; d++)
+ for (d = 0; d < ndvas; d++)
metaslab_free_dva(spa, &dva[d], txg, now);
spa_config_exit(spa, SCL_FREE, FTAG);
@@ -1022,7 +1022,7 @@ metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
{
const dva_t *dva = bp->blk_dva;
int ndvas = BP_GET_NDVAS(bp);
- int error = 0;
+ int d, error = 0;
ASSERT(!BP_IS_HOLE(bp));
@@ -1037,7 +1037,7 @@ metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
- for (int d = 0; d < ndvas; d++)
+ for (d = 0; d < ndvas; d++)
if ((error = metaslab_claim_dva(spa, &dva[d], txg)) != 0)
break;
diff --git a/module/zfs/refcount.c b/module/zfs/refcount.c
index f1b3b23fe..2ce8e4356 100644
--- a/module/zfs/refcount.c
+++ b/module/zfs/refcount.c
@@ -75,13 +75,13 @@ refcount_destroy_many(refcount_t *rc, uint64_t number)
reference_t *ref;
ASSERT(rc->rc_count == number);
- while (ref = list_head(&rc->rc_list)) {
+ while ((ref = list_head(&rc->rc_list))) {
list_remove(&rc->rc_list, ref);
kmem_cache_free(reference_cache, ref);
}
list_destroy(&rc->rc_list);
- while (ref = list_head(&rc->rc_removed)) {
+ while ((ref = list_head(&rc->rc_removed))) {
list_remove(&rc->rc_removed, ref);
kmem_cache_free(reference_history_cache, ref->ref_removed);
kmem_cache_free(reference_cache, ref);
@@ -113,7 +113,7 @@ refcount_count(refcount_t *rc)
int64_t
refcount_add_many(refcount_t *rc, uint64_t number, void *holder)
{
- reference_t *ref;
+ reference_t *ref = NULL;
int64_t count;
if (reference_tracking_enable) {
diff --git a/module/zfs/rrwlock.c b/module/zfs/rrwlock.c
index 710685dbc..db3b70fc6 100644
--- a/module/zfs/rrwlock.c
+++ b/module/zfs/rrwlock.c
@@ -118,7 +118,7 @@ rrn_find_and_remove(rrwlock_t *rrl)
rrw_node_t *prev = NULL;
if (refcount_count(&rrl->rr_linked_rcount) == 0)
- return (NULL);
+ return (B_FALSE);
for (rn = tsd_get(rrw_tsd_key); rn != NULL; rn = rn->rn_next) {
if (rn->rn_rrl == rrl) {
diff --git a/module/zfs/spa.c b/module/zfs/spa.c
index fb1b96f8b..ea088bbeb 100644
--- a/module/zfs/spa.c
+++ b/module/zfs/spa.c
@@ -274,7 +274,7 @@ spa_prop_validate(spa_t *spa, nvlist_t *props)
{
nvpair_t *elem;
int error = 0, reset_bootfs = 0;
- uint64_t objnum;
+ uint64_t objnum = 0;
elem = NULL;
while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
@@ -393,6 +393,8 @@ spa_prop_validate(spa_t *spa, nvlist_t *props)
strcmp(slash, "/..") == 0)
error = EINVAL;
break;
+ default:
+ break;
}
if (error)
@@ -488,6 +490,8 @@ spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
static void
spa_activate(spa_t *spa)
{
+ int t, q;
+
ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
spa->spa_state = POOL_STATE_ACTIVE;
@@ -495,8 +499,8 @@ spa_activate(spa_t *spa)
spa->spa_normal_class = metaslab_class_create();
spa->spa_log_class = metaslab_class_create();
- for (int t = 0; t < ZIO_TYPES; t++) {
- for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
+ for (t = 0; t < ZIO_TYPES; t++) {
+ for (q = 0; q < ZIO_TASKQ_TYPES; q++) {
spa->spa_zio_taskq[t][q] = taskq_create("spa_zio",
zio_taskq_threads[t][q], maxclsyspri, 50,
INT_MAX, TASKQ_PREPOPULATE);
@@ -525,6 +529,8 @@ spa_activate(spa_t *spa)
static void
spa_deactivate(spa_t *spa)
{
+ int t, q;
+
ASSERT(spa->spa_sync_on == B_FALSE);
ASSERT(spa->spa_dsl_pool == NULL);
ASSERT(spa->spa_root_vdev == NULL);
@@ -536,8 +542,8 @@ spa_deactivate(spa_t *spa)
list_destroy(&spa->spa_config_dirty_list);
list_destroy(&spa->spa_state_dirty_list);
- for (int t = 0; t < ZIO_TYPES; t++) {
- for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
+ for (t = 0; t < ZIO_TYPES; t++) {
+ for (q = 0; q < ZIO_TASKQ_TYPES; q++) {
taskq_destroy(spa->spa_zio_taskq[t][q]);
spa->spa_zio_taskq[t][q] = NULL;
}
@@ -817,7 +823,7 @@ spa_load_l2cache(spa_t *spa)
uint_t nl2cache;
int i, j, oldnvdevs;
uint64_t guid, size;
- vdev_t *vd, **oldvdevs, **newvdevs;
+ vdev_t *vd, **oldvdevs, **newvdevs = NULL;
spa_aux_vdev_t *sav = &spa->spa_l2cache;
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
@@ -3006,7 +3012,7 @@ spa_vdev_detach(spa_t *spa, uint64_t guid, int replace_done)
vdev_t *rvd = spa->spa_root_vdev;
vdev_t *vd, *pvd, *cvd, *tvd;
boolean_t unspare = B_FALSE;
- uint64_t unspare_guid;
+ uint64_t unspare_guid = 0;
size_t len;
txg = spa_vdev_enter(spa);
@@ -3208,7 +3214,9 @@ spa_vdev_detach(spa_t *spa, uint64_t guid, int replace_done)
static nvlist_t *
spa_nvlist_lookup_by_guid(nvlist_t **nvpp, int count, uint64_t target_guid)
{
- for (int i = 0; i < count; i++) {
+ int i;
+
+ for (i = 0; i < count; i++) {
uint64_t guid;
VERIFY(nvlist_lookup_uint64(nvpp[i], ZPOOL_CONFIG_GUID,
@@ -3226,11 +3234,12 @@ spa_vdev_remove_aux(nvlist_t *config, char *name, nvlist_t **dev, int count,
nvlist_t *dev_to_remove)
{
nvlist_t **newdev = NULL;
+ int i, j;
if (count > 1)
newdev = kmem_alloc((count - 1) * sizeof (void *), KM_SLEEP);
- for (int i = 0, j = 0; i < count; i++) {
+ for (i = 0, j = 0; i < count; i++) {
if (dev[i] == dev_to_remove)
continue;
VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_SLEEP) == 0);
@@ -3239,7 +3248,7 @@ spa_vdev_remove_aux(nvlist_t *config, char *name, nvlist_t **dev, int count,
VERIFY(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY) == 0);
VERIFY(nvlist_add_nvlist_array(config, name, newdev, count - 1) == 0);
- for (int i = 0; i < count - 1; i++)
+ for (i = 0; i < count - 1; i++)
nvlist_free(newdev[i]);
if (count > 1)
@@ -3494,6 +3503,8 @@ spa_scrub(spa_t *spa, pool_scrub_type_t type)
static void
spa_async_remove(spa_t *spa, vdev_t *vd)
{
+ int c;
+
if (vd->vdev_remove_wanted) {
vd->vdev_remove_wanted = 0;
vdev_set_state(vd, B_FALSE, VDEV_STATE_REMOVED, VDEV_AUX_NONE);
@@ -3501,26 +3512,28 @@ spa_async_remove(spa_t *spa, vdev_t *vd)
vdev_state_dirty(vd->vdev_top);
}
- for (int c = 0; c < vd->vdev_children; c++)
+ for (c = 0; c < vd->vdev_children; c++)
spa_async_remove(spa, vd->vdev_child[c]);
}
static void
spa_async_probe(spa_t *spa, vdev_t *vd)
{
+ int c;
+
if (vd->vdev_probe_wanted) {
vd->vdev_probe_wanted = 0;
vdev_reopen(vd); /* vdev_open() does the actual probe */
}
- for (int c = 0; c < vd->vdev_children; c++)
+ for (c = 0; c < vd->vdev_children; c++)
spa_async_probe(spa, vd->vdev_child[c]);
}
static void
spa_async_thread(spa_t *spa)
{
- int tasks;
+ int tasks, i;
ASSERT(spa->spa_sync_on);
@@ -3544,9 +3557,9 @@ spa_async_thread(spa_t *spa)
if (tasks & SPA_ASYNC_REMOVE) {
spa_vdev_state_enter(spa);
spa_async_remove(spa, spa->spa_root_vdev);
- for (int i = 0; i < spa->spa_l2cache.sav_count; i++)
+ for (i = 0; i < spa->spa_l2cache.sav_count; i++)
spa_async_remove(spa, spa->spa_l2cache.sav_vdevs[i]);
- for (int i = 0; i < spa->spa_spares.sav_count; i++)
+ for (i = 0; i < spa->spa_spares.sav_count; i++)
spa_async_remove(spa, spa->spa_spares.sav_vdevs[i]);
(void) spa_vdev_state_exit(spa, NULL, 0);
}
@@ -3994,7 +4007,7 @@ spa_sync(spa_t *spa, uint64_t txg)
dsl_pool_sync(dp, txg);
dirty_vdevs = 0;
- while (vd = txg_list_remove(&spa->spa_vdev_txg_list, txg)) {
+ while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, txg))) {
vdev_sync(vd, txg);
dirty_vdevs++;
}
@@ -4078,7 +4091,7 @@ spa_sync(spa_t *spa, uint64_t txg)
/*
* Update usable space statistics.
*/
- while (vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg)))
+ while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg))))
vdev_sync_done(vd, txg);
/*
diff --git a/module/zfs/spa_history.c b/module/zfs/spa_history.c
index c997240c1..8b422cf8b 100644
--- a/module/zfs/spa_history.c
+++ b/module/zfs/spa_history.c
@@ -176,7 +176,7 @@ spa_history_write(spa_t *spa, void *buf, uint64_t len, spa_history_phys_t *shpp,
}
static char *
-spa_history_zone()
+spa_history_zone(void)
{
#ifdef _KERNEL
return (curproc->p_zone->zone_name);
diff --git a/module/zfs/spa_misc.c b/module/zfs/spa_misc.c
index 36046e6df..ae6ceb63e 100644
--- a/module/zfs/spa_misc.c
+++ b/module/zfs/spa_misc.c
@@ -255,7 +255,9 @@ int zfs_recover = 0;
static void
spa_config_lock_init(spa_t *spa)
{
- for (int i = 0; i < SCL_LOCKS; i++) {
+ int i;
+
+ for (i = 0; i < SCL_LOCKS; i++) {
spa_config_lock_t *scl = &spa->spa_config_lock[i];
mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
@@ -268,7 +270,9 @@ spa_config_lock_init(spa_t *spa)
static void
spa_config_lock_destroy(spa_t *spa)
{
- for (int i = 0; i < SCL_LOCKS; i++) {
+ int i;
+
+ for (i = 0; i < SCL_LOCKS; i++) {
spa_config_lock_t *scl = &spa->spa_config_lock[i];
mutex_destroy(&scl->scl_lock);
cv_destroy(&scl->scl_cv);
@@ -281,7 +285,9 @@ spa_config_lock_destroy(spa_t *spa)
int
spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw)
{
- for (int i = 0; i < SCL_LOCKS; i++) {
+ int i;
+
+ for (i = 0; i < SCL_LOCKS; i++) {
spa_config_lock_t *scl = &spa->spa_config_lock[i];
if (!(locks & (1 << i)))
continue;
@@ -310,7 +316,9 @@ spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw)
void
spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw)
{
- for (int i = 0; i < SCL_LOCKS; i++) {
+ int i;
+
+ for (i = 0; i < SCL_LOCKS; i++) {
spa_config_lock_t *scl = &spa->spa_config_lock[i];
if (!(locks & (1 << i)))
continue;
@@ -336,7 +344,9 @@ spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw)
void
spa_config_exit(spa_t *spa, int locks, void *tag)
{
- for (int i = SCL_LOCKS - 1; i >= 0; i--) {
+ int i;
+
+ for (i = SCL_LOCKS - 1; i >= 0; i--) {
spa_config_lock_t *scl = &spa->spa_config_lock[i];
if (!(locks & (1 << i)))
continue;
@@ -355,9 +365,9 @@ spa_config_exit(spa_t *spa, int locks, void *tag)
int
spa_config_held(spa_t *spa, int locks, krw_t rw)
{
- int locks_held = 0;
+ int i, locks_held = 0;
- for (int i = 0; i < SCL_LOCKS; i++) {
+ for (i = 0; i < SCL_LOCKS; i++) {
spa_config_lock_t *scl = &spa->spa_config_lock[i];
if (!(locks & (1 << i)))
continue;
@@ -1329,7 +1339,7 @@ spa_busy(void)
}
void
-spa_boot_init()
+spa_boot_init(void)
{
spa_config_load();
}
diff --git a/module/zfs/txg.c b/module/zfs/txg.c
index 8e99ef7f2..2ba3b20dd 100644
--- a/module/zfs/txg.c
+++ b/module/zfs/txg.c
@@ -156,12 +156,12 @@ txg_thread_exit(tx_state_t *tx, callb_cpr_t *cpr, kthread_t **tpp)
}
static void
-txg_thread_wait(tx_state_t *tx, callb_cpr_t *cpr, kcondvar_t *cv, uint64_t time)
+txg_thread_wait(tx_state_t *tx, callb_cpr_t *cpr, kcondvar_t *cv, uint64_t wt)
{
CALLB_CPR_SAFE_BEGIN(cpr);
- if (time)
- (void) cv_timedwait(cv, &tx->tx_sync_lock, lbolt + time);
+ if (wt)
+ (void) cv_timedwait(cv, &tx->tx_sync_lock, lbolt + wt);
else
cv_wait(cv, &tx->tx_sync_lock);
diff --git a/module/zfs/vdev.c b/module/zfs/vdev.c
index 856a36e2b..e7db441f8 100644
--- a/module/zfs/vdev.c
+++ b/module/zfs/vdev.c
@@ -868,6 +868,7 @@ vdev_probe(vdev_t *vd, zio_t *pio)
spa_t *spa = vd->vdev_spa;
vdev_probe_stats_t *vps;
zio_t *zio;
+ int l;
vps = kmem_zalloc(sizeof (*vps), KM_SLEEP);
@@ -900,7 +901,7 @@ vdev_probe(vdev_t *vd, zio_t *pio)
vps->vps_root = zio;
vps->vps_vd = vd;
- for (int l = 1; l < VDEV_LABELS; l++) {
+ for (l = 1; l < VDEV_LABELS; l++) {
zio_nowait(zio_read_phys(zio, vd,
vdev_label_offset(vd->vdev_psize, l,
offsetof(vdev_label_t, vl_pad)),
@@ -1416,8 +1417,7 @@ vdev_dtl_sync(vdev_t *vd, uint64_t txg)
if (vd->vdev_detached) {
if (smo->smo_object != 0) {
- int err = dmu_object_free(mos, smo->smo_object, tx);
- ASSERT3U(err, ==, 0);
+ VERIFY(0 == dmu_object_free(mos, smo->smo_object, tx));
smo->smo_object = 0;
}
dmu_tx_commit(tx);
@@ -1580,7 +1580,7 @@ vdev_sync_done(vdev_t *vd, uint64_t txg)
{
metaslab_t *msp;
- while (msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg)))
+ while ((msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg))))
metaslab_sync_done(msp, txg);
}
@@ -1795,6 +1795,7 @@ void
vdev_clear(spa_t *spa, vdev_t *vd)
{
vdev_t *rvd = spa->spa_root_vdev;
+ int c;
ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
@@ -1805,7 +1806,7 @@ vdev_clear(spa_t *spa, vdev_t *vd)
vd->vdev_stat.vs_write_errors = 0;
vd->vdev_stat.vs_checksum_errors = 0;
- for (int c = 0; c < vd->vdev_children; c++)
+ for (c = 0; c < vd->vdev_children; c++)
vdev_clear(spa, vd->vdev_child[c]);
/*
@@ -1888,6 +1889,7 @@ void
vdev_get_stats(vdev_t *vd, vdev_stat_t *vs)
{
vdev_t *rvd = vd->vdev_spa->spa_root_vdev;
+ int c, t;
mutex_enter(&vd->vdev_stat_lock);
bcopy(&vd->vdev_stat, vs, sizeof (*vs));
@@ -1902,12 +1904,12 @@ vdev_get_stats(vdev_t *vd, vdev_stat_t *vs)
* over all top-level vdevs (i.e. the direct children of the root).
*/
if (vd == rvd) {
- for (int c = 0; c < rvd->vdev_children; c++) {
+ for (c = 0; c < rvd->vdev_children; c++) {
vdev_t *cvd = rvd->vdev_child[c];
vdev_stat_t *cvs = &cvd->vdev_stat;
mutex_enter(&vd->vdev_stat_lock);
- for (int t = 0; t < ZIO_TYPES; t++) {
+ for (t = 0; t < ZIO_TYPES; t++) {
vs->vs_ops[t] += cvs->vs_ops[t];
vs->vs_bytes[t] += cvs->vs_bytes[t];
}
diff --git a/module/zfs/vdev_cache.c b/module/zfs/vdev_cache.c
index 5a7b59f6e..cad0438c3 100644
--- a/module/zfs/vdev_cache.c
+++ b/module/zfs/vdev_cache.c
@@ -253,7 +253,6 @@ vdev_cache_read(zio_t *zio)
vdev_cache_t *vc = &zio->io_vd->vdev_cache;
vdev_cache_entry_t *ve, ve_search;
uint64_t cache_offset = P2ALIGN(zio->io_offset, VCBS);
- uint64_t cache_phase = P2PHASE(zio->io_offset, VCBS);
zio_t *fio;
ASSERT(zio->io_type == ZIO_TYPE_READ);
@@ -270,7 +269,7 @@ vdev_cache_read(zio_t *zio)
if (P2BOUNDARY(zio->io_offset, zio->io_size, VCBS))
return (EXDEV);
- ASSERT(cache_phase + zio->io_size <= VCBS);
+ ASSERT(P2PHASE(zio->io_offset, VCBS) + zio->io_size <= VCBS);
mutex_enter(&vc->vc_lock);
diff --git a/module/zfs/vdev_label.c b/module/zfs/vdev_label.c
index bf930466f..df9957eb5 100644
--- a/module/zfs/vdev_label.c
+++ b/module/zfs/vdev_label.c
@@ -335,7 +335,7 @@ vdev_label_read_config(vdev_t *vd)
nvlist_t *config = NULL;
vdev_phys_t *vp;
zio_t *zio;
- int flags =
+ int l, flags =
ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE;
ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
@@ -345,7 +345,7 @@ vdev_label_read_config(vdev_t *vd)
vp = zio_buf_alloc(sizeof (vdev_phys_t));
- for (int l = 0; l < VDEV_LABELS; l++) {
+ for (l = 0; l < VDEV_LABELS; l++) {
zio = zio_root(spa, NULL, NULL, flags);
@@ -458,6 +458,8 @@ vdev_inuse(vdev_t *vd, uint64_t crtxg, vdev_labeltype_t reason,
case VDEV_LABEL_SPARE:
return (spa_has_spare(spa, device_guid));
+ default:
+ break;
}
}
@@ -489,6 +491,7 @@ vdev_label_init(vdev_t *vd, uint64_t crtxg, vdev_labeltype_t reason)
nvlist_t *label;
vdev_phys_t *vp;
vdev_boot_header_t *vb;
+ vdev_t *pvd;
uberblock_t *ub;
zio_t *zio;
char *buf;
@@ -496,10 +499,11 @@ vdev_label_init(vdev_t *vd, uint64_t crtxg, vdev_labeltype_t reason)
int error;
uint64_t spare_guid, l2cache_guid;
int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL;
+ int c, l, n;
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
- for (int c = 0; c < vd->vdev_children; c++)
+ for (c = 0; c < vd->vdev_children; c++)
if ((error = vdev_label_init(vd->vdev_child[c],
crtxg, reason)) != 0)
return (error);
@@ -535,7 +539,7 @@ vdev_label_init(vdev_t *vd, uint64_t crtxg, vdev_labeltype_t reason)
vd->vdev_guid += guid_delta;
- for (vdev_t *pvd = vd; pvd != NULL; pvd = pvd->vdev_parent)
+ for (pvd = vd; pvd != NULL; pvd = pvd->vdev_parent)
pvd->vdev_guid_sum += guid_delta;
/*
@@ -554,7 +558,7 @@ vdev_label_init(vdev_t *vd, uint64_t crtxg, vdev_labeltype_t reason)
vd->vdev_guid += guid_delta;
- for (vdev_t *pvd = vd; pvd != NULL; pvd = pvd->vdev_parent)
+ for (pvd = vd; pvd != NULL; pvd = pvd->vdev_parent)
pvd->vdev_guid_sum += guid_delta;
/*
@@ -655,7 +659,7 @@ vdev_label_init(vdev_t *vd, uint64_t crtxg, vdev_labeltype_t reason)
*/
zio = zio_root(spa, NULL, NULL, flags);
- for (int l = 0; l < VDEV_LABELS; l++) {
+ for (l = 0; l < VDEV_LABELS; l++) {
vdev_label_write(zio, vd, l, vp,
offsetof(vdev_label_t, vl_vdev_phys),
@@ -665,7 +669,7 @@ vdev_label_init(vdev_t *vd, uint64_t crtxg, vdev_labeltype_t reason)
offsetof(vdev_label_t, vl_boot_header),
sizeof (vdev_boot_header_t), NULL, NULL, flags);
- for (int n = 0; n < VDEV_UBERBLOCK_COUNT(vd); n++) {
+ for (n = 0; n < VDEV_UBERBLOCK_COUNT(vd); n++) {
vdev_label_write(zio, vd, l, ub,
VDEV_UBERBLOCK_OFFSET(vd, n),
VDEV_UBERBLOCK_SIZE(vd), NULL, NULL, flags);
@@ -756,6 +760,7 @@ vdev_uberblock_load(zio_t *zio, vdev_t *vd, uberblock_t *ubbest)
vdev_t *rvd = spa->spa_root_vdev;
int flags =
ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE;
+ int c, l, n;
if (vd == rvd) {
ASSERT(zio == NULL);
@@ -766,12 +771,12 @@ vdev_uberblock_load(zio_t *zio, vdev_t *vd, uberblock_t *ubbest)
ASSERT(zio != NULL);
- for (int c = 0; c < vd->vdev_children; c++)
+ for (c = 0; c < vd->vdev_children; c++)
vdev_uberblock_load(zio, vd->vdev_child[c], ubbest);
if (vd->vdev_ops->vdev_op_leaf && vdev_readable(vd)) {
- for (int l = 0; l < VDEV_LABELS; l++) {
- for (int n = 0; n < VDEV_UBERBLOCK_COUNT(vd); n++) {
+ for (l = 0; l < VDEV_LABELS; l++) {
+ for (n = 0; n < VDEV_UBERBLOCK_COUNT(vd); n++) {
vdev_label_read(zio, vd, l,
zio_buf_alloc(VDEV_UBERBLOCK_SIZE(vd)),
VDEV_UBERBLOCK_OFFSET(vd, n),
@@ -807,9 +812,9 @@ static void
vdev_uberblock_sync(zio_t *zio, uberblock_t *ub, vdev_t *vd, int flags)
{
uberblock_t *ubbuf;
- int n;
+ int c, l, n;
- for (int c = 0; c < vd->vdev_children; c++)
+ for (c = 0; c < vd->vdev_children; c++)
vdev_uberblock_sync(zio, ub, vd->vdev_child[c], flags);
if (!vd->vdev_ops->vdev_op_leaf)
@@ -824,7 +829,7 @@ vdev_uberblock_sync(zio_t *zio, uberblock_t *ub, vdev_t *vd, int flags)
bzero(ubbuf, VDEV_UBERBLOCK_SIZE(vd));
*ubbuf = *ub;
- for (int l = 0; l < VDEV_LABELS; l++)
+ for (l = 0; l < VDEV_LABELS; l++)
vdev_label_write(zio, vd, l, ubbuf,
VDEV_UBERBLOCK_OFFSET(vd, n), VDEV_UBERBLOCK_SIZE(vd),
vdev_uberblock_sync_done, zio->io_private,
@@ -839,10 +844,11 @@ vdev_uberblock_sync_list(vdev_t **svd, int svdcount, uberblock_t *ub, int flags)
spa_t *spa = svd[0]->vdev_spa;
zio_t *zio;
uint64_t good_writes = 0;
+ int v;
zio = zio_root(spa, NULL, &good_writes, flags);
- for (int v = 0; v < svdcount; v++)
+ for (v = 0; v < svdcount; v++)
vdev_uberblock_sync(zio, ub, svd[v], flags);
(void) zio_wait(zio);
@@ -854,7 +860,7 @@ vdev_uberblock_sync_list(vdev_t **svd, int svdcount, uberblock_t *ub, int flags)
*/
zio = zio_root(spa, NULL, NULL, flags);
- for (int v = 0; v < svdcount; v++)
+ for (v = 0; v < svdcount; v++)
zio_flush(zio, svd[v]);
(void) zio_wait(zio);
@@ -907,8 +913,9 @@ vdev_label_sync(zio_t *zio, vdev_t *vd, int l, uint64_t txg, int flags)
vdev_phys_t *vp;
char *buf;
size_t buflen;
+ int c;
- for (int c = 0; c < vd->vdev_children; c++)
+ for (c = 0; c < vd->vdev_children; c++)
vdev_label_sync(zio, vd->vdev_child[c], l, txg, flags);
if (!vd->vdev_ops->vdev_op_leaf)
diff --git a/module/zfs/vdev_mirror.c b/module/zfs/vdev_mirror.c
index c4629ff45..ed2c251f5 100644
--- a/module/zfs/vdev_mirror.c
+++ b/module/zfs/vdev_mirror.c
@@ -313,9 +313,9 @@ vdev_mirror_io_start(zio_t *zio)
static int
vdev_mirror_worst_error(mirror_map_t *mm)
{
- int error[2] = { 0, 0 };
+ int c, error[2] = { 0, 0 };
- for (int c = 0; c < mm->mm_children; c++) {
+ for (c = 0; c < mm->mm_children; c++) {
mirror_child_t *mc = &mm->mm_child[c];
int s = mc->mc_speculative;
error[s] = zio_worst_error(error[s], mc->mc_error);
diff --git a/module/zfs/vdev_queue.c b/module/zfs/vdev_queue.c
index 46fca0e3b..3e5aab173 100644
--- a/module/zfs/vdev_queue.c
+++ b/module/zfs/vdev_queue.c
@@ -285,12 +285,13 @@ void
vdev_queue_io_done(zio_t *zio)
{
vdev_queue_t *vq = &zio->io_vd->vdev_queue;
+ int i;
mutex_enter(&vq->vq_lock);
avl_remove(&vq->vq_pending_tree, zio);
- for (int i = 0; i < zfs_vdev_ramp_rate; i++) {
+ for (i = 0; i < zfs_vdev_ramp_rate; i++) {
zio_t *nio = vdev_queue_io_to_issue(vq, zfs_vdev_max_pending);
if (nio == NULL)
break;
diff --git a/module/zfs/vdev_raidz.c b/module/zfs/vdev_raidz.c
index 69e314468..a06b68093 100644
--- a/module/zfs/vdev_raidz.c
+++ b/module/zfs/vdev_raidz.c
@@ -775,9 +775,9 @@ static uint64_t raidz_corrected_pq;
static int
vdev_raidz_worst_error(raidz_map_t *rm)
{
- int error = 0;
+ int c, error = 0;
- for (int c = 0; c < rm->rm_cols; c++)
+ for (c = 0; c < rm->rm_cols; c++)
error = zio_worst_error(error, rm->rm_col[c].rc_error);
return (error);
@@ -789,7 +789,7 @@ vdev_raidz_io_done(zio_t *zio)
vdev_t *vd = zio->io_vd;
vdev_t *cvd;
raidz_map_t *rm = zio->io_vsd;
- raidz_col_t *rc, *rc1;
+ raidz_col_t *rc = NULL, *rc1;
int unexpected_errors = 0;
int parity_errors = 0;
int parity_untried = 0;
diff --git a/module/zfs/zap.c b/module/zfs/zap.c
index 297984bc9..a9bd189f8 100644
--- a/module/zfs/zap.c
+++ b/module/zfs/zap.c
@@ -476,7 +476,7 @@ zap_open_leaf(uint64_t blkid, dmu_buf_t *db)
ASSERT3U(ZAP_LEAF_HASH_NUMENTRIES(l), >, ZAP_LEAF_NUMCHUNKS(l) / 3);
/* The chunks should begin at the end of the hash table */
- ASSERT3P(&ZAP_LEAF_CHUNK(l, 0), ==,
+ ASSERT3P(&ZAP_LEAF_CHUNK(l, 0), ==, (zap_leaf_chunk_t *)
&l->l_phys->l_hash[ZAP_LEAF_HASH_NUMENTRIES(l)]);
/* The chunks should end at the end of the block */
@@ -1080,6 +1080,29 @@ zap_stats_ptrtbl(zap_t *zap, uint64_t *tbl, int len, zap_stats_t *zs)
}
}
+int fzap_cursor_move_to_key(zap_cursor_t *zc, zap_name_t *zn)
+{
+ int err;
+ zap_leaf_t *l;
+ zap_entry_handle_t zeh;
+
+ if (zn->zn_name_orij && strlen(zn->zn_name_orij) > ZAP_MAXNAMELEN)
+ return (E2BIG);
+
+ err = zap_deref_leaf(zc->zc_zap, zn->zn_hash, NULL, RW_READER, &l);
+ if (err != 0)
+ return (err);
+
+ err = zap_leaf_lookup(l, zn, &zeh);
+ if (err != 0)
+ return (err);
+
+ zc->zc_leaf = l;
+ zc->zc_hash = zeh.zeh_hash;
+ zc->zc_cd = zeh.zeh_cd;
+ return 0;
+}
+
void
fzap_get_stats(zap_t *zap, zap_stats_t *zs)
{
diff --git a/module/zfs/zap_leaf.c b/module/zfs/zap_leaf.c
index da498b6bc..267f296fb 100644
--- a/module/zfs/zap_leaf.c
+++ b/module/zfs/zap_leaf.c
@@ -220,7 +220,7 @@ zap_leaf_array_create(zap_leaf_t *l, const char *buf,
uint16_t chunk_head;
uint16_t *chunkp = &chunk_head;
int byten = 0;
- uint64_t value;
+ uint64_t value = 0;
int shift = (integer_size-1)*8;
int len = num_integers;
diff --git a/module/zfs/zap_micro.c b/module/zfs/zap_micro.c
index 96964d683..02c13120c 100644
--- a/module/zfs/zap_micro.c
+++ b/module/zfs/zap_micro.c
@@ -271,7 +271,7 @@ mze_destroy(zap_t *zap)
mzap_ent_t *mze;
void *avlcookie = NULL;
- while (mze = avl_destroy_nodes(&zap->zap_m.zap_avl, &avlcookie))
+ while ((mze = avl_destroy_nodes(&zap->zap_m.zap_avl, &avlcookie)))
kmem_free(mze, sizeof (mzap_ent_t));
avl_destroy(&zap->zap_m.zap_avl);
}
@@ -1045,6 +1045,45 @@ zap_cursor_advance(zap_cursor_t *zc)
}
}
+int zap_cursor_move_to_key(zap_cursor_t *zc, const char *name, matchtype_t mt)
+{
+ int err = 0;
+ mzap_ent_t *mze;
+ zap_name_t *zn;
+
+ if (zc->zc_zap == NULL) {
+ err = zap_lockdir(zc->zc_objset, zc->zc_zapobj, NULL,
+ RW_READER, TRUE, FALSE, &zc->zc_zap);
+ if (err)
+ return (err);
+ } else {
+ rw_enter(&zc->zc_zap->zap_rwlock, RW_READER);
+ }
+
+ zn = zap_name_alloc(zc->zc_zap, name, mt);
+ if (zn == NULL) {
+ rw_exit(&zc->zc_zap->zap_rwlock);
+ return (ENOTSUP);
+ }
+
+ if (!zc->zc_zap->zap_ismicro) {
+ err = fzap_cursor_move_to_key(zc, zn);
+ } else {
+ mze = mze_find(zn);
+ if (mze == NULL) {
+ err = ENOENT;
+ goto out;
+ }
+ zc->zc_hash = mze->mze_hash;
+ zc->zc_cd = mze->mze_phys.mze_cd;
+ }
+
+out:
+ zap_name_free(zn);
+ rw_exit(&zc->zc_zap->zap_rwlock);
+ return (err);
+}
+
int
zap_get_stats(objset_t *os, uint64_t zapobj, zap_stats_t *zs)
{
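For context, a hedged sketch of how a caller might use the zap_cursor_move_to_key() added above: position a cursor at a named entry, then continue an ordinary cursor walk from that point. It assumes the declarations in <sys/zap.h>; every other name is hypothetical and error handling is abbreviated.

/*
 * Illustrative sketch only, not part of the diff.
 */
static int
walk_from_key(objset_t *os, uint64_t zapobj, const char *start_name)
{
	zap_cursor_t zc;
	zap_attribute_t za;
	int err;

	zap_cursor_init(&zc, os, zapobj);

	err = zap_cursor_move_to_key(&zc, start_name, MT_EXACT);
	if (err != 0) {
		zap_cursor_fini(&zc);
		return (err);
	}

	while ((err = zap_cursor_retrieve(&zc, &za)) == 0) {
		/* za.za_name and za.za_first_integer describe this entry */
		zap_cursor_advance(&zc);
	}
	zap_cursor_fini(&zc);

	return (err == ENOENT ? 0 : err);
}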
diff --git a/module/zfs/zfs_byteswap.c b/module/zfs/zfs_byteswap.c
index ab97f83eb..177ff552c 100644
--- a/module/zfs/zfs_byteswap.c
+++ b/module/zfs/zfs_byteswap.c
@@ -52,7 +52,7 @@ zfs_ace_byteswap(void *buf, size_t size, boolean_t zfs_layout)
{
caddr_t end;
caddr_t ptr;
- zfs_ace_t *zacep;
+ zfs_ace_t *zacep = NULL;
ace_t *acep;
uint16_t entry_type;
size_t entry_size;
diff --git a/module/zfs/zfs_ioctl.c b/module/zfs/zfs_ioctl.c
index b6ad57451..111141834 100644
--- a/module/zfs/zfs_ioctl.c
+++ b/module/zfs/zfs_ioctl.c
@@ -142,7 +142,7 @@ history_str_get(zfs_cmd_t *zc)
{
char *buf;
- if (zc->zc_history == NULL)
+ if (zc->zc_history == 0)
return (NULL);
buf = kmem_alloc(HIS_MAX_RECORD_LEN, KM_SLEEP);
@@ -1271,7 +1271,7 @@ zfs_ioc_objset_zplprops(zfs_cmd_t *zc)
* which we aren't supposed to do with a DS_MODE_USER
* hold, because it could be inconsistent.
*/
- if (zc->zc_nvlist_dst != NULL &&
+ if (zc->zc_nvlist_dst != 0 &&
!zc->zc_objset_stats.dds_inconsistent &&
dmu_objset_type(os) == DMU_OST_ZFS) {
nvlist_t *nv;
@@ -1669,7 +1669,7 @@ zfs_ioc_pool_get_props(zfs_cmd_t *zc)
error = spa_prop_get(spa, &nvp);
- if (error == 0 && zc->zc_nvlist_dst != NULL)
+ if (error == 0 && zc->zc_nvlist_dst != 0)
error = put_nvlist(zc, nvp);
else
error = EFAULT;
@@ -2045,7 +2045,7 @@ zfs_ioc_create(zfs_cmd_t *zc)
strchr(zc->zc_name, '%'))
return (EINVAL);
- if (zc->zc_nvlist_src != NULL &&
+ if (zc->zc_nvlist_src != 0 &&
(error = get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size,
&nvprops)) != 0)
return (error);
@@ -2189,7 +2189,7 @@ zfs_ioc_snapshot(zfs_cmd_t *zc)
if (snapshot_namecheck(zc->zc_value, NULL, NULL) != 0)
return (EINVAL);
- if (zc->zc_nvlist_src != NULL &&
+ if (zc->zc_nvlist_src != 0 &&
(error = get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size,
&nvprops)) != 0)
return (error);
@@ -2441,7 +2441,7 @@ zfs_ioc_recv(zfs_cmd_t *zc)
*tosnap = '\0';
tosnap++;
- if (zc->zc_nvlist_src != NULL &&
+ if (zc->zc_nvlist_src != 0 &&
(error = get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size,
&props)) != 0)
return (error);
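The NULL-to-0 changes above reflect that zc_history, zc_nvlist_src and zc_nvlist_dst are uint64_t fields carrying user-space addresses by value, not kernel pointers, so comparing them against the pointer constant NULL draws pointer/integer comparison warnings; testing against 0 expresses the same check. A tiny sketch with a hypothetical stand-in struct:

#include <stdint.h>

/* Hypothetical stand-in for the ioctl command structure. */
struct cmd_example {
	uint64_t arg_addr;	/* user-space address carried by value */
};

/*
 * Illustrative sketch only.  Because arg_addr is an integer, not a pointer,
 * testing it against 0 asks "was an address supplied?" without the
 * pointer/integer comparison warning that "!= NULL" can produce.
 */
static int
has_arg(const struct cmd_example *cmd)
{
	return (cmd->arg_addr != 0);	/* rather than: != NULL */
}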
diff --git a/module/zfs/zio.c b/module/zfs/zio.c
index a20f971c9..eea487507 100644
--- a/module/zfs/zio.c
+++ b/module/zfs/zio.c
@@ -850,8 +850,8 @@ zio_write_bp_init(zio_t *zio)
*/
if (bp->blk_birth == zio->io_txg && BP_GET_PSIZE(bp) == csize &&
pass > SYNC_PASS_REWRITE) {
- ASSERT(csize != 0);
uint32_t gang_stages = zio->io_pipeline & ZIO_GANG_STAGES;
+ ASSERT(csize != 0);
zio->io_pipeline = ZIO_REWRITE_PIPELINE | gang_stages;
zio->io_flags |= ZIO_FLAG_IO_REWRITE;
} else {
@@ -909,8 +909,9 @@ zio_taskq_member(zio_t *zio, enum zio_taskq_type q)
{
kthread_t *executor = zio->io_executor;
spa_t *spa = zio->io_spa;
+ zio_type_t t;
- for (zio_type_t t = 0; t < ZIO_TYPES; t++)
+ for (t = 0; t < ZIO_TYPES; t++)
if (taskq_member(spa->spa_zio_taskq[t][q], executor))
return (B_TRUE);
@@ -1045,13 +1046,14 @@ static void
zio_reexecute(zio_t *pio)
{
zio_t *zio, *zio_next;
+ int c;
pio->io_flags = pio->io_orig_flags;
pio->io_stage = pio->io_orig_stage;
pio->io_pipeline = pio->io_orig_pipeline;
pio->io_reexecute = 0;
pio->io_error = 0;
- for (int c = 0; c < ZIO_CHILD_TYPES; c++)
+ for (c = 0; c < ZIO_CHILD_TYPES; c++)
pio->io_child_error[c] = 0;
if (IO_IS_ALLOCATING(pio)) {
@@ -1309,8 +1311,9 @@ static void
zio_gang_node_free(zio_gang_node_t **gnpp)
{
zio_gang_node_t *gn = *gnpp;
+ int g;
- for (int g = 0; g < SPA_GBH_NBLKPTRS; g++)
+ for (g = 0; g < SPA_GBH_NBLKPTRS; g++)
ASSERT(gn->gn_child[g] == NULL);
zio_buf_free(gn->gn_gbh, SPA_GANGBLOCKSIZE);
@@ -1322,11 +1325,12 @@ static void
zio_gang_tree_free(zio_gang_node_t **gnpp)
{
zio_gang_node_t *gn = *gnpp;
+ int g;
if (gn == NULL)
return;
- for (int g = 0; g < SPA_GBH_NBLKPTRS; g++)
+ for (g = 0; g < SPA_GBH_NBLKPTRS; g++)
zio_gang_tree_free(&gn->gn_child[g]);
zio_gang_node_free(gnpp);
@@ -1351,6 +1355,7 @@ zio_gang_tree_assemble_done(zio_t *zio)
zio_t *lio = zio->io_logical;
zio_gang_node_t *gn = zio->io_private;
blkptr_t *bp = zio->io_bp;
+ int g;
ASSERT(zio->io_parent == lio);
ASSERT(zio->io_child == NULL);
@@ -1365,7 +1370,7 @@ zio_gang_tree_assemble_done(zio_t *zio)
ASSERT(zio->io_size == SPA_GANGBLOCKSIZE);
ASSERT(gn->gn_gbh->zg_tail.zbt_magic == ZBT_MAGIC);
- for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
+ for (g = 0; g < SPA_GBH_NBLKPTRS; g++) {
blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g];
if (!BP_IS_GANG(gbp))
continue;
@@ -1378,6 +1383,7 @@ zio_gang_tree_issue(zio_t *pio, zio_gang_node_t *gn, blkptr_t *bp, void *data)
{
zio_t *lio = pio->io_logical;
zio_t *zio;
+ int g;
ASSERT(BP_IS_GANG(bp) == !!gn);
ASSERT(BP_GET_CHECKSUM(bp) == BP_GET_CHECKSUM(lio->io_bp));
@@ -1392,7 +1398,7 @@ zio_gang_tree_issue(zio_t *pio, zio_gang_node_t *gn, blkptr_t *bp, void *data)
if (gn != NULL) {
ASSERT(gn->gn_gbh->zg_tail.zbt_magic == ZBT_MAGIC);
- for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
+ for (g = 0; g < SPA_GBH_NBLKPTRS; g++) {
blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g];
if (BP_IS_HOLE(gbp))
continue;
@@ -1449,6 +1455,7 @@ zio_write_gang_member_ready(zio_t *zio)
dva_t *cdva = zio->io_bp->blk_dva;
dva_t *pdva = pio->io_bp->blk_dva;
uint64_t asize;
+ int d;
if (BP_IS_HOLE(zio->io_bp))
return;
@@ -1462,7 +1469,7 @@ zio_write_gang_member_ready(zio_t *zio)
ASSERT3U(BP_GET_NDVAS(zio->io_bp), <=, BP_GET_NDVAS(pio->io_bp));
mutex_enter(&pio->io_lock);
- for (int d = 0; d < BP_GET_NDVAS(zio->io_bp); d++) {
+ for (d = 0; d < BP_GET_NDVAS(zio->io_bp); d++) {
ASSERT(DVA_GET_GANG(&pdva[d]));
asize = DVA_GET_ASIZE(&pdva[d]);
asize += DVA_GET_ASIZE(&cdva[d]);
@@ -1486,7 +1493,7 @@ zio_write_gang_block(zio_t *pio)
int ndvas = lio->io_prop.zp_ndvas;
int gbh_ndvas = MIN(ndvas + 1, spa_max_replication(spa));
zio_prop_t zp;
- int error;
+ int g, error;
error = metaslab_alloc(spa, spa->spa_normal_class, SPA_GANGBLOCKSIZE,
bp, gbh_ndvas, txg, pio == lio ? NULL : lio->io_bp,
@@ -1516,7 +1523,7 @@ zio_write_gang_block(zio_t *pio)
/*
* Create and nowait the gang children.
*/
- for (int g = 0; resid != 0; resid -= lsize, g++) {
+ for (g = 0; resid != 0; resid -= lsize, g++) {
lsize = P2ROUNDUP(resid / (SPA_GBH_NBLKPTRS - g),
SPA_MINBLOCKSIZE);
ASSERT(lsize >= SPA_MINBLOCKSIZE && lsize <= resid);
@@ -1606,6 +1613,7 @@ zio_dva_unallocate(zio_t *zio, zio_gang_node_t *gn, blkptr_t *bp)
{
spa_t *spa = zio->io_spa;
boolean_t now = !(zio->io_flags & ZIO_FLAG_IO_REWRITE);
+ int g;
ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp));
@@ -1636,7 +1644,7 @@ zio_dva_unallocate(zio_t *zio, zio_gang_node_t *gn, blkptr_t *bp)
metaslab_free(spa, bp, bp->blk_birth, now);
if (gn != NULL) {
- for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
+ for (g = 0; g < SPA_GBH_NBLKPTRS; g++) {
zio_dva_unallocate(zio, gn->gn_child[g],
&gn->gn_gbh->zg_blkptr[g]);
}
@@ -2080,6 +2088,7 @@ zio_done(zio_t *zio)
blkptr_t *bp = zio->io_bp;
vdev_t *vd = zio->io_vd;
uint64_t psize = zio->io_size;
+ int c, w;
/*
* If our children haven't all completed,
@@ -2090,8 +2099,8 @@ zio_done(zio_t *zio)
zio_wait_for_children(zio, ZIO_CHILD_LOGICAL, ZIO_WAIT_DONE))
return (ZIO_PIPELINE_STOP);
- for (int c = 0; c < ZIO_CHILD_TYPES; c++)
- for (int w = 0; w < ZIO_WAIT_TYPES; w++)
+ for (c = 0; c < ZIO_CHILD_TYPES; c++)
+ for (w = 0; w < ZIO_WAIT_TYPES; w++)
ASSERT(zio->io_children[c][w] == 0);
if (bp != NULL) {