author | Brian Behlendorf <[email protected]> | 2010-08-26 09:52:39 -0700
committer | Brian Behlendorf <[email protected]> | 2010-08-27 15:28:32 -0700
commit | d6320ddb78fa89c4d0fc2af00ae53c7c70992f96 (patch)
tree | 8a50c251b955ae31a670835ac0c90cfba6d28752 /module
parent | c5b3a7bbcc321846bb15ff73c6fd6f1c483b6aa6 (diff)
Fix gcc c90 compliance warnings
Fix non-c90 compliant code; for the most part these changes
simply deal with where a particular variable is declared.
Under c90 this must always be done at the very start of a block.
Signed-off-by: Brian Behlendorf <[email protected]>
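
For context, the pattern applied throughout this patch is sketched below
(the function names are illustrative only, not taken from the ZFS tree).
C99 permits declarations mixed in with statements and in the for-loop init
clause; c90 requires every declaration at the very start of a block, so the
hunks below hoist loop counters and move late declarations up. gcc reports
such sites with -Wdeclaration-after-statement and rejects them outright
under -std=c90 -pedantic-errors.

	/* C99 style -- what the warnings pointed at */
	static int
	sum_c99(const int *v, int n)
	{
		int total = 0;
		for (int i = 0; i < n; i++)	/* declaration in for-init */
			total += v[i];
		return (total);
	}

	/* c90 style -- the shape this commit converts to */
	static int
	sum_c90(const int *v, int n)
	{
		int total = 0;
		int i;				/* hoisted to block start */

		for (i = 0; i < n; i++)
			total += v[i];
		return (total);
	}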
Diffstat (limited to 'module')
-rw-r--r-- | module/zcommon/zprop_common.c | 4
-rw-r--r-- | module/zfs/arc.c | 6
-rw-r--r-- | module/zfs/dbuf.c | 26
-rw-r--r-- | module/zfs/ddt.c | 110
-rw-r--r-- | module/zfs/dmu.c | 3
-rw-r--r-- | module/zfs/dmu_objset.c | 11
-rw-r--r-- | module/zfs/dmu_tx.c | 3
-rw-r--r-- | module/zfs/dsl_dataset.c | 18
-rw-r--r-- | module/zfs/dsl_dir.c | 12
-rw-r--r-- | module/zfs/dsl_pool.c | 3
-rw-r--r-- | module/zfs/dsl_scan.c | 13
-rw-r--r-- | module/zfs/include/sys/spa.h | 3
-rw-r--r-- | module/zfs/metaslab.c | 35
-rw-r--r-- | module/zfs/sa.c | 4
-rw-r--r-- | module/zfs/spa.c | 86
-rw-r--r-- | module/zfs/spa_misc.c | 38
-rw-r--r-- | module/zfs/vdev.c | 99
-rw-r--r-- | module/zfs/vdev_label.c | 41
-rw-r--r-- | module/zfs/vdev_mirror.c | 11
-rw-r--r-- | module/zfs/vdev_queue.c | 3
-rw-r--r-- | module/zfs/vdev_raidz.c | 4
-rw-r--r-- | module/zfs/vdev_root.c | 7
-rw-r--r-- | module/zfs/zfs_znode.c | 4
-rw-r--r-- | module/zfs/zil.c | 6
-rw-r--r-- | module/zfs/zio.c | 51
-rw-r--r-- | module/zfs/zrlock.c | 4
26 files changed, 388 insertions(+), 217 deletions(-)
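
Most hunks below simply hoist an int loop counter, but a few variants are
worth flagging before reading the diff. Enum-typed counters (ddt.c) are
hoisted the same way, keeping their enum type. The sa.c hunk is different
in kind: it adds parentheses around an && term inside an || expression,
making the existing precedence explicit rather than changing behavior.
And where the variables live only inside an #ifdef DEBUG section
(dsl_dir.c), the patch opens a new brace block rather than hoisting to
the function top, since c90 allows declarations at the start of any
block, not just a function body. A minimal self-contained sketch of that
last pattern, with hypothetical names:

	#include <assert.h>

	#define N_BUCKETS 4

	struct stats {
		long total;		/* invariant: == sum of buckets */
		long bucket[N_BUCKETS];
	};

	static void
	update_stats(struct stats *st)
	{
		st->total++;		/* ordinary statements first */
		st->bucket[0]++;
	#ifdef DEBUG
		{			/* new scope, so declarations  */
			long sum = 0;	/* are legal at its start again */
			int i;

			for (i = 0; i < N_BUCKETS; i++)
				sum += st->bucket[i];
			assert(sum == st->total);
		}
	#endif
	}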
diff --git a/module/zcommon/zprop_common.c b/module/zcommon/zprop_common.c index 0bbf20d4f..df2fdeab8 100644 --- a/module/zcommon/zprop_common.c +++ b/module/zcommon/zprop_common.c @@ -162,7 +162,7 @@ int zprop_iter_common(zprop_func func, void *cb, boolean_t show_all, boolean_t ordered, zfs_type_t type) { - int i, num_props, size, prop; + int i, j, num_props, size, prop; zprop_desc_t *prop_tbl; zprop_desc_t **order; @@ -177,7 +177,7 @@ zprop_iter_common(zprop_func func, void *cb, boolean_t show_all, return (ZPROP_CONT); #endif - for (int j = 0; j < num_props; j++) + for (j = 0; j < num_props; j++) order[j] = &prop_tbl[j]; if (ordered) { diff --git a/module/zfs/arc.c b/module/zfs/arc.c index a82718e8b..de09ca9a9 100644 --- a/module/zfs/arc.c +++ b/module/zfs/arc.c @@ -1426,10 +1426,11 @@ arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t all) static void arc_hdr_destroy(arc_buf_hdr_t *hdr) { + l2arc_buf_hdr_t *l2hdr = hdr->b_l2hdr; + ASSERT(refcount_is_zero(&hdr->b_refcnt)); ASSERT3P(hdr->b_state, ==, arc_anon); ASSERT(!HDR_IO_IN_PROGRESS(hdr)); - l2arc_buf_hdr_t *l2hdr = hdr->b_l2hdr; if (l2hdr != NULL) { boolean_t buflist_held = MUTEX_HELD(&l2arc_buflist_mtx); @@ -4235,6 +4236,7 @@ l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz) l2arc_write_callback_t *cb; zio_t *pio, *wzio; uint64_t guid = spa_guid(spa); + int try; ASSERT(dev->l2ad_vdev != NULL); @@ -4248,7 +4250,7 @@ l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz) * Copy buffers for L2ARC writing. */ mutex_enter(&l2arc_buflist_mtx); - for (int try = 0; try <= 3; try++) { + for (try = 0; try <= 3; try++) { list = l2arc_list_locked(try, &list_lock); passed_sz = 0; diff --git a/module/zfs/dbuf.c b/module/zfs/dbuf.c index 9c4e0296d..82cfd1a2e 100644 --- a/module/zfs/dbuf.c +++ b/module/zfs/dbuf.c @@ -107,11 +107,15 @@ dbuf_find(dnode_t *dn, uint8_t level, uint64_t blkid) { dbuf_hash_table_t *h = &dbuf_hash_table; objset_t *os = dn->dn_objset; - uint64_t obj = dn->dn_object; - uint64_t hv = DBUF_HASH(os, obj, level, blkid); - uint64_t idx = hv & h->hash_table_mask; + uint64_t obj; + uint64_t hv; + uint64_t idx; dmu_buf_impl_t *db; + obj = dn->dn_object; + hv = DBUF_HASH(os, obj, level, blkid); + idx = hv & h->hash_table_mask; + mutex_enter(DBUF_HASH_MUTEX(h, idx)); for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) { if (DBUF_EQUAL(db, os, obj, level, blkid)) { @@ -140,11 +144,13 @@ dbuf_hash_insert(dmu_buf_impl_t *db) objset_t *os = db->db_objset; uint64_t obj = db->db.db_object; int level = db->db_level; - uint64_t blkid = db->db_blkid; - uint64_t hv = DBUF_HASH(os, obj, level, blkid); - uint64_t idx = hv & h->hash_table_mask; + uint64_t blkid, hv, idx; dmu_buf_impl_t *dbf; + blkid = db->db_blkid; + hv = DBUF_HASH(os, obj, level, blkid); + idx = hv & h->hash_table_mask; + mutex_enter(DBUF_HASH_MUTEX(h, idx)); for (dbf = h->hash_table[idx]; dbf != NULL; dbf = dbf->db_hash_next) { if (DBUF_EQUAL(dbf, os, obj, level, blkid)) { @@ -174,11 +180,13 @@ static void dbuf_hash_remove(dmu_buf_impl_t *db) { dbuf_hash_table_t *h = &dbuf_hash_table; - uint64_t hv = DBUF_HASH(db->db_objset, db->db.db_object, - db->db_level, db->db_blkid); - uint64_t idx = hv & h->hash_table_mask; + uint64_t hv, idx; dmu_buf_impl_t *dbf, **dbp; + hv = DBUF_HASH(db->db_objset, db->db.db_object, + db->db_level, db->db_blkid); + idx = hv & h->hash_table_mask; + /* * We musn't hold db_mtx to maintin lock ordering: * DBUF_HASH_MUTEX > db_mtx. 
diff --git a/module/zfs/ddt.c b/module/zfs/ddt.c index 718331496..ae9d2a5e1 100644 --- a/module/zfs/ddt.c +++ b/module/zfs/ddt.c @@ -244,9 +244,10 @@ ddt_object_name(ddt_t *ddt, enum ddt_type type, enum ddt_class class, void ddt_bp_fill(const ddt_phys_t *ddp, blkptr_t *bp, uint64_t txg) { + int d; ASSERT(txg != 0); - for (int d = 0; d < SPA_DVAS_PER_BP; d++) + for (d = 0; d < SPA_DVAS_PER_BP; d++) bp->blk_dva[d] = ddp->ddp_dva[d]; BP_SET_BIRTH(bp, txg, ddp->ddp_phys_birth); } @@ -287,9 +288,10 @@ ddt_key_fill(ddt_key_t *ddk, const blkptr_t *bp) void ddt_phys_fill(ddt_phys_t *ddp, const blkptr_t *bp) { + int d; ASSERT(ddp->ddp_phys_birth == 0); - for (int d = 0; d < SPA_DVAS_PER_BP; d++) + for (d = 0; d < SPA_DVAS_PER_BP; d++) ddp->ddp_dva[d] = bp->blk_dva[d]; ddp->ddp_phys_birth = BP_PHYSICAL_BIRTH(bp); } @@ -327,8 +329,9 @@ ddt_phys_t * ddt_phys_select(const ddt_entry_t *dde, const blkptr_t *bp) { ddt_phys_t *ddp = (ddt_phys_t *)dde->dde_phys; + int p; - for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) { + for (p = 0; p < DDT_PHYS_TYPES; p++, ddp++) { if (DVA_EQUAL(BP_IDENTITY(bp), &ddp->ddp_dva[0]) && BP_PHYSICAL_BIRTH(bp) == ddp->ddp_phys_birth) return (ddp); @@ -340,8 +343,9 @@ uint64_t ddt_phys_total_refcnt(const ddt_entry_t *dde) { uint64_t refcnt = 0; + int p; - for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) + for (p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) refcnt += dde->dde_phys[p].ddp_refcnt; return (refcnt); @@ -355,17 +359,18 @@ ddt_stat_generate(ddt_t *ddt, ddt_entry_t *dde, ddt_stat_t *dds) ddt_key_t *ddk = &dde->dde_key; uint64_t lsize = DDK_GET_LSIZE(ddk); uint64_t psize = DDK_GET_PSIZE(ddk); + int p, d; bzero(dds, sizeof (*dds)); - for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) { + for (p = 0; p < DDT_PHYS_TYPES; p++, ddp++) { uint64_t dsize = 0; uint64_t refcnt = ddp->ddp_refcnt; if (ddp->ddp_phys_birth == 0) continue; - for (int d = 0; d < SPA_DVAS_PER_BP; d++) + for (d = 0; d < SPA_DVAS_PER_BP; d++) dsize += dva_get_dsize_sync(spa, &ddp->ddp_dva[d]); dds->dds_blocks += 1; @@ -413,16 +418,20 @@ ddt_stat_update(ddt_t *ddt, ddt_entry_t *dde, uint64_t neg) void ddt_histogram_add(ddt_histogram_t *dst, const ddt_histogram_t *src) { - for (int h = 0; h < 64; h++) + int h; + + for (h = 0; h < 64; h++) ddt_stat_add(&dst->ddh_stat[h], &src->ddh_stat[h], 0); } void ddt_histogram_stat(ddt_stat_t *dds, const ddt_histogram_t *ddh) { + int h; + bzero(dds, sizeof (*dds)); - for (int h = 0; h < 64; h++) + for (h = 0; h < 64; h++) ddt_stat_add(dds, &ddh->ddh_stat[h], 0); } @@ -442,11 +451,15 @@ ddt_histogram_empty(const ddt_histogram_t *ddh) void ddt_get_dedup_object_stats(spa_t *spa, ddt_object_t *ddo_total) { + enum zio_checksum c; + enum ddt_type type; + enum ddt_class class; + /* Sum the statistics we cached in ddt_object_sync(). 
*/ - for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) { + for (c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) { ddt_t *ddt = spa->spa_ddt[c]; - for (enum ddt_type type = 0; type < DDT_TYPES; type++) { - for (enum ddt_class class = 0; class < DDT_CLASSES; + for (type = 0; type < DDT_TYPES; type++) { + for (class = 0; class < DDT_CLASSES; class++) { ddt_object_t *ddo = &ddt->ddt_object_stats[type][class]; @@ -467,10 +480,14 @@ ddt_get_dedup_object_stats(spa_t *spa, ddt_object_t *ddo_total) void ddt_get_dedup_histogram(spa_t *spa, ddt_histogram_t *ddh) { - for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) { + enum zio_checksum c; + enum ddt_type type; + enum ddt_class class; + + for (c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) { ddt_t *ddt = spa->spa_ddt[c]; - for (enum ddt_type type = 0; type < DDT_TYPES; type++) { - for (enum ddt_class class = 0; class < DDT_CLASSES; + for (type = 0; type < DDT_TYPES; type++) { + for (class = 0; class < DDT_CLASSES; class++) { ddt_histogram_add(ddh, &ddt->ddt_histogram_cache[type][class]); @@ -519,8 +536,9 @@ ddt_ditto_copies_needed(ddt_t *ddt, ddt_entry_t *dde, ddt_phys_t *ddp_willref) uint64_t ditto = spa->spa_dedup_ditto; int total_copies = 0; int desired_copies = 0; + int p; - for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) { + for (p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) { ddt_phys_t *ddp = &dde->dde_phys[p]; zio_t *zio = dde->dde_lead_zio[p]; uint64_t refcnt = ddp->ddp_refcnt; /* committed refs */ @@ -553,8 +571,9 @@ ddt_ditto_copies_present(ddt_entry_t *dde) ddt_phys_t *ddp = &dde->dde_phys[DDT_PHYS_DITTO]; dva_t *dva = ddp->ddp_dva; int copies = 0 - DVA_GET_GANG(dva); + int d; - for (int d = 0; d < SPA_DVAS_PER_BP; d++, dva++) + for (d = 0; d < SPA_DVAS_PER_BP; d++, dva++) if (DVA_IS_VALID(dva)) copies++; @@ -641,9 +660,11 @@ ddt_alloc(const ddt_key_t *ddk) static void ddt_free(ddt_entry_t *dde) { + int p; + ASSERT(!dde->dde_loading); - for (int p = 0; p < DDT_PHYS_TYPES; p++) + for (p = 0; p < DDT_PHYS_TYPES; p++) ASSERT(dde->dde_lead_zio[p] == NULL); if (dde->dde_repair_data != NULL) @@ -731,6 +752,8 @@ ddt_prefetch(spa_t *spa, const blkptr_t *bp) { ddt_t *ddt; ddt_entry_t dde; + enum ddt_type type; + enum ddt_class class; if (!zfs_dedup_prefetch || bp == NULL || !BP_GET_DEDUP(bp)) return; @@ -743,8 +766,8 @@ ddt_prefetch(spa_t *spa, const blkptr_t *bp) ddt = ddt_select(spa, bp); ddt_key_fill(&dde.dde_key, bp); - for (enum ddt_type type = 0; type < DDT_TYPES; type++) { - for (enum ddt_class class = 0; class < DDT_CLASSES; class++) { + for (type = 0; type < DDT_TYPES; type++) { + for (class = 0; class < DDT_CLASSES; class++) { ddt_object_prefetch(ddt, type, class, &dde); } } @@ -757,8 +780,9 @@ ddt_entry_compare(const void *x1, const void *x2) const ddt_entry_t *dde2 = x2; const uint64_t *u1 = (const uint64_t *)&dde1->dde_key; const uint64_t *u2 = (const uint64_t *)&dde2->dde_key; + int i; - for (int i = 0; i < DDT_KEY_WORDS; i++) { + for (i = 0; i < DDT_KEY_WORDS; i++) { if (u1[i] < u2[i]) return (-1); if (u1[i] > u2[i]) @@ -801,15 +825,20 @@ ddt_table_free(ddt_t *ddt) void ddt_create(spa_t *spa) { + enum zio_checksum c; + spa->spa_dedup_checksum = ZIO_DEDUPCHECKSUM; - for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) + for (c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) spa->spa_ddt[c] = ddt_table_alloc(spa, c); } int ddt_load(spa_t *spa) { + enum zio_checksum c; + enum ddt_type type; + enum ddt_class class; int error; ddt_create(spa); @@ -821,10 +850,10 @@ ddt_load(spa_t *spa) if (error) return (error == 
ENOENT ? 0 : error); - for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) { + for (c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) { ddt_t *ddt = spa->spa_ddt[c]; - for (enum ddt_type type = 0; type < DDT_TYPES; type++) { - for (enum ddt_class class = 0; class < DDT_CLASSES; + for (type = 0; type < DDT_TYPES; type++) { + for (class = 0; class < DDT_CLASSES; class++) { error = ddt_object_load(ddt, type, class); if (error != 0 && error != ENOENT) @@ -845,7 +874,9 @@ ddt_load(spa_t *spa) void ddt_unload(spa_t *spa) { - for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) { + enum zio_checksum c; + + for (c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) { if (spa->spa_ddt[c]) { ddt_table_free(spa->spa_ddt[c]); spa->spa_ddt[c] = NULL; @@ -858,6 +889,8 @@ ddt_class_contains(spa_t *spa, enum ddt_class max_class, const blkptr_t *bp) { ddt_t *ddt; ddt_entry_t dde; + enum ddt_type type; + enum ddt_class class; if (!BP_GET_DEDUP(bp)) return (B_FALSE); @@ -869,8 +902,8 @@ ddt_class_contains(spa_t *spa, enum ddt_class max_class, const blkptr_t *bp) ddt_key_fill(&dde.dde_key, bp); - for (enum ddt_type type = 0; type < DDT_TYPES; type++) - for (enum ddt_class class = 0; class <= max_class; class++) + for (type = 0; type < DDT_TYPES; type++) + for (class = 0; class <= max_class; class++) if (ddt_object_lookup(ddt, type, class, &dde) == 0) return (B_TRUE); @@ -882,13 +915,15 @@ ddt_repair_start(ddt_t *ddt, const blkptr_t *bp) { ddt_key_t ddk; ddt_entry_t *dde; + enum ddt_type type; + enum ddt_class class; ddt_key_fill(&ddk, bp); dde = ddt_alloc(&ddk); - for (enum ddt_type type = 0; type < DDT_TYPES; type++) { - for (enum ddt_class class = 0; class < DDT_CLASSES; class++) { + for (type = 0; type < DDT_TYPES; type++) { + for (class = 0; class < DDT_CLASSES; class++) { /* * We can only do repair if there are multiple copies * of the block. 
For anything in the UNIQUE class, @@ -938,11 +973,12 @@ ddt_repair_entry(ddt_t *ddt, ddt_entry_t *dde, ddt_entry_t *rdde, zio_t *rio) ddt_key_t *rddk = &rdde->dde_key; zio_t *zio; blkptr_t blk; + int p; zio = zio_null(rio, rio->io_spa, NULL, ddt_repair_entry_done, rdde, rio->io_flags); - for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++, rddp++) { + for (p = 0; p < DDT_PHYS_TYPES; p++, ddp++, rddp++) { if (ddp->ddp_phys_birth == 0 || ddp->ddp_phys_birth != rddp->ddp_phys_birth || bcmp(ddp->ddp_dva, rddp->ddp_dva, sizeof (ddp->ddp_dva))) @@ -992,11 +1028,12 @@ ddt_sync_entry(ddt_t *ddt, ddt_entry_t *dde, dmu_tx_t *tx, uint64_t txg) enum ddt_class oclass = dde->dde_class; enum ddt_class nclass; uint64_t total_refcnt = 0; + int p; ASSERT(dde->dde_loaded); ASSERT(!dde->dde_loading); - for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) { + for (p = 0; p < DDT_PHYS_TYPES; p++, ddp++) { ASSERT(dde->dde_lead_zio[p] == NULL); ASSERT((int64_t)ddp->ddp_refcnt >= 0); if (ddp->ddp_phys_birth == 0) { @@ -1054,6 +1091,8 @@ ddt_sync_table(ddt_t *ddt, dmu_tx_t *tx, uint64_t txg) spa_t *spa = ddt->ddt_spa; ddt_entry_t *dde; void *cookie = NULL; + enum ddt_type type; + enum ddt_class class; if (avl_numnodes(&ddt->ddt_tree) == 0) return; @@ -1073,15 +1112,15 @@ ddt_sync_table(ddt_t *ddt, dmu_tx_t *tx, uint64_t txg) ddt_free(dde); } - for (enum ddt_type type = 0; type < DDT_TYPES; type++) { + for (type = 0; type < DDT_TYPES; type++) { uint64_t count = 0; - for (enum ddt_class class = 0; class < DDT_CLASSES; class++) { + for (class = 0; class < DDT_CLASSES; class++) { if (ddt_object_exists(ddt, type, class)) { ddt_object_sync(ddt, type, class, tx); count += ddt_object_count(ddt, type, class); } } - for (enum ddt_class class = 0; class < DDT_CLASSES; class++) { + for (class = 0; class < DDT_CLASSES; class++) { if (count == 0 && ddt_object_exists(ddt, type, class)) ddt_object_destroy(ddt, type, class, tx); } @@ -1097,12 +1136,13 @@ ddt_sync(spa_t *spa, uint64_t txg) dmu_tx_t *tx; zio_t *rio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE); + enum zio_checksum c; ASSERT(spa_syncing_txg(spa) == txg); tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg); - for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) { + for (c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) { ddt_t *ddt = spa->spa_ddt[c]; if (ddt == NULL) continue; diff --git a/module/zfs/dmu.c b/module/zfs/dmu.c index 39234eba5..f2b3f2ba7 100644 --- a/module/zfs/dmu.c +++ b/module/zfs/dmu.c @@ -1611,6 +1611,7 @@ void dmu_object_info_from_dnode(dnode_t *dn, dmu_object_info_t *doi) { dnode_phys_t *dnp; + int i; rw_enter(&dn->dn_struct_rwlock, RW_READER); mutex_enter(&dn->dn_mtx); @@ -1629,7 +1630,7 @@ dmu_object_info_from_dnode(dnode_t *dn, dmu_object_info_t *doi) doi->doi_physical_blocks_512 = (DN_USED_BYTES(dnp) + 256) >> 9; doi->doi_max_offset = (dnp->dn_maxblkid + 1) * dn->dn_datablksz; doi->doi_fill_count = 0; - for (int i = 0; i < dnp->dn_nblkptr; i++) + for (i = 0; i < dnp->dn_nblkptr; i++) doi->doi_fill_count += dnp->dn_blkptr[i].blk_fill; mutex_exit(&dn->dn_mtx); diff --git a/module/zfs/dmu_objset.c b/module/zfs/dmu_objset.c index 7caebd979..bacd62c09 100644 --- a/module/zfs/dmu_objset.c +++ b/module/zfs/dmu_objset.c @@ -528,8 +528,9 @@ void dmu_objset_evict(objset_t *os) { dsl_dataset_t *ds = os->os_dsl_dataset; + int t; - for (int t = 0; t < TXG_SIZE; t++) + for (t = 0; t < TXG_SIZE; t++) ASSERT(!dmu_objset_is_dirty(os, t)); if (ds) { @@ -1041,6 +1042,8 @@ dmu_objset_sync_dnodes(list_t *list, list_t *newlist, dmu_tx_t *tx) static void 
dmu_objset_write_ready(zio_t *zio, arc_buf_t *abuf, void *arg) { + int i; + blkptr_t *bp = zio->io_bp; objset_t *os = arg; dnode_phys_t *dnp = &os->os_phys->os_meta_dnode; @@ -1056,7 +1059,7 @@ dmu_objset_write_ready(zio_t *zio, arc_buf_t *abuf, void *arg) * dnode and user/group accounting objects). */ bp->blk_fill = 0; - for (int i = 0; i < dnp->dn_nblkptr; i++) + for (i = 0; i < dnp->dn_nblkptr; i++) bp->blk_fill += dnp->dn_blkptr[i].blk_fill; } @@ -1178,7 +1181,9 @@ dmu_objset_is_dirty(objset_t *os, uint64_t txg) boolean_t dmu_objset_is_dirty_anywhere(objset_t *os) { - for (int t = 0; t < TXG_SIZE; t++) + int t; + + for (t = 0; t < TXG_SIZE; t++) if (dmu_objset_is_dirty(os, t)) return (B_TRUE); return (B_FALSE); diff --git a/module/zfs/dmu_tx.c b/module/zfs/dmu_tx.c index bd5c71a22..d292438dc 100644 --- a/module/zfs/dmu_tx.c +++ b/module/zfs/dmu_tx.c @@ -216,6 +216,7 @@ dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len) uint64_t start, end, i; int min_bs, max_bs, min_ibs, max_ibs, epbs, bits; int err = 0; + int l; if (len == 0) return; @@ -303,7 +304,7 @@ dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len) * we need to account for overwrites/unref. */ if (start <= dn->dn_maxblkid) { - for (int l = 0; l < DN_MAX_LEVELS; l++) + for (l = 0; l < DN_MAX_LEVELS; l++) history[l] = -1ULL; } while (start <= dn->dn_maxblkid) { diff --git a/module/zfs/dsl_dataset.c b/module/zfs/dsl_dataset.c index 59ac4a609..667aef37e 100644 --- a/module/zfs/dsl_dataset.c +++ b/module/zfs/dsl_dataset.c @@ -85,11 +85,13 @@ parent_delta(dsl_dataset_t *ds, int64_t delta) void dsl_dataset_block_born(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx) { - int used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp); - int compressed = BP_GET_PSIZE(bp); - int uncompressed = BP_GET_UCSIZE(bp); + int used, compressed, uncompressed; int64_t delta; + used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp); + compressed = BP_GET_PSIZE(bp); + uncompressed = BP_GET_UCSIZE(bp); + dprintf_bp(bp, "ds=%p", ds); ASSERT(dmu_tx_is_syncing(tx)); @@ -130,15 +132,17 @@ int dsl_dataset_block_kill(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx, boolean_t async) { + int used, compressed, uncompressed; + if (BP_IS_HOLE(bp)) return (0); ASSERT(dmu_tx_is_syncing(tx)); ASSERT(bp->blk_birth <= tx->tx_txg); - int used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp); - int compressed = BP_GET_PSIZE(bp); - int uncompressed = BP_GET_UCSIZE(bp); + used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp); + compressed = BP_GET_PSIZE(bp); + uncompressed = BP_GET_UCSIZE(bp); ASSERT(used > 0); if (ds == NULL) { @@ -1772,6 +1776,7 @@ dsl_dataset_destroy_sync(void *arg1, void *tag, dmu_tx_t *tx) if (dsl_dataset_is_snapshot(ds_next)) { dsl_dataset_t *ds_nextnext; + dsl_dataset_t *hds; /* * Update next's unique to include blocks which @@ -1794,7 +1799,6 @@ dsl_dataset_destroy_sync(void *arg1, void *tag, dmu_tx_t *tx) ASSERT3P(ds_next->ds_prev, ==, NULL); /* Collapse range in this head. 
*/ - dsl_dataset_t *hds; VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, ds->ds_dir->dd_phys->dd_head_dataset_obj, FTAG, &hds)); diff --git a/module/zfs/dsl_dir.c b/module/zfs/dsl_dir.c index 1cd49c827..545dce9a0 100644 --- a/module/zfs/dsl_dir.c +++ b/module/zfs/dsl_dir.c @@ -964,11 +964,13 @@ dsl_dir_diduse_space(dsl_dir_t *dd, dd_used_t type, dd->dd_phys->dd_used_breakdown[type] >= -used); dd->dd_phys->dd_used_breakdown[type] += used; #ifdef DEBUG - dd_used_t t; - uint64_t u = 0; - for (t = 0; t < DD_USED_NUM; t++) - u += dd->dd_phys->dd_used_breakdown[t]; - ASSERT3U(u, ==, dd->dd_phys->dd_used_bytes); + { + dd_used_t t; + uint64_t u = 0; + for (t = 0; t < DD_USED_NUM; t++) + u += dd->dd_phys->dd_used_breakdown[t]; + ASSERT3U(u, ==, dd->dd_phys->dd_used_bytes); + } #endif } if (needlock) diff --git a/module/zfs/dsl_pool.c b/module/zfs/dsl_pool.c index 700cc9628..a3c1457df 100644 --- a/module/zfs/dsl_pool.c +++ b/module/zfs/dsl_pool.c @@ -692,9 +692,10 @@ upgrade_dir_clones_cb(spa_t *spa, uint64_t dsobj, const char *dsname, void *arg) void dsl_pool_upgrade_dir_clones(dsl_pool_t *dp, dmu_tx_t *tx) { - ASSERT(dmu_tx_is_syncing(tx)); uint64_t obj; + ASSERT(dmu_tx_is_syncing(tx)); + (void) dsl_dir_create_sync(dp, dp->dp_root_dir, FREE_DIR_NAME, tx); VERIFY(0 == dsl_pool_open_special_dir(dp, FREE_DIR_NAME, &dp->dp_free_dir)); diff --git a/module/zfs/dsl_scan.c b/module/zfs/dsl_scan.c index 56d410836..7e6be15a0 100644 --- a/module/zfs/dsl_scan.c +++ b/module/zfs/dsl_scan.c @@ -1076,6 +1076,7 @@ dsl_scan_visitds(dsl_scan_t *scn, uint64_t dsobj, dmu_tx_t *tx) dsl_pool_t *dp = scn->scn_dp; dsl_dataset_t *ds; objset_t *os; + char *dsname; VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds)); @@ -1098,7 +1099,7 @@ dsl_scan_visitds(dsl_scan_t *scn, uint64_t dsobj, dmu_tx_t *tx) dmu_buf_will_dirty(ds->ds_dbuf, tx); dsl_scan_visit_rootbp(scn, ds, &ds->ds_phys->ds_bp, tx); - char *dsname = kmem_alloc(ZFS_MAXNAMELEN, KM_SLEEP); + dsname = kmem_alloc(ZFS_MAXNAMELEN, KM_SLEEP); dsl_dataset_name(ds, dsname); zfs_dbgmsg("scanned dataset %llu (%s) with min=%llu max=%llu; " "pausing=%u", @@ -1294,11 +1295,12 @@ dsl_scan_ddt_entry(dsl_scan_t *scn, enum zio_checksum checksum, ddt_phys_t *ddp = dde->dde_phys; blkptr_t bp; zbookmark_t zb = { 0 }; + int p; if (scn->scn_phys.scn_state != DSS_SCANNING) return; - for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) { + for (p = 0; p < DDT_PHYS_TYPES; p++, ddp++) { if (ddp->ddp_phys_birth == 0 || ddp->ddp_phys_birth > scn->scn_phys.scn_cur_max_txg) continue; @@ -1658,10 +1660,11 @@ dsl_scan_scrub_cb(dsl_pool_t *dp, size_t size = BP_GET_PSIZE(bp); spa_t *spa = dp->dp_spa; uint64_t phys_birth = BP_PHYSICAL_BIRTH(bp); - boolean_t needs_io; + boolean_t needs_io = B_FALSE; int zio_flags = ZIO_FLAG_SCAN_THREAD | ZIO_FLAG_RAW | ZIO_FLAG_CANFAIL; - int zio_priority; + int zio_priority = 0; int scan_delay = 0; + int d; if (phys_birth <= scn->scn_phys.scn_min_txg || phys_birth >= scn->scn_phys.scn_max_txg) @@ -1686,7 +1689,7 @@ dsl_scan_scrub_cb(dsl_pool_t *dp, if (zb->zb_level == ZB_ZIL_LEVEL) zio_flags |= ZIO_FLAG_SPECULATIVE; - for (int d = 0; d < BP_GET_NDVAS(bp); d++) { + for (d = 0; d < BP_GET_NDVAS(bp); d++) { vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(&bp->blk_dva[d])); diff --git a/module/zfs/include/sys/spa.h b/module/zfs/include/sys/spa.h index 456ec06dc..7ebbbb2cd 100644 --- a/module/zfs/include/sys/spa.h +++ b/module/zfs/include/sys/spa.h @@ -350,13 +350,14 @@ typedef struct blkptr { int size = BP_SPRINTF_LEN; \ int len = 0; \ int copies = 0; \ + int d; \ \ if (bp 
== NULL) { \ len = func(buf + len, size - len, "<NULL>"); \ } else if (BP_IS_HOLE(bp)) { \ len = func(buf + len, size - len, "<hole>"); \ } else { \ - for (int d = 0; d < BP_GET_NDVAS(bp); d++) { \ + for (d = 0; d < BP_GET_NDVAS(bp); d++) { \ const dva_t *dva = &bp->blk_dva[d]; \ if (DVA_IS_VALID(dva)) \ copies++; \ diff --git a/module/zfs/metaslab.c b/module/zfs/metaslab.c index 17b4b12c4..1722a53fc 100644 --- a/module/zfs/metaslab.c +++ b/module/zfs/metaslab.c @@ -730,6 +730,7 @@ void metaslab_fini(metaslab_t *msp) { metaslab_group_t *mg = msp->ms_group; + int t; vdev_space_update(mg->mg_vd, -msp->ms_smo.smo_alloc, 0, -msp->ms_map.sm_size); @@ -741,12 +742,12 @@ metaslab_fini(metaslab_t *msp) space_map_unload(&msp->ms_map); space_map_destroy(&msp->ms_map); - for (int t = 0; t < TXG_SIZE; t++) { + for (t = 0; t < TXG_SIZE; t++) { space_map_destroy(&msp->ms_allocmap[t]); space_map_destroy(&msp->ms_freemap[t]); } - for (int t = 0; t < TXG_DEFER_SIZE; t++) + for (t = 0; t < TXG_DEFER_SIZE; t++) space_map_destroy(&msp->ms_defermap[t]); ASSERT3S(msp->ms_deferspace, ==, 0); @@ -849,6 +850,7 @@ metaslab_activate(metaslab_t *msp, uint64_t activation_weight, uint64_t size) metaslab_group_t *mg = msp->ms_group; space_map_t *sm = &msp->ms_map; space_map_ops_t *sm_ops = msp->ms_group->mg_class->mc_ops; + int t; ASSERT(MUTEX_HELD(&msp->ms_lock)); @@ -862,7 +864,7 @@ metaslab_activate(metaslab_t *msp, uint64_t activation_weight, uint64_t size) metaslab_group_sort(msp->ms_group, msp, 0); return (error); } - for (int t = 0; t < TXG_DEFER_SIZE; t++) + for (t = 0; t < TXG_DEFER_SIZE; t++) space_map_walk(&msp->ms_defermap[t], space_map_claim, sm); @@ -922,6 +924,7 @@ metaslab_sync(metaslab_t *msp, uint64_t txg) space_map_obj_t *smo = &msp->ms_smo_syncing; dmu_buf_t *db; dmu_tx_t *tx; + int t; ASSERT(!vd->vdev_ishole); @@ -977,11 +980,11 @@ metaslab_sync(metaslab_t *msp, uint64_t txg) space_map_walk(sm, space_map_remove, allocmap); space_map_walk(freed_map, space_map_remove, allocmap); - for (int t = 0; t < TXG_DEFER_SIZE; t++) + for (t = 0; t < TXG_DEFER_SIZE; t++) space_map_walk(&msp->ms_defermap[t], space_map_remove, allocmap); - for (int t = 1; t < TXG_CONCURRENT_STATES; t++) + for (t = 1; t < TXG_CONCURRENT_STATES; t++) space_map_walk(&msp->ms_allocmap[(txg + t) & TXG_MASK], space_map_remove, allocmap); @@ -1019,6 +1022,7 @@ metaslab_sync_done(metaslab_t *msp, uint64_t txg) metaslab_group_t *mg = msp->ms_group; vdev_t *vd = mg->mg_vd; int64_t alloc_delta, defer_delta; + int t; ASSERT(!vd->vdev_ishole); @@ -1029,14 +1033,14 @@ metaslab_sync_done(metaslab_t *msp, uint64_t txg) * allocmaps and freemaps and add its capacity to the vdev. 
*/ if (freed_map->sm_size == 0) { - for (int t = 0; t < TXG_SIZE; t++) { + for (t = 0; t < TXG_SIZE; t++) { space_map_create(&msp->ms_allocmap[t], sm->sm_start, sm->sm_size, sm->sm_shift, sm->sm_lock); space_map_create(&msp->ms_freemap[t], sm->sm_start, sm->sm_size, sm->sm_shift, sm->sm_lock); } - for (int t = 0; t < TXG_DEFER_SIZE; t++) + for (t = 0; t < TXG_DEFER_SIZE; t++) space_map_create(&msp->ms_defermap[t], sm->sm_start, sm->sm_size, sm->sm_shift, sm->sm_lock); @@ -1082,7 +1086,7 @@ metaslab_sync_done(metaslab_t *msp, uint64_t txg) if (sm->sm_loaded && (msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) { int evictable = 1; - for (int t = 1; t < TXG_CONCURRENT_STATES; t++) + for (t = 1; t < TXG_CONCURRENT_STATES; t++) if (msp->ms_allocmap[(txg + t) & TXG_MASK].sm_space) evictable = 0; @@ -1099,12 +1103,13 @@ void metaslab_sync_reassess(metaslab_group_t *mg) { vdev_t *vd = mg->mg_vd; + int m; /* * Re-evaluate all metaslabs which have lower offsets than the * bonus area. */ - for (int m = 0; m < vd->vdev_ms_count; m++) { + for (m = 0; m < vd->vdev_ms_count; m++) { metaslab_t *msp = vd->vdev_ms[m]; if (msp->ms_map.sm_start > mg->mg_bonus_area) @@ -1517,7 +1522,7 @@ metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp, { dva_t *dva = bp->blk_dva; dva_t *hintdva = hintbp->blk_dva; - int error = 0; + int d, error = 0; ASSERT(bp->blk_birth == 0); ASSERT(BP_PHYSICAL_BIRTH(bp) == 0); @@ -1533,7 +1538,7 @@ metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp, ASSERT(BP_GET_NDVAS(bp) == 0); ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp)); - for (int d = 0; d < ndvas; d++) { + for (d = 0; d < ndvas; d++) { error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva, txg, flags); if (error) { @@ -1559,14 +1564,14 @@ void metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now) { const dva_t *dva = bp->blk_dva; - int ndvas = BP_GET_NDVAS(bp); + int d, ndvas = BP_GET_NDVAS(bp); ASSERT(!BP_IS_HOLE(bp)); ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa)); spa_config_enter(spa, SCL_FREE, FTAG, RW_READER); - for (int d = 0; d < ndvas; d++) + for (d = 0; d < ndvas; d++) metaslab_free_dva(spa, &dva[d], txg, now); spa_config_exit(spa, SCL_FREE, FTAG); @@ -1577,7 +1582,7 @@ metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg) { const dva_t *dva = bp->blk_dva; int ndvas = BP_GET_NDVAS(bp); - int error = 0; + int d, error = 0; ASSERT(!BP_IS_HOLE(bp)); @@ -1592,7 +1597,7 @@ metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg) spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER); - for (int d = 0; d < ndvas; d++) + for (d = 0; d < ndvas; d++) if ((error = metaslab_claim_dva(spa, &dva[d], txg)) != 0) break; diff --git a/module/zfs/sa.c b/module/zfs/sa.c index 4cb4546b2..d5c985bf1 100644 --- a/module/zfs/sa.c +++ b/module/zfs/sa.c @@ -1478,8 +1478,8 @@ sa_find_idx_tab(objset_t *os, dmu_object_type_t bonustype, void *data) /* Verify header size is consistent with layout information */ ASSERT(tb); - ASSERT(IS_SA_BONUSTYPE(bonustype) && - SA_HDR_SIZE_MATCH_LAYOUT(hdr, tb) || !IS_SA_BONUSTYPE(bonustype) || + ASSERT((IS_SA_BONUSTYPE(bonustype) && + SA_HDR_SIZE_MATCH_LAYOUT(hdr, tb)) || !IS_SA_BONUSTYPE(bonustype) || (IS_SA_BONUSTYPE(bonustype) && hdr->sa_layout_info == 0)); /* diff --git a/module/zfs/spa.c b/module/zfs/spa.c index b6190e4cf..9ae16d316 100644 --- a/module/zfs/spa.c +++ b/module/zfs/spa.c @@ -653,8 +653,10 @@ spa_taskq_create(spa_t *spa, const char *name, enum zti_modes mode, static void spa_create_zio_taskqs(spa_t 
*spa) { - for (int t = 0; t < ZIO_TYPES; t++) { - for (int q = 0; q < ZIO_TASKQ_TYPES; q++) { + int t, q; + + for (t = 0; t < ZIO_TYPES; t++) { + for (q = 0; q < ZIO_TASKQ_TYPES; q++) { const zio_taskq_info_t *ztip = &zio_taskqs[t][q]; enum zti_modes mode = ztip->zti_mode; uint_t value = ztip->zti_value; @@ -808,6 +810,8 @@ spa_activate(spa_t *spa, int mode) static void spa_deactivate(spa_t *spa) { + int t, q; + ASSERT(spa->spa_sync_on == B_FALSE); ASSERT(spa->spa_dsl_pool == NULL); ASSERT(spa->spa_root_vdev == NULL); @@ -819,8 +823,8 @@ spa_deactivate(spa_t *spa) list_destroy(&spa->spa_config_dirty_list); list_destroy(&spa->spa_state_dirty_list); - for (int t = 0; t < ZIO_TYPES; t++) { - for (int q = 0; q < ZIO_TASKQ_TYPES; q++) { + for (t = 0; t < ZIO_TYPES; t++) { + for (q = 0; q < ZIO_TASKQ_TYPES; q++) { if (spa->spa_zio_taskq[t][q] != NULL) taskq_destroy(spa->spa_zio_taskq[t][q]); spa->spa_zio_taskq[t][q] = NULL; @@ -883,6 +887,7 @@ spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, nvlist_t **child; uint_t children; int error; + int c; if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0) return (error); @@ -902,7 +907,7 @@ spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, return (EINVAL); } - for (int c = 0; c < children; c++) { + for (c = 0; c < children; c++) { vdev_t *vd; if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c, atype)) != 0) { @@ -1279,7 +1284,9 @@ load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value) static void spa_check_removed(vdev_t *vd) { - for (int c = 0; c < vd->vdev_children; c++) + int c; + + for (c = 0; c < vd->vdev_children; c++) spa_check_removed(vd->vdev_child[c]); if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd)) { @@ -1296,6 +1303,7 @@ spa_config_valid(spa_t *spa, nvlist_t *config) { vdev_t *mrvd, *rvd = spa->spa_root_vdev; nvlist_t *nv; + int c, i; VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nv) == 0); @@ -1317,7 +1325,7 @@ spa_config_valid(spa_t *spa, nvlist_t *config) KM_SLEEP); VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0); - for (int c = 0; c < rvd->vdev_children; c++) { + for (c = 0; c < rvd->vdev_children; c++) { vdev_t *tvd = rvd->vdev_child[c]; vdev_t *mtvd = mrvd->vdev_child[c]; @@ -1334,7 +1342,7 @@ spa_config_valid(spa_t *spa, nvlist_t *config) VERIFY(nvlist_add_nvlist(spa->spa_load_info, ZPOOL_CONFIG_MISSING_DEVICES, nv) == 0); - for (int i = 0; i < idx; i++) + for (i = 0; i < idx; i++) nvlist_free(child[i]); } nvlist_free(nv); @@ -1346,7 +1354,7 @@ spa_config_valid(spa_t *spa, nvlist_t *config) * from the MOS config (mrvd). Check each top-level vdev * with the corresponding MOS config top-level (mtvd). 
*/ - for (int c = 0; c < rvd->vdev_children; c++) { + for (c = 0; c < rvd->vdev_children; c++) { vdev_t *tvd = rvd->vdev_child[c]; vdev_t *mtvd = mrvd->vdev_child[c]; @@ -1435,13 +1443,14 @@ spa_passivate_log(spa_t *spa) { vdev_t *rvd = spa->spa_root_vdev; boolean_t slog_found = B_FALSE; + int c; ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER)); if (!spa_has_slogs(spa)) return (B_FALSE); - for (int c = 0; c < rvd->vdev_children; c++) { + for (c = 0; c < rvd->vdev_children; c++) { vdev_t *tvd = rvd->vdev_child[c]; metaslab_group_t *mg = tvd->vdev_mg; @@ -1458,10 +1467,11 @@ static void spa_activate_log(spa_t *spa) { vdev_t *rvd = spa->spa_root_vdev; + int c; ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER)); - for (int c = 0; c < rvd->vdev_children; c++) { + for (c = 0; c < rvd->vdev_children; c++) { vdev_t *tvd = rvd->vdev_child[c]; metaslab_group_t *mg = tvd->vdev_mg; @@ -1491,7 +1501,9 @@ spa_offline_log(spa_t *spa) static void spa_aux_check_removed(spa_aux_vdev_t *sav) { - for (int i = 0; i < sav->sav_count; i++) + int i; + + for (i = 0; i < sav->sav_count; i++) spa_check_removed(sav->sav_vdevs[i]); } @@ -2163,6 +2175,7 @@ spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config, spa->spa_load_max_txg == UINT64_MAX)) { dmu_tx_t *tx; int need_update = B_FALSE; + int c; ASSERT(state != SPA_LOAD_TRYIMPORT); @@ -2209,7 +2222,7 @@ spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config, (spa->spa_import_flags & ZFS_IMPORT_VERBATIM)) need_update = B_TRUE; - for (int c = 0; c < rvd->vdev_children; c++) + for (c = 0; c < rvd->vdev_children; c++) if (rvd->vdev_child[c]->vdev_ms_array == 0) need_update = B_TRUE; @@ -2845,6 +2858,7 @@ spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props, nvlist_t **spares, **l2cache; uint_t nspares, nl2cache; uint64_t version, obj; + int c; /* * If this pool already exists, return failure. @@ -2903,7 +2917,7 @@ spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props, (error = vdev_create(rvd, txg, B_FALSE)) == 0 && (error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) == 0) { - for (int c = 0; c < rvd->vdev_children; c++) { + for (c = 0; c < rvd->vdev_children; c++) { vdev_metaslab_set_size(rvd->vdev_child[c]); vdev_expand(rvd->vdev_child[c], txg); } @@ -3103,7 +3117,9 @@ spa_generate_rootconf(char *devpath, char *devid, uint64_t *guid) static void spa_alt_rootvdev(vdev_t *vd, vdev_t **avd, uint64_t *txg) { - for (int c = 0; c < vd->vdev_children; c++) + int c; + + for (c = 0; c < vd->vdev_children; c++) spa_alt_rootvdev(vd->vdev_child[c], avd, txg); if (vd->vdev_ops->vdev_op_leaf) { @@ -3687,6 +3703,7 @@ spa_vdev_add(spa_t *spa, nvlist_t *nvroot) vdev_t *vd, *tvd; nvlist_t **spares, **l2cache; uint_t nspares, nl2cache; + int c; ASSERT(spa_writeable(spa)); @@ -3723,7 +3740,7 @@ spa_vdev_add(spa_t *spa, nvlist_t *nvroot) /* * Transfer each new top-level vdev from vd to rvd. */ - for (int c = 0; c < vd->vdev_children; c++) { + for (c = 0; c < vd->vdev_children; c++) { /* * Set the vdev id to the first hole, if one exists. 
@@ -3999,6 +4016,7 @@ spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done) boolean_t unspare = B_FALSE; uint64_t unspare_guid; char *vdpath; + int c, t; ASSERT(spa_writeable(spa)); @@ -4066,7 +4084,7 @@ spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done) vd->vdev_path != NULL) { size_t len = strlen(vd->vdev_path); - for (int c = 0; c < pvd->vdev_children; c++) { + for (c = 0; c < pvd->vdev_children; c++) { cvd = pvd->vdev_child[c]; if (cvd == vd || cvd->vdev_path == NULL) @@ -4174,7 +4192,7 @@ spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done) * prevent vd from being accessed after it's freed. */ vdpath = spa_strdup(vd->vdev_path); - for (int t = 0; t < TXG_SIZE; t++) + for (t = 0; t < TXG_SIZE; t++) (void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t); vd->vdev_detached = B_TRUE; vdev_dirty(tvd, VDD_DTL, vd, txg); @@ -4509,7 +4527,9 @@ out: static nvlist_t * spa_nvlist_lookup_by_guid(nvlist_t **nvpp, int count, uint64_t target_guid) { - for (int i = 0; i < count; i++) { + int i; + + for (i = 0; i < count; i++) { uint64_t guid; VERIFY(nvlist_lookup_uint64(nvpp[i], ZPOOL_CONFIG_GUID, @@ -4527,11 +4547,12 @@ spa_vdev_remove_aux(nvlist_t *config, char *name, nvlist_t **dev, int count, nvlist_t *dev_to_remove) { nvlist_t **newdev = NULL; + int i, j; if (count > 1) newdev = kmem_alloc((count - 1) * sizeof (void *), KM_SLEEP); - for (int i = 0, j = 0; i < count; i++) { + for (i = 0, j = 0; i < count; i++) { if (dev[i] == dev_to_remove) continue; VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_SLEEP) == 0); @@ -4540,7 +4561,7 @@ spa_vdev_remove_aux(nvlist_t *config, char *name, nvlist_t **dev, int count, VERIFY(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY) == 0); VERIFY(nvlist_add_nvlist_array(config, name, newdev, count - 1) == 0); - for (int i = 0; i < count - 1; i++) + for (i = 0; i < count - 1; i++) nvlist_free(newdev[i]); if (count > 1) @@ -4761,8 +4782,9 @@ static vdev_t * spa_vdev_resilver_done_hunt(vdev_t *vd) { vdev_t *newvd, *oldvd; + int c; - for (int c = 0; c < vd->vdev_children; c++) { + for (c = 0; c < vd->vdev_children; c++) { oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]); if (oldvd != NULL) return (oldvd); @@ -4965,6 +4987,8 @@ spa_scan(spa_t *spa, pool_scan_func_t func) static void spa_async_remove(spa_t *spa, vdev_t *vd) { + int c; + if (vd->vdev_remove_wanted) { vd->vdev_remove_wanted = B_FALSE; vd->vdev_delayed_close = B_FALSE; @@ -4983,19 +5007,21 @@ spa_async_remove(spa_t *spa, vdev_t *vd) vdev_state_dirty(vd->vdev_top); } - for (int c = 0; c < vd->vdev_children; c++) + for (c = 0; c < vd->vdev_children; c++) spa_async_remove(spa, vd->vdev_child[c]); } static void spa_async_probe(spa_t *spa, vdev_t *vd) { + int c; + if (vd->vdev_probe_wanted) { vd->vdev_probe_wanted = B_FALSE; vdev_reopen(vd); /* vdev_open() does the actual probe */ } - for (int c = 0; c < vd->vdev_children; c++) + for (c = 0; c < vd->vdev_children; c++) spa_async_probe(spa, vd->vdev_child[c]); } @@ -5005,11 +5031,12 @@ spa_async_autoexpand(spa_t *spa, vdev_t *vd) sysevent_id_t eid; nvlist_t *attr; char *physpath; + int c; if (!spa->spa_autoexpand) return; - for (int c = 0; c < vd->vdev_children; c++) { + for (c = 0; c < vd->vdev_children; c++) { vdev_t *cvd = vd->vdev_child[c]; spa_async_autoexpand(spa, cvd); } @@ -5033,7 +5060,7 @@ spa_async_autoexpand(spa_t *spa, vdev_t *vd) static void spa_async_thread(spa_t *spa) { - int tasks; + int tasks, i; ASSERT(spa->spa_sync_on); @@ -5072,9 +5099,9 @@ spa_async_thread(spa_t 
*spa) if (tasks & SPA_ASYNC_REMOVE) { spa_vdev_state_enter(spa, SCL_NONE); spa_async_remove(spa, spa->spa_root_vdev); - for (int i = 0; i < spa->spa_l2cache.sav_count; i++) + for (i = 0; i < spa->spa_l2cache.sav_count; i++) spa_async_remove(spa, spa->spa_l2cache.sav_vdevs[i]); - for (int i = 0; i < spa->spa_spares.sav_count; i++) + for (i = 0; i < spa->spa_spares.sav_count; i++) spa_async_remove(spa, spa->spa_spares.sav_vdevs[i]); (void) spa_vdev_state_exit(spa, NULL, 0); } @@ -5460,6 +5487,7 @@ spa_sync(spa_t *spa, uint64_t txg) vdev_t *vd; dmu_tx_t *tx; int error; + int c; VERIFY(spa_writeable(spa)); @@ -5593,7 +5621,7 @@ spa_sync(spa_t *spa, uint64_t txg) int children = rvd->vdev_children; int c0 = spa_get_random(children); - for (int c = 0; c < children; c++) { + for (c = 0; c < children; c++) { vd = rvd->vdev_child[(c0 + c) % children]; if (vd->vdev_ms_array == 0 || vd->vdev_islog) continue; diff --git a/module/zfs/spa_misc.c b/module/zfs/spa_misc.c index 1b54afb0b..228ed1376 100644 --- a/module/zfs/spa_misc.c +++ b/module/zfs/spa_misc.c @@ -255,7 +255,9 @@ int zfs_recover = 0; static void spa_config_lock_init(spa_t *spa) { - for (int i = 0; i < SCL_LOCKS; i++) { + int i; + + for (i = 0; i < SCL_LOCKS; i++) { spa_config_lock_t *scl = &spa->spa_config_lock[i]; mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL); cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL); @@ -268,7 +270,9 @@ spa_config_lock_init(spa_t *spa) static void spa_config_lock_destroy(spa_t *spa) { - for (int i = 0; i < SCL_LOCKS; i++) { + int i; + + for (i = 0; i < SCL_LOCKS; i++) { spa_config_lock_t *scl = &spa->spa_config_lock[i]; mutex_destroy(&scl->scl_lock); cv_destroy(&scl->scl_cv); @@ -281,7 +285,9 @@ spa_config_lock_destroy(spa_t *spa) int spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw) { - for (int i = 0; i < SCL_LOCKS; i++) { + int i; + + for (i = 0; i < SCL_LOCKS; i++) { spa_config_lock_t *scl = &spa->spa_config_lock[i]; if (!(locks & (1 << i))) continue; @@ -311,8 +317,9 @@ void spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw) { int wlocks_held = 0; + int i; - for (int i = 0; i < SCL_LOCKS; i++) { + for (i = 0; i < SCL_LOCKS; i++) { spa_config_lock_t *scl = &spa->spa_config_lock[i]; if (scl->scl_writer == curthread) wlocks_held |= (1 << i); @@ -341,7 +348,9 @@ spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw) void spa_config_exit(spa_t *spa, int locks, void *tag) { - for (int i = SCL_LOCKS - 1; i >= 0; i--) { + int i; + + for (i = SCL_LOCKS - 1; i >= 0; i--) { spa_config_lock_t *scl = &spa->spa_config_lock[i]; if (!(locks & (1 << i))) continue; @@ -360,9 +369,9 @@ spa_config_exit(spa_t *spa, int locks, void *tag) int spa_config_held(spa_t *spa, int locks, krw_t rw) { - int locks_held = 0; + int i, locks_held = 0; - for (int i = 0; i < SCL_LOCKS; i++) { + for (i = 0; i < SCL_LOCKS; i++) { spa_config_lock_t *scl = &spa->spa_config_lock[i]; if (!(locks & (1 << i))) continue; @@ -424,6 +433,7 @@ spa_add(const char *name, nvlist_t *config, const char *altroot) { spa_t *spa; spa_config_dirent_t *dp; + int t; ASSERT(MUTEX_HELD(&spa_namespace_lock)); @@ -444,7 +454,7 @@ spa_add(const char *name, nvlist_t *config, const char *altroot) cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL); cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL); - for (int t = 0; t < TXG_SIZE; t++) + for (t = 0; t < TXG_SIZE; t++) bplist_create(&spa->spa_free_bplist[t]); (void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name)); @@ -496,6 +506,7 @@ void spa_remove(spa_t *spa) { spa_config_dirent_t 
*dp; + int t; ASSERT(MUTEX_HELD(&spa_namespace_lock)); ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED); @@ -526,7 +537,7 @@ spa_remove(spa_t *spa) spa_config_lock_destroy(spa); - for (int t = 0; t < TXG_SIZE; t++) + for (t = 0; t < TXG_SIZE; t++) bplist_destroy(&spa->spa_free_bplist[t]); cv_destroy(&spa->spa_async_cv); @@ -877,10 +888,9 @@ spa_vdev_config_enter(spa_t *spa) void spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error, char *tag) { - ASSERT(MUTEX_HELD(&spa_namespace_lock)); - int config_changed = B_FALSE; + ASSERT(MUTEX_HELD(&spa_namespace_lock)); ASSERT(txg > spa_last_synced_txg(spa)); spa->spa_pending_vdev = NULL; @@ -1454,8 +1464,9 @@ uint64_t bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp) { uint64_t dsize = 0; + int d; - for (int d = 0; d < SPA_DVAS_PER_BP; d++) + for (d = 0; d < SPA_DVAS_PER_BP; d++) dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]); return (dsize); @@ -1465,10 +1476,11 @@ uint64_t bp_get_dsize(spa_t *spa, const blkptr_t *bp) { uint64_t dsize = 0; + int d; spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); - for (int d = 0; d < SPA_DVAS_PER_BP; d++) + for (d = 0; d < SPA_DVAS_PER_BP; d++) dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]); spa_config_exit(spa, SCL_VDEV, FTAG); diff --git a/module/zfs/vdev.c b/module/zfs/vdev.c index bac3e8605..604a673f9 100644 --- a/module/zfs/vdev.c +++ b/module/zfs/vdev.c @@ -85,8 +85,9 @@ vdev_default_asize(vdev_t *vd, uint64_t psize) { uint64_t asize = P2ROUNDUP(psize, 1ULL << vd->vdev_top->vdev_ashift); uint64_t csize; + int c; - for (int c = 0; c < vd->vdev_children; c++) { + for (c = 0; c < vd->vdev_children; c++) { csize = vdev_psize_to_asize(vd->vdev_child[c], psize); asize = MAX(asize, csize); } @@ -132,9 +133,10 @@ vdev_get_min_asize(vdev_t *vd) void vdev_set_min_asize(vdev_t *vd) { + int c; vd->vdev_min_asize = vdev_get_min_asize(vd); - for (int c = 0; c < vd->vdev_children; c++) + for (c = 0; c < vd->vdev_children; c++) vdev_set_min_asize(vd->vdev_child[c]); } @@ -157,11 +159,12 @@ vdev_t * vdev_lookup_by_guid(vdev_t *vd, uint64_t guid) { vdev_t *mvd; + int c; if (vd->vdev_guid == guid) return (vd); - for (int c = 0; c < vd->vdev_children; c++) + for (c = 0; c < vd->vdev_children; c++) if ((mvd = vdev_lookup_by_guid(vd->vdev_child[c], guid)) != NULL) return (mvd); @@ -252,16 +255,17 @@ vdev_compact_children(vdev_t *pvd) vdev_t **newchild, *cvd; int oldc = pvd->vdev_children; int newc; + int c; ASSERT(spa_config_held(pvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL); - for (int c = newc = 0; c < oldc; c++) + for (c = newc = 0; c < oldc; c++) if (pvd->vdev_child[c]) newc++; newchild = kmem_alloc(newc * sizeof (vdev_t *), KM_SLEEP); - for (int c = newc = 0; c < oldc; c++) { + for (c = newc = 0; c < oldc; c++) { if ((cvd = pvd->vdev_child[c]) != NULL) { newchild[newc] = cvd; cvd->vdev_id = newc++; @@ -280,6 +284,7 @@ vdev_t * vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops) { vdev_t *vd; + int t; vd = kmem_zalloc(sizeof (vdev_t), KM_SLEEP); @@ -315,7 +320,7 @@ vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops) mutex_init(&vd->vdev_dtl_lock, NULL, MUTEX_DEFAULT, NULL); mutex_init(&vd->vdev_stat_lock, NULL, MUTEX_DEFAULT, NULL); mutex_init(&vd->vdev_probe_lock, NULL, MUTEX_DEFAULT, NULL); - for (int t = 0; t < DTL_TYPES; t++) { + for (t = 0; t < DTL_TYPES; t++) { space_map_create(&vd->vdev_dtl[t], 0, -1ULL, 0, &vd->vdev_dtl_lock); } @@ -561,6 +566,7 @@ vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id, void vdev_free(vdev_t 
*vd) { + int c, t; spa_t *spa = vd->vdev_spa; /* @@ -575,7 +581,7 @@ vdev_free(vdev_t *vd) /* * Free all children. */ - for (int c = 0; c < vd->vdev_children; c++) + for (c = 0; c < vd->vdev_children; c++) vdev_free(vd->vdev_child[c]); ASSERT(vd->vdev_child == NULL); @@ -624,7 +630,7 @@ vdev_free(vdev_t *vd) txg_list_destroy(&vd->vdev_dtl_list); mutex_enter(&vd->vdev_dtl_lock); - for (int t = 0; t < DTL_TYPES; t++) { + for (t = 0; t < DTL_TYPES; t++) { space_map_unload(&vd->vdev_dtl[t]); space_map_destroy(&vd->vdev_dtl[t]); } @@ -707,12 +713,14 @@ vdev_top_transfer(vdev_t *svd, vdev_t *tvd) static void vdev_top_update(vdev_t *tvd, vdev_t *vd) { + int c; + if (vd == NULL) return; vd->vdev_top = tvd; - for (int c = 0; c < vd->vdev_children; c++) + for (c = 0; c < vd->vdev_children; c++) vdev_top_update(tvd, vd->vdev_child[c]); } @@ -960,6 +968,7 @@ vdev_probe(vdev_t *vd, zio_t *zio) spa_t *spa = vd->vdev_spa; vdev_probe_stats_t *vps = NULL; zio_t *pio; + int l; ASSERT(vd->vdev_ops->vdev_op_leaf); @@ -1029,7 +1038,7 @@ vdev_probe(vdev_t *vd, zio_t *zio) return (NULL); } - for (int l = 1; l < VDEV_LABELS; l++) { + for (l = 1; l < VDEV_LABELS; l++) { zio_nowait(zio_read_phys(pio, vd, vdev_label_offset(vd->vdev_psize, l, offsetof(vdev_label_t, vl_pad2)), @@ -1058,10 +1067,12 @@ vdev_open_child(void *arg) boolean_t vdev_uses_zvols(vdev_t *vd) { + int c; + if (vd->vdev_path && strncmp(vd->vdev_path, ZVOL_DIR, strlen(ZVOL_DIR)) == 0) return (B_TRUE); - for (int c = 0; c < vd->vdev_children; c++) + for (c = 0; c < vd->vdev_children; c++) if (vdev_uses_zvols(vd->vdev_child[c])) return (B_TRUE); return (B_FALSE); @@ -1072,6 +1083,7 @@ vdev_open_children(vdev_t *vd) { taskq_t *tq; int children = vd->vdev_children; + int c; /* * in order to handle pools on top of zvols, do the opens @@ -1079,7 +1091,7 @@ vdev_open_children(vdev_t *vd) * spa_namespace_lock */ if (vdev_uses_zvols(vd)) { - for (int c = 0; c < children; c++) + for (c = 0; c < children; c++) vd->vdev_child[c]->vdev_open_error = vdev_open(vd->vdev_child[c]); return; @@ -1087,9 +1099,9 @@ vdev_open_children(vdev_t *vd) tq = taskq_create("vdev_open", children, minclsyspri, children, children, TASKQ_PREPOPULATE); - for (int c = 0; c < children; c++) + for (c = 0; c < children; c++) VERIFY(taskq_dispatch(tq, vdev_open_child, vd->vdev_child[c], - TQ_SLEEP) != NULL); + TQ_SLEEP) != 0); taskq_destroy(tq); } @@ -1105,6 +1117,7 @@ vdev_open(vdev_t *vd) uint64_t osize = 0; uint64_t asize, psize; uint64_t ashift = 0; + int c; ASSERT(vd->vdev_open_thread == curthread || spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); @@ -1183,7 +1196,7 @@ vdev_open(vdev_t *vd) if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops) return (0); - for (int c = 0; c < vd->vdev_children; c++) { + for (c = 0; c < vd->vdev_children; c++) { if (vd->vdev_child[c]->vdev_state != VDEV_STATE_HEALTHY) { vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED, VDEV_AUX_NONE); @@ -1292,8 +1305,9 @@ vdev_validate(vdev_t *vd) nvlist_t *label; uint64_t guid = 0, top_guid; uint64_t state; + int c; - for (int c = 0; c < vd->vdev_children; c++) + for (c = 0; c < vd->vdev_children; c++) if (vdev_validate(vd->vdev_child[c]) != 0) return (EBADF); @@ -1432,12 +1446,13 @@ void vdev_hold(vdev_t *vd) { spa_t *spa = vd->vdev_spa; + int c; ASSERT(spa_is_root(spa)); if (spa->spa_state == POOL_STATE_UNINITIALIZED) return; - for (int c = 0; c < vd->vdev_children; c++) + for (c = 0; c < vd->vdev_children; c++) vdev_hold(vd->vdev_child[c]); if (vd->vdev_ops->vdev_op_leaf) @@ -1447,10 +1462,10 
@@ vdev_hold(vdev_t *vd) void vdev_rele(vdev_t *vd) { - spa_t *spa = vd->vdev_spa; + int c; - ASSERT(spa_is_root(spa)); - for (int c = 0; c < vd->vdev_children; c++) + ASSERT(spa_is_root(vd->vdev_spa)); + for (c = 0; c < vd->vdev_children; c++) vdev_rele(vd->vdev_child[c]); if (vd->vdev_ops->vdev_op_leaf) @@ -1643,11 +1658,11 @@ vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg, int scrub_done) { spa_t *spa = vd->vdev_spa; avl_tree_t reftree; - int minref; + int c, t, minref; ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0); - for (int c = 0; c < vd->vdev_children; c++) + for (c = 0; c < vd->vdev_children; c++) vdev_dtl_reassess(vd->vdev_child[c], txg, scrub_txg, scrub_done); @@ -1707,7 +1722,7 @@ vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg, int scrub_done) } mutex_enter(&vd->vdev_dtl_lock); - for (int t = 0; t < DTL_TYPES; t++) { + for (t = 0; t < DTL_TYPES; t++) { /* account for child's outage in parent's missing map */ int s = (t == DTL_MISSING) ? DTL_OUTAGE: t; if (t == DTL_SCRUB) @@ -1719,7 +1734,7 @@ vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg, int scrub_done) else minref = vd->vdev_children; /* any kind of mirror */ space_map_ref_create(&reftree); - for (int c = 0; c < vd->vdev_children; c++) { + for (c = 0; c < vd->vdev_children; c++) { vdev_t *cvd = vd->vdev_child[c]; mutex_enter(&cvd->vdev_dtl_lock); space_map_ref_add_map(&reftree, &cvd->vdev_dtl[s], 1); @@ -1869,6 +1884,7 @@ vdev_resilver_needed(vdev_t *vd, uint64_t *minp, uint64_t *maxp) boolean_t needed = B_FALSE; uint64_t thismin = UINT64_MAX; uint64_t thismax = 0; + int c; if (vd->vdev_children == 0) { mutex_enter(&vd->vdev_dtl_lock); @@ -1884,7 +1900,7 @@ vdev_resilver_needed(vdev_t *vd, uint64_t *minp, uint64_t *maxp) } mutex_exit(&vd->vdev_dtl_lock); } else { - for (int c = 0; c < vd->vdev_children; c++) { + for (c = 0; c < vd->vdev_children; c++) { vdev_t *cvd = vd->vdev_child[c]; uint64_t cmin, cmax; @@ -1906,10 +1922,12 @@ vdev_resilver_needed(vdev_t *vd, uint64_t *minp, uint64_t *maxp) void vdev_load(vdev_t *vd) { + int c; + /* * Recursively load all children. */ - for (int c = 0; c < vd->vdev_children; c++) + for (c = 0; c < vd->vdev_children; c++) vdev_load(vd->vdev_child[c]); /* @@ -1977,6 +1995,7 @@ vdev_remove(vdev_t *vd, uint64_t txg) spa_t *spa = vd->vdev_spa; objset_t *mos = spa->spa_meta_objset; dmu_tx_t *tx; + int m; tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg); @@ -1987,7 +2006,7 @@ vdev_remove(vdev_t *vd, uint64_t txg) } if (vd->vdev_ms != NULL) { - for (int m = 0; m < vd->vdev_ms_count; m++) { + for (m = 0; m < vd->vdev_ms_count; m++) { metaslab_t *msp = vd->vdev_ms[m]; if (msp == NULL || msp->ms_smo.smo_object == 0) @@ -2324,6 +2343,7 @@ void vdev_clear(spa_t *spa, vdev_t *vd) { vdev_t *rvd = spa->spa_root_vdev; + int c; ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); @@ -2334,7 +2354,7 @@ vdev_clear(spa_t *spa, vdev_t *vd) vd->vdev_stat.vs_write_errors = 0; vd->vdev_stat.vs_checksum_errors = 0; - for (int c = 0; c < vd->vdev_children; c++) + for (c = 0; c < vd->vdev_children; c++) vdev_clear(spa, vd->vdev_child[c]); /* @@ -2448,6 +2468,7 @@ void vdev_get_stats(vdev_t *vd, vdev_stat_t *vs) { vdev_t *rvd = vd->vdev_spa->spa_root_vdev; + int c, t; mutex_enter(&vd->vdev_stat_lock); bcopy(&vd->vdev_stat, vs, sizeof (*vs)); @@ -2463,12 +2484,12 @@ vdev_get_stats(vdev_t *vd, vdev_stat_t *vs) * over all top-level vdevs (i.e. the direct children of the root). 
*/ if (vd == rvd) { - for (int c = 0; c < rvd->vdev_children; c++) { + for (c = 0; c < rvd->vdev_children; c++) { vdev_t *cvd = rvd->vdev_child[c]; vdev_stat_t *cvs = &cvd->vdev_stat; mutex_enter(&vd->vdev_stat_lock); - for (int t = 0; t < ZIO_TYPES; t++) { + for (t = 0; t < ZIO_TYPES; t++) { vs->vs_ops[t] += cvs->vs_ops[t]; vs->vs_bytes[t] += cvs->vs_bytes[t]; } @@ -2492,8 +2513,9 @@ void vdev_scan_stat_init(vdev_t *vd) { vdev_stat_t *vs = &vd->vdev_stat; + int c; - for (int c = 0; c < vd->vdev_children; c++) + for (c = 0; c < vd->vdev_children; c++) vdev_scan_stat_init(vd->vdev_child[c]); mutex_enter(&vd->vdev_stat_lock); @@ -2834,9 +2856,10 @@ vdev_propagate_state(vdev_t *vd) int degraded = 0, faulted = 0; int corrupted = 0; vdev_t *child; + int c; if (vd->vdev_children > 0) { - for (int c = 0; c < vd->vdev_children; c++) { + for (c = 0; c < vd->vdev_children; c++) { child = vd->vdev_child[c]; /* @@ -3026,6 +3049,8 @@ vdev_set_state(vdev_t *vd, boolean_t isopen, vdev_state_t state, vdev_aux_t aux) boolean_t vdev_is_bootable(vdev_t *vd) { + int c; + if (!vd->vdev_ops->vdev_op_leaf) { char *vdev_type = vd->vdev_ops->vdev_op_type; @@ -3040,7 +3065,7 @@ vdev_is_bootable(vdev_t *vd) return (B_FALSE); } - for (int c = 0; c < vd->vdev_children; c++) { + for (c = 0; c < vd->vdev_children; c++) { if (!vdev_is_bootable(vd->vdev_child[c])) return (B_FALSE); } @@ -3056,13 +3081,13 @@ vdev_is_bootable(vdev_t *vd) void vdev_load_log_state(vdev_t *nvd, vdev_t *ovd) { - spa_t *spa = nvd->vdev_spa; + int c; ASSERT(nvd->vdev_top->vdev_islog); ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); ASSERT3U(nvd->vdev_guid, ==, ovd->vdev_guid); - for (int c = 0; c < nvd->vdev_children; c++) + for (c = 0; c < nvd->vdev_children; c++) vdev_load_log_state(nvd->vdev_child[c], ovd->vdev_child[c]); if (nvd->vdev_ops->vdev_op_leaf) { @@ -3084,11 +3109,13 @@ vdev_load_log_state(vdev_t *nvd, vdev_t *ovd) boolean_t vdev_log_state_valid(vdev_t *vd) { + int c; + if (vd->vdev_ops->vdev_op_leaf && !vd->vdev_faulted && !vd->vdev_removed) return (B_TRUE); - for (int c = 0; c < vd->vdev_children; c++) + for (c = 0; c < vd->vdev_children; c++) if (vdev_log_state_valid(vd->vdev_child[c])) return (B_TRUE); diff --git a/module/zfs/vdev_label.c b/module/zfs/vdev_label.c index c08ed8ba0..528710eaf 100644 --- a/module/zfs/vdev_label.c +++ b/module/zfs/vdev_label.c @@ -437,6 +437,7 @@ vdev_label_read_config(vdev_t *vd) zio_t *zio; int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE; + int l; ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL); @@ -446,7 +447,7 @@ vdev_label_read_config(vdev_t *vd) vp = zio_buf_alloc(sizeof (vdev_phys_t)); retry: - for (int l = 0; l < VDEV_LABELS; l++) { + for (l = 0; l < VDEV_LABELS; l++) { zio = zio_root(spa, NULL, NULL, flags); @@ -611,10 +612,12 @@ vdev_label_init(vdev_t *vd, uint64_t crtxg, vdev_labeltype_t reason) int error; uint64_t spare_guid, l2cache_guid; int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL; + int c, l; + vdev_t *pvd; ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); - for (int c = 0; c < vd->vdev_children; c++) + for (c = 0; c < vd->vdev_children; c++) if ((error = vdev_label_init(vd->vdev_child[c], crtxg, reason)) != 0) return (error); @@ -650,7 +653,7 @@ vdev_label_init(vdev_t *vd, uint64_t crtxg, vdev_labeltype_t reason) vd->vdev_guid += guid_delta; - for (vdev_t *pvd = vd; pvd != NULL; pvd = pvd->vdev_parent) + for (pvd = vd; pvd != NULL; pvd = pvd->vdev_parent) pvd->vdev_guid_sum += 
guid_delta; /* @@ -670,7 +673,7 @@ vdev_label_init(vdev_t *vd, uint64_t crtxg, vdev_labeltype_t reason) vd->vdev_guid += guid_delta; - for (vdev_t *pvd = vd; pvd != NULL; pvd = pvd->vdev_parent) + for (pvd = vd; pvd != NULL; pvd = pvd->vdev_parent) pvd->vdev_guid_sum += guid_delta; /* @@ -770,7 +773,7 @@ vdev_label_init(vdev_t *vd, uint64_t crtxg, vdev_labeltype_t reason) retry: zio = zio_root(spa, NULL, NULL, flags); - for (int l = 0; l < VDEV_LABELS; l++) { + for (l = 0; l < VDEV_LABELS; l++) { vdev_label_write(zio, vd, l, vp, offsetof(vdev_label_t, vl_vdev_phys), @@ -881,6 +884,7 @@ vdev_uberblock_load(zio_t *zio, vdev_t *vd, uberblock_t *ubbest) vdev_t *rvd = spa->spa_root_vdev; int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_TRYHARD; + int c, l, n; if (vd == rvd) { ASSERT(zio == NULL); @@ -891,12 +895,12 @@ vdev_uberblock_load(zio_t *zio, vdev_t *vd, uberblock_t *ubbest) ASSERT(zio != NULL); - for (int c = 0; c < vd->vdev_children; c++) + for (c = 0; c < vd->vdev_children; c++) vdev_uberblock_load(zio, vd->vdev_child[c], ubbest); if (vd->vdev_ops->vdev_op_leaf && vdev_readable(vd)) { - for (int l = 0; l < VDEV_LABELS; l++) { - for (int n = 0; n < VDEV_UBERBLOCK_COUNT(vd); n++) { + for (l = 0; l < VDEV_LABELS; l++) { + for (n = 0; n < VDEV_UBERBLOCK_COUNT(vd); n++) { vdev_label_read(zio, vd, l, zio_buf_alloc(VDEV_UBERBLOCK_SIZE(vd)), VDEV_UBERBLOCK_OFFSET(vd, n), @@ -932,9 +936,9 @@ static void vdev_uberblock_sync(zio_t *zio, uberblock_t *ub, vdev_t *vd, int flags) { uberblock_t *ubbuf; - int n; + int c, l, n; - for (int c = 0; c < vd->vdev_children; c++) + for (c = 0; c < vd->vdev_children; c++) vdev_uberblock_sync(zio, ub, vd->vdev_child[c], flags); if (!vd->vdev_ops->vdev_op_leaf) @@ -949,7 +953,7 @@ vdev_uberblock_sync(zio_t *zio, uberblock_t *ub, vdev_t *vd, int flags) bzero(ubbuf, VDEV_UBERBLOCK_SIZE(vd)); *ubbuf = *ub; - for (int l = 0; l < VDEV_LABELS; l++) + for (l = 0; l < VDEV_LABELS; l++) vdev_label_write(zio, vd, l, ubbuf, VDEV_UBERBLOCK_OFFSET(vd, n), VDEV_UBERBLOCK_SIZE(vd), vdev_uberblock_sync_done, zio->io_private, @@ -964,10 +968,11 @@ vdev_uberblock_sync_list(vdev_t **svd, int svdcount, uberblock_t *ub, int flags) spa_t *spa = svd[0]->vdev_spa; zio_t *zio; uint64_t good_writes = 0; + int v; zio = zio_root(spa, NULL, &good_writes, flags); - for (int v = 0; v < svdcount; v++) + for (v = 0; v < svdcount; v++) vdev_uberblock_sync(zio, ub, svd[v], flags); (void) zio_wait(zio); @@ -979,7 +984,7 @@ vdev_uberblock_sync_list(vdev_t **svd, int svdcount, uberblock_t *ub, int flags) */ zio = zio_root(spa, NULL, NULL, flags); - for (int v = 0; v < svdcount; v++) + for (v = 0; v < svdcount; v++) zio_flush(zio, svd[v]); (void) zio_wait(zio); @@ -1032,8 +1037,9 @@ vdev_label_sync(zio_t *zio, vdev_t *vd, int l, uint64_t txg, int flags) vdev_phys_t *vp; char *buf; size_t buflen; + int c; - for (int c = 0; c < vd->vdev_children; c++) + for (c = 0; c < vd->vdev_children; c++) vdev_label_sync(zio, vd->vdev_child[c], l, txg, flags); if (!vd->vdev_ops->vdev_op_leaf) @@ -1081,12 +1087,13 @@ vdev_label_sync_list(spa_t *spa, int l, uint64_t txg, int flags) zio = zio_root(spa, NULL, NULL, flags); for (vd = list_head(dl); vd != NULL; vd = list_next(dl, vd)) { - uint64_t *good_writes = kmem_zalloc(sizeof (uint64_t), - KM_SLEEP); + uint64_t *good_writes; + zio_t *vio; ASSERT(!vd->vdev_ishole); - zio_t *vio = zio_null(zio, spa, NULL, + good_writes = kmem_zalloc(sizeof (uint64_t), KM_SLEEP); + vio = zio_null(zio, spa, NULL, (vd->vdev_islog || vd->vdev_aux != 
NULL) ? vdev_label_sync_ignore_done : vdev_label_sync_top_done, good_writes, flags); diff --git a/module/zfs/vdev_mirror.c b/module/zfs/vdev_mirror.c index 698c0275d..47181d439 100644 --- a/module/zfs/vdev_mirror.c +++ b/module/zfs/vdev_mirror.c @@ -131,6 +131,7 @@ vdev_mirror_open(vdev_t *vd, uint64_t *asize, uint64_t *ashift) { int numerrors = 0; int lasterror = 0; + int c; if (vd->vdev_children == 0) { vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL; @@ -139,7 +140,7 @@ vdev_mirror_open(vdev_t *vd, uint64_t *asize, uint64_t *ashift) vdev_open_children(vd); - for (int c = 0; c < vd->vdev_children; c++) { + for (c = 0; c < vd->vdev_children; c++) { vdev_t *cvd = vd->vdev_child[c]; if (cvd->vdev_open_error) { @@ -163,7 +164,9 @@ vdev_mirror_open(vdev_t *vd, uint64_t *asize, uint64_t *ashift) static void vdev_mirror_close(vdev_t *vd) { - for (int c = 0; c < vd->vdev_children; c++) + int c; + + for (c = 0; c < vd->vdev_children; c++) vdev_close(vd->vdev_child[c]); } @@ -311,9 +314,9 @@ vdev_mirror_io_start(zio_t *zio) static int vdev_mirror_worst_error(mirror_map_t *mm) { - int error[2] = { 0, 0 }; + int c, error[2] = { 0, 0 }; - for (int c = 0; c < mm->mm_children; c++) { + for (c = 0; c < mm->mm_children; c++) { mirror_child_t *mc = &mm->mm_child[c]; int s = mc->mc_speculative; error[s] = zio_worst_error(error[s], mc->mc_error); diff --git a/module/zfs/vdev_queue.c b/module/zfs/vdev_queue.c index 5a0d3ee97..8f8663526 100644 --- a/module/zfs/vdev_queue.c +++ b/module/zfs/vdev_queue.c @@ -383,12 +383,13 @@ void vdev_queue_io_done(zio_t *zio) { vdev_queue_t *vq = &zio->io_vd->vdev_queue; + int i; mutex_enter(&vq->vq_lock); avl_remove(&vq->vq_pending_tree, zio); - for (int i = 0; i < zfs_vdev_ramp_rate; i++) { + for (i = 0; i < zfs_vdev_ramp_rate; i++) { zio_t *nio = vdev_queue_io_to_issue(vq, zfs_vdev_max_pending); if (nio == NULL) break; diff --git a/module/zfs/vdev_raidz.c b/module/zfs/vdev_raidz.c index 526385e01..af2de40ac 100644 --- a/module/zfs/vdev_raidz.c +++ b/module/zfs/vdev_raidz.c @@ -1692,9 +1692,9 @@ static uint64_t raidz_corrected[1 << VDEV_RAIDZ_MAXPARITY]; static int vdev_raidz_worst_error(raidz_map_t *rm) { - int error = 0; + int c, error = 0; - for (int c = 0; c < rm->rm_cols; c++) + for (c = 0; c < rm->rm_cols; c++) error = zio_worst_error(error, rm->rm_col[c].rc_error); return (error); diff --git a/module/zfs/vdev_root.c b/module/zfs/vdev_root.c index 879f78f3a..d7ca99a3d 100644 --- a/module/zfs/vdev_root.c +++ b/module/zfs/vdev_root.c @@ -54,6 +54,7 @@ vdev_root_open(vdev_t *vd, uint64_t *asize, uint64_t *ashift) { int lasterror = 0; int numerrors = 0; + int c; if (vd->vdev_children == 0) { vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL; @@ -62,7 +63,7 @@ vdev_root_open(vdev_t *vd, uint64_t *asize, uint64_t *ashift) vdev_open_children(vd); - for (int c = 0; c < vd->vdev_children; c++) { + for (c = 0; c < vd->vdev_children; c++) { vdev_t *cvd = vd->vdev_child[c]; if (cvd->vdev_open_error && !cvd->vdev_islog) { @@ -85,7 +86,9 @@ vdev_root_open(vdev_t *vd, uint64_t *asize, uint64_t *ashift) static void vdev_root_close(vdev_t *vd) { - for (int c = 0; c < vd->vdev_children; c++) + int c; + + for (c = 0; c < vd->vdev_children; c++) vdev_close(vd->vdev_child[c]); } diff --git a/module/zfs/zfs_znode.c b/module/zfs/zfs_znode.c index e1e4e9e03..018f12d96 100644 --- a/module/zfs/zfs_znode.c +++ b/module/zfs/zfs_znode.c @@ -1919,8 +1919,8 @@ zfs_grab_sa_handle(objset_t *osp, uint64_t obj, sa_handle_t **hdlp, dmu_object_info_from_db(*db, &doi); if ((doi.doi_bonus_type != DMU_OT_SA && 
doi.doi_bonus_type != DMU_OT_ZNODE) || - doi.doi_bonus_type == DMU_OT_ZNODE && - doi.doi_bonus_size < sizeof (znode_phys_t)) { + (doi.doi_bonus_type == DMU_OT_ZNODE && + doi.doi_bonus_size < sizeof (znode_phys_t))) { sa_buf_rele(*db, FTAG); return (ENOTSUP); } diff --git a/module/zfs/zil.c b/module/zfs/zil.c index c66313ff6..e89a24e41 100644 --- a/module/zfs/zil.c +++ b/module/zfs/zil.c @@ -1620,6 +1620,7 @@ zilog_t * zil_alloc(objset_t *os, zil_header_t *zh_phys) { zilog_t *zilog; + int i; zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP); @@ -1634,7 +1635,7 @@ zil_alloc(objset_t *os, zil_header_t *zh_phys) mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL); - for (int i = 0; i < TXG_SIZE; i++) { + for (i = 0; i < TXG_SIZE; i++) { mutex_init(&zilog->zl_itxg[i].itxg_lock, NULL, MUTEX_DEFAULT, NULL); } @@ -1662,6 +1663,7 @@ void zil_free(zilog_t *zilog) { lwb_t *head_lwb; + int i; zilog->zl_stop_sync = 1; @@ -1683,7 +1685,7 @@ zil_free(zilog_t *zilog) ASSERT(list_is_empty(&zilog->zl_itx_commit_list)); list_destroy(&zilog->zl_itx_commit_list); - for (int i = 0; i < TXG_SIZE; i++) { + for (i = 0; i < TXG_SIZE; i++) { /* * It's possible for an itx to be generated that doesn't dirty * a txg (e.g. ztest TX_TRUNCATE). So there's no zil_clean() diff --git a/module/zfs/zio.c b/module/zfs/zio.c index 1ba2330bd..0bf0f6e12 100644 --- a/module/zfs/zio.c +++ b/module/zfs/zio.c @@ -366,6 +366,7 @@ void zio_add_child(zio_t *pio, zio_t *cio) { zio_link_t *zl = kmem_cache_alloc(zio_link_cache, KM_SLEEP); + int w; /* * Logical I/Os can have logical, gang, or vdev children. @@ -383,7 +384,7 @@ zio_add_child(zio_t *pio, zio_t *cio) ASSERT(pio->io_state[ZIO_WAIT_DONE] == 0); - for (int w = 0; w < ZIO_WAIT_TYPES; w++) + for (w = 0; w < ZIO_WAIT_TYPES; w++) pio->io_children[cio->io_child_type][w] += !cio->io_state[w]; list_insert_head(&pio->io_child_list, zl); @@ -993,8 +994,8 @@ zio_write_bp_init(zio_t *zio) */ if (bp->blk_birth == zio->io_txg && BP_GET_PSIZE(bp) == psize && pass > SYNC_PASS_REWRITE) { - ASSERT(psize != 0); enum zio_stage gang_stages = zio->io_pipeline & ZIO_GANG_STAGES; + ASSERT(psize != 0); zio->io_pipeline = ZIO_REWRITE_PIPELINE | gang_stages; zio->io_flags |= ZIO_FLAG_IO_REWRITE; } else { @@ -1081,8 +1082,9 @@ zio_taskq_member(zio_t *zio, enum zio_taskq_type q) { kthread_t *executor = zio->io_executor; spa_t *spa = zio->io_spa; + zio_type_t t; - for (zio_type_t t = 0; t < ZIO_TYPES; t++) + for (t = 0; t < ZIO_TYPES; t++) if (taskq_member(spa->spa_zio_taskq[t][q], executor)) return (B_TRUE); @@ -1223,6 +1225,7 @@ static void zio_reexecute(zio_t *pio) { zio_t *cio, *cio_next; + int c, w; ASSERT(pio->io_child_type == ZIO_CHILD_LOGICAL); ASSERT(pio->io_orig_stage == ZIO_STAGE_OPEN); @@ -1234,9 +1237,9 @@ zio_reexecute(zio_t *pio) pio->io_pipeline = pio->io_orig_pipeline; pio->io_reexecute = 0; pio->io_error = 0; - for (int w = 0; w < ZIO_WAIT_TYPES; w++) + for (w = 0; w < ZIO_WAIT_TYPES; w++) pio->io_state[w] = 0; - for (int c = 0; c < ZIO_CHILD_TYPES; c++) + for (c = 0; c < ZIO_CHILD_TYPES; c++) pio->io_child_error[c] = 0; if (IO_IS_ALLOCATING(pio)) @@ -1252,7 +1255,7 @@ zio_reexecute(zio_t *pio) for (cio = zio_walk_children(pio); cio != NULL; cio = cio_next) { cio_next = zio_walk_children(pio); mutex_enter(&pio->io_lock); - for (int w = 0; w < ZIO_WAIT_TYPES; w++) + for (w = 0; w < ZIO_WAIT_TYPES; w++) pio->io_children[cio->io_child_type][w]++; mutex_exit(&pio->io_lock); zio_reexecute(cio); @@ -1488,8 +1491,9 @@ static void zio_gang_node_free(zio_gang_node_t **gnpp) { zio_gang_node_t *gn = 
*gnpp; + int g; - for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) + for (g = 0; g < SPA_GBH_NBLKPTRS; g++) ASSERT(gn->gn_child[g] == NULL); zio_buf_free(gn->gn_gbh, SPA_GANGBLOCKSIZE); @@ -1501,11 +1505,12 @@ static void zio_gang_tree_free(zio_gang_node_t **gnpp) { zio_gang_node_t *gn = *gnpp; + int g; if (gn == NULL) return; - for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) + for (g = 0; g < SPA_GBH_NBLKPTRS; g++) zio_gang_tree_free(&gn->gn_child[g]); zio_gang_node_free(gnpp); @@ -1530,6 +1535,7 @@ zio_gang_tree_assemble_done(zio_t *zio) zio_t *gio = zio->io_gang_leader; zio_gang_node_t *gn = zio->io_private; blkptr_t *bp = zio->io_bp; + int g; ASSERT(gio == zio_unique_parent(zio)); ASSERT(zio->io_child_count == 0); @@ -1544,7 +1550,7 @@ zio_gang_tree_assemble_done(zio_t *zio) ASSERT(zio->io_size == SPA_GANGBLOCKSIZE); ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC); - for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) { + for (g = 0; g < SPA_GBH_NBLKPTRS; g++) { blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g]; if (!BP_IS_GANG(gbp)) continue; @@ -1557,6 +1563,7 @@ zio_gang_tree_issue(zio_t *pio, zio_gang_node_t *gn, blkptr_t *bp, void *data) { zio_t *gio = pio->io_gang_leader; zio_t *zio; + int g; ASSERT(BP_IS_GANG(bp) == !!gn); ASSERT(BP_GET_CHECKSUM(bp) == BP_GET_CHECKSUM(gio->io_bp)); @@ -1571,7 +1578,7 @@ zio_gang_tree_issue(zio_t *pio, zio_gang_node_t *gn, blkptr_t *bp, void *data) if (gn != NULL) { ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC); - for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) { + for (g = 0; g < SPA_GBH_NBLKPTRS; g++) { blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g]; if (BP_IS_HOLE(gbp)) continue; @@ -1631,6 +1638,7 @@ zio_write_gang_member_ready(zio_t *zio) dva_t *cdva = zio->io_bp->blk_dva; dva_t *pdva = pio->io_bp->blk_dva; uint64_t asize; + int d; if (BP_IS_HOLE(zio->io_bp)) return; @@ -1644,7 +1652,7 @@ zio_write_gang_member_ready(zio_t *zio) ASSERT3U(BP_GET_NDVAS(zio->io_bp), <=, BP_GET_NDVAS(pio->io_bp)); mutex_enter(&pio->io_lock); - for (int d = 0; d < BP_GET_NDVAS(zio->io_bp); d++) { + for (d = 0; d < BP_GET_NDVAS(zio->io_bp); d++) { ASSERT(DVA_GET_GANG(&pdva[d])); asize = DVA_GET_ASIZE(&pdva[d]); asize += DVA_GET_ASIZE(&cdva[d]); @@ -1668,7 +1676,7 @@ zio_write_gang_block(zio_t *pio) int copies = gio->io_prop.zp_copies; int gbh_copies = MIN(copies + 1, spa_max_replication(spa)); zio_prop_t zp; - int error; + int g, error; error = metaslab_alloc(spa, spa_normal_class(spa), SPA_GANGBLOCKSIZE, bp, gbh_copies, txg, pio == gio ? NULL : gio->io_bp, @@ -1698,7 +1706,7 @@ zio_write_gang_block(zio_t *pio) /* * Create and nowait the gang children. 
*/ - for (int g = 0; resid != 0; resid -= lsize, g++) { + for (g = 0; resid != 0; resid -= lsize, g++) { lsize = P2ROUNDUP(resid / (SPA_GBH_NBLKPTRS - g), SPA_MINBLOCKSIZE); ASSERT(lsize >= SPA_MINBLOCKSIZE && lsize <= resid); @@ -1756,6 +1764,7 @@ static int zio_ddt_read_start(zio_t *zio) { blkptr_t *bp = zio->io_bp; + int p; ASSERT(BP_GET_DEDUP(bp)); ASSERT(BP_GET_PSIZE(bp) == zio->io_size); @@ -1774,7 +1783,7 @@ zio_ddt_read_start(zio_t *zio) if (ddp_self == NULL) return (ZIO_PIPELINE_CONTINUE); - for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) { + for (p = 0; p < DDT_PHYS_TYPES; p++, ddp++) { if (ddp->ddp_phys_birth == 0 || ddp == ddp_self) continue; ddt_bp_create(ddt->ddt_checksum, &dde->dde_key, ddp, @@ -1836,6 +1845,7 @@ static boolean_t zio_ddt_collision(zio_t *zio, ddt_t *ddt, ddt_entry_t *dde) { spa_t *spa = zio->io_spa; + int p; /* * Note: we compare the original data, not the transformed data, @@ -1843,7 +1853,7 @@ zio_ddt_collision(zio_t *zio, ddt_t *ddt, ddt_entry_t *dde) * pushed the I/O transforms. That's an important optimization * because otherwise we'd compress/encrypt all dmu_sync() data twice. */ - for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) { + for (p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) { zio_t *lio = dde->dde_lead_zio[p]; if (lio != NULL) { @@ -1853,7 +1863,7 @@ zio_ddt_collision(zio_t *zio, ddt_t *ddt, ddt_entry_t *dde) } } - for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) { + for (p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) { ddt_phys_t *ddp = &dde->dde_phys[p]; if (ddp->ddp_phys_birth != 0) { @@ -2161,6 +2171,8 @@ zio_dva_claim(zio_t *zio) static void zio_dva_unallocate(zio_t *zio, zio_gang_node_t *gn, blkptr_t *bp) { + int g; + ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp)); ASSERT(zio->io_bp_override == NULL); @@ -2168,7 +2180,7 @@ zio_dva_unallocate(zio_t *zio, zio_gang_node_t *gn, blkptr_t *bp) metaslab_free(zio->io_spa, bp, bp->blk_birth, B_TRUE); if (gn != NULL) { - for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) { + for (g = 0; g < SPA_GBH_NBLKPTRS; g++) { zio_dva_unallocate(zio, gn->gn_child[g], &gn->gn_gbh->zg_blkptr[g]); } @@ -2655,6 +2667,7 @@ zio_done(zio_t *zio) vdev_t *vd = zio->io_vd; uint64_t psize = zio->io_size; zio_t *pio, *pio_next; + int c, w; /* * If our children haven't all completed, @@ -2666,8 +2679,8 @@ zio_done(zio_t *zio) zio_wait_for_children(zio, ZIO_CHILD_LOGICAL, ZIO_WAIT_DONE)) return (ZIO_PIPELINE_STOP); - for (int c = 0; c < ZIO_CHILD_TYPES; c++) - for (int w = 0; w < ZIO_WAIT_TYPES; w++) + for (c = 0; c < ZIO_CHILD_TYPES; c++) + for (w = 0; w < ZIO_WAIT_TYPES; w++) ASSERT(zio->io_children[c][w] == 0); if (bp != NULL) { diff --git a/module/zfs/zrlock.c b/module/zfs/zrlock.c index ec94b0855..36134431f 100644 --- a/module/zfs/zrlock.c +++ b/module/zfs/zrlock.c @@ -163,9 +163,11 @@ zrl_exit(zrlock_t *zrl) int zrl_refcount(zrlock_t *zrl) { + int n; + ASSERT(zrl->zr_refcount > ZRL_DESTROYED); - int n = (int)zrl->zr_refcount; + n = (int)zrl->zr_refcount; return (n <= 0 ? 0 : n); } |
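Nearly every hunk in this commit is the same mechanical transform: C90 requires all declarations in a block to appear before its first statement, and it does not allow a declaration in a for-loop initializer at all, so counters such as c, t, l, g, and p move from the loop header to the top of the enclosing function. A minimal sketch of the before/after shape, using a hypothetical node_t tree in place of the real vdev_t (compile with something like gcc -std=c89 -pedantic -Wall to see the original diagnostics):

/* Hypothetical node type standing in for vdev_t; not a ZFS type. */
typedef struct node {
	struct node **n_child;
	int n_children;
} node_t;

#if 0
/* C99 style, as removed by this commit; gcc -std=c89 -pedantic
 * rejects the declaration inside the for-loop initializer. */
void
visit(node_t *n)
{
	for (int c = 0; c < n->n_children; c++)
		visit(n->n_child[c]);
}
#endif

/* C90 style, as added: the counter is declared at the start of
 * the block, before any statement. */
void
visit(node_t *n)
{
	int c;

	for (c = 0; c < n->n_children; c++)
		visit(n->n_child[c]);
}

The recursive child walk mirrors the shape of vdev_rele(), vdev_load(), vdev_clear(), and the other vdev tree traversals patched above.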
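A second form of the same C90 rule drives the reshuffling in vdev_label_sync_list() and zrl_refcount(): a declaration may not follow a statement in the same block (gcc's -Wdeclaration-after-statement, "mixed declarations and code"), so an initializer that depends on an earlier call is split into a declaration up top and an assignment below. A sketch with invented helpers; alloc_counter() and count_writes() are stand-ins, not kmem or zio APIs:

#include <stdlib.h>

/* Stand-in for kmem_zalloc(sizeof (uint64_t), KM_SLEEP). */
static unsigned long *
alloc_counter(void)
{
	return (calloc(1, sizeof (unsigned long)));
}

int
count_writes(void)
{
	unsigned long *good_writes;	/* C90: declared before any statement */
	int n;

	good_writes = alloc_counter();	/* assigned afterwards, as in the
					 * vdev_label_sync_list() hunk */
	if (good_writes == NULL)
		return (0);
	n = (int)*good_writes;	/* as in zrl_refcount(), where the
				 * initializer moved below the ASSERT */
	free(good_writes);
	return (n <= 0 ? 0 : n);
}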
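The zfs_znode.c hunk is the one change here that is not about declaration placement. Because && binds more tightly than ||, the old condition already parsed as (A && B) || (C && D); the added parentheses around the second conjunction leave the semantics untouched and exist to silence gcc's -Wparentheses ("suggest parentheses around '&&' within '||'") while making the grouping explicit. A toy illustration; a, b, c, and d are arbitrary flags, not the DMU bonus-type tests:

int
precedence_demo(void)
{
	int a = 0, b = 1, c = 1, d = 1;
	int implicit, grouped;

	implicit = a && b || c && d;		/* -Wparentheses warns here */
	grouped = (a && b) || (c && d);		/* same value, no warning */

	return (implicit == grouped);		/* always 1 */
}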
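Separately, for readers untangling the gang-block loop in zio_write_gang_block() above: each iteration carves the remaining residual into roughly equal pieces for the gang children not yet allocated, rounding each piece up to the minimum block size. The sketch below hard-codes 512 bytes and 3 children as stand-ins for SPA_MINBLOCKSIZE and SPA_GBH_NBLKPTRS, and P2ROUNDUP is the usual Solaris power-of-two round-up macro:

#include <stdio.h>

/* Round x up to a multiple of align; align must be a power of two. */
#define P2ROUNDUP(x, align)	(-(-(x) & -(align)))

#define MINBLOCK	512UL	/* stand-in for SPA_MINBLOCKSIZE */
#define NBLKPTRS	3	/* stand-in for SPA_GBH_NBLKPTRS */

int
main(void)
{
	unsigned long resid = 2560;	/* five 512-byte sectors to place */
	unsigned long lsize;
	int g;

	/* Same shape as the zio_write_gang_block() loop. */
	for (g = 0; resid != 0; resid -= lsize, g++) {
		lsize = P2ROUNDUP(resid / (NBLKPTRS - g), MINBLOCK);
		printf("gang child %d: %lu bytes\n", g, lsize);
	}
	/* Prints 1024, 1024, 512: every chunk is sector-aligned and
	 * the last child takes the smaller tail. */
	return (0);
}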