Diffstat (limited to 'module/zfs')
-rw-r--r--  module/zfs/arc.c                      24
-rw-r--r--  module/zfs/dbuf.c                     59
-rw-r--r--  module/zfs/dmu_objset.c                6
-rw-r--r--  module/zfs/dmu_tx.c                    4
-rw-r--r--  module/zfs/dnode.c                    15
-rw-r--r--  module/zfs/dnode_sync.c               14
-rw-r--r--  module/zfs/dsl_dataset.c              16
-rw-r--r--  module/zfs/dsl_deleg.c                16
-rw-r--r--  module/zfs/dsl_dir.c                   9
-rw-r--r--  module/zfs/dsl_pool.c                  5
-rw-r--r--  module/zfs/dsl_prop.c                  2
-rw-r--r--  module/zfs/dsl_synctask.c              2
-rw-r--r--  module/zfs/gzip.c                     27
-rw-r--r--  module/zfs/include/sys/spa.h           2
-rw-r--r--  module/zfs/include/sys/zfs_znode.h     4
-rw-r--r--  module/zfs/lzjb.c                      4
-rw-r--r--  module/zfs/refcount.c                  6
-rw-r--r--  module/zfs/spa.c                      12
-rw-r--r--  module/zfs/spa_history.c               2
-rw-r--r--  module/zfs/spa_misc.c                  2
-rw-r--r--  module/zfs/space_map.c                 3
-rw-r--r--  module/zfs/txg.c                      14
-rw-r--r--  module/zfs/vdev.c                      5
-rw-r--r--  module/zfs/vdev_cache.c                3
-rw-r--r--  module/zfs/vdev_label.c                2
-rw-r--r--  module/zfs/vdev_raidz.c                2
-rw-r--r--  module/zfs/zap.c                       6
-rw-r--r--  module/zfs/zap_leaf.c                  2
-rw-r--r--  module/zfs/zap_micro.c                 4
-rw-r--r--  module/zfs/zfs_byteswap.c              2
-rw-r--r--  module/zfs/zfs_ioctl.c                14
-rw-r--r--  module/zfs/zfs_znode.c                 2
-rw-r--r--  module/zfs/zio.c                       2
33 files changed, 181 insertions(+), 111 deletions(-)
diff --git a/module/zfs/arc.c b/module/zfs/arc.c
index 73aecb285..a4dd7b874 100644
--- a/module/zfs/arc.c
+++ b/module/zfs/arc.c
@@ -1528,7 +1528,7 @@ arc_evict(arc_state_t *state, spa_t *spa, int64_t bytes, boolean_t recycle,
mutex_exit(&state->arcs_mtx);
if (bytes_evicted < bytes)
- dprintf("only evicted %lld bytes from %x",
+ dprintf("only evicted %lld bytes from %x\n",
(longlong_t)bytes_evicted, state);
if (skipped)
@@ -1628,7 +1628,7 @@ top:
}
if (bytes_deleted < bytes)
- dprintf("only deleted %lld bytes from %p",
+ dprintf("only deleted %lld bytes from %p\n",
(longlong_t)bytes_deleted, state);
}
@@ -1892,7 +1892,7 @@ arc_kmem_reap_now(arc_reclaim_strategy_t strat)
static void
arc_reclaim_thread(void)
{
- clock_t growtime = 0;
+ int64_t growtime = 0;
arc_reclaim_strategy_t last_reclaim = ARC_RECLAIM_CONS;
callb_cpr_t cpr;
@@ -1915,12 +1915,12 @@ arc_reclaim_thread(void)
}
/* reset the growth delay for every reclaim */
- growtime = lbolt + (arc_grow_retry * hz);
+ growtime = lbolt64 + (arc_grow_retry * hz);
arc_kmem_reap_now(last_reclaim);
arc_warm = B_TRUE;
- } else if (arc_no_grow && lbolt >= growtime) {
+ } else if (arc_no_grow && lbolt64 >= growtime) {
arc_no_grow = FALSE;
}
@@ -2453,7 +2453,7 @@ arc_read_nolock(zio_t *pio, spa_t *spa, blkptr_t *bp,
uint32_t *arc_flags, const zbookmark_t *zb)
{
arc_buf_hdr_t *hdr;
- arc_buf_t *buf;
+ arc_buf_t *buf = NULL;
kmutex_t *hash_lock;
zio_t *rzio;
@@ -2836,7 +2836,7 @@ arc_release(arc_buf_t *buf, void *tag)
arc_buf_hdr_t *hdr;
kmutex_t *hash_lock;
l2arc_buf_hdr_t *l2hdr;
- uint64_t buf_size;
+ uint64_t buf_size = 0;
rw_enter(&buf->b_lock, RW_WRITER);
hdr = buf->b_hdr;
@@ -3309,7 +3309,7 @@ arc_tempreserve_space(uint64_t reserve, uint64_t txg)
* in order to compress/encrypt/etc the data. We therefore need to
* make sure that there is sufficient available memory for this.
*/
- if (error = arc_memory_throttle(reserve, txg))
+ if ((error = arc_memory_throttle(reserve, txg)))
return (error);
/*
@@ -3491,6 +3491,7 @@ arc_fini(void)
mutex_destroy(&arc_mru_ghost->arcs_mtx);
mutex_destroy(&arc_mfu->arcs_mtx);
mutex_destroy(&arc_mfu_ghost->arcs_mtx);
+ mutex_destroy(&arc_l2c_only->arcs_mtx);
mutex_destroy(&zfs_write_limit_lock);
@@ -3879,7 +3880,7 @@ l2arc_read_done(zio_t *zio)
static list_t *
l2arc_list_locked(int list_num, kmutex_t **lock)
{
- list_t *list;
+ list_t *list = NULL;
ASSERT(list_num >= 0 && list_num <= 3);
@@ -4052,10 +4053,11 @@ l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz)
list_t *list;
uint64_t passed_sz, write_sz, buf_sz, headroom;
void *buf_data;
- kmutex_t *hash_lock, *list_lock;
+ kmutex_t *hash_lock, *list_lock = NULL;
boolean_t have_lock, full;
l2arc_write_callback_t *cb;
zio_t *pio, *wzio;
+ int try;
ASSERT(dev->l2ad_vdev != NULL);
@@ -4069,7 +4071,7 @@ l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz)
* Copy buffers for L2ARC writing.
*/
mutex_enter(&l2arc_buflist_mtx);
- for (int try = 0; try <= 3; try++) {
+ for (try = 0; try <= 3; try++) {
list = l2arc_list_locked(try, &list_lock);
passed_sz = 0;
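
The double parentheses added above in (error = arc_memory_throttle(reserve, txg)) recur through nearly every file below (the while ((x = list_head(...))) loops and similar): they tell gcc's -Wparentheses that the assignment used as a condition is intentional. A minimal userspace sketch of the idiom, not taken from the patch; strtok_r() merely stands in for producers like list_head() or txg_list_remove() that return NULL when exhausted.

#include <stdio.h>
#include <string.h>

int
main(void)
{
	char buf[] = "arc,dbuf,dnode";
	char *p = buf, *save = NULL, *tok;

	/*
	 * Assign and test in one expression; the extra parentheses mark
	 * the assignment as deliberate so -Wparentheses stays quiet.
	 */
	while ((tok = strtok_r(p, ",", &save)) != NULL) {
		printf("%s\n", tok);
		p = NULL;	/* later calls continue from the saved position */
	}
	return (0);
}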
diff --git a/module/zfs/dbuf.c b/module/zfs/dbuf.c
index d04610317..4ca8c98fc 100644
--- a/module/zfs/dbuf.c
+++ b/module/zfs/dbuf.c
@@ -110,11 +110,13 @@ dbuf_find(dnode_t *dn, uint8_t level, uint64_t blkid)
{
dbuf_hash_table_t *h = &dbuf_hash_table;
objset_impl_t *os = dn->dn_objset;
- uint64_t obj = dn->dn_object;
- uint64_t hv = DBUF_HASH(os, obj, level, blkid);
- uint64_t idx = hv & h->hash_table_mask;
+ uint64_t obj, hv, idx;
dmu_buf_impl_t *db;
+ obj = dn->dn_object;
+ hv = DBUF_HASH(os, obj, level, blkid);
+ idx = hv & h->hash_table_mask;
+
mutex_enter(DBUF_HASH_MUTEX(h, idx));
for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
if (DBUF_EQUAL(db, os, obj, level, blkid)) {
@@ -143,11 +145,13 @@ dbuf_hash_insert(dmu_buf_impl_t *db)
objset_impl_t *os = db->db_objset;
uint64_t obj = db->db.db_object;
int level = db->db_level;
- uint64_t blkid = db->db_blkid;
- uint64_t hv = DBUF_HASH(os, obj, level, blkid);
- uint64_t idx = hv & h->hash_table_mask;
+ uint64_t blkid, hv, idx;
dmu_buf_impl_t *dbf;
+ blkid = db->db_blkid;
+ hv = DBUF_HASH(os, obj, level, blkid);
+ idx = hv & h->hash_table_mask;
+
mutex_enter(DBUF_HASH_MUTEX(h, idx));
for (dbf = h->hash_table[idx]; dbf != NULL; dbf = dbf->db_hash_next) {
if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
@@ -177,11 +181,13 @@ static void
dbuf_hash_remove(dmu_buf_impl_t *db)
{
dbuf_hash_table_t *h = &dbuf_hash_table;
- uint64_t hv = DBUF_HASH(db->db_objset, db->db.db_object,
- db->db_level, db->db_blkid);
- uint64_t idx = hv & h->hash_table_mask;
+ uint64_t hv, idx;
dmu_buf_impl_t *dbf, **dbp;
+ hv = DBUF_HASH(db->db_objset, db->db.db_object,
+ db->db_level, db->db_blkid);
+ idx = hv & h->hash_table_mask;
+
/*
* We mustn't hold db_mtx to maintain lock ordering:
* DBUF_HASH_MUTEX > db_mtx.
@@ -1575,7 +1581,7 @@ dbuf_prefetch(dnode_t *dn, uint64_t blkid)
return;
/* dbuf_find() returns with db_mtx held */
- if (db = dbuf_find(dn, 0, blkid)) {
+ if ((db = dbuf_find(dn, 0, blkid))) {
if (refcount_count(&db->db_holds) > 0) {
/*
* This dbuf is active. We assume that it is
@@ -1736,8 +1742,7 @@ dbuf_create_bonus(dnode_t *dn)
void
dbuf_add_ref(dmu_buf_impl_t *db, void *tag)
{
- int64_t holds = refcount_add(&db->db_holds, tag);
- ASSERT(holds > 1);
+ VERIFY(refcount_add(&db->db_holds, tag) > 1);
}
#pragma weak dmu_buf_rele = dbuf_rele
@@ -1895,7 +1900,11 @@ dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db)
}
}
-static void
+/* dbuf_sync_indirect() is called recursively from dbuf_sync_list() so it
+ * is critical that we not allow the compiler to inline this function into
+ * dbuf_sync_list() thereby drastically bloating the stack usage.
+ */
+noinline static void
dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = dr->dr_dbuf;
@@ -1935,7 +1944,11 @@ dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
zio_nowait(zio);
}
-static void
+/* dbuf_sync_leaf() is called recursively from dbuf_sync_list() so it is
+ * critical that we not allow the compiler to inline this function into
+ * dbuf_sync_list() thereby drastically bloating the stack usage.
+ */
+noinline static void
dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
{
arc_buf_t **datap = &dr->dt.dl.dr_data;
@@ -1988,6 +2001,10 @@ dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
drp = &(*drp)->dr_next;
ASSERT(dr->dr_next == NULL);
*drp = dr->dr_next;
+ if (dr->dr_dbuf->db_level != 0) {
+ mutex_destroy(&dr->dt.di.dr_mtx);
+ list_destroy(&dr->dt.di.dr_children);
+ }
kmem_free(dr, sizeof (dbuf_dirty_record_t));
ASSERT(db->db_dirtycnt > 0);
db->db_dirtycnt -= 1;
@@ -2094,7 +2111,7 @@ dbuf_sync_list(list_t *list, dmu_tx_t *tx)
{
dbuf_dirty_record_t *dr;
- while (dr = list_head(list)) {
+ while ((dr = list_head(list))) {
if (dr->dr_zio != NULL) {
/*
* If we find an already initialized zio then we
@@ -2326,17 +2343,15 @@ dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
ASSERT(arc_released(db->db_buf));
}
} else {
- dnode_t *dn = db->db_dnode;
-
ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
- ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
+ ASSERT3U(db->db.db_size, ==,
+ 1<<db->db_dnode->dn_phys->dn_indblkshift);
if (!BP_IS_HOLE(db->db_blkptr)) {
- int epbs =
- dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
db->db.db_size);
- ASSERT3U(dn->dn_phys->dn_maxblkid
- >> (db->db_level * epbs), >=, db->db_blkid);
+ ASSERT3U(db->db_dnode->dn_phys->dn_maxblkid >> (db->db_level *
+ (db->db_dnode->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT)),
+ >=, db->db_blkid);
arc_set_callback(db->db_buf, dbuf_do_evict, db);
}
mutex_destroy(&dr->dt.di.dr_mtx);
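
The new comments above dbuf_sync_indirect() and dbuf_sync_leaf() carry the reasoning for the noinline markers: both helpers recurse back through dbuf_sync_list(), so letting the compiler inline them would fold their stack frames into every level of the recursion. A rough standalone sketch of that pattern under the same assumption; node_t, sync_node() and walk_list() are illustrative names, not the real dbuf structures.

#include <string.h>

#ifndef noinline
#define	noinline	__attribute__((noinline))
#endif

typedef struct node {
	struct node *child;	/* next level of the tree, or NULL */
	char payload[64];
} node_t;

static void walk_list(node_t *n);

/*
 * Keep this frame out of walk_list(): it is re-entered once per level,
 * so inlining it would multiply the 256-byte buffer by the tree depth.
 */
noinline static void
sync_node(node_t *n)
{
	char scratch[256];	/* per-call working space */

	memcpy(scratch, n->payload, sizeof (n->payload));
	if (n->child != NULL)
		walk_list(n->child);	/* indirect recursion */
}

static void
walk_list(node_t *n)
{
	sync_node(n);
}

int
main(void)
{
	node_t leaf = { NULL, "leaf" };
	node_t root = { &leaf, "root" };

	walk_list(&root);
	return (0);
}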
diff --git a/module/zfs/dmu_objset.c b/module/zfs/dmu_objset.c
index 7981e0682..b4c4c98cc 100644
--- a/module/zfs/dmu_objset.c
+++ b/module/zfs/dmu_objset.c
@@ -803,7 +803,7 @@ dmu_objset_snapshot(char *fsname, char *snapname, boolean_t recursive)
}
out:
- while (osn = list_head(&sn.objsets)) {
+ while ((osn = list_head(&sn.objsets))) {
list_remove(&sn.objsets, osn);
zil_resume(dmu_objset_zil(osn->os));
dmu_objset_close(osn->os);
@@ -823,7 +823,7 @@ dmu_objset_sync_dnodes(list_t *list, dmu_tx_t *tx)
{
dnode_t *dn;
- while (dn = list_head(list)) {
+ while ((dn = list_head(list))) {
ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
ASSERT(dn->dn_dbuf->db_data_pending);
/*
@@ -931,7 +931,7 @@ dmu_objset_sync(objset_impl_t *os, zio_t *pio, dmu_tx_t *tx)
dmu_objset_sync_dnodes(&os->os_dirty_dnodes[txgoff], tx);
list = &os->os_meta_dnode->dn_dirty_records[txgoff];
- while (dr = list_head(list)) {
+ while ((dr = list_head(list))) {
ASSERT(dr->dr_dbuf->db_level == 0);
list_remove(list, dr);
if (dr->dr_zio)
diff --git a/module/zfs/dmu_tx.c b/module/zfs/dmu_tx.c
index ea065951f..42ce01d23 100644
--- a/module/zfs/dmu_tx.c
+++ b/module/zfs/dmu_tx.c
@@ -1001,7 +1001,7 @@ dmu_tx_commit(dmu_tx_t *tx)
ASSERT(tx->tx_txg != 0);
- while (txh = list_head(&tx->tx_holds)) {
+ while ((txh = list_head(&tx->tx_holds))) {
dnode_t *dn = txh->txh_dnode;
list_remove(&tx->tx_holds, txh);
@@ -1050,7 +1050,7 @@ dmu_tx_abort(dmu_tx_t *tx)
ASSERT(tx->tx_txg == 0);
- while (txh = list_head(&tx->tx_holds)) {
+ while ((txh = list_head(&tx->tx_holds))) {
dnode_t *dn = txh->txh_dnode;
list_remove(&tx->tx_holds, txh);
diff --git a/module/zfs/dnode.c b/module/zfs/dnode.c
index e77834d60..0e15e258e 100644
--- a/module/zfs/dnode.c
+++ b/module/zfs/dnode.c
@@ -40,7 +40,9 @@ static int free_range_compar(const void *node1, const void *node2);
static kmem_cache_t *dnode_cache;
+#ifndef NDEBUG
static dnode_phys_t dnode_phys_zero;
+#endif
int zfs_default_bs = SPA_MINBLOCKSHIFT;
int zfs_default_ibs = DN_MAX_INDBLKSHIFT;
@@ -56,6 +58,8 @@ dnode_cons(void *arg, void *unused, int kmflag)
rw_init(&dn->dn_struct_rwlock, NULL, RW_DEFAULT, NULL);
mutex_init(&dn->dn_mtx, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&dn->dn_dbufs_mtx, NULL, MUTEX_DEFAULT, NULL);
+ cv_init(&dn->dn_notxholds, NULL, CV_DEFAULT, NULL);
+
refcount_create(&dn->dn_holds);
refcount_create(&dn->dn_tx_holds);
@@ -84,6 +88,7 @@ dnode_dest(void *arg, void *unused)
rw_destroy(&dn->dn_struct_rwlock);
mutex_destroy(&dn->dn_mtx);
mutex_destroy(&dn->dn_dbufs_mtx);
+ cv_destroy(&dn->dn_notxholds);
refcount_destroy(&dn->dn_holds);
refcount_destroy(&dn->dn_tx_holds);
@@ -130,7 +135,6 @@ dnode_verify(dnode_t *dn)
}
if (dn->dn_phys->dn_type != DMU_OT_NONE || dn->dn_allocated_txg != 0) {
int i;
- ASSERT3U(dn->dn_indblkshift, >=, 0);
ASSERT3U(dn->dn_indblkshift, <=, SPA_MAXBLOCKSHIFT);
if (dn->dn_datablkshift) {
ASSERT3U(dn->dn_datablkshift, >=, SPA_MINBLOCKSHIFT);
@@ -273,7 +277,6 @@ dnode_create(objset_impl_t *os, dnode_phys_t *dnp, dmu_buf_impl_t *db,
uint64_t object)
{
dnode_t *dn = kmem_cache_alloc(dnode_cache, KM_SLEEP);
- (void) dnode_cons(dn, NULL, 0); /* XXX */
dn->dn_objset = os;
dn->dn_object = object;
@@ -518,11 +521,12 @@ dnode_buf_pageout(dmu_buf_t *db, void *arg)
for (i = 0; i < epb; i++) {
dnode_t *dn = children_dnodes[i];
- int n;
if (dn == NULL)
continue;
#ifdef ZFS_DEBUG
+ {
+ int n;
/*
* If there are holds on this dnode, then there should
* be holds on the dnode's containing dbuf as well; thus
@@ -535,6 +539,7 @@ dnode_buf_pageout(dmu_buf_t *db, void *arg)
for (n = 0; n < TXG_SIZE; n++)
ASSERT(!list_link_active(&dn->dn_dirty_link[n]));
+ }
#endif
children_dnodes[i] = NULL;
dnode_destroy(dn);
@@ -601,8 +606,8 @@ dnode_hold_impl(objset_impl_t *os, uint64_t object, int flag,
dnode_t **winner;
children_dnodes = kmem_zalloc(epb * sizeof (dnode_t *),
KM_SLEEP);
- if (winner = dmu_buf_set_user(&db->db, children_dnodes, NULL,
- dnode_buf_pageout)) {
+ if ((winner = dmu_buf_set_user(&db->db, children_dnodes, NULL,
+ dnode_buf_pageout))) {
kmem_free(children_dnodes, epb * sizeof (dnode_t *));
children_dnodes = winner;
}
diff --git a/module/zfs/dnode_sync.c b/module/zfs/dnode_sync.c
index 779cfc96f..f4154bd91 100644
--- a/module/zfs/dnode_sync.c
+++ b/module/zfs/dnode_sync.c
@@ -319,8 +319,10 @@ dnode_sync_free_range(dnode_t *dn, uint64_t blkid, uint64_t nblks, dmu_tx_t *tx)
ASSERT3U(blkid + nblks, <=, dn->dn_phys->dn_nblkptr);
(void) free_blocks(dn, bp + blkid, nblks, tx);
if (trunc) {
+#ifndef NDEBUG
uint64_t off = (dn->dn_phys->dn_maxblkid + 1) *
(dn->dn_phys->dn_datablkszsec << SPA_MINBLOCKSHIFT);
+#endif
dn->dn_phys->dn_maxblkid = (blkid ? blkid - 1 : 0);
ASSERT(off < dn->dn_phys->dn_maxblkid ||
dn->dn_phys->dn_maxblkid == 0 ||
@@ -349,8 +351,10 @@ dnode_sync_free_range(dnode_t *dn, uint64_t blkid, uint64_t nblks, dmu_tx_t *tx)
dbuf_rele(db, FTAG);
}
if (trunc) {
+#ifndef NDEBUG
uint64_t off = (dn->dn_phys->dn_maxblkid + 1) *
(dn->dn_phys->dn_datablkszsec << SPA_MINBLOCKSHIFT);
+#endif
dn->dn_phys->dn_maxblkid = (blkid ? blkid - 1 : 0);
ASSERT(off < dn->dn_phys->dn_maxblkid ||
dn->dn_phys->dn_maxblkid == 0 ||
@@ -405,9 +409,13 @@ dnode_evict_dbufs(dnode_t *dn)
if (evicting)
delay(1);
pass++;
- ASSERT(pass < 100); /* sanity check */
+ if ((pass % 100) == 0)
+ dprintf("Exceeded %d passes evicting dbufs\n", pass);
} while (progress);
+ if (pass >= 100)
+ dprintf("Required %d passes to evict dbufs\n", pass);
+
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
if (dn->dn_bonus && refcount_is_zero(&dn->dn_bonus->db_holds)) {
mutex_enter(&dn->dn_bonus->db_mtx);
@@ -422,7 +430,7 @@ dnode_undirty_dbufs(list_t *list)
{
dbuf_dirty_record_t *dr;
- while (dr = list_head(list)) {
+ while ((dr = list_head(list))) {
dmu_buf_impl_t *db = dr->dr_dbuf;
uint64_t txg = dr->dr_txg;
@@ -440,6 +448,8 @@ dnode_undirty_dbufs(list_t *list)
} else {
mutex_exit(&db->db_mtx);
dnode_undirty_dbufs(&dr->dt.di.dr_children);
+ mutex_destroy(&dr->dt.di.dr_mtx);
+ list_destroy(&dr->dt.di.dr_children);
}
kmem_free(dr, sizeof (dbuf_dirty_record_t));
dbuf_rele(db, (void *)(uintptr_t)txg);
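
The #ifndef NDEBUG guards above exist because off is computed solely for the ASSERT() that follows; once assertions compile out, the variable would be set but never read and gcc warns about it. A toy analogue using the standard assert() macro; shrink_maxblkid() is an illustrative name and not code from the patch.

#include <assert.h>
#include <stdio.h>

static unsigned long
shrink_maxblkid(unsigned long maxblkid, unsigned long blkid)
{
#ifndef NDEBUG
	/* Only referenced by the assert(); guard it the same way. */
	unsigned long off = maxblkid + 1;
#endif
	maxblkid = (blkid ? blkid - 1 : 0);
	assert(off > maxblkid || maxblkid == 0);
	return (maxblkid);
}

int
main(void)
{
	printf("%lu\n", shrink_maxblkid(10, 4));
	return (0);
}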
diff --git a/module/zfs/dsl_dataset.c b/module/zfs/dsl_dataset.c
index 93ea8aa11..4f9083f17 100644
--- a/module/zfs/dsl_dataset.c
+++ b/module/zfs/dsl_dataset.c
@@ -78,11 +78,13 @@ parent_delta(dsl_dataset_t *ds, int64_t delta)
void
dsl_dataset_block_born(dsl_dataset_t *ds, blkptr_t *bp, dmu_tx_t *tx)
{
- int used = bp_get_dasize(tx->tx_pool->dp_spa, bp);
- int compressed = BP_GET_PSIZE(bp);
- int uncompressed = BP_GET_UCSIZE(bp);
+ int used, compressed, uncompressed;
int64_t delta;
+ used = bp_get_dasize(tx->tx_pool->dp_spa, bp);
+ compressed = BP_GET_PSIZE(bp);
+ uncompressed = BP_GET_UCSIZE(bp);
+
dprintf_bp(bp, "born, ds=%p\n", ds);
ASSERT(dmu_tx_is_syncing(tx));
@@ -351,7 +353,7 @@ dsl_dataset_get_ref(dsl_pool_t *dp, uint64_t dsobj, void *tag,
return (err);
ds = dmu_buf_get_user(dbuf);
if (ds == NULL) {
- dsl_dataset_t *winner;
+ dsl_dataset_t *winner = NULL;
ds = kmem_zalloc(sizeof (dsl_dataset_t), KM_SLEEP);
ds->ds_dbuf = dbuf;
@@ -1995,10 +1997,8 @@ dsl_dataset_space(dsl_dataset_t *ds,
boolean_t
dsl_dataset_modified_since_lastsnap(dsl_dataset_t *ds)
{
- dsl_pool_t *dp = ds->ds_dir->dd_pool;
-
- ASSERT(RW_LOCK_HELD(&dp->dp_config_rwlock) ||
- dsl_pool_sync_context(dp));
+ ASSERT(RW_LOCK_HELD(&(ds->ds_dir->dd_pool)->dp_config_rwlock) ||
+ dsl_pool_sync_context(ds->ds_dir->dd_pool));
if (ds->ds_prev == NULL)
return (B_FALSE);
if (ds->ds_phys->ds_bp.blk_birth >
diff --git a/module/zfs/dsl_deleg.c b/module/zfs/dsl_deleg.c
index da5d15787..24f68b89d 100644
--- a/module/zfs/dsl_deleg.c
+++ b/module/zfs/dsl_deleg.c
@@ -101,13 +101,13 @@ dsl_deleg_can_allow(char *ddname, nvlist_t *nvp, cred_t *cr)
if ((error = dsl_deleg_access(ddname, ZFS_DELEG_PERM_ALLOW, cr)) != 0)
return (error);
- while (whopair = nvlist_next_nvpair(nvp, whopair)) {
+ while ((whopair = nvlist_next_nvpair(nvp, whopair))) {
nvlist_t *perms;
nvpair_t *permpair = NULL;
VERIFY(nvpair_value_nvlist(whopair, &perms) == 0);
- while (permpair = nvlist_next_nvpair(perms, permpair)) {
+ while ((permpair = nvlist_next_nvpair(perms, permpair))) {
const char *perm = nvpair_name(permpair);
if (strcmp(perm, ZFS_DELEG_PERM_ALLOW) == 0)
@@ -138,7 +138,7 @@ dsl_deleg_can_unallow(char *ddname, nvlist_t *nvp, cred_t *cr)
(void) snprintf(idstr, sizeof (idstr), "%lld",
(longlong_t)crgetuid(cr));
- while (whopair = nvlist_next_nvpair(nvp, whopair)) {
+ while ((whopair = nvlist_next_nvpair(nvp, whopair))) {
zfs_deleg_who_type_t type = nvpair_name(whopair)[0];
if (type != ZFS_DELEG_USER &&
@@ -166,7 +166,7 @@ dsl_deleg_set_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
DMU_OT_DSL_PERMS, DMU_OT_NONE, 0, tx);
}
- while (whopair = nvlist_next_nvpair(nvp, whopair)) {
+ while ((whopair = nvlist_next_nvpair(nvp, whopair))) {
const char *whokey = nvpair_name(whopair);
nvlist_t *perms;
nvpair_t *permpair = NULL;
@@ -181,7 +181,7 @@ dsl_deleg_set_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
whokey, 8, 1, &jumpobj, tx) == 0);
}
- while (permpair = nvlist_next_nvpair(perms, permpair)) {
+ while ((permpair = nvlist_next_nvpair(perms, permpair))) {
const char *perm = nvpair_name(permpair);
uint64_t n = 0;
@@ -207,7 +207,7 @@ dsl_deleg_unset_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
if (zapobj == 0)
return;
- while (whopair = nvlist_next_nvpair(nvp, whopair)) {
+ while ((whopair = nvlist_next_nvpair(nvp, whopair))) {
const char *whokey = nvpair_name(whopair);
nvlist_t *perms;
nvpair_t *permpair = NULL;
@@ -229,7 +229,7 @@ dsl_deleg_unset_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
if (zap_lookup(mos, zapobj, whokey, 8, 1, &jumpobj) != 0)
continue;
- while (permpair = nvlist_next_nvpair(perms, permpair)) {
+ while ((permpair = nvlist_next_nvpair(perms, permpair))) {
const char *perm = nvpair_name(permpair);
uint64_t n = 0;
@@ -266,7 +266,7 @@ dsl_deleg_set(const char *ddname, nvlist_t *nvp, boolean_t unset)
return (ENOTSUP);
}
- while (whopair = nvlist_next_nvpair(nvp, whopair))
+ while ((whopair = nvlist_next_nvpair(nvp, whopair)))
blocks_modified++;
error = dsl_sync_task_do(dd->dd_pool, NULL,
diff --git a/module/zfs/dsl_dir.c b/module/zfs/dsl_dir.c
index 48d87f97f..9a76d989a 100644
--- a/module/zfs/dsl_dir.c
+++ b/module/zfs/dsl_dir.c
@@ -48,11 +48,10 @@ static void
dsl_dir_evict(dmu_buf_t *db, void *arg)
{
dsl_dir_t *dd = arg;
- dsl_pool_t *dp = dd->dd_pool;
int t;
for (t = 0; t < TXG_SIZE; t++) {
- ASSERT(!txg_list_member(&dp->dp_dirty_dirs, dd, t));
+ ASSERT(!txg_list_member(&dd->dd_pool->dp_dirty_dirs, dd, t));
ASSERT(dd->dd_tempreserved[t] == 0);
ASSERT(dd->dd_space_towrite[t] == 0);
}
@@ -870,7 +869,7 @@ dsl_dir_tempreserve_clear(void *tr_cookie, dmu_tx_t *tx)
if (tr_cookie == NULL)
return;
- while (tr = list_head(tr_list)) {
+ while ((tr = list_head(tr_list))) {
if (tr->tr_dp) {
dsl_pool_tempreserve_clear(tr->tr_dp, tr->tr_size, tx);
} else if (tr->tr_ds) {
@@ -1221,8 +1220,8 @@ dsl_dir_rename_check(void *arg1, void *arg2, dmu_tx_t *tx)
if (closest_common_ancestor(dd, ra->newparent) == dd)
return (EINVAL);
- if (err = dsl_dir_transfer_possible(dd->dd_parent,
- ra->newparent, myspace))
+ if ((err = dsl_dir_transfer_possible(dd->dd_parent,
+ ra->newparent, myspace)))
return (err);
}
diff --git a/module/zfs/dsl_pool.c b/module/zfs/dsl_pool.c
index dacc57c81..f4275ea06 100644
--- a/module/zfs/dsl_pool.c
+++ b/module/zfs/dsl_pool.c
@@ -219,6 +219,7 @@ dsl_pool_close(dsl_pool_t *dp)
txg_list_destroy(&dp->dp_dirty_datasets);
txg_list_destroy(&dp->dp_dirty_dirs);
+ txg_list_destroy(&dp->dp_sync_tasks);
list_destroy(&dp->dp_synced_datasets);
arc_flush(dp->dp_spa);
@@ -297,7 +298,7 @@ dsl_pool_sync(dsl_pool_t *dp, uint64_t txg)
dp->dp_read_overhead = 0;
zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
- while (ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) {
+ while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg))) {
if (!list_link_active(&ds->ds_synced_link))
list_insert_tail(&dp->dp_synced_datasets, ds);
else
@@ -386,7 +387,7 @@ dsl_pool_zil_clean(dsl_pool_t *dp)
{
dsl_dataset_t *ds;
- while (ds = list_head(&dp->dp_synced_datasets)) {
+ while ((ds = list_head(&dp->dp_synced_datasets))) {
list_remove(&dp->dp_synced_datasets, ds);
ASSERT(ds->ds_user_ptr != NULL);
zil_clean(((objset_impl_t *)ds->ds_user_ptr)->os_zil);
diff --git a/module/zfs/dsl_prop.c b/module/zfs/dsl_prop.c
index 212acbbc5..bb19f3e9e 100644
--- a/module/zfs/dsl_prop.c
+++ b/module/zfs/dsl_prop.c
@@ -370,7 +370,7 @@ dsl_prop_set_sync(void *arg1, void *arg2, cred_t *cr, dmu_tx_t *tx)
if (psa->numints == 0) {
int err = zap_remove(mos, zapobj, psa->name, tx);
- ASSERT(err == 0 || err == ENOENT);
+ VERIFY(0 == err || ENOENT == err);
if (isint) {
VERIFY(0 == dsl_prop_get_ds(ds,
psa->name, 8, 1, &intval, NULL));
diff --git a/module/zfs/dsl_synctask.c b/module/zfs/dsl_synctask.c
index 21100225a..9bb9c4580 100644
--- a/module/zfs/dsl_synctask.c
+++ b/module/zfs/dsl_synctask.c
@@ -139,7 +139,7 @@ dsl_sync_task_group_destroy(dsl_sync_task_group_t *dstg)
{
dsl_sync_task_t *dst;
- while (dst = list_head(&dstg->dstg_tasks)) {
+ while ((dst = list_head(&dstg->dstg_tasks))) {
list_remove(&dstg->dstg_tasks, dst);
kmem_free(dst, sizeof (dsl_sync_task_t));
}
diff --git a/module/zfs/gzip.c b/module/zfs/gzip.c
index b257d4af7..a60772719 100644
--- a/module/zfs/gzip.c
+++ b/module/zfs/gzip.c
@@ -28,22 +28,35 @@
#include <sys/debug.h>
#include <sys/types.h>
-#include <sys/zmod.h>
#ifdef _KERNEL
+
#include <sys/systm.h>
-#else
+#include <sys/zmod.h>
+
+typedef size_t zlen_t;
+#define compress_func z_compress_level
+#define uncompress_func z_uncompress
+
+#else /* _KERNEL */
+
#include <strings.h>
+#include <zlib.h>
+
+typedef uLongf zlen_t;
+#define compress_func compress2
+#define uncompress_func uncompress
+
#endif
size_t
gzip_compress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n)
{
- size_t dstlen = d_len;
+ zlen_t dstlen = d_len;
ASSERT(d_len <= s_len);
- if (z_compress_level(d_start, &dstlen, s_start, s_len, n) != Z_OK) {
+ if (compress_func(d_start, &dstlen, s_start, s_len, n) != Z_OK) {
if (d_len != s_len)
return (s_len);
@@ -51,18 +64,18 @@ gzip_compress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n)
return (s_len);
}
- return (dstlen);
+ return ((size_t) dstlen);
}
/*ARGSUSED*/
int
gzip_decompress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n)
{
- size_t dstlen = d_len;
+ zlen_t dstlen = d_len;
ASSERT(d_len >= s_len);
- if (z_uncompress(d_start, &dstlen, s_start, s_len) != Z_OK)
+ if (uncompress_func(d_start, &dstlen, s_start, s_len) != Z_OK)
return (-1);
return (0);
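
The zlen_t typedef and the compress_func/uncompress_func macros introduced above let the same gzip_compress()/gzip_decompress() bodies build against the in-kernel z_compress_level()/z_uncompress() and, in userspace, against stock zlib, whose length parameters are uLongf rather than size_t. A small standalone round-trip against userspace zlib (assumed to be linked with -lz); this is a sketch of the userspace side, not part of the patch.

#include <stdio.h>
#include <string.h>
#include <zlib.h>

int
main(void)
{
	static const char src[] = "zfs zfs zfs zfs zfs zfs zfs zfs";
	Bytef dst[128], out[128];
	uLongf dlen = sizeof (dst);	/* updated in place, hence uLongf * */
	uLongf olen = sizeof (out);

	if (compress2(dst, &dlen, (const Bytef *)src, sizeof (src), 6) != Z_OK)
		return (1);
	if (uncompress(out, &olen, dst, dlen) != Z_OK)
		return (1);

	printf("%zu -> %lu -> %lu bytes\n", sizeof (src),
	    (unsigned long)dlen, (unsigned long)olen);
	return (memcmp(src, out, sizeof (src)) != 0);
}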
diff --git a/module/zfs/include/sys/spa.h b/module/zfs/include/sys/spa.h
index 24b3ca447..1029b3829 100644
--- a/module/zfs/include/sys/spa.h
+++ b/module/zfs/include/sys/spa.h
@@ -522,7 +522,7 @@ extern void vdev_cache_stat_fini(void);
/* Initialization and termination */
extern void spa_init(int flags);
extern void spa_fini(void);
-extern void spa_boot_init();
+extern void spa_boot_init(void);
/* properties */
extern int spa_prop_set(spa_t *spa, nvlist_t *nvp);
diff --git a/module/zfs/include/sys/zfs_znode.h b/module/zfs/include/sys/zfs_znode.h
index a5416525c..db40968fa 100644
--- a/module/zfs/include/sys/zfs_znode.h
+++ b/module/zfs/include/sys/zfs_znode.h
@@ -305,8 +305,8 @@ extern int zfs_rezget(znode_t *);
extern void zfs_zinactive(znode_t *);
extern void zfs_znode_delete(znode_t *, dmu_tx_t *);
extern void zfs_znode_free(znode_t *);
-extern void zfs_remove_op_tables();
-extern int zfs_create_op_tables();
+extern void zfs_remove_op_tables(void);
+extern int zfs_create_op_tables(void);
extern int zfs_sync(vfs_t *vfsp, short flag, cred_t *cr);
extern dev_t zfs_cmpldev(uint64_t);
extern int zfs_get_zplprop(objset_t *os, zfs_prop_t prop, uint64_t *value);
diff --git a/module/zfs/lzjb.c b/module/zfs/lzjb.c
index 7fcde8475..4132406bd 100644
--- a/module/zfs/lzjb.c
+++ b/module/zfs/lzjb.c
@@ -51,7 +51,7 @@ lzjb_compress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n)
{
uchar_t *src = s_start;
uchar_t *dst = d_start;
- uchar_t *cpy, *copymap;
+ uchar_t *cpy, *copymap = NULL;
int copymask = 1 << (NBBY - 1);
int mlen, offset;
uint16_t *hp;
@@ -104,7 +104,7 @@ lzjb_decompress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n)
uchar_t *src = s_start;
uchar_t *dst = d_start;
uchar_t *d_end = (uchar_t *)d_start + d_len;
- uchar_t *cpy, copymap;
+ uchar_t *cpy, copymap = 0;
int copymask = 1 << (NBBY - 1);
while (dst < d_end) {
diff --git a/module/zfs/refcount.c b/module/zfs/refcount.c
index f1b3b23fe..2ce8e4356 100644
--- a/module/zfs/refcount.c
+++ b/module/zfs/refcount.c
@@ -75,13 +75,13 @@ refcount_destroy_many(refcount_t *rc, uint64_t number)
reference_t *ref;
ASSERT(rc->rc_count == number);
- while (ref = list_head(&rc->rc_list)) {
+ while ((ref = list_head(&rc->rc_list))) {
list_remove(&rc->rc_list, ref);
kmem_cache_free(reference_cache, ref);
}
list_destroy(&rc->rc_list);
- while (ref = list_head(&rc->rc_removed)) {
+ while ((ref = list_head(&rc->rc_removed))) {
list_remove(&rc->rc_removed, ref);
kmem_cache_free(reference_history_cache, ref->ref_removed);
kmem_cache_free(reference_cache, ref);
@@ -113,7 +113,7 @@ refcount_count(refcount_t *rc)
int64_t
refcount_add_many(refcount_t *rc, uint64_t number, void *holder)
{
- reference_t *ref;
+ reference_t *ref = NULL;
int64_t count;
if (reference_tracking_enable) {
diff --git a/module/zfs/spa.c b/module/zfs/spa.c
index fb1b96f8b..bd4ce35b2 100644
--- a/module/zfs/spa.c
+++ b/module/zfs/spa.c
@@ -274,7 +274,7 @@ spa_prop_validate(spa_t *spa, nvlist_t *props)
{
nvpair_t *elem;
int error = 0, reset_bootfs = 0;
- uint64_t objnum;
+ uint64_t objnum = 0;
elem = NULL;
while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
@@ -393,6 +393,8 @@ spa_prop_validate(spa_t *spa, nvlist_t *props)
strcmp(slash, "/..") == 0)
error = EINVAL;
break;
+ default:
+ break;
}
if (error)
@@ -817,7 +819,7 @@ spa_load_l2cache(spa_t *spa)
uint_t nl2cache;
int i, j, oldnvdevs;
uint64_t guid, size;
- vdev_t *vd, **oldvdevs, **newvdevs;
+ vdev_t *vd, **oldvdevs, **newvdevs = NULL;
spa_aux_vdev_t *sav = &spa->spa_l2cache;
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
@@ -3006,7 +3008,7 @@ spa_vdev_detach(spa_t *spa, uint64_t guid, int replace_done)
vdev_t *rvd = spa->spa_root_vdev;
vdev_t *vd, *pvd, *cvd, *tvd;
boolean_t unspare = B_FALSE;
- uint64_t unspare_guid;
+ uint64_t unspare_guid = 0;
size_t len;
txg = spa_vdev_enter(spa);
@@ -3994,7 +3996,7 @@ spa_sync(spa_t *spa, uint64_t txg)
dsl_pool_sync(dp, txg);
dirty_vdevs = 0;
- while (vd = txg_list_remove(&spa->spa_vdev_txg_list, txg)) {
+ while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, txg))) {
vdev_sync(vd, txg);
dirty_vdevs++;
}
@@ -4078,7 +4080,7 @@ spa_sync(spa_t *spa, uint64_t txg)
/*
* Update usable space statistics.
*/
- while (vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg)))
+ while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg))))
vdev_sync_done(vd, txg);
/*
diff --git a/module/zfs/spa_history.c b/module/zfs/spa_history.c
index c997240c1..8b422cf8b 100644
--- a/module/zfs/spa_history.c
+++ b/module/zfs/spa_history.c
@@ -176,7 +176,7 @@ spa_history_write(spa_t *spa, void *buf, uint64_t len, spa_history_phys_t *shpp,
}
static char *
-spa_history_zone()
+spa_history_zone(void)
{
#ifdef _KERNEL
return (curproc->p_zone->zone_name);
diff --git a/module/zfs/spa_misc.c b/module/zfs/spa_misc.c
index 36046e6df..e188b2da1 100644
--- a/module/zfs/spa_misc.c
+++ b/module/zfs/spa_misc.c
@@ -1329,7 +1329,7 @@ spa_busy(void)
}
void
-spa_boot_init()
+spa_boot_init(void)
{
spa_config_load();
}
diff --git a/module/zfs/space_map.c b/module/zfs/space_map.c
index 0a1fd59ea..e0119d1af 100644
--- a/module/zfs/space_map.c
+++ b/module/zfs/space_map.c
@@ -63,6 +63,8 @@ space_map_create(space_map_t *sm, uint64_t start, uint64_t size, uint8_t shift,
avl_create(&sm->sm_root, space_map_seg_compare,
sizeof (space_seg_t), offsetof(struct space_seg, ss_node));
+ cv_init(&sm->sm_load_cv, NULL, CV_DEFAULT, NULL);
+
sm->sm_start = start;
sm->sm_size = size;
sm->sm_shift = shift;
@@ -74,6 +76,7 @@ space_map_destroy(space_map_t *sm)
{
ASSERT(!sm->sm_loaded && !sm->sm_loading);
VERIFY3U(sm->sm_space, ==, 0);
+ cv_destroy(&sm->sm_load_cv);
avl_destroy(&sm->sm_root);
}
diff --git a/module/zfs/txg.c b/module/zfs/txg.c
index f3b0fc92e..b150ebd3c 100644
--- a/module/zfs/txg.c
+++ b/module/zfs/txg.c
@@ -67,6 +67,13 @@ txg_init(dsl_pool_t *dp, uint64_t txg)
rw_init(&tx->tx_suspend, NULL, RW_DEFAULT, NULL);
mutex_init(&tx->tx_sync_lock, NULL, MUTEX_DEFAULT, NULL);
+ cv_init(&tx->tx_sync_more_cv, NULL, CV_DEFAULT, NULL);
+ cv_init(&tx->tx_sync_done_cv, NULL, CV_DEFAULT, NULL);
+ cv_init(&tx->tx_quiesce_more_cv, NULL, CV_DEFAULT, NULL);
+ cv_init(&tx->tx_quiesce_done_cv, NULL, CV_DEFAULT, NULL);
+ cv_init(&tx->tx_timeout_cv, NULL, CV_DEFAULT, NULL);
+ cv_init(&tx->tx_exit_cv, NULL, CV_DEFAULT, NULL);
+
tx->tx_open_txg = txg;
}
@@ -81,6 +88,13 @@ txg_fini(dsl_pool_t *dp)
ASSERT(tx->tx_threads == 0);
+ cv_destroy(&tx->tx_sync_more_cv);
+ cv_destroy(&tx->tx_sync_done_cv);
+ cv_destroy(&tx->tx_quiesce_more_cv);
+ cv_destroy(&tx->tx_quiesce_done_cv);
+ cv_destroy(&tx->tx_timeout_cv);
+ cv_destroy(&tx->tx_exit_cv);
+
rw_destroy(&tx->tx_suspend);
mutex_destroy(&tx->tx_sync_lock);
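
On OpenSolaris a zeroed kcondvar_t was implicitly usable, but under the Linux SPL every condition variable needs an explicit cv_init() and a matching cv_destroy(), which is what the txg.c hunks add. A userspace analogue of the same lifecycle discipline using pthreads; the tx_state_t layout and the function names here are illustrative only.

#include <pthread.h>

typedef struct tx_state {
	pthread_mutex_t	tx_sync_lock;
	pthread_cond_t	tx_sync_more_cv;
	pthread_cond_t	tx_sync_done_cv;
} tx_state_t;

static void
tx_state_init(tx_state_t *tx)
{
	pthread_mutex_init(&tx->tx_sync_lock, NULL);
	pthread_cond_init(&tx->tx_sync_more_cv, NULL);
	pthread_cond_init(&tx->tx_sync_done_cv, NULL);
}

static void
tx_state_fini(tx_state_t *tx)
{
	/* Tear down in reverse order of initialization. */
	pthread_cond_destroy(&tx->tx_sync_done_cv);
	pthread_cond_destroy(&tx->tx_sync_more_cv);
	pthread_mutex_destroy(&tx->tx_sync_lock);
}

int
main(void)
{
	tx_state_t tx;

	tx_state_init(&tx);
	tx_state_fini(&tx);
	return (0);
}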
diff --git a/module/zfs/vdev.c b/module/zfs/vdev.c
index 16a27e514..140b6ef2d 100644
--- a/module/zfs/vdev.c
+++ b/module/zfs/vdev.c
@@ -1414,8 +1414,7 @@ vdev_dtl_sync(vdev_t *vd, uint64_t txg)
if (vd->vdev_detached) {
if (smo->smo_object != 0) {
- int err = dmu_object_free(mos, smo->smo_object, tx);
- ASSERT3U(err, ==, 0);
+ VERIFY(0 == dmu_object_free(mos, smo->smo_object, tx));
smo->smo_object = 0;
}
dmu_tx_commit(tx);
@@ -1578,7 +1577,7 @@ vdev_sync_done(vdev_t *vd, uint64_t txg)
{
metaslab_t *msp;
- while (msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg)))
+ while ((msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg))))
metaslab_sync_done(msp, txg);
}
diff --git a/module/zfs/vdev_cache.c b/module/zfs/vdev_cache.c
index 5a7b59f6e..cad0438c3 100644
--- a/module/zfs/vdev_cache.c
+++ b/module/zfs/vdev_cache.c
@@ -253,7 +253,6 @@ vdev_cache_read(zio_t *zio)
vdev_cache_t *vc = &zio->io_vd->vdev_cache;
vdev_cache_entry_t *ve, ve_search;
uint64_t cache_offset = P2ALIGN(zio->io_offset, VCBS);
- uint64_t cache_phase = P2PHASE(zio->io_offset, VCBS);
zio_t *fio;
ASSERT(zio->io_type == ZIO_TYPE_READ);
@@ -270,7 +269,7 @@ vdev_cache_read(zio_t *zio)
if (P2BOUNDARY(zio->io_offset, zio->io_size, VCBS))
return (EXDEV);
- ASSERT(cache_phase + zio->io_size <= VCBS);
+ ASSERT(P2PHASE(zio->io_offset, VCBS) + zio->io_size <= VCBS);
mutex_enter(&vc->vc_lock);
diff --git a/module/zfs/vdev_label.c b/module/zfs/vdev_label.c
index bf930466f..640ab7957 100644
--- a/module/zfs/vdev_label.c
+++ b/module/zfs/vdev_label.c
@@ -458,6 +458,8 @@ vdev_inuse(vdev_t *vd, uint64_t crtxg, vdev_labeltype_t reason,
case VDEV_LABEL_SPARE:
return (spa_has_spare(spa, device_guid));
+ default:
+ break;
}
}
diff --git a/module/zfs/vdev_raidz.c b/module/zfs/vdev_raidz.c
index 69e314468..33e7f102c 100644
--- a/module/zfs/vdev_raidz.c
+++ b/module/zfs/vdev_raidz.c
@@ -789,7 +789,7 @@ vdev_raidz_io_done(zio_t *zio)
vdev_t *vd = zio->io_vd;
vdev_t *cvd;
raidz_map_t *rm = zio->io_vsd;
- raidz_col_t *rc, *rc1;
+ raidz_col_t *rc = NULL, *rc1;
int unexpected_errors = 0;
int parity_errors = 0;
int parity_untried = 0;
diff --git a/module/zfs/zap.c b/module/zfs/zap.c
index 4e4677e50..a9bd189f8 100644
--- a/module/zfs/zap.c
+++ b/module/zfs/zap.c
@@ -388,7 +388,7 @@ zap_create_leaf(zap_t *zap, dmu_tx_t *tx)
ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));
- rw_init(&l->l_rwlock, 0, 0, 0);
+ rw_init(&l->l_rwlock, NULL, RW_DEFAULT, NULL);
rw_enter(&l->l_rwlock, RW_WRITER);
l->l_blkid = zap_allocate_blocks(zap, 1);
l->l_dbuf = NULL;
@@ -446,7 +446,7 @@ zap_open_leaf(uint64_t blkid, dmu_buf_t *db)
ASSERT(blkid != 0);
l = kmem_alloc(sizeof (zap_leaf_t), KM_SLEEP);
- rw_init(&l->l_rwlock, 0, 0, 0);
+ rw_init(&l->l_rwlock, NULL, RW_DEFAULT, NULL);
rw_enter(&l->l_rwlock, RW_WRITER);
l->l_blkid = blkid;
l->l_bs = highbit(db->db_size)-1;
@@ -476,7 +476,7 @@ zap_open_leaf(uint64_t blkid, dmu_buf_t *db)
ASSERT3U(ZAP_LEAF_HASH_NUMENTRIES(l), >, ZAP_LEAF_NUMCHUNKS(l) / 3);
/* The chunks should begin at the end of the hash table */
- ASSERT3P(&ZAP_LEAF_CHUNK(l, 0), ==,
+ ASSERT3P(&ZAP_LEAF_CHUNK(l, 0), ==, (zap_leaf_chunk_t *)
&l->l_phys->l_hash[ZAP_LEAF_HASH_NUMENTRIES(l)]);
/* The chunks should end at the end of the block */
diff --git a/module/zfs/zap_leaf.c b/module/zfs/zap_leaf.c
index da498b6bc..267f296fb 100644
--- a/module/zfs/zap_leaf.c
+++ b/module/zfs/zap_leaf.c
@@ -220,7 +220,7 @@ zap_leaf_array_create(zap_leaf_t *l, const char *buf,
uint16_t chunk_head;
uint16_t *chunkp = &chunk_head;
int byten = 0;
- uint64_t value;
+ uint64_t value = 0;
int shift = (integer_size-1)*8;
int len = num_integers;
diff --git a/module/zfs/zap_micro.c b/module/zfs/zap_micro.c
index b2db2eeae..02c13120c 100644
--- a/module/zfs/zap_micro.c
+++ b/module/zfs/zap_micro.c
@@ -271,7 +271,7 @@ mze_destroy(zap_t *zap)
mzap_ent_t *mze;
void *avlcookie = NULL;
- while (mze = avl_destroy_nodes(&zap->zap_m.zap_avl, &avlcookie))
+ while ((mze = avl_destroy_nodes(&zap->zap_m.zap_avl, &avlcookie)))
kmem_free(mze, sizeof (mzap_ent_t));
avl_destroy(&zap->zap_m.zap_avl);
}
@@ -286,7 +286,7 @@ mzap_open(objset_t *os, uint64_t obj, dmu_buf_t *db)
ASSERT3U(MZAP_ENT_LEN, ==, sizeof (mzap_ent_phys_t));
zap = kmem_zalloc(sizeof (zap_t), KM_SLEEP);
- rw_init(&zap->zap_rwlock, 0, 0, 0);
+ rw_init(&zap->zap_rwlock, NULL, RW_DEFAULT, NULL);
rw_enter(&zap->zap_rwlock, RW_WRITER);
zap->zap_objset = os;
zap->zap_object = obj;
diff --git a/module/zfs/zfs_byteswap.c b/module/zfs/zfs_byteswap.c
index ab97f83eb..177ff552c 100644
--- a/module/zfs/zfs_byteswap.c
+++ b/module/zfs/zfs_byteswap.c
@@ -52,7 +52,7 @@ zfs_ace_byteswap(void *buf, size_t size, boolean_t zfs_layout)
{
caddr_t end;
caddr_t ptr;
- zfs_ace_t *zacep;
+ zfs_ace_t *zacep = NULL;
ace_t *acep;
uint16_t entry_type;
size_t entry_size;
diff --git a/module/zfs/zfs_ioctl.c b/module/zfs/zfs_ioctl.c
index b6ad57451..b77ee4da3 100644
--- a/module/zfs/zfs_ioctl.c
+++ b/module/zfs/zfs_ioctl.c
@@ -142,7 +142,7 @@ history_str_get(zfs_cmd_t *zc)
{
char *buf;
- if (zc->zc_history == NULL)
+ if (zc->zc_history == 0)
return (NULL);
buf = kmem_alloc(HIS_MAX_RECORD_LEN, KM_SLEEP);
@@ -349,6 +349,8 @@ zfs_secpolicy_setprop(const char *name, zfs_prop_t prop, cred_t *cr)
return (EPERM);
}
break;
+ default:
+ break;
}
return (zfs_secpolicy_write_perms(name, zfs_prop_to_name(prop), cr));
@@ -1271,7 +1273,7 @@ zfs_ioc_objset_zplprops(zfs_cmd_t *zc)
* which we aren't supposed to do with a DS_MODE_USER
* hold, because it could be inconsistent.
*/
- if (zc->zc_nvlist_dst != NULL &&
+ if (zc->zc_nvlist_dst != 0 &&
!zc->zc_objset_stats.dds_inconsistent &&
dmu_objset_type(os) == DMU_OST_ZFS) {
nvlist_t *nv;
@@ -1472,6 +1474,8 @@ zfs_set_prop_nvlist(const char *name, nvlist_t *nvl)
SPA_VERSION_PASSTHROUGH_X))
return (ENOTSUP);
}
+ default:
+ break;
}
elem = NULL;
@@ -1669,7 +1673,7 @@ zfs_ioc_pool_get_props(zfs_cmd_t *zc)
error = spa_prop_get(spa, &nvp);
- if (error == 0 && zc->zc_nvlist_dst != NULL)
+ if (error == 0 && zc->zc_nvlist_dst != 0)
error = put_nvlist(zc, nvp);
else
error = EFAULT;
@@ -2045,7 +2049,7 @@ zfs_ioc_create(zfs_cmd_t *zc)
strchr(zc->zc_name, '%'))
return (EINVAL);
- if (zc->zc_nvlist_src != NULL &&
+ if (zc->zc_nvlist_src != 0 &&
(error = get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size,
&nvprops)) != 0)
return (error);
@@ -2441,7 +2445,7 @@ zfs_ioc_recv(zfs_cmd_t *zc)
*tosnap = '\0';
tosnap++;
- if (zc->zc_nvlist_src != NULL &&
+ if (zc->zc_nvlist_src != 0 &&
(error = get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size,
&props)) != 0)
return (error);
diff --git a/module/zfs/zfs_znode.c b/module/zfs/zfs_znode.c
index 25751ae5f..4a1820940 100644
--- a/module/zfs/zfs_znode.c
+++ b/module/zfs/zfs_znode.c
@@ -1574,6 +1574,8 @@ zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
if (sense == ZFS_CASE_INSENSITIVE || sense == ZFS_CASE_MIXED)
zfsvfs.z_norm |= U8_TEXTPREP_TOUPPER;
+ /* XXX - This must be destroyed but I'm not quite sure yet so
+ * I'm just annotating that fact when it's an issue. -Brian */
mutex_init(&zfsvfs.z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
list_create(&zfsvfs.z_all_znodes, sizeof (znode_t),
offsetof(znode_t, z_link_node));
diff --git a/module/zfs/zio.c b/module/zfs/zio.c
index d347920ea..a20f971c9 100644
--- a/module/zfs/zio.c
+++ b/module/zfs/zio.c
@@ -901,7 +901,7 @@ zio_taskq_dispatch(zio_t *zio, enum zio_taskq_type q)
t = ZIO_TYPE_NULL;
(void) taskq_dispatch(zio->io_spa->spa_zio_taskq[t][q],
- (task_func_t *)zio_execute, zio, TQ_SLEEP);
+ (task_func_t *)zio_execute, zio, TQ_NOSLEEP);
}
static boolean_t