diff options
author | Michael Kjorling <[email protected]> | 2013-11-01 20:26:11 +0100 |
---|---|---|
committer | Brian Behlendorf <[email protected]> | 2013-12-18 16:46:35 -0800 |
commit | d1d7e2689db9e03f11c069ebc9f1ba12829e5dac (patch) | |
tree | 75b9a2b23334d5f673fb31f142f74146d351865c /module/zfs | |
parent | 8ffef572ed2ba97e0c2d6a8aa2240012e611dc6f (diff) |
cstyle: Resolve C style issues
The vast majority of these changes are in Linux specific code.
They are the result of not having an automated style checker to
validate the code when it was originally written. Others were
caused when the common code was slightly adjusted for Linux.
This patch contains no functional changes. It only refreshes
the code to conform to the style guide.
Everyone submitting patches for inclusion upstream should now
run 'make checkstyle' and resolve any warnings prior to opening
a pull request. The automated builders have been updated to
fail a build when 'make checkstyle' detects an issue.
Signed-off-by: Brian Behlendorf <[email protected]>
Closes #1821
Diffstat (limited to 'module/zfs')
48 files changed, 497 insertions, 458 deletions
diff --git a/module/zfs/arc.c b/module/zfs/arc.c index d6b4e1f29..222614c3d 100644 --- a/module/zfs/arc.c +++ b/module/zfs/arc.c @@ -906,8 +906,10 @@ buf_fini(void) int i; #if defined(_KERNEL) && defined(HAVE_SPL) - /* Large allocations which do not require contiguous pages - * should be using vmem_free() in the linux kernel */ + /* + * Large allocations which do not require contiguous pages + * should be using vmem_free() in the linux kernel\ + */ vmem_free(buf_hash_table.ht_table, (buf_hash_table.ht_mask + 1) * sizeof (void *)); #else @@ -998,8 +1000,10 @@ buf_init(void) retry: buf_hash_table.ht_mask = hsize - 1; #if defined(_KERNEL) && defined(HAVE_SPL) - /* Large allocations which do not require contiguous pages - * should be using vmem_alloc() in the linux kernel */ + /* + * Large allocations which do not require contiguous pages + * should be using vmem_alloc() in the linux kernel + */ buf_hash_table.ht_table = vmem_zalloc(hsize * sizeof (void*), KM_SLEEP); #else @@ -1075,7 +1079,7 @@ arc_cksum_compute(arc_buf_t *buf, boolean_t force) return; } buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), - KM_PUSHPAGE); + KM_PUSHPAGE); fletcher_2_native(buf->b_data, buf->b_hdr->b_size, buf->b_hdr->b_freeze_cksum); mutex_exit(&buf->b_hdr->b_freeze_lock); @@ -1219,7 +1223,7 @@ arc_buf_info(arc_buf_t *ab, arc_buf_info_t *abi, int state_index) arc_buf_hdr_t *hdr = ab->b_hdr; arc_state_t *state = hdr->b_state; - memset(abi, 0, sizeof(arc_buf_info_t)); + memset(abi, 0, sizeof (arc_buf_info_t)); abi->abi_flags = hdr->b_flags; abi->abi_datacnt = hdr->b_datacnt; abi->abi_state_type = state ? 
state->arcs_state : ARC_STATE_ANON; @@ -2031,7 +2035,7 @@ arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes, int count = 0; ASSERT(GHOST_STATE(state)); - bzero(&marker, sizeof(marker)); + bzero(&marker, sizeof (marker)); top: mutex_enter(&state->arcs_mtx); for (ab = list_tail(list); ab; ab = ab_prev) { @@ -2412,7 +2416,8 @@ arc_adapt_thread(void) } /* reset the growth delay for every reclaim */ - arc_grow_time = ddi_get_lbolt()+(zfs_arc_grow_retry * hz); + arc_grow_time = ddi_get_lbolt() + + (zfs_arc_grow_retry * hz); arc_kmem_reap_now(last_reclaim, 0); arc_warm = B_TRUE; @@ -3394,7 +3399,7 @@ arc_add_prune_callback(arc_prune_func_t *func, void *private) { arc_prune_t *p; - p = kmem_alloc(sizeof(*p), KM_SLEEP); + p = kmem_alloc(sizeof (*p), KM_SLEEP); p->p_pfunc = func; p->p_private = private; list_link_init(&p->p_node); @@ -4958,7 +4963,7 @@ l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz, list_insert_head(dev->l2ad_buflist, head); cb = kmem_alloc(sizeof (l2arc_write_callback_t), - KM_PUSHPAGE); + KM_PUSHPAGE); cb->l2wcb_dev = dev; cb->l2wcb_head = head; pio = zio_root(spa, l2arc_write_done, cb, diff --git a/module/zfs/dbuf.c b/module/zfs/dbuf.c index 01352a91c..c8a526171 100644 --- a/module/zfs/dbuf.c +++ b/module/zfs/dbuf.c @@ -305,8 +305,10 @@ dbuf_init(void) retry: h->hash_table_mask = hsize - 1; #if defined(_KERNEL) && defined(HAVE_SPL) - /* Large allocations which do not require contiguous pages - * should be using vmem_alloc() in the linux kernel */ + /* + * Large allocations which do not require contiguous pages + * should be using vmem_alloc() in the linux kernel + */ h->hash_table = vmem_zalloc(hsize * sizeof (void *), KM_PUSHPAGE); #else h->hash_table = kmem_zalloc(hsize * sizeof (void *), KM_NOSLEEP); @@ -339,8 +341,10 @@ dbuf_fini(void) for (i = 0; i < DBUF_MUTEXES; i++) mutex_destroy(&h->hash_mutexes[i]); #if defined(_KERNEL) && defined(HAVE_SPL) - /* Large allocations which do not require contiguous pages - * 
should be using vmem_free() in the linux kernel */ + /* + * Large allocations which do not require contiguous pages + * should be using vmem_free() in the linux kernel + */ vmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *)); #else kmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *)); @@ -1700,8 +1704,7 @@ dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse, if (dh == NULL) { err = dbuf_hold_impl(dn, level+1, blkid >> epbs, fail_sparse, NULL, parentp); - } - else { + } else { __dbuf_hold_impl_init(dh + 1, dn, dh->dh_level + 1, blkid >> epbs, fail_sparse, NULL, parentp, dh->dh_depth + 1); @@ -1927,7 +1930,7 @@ dbuf_prefetch(dnode_t *dn, uint64_t blkid, zio_priority_t prio) } } -#define DBUF_HOLD_IMPL_MAX_DEPTH 20 +#define DBUF_HOLD_IMPL_MAX_DEPTH 20 /* * Returns with db_holds incremented, and db_mtx not held. @@ -1956,7 +1959,8 @@ top: dh->dh_fail_sparse, &dh->dh_parent, &dh->dh_bp, dh); if (dh->dh_fail_sparse) { - if (dh->dh_err == 0 && dh->dh_bp && BP_IS_HOLE(dh->dh_bp)) + if (dh->dh_err == 0 && + dh->dh_bp && BP_IS_HOLE(dh->dh_bp)) dh->dh_err = SET_ERROR(ENOENT); if (dh->dh_err) { if (dh->dh_parent) @@ -2037,13 +2041,13 @@ dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid, int fail_sparse, struct dbuf_hold_impl_data *dh; int error; - dh = kmem_zalloc(sizeof(struct dbuf_hold_impl_data) * + dh = kmem_zalloc(sizeof (struct dbuf_hold_impl_data) * DBUF_HOLD_IMPL_MAX_DEPTH, KM_PUSHPAGE); __dbuf_hold_impl_init(dh, dn, level, blkid, fail_sparse, tag, dbp, 0); error = __dbuf_hold_impl(dh); - kmem_free(dh, sizeof(struct dbuf_hold_impl_data) * + kmem_free(dh, sizeof (struct dbuf_hold_impl_data) * DBUF_HOLD_IMPL_MAX_DEPTH); return (error); @@ -2359,7 +2363,8 @@ dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db) } } -/* dbuf_sync_indirect() is called recursively from dbuf_sync_list() so it +/* + * dbuf_sync_indirect() is called recursively from dbuf_sync_list() so it * is critical the we not allow the compiler to inline 
this function in to * dbuf_sync_list() thereby drastically bloating the stack usage. */ @@ -2409,7 +2414,8 @@ dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx) zio_nowait(zio); } -/* dbuf_sync_leaf() is called recursively from dbuf_sync_list() so it is +/* + * dbuf_sync_leaf() is called recursively from dbuf_sync_list() so it is * critical the we not allow the compiler to inline this function in to * dbuf_sync_list() thereby drastically bloating the stack usage. */ diff --git a/module/zfs/dbuf_stats.c b/module/zfs/dbuf_stats.c index ef760eaba..0cad9efdd 100644 --- a/module/zfs/dbuf_stats.c +++ b/module/zfs/dbuf_stats.c @@ -53,11 +53,11 @@ dbuf_stats_hash_table_headers(char *buf, size_t size) "%-6s %-6s %-8s %-8s %-6s %-6s %-5s %-8s %-8s\n", "dbuf", "arcbuf", "dnode", "pool", "objset", "object", "level", "blkid", "offset", "dbsize", "meta", "state", "dbholds", "list", - "atype", "index", "flags", "count", "asize", "access", "mru", "gmru", - "mfu", "gmfu", "l2", "l2_dattr", "l2_asize", "l2_comp", "aholds", - "dtype", "btype", "data_bs", "meta_bs", "bsize", - "lvls", "dholds", "blocks", "dsize"); - buf[size] = '\0'; + "atype", "index", "flags", "count", "asize", "access", + "mru", "gmru", "mfu", "gmfu", "l2", "l2_dattr", "l2_asize", + "l2_comp", "aholds", "dtype", "btype", "data_bs", "meta_bs", + "bsize", "lvls", "dholds", "blocks", "dsize"); + buf[size] = '\0'; return (0); } @@ -118,7 +118,7 @@ __dbuf_stats_hash_table_data(char *buf, size_t size, dmu_buf_impl_t *db) (ulong_t)refcount_count(&dn->dn_holds), (u_longlong_t)doi.doi_fill_count, (u_longlong_t)doi.doi_max_offset); - buf[size] = '\0'; + buf[size] = '\0'; return (size); } @@ -166,7 +166,7 @@ dbuf_stats_hash_table_addr(kstat_t *ksp, loff_t n) { dbuf_stats_t *dsh = ksp->ks_private; - ASSERT(MUTEX_HELD(&dsh->lock)); + ASSERT(MUTEX_HELD(&dsh->lock)); if (n <= dsh->hash->hash_table_mask) { dsh->idx = n; diff --git a/module/zfs/ddt.c b/module/zfs/ddt.c index 22720c9dd..b923df13a 100644 --- a/module/zfs/ddt.c 
+++ b/module/zfs/ddt.c @@ -916,20 +916,20 @@ ddt_class_contains(spa_t *spa, enum ddt_class max_class, const blkptr_t *bp) return (B_TRUE); ddt = spa->spa_ddt[BP_GET_CHECKSUM(bp)]; - dde = kmem_alloc(sizeof(ddt_entry_t), KM_PUSHPAGE); + dde = kmem_alloc(sizeof (ddt_entry_t), KM_PUSHPAGE); ddt_key_fill(&(dde->dde_key), bp); for (type = 0; type < DDT_TYPES; type++) { for (class = 0; class <= max_class; class++) { if (ddt_object_lookup(ddt, type, class, dde) == 0) { - kmem_free(dde, sizeof(ddt_entry_t)); + kmem_free(dde, sizeof (ddt_entry_t)); return (B_TRUE); } } } - kmem_free(dde, sizeof(ddt_entry_t)); + kmem_free(dde, sizeof (ddt_entry_t)); return (B_FALSE); } @@ -1209,5 +1209,5 @@ ddt_walk(spa_t *spa, ddt_bookmark_t *ddb, ddt_entry_t *dde) #if defined(_KERNEL) && defined(HAVE_SPL) module_param(zfs_dedup_prefetch, int, 0644); -MODULE_PARM_DESC(zfs_dedup_prefetch,"Enable prefetching dedup-ed blks"); +MODULE_PARM_DESC(zfs_dedup_prefetch, "Enable prefetching dedup-ed blks"); #endif diff --git a/module/zfs/ddt_zap.c b/module/zfs/ddt_zap.c index 65b14ab63..a21ed4542 100644 --- a/module/zfs/ddt_zap.c +++ b/module/zfs/ddt_zap.c @@ -141,7 +141,7 @@ ddt_zap_walk(objset_t *os, uint64_t object, ddt_entry_t *dde, uint64_t *walk) static int ddt_zap_count(objset_t *os, uint64_t object, uint64_t *count) { - return zap_count(os, object, count); + return (zap_count(os, object, count)); } const ddt_ops_t ddt_zap_ops = { diff --git a/module/zfs/dmu.c b/module/zfs/dmu.c index ade13b9f0..9e99558a3 100644 --- a/module/zfs/dmu.c +++ b/module/zfs/dmu.c @@ -400,7 +400,8 @@ dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset, uint64_t length, } nblks = 1; } - dbp = kmem_zalloc(sizeof (dmu_buf_t *) * nblks, KM_PUSHPAGE | KM_NODEBUG); + dbp = kmem_zalloc(sizeof (dmu_buf_t *) * nblks, + KM_PUSHPAGE | KM_NODEBUG); zio = zio_root(dn->dn_objset->os_spa, NULL, NULL, ZIO_FLAG_CANFAIL); blkid = dbuf_whichblock(dn, offset); @@ -877,9 +878,9 @@ static xuio_stats_t xuio_stats = { { 
"write_buf_nocopy", KSTAT_DATA_UINT64 } }; -#define XUIOSTAT_INCR(stat, val) \ - atomic_add_64(&xuio_stats.stat.value.ui64, (val)) -#define XUIOSTAT_BUMP(stat) XUIOSTAT_INCR(stat, 1) +#define XUIOSTAT_INCR(stat, val) \ + atomic_add_64(&xuio_stats.stat.value.ui64, (val)) +#define XUIOSTAT_BUMP(stat) XUIOSTAT_INCR(stat, 1) int dmu_xuio_init(xuio_t *xuio, int nblk) @@ -1044,7 +1045,7 @@ dmu_req_copy(void *arg_buf, int size, int *offset, struct request *req) bv->bv_len -= tocpy; } - return 0; + return (0); } static void @@ -1067,13 +1068,13 @@ dmu_bio_clone(struct bio *bio, struct bio **bio_copy) struct bio *bio_new; if (bio == NULL) - return EINVAL; + return (EINVAL); while (bio) { bio_new = bio_clone(bio, GFP_NOIO); if (bio_new == NULL) { dmu_bio_put(bio_root); - return ENOMEM; + return (ENOMEM); } if (bio_last) { @@ -1089,7 +1090,7 @@ dmu_bio_clone(struct bio *bio, struct bio **bio_copy) *bio_copy = bio_root; - return 0; + return (0); } int @@ -1106,7 +1107,7 @@ dmu_read_req(objset_t *os, uint64_t object, struct request *req) * to be reading in parallel. 
*/ err = dmu_buf_hold_array(os, object, offset, size, TRUE, FTAG, - &numbufs, &dbp); + &numbufs, &dbp); if (err) return (err); @@ -1168,7 +1169,7 @@ dmu_write_req(objset_t *os, uint64_t object, struct request *req, dmu_tx_t *tx) return (0); err = dmu_buf_hold_array(os, object, offset, size, FALSE, FTAG, - &numbufs, &dbp); + &numbufs, &dbp); if (err) return (err); @@ -1564,7 +1565,7 @@ dmu_sync_late_arrival(zio_t *pio, objset_t *os, dmu_sync_cb_t *done, zgd_t *zgd, zio_nowait(zio_write(pio, os->os_spa, dmu_tx_get_txg(tx), zgd->zgd_bp, zgd->zgd_db->db_data, zgd->zgd_db->db_size, zp, dmu_sync_late_arrival_ready, NULL, dmu_sync_late_arrival_done, dsa, - ZIO_PRIORITY_SYNC_WRITE, ZIO_FLAG_CANFAIL | ZIO_FLAG_FASTWRITE, zb)); + ZIO_PRIORITY_SYNC_WRITE, ZIO_FLAG_CANFAIL|ZIO_FLAG_FASTWRITE, zb)); return (0); } diff --git a/module/zfs/dmu_objset.c b/module/zfs/dmu_objset.c index 07e00c307..fc7c80365 100644 --- a/module/zfs/dmu_objset.c +++ b/module/zfs/dmu_objset.c @@ -1485,7 +1485,7 @@ dmu_snapshot_list_next(objset_t *os, int namelen, char *name, int dmu_snapshot_lookup(objset_t *os, const char *name, uint64_t *value) { - return dsl_dataset_snap_lookup(os->os_dsl_dataset, name, value); + return (dsl_dataset_snap_lookup(os->os_dsl_dataset, name, value)); } int diff --git a/module/zfs/dmu_zfetch.c b/module/zfs/dmu_zfetch.c index feb763947..876ff357f 100644 --- a/module/zfs/dmu_zfetch.c +++ b/module/zfs/dmu_zfetch.c @@ -703,7 +703,8 @@ dmu_zfetch(zfetch_t *zf, uint64_t offset, uint64_t size, int prefetched) if (cur_streams >= max_streams) { return; } - newstream = kmem_zalloc(sizeof (zstream_t), KM_PUSHPAGE); + newstream = + kmem_zalloc(sizeof (zstream_t), KM_PUSHPAGE); } newstream->zst_offset = zst.zst_offset; @@ -743,4 +744,3 @@ MODULE_PARM_DESC(zfetch_block_cap, "Max number of blocks to fetch at a time"); module_param(zfetch_array_rd_sz, ulong, 0644); MODULE_PARM_DESC(zfetch_array_rd_sz, "Number of bytes in a array_read"); #endif - diff --git a/module/zfs/dsl_dataset.c 
b/module/zfs/dsl_dataset.c index 52edbd3fa..7c4819bbc 100644 --- a/module/zfs/dsl_dataset.c +++ b/module/zfs/dsl_dataset.c @@ -1232,7 +1232,7 @@ dsl_dataset_snapshot(nvlist_t *snaps, nvlist_t *props, nvlist_t *errors) #ifdef _KERNEL if (error == 0) { for (pair = nvlist_next_nvpair(snaps, NULL); pair != NULL; - pair = nvlist_next_nvpair(snaps, pair)) { + pair = nvlist_next_nvpair(snaps, pair)) { char *snapname = nvpair_name(pair); zvol_create_minors(snapname); } diff --git a/module/zfs/dsl_deleg.c b/module/zfs/dsl_deleg.c index 03be99c01..99670dfe0 100644 --- a/module/zfs/dsl_deleg.c +++ b/module/zfs/dsl_deleg.c @@ -326,10 +326,10 @@ dsl_deleg_get(const char *ddname, nvlist_t **nvp) dp = startdd->dd_pool; mos = dp->dp_meta_objset; - zc = kmem_alloc(sizeof(zap_cursor_t), KM_SLEEP); - za = kmem_alloc(sizeof(zap_attribute_t), KM_SLEEP); - basezc = kmem_alloc(sizeof(zap_cursor_t), KM_SLEEP); - baseza = kmem_alloc(sizeof(zap_attribute_t), KM_SLEEP); + zc = kmem_alloc(sizeof (zap_cursor_t), KM_SLEEP); + za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP); + basezc = kmem_alloc(sizeof (zap_cursor_t), KM_SLEEP); + baseza = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP); source = kmem_alloc(MAXNAMELEN + strlen(MOS_DIR_NAME) + 1, KM_SLEEP); VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0); @@ -371,10 +371,10 @@ dsl_deleg_get(const char *ddname, nvlist_t **nvp) } kmem_free(source, MAXNAMELEN + strlen(MOS_DIR_NAME) + 1); - kmem_free(baseza, sizeof(zap_attribute_t)); - kmem_free(basezc, sizeof(zap_cursor_t)); - kmem_free(za, sizeof(zap_attribute_t)); - kmem_free(zc, sizeof(zap_cursor_t)); + kmem_free(baseza, sizeof (zap_attribute_t)); + kmem_free(basezc, sizeof (zap_cursor_t)); + kmem_free(za, sizeof (zap_attribute_t)); + kmem_free(zc, sizeof (zap_cursor_t)); dsl_dir_rele(startdd, FTAG); dsl_pool_rele(dp, FTAG); diff --git a/module/zfs/dsl_destroy.c b/module/zfs/dsl_destroy.c index ec4053ac7..351165dbf 100644 --- a/module/zfs/dsl_destroy.c +++ b/module/zfs/dsl_destroy.c 
@@ -500,7 +500,8 @@ dsl_destroy_snapshots_nvl(nvlist_t *snaps, boolean_t defer, return (0); dsda.dsda_snaps = snaps; - VERIFY0(nvlist_alloc(&dsda.dsda_successful_snaps, NV_UNIQUE_NAME, KM_PUSHPAGE)); + VERIFY0(nvlist_alloc(&dsda.dsda_successful_snaps, + NV_UNIQUE_NAME, KM_PUSHPAGE)); dsda.dsda_defer = defer; dsda.dsda_errlist = errlist; @@ -519,8 +520,8 @@ dsl_destroy_snapshot(const char *name, boolean_t defer) nvlist_t *nvl; nvlist_t *errlist; - VERIFY0(nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_PUSHPAGE)); - VERIFY0(nvlist_alloc(&errlist, NV_UNIQUE_NAME, KM_PUSHPAGE)); + VERIFY0(nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_PUSHPAGE)); + VERIFY0(nvlist_alloc(&errlist, NV_UNIQUE_NAME, KM_PUSHPAGE)); fnvlist_add_boolean(nvl, name); error = dsl_destroy_snapshots_nvl(nvl, defer, errlist); diff --git a/module/zfs/dsl_dir.c b/module/zfs/dsl_dir.c index 803a77c25..f0a0b116a 100644 --- a/module/zfs/dsl_dir.c +++ b/module/zfs/dsl_dir.c @@ -48,8 +48,8 @@ static void dsl_dir_evict(dmu_buf_t *db, void *arg) { dsl_dir_t *dd = arg; - ASSERTV(dsl_pool_t *dp = dd->dd_pool;) int t; + ASSERTV(dsl_pool_t *dp = dd->dd_pool); for (t = 0; t < TXG_SIZE; t++) { ASSERT(!txg_list_member(&dp->dp_dirty_dirs, dd, t)); @@ -1097,7 +1097,7 @@ dsl_dir_set_reservation_sync(void *arg, dmu_tx_t *tx) zfs_prop_to_name(ZFS_PROP_RESERVATION), ddsqra->ddsqra_source, sizeof (ddsqra->ddsqra_value), 1, &ddsqra->ddsqra_value, tx); - + VERIFY0(dsl_prop_get_int_ds(ds, zfs_prop_to_name(ZFS_PROP_RESERVATION), &newval)); } else { @@ -1109,7 +1109,7 @@ dsl_dir_set_reservation_sync(void *arg, dmu_tx_t *tx) dsl_dir_set_reservation_sync_impl(ds->ds_dir, newval, tx); dsl_dataset_rele(ds, FTAG); - } +} int dsl_dir_set_reservation(const char *ddname, zprop_source_t source, diff --git a/module/zfs/dsl_pool.c b/module/zfs/dsl_pool.c index eed4bd497..0ef50717c 100644 --- a/module/zfs/dsl_pool.c +++ b/module/zfs/dsl_pool.c @@ -1049,15 +1049,14 @@ dsl_pool_config_held(dsl_pool_t *dp) EXPORT_SYMBOL(dsl_pool_config_enter); 
EXPORT_SYMBOL(dsl_pool_config_exit); -/* zfs_dirty_data_max_percent only applied at module load time in arc_init(). */ +/* zfs_dirty_data_max_percent only applied at module load in arc_init(). */ module_param(zfs_dirty_data_max_percent, int, 0444); MODULE_PARM_DESC(zfs_dirty_data_max_percent, "percent of ram can be dirty"); -/* zfs_dirty_data_max_max_percent only applied at module load time in - * arc_init(). */ +/* zfs_dirty_data_max_max_percent only applied at module load in arc_init(). */ module_param(zfs_dirty_data_max_max_percent, int, 0444); MODULE_PARM_DESC(zfs_dirty_data_max_max_percent, - "zfs_dirty_data_max upper bound as % of RAM"); + "zfs_dirty_data_max upper bound as % of RAM"); module_param(zfs_delay_min_dirty_percent, int, 0644); MODULE_PARM_DESC(zfs_delay_min_dirty_percent, "transaction delay threshold"); @@ -1065,10 +1064,10 @@ MODULE_PARM_DESC(zfs_delay_min_dirty_percent, "transaction delay threshold"); module_param(zfs_dirty_data_max, ulong, 0644); MODULE_PARM_DESC(zfs_dirty_data_max, "determines the dirty space limit"); -/* zfs_dirty_data_max_max only applied at module load time in arc_init(). */ +/* zfs_dirty_data_max_max only applied at module load in arc_init(). 
*/ module_param(zfs_dirty_data_max_max, ulong, 0444); MODULE_PARM_DESC(zfs_dirty_data_max_max, - "zfs_dirty_data_max upper bound in bytes"); + "zfs_dirty_data_max upper bound in bytes"); module_param(zfs_dirty_data_sync, ulong, 0644); MODULE_PARM_DESC(zfs_dirty_data_sync, "sync txg when this much dirty data"); diff --git a/module/zfs/dsl_scan.c b/module/zfs/dsl_scan.c index 3780aee79..ea0450781 100644 --- a/module/zfs/dsl_scan.c +++ b/module/zfs/dsl_scan.c @@ -201,9 +201,11 @@ dsl_scan_setup_sync(void *arg, dmu_tx_t *tx) if (vdev_resilver_needed(spa->spa_root_vdev, &scn->scn_phys.scn_min_txg, &scn->scn_phys.scn_max_txg)) { - spa_event_notify(spa, NULL, FM_EREPORT_ZFS_RESILVER_START); + spa_event_notify(spa, NULL, + FM_EREPORT_ZFS_RESILVER_START); } else { - spa_event_notify(spa, NULL, FM_EREPORT_ZFS_SCRUB_START); + spa_event_notify(spa, NULL, + FM_EREPORT_ZFS_SCRUB_START); } spa->spa_scrub_started = B_TRUE; @@ -783,7 +785,7 @@ dsl_scan_visitbp(blkptr_t *bp, const zbookmark_t *zb, if (buf) (void) arc_buf_remove_ref(buf, &buf); out: - kmem_free(bp_toread, sizeof(blkptr_t)); + kmem_free(bp_toread, sizeof (blkptr_t)); } static void @@ -1290,8 +1292,8 @@ dsl_scan_visit(dsl_scan_t *scn, dmu_tx_t *tx) * bookmark so we don't think that we're still trying to resume. 
*/ bzero(&scn->scn_phys.scn_bookmark, sizeof (zbookmark_t)); - zc = kmem_alloc(sizeof(zap_cursor_t), KM_PUSHPAGE); - za = kmem_alloc(sizeof(zap_attribute_t), KM_PUSHPAGE); + zc = kmem_alloc(sizeof (zap_cursor_t), KM_PUSHPAGE); + za = kmem_alloc(sizeof (zap_attribute_t), KM_PUSHPAGE); /* keep pulling things out of the zap-object-as-queue */ while (zap_cursor_init(zc, dp->dp_meta_objset, @@ -1325,8 +1327,8 @@ dsl_scan_visit(dsl_scan_t *scn, dmu_tx_t *tx) } zap_cursor_fini(zc); out: - kmem_free(za, sizeof(zap_attribute_t)); - kmem_free(zc, sizeof(zap_cursor_t)); + kmem_free(za, sizeof (zap_attribute_t)); + kmem_free(zc, sizeof (zap_cursor_t)); } static boolean_t diff --git a/module/zfs/fm.c b/module/zfs/fm.c index c004032f8..002827b52 100644 --- a/module/zfs/fm.c +++ b/module/zfs/fm.c @@ -276,8 +276,8 @@ fm_nvprintr(nvlist_t *nvl, int d, int c, int cols) c = fm_printf(d + 1, c, cols, "[ "); (void) nvpair_value_int8_array(nvp, &val, &nelem); for (i = 0; i < nelem; i++) - c = fm_printf(d + 1, c, cols, "0x%llx ", - (u_longlong_t)val[i]); + c = fm_printf(d + 1, c, cols, "0x%llx ", + (u_longlong_t)val[i]); c = fm_printf(d + 1, c, cols, "]"); break; @@ -290,8 +290,8 @@ fm_nvprintr(nvlist_t *nvl, int d, int c, int cols) c = fm_printf(d + 1, c, cols, "[ "); (void) nvpair_value_uint8_array(nvp, &val, &nelem); for (i = 0; i < nelem; i++) - c = fm_printf(d + 1, c, cols, "0x%llx ", - (u_longlong_t)val[i]); + c = fm_printf(d + 1, c, cols, "0x%llx ", + (u_longlong_t)val[i]); c = fm_printf(d + 1, c, cols, "]"); break; @@ -304,8 +304,8 @@ fm_nvprintr(nvlist_t *nvl, int d, int c, int cols) c = fm_printf(d + 1, c, cols, "[ "); (void) nvpair_value_int16_array(nvp, &val, &nelem); for (i = 0; i < nelem; i++) - c = fm_printf(d + 1, c, cols, "0x%llx ", - (u_longlong_t)val[i]); + c = fm_printf(d + 1, c, cols, "0x%llx ", + (u_longlong_t)val[i]); c = fm_printf(d + 1, c, cols, "]"); break; @@ -318,8 +318,8 @@ fm_nvprintr(nvlist_t *nvl, int d, int c, int cols) c = fm_printf(d + 1, c, cols, "[ 
"); (void) nvpair_value_uint16_array(nvp, &val, &nelem); for (i = 0; i < nelem; i++) - c = fm_printf(d + 1, c, cols, "0x%llx ", - (u_longlong_t)val[i]); + c = fm_printf(d + 1, c, cols, "0x%llx ", + (u_longlong_t)val[i]); c = fm_printf(d + 1, c, cols, "]"); break; @@ -332,8 +332,8 @@ fm_nvprintr(nvlist_t *nvl, int d, int c, int cols) c = fm_printf(d + 1, c, cols, "[ "); (void) nvpair_value_int32_array(nvp, &val, &nelem); for (i = 0; i < nelem; i++) - c = fm_printf(d + 1, c, cols, "0x%llx ", - (u_longlong_t)val[i]); + c = fm_printf(d + 1, c, cols, "0x%llx ", + (u_longlong_t)val[i]); c = fm_printf(d + 1, c, cols, "]"); break; @@ -346,8 +346,8 @@ fm_nvprintr(nvlist_t *nvl, int d, int c, int cols) c = fm_printf(d + 1, c, cols, "[ "); (void) nvpair_value_uint32_array(nvp, &val, &nelem); for (i = 0; i < nelem; i++) - c = fm_printf(d + 1, c, cols, "0x%llx ", - (u_longlong_t)val[i]); + c = fm_printf(d + 1, c, cols, "0x%llx ", + (u_longlong_t)val[i]); c = fm_printf(d + 1, c, cols, "]"); break; @@ -360,8 +360,8 @@ fm_nvprintr(nvlist_t *nvl, int d, int c, int cols) c = fm_printf(d + 1, c, cols, "[ "); (void) nvpair_value_int64_array(nvp, &val, &nelem); for (i = 0; i < nelem; i++) - c = fm_printf(d + 1, c, cols, "0x%llx ", - (u_longlong_t)val[i]); + c = fm_printf(d + 1, c, cols, "0x%llx ", + (u_longlong_t)val[i]); c = fm_printf(d + 1, c, cols, "]"); break; @@ -374,8 +374,8 @@ fm_nvprintr(nvlist_t *nvl, int d, int c, int cols) c = fm_printf(d + 1, c, cols, "[ "); (void) nvpair_value_uint64_array(nvp, &val, &nelem); for (i = 0; i < nelem; i++) - c = fm_printf(d + 1, c, cols, "0x%llx ", - (u_longlong_t)val[i]); + c = fm_printf(d + 1, c, cols, "0x%llx ", + (u_longlong_t)val[i]); c = fm_printf(d + 1, c, cols, "]"); break; @@ -418,15 +418,15 @@ zfs_zevent_alloc(void) { zevent_t *ev; - ev = kmem_zalloc(sizeof(zevent_t), KM_PUSHPAGE); + ev = kmem_zalloc(sizeof (zevent_t), KM_PUSHPAGE); if (ev == NULL) - return NULL; + return (NULL); - list_create(&ev->ev_ze_list, sizeof(zfs_zevent_t), 
+ list_create(&ev->ev_ze_list, sizeof (zfs_zevent_t), offsetof(zfs_zevent_t, ze_node)); list_link_init(&ev->ev_node); - return ev; + return (ev); } static void @@ -436,7 +436,7 @@ zfs_zevent_free(zevent_t *ev) ev->ev_cb(ev->ev_nvl, ev->ev_detector); list_destroy(&ev->ev_ze_list); - kmem_free(ev, sizeof(zevent_t)); + kmem_free(ev, sizeof (zevent_t)); } static void @@ -524,7 +524,7 @@ zfs_zevent_post(nvlist_t *nvl, nvlist_t *detector, zevent_cb_t *cb) return; } - ev->ev_nvl = nvl; + ev->ev_nvl = nvl; ev->ev_detector = detector; ev->ev_cb = cb; @@ -550,12 +550,12 @@ zfs_zevent_fd_hold(int fd, minor_t *minorp, zfs_zevent_t **ze) file_t *fp; int error; - fp = getf(fd); - if (fp == NULL) - return (EBADF); + fp = getf(fd); + if (fp == NULL) + return (EBADF); - *minorp = zfsdev_getminor(fp->f_file); - error = zfs_zevent_minor_to_state(*minorp, ze); + *minorp = zfsdev_getminor(fp->f_file); + error = zfs_zevent_minor_to_state(*minorp, ze); if (error) zfs_zevent_fd_rele(fd); @@ -577,7 +577,7 @@ zfs_zevent_fd_rele(int fd) */ int zfs_zevent_next(zfs_zevent_t *ze, nvlist_t **event, uint64_t *event_size, - uint64_t *dropped) + uint64_t *dropped) { zevent_t *ev; size_t size; @@ -592,8 +592,10 @@ zfs_zevent_next(zfs_zevent_t *ze, nvlist_t **event, uint64_t *event_size, goto out; } } else { - /* Existing stream continue with the next element and remove - * ourselves from the wait queue for the previous element */ + /* + * Existing stream continue with the next element and remove + * ourselves from the wait queue for the previous element + */ ev = list_prev(&zevent_list, ze->ze_zevent); if (ev == NULL) { error = ENOENT; @@ -619,7 +621,7 @@ zfs_zevent_next(zfs_zevent_t *ze, nvlist_t **event, uint64_t *event_size, out: mutex_exit(&zevent_lock); - return error; + return (error); } int @@ -643,7 +645,7 @@ zfs_zevent_wait(zfs_zevent_t *ze) out: mutex_exit(&zevent_lock); - return error; + return (error); } void @@ -1512,7 +1514,8 @@ fm_init(void) } mutex_init(&zevent_lock, NULL, 
MUTEX_DEFAULT, NULL); - list_create(&zevent_list, sizeof(zevent_t), offsetof(zevent_t, ev_node)); + list_create(&zevent_list, sizeof (zevent_t), + offsetof(zevent_t, ev_node)); cv_init(&zevent_cv, NULL, CV_DEFAULT, NULL); } diff --git a/module/zfs/gzip.c b/module/zfs/gzip.c index 155404efd..011fb9188 100644 --- a/module/zfs/gzip.c +++ b/module/zfs/gzip.c @@ -35,8 +35,8 @@ #include <sys/zmod.h> typedef size_t zlen_t; -#define compress_func z_compress_level -#define uncompress_func z_uncompress +#define compress_func z_compress_level +#define uncompress_func z_uncompress #else /* _KERNEL */ @@ -44,8 +44,8 @@ typedef size_t zlen_t; #include <zlib.h> typedef uLongf zlen_t; -#define compress_func compress2 -#define uncompress_func uncompress +#define compress_func compress2 +#define uncompress_func uncompress #endif diff --git a/module/zfs/lz4.c b/module/zfs/lz4.c index ae5d5a23a..6fc6201ee 100644 --- a/module/zfs/lz4.c +++ b/module/zfs/lz4.c @@ -47,7 +47,8 @@ static kmem_cache_t *lz4_cache; /*ARGSUSED*/ size_t -lz4_compress_zfs(void *s_start, void *d_start, size_t s_len, size_t d_len, int n) +lz4_compress_zfs(void *s_start, void *d_start, size_t s_len, + size_t d_len, int n) { uint32_t bufsiz; char *dest = d_start; @@ -74,7 +75,8 @@ lz4_compress_zfs(void *s_start, void *d_start, size_t s_len, size_t d_len, int n /*ARGSUSED*/ int -lz4_decompress_zfs(void *s_start, void *d_start, size_t s_len, size_t d_len, int n) +lz4_decompress_zfs(void *s_start, void *d_start, size_t s_len, + size_t d_len, int n) { const char *src = s_start; uint32_t bufsiz = BE_IN32(src); @@ -143,16 +145,16 @@ lz4_decompress_zfs(void *s_start, void *d_start, size_t s_len, size_t d_len, int * This function explicitly handles the CTX memory structure. * * ILLUMOS CHANGES: the CTX memory structure must be explicitly allocated - * by the caller (either on the stack or using kmem_cache_alloc). Passing NULL - * isn't valid. + * by the caller (either on the stack or using kmem_cache_alloc). 
Passing + * NULL isn't valid. * * LZ4_compress64kCtx() : * Same as LZ4_compressCtx(), but specific to small inputs (<64KB). * isize *Must* be <64KB, otherwise the output will be corrupted. * * ILLUMOS CHANGES: the CTX memory structure must be explicitly allocated - * by the caller (either on the stack or using kmem_cache_alloc). Passing NULL - * isn't valid. + * by the caller (either on the stack or using kmem_cache_alloc). Passing + * NULL isn't valid. */ /* @@ -267,7 +269,7 @@ lz4_decompress_zfs(void *s_start, void *d_start, size_t s_len, size_t d_len, int #define unlikely(expr) expect((expr) != 0, 0) #endif -#define lz4_bswap16(x) ((unsigned short int) ((((x) >> 8) & 0xffu) | \ +#define lz4_bswap16(x) ((unsigned short int) ((((x) >> 8) & 0xffu) | \ (((x) & 0xffu) << 8))) /* Basic types */ @@ -1009,4 +1011,3 @@ lz4_fini(void) lz4_cache = NULL; } } - diff --git a/module/zfs/lzjb.c b/module/zfs/lzjb.c index 7bad4f664..83ff409ce 100644 --- a/module/zfs/lzjb.c +++ b/module/zfs/lzjb.c @@ -61,7 +61,8 @@ lzjb_compress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n) while (src < (uchar_t *)s_start + s_len) { if ((copymask <<= 1) == (1 << NBBY)) { if (dst >= (uchar_t *)d_start + d_len - 1 - 2 * NBBY) { - kmem_free(lempel, LEMPEL_SIZE*sizeof(uint16_t)); + kmem_free(lempel, + LEMPEL_SIZE*sizeof (uint16_t)); return (s_len); } copymask = 1; diff --git a/module/zfs/metaslab.c b/module/zfs/metaslab.c index f49a8adc6..6356f7950 100644 --- a/module/zfs/metaslab.c +++ b/module/zfs/metaslab.c @@ -32,7 +32,7 @@ #include <sys/vdev_impl.h> #include <sys/zio.h> -#define WITH_DF_BLOCK_ALLOCATOR +#define WITH_DF_BLOCK_ALLOCATOR /* * Allow allocations to switch to gang blocks quickly. 
We do this to @@ -2021,7 +2021,8 @@ metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg) return (error); } -void metaslab_fastwrite_mark(spa_t *spa, const blkptr_t *bp) +void +metaslab_fastwrite_mark(spa_t *spa, const blkptr_t *bp) { const dva_t *dva = bp->blk_dva; int ndvas = BP_GET_NDVAS(bp); @@ -2043,7 +2044,8 @@ void metaslab_fastwrite_mark(spa_t *spa, const blkptr_t *bp) spa_config_exit(spa, SCL_VDEV, FTAG); } -void metaslab_fastwrite_unmark(spa_t *spa, const blkptr_t *bp) +void +metaslab_fastwrite_unmark(spa_t *spa, const blkptr_t *bp) { const dva_t *dva = bp->blk_dva; int ndvas = BP_GET_NDVAS(bp); diff --git a/module/zfs/sa.c b/module/zfs/sa.c index 9dc6756dc..13c09f92c 100644 --- a/module/zfs/sa.c +++ b/module/zfs/sa.c @@ -251,7 +251,7 @@ sa_cache_fini(void) void * sa_spill_alloc(int flags) { - return kmem_cache_alloc(spill_cache, flags); + return (kmem_cache_alloc(spill_cache, flags)); } void @@ -607,7 +607,8 @@ sa_find_sizes(sa_os_t *sa, sa_bulk_attr_t *attr_desc, int attr_count, } if (is_var_sz && var_size > 1) { - /* Don't worry that the spill block might overflow. + /* + * Don't worry that the spill block might overflow. * It will be resized if needed in sa_build_layouts(). 
*/ if (buftype == SA_SPILL || @@ -1142,7 +1143,8 @@ sa_tear_down(objset_t *os) sa_free_attr_table(sa); cookie = NULL; - while ((layout = avl_destroy_nodes(&sa->sa_layout_hash_tree, &cookie))){ + while ((layout = + avl_destroy_nodes(&sa->sa_layout_hash_tree, &cookie))) { sa_idx_tab_t *tab; while ((tab = list_head(&layout->lot_idx_tab))) { ASSERT(refcount_count(&tab->sa_refcount)); @@ -1151,7 +1153,7 @@ sa_tear_down(objset_t *os) } cookie = NULL; - while ((layout = avl_destroy_nodes(&sa->sa_layout_num_tree, &cookie))){ + while ((layout = avl_destroy_nodes(&sa->sa_layout_num_tree, &cookie))) { kmem_free(layout->lot_attrs, sizeof (sa_attr_type_t) * layout->lot_attr_count); kmem_free(layout, sizeof (sa_lot_t)); diff --git a/module/zfs/spa.c b/module/zfs/spa.c index 3daf5805d..7052eec4a 100644 --- a/module/zfs/spa.c +++ b/module/zfs/spa.c @@ -288,7 +288,7 @@ spa_prop_get(spa_t *spa, nvlist_t **nvp) err = nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_PUSHPAGE); if (err) - return err; + return (err); mutex_enter(&spa->spa_props_lock); @@ -488,7 +488,8 @@ spa_prop_validate(spa_t *spa, nvlist_t *props) break; } - if ((error = dmu_objset_hold(strval,FTAG,&os))) + error = dmu_objset_hold(strval, FTAG, &os); + if (error) break; /* Must be ZPL and not gzip compressed. */ @@ -2434,9 +2435,9 @@ spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config, hostid != myhostid) { nvlist_free(nvconfig); cmn_err(CE_WARN, "pool '%s' could not be " - "loaded as it was last accessed by " - "another system (host: %s hostid: 0x%lx). " - "See: http://zfsonlinux.org/msg/ZFS-8000-EY", + "loaded as it was last accessed by another " + "system (host: %s hostid: 0x%lx). 
See: " + "http://zfsonlinux.org/msg/ZFS-8000-EY", spa_name(spa), hostname, (unsigned long)hostid); return (SET_ERROR(EBADF)); @@ -4098,7 +4099,9 @@ spa_tryimport(nvlist_t *tryconfig) if (dsl_dsobj_to_dsname(spa_name(spa), spa->spa_bootfs, tmpname) == 0) { char *cp; - char *dsname = kmem_alloc(MAXPATHLEN, KM_PUSHPAGE); + char *dsname; + + dsname = kmem_alloc(MAXPATHLEN, KM_PUSHPAGE); cp = strchr(tmpname, '/'); if (cp == NULL) { @@ -5865,7 +5868,7 @@ spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx, if (sav->sav_count == 0) { VERIFY(nvlist_add_nvlist_array(nvroot, config, NULL, 0) == 0); } else { - list = kmem_alloc(sav->sav_count * sizeof (void *), KM_PUSHPAGE); + list = kmem_alloc(sav->sav_count*sizeof (void *), KM_PUSHPAGE); for (i = 0; i < sav->sav_count; i++) list[i] = vdev_config_generate(spa, sav->sav_vdevs[i], B_FALSE, VDEV_CONFIG_L2CACHE); diff --git a/module/zfs/spa_history.c b/module/zfs/spa_history.c index 6a5beb9c3..5b82238b9 100644 --- a/module/zfs/spa_history.c +++ b/module/zfs/spa_history.c @@ -293,7 +293,7 @@ spa_history_log(spa_t *spa, const char *msg) int err; nvlist_t *nvl; - VERIFY0(nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_PUSHPAGE)); + VERIFY0(nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_PUSHPAGE)); fnvlist_add_string(nvl, ZPOOL_HIST_CMD, msg); err = spa_history_log_nvl(spa, nvl); diff --git a/module/zfs/spa_misc.c b/module/zfs/spa_misc.c index d12e233b1..07bfb3112 100644 --- a/module/zfs/spa_misc.c +++ b/module/zfs/spa_misc.c @@ -1883,12 +1883,12 @@ EXPORT_SYMBOL(spa_mode); EXPORT_SYMBOL(spa_namespace_lock); module_param(zfs_deadman_synctime_ms, ulong, 0644); -MODULE_PARM_DESC(zfs_deadman_synctime_ms,"Expiration time in milliseconds"); +MODULE_PARM_DESC(zfs_deadman_synctime_ms, "Expiration time in milliseconds"); module_param(zfs_deadman_enabled, int, 0644); MODULE_PARM_DESC(zfs_deadman_enabled, "Enable deadman timer"); module_param(spa_asize_inflation, int, 0644); MODULE_PARM_DESC(spa_asize_inflation, - "SPA size estimate multiplication 
factor"); + "SPA size estimate multiplication factor"); #endif diff --git a/module/zfs/spa_stats.c b/module/zfs/spa_stats.c index d37b0af4f..c415395f9 100644 --- a/module/zfs/spa_stats.c +++ b/module/zfs/spa_stats.c @@ -122,14 +122,14 @@ spa_read_history_update(kstat_t *ksp, int rw) while ((srh = list_remove_head(&ssh->list))) { ssh->size--; - kmem_free(srh, sizeof(spa_read_history_t)); + kmem_free(srh, sizeof (spa_read_history_t)); } ASSERT3U(ssh->size, ==, 0); } ksp->ks_ndata = ssh->size; - ksp->ks_data_size = ssh->size * sizeof(spa_read_history_t); + ksp->ks_data_size = ssh->size * sizeof (spa_read_history_t); return (0); } @@ -181,7 +181,7 @@ spa_read_history_destroy(spa_t *spa) mutex_enter(&ssh->lock); while ((srh = list_remove_head(&ssh->list))) { ssh->size--; - kmem_free(srh, sizeof(spa_read_history_t)); + kmem_free(srh, sizeof (spa_read_history_t)); } ASSERT3U(ssh->size, ==, 0); @@ -206,9 +206,9 @@ spa_read_history_add(spa_t *spa, const zbookmark_t *zb, uint32_t aflags) if (zfs_read_history_hits == 0 && (aflags & ARC_CACHED)) return; - srh = kmem_zalloc(sizeof(spa_read_history_t), KM_PUSHPAGE); - strlcpy(srh->origin, zb->zb_func, sizeof(srh->origin)); - strlcpy(srh->comm, getcomm(), sizeof(srh->comm)); + srh = kmem_zalloc(sizeof (spa_read_history_t), KM_PUSHPAGE); + strlcpy(srh->origin, zb->zb_func, sizeof (srh->origin)); + strlcpy(srh->comm, getcomm(), sizeof (srh->comm)); srh->start = gethrtime(); srh->objset = zb->zb_objset; srh->object = zb->zb_object; @@ -226,7 +226,7 @@ spa_read_history_add(spa_t *spa, const zbookmark_t *zb, uint32_t aflags) while (ssh->size > zfs_read_history) { ssh->size--; rm = list_remove_tail(&ssh->list); - kmem_free(rm, sizeof(spa_read_history_t)); + kmem_free(rm, sizeof (spa_read_history_t)); } mutex_exit(&ssh->lock); @@ -343,14 +343,14 @@ spa_txg_history_update(kstat_t *ksp, int rw) while ((sth = list_remove_head(&ssh->list))) { ssh->size--; - kmem_free(sth, sizeof(spa_txg_history_t)); + kmem_free(sth, sizeof 
(spa_txg_history_t)); } ASSERT3U(ssh->size, ==, 0); } ksp->ks_ndata = ssh->size; - ksp->ks_data_size = ssh->size * sizeof(spa_txg_history_t); + ksp->ks_data_size = ssh->size * sizeof (spa_txg_history_t); return (0); } @@ -402,7 +402,7 @@ spa_txg_history_destroy(spa_t *spa) mutex_enter(&ssh->lock); while ((sth = list_remove_head(&ssh->list))) { ssh->size--; - kmem_free(sth, sizeof(spa_txg_history_t)); + kmem_free(sth, sizeof (spa_txg_history_t)); } ASSERT3U(ssh->size, ==, 0); @@ -424,7 +424,7 @@ spa_txg_history_add(spa_t *spa, uint64_t txg) if (zfs_txg_history == 0 && ssh->size == 0) return; - sth = kmem_zalloc(sizeof(spa_txg_history_t), KM_PUSHPAGE); + sth = kmem_zalloc(sizeof (spa_txg_history_t), KM_PUSHPAGE); sth->txg = txg; sth->state = TXG_STATE_OPEN; sth->times[TXG_STATE_BIRTH] = gethrtime(); @@ -437,7 +437,7 @@ spa_txg_history_add(spa_t *spa, uint64_t txg) while (ssh->size > zfs_txg_history) { ssh->size--; rm = list_remove_tail(&ssh->list); - kmem_free(rm, sizeof(spa_txg_history_t)); + kmem_free(rm, sizeof (spa_txg_history_t)); } mutex_exit(&ssh->lock); @@ -459,7 +459,7 @@ spa_txg_history_set(spa_t *spa, uint64_t txg, txg_state_t completed_state, mutex_enter(&ssh->lock); for (sth = list_head(&ssh->list); sth != NULL; - sth = list_next(&ssh->list, sth)) { + sth = list_next(&ssh->list, sth)) { if (sth->txg == txg) { sth->times[completed_state] = completed_time; sth->state++; @@ -488,7 +488,7 @@ spa_txg_history_set_io(spa_t *spa, uint64_t txg, uint64_t nread, mutex_enter(&ssh->lock); for (sth = list_head(&ssh->list); sth != NULL; - sth = list_next(&ssh->list, sth)) { + sth = list_next(&ssh->list, sth)) { if (sth->txg == txg) { sth->nread = nread; sth->nwritten = nwritten; @@ -536,7 +536,7 @@ spa_tx_assign_update(kstat_t *ksp, int rw) break; ksp->ks_ndata = i; - ksp->ks_data_size = i * sizeof(kstat_named_t); + ksp->ks_data_size = i * sizeof (kstat_named_t); return (0); } @@ -553,7 +553,7 @@ spa_tx_assign_init(spa_t *spa) mutex_init(&ssh->lock, NULL, 
MUTEX_DEFAULT, NULL); ssh->count = 42; /* power of two buckets for 1ns to 2,199s */ - ssh->size = ssh->count * sizeof(kstat_named_t); + ssh->size = ssh->count * sizeof (kstat_named_t); ssh->private = kmem_alloc(ssh->size, KM_SLEEP); (void) snprintf(name, KSTAT_STRLEN, "zfs/%s", spa_name(spa)); diff --git a/module/zfs/txg.c b/module/zfs/txg.c index 9a594b954..c779c4b9a 100644 --- a/module/zfs/txg.c +++ b/module/zfs/txg.c @@ -493,8 +493,8 @@ txg_sync_thread(dsl_pool_t *dp) txg_thread_enter(tx, &cpr); - vs1 = kmem_alloc(sizeof(vdev_stat_t), KM_PUSHPAGE); - vs2 = kmem_alloc(sizeof(vdev_stat_t), KM_PUSHPAGE); + vs1 = kmem_alloc(sizeof (vdev_stat_t), KM_PUSHPAGE); + vs2 = kmem_alloc(sizeof (vdev_stat_t), KM_PUSHPAGE); start = delta = 0; for (;;) { @@ -533,8 +533,8 @@ txg_sync_thread(dsl_pool_t *dp) } if (tx->tx_exiting) { - kmem_free(vs2, sizeof(vdev_stat_t)); - kmem_free(vs1, sizeof(vdev_stat_t)); + kmem_free(vs2, sizeof (vdev_stat_t)); + kmem_free(vs1, sizeof (vdev_stat_t)); txg_thread_exit(tx, &cpr, &tx->tx_sync_thread); } diff --git a/module/zfs/vdev_cache.c b/module/zfs/vdev_cache.c index ec215ffa5..ffd50ec2f 100644 --- a/module/zfs/vdev_cache.c +++ b/module/zfs/vdev_cache.c @@ -256,8 +256,8 @@ vdev_cache_read(zio_t *zio) vdev_cache_t *vc = &zio->io_vd->vdev_cache; vdev_cache_entry_t *ve, *ve_search; uint64_t cache_offset = P2ALIGN(zio->io_offset, VCBS); - ASSERTV(uint64_t cache_phase = P2PHASE(zio->io_offset, VCBS);) zio_t *fio; + ASSERTV(uint64_t cache_phase = P2PHASE(zio->io_offset, VCBS)); ASSERT(zio->io_type == ZIO_TYPE_READ); @@ -277,10 +277,10 @@ vdev_cache_read(zio_t *zio) mutex_enter(&vc->vc_lock); - ve_search = kmem_alloc(sizeof(vdev_cache_entry_t), KM_PUSHPAGE); + ve_search = kmem_alloc(sizeof (vdev_cache_entry_t), KM_PUSHPAGE); ve_search->ve_offset = cache_offset; ve = avl_find(&vc->vc_offset_tree, ve_search, NULL); - kmem_free(ve_search, sizeof(vdev_cache_entry_t)); + kmem_free(ve_search, sizeof (vdev_cache_entry_t)); if (ve != NULL) { if 
(ve->ve_missed_update) { diff --git a/module/zfs/vdev_disk.c b/module/zfs/vdev_disk.c index d845f59c4..1d8bf3f8c 100644 --- a/module/zfs/vdev_disk.c +++ b/module/zfs/vdev_disk.c @@ -47,7 +47,7 @@ typedef struct dio_request { int dr_rw; /* Read/Write */ int dr_error; /* Bio error */ int dr_bio_count; /* Count of bio's */ - struct bio *dr_bio[0]; /* Attached bio's */ + struct bio *dr_bio[0]; /* Attached bio's */ } dio_request_t; @@ -65,7 +65,7 @@ vdev_bdev_mode(int smode) if (smode & FWRITE) mode |= FMODE_WRITE; - return mode; + return (mode); } #else static int @@ -78,7 +78,7 @@ vdev_bdev_mode(int smode) if ((smode & FREAD) && !(smode & FWRITE)) mode = MS_RDONLY; - return mode; + return (mode); } #endif /* HAVE_OPEN_BDEV_EXCLUSIVE */ @@ -139,18 +139,19 @@ vdev_elevator_switch(vdev_t *v, char *elevator) return (0); /* Leave existing scheduler when set to "none" */ - if (!strncmp(elevator, "none", 4) && (strlen(elevator) == 4)) + if (strncmp(elevator, "none", 4) && (strlen(elevator) == 4) == 0) return (0); #ifdef HAVE_ELEVATOR_CHANGE error = elevator_change(q, elevator); #else - /* For pre-2.6.36 kernels elevator_change() is not available. + /* + * For pre-2.6.36 kernels elevator_change() is not available. * Therefore we fall back to using a usermodehelper to echo the * elevator into sysfs; This requires /bin/echo and sysfs to be * mounted which may not be true early in the boot process. 
*/ -# define SET_SCHEDULER_CMD \ +#define SET_SCHEDULER_CMD \ "exec 0</dev/null " \ " 1>/sys/block/%s/queue/scheduler " \ " 2>/dev/null; " \ @@ -167,7 +168,7 @@ vdev_elevator_switch(vdev_t *v, char *elevator) #endif /* HAVE_ELEVATOR_CHANGE */ if (error) printk("ZFS: Unable to set \"%s\" scheduler for %s (%s): %d\n", - elevator, v->vdev_path, device, error); + elevator, v->vdev_path, device, error); return (error); } @@ -207,7 +208,7 @@ vdev_disk_rrpart(const char *path, int mode, vdev_disk_t *vd) bdev = vdev_bdev_open(path, vdev_bdev_mode(mode), zfs_vdev_holder); if (IS_ERR(bdev)) - return bdev; + return (bdev); disk = get_gendisk(bdev->bd_dev, &partno); vdev_bdev_close(bdev, vdev_bdev_mode(mode)); @@ -231,9 +232,9 @@ vdev_disk_rrpart(const char *path, int mode, vdev_disk_t *vd) put_disk(disk); } - return result; + return (result); #else - return ERR_PTR(-EOPNOTSUPP); + return (ERR_PTR(-EOPNOTSUPP)); #endif /* defined(HAVE_3ARG_BLKDEV_GET) && defined(HAVE_GET_GENDISK) */ } @@ -248,7 +249,7 @@ vdev_disk_open(vdev_t *v, uint64_t *psize, uint64_t *max_psize, /* Must have a pathname and it must be absolute. 
*/ if (v->vdev_path == NULL || v->vdev_path[0] != '/') { v->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL; - return EINVAL; + return (EINVAL); } /* @@ -261,9 +262,9 @@ vdev_disk_open(vdev_t *v, uint64_t *psize, uint64_t *max_psize, goto skip_open; } - vd = kmem_zalloc(sizeof(vdev_disk_t), KM_PUSHPAGE); + vd = kmem_zalloc(sizeof (vdev_disk_t), KM_PUSHPAGE); if (vd == NULL) - return ENOMEM; + return (ENOMEM); /* * Devices are always opened by the path provided at configuration @@ -286,8 +287,8 @@ vdev_disk_open(vdev_t *v, uint64_t *psize, uint64_t *max_psize, bdev = vdev_bdev_open(v->vdev_path, vdev_bdev_mode(mode), zfs_vdev_holder); if (IS_ERR(bdev)) { - kmem_free(vd, sizeof(vdev_disk_t)); - return -PTR_ERR(bdev); + kmem_free(vd, sizeof (vdev_disk_t)); + return (-PTR_ERR(bdev)); } v->vdev_tsd = vd; @@ -312,7 +313,7 @@ skip_open: /* Try to set the io scheduler elevator algorithm */ (void) vdev_elevator_switch(v, zfs_vdev_scheduler); - return 0; + return (0); } static void @@ -325,9 +326,9 @@ vdev_disk_close(vdev_t *v) if (vd->vd_bdev != NULL) vdev_bdev_close(vd->vd_bdev, - vdev_bdev_mode(spa_mode(v->vdev_spa))); + vdev_bdev_mode(spa_mode(v->vdev_spa))); - kmem_free(vd, sizeof(vdev_disk_t)); + kmem_free(vd, sizeof (vdev_disk_t)); v->vdev_tsd = NULL; } @@ -337,8 +338,8 @@ vdev_disk_dio_alloc(int bio_count) dio_request_t *dr; int i; - dr = kmem_zalloc(sizeof(dio_request_t) + - sizeof(struct bio *) * bio_count, KM_PUSHPAGE); + dr = kmem_zalloc(sizeof (dio_request_t) + + sizeof (struct bio *) * bio_count, KM_PUSHPAGE); if (dr) { init_completion(&dr->dr_comp); atomic_set(&dr->dr_ref, 0); @@ -349,7 +350,7 @@ vdev_disk_dio_alloc(int bio_count) dr->dr_bio[i] = NULL; } - return dr; + return (dr); } static void @@ -361,8 +362,8 @@ vdev_disk_dio_free(dio_request_t *dr) if (dr->dr_bio[i]) bio_put(dr->dr_bio[i]); - kmem_free(dr, sizeof(dio_request_t) + - sizeof(struct bio *) * dr->dr_bio_count); + kmem_free(dr, sizeof (dio_request_t) + + sizeof (struct bio *) * dr->dr_bio_count); } static 
int @@ -370,19 +371,19 @@ vdev_disk_dio_is_sync(dio_request_t *dr) { #ifdef HAVE_BIO_RW_SYNC /* BIO_RW_SYNC preferred interface from 2.6.12-2.6.29 */ - return (dr->dr_rw & (1 << BIO_RW_SYNC)); + return (dr->dr_rw & (1 << BIO_RW_SYNC)); #else -# ifdef HAVE_BIO_RW_SYNCIO +#ifdef HAVE_BIO_RW_SYNCIO /* BIO_RW_SYNCIO preferred interface from 2.6.30-2.6.35 */ - return (dr->dr_rw & (1 << BIO_RW_SYNCIO)); -# else -# ifdef HAVE_REQ_SYNC + return (dr->dr_rw & (1 << BIO_RW_SYNCIO)); +#else +#ifdef HAVE_REQ_SYNC /* REQ_SYNC preferred interface from 2.6.36-2.6.xx */ - return (dr->dr_rw & REQ_SYNC); -# else -# error "Unable to determine bio sync flag" -# endif /* HAVE_REQ_SYNC */ -# endif /* HAVE_BIO_RW_SYNC */ + return (dr->dr_rw & REQ_SYNC); +#else +#error "Unable to determine bio sync flag" +#endif /* HAVE_REQ_SYNC */ +#endif /* HAVE_BIO_RW_SYNC */ #endif /* HAVE_BIO_RW_SYNCIO */ } @@ -417,7 +418,7 @@ vdev_disk_dio_put(dio_request_t *dr) } } - return rc; + return (rc); } BIO_END_IO_PROTO(vdev_disk_physio_completion, bio, size, error) @@ -436,11 +437,11 @@ BIO_END_IO_PROTO(vdev_disk_physio_completion, bio, size, error) #ifndef HAVE_2ARGS_BIO_END_IO_T if (bio->bi_size) - return 1; + return (1); #endif /* HAVE_2ARGS_BIO_END_IO_T */ if (error == 0 && !test_bit(BIO_UPTODATE, &bio->bi_flags)) - error = -EIO; + error = (-EIO); if (dr->dr_error == 0) dr->dr_error = -error; @@ -459,7 +460,7 @@ static inline unsigned long bio_nr_pages(void *bio_ptr, unsigned int bio_size) { return ((((unsigned long)bio_ptr + bio_size + PAGE_SIZE - 1) >> - PAGE_SHIFT) - ((unsigned long)bio_ptr >> PAGE_SHIFT)); + PAGE_SHIFT) - ((unsigned long)bio_ptr >> PAGE_SHIFT)); } static unsigned int @@ -491,14 +492,14 @@ bio_map(struct bio *bio, void *bio_ptr, unsigned int bio_size) offset = 0; } - return bio_size; + return (bio_size); } static int __vdev_disk_physio(struct block_device *bdev, zio_t *zio, caddr_t kbuf_ptr, - size_t kbuf_size, uint64_t kbuf_offset, int flags) + size_t kbuf_size, uint64_t 
kbuf_offset, int flags) { - dio_request_t *dr; + dio_request_t *dr; caddr_t bio_ptr; uint64_t bio_offset; int bio_size, bio_count = 16; @@ -509,7 +510,7 @@ __vdev_disk_physio(struct block_device *bdev, zio_t *zio, caddr_t kbuf_ptr, retry: dr = vdev_disk_dio_alloc(bio_count); if (dr == NULL) - return ENOMEM; + return (ENOMEM); if (zio && !(zio->io_flags & (ZIO_FLAG_IO_RETRY | ZIO_FLAG_TRYHARD))) bio_set_flags_failfast(bdev, &flags); @@ -545,10 +546,10 @@ retry: } dr->dr_bio[i] = bio_alloc(GFP_NOIO, - bio_nr_pages(bio_ptr, bio_size)); + bio_nr_pages(bio_ptr, bio_size)); if (dr->dr_bio[i] == NULL) { vdev_disk_dio_free(dr); - return ENOMEM; + return (ENOMEM); } /* Matching put called by vdev_disk_physio_completion */ @@ -592,17 +593,17 @@ retry: ASSERT3S(atomic_read(&dr->dr_ref), ==, 1); } - (void)vdev_disk_dio_put(dr); + (void) vdev_disk_dio_put(dr); - return error; + return (error); } int vdev_disk_physio(struct block_device *bdev, caddr_t kbuf, - size_t size, uint64_t offset, int flags) + size_t size, uint64_t offset, int flags) { bio_set_flags_failfast(bdev, &flags); - return __vdev_disk_physio(bdev, NULL, kbuf, size, offset, flags); + return (__vdev_disk_physio(bdev, NULL, kbuf, size, offset, flags)); } BIO_END_IO_PROTO(vdev_disk_io_flush_completion, bio, size, rc) @@ -631,11 +632,11 @@ vdev_disk_io_flush(struct block_device *bdev, zio_t *zio) q = bdev_get_queue(bdev); if (!q) - return ENXIO; + return (ENXIO); bio = bio_alloc(GFP_NOIO, 0); if (!bio) - return ENOMEM; + return (ENOMEM); bio->bi_end_io = vdev_disk_io_flush_completion; bio->bi_private = zio; @@ -643,7 +644,7 @@ vdev_disk_io_flush(struct block_device *bdev, zio_t *zio) zio->io_delay = jiffies_64; submit_bio(VDEV_WRITE_FLUSH_FUA, bio); - return 0; + return (0); } static int @@ -658,7 +659,7 @@ vdev_disk_io_start(zio_t *zio) if (!vdev_readable(v)) { zio->io_error = SET_ERROR(ENXIO); - return ZIO_PIPELINE_CONTINUE; + return (ZIO_PIPELINE_CONTINUE); } switch (zio->io_cmd) { @@ -674,7 +675,7 @@ 
vdev_disk_io_start(zio_t *zio) error = vdev_disk_io_flush(vd->vd_bdev, zio); if (error == 0) - return ZIO_PIPELINE_STOP; + return (ZIO_PIPELINE_STOP); zio->io_error = error; if (error == ENOTSUP) @@ -686,7 +687,7 @@ vdev_disk_io_start(zio_t *zio) zio->io_error = SET_ERROR(ENOTSUP); } - return ZIO_PIPELINE_CONTINUE; + return (ZIO_PIPELINE_CONTINUE); case ZIO_TYPE_WRITE: flags = WRITE; @@ -698,17 +699,17 @@ vdev_disk_io_start(zio_t *zio) default: zio->io_error = SET_ERROR(ENOTSUP); - return ZIO_PIPELINE_CONTINUE; + return (ZIO_PIPELINE_CONTINUE); } error = __vdev_disk_physio(vd->vd_bdev, zio, zio->io_data, - zio->io_size, zio->io_offset, flags); + zio->io_size, zio->io_offset, flags); if (error) { zio->io_error = error; - return ZIO_PIPELINE_CONTINUE; + return (ZIO_PIPELINE_CONTINUE); } - return ZIO_PIPELINE_STOP; + return (ZIO_PIPELINE_STOP); } static void @@ -720,7 +721,7 @@ vdev_disk_io_done(zio_t *zio) * removal of the device from the configuration. */ if (zio->io_error == EIO) { - vdev_t *v = zio->io_vd; + vdev_t *v = zio->io_vd; vdev_disk_t *vd = v->vdev_tsd; if (check_disk_change(vd->vd_bdev)) { @@ -787,19 +788,19 @@ vdev_disk_read_rootlabel(char *devpath, char *devid, nvlist_t **config) bdev = vdev_bdev_open(devpath, vdev_bdev_mode(FREAD), zfs_vdev_holder); if (IS_ERR(bdev)) - return -PTR_ERR(bdev); + return (-PTR_ERR(bdev)); s = bdev_capacity(bdev); if (s == 0) { vdev_bdev_close(bdev, vdev_bdev_mode(FREAD)); - return EIO; + return (EIO); } - size = P2ALIGN_TYPED(s, sizeof(vdev_label_t), uint64_t); - label = vmem_alloc(sizeof(vdev_label_t), KM_PUSHPAGE); + size = P2ALIGN_TYPED(s, sizeof (vdev_label_t), uint64_t); + label = vmem_alloc(sizeof (vdev_label_t), KM_PUSHPAGE); for (i = 0; i < VDEV_LABELS; i++) { - uint64_t offset, state, txg = 0; + uint64_t offset, state, txg = 0; /* read vdev label */ offset = vdev_label_offset(size, i, 0); @@ -830,10 +831,10 @@ vdev_disk_read_rootlabel(char *devpath, char *devid, nvlist_t **config) break; } - vmem_free(label, 
sizeof(vdev_label_t)); + vmem_free(label, sizeof (vdev_label_t)); vdev_bdev_close(bdev, vdev_bdev_mode(FREAD)); - return 0; + return (0); } module_param(zfs_vdev_scheduler, charp, 0644); diff --git a/module/zfs/vdev_label.c b/module/zfs/vdev_label.c index 568ae06b3..d5af110a5 100644 --- a/module/zfs/vdev_label.c +++ b/module/zfs/vdev_label.c @@ -1116,7 +1116,7 @@ vdev_label_sync(zio_t *zio, vdev_t *vd, int l, uint64_t txg, int flags) buf = vp->vp_nvlist; buflen = sizeof (vp->vp_nvlist); - if (nvlist_pack(label, &buf, &buflen, NV_ENCODE_XDR, KM_PUSHPAGE) == 0) { + if (!nvlist_pack(label, &buf, &buflen, NV_ENCODE_XDR, KM_PUSHPAGE)) { for (; l < VDEV_LABELS; l += 2) { vdev_label_write(zio, vd, l, vp, offsetof(vdev_label_t, vl_vdev_phys), diff --git a/module/zfs/vdev_queue.c b/module/zfs/vdev_queue.c index 2e1f098a1..0dc733efc 100644 --- a/module/zfs/vdev_queue.c +++ b/module/zfs/vdev_queue.c @@ -500,8 +500,10 @@ vdev_queue_aggregate(vdev_queue_t *vq, zio_t *zio) if (zio->io_flags & ZIO_FLAG_DONT_AGGREGATE) return (NULL); - /* Prevent users from setting the zfs_vdev_aggregation_limit - * tuning larger than SPA_MAXBLOCKSIZE. */ + /* + * Prevent users from setting the zfs_vdev_aggregation_limit + * tuning larger than SPA_MAXBLOCKSIZE. + */ zfs_vdev_aggregation_limit = MIN(zfs_vdev_aggregation_limit, SPA_MAXBLOCKSIZE); @@ -676,11 +678,11 @@ again: * For FIFO queues (sync), issue the i/o with the lowest timestamp. 
*/ vqc = &vq->vq_class[p]; - search = zio_buf_alloc(sizeof(*search)); + search = zio_buf_alloc(sizeof (*search)); search->io_timestamp = 0; search->io_offset = vq->vq_last_offset + 1; VERIFY3P(avl_find(&vqc->vqc_queued_tree, search, &idx), ==, NULL); - zio_buf_free(search, sizeof(*search)); + zio_buf_free(search, sizeof (*search)); zio = avl_nearest(&vqc->vqc_queued_tree, idx, AVL_AFTER); if (zio == NULL) zio = avl_first(&vqc->vqc_queued_tree); @@ -802,27 +804,27 @@ MODULE_PARM_DESC(zfs_vdev_max_active, "Maximum number of active I/Os per vdev"); module_param(zfs_vdev_async_write_active_max_dirty_percent, int, 0644); MODULE_PARM_DESC(zfs_vdev_async_write_active_max_dirty_percent, - "Async write concurrency max threshold"); + "Async write concurrency max threshold"); module_param(zfs_vdev_async_write_active_min_dirty_percent, int, 0644); MODULE_PARM_DESC(zfs_vdev_async_write_active_min_dirty_percent, - "Async write concurrency min threshold"); + "Async write concurrency min threshold"); module_param(zfs_vdev_async_read_max_active, int, 0644); MODULE_PARM_DESC(zfs_vdev_async_read_max_active, - "Max active async read I/Os per vdev"); + "Max active async read I/Os per vdev"); module_param(zfs_vdev_async_read_min_active, int, 0644); MODULE_PARM_DESC(zfs_vdev_async_read_min_active, - "Min active async read I/Os per vdev"); + "Min active async read I/Os per vdev"); module_param(zfs_vdev_async_write_max_active, int, 0644); MODULE_PARM_DESC(zfs_vdev_async_write_max_active, - "Max active async write I/Os per vdev"); + "Max active async write I/Os per vdev"); module_param(zfs_vdev_async_write_min_active, int, 0644); MODULE_PARM_DESC(zfs_vdev_async_write_min_active, - "Min active async write I/Os per vdev"); + "Min active async write I/Os per vdev"); module_param(zfs_vdev_scrub_max_active, int, 0644); MODULE_PARM_DESC(zfs_vdev_scrub_max_active, "Max active scrub I/Os per vdev"); @@ -832,17 +834,17 @@ MODULE_PARM_DESC(zfs_vdev_scrub_min_active, "Min active scrub I/Os per vdev"); 
module_param(zfs_vdev_sync_read_max_active, int, 0644); MODULE_PARM_DESC(zfs_vdev_sync_read_max_active, - "Max active sync read I/Os per vdev"); + "Max active sync read I/Os per vdev"); module_param(zfs_vdev_sync_read_min_active, int, 0644); MODULE_PARM_DESC(zfs_vdev_sync_read_min_active, - "Min active sync read I/Os per vdev"); + "Min active sync read I/Os per vdev"); module_param(zfs_vdev_sync_write_max_active, int, 0644); MODULE_PARM_DESC(zfs_vdev_sync_write_max_active, - "Max active sync write I/Os per vdev"); + "Max active sync write I/Os per vdev"); module_param(zfs_vdev_sync_write_min_active, int, 0644); MODULE_PARM_DESC(zfs_vdev_sync_write_min_active, - "Min active sync write I/Osper vdev"); + "Min active sync write I/Osper vdev"); #endif diff --git a/module/zfs/zap_micro.c b/module/zfs/zap_micro.c index 7aa00280b..555d52f4c 100644 --- a/module/zfs/zap_micro.c +++ b/module/zfs/zap_micro.c @@ -938,7 +938,8 @@ mzap_addent(zap_name_t *zn, uint64_t value) #ifdef ZFS_DEBUG for (i = 0; i < zap->zap_m.zap_num_chunks; i++) { - ASSERTV(mzap_ent_phys_t *mze=&zap->zap_m.zap_phys->mz_chunk[i]); + ASSERTV(mzap_ent_phys_t *mze); + ASSERT(mze = &zap->zap_m.zap_phys->mz_chunk[i]); ASSERT(strcmp(zn->zn_key_orig, mze->mze_name) != 0); } #endif diff --git a/module/zfs/zfeature.c b/module/zfs/zfeature.c index bf96461b5..ccd7cb92e 100644 --- a/module/zfs/zfeature.c +++ b/module/zfs/zfeature.c @@ -180,8 +180,8 @@ feature_is_supported(objset_t *os, uint64_t obj, uint64_t desc_obj, zap_attribute_t *za; char *buf; - zc = kmem_alloc(sizeof(zap_cursor_t), KM_SLEEP); - za = kmem_alloc(sizeof(zap_attribute_t), KM_SLEEP); + zc = kmem_alloc(sizeof (zap_cursor_t), KM_SLEEP); + za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP); buf = kmem_alloc(MAXPATHLEN, KM_SLEEP); supported = B_TRUE; @@ -215,8 +215,8 @@ feature_is_supported(objset_t *os, uint64_t obj, uint64_t desc_obj, zap_cursor_fini(zc); kmem_free(buf, MAXPATHLEN); - kmem_free(za, sizeof(zap_attribute_t)); - kmem_free(zc, 
sizeof(zap_cursor_t)); + kmem_free(za, sizeof (zap_attribute_t)); + kmem_free(zc, sizeof (zap_cursor_t)); return (supported); } diff --git a/module/zfs/zfs_acl.c b/module/zfs/zfs_acl.c index ce66dc01b..c5f76036f 100644 --- a/module/zfs/zfs_acl.c +++ b/module/zfs/zfs_acl.c @@ -1156,8 +1156,8 @@ zfs_acl_chown_setattr(znode_t *zp) int error; zfs_acl_t *aclp; - if (ZTOZSB(zp)->z_acl_type == ZFS_ACLTYPE_POSIXACL) - return 0; + if (ZTOZSB(zp)->z_acl_type == ZFS_ACLTYPE_POSIXACL) + return (0); ASSERT(MUTEX_HELD(&zp->z_lock)); ASSERT(MUTEX_HELD(&zp->z_acl_lock)); @@ -1165,6 +1165,7 @@ zfs_acl_chown_setattr(znode_t *zp) if ((error = zfs_acl_node_read(zp, B_TRUE, &aclp, B_FALSE)) == 0) zp->z_mode = zfs_mode_compute(zp->z_mode, aclp, &zp->z_pflags, zp->z_uid, zp->z_gid); + return (error); } @@ -2498,7 +2499,7 @@ zfs_zaccess(znode_t *zp, int mode, int flags, boolean_t skipaclchk, cred_t *cr) */ error = zfs_zget(ZTOZSB(zp), parent, &check_zp); if (error) - return (error); + return (error); rw_enter(&zp->z_xattr_lock, RW_WRITER); if (zp->z_xattr_parent == NULL) diff --git a/module/zfs/zfs_ctldir.c b/module/zfs/zfs_ctldir.c index 5bea0b6c9..96520545a 100644 --- a/module/zfs/zfs_ctldir.c +++ b/module/zfs/zfs_ctldir.c @@ -100,7 +100,7 @@ static taskq_t *zfs_expire_taskq; static zfs_snapentry_t * zfsctl_sep_alloc(void) { - return kmem_zalloc(sizeof (zfs_snapentry_t), KM_SLEEP); + return (kmem_zalloc(sizeof (zfs_snapentry_t), KM_SLEEP)); } void @@ -255,7 +255,6 @@ zfsctl_inode_lookup(zfs_sb_t *zsb, uint64_t id, void zfsctl_inode_destroy(struct inode *ip) { - return; } /* diff --git a/module/zfs/zfs_debug.c b/module/zfs/zfs_debug.c index 55a18e839..4f612e16b 100644 --- a/module/zfs/zfs_debug.c +++ b/module/zfs/zfs_debug.c @@ -97,7 +97,6 @@ zfs_dbgmsg_fini(void) mutex_destroy(&zfs_dbgmsgs_lock); ASSERT0(zfs_dbgmsg_size); #endif - return; } #if !defined(_KERNEL) || !defined(__linux__) diff --git a/module/zfs/zfs_ioctl.c b/module/zfs/zfs_ioctl.c index 924151480..bf212dee8 100644 --- 
a/module/zfs/zfs_ioctl.c +++ b/module/zfs/zfs_ioctl.c @@ -567,7 +567,7 @@ out_check: return (PRIV_POLICY(cr, needed_priv, B_FALSE, EPERM, NULL)); return (0); #else - return ENOTSUP; + return (ENOTSUP); #endif /* HAVE_MLSLABEL */ } @@ -4914,7 +4914,7 @@ zfs_ioc_events_clear(zfs_cmd_t *zc) zfs_zevent_drain_all(&count); zc->zc_cookie = count; - return 0; + return (0); } /* @@ -5424,17 +5424,20 @@ zfsdev_get_state_impl(minor_t minor, enum zfsdev_state_type which) ASSERT(MUTEX_HELD(&zfsdev_state_lock)); for (zs = list_head(&zfsdev_state_list); zs != NULL; - zs = list_next(&zfsdev_state_list, zs)) { + zs = list_next(&zfsdev_state_list, zs)) { if (zs->zs_minor == minor) { switch (which) { - case ZST_ONEXIT: return (zs->zs_onexit); - case ZST_ZEVENT: return (zs->zs_zevent); - case ZST_ALL: return (zs); + case ZST_ONEXIT: + return (zs->zs_onexit); + case ZST_ZEVENT: + return (zs->zs_zevent); + case ZST_ALL: + return (zs); } } } - return NULL; + return (NULL); } void * @@ -5446,7 +5449,7 @@ zfsdev_get_state(minor_t minor, enum zfsdev_state_type which) ptr = zfsdev_get_state_impl(minor, which); mutex_exit(&zfsdev_state_lock); - return ptr; + return (ptr); } minor_t @@ -5490,11 +5493,11 @@ zfsdev_state_init(struct file *filp) ASSERT(MUTEX_HELD(&zfsdev_state_lock)); - minor = zfsdev_minor_alloc(); - if (minor == 0) - return (SET_ERROR(ENXIO)); + minor = zfsdev_minor_alloc(); + if (minor == 0) + return (SET_ERROR(ENXIO)); - zs = kmem_zalloc( sizeof(zfsdev_state_t), KM_SLEEP); + zs = kmem_zalloc(sizeof (zfsdev_state_t), KM_SLEEP); zs->zs_file = filp; zs->zs_minor = minor; @@ -5521,9 +5524,9 @@ zfsdev_state_destroy(struct file *filp) zfs_zevent_destroy(zs->zs_zevent); list_remove(&zfsdev_state_list, zs); - kmem_free(zs, sizeof(zfsdev_state_t)); + kmem_free(zs, sizeof (zfsdev_state_t)); - return 0; + return (0); } static int @@ -5623,7 +5626,7 @@ zfsdev_ioctl(struct file *filp, unsigned cmd, unsigned long arg) goto out; /* legacy ioctls can modify zc_name */ - (void) 
strlcpy(saved_poolname, zc->zc_name, sizeof(saved_poolname)); + (void) strlcpy(saved_poolname, zc->zc_name, sizeof (saved_poolname)); len = strcspn(saved_poolname, "/@") + 1; saved_poolname[len] = '\0'; @@ -5702,24 +5705,24 @@ out: static long zfsdev_compat_ioctl(struct file *filp, unsigned cmd, unsigned long arg) { - return zfsdev_ioctl(filp, cmd, arg); + return (zfsdev_ioctl(filp, cmd, arg)); } #else -#define zfsdev_compat_ioctl NULL +#define zfsdev_compat_ioctl NULL #endif static const struct file_operations zfsdev_fops = { - .open = zfsdev_open, - .release = zfsdev_release, - .unlocked_ioctl = zfsdev_ioctl, - .compat_ioctl = zfsdev_compat_ioctl, - .owner = THIS_MODULE, + .open = zfsdev_open, + .release = zfsdev_release, + .unlocked_ioctl = zfsdev_ioctl, + .compat_ioctl = zfsdev_compat_ioctl, + .owner = THIS_MODULE, }; static struct miscdevice zfs_misc = { - .minor = MISC_DYNAMIC_MINOR, - .name = ZFS_DRIVER, - .fops = &zfsdev_fops, + .minor = MISC_DYNAMIC_MINOR, + .name = ZFS_DRIVER, + .fops = &zfsdev_fops, }; static int @@ -5732,7 +5735,7 @@ zfs_attach(void) offsetof(zfsdev_state_t, zs_next)); error = misc_register(&zfs_misc); - if (error != 0) { + if (error != 0) { printk(KERN_INFO "ZFS: misc_register() failed %d\n", error); return (error); } @@ -5761,9 +5764,9 @@ zfs_allow_log_destroy(void *arg) } #ifdef DEBUG -#define ZFS_DEBUG_STR " (DEBUG mode)" +#define ZFS_DEBUG_STR " (DEBUG mode)" #else -#define ZFS_DEBUG_STR "" +#define ZFS_DEBUG_STR "" #endif int @@ -5787,9 +5790,9 @@ _init(void) tsd_create(&zfs_allow_log_key, zfs_allow_log_destroy); printk(KERN_NOTICE "ZFS: Loaded module v%s-%s%s, " - "ZFS pool version %s, ZFS filesystem version %s\n", - ZFS_META_VERSION, ZFS_META_RELEASE, ZFS_DEBUG_STR, - SPA_VERSION_STRING, ZPL_VERSION_STRING); + "ZFS pool version %s, ZFS filesystem version %s\n", + ZFS_META_VERSION, ZFS_META_RELEASE, ZFS_DEBUG_STR, + SPA_VERSION_STRING, ZPL_VERSION_STRING); #ifndef CONFIG_FS_POSIX_ACL printk(KERN_NOTICE "ZFS: Posix ACLs disabled 
by kernel\n"); #endif /* CONFIG_FS_POSIX_ACL */ @@ -5802,8 +5805,8 @@ out1: zfs_fini(); spa_fini(); printk(KERN_NOTICE "ZFS: Failed to Load ZFS Filesystem v%s-%s%s" - ", rc = %d\n", ZFS_META_VERSION, ZFS_META_RELEASE, - ZFS_DEBUG_STR, error); + ", rc = %d\n", ZFS_META_VERSION, ZFS_META_RELEASE, + ZFS_DEBUG_STR, error); return (error); } @@ -5821,7 +5824,7 @@ _fini(void) tsd_destroy(&zfs_allow_log_key); printk(KERN_NOTICE "ZFS: Unloaded module v%s-%s%s\n", - ZFS_META_VERSION, ZFS_META_RELEASE, ZFS_DEBUG_STR); + ZFS_META_VERSION, ZFS_META_RELEASE, ZFS_DEBUG_STR); return (0); } diff --git a/module/zfs/zfs_rlock.c b/module/zfs/zfs_rlock.c index 898d8049c..2533ced64 100644 --- a/module/zfs/zfs_rlock.c +++ b/module/zfs/zfs_rlock.c @@ -550,7 +550,7 @@ zfs_range_unlock(rl_t *rl) ASSERT(rl->r_type == RL_WRITER || rl->r_type == RL_READER); ASSERT(rl->r_cnt == 1 || rl->r_cnt == 0); ASSERT(!rl->r_proxy); - list_create(&free_list, sizeof(rl_t), offsetof(rl_t, rl_node)); + list_create(&free_list, sizeof (rl_t), offsetof(rl_t, rl_node)); mutex_enter(&zp->z_range_lock); if (rl->r_type == RL_WRITER) { diff --git a/module/zfs/zfs_sa.c b/module/zfs/zfs_sa.c index df4ef3dc1..ebe92bb3a 100644 --- a/module/zfs/zfs_sa.c +++ b/module/zfs/zfs_sa.c @@ -310,7 +310,7 @@ zfs_sa_upgrade(sa_handle_t *hdl, dmu_tx_t *tx) } /* First do a bulk query of the attributes that aren't cached */ - bulk = kmem_alloc(sizeof(sa_bulk_attr_t) * 20, KM_SLEEP); + bulk = kmem_alloc(sizeof (sa_bulk_attr_t) * 20, KM_SLEEP); SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb), NULL, &mtime, 16); SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL, &ctime, 16); SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CRTIME(zsb), NULL, &crtime, 16); @@ -324,7 +324,7 @@ zfs_sa_upgrade(sa_handle_t *hdl, dmu_tx_t *tx) &znode_acl, 88); if (sa_bulk_lookup_locked(hdl, bulk, count) != 0) { - kmem_free(bulk, sizeof(sa_bulk_attr_t) * 20); + kmem_free(bulk, sizeof (sa_bulk_attr_t) * 20); goto done; } @@ -333,7 +333,7 @@ zfs_sa_upgrade(sa_handle_t 
*hdl, dmu_tx_t *tx) * it is such a way to pick up an already existing layout number */ count = 0; - sa_attrs = kmem_zalloc(sizeof(sa_bulk_attr_t) * 20, KM_SLEEP); + sa_attrs = kmem_zalloc(sizeof (sa_bulk_attr_t) * 20, KM_SLEEP); SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_MODE(zsb), NULL, &mode, 8); SA_ADD_BULK_ATTR(sa_attrs, count, SA_ZPL_SIZE(zsb), NULL, &zp->z_size, 8); @@ -390,8 +390,8 @@ zfs_sa_upgrade(sa_handle_t *hdl, dmu_tx_t *tx) znode_acl.z_acl_extern_obj, tx)); zp->z_is_sa = B_TRUE; - kmem_free(sa_attrs, sizeof(sa_bulk_attr_t) * 20); - kmem_free(bulk, sizeof(sa_bulk_attr_t) * 20); + kmem_free(sa_attrs, sizeof (sa_bulk_attr_t) * 20); + kmem_free(bulk, sizeof (sa_bulk_attr_t) * 20); done: if (drop_lock) mutex_exit(&zp->z_lock); diff --git a/module/zfs/zfs_vfsops.c b/module/zfs/zfs_vfsops.c index 2c0e923dc..c64854d7b 100644 --- a/module/zfs/zfs_vfsops.c +++ b/module/zfs/zfs_vfsops.c @@ -1249,10 +1249,12 @@ zfs_domount(struct super_block *sb, void *data, int silent) atime_changed_cb(zsb, B_FALSE); readonly_changed_cb(zsb, B_TRUE); - if ((error = dsl_prop_get_integer(osname,"xattr",&pval,NULL))) + if ((error = dsl_prop_get_integer(osname, + "xattr", &pval, NULL))) goto out; xattr_changed_cb(zsb, pval); - if ((error = dsl_prop_get_integer(osname,"acltype",&pval,NULL))) + if ((error = dsl_prop_get_integer(osname, + "acltype", &pval, NULL))) goto out; acltype_changed_cb(zsb, pval); zsb->z_issnap = B_TRUE; diff --git a/module/zfs/zfs_vnops.c b/module/zfs/zfs_vnops.c index 8e4694ff6..f56b52ace 100644 --- a/module/zfs/zfs_vnops.c +++ b/module/zfs/zfs_vnops.c @@ -2500,11 +2500,11 @@ zfs_setattr(struct inode *ip, vattr_t *vap, int flags, cred_t *cr) */ xoap = xva_getxoptattr(xvap); - tmpxvattr = kmem_alloc(sizeof(xvattr_t), KM_SLEEP); + tmpxvattr = kmem_alloc(sizeof (xvattr_t), KM_SLEEP); xva_init(tmpxvattr); - bulk = kmem_alloc(sizeof(sa_bulk_attr_t) * 7, KM_SLEEP); - xattr_bulk = kmem_alloc(sizeof(sa_bulk_attr_t) * 7, KM_SLEEP); + bulk = kmem_alloc(sizeof 
(sa_bulk_attr_t) * 7, KM_SLEEP); + xattr_bulk = kmem_alloc(sizeof (sa_bulk_attr_t) * 7, KM_SLEEP); /* * Immutable files can only alter immutable bit and atime @@ -2528,8 +2528,10 @@ zfs_setattr(struct inode *ip, vattr_t *vap, int flags, cred_t *cr) * once large timestamps are fully supported. */ if (mask & (ATTR_ATIME | ATTR_MTIME)) { - if (((mask & ATTR_ATIME) && TIMESPEC_OVERFLOW(&vap->va_atime)) || - ((mask & ATTR_MTIME) && TIMESPEC_OVERFLOW(&vap->va_mtime))) { + if (((mask & ATTR_ATIME) && + TIMESPEC_OVERFLOW(&vap->va_atime)) || + ((mask & ATTR_MTIME) && + TIMESPEC_OVERFLOW(&vap->va_mtime))) { err = EOVERFLOW; goto out3; } @@ -3040,9 +3042,9 @@ out2: zil_commit(zilog, 0); out3: - kmem_free(xattr_bulk, sizeof(sa_bulk_attr_t) * 7); - kmem_free(bulk, sizeof(sa_bulk_attr_t) * 7); - kmem_free(tmpxvattr, sizeof(xvattr_t)); + kmem_free(xattr_bulk, sizeof (sa_bulk_attr_t) * 7); + kmem_free(bulk, sizeof (sa_bulk_attr_t) * 7); + kmem_free(tmpxvattr, sizeof (xvattr_t)); ZFS_EXIT(zsb); return (err); } @@ -3877,9 +3879,9 @@ zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc) ASSERT(PageLocked(pp)); - pgoff = page_offset(pp); /* Page byte-offset in file */ - offset = i_size_read(ip); /* File length in bytes */ - pglen = MIN(PAGE_CACHE_SIZE, /* Page length in bytes */ + pgoff = page_offset(pp); /* Page byte-offset in file */ + offset = i_size_read(ip); /* File length in bytes */ + pglen = MIN(PAGE_CACHE_SIZE, /* Page length in bytes */ P2ROUNDUP(offset, PAGE_CACHE_SIZE)-pgoff); /* Page is beyond end of file */ @@ -4088,17 +4090,17 @@ EXPORT_SYMBOL(zfs_seek); static int zfs_fillpage(struct inode *ip, struct page *pl[], int nr_pages) { - znode_t *zp = ITOZ(ip); - zfs_sb_t *zsb = ITOZSB(ip); - objset_t *os; + znode_t *zp = ITOZ(ip); + zfs_sb_t *zsb = ITOZSB(ip); + objset_t *os; struct page *cur_pp; - u_offset_t io_off, total; - size_t io_len; - loff_t i_size; - unsigned page_idx; - int err; + u_offset_t io_off, total; + size_t io_len; + loff_t i_size; + 
unsigned page_idx; + int err; - os = zsb->z_os; + os = zsb->z_os; io_len = nr_pages << PAGE_CACHE_SHIFT; i_size = i_size_read(ip); io_off = page_offset(pl[0]); diff --git a/module/zfs/zfs_znode.c b/module/zfs/zfs_znode.c index f737af449..abf6222f2 100644 --- a/module/zfs/zfs_znode.c +++ b/module/zfs/zfs_znode.c @@ -440,7 +440,7 @@ zfs_znode_alloc(zfs_sb_t *zsb, dmu_buf_t *db, int blksz, error: unlock_new_inode(ip); iput(ip); - return NULL; + return (NULL); } /* @@ -647,7 +647,7 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr, * order for DMU_OT_ZNODE is critical since it needs to be constructed * in the old znode_phys_t format. Don't change this ordering */ - sa_attrs = kmem_alloc(sizeof(sa_bulk_attr_t) * ZPL_END, KM_PUSHPAGE); + sa_attrs = kmem_alloc(sizeof (sa_bulk_attr_t) * ZPL_END, KM_PUSHPAGE); if (obj_type == DMU_OT_ZNODE) { SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ATIME(zsb), @@ -749,7 +749,7 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr, err = zfs_aclset_common(*zpp, acl_ids->z_aclp, cr, tx); ASSERT0(err); } - kmem_free(sa_attrs, sizeof(sa_bulk_attr_t) * ZPL_END); + kmem_free(sa_attrs, sizeof (sa_bulk_attr_t) * ZPL_END); ZFS_OBJ_HOLD_EXIT(zsb, obj); } diff --git a/module/zfs/zil.c b/module/zfs/zil.c index 30035faa0..b69a7bf56 100644 --- a/module/zfs/zil.c +++ b/module/zfs/zil.c @@ -70,19 +70,19 @@ * See zil.h for more information about these fields. 
*/ zil_stats_t zil_stats = { - { "zil_commit_count", KSTAT_DATA_UINT64 }, - { "zil_commit_writer_count", KSTAT_DATA_UINT64 }, - { "zil_itx_count", KSTAT_DATA_UINT64 }, - { "zil_itx_indirect_count", KSTAT_DATA_UINT64 }, - { "zil_itx_indirect_bytes", KSTAT_DATA_UINT64 }, - { "zil_itx_copied_count", KSTAT_DATA_UINT64 }, - { "zil_itx_copied_bytes", KSTAT_DATA_UINT64 }, - { "zil_itx_needcopy_count", KSTAT_DATA_UINT64 }, - { "zil_itx_needcopy_bytes", KSTAT_DATA_UINT64 }, - { "zil_itx_metaslab_normal_count", KSTAT_DATA_UINT64 }, - { "zil_itx_metaslab_normal_bytes", KSTAT_DATA_UINT64 }, - { "zil_itx_metaslab_slog_count", KSTAT_DATA_UINT64 }, - { "zil_itx_metaslab_slog_bytes", KSTAT_DATA_UINT64 }, + { "zil_commit_count", KSTAT_DATA_UINT64 }, + { "zil_commit_writer_count", KSTAT_DATA_UINT64 }, + { "zil_itx_count", KSTAT_DATA_UINT64 }, + { "zil_itx_indirect_count", KSTAT_DATA_UINT64 }, + { "zil_itx_indirect_bytes", KSTAT_DATA_UINT64 }, + { "zil_itx_copied_count", KSTAT_DATA_UINT64 }, + { "zil_itx_copied_bytes", KSTAT_DATA_UINT64 }, + { "zil_itx_needcopy_count", KSTAT_DATA_UINT64 }, + { "zil_itx_needcopy_bytes", KSTAT_DATA_UINT64 }, + { "zil_itx_metaslab_normal_count", KSTAT_DATA_UINT64 }, + { "zil_itx_metaslab_normal_bytes", KSTAT_DATA_UINT64 }, + { "zil_itx_metaslab_slog_count", KSTAT_DATA_UINT64 }, + { "zil_itx_metaslab_slog_bytes", KSTAT_DATA_UINT64 }, }; static kstat_t *zil_ksp; @@ -319,7 +319,7 @@ zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func, char *lrbuf, *lrp; int error = 0; - bzero(&next_blk, sizeof(blkptr_t)); + bzero(&next_blk, sizeof (blkptr_t)); /* * Old logs didn't record the maximum zh_claim_lr_seq. 
@@ -1017,13 +1017,10 @@ zil_lwb_write_start(zilog_t *zilog, lwb_t *lwb) use_slog = USE_SLOG(zilog); error = zio_alloc_zil(spa, txg, bp, zil_blksz, USE_SLOG(zilog)); - if (use_slog) - { + if (use_slog) { ZIL_STAT_BUMP(zil_itx_metaslab_slog_count); ZIL_STAT_INCR(zil_itx_metaslab_slog_bytes, lwb->lwb_nused); - } - else - { + } else { ZIL_STAT_BUMP(zil_itx_metaslab_normal_count); ZIL_STAT_INCR(zil_itx_metaslab_normal_bytes, lwb->lwb_nused); } @@ -1134,12 +1131,14 @@ zil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb) dbuf = lr_buf + reclen; lrw->lr_common.lrc_reclen += dlen; ZIL_STAT_BUMP(zil_itx_needcopy_count); - ZIL_STAT_INCR(zil_itx_needcopy_bytes, lrw->lr_length); + ZIL_STAT_INCR(zil_itx_needcopy_bytes, + lrw->lr_length); } else { ASSERT(itx->itx_wr_state == WR_INDIRECT); dbuf = NULL; ZIL_STAT_BUMP(zil_itx_indirect_count); - ZIL_STAT_INCR(zil_itx_indirect_bytes, lrw->lr_length); + ZIL_STAT_INCR(zil_itx_indirect_bytes, + lrw->lr_length); } error = zilog->zl_get_data( itx->itx_private, lrw, dbuf, lwb->lwb_zio); @@ -1344,7 +1343,8 @@ zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx) } ASSERT(itxg->itxg_sod == 0); itxg->itxg_txg = txg; - itxs = itxg->itxg_itxs = kmem_zalloc(sizeof (itxs_t), KM_PUSHPAGE); + itxs = itxg->itxg_itxs = kmem_zalloc(sizeof (itxs_t), + KM_PUSHPAGE); list_create(&itxs->i_sync_list, sizeof (itx_t), offsetof(itx_t, itx_node)); @@ -1364,7 +1364,8 @@ zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx) ian = avl_find(t, &foid, &where); if (ian == NULL) { - ian = kmem_alloc(sizeof (itx_async_node_t), KM_PUSHPAGE); + ian = kmem_alloc(sizeof (itx_async_node_t), + KM_PUSHPAGE); list_create(&ian->ia_list, sizeof (itx_t), offsetof(itx_t, itx_node)); ian->ia_foid = foid; @@ -1539,7 +1540,7 @@ zil_commit_writer(zilog_t *zilog) DTRACE_PROBE1(zil__cw1, zilog_t *, zilog); for (itx = list_head(&zilog->zl_itx_commit_list); itx != NULL; - itx = list_next(&zilog->zl_itx_commit_list, itx)) { + itx = list_next(&zilog->zl_itx_commit_list, itx)) { txg 
= itx->itx_lr.lrc_txg; ASSERT(txg); @@ -1744,7 +1745,7 @@ zil_init(void) sizeof (struct lwb), 0, NULL, NULL, NULL, NULL, NULL, 0); zil_ksp = kstat_create("zfs", 0, "zil", "misc", - KSTAT_TYPE_NAMED, sizeof(zil_stats) / sizeof(kstat_named_t), + KSTAT_TYPE_NAMED, sizeof (zil_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL); if (zil_ksp != NULL) { diff --git a/module/zfs/zio.c b/module/zfs/zio.c index 7cc3d4c9a..97f25494c 100644 --- a/module/zfs/zio.c +++ b/module/zfs/zio.c @@ -132,7 +132,7 @@ zio_init(void) zio_cons, zio_dest, NULL, NULL, NULL, KMC_KMEM); zio_link_cache = kmem_cache_create("zio_link_cache", sizeof (zio_link_t), 0, NULL, NULL, NULL, NULL, NULL, KMC_KMEM); - zio_vdev_cache = kmem_cache_create("zio_vdev_cache", sizeof(vdev_io_t), + zio_vdev_cache = kmem_cache_create("zio_vdev_cache", sizeof (vdev_io_t), PAGESIZE, NULL, NULL, NULL, NULL, NULL, KMC_VMEM); /* @@ -1852,11 +1852,11 @@ static void zio_write_gang_member_ready(zio_t *zio) { zio_t *pio = zio_unique_parent(zio); - ASSERTV(zio_t *gio = zio->io_gang_leader;) dva_t *cdva = zio->io_bp->blk_dva; dva_t *pdva = pio->io_bp->blk_dva; uint64_t asize; int d; + ASSERTV(zio_t *gio = zio->io_gang_leader); if (BP_IS_HOLE(zio->io_bp)) return; @@ -2995,15 +2995,18 @@ zio_done(zio_t *zio) if (zio->io_bp != NULL) { ASSERT(zio->io_bp->blk_pad[0] == 0); ASSERT(zio->io_bp->blk_pad[1] == 0); - ASSERT(bcmp(zio->io_bp, &zio->io_bp_copy, sizeof (blkptr_t)) == 0 || + ASSERT(bcmp(zio->io_bp, &zio->io_bp_copy, + sizeof (blkptr_t)) == 0 || (zio->io_bp == zio_unique_parent(zio)->io_bp)); if (zio->io_type == ZIO_TYPE_WRITE && !BP_IS_HOLE(zio->io_bp) && zio->io_bp_override == NULL && !(zio->io_flags & ZIO_FLAG_IO_REPAIR)) { ASSERT(!BP_SHOULD_BYTESWAP(zio->io_bp)); - ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(zio->io_bp)); + ASSERT3U(zio->io_prop.zp_copies, <=, + BP_GET_NDVAS(zio->io_bp)); ASSERT(BP_COUNT_GANG(zio->io_bp) == 0 || - (BP_COUNT_GANG(zio->io_bp) == BP_GET_NDVAS(zio->io_bp))); + (BP_COUNT_GANG(zio->io_bp) 
== + BP_GET_NDVAS(zio->io_bp))); } if (zio->io_flags & ZIO_FLAG_NOPWRITE) VERIFY(BP_EQUAL(zio->io_bp, &zio->io_bp_orig)); @@ -3030,7 +3033,7 @@ zio_done(zio_t *zio) if (asize != zio->io_size) { abuf = zio_buf_alloc(asize); bcopy(zio->io_data, abuf, zio->io_size); - bzero(abuf + zio->io_size, asize - zio->io_size); + bzero(abuf+zio->io_size, asize-zio->io_size); } zio->io_cksum_report = zcr->zcr_next; @@ -3055,7 +3058,7 @@ zio_done(zio_t *zio) if (zio->io_delay >= MSEC_TO_TICK(zio_delay_max)) { if (zio->io_vd != NULL && !vdev_is_dead(zio->io_vd)) zfs_ereport_post(FM_EREPORT_ZFS_DELAY, zio->io_spa, - zio->io_vd, zio, 0, 0); + zio->io_vd, zio, 0, 0); } if (zio->io_error) { @@ -3078,8 +3081,8 @@ zio_done(zio_t *zio) * error and generate a logical data ereport. */ spa_log_error(zio->io_spa, zio); - zfs_ereport_post(FM_EREPORT_ZFS_DATA, zio->io_spa, NULL, zio, - 0, 0); + zfs_ereport_post(FM_EREPORT_ZFS_DATA, zio->io_spa, + NULL, zio, 0, 0); } } @@ -3355,13 +3358,13 @@ MODULE_PARM_DESC(zio_requeue_io_start_cut_in_line, "Prioritize requeued I/O"); module_param(zfs_sync_pass_deferred_free, int, 0644); MODULE_PARM_DESC(zfs_sync_pass_deferred_free, - "defer frees starting in this pass"); + "Defer frees starting in this pass"); module_param(zfs_sync_pass_dont_compress, int, 0644); MODULE_PARM_DESC(zfs_sync_pass_dont_compress, - "don't compress starting in this pass"); + "Don't compress starting in this pass"); module_param(zfs_sync_pass_rewrite, int, 0644); MODULE_PARM_DESC(zfs_sync_pass_rewrite, - "rewrite new bps starting in this pass"); + "Rewrite new bps starting in this pass"); #endif diff --git a/module/zfs/zpl_ctldir.c b/module/zfs/zpl_ctldir.c index 8afe8bfdb..9e587e3f0 100644 --- a/module/zfs/zpl_ctldir.c +++ b/module/zfs/zpl_ctldir.c @@ -43,7 +43,7 @@ zpl_common_open(struct inode *ip, struct file *filp) if (filp->f_mode & FMODE_WRITE) return (-EACCES); - return generic_file_open(ip, filp); + return (generic_file_open(ip, filp)); } /* @@ -129,12 +129,12 @@ 
zpl_root_lookup(struct inode *dip, struct dentry *dentry, unsigned int flags) if (error) { if (error == -ENOENT) - return d_splice_alias(NULL, dentry); + return (d_splice_alias(NULL, dentry)); else - return ERR_PTR(error); + return (ERR_PTR(error)); } - return d_splice_alias(ip, dentry); + return (d_splice_alias(ip, dentry)); } /* @@ -174,7 +174,7 @@ zpl_snapdir_automount(struct path *path) error = -zfsctl_mount_snapshot(path, 0); dentry->d_flags |= DCACHE_NEED_AUTOMOUNT; if (error) - return ERR_PTR(error); + return (ERR_PTR(error)); /* * Rather than returning the new vfsmount for the snapshot we must @@ -198,7 +198,7 @@ zpl_snapdir_revalidate(struct dentry *dentry, struct nameidata *i) zpl_snapdir_revalidate(struct dentry *dentry, unsigned int flags) #endif { - return 0; + return (0); } dentry_operations_t zpl_dops_snapdirs = { @@ -237,13 +237,13 @@ zpl_snapdir_lookup(struct inode *dip, struct dentry *dentry, crfree(cr); if (error && error != -ENOENT) - return ERR_PTR(error); + return (ERR_PTR(error)); ASSERT(error == 0 || ip == NULL); d_clear_d_op(dentry); d_set_d_op(dentry, &zpl_dops_snapdirs); - return d_splice_alias(ip, dentry); + return (d_splice_alias(ip, dentry)); } static int @@ -334,7 +334,7 @@ zpl_snapdir_mkdir(struct inode *dip, struct dentry *dentry, zpl_umode_t mode) int error; crhold(cr); - vap = kmem_zalloc(sizeof(vattr_t), KM_SLEEP); + vap = kmem_zalloc(sizeof (vattr_t), KM_SLEEP); zpl_vap_init(vap, dip, mode | S_IFDIR, cr); error = -zfsctl_snapdir_mkdir(dip, dname(dentry), vap, &ip, cr, 0); @@ -344,7 +344,7 @@ zpl_snapdir_mkdir(struct inode *dip, struct dentry *dentry, zpl_umode_t mode) d_instantiate(dentry, ip); } - kmem_free(vap, sizeof(vattr_t)); + kmem_free(vap, sizeof (vattr_t)); ASSERT3S(error, <=, 0); crfree(cr); @@ -423,12 +423,12 @@ zpl_shares_lookup(struct inode *dip, struct dentry *dentry, if (error) { if (error == -ENOENT) - return d_splice_alias(NULL, dentry); + return (d_splice_alias(NULL, dentry)); else - return ERR_PTR(error); + 
return (ERR_PTR(error)); } - return d_splice_alias(ip, dentry); + return (d_splice_alias(ip, dentry)); } static int diff --git a/module/zfs/zpl_export.c b/module/zfs/zpl_export.c index 94625e13c..ac9449433 100644 --- a/module/zfs/zpl_export.c +++ b/module/zfs/zpl_export.c @@ -45,7 +45,7 @@ zpl_encode_fh(struct dentry *dentry, __u32 *fh, int *max_len, int connectable) len_bytes = *max_len * sizeof (__u32); if (len_bytes < offsetof(fid_t, fid_data)) - return 255; + return (255); fid->fid_len = len_bytes - offsetof(fid_t, fid_data); @@ -76,7 +76,7 @@ zpl_dentry_obtain_alias(struct inode *ip) } #endif /* HAVE_D_OBTAIN_ALIAS */ - return result; + return (result); } static struct dentry * @@ -92,16 +92,16 @@ zpl_fh_to_dentry(struct super_block *sb, struct fid *fh, if (fh_type != FILEID_INO32_GEN || len_bytes < offsetof(fid_t, fid_data) || len_bytes < offsetof(fid_t, fid_data) + fid->fid_len) - return ERR_PTR(-EINVAL); + return (ERR_PTR(-EINVAL)); rc = zfs_vget(sb, &ip, fid); if (rc != 0) - return ERR_PTR(-rc); + return (ERR_PTR(-rc)); ASSERT((ip != NULL) && !IS_ERR(ip)); - return zpl_dentry_obtain_alias(ip); + return (zpl_dentry_obtain_alias(ip)); } static struct dentry * @@ -117,9 +117,9 @@ zpl_get_parent(struct dentry *child) ASSERT3S(error, <=, 0); if (error) - return ERR_PTR(error); + return (ERR_PTR(error)); - return zpl_dentry_obtain_alias(ip); + return (zpl_dentry_obtain_alias(ip)); } #ifdef HAVE_COMMIT_METADATA @@ -134,15 +134,15 @@ zpl_commit_metadata(struct inode *inode) crfree(cr); ASSERT3S(error, <=, 0); - return error; + return (error); } #endif /* HAVE_COMMIT_METADATA */ const struct export_operations zpl_export_operations = { - .encode_fh = zpl_encode_fh, - .fh_to_dentry = zpl_fh_to_dentry, - .get_parent = zpl_get_parent, + .encode_fh = zpl_encode_fh, + .fh_to_dentry = zpl_fh_to_dentry, + .get_parent = zpl_get_parent, #ifdef HAVE_COMMIT_METADATA - .commit_metadata= zpl_commit_metadata, + .commit_metadata = zpl_commit_metadata, #endif /* 
HAVE_COMMIT_METADATA */ }; diff --git a/module/zfs/zpl_file.c b/module/zfs/zpl_file.c index 690f93838..3737bb519 100644 --- a/module/zfs/zpl_file.c +++ b/module/zfs/zpl_file.c @@ -169,7 +169,7 @@ zpl_fsync(struct file *filp, loff_t start, loff_t end, int datasync) ssize_t zpl_read_common(struct inode *ip, const char *buf, size_t len, loff_t pos, - uio_seg_t segment, int flags, cred_t *cr) + uio_seg_t segment, int flags, cred_t *cr) { int error; ssize_t read; @@ -280,7 +280,7 @@ zpl_llseek(struct file *filp, loff_t offset, int whence) } #endif /* SEEK_HOLE && SEEK_DATA */ - return generic_file_llseek(filp, offset, whence); + return (generic_file_llseek(filp, offset, whence)); } /* @@ -381,7 +381,7 @@ zpl_readpage(struct file *filp, struct page *pp) } unlock_page(pp); - return error; + return (error); } /* @@ -536,7 +536,7 @@ zpl_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) static long zpl_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { - return zpl_ioctl(filp, cmd, arg); + return (zpl_ioctl(filp, cmd, arg)); } #endif /* CONFIG_COMPAT */ @@ -545,7 +545,7 @@ const struct address_space_operations zpl_address_space_operations = { .readpages = zpl_readpages, .readpage = zpl_readpage, .writepage = zpl_writepage, - .writepages = zpl_writepages, + .writepages = zpl_writepages, }; const struct file_operations zpl_file_operations = { @@ -557,11 +557,11 @@ const struct file_operations zpl_file_operations = { .mmap = zpl_mmap, .fsync = zpl_fsync, #ifdef HAVE_FILE_FALLOCATE - .fallocate = zpl_fallocate, + .fallocate = zpl_fallocate, #endif /* HAVE_FILE_FALLOCATE */ - .unlocked_ioctl = zpl_ioctl, + .unlocked_ioctl = zpl_ioctl, #ifdef CONFIG_COMPAT - .compat_ioctl = zpl_compat_ioctl, + .compat_ioctl = zpl_compat_ioctl, #endif }; diff --git a/module/zfs/zpl_inode.c b/module/zfs/zpl_inode.c index 8f0fdaffe..c009807cb 100644 --- a/module/zfs/zpl_inode.c +++ b/module/zfs/zpl_inode.c @@ -42,7 +42,7 @@ zpl_lookup(struct inode *dir, struct dentry 
*dentry, unsigned int flags) int error; if (dlen(dentry) > ZFS_MAXNAMELEN) - return ERR_PTR(-ENAMETOOLONG); + return (ERR_PTR(-ENAMETOOLONG)); crhold(cr); error = -zfs_lookup(dir, dname(dentry), &ip, 0, cr, NULL, NULL); @@ -58,12 +58,12 @@ zpl_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) if (error) { if (error == -ENOENT) - return d_splice_alias(NULL, dentry); + return (d_splice_alias(NULL, dentry)); else - return ERR_PTR(error); + return (ERR_PTR(error)); } - return d_splice_alias(ip, dentry); + return (d_splice_alias(ip, dentry)); } void @@ -97,7 +97,7 @@ zpl_create(struct inode *dir, struct dentry *dentry, zpl_umode_t mode, int error; crhold(cr); - vap = kmem_zalloc(sizeof(vattr_t), KM_SLEEP); + vap = kmem_zalloc(sizeof (vattr_t), KM_SLEEP); zpl_vap_init(vap, dir, mode, cr); error = -zfs_create(dir, dname(dentry), vap, 0, mode, &ip, cr, 0, NULL); @@ -107,7 +107,7 @@ zpl_create(struct inode *dir, struct dentry *dentry, zpl_umode_t mode, d_instantiate(dentry, ip); } - kmem_free(vap, sizeof(vattr_t)); + kmem_free(vap, sizeof (vattr_t)); crfree(cr); ASSERT3S(error, <=, 0); @@ -131,7 +131,7 @@ zpl_mknod(struct inode *dir, struct dentry *dentry, zpl_umode_t mode, ASSERT(rdev == 0); crhold(cr); - vap = kmem_zalloc(sizeof(vattr_t), KM_SLEEP); + vap = kmem_zalloc(sizeof (vattr_t), KM_SLEEP); zpl_vap_init(vap, dir, mode, cr); vap->va_rdev = rdev; @@ -142,7 +142,7 @@ zpl_mknod(struct inode *dir, struct dentry *dentry, zpl_umode_t mode, d_instantiate(dentry, ip); } - kmem_free(vap, sizeof(vattr_t)); + kmem_free(vap, sizeof (vattr_t)); crfree(cr); ASSERT3S(error, <=, 0); @@ -172,7 +172,7 @@ zpl_mkdir(struct inode *dir, struct dentry *dentry, zpl_umode_t mode) int error; crhold(cr); - vap = kmem_zalloc(sizeof(vattr_t), KM_SLEEP); + vap = kmem_zalloc(sizeof (vattr_t), KM_SLEEP); zpl_vap_init(vap, dir, mode | S_IFDIR, cr); error = -zfs_mkdir(dir, dname(dentry), vap, &ip, cr, 0, NULL); @@ -182,7 +182,7 @@ zpl_mkdir(struct inode *dir, struct dentry 
*dentry, zpl_umode_t mode) d_instantiate(dentry, ip); } - kmem_free(vap, sizeof(vattr_t)); + kmem_free(vap, sizeof (vattr_t)); crfree(cr); ASSERT3S(error, <=, 0); @@ -239,7 +239,7 @@ zpl_setattr(struct dentry *dentry, struct iattr *ia) return (error); crhold(cr); - vap = kmem_zalloc(sizeof(vattr_t), KM_SLEEP); + vap = kmem_zalloc(sizeof (vattr_t), KM_SLEEP); vap->va_mask = ia->ia_valid & ATTR_IATTR_MASK; vap->va_mode = ia->ia_mode; vap->va_uid = KUID_TO_SUID(ia->ia_uid); @@ -253,7 +253,7 @@ zpl_setattr(struct dentry *dentry, struct iattr *ia) if (!error && (ia->ia_valid & ATTR_MODE)) error = zpl_chmod_acl(ip); - kmem_free(vap, sizeof(vattr_t)); + kmem_free(vap, sizeof (vattr_t)); crfree(cr); ASSERT3S(error, <=, 0); @@ -284,7 +284,7 @@ zpl_symlink(struct inode *dir, struct dentry *dentry, const char *name) int error; crhold(cr); - vap = kmem_zalloc(sizeof(vattr_t), KM_SLEEP); + vap = kmem_zalloc(sizeof (vattr_t), KM_SLEEP); zpl_vap_init(vap, dir, S_IFLNK | S_IRWXUGO, cr); error = -zfs_symlink(dir, dname(dentry), vap, (char *)name, &ip, cr, 0); @@ -293,7 +293,7 @@ zpl_symlink(struct inode *dir, struct dentry *dentry, const char *name) d_instantiate(dentry, ip); } - kmem_free(vap, sizeof(vattr_t)); + kmem_free(vap, sizeof (vattr_t)); crfree(cr); ASSERT3S(error, <=, 0); @@ -349,7 +349,7 @@ zpl_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) int error; if (ip->i_nlink >= ZFS_LINK_MAX) - return -EMLINK; + return (-EMLINK); crhold(cr); ip->i_ctime = CURRENT_TIME_SEC; @@ -371,7 +371,7 @@ out: #ifdef HAVE_INODE_TRUNCATE_RANGE static void -zpl_truncate_range(struct inode* ip, loff_t start, loff_t end) +zpl_truncate_range(struct inode *ip, loff_t start, loff_t end) { cred_t *cr = CRED(); flock64_t bf; @@ -402,7 +402,7 @@ zpl_truncate_range(struct inode* ip, loff_t start, loff_t end) static long zpl_fallocate(struct inode *ip, int mode, loff_t offset, loff_t len) { - return zpl_fallocate_common(ip, mode, offset, len); + return 
(zpl_fallocate_common(ip, mode, offset, len)); } #endif /* HAVE_INODE_FALLOCATE */ diff --git a/module/zfs/zpl_super.c b/module/zfs/zpl_super.c index 92779bcab..b4e7b6ed0 100644 --- a/module/zfs/zpl_super.c +++ b/module/zfs/zpl_super.c @@ -44,7 +44,7 @@ zpl_inode_alloc(struct super_block *sb) static void zpl_inode_destroy(struct inode *ip) { - ASSERT(atomic_read(&ip->i_count) == 0); + ASSERT(atomic_read(&ip->i_count) == 0); zfs_inode_destroy(ip); } @@ -216,13 +216,13 @@ __zpl_show_options(struct seq_file *seq, zfs_sb_t *zsb) static int zpl_show_options(struct seq_file *seq, struct dentry *root) { - return __zpl_show_options(seq, root->d_sb->s_fs_info); + return (__zpl_show_options(seq, root->d_sb->s_fs_info)); } #else static int zpl_show_options(struct seq_file *seq, struct vfsmount *vfsp) { - return __zpl_show_options(seq, vfsp->mnt_sb->s_fs_info); + return (__zpl_show_options(seq, vfsp->mnt_sb->s_fs_info)); } #endif /* HAVE_SHOW_OPTIONS_WITH_DENTRY */ @@ -244,7 +244,7 @@ zpl_mount(struct file_system_type *fs_type, int flags, { zpl_mount_data_t zmd = { osname, data }; - return mount_nodev(fs_type, flags, &zmd, zpl_fill_super); + return (mount_nodev(fs_type, flags, &zmd, zpl_fill_super)); } #else static int @@ -253,7 +253,7 @@ zpl_get_sb(struct file_system_type *fs_type, int flags, { zpl_mount_data_t zmd = { osname, data }; - return get_sb_nodev(fs_type, flags, &zmd, zpl_fill_super, mnt); + return (get_sb_nodev(fs_type, flags, &zmd, zpl_fill_super, mnt)); } #endif /* HAVE_MOUNT_NODEV */ @@ -287,14 +287,12 @@ zpl_prune_sb(struct super_block *sb, void *arg) error = -zfs_sb_prune(sb, *(unsigned long *)arg, &objects); ASSERT3S(error, <=, 0); - - return; } void zpl_prune_sbs(int64_t bytes_to_scan, void *private) { - unsigned long nr_to_scan = (bytes_to_scan / sizeof(znode_t)); + unsigned long nr_to_scan = (bytes_to_scan / sizeof (znode_t)); iterate_supers_type(&zpl_fs_type, zpl_prune_sb, &nr_to_scan); kmem_reap(); @@ -311,11 +309,11 @@ zpl_prune_sbs(int64_t 
bytes_to_scan, void *private) void zpl_prune_sbs(int64_t bytes_to_scan, void *private) { - unsigned long nr_to_scan = (bytes_to_scan / sizeof(znode_t)); + unsigned long nr_to_scan = (bytes_to_scan / sizeof (znode_t)); - shrink_dcache_memory(nr_to_scan, GFP_KERNEL); - shrink_icache_memory(nr_to_scan, GFP_KERNEL); - kmem_reap(); + shrink_dcache_memory(nr_to_scan, GFP_KERNEL); + shrink_icache_memory(nr_to_scan, GFP_KERNEL); + kmem_reap(); } #endif /* HAVE_SHRINK */ @@ -344,7 +342,7 @@ zpl_nr_cached_objects(struct super_block *sb) static void zpl_free_cached_objects(struct super_block *sb, int nr_to_scan) { - arc_adjust_meta(nr_to_scan * sizeof(znode_t), B_FALSE); + arc_adjust_meta(nr_to_scan * sizeof (znode_t), B_FALSE); } #endif /* HAVE_FREE_CACHED_OBJECTS */ diff --git a/module/zfs/zpl_xattr.c b/module/zfs/zpl_xattr.c index 8ee3d2fb5..9334ae7d1 100644 --- a/module/zfs/zpl_xattr.c +++ b/module/zfs/zpl_xattr.c @@ -94,11 +94,11 @@ typedef struct xattr_filldir { static int zpl_xattr_filldir(xattr_filldir_t *xf, const char *name, int name_len) { - if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) + if (strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) == 0) if (!(ITOZSB(xf->inode)->z_flags & ZSB_XATTR)) return (0); - if (!strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN)) + if (strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) == 0) if (!capable(CAP_SYS_ADMIN)) return (0); @@ -194,7 +194,7 @@ zpl_xattr_list_sa(xattr_filldir_t *xf) ASSERT3U(nvpair_type(nvp), ==, DATA_TYPE_BYTE_ARRAY); error = zpl_xattr_filldir(xf, nvpair_name(nvp), - strlen(nvpair_name(nvp))); + strlen(nvpair_name(nvp))); if (error) return (error); } @@ -389,7 +389,7 @@ zpl_xattr_set_dir(struct inode *ip, const char *name, const void *value, /* Lookup failed create a new xattr. 
*/ if (xip == NULL) { - vap = kmem_zalloc(sizeof(vattr_t), KM_SLEEP); + vap = kmem_zalloc(sizeof (vattr_t), KM_SLEEP); vap->va_mode = xattr_mode; vap->va_mask = ATTR_MODE; vap->va_uid = crgetfsuid(cr); @@ -413,7 +413,7 @@ zpl_xattr_set_dir(struct inode *ip, const char *name, const void *value, out: if (vap) - kmem_free(vap, sizeof(vattr_t)); + kmem_free(vap, sizeof (vattr_t)); if (xip) iput(xip); @@ -534,10 +534,10 @@ __zpl_xattr_user_get(struct inode *ip, const char *name, int error; if (strcmp(name, "") == 0) - return -EINVAL; + return (-EINVAL); if (!(ITOZSB(ip)->z_flags & ZSB_XATTR)) - return -EOPNOTSUPP; + return (-EOPNOTSUPP); xattr_name = kmem_asprintf("%s%s", XATTR_USER_PREFIX, name); error = zpl_xattr_get(ip, xattr_name, value, size); @@ -555,10 +555,10 @@ __zpl_xattr_user_set(struct inode *ip, const char *name, int error; if (strcmp(name, "") == 0) - return -EINVAL; + return (-EINVAL); if (!(ITOZSB(ip)->z_flags & ZSB_XATTR)) - return -EOPNOTSUPP; + return (-EOPNOTSUPP); xattr_name = kmem_asprintf("%s%s", XATTR_USER_PREFIX, name); error = zpl_xattr_set(ip, xattr_name, value, size, flags); @@ -582,10 +582,10 @@ __zpl_xattr_trusted_get(struct inode *ip, const char *name, int error; if (!capable(CAP_SYS_ADMIN)) - return -EACCES; + return (-EACCES); if (strcmp(name, "") == 0) - return -EINVAL; + return (-EINVAL); xattr_name = kmem_asprintf("%s%s", XATTR_TRUSTED_PREFIX, name); error = zpl_xattr_get(ip, xattr_name, value, size); @@ -603,10 +603,10 @@ __zpl_xattr_trusted_set(struct inode *ip, const char *name, int error; if (!capable(CAP_SYS_ADMIN)) - return -EACCES; + return (-EACCES); if (strcmp(name, "") == 0) - return -EINVAL; + return (-EINVAL); xattr_name = kmem_asprintf("%s%s", XATTR_TRUSTED_PREFIX, name); error = zpl_xattr_set(ip, xattr_name, value, size, flags); @@ -630,7 +630,7 @@ __zpl_xattr_security_get(struct inode *ip, const char *name, int error; if (strcmp(name, "") == 0) - return -EINVAL; + return (-EINVAL); xattr_name = kmem_asprintf("%s%s", 
XATTR_SECURITY_PREFIX, name); error = zpl_xattr_get(ip, xattr_name, value, size); @@ -648,7 +648,7 @@ __zpl_xattr_security_set(struct inode *ip, const char *name, int error; if (strcmp(name, "") == 0) - return -EINVAL; + return (-EINVAL); xattr_name = kmem_asprintf("%s%s", XATTR_SECURITY_PREFIX, name); error = zpl_xattr_set(ip, xattr_name, value, size, flags); @@ -696,10 +696,11 @@ zpl_xattr_security_init(struct inode *ip, struct inode *dip, char *name; error = zpl_security_inode_init_security(ip, dip, qstr, - &name, &value, &len); + &name, &value, &len); if (error) { if (error == -EOPNOTSUPP) - return 0; + return (0); + return (error); } @@ -731,7 +732,7 @@ zpl_set_acl(struct inode *ip, int type, struct posix_acl *acl) if (S_ISLNK(ip->i_mode)) return (-EOPNOTSUPP); - switch(type) { + switch (type) { case ACL_TYPE_ACCESS: name = POSIX_ACL_XATTR_ACCESS; if (acl) { @@ -816,7 +817,7 @@ zpl_get_acl(struct inode *ip, int type) name = POSIX_ACL_XATTR_DEFAULT; break; default: - return ERR_PTR(-EINVAL); + return (ERR_PTR(-EINVAL)); } size = zpl_xattr_get(ip, name, NULL, 0); @@ -866,25 +867,25 @@ __zpl_check_acl(struct inode *ip, int mask) int zpl_check_acl(struct inode *ip, int mask, unsigned int flags) { - return __zpl_check_acl(ip, mask); + return (__zpl_check_acl(ip, mask)); } #elif defined(HAVE_CHECK_ACL) int zpl_check_acl(struct inode *ip, int mask) { - return __zpl_check_acl(ip , mask); + return (__zpl_check_acl(ip, mask)); } #elif defined(HAVE_PERMISSION_WITH_NAMEIDATA) int zpl_permission(struct inode *ip, int mask, struct nameidata *nd) { - return generic_permission(ip, mask, __zpl_check_acl); + return (generic_permission(ip, mask, __zpl_check_acl)); } #elif defined(HAVE_PERMISSION) int zpl_permission(struct inode *ip, int mask) { - return generic_permission(ip, mask, __zpl_check_acl); + return (generic_permission(ip, mask, __zpl_check_acl)); } #endif /* HAVE_CHECK_ACL | HAVE_PERMISSION */ #endif /* !HAVE_GET_ACL */ @@ -923,7 +924,7 @@ zpl_init_acl(struct inode 
*ip, struct inode *dir) } mode = ip->i_mode; - error = posix_acl_create(&acl,GFP_KERNEL, &mode); + error = posix_acl_create(&acl, GFP_KERNEL, &mode); if (error >= 0) { ip->i_mode = mode; mark_inode_dirty(ip); @@ -953,9 +954,9 @@ zpl_chmod_acl(struct inode *ip) if (IS_ERR(acl) || !acl) return (PTR_ERR(acl)); - error = posix_acl_chmod(&acl,GFP_KERNEL, ip->i_mode); + error = posix_acl_chmod(&acl, GFP_KERNEL, ip->i_mode); if (!error) - error = zpl_set_acl(ip,ACL_TYPE_ACCESS, acl); + error = zpl_set_acl(ip, ACL_TYPE_ACCESS, acl); zpl_posix_acl_release(acl); @@ -975,11 +976,11 @@ zpl_xattr_acl_list(struct inode *ip, char *list, size_t list_size, switch (type) { case ACL_TYPE_ACCESS: xattr_name = POSIX_ACL_XATTR_ACCESS; - xattr_size = sizeof(xattr_name); + xattr_size = sizeof (xattr_name); break; case ACL_TYPE_DEFAULT: xattr_name = POSIX_ACL_XATTR_DEFAULT; - xattr_size = sizeof(xattr_name); + xattr_size = sizeof (xattr_name); break; default: return (0); @@ -1060,7 +1061,7 @@ zpl_xattr_acl_get_access(struct dentry *dentry, const char *name, void *buffer, size_t size, int type) { ASSERT3S(type, ==, ACL_TYPE_ACCESS); - return zpl_xattr_acl_get(dentry->d_inode, name, buffer, size, type); + return (zpl_xattr_acl_get(dentry->d_inode, name, buffer, size, type)); } static int @@ -1068,7 +1069,7 @@ zpl_xattr_acl_get_default(struct dentry *dentry, const char *name, void *buffer, size_t size, int type) { ASSERT3S(type, ==, ACL_TYPE_DEFAULT); - return zpl_xattr_acl_get(dentry->d_inode, name, buffer, size, type); + return (zpl_xattr_acl_get(dentry->d_inode, name, buffer, size, type)); } #else @@ -1077,14 +1078,14 @@ static int zpl_xattr_acl_get_access(struct inode *ip, const char *name, void *buffer, size_t size) { - return zpl_xattr_acl_get(ip, name, buffer, size, ACL_TYPE_ACCESS); + return (zpl_xattr_acl_get(ip, name, buffer, size, ACL_TYPE_ACCESS)); } static int zpl_xattr_acl_get_default(struct inode *ip, const char *name, void *buffer, size_t size) { - return zpl_xattr_acl_get(ip, 
name, buffer, size, ACL_TYPE_DEFAULT); + return (zpl_xattr_acl_get(ip, name, buffer, size, ACL_TYPE_DEFAULT)); } #endif /* HAVE_DENTRY_XATTR_GET */ @@ -1130,17 +1131,17 @@ static int zpl_xattr_acl_set_access(struct dentry *dentry, const char *name, const void *value, size_t size, int flags, int type) { - ASSERT3S(type, ==, ACL_TYPE_ACCESS); - return zpl_xattr_acl_set(dentry->d_inode, - name, value, size, flags, type); + ASSERT3S(type, ==, ACL_TYPE_ACCESS); + return (zpl_xattr_acl_set(dentry->d_inode, + name, value, size, flags, type)); } static int zpl_xattr_acl_set_default(struct dentry *dentry, const char *name, - const void *value, size_t size,int flags, int type) + const void *value, size_t size, int flags, int type) { - ASSERT3S(type, ==, ACL_TYPE_DEFAULT); - return zpl_xattr_acl_set(dentry->d_inode, + ASSERT3S(type, ==, ACL_TYPE_DEFAULT); + return zpl_xattr_acl_set(dentry->d_inode, name, value, size, flags, type); } @@ -1150,7 +1151,7 @@ static int zpl_xattr_acl_set_access(struct inode *ip, const char *name, const void *value, size_t size, int flags) { - return zpl_xattr_acl_set(ip, + return zpl_xattr_acl_set(ip, name, value, size, flags, ACL_TYPE_ACCESS); } @@ -1158,7 +1159,7 @@ static int zpl_xattr_acl_set_default(struct inode *ip, const char *name, const void *value, size_t size, int flags) { - return zpl_xattr_acl_set(ip, + return zpl_xattr_acl_set(ip, name, value, size, flags, ACL_TYPE_DEFAULT); } #endif /* HAVE_DENTRY_XATTR_SET */ |