summaryrefslogtreecommitdiffstats
path: root/module
diff options
context:
space:
mode:
Diffstat (limited to 'module')
-rw-r--r--module/icp/algs/aes/aes_impl.c13
-rw-r--r--module/icp/algs/modes/gcm.c13
-rw-r--r--module/icp/core/kcf_prov_tabs.c5
-rw-r--r--module/icp/core/kcf_sched.c2
-rw-r--r--module/icp/os/modconf.c4
-rw-r--r--module/icp/spi/kcf_spi.c15
-rw-r--r--module/zcommon/zfs_fletcher.c10
-rw-r--r--module/zcommon/zfs_uio.c4
-rw-r--r--module/zfs/abd.c7
-rw-r--r--module/zfs/arc.c5
-rw-r--r--module/zfs/dbuf.c25
-rw-r--r--module/zfs/dmu_send.c2
-rw-r--r--module/zfs/dmu_zfetch.c2
-rw-r--r--module/zfs/dnode.c2
-rw-r--r--module/zfs/dsl_pool.c2
-rw-r--r--module/zfs/dsl_scan.c3
-rw-r--r--module/zfs/fm.c2
-rw-r--r--module/zfs/gzip.c2
-rw-r--r--module/zfs/lz4.c2
-rw-r--r--module/zfs/metaslab.c27
-rw-r--r--module/zfs/spa.c1
-rw-r--r--module/zfs/spa_config.c2
-rw-r--r--module/zfs/spa_misc.c3
-rw-r--r--module/zfs/vdev_disk.c4
-rw-r--r--module/zfs/vdev_queue.c10
-rw-r--r--module/zfs/vdev_raidz_math.c30
-rw-r--r--module/zfs/vdev_raidz_math_avx512bw.c46
-rw-r--r--module/zfs/vdev_raidz_math_avx512f.c4
-rw-r--r--module/zfs/vdev_raidz_math_impl.h112
-rw-r--r--module/zfs/zfs_ctldir.c2
-rw-r--r--module/zfs/zfs_debug.c2
-rw-r--r--module/zfs/zfs_ioctl.c12
-rw-r--r--module/zfs/zfs_vfsops.c5
-rw-r--r--module/zfs/zfs_vnops.c3
-rw-r--r--module/zfs/zfs_znode.c5
-rw-r--r--module/zfs/zil.c1
-rw-r--r--module/zfs/zio.c6
-rw-r--r--module/zfs/zpl_export.c1
-rw-r--r--module/zfs/zpl_inode.c5
-rw-r--r--module/zfs/zvol.c14
-rw-r--r--module/zpios/pios.c30
41 files changed, 231 insertions, 214 deletions
diff --git a/module/icp/algs/aes/aes_impl.c b/module/icp/algs/aes/aes_impl.c
index 9c53964f0..a68a02cdf 100644
--- a/module/icp/algs/aes/aes_impl.c
+++ b/module/icp/algs/aes/aes_impl.c
@@ -1593,18 +1593,17 @@ intel_aes_instructions_present(void)
: "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
: "a"(func), "c"(subfunc));
- if (memcmp((char *) (&ebx), "Genu", 4) == 0 &&
- memcmp((char *) (&edx), "ineI", 4) == 0 &&
- memcmp((char *) (&ecx), "ntel", 4) == 0) {
-
+ if (memcmp((char *)(&ebx), "Genu", 4) == 0 &&
+ memcmp((char *)(&edx), "ineI", 4) == 0 &&
+ memcmp((char *)(&ecx), "ntel", 4) == 0) {
func = 1;
subfunc = 0;
/* check for aes-ni instruction set */
__asm__ __volatile__(
- "cpuid"
- : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
- : "a"(func), "c"(subfunc));
+ "cpuid"
+ : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
+ : "a"(func), "c"(subfunc));
cached_result = !!(ecx & INTEL_AESNI_FLAG);
} else {
diff --git a/module/icp/algs/modes/gcm.c b/module/icp/algs/modes/gcm.c
index 9cd8ab1e9..80acb6636 100644
--- a/module/icp/algs/modes/gcm.c
+++ b/module/icp/algs/modes/gcm.c
@@ -723,18 +723,17 @@ intel_pclmulqdq_instruction_present(void)
: "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
: "a"(func), "c"(subfunc));
- if (memcmp((char *) (&ebx), "Genu", 4) == 0 &&
- memcmp((char *) (&edx), "ineI", 4) == 0 &&
- memcmp((char *) (&ecx), "ntel", 4) == 0) {
-
+ if (memcmp((char *)(&ebx), "Genu", 4) == 0 &&
+ memcmp((char *)(&edx), "ineI", 4) == 0 &&
+ memcmp((char *)(&ecx), "ntel", 4) == 0) {
func = 1;
subfunc = 0;
/* check for aes-ni instruction set */
__asm__ __volatile__(
- "cpuid"
- : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
- : "a"(func), "c"(subfunc));
+ "cpuid"
+ : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
+ : "a"(func), "c"(subfunc));
cached_result = !!(ecx & INTEL_PCLMULQDQ_FLAG);
} else {
diff --git a/module/icp/core/kcf_prov_tabs.c b/module/icp/core/kcf_prov_tabs.c
index dca0fc103..c29832046 100644
--- a/module/icp/core/kcf_prov_tabs.c
+++ b/module/icp/core/kcf_prov_tabs.c
@@ -67,8 +67,9 @@ static uint_t prov_tab_max = KCF_MAX_PROVIDERS;
void
kcf_prov_tab_destroy(void)
{
- if (prov_tab) kmem_free(prov_tab, prov_tab_max *
- sizeof (kcf_provider_desc_t *));
+ if (prov_tab)
+ kmem_free(prov_tab, prov_tab_max *
+ sizeof (kcf_provider_desc_t *));
}
/*
diff --git a/module/icp/core/kcf_sched.c b/module/icp/core/kcf_sched.c
index 4161bb7e3..90136c421 100644
--- a/module/icp/core/kcf_sched.c
+++ b/module/icp/core/kcf_sched.c
@@ -1062,7 +1062,7 @@ kcf_sched_destroy(void)
for (i = 0; i < REQID_TABLES; i++) {
if (kcf_reqid_table[i])
kmem_free(kcf_reqid_table[i],
- sizeof (kcf_reqid_table_t));
+ sizeof (kcf_reqid_table_t));
}
if (gswq)
diff --git a/module/icp/os/modconf.c b/module/icp/os/modconf.c
index 32b46b5ff..eb50767b7 100644
--- a/module/icp/os/modconf.c
+++ b/module/icp/os/modconf.c
@@ -71,7 +71,7 @@ mod_install(struct modlinkage *modlp)
if (modlp->ml_rev != MODREV_1) {
cmn_err(CE_WARN, "mod_install: "
- "modlinkage structure is not MODREV_1\n");
+ "modlinkage structure is not MODREV_1\n");
return (EINVAL);
}
linkpp = (struct modlmisc **)&modlp->ml_linkage[0];
@@ -168,4 +168,4 @@ mod_info(struct modlinkage *modlp, struct modinfo *modinfop)
if (retval == 0)
return (1);
return (0);
-} \ No newline at end of file
+}
diff --git a/module/icp/spi/kcf_spi.c b/module/icp/spi/kcf_spi.c
index 8bd86b0a3..c2c2b54bc 100644
--- a/module/icp/spi/kcf_spi.c
+++ b/module/icp/spi/kcf_spi.c
@@ -701,16 +701,13 @@ kcf_prov_kstat_update(kstat_t *ksp, int rw)
ks_data = ksp->ks_data;
- ks_data->ps_ops_total.value.ui64 =
- pd->pd_sched_info.ks_ndispatches;
- ks_data->ps_ops_failed.value.ui64 =
- pd->pd_sched_info.ks_nfails;
- ks_data->ps_ops_busy_rval.value.ui64 =
- pd->pd_sched_info.ks_nbusy_rval;
+ ks_data->ps_ops_total.value.ui64 = pd->pd_sched_info.ks_ndispatches;
+ ks_data->ps_ops_failed.value.ui64 = pd->pd_sched_info.ks_nfails;
+ ks_data->ps_ops_busy_rval.value.ui64 = pd->pd_sched_info.ks_nbusy_rval;
ks_data->ps_ops_passed.value.ui64 =
- pd->pd_sched_info.ks_ndispatches -
- pd->pd_sched_info.ks_nfails -
- pd->pd_sched_info.ks_nbusy_rval;
+ pd->pd_sched_info.ks_ndispatches -
+ pd->pd_sched_info.ks_nfails -
+ pd->pd_sched_info.ks_nbusy_rval;
return (0);
}
diff --git a/module/zcommon/zfs_fletcher.c b/module/zcommon/zfs_fletcher.c
index fb0a14991..1d1ea2e24 100644
--- a/module/zcommon/zfs_fletcher.c
+++ b/module/zcommon/zfs_fletcher.c
@@ -608,7 +608,7 @@ fletcher_4_kstat_data(char *buf, size_t size, void *data)
{
struct fletcher_4_kstat *fastest_stat =
&fletcher_4_stat_data[fletcher_4_supp_impls_cnt];
- struct fletcher_4_kstat *curr_stat = (struct fletcher_4_kstat *) data;
+ struct fletcher_4_kstat *curr_stat = (struct fletcher_4_kstat *)data;
ssize_t off = 0;
if (curr_stat == fastest_stat) {
@@ -623,9 +623,9 @@ fletcher_4_kstat_data(char *buf, size_t size, void *data)
off += snprintf(buf + off, size - off, "%-17s",
fletcher_4_supp_impls[id]->name);
off += snprintf(buf + off, size - off, "%-15llu",
- (u_longlong_t) curr_stat->native);
+ (u_longlong_t)curr_stat->native);
off += snprintf(buf + off, size - off, "%-15llu\n",
- (u_longlong_t) curr_stat->byteswap);
+ (u_longlong_t)curr_stat->byteswap);
}
return (0);
@@ -723,7 +723,7 @@ fletcher_4_init(void)
/* move supported impl into fletcher_4_supp_impls */
for (i = 0, c = 0; i < ARRAY_SIZE(fletcher_4_impls); i++) {
- curr_impl = (fletcher_4_ops_t *) fletcher_4_impls[i];
+ curr_impl = (fletcher_4_ops_t *)fletcher_4_impls[i];
if (curr_impl->valid && curr_impl->valid())
fletcher_4_supp_impls[c++] = curr_impl;
@@ -754,7 +754,7 @@ fletcher_4_init(void)
/* install kstats for all implementations */
fletcher_4_kstat = kstat_create("zfs", 0, "fletcher_4_bench", "misc",
- KSTAT_TYPE_RAW, 0, KSTAT_FLAG_VIRTUAL);
+ KSTAT_TYPE_RAW, 0, KSTAT_FLAG_VIRTUAL);
if (fletcher_4_kstat != NULL) {
fletcher_4_kstat->ks_data = NULL;
fletcher_4_kstat->ks_ndata = UINT32_MAX;
diff --git a/module/zcommon/zfs_uio.c b/module/zcommon/zfs_uio.c
index 9ec3002a2..7b4175bbe 100644
--- a/module/zcommon/zfs_uio.c
+++ b/module/zcommon/zfs_uio.c
@@ -193,7 +193,7 @@ uio_prefaultpages(ssize_t n, struct uio *uio)
*/
p = iov->iov_base + skip;
while (cnt) {
- if (fuword8((uint8_t *) p, &tmp))
+ if (fuword8((uint8_t *)p, &tmp))
return;
incr = MIN(cnt, PAGESIZE);
p += incr;
@@ -203,7 +203,7 @@ uio_prefaultpages(ssize_t n, struct uio *uio)
* touch the last byte in case it straddles a page.
*/
p--;
- if (fuword8((uint8_t *) p, &tmp))
+ if (fuword8((uint8_t *)p, &tmp))
return;
}
}
diff --git a/module/zfs/abd.c b/module/zfs/abd.c
index 15d998a3b..dca70d6f2 100644
--- a/module/zfs/abd.c
+++ b/module/zfs/abd.c
@@ -407,7 +407,7 @@ struct page;
#define kpm_enable 1
#define abd_alloc_chunk(o) \
- ((struct page *) umem_alloc_aligned(PAGESIZE << (o), 64, KM_SLEEP))
+ ((struct page *)umem_alloc_aligned(PAGESIZE << (o), 64, KM_SLEEP))
#define abd_free_chunk(chunk, o) umem_free(chunk, PAGESIZE << (o))
#define zfs_kmap_atomic(chunk, km) ((void *)chunk)
#define zfs_kunmap_atomic(addr, km) do { (void)(addr); } while (0)
@@ -1486,8 +1486,8 @@ abd_nr_pages_off(abd_t *abd, unsigned int size, size_t off)
else
pos = abd->abd_u.abd_scatter.abd_offset + off;
- return ((pos + size + PAGESIZE - 1) >> PAGE_SHIFT)
- - (pos >> PAGE_SHIFT);
+ return ((pos + size + PAGESIZE - 1) >> PAGE_SHIFT) -
+ (pos >> PAGE_SHIFT);
}
/*
@@ -1537,6 +1537,7 @@ abd_scatter_bio_map_off(struct bio *bio, abd_t *abd,
module_param(zfs_abd_scatter_enabled, int, 0644);
MODULE_PARM_DESC(zfs_abd_scatter_enabled,
"Toggle whether ABD allocations must be linear.");
+/* CSTYLED */
module_param(zfs_abd_scatter_max_order, uint, 0644);
MODULE_PARM_DESC(zfs_abd_scatter_max_order,
"Maximum order allocation used for a scatter ABD.");
diff --git a/module/zfs/arc.c b/module/zfs/arc.c
index e54a7cc59..170e2f128 100644
--- a/module/zfs/arc.c
+++ b/module/zfs/arc.c
@@ -1510,7 +1510,7 @@ arc_cksum_compute(arc_buf_t *buf)
void
arc_buf_sigsegv(int sig, siginfo_t *si, void *unused)
{
- panic("Got SIGSEGV at address: 0x%lx\n", (long) si->si_addr);
+ panic("Got SIGSEGV at address: 0x%lx\n", (long)si->si_addr);
}
#endif
@@ -7688,6 +7688,7 @@ EXPORT_SYMBOL(arc_getbuf_func);
EXPORT_SYMBOL(arc_add_prune_callback);
EXPORT_SYMBOL(arc_remove_prune_callback);
+/* BEGIN CSTYLED */
module_param(zfs_arc_min, ulong, 0644);
MODULE_PARM_DESC(zfs_arc_min, "Min arc size");
@@ -7786,5 +7787,5 @@ MODULE_PARM_DESC(zfs_arc_dnode_limit_percent,
module_param(zfs_arc_dnode_reduce_percent, ulong, 0644);
MODULE_PARM_DESC(zfs_arc_dnode_reduce_percent,
"Percentage of excess dnodes to try to unpin");
-
+/* END CSTYLED */
#endif
diff --git a/module/zfs/dbuf.c b/module/zfs/dbuf.c
index cfa4fd1fc..38334b3d2 100644
--- a/module/zfs/dbuf.c
+++ b/module/zfs/dbuf.c
@@ -790,7 +790,7 @@ dbuf_verify(dmu_buf_impl_t *db)
} else {
/* db is pointed to by an indirect block */
ASSERTV(int epb = db->db_parent->db.db_size >>
- SPA_BLKPTRSHIFT);
+ SPA_BLKPTRSHIFT);
ASSERT3U(db->db_parent->db_level, ==, db->db_level+1);
ASSERT3U(db->db_parent->db.db_object, ==,
db->db.db_object);
@@ -2686,8 +2686,7 @@ __dbuf_hold_impl(struct dbuf_hold_impl_data *dh)
ASSERT3P(dh->dh_parent, ==, NULL);
dh->dh_err = dbuf_findbp(dh->dh_dn, dh->dh_level, dh->dh_blkid,
- dh->dh_fail_sparse, &dh->dh_parent,
- &dh->dh_bp, dh);
+ dh->dh_fail_sparse, &dh->dh_parent, &dh->dh_bp, dh);
if (dh->dh_fail_sparse) {
if (dh->dh_err == 0 &&
dh->dh_bp && BP_IS_HOLE(dh->dh_bp))
@@ -2701,7 +2700,7 @@ __dbuf_hold_impl(struct dbuf_hold_impl_data *dh)
if (dh->dh_err && dh->dh_err != ENOENT)
return (dh->dh_err);
dh->dh_db = dbuf_create(dh->dh_dn, dh->dh_level, dh->dh_blkid,
- dh->dh_parent, dh->dh_bp);
+ dh->dh_parent, dh->dh_bp);
}
if (dh->dh_fail_uncached && dh->dh_db->db_state != DB_CACHED) {
@@ -2775,7 +2774,7 @@ dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid,
dh = kmem_alloc(sizeof (struct dbuf_hold_impl_data) *
DBUF_HOLD_IMPL_MAX_DEPTH, KM_SLEEP);
__dbuf_hold_impl_init(dh, dn, level, blkid, fail_sparse,
- fail_uncached, tag, dbp, 0);
+ fail_uncached, tag, dbp, 0);
error = __dbuf_hold_impl(dh);
@@ -3884,23 +3883,23 @@ EXPORT_SYMBOL(dmu_buf_get_user);
EXPORT_SYMBOL(dmu_buf_freeable);
EXPORT_SYMBOL(dmu_buf_get_blkptr);
-
+/* BEGIN CSTYLED */
module_param(dbuf_cache_max_bytes, ulong, 0644);
MODULE_PARM_DESC(dbuf_cache_max_bytes,
- "Maximum size in bytes of the dbuf cache.");
+ "Maximum size in bytes of the dbuf cache.");
module_param(dbuf_cache_hiwater_pct, uint, 0644);
MODULE_PARM_DESC(dbuf_cache_hiwater_pct,
- "Percentage over dbuf_cache_max_bytes when dbufs \
- much be evicted directly.");
+ "Percentage over dbuf_cache_max_bytes when dbufs \
+ much be evicted directly.");
module_param(dbuf_cache_lowater_pct, uint, 0644);
MODULE_PARM_DESC(dbuf_cache_lowater_pct,
- "Percentage below dbuf_cache_max_bytes \
- when the evict thread stop evicting dbufs.");
+ "Percentage below dbuf_cache_max_bytes \
+ when the evict thread stop evicting dbufs.");
module_param(dbuf_cache_max_shift, int, 0644);
MODULE_PARM_DESC(dbuf_cache_max_shift,
- "Cap the size of the dbuf cache to log2 fraction of arc size.");
-
+ "Cap the size of the dbuf cache to log2 fraction of arc size.");
+/* END CSTYLED */
#endif
diff --git a/module/zfs/dmu_send.c b/module/zfs/dmu_send.c
index af6208e4d..d66a9dca4 100644
--- a/module/zfs/dmu_send.c
+++ b/module/zfs/dmu_send.c
@@ -1591,7 +1591,7 @@ dmu_recv_begin_sync(void *arg, dmu_tx_t *tx)
if (DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
DMU_BACKUP_FEATURE_LARGE_BLOCKS) {
VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_LARGEBLOCK,
- 8, 1, &one, tx));
+ 8, 1, &one, tx));
}
if (DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
DMU_BACKUP_FEATURE_EMBED_DATA) {
diff --git a/module/zfs/dmu_zfetch.c b/module/zfs/dmu_zfetch.c
index baed0492f..1bf5c4e34 100644
--- a/module/zfs/dmu_zfetch.c
+++ b/module/zfs/dmu_zfetch.c
@@ -336,6 +336,7 @@ dmu_zfetch(zfetch_t *zf, uint64_t blkid, uint64_t nblks, boolean_t fetch_data)
}
#if defined(_KERNEL) && defined(HAVE_SPL)
+/* BEGIN CSTYLED */
module_param(zfs_prefetch_disable, int, 0644);
MODULE_PARM_DESC(zfs_prefetch_disable, "Disable all ZFS prefetching");
@@ -351,4 +352,5 @@ MODULE_PARM_DESC(zfetch_max_distance,
module_param(zfetch_array_rd_sz, ulong, 0644);
MODULE_PARM_DESC(zfetch_array_rd_sz, "Number of bytes in a array_read");
+/* END CSTYLED */
#endif
diff --git a/module/zfs/dnode.c b/module/zfs/dnode.c
index 6ba8207e2..45bb958cd 100644
--- a/module/zfs/dnode.c
+++ b/module/zfs/dnode.c
@@ -632,7 +632,7 @@ dnode_reallocate(dnode_t *dn, dmu_object_type_t ot, int blocksize,
(bonustype == DMU_OT_SA && bonuslen == 0));
ASSERT(DMU_OT_IS_VALID(bonustype));
ASSERT3U(bonuslen, <=,
- DN_BONUS_SIZE(spa_maxdnodesize(dmu_objset_spa(dn->dn_objset))));
+ DN_BONUS_SIZE(spa_maxdnodesize(dmu_objset_spa(dn->dn_objset))));
dn_slots = dn_slots > 0 ? dn_slots : DNODE_MIN_SLOTS;
diff --git a/module/zfs/dsl_pool.c b/module/zfs/dsl_pool.c
index cf5259acd..1a62fba2c 100644
--- a/module/zfs/dsl_pool.c
+++ b/module/zfs/dsl_pool.c
@@ -1087,6 +1087,7 @@ dsl_pool_config_held_writer(dsl_pool_t *dp)
EXPORT_SYMBOL(dsl_pool_config_enter);
EXPORT_SYMBOL(dsl_pool_config_exit);
+/* BEGIN CSTYLED */
/* zfs_dirty_data_max_percent only applied at module load in arc_init(). */
module_param(zfs_dirty_data_max_percent, int, 0444);
MODULE_PARM_DESC(zfs_dirty_data_max_percent, "percent of ram can be dirty");
@@ -1112,4 +1113,5 @@ MODULE_PARM_DESC(zfs_dirty_data_sync, "sync txg when this much dirty data");
module_param(zfs_delay_scale, ulong, 0644);
MODULE_PARM_DESC(zfs_delay_scale, "how quickly delay approaches infinity");
+/* END CSTYLED */
#endif
diff --git a/module/zfs/dsl_scan.c b/module/zfs/dsl_scan.c
index fd7a53bc9..68c11b3a8 100644
--- a/module/zfs/dsl_scan.c
+++ b/module/zfs/dsl_scan.c
@@ -73,7 +73,7 @@ int zfs_no_scrub_prefetch = B_FALSE; /* set to disable scrub prefetch */
enum ddt_class zfs_scrub_ddt_class_max = DDT_CLASS_DUPLICATE;
int dsl_scan_delay_completion = B_FALSE; /* set to delay scan completion */
/* max number of blocks to free in a single TXG */
-ulong zfs_free_max_blocks = 100000;
+unsigned long zfs_free_max_blocks = 100000;
#define DSL_SCAN_IS_SCRUB_RESILVER(scn) \
((scn)->scn_phys.scn_func == POOL_SCAN_SCRUB || \
@@ -1985,6 +1985,7 @@ MODULE_PARM_DESC(zfs_no_scrub_io, "Set to disable scrub I/O");
module_param(zfs_no_scrub_prefetch, int, 0644);
MODULE_PARM_DESC(zfs_no_scrub_prefetch, "Set to disable scrub prefetching");
+/* CSTYLED */
module_param(zfs_free_max_blocks, ulong, 0644);
MODULE_PARM_DESC(zfs_free_max_blocks, "Max number of blocks freed in one txg");
diff --git a/module/zfs/fm.c b/module/zfs/fm.c
index 6c569ffc4..f6ae16284 100644
--- a/module/zfs/fm.c
+++ b/module/zfs/fm.c
@@ -431,7 +431,7 @@ zfs_zevent_alloc(void)
ev = kmem_zalloc(sizeof (zevent_t), KM_SLEEP);
list_create(&ev->ev_ze_list, sizeof (zfs_zevent_t),
- offsetof(zfs_zevent_t, ze_node));
+ offsetof(zfs_zevent_t, ze_node));
list_link_init(&ev->ev_node);
return (ev);
diff --git a/module/zfs/gzip.c b/module/zfs/gzip.c
index 011fb9188..6e5c859fe 100644
--- a/module/zfs/gzip.c
+++ b/module/zfs/gzip.c
@@ -64,7 +64,7 @@ gzip_compress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n)
return (s_len);
}
- return ((size_t) dstlen);
+ return ((size_t)dstlen);
}
/*ARGSUSED*/
diff --git a/module/zfs/lz4.c b/module/zfs/lz4.c
index cf406b936..5caa6a854 100644
--- a/module/zfs/lz4.c
+++ b/module/zfs/lz4.c
@@ -1006,7 +1006,7 @@ void
lz4_init(void)
{
lz4_cache = kmem_cache_create("lz4_cache",
- sizeof (struct refTables), 0, NULL, NULL, NULL, NULL, NULL, 0);
+ sizeof (struct refTables), 0, NULL, NULL, NULL, NULL, NULL, 0);
}
void
diff --git a/module/zfs/metaslab.c b/module/zfs/metaslab.c
index 27a758ee0..5dd425768 100644
--- a/module/zfs/metaslab.c
+++ b/module/zfs/metaslab.c
@@ -2924,37 +2924,44 @@ metaslab_check_free(spa_t *spa, const blkptr_t *bp)
}
#if defined(_KERNEL) && defined(HAVE_SPL)
+/* CSTYLED */
module_param(metaslab_aliquot, ulong, 0644);
-module_param(metaslab_debug_load, int, 0644);
-module_param(metaslab_debug_unload, int, 0644);
-module_param(metaslab_preload_enabled, int, 0644);
-module_param(zfs_mg_noalloc_threshold, int, 0644);
-module_param(zfs_mg_fragmentation_threshold, int, 0644);
-module_param(zfs_metaslab_fragmentation_threshold, int, 0644);
-module_param(metaslab_fragmentation_factor_enabled, int, 0644);
-module_param(metaslab_lba_weighting_enabled, int, 0644);
-module_param(metaslab_bias_enabled, int, 0644);
-
MODULE_PARM_DESC(metaslab_aliquot,
"allocation granularity (a.k.a. stripe size)");
+
+module_param(metaslab_debug_load, int, 0644);
MODULE_PARM_DESC(metaslab_debug_load,
"load all metaslabs when pool is first opened");
+
+module_param(metaslab_debug_unload, int, 0644);
MODULE_PARM_DESC(metaslab_debug_unload,
"prevent metaslabs from being unloaded");
+
+module_param(metaslab_preload_enabled, int, 0644);
MODULE_PARM_DESC(metaslab_preload_enabled,
"preload potential metaslabs during reassessment");
+module_param(zfs_mg_noalloc_threshold, int, 0644);
MODULE_PARM_DESC(zfs_mg_noalloc_threshold,
"percentage of free space for metaslab group to allow allocation");
+
+module_param(zfs_mg_fragmentation_threshold, int, 0644);
MODULE_PARM_DESC(zfs_mg_fragmentation_threshold,
"fragmentation for metaslab group to allow allocation");
+module_param(zfs_metaslab_fragmentation_threshold, int, 0644);
MODULE_PARM_DESC(zfs_metaslab_fragmentation_threshold,
"fragmentation for metaslab to allow allocation");
+
+module_param(metaslab_fragmentation_factor_enabled, int, 0644);
MODULE_PARM_DESC(metaslab_fragmentation_factor_enabled,
"use the fragmentation metric to prefer less fragmented metaslabs");
+
+module_param(metaslab_lba_weighting_enabled, int, 0644);
MODULE_PARM_DESC(metaslab_lba_weighting_enabled,
"prefer metaslabs with lower LBAs");
+
+module_param(metaslab_bias_enabled, int, 0644);
MODULE_PARM_DESC(metaslab_bias_enabled,
"enable metaslab group biasing");
#endif /* _KERNEL && HAVE_SPL */
diff --git a/module/zfs/spa.c b/module/zfs/spa.c
index 5203ea826..a463859a2 100644
--- a/module/zfs/spa.c
+++ b/module/zfs/spa.c
@@ -6996,6 +6996,7 @@ module_param(spa_load_verify_data, int, 0644);
MODULE_PARM_DESC(spa_load_verify_data,
"Set to traverse data on pool import");
+/* CSTYLED */
module_param(zio_taskq_batch_pct, uint, 0444);
MODULE_PARM_DESC(zio_taskq_batch_pct,
"Percentage of CPUs to run an IO worker thread");
diff --git a/module/zfs/spa_config.c b/module/zfs/spa_config.c
index a3ff24bd5..a813dfbd0 100644
--- a/module/zfs/spa_config.c
+++ b/module/zfs/spa_config.c
@@ -419,7 +419,7 @@ spa_config_generate(spa_t *spa, vdev_t *vd, uint64_t txg, int getstats)
*/
if (spa->spa_import_flags & ZFS_IMPORT_TEMP_NAME) {
VERIFY0(nvlist_lookup_string(spa->spa_config,
- ZPOOL_CONFIG_POOL_NAME, &pool_name));
+ ZPOOL_CONFIG_POOL_NAME, &pool_name));
} else
pool_name = spa_name(spa);
diff --git a/module/zfs/spa_misc.c b/module/zfs/spa_misc.c
index 8ae5fb559..b022c236b 100644
--- a/module/zfs/spa_misc.c
+++ b/module/zfs/spa_misc.c
@@ -2093,9 +2093,9 @@ EXPORT_SYMBOL(spa_has_slogs);
EXPORT_SYMBOL(spa_is_root);
EXPORT_SYMBOL(spa_writeable);
EXPORT_SYMBOL(spa_mode);
-
EXPORT_SYMBOL(spa_namespace_lock);
+/* BEGIN CSTYLED */
module_param(zfs_flags, uint, 0644);
MODULE_PARM_DESC(zfs_flags, "Set additional debugging flags");
@@ -2118,4 +2118,5 @@ MODULE_PARM_DESC(spa_asize_inflation,
module_param(spa_slop_shift, int, 0644);
MODULE_PARM_DESC(spa_slop_shift, "Reserved free space in pool");
+/* END CSTYLED */
#endif
diff --git a/module/zfs/vdev_disk.c b/module/zfs/vdev_disk.c
index ae6ed4de9..35041a4f3 100644
--- a/module/zfs/vdev_disk.c
+++ b/module/zfs/vdev_disk.c
@@ -576,7 +576,7 @@ retry:
/* bio_alloc() with __GFP_WAIT never returns NULL */
dr->dr_bio[i] = bio_alloc(GFP_NOIO,
MIN(abd_nr_pages_off(zio->io_abd, bio_size, abd_offset),
- BIO_MAX_PAGES));
+ BIO_MAX_PAGES));
if (unlikely(dr->dr_bio[i] == NULL)) {
vdev_disk_dio_free(dr);
return (ENOMEM);
@@ -593,7 +593,7 @@ retry:
/* Remaining size is returned to become the new size */
bio_size = bio_map_abd_off(dr->dr_bio[i], zio->io_abd,
- bio_size, abd_offset);
+ bio_size, abd_offset);
/* Advance in buffer and construct another bio if needed */
abd_offset += BIO_BI_SIZE(dr->dr_bio[i]);
diff --git a/module/zfs/vdev_queue.c b/module/zfs/vdev_queue.c
index 91ef106b4..bf2e24cb2 100644
--- a/module/zfs/vdev_queue.c
+++ b/module/zfs/vdev_queue.c
@@ -371,11 +371,11 @@ vdev_queue_init(vdev_t *vd)
avl_create(&vq->vq_active_tree, vdev_queue_offset_compare,
sizeof (zio_t), offsetof(struct zio, io_queue_node));
avl_create(vdev_queue_type_tree(vq, ZIO_TYPE_READ),
- vdev_queue_offset_compare, sizeof (zio_t),
- offsetof(struct zio, io_offset_node));
+ vdev_queue_offset_compare, sizeof (zio_t),
+ offsetof(struct zio, io_offset_node));
avl_create(vdev_queue_type_tree(vq, ZIO_TYPE_WRITE),
- vdev_queue_offset_compare, sizeof (zio_t),
- offsetof(struct zio, io_offset_node));
+ vdev_queue_offset_compare, sizeof (zio_t),
+ offsetof(struct zio, io_offset_node));
for (p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++) {
int (*compfn) (const void *, const void *);
@@ -390,7 +390,7 @@ vdev_queue_init(vdev_t *vd)
else
compfn = vdev_queue_offset_compare;
avl_create(vdev_queue_class_tree(vq, p), compfn,
- sizeof (zio_t), offsetof(struct zio, io_queue_node));
+ sizeof (zio_t), offsetof(struct zio, io_queue_node));
}
vq->vq_lastoffset = 0;
diff --git a/module/zfs/vdev_raidz_math.c b/module/zfs/vdev_raidz_math.c
index c050c9099..85dd15cc3 100644
--- a/module/zfs/vdev_raidz_math.c
+++ b/module/zfs/vdev_raidz_math.c
@@ -124,10 +124,10 @@ vdev_raidz_math_get_ops()
break;
#endif
case IMPL_ORIGINAL:
- ops = (raidz_impl_ops_t *) &vdev_raidz_original_impl;
+ ops = (raidz_impl_ops_t *)&vdev_raidz_original_impl;
break;
case IMPL_SCALAR:
- ops = (raidz_impl_ops_t *) &vdev_raidz_scalar_impl;
+ ops = (raidz_impl_ops_t *)&vdev_raidz_scalar_impl;
break;
default:
ASSERT3U(impl, <, raidz_supp_impl_cnt);
@@ -162,7 +162,7 @@ vdev_raidz_math_generate(raidz_map_t *rm)
default:
gen_parity = NULL;
cmn_err(CE_PANIC, "invalid RAID-Z configuration %d",
- raidz_parity(rm));
+ raidz_parity(rm));
break;
}
@@ -196,7 +196,7 @@ reconstruct_fun_pq_sel(raidz_map_t *rm, const int *parity_valid,
return (rm->rm_ops->rec[RAIDZ_REC_Q]);
}
} else if (nbaddata == 2 &&
- parity_valid[CODE_P] && parity_valid[CODE_Q]) {
+ parity_valid[CODE_P] && parity_valid[CODE_Q]) {
return (rm->rm_ops->rec[RAIDZ_REC_PQ]);
}
return ((raidz_rec_f) NULL);
@@ -223,8 +223,8 @@ reconstruct_fun_pqr_sel(raidz_map_t *rm, const int *parity_valid,
return (rm->rm_ops->rec[RAIDZ_REC_QR]);
}
} else if (nbaddata == 3 &&
- parity_valid[CODE_P] && parity_valid[CODE_Q] &&
- parity_valid[CODE_R]) {
+ parity_valid[CODE_P] && parity_valid[CODE_Q] &&
+ parity_valid[CODE_R]) {
return (rm->rm_ops->rec[RAIDZ_REC_PQR]);
}
return ((raidz_rec_f) NULL);
@@ -300,8 +300,8 @@ raidz_math_kstat_headers(char *buf, size_t size)
static int
raidz_math_kstat_data(char *buf, size_t size, void *data)
{
- raidz_impl_kstat_t * fstat = &raidz_impl_kstats[raidz_supp_impl_cnt];
- raidz_impl_kstat_t * cstat = (raidz_impl_kstat_t *) data;
+ raidz_impl_kstat_t *fstat = &raidz_impl_kstats[raidz_supp_impl_cnt];
+ raidz_impl_kstat_t *cstat = (raidz_impl_kstat_t *)data;
ssize_t off = 0;
int i;
@@ -328,11 +328,11 @@ raidz_math_kstat_data(char *buf, size_t size, void *data)
for (i = 0; i < ARRAY_SIZE(raidz_gen_name); i++)
off += snprintf(buf + off, size - off, "%-16llu",
- (u_longlong_t) cstat->gen[i]);
+ (u_longlong_t)cstat->gen[i]);
for (i = 0; i < ARRAY_SIZE(raidz_rec_name); i++)
off += snprintf(buf + off, size - off, "%-16llu",
- (u_longlong_t) cstat->rec[i]);
+ (u_longlong_t)cstat->rec[i]);
}
(void) snprintf(buf + off, size - off, "\n");
@@ -392,7 +392,7 @@ benchmark_raidz_impl(raidz_map_t *bench_rm, const int fn, benchmark_fn bench_fn)
uint64_t run_cnt, speed, best_speed = 0;
hrtime_t t_start, t_diff;
raidz_impl_ops_t *curr_impl;
- raidz_impl_kstat_t * fstat = &raidz_impl_kstats[raidz_supp_impl_cnt];
+ raidz_impl_kstat_t *fstat = &raidz_impl_kstats[raidz_supp_impl_cnt];
int impl, i;
for (impl = 0; impl < raidz_supp_impl_cnt; impl++) {
@@ -446,14 +446,14 @@ vdev_raidz_math_init(void)
/* move supported impl into raidz_supp_impl */
for (i = 0, c = 0; i < ARRAY_SIZE(raidz_all_maths); i++) {
- curr_impl = (raidz_impl_ops_t *) raidz_all_maths[i];
+ curr_impl = (raidz_impl_ops_t *)raidz_all_maths[i];
/* initialize impl */
if (curr_impl->init)
curr_impl->init();
if (curr_impl->is_supported())
- raidz_supp_impl[c++] = (raidz_impl_ops_t *) curr_impl;
+ raidz_supp_impl[c++] = (raidz_impl_ops_t *)curr_impl;
}
membar_producer(); /* complete raidz_supp_impl[] init */
raidz_supp_impl_cnt = c; /* number of supported impl */
@@ -505,7 +505,7 @@ vdev_raidz_math_init(void)
/* install kstats for all impl */
raidz_math_kstat = kstat_create("zfs", 0, "vdev_raidz_bench", "misc",
- KSTAT_TYPE_RAW, 0, KSTAT_FLAG_VIRTUAL);
+ KSTAT_TYPE_RAW, 0, KSTAT_FLAG_VIRTUAL);
if (raidz_math_kstat != NULL) {
raidz_math_kstat->ks_data = NULL;
@@ -542,7 +542,7 @@ vdev_raidz_math_fini(void)
}
static const struct {
- char *name;
+ char *name;
uint32_t sel;
} math_impl_opts[] = {
#if !defined(_KERNEL)
diff --git a/module/zfs/vdev_raidz_math_avx512bw.c b/module/zfs/vdev_raidz_math_avx512bw.c
index 465d1e569..33b2d388f 100644
--- a/module/zfs/vdev_raidz_math_avx512bw.c
+++ b/module/zfs/vdev_raidz_math_avx512bw.c
@@ -66,14 +66,14 @@ typedef struct v {
uint8_t b[ELEM_SIZE] __attribute__((aligned(ELEM_SIZE)));
} v_t;
-#define PREFETCHNTA(ptr, offset) \
+#define PREFETCHNTA(ptr, offset) \
{ \
__asm( \
"prefetchnta " #offset "(%[MEM])\n" \
: : [MEM] "r" (ptr)); \
}
-#define PREFETCH(ptr, offset) \
+#define PREFETCH(ptr, offset) \
{ \
__asm( \
"prefetcht0 " #offset "(%[MEM])\n" \
@@ -142,7 +142,7 @@ typedef struct v {
} \
}
-#define COPY(r...) \
+#define COPY(r...) \
{ \
switch (REG_CNT(r)) { \
case 8: \
@@ -162,7 +162,7 @@ typedef struct v {
} \
}
-#define LOAD(src, r...) \
+#define LOAD(src, r...) \
{ \
switch (REG_CNT(r)) { \
case 4: \
@@ -184,7 +184,7 @@ typedef struct v {
} \
}
-#define STORE(dst, r...) \
+#define STORE(dst, r...) \
{ \
switch (REG_CNT(r)) { \
case 4: \
@@ -211,8 +211,8 @@ typedef struct v {
__asm("vzeroupper"); \
}
-#define MUL2_SETUP() \
-{ \
+#define MUL2_SETUP() \
+{ \
__asm("vmovq %0, %%xmm14" :: "r"(0x1d1d1d1d1d1d1d1d)); \
__asm("vpbroadcastq %xmm14, %zmm14"); \
__asm("vmovq %0, %%xmm13" :: "r"(0x8080808080808080)); \
@@ -222,7 +222,7 @@ typedef struct v {
__asm("vpxorq %zmm15, %zmm15 ,%zmm15"); \
}
-#define _MUL2(r...) \
+#define _MUL2(r...) \
{ \
switch (REG_CNT(r)) { \
case 2: \
@@ -237,8 +237,8 @@ typedef struct v {
"vpsubq %zmm9, %zmm11, %zmm11\n" \
"vpsllq $1, %" VR0(r)", %" VR0(r) "\n" \
"vpsllq $1, %" VR1(r)", %" VR1(r) "\n" \
- "vpandq %zmm10, %zmm14, %zmm10\n" \
- "vpandq %zmm11, %zmm14, %zmm11\n" \
+ "vpandq %zmm10, %zmm14, %zmm10\n" \
+ "vpandq %zmm11, %zmm14, %zmm11\n" \
"vpternlogd $0x6c,%zmm12, %zmm10, %" VR0(r) "\n" \
"vpternlogd $0x6c,%zmm12, %zmm11, %" VR1(r)); \
break; \
@@ -355,60 +355,60 @@ static const uint8_t __attribute__((aligned(32))) _mul_mask = 0x0F;
#define ADD_STRIDE 4
#define ADD_DEFINE() {}
-#define ADD_D 0, 1, 2, 3
+#define ADD_D 0, 1, 2, 3
#define MUL_STRIDE 4
-#define MUL_DEFINE() {}
+#define MUL_DEFINE() {}
#define MUL_D 0, 1, 2, 3
#define GEN_P_DEFINE() {}
#define GEN_P_STRIDE 4
#define GEN_P_P 0, 1, 2, 3
-#define GEN_PQ_DEFINE() {}
+#define GEN_PQ_DEFINE() {}
#define GEN_PQ_STRIDE 4
#define GEN_PQ_D 0, 1, 2, 3
#define GEN_PQ_P 4, 5, 6, 7
#define GEN_PQ_Q 20, 21, 22, 23
-#define GEN_PQR_DEFINE() {}
+#define GEN_PQR_DEFINE() {}
#define GEN_PQR_STRIDE 2
#define GEN_PQR_D 0, 1
#define GEN_PQR_P 2, 3
#define GEN_PQR_Q 4, 5
#define GEN_PQR_R 6, 7
-#define REC_P_DEFINE() {}
+#define REC_P_DEFINE() {}
#define REC_P_STRIDE 4
#define REC_P_X 0, 1, 2, 3
-#define REC_Q_DEFINE() {}
+#define REC_Q_DEFINE() {}
#define REC_Q_STRIDE 4
#define REC_Q_X 0, 1, 2, 3
-#define REC_R_DEFINE() {}
+#define REC_R_DEFINE() {}
#define REC_R_STRIDE 4
#define REC_R_X 0, 1, 2, 3
-#define REC_PQ_DEFINE() {}
+#define REC_PQ_DEFINE() {}
#define REC_PQ_STRIDE 4
#define REC_PQ_X 0, 1, 2, 3
#define REC_PQ_Y 4, 5, 6, 7
#define REC_PQ_D 20, 21, 22, 23
-#define REC_PR_DEFINE() {}
+#define REC_PR_DEFINE() {}
#define REC_PR_STRIDE 4
#define REC_PR_X 0, 1, 2, 3
#define REC_PR_Y 4, 5, 6, 7
#define REC_PR_D 20, 21, 22, 23
-#define REC_QR_DEFINE() {}
+#define REC_QR_DEFINE() {}
#define REC_QR_STRIDE 4
#define REC_QR_X 0, 1, 2, 3
#define REC_QR_Y 4, 5, 6, 7
#define REC_QR_D 20, 21, 22, 23
-#define REC_PQR_DEFINE() {}
+#define REC_PQR_DEFINE() {}
#define REC_PQR_STRIDE 2
#define REC_PQR_X 0, 1
#define REC_PQR_Y 2, 3
@@ -428,8 +428,8 @@ static boolean_t
raidz_will_avx512bw_work(void)
{
return (zfs_avx_available() &&
- zfs_avx512f_available() &&
- zfs_avx512bw_available());
+ zfs_avx512f_available() &&
+ zfs_avx512bw_available());
}
const raidz_impl_ops_t vdev_raidz_avx512bw_impl = {
diff --git a/module/zfs/vdev_raidz_math_avx512f.c b/module/zfs/vdev_raidz_math_avx512f.c
index 0b6108c10..f4e4560ce 100644
--- a/module/zfs/vdev_raidz_math_avx512f.c
+++ b/module/zfs/vdev_raidz_math_avx512f.c
@@ -471,8 +471,8 @@ static boolean_t
raidz_will_avx512f_work(void)
{
return (zfs_avx_available() &&
- zfs_avx2_available() &&
- zfs_avx512f_available());
+ zfs_avx2_available() &&
+ zfs_avx512f_available());
}
const raidz_impl_ops_t vdev_raidz_avx512f_impl = {
diff --git a/module/zfs/vdev_raidz_math_impl.h b/module/zfs/vdev_raidz_math_impl.h
index 171380524..0a40677b6 100644
--- a/module/zfs/vdev_raidz_math_impl.h
+++ b/module/zfs/vdev_raidz_math_impl.h
@@ -158,7 +158,7 @@ raidz_rec_pqr_coeff(const raidz_map_t *rm, const int *tgtidx, unsigned *coeff)
static int
raidz_zero_abd_cb(void *dc, size_t dsize, void *private)
{
- v_t *dst = (v_t *) dc;
+ v_t *dst = (v_t *)dc;
size_t i;
ZERO_DEFINE();
@@ -193,8 +193,8 @@ raidz_zero_abd_cb(void *dc, size_t dsize, void *private)
static int
raidz_copy_abd_cb(void *dc, void *sc, size_t size, void *private)
{
- v_t *dst = (v_t *) dc;
- const v_t *src = (v_t *) sc;
+ v_t *dst = (v_t *)dc;
+ const v_t *src = (v_t *)sc;
size_t i;
COPY_DEFINE();
@@ -232,8 +232,8 @@ raidz_copy_abd_cb(void *dc, void *sc, size_t size, void *private)
static int
raidz_add_abd_cb(void *dc, void *sc, size_t size, void *private)
{
- v_t *dst = (v_t *) dc;
- const v_t *src = (v_t *) sc;
+ v_t *dst = (v_t *)dc;
+ const v_t *src = (v_t *)sc;
size_t i;
ADD_DEFINE();
@@ -270,8 +270,8 @@ raidz_add_abd_cb(void *dc, void *sc, size_t size, void *private)
static int
raidz_mul_abd_cb(void *dc, size_t size, void *private)
{
- const unsigned mul = *((unsigned *) private);
- v_t *d = (v_t *) dc;
+ const unsigned mul = *((unsigned *)private);
+ v_t *d = (v_t *)dc;
size_t i;
MUL_DEFINE();
@@ -389,9 +389,9 @@ static void
raidz_gen_pq_add(void **c, const void *dc, const size_t csize,
const size_t dsize)
{
- v_t *p = (v_t *) c[0];
- v_t *q = (v_t *) c[1];
- const v_t *d = (v_t *) dc;
+ v_t *p = (v_t *)c[0];
+ v_t *q = (v_t *)c[1];
+ const v_t *d = (v_t *)dc;
const v_t * const dend = d + (dsize / sizeof (v_t));
const v_t * const qend = q + (csize / sizeof (v_t));
@@ -439,7 +439,7 @@ raidz_generate_pq_impl(raidz_map_t * const rm)
dsize = rm->rm_col[c].rc_size;
abd_raidz_gen_iterate(cabds, dabd, csize, dsize, 2,
- raidz_gen_pq_add);
+ raidz_gen_pq_add);
}
raidz_math_end();
@@ -459,10 +459,10 @@ static void
raidz_gen_pqr_add(void **c, const void *dc, const size_t csize,
const size_t dsize)
{
- v_t *p = (v_t *) c[0];
- v_t *q = (v_t *) c[1];
- v_t *r = (v_t *) c[CODE_R];
- const v_t *d = (v_t *) dc;
+ v_t *p = (v_t *)c[0];
+ v_t *q = (v_t *)c[1];
+ v_t *r = (v_t *)c[CODE_R];
+ const v_t *d = (v_t *)dc;
const v_t * const dend = d + (dsize / sizeof (v_t));
const v_t * const qend = q + (csize / sizeof (v_t));
@@ -514,7 +514,7 @@ raidz_generate_pqr_impl(raidz_map_t * const rm)
dsize = rm->rm_col[c].rc_size;
abd_raidz_gen_iterate(cabds, dabd, csize, dsize, 3,
- raidz_gen_pqr_add);
+ raidz_gen_pqr_add);
}
raidz_math_end();
@@ -628,8 +628,8 @@ static void
raidz_syn_q_abd(void **xc, const void *dc, const size_t xsize,
const size_t dsize)
{
- v_t *x = (v_t *) xc[TARGET_X];
- const v_t *d = (v_t *) dc;
+ v_t *x = (v_t *)xc[TARGET_X];
+ const v_t *d = (v_t *)dc;
const v_t * const dend = d + (dsize / sizeof (v_t));
const v_t * const xend = x + (xsize / sizeof (v_t));
@@ -719,8 +719,8 @@ static void
raidz_syn_r_abd(void **xc, const void *dc, const size_t tsize,
const size_t dsize)
{
- v_t *x = (v_t *) xc[TARGET_X];
- const v_t *d = (v_t *) dc;
+ v_t *x = (v_t *)xc[TARGET_X];
+ const v_t *d = (v_t *)dc;
const v_t * const dend = d + (dsize / sizeof (v_t));
const v_t * const xend = x + (tsize / sizeof (v_t));
@@ -784,7 +784,7 @@ raidz_reconstruct_r_impl(raidz_map_t *rm, const int *tgtidx)
}
abd_raidz_gen_iterate(tabds, dabd, xsize, dsize, 1,
- raidz_syn_r_abd);
+ raidz_syn_r_abd);
}
/* add R to the syndrome */
@@ -811,9 +811,9 @@ static void
raidz_syn_pq_abd(void **tc, const void *dc, const size_t tsize,
const size_t dsize)
{
- v_t *x = (v_t *) tc[TARGET_X];
- v_t *y = (v_t *) tc[TARGET_Y];
- const v_t *d = (v_t *) dc;
+ v_t *x = (v_t *)tc[TARGET_X];
+ v_t *y = (v_t *)tc[TARGET_Y];
+ const v_t *d = (v_t *)dc;
const v_t * const dend = d + (dsize / sizeof (v_t));
const v_t * const yend = y + (tsize / sizeof (v_t));
@@ -843,11 +843,11 @@ static void
raidz_rec_pq_abd(void **tc, const size_t tsize, void **c,
const unsigned *mul)
{
- v_t *x = (v_t *) tc[TARGET_X];
- v_t *y = (v_t *) tc[TARGET_Y];
+ v_t *x = (v_t *)tc[TARGET_X];
+ v_t *y = (v_t *)tc[TARGET_Y];
const v_t * const xend = x + (tsize / sizeof (v_t));
- const v_t *p = (v_t *) c[CODE_P];
- const v_t *q = (v_t *) c[CODE_Q];
+ const v_t *p = (v_t *)c[CODE_P];
+ const v_t *q = (v_t *)c[CODE_Q];
REC_PQ_DEFINE();
@@ -939,7 +939,7 @@ raidz_reconstruct_pq_impl(raidz_map_t *rm, const int *tgtidx)
}
abd_raidz_gen_iterate(tabds, dabd, xsize, dsize, 2,
- raidz_syn_pq_abd);
+ raidz_syn_pq_abd);
}
abd_raidz_rec_iterate(cabds, tabds, xsize, 2, raidz_rec_pq_abd, coeff);
@@ -969,9 +969,9 @@ static void
raidz_syn_pr_abd(void **c, const void *dc, const size_t tsize,
const size_t dsize)
{
- v_t *x = (v_t *) c[TARGET_X];
- v_t *y = (v_t *) c[TARGET_Y];
- const v_t *d = (v_t *) dc;
+ v_t *x = (v_t *)c[TARGET_X];
+ v_t *y = (v_t *)c[TARGET_Y];
+ const v_t *d = (v_t *)dc;
const v_t * const dend = d + (dsize / sizeof (v_t));
const v_t * const yend = y + (tsize / sizeof (v_t));
@@ -1001,11 +1001,11 @@ static void
raidz_rec_pr_abd(void **t, const size_t tsize, void **c,
const unsigned *mul)
{
- v_t *x = (v_t *) t[TARGET_X];
- v_t *y = (v_t *) t[TARGET_Y];
+ v_t *x = (v_t *)t[TARGET_X];
+ v_t *y = (v_t *)t[TARGET_Y];
const v_t * const xend = x + (tsize / sizeof (v_t));
- const v_t *p = (v_t *) c[CODE_P];
- const v_t *q = (v_t *) c[CODE_Q];
+ const v_t *p = (v_t *)c[CODE_P];
+ const v_t *q = (v_t *)c[CODE_Q];
REC_PR_DEFINE();
@@ -1095,7 +1095,7 @@ raidz_reconstruct_pr_impl(raidz_map_t *rm, const int *tgtidx)
}
abd_raidz_gen_iterate(tabds, dabd, xsize, dsize, 2,
- raidz_syn_pr_abd);
+ raidz_syn_pr_abd);
}
abd_raidz_rec_iterate(cabds, tabds, xsize, 2, raidz_rec_pr_abd, coeff);
@@ -1127,10 +1127,10 @@ static void
raidz_syn_qr_abd(void **c, const void *dc, const size_t tsize,
const size_t dsize)
{
- v_t *x = (v_t *) c[TARGET_X];
- v_t *y = (v_t *) c[TARGET_Y];
+ v_t *x = (v_t *)c[TARGET_X];
+ v_t *y = (v_t *)c[TARGET_Y];
const v_t * const xend = x + (tsize / sizeof (v_t));
- const v_t *d = (v_t *) dc;
+ const v_t *d = (v_t *)dc;
const v_t * const dend = d + (dsize / sizeof (v_t));
SYN_QR_DEFINE();
@@ -1161,11 +1161,11 @@ static void
raidz_rec_qr_abd(void **t, const size_t tsize, void **c,
const unsigned *mul)
{
- v_t *x = (v_t *) t[TARGET_X];
- v_t *y = (v_t *) t[TARGET_Y];
+ v_t *x = (v_t *)t[TARGET_X];
+ v_t *y = (v_t *)t[TARGET_Y];
const v_t * const xend = x + (tsize / sizeof (v_t));
- const v_t *p = (v_t *) c[CODE_P];
- const v_t *q = (v_t *) c[CODE_Q];
+ const v_t *p = (v_t *)c[CODE_P];
+ const v_t *q = (v_t *)c[CODE_Q];
REC_QR_DEFINE();
@@ -1258,7 +1258,7 @@ raidz_reconstruct_qr_impl(raidz_map_t *rm, const int *tgtidx)
}
abd_raidz_gen_iterate(tabds, dabd, xsize, dsize, 2,
- raidz_syn_qr_abd);
+ raidz_syn_qr_abd);
}
abd_raidz_rec_iterate(cabds, tabds, xsize, 2, raidz_rec_qr_abd, coeff);
@@ -1291,11 +1291,11 @@ static void
raidz_syn_pqr_abd(void **c, const void *dc, const size_t tsize,
const size_t dsize)
{
- v_t *x = (v_t *) c[TARGET_X];
- v_t *y = (v_t *) c[TARGET_Y];
- v_t *z = (v_t *) c[TARGET_Z];
+ v_t *x = (v_t *)c[TARGET_X];
+ v_t *y = (v_t *)c[TARGET_Y];
+ v_t *z = (v_t *)c[TARGET_Z];
const v_t * const yend = y + (tsize / sizeof (v_t));
- const v_t *d = (v_t *) dc;
+ const v_t *d = (v_t *)dc;
const v_t * const dend = d + (dsize / sizeof (v_t));
SYN_PQR_DEFINE();
@@ -1328,13 +1328,13 @@ static void
raidz_rec_pqr_abd(void **t, const size_t tsize, void **c,
const unsigned * const mul)
{
- v_t *x = (v_t *) t[TARGET_X];
- v_t *y = (v_t *) t[TARGET_Y];
- v_t *z = (v_t *) t[TARGET_Z];
+ v_t *x = (v_t *)t[TARGET_X];
+ v_t *y = (v_t *)t[TARGET_Y];
+ v_t *z = (v_t *)t[TARGET_Z];
const v_t * const xend = x + (tsize / sizeof (v_t));
- const v_t *p = (v_t *) c[CODE_P];
- const v_t *q = (v_t *) c[CODE_Q];
- const v_t *r = (v_t *) c[CODE_R];
+ const v_t *p = (v_t *)c[CODE_P];
+ const v_t *q = (v_t *)c[CODE_Q];
+ const v_t *r = (v_t *)c[CODE_R];
REC_PQR_DEFINE();
@@ -1451,7 +1451,7 @@ raidz_reconstruct_pqr_impl(raidz_map_t *rm, const int *tgtidx)
}
abd_raidz_gen_iterate(tabds, dabd, xsize, dsize, 3,
- raidz_syn_pqr_abd);
+ raidz_syn_pqr_abd);
}
abd_raidz_rec_iterate(cabds, tabds, xsize, 3, raidz_rec_pqr_abd, coeff);
diff --git a/module/zfs/zfs_ctldir.c b/module/zfs/zfs_ctldir.c
index 53674d975..d0e2f7ee0 100644
--- a/module/zfs/zfs_ctldir.c
+++ b/module/zfs/zfs_ctldir.c
@@ -864,7 +864,7 @@ zfsctl_snapdir_rename(struct inode *sdip, char *snm,
ZFS_MAX_DATASET_NAME_LEN, from);
if (error == 0)
error = zfsctl_snapshot_name(ITOZSB(tdip), tnm,
- ZFS_MAX_DATASET_NAME_LEN, to);
+ ZFS_MAX_DATASET_NAME_LEN, to);
if (error == 0)
error = zfs_secpolicy_rename_perms(from, to, cr);
if (error != 0)
diff --git a/module/zfs/zfs_debug.c b/module/zfs/zfs_debug.c
index 2770359c8..b553d21e4 100644
--- a/module/zfs/zfs_debug.c
+++ b/module/zfs/zfs_debug.c
@@ -62,7 +62,7 @@ zfs_dbgmsg_data(char *buf, size_t size, void *data)
zfs_dbgmsg_t *zdm = (zfs_dbgmsg_t *)data;
(void) snprintf(buf, size, "%-12llu %-s\n",
- (u_longlong_t) zdm->zdm_timestamp, zdm->zdm_msg);
+ (u_longlong_t)zdm->zdm_timestamp, zdm->zdm_msg);
return (0);
}
diff --git a/module/zfs/zfs_ioctl.c b/module/zfs/zfs_ioctl.c
index 0a8d260e6..18af93cae 100644
--- a/module/zfs/zfs_ioctl.c
+++ b/module/zfs/zfs_ioctl.c
@@ -3883,7 +3883,7 @@ zfs_check_settable(const char *dsname, nvpair_t *pair, cred_t *cr)
* because GRUB doesn't support them.
*/
if (zfs_is_bootfs(dsname) &&
- intval != ZFS_DNSIZE_LEGACY) {
+ intval != ZFS_DNSIZE_LEGACY) {
return (SET_ERROR(EDOM));
}
@@ -4275,7 +4275,7 @@ zfs_ioc_recv_impl(char *tofs, char *tosnap, char *origin,
*read_bytes = off - input_fp->f_offset;
if (VOP_SEEK(input_fp->f_vnode, input_fp->f_offset, &off, NULL) == 0)
- input_fp->f_offset = off;
+ input_fp->f_offset = off;
#ifdef DEBUG
if (zfs_ioc_recv_inject_err) {
@@ -4463,7 +4463,7 @@ zfs_ioc_recv_new(const char *fsname, nvlist_t *innvl, nvlist_t *outnvl)
return (error);
error = nvlist_lookup_byte_array(innvl, "begin_record",
- (uchar_t **) &begin_record, &begin_record_size);
+ (uchar_t **)&begin_record, &begin_record_size);
if (error != 0 || begin_record_size != sizeof (*begin_record))
return (SET_ERROR(EINVAL));
@@ -5356,7 +5356,7 @@ zfs_ioc_events_next(zfs_cmd_t *zc)
do {
error = zfs_zevent_next(ze, &event,
- &zc->zc_nvlist_dst_size, &dropped);
+ &zc->zc_nvlist_dst_size, &dropped);
if (event != NULL) {
zc->zc_cookie = dropped;
error = put_nvlist(zc, event);
@@ -5562,7 +5562,7 @@ zfs_ioc_send_new(const char *snapname, nvlist_t *innvl, nvlist_t *outnvl)
off = fp->f_offset;
error = dmu_send(snapname, fromname, embedok, largeblockok, compressok,
- fd, resumeobj, resumeoff, fp->f_vnode, &off);
+ fd, resumeobj, resumeoff, fp->f_vnode, &off);
if (VOP_SEEK(fp->f_vnode, fp->f_offset, &off, NULL) == 0)
fp->f_offset = off;
@@ -5631,7 +5631,7 @@ zfs_ioc_send_space(const char *snapname, nvlist_t *innvl, nvlist_t *outnvl)
if (error != 0)
goto out;
error = dmu_send_estimate(tosnap, fromsnap, compressok,
- &space);
+ &space);
dsl_dataset_rele(fromsnap, FTAG);
} else if (strchr(fromname, '#') != NULL) {
/*
diff --git a/module/zfs/zfs_vfsops.c b/module/zfs/zfs_vfsops.c
index 39e92ce21..1c3dccdca 100644
--- a/module/zfs/zfs_vfsops.c
+++ b/module/zfs/zfs_vfsops.c
@@ -1222,8 +1222,9 @@ zfs_sb_prune(struct super_block *sb, unsigned long nr_to_scan, int *objects)
defined(SHRINKER_NUMA_AWARE)
if (sb->s_shrink.flags & SHRINKER_NUMA_AWARE) {
*objects = 0;
- for_each_online_node(sc.nid)
+ for_each_online_node(sc.nid) {
*objects += (*shrinker->scan_objects)(shrinker, &sc);
+ }
} else {
*objects = (*shrinker->scan_objects)(shrinker, &sc);
}
@@ -1344,7 +1345,7 @@ zfs_sb_teardown(zfs_sb_t *zsb, boolean_t unmounting)
if (!unmounting) {
mutex_enter(&zsb->z_znodes_lock);
for (zp = list_head(&zsb->z_all_znodes); zp != NULL;
- zp = list_next(&zsb->z_all_znodes, zp)) {
+ zp = list_next(&zsb->z_all_znodes, zp)) {
if (zp->z_sa_hdl)
zfs_znode_dmu_fini(zp);
}
diff --git a/module/zfs/zfs_vnops.c b/module/zfs/zfs_vnops.c
index 7601e7618..9fe4c7870 100644
--- a/module/zfs/zfs_vnops.c
+++ b/module/zfs/zfs_vnops.c
@@ -2626,7 +2626,7 @@ zfs_getattr_fast(struct inode *ip, struct kstat *sp)
if (zsb->z_issnap) {
if (ip->i_sb->s_root->d_inode == ip)
sp->ino = ZFSCTL_INO_SNAPDIRS -
- dmu_objset_id(zsb->z_os);
+ dmu_objset_id(zsb->z_os);
}
ZFS_EXIT(zsb);
@@ -4932,6 +4932,7 @@ zfs_retzcbuf(struct inode *ip, xuio_t *xuio, cred_t *cr)
#endif /* HAVE_UIO_ZEROCOPY */
#if defined(_KERNEL) && defined(HAVE_SPL)
+/* CSTYLED */
module_param(zfs_delete_blocks, ulong, 0644);
MODULE_PARM_DESC(zfs_delete_blocks, "Delete files larger than N blocks async");
module_param(zfs_read_chunk_size, long, 0644);
diff --git a/module/zfs/zfs_znode.c b/module/zfs/zfs_znode.c
index a4d1520b1..624e92696 100644
--- a/module/zfs/zfs_znode.c
+++ b/module/zfs/zfs_znode.c
@@ -575,9 +575,7 @@ zfs_znode_alloc(zfs_sb_t *zsb, dmu_buf_t *db, int blksz,
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zsb), NULL, &mtime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zsb), NULL, &ctime, 16);
- if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count) != 0 ||
- tmp_gen == 0) {
-
+ if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count) != 0 || tmp_gen == 0) {
if (hdl == NULL)
sa_handle_destroy(zp->z_sa_hdl);
zp->z_sa_hdl = NULL;
@@ -2142,6 +2140,7 @@ zfs_obj_to_stats(objset_t *osp, uint64_t obj, zfs_stat_t *sb,
EXPORT_SYMBOL(zfs_create_fs);
EXPORT_SYMBOL(zfs_obj_to_path);
+/* CSTYLED */
module_param(zfs_object_mutex_size, uint, 0644);
MODULE_PARM_DESC(zfs_object_mutex_size, "Size of znode hold array");
#endif
diff --git a/module/zfs/zil.c b/module/zfs/zil.c
index b2d07166e..b3b069900 100644
--- a/module/zfs/zil.c
+++ b/module/zfs/zil.c
@@ -2270,6 +2270,7 @@ MODULE_PARM_DESC(zil_replay_disable, "Disable intent logging replay");
module_param(zfs_nocacheflush, int, 0644);
MODULE_PARM_DESC(zfs_nocacheflush, "Disable cache flushes");
+/* CSTYLED */
module_param(zil_slog_limit, ulong, 0644);
MODULE_PARM_DESC(zil_slog_limit, "Max commit bytes to separate log device");
#endif
diff --git a/module/zfs/zio.c b/module/zfs/zio.c
index e9d08093e..a242c6bf6 100644
--- a/module/zfs/zio.c
+++ b/module/zfs/zio.c
@@ -1577,7 +1577,7 @@ zio_delay_interrupt(zio_t *zio)
* OpenZFS's timeout_generic().
*/
tid = taskq_dispatch_delay(system_taskq,
- (task_func_t *) zio_interrupt,
+ (task_func_t *)zio_interrupt,
zio, TQ_NOSLEEP, expire_at_tick);
if (tid == TASKQID_INVALID) {
/*
@@ -3802,9 +3802,9 @@ zio_done(zio_t *zio)
* device is currently unavailable.
*/
if (zio->io_error != ECKSUM && zio->io_vd != NULL &&
- !vdev_is_dead(zio->io_vd))
+ !vdev_is_dead(zio->io_vd))
zfs_ereport_post(FM_EREPORT_ZFS_IO, zio->io_spa,
- zio->io_vd, zio, 0, 0);
+ zio->io_vd, zio, 0, 0);
if ((zio->io_error == EIO || !(zio->io_flags &
(ZIO_FLAG_SPECULATIVE | ZIO_FLAG_DONT_PROPAGATE))) &&
diff --git a/module/zfs/zpl_export.c b/module/zfs/zpl_export.c
index 6f051a048..a264d664c 100644
--- a/module/zfs/zpl_export.c
+++ b/module/zfs/zpl_export.c
@@ -37,6 +37,7 @@ zpl_encode_fh(struct inode *ip, __u32 *fh, int *max_len, struct inode *parent)
#else
zpl_encode_fh(struct dentry *dentry, __u32 *fh, int *max_len, int connectable)
{
+ /* CSTYLED */
struct inode *ip = dentry->d_inode;
#endif /* HAVE_ENCODE_FH_WITH_INODE */
fstrans_cookie_t cookie;
diff --git a/module/zfs/zpl_inode.c b/module/zfs/zpl_inode.c
index e3c574810..afbf76dfe 100644
--- a/module/zfs/zpl_inode.c
+++ b/module/zfs/zpl_inode.c
@@ -314,7 +314,7 @@ zpl_mkdir(struct inode *dir, struct dentry *dentry, zpl_umode_t mode)
}
static int
-zpl_rmdir(struct inode * dir, struct dentry *dentry)
+zpl_rmdir(struct inode *dir, struct dentry *dentry)
{
cred_t *cr = CRED();
int error;
@@ -379,7 +379,7 @@ zpl_setattr(struct dentry *dentry, struct iattr *ia)
if (vap->va_mask & ATTR_ATIME)
ip->i_atime = timespec_trunc(ia->ia_atime,
- ip->i_sb->s_time_gran);
+ ip->i_sb->s_time_gran);
cookie = spl_fstrans_mark();
error = -zfs_setattr(ip, vap, 0, cr);
@@ -657,6 +657,7 @@ zpl_revalidate(struct dentry *dentry, struct nameidata *nd)
zpl_revalidate(struct dentry *dentry, unsigned int flags)
{
#endif /* HAVE_D_REVALIDATE_NAMEIDATA */
+ /* CSTYLED */
zfs_sb_t *zsb = dentry->d_sb->s_fs_info;
int error;
diff --git a/module/zfs/zvol.c b/module/zfs/zvol.c
index 61d0538a3..2c99b73c2 100644
--- a/module/zfs/zvol.c
+++ b/module/zfs/zvol.c
@@ -572,9 +572,9 @@ zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, uint64_t offset,
return;
immediate_write_sz = (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
- ? 0 : zvol_immediate_write_sz;
+ ? 0 : zvol_immediate_write_sz;
slogging = spa_has_slogs(zilog->zl_spa) &&
- (zilog->zl_logbias == ZFS_LOGBIAS_LATENCY);
+ (zilog->zl_logbias == ZFS_LOGBIAS_LATENCY);
while (size) {
itx_t *itx;
@@ -1441,7 +1441,7 @@ zvol_create_minor_impl(const char *name)
if (len > 0) {
dmu_prefetch(os, ZVOL_OBJ, 0, 0, len, ZIO_PRIORITY_SYNC_READ);
dmu_prefetch(os, ZVOL_OBJ, 0, volsize - len, len,
- ZIO_PRIORITY_SYNC_READ);
+ ZIO_PRIORITY_SYNC_READ);
}
zv->zv_objset = NULL;
@@ -1539,7 +1539,7 @@ zvol_create_snap_minor_cb(const char *dsname, void *arg)
/* at this point, the dsname should name a snapshot */
if (strchr(dsname, '@') == 0) {
dprintf("zvol_create_snap_minor_cb(): "
- "%s is not a shapshot name\n", dsname);
+			    "%s is not a snapshot name\n", dsname);
} else {
minors_job_t *job;
char *n = strdup(dsname);
@@ -1608,7 +1608,7 @@ zvol_create_minors_cb(const char *dsname, void *arg)
}
} else {
dprintf("zvol_create_minors_cb(): %s is not a zvol name\n",
- dsname);
+ dsname);
}
return (0);
@@ -1954,7 +1954,7 @@ zvol_set_snapdev_sync_cb(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
return (0);
(void) taskq_dispatch(dp->dp_spa->spa_zvol_taskq, zvol_task_cb,
- task, TQ_SLEEP);
+ task, TQ_SLEEP);
return (0);
}
@@ -2087,6 +2087,7 @@ zvol_fini(void)
mutex_destroy(&zvol_state_lock);
}
+/* BEGIN CSTYLED */
module_param(zvol_inhibit_dev, uint, 0644);
MODULE_PARM_DESC(zvol_inhibit_dev, "Do not create zvol device nodes");
@@ -2098,3 +2099,4 @@ MODULE_PARM_DESC(zvol_max_discard_blocks, "Max number of blocks to discard");
module_param(zvol_prefetch_bytes, uint, 0644);
MODULE_PARM_DESC(zvol_prefetch_bytes, "Prefetch N bytes at zvol start+end");
+/* END CSTYLED */
diff --git a/module/zpios/pios.c b/module/zpios/pios.c
index 43af6bff0..c1791eb7d 100644
--- a/module/zpios/pios.c
+++ b/module/zpios/pios.c
@@ -179,7 +179,7 @@ zpios_dmu_object_free(run_args_t *run_args, objset_t *os, uint64_t obj)
rc = dmu_tx_assign(tx, TXG_WAIT);
if (rc) {
zpios_print(run_args->file,
- "dmu_tx_assign() failed: %d\n", rc);
+ "dmu_tx_assign() failed: %d\n", rc);
dmu_tx_abort(tx);
return (rc);
}
@@ -187,7 +187,7 @@ zpios_dmu_object_free(run_args_t *run_args, objset_t *os, uint64_t obj)
rc = dmu_object_free(os, obj, tx);
if (rc) {
zpios_print(run_args->file,
- "dmu_object_free() failed: %d\n", rc);
+ "dmu_object_free() failed: %d\n", rc);
dmu_tx_abort(tx);
return (rc);
}
@@ -213,14 +213,14 @@ zpios_dmu_setup(run_args_t *run_args)
rc = dmu_objset_create(name, DMU_OST_OTHER, 0, NULL, NULL);
if (rc) {
zpios_print(run_args->file, "Error dmu_objset_create(%s, ...) "
- "failed: %d\n", name, rc);
+ "failed: %d\n", name, rc);
goto out;
}
rc = dmu_objset_own(name, DMU_OST_OTHER, 0, zpios_tag, &os);
if (rc) {
zpios_print(run_args->file, "Error dmu_objset_own(%s, ...) "
- "failed: %d\n", name, rc);
+ "failed: %d\n", name, rc);
goto out_destroy;
}
@@ -229,7 +229,7 @@ zpios_dmu_setup(run_args_t *run_args)
if (obj == 0) {
rc = -EBADF;
zpios_print(run_args->file, "Error zpios_dmu_"
- "object_create() failed, %d\n", rc);
+ "object_create() failed, %d\n", rc);
goto out_destroy;
}
}
@@ -268,7 +268,7 @@ out_destroy:
rc2 = dsl_destroy_head(name);
if (rc2)
zpios_print(run_args->file, "Error dsl_destroy_head"
- "(%s, ...) failed: %d\n", name, rc2);
+ "(%s, ...) failed: %d\n", name, rc2);
}
out:
t->stop = zpios_timespec_now();
@@ -497,7 +497,7 @@ zpios_dmu_write(run_args_t *run_args, objset_t *os, uint64_t object,
continue;
}
zpios_print(run_args->file,
- "Error in dmu_tx_assign(), %d", rc);
+ "Error in dmu_tx_assign(), %d", rc);
dmu_tx_abort(tx);
return (rc);
}
@@ -588,7 +588,7 @@ zpios_thread_main(void *data)
if (rc) {
zpios_print(run_args->file, "IO error while doing "
- "dmu_write(): %d\n", rc);
+ "dmu_write(): %d\n", rc);
break;
}
@@ -651,13 +651,13 @@ zpios_thread_main(void *data)
t.start = zpios_timespec_now();
rc = zpios_dmu_read(run_args, obj.os, obj.obj,
- offset, chunk_size, buf);
+ offset, chunk_size, buf);
t.stop = zpios_timespec_now();
t.delta = zpios_timespec_sub(t.stop, t.start);
if (rc) {
zpios_print(run_args->file, "IO error while doing "
- "dmu_read(): %d\n", rc);
+ "dmu_read(): %d\n", rc);
break;
}
@@ -928,7 +928,7 @@ zpios_open(struct inode *inode, struct file *file)
spin_lock_init(&info->info_lock);
info->info_size = ZPIOS_INFO_BUFFER_SIZE;
info->info_buffer =
- (char *) vmem_alloc(ZPIOS_INFO_BUFFER_SIZE, KM_SLEEP);
+ (char *)vmem_alloc(ZPIOS_INFO_BUFFER_SIZE, KM_SLEEP);
info->info_head = info->info_buffer;
file->private_data = (void *)info;
@@ -1035,7 +1035,7 @@ zpios_ioctl_cfg(struct file *file, unsigned long arg)
break;
default:
zpios_print(file, "Bad config command %d\n",
- kcfg.cfg_cmd);
+ kcfg.cfg_cmd);
rc = -EINVAL;
break;
}
@@ -1055,7 +1055,7 @@ zpios_ioctl_cmd(struct file *file, unsigned long arg)
rc = copy_from_user(kcmd, (zpios_cfg_t *)arg, sizeof (zpios_cmd_t));
if (rc) {
zpios_print(file, "Unable to copy command structure "
- "from user to kernel memory, %d\n", rc);
+ "from user to kernel memory, %d\n", rc);
goto out_cmd;
}
@@ -1074,7 +1074,7 @@ zpios_ioctl_cmd(struct file *file, unsigned long arg)
cmd_data_str)), kcmd->cmd_data_size);
if (rc) {
zpios_print(file, "Unable to copy data buffer "
- "from user to kernel memory, %d\n", rc);
+ "from user to kernel memory, %d\n", rc);
goto out_data;
}
}
@@ -1090,7 +1090,7 @@ zpios_ioctl_cmd(struct file *file, unsigned long arg)
cmd_data_str)), data, kcmd->cmd_data_size);
if (rc) {
zpios_print(file, "Unable to copy data buffer "
- "from kernel to user memory, %d\n", rc);
+ "from kernel to user memory, %d\n", rc);
rc = -EFAULT;
}