diff options
author | Michael Kjorling <[email protected]> | 2013-11-01 20:26:11 +0100 |
---|---|---|
committer | Brian Behlendorf <[email protected]> | 2013-12-18 16:46:35 -0800 |
commit | d1d7e2689db9e03f11c069ebc9f1ba12829e5dac (patch) | |
tree | 75b9a2b23334d5f673fb31f142f74146d351865c /include/linux | |
parent | 8ffef572ed2ba97e0c2d6a8aa2240012e611dc6f (diff) |
cstyle: Resolve C style issues
The vast majority of these changes are in Linux specific code.
They are the result of not having an automated style checker to
validate the code when it was originally written. Others were
caused when the common code was slightly adjusted for Linux.
This patch contains no functional changes. It only refreshes
the code to conform to style guide.
Everyone submitting patches for inclusion upstream should now
run 'make checkstyle' and resolve any warnings prior to opening
a pull request. The automated builders have been updated to
fail a build when 'make checkstyle' detects an issue.
Signed-off-by: Brian Behlendorf <[email protected]>
Closes #1821
Diffstat (limited to 'include/linux')
-rw-r--r-- | include/linux/blkdev_compat.h | 136 | ||||
-rw-r--r-- | include/linux/dcache_compat.h | 14 | ||||
-rw-r--r-- | include/linux/vfs_compat.h | 38 | ||||
-rw-r--r-- | include/linux/xattr_compat.h | 32 |
4 files changed, 115 insertions, 105 deletions
diff --git a/include/linux/blkdev_compat.h b/include/linux/blkdev_compat.h index ec9926ffd..e45601bc8 100644 --- a/include/linux/blkdev_compat.h +++ b/include/linux/blkdev_compat.h @@ -27,7 +27,7 @@ */ #ifndef _ZFS_BLKDEV_H -#define _ZFS_BLKDEV_H +#define _ZFS_BLKDEV_H #include <linux/blkdev.h> #include <linux/elevator.h> @@ -46,7 +46,7 @@ blk_fetch_request(struct request_queue *q) if (req) blkdev_dequeue_request(req); - return req; + return (req); } #endif /* HAVE_BLK_FETCH_REQUEST */ @@ -79,7 +79,7 @@ __blk_end_request(struct request *req, int error, unsigned int nr_bytes) req->hard_cur_sectors = nr_bytes >> 9; end_request(req, ((error == 0) ? 1 : error)); - return 0; + return (0); } static inline bool @@ -92,17 +92,17 @@ blk_end_request(struct request *req, int error, unsigned int nr_bytes) rc = __blk_end_request(req, error, nr_bytes); spin_unlock_irq(q->queue_lock); - return rc; + return (rc); } #else -# ifdef HAVE_BLK_END_REQUEST_GPL_ONLY +#ifdef HAVE_BLK_END_REQUEST_GPL_ONLY /* * Define required to avoid conflicting 2.6.29 non-static prototype for a * GPL-only version of the helper. As of 2.6.31 the helper is available * to non-GPL modules and is not explicitly exported GPL-only. */ -# define __blk_end_request __blk_end_request_x -# define blk_end_request blk_end_request_x +#define __blk_end_request __blk_end_request_x +#define blk_end_request blk_end_request_x static inline bool __blk_end_request_x(struct request *req, int error, unsigned int nr_bytes) @@ -115,7 +115,7 @@ __blk_end_request_x(struct request *req, int error, unsigned int nr_bytes) req->hard_cur_sectors = nr_bytes >> 9; end_request(req, ((error == 0) ? 
1 : error)); - return 0; + return (0); } static inline bool blk_end_request_x(struct request *req, int error, unsigned int nr_bytes) @@ -127,9 +127,9 @@ blk_end_request_x(struct request *req, int error, unsigned int nr_bytes) rc = __blk_end_request_x(req, error, nr_bytes); spin_unlock_irq(q->queue_lock); - return rc; + return (rc); } -# endif /* HAVE_BLK_END_REQUEST_GPL_ONLY */ +#endif /* HAVE_BLK_END_REQUEST_GPL_ONLY */ #endif /* HAVE_BLK_END_REQUEST */ /* @@ -141,7 +141,7 @@ blk_end_request_x(struct request *req, int error, unsigned int nr_bytes) * that long term this function will be opened up. */ #if defined(HAVE_BLK_QUEUE_FLUSH) && defined(HAVE_BLK_QUEUE_FLUSH_GPL_ONLY) -#define blk_queue_flush __blk_queue_flush +#define blk_queue_flush __blk_queue_flush static inline void __blk_queue_flush(struct request_queue *q, unsigned int flags) { @@ -153,7 +153,7 @@ __blk_queue_flush(struct request_queue *q, unsigned int flags) static inline sector_t blk_rq_pos(struct request *req) { - return req->sector; + return (req->sector); } #endif /* HAVE_BLK_RQ_POS */ @@ -161,7 +161,7 @@ blk_rq_pos(struct request *req) static inline unsigned int blk_rq_sectors(struct request *req) { - return req->nr_sectors; + return (req->nr_sectors); } #endif /* HAVE_BLK_RQ_SECTORS */ @@ -171,11 +171,11 @@ blk_rq_sectors(struct request *req) * GPL-only version of the helper. As of 2.6.31 the helper is available * to non-GPL modules in the form of a static inline in the header. */ -#define blk_rq_bytes __blk_rq_bytes +#define blk_rq_bytes __blk_rq_bytes static inline unsigned int __blk_rq_bytes(struct request *req) { - return blk_rq_sectors(req) << 9; + return (blk_rq_sectors(req) << 9); } #endif /* !HAVE_BLK_RQ_BYTES || HAVE_BLK_RQ_BYTES_GPL_ONLY */ @@ -186,7 +186,7 @@ __blk_rq_bytes(struct request *req) * macros are redefined here if they are missing from the kernel. 
*/ #ifndef blk_fs_request -#define blk_fs_request(rq) ((rq)->cmd_type == REQ_TYPE_FS) +#define blk_fs_request(rq) ((rq)->cmd_type == REQ_TYPE_FS) #endif /* @@ -197,7 +197,7 @@ __blk_rq_bytes(struct request *req) * this legacy behavior. */ #ifndef blk_queue_stackable -#define blk_queue_stackable(q) ((q)->request_fn == NULL) +#define blk_queue_stackable(q) ((q)->request_fn == NULL) #endif /* @@ -205,7 +205,7 @@ __blk_rq_bytes(struct request *req) * The blk_queue_max_hw_sectors() function replaces blk_queue_max_sectors(). */ #ifndef HAVE_BLK_QUEUE_MAX_HW_SECTORS -#define blk_queue_max_hw_sectors __blk_queue_max_hw_sectors +#define blk_queue_max_hw_sectors __blk_queue_max_hw_sectors static inline void __blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors) { @@ -219,7 +219,7 @@ __blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors) * blk_queue_max_hw_segments() and blk_queue_max_phys_segments(). */ #ifndef HAVE_BLK_QUEUE_MAX_SEGMENTS -#define blk_queue_max_segments __blk_queue_max_segments +#define blk_queue_max_segments __blk_queue_max_segments static inline void __blk_queue_max_segments(struct request_queue *q, unsigned short max_segments) { @@ -235,7 +235,7 @@ __blk_queue_max_segments(struct request_queue *q, unsigned short max_segments) * a read-modify-write penalty. For older kernels this is a no-op. */ #ifndef HAVE_BLK_QUEUE_PHYSICAL_BLOCK_SIZE -#define blk_queue_physical_block_size(q, x) ((void)(0)) +#define blk_queue_physical_block_size(q, x) ((void)(0)) #endif /* @@ -244,7 +244,7 @@ __blk_queue_max_segments(struct request_queue *q, unsigned short max_segments) * I/O size for the device. For older kernels this is a no-op. 
*/ #ifndef HAVE_BLK_QUEUE_IO_OPT -#define blk_queue_io_opt(q, x) ((void)(0)) +#define blk_queue_io_opt(q, x) ((void)(0)) #endif #ifndef HAVE_GET_DISK_RO @@ -256,7 +256,7 @@ get_disk_ro(struct gendisk *disk) if (disk->part[0]) policy = disk->part[0]->policy; - return policy; + return (policy); } #endif /* HAVE_GET_DISK_RO */ @@ -274,14 +274,14 @@ struct req_iterator { struct bio *bio; }; -# define for_each_bio(_bio) \ +#define for_each_bio(_bio) \ for (; _bio; _bio = _bio->bi_next) -# define __rq_for_each_bio(_bio, rq) \ +#define __rq_for_each_bio(_bio, rq) \ if ((rq->bio)) \ for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next) -# define rq_for_each_segment(bvl, _rq, _iter) \ +#define rq_for_each_segment(bvl, _rq, _iter) \ __rq_for_each_bio(_iter.bio, _rq) \ bio_for_each_segment(bvl, _iter.bio, _iter.i) #endif /* HAVE_RQ_FOR_EACH_SEGMENT */ @@ -315,21 +315,23 @@ bio_set_flags_failfast(struct block_device *bdev, int *flags) #ifdef HAVE_BIO_RW_FAILFAST_DTD /* BIO_RW_FAILFAST_* preferred interface from 2.6.28 - 2.6.35 */ - *flags |= - ((1 << BIO_RW_FAILFAST_DEV) | - (1 << BIO_RW_FAILFAST_TRANSPORT) | - (1 << BIO_RW_FAILFAST_DRIVER)); + *flags |= ( + (1 << BIO_RW_FAILFAST_DEV) | + (1 << BIO_RW_FAILFAST_TRANSPORT) | + (1 << BIO_RW_FAILFAST_DRIVER)); #else -# ifdef HAVE_BIO_RW_FAILFAST +#ifdef HAVE_BIO_RW_FAILFAST /* BIO_RW_FAILFAST preferred interface from 2.6.12 - 2.6.27 */ *flags |= (1 << BIO_RW_FAILFAST); -# else -# ifdef HAVE_REQ_FAILFAST_MASK - /* REQ_FAILFAST_* preferred interface from 2.6.36 - 2.6.xx, - * the BIO_* and REQ_* flags were unified under REQ_* flags. */ +#else +#ifdef HAVE_REQ_FAILFAST_MASK + /* + * REQ_FAILFAST_* preferred interface from 2.6.36 - 2.6.xx, + * the BIO_* and REQ_* flags were unified under REQ_* flags. 
+ */ *flags |= REQ_FAILFAST_MASK; -# endif /* HAVE_REQ_FAILFAST_MASK */ -# endif /* HAVE_BIO_RW_FAILFAST */ +#endif /* HAVE_REQ_FAILFAST_MASK */ +#endif /* HAVE_BIO_RW_FAILFAST */ #endif /* HAVE_BIO_RW_FAILFAST_DTD */ } @@ -337,7 +339,7 @@ bio_set_flags_failfast(struct block_device *bdev, int *flags) * Maximum disk label length, it may be undefined for some kernels. */ #ifndef DISK_NAME_LEN -#define DISK_NAME_LEN 32 +#define DISK_NAME_LEN 32 #endif /* DISK_NAME_LEN */ /* @@ -346,12 +348,14 @@ bio_set_flags_failfast(struct block_device *bdev, int *flags) * macro's to ensure the prototype and return value are handled. */ #ifdef HAVE_2ARGS_BIO_END_IO_T -# define BIO_END_IO_PROTO(fn, x, y, z) static void fn(struct bio *x, int z) -# define BIO_END_IO_RETURN(rc) return +#define BIO_END_IO_PROTO(fn, x, y, z) static void fn(struct bio *x, int z) +#define BIO_END_IO_RETURN(rc) return #else -# define BIO_END_IO_PROTO(fn, x, y, z) static int fn(struct bio *x, \ - unsigned int y, int z) -# define BIO_END_IO_RETURN(rc) return rc +#define BIO_END_IO_PROTO(fn, x, y, z) static int fn( \ + struct bio *x, \ + unsigned int y, \ + int z) +#define BIO_END_IO_RETURN(rc) return rc #endif /* HAVE_2ARGS_BIO_END_IO_T */ /* @@ -370,15 +374,15 @@ bio_set_flags_failfast(struct block_device *bdev, int *flags) * Used to exclusively open a block device from within the kernel. 
*/ #if defined(HAVE_BLKDEV_GET_BY_PATH) -# define vdev_bdev_open(path, md, hld) blkdev_get_by_path(path, \ +#define vdev_bdev_open(path, md, hld) blkdev_get_by_path(path, \ (md) | FMODE_EXCL, hld) -# define vdev_bdev_close(bdev, md) blkdev_put(bdev, (md) | FMODE_EXCL) +#define vdev_bdev_close(bdev, md) blkdev_put(bdev, (md) | FMODE_EXCL) #elif defined(HAVE_OPEN_BDEV_EXCLUSIVE) -# define vdev_bdev_open(path, md, hld) open_bdev_exclusive(path, md, hld) -# define vdev_bdev_close(bdev, md) close_bdev_exclusive(bdev, md) +#define vdev_bdev_open(path, md, hld) open_bdev_exclusive(path, md, hld) +#define vdev_bdev_close(bdev, md) close_bdev_exclusive(bdev, md) #else -# define vdev_bdev_open(path, md, hld) open_bdev_excl(path, md, hld) -# define vdev_bdev_close(bdev, md) close_bdev_excl(bdev) +#define vdev_bdev_open(path, md, hld) open_bdev_excl(path, md, hld) +#define vdev_bdev_close(bdev, md) close_bdev_excl(bdev) #endif /* HAVE_BLKDEV_GET_BY_PATH | HAVE_OPEN_BDEV_EXCLUSIVE */ /* @@ -387,9 +391,9 @@ bio_set_flags_failfast(struct block_device *bdev, int *flags) * it was unused. */ #ifdef HAVE_1ARG_INVALIDATE_BDEV -# define vdev_bdev_invalidate(bdev) invalidate_bdev(bdev) +#define vdev_bdev_invalidate(bdev) invalidate_bdev(bdev) #else -# define vdev_bdev_invalidate(bdev) invalidate_bdev(bdev, 1) +#define vdev_bdev_invalidate(bdev) invalidate_bdev(bdev, 1) #endif /* HAVE_1ARG_INVALIDATE_BDEV */ /* @@ -398,7 +402,7 @@ bio_set_flags_failfast(struct block_device *bdev, int *flags) * symbol was not exported. */ #ifndef HAVE_LOOKUP_BDEV -# define lookup_bdev(path) ERR_PTR(-ENOTSUP) +#define lookup_bdev(path) ERR_PTR(-ENOTSUP) #endif /* @@ -416,13 +420,13 @@ bio_set_flags_failfast(struct block_device *bdev, int *flags) * the logical block size interface and then the older hard sector size. 
*/ #ifdef HAVE_BDEV_PHYSICAL_BLOCK_SIZE -# define vdev_bdev_block_size(bdev) bdev_physical_block_size(bdev) +#define vdev_bdev_block_size(bdev) bdev_physical_block_size(bdev) +#else +#ifdef HAVE_BDEV_LOGICAL_BLOCK_SIZE +#define vdev_bdev_block_size(bdev) bdev_logical_block_size(bdev) #else -# ifdef HAVE_BDEV_LOGICAL_BLOCK_SIZE -# define vdev_bdev_block_size(bdev) bdev_logical_block_size(bdev) -# else -# define vdev_bdev_block_size(bdev) bdev_hardsect_size(bdev) -# endif /* HAVE_BDEV_LOGICAL_BLOCK_SIZE */ +#define vdev_bdev_block_size(bdev) bdev_hardsect_size(bdev) +#endif /* HAVE_BDEV_LOGICAL_BLOCK_SIZE */ #endif /* HAVE_BDEV_PHYSICAL_BLOCK_SIZE */ /* @@ -438,13 +442,13 @@ bio_set_flags_failfast(struct block_device *bdev, int *flags) * compatibility macros. */ #ifdef WRITE_FLUSH_FUA -# define VDEV_WRITE_FLUSH_FUA WRITE_FLUSH_FUA -# define VDEV_REQ_FLUSH REQ_FLUSH -# define VDEV_REQ_FUA REQ_FUA +#define VDEV_WRITE_FLUSH_FUA WRITE_FLUSH_FUA +#define VDEV_REQ_FLUSH REQ_FLUSH +#define VDEV_REQ_FUA REQ_FUA #else -# define VDEV_WRITE_FLUSH_FUA WRITE_BARRIER -# define VDEV_REQ_FLUSH REQ_HARDBARRIER -# define VDEV_REQ_FUA REQ_HARDBARRIER +#define VDEV_WRITE_FLUSH_FUA WRITE_BARRIER +#define VDEV_REQ_FLUSH REQ_HARDBARRIER +#define VDEV_REQ_FUA REQ_HARDBARRIER #endif /* @@ -452,7 +456,7 @@ bio_set_flags_failfast(struct block_device *bdev, int *flags) * Use the normal I/O patch for discards. */ #ifdef REQ_DISCARD -# define VDEV_REQ_DISCARD REQ_DISCARD +#define VDEV_REQ_DISCARD REQ_DISCARD #endif /* @@ -467,7 +471,7 @@ blk_queue_discard_granularity(struct request_queue *q, unsigned int dg) q->limits.discard_granularity = dg; } #else -#define blk_queue_discard_granularity(x, dg) ((void)0) +#define blk_queue_discard_granularity(x, dg) ((void)0) #endif /* HAVE_DISCARD_GRANULARITY */ /* @@ -485,6 +489,6 @@ blk_queue_discard_granularity(struct request_queue *q, unsigned int dg) * user space processes which don't pass this value will get EBUSY. 
This is * currently required for the correct operation of hot spares. */ -#define VDEV_HOLDER ((void *)0x2401de7) +#define VDEV_HOLDER ((void *)0x2401de7) #endif /* _ZFS_BLKDEV_H */ diff --git a/include/linux/dcache_compat.h b/include/linux/dcache_compat.h index 2b9e5c1c4..bdaa5db3e 100644 --- a/include/linux/dcache_compat.h +++ b/include/linux/dcache_compat.h @@ -24,15 +24,15 @@ */ #ifndef _ZFS_DCACHE_H -#define _ZFS_DCACHE_H +#define _ZFS_DCACHE_H #include <linux/dcache.h> -#define dname(dentry) ((char *)((dentry)->d_name.name)) -#define dlen(dentry) ((int)((dentry)->d_name.len)) +#define dname(dentry) ((char *)((dentry)->d_name.name)) +#define dlen(dentry) ((int)((dentry)->d_name.len)) #ifndef HAVE_D_MAKE_ROOT -#define d_make_root(inode) d_alloc_root(inode) +#define d_make_root(inode) d_alloc_root(inode) #endif /* HAVE_D_MAKE_ROOT */ /* @@ -74,9 +74,9 @@ d_clear_d_op(struct dentry *dentry) { #ifdef HAVE_D_SET_D_OP dentry->d_op = NULL; - dentry->d_flags &= - ~(DCACHE_OP_HASH | DCACHE_OP_COMPARE | - DCACHE_OP_REVALIDATE | DCACHE_OP_DELETE); + dentry->d_flags &= ~( + DCACHE_OP_HASH | DCACHE_OP_COMPARE | + DCACHE_OP_REVALIDATE | DCACHE_OP_DELETE); #endif /* HAVE_D_SET_D_OP */ } diff --git a/include/linux/vfs_compat.h b/include/linux/vfs_compat.h index 8ec1bccad..4778491fa 100644 --- a/include/linux/vfs_compat.h +++ b/include/linux/vfs_compat.h @@ -24,7 +24,7 @@ */ #ifndef _ZFS_VFS_H -#define _ZFS_VFS_H +#define _ZFS_VFS_H /* * 2.6.28 API change, @@ -71,7 +71,10 @@ truncate_setsize(struct inode *ip, loff_t new) extern atomic_long_t zfs_bdi_seq; static inline int -bdi_setup_and_register(struct backing_dev_info *bdi,char *name,unsigned int cap) +bdi_setup_and_register( + struct backing_dev_info *bdi, + char *name, + unsigned int cap) { char tmp[32]; int error; @@ -99,7 +102,7 @@ bdi_setup_and_register(struct backing_dev_info *bdi,char *name,unsigned int cap) * LOOKUP_RCU flag introduced to distinguish rcu-walk from ref-walk cases. 
*/ #ifndef LOOKUP_RCU -#define LOOKUP_RCU 0x0 +#define LOOKUP_RCU 0x0 #endif /* LOOKUP_RCU */ /* @@ -136,7 +139,7 @@ typedef int zpl_umode_t; * configure check in config/kernel-clear-inode.m4 for full details. */ #if defined(HAVE_EVICT_INODE) && !defined(HAVE_CLEAR_INODE) -#define clear_inode(ip) end_writeback(ip) +#define clear_inode(ip) end_writeback(ip) #endif /* HAVE_EVICT_INODE && !HAVE_CLEAR_INODE */ /* @@ -144,18 +147,21 @@ typedef int zpl_umode_t; * The sget() helper function now takes the mount flags as an argument. */ #ifdef HAVE_5ARG_SGET -#define zpl_sget(type, cmp, set, fl, mtd) sget(type, cmp, set, fl, mtd) +#define zpl_sget(type, cmp, set, fl, mtd) sget(type, cmp, set, fl, mtd) #else -#define zpl_sget(type, cmp, set, fl, mtd) sget(type, cmp, set, mtd) +#define zpl_sget(type, cmp, set, fl, mtd) sget(type, cmp, set, mtd) #endif /* HAVE_5ARG_SGET */ -#define ZFS_IOC_GETFLAGS FS_IOC_GETFLAGS -#define ZFS_IOC_SETFLAGS FS_IOC_SETFLAGS +#define ZFS_IOC_GETFLAGS FS_IOC_GETFLAGS +#define ZFS_IOC_SETFLAGS FS_IOC_SETFLAGS #if defined(SEEK_HOLE) && defined(SEEK_DATA) && !defined(HAVE_LSEEK_EXECUTE) static inline loff_t -lseek_execute(struct file *filp, struct inode *inode, - loff_t offset, loff_t maxsize) +lseek_execute( + struct file *filp, + struct inode *inode, + loff_t offset, + loff_t maxsize) { if (offset < 0 && !(filp->f_mode & FMODE_UNSIGNED_OFFSET)) return (-EINVAL); @@ -186,7 +192,7 @@ lseek_execute(struct file *filp, struct inode *inode, */ #include <linux/posix_acl.h> #ifndef HAVE_POSIX_ACL_CACHING -#define ACL_NOT_CACHED ((void *)(-1)) +#define ACL_NOT_CACHED ((void *)(-1)) #endif /* HAVE_POSIX_ACL_CACHING */ #if defined(HAVE_POSIX_ACL_RELEASE) && !defined(HAVE_POSIX_ACL_RELEASE_GPL_ONLY) @@ -224,14 +230,14 @@ zpl_set_cached_acl(struct inode *ip, int type, struct posix_acl *newer) { if ((newer != ACL_NOT_CACHED) && (newer != NULL)) posix_acl_dup(newer); - switch(type) { + switch (type) { case ACL_TYPE_ACCESS: older = ip->i_acl; - 
rcu_assign_pointer(ip->i_acl,newer); + rcu_assign_pointer(ip->i_acl, newer); break; case ACL_TYPE_DEFAULT: older = ip->i_default_acl; - rcu_assign_pointer(ip->i_default_acl,newer); + rcu_assign_pointer(ip->i_default_acl, newer); break; } @@ -276,11 +282,11 @@ posix_acl_chmod(struct posix_acl **acl, int flags, umode_t umode) { *acl = NULL; } - return (error); + return (error); } static inline int -posix_acl_create(struct posix_acl** acl, int flags, umode_t* umodep) { +posix_acl_create(struct posix_acl **acl, int flags, umode_t *umodep) { struct posix_acl *oldacl = *acl; mode_t mode = *umodep; int error; diff --git a/include/linux/xattr_compat.h b/include/linux/xattr_compat.h index f06ba1fa0..a7371f946 100644 --- a/include/linux/xattr_compat.h +++ b/include/linux/xattr_compat.h @@ -24,7 +24,7 @@ */ #ifndef _ZFS_XATTR_H -#define _ZFS_XATTR_H +#define _ZFS_XATTR_H #include <linux/posix_acl_xattr.h> @@ -47,19 +47,19 @@ typedef struct xattr_handler xattr_handler_t; * instead of an inode, and a handler_flags argument was added. */ #ifdef HAVE_DENTRY_XATTR_GET -#define ZPL_XATTR_GET_WRAPPER(fn) \ +#define ZPL_XATTR_GET_WRAPPER(fn) \ static int \ fn(struct dentry *dentry, const char *name, void *buffer, size_t size, \ int unused_handler_flags) \ { \ - return __ ## fn(dentry->d_inode, name, buffer, size); \ + return (__ ## fn(dentry->d_inode, name, buffer, size)); \ } #else -#define ZPL_XATTR_GET_WRAPPER(fn) \ +#define ZPL_XATTR_GET_WRAPPER(fn) \ static int \ fn(struct inode *ip, const char *name, void *buffer, size_t size) \ { \ - return __ ## fn(ip, name, buffer, size); \ + return (__ ## fn(ip, name, buffer, size)); \ } #endif /* HAVE_DENTRY_XATTR_GET */ @@ -69,28 +69,28 @@ fn(struct inode *ip, const char *name, void *buffer, size_t size) \ * instead of an inode, and a handler_flags argument was added. 
*/ #ifdef HAVE_DENTRY_XATTR_SET -#define ZPL_XATTR_SET_WRAPPER(fn) \ +#define ZPL_XATTR_SET_WRAPPER(fn) \ static int \ fn(struct dentry *dentry, const char *name, const void *buffer, \ size_t size, int flags, int unused_handler_flags) \ { \ - return __ ## fn(dentry->d_inode, name, buffer, size, flags); \ + return (__ ## fn(dentry->d_inode, name, buffer, size, flags)); \ } #else -#define ZPL_XATTR_SET_WRAPPER(fn) \ +#define ZPL_XATTR_SET_WRAPPER(fn) \ static int \ fn(struct inode *ip, const char *name, const void *buffer, \ size_t size, int flags) \ { \ - return __ ## fn(ip, name, buffer, size, flags); \ + return (__ ## fn(ip, name, buffer, size, flags)); \ } #endif /* HAVE_DENTRY_XATTR_SET */ #ifdef HAVE_6ARGS_SECURITY_INODE_INIT_SECURITY -#define zpl_security_inode_init_security(ip, dip, qstr, nm, val, len) \ +#define zpl_security_inode_init_security(ip, dip, qstr, nm, val, len) \ security_inode_init_security(ip, dip, qstr, nm, val, len) #else -#define zpl_security_inode_init_security(ip, dip, qstr, nm, val, len) \ +#define zpl_security_inode_init_security(ip, dip, qstr, nm, val, len) \ security_inode_init_security(ip, dip, nm, val, len) #endif /* HAVE_6ARGS_SECURITY_INODE_INIT_SECURITY */ @@ -103,27 +103,27 @@ fn(struct inode *ip, const char *name, const void *buffer, \ static inline struct posix_acl * zpl_acl_from_xattr(const void *value, int size) { - return posix_acl_from_xattr(CRED()->user_ns, value, size); + return (posix_acl_from_xattr(CRED()->user_ns, value, size)); } static inline int zpl_acl_to_xattr(struct posix_acl *acl, void *value, int size) { - return posix_acl_to_xattr(CRED()->user_ns,acl, value, size); + return (posix_acl_to_xattr(CRED()->user_ns, acl, value, size)); } #else static inline struct posix_acl * -zpl_acl_from_xattr(const void *value,int size) +zpl_acl_from_xattr(const void *value, int size) { - return posix_acl_from_xattr(value, size); + return (posix_acl_from_xattr(value, size)); } static inline int zpl_acl_to_xattr(struct posix_acl 
*acl, void *value, int size) { - return posix_acl_to_xattr(acl, value, size); + return (posix_acl_to_xattr(acl, value, size)); } #endif /* HAVE_POSIX_ACL_FROM_XATTR_USERNS */ |