diff options
author | Brian Behlendorf <[email protected]> | 2014-12-08 13:35:51 -0500 |
---|---|---|
committer | Brian Behlendorf <[email protected]> | 2015-01-16 13:55:09 -0800 |
commit | b34b95635a99223b6bff5437fb389e9340dc7dcd (patch) | |
tree | 71a140683067236d1277c0f2e5ca653656e8ef6f /include | |
parent | e5b9b344c728bb0d9304f1a143db9255901dc5fe (diff) |
Fix kmem cstyle issues
Address all cstyle issues in the kmem, vmem, and kmem_cache source
and headers. This was done to make it easier to review subsequent
changes which will rework the kmem/vmem implementation.
Signed-off-by: Brian Behlendorf <[email protected]>
Diffstat (limited to 'include')
-rw-r--r-- | include/sys/kmem.h | 98 | ||||
-rw-r--r-- | include/sys/kmem_cache.h | 121 | ||||
-rw-r--r-- | include/sys/vmem.h | 68 |
3 files changed, 141 insertions, 146 deletions
diff --git a/include/sys/kmem.h b/include/sys/kmem.h index ee25e4c8c..a9d94c909 100644 --- a/include/sys/kmem.h +++ b/include/sys/kmem.h @@ -1,4 +1,4 @@ -/*****************************************************************************\ +/* * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC. * Copyright (C) 2007 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). @@ -20,7 +20,7 @@ * * You should have received a copy of the GNU General Public License along * with the SPL. If not, see <http://www.gnu.org/licenses/>. -\*****************************************************************************/ + */ #ifndef _SPL_KMEM_H #define _SPL_KMEM_H @@ -36,18 +36,18 @@ extern void strfree(char *str); /* * Memory allocation interfaces */ -#define KM_SLEEP GFP_KERNEL /* Can sleep, never fails */ -#define KM_NOSLEEP GFP_ATOMIC /* Can not sleep, may fail */ -#define KM_PUSHPAGE (GFP_NOIO | __GFP_HIGH) /* Use reserved memory */ -#define KM_NODEBUG __GFP_NOWARN /* Suppress warnings */ -#define KM_FLAGS __GFP_BITS_MASK -#define KM_VMFLAGS GFP_LEVEL_MASK +#define KM_SLEEP GFP_KERNEL /* Can sleep, never fails */ +#define KM_NOSLEEP GFP_ATOMIC /* Can not sleep, may fail */ +#define KM_PUSHPAGE (GFP_NOIO | __GFP_HIGH) /* Use reserved memory */ +#define KM_NODEBUG __GFP_NOWARN /* Suppress warnings */ +#define KM_FLAGS __GFP_BITS_MASK +#define KM_VMFLAGS GFP_LEVEL_MASK /* * Used internally, the kernel does not need to support this flag */ #ifndef __GFP_ZERO -# define __GFP_ZERO 0x8000 +#define __GFP_ZERO 0x8000 #endif /* @@ -66,7 +66,7 @@ kmalloc_nofail(size_t size, gfp_t flags) ptr = kmalloc(size, flags); } while (ptr == NULL && (flags & __GFP_WAIT)); - return ptr; + return (ptr); } static inline void * @@ -78,7 +78,7 @@ kzalloc_nofail(size_t size, gfp_t flags) ptr = kzalloc(size, flags); } while (ptr == NULL && (flags & __GFP_WAIT)); - return ptr; + return (ptr); } static inline void * @@ -90,7 +90,7 @@ 
kmalloc_node_nofail(size_t size, gfp_t flags, int node) ptr = kmalloc_node(size, flags, node); } while (ptr == NULL && (flags & __GFP_WAIT)); - return ptr; + return (ptr); } #ifdef DEBUG_KMEM @@ -98,29 +98,23 @@ kmalloc_node_nofail(size_t size, gfp_t flags, int node) /* * Memory accounting functions to be used only when DEBUG_KMEM is set. */ -# ifdef HAVE_ATOMIC64_T - -# define kmem_alloc_used_add(size) atomic64_add(size, &kmem_alloc_used) -# define kmem_alloc_used_sub(size) atomic64_sub(size, &kmem_alloc_used) -# define kmem_alloc_used_read() atomic64_read(&kmem_alloc_used) -# define kmem_alloc_used_set(size) atomic64_set(&kmem_alloc_used, size) - +#ifdef HAVE_ATOMIC64_T +#define kmem_alloc_used_add(size) atomic64_add(size, &kmem_alloc_used) +#define kmem_alloc_used_sub(size) atomic64_sub(size, &kmem_alloc_used) +#define kmem_alloc_used_read() atomic64_read(&kmem_alloc_used) +#define kmem_alloc_used_set(size) atomic64_set(&kmem_alloc_used, size) extern atomic64_t kmem_alloc_used; extern unsigned long long kmem_alloc_max; - -# else /* HAVE_ATOMIC64_T */ - -# define kmem_alloc_used_add(size) atomic_add(size, &kmem_alloc_used) -# define kmem_alloc_used_sub(size) atomic_sub(size, &kmem_alloc_used) -# define kmem_alloc_used_read() atomic_read(&kmem_alloc_used) -# define kmem_alloc_used_set(size) atomic_set(&kmem_alloc_used, size) - +#else /* HAVE_ATOMIC64_T */ +#define kmem_alloc_used_add(size) atomic_add(size, &kmem_alloc_used) +#define kmem_alloc_used_sub(size) atomic_sub(size, &kmem_alloc_used) +#define kmem_alloc_used_read() atomic_read(&kmem_alloc_used) +#define kmem_alloc_used_set(size) atomic_set(&kmem_alloc_used, size) extern atomic_t kmem_alloc_used; extern unsigned long long kmem_alloc_max; +#endif /* HAVE_ATOMIC64_T */ -# endif /* HAVE_ATOMIC64_T */ - -# ifdef DEBUG_KMEM_TRACKING +#ifdef DEBUG_KMEM_TRACKING /* * DEBUG_KMEM && DEBUG_KMEM_TRACKING * @@ -132,18 +126,18 @@ extern unsigned long long kmem_alloc_max; * be enabled for debugging. 
This feature may be enabled by passing * --enable-debug-kmem-tracking to configure. */ -# define kmem_alloc(sz, fl) kmem_alloc_track((sz), (fl), \ - __FUNCTION__, __LINE__, 0, 0) -# define kmem_zalloc(sz, fl) kmem_alloc_track((sz), (fl)|__GFP_ZERO,\ - __FUNCTION__, __LINE__, 0, 0) -# define kmem_alloc_node(sz, fl, nd) kmem_alloc_track((sz), (fl), \ - __FUNCTION__, __LINE__, 1, nd) -# define kmem_free(ptr, sz) kmem_free_track((ptr), (sz)) +#define kmem_alloc(sz, fl) kmem_alloc_track((sz), (fl), \ + __FUNCTION__, __LINE__, 0, 0) +#define kmem_zalloc(sz, fl) kmem_alloc_track((sz), (fl)|__GFP_ZERO,\ + __FUNCTION__, __LINE__, 0, 0) +#define kmem_alloc_node(sz, fl, nd) kmem_alloc_track((sz), (fl), \ + __FUNCTION__, __LINE__, 1, nd) +#define kmem_free(ptr, sz) kmem_free_track((ptr), (sz)) extern void *kmem_alloc_track(size_t, int, const char *, int, int, int); extern void kmem_free_track(const void *, size_t); -# else /* DEBUG_KMEM_TRACKING */ +#else /* DEBUG_KMEM_TRACKING */ /* * DEBUG_KMEM && !DEBUG_KMEM_TRACKING * @@ -153,18 +147,18 @@ extern void kmem_free_track(const void *, size_t); * will be reported on the console. To disable this basic accounting * pass the --disable-debug-kmem option to configure. 
*/ -# define kmem_alloc(sz, fl) kmem_alloc_debug((sz), (fl), \ - __FUNCTION__, __LINE__, 0, 0) -# define kmem_zalloc(sz, fl) kmem_alloc_debug((sz), (fl)|__GFP_ZERO,\ - __FUNCTION__, __LINE__, 0, 0) -# define kmem_alloc_node(sz, fl, nd) kmem_alloc_debug((sz), (fl), \ - __FUNCTION__, __LINE__, 1, nd) -# define kmem_free(ptr, sz) kmem_free_debug((ptr), (sz)) +#define kmem_alloc(sz, fl) kmem_alloc_debug((sz), (fl), \ + __FUNCTION__, __LINE__, 0, 0) +#define kmem_zalloc(sz, fl) kmem_alloc_debug((sz), (fl)|__GFP_ZERO,\ + __FUNCTION__, __LINE__, 0, 0) +#define kmem_alloc_node(sz, fl, nd) kmem_alloc_debug((sz), (fl), \ + __FUNCTION__, __LINE__, 1, nd) +#define kmem_free(ptr, sz) kmem_free_debug((ptr), (sz)) extern void *kmem_alloc_debug(size_t, int, const char *, int, int, int); extern void kmem_free_debug(const void *, size_t); -# endif /* DEBUG_KMEM_TRACKING */ +#endif /* DEBUG_KMEM_TRACKING */ #else /* DEBUG_KMEM */ /* * !DEBUG_KMEM && !DEBUG_KMEM_TRACKING @@ -173,17 +167,17 @@ extern void kmem_free_debug(const void *, size_t); * minimal memory accounting. To enable basic accounting pass the * --enable-debug-kmem option to configure. 
*/ -# define kmem_alloc(sz, fl) kmalloc_nofail((sz), (fl)) -# define kmem_zalloc(sz, fl) kzalloc_nofail((sz), (fl)) -# define kmem_alloc_node(sz, fl, nd) kmalloc_node_nofail((sz), (fl), (nd)) -# define kmem_free(ptr, sz) ((void)(sz), kfree(ptr)) +#define kmem_alloc(sz, fl) kmalloc_nofail((sz), (fl)) +#define kmem_zalloc(sz, fl) kzalloc_nofail((sz), (fl)) +#define kmem_alloc_node(sz, fl, nd) kmalloc_node_nofail((sz), (fl), (nd)) +#define kmem_free(ptr, sz) ((void)(sz), kfree(ptr)) #endif /* DEBUG_KMEM */ int spl_kmem_init(void); void spl_kmem_fini(void); -#define kmem_virt(ptr) (((ptr) >= (void *)VMALLOC_START) && \ - ((ptr) < (void *)VMALLOC_END)) +#define kmem_virt(ptr) (((ptr) >= (void *)VMALLOC_START) && \ + ((ptr) < (void *)VMALLOC_END)) #endif /* _SPL_KMEM_H */ diff --git a/include/sys/kmem_cache.h b/include/sys/kmem_cache.h index 654a2ea43..a5bc0322b 100644 --- a/include/sys/kmem_cache.h +++ b/include/sys/kmem_cache.h @@ -1,4 +1,4 @@ -/*****************************************************************************\ +/* * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC. * Copyright (C) 2007 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). @@ -20,7 +20,7 @@ * * You should have received a copy of the GNU General Public License along * with the SPL. If not, see <http://www.gnu.org/licenses/>. -\*****************************************************************************/ + */ #ifndef _SPL_KMEM_CACHE_H #define _SPL_KMEM_CACHE_H @@ -33,7 +33,7 @@ * allocated from the physical or virtal memory address space. The virtual * slabs allow for good behavior when allocation large objects of identical * size. This slab implementation also supports both constructors and - * destructions which the Linux slab does not. + * destructors which the Linux slab does not. 
*/ enum { KMC_BIT_NOTOUCH = 0, /* Don't update ages */ @@ -46,8 +46,8 @@ enum { KMC_BIT_SLAB = 7, /* Use Linux slab cache */ KMC_BIT_OFFSLAB = 8, /* Objects not on slab */ KMC_BIT_NOEMERGENCY = 9, /* Disable emergency objects */ - KMC_BIT_DEADLOCKED = 14, /* Deadlock detected */ - KMC_BIT_GROWING = 15, /* Growing in progress */ + KMC_BIT_DEADLOCKED = 14, /* Deadlock detected */ + KMC_BIT_GROWING = 15, /* Growing in progress */ KMC_BIT_REAPING = 16, /* Reaping in progress */ KMC_BIT_DESTROY = 17, /* Destroy in progress */ KMC_BIT_TOTAL = 18, /* Proc handler helper bit */ @@ -64,29 +64,29 @@ typedef enum kmem_cbrc { KMEM_CBRC_DONT_KNOW = 4, /* Object unknown */ } kmem_cbrc_t; -#define KMC_NOTOUCH (1 << KMC_BIT_NOTOUCH) -#define KMC_NODEBUG (1 << KMC_BIT_NODEBUG) -#define KMC_NOMAGAZINE (1 << KMC_BIT_NOMAGAZINE) -#define KMC_NOHASH (1 << KMC_BIT_NOHASH) -#define KMC_QCACHE (1 << KMC_BIT_QCACHE) -#define KMC_KMEM (1 << KMC_BIT_KMEM) -#define KMC_VMEM (1 << KMC_BIT_VMEM) -#define KMC_SLAB (1 << KMC_BIT_SLAB) -#define KMC_OFFSLAB (1 << KMC_BIT_OFFSLAB) -#define KMC_NOEMERGENCY (1 << KMC_BIT_NOEMERGENCY) -#define KMC_DEADLOCKED (1 << KMC_BIT_DEADLOCKED) -#define KMC_GROWING (1 << KMC_BIT_GROWING) -#define KMC_REAPING (1 << KMC_BIT_REAPING) -#define KMC_DESTROY (1 << KMC_BIT_DESTROY) -#define KMC_TOTAL (1 << KMC_BIT_TOTAL) -#define KMC_ALLOC (1 << KMC_BIT_ALLOC) -#define KMC_MAX (1 << KMC_BIT_MAX) - -#define KMC_REAP_CHUNK INT_MAX -#define KMC_DEFAULT_SEEKS 1 - -#define KMC_EXPIRE_AGE 0x1 /* Due to age */ -#define KMC_EXPIRE_MEM 0x2 /* Due to low memory */ +#define KMC_NOTOUCH (1 << KMC_BIT_NOTOUCH) +#define KMC_NODEBUG (1 << KMC_BIT_NODEBUG) +#define KMC_NOMAGAZINE (1 << KMC_BIT_NOMAGAZINE) +#define KMC_NOHASH (1 << KMC_BIT_NOHASH) +#define KMC_QCACHE (1 << KMC_BIT_QCACHE) +#define KMC_KMEM (1 << KMC_BIT_KMEM) +#define KMC_VMEM (1 << KMC_BIT_VMEM) +#define KMC_SLAB (1 << KMC_BIT_SLAB) +#define KMC_OFFSLAB (1 << KMC_BIT_OFFSLAB) +#define KMC_NOEMERGENCY (1 << 
KMC_BIT_NOEMERGENCY) +#define KMC_DEADLOCKED (1 << KMC_BIT_DEADLOCKED) +#define KMC_GROWING (1 << KMC_BIT_GROWING) +#define KMC_REAPING (1 << KMC_BIT_REAPING) +#define KMC_DESTROY (1 << KMC_BIT_DESTROY) +#define KMC_TOTAL (1 << KMC_BIT_TOTAL) +#define KMC_ALLOC (1 << KMC_BIT_ALLOC) +#define KMC_MAX (1 << KMC_BIT_MAX) + +#define KMC_REAP_CHUNK INT_MAX +#define KMC_DEFAULT_SEEKS 1 + +#define KMC_EXPIRE_AGE 0x1 /* Due to age */ +#define KMC_EXPIRE_MEM 0x2 /* Due to low memory */ #define KMC_RECLAIM_ONCE 0x1 /* Force a single shrinker pass */ @@ -94,19 +94,19 @@ extern unsigned int spl_kmem_cache_expire; extern struct list_head spl_kmem_cache_list; extern struct rw_semaphore spl_kmem_cache_sem; -#define SKM_MAGIC 0x2e2e2e2e -#define SKO_MAGIC 0x20202020 -#define SKS_MAGIC 0x22222222 -#define SKC_MAGIC 0x2c2c2c2c +#define SKM_MAGIC 0x2e2e2e2e +#define SKO_MAGIC 0x20202020 +#define SKS_MAGIC 0x22222222 +#define SKC_MAGIC 0x2c2c2c2c -#define SPL_KMEM_CACHE_DELAY 15 /* Minimum slab release age */ -#define SPL_KMEM_CACHE_REAP 0 /* Default reap everything */ -#define SPL_KMEM_CACHE_OBJ_PER_SLAB 16 /* Target objects per slab */ -#define SPL_KMEM_CACHE_OBJ_PER_SLAB_MIN 1 /* Minimum objects per slab */ -#define SPL_KMEM_CACHE_ALIGN 8 /* Default object alignment */ +#define SPL_KMEM_CACHE_DELAY 15 /* Minimum slab release age */ +#define SPL_KMEM_CACHE_REAP 0 /* Default reap everything */ +#define SPL_KMEM_CACHE_OBJ_PER_SLAB 16 /* Target objects per slab */ +#define SPL_KMEM_CACHE_OBJ_PER_SLAB_MIN 1 /* Minimum objects per slab */ +#define SPL_KMEM_CACHE_ALIGN 8 /* Default object alignment */ -#define POINTER_IS_VALID(p) 0 /* Unimplemented */ -#define POINTER_INVALIDATE(pp) /* Unimplemented */ +#define POINTER_IS_VALID(p) 0 /* Unimplemented */ +#define POINTER_INVALIDATE(pp) /* Unimplemented */ typedef int (*spl_kmem_ctor_t)(void *, void *, int); typedef void (*spl_kmem_dtor_t)(void *, void *); @@ -124,14 +124,14 @@ typedef struct spl_kmem_magazine { } spl_kmem_magazine_t; typedef 
struct spl_kmem_obj { - uint32_t sko_magic; /* Sanity magic */ + uint32_t sko_magic; /* Sanity magic */ void *sko_addr; /* Buffer address */ struct spl_kmem_slab *sko_slab; /* Owned by slab */ struct list_head sko_list; /* Free object list linkage */ } spl_kmem_obj_t; typedef struct spl_kmem_slab { - uint32_t sks_magic; /* Sanity magic */ + uint32_t sks_magic; /* Sanity magic */ uint32_t sks_objs; /* Objects per slab */ struct spl_kmem_cache *sks_cache; /* Owned by cache */ struct list_head sks_list; /* Slab list linkage */ @@ -174,14 +174,14 @@ typedef struct spl_kmem_cache { atomic_t skc_ref; /* Ref count callers */ taskqid_t skc_taskqid; /* Slab reclaim task */ struct list_head skc_list; /* List of caches linkage */ - struct list_head skc_complete_list;/* Completely alloc'ed */ - struct list_head skc_partial_list; /* Partially alloc'ed */ + struct list_head skc_complete_list; /* Completely alloc'ed */ + struct list_head skc_partial_list; /* Partially alloc'ed */ struct rb_root skc_emergency_tree; /* Min sized objects */ spinlock_t skc_lock; /* Cache lock */ wait_queue_head_t skc_waitq; /* Allocation waiters */ uint64_t skc_slab_fail; /* Slab alloc failures */ - uint64_t skc_slab_create;/* Slab creates */ - uint64_t skc_slab_destroy;/* Slab destroys */ + uint64_t skc_slab_create; /* Slab creates */ + uint64_t skc_slab_destroy; /* Slab destroys */ uint64_t skc_slab_total; /* Slab total current */ uint64_t skc_slab_alloc; /* Slab alloc current */ uint64_t skc_slab_max; /* Slab max historic */ @@ -192,30 +192,31 @@ typedef struct spl_kmem_cache { uint64_t skc_obj_emergency; /* Obj emergency current */ uint64_t skc_obj_emergency_max; /* Obj emergency max */ } spl_kmem_cache_t; -#define kmem_cache_t spl_kmem_cache_t +#define kmem_cache_t spl_kmem_cache_t extern spl_kmem_cache_t *spl_kmem_cache_create(char *name, size_t size, - size_t align, spl_kmem_ctor_t ctor, spl_kmem_dtor_t dtor, - spl_kmem_reclaim_t reclaim, void *priv, void *vmp, int flags); + size_t align, 
spl_kmem_ctor_t ctor, spl_kmem_dtor_t dtor, + spl_kmem_reclaim_t reclaim, void *priv, void *vmp, int flags); extern void spl_kmem_cache_set_move(spl_kmem_cache_t *, - kmem_cbrc_t (*)(void *, void *, size_t, void *)); + kmem_cbrc_t (*)(void *, void *, size_t, void *)); extern void spl_kmem_cache_destroy(spl_kmem_cache_t *skc); extern void *spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags); extern void spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj); extern void spl_kmem_cache_reap_now(spl_kmem_cache_t *skc, int count); extern void spl_kmem_reap(void); -#define kmem_cache_create(name,size,align,ctor,dtor,rclm,priv,vmp,flags) \ - spl_kmem_cache_create(name,size,align,ctor,dtor,rclm,priv,vmp,flags) -#define kmem_cache_set_move(skc, move) spl_kmem_cache_set_move(skc, move) -#define kmem_cache_destroy(skc) spl_kmem_cache_destroy(skc) -#define kmem_cache_alloc(skc, flags) spl_kmem_cache_alloc(skc, flags) -#define kmem_cache_free(skc, obj) spl_kmem_cache_free(skc, obj) -#define kmem_cache_reap_now(skc) \ - spl_kmem_cache_reap_now(skc, skc->skc_reap) -#define kmem_reap() spl_kmem_reap() -#define kmem_virt(ptr) (((ptr) >= (void *)VMALLOC_START) && \ - ((ptr) < (void *)VMALLOC_END)) +#define kmem_cache_create(name, size, align, ctor, dtor, rclm, priv, vmp, fl) \ + spl_kmem_cache_create(name, size, align, ctor, dtor, rclm, priv, vmp, fl) +#define kmem_cache_set_move(skc, move) spl_kmem_cache_set_move(skc, move) +#define kmem_cache_destroy(skc) spl_kmem_cache_destroy(skc) +#define kmem_cache_alloc(skc, flags) spl_kmem_cache_alloc(skc, flags) +#define kmem_cache_free(skc, obj) spl_kmem_cache_free(skc, obj) +#define kmem_cache_reap_now(skc) \ + spl_kmem_cache_reap_now(skc, skc->skc_reap) +#define kmem_reap() spl_kmem_reap() +#define kmem_virt(ptr) \ + (((ptr) >= (void *)VMALLOC_START) && \ + ((ptr) < (void *)VMALLOC_END)) /* * Allow custom slab allocation flags to be set for KMC_SLAB based caches. 
diff --git a/include/sys/vmem.h b/include/sys/vmem.h index e86e89bb4..f59ac5e8b 100644 --- a/include/sys/vmem.h +++ b/include/sys/vmem.h @@ -1,4 +1,4 @@ -/*****************************************************************************\ +/* * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC. * Copyright (C) 2007 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). @@ -20,7 +20,7 @@ * * You should have received a copy of the GNU General Public License along * with the SPL. If not, see <http://www.gnu.org/licenses/>. -\*****************************************************************************/ + */ #ifndef _SPL_VMEM_H #define _SPL_VMEM_H @@ -40,11 +40,11 @@ extern size_t vmem_size(vmem_t *vmp, int typemask); /* * Memory allocation interfaces */ -#define VMEM_ALLOC 0x01 -#define VMEM_FREE 0x02 +#define VMEM_ALLOC 0x01 +#define VMEM_FREE 0x02 #ifndef VMALLOC_TOTAL -#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START) +#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START) #endif static inline void * @@ -78,7 +78,7 @@ vmalloc_nofail(size_t size, gfp_t flags) } } - return ptr; + return (ptr); } static inline void * @@ -90,7 +90,7 @@ vzalloc_nofail(size_t size, gfp_t flags) if (ptr) memset(ptr, 0, (size)); - return ptr; + return (ptr); } #ifdef DEBUG_KMEM @@ -98,29 +98,29 @@ vzalloc_nofail(size_t size, gfp_t flags) /* * Memory accounting functions to be used only when DEBUG_KMEM is set. 
*/ -# ifdef HAVE_ATOMIC64_T +#ifdef HAVE_ATOMIC64_T -# define vmem_alloc_used_add(size) atomic64_add(size, &vmem_alloc_used) -# define vmem_alloc_used_sub(size) atomic64_sub(size, &vmem_alloc_used) -# define vmem_alloc_used_read() atomic64_read(&vmem_alloc_used) -# define vmem_alloc_used_set(size) atomic64_set(&vmem_alloc_used, size) +#define vmem_alloc_used_add(size) atomic64_add(size, &vmem_alloc_used) +#define vmem_alloc_used_sub(size) atomic64_sub(size, &vmem_alloc_used) +#define vmem_alloc_used_read() atomic64_read(&vmem_alloc_used) +#define vmem_alloc_used_set(size) atomic64_set(&vmem_alloc_used, size) extern atomic64_t vmem_alloc_used; extern unsigned long long vmem_alloc_max; -# else /* HAVE_ATOMIC64_T */ +#else /* HAVE_ATOMIC64_T */ -# define vmem_alloc_used_add(size) atomic_add(size, &vmem_alloc_used) -# define vmem_alloc_used_sub(size) atomic_sub(size, &vmem_alloc_used) -# define vmem_alloc_used_read() atomic_read(&vmem_alloc_used) -# define vmem_alloc_used_set(size) atomic_set(&vmem_alloc_used, size) +#define vmem_alloc_used_add(size) atomic_add(size, &vmem_alloc_used) +#define vmem_alloc_used_sub(size) atomic_sub(size, &vmem_alloc_used) +#define vmem_alloc_used_read() atomic_read(&vmem_alloc_used) +#define vmem_alloc_used_set(size) atomic_set(&vmem_alloc_used, size) extern atomic_t vmem_alloc_used; extern unsigned long long vmem_alloc_max; -# endif /* HAVE_ATOMIC64_T */ +#endif /* HAVE_ATOMIC64_T */ -# ifdef DEBUG_KMEM_TRACKING +#ifdef DEBUG_KMEM_TRACKING /* * DEBUG_KMEM && DEBUG_KMEM_TRACKING * @@ -132,18 +132,18 @@ extern unsigned long long vmem_alloc_max; * be enabled for debugging. This feature may be enabled by passing * --enable-debug-kmem-tracking to configure. 
*/ -# define vmem_alloc(sz, fl) vmem_alloc_track((sz), (fl), \ - __FUNCTION__, __LINE__) -# define vmem_zalloc(sz, fl) vmem_alloc_track((sz), (fl)|__GFP_ZERO,\ - __FUNCTION__, __LINE__) -# define vmem_free(ptr, sz) vmem_free_track((ptr), (sz)) +#define vmem_alloc(sz, fl) vmem_alloc_track((sz), (fl), \ + __FUNCTION__, __LINE__) +#define vmem_zalloc(sz, fl) vmem_alloc_track((sz), (fl)|__GFP_ZERO,\ + __FUNCTION__, __LINE__) +#define vmem_free(ptr, sz) vmem_free_track((ptr), (sz)) extern void *kmem_alloc_track(size_t, int, const char *, int, int, int); extern void kmem_free_track(const void *, size_t); extern void *vmem_alloc_track(size_t, int, const char *, int); extern void vmem_free_track(const void *, size_t); -# else /* DEBUG_KMEM_TRACKING */ +#else /* DEBUG_KMEM_TRACKING */ /* * DEBUG_KMEM && !DEBUG_KMEM_TRACKING * @@ -153,16 +153,16 @@ extern void vmem_free_track(const void *, size_t); * will be reported on the console. To disable this basic accounting * pass the --disable-debug-kmem option to configure. */ -# define vmem_alloc(sz, fl) vmem_alloc_debug((sz), (fl), \ - __FUNCTION__, __LINE__) -# define vmem_zalloc(sz, fl) vmem_alloc_debug((sz), (fl)|__GFP_ZERO,\ - __FUNCTION__, __LINE__) -# define vmem_free(ptr, sz) vmem_free_debug((ptr), (sz)) +#define vmem_alloc(sz, fl) vmem_alloc_debug((sz), (fl), \ + __FUNCTION__, __LINE__) +#define vmem_zalloc(sz, fl) vmem_alloc_debug((sz), (fl)|__GFP_ZERO,\ + __FUNCTION__, __LINE__) +#define vmem_free(ptr, sz) vmem_free_debug((ptr), (sz)) extern void *vmem_alloc_debug(size_t, int, const char *, int); extern void vmem_free_debug(const void *, size_t); -# endif /* DEBUG_KMEM_TRACKING */ +#endif /* DEBUG_KMEM_TRACKING */ #else /* DEBUG_KMEM */ /* * !DEBUG_KMEM && !DEBUG_KMEM_TRACKING @@ -171,9 +171,9 @@ extern void vmem_free_debug(const void *, size_t); * minimal memory accounting. To enable basic accounting pass the * --enable-debug-kmem option to configure. 
*/ -# define vmem_alloc(sz, fl) vmalloc_nofail((sz), (fl)) -# define vmem_zalloc(sz, fl) vzalloc_nofail((sz), (fl)) -# define vmem_free(ptr, sz) ((void)(sz), vfree(ptr)) +#define vmem_alloc(sz, fl) vmalloc_nofail((sz), (fl)) +#define vmem_zalloc(sz, fl) vzalloc_nofail((sz), (fl)) +#define vmem_free(ptr, sz) ((void)(sz), vfree(ptr)) #endif /* DEBUG_KMEM */ |