author    Brian Behlendorf <[email protected]>  2009-01-30 20:54:49 -0800
committer Brian Behlendorf <[email protected]>  2009-01-30 20:54:49 -0800
commit    ea3e6ca9e595ebfba82b964ee2eaf1ddd7076f0f (patch)
tree      7480b87145297f3882ffe18234280512e136cdb4 /include
parent    34e71c9e97f4d0d2b3ede850d016a7de558b0f3c (diff)
kmem_cache hardening and performance improvements
- Added a slab work queue task which gradually ages and frees slabs from the cache which have not been used recently.
- Optimized the slab packing algorithm to ensure each slab contains the maximum number of objects without creating too large a slab (see the packing sketch below).
- Fixed a deadlock: we can never call kv_free() under the skc_lock. We now unlink the objects and slabs from the cache itself and attach them to a private work list. The contents of the list are then subsequently freed outside the spin lock (see the locking sketch below).
- Moved the magazine create/destroy operations onto the local CPU.
- Further performance optimizations by minimizing the use of the large per-cache skc_lock. This includes the addition of the KMC_BIT_REAPING bit mask, which is used to prevent concurrent reaping and to defer new slab creation while reaping is occurring.
- Added the KMC_BIT_DESTROY bit mask, which is set when the cache is being destroyed; this is used to catch any task accessing the cache while it is being destroyed.
- Added comments to all the functions, plus additional comments to try and make everything as clear as possible.
- Major cleanup and additions to the SPLAT kmem tests to more rigorously stress the cache implementation and look for any problems. This includes correctness and performance tests.
- Updated the portable work queue interfaces.
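The packing change can be pictured with a short sketch. This is a hypothetical reconstruction, not the code from spl-kmem.c: spl_slab_objs_sketch(), its parameters, and the size ceiling are all illustrative. It builds on the SPL_KMEM_CACHE_OBJ_PER_SLAB and SPL_KMEM_CACHE_OBJ_PER_SLAB_MIN constants added to include/sys/kmem.h below.

    #include <stddef.h>

    #define SPL_KMEM_CACHE_OBJ_PER_SLAB     32  /* Target objects per slab */
    #define SPL_KMEM_CACHE_OBJ_PER_SLAB_MIN 8   /* Minimum objects per slab */

    /*
     * Hypothetical packing policy: start at the per-slab target and back
     * off until the slab fits under max_size, but never drop below the
     * minimum.  Names and the exact fitting rule are illustrative only.
     */
    static size_t
    spl_slab_objs_sketch(size_t obj_size, size_t hdr_size, size_t max_size)
    {
            size_t objs = SPL_KMEM_CACHE_OBJ_PER_SLAB;

            while (objs > SPL_KMEM_CACHE_OBJ_PER_SLAB_MIN &&
                   hdr_size + objs * obj_size > max_size)
                    objs--;

            return (objs);
    }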
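The deadlock fix follows the common kernel pattern of unlinking victims under the spin lock and freeing them only after it is dropped, since kv_free() may sleep. A minimal sketch, assuming stand-in types and helpers; my_slab_t and kv_free_slab() are not the in-tree names:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    typedef struct my_slab {
            struct list_head sks_list;      /* cache linkage */
            unsigned int sks_ref;           /* objects currently allocated */
    } my_slab_t;

    extern void kv_free_slab(my_slab_t *sks);   /* may sleep */

    static void
    reap_cache_sketch(spinlock_t *skc_lock, struct list_head *partial_list)
    {
            my_slab_t *sks, *n;
            LIST_HEAD(private_list);        /* victims collected here */

            /* Unlink idle slabs while holding the lock... */
            spin_lock(skc_lock);
            list_for_each_entry_safe(sks, n, partial_list, sks_list)
                    if (sks->sks_ref == 0)
                            list_move(&sks->sks_list, &private_list);
            spin_unlock(skc_lock);

            /* ...and free them outside it, where sleeping is safe. */
            list_for_each_entry_safe(sks, n, &private_list, sks_list) {
                    list_del(&sks->sks_list);
                    kv_free_slab(sks);
            }
    }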
Diffstat (limited to 'include')
-rw-r--r--  include/sys/kmem.h      | 73
-rw-r--r--  include/sys/sysmacros.h | 12
-rw-r--r--  include/sys/vmsystm.h   |  3
3 files changed, 48 insertions(+), 40 deletions(-)
diff --git a/include/sys/kmem.h b/include/sys/kmem.h
index ef5876312..4f939e0fc 100644
--- a/include/sys/kmem.h
+++ b/include/sys/kmem.h
@@ -45,6 +45,7 @@ extern "C" {
#include <asm/atomic_compat.h>
#include <sys/types.h>
#include <sys/debug.h>
+#include <sys/workqueue.h>
/*
* Memory allocation interfaces
@@ -161,17 +162,32 @@ kmem_alloc_tryhard(size_t size, size_t *alloc_size, int kmflags)
/*
* Slab allocation interfaces
*/
-#define KMC_NOTOUCH 0x00000001
-#define KMC_NODEBUG 0x00000002 /* Default behavior */
-#define KMC_NOMAGAZINE 0x00000004 /* XXX: No disable support available */
-#define KMC_NOHASH 0x00000008 /* XXX: No hash available */
-#define KMC_QCACHE 0x00000010 /* XXX: Unsupported */
-#define KMC_KMEM 0x00000100 /* Use kmem cache */
-#define KMC_VMEM 0x00000200 /* Use vmem cache */
-#define KMC_OFFSLAB 0x00000400 /* Objects not on slab */
-
-#define KMC_REAP_CHUNK 256
-#define KMC_DEFAULT_SEEKS DEFAULT_SEEKS
+enum {
+ KMC_BIT_NOTOUCH = 0, /* Don't update ages */
+ KMC_BIT_NODEBUG = 1, /* Default behavior */
+ KMC_BIT_NOMAGAZINE = 2, /* XXX: Unsupported */
+ KMC_BIT_NOHASH = 3, /* XXX: Unsupported */
+ KMC_BIT_QCACHE = 4, /* XXX: Unsupported */
+ KMC_BIT_KMEM = 5, /* Use kmem cache */
+ KMC_BIT_VMEM = 6, /* Use vmem cache */
+ KMC_BIT_OFFSLAB = 7, /* Objects not on slab */
+ KMC_BIT_REAPING = 16, /* Reaping in progress */
+ KMC_BIT_DESTROY = 17, /* Destroy in progress */
+};
+
+#define KMC_NOTOUCH (1 << KMC_BIT_NOTOUCH)
+#define KMC_NODEBUG (1 << KMC_BIT_NODEBUG)
+#define KMC_NOMAGAZINE (1 << KMC_BIT_NOMAGAZINE)
+#define KMC_NOHASH (1 << KMC_BIT_NOHASH)
+#define KMC_QCACHE (1 << KMC_BIT_QCACHE)
+#define KMC_KMEM (1 << KMC_BIT_KMEM)
+#define KMC_VMEM (1 << KMC_BIT_VMEM)
+#define KMC_OFFSLAB (1 << KMC_BIT_OFFSLAB)
+#define KMC_REAPING (1 << KMC_BIT_REAPING)
+#define KMC_DESTROY (1 << KMC_BIT_DESTROY)
+
+#define KMC_REAP_CHUNK INT_MAX
+#define KMC_DEFAULT_SEEKS 1
#ifdef DEBUG_KMEM_UNIMPLEMENTED
static __inline__ void kmem_init(void) {
@@ -223,9 +239,10 @@ extern struct rw_semaphore spl_kmem_cache_sem;
#define SKS_MAGIC 0x22222222
#define SKC_MAGIC 0x2c2c2c2c
-#define SPL_KMEM_CACHE_DELAY 5
-#define SPL_KMEM_CACHE_OBJ_PER_SLAB 32
-#define SPL_KMEM_CACHE_ALIGN 8
+#define SPL_KMEM_CACHE_DELAY 5 /* Minimum slab release age */
+#define SPL_KMEM_CACHE_OBJ_PER_SLAB 32 /* Target objects per slab */
+#define SPL_KMEM_CACHE_OBJ_PER_SLAB_MIN 8 /* Minimum objects per slab */
+#define SPL_KMEM_CACHE_ALIGN 8 /* Default object alignment */
typedef int (*spl_kmem_ctor_t)(void *, void *, int);
typedef void (*spl_kmem_dtor_t)(void *, void *);
@@ -258,24 +275,28 @@ typedef struct spl_kmem_slab {
} spl_kmem_slab_t;
typedef struct spl_kmem_cache {
- uint32_t skc_magic; /* Sanity magic */
- uint32_t skc_name_size; /* Name length */
- char *skc_name; /* Name string */
+ uint32_t skc_magic; /* Sanity magic */
+ uint32_t skc_name_size; /* Name length */
+ char *skc_name; /* Name string */
spl_kmem_magazine_t *skc_mag[NR_CPUS]; /* Per-CPU warm cache */
uint32_t skc_mag_size; /* Magazine size */
uint32_t skc_mag_refill; /* Magazine refill count */
- spl_kmem_ctor_t skc_ctor; /* Constructor */
- spl_kmem_dtor_t skc_dtor; /* Destructor */
- spl_kmem_reclaim_t skc_reclaim; /* Reclaimator */
- void *skc_private; /* Private data */
- void *skc_vmp; /* Unused */
+ spl_kmem_ctor_t skc_ctor; /* Constructor */
+ spl_kmem_dtor_t skc_dtor; /* Destructor */
+ spl_kmem_reclaim_t skc_reclaim; /* Reclaimator */
+ void *skc_private; /* Private data */
+ void *skc_vmp; /* Unused */
uint32_t skc_flags; /* Flags */
uint32_t skc_obj_size; /* Object size */
uint32_t skc_obj_align; /* Object alignment */
uint32_t skc_slab_objs; /* Objects per slab */
- uint32_t skc_slab_size; /* Slab size */
- uint32_t skc_delay; /* slab reclaim interval */
- struct list_head skc_list; /* List of caches linkage */
+ uint32_t skc_slab_size; /* Slab size */
+ uint32_t skc_delay; /* Slab reclaim interval */
+ atomic_t skc_ref; /* Ref count callers */
+ struct delayed_work skc_work; /* Slab reclaim work */
+ struct work_struct work;
+ struct timer_list timer;
+ struct list_head skc_list; /* List of caches linkage */
struct list_head skc_complete_list;/* Completely alloc'ed */
struct list_head skc_partial_list; /* Partially alloc'ed */
spinlock_t skc_lock; /* Cache lock */
@@ -283,7 +304,7 @@ typedef struct spl_kmem_cache {
uint64_t skc_slab_create;/* Slab creates */
uint64_t skc_slab_destroy;/* Slab destroys */
uint64_t skc_slab_total; /* Slab total current */
- uint64_t skc_slab_alloc; /* Slab alloc current */
+ uint64_t skc_slab_alloc; /* Slab alloc current */
uint64_t skc_slab_max; /* Slab max historic */
uint64_t skc_obj_total; /* Obj total current */
uint64_t skc_obj_alloc; /* Obj alloc current */
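Keeping both the KMC_BIT_* bit numbers and the shifted KMC_* masks, as the hunk above does, lets the same flag word work with the Linux atomic bitops (which take a bit index) and with plain mask tests. A hedged sketch of the reaping guard mentioned in the commit message; the struct, field, and function names are illustrative, and the flag word is an unsigned long here because the bitops require one:

    #include <linux/bitops.h>

    #define KMC_BIT_REAPING 16                  /* Reaping in progress */
    #define KMC_REAPING     (1 << KMC_BIT_REAPING)

    struct kmc_flags_sketch {
            unsigned long flags;                /* bitops want unsigned long */
    };

    static void
    reap_once_sketch(struct kmc_flags_sketch *skc)
    {
            /* Atomically claim the reaper role; back off if already set. */
            if (test_and_set_bit(KMC_BIT_REAPING, &skc->flags))
                    return;

            /* ... reclaim idle slabs ... */

            clear_bit(KMC_BIT_REAPING, &skc->flags);
    }

    static int
    reaping_sketch(struct kmc_flags_sketch *skc)
    {
            /* The mask form suits cheap, non-atomic tests. */
            return ((skc->flags & KMC_REAPING) != 0);
    }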
diff --git a/include/sys/sysmacros.h b/include/sys/sysmacros.h
index 94ff3f84e..b82812385 100644
--- a/include/sys/sysmacros.h
+++ b/include/sys/sysmacros.h
@@ -203,18 +203,6 @@ extern int ddi_strtoul(const char *str, char **nptr,
#define offsetof(s, m) ((size_t)(&(((s *)0)->m)))
#endif
-#ifdef HAVE_3ARGS_INIT_WORK
-
-#define spl_init_work(wq,cb,d) INIT_WORK((wq), (void *)(cb), (void *)(d))
-#define spl_get_work_data(type,field,data) (data)
-
-#else
-
-#define spl_init_work(wq,cb,d) INIT_WORK((wq), (void *)(cb));
-#define spl_get_work_data(type,field,data) container_of(data,type,field)
-
-#endif
-
#ifdef __cplusplus
}
#endif
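The spl_init_work()/spl_get_work_data() shims removed above papered over the old three-argument INIT_WORK(). With the commit moving reclaim to a delayed work item (skc_work in kmem.h), the idiom under the two-argument API looks roughly like this; the type and function names are illustrative, not the spl-kmem.c code:

    #include <linux/workqueue.h>

    struct age_sketch {
            struct delayed_work skc_work;   /* periodic slab ageing task */
            unsigned int skc_delay;         /* reclaim interval in seconds */
    };

    static void
    cache_age_sketch(struct work_struct *work)
    {
            struct age_sketch *skc =
                container_of(work, struct age_sketch, skc_work.work);

            /* ... age and free slabs idle for at least skc_delay ... */

            /* Re-arm; rescheduling from the handler makes the task periodic. */
            schedule_delayed_work(&skc->skc_work, skc->skc_delay * HZ);
    }

    static void
    cache_age_start_sketch(struct age_sketch *skc)
    {
            INIT_DELAYED_WORK(&skc->skc_work, cache_age_sketch);
            schedule_delayed_work(&skc->skc_work, skc->skc_delay * HZ);
    }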
diff --git a/include/sys/vmsystm.h b/include/sys/vmsystm.h
index e92c17bdd..1cb716f13 100644
--- a/include/sys/vmsystm.h
+++ b/include/sys/vmsystm.h
@@ -35,8 +35,7 @@
extern vmem_t *zio_alloc_arena; /* arena for zio caches */
#define physmem num_physpages
-#define freemem nr_free_pages() // Expensive on linux,
- // cheap on solaris
+#define freemem nr_free_pages()
#define minfree 0
#define needfree 0 /* # of needed pages */
#define ptob(pages) (pages * PAGE_SIZE)
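freemem maps directly onto nr_free_pages(); the dropped comment had only noted that the call is cheaper on Solaris than on Linux. A hypothetical example of the macros in use; spl_mem_is_low_sketch() and its 1/64 threshold are illustrative, not from the spl tree:

    #include <linux/mm.h>           /* num_physpages */
    #include <linux/swap.h>         /* nr_free_pages() */

    #define physmem num_physpages
    #define freemem nr_free_pages()

    static int
    spl_mem_is_low_sketch(void)
    {
            /* Flag memory pressure when free pages fall below 1/64 of RAM. */
            return (freemem < (physmem >> 6));
    }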