summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorBrian Behlendorf <[email protected]>2009-11-13 11:12:43 -0800
committerBrian Behlendorf <[email protected]>2009-11-13 11:12:43 -0800
commit8b45dda2bc82ee821992b928ea9d7278a953e8f9 (patch)
tree1919ad6df8bea1089d7d0ec7778e3a8265ba649c
parentc89fdee4d3530e22270ec2b700f697c5e0d46d71 (diff)
Linux 2.6.31 kmem cache alignment fixes and cleanup.
The big fix here is the removal of kmalloc() in kv_alloc(). It used to be true in previous kernels that kmallocs over PAGE_SIZE would always be page aligned. This is no longer true — at least in 2.6.31 there are no longer any alignment expectations. Since kv_alloc() requires the resulting address to be page aligned, we now only either directly allocate pages in the KMC_KMEM case, or directly call __vmalloc(), both of which will always return a page aligned address. Additionally, to avoid wasting memory, size is always a power of two. As for cleanup, several helper functions were introduced to calculate the aligned sizes of various data structures. This helps ensure no case is accidentally missed where the alignment needs to be taken into account. The helpers now use P2ROUNDUP_TYPED instead of P2ROUNDUP, which is safer since the type will be explicit and we no longer count on the compiler to auto-promote types as we hopefully expected. Always enforce minimum (SPL_KMEM_CACHE_ALIGN) and maximum (PAGE_SIZE) alignment restrictions at cache creation time. Use SPL_KMEM_CACHE_ALIGN in splat alignment test.
-rw-r--r--include/sys/sysmacros.h4
-rw-r--r--module/spl/spl-kmem.c144
-rw-r--r--module/splat/splat-kmem.c2
3 files changed, 93 insertions, 57 deletions
diff --git a/include/sys/sysmacros.h b/include/sys/sysmacros.h
index e66d8d991..4ed41d4c5 100644
--- a/include/sys/sysmacros.h
+++ b/include/sys/sysmacros.h
@@ -172,12 +172,8 @@ extern void spl_cleanup(void);
#define P2ALIGN(x, align) ((x) & -(align))
#define P2CROSS(x, y, align) (((x) ^ (y)) > (align) - 1)
#define P2ROUNDUP(x, align) (-(-(x) & -(align)))
-#define P2ROUNDUP_TYPED(x, align, type) \
- (-(-(type)(x) & -(type)(align)))
#define P2PHASE(x, align) ((x) & ((align) - 1))
#define P2NPHASE(x, align) (-(x) & ((align) - 1))
-#define P2NPHASE_TYPED(x, align, type) \
- (-(type)(x) & ((type)(align) - 1))
#define ISP2(x) (((x) & ((x) - 1)) == 0)
#define IS_P2ALIGNED(v, a) ((((uintptr_t)(v)) & ((uintptr_t)(a) - 1)) == 0)
#define P2BOUNDARY(off, len, align) \
diff --git a/module/spl/spl-kmem.c b/module/spl/spl-kmem.c
index 438f7c6d3..b86a8ad82 100644
--- a/module/spl/spl-kmem.c
+++ b/module/spl/spl-kmem.c
@@ -720,14 +720,15 @@ kv_alloc(spl_kmem_cache_t *skc, int size, int flags)
{
void *ptr;
- if (skc->skc_flags & KMC_KMEM) {
- if (size > (2 * PAGE_SIZE)) {
- ptr = (void *)__get_free_pages(flags, get_order(size));
- } else
- ptr = kmem_alloc(size, flags);
- } else {
- ptr = vmem_alloc(size, flags);
- }
+ ASSERT(ISP2(size));
+
+ if (skc->skc_flags & KMC_KMEM)
+ ptr = (void *)__get_free_pages(flags, get_order(size));
+ else
+ ptr = __vmalloc(size, flags | __GFP_HIGHMEM, PAGE_KERNEL);
+
+ /* Resulting allocated memory will be page aligned */
+ ASSERT(IS_P2ALIGNED(ptr, PAGE_SIZE));
return ptr;
}
@@ -735,14 +736,55 @@ kv_alloc(spl_kmem_cache_t *skc, int size, int flags)
static void
kv_free(spl_kmem_cache_t *skc, void *ptr, int size)
{
- if (skc->skc_flags & KMC_KMEM) {
- if (size > (2 * PAGE_SIZE))
- free_pages((unsigned long)ptr, get_order(size));
- else
- kmem_free(ptr, size);
- } else {
- vmem_free(ptr, size);
- }
+ ASSERT(IS_P2ALIGNED(ptr, PAGE_SIZE));
+ ASSERT(ISP2(size));
+
+ if (skc->skc_flags & KMC_KMEM)
+ free_pages((unsigned long)ptr, get_order(size));
+ else
+ vfree(ptr);
+}
+
+/*
+ * Required space for each aligned sks.
+ */
+static inline uint32_t
+spl_sks_size(spl_kmem_cache_t *skc)
+{
+ return P2ROUNDUP_TYPED(sizeof(spl_kmem_slab_t),
+ skc->skc_obj_align, uint32_t);
+}
+
+/*
+ * Required space for each aligned object.
+ */
+static inline uint32_t
+spl_obj_size(spl_kmem_cache_t *skc)
+{
+ uint32_t align = skc->skc_obj_align;
+
+ return P2ROUNDUP_TYPED(skc->skc_obj_size, align, uint32_t) +
+ P2ROUNDUP_TYPED(sizeof(spl_kmem_obj_t), align, uint32_t);
+}
+
+/*
+ * Lookup the spl_kmem_object_t for an object given that object.
+ */
+static inline spl_kmem_obj_t *
+spl_sko_from_obj(spl_kmem_cache_t *skc, void *obj)
+{
+ return obj + P2ROUNDUP_TYPED(skc->skc_obj_size,
+ skc->skc_obj_align, uint32_t);
+}
+
+/*
+ * Required space for each offslab object taking in to account alignment
+ * restrictions and the power-of-two requirement of kv_alloc().
+ */
+static inline uint32_t
+spl_offslab_size(spl_kmem_cache_t *skc)
+{
+ return 1UL << (highbit(spl_obj_size(skc)) + 1);
}
/*
@@ -782,7 +824,8 @@ spl_slab_alloc(spl_kmem_cache_t *skc, int flags)
spl_kmem_slab_t *sks;
spl_kmem_obj_t *sko, *n;
void *base, *obj;
- int i, align, size, rc = 0;
+ uint32_t obj_size, offslab_size = 0;
+ int i, rc = 0;
base = kv_alloc(skc, skc->skc_slab_size, flags);
if (base == NULL)
@@ -796,23 +839,22 @@ spl_slab_alloc(spl_kmem_cache_t *skc, int flags)
INIT_LIST_HEAD(&sks->sks_list);
INIT_LIST_HEAD(&sks->sks_free_list);
sks->sks_ref = 0;
+ obj_size = spl_obj_size(skc);
- align = skc->skc_obj_align;
- size = P2ROUNDUP(skc->skc_obj_size, align) +
- P2ROUNDUP(sizeof(spl_kmem_obj_t), align);
+ if (skc->skc_flags & KMC_OFFSLAB)
+ offslab_size = spl_offslab_size(skc);
for (i = 0; i < sks->sks_objs; i++) {
if (skc->skc_flags & KMC_OFFSLAB) {
- obj = kv_alloc(skc, size, flags);
+ obj = kv_alloc(skc, offslab_size, flags);
if (!obj)
GOTO(out, rc = -ENOMEM);
} else {
- obj = base +
- P2ROUNDUP(sizeof(spl_kmem_slab_t), align) +
- (i * size);
+ obj = base + spl_sks_size(skc) + (i * obj_size);
}
- sko = obj + P2ROUNDUP(skc->skc_obj_size, align);
+ ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align));
+ sko = spl_sko_from_obj(skc, obj);
sko->sko_addr = obj;
sko->sko_magic = SKO_MAGIC;
sko->sko_slab = sks;
@@ -828,7 +870,7 @@ out:
if (skc->skc_flags & KMC_OFFSLAB)
list_for_each_entry_safe(sko, n, &sks->sks_free_list,
sko_list)
- kv_free(skc, sko->sko_addr, size);
+ kv_free(skc, sko->sko_addr, offslab_size);
kv_free(skc, base, skc->skc_slab_size);
sks = NULL;
@@ -886,7 +928,8 @@ spl_slab_reclaim(spl_kmem_cache_t *skc, int count, int flag)
spl_kmem_obj_t *sko, *n;
LIST_HEAD(sks_list);
LIST_HEAD(sko_list);
- int size = 0, i = 0;
+ uint32_t size = 0;
+ int i = 0;
ENTRY;
/*
@@ -922,8 +965,7 @@ spl_slab_reclaim(spl_kmem_cache_t *skc, int count, int flag)
* objects and slabs back to the system.
*/
if (skc->skc_flags & KMC_OFFSLAB)
- size = P2ROUNDUP(skc->skc_obj_size, skc->skc_obj_align) +
- P2ROUNDUP(sizeof(spl_kmem_obj_t), skc->skc_obj_align);
+ size = spl_offslab_size(skc);
list_for_each_entry_safe(sko, n, &sko_list, sko_list) {
ASSERT(sko->sko_magic == SKO_MAGIC);
@@ -994,7 +1036,7 @@ spl_cache_age(void *data)
}
/*
- * Size a slab based on the size of each aliged object plus spl_kmem_obj_t.
+ * Size a slab based on the size of each aligned object plus spl_kmem_obj_t.
* When on-slab we want to target SPL_KMEM_CACHE_OBJ_PER_SLAB. However,
* for very small objects we may end up with more than this so as not
* to waste space in the minimal allocation of a single page. Also for
@@ -1004,30 +1046,29 @@ spl_cache_age(void *data)
static int
spl_slab_size(spl_kmem_cache_t *skc, uint32_t *objs, uint32_t *size)
{
- int sks_size, obj_size, max_size, align;
+ uint32_t sks_size, obj_size, max_size;
if (skc->skc_flags & KMC_OFFSLAB) {
*objs = SPL_KMEM_CACHE_OBJ_PER_SLAB;
*size = sizeof(spl_kmem_slab_t);
} else {
- align = skc->skc_obj_align;
- sks_size = P2ROUNDUP(sizeof(spl_kmem_slab_t), align);
- obj_size = P2ROUNDUP(skc->skc_obj_size, align) +
- P2ROUNDUP(sizeof(spl_kmem_obj_t), align);
+ sks_size = spl_sks_size(skc);
+ obj_size = spl_obj_size(skc);
if (skc->skc_flags & KMC_KMEM)
- max_size = ((uint64_t)1 << (MAX_ORDER-1)) * PAGE_SIZE;
+ max_size = ((uint32_t)1 << (MAX_ORDER-1)) * PAGE_SIZE;
else
max_size = (32 * 1024 * 1024);
- for (*size = PAGE_SIZE; *size <= max_size; *size += PAGE_SIZE) {
+ /* Power of two sized slab */
+ for (*size = PAGE_SIZE; *size <= max_size; *size *= 2) {
*objs = (*size - sks_size) / obj_size;
if (*objs >= SPL_KMEM_CACHE_OBJ_PER_SLAB)
RETURN(0);
}
/*
- * Unable to satisfy target objets per slab, fallback to
+ * Unable to satisfy target objects per slab, fall back to
* allocating a maximally sized slab and assuming it can
* contain the minimum objects count use it. If not fail.
*/
@@ -1048,17 +1089,18 @@ spl_slab_size(spl_kmem_cache_t *skc, uint32_t *objs, uint32_t *size)
static int
spl_magazine_size(spl_kmem_cache_t *skc)
{
- int size, align = skc->skc_obj_align;
+ uint32_t obj_size = spl_obj_size(skc);
+ int size;
ENTRY;
/* Per-magazine sizes below assume a 4Kib page size */
- if (P2ROUNDUP(skc->skc_obj_size, align) > (PAGE_SIZE * 256))
+ if (obj_size > (PAGE_SIZE * 256))
size = 4; /* Minimum 4Mib per-magazine */
- else if (P2ROUNDUP(skc->skc_obj_size, align) > (PAGE_SIZE * 32))
+ else if (obj_size > (PAGE_SIZE * 32))
size = 16; /* Minimum 2Mib per-magazine */
- else if (P2ROUNDUP(skc->skc_obj_size, align) > (PAGE_SIZE))
+ else if (obj_size > (PAGE_SIZE))
size = 64; /* Minimum 256Kib per-magazine */
- else if (P2ROUNDUP(skc->skc_obj_size, align) > (PAGE_SIZE / 4))
+ else if (obj_size > (PAGE_SIZE / 4))
size = 128; /* Minimum 128Kib per-magazine */
else
size = 256;
@@ -1240,19 +1282,18 @@ spl_kmem_cache_create(char *name, size_t size, size_t align,
skc->skc_obj_max = 0;
if (align) {
- ASSERT((align & (align - 1)) == 0); /* Power of two */
- ASSERT(align >= SPL_KMEM_CACHE_ALIGN); /* Minimum size */
+ VERIFY(ISP2(align));
+ VERIFY3U(align, >=, SPL_KMEM_CACHE_ALIGN); /* Min alignment */
+ VERIFY3U(align, <=, PAGE_SIZE); /* Max alignment */
skc->skc_obj_align = align;
}
/* If none passed select a cache type based on object size */
if (!(skc->skc_flags & (KMC_KMEM | KMC_VMEM))) {
- if (P2ROUNDUP(skc->skc_obj_size, skc->skc_obj_align) <
- (PAGE_SIZE / 8)) {
+ if (spl_obj_size(skc) < (PAGE_SIZE / 8))
skc->skc_flags |= KMC_KMEM;
- } else {
+ else
skc->skc_flags |= KMC_VMEM;
- }
}
rc = spl_slab_size(skc, &skc->skc_slab_objs, &skc->skc_slab_size);
@@ -1492,9 +1533,8 @@ spl_cache_shrink(spl_kmem_cache_t *skc, void *obj)
ASSERT(skc->skc_magic == SKC_MAGIC);
ASSERT(spin_is_locked(&skc->skc_lock));
- sko = obj + P2ROUNDUP(skc->skc_obj_size, skc->skc_obj_align);
+ sko = spl_sko_from_obj(skc, obj);
ASSERT(sko->sko_magic == SKO_MAGIC);
-
sks = sko->sko_slab;
ASSERT(sks->sks_magic == SKS_MAGIC);
ASSERT(sks->sks_cache == skc);
@@ -1600,7 +1640,7 @@ restart:
local_irq_restore(irq_flags);
ASSERT(obj);
- ASSERT(((unsigned long)(obj) % skc->skc_obj_align) == 0);
+ ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align));
/* Pre-emptively migrate object to CPU L1 cache */
prefetchw(obj);
diff --git a/module/splat/splat-kmem.c b/module/splat/splat-kmem.c
index 1007f7855..55c42b7d0 100644
--- a/module/splat/splat-kmem.c
+++ b/module/splat/splat-kmem.c
@@ -762,7 +762,7 @@ splat_kmem_test7(struct file *file, void *arg)
char *name = SPLAT_KMEM_TEST7_NAME;
int i, rc;
- for (i = 8; i <= PAGE_SIZE; i *= 2) {
+ for (i = SPL_KMEM_CACHE_ALIGN; i <= PAGE_SIZE; i *= 2) {
rc = splat_kmem_cache_test(file, arg, name, 157, i, 0);
if (rc)
return rc;