path: root/module/os/linux/spl
authorloli10K <[email protected]>2020-01-14 18:09:59 +0100
committerBrian Behlendorf <[email protected]>2020-01-14 09:09:59 -0800
commit7e2da7786ec089d1b9f9010677dc8e8a65dc01a1 (patch)
treeccd99b4a45c9a1dc0a58b500eebf750101b1df52 /module/os/linux/spl
parent68a192e4b7627f8f00d412ba68c291fcc90d3de9 (diff)
KMC_KVMEM disrupts kv_alloc() memory alignment expectations
On kernels with KASAN enabled the following failure can be observed as
soon as the zfs module is loaded:

  VERIFY(IS_P2ALIGNED(ptr, PAGE_SIZE)) failed
  PANIC at spl-kmem-cache.c:228:kv_alloc()

The problem is kmalloc() has never guaranteed aligned allocations; this
requirement resulted in zfsonlinux/spl@8b45dda, which removed all
kmalloc() usage in kv_alloc().

Until a GFP_ALIGNED flag (or equivalent functionality) is provided by
the kernel, this commit partially reverts 66955885 and 6d948c35 to
prevent k(v)malloc() allocations in kv_alloc().

Reviewed-by: Kjeld Schouten <[email protected]>
Reviewed-by: Michael Niewöhner <[email protected]>
Reviewed-by: Brian Behlendorf <[email protected]>
Signed-off-by: loli10K <[email protected]>
Closes #9813
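As background for the revert, the sketch below illustrates the property
kv_alloc() depends on: vmalloc() memory is assembled from whole pages and
is therefore always page aligned, while kmalloc() alignment is a slab
implementation detail that KASAN redzones can defeat. This is a
hypothetical out-of-tree demo module, not part of this commit; the module
name and messages are illustrative only.

/* align_demo.c: hypothetical module comparing kmalloc()/vmalloc() alignment. */
#include <linux/module.h>
#include <linux/slab.h>         /* kmalloc(), kfree() */
#include <linux/vmalloc.h>      /* vmalloc(), vfree() */

static int __init align_demo_init(void)
{
        void *k = kmalloc(16384, GFP_KERNEL);
        void *v = vmalloc(16384);

        /*
         * vmalloc() always returns page-aligned memory; kmalloc() makes
         * no such guarantee, and KASAN redzones around slab objects can
         * push the returned pointer off a page boundary.
         */
        if (k)
                pr_info("kmalloc page aligned: %d\n",
                    IS_ALIGNED((unsigned long)k, PAGE_SIZE));
        if (v)
                pr_info("vmalloc page aligned: %d\n",
                    IS_ALIGNED((unsigned long)v, PAGE_SIZE));

        kfree(k);
        vfree(v);
        return 0;
}

static void __exit align_demo_exit(void)
{
}

module_init(align_demo_init);
module_exit(align_demo_exit);
MODULE_LICENSE("GPL");

On a KASAN kernel the kmalloc() line can print 0, which is exactly the
case the VERIFY above trips on.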
Diffstat (limited to 'module/os/linux/spl')
-rw-r--r--  module/os/linux/spl/spl-kmem-cache.c  22
1 file changed, 2 insertions(+), 20 deletions(-)
diff --git a/module/os/linux/spl/spl-kmem-cache.c b/module/os/linux/spl/spl-kmem-cache.c
index 452625718..7dd8e8543 100644
--- a/module/os/linux/spl/spl-kmem-cache.c
+++ b/module/os/linux/spl/spl-kmem-cache.c
@@ -202,26 +202,8 @@ kv_alloc(spl_kmem_cache_t *skc, int size, int flags)
 	if (skc->skc_flags & KMC_KMEM) {
 		ASSERT(ISP2(size));
 		ptr = (void *)__get_free_pages(lflags, get_order(size));
-	} else if (skc->skc_flags & KMC_KVMEM) {
-		ptr = spl_kvmalloc(size, lflags);
 	} else {
-		/*
-		 * GFP_KERNEL allocations can safely use kvmalloc which may
-		 * improve performance by avoiding a) high latency caused by
-		 * vmalloc's on-access allocation, b) performance loss due to
-		 * MMU memory address mapping and c) vmalloc locking overhead.
-		 * This has the side-effect that the slab statistics will
-		 * incorrectly report this as a vmem allocation, but that is
-		 * purely cosmetic.
-		 *
-		 * For non-GFP_KERNEL allocations we stick to __vmalloc.
-		 */
-		if ((lflags & GFP_KERNEL) == GFP_KERNEL) {
-			ptr = spl_kvmalloc(size, lflags);
-		} else {
-			ptr = __vmalloc(size, lflags | __GFP_HIGHMEM,
-			    PAGE_KERNEL);
-		}
+		ptr = __vmalloc(size, lflags | __GFP_HIGHMEM, PAGE_KERNEL);
 	}

 	/* Resulting allocated memory will be page aligned */
@@ -249,7 +231,7 @@ kv_free(spl_kmem_cache_t *skc, void *ptr, int size)
 		ASSERT(ISP2(size));
 		free_pages((unsigned long)ptr, get_order(size));
 	} else {
-		spl_kmem_free_impl(ptr, size);
+		vfree(ptr);
 	}
 }
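For reference, this is how the two functions read after the patch. This is
a reconstruction from the hunks above, not a verbatim quote of the file:
the kmem_flags_convert() prologue is taken from spl-kmem-cache.c at this
commit, and kv_free()'s pointer-verification preamble is abridged.

static void *
kv_alloc(spl_kmem_cache_t *skc, int size, int flags)
{
        gfp_t lflags = kmem_flags_convert(flags);
        void *ptr;

        if (skc->skc_flags & KMC_KMEM) {
                /* Power-of-two page allocations are naturally aligned. */
                ASSERT(ISP2(size));
                ptr = (void *)__get_free_pages(lflags, get_order(size));
        } else {
                /* __vmalloc() always returns page-aligned memory. */
                ptr = __vmalloc(size, lflags | __GFP_HIGHMEM, PAGE_KERNEL);
        }

        /* Resulting allocated memory will be page aligned */
        ASSERT(IS_P2ALIGNED(ptr, PAGE_SIZE));

        return (ptr);
}

static void
kv_free(spl_kmem_cache_t *skc, void *ptr, int size)
{
        /* (pointer-verification preamble omitted for brevity) */
        if (skc->skc_flags & KMC_KMEM) {
                ASSERT(ISP2(size));
                free_pages((unsigned long)ptr, get_order(size));
        } else {
                /* Non-KMC_KMEM memory now always comes from __vmalloc(). */
                vfree(ptr);
        }
}

With KMC_KVMEM and the spl_kvmalloc() fast path gone, every non-KMC_KMEM
buffer is a __vmalloc() buffer, so vfree() in kv_free() is always the
matching release call and the IS_P2ALIGNED() assertion holds on both
branches.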