author     Fabian Grünbichler <[email protected]>   2019-12-10 21:53:25 +0100
committer  Brian Behlendorf <[email protected]>      2019-12-10 12:53:25 -0800
commit     b119e2c6f185008001667a621521417111b21aa8
tree       19d6688391a9988b1372879d1a013319a9c09bdd
parent     362ae8d11f81e5f65cd20aaf773075a3f045644d
SIMD: Use alloc_pages_node to force alignment
fxsave and xsave require the target address to be 16- and 64-byte
aligned, respectively. kmalloc(_node) does not (yet) offer such
fine-grained control over alignment[0,1], even though it does "the
right thing" most of the time for power-of-2 sizes. Unfortunately,
alignment is completely off when certain debugging or hardening
features/configs are enabled, such as KASAN, slub_debug=Z or the
not-yet-upstream SLAB_CANARY.

Use alloc_pages_node() instead, which allows us to allocate
page-aligned memory. Since fpregs_state is padded to a full page
anyway, and this code is only relevant on x86 with its 4k pages, this
approach should not allocate any unnecessary memory while still
guaranteeing the needed alignment.
0: https://lwn.net/Articles/787740/
1: https://lore.kernel.org/linux-block/[email protected]/
Reviewed-by: Tony Hutter <[email protected]>
Signed-off-by: Fabian Grünbichler <[email protected]>
Signed-off-by: Brian Behlendorf <[email protected]>
Closes #9608
Closes #9674
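
A minimal sketch of the allocation pattern described above (not part of the
commit; the helper name alloc_page_aligned() and the alignment check are
assumptions): memory returned by the page allocator is always
PAGE_SIZE-aligned, so on x86 with 4k pages it satisfies both the 16-byte
requirement of fxsave and the 64-byte requirement of xsave, independent of
KASAN or slub_debug settings.

#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>

/*
 * Hypothetical helper (not from the commit): allocate a zeroed,
 * page-aligned buffer on the given NUMA node. For any size up to
 * PAGE_SIZE, get_order() returns 0, so exactly one page is used.
 */
static void *
alloc_page_aligned(int node, size_t size)
{
	struct page *page = alloc_pages_node(node,
	    GFP_KERNEL | __GFP_ZERO, get_order(size));

	if (page == NULL)
		return (NULL);

	/* page_address() is PAGE_SIZE-aligned, so 64-byte alignment holds. */
	WARN_ON_ONCE(!IS_ALIGNED((unsigned long)page_address(page), 64));
	return (page_address(page));
}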
-rw-r--r--  include/os/linux/kernel/linux/simd_x86.h | 23
1 file changed, 17 insertions(+), 6 deletions(-)
diff --git a/include/os/linux/kernel/linux/simd_x86.h b/include/os/linux/kernel/linux/simd_x86.h
index d711578fd..67d8cdc3a 100644
--- a/include/os/linux/kernel/linux/simd_x86.h
+++ b/include/os/linux/kernel/linux/simd_x86.h
@@ -135,6 +135,8 @@
  */
 #if defined(HAVE_KERNEL_FPU_INTERNAL)
 
+#include <linux/mm.h>
+
 extern union fpregs_state **zfs_kfpu_fpregs;
 
 /*
@@ -147,7 +149,8 @@ kfpu_fini(void)
 
 	for_each_possible_cpu(cpu) {
 		if (zfs_kfpu_fpregs[cpu] != NULL) {
-			kfree(zfs_kfpu_fpregs[cpu]);
+			free_pages((unsigned long)zfs_kfpu_fpregs[cpu],
+			    get_order(sizeof (union fpregs_state)));
 		}
 	}
 
@@ -157,20 +160,28 @@ kfpu_fini(void)
 static inline int
 kfpu_init(void)
 {
-	int cpu;
-
 	zfs_kfpu_fpregs = kzalloc(num_possible_cpus() *
 	    sizeof (union fpregs_state *), GFP_KERNEL);
 	if (zfs_kfpu_fpregs == NULL)
 		return (-ENOMEM);
 
+	/*
+	 * The fxsave and xsave operations require 16-/64-byte alignment of
+	 * the target memory. Since kmalloc() provides no alignment
+	 * guarantee instead use alloc_pages_node().
+	 */
+	unsigned int order = get_order(sizeof (union fpregs_state));
+	int cpu;
+
 	for_each_possible_cpu(cpu) {
-		zfs_kfpu_fpregs[cpu] = kmalloc_node(sizeof (union fpregs_state),
-		    GFP_KERNEL | __GFP_ZERO, cpu_to_node(cpu));
-		if (zfs_kfpu_fpregs[cpu] == NULL) {
+		struct page *page = alloc_pages_node(cpu_to_node(cpu),
+		    GFP_KERNEL | __GFP_ZERO, order);
+		if (page == NULL) {
 			kfpu_fini();
 			return (-ENOMEM);
 		}
+
+		zfs_kfpu_fpregs[cpu] = page_address(page);
 	}
 
 	return (0);
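
The free path mirrors this: the order passed to free_pages() must match the
one used at allocation time, which is why both kfpu_init() and kfpu_fini()
derive it from get_order(sizeof (union fpregs_state)). A minimal counterpart
sketch (hypothetical helper, not from the commit):

#include <linux/gfp.h>
#include <linux/mm.h>

/*
 * Hypothetical counterpart to the allocation above: recompute the
 * order from the same size so free_pages() releases exactly what
 * alloc_pages_node() handed out.
 */
static void
free_page_aligned(void *ptr, size_t size)
{
	if (ptr != NULL)
		free_pages((unsigned long)ptr, get_order(size));
}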