author      Alexander Lobakin <[email protected]>    2022-10-16 23:41:39 +0200
committer   Tony Hutter <[email protected]>           2022-12-01 12:39:39 -0800
commit      ab22031d79691a25d5be20244ad72e34c05d9973 (patch)
tree        55e08e8b3c7e56b2767432baa5ebb72394812783 /module
parent      33bc03dea7bc976307ad158bf4240c947700b3c2 (diff)
icp: fix all !ENDBR objtool warnings in x86 Asm code
Currently, only the Blake3 x86 Asm code has signs of being ENDBR-aware: under certain conditions it includes a header file and uses a custom macro from there. Linux has had its own NOENDBR for several releases now; it is defined in the same <asm/linkage.h>, so <sys/asm_linkage.h> is already provided with it.

Let's unify those two into one %ENDBR macro. First, check whether it is already present; if so, use the Linux kernel version. Otherwise, take the second route and use %_CET_ENDBR from <cet.h> if it is available. If not, fall back to an empty definition. This fixes a couple more 'relocations to !ENDBR' warnings across the module.

Now that we always have the latest/actual ENDBR definition, use it at the entrance of the few corresponding functions that objtool still complains about. This matches the way it is used in the upstream x86 core Asm code.

Reviewed-by: Attila Fülöp <[email protected]>
Reviewed-by: Tino Reichardt <[email protected]>
Reviewed-by: Richard Yao <[email protected]>
Reviewed-by: Brian Behlendorf <[email protected]>
Signed-off-by: Alexander Lobakin <[email protected]>
Closes #14035
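For context, an ENDBR-annotated entry point only differs when Indirect Branch Tracking (IBT) is active: the macro should then expand to the 4-byte endbr64 instruction that marks a valid indirect-branch target, and to nothing otherwise. Below is a minimal sketch, assuming an x86-64 GNU toolchain; demo_entry and this local ENDBR definition are hypothetical and not part of the patch:

/* demo.S: hypothetical example, not part of this commit.
 * When CET/IBT is in effect (-fcf-protection=branch sets bit 0 of
 * __CET__), ENDBR must expand to the endbr64 instruction so that an
 * indirect call or jump to demo_entry lands on a valid target; with
 * IBT disabled it expands to nothing and costs nothing.
 */
#if defined(__CET__) && (__CET__ & 1) != 0
#define ENDBR	endbr64
#else
#define ENDBR
#endif

	.text
	.balign	16
	.globl	demo_entry
	.type	demo_entry, @function
demo_entry:
	ENDBR				/* first instruction: IBT landing pad */
	movq	%rdi, %rax		/* trivial body: return the first argument */
	ret
	.size	demo_entry, .-demo_entry

objtool's 'relocation to !ENDBR' warning refers to exactly this situation: code that can be reached through an indirect branch but whose target does not begin with endbr64.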
Diffstat (limited to 'module')
-rw-r--r--   module/icp/asm-x86_64/aes/aes_amd64.S             2
-rw-r--r--   module/icp/asm-x86_64/modes/aesni-gcm-x86_64.S    5
-rw-r--r--   module/icp/asm-x86_64/modes/ghash-x86_64.S        4
-rw-r--r--   module/icp/asm-x86_64/sha2/sha256_impl.S          1
-rw-r--r--   module/icp/asm-x86_64/sha2/sha512_impl.S          1
-rw-r--r--   module/icp/include/sys/ia32/asm_linkage.h         18
6 files changed, 31 insertions, 0 deletions
diff --git a/module/icp/asm-x86_64/aes/aes_amd64.S b/module/icp/asm-x86_64/aes/aes_amd64.S
index 931d24806..e631752af 100644
--- a/module/icp/asm-x86_64/aes/aes_amd64.S
+++ b/module/icp/asm-x86_64/aes/aes_amd64.S
@@ -704,6 +704,7 @@ enc_tab:
ENTRY_NP(aes_encrypt_amd64)
+ ENDBR
#ifdef GLADMAN_INTERFACE
// Original interface
sub $[4*8], %rsp // gnu/linux/opensolaris binary interface
@@ -809,6 +810,7 @@ dec_tab:
ENTRY_NP(aes_decrypt_amd64)
+ ENDBR
#ifdef GLADMAN_INTERFACE
// Original interface
sub $[4*8], %rsp // gnu/linux/opensolaris binary interface
diff --git a/module/icp/asm-x86_64/modes/aesni-gcm-x86_64.S b/module/icp/asm-x86_64/modes/aesni-gcm-x86_64.S
index 6da43ee00..cf17b3768 100644
--- a/module/icp/asm-x86_64/modes/aesni-gcm-x86_64.S
+++ b/module/icp/asm-x86_64/modes/aesni-gcm-x86_64.S
@@ -59,6 +59,7 @@
.align 32
_aesni_ctr32_ghash_6x:
.cfi_startproc
+ ENDBR
vmovdqu 32(%r11),%xmm2
subq $6,%rdx
vpxor %xmm4,%xmm4,%xmm4
@@ -375,6 +376,7 @@ _aesni_ctr32_ghash_6x:
.align 32
_aesni_ctr32_ghash_no_movbe_6x:
.cfi_startproc
+ ENDBR
vmovdqu 32(%r11),%xmm2
subq $6,%rdx
vpxor %xmm4,%xmm4,%xmm4
@@ -703,6 +705,7 @@ _aesni_ctr32_ghash_no_movbe_6x:
.align 32
aesni_gcm_decrypt:
.cfi_startproc
+ ENDBR
xorq %r10,%r10
cmpq $0x60,%rdx
jb .Lgcm_dec_abort
@@ -820,6 +823,7 @@ aesni_gcm_decrypt:
.align 32
_aesni_ctr32_6x:
.cfi_startproc
+ ENDBR
vmovdqu 0-128(%rcx),%xmm4
vmovdqu 32(%r11),%xmm2
leaq -2(%rbp),%r13 // ICP uses 10,12,14 not 9,11,13 for rounds.
@@ -914,6 +918,7 @@ _aesni_ctr32_6x:
.align 32
aesni_gcm_encrypt:
.cfi_startproc
+ ENDBR
xorq %r10,%r10
cmpq $288,%rdx
jb .Lgcm_enc_abort
diff --git a/module/icp/asm-x86_64/modes/ghash-x86_64.S b/module/icp/asm-x86_64/modes/ghash-x86_64.S
index d7cdaeb36..bf3724a23 100644
--- a/module/icp/asm-x86_64/modes/ghash-x86_64.S
+++ b/module/icp/asm-x86_64/modes/ghash-x86_64.S
@@ -107,6 +107,7 @@
.align 16
gcm_gmult_clmul:
.cfi_startproc
+ ENDBR
.L_gmult_clmul:
movdqu (%rdi),%xmm0
movdqa .Lbswap_mask(%rip),%xmm5
@@ -161,6 +162,7 @@ gcm_gmult_clmul:
.align 32
gcm_init_htab_avx:
.cfi_startproc
+ ENDBR
vzeroupper
vmovdqu (%rsi),%xmm2
@@ -274,6 +276,7 @@ gcm_init_htab_avx:
.align 32
gcm_gmult_avx:
.cfi_startproc
+ ENDBR
jmp .L_gmult_clmul
.cfi_endproc
.size gcm_gmult_avx,.-gcm_gmult_avx
@@ -282,6 +285,7 @@ gcm_gmult_avx:
.align 32
gcm_ghash_avx:
.cfi_startproc
+ ENDBR
vzeroupper
vmovdqu (%rdi),%xmm10
diff --git a/module/icp/asm-x86_64/sha2/sha256_impl.S b/module/icp/asm-x86_64/sha2/sha256_impl.S
index 31da7f976..fc0c68ba2 100644
--- a/module/icp/asm-x86_64/sha2/sha256_impl.S
+++ b/module/icp/asm-x86_64/sha2/sha256_impl.S
@@ -84,6 +84,7 @@ SHA256TransformBlocks(SHA2_CTX *ctx, const void *in, size_t num)
ENTRY_NP(SHA256TransformBlocks)
.cfi_startproc
+ ENDBR
movq %rsp, %rax
.cfi_def_cfa_register %rax
push %rbx
diff --git a/module/icp/asm-x86_64/sha2/sha512_impl.S b/module/icp/asm-x86_64/sha2/sha512_impl.S
index c2ba18538..ab9474a4e 100644
--- a/module/icp/asm-x86_64/sha2/sha512_impl.S
+++ b/module/icp/asm-x86_64/sha2/sha512_impl.S
@@ -85,6 +85,7 @@ SHA512TransformBlocks(SHA2_CTX *ctx, const void *in, size_t num)
ENTRY_NP(SHA512TransformBlocks)
.cfi_startproc
+ ENDBR
movq %rsp, %rax
.cfi_def_cfa_register %rax
push %rbx
diff --git a/module/icp/include/sys/ia32/asm_linkage.h b/module/icp/include/sys/ia32/asm_linkage.h
index 0717db4ab..beb3592f3 100644
--- a/module/icp/include/sys/ia32/asm_linkage.h
+++ b/module/icp/include/sys/ia32/asm_linkage.h
@@ -34,6 +34,24 @@
#include <linux/linkage.h>
#endif
+#ifndef ENDBR
+#if defined(__ELF__) && defined(__CET__) && defined(__has_include)
+/* CSTYLED */
+#if __has_include(<cet.h>)
+
+#include <cet.h>
+
+#ifdef _CET_ENDBR
+#define ENDBR _CET_ENDBR
+#endif /* _CET_ENDBR */
+
+#endif /* <cet.h> */
+#endif /* __ELF__ && __CET__ && __has_include */
+#endif /* !ENDBR */
+
+#ifndef ENDBR
+#define ENDBR
+#endif
#ifndef RET
#define RET ret
#endif
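
As a usage note, the fallback chain added above can be exercised outside of a kernel build by assembling a small probe file; a sketch, assuming a GCC toolchain recent enough to ship <cet.h> (endbr_check.S and probe are hypothetical names, not part of this patch):

/* endbr_check.S: hypothetical probe, not part of this commit.
 * <cet.h> is the GCC-installed header the new fallback relies on; it
 * defines _CET_ENDBR for assembler input only, so this file must be
 * preprocessed as assembler-with-cpp (which gcc already does for .S).
 */
#if defined(__ELF__) && defined(__CET__) && defined(__has_include)
#if __has_include(<cet.h>)
#include <cet.h>
#endif
#endif

	.text
	.globl	probe
	.type	probe, @function
probe:
#ifdef _CET_ENDBR
	_CET_ENDBR			/* endbr64 on x86-64 when IBT is enabled */
#endif
	ret
	.size	probe, .-probe

Building this with and without -fcf-protection=branch (for example, gcc -fcf-protection=branch -c endbr_check.S) and disassembling the objects with objdump -d should show endbr64 as the first instruction of probe only in the protected build. Without the flag, __CET__ is never defined, <cet.h> is not pulled in, and the probe is a bare ret, which corresponds to the empty-definition case the header falls back to.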