author    Fangrui Song <maskray@google.com>    2024-08-13 21:48:02 -0700
committer Herbert Xu <herbert@gondor.apana.org.au>    2024-08-24 21:36:07 +0800
commit    3363c460ef726ba693704dbcd73b7e7214ccc788 (patch)
tree      c7e543a85a76b1980f57c77736cdf370fa6d21da
parent    86c85d6657e478c516c825d3d4a2a61aef627934 (diff)
crypto: x86/sha256 - Add parentheses around macros' single arguments
The macros FOUR_ROUNDS_AND_SCHED and DO_4ROUNDS rely on an
unexpected/undocumented behavior of the GNU assembler, which might
change in the future (https://sourceware.org/bugzilla/show_bug.cgi?id=32073).

    M (1) (2) // 1 arg !? Future: 2 args
    M 1 + 2   // 1 arg !? Future: 3 args
    M 1 2     // 2 args

Add parentheses around the single arguments to support future GNU
assembler and LLVM integrated assembler (when the IsOperator hack from
the following link is dropped).

Link: https://github.com/llvm/llvm-project/commit/055006475e22014b28a070db1bff41ca15f322f0
Signed-off-by: Fangrui Song <maskray@google.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
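As a minimal standalone sketch of the behavior in question (the macro name
DEMO and the .warning-based argument dump are illustrative, not part of the
patch), current GNU as can be made to show how each invocation is split into
arguments at assembly time:

    .macro DEMO a, b=none, c=none
    .warning "a=\a | b=\b | c=\c"   # gas substitutes \a, \b, \c even inside strings
    .endm

    DEMO (1) (2)    # today: one arg, a=(1) (2); future gas: two args
    DEMO 1 + 2      # today: one arg, a=1 + 2; future gas: three args
    DEMO 1 2        # two args: a=1, b=2
    DEMO (1 + 2)    # unambiguous: one arg under either behavior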
-rw-r--r--    arch/x86/crypto/sha256-avx2-asm.S    16
1 file changed, 8 insertions, 8 deletions
diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S
index 0ffb072be956..0bbec1c75cd0 100644
--- a/arch/x86/crypto/sha256-avx2-asm.S
+++ b/arch/x86/crypto/sha256-avx2-asm.S
@@ -592,22 +592,22 @@ SYM_TYPED_FUNC_START(sha256_transform_rorx)
leaq K256+0*32(%rip), INP ## reuse INP as scratch reg
vpaddd (INP, SRND), X0, XFER
vmovdqa XFER, 0*32+_XFER(%rsp, SRND)
- FOUR_ROUNDS_AND_SCHED _XFER + 0*32
+ FOUR_ROUNDS_AND_SCHED (_XFER + 0*32)

leaq K256+1*32(%rip), INP
vpaddd (INP, SRND), X0, XFER
vmovdqa XFER, 1*32+_XFER(%rsp, SRND)
- FOUR_ROUNDS_AND_SCHED _XFER + 1*32
+ FOUR_ROUNDS_AND_SCHED (_XFER + 1*32)

leaq K256+2*32(%rip), INP
vpaddd (INP, SRND), X0, XFER
vmovdqa XFER, 2*32+_XFER(%rsp, SRND)
- FOUR_ROUNDS_AND_SCHED _XFER + 2*32
+ FOUR_ROUNDS_AND_SCHED (_XFER + 2*32)

leaq K256+3*32(%rip), INP
vpaddd (INP, SRND), X0, XFER
vmovdqa XFER, 3*32+_XFER(%rsp, SRND)
- FOUR_ROUNDS_AND_SCHED _XFER + 3*32
+ FOUR_ROUNDS_AND_SCHED (_XFER + 3*32)

add $4*32, SRND
cmp $3*4*32, SRND
@@ -618,12 +618,12 @@ SYM_TYPED_FUNC_START(sha256_transform_rorx)
leaq K256+0*32(%rip), INP
vpaddd (INP, SRND), X0, XFER
vmovdqa XFER, 0*32+_XFER(%rsp, SRND)
- DO_4ROUNDS _XFER + 0*32
+ DO_4ROUNDS (_XFER + 0*32)

leaq K256+1*32(%rip), INP
vpaddd (INP, SRND), X1, XFER
vmovdqa XFER, 1*32+_XFER(%rsp, SRND)
- DO_4ROUNDS _XFER + 1*32
+ DO_4ROUNDS (_XFER + 1*32)

add $2*32, SRND
vmovdqa X2, X0
@@ -651,8 +651,8 @@ SYM_TYPED_FUNC_START(sha256_transform_rorx)
xor SRND, SRND
.align 16
.Lloop3:
- DO_4ROUNDS _XFER + 0*32 + 16
- DO_4ROUNDS _XFER + 1*32 + 16
+ DO_4ROUNDS (_XFER + 0*32 + 16)
+ DO_4ROUNDS (_XFER + 1*32 + 16)
add $2*32, SRND
cmp $4*4*32, SRND
jb .Lloop3
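The parenthesized form is behavior-preserving because a parenthesized
absolute expression is still a valid AT&T-syntax displacement. A standalone
sketch under that assumption (the macro LOAD_XFER and symbol XFER_DEMO are
hypothetical, not the kernel's names):

    .set XFER_DEMO, 64              # hypothetical stack-frame offset

    .macro LOAD_XFER disp
    # \disp expands textually: "XFER_DEMO + 0*32" and "(XFER_DEMO + 0*32)"
    # produce the same effective address when used as a displacement
    vmovdqa \disp(%rsp), %ymm0
    .endm

    LOAD_XFER (XFER_DEMO + 0*32)    # remains a single macro argument
                                    # even under the future splitting rules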