diff --git a/cipher/asm-common-aarch64.h b/cipher/asm-common-aarch64.h index cf0afe1f..451539e8 100644 --- a/cipher/asm-common-aarch64.h +++ b/cipher/asm-common-aarch64.h @@ -1,104 +1,108 @@ /* asm-common-aarch64.h - Common macros for AArch64 assembly * * Copyright (C) 2018 Martin Storsjö * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . */ #ifndef GCRY_ASM_COMMON_AARCH64_H #define GCRY_ASM_COMMON_AARCH64_H #include #ifdef HAVE_GCC_ASM_ELF_DIRECTIVES # define ELF(...) __VA_ARGS__ #else # define ELF(...) /*_*/ #endif #ifdef __APPLE__ #define GET_DATA_POINTER(reg, name) \ adrp reg, name@GOTPAGE ; \ add reg, reg, name@GOTPAGEOFF ; #elif defined(_WIN32) #define GET_DATA_POINTER(reg, name) \ adrp reg, name ; \ add reg, reg, #:lo12:name ; #else #define GET_DATA_POINTER(reg, name) \ adrp reg, :got:name ; \ ldr reg, [reg, #:got_lo12:name] ; #endif #ifdef HAVE_GCC_ASM_CFI_DIRECTIVES /* CFI directives to emit DWARF stack unwinding information. */ # define CFI_STARTPROC() .cfi_startproc # define CFI_ENDPROC() .cfi_endproc # define CFI_REMEMBER_STATE() .cfi_remember_state # define CFI_RESTORE_STATE() .cfi_restore_state # define CFI_ADJUST_CFA_OFFSET(off) .cfi_adjust_cfa_offset off # define CFI_REL_OFFSET(reg,off) .cfi_rel_offset reg, off # define CFI_DEF_CFA_REGISTER(reg) .cfi_def_cfa_register reg # define CFI_REGISTER(ro,rn) .cfi_register ro, rn # define CFI_RESTORE(reg) .cfi_restore reg /* CFA expressions are used for pointing CFA and registers to * SP relative offsets. */ # define DW_REGNO_SP 31 /* Fixed length encoding used for integers for now. 
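For reference, the DW_SLEB128_7BIT and DW_SLEB128_28BIT macros just below emit that fixed-length encoding as LEB128-style bytes: each byte carries 7 value bits and the continuation bit 0x80 is set on every byte except the last. A rough scalar sketch of the same encoding (leb128_fixed is an illustrative helper, not part of the patch):

#include <stdint.h>

/* Fixed-length LEB128: always emits exactly nbytes bytes, continuation bit
 * (0x80) set on every byte except the last.  nbytes = 1 matches
 * DW_SLEB128_7BIT, nbytes = 4 matches DW_SLEB128_28BIT. */
static void
leb128_fixed (uint8_t *out, uint32_t value, int nbytes)
{
  int i;
  for (i = 0; i < nbytes; i++)
    {
      uint8_t b = (value >> (7 * i)) & 0x7f;
      out[i] = (i == nbytes - 1) ? b : (b | 0x80);
    }
}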
*/ # define DW_SLEB128_7BIT(value) \ 0x00|((value) & 0x7f) # define DW_SLEB128_28BIT(value) \ 0x80|((value)&0x7f), \ 0x80|(((value)>>7)&0x7f), \ 0x80|(((value)>>14)&0x7f), \ 0x00|(((value)>>21)&0x7f) # define CFI_CFA_ON_STACK(rsp_offs,cfa_depth) \ .cfi_escape \ 0x0f, /* DW_CFA_def_cfa_expression */ \ DW_SLEB128_7BIT(11), /* length */ \ 0x8f, /* DW_OP_breg31, rsp + constant */ \ DW_SLEB128_28BIT(rsp_offs), \ 0x06, /* DW_OP_deref */ \ 0x23, /* DW_OP_plus_constu */ \ DW_SLEB128_28BIT((cfa_depth)+8) # define CFI_REG_ON_STACK(regno,rsp_offs) \ .cfi_escape \ 0x10, /* DW_CFA_expression */ \ DW_SLEB128_7BIT(regno), \ DW_SLEB128_7BIT(5), /* length */ \ 0x8f, /* DW_OP_breg31, rsp + constant */ \ DW_SLEB128_28BIT(rsp_offs) #else # define CFI_STARTPROC() # define CFI_ENDPROC() # define CFI_REMEMBER_STATE() # define CFI_RESTORE_STATE() # define CFI_ADJUST_CFA_OFFSET(off) # define CFI_REL_OFFSET(reg,off) # define CFI_DEF_CFA_REGISTER(reg) # define CFI_REGISTER(ro,rn) # define CFI_RESTORE(reg) # define CFI_CFA_ON_STACK(rsp_offs,cfa_depth) # define CFI_REG_ON_STACK(reg,rsp_offs) #endif +/* 'ret' instruction replacement for straight-line speculation mitigation */ +#define ret_spec_stop \ + ret; dsb sy; isb; + #endif /* GCRY_ASM_COMMON_AARCH64_H */ diff --git a/cipher/asm-poly1305-aarch64.h b/cipher/asm-poly1305-aarch64.h index 90092709..2f05aae2 100644 --- a/cipher/asm-poly1305-aarch64.h +++ b/cipher/asm-poly1305-aarch64.h @@ -1,245 +1,245 @@ /* asm-common-aarch64.h - Poly1305 macros for ARMv8/AArch64 assembly * * Copyright (C) 2019 Jussi Kivilinna * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . 
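The ret_spec_stop macro added above is the core of this patch: some AArch64 cores can speculatively execute the instructions that directly follow a ret, so every function return in these files is replaced with ret followed by a dsb sy; isb pair, which stops straight-line speculation from proceeding past the return. A minimal AArch64-only illustration of the same barrier pair, assuming GCC-style inline assembly (speculation_barrier is an illustrative name, not part of the patch):

/* Emits the same "dsb sy; isb" sequence that ret_spec_stop places after
 * 'ret'; illustrative only, AArch64 with GCC/Clang inline asm assumed. */
static inline void
speculation_barrier (void)
{
  __asm__ volatile ("dsb sy\n\t"
                    "isb" ::: "memory");
}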
*/ #ifndef GCRY_ASM_POLY1305_AARCH64_H #define GCRY_ASM_POLY1305_AARCH64_H #include "asm-common-aarch64.h" #ifdef __AARCH64EL__ #define le_to_host(reg) /*_*/ #else #define le_to_host(reg) rev reg, reg; #endif /********************************************************************** poly1305 for stitched chacha20-poly1305 Aarch64 implementations **********************************************************************/ #define POLY_RSTATE x8 #define POLY_RSRC x9 #define POLY_R_H0 x10 #define POLY_R_H1 x11 #define POLY_R_H2 x12 #define POLY_R_H2d w12 #define POLY_R_R0 x13 #define POLY_R_R1 x14 #define POLY_R_R1_MUL5 x15 #define POLY_R_X0_HI x16 #define POLY_R_X0_LO x17 #define POLY_R_X1_HI x19 #define POLY_R_X1_LO x20 #define POLY_R_ONE x21 #define POLY_R_ONEd w21 #define POLY_TMP0 x22 #define POLY_TMP1 x23 #define POLY_TMP2 x24 #define POLY_TMP3 x25 #define POLY_CHACHA_ROUND x26 #define POLY_S_R0 (4 * 4 + 0 * 8) #define POLY_S_R1 (4 * 4 + 1 * 8) #define POLY_S_H0 (4 * 4 + 2 * 8 + 0 * 8) #define POLY_S_H1 (4 * 4 + 2 * 8 + 1 * 8) #define POLY_S_H2d (4 * 4 + 2 * 8 + 2 * 8) #define POLY1305_PUSH_REGS() \ stp x19, x20, [sp, #-16]!; \ CFI_ADJUST_CFA_OFFSET(16); \ CFI_REG_ON_STACK(19, 0); \ CFI_REG_ON_STACK(20, 8); \ stp x21, x22, [sp, #-16]!; \ CFI_ADJUST_CFA_OFFSET(16); \ CFI_REG_ON_STACK(21, 0); \ CFI_REG_ON_STACK(22, 8); \ stp x23, x24, [sp, #-16]!; \ CFI_ADJUST_CFA_OFFSET(16); \ CFI_REG_ON_STACK(23, 0); \ CFI_REG_ON_STACK(24, 8); \ stp x25, x26, [sp, #-16]!; \ CFI_ADJUST_CFA_OFFSET(16); \ CFI_REG_ON_STACK(25, 0); \ CFI_REG_ON_STACK(26, 8); #define POLY1305_POP_REGS() \ ldp x25, x26, [sp], #16; \ CFI_ADJUST_CFA_OFFSET(-16); \ CFI_RESTORE(x25); \ CFI_RESTORE(x26); \ ldp x23, x24, [sp], #16; \ CFI_ADJUST_CFA_OFFSET(-16); \ CFI_RESTORE(x23); \ CFI_RESTORE(x24); \ ldp x21, x22, [sp], #16; \ CFI_ADJUST_CFA_OFFSET(-16); \ CFI_RESTORE(x21); \ CFI_RESTORE(x22); \ ldp x19, x20, [sp], #16; \ CFI_ADJUST_CFA_OFFSET(-16); \ CFI_RESTORE(x19); \ CFI_RESTORE(x20); #define POLY1305_LOAD_STATE() \ ldr POLY_R_R1, [POLY_RSTATE, #(POLY_S_R1)]; \ ldr POLY_R_H0, [POLY_RSTATE, #(POLY_S_H0)]; \ ldr POLY_R_H1, [POLY_RSTATE, #(POLY_S_H1)]; \ ldr POLY_R_H2d, [POLY_RSTATE, #(POLY_S_H2d)]; \ ldr POLY_R_R0, [POLY_RSTATE, #(POLY_S_R0)]; \ add POLY_R_R1_MUL5, POLY_R_R1, POLY_R_R1, lsr #2; \ mov POLY_R_ONE, #1; #define POLY1305_STORE_STATE() \ str POLY_R_H0, [POLY_RSTATE, #(POLY_S_H0)]; \ str POLY_R_H1, [POLY_RSTATE, #(POLY_S_H1)]; \ str POLY_R_H2d, [POLY_RSTATE, #(POLY_S_H2d)]; #define POLY1305_BLOCK_PART1(src_offset) \ /* a = h + m */ \ ldr POLY_TMP0, [POLY_RSRC, #((src_offset) + 0 * 8)]; #define POLY1305_BLOCK_PART2(src_offset) \ ldr POLY_TMP1, [POLY_RSRC, #((src_offset) + 1 * 8)]; #define POLY1305_BLOCK_PART3() \ le_to_host(POLY_TMP0); #define POLY1305_BLOCK_PART4() \ le_to_host(POLY_TMP1); #define POLY1305_BLOCK_PART5() \ adds POLY_R_H0, POLY_R_H0, POLY_TMP0; #define POLY1305_BLOCK_PART6() \ adcs POLY_R_H1, POLY_R_H1, POLY_TMP1; #define POLY1305_BLOCK_PART7() \ adc POLY_R_H2d, POLY_R_H2d, POLY_R_ONEd; #define POLY1305_BLOCK_PART8() \ /* h = a * r (partial mod 2^130-5): */ \ mul POLY_R_X1_LO, POLY_R_H0, POLY_R_R1; /* lo: h0 * r1 */ #define POLY1305_BLOCK_PART9() \ mul POLY_TMP0, POLY_R_H1, POLY_R_R0; /* lo: h1 * r0 */ #define POLY1305_BLOCK_PART10() \ mul POLY_R_X0_LO, POLY_R_H0, POLY_R_R0; /* lo: h0 * r0 */ #define POLY1305_BLOCK_PART11() \ umulh POLY_R_X1_HI, POLY_R_H0, POLY_R_R1; /* hi: h0 * r1 */ #define POLY1305_BLOCK_PART12() \ adds POLY_R_X1_LO, POLY_R_X1_LO, POLY_TMP0; #define POLY1305_BLOCK_PART13() \ umulh POLY_TMP1, 
POLY_R_H1, POLY_R_R0; /* hi: h1 * r0 */ #define POLY1305_BLOCK_PART14() \ mul POLY_TMP2, POLY_R_H1, POLY_R_R1_MUL5; /* lo: h1 * r1 mod 2^130-5 */ #define POLY1305_BLOCK_PART15() \ umulh POLY_R_X0_HI, POLY_R_H0, POLY_R_R0; /* hi: h0 * r0 */ #define POLY1305_BLOCK_PART16() \ adc POLY_R_X1_HI, POLY_R_X1_HI, POLY_TMP1; #define POLY1305_BLOCK_PART17() \ umulh POLY_TMP3, POLY_R_H1, POLY_R_R1_MUL5; /* hi: h1 * r1 mod 2^130-5 */ #define POLY1305_BLOCK_PART18() \ adds POLY_R_X0_LO, POLY_R_X0_LO, POLY_TMP2; #define POLY1305_BLOCK_PART19() \ mul POLY_R_H1, POLY_R_H2, POLY_R_R1_MUL5; /* h2 * r1 mod 2^130-5 */ #define POLY1305_BLOCK_PART20() \ adc POLY_R_X0_HI, POLY_R_X0_HI, POLY_TMP3; #define POLY1305_BLOCK_PART21() \ mul POLY_R_H2, POLY_R_H2, POLY_R_R0; /* h2 * r0 */ #define POLY1305_BLOCK_PART22() \ adds POLY_R_H1, POLY_R_H1, POLY_R_X1_LO; #define POLY1305_BLOCK_PART23() \ adc POLY_R_H0, POLY_R_H2, POLY_R_X1_HI; #define POLY1305_BLOCK_PART24() \ /* carry propagation */ \ and POLY_R_H2, POLY_R_H0, #3; #define POLY1305_BLOCK_PART25() \ lsr POLY_R_H0, POLY_R_H0, #2; #define POLY1305_BLOCK_PART26() \ add POLY_R_H0, POLY_R_H0, POLY_R_H0, lsl #2; #define POLY1305_BLOCK_PART27() \ adds POLY_R_H0, POLY_R_H0, POLY_R_X0_LO; #define POLY1305_BLOCK_PART28() \ adcs POLY_R_H1, POLY_R_H1, POLY_R_X0_HI; #define POLY1305_BLOCK_PART29() \ adc POLY_R_H2d, POLY_R_H2d, wzr; //#define TESTING_POLY1305_ASM #ifdef TESTING_POLY1305_ASM /* for testing only. */ .align 3 .globl _gcry_poly1305_aarch64_blocks1 ELF(.type _gcry_poly1305_aarch64_blocks1,%function;) _gcry_poly1305_aarch64_blocks1: /* input: * x0: poly1305-state * x1: src * x2: nblks */ CFI_STARTPROC() POLY1305_PUSH_REGS(); mov POLY_RSTATE, x0; mov POLY_RSRC, x1; POLY1305_LOAD_STATE(); .L_gcry_poly1305_aarch64_loop1: POLY1305_BLOCK_PART1(0 * 16); POLY1305_BLOCK_PART2(0 * 16); add POLY_RSRC, POLY_RSRC, #16; POLY1305_BLOCK_PART3(); POLY1305_BLOCK_PART4(); POLY1305_BLOCK_PART5(); POLY1305_BLOCK_PART6(); POLY1305_BLOCK_PART7(); POLY1305_BLOCK_PART8(); POLY1305_BLOCK_PART9(); POLY1305_BLOCK_PART10(); POLY1305_BLOCK_PART11(); POLY1305_BLOCK_PART12(); POLY1305_BLOCK_PART13(); POLY1305_BLOCK_PART14(); POLY1305_BLOCK_PART15(); POLY1305_BLOCK_PART16(); POLY1305_BLOCK_PART17(); POLY1305_BLOCK_PART18(); POLY1305_BLOCK_PART19(); POLY1305_BLOCK_PART20(); POLY1305_BLOCK_PART21(); POLY1305_BLOCK_PART22(); POLY1305_BLOCK_PART23(); POLY1305_BLOCK_PART24(); POLY1305_BLOCK_PART25(); POLY1305_BLOCK_PART26(); POLY1305_BLOCK_PART27(); POLY1305_BLOCK_PART28(); POLY1305_BLOCK_PART29(); subs x2, x2, #1; b.ne .L_gcry_poly1305_aarch64_loop1; POLY1305_STORE_STATE(); mov x0, #0; POLY1305_POP_REGS(); - ret; + ret_spec_stop; CFI_ENDPROC() ELF(.size _gcry_poly1305_aarch64_blocks1, .-_gcry_poly1305_aarch64_blocks1;) #endif #endif /* GCRY_ASM_POLY1305_AARCH64_H */ diff --git a/cipher/camellia-aarch64.S b/cipher/camellia-aarch64.S index f4980862..30b568d3 100644 --- a/cipher/camellia-aarch64.S +++ b/cipher/camellia-aarch64.S @@ -1,586 +1,586 @@ /* camellia-aarch64.S - ARMv8/AArch64 assembly implementation of Camellia * cipher * * Copyright (C) 2016 Jussi Kivilinna * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. 
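Taken together, POLY1305_BLOCK_PART1 through PART29 above compute one Poly1305 block update, h = (h + m + 2^128) * r mod 2^130-5, with the accumulator split into two 64-bit limbs plus a small top limb, and with r1_mul5 = r1 + (r1 >> 2) exploiting the key clamping (r1 is a multiple of 4) to fold overflow at 2^130 back in as a multiply by 5. A rough scalar model of the same step, assuming unsigned __int128 support and a full 16-byte block; poly1305_block and its argument layout are illustrative, not the library's API:

#include <stdint.h>

typedef unsigned __int128 u128;

/* h[0],h[1] = low/high 64-bit limbs, h[2] = small top limb; m[0],m[1] = the
 * message block as two little-endian 64-bit words; r0,r1 = clamped key. */
static void
poly1305_block (uint64_t h[3], const uint64_t m[2], uint64_t r0, uint64_t r1)
{
  uint64_t r1_mul5 = r1 + (r1 >> 2);  /* 5*r1/4, exact since r1 % 4 == 0 */
  uint64_t h0, h1, h2;
  u128 t, x0, x1;

  /* h += m, plus the 2^128 pad bit of a full block */
  t = (u128)h[0] + m[0];              h0 = (uint64_t)t;
  t = (u128)h[1] + m[1] + (t >> 64);  h1 = (uint64_t)t;
  h2 = h[2] + (uint64_t)(t >> 64) + 1;

  /* h * r; terms at or above 2^130 are folded back as a multiply by 5 */
  x0 = (u128)h0 * r0 + (u128)h1 * r1_mul5;              /* coefficient of 2^0  */
  x1 = (u128)h0 * r1 + (u128)h1 * r0
       + (u128)h2 * r1_mul5 + ((u128)(h2 * r0) << 64);  /* coefficient of 2^64 */

  /* partial carry propagation, keeping h2 small */
  h1 = (uint64_t)x1;
  h2 = (uint64_t)(x1 >> 64);
  h0 = (h2 >> 2) * 5;
  h2 &= 3;
  t = (u128)h0 + (uint64_t)x0;                     h0 = (uint64_t)t;
  t = (u128)h1 + (uint64_t)(x0 >> 64) + (t >> 64); h1 = (uint64_t)t;
  h2 += (uint64_t)(t >> 64);

  h[0] = h0; h[1] = h1; h[2] = h2;
}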
* * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . */ #include "asm-common-aarch64.h" #if defined(__AARCH64EL__) #ifdef HAVE_COMPATIBLE_GCC_AARCH64_PLATFORM_AS .text /* struct camellia_ctx: */ #define key_table 0 /* register macros */ #define CTX x0 #define RDST x1 #define RSRC x2 #define RKEYBITS w3 #define RTAB1 x4 #define RTAB2 x5 #define RTAB3 x6 #define RTAB4 x7 #define RMASK w8 #define IL w9 #define IR w10 #define xIL x9 #define xIR x10 #define XL w11 #define XR w12 #define YL w13 #define YR w14 #define RT0 w15 #define RT1 w16 #define RT2 w17 #define RT3 w19 #define xRT0 x15 #define xRT1 x16 #define xRT2 x17 #define xRT3 x19 #ifdef __AARCH64EL__ #define host_to_be(reg, rtmp) \ rev reg, reg; #define be_to_host(reg, rtmp) \ rev reg, reg; #else /* nop on big-endian */ #define host_to_be(reg, rtmp) /*_*/ #define be_to_host(reg, rtmp) /*_*/ #endif #define ldr_input_aligned_be(rin, a, b, c, d, rtmp) \ ldr a, [rin, #0]; \ ldr b, [rin, #4]; \ be_to_host(a, rtmp); \ ldr c, [rin, #8]; \ be_to_host(b, rtmp); \ ldr d, [rin, #12]; \ be_to_host(c, rtmp); \ be_to_host(d, rtmp); #define str_output_aligned_be(rout, a, b, c, d, rtmp) \ be_to_host(a, rtmp); \ be_to_host(b, rtmp); \ str a, [rout, #0]; \ be_to_host(c, rtmp); \ str b, [rout, #4]; \ be_to_host(d, rtmp); \ str c, [rout, #8]; \ str d, [rout, #12]; /* unaligned word reads/writes allowed */ #define ldr_input_be(rin, ra, rb, rc, rd, rtmp) \ ldr_input_aligned_be(rin, ra, rb, rc, rd, rtmp) #define str_output_be(rout, ra, rb, rc, rd, rtmp0, rtmp1) \ str_output_aligned_be(rout, ra, rb, rc, rd, rtmp0) /********************************************************************** 1-way camellia **********************************************************************/ #define roundsm(xl, xr, kl, kr, yl, yr) \ ldr RT2, [CTX, #(key_table + ((kl) * 4))]; \ and IR, RMASK, xr, lsl#(4); /*sp1110*/ \ ldr RT3, [CTX, #(key_table + ((kr) * 4))]; \ and IL, RMASK, xl, lsr#(24 - 4); /*sp1110*/ \ and RT0, RMASK, xr, lsr#(16 - 4); /*sp3033*/ \ ldr IR, [RTAB1, xIR]; \ and RT1, RMASK, xl, lsr#(8 - 4); /*sp3033*/ \ eor yl, yl, RT2; \ ldr IL, [RTAB1, xIL]; \ eor yr, yr, RT3; \ \ ldr RT0, [RTAB3, xRT0]; \ ldr RT1, [RTAB3, xRT1]; \ \ and RT2, RMASK, xr, lsr#(24 - 4); /*sp0222*/ \ and RT3, RMASK, xl, lsr#(16 - 4); /*sp0222*/ \ \ eor IR, IR, RT0; \ eor IL, IL, RT1; \ \ ldr RT2, [RTAB2, xRT2]; \ and RT0, RMASK, xr, lsr#(8 - 4); /*sp4404*/ \ ldr RT3, [RTAB2, xRT3]; \ and RT1, RMASK, xl, lsl#(4); /*sp4404*/ \ \ ldr RT0, [RTAB4, xRT0]; \ ldr RT1, [RTAB4, xRT1]; \ \ eor IR, IR, RT2; \ eor IL, IL, RT3; \ eor IR, IR, RT0; \ eor IL, IL, RT1; \ \ eor IR, IR, IL; \ eor yr, yr, IL, ror#8; \ eor yl, yl, IR; \ eor yr, yr, IR; #define enc_rounds(n) \ roundsm(XL, XR, ((n) + 2) * 2 + 0, ((n) + 2) * 2 + 1, YL, YR); \ roundsm(YL, YR, ((n) + 3) * 2 + 0, ((n) + 3) * 2 + 1, XL, XR); \ roundsm(XL, XR, ((n) + 4) * 2 + 0, ((n) + 4) * 2 + 1, YL, YR); \ roundsm(YL, YR, ((n) + 5) * 2 + 0, ((n) + 5) * 2 + 1, XL, XR); \ roundsm(XL, XR, ((n) + 6) * 2 + 0, ((n) + 6) * 2 + 1, YL, YR); \ roundsm(YL, YR, ((n) + 7) * 2 + 0, ((n) + 7) * 2 + 1, XL, XR); #define dec_rounds(n) \ roundsm(XL, XR, ((n) + 7) * 2 + 0, ((n) + 7) * 2 + 1, YL, YR); \ roundsm(YL, YR, ((n) + 6) * 2 + 0, ((n) + 6) * 2 + 1, XL, XR); \ 
roundsm(XL, XR, ((n) + 5) * 2 + 0, ((n) + 5) * 2 + 1, YL, YR); \ roundsm(YL, YR, ((n) + 4) * 2 + 0, ((n) + 4) * 2 + 1, XL, XR); \ roundsm(XL, XR, ((n) + 3) * 2 + 0, ((n) + 3) * 2 + 1, YL, YR); \ roundsm(YL, YR, ((n) + 2) * 2 + 0, ((n) + 2) * 2 + 1, XL, XR); /* perform FL and FL⁻¹ */ #define fls(ll, lr, rl, rr, kll, klr, krl, krr) \ ldr RT0, [CTX, #(key_table + ((kll) * 4))]; \ ldr RT2, [CTX, #(key_table + ((krr) * 4))]; \ and RT0, RT0, ll; \ ldr RT3, [CTX, #(key_table + ((krl) * 4))]; \ orr RT2, RT2, rr; \ ldr RT1, [CTX, #(key_table + ((klr) * 4))]; \ eor rl, rl, RT2; \ eor lr, lr, RT0, ror#31; \ and RT3, RT3, rl; \ orr RT1, RT1, lr; \ eor ll, ll, RT1; \ eor rr, rr, RT3, ror#31; #define enc_fls(n) \ fls(XL, XR, YL, YR, \ (n) * 2 + 0, (n) * 2 + 1, \ (n) * 2 + 2, (n) * 2 + 3); #define dec_fls(n) \ fls(XL, XR, YL, YR, \ (n) * 2 + 2, (n) * 2 + 3, \ (n) * 2 + 0, (n) * 2 + 1); #define inpack(n) \ ldr_input_be(RSRC, XL, XR, YL, YR, RT0); \ ldr RT0, [CTX, #(key_table + ((n) * 8) + 0)]; \ ldr RT1, [CTX, #(key_table + ((n) * 8) + 4)]; \ eor XL, XL, RT0; \ eor XR, XR, RT1; #define outunpack(n) \ ldr RT0, [CTX, #(key_table + ((n) * 8) + 0)]; \ ldr RT1, [CTX, #(key_table + ((n) * 8) + 4)]; \ eor YL, YL, RT0; \ eor YR, YR, RT1; \ str_output_be(RDST, YL, YR, XL, XR, RT0, RT1); .globl _gcry_camellia_arm_encrypt_block ELF(.type _gcry_camellia_arm_encrypt_block,@function;) _gcry_camellia_arm_encrypt_block: CFI_STARTPROC() stp x19, x30, [sp, #-16]! CFI_ADJUST_CFA_OFFSET(16) CFI_REG_ON_STACK(19, 0) CFI_REG_ON_STACK(30, 8) /* input: * x0: keytable * x1: dst * x2: src * w3: keybitlen */ adr RTAB1, _gcry_camellia_arm_tables; mov RMASK, #(0xff<<4); /* byte mask */ add RTAB2, RTAB1, #(1 * 4); add RTAB3, RTAB1, #(2 * 4); add RTAB4, RTAB1, #(3 * 4); inpack(0); enc_rounds(0); enc_fls(8); enc_rounds(8); enc_fls(16); enc_rounds(16); cmp RKEYBITS, #(16 * 8); bne .Lenc_256; outunpack(24); CFI_REMEMBER_STATE() ldp x19, x30, [sp], #16 CFI_ADJUST_CFA_OFFSET(-16) CFI_RESTORE(x19) CFI_RESTORE(x30) - ret; + ret_spec_stop; CFI_RESTORE_STATE() .ltorg .Lenc_256: enc_fls(24); enc_rounds(24); outunpack(32); ldp x19, x30, [sp], #16 CFI_ADJUST_CFA_OFFSET(-16) CFI_RESTORE(x19) CFI_RESTORE(x30) - ret; + ret_spec_stop; CFI_ENDPROC() .ltorg ELF(.size _gcry_camellia_arm_encrypt_block,.-_gcry_camellia_arm_encrypt_block;) .globl _gcry_camellia_arm_decrypt_block ELF(.type _gcry_camellia_arm_decrypt_block,@function;) _gcry_camellia_arm_decrypt_block: CFI_STARTPROC() stp x19, x30, [sp, #-16]! 
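Between every six Feistel rounds the fls macro above applies Camellia's FL function to one half of the state and FL⁻¹ to the other; the ror#31 operands are rotates left by one. A scalar sketch of the two layers, with camellia_fls as an illustrative helper name and kll/klr/krl/krr standing for the four 32-bit subkey words the macro loads from the key table:

#include <stdint.h>

static inline uint32_t
rol32 (uint32_t v, unsigned c)
{
  return (v << c) | (v >> (32 - c));
}

/* Camellia FL on the (ll, lr) half and FL^-1 on the (rl, rr) half,
 * mirroring the fls macro above (illustrative scalar model). */
static void
camellia_fls (uint32_t *ll, uint32_t *lr, uint32_t *rl, uint32_t *rr,
              uint32_t kll, uint32_t klr, uint32_t krl, uint32_t krr)
{
  /* FL */
  *lr ^= rol32 (*ll & kll, 1);
  *ll ^= (*lr | klr);
  /* FL^-1 */
  *rl ^= (*rr | krr);
  *rr ^= rol32 (*rl & krl, 1);
}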
CFI_ADJUST_CFA_OFFSET(16) CFI_REG_ON_STACK(19, 0) CFI_REG_ON_STACK(30, 8) /* input: * x0: keytable * x1: dst * x2: src * w3: keybitlen */ adr RTAB1, _gcry_camellia_arm_tables; mov RMASK, #(0xff<<4); /* byte mask */ add RTAB2, RTAB1, #(1 * 4); add RTAB3, RTAB1, #(2 * 4); add RTAB4, RTAB1, #(3 * 4); cmp RKEYBITS, #(16 * 8); bne .Ldec_256; inpack(24); .Ldec_128: dec_rounds(16); dec_fls(16); dec_rounds(8); dec_fls(8); dec_rounds(0); outunpack(0); CFI_REMEMBER_STATE() ldp x19, x30, [sp], #16 CFI_ADJUST_CFA_OFFSET(-16) CFI_RESTORE(x19) CFI_RESTORE(x30) - ret; + ret_spec_stop; CFI_RESTORE_STATE() .ltorg .Ldec_256: inpack(32); dec_rounds(24); dec_fls(24); b .Ldec_128; CFI_ENDPROC() .ltorg ELF(.size _gcry_camellia_arm_decrypt_block,.-_gcry_camellia_arm_decrypt_block;) /* Encryption/Decryption tables */ ELF(.type _gcry_camellia_arm_tables,@object;) .balign 32 _gcry_camellia_arm_tables: .Lcamellia_sp1110: .long 0x70707000 .Lcamellia_sp0222: .long 0x00e0e0e0 .Lcamellia_sp3033: .long 0x38003838 .Lcamellia_sp4404: .long 0x70700070 .long 0x82828200, 0x00050505, 0x41004141, 0x2c2c002c .long 0x2c2c2c00, 0x00585858, 0x16001616, 0xb3b300b3 .long 0xececec00, 0x00d9d9d9, 0x76007676, 0xc0c000c0 .long 0xb3b3b300, 0x00676767, 0xd900d9d9, 0xe4e400e4 .long 0x27272700, 0x004e4e4e, 0x93009393, 0x57570057 .long 0xc0c0c000, 0x00818181, 0x60006060, 0xeaea00ea .long 0xe5e5e500, 0x00cbcbcb, 0xf200f2f2, 0xaeae00ae .long 0xe4e4e400, 0x00c9c9c9, 0x72007272, 0x23230023 .long 0x85858500, 0x000b0b0b, 0xc200c2c2, 0x6b6b006b .long 0x57575700, 0x00aeaeae, 0xab00abab, 0x45450045 .long 0x35353500, 0x006a6a6a, 0x9a009a9a, 0xa5a500a5 .long 0xeaeaea00, 0x00d5d5d5, 0x75007575, 0xeded00ed .long 0x0c0c0c00, 0x00181818, 0x06000606, 0x4f4f004f .long 0xaeaeae00, 0x005d5d5d, 0x57005757, 0x1d1d001d .long 0x41414100, 0x00828282, 0xa000a0a0, 0x92920092 .long 0x23232300, 0x00464646, 0x91009191, 0x86860086 .long 0xefefef00, 0x00dfdfdf, 0xf700f7f7, 0xafaf00af .long 0x6b6b6b00, 0x00d6d6d6, 0xb500b5b5, 0x7c7c007c .long 0x93939300, 0x00272727, 0xc900c9c9, 0x1f1f001f .long 0x45454500, 0x008a8a8a, 0xa200a2a2, 0x3e3e003e .long 0x19191900, 0x00323232, 0x8c008c8c, 0xdcdc00dc .long 0xa5a5a500, 0x004b4b4b, 0xd200d2d2, 0x5e5e005e .long 0x21212100, 0x00424242, 0x90009090, 0x0b0b000b .long 0xededed00, 0x00dbdbdb, 0xf600f6f6, 0xa6a600a6 .long 0x0e0e0e00, 0x001c1c1c, 0x07000707, 0x39390039 .long 0x4f4f4f00, 0x009e9e9e, 0xa700a7a7, 0xd5d500d5 .long 0x4e4e4e00, 0x009c9c9c, 0x27002727, 0x5d5d005d .long 0x1d1d1d00, 0x003a3a3a, 0x8e008e8e, 0xd9d900d9 .long 0x65656500, 0x00cacaca, 0xb200b2b2, 0x5a5a005a .long 0x92929200, 0x00252525, 0x49004949, 0x51510051 .long 0xbdbdbd00, 0x007b7b7b, 0xde00dede, 0x6c6c006c .long 0x86868600, 0x000d0d0d, 0x43004343, 0x8b8b008b .long 0xb8b8b800, 0x00717171, 0x5c005c5c, 0x9a9a009a .long 0xafafaf00, 0x005f5f5f, 0xd700d7d7, 0xfbfb00fb .long 0x8f8f8f00, 0x001f1f1f, 0xc700c7c7, 0xb0b000b0 .long 0x7c7c7c00, 0x00f8f8f8, 0x3e003e3e, 0x74740074 .long 0xebebeb00, 0x00d7d7d7, 0xf500f5f5, 0x2b2b002b .long 0x1f1f1f00, 0x003e3e3e, 0x8f008f8f, 0xf0f000f0 .long 0xcecece00, 0x009d9d9d, 0x67006767, 0x84840084 .long 0x3e3e3e00, 0x007c7c7c, 0x1f001f1f, 0xdfdf00df .long 0x30303000, 0x00606060, 0x18001818, 0xcbcb00cb .long 0xdcdcdc00, 0x00b9b9b9, 0x6e006e6e, 0x34340034 .long 0x5f5f5f00, 0x00bebebe, 0xaf00afaf, 0x76760076 .long 0x5e5e5e00, 0x00bcbcbc, 0x2f002f2f, 0x6d6d006d .long 0xc5c5c500, 0x008b8b8b, 0xe200e2e2, 0xa9a900a9 .long 0x0b0b0b00, 0x00161616, 0x85008585, 0xd1d100d1 .long 0x1a1a1a00, 0x00343434, 0x0d000d0d, 0x04040004 .long 0xa6a6a600, 0x004d4d4d, 
0x53005353, 0x14140014 .long 0xe1e1e100, 0x00c3c3c3, 0xf000f0f0, 0x3a3a003a .long 0x39393900, 0x00727272, 0x9c009c9c, 0xdede00de .long 0xcacaca00, 0x00959595, 0x65006565, 0x11110011 .long 0xd5d5d500, 0x00ababab, 0xea00eaea, 0x32320032 .long 0x47474700, 0x008e8e8e, 0xa300a3a3, 0x9c9c009c .long 0x5d5d5d00, 0x00bababa, 0xae00aeae, 0x53530053 .long 0x3d3d3d00, 0x007a7a7a, 0x9e009e9e, 0xf2f200f2 .long 0xd9d9d900, 0x00b3b3b3, 0xec00ecec, 0xfefe00fe .long 0x01010100, 0x00020202, 0x80008080, 0xcfcf00cf .long 0x5a5a5a00, 0x00b4b4b4, 0x2d002d2d, 0xc3c300c3 .long 0xd6d6d600, 0x00adadad, 0x6b006b6b, 0x7a7a007a .long 0x51515100, 0x00a2a2a2, 0xa800a8a8, 0x24240024 .long 0x56565600, 0x00acacac, 0x2b002b2b, 0xe8e800e8 .long 0x6c6c6c00, 0x00d8d8d8, 0x36003636, 0x60600060 .long 0x4d4d4d00, 0x009a9a9a, 0xa600a6a6, 0x69690069 .long 0x8b8b8b00, 0x00171717, 0xc500c5c5, 0xaaaa00aa .long 0x0d0d0d00, 0x001a1a1a, 0x86008686, 0xa0a000a0 .long 0x9a9a9a00, 0x00353535, 0x4d004d4d, 0xa1a100a1 .long 0x66666600, 0x00cccccc, 0x33003333, 0x62620062 .long 0xfbfbfb00, 0x00f7f7f7, 0xfd00fdfd, 0x54540054 .long 0xcccccc00, 0x00999999, 0x66006666, 0x1e1e001e .long 0xb0b0b000, 0x00616161, 0x58005858, 0xe0e000e0 .long 0x2d2d2d00, 0x005a5a5a, 0x96009696, 0x64640064 .long 0x74747400, 0x00e8e8e8, 0x3a003a3a, 0x10100010 .long 0x12121200, 0x00242424, 0x09000909, 0x00000000 .long 0x2b2b2b00, 0x00565656, 0x95009595, 0xa3a300a3 .long 0x20202000, 0x00404040, 0x10001010, 0x75750075 .long 0xf0f0f000, 0x00e1e1e1, 0x78007878, 0x8a8a008a .long 0xb1b1b100, 0x00636363, 0xd800d8d8, 0xe6e600e6 .long 0x84848400, 0x00090909, 0x42004242, 0x09090009 .long 0x99999900, 0x00333333, 0xcc00cccc, 0xdddd00dd .long 0xdfdfdf00, 0x00bfbfbf, 0xef00efef, 0x87870087 .long 0x4c4c4c00, 0x00989898, 0x26002626, 0x83830083 .long 0xcbcbcb00, 0x00979797, 0xe500e5e5, 0xcdcd00cd .long 0xc2c2c200, 0x00858585, 0x61006161, 0x90900090 .long 0x34343400, 0x00686868, 0x1a001a1a, 0x73730073 .long 0x7e7e7e00, 0x00fcfcfc, 0x3f003f3f, 0xf6f600f6 .long 0x76767600, 0x00ececec, 0x3b003b3b, 0x9d9d009d .long 0x05050500, 0x000a0a0a, 0x82008282, 0xbfbf00bf .long 0x6d6d6d00, 0x00dadada, 0xb600b6b6, 0x52520052 .long 0xb7b7b700, 0x006f6f6f, 0xdb00dbdb, 0xd8d800d8 .long 0xa9a9a900, 0x00535353, 0xd400d4d4, 0xc8c800c8 .long 0x31313100, 0x00626262, 0x98009898, 0xc6c600c6 .long 0xd1d1d100, 0x00a3a3a3, 0xe800e8e8, 0x81810081 .long 0x17171700, 0x002e2e2e, 0x8b008b8b, 0x6f6f006f .long 0x04040400, 0x00080808, 0x02000202, 0x13130013 .long 0xd7d7d700, 0x00afafaf, 0xeb00ebeb, 0x63630063 .long 0x14141400, 0x00282828, 0x0a000a0a, 0xe9e900e9 .long 0x58585800, 0x00b0b0b0, 0x2c002c2c, 0xa7a700a7 .long 0x3a3a3a00, 0x00747474, 0x1d001d1d, 0x9f9f009f .long 0x61616100, 0x00c2c2c2, 0xb000b0b0, 0xbcbc00bc .long 0xdedede00, 0x00bdbdbd, 0x6f006f6f, 0x29290029 .long 0x1b1b1b00, 0x00363636, 0x8d008d8d, 0xf9f900f9 .long 0x11111100, 0x00222222, 0x88008888, 0x2f2f002f .long 0x1c1c1c00, 0x00383838, 0x0e000e0e, 0xb4b400b4 .long 0x32323200, 0x00646464, 0x19001919, 0x78780078 .long 0x0f0f0f00, 0x001e1e1e, 0x87008787, 0x06060006 .long 0x9c9c9c00, 0x00393939, 0x4e004e4e, 0xe7e700e7 .long 0x16161600, 0x002c2c2c, 0x0b000b0b, 0x71710071 .long 0x53535300, 0x00a6a6a6, 0xa900a9a9, 0xd4d400d4 .long 0x18181800, 0x00303030, 0x0c000c0c, 0xabab00ab .long 0xf2f2f200, 0x00e5e5e5, 0x79007979, 0x88880088 .long 0x22222200, 0x00444444, 0x11001111, 0x8d8d008d .long 0xfefefe00, 0x00fdfdfd, 0x7f007f7f, 0x72720072 .long 0x44444400, 0x00888888, 0x22002222, 0xb9b900b9 .long 0xcfcfcf00, 0x009f9f9f, 0xe700e7e7, 0xf8f800f8 .long 0xb2b2b200, 0x00656565, 
0x59005959, 0xacac00ac .long 0xc3c3c300, 0x00878787, 0xe100e1e1, 0x36360036 .long 0xb5b5b500, 0x006b6b6b, 0xda00dada, 0x2a2a002a .long 0x7a7a7a00, 0x00f4f4f4, 0x3d003d3d, 0x3c3c003c .long 0x91919100, 0x00232323, 0xc800c8c8, 0xf1f100f1 .long 0x24242400, 0x00484848, 0x12001212, 0x40400040 .long 0x08080800, 0x00101010, 0x04000404, 0xd3d300d3 .long 0xe8e8e800, 0x00d1d1d1, 0x74007474, 0xbbbb00bb .long 0xa8a8a800, 0x00515151, 0x54005454, 0x43430043 .long 0x60606000, 0x00c0c0c0, 0x30003030, 0x15150015 .long 0xfcfcfc00, 0x00f9f9f9, 0x7e007e7e, 0xadad00ad .long 0x69696900, 0x00d2d2d2, 0xb400b4b4, 0x77770077 .long 0x50505000, 0x00a0a0a0, 0x28002828, 0x80800080 .long 0xaaaaaa00, 0x00555555, 0x55005555, 0x82820082 .long 0xd0d0d000, 0x00a1a1a1, 0x68006868, 0xecec00ec .long 0xa0a0a000, 0x00414141, 0x50005050, 0x27270027 .long 0x7d7d7d00, 0x00fafafa, 0xbe00bebe, 0xe5e500e5 .long 0xa1a1a100, 0x00434343, 0xd000d0d0, 0x85850085 .long 0x89898900, 0x00131313, 0xc400c4c4, 0x35350035 .long 0x62626200, 0x00c4c4c4, 0x31003131, 0x0c0c000c .long 0x97979700, 0x002f2f2f, 0xcb00cbcb, 0x41410041 .long 0x54545400, 0x00a8a8a8, 0x2a002a2a, 0xefef00ef .long 0x5b5b5b00, 0x00b6b6b6, 0xad00adad, 0x93930093 .long 0x1e1e1e00, 0x003c3c3c, 0x0f000f0f, 0x19190019 .long 0x95959500, 0x002b2b2b, 0xca00caca, 0x21210021 .long 0xe0e0e000, 0x00c1c1c1, 0x70007070, 0x0e0e000e .long 0xffffff00, 0x00ffffff, 0xff00ffff, 0x4e4e004e .long 0x64646400, 0x00c8c8c8, 0x32003232, 0x65650065 .long 0xd2d2d200, 0x00a5a5a5, 0x69006969, 0xbdbd00bd .long 0x10101000, 0x00202020, 0x08000808, 0xb8b800b8 .long 0xc4c4c400, 0x00898989, 0x62006262, 0x8f8f008f .long 0x00000000, 0x00000000, 0x00000000, 0xebeb00eb .long 0x48484800, 0x00909090, 0x24002424, 0xcece00ce .long 0xa3a3a300, 0x00474747, 0xd100d1d1, 0x30300030 .long 0xf7f7f700, 0x00efefef, 0xfb00fbfb, 0x5f5f005f .long 0x75757500, 0x00eaeaea, 0xba00baba, 0xc5c500c5 .long 0xdbdbdb00, 0x00b7b7b7, 0xed00eded, 0x1a1a001a .long 0x8a8a8a00, 0x00151515, 0x45004545, 0xe1e100e1 .long 0x03030300, 0x00060606, 0x81008181, 0xcaca00ca .long 0xe6e6e600, 0x00cdcdcd, 0x73007373, 0x47470047 .long 0xdadada00, 0x00b5b5b5, 0x6d006d6d, 0x3d3d003d .long 0x09090900, 0x00121212, 0x84008484, 0x01010001 .long 0x3f3f3f00, 0x007e7e7e, 0x9f009f9f, 0xd6d600d6 .long 0xdddddd00, 0x00bbbbbb, 0xee00eeee, 0x56560056 .long 0x94949400, 0x00292929, 0x4a004a4a, 0x4d4d004d .long 0x87878700, 0x000f0f0f, 0xc300c3c3, 0x0d0d000d .long 0x5c5c5c00, 0x00b8b8b8, 0x2e002e2e, 0x66660066 .long 0x83838300, 0x00070707, 0xc100c1c1, 0xcccc00cc .long 0x02020200, 0x00040404, 0x01000101, 0x2d2d002d .long 0xcdcdcd00, 0x009b9b9b, 0xe600e6e6, 0x12120012 .long 0x4a4a4a00, 0x00949494, 0x25002525, 0x20200020 .long 0x90909000, 0x00212121, 0x48004848, 0xb1b100b1 .long 0x33333300, 0x00666666, 0x99009999, 0x99990099 .long 0x73737300, 0x00e6e6e6, 0xb900b9b9, 0x4c4c004c .long 0x67676700, 0x00cecece, 0xb300b3b3, 0xc2c200c2 .long 0xf6f6f600, 0x00ededed, 0x7b007b7b, 0x7e7e007e .long 0xf3f3f300, 0x00e7e7e7, 0xf900f9f9, 0x05050005 .long 0x9d9d9d00, 0x003b3b3b, 0xce00cece, 0xb7b700b7 .long 0x7f7f7f00, 0x00fefefe, 0xbf00bfbf, 0x31310031 .long 0xbfbfbf00, 0x007f7f7f, 0xdf00dfdf, 0x17170017 .long 0xe2e2e200, 0x00c5c5c5, 0x71007171, 0xd7d700d7 .long 0x52525200, 0x00a4a4a4, 0x29002929, 0x58580058 .long 0x9b9b9b00, 0x00373737, 0xcd00cdcd, 0x61610061 .long 0xd8d8d800, 0x00b1b1b1, 0x6c006c6c, 0x1b1b001b .long 0x26262600, 0x004c4c4c, 0x13001313, 0x1c1c001c .long 0xc8c8c800, 0x00919191, 0x64006464, 0x0f0f000f .long 0x37373700, 0x006e6e6e, 0x9b009b9b, 0x16160016 .long 0xc6c6c600, 0x008d8d8d, 
0x63006363, 0x18180018 .long 0x3b3b3b00, 0x00767676, 0x9d009d9d, 0x22220022 .long 0x81818100, 0x00030303, 0xc000c0c0, 0x44440044 .long 0x96969600, 0x002d2d2d, 0x4b004b4b, 0xb2b200b2 .long 0x6f6f6f00, 0x00dedede, 0xb700b7b7, 0xb5b500b5 .long 0x4b4b4b00, 0x00969696, 0xa500a5a5, 0x91910091 .long 0x13131300, 0x00262626, 0x89008989, 0x08080008 .long 0xbebebe00, 0x007d7d7d, 0x5f005f5f, 0xa8a800a8 .long 0x63636300, 0x00c6c6c6, 0xb100b1b1, 0xfcfc00fc .long 0x2e2e2e00, 0x005c5c5c, 0x17001717, 0x50500050 .long 0xe9e9e900, 0x00d3d3d3, 0xf400f4f4, 0xd0d000d0 .long 0x79797900, 0x00f2f2f2, 0xbc00bcbc, 0x7d7d007d .long 0xa7a7a700, 0x004f4f4f, 0xd300d3d3, 0x89890089 .long 0x8c8c8c00, 0x00191919, 0x46004646, 0x97970097 .long 0x9f9f9f00, 0x003f3f3f, 0xcf00cfcf, 0x5b5b005b .long 0x6e6e6e00, 0x00dcdcdc, 0x37003737, 0x95950095 .long 0xbcbcbc00, 0x00797979, 0x5e005e5e, 0xffff00ff .long 0x8e8e8e00, 0x001d1d1d, 0x47004747, 0xd2d200d2 .long 0x29292900, 0x00525252, 0x94009494, 0xc4c400c4 .long 0xf5f5f500, 0x00ebebeb, 0xfa00fafa, 0x48480048 .long 0xf9f9f900, 0x00f3f3f3, 0xfc00fcfc, 0xf7f700f7 .long 0xb6b6b600, 0x006d6d6d, 0x5b005b5b, 0xdbdb00db .long 0x2f2f2f00, 0x005e5e5e, 0x97009797, 0x03030003 .long 0xfdfdfd00, 0x00fbfbfb, 0xfe00fefe, 0xdada00da .long 0xb4b4b400, 0x00696969, 0x5a005a5a, 0x3f3f003f .long 0x59595900, 0x00b2b2b2, 0xac00acac, 0x94940094 .long 0x78787800, 0x00f0f0f0, 0x3c003c3c, 0x5c5c005c .long 0x98989800, 0x00313131, 0x4c004c4c, 0x02020002 .long 0x06060600, 0x000c0c0c, 0x03000303, 0x4a4a004a .long 0x6a6a6a00, 0x00d4d4d4, 0x35003535, 0x33330033 .long 0xe7e7e700, 0x00cfcfcf, 0xf300f3f3, 0x67670067 .long 0x46464600, 0x008c8c8c, 0x23002323, 0xf3f300f3 .long 0x71717100, 0x00e2e2e2, 0xb800b8b8, 0x7f7f007f .long 0xbababa00, 0x00757575, 0x5d005d5d, 0xe2e200e2 .long 0xd4d4d400, 0x00a9a9a9, 0x6a006a6a, 0x9b9b009b .long 0x25252500, 0x004a4a4a, 0x92009292, 0x26260026 .long 0xababab00, 0x00575757, 0xd500d5d5, 0x37370037 .long 0x42424200, 0x00848484, 0x21002121, 0x3b3b003b .long 0x88888800, 0x00111111, 0x44004444, 0x96960096 .long 0xa2a2a200, 0x00454545, 0x51005151, 0x4b4b004b .long 0x8d8d8d00, 0x001b1b1b, 0xc600c6c6, 0xbebe00be .long 0xfafafa00, 0x00f5f5f5, 0x7d007d7d, 0x2e2e002e .long 0x72727200, 0x00e4e4e4, 0x39003939, 0x79790079 .long 0x07070700, 0x000e0e0e, 0x83008383, 0x8c8c008c .long 0xb9b9b900, 0x00737373, 0xdc00dcdc, 0x6e6e006e .long 0x55555500, 0x00aaaaaa, 0xaa00aaaa, 0x8e8e008e .long 0xf8f8f800, 0x00f1f1f1, 0x7c007c7c, 0xf5f500f5 .long 0xeeeeee00, 0x00dddddd, 0x77007777, 0xb6b600b6 .long 0xacacac00, 0x00595959, 0x56005656, 0xfdfd00fd .long 0x0a0a0a00, 0x00141414, 0x05000505, 0x59590059 .long 0x36363600, 0x006c6c6c, 0x1b001b1b, 0x98980098 .long 0x49494900, 0x00929292, 0xa400a4a4, 0x6a6a006a .long 0x2a2a2a00, 0x00545454, 0x15001515, 0x46460046 .long 0x68686800, 0x00d0d0d0, 0x34003434, 0xbaba00ba .long 0x3c3c3c00, 0x00787878, 0x1e001e1e, 0x25250025 .long 0x38383800, 0x00707070, 0x1c001c1c, 0x42420042 .long 0xf1f1f100, 0x00e3e3e3, 0xf800f8f8, 0xa2a200a2 .long 0xa4a4a400, 0x00494949, 0x52005252, 0xfafa00fa .long 0x40404000, 0x00808080, 0x20002020, 0x07070007 .long 0x28282800, 0x00505050, 0x14001414, 0x55550055 .long 0xd3d3d300, 0x00a7a7a7, 0xe900e9e9, 0xeeee00ee .long 0x7b7b7b00, 0x00f6f6f6, 0xbd00bdbd, 0x0a0a000a .long 0xbbbbbb00, 0x00777777, 0xdd00dddd, 0x49490049 .long 0xc9c9c900, 0x00939393, 0xe400e4e4, 0x68680068 .long 0x43434300, 0x00868686, 0xa100a1a1, 0x38380038 .long 0xc1c1c100, 0x00838383, 0xe000e0e0, 0xa4a400a4 .long 0x15151500, 0x002a2a2a, 0x8a008a8a, 0x28280028 .long 0xe3e3e300, 0x00c7c7c7, 
0xf100f1f1, 0x7b7b007b .long 0xadadad00, 0x005b5b5b, 0xd600d6d6, 0xc9c900c9 .long 0xf4f4f400, 0x00e9e9e9, 0x7a007a7a, 0xc1c100c1 .long 0x77777700, 0x00eeeeee, 0xbb00bbbb, 0xe3e300e3 .long 0xc7c7c700, 0x008f8f8f, 0xe300e3e3, 0xf4f400f4 .long 0x80808000, 0x00010101, 0x40004040, 0xc7c700c7 .long 0x9e9e9e00, 0x003d3d3d, 0x4f004f4f, 0x9e9e009e ELF(.size _gcry_camellia_arm_tables,.-_gcry_camellia_arm_tables;) #endif /*HAVE_COMPATIBLE_GCC_AARCH64_PLATFORM_AS*/ #endif /*__AARCH64EL__*/ diff --git a/cipher/chacha20-aarch64.S b/cipher/chacha20-aarch64.S index 4f76834b..2a980b95 100644 --- a/cipher/chacha20-aarch64.S +++ b/cipher/chacha20-aarch64.S @@ -1,648 +1,648 @@ /* chacha20-aarch64.S - ARMv8/AArch64 accelerated chacha20 blocks function * * Copyright (C) 2017-2019 Jussi Kivilinna * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . */ /* * Based on D. J. Bernstein reference implementation at * http://cr.yp.to/chacha.html: * * chacha-regs.c version 20080118 * D. J. Bernstein * Public domain. */ #include "asm-common-aarch64.h" #if defined(__AARCH64EL__) && \ defined(HAVE_COMPATIBLE_GCC_AARCH64_PLATFORM_AS) && \ defined(HAVE_GCC_INLINE_ASM_AARCH64_NEON) && \ defined(USE_CHACHA20) .cpu generic+simd .text #include "asm-poly1305-aarch64.h" /* register macros */ #define INPUT x0 #define DST x1 #define SRC x2 #define NBLKS x3 #define ROUND x4 #define INPUT_CTR x5 #define INPUT_POS x6 #define CTR x7 /* vector registers */ #define X0 v16 #define X1 v17 #define X2 v18 #define X3 v19 #define X4 v20 #define X5 v21 #define X6 v22 #define X7 v23 #define X8 v24 #define X9 v25 #define X10 v26 #define X11 v27 #define X12 v28 #define X13 v29 #define X14 v30 #define X15 v31 #define VCTR v0 #define VTMP0 v1 #define VTMP1 v2 #define VTMP2 v3 #define VTMP3 v4 #define X12_TMP v5 #define X13_TMP v6 #define ROT8 v7 /********************************************************************** helper macros **********************************************************************/ #define _(...) 
__VA_ARGS__ #define vpunpckldq(s1, s2, dst) \ zip1 dst.4s, s2.4s, s1.4s; #define vpunpckhdq(s1, s2, dst) \ zip2 dst.4s, s2.4s, s1.4s; #define vpunpcklqdq(s1, s2, dst) \ zip1 dst.2d, s2.2d, s1.2d; #define vpunpckhqdq(s1, s2, dst) \ zip2 dst.2d, s2.2d, s1.2d; /* 4x4 32-bit integer matrix transpose */ #define transpose_4x4(x0, x1, x2, x3, t1, t2, t3) \ vpunpckhdq(x1, x0, t2); \ vpunpckldq(x1, x0, x0); \ \ vpunpckldq(x3, x2, t1); \ vpunpckhdq(x3, x2, x2); \ \ vpunpckhqdq(t1, x0, x1); \ vpunpcklqdq(t1, x0, x0); \ \ vpunpckhqdq(x2, t2, x3); \ vpunpcklqdq(x2, t2, x2); #define clear(x) \ movi x.16b, #0; /********************************************************************** 4-way chacha20 **********************************************************************/ #define XOR(d,s1,s2) \ eor d.16b, s2.16b, s1.16b; #define PLUS(ds,s) \ add ds.4s, ds.4s, s.4s; #define ROTATE4(dst1,dst2,dst3,dst4,c,src1,src2,src3,src4,iop1,iop2,iop3) \ shl dst1.4s, src1.4s, #(c); \ shl dst2.4s, src2.4s, #(c); \ iop1; \ shl dst3.4s, src3.4s, #(c); \ shl dst4.4s, src4.4s, #(c); \ iop2; \ sri dst1.4s, src1.4s, #(32 - (c)); \ sri dst2.4s, src2.4s, #(32 - (c)); \ iop3; \ sri dst3.4s, src3.4s, #(32 - (c)); \ sri dst4.4s, src4.4s, #(32 - (c)); #define ROTATE4_8(dst1,dst2,dst3,dst4,src1,src2,src3,src4,iop1,iop2,iop3) \ tbl dst1.16b, {src1.16b}, ROT8.16b; \ iop1; \ tbl dst2.16b, {src2.16b}, ROT8.16b; \ iop2; \ tbl dst3.16b, {src3.16b}, ROT8.16b; \ iop3; \ tbl dst4.16b, {src4.16b}, ROT8.16b; #define ROTATE4_16(dst1,dst2,dst3,dst4,src1,src2,src3,src4,iop1) \ rev32 dst1.8h, src1.8h; \ rev32 dst2.8h, src2.8h; \ iop1; \ rev32 dst3.8h, src3.8h; \ rev32 dst4.8h, src4.8h; #define QUARTERROUND4(a1,b1,c1,d1,a2,b2,c2,d2,a3,b3,c3,d3,a4,b4,c4,d4,ign,tmp1,tmp2,tmp3,tmp4,\ iop1,iop2,iop3,iop4,iop5,iop6,iop7,iop8,iop9,iop10,iop11,iop12,iop13,iop14,\ iop15,iop16,iop17,iop18,iop19,iop20,iop21,iop22,iop23,iop24,iop25,iop26,\ iop27,iop28,iop29) \ PLUS(a1,b1); PLUS(a2,b2); iop1; \ PLUS(a3,b3); PLUS(a4,b4); iop2; \ XOR(tmp1,d1,a1); XOR(tmp2,d2,a2); iop3; \ XOR(tmp3,d3,a3); XOR(tmp4,d4,a4); iop4; \ ROTATE4_16(d1, d2, d3, d4, tmp1, tmp2, tmp3, tmp4, _(iop5)); \ iop6; \ PLUS(c1,d1); PLUS(c2,d2); iop7; \ PLUS(c3,d3); PLUS(c4,d4); iop8; \ XOR(tmp1,b1,c1); XOR(tmp2,b2,c2); iop9; \ XOR(tmp3,b3,c3); XOR(tmp4,b4,c4); iop10; \ ROTATE4(b1, b2, b3, b4, 12, tmp1, tmp2, tmp3, tmp4, \ _(iop11), _(iop12), _(iop13)); iop14; \ PLUS(a1,b1); PLUS(a2,b2); iop15; \ PLUS(a3,b3); PLUS(a4,b4); iop16; \ XOR(tmp1,d1,a1); XOR(tmp2,d2,a2); iop17; \ XOR(tmp3,d3,a3); XOR(tmp4,d4,a4); iop18; \ ROTATE4_8(d1, d2, d3, d4, tmp1, tmp2, tmp3, tmp4, \ _(iop19), _(iop20), _(iop21)); iop22; \ PLUS(c1,d1); PLUS(c2,d2); iop23; \ PLUS(c3,d3); PLUS(c4,d4); iop24; \ XOR(tmp1,b1,c1); XOR(tmp2,b2,c2); iop25; \ XOR(tmp3,b3,c3); XOR(tmp4,b4,c4); iop26; \ ROTATE4(b1, b2, b3, b4, 7, tmp1, tmp2, tmp3, tmp4, \ _(iop27), _(iop28), _(iop29)); .align 4 .globl _gcry_chacha20_aarch64_blocks4_data_inc_counter _gcry_chacha20_aarch64_blocks4_data_inc_counter: .long 0,1,2,3 .align 4 .globl _gcry_chacha20_aarch64_blocks4_data_rot8 _gcry_chacha20_aarch64_blocks4_data_rot8: .byte 3,0,1,2 .byte 7,4,5,6 .byte 11,8,9,10 .byte 15,12,13,14 .align 3 .globl _gcry_chacha20_aarch64_blocks4 ELF(.type _gcry_chacha20_aarch64_blocks4,%function;) _gcry_chacha20_aarch64_blocks4: /* input: * x0: input * x1: dst * x2: src * x3: nblks (multiple of 4) */ CFI_STARTPROC() GET_DATA_POINTER(CTR, _gcry_chacha20_aarch64_blocks4_data_rot8); add INPUT_CTR, INPUT, #(12*4); ld1 {ROT8.16b}, [CTR]; GET_DATA_POINTER(CTR, 
_gcry_chacha20_aarch64_blocks4_data_inc_counter); mov INPUT_POS, INPUT; ld1 {VCTR.16b}, [CTR]; .Loop4: /* Construct counter vectors X12 and X13 */ ld1 {X15.16b}, [INPUT_CTR]; mov ROUND, #20; ld1 {VTMP1.16b-VTMP3.16b}, [INPUT_POS]; dup X12.4s, X15.s[0]; dup X13.4s, X15.s[1]; ldr CTR, [INPUT_CTR]; add X12.4s, X12.4s, VCTR.4s; dup X0.4s, VTMP1.s[0]; dup X1.4s, VTMP1.s[1]; dup X2.4s, VTMP1.s[2]; dup X3.4s, VTMP1.s[3]; dup X14.4s, X15.s[2]; cmhi VTMP0.4s, VCTR.4s, X12.4s; dup X15.4s, X15.s[3]; add CTR, CTR, #4; /* Update counter */ dup X4.4s, VTMP2.s[0]; dup X5.4s, VTMP2.s[1]; dup X6.4s, VTMP2.s[2]; dup X7.4s, VTMP2.s[3]; sub X13.4s, X13.4s, VTMP0.4s; dup X8.4s, VTMP3.s[0]; dup X9.4s, VTMP3.s[1]; dup X10.4s, VTMP3.s[2]; dup X11.4s, VTMP3.s[3]; mov X12_TMP.16b, X12.16b; mov X13_TMP.16b, X13.16b; str CTR, [INPUT_CTR]; .Lround2: subs ROUND, ROUND, #2 QUARTERROUND4(X0, X4, X8, X12, X1, X5, X9, X13, X2, X6, X10, X14, X3, X7, X11, X15, tmp:=,VTMP0,VTMP1,VTMP2,VTMP3, ,,,,,,,,,,,,,,,,,,,,,,,,,,,,) QUARTERROUND4(X0, X5, X10, X15, X1, X6, X11, X12, X2, X7, X8, X13, X3, X4, X9, X14, tmp:=,VTMP0,VTMP1,VTMP2,VTMP3, ,,,,,,,,,,,,,,,,,,,,,,,,,,,,) b.ne .Lround2; ld1 {VTMP0.16b, VTMP1.16b}, [INPUT_POS], #32; PLUS(X12, X12_TMP); /* INPUT + 12 * 4 + counter */ PLUS(X13, X13_TMP); /* INPUT + 13 * 4 + counter */ dup VTMP2.4s, VTMP0.s[0]; /* INPUT + 0 * 4 */ dup VTMP3.4s, VTMP0.s[1]; /* INPUT + 1 * 4 */ dup X12_TMP.4s, VTMP0.s[2]; /* INPUT + 2 * 4 */ dup X13_TMP.4s, VTMP0.s[3]; /* INPUT + 3 * 4 */ PLUS(X0, VTMP2); PLUS(X1, VTMP3); PLUS(X2, X12_TMP); PLUS(X3, X13_TMP); dup VTMP2.4s, VTMP1.s[0]; /* INPUT + 4 * 4 */ dup VTMP3.4s, VTMP1.s[1]; /* INPUT + 5 * 4 */ dup X12_TMP.4s, VTMP1.s[2]; /* INPUT + 6 * 4 */ dup X13_TMP.4s, VTMP1.s[3]; /* INPUT + 7 * 4 */ ld1 {VTMP0.16b, VTMP1.16b}, [INPUT_POS]; mov INPUT_POS, INPUT; PLUS(X4, VTMP2); PLUS(X5, VTMP3); PLUS(X6, X12_TMP); PLUS(X7, X13_TMP); dup VTMP2.4s, VTMP0.s[0]; /* INPUT + 8 * 4 */ dup VTMP3.4s, VTMP0.s[1]; /* INPUT + 9 * 4 */ dup X12_TMP.4s, VTMP0.s[2]; /* INPUT + 10 * 4 */ dup X13_TMP.4s, VTMP0.s[3]; /* INPUT + 11 * 4 */ dup VTMP0.4s, VTMP1.s[2]; /* INPUT + 14 * 4 */ dup VTMP1.4s, VTMP1.s[3]; /* INPUT + 15 * 4 */ PLUS(X8, VTMP2); PLUS(X9, VTMP3); PLUS(X10, X12_TMP); PLUS(X11, X13_TMP); PLUS(X14, VTMP0); PLUS(X15, VTMP1); transpose_4x4(X0, X1, X2, X3, VTMP0, VTMP1, VTMP2); transpose_4x4(X4, X5, X6, X7, VTMP0, VTMP1, VTMP2); transpose_4x4(X8, X9, X10, X11, VTMP0, VTMP1, VTMP2); transpose_4x4(X12, X13, X14, X15, VTMP0, VTMP1, VTMP2); subs NBLKS, NBLKS, #4; ld1 {VTMP0.16b-VTMP3.16b}, [SRC], #64; ld1 {X12_TMP.16b-X13_TMP.16b}, [SRC], #32; eor VTMP0.16b, X0.16b, VTMP0.16b; eor VTMP1.16b, X4.16b, VTMP1.16b; eor VTMP2.16b, X8.16b, VTMP2.16b; eor VTMP3.16b, X12.16b, VTMP3.16b; eor X12_TMP.16b, X1.16b, X12_TMP.16b; eor X13_TMP.16b, X5.16b, X13_TMP.16b; st1 {VTMP0.16b-VTMP3.16b}, [DST], #64; ld1 {VTMP0.16b-VTMP3.16b}, [SRC], #64; st1 {X12_TMP.16b-X13_TMP.16b}, [DST], #32; ld1 {X12_TMP.16b-X13_TMP.16b}, [SRC], #32; eor VTMP0.16b, X9.16b, VTMP0.16b; eor VTMP1.16b, X13.16b, VTMP1.16b; eor VTMP2.16b, X2.16b, VTMP2.16b; eor VTMP3.16b, X6.16b, VTMP3.16b; eor X12_TMP.16b, X10.16b, X12_TMP.16b; eor X13_TMP.16b, X14.16b, X13_TMP.16b; st1 {VTMP0.16b-VTMP3.16b}, [DST], #64; ld1 {VTMP0.16b-VTMP3.16b}, [SRC], #64; st1 {X12_TMP.16b-X13_TMP.16b}, [DST], #32; eor VTMP0.16b, X3.16b, VTMP0.16b; eor VTMP1.16b, X7.16b, VTMP1.16b; eor VTMP2.16b, X11.16b, VTMP2.16b; eor VTMP3.16b, X15.16b, VTMP3.16b; st1 {VTMP0.16b-VTMP3.16b}, [DST], #64; b.ne .Loop4; /* clear the used vector registers and stack */ 
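Each QUARTERROUND4 invocation above performs the quarter-round for four blocks at once, one block per 32-bit NEON lane, with the iop slots left empty here and filled with scalar Poly1305 work in the stitched ChaCha20-Poly1305 function further below. Per 32-bit word the operation is the standard ChaCha20 quarter-round, sketched here in scalar C (quarterround is an illustrative name):

#include <stdint.h>

#define ROTL32(v, c) (((v) << (c)) | ((v) >> (32 - (c))))

/* One ChaCha20 quarter-round on scalar 32-bit words; QUARTERROUND4 runs
 * this on four blocks in parallel across the vector lanes. */
static void
quarterround (uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d)
{
  *a += *b; *d ^= *a; *d = ROTL32 (*d, 16);
  *c += *d; *b ^= *c; *b = ROTL32 (*b, 12);
  *a += *b; *d ^= *a; *d = ROTL32 (*d, 8);
  *c += *d; *b ^= *c; *b = ROTL32 (*b, 7);
}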
clear(VTMP0); clear(VTMP1); clear(VTMP2); clear(VTMP3); clear(X12_TMP); clear(X13_TMP); clear(X0); clear(X1); clear(X2); clear(X3); clear(X4); clear(X5); clear(X6); clear(X7); clear(X8); clear(X9); clear(X10); clear(X11); clear(X12); clear(X13); clear(X14); clear(X15); eor x0, x0, x0 - ret + ret_spec_stop CFI_ENDPROC() ELF(.size _gcry_chacha20_aarch64_blocks4, .-_gcry_chacha20_aarch64_blocks4;) /********************************************************************** 4-way stitched chacha20-poly1305 **********************************************************************/ .align 3 .globl _gcry_chacha20_poly1305_aarch64_blocks4 ELF(.type _gcry_chacha20_poly1305_aarch64_blocks4,%function;) _gcry_chacha20_poly1305_aarch64_blocks4: /* input: * x0: input * x1: dst * x2: src * x3: nblks (multiple of 4) * x4: poly1305-state * x5: poly1305-src */ CFI_STARTPROC() POLY1305_PUSH_REGS() mov POLY_RSTATE, x4; mov POLY_RSRC, x5; GET_DATA_POINTER(CTR, _gcry_chacha20_aarch64_blocks4_data_rot8); add INPUT_CTR, INPUT, #(12*4); ld1 {ROT8.16b}, [CTR]; GET_DATA_POINTER(CTR, _gcry_chacha20_aarch64_blocks4_data_inc_counter); mov INPUT_POS, INPUT; ld1 {VCTR.16b}, [CTR]; POLY1305_LOAD_STATE() .Loop_poly4: /* Construct counter vectors X12 and X13 */ ld1 {X15.16b}, [INPUT_CTR]; ld1 {VTMP1.16b-VTMP3.16b}, [INPUT_POS]; dup X12.4s, X15.s[0]; dup X13.4s, X15.s[1]; ldr CTR, [INPUT_CTR]; add X12.4s, X12.4s, VCTR.4s; dup X0.4s, VTMP1.s[0]; dup X1.4s, VTMP1.s[1]; dup X2.4s, VTMP1.s[2]; dup X3.4s, VTMP1.s[3]; dup X14.4s, X15.s[2]; cmhi VTMP0.4s, VCTR.4s, X12.4s; dup X15.4s, X15.s[3]; add CTR, CTR, #4; /* Update counter */ dup X4.4s, VTMP2.s[0]; dup X5.4s, VTMP2.s[1]; dup X6.4s, VTMP2.s[2]; dup X7.4s, VTMP2.s[3]; sub X13.4s, X13.4s, VTMP0.4s; dup X8.4s, VTMP3.s[0]; dup X9.4s, VTMP3.s[1]; dup X10.4s, VTMP3.s[2]; dup X11.4s, VTMP3.s[3]; mov X12_TMP.16b, X12.16b; mov X13_TMP.16b, X13.16b; str CTR, [INPUT_CTR]; mov ROUND, #20 .Lround4_with_poly1305_outer: mov POLY_CHACHA_ROUND, #6; .Lround4_with_poly1305_inner1: POLY1305_BLOCK_PART1(0 * 16) QUARTERROUND4(X0, X4, X8, X12, X1, X5, X9, X13, X2, X6, X10, X14, X3, X7, X11, X15, tmp:=,VTMP0,VTMP1,VTMP2,VTMP3, POLY1305_BLOCK_PART2(0 * 16), POLY1305_BLOCK_PART3(), POLY1305_BLOCK_PART4(), POLY1305_BLOCK_PART5(), POLY1305_BLOCK_PART6(), POLY1305_BLOCK_PART7(), POLY1305_BLOCK_PART8(), POLY1305_BLOCK_PART9(), POLY1305_BLOCK_PART10(), POLY1305_BLOCK_PART11(), POLY1305_BLOCK_PART12(), POLY1305_BLOCK_PART13(), POLY1305_BLOCK_PART14(), POLY1305_BLOCK_PART15(), POLY1305_BLOCK_PART16(), POLY1305_BLOCK_PART17(), POLY1305_BLOCK_PART18(), POLY1305_BLOCK_PART19(), POLY1305_BLOCK_PART20(), POLY1305_BLOCK_PART21(), POLY1305_BLOCK_PART22(), POLY1305_BLOCK_PART23(), POLY1305_BLOCK_PART24(), POLY1305_BLOCK_PART25(), POLY1305_BLOCK_PART26(), POLY1305_BLOCK_PART27(), POLY1305_BLOCK_PART28(), POLY1305_BLOCK_PART29(), POLY1305_BLOCK_PART1(1 * 16)) POLY1305_BLOCK_PART2(1 * 16) QUARTERROUND4(X0, X5, X10, X15, X1, X6, X11, X12, X2, X7, X8, X13, X3, X4, X9, X14, tmp:=,VTMP0,VTMP1,VTMP2,VTMP3, _(add POLY_RSRC, POLY_RSRC, #(2*16)), POLY1305_BLOCK_PART3(), POLY1305_BLOCK_PART4(), POLY1305_BLOCK_PART5(), POLY1305_BLOCK_PART6(), POLY1305_BLOCK_PART7(), POLY1305_BLOCK_PART8(), POLY1305_BLOCK_PART9(), POLY1305_BLOCK_PART10(), POLY1305_BLOCK_PART11(), POLY1305_BLOCK_PART12(), POLY1305_BLOCK_PART13(), POLY1305_BLOCK_PART14(), POLY1305_BLOCK_PART15(), POLY1305_BLOCK_PART16(), POLY1305_BLOCK_PART17(), POLY1305_BLOCK_PART18(), POLY1305_BLOCK_PART19(), POLY1305_BLOCK_PART20(), POLY1305_BLOCK_PART21(), POLY1305_BLOCK_PART22(), 
POLY1305_BLOCK_PART23(), POLY1305_BLOCK_PART24(), POLY1305_BLOCK_PART25(), POLY1305_BLOCK_PART26(), POLY1305_BLOCK_PART27(), POLY1305_BLOCK_PART28(), POLY1305_BLOCK_PART29(), _(subs POLY_CHACHA_ROUND, POLY_CHACHA_ROUND, #2)); b.ne .Lround4_with_poly1305_inner1; mov POLY_CHACHA_ROUND, #4; .Lround4_with_poly1305_inner2: POLY1305_BLOCK_PART1(0 * 16) QUARTERROUND4(X0, X4, X8, X12, X1, X5, X9, X13, X2, X6, X10, X14, X3, X7, X11, X15, tmp:=,VTMP0,VTMP1,VTMP2,VTMP3,, POLY1305_BLOCK_PART2(0 * 16),, _(add POLY_RSRC, POLY_RSRC, #(1*16)),, POLY1305_BLOCK_PART3(),, POLY1305_BLOCK_PART4(),, POLY1305_BLOCK_PART5(),, POLY1305_BLOCK_PART6(),, POLY1305_BLOCK_PART7(),, POLY1305_BLOCK_PART8(),, POLY1305_BLOCK_PART9(),, POLY1305_BLOCK_PART10(),, POLY1305_BLOCK_PART11(),, POLY1305_BLOCK_PART12(),, POLY1305_BLOCK_PART13(),, POLY1305_BLOCK_PART14(),) POLY1305_BLOCK_PART15() QUARTERROUND4(X0, X5, X10, X15, X1, X6, X11, X12, X2, X7, X8, X13, X3, X4, X9, X14, tmp:=,VTMP0,VTMP1,VTMP2,VTMP3, POLY1305_BLOCK_PART16(),, POLY1305_BLOCK_PART17(),, POLY1305_BLOCK_PART18(),, POLY1305_BLOCK_PART19(),, POLY1305_BLOCK_PART20(),, POLY1305_BLOCK_PART21(),, POLY1305_BLOCK_PART22(),, POLY1305_BLOCK_PART23(),, POLY1305_BLOCK_PART24(),, POLY1305_BLOCK_PART25(),, POLY1305_BLOCK_PART26(),, POLY1305_BLOCK_PART27(),, POLY1305_BLOCK_PART28(),, POLY1305_BLOCK_PART29(), _(subs POLY_CHACHA_ROUND, POLY_CHACHA_ROUND, #2),) b.ne .Lround4_with_poly1305_inner2; subs ROUND, ROUND, #10 b.ne .Lround4_with_poly1305_outer; ld1 {VTMP0.16b, VTMP1.16b}, [INPUT_POS], #32; PLUS(X12, X12_TMP); /* INPUT + 12 * 4 + counter */ PLUS(X13, X13_TMP); /* INPUT + 13 * 4 + counter */ dup VTMP2.4s, VTMP0.s[0]; /* INPUT + 0 * 4 */ dup VTMP3.4s, VTMP0.s[1]; /* INPUT + 1 * 4 */ dup X12_TMP.4s, VTMP0.s[2]; /* INPUT + 2 * 4 */ dup X13_TMP.4s, VTMP0.s[3]; /* INPUT + 3 * 4 */ PLUS(X0, VTMP2); PLUS(X1, VTMP3); PLUS(X2, X12_TMP); PLUS(X3, X13_TMP); dup VTMP2.4s, VTMP1.s[0]; /* INPUT + 4 * 4 */ dup VTMP3.4s, VTMP1.s[1]; /* INPUT + 5 * 4 */ dup X12_TMP.4s, VTMP1.s[2]; /* INPUT + 6 * 4 */ dup X13_TMP.4s, VTMP1.s[3]; /* INPUT + 7 * 4 */ ld1 {VTMP0.16b, VTMP1.16b}, [INPUT_POS]; mov INPUT_POS, INPUT; PLUS(X4, VTMP2); PLUS(X5, VTMP3); PLUS(X6, X12_TMP); PLUS(X7, X13_TMP); dup VTMP2.4s, VTMP0.s[0]; /* INPUT + 8 * 4 */ dup VTMP3.4s, VTMP0.s[1]; /* INPUT + 9 * 4 */ dup X12_TMP.4s, VTMP0.s[2]; /* INPUT + 10 * 4 */ dup X13_TMP.4s, VTMP0.s[3]; /* INPUT + 11 * 4 */ dup VTMP0.4s, VTMP1.s[2]; /* INPUT + 14 * 4 */ dup VTMP1.4s, VTMP1.s[3]; /* INPUT + 15 * 4 */ PLUS(X8, VTMP2); PLUS(X9, VTMP3); PLUS(X10, X12_TMP); PLUS(X11, X13_TMP); PLUS(X14, VTMP0); PLUS(X15, VTMP1); transpose_4x4(X0, X1, X2, X3, VTMP0, VTMP1, VTMP2); transpose_4x4(X4, X5, X6, X7, VTMP0, VTMP1, VTMP2); transpose_4x4(X8, X9, X10, X11, VTMP0, VTMP1, VTMP2); transpose_4x4(X12, X13, X14, X15, VTMP0, VTMP1, VTMP2); subs NBLKS, NBLKS, #4; ld1 {VTMP0.16b-VTMP3.16b}, [SRC], #64; ld1 {X12_TMP.16b-X13_TMP.16b}, [SRC], #32; eor VTMP0.16b, X0.16b, VTMP0.16b; eor VTMP1.16b, X4.16b, VTMP1.16b; eor VTMP2.16b, X8.16b, VTMP2.16b; eor VTMP3.16b, X12.16b, VTMP3.16b; eor X12_TMP.16b, X1.16b, X12_TMP.16b; eor X13_TMP.16b, X5.16b, X13_TMP.16b; st1 {VTMP0.16b-VTMP3.16b}, [DST], #64; ld1 {VTMP0.16b-VTMP3.16b}, [SRC], #64; st1 {X12_TMP.16b-X13_TMP.16b}, [DST], #32; ld1 {X12_TMP.16b-X13_TMP.16b}, [SRC], #32; eor VTMP0.16b, X9.16b, VTMP0.16b; eor VTMP1.16b, X13.16b, VTMP1.16b; eor VTMP2.16b, X2.16b, VTMP2.16b; eor VTMP3.16b, X6.16b, VTMP3.16b; eor X12_TMP.16b, X10.16b, X12_TMP.16b; eor X13_TMP.16b, X14.16b, X13_TMP.16b; st1 {VTMP0.16b-VTMP3.16b}, 
[DST], #64; ld1 {VTMP0.16b-VTMP3.16b}, [SRC], #64; st1 {X12_TMP.16b-X13_TMP.16b}, [DST], #32; eor VTMP0.16b, X3.16b, VTMP0.16b; eor VTMP1.16b, X7.16b, VTMP1.16b; eor VTMP2.16b, X11.16b, VTMP2.16b; eor VTMP3.16b, X15.16b, VTMP3.16b; st1 {VTMP0.16b-VTMP3.16b}, [DST], #64; b.ne .Loop_poly4; POLY1305_STORE_STATE() /* clear the used vector registers and stack */ clear(VTMP0); clear(VTMP1); clear(VTMP2); clear(VTMP3); clear(X12_TMP); clear(X13_TMP); clear(X0); clear(X1); clear(X2); clear(X3); clear(X4); clear(X5); clear(X6); clear(X7); clear(X8); clear(X9); clear(X10); clear(X11); clear(X12); clear(X13); clear(X14); clear(X15); eor x0, x0, x0 POLY1305_POP_REGS() - ret + ret_spec_stop CFI_ENDPROC() ELF(.size _gcry_chacha20_poly1305_aarch64_blocks4, .-_gcry_chacha20_poly1305_aarch64_blocks4;) #endif diff --git a/cipher/cipher-gcm-armv8-aarch64-ce.S b/cipher/cipher-gcm-armv8-aarch64-ce.S index 2c619f9b..e6714249 100644 --- a/cipher/cipher-gcm-armv8-aarch64-ce.S +++ b/cipher/cipher-gcm-armv8-aarch64-ce.S @@ -1,652 +1,652 @@ /* cipher-gcm-armv8-aarch64-ce.S - ARM/CE accelerated GHASH * Copyright (C) 2016 Jussi Kivilinna * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . */ #include "asm-common-aarch64.h" #if defined(__AARCH64EL__) && \ defined(HAVE_COMPATIBLE_GCC_AARCH64_PLATFORM_AS) && \ defined(HAVE_GCC_INLINE_ASM_AARCH64_CRYPTO) .cpu generic+simd+crypto .text /* Constants */ .align 4 gcry_gcm_reduction_constant: .Lrconst: .quad 0x87 /* Register macros */ #define rhash v0 #define rr0 v1 #define rr1 v2 #define rbuf v3 #define rbuf1 v4 #define rbuf2 v5 #define rbuf3 v6 #define rbuf4 v7 #define rbuf5 v8 #define rr2 v9 #define rr3 v10 #define rr4 v11 #define rr5 v12 #define rr6 v13 #define rr7 v14 #define rr8 v15 #define rr9 v16 #define rrconst v18 #define rh1 v19 #define rh2 v20 #define rh3 v21 #define rh4 v22 #define rh5 v23 #define rh6 v24 #define t0 v25 #define t1 v26 #define t2 v27 #define t3 v28 #define t4 v29 #define t5 v30 #define vZZ v31 /* GHASH macros */ /* See "Gouvêa, C. P. L. & López, J. Implementing GCM on ARMv8. Topics in * Cryptology — CT-RSA 2015" for details. 
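The PMUL_128x128 macros below assemble a full 128x128-bit carry-less multiplication from four 64x64-bit pmull/pmull2 products (low*low, high*high, and the two cross terms via the ext-swapped copy of b), and REDUCTION folds the 256-bit result modulo the GHASH polynomial x^128 + x^7 + x^2 + x + 1, whose low part is the 0x87 constant defined above. Each 64x64 product behaves like this scalar model (clmul64 is an illustrative helper, not a library function):

#include <stdint.h>

typedef unsigned __int128 u128;

/* Carry-less (polynomial) multiply of two 64-bit values over GF(2),
 * i.e. what a single pmull/pmull2 instruction computes per lane. */
static u128
clmul64 (uint64_t a, uint64_t b)
{
  u128 r = 0;
  int i;
  for (i = 0; i < 64; i++)
    if ((b >> i) & 1)
      r ^= (u128)a << i;
  return r;
}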
*/ /* Input: 'a' and 'b', Output: 'r0:r1' (low 128-bits in r0, high in r1) */ #define PMUL_128x128(r0, r1, a, b, T0, T1, interleave_op) \ ext T0.16b, b.16b, b.16b, #8; \ pmull r0.1q, a.1d, b.1d; \ pmull2 r1.1q, a.2d, b.2d; \ pmull T1.1q, a.1d, T0.1d; \ pmull2 T0.1q, a.2d, T0.2d; \ interleave_op; \ eor T0.16b, T0.16b, T1.16b; \ ext T1.16b, vZZ.16b, T0.16b, #8; \ ext T0.16b, T0.16b, vZZ.16b, #8; \ eor r0.16b, r0.16b, T1.16b; \ eor r1.16b, r1.16b, T0.16b; /* Input: 'aA' and 'bA', Output: 'r0A:r1A' (low 128-bits in r0A, high in r1A) * Input: 'aB' and 'bB', Output: 'r0B:r1B' (low 128-bits in r0B, high in r1B) * Input: 'aC' and 'bC', Output: 'r0C:r1C' (low 128-bits in r0C, high in r1C) */ #define PMUL_128x128_3(r0A, r1A, aA, bA, t0A, t1A, \ r0B, r1B, aB, bB, t0B, t1B, \ r0C, r1C, aC, bC, t0C, t1C, interleave_op) \ ext t0A.16b, bA.16b, bA.16b, #8; \ pmull r0A.1q, aA.1d, bA.1d; \ pmull2 r1A.1q, aA.2d, bA.2d; \ ext t0B.16b, bB.16b, bB.16b, #8; \ pmull r0B.1q, aB.1d, bB.1d; \ pmull2 r1B.1q, aB.2d, bB.2d; \ ext t0C.16b, bC.16b, bC.16b, #8; \ pmull r0C.1q, aC.1d, bC.1d; \ pmull2 r1C.1q, aC.2d, bC.2d; \ pmull t1A.1q, aA.1d, t0A.1d; \ pmull2 t0A.1q, aA.2d, t0A.2d; \ pmull t1B.1q, aB.1d, t0B.1d; \ pmull2 t0B.1q, aB.2d, t0B.2d; \ pmull t1C.1q, aC.1d, t0C.1d; \ pmull2 t0C.1q, aC.2d, t0C.2d; \ eor t0A.16b, t0A.16b, t1A.16b; \ eor t0B.16b, t0B.16b, t1B.16b; \ eor t0C.16b, t0C.16b, t1C.16b; \ interleave_op; \ ext t1A.16b, vZZ.16b, t0A.16b, #8; \ ext t0A.16b, t0A.16b, vZZ.16b, #8; \ ext t1B.16b, vZZ.16b, t0B.16b, #8; \ ext t0B.16b, t0B.16b, vZZ.16b, #8; \ ext t1C.16b, vZZ.16b, t0C.16b, #8; \ ext t0C.16b, t0C.16b, vZZ.16b, #8; \ eor r0A.16b, r0A.16b, t1A.16b; \ eor r1A.16b, r1A.16b, t0A.16b; \ eor r0B.16b, r0B.16b, t1B.16b; \ eor r1B.16b, r1B.16b, t0B.16b; \ eor r0C.16b, r0C.16b, t1C.16b; \ eor r1C.16b, r1C.16b, t0C.16b; \ /* Input: 'r0:r1', Output: 'a' */ #define REDUCTION(a, r0, r1, rconst, T0, T1, interleave_op1, interleave_op2, \ interleave_op3) \ pmull2 T0.1q, r1.2d, rconst.2d; \ interleave_op1; \ ext T1.16b, T0.16b, vZZ.16b, #8; \ ext T0.16b, vZZ.16b, T0.16b, #8; \ interleave_op2; \ eor r1.16b, r1.16b, T1.16b; \ eor r0.16b, r0.16b, T0.16b; \ pmull T0.1q, r1.1d, rconst.1d; \ interleave_op3; \ eor a.16b, r0.16b, T0.16b; /* Other functional macros */ #define _(...) 
__VA_ARGS__ #define __ _() #define CLEAR_REG(reg) movi reg.16b, #0; #define VPUSH_ABI \ stp d8, d9, [sp, #-16]!; \ CFI_ADJUST_CFA_OFFSET(16); \ stp d10, d11, [sp, #-16]!; \ CFI_ADJUST_CFA_OFFSET(16); \ stp d12, d13, [sp, #-16]!; \ CFI_ADJUST_CFA_OFFSET(16); \ stp d14, d15, [sp, #-16]!; \ CFI_ADJUST_CFA_OFFSET(16); #define VPOP_ABI \ ldp d14, d15, [sp], #16; \ CFI_ADJUST_CFA_OFFSET(-16); \ ldp d12, d13, [sp], #16; \ CFI_ADJUST_CFA_OFFSET(-16); \ ldp d10, d11, [sp], #16; \ CFI_ADJUST_CFA_OFFSET(-16); \ ldp d8, d9, [sp], #16; \ CFI_ADJUST_CFA_OFFSET(-16); /* * unsigned int _gcry_ghash_armv8_ce_pmull (void *gcm_key, byte *result, * const byte *buf, size_t nblocks, * void *gcm_table); */ .align 3 .globl _gcry_ghash_armv8_ce_pmull ELF(.type _gcry_ghash_armv8_ce_pmull,%function;) _gcry_ghash_armv8_ce_pmull: /* input: * x0: gcm_key * x1: result/hash * x2: buf * x3: nblocks * x4: gcm_table */ CFI_STARTPROC(); cbz x3, .Ldo_nothing; GET_DATA_POINTER(x5, .Lrconst) eor vZZ.16b, vZZ.16b, vZZ.16b ld1 {rhash.16b}, [x1] ld1 {rh1.16b}, [x0] rbit rhash.16b, rhash.16b /* bit-swap */ ld1r {rrconst.2d}, [x5] cmp x3, #6 b.lo .Less_than_6 add x6, x4, #64 VPUSH_ABI ld1 {rh2.16b-rh5.16b}, [x4] ld1 {rh6.16b}, [x6] sub x3, x3, #6 ld1 {rbuf.16b-rbuf2.16b}, [x2], #(3*16) ld1 {rbuf3.16b-rbuf5.16b}, [x2], #(3*16) rbit rbuf.16b, rbuf.16b /* bit-swap */ rbit rbuf1.16b, rbuf1.16b /* bit-swap */ rbit rbuf2.16b, rbuf2.16b /* bit-swap */ rbit rbuf3.16b, rbuf3.16b /* bit-swap */ rbit rbuf4.16b, rbuf4.16b /* bit-swap */ rbit rbuf5.16b, rbuf5.16b /* bit-swap */ eor rhash.16b, rhash.16b, rbuf.16b cmp x3, #6 b.lo .Lend_6 .Loop_6: /* (in1) * H⁵ => rr0:rr1 */ /* (in2) * H⁴ => rr2:rr3 */ /* (in0 ^ hash) * H⁶ => rr4:rr5 */ PMUL_128x128_3(rr0, rr1, rbuf1, rh5, t0, t1, rr2, rr3, rbuf2, rh4, t2, t3, rr4, rr5, rhash, rh6, t4, t5, _(sub x3, x3, #6)) ld1 {rbuf.16b-rbuf2.16b}, [x2], #(3*16) cmp x3, #6 eor rr0.16b, rr0.16b, rr2.16b eor rr1.16b, rr1.16b, rr3.16b /* (in3) * H³ => rr2:rr3 */ /* (in4) * H² => rr6:rr7 */ /* (in5) * H¹ => rr8:rr9 */ PMUL_128x128_3(rr2, rr3, rbuf3, rh3, t0, t1, rr6, rr7, rbuf4, rh2, t2, t3, rr8, rr9, rbuf5, rh1, t4, t5, _(eor rr0.16b, rr0.16b, rr4.16b; eor rr1.16b, rr1.16b, rr5.16b)) eor rr0.16b, rr0.16b, rr2.16b eor rr1.16b, rr1.16b, rr3.16b rbit rbuf.16b, rbuf.16b eor rr0.16b, rr0.16b, rr6.16b eor rr1.16b, rr1.16b, rr7.16b rbit rbuf1.16b, rbuf1.16b eor rr0.16b, rr0.16b, rr8.16b eor rr1.16b, rr1.16b, rr9.16b ld1 {rbuf3.16b-rbuf5.16b}, [x2], #(3*16) REDUCTION(rhash, rr0, rr1, rrconst, t0, t1, _(rbit rbuf2.16b, rbuf2.16b), _(rbit rbuf3.16b, rbuf3.16b), _(rbit rbuf4.16b, rbuf4.16b)) rbit rbuf5.16b, rbuf5.16b eor rhash.16b, rhash.16b, rbuf.16b b.hs .Loop_6 .Lend_6: /* (in1) * H⁵ => rr0:rr1 */ /* (in0 ^ hash) * H⁶ => rr2:rr3 */ /* (in2) * H⁴ => rr4:rr5 */ PMUL_128x128_3(rr0, rr1, rbuf1, rh5, t0, t1, rr2, rr3, rhash, rh6, t2, t3, rr4, rr5, rbuf2, rh4, t4, t5, __) eor rr0.16b, rr0.16b, rr2.16b eor rr1.16b, rr1.16b, rr3.16b eor rr0.16b, rr0.16b, rr4.16b eor rr1.16b, rr1.16b, rr5.16b /* (in3) * H³ => rhash:rbuf */ /* (in4) * H² => rr6:rr7 */ /* (in5) * H¹ => rr8:rr9 */ PMUL_128x128_3(rhash, rbuf, rbuf3, rh3, t0, t1, rr6, rr7, rbuf4, rh2, t2, t3, rr8, rr9, rbuf5, rh1, t4, t5, _(CLEAR_REG(rh4); CLEAR_REG(rh5); CLEAR_REG(rh6))) eor rr0.16b, rr0.16b, rhash.16b eor rr1.16b, rr1.16b, rbuf.16b eor rr0.16b, rr0.16b, rr6.16b eor rr1.16b, rr1.16b, rr7.16b eor rr0.16b, rr0.16b, rr8.16b eor rr1.16b, rr1.16b, rr9.16b REDUCTION(rhash, rr0, rr1, rrconst, t0, t1, _(CLEAR_REG(rh2); CLEAR_REG(rh3); CLEAR_REG(rr2); CLEAR_REG(rbuf2); 
CLEAR_REG(rbuf3)), _(CLEAR_REG(rr3); CLEAR_REG(rr4); CLEAR_REG(rr5); CLEAR_REG(rr6); CLEAR_REG(rr7)), _(CLEAR_REG(rr8); CLEAR_REG(rr9); CLEAR_REG(rbuf1); CLEAR_REG(rbuf2))) CLEAR_REG(rbuf4) CLEAR_REG(rbuf5) CLEAR_REG(t2) CLEAR_REG(t3) CLEAR_REG(t4) CLEAR_REG(t5) VPOP_ABI cbz x3, .Ldone .Less_than_6: /* Handle remaining blocks. */ ld1 {rbuf.16b}, [x2], #16 sub x3, x3, #1 rbit rbuf.16b, rbuf.16b /* bit-swap */ eor rhash.16b, rhash.16b, rbuf.16b cbz x3, .Lend .Loop: PMUL_128x128(rr0, rr1, rh1, rhash, t0, t1, _(ld1 {rbuf.16b}, [x2], #16)) REDUCTION(rhash, rr0, rr1, rrconst, t0, t1, _(sub x3, x3, #1), _(rbit rbuf.16b, rbuf.16b), __) eor rhash.16b, rhash.16b, rbuf.16b cbnz x3, .Loop .Lend: PMUL_128x128(rr0, rr1, rh1, rhash, t0, t1, _(CLEAR_REG(rbuf))) REDUCTION(rhash, rr0, rr1, rrconst, t0, t1, __, _(CLEAR_REG(rh1)), __) .Ldone: CLEAR_REG(rr1) CLEAR_REG(rr0) rbit rhash.16b, rhash.16b /* bit-swap */ CLEAR_REG(t0) CLEAR_REG(t1) st1 {rhash.2d}, [x1] CLEAR_REG(rhash) .Ldo_nothing: mov x0, #0 - ret + ret_spec_stop CFI_ENDPROC() ELF(.size _gcry_ghash_armv8_ce_pmull,.-_gcry_ghash_armv8_ce_pmull;) /* * unsigned int _gcry_polyval_armv8_ce_pmull (void *gcm_key, byte *result, * const byte *buf, size_t nblocks, * void *gcm_table); */ .align 3 .globl _gcry_polyval_armv8_ce_pmull ELF(.type _gcry_polyval_armv8_ce_pmull,%function;) _gcry_polyval_armv8_ce_pmull: /* input: * x0: gcm_key * x1: result/hash * x2: buf * x3: nblocks * x4: gcm_table */ CFI_STARTPROC(); cbz x3, .Lpolyval_do_nothing; GET_DATA_POINTER(x5, .Lrconst) eor vZZ.16b, vZZ.16b, vZZ.16b ld1 {rhash.16b}, [x1] ld1 {rh1.16b}, [x0] rbit rhash.16b, rhash.16b /* bit-swap */ ld1r {rrconst.2d}, [x5] cmp x3, #6 b.lo .Lpolyval_less_than_6 add x6, x4, #64 VPUSH_ABI ld1 {rh2.16b-rh5.16b}, [x4] ld1 {rh6.16b}, [x6] sub x3, x3, #6 ld1 {rbuf.16b-rbuf2.16b}, [x2], #(3*16) ld1 {rbuf3.16b-rbuf5.16b}, [x2], #(3*16) rev64 rbuf.16b, rbuf.16b /* byte-swap */ rev64 rbuf1.16b, rbuf1.16b /* byte-swap */ rev64 rbuf2.16b, rbuf2.16b /* byte-swap */ rev64 rbuf3.16b, rbuf3.16b /* byte-swap */ rev64 rbuf4.16b, rbuf4.16b /* byte-swap */ rev64 rbuf5.16b, rbuf5.16b /* byte-swap */ ext rbuf.16b, rbuf.16b, rbuf.16b, #8 /* byte-swap */ ext rbuf1.16b, rbuf1.16b, rbuf1.16b, #8 /* byte-swap */ ext rbuf2.16b, rbuf2.16b, rbuf2.16b, #8 /* byte-swap */ ext rbuf3.16b, rbuf3.16b, rbuf3.16b, #8 /* byte-swap */ ext rbuf4.16b, rbuf4.16b, rbuf4.16b, #8 /* byte-swap */ ext rbuf5.16b, rbuf5.16b, rbuf5.16b, #8 /* byte-swap */ rbit rbuf.16b, rbuf.16b /* bit-swap */ rbit rbuf1.16b, rbuf1.16b /* bit-swap */ rbit rbuf2.16b, rbuf2.16b /* bit-swap */ rbit rbuf3.16b, rbuf3.16b /* bit-swap */ rbit rbuf4.16b, rbuf4.16b /* bit-swap */ rbit rbuf5.16b, rbuf5.16b /* bit-swap */ eor rhash.16b, rhash.16b, rbuf.16b cmp x3, #6 b.lo .Lpolyval_end_6 .Lpolyval_loop_6: /* (in1) * H⁵ => rr0:rr1 */ /* (in2) * H⁴ => rr2:rr3 */ /* (in0 ^ hash) * H⁶ => rr4:rr5 */ PMUL_128x128_3(rr0, rr1, rbuf1, rh5, t0, t1, rr2, rr3, rbuf2, rh4, t2, t3, rr4, rr5, rhash, rh6, t4, t5, _(sub x3, x3, #6)) ld1 {rbuf.16b-rbuf2.16b}, [x2], #(3*16) cmp x3, #6 eor rr0.16b, rr0.16b, rr2.16b eor rr1.16b, rr1.16b, rr3.16b /* (in3) * H³ => rr2:rr3 */ /* (in4) * H² => rr6:rr7 */ /* (in5) * H¹ => rr8:rr9 */ PMUL_128x128_3(rr2, rr3, rbuf3, rh3, t0, t1, rr6, rr7, rbuf4, rh2, t2, t3, rr8, rr9, rbuf5, rh1, t4, t5, _(eor rr0.16b, rr0.16b, rr4.16b; eor rr1.16b, rr1.16b, rr5.16b)) rev64 rbuf.16b, rbuf.16b /* byte-swap */ rev64 rbuf1.16b, rbuf1.16b /* byte-swap */ rev64 rbuf2.16b, rbuf2.16b /* byte-swap */ ext rbuf.16b, rbuf.16b, rbuf.16b, #8 /* byte-swap */ ext 
rbuf1.16b, rbuf1.16b, rbuf1.16b, #8 /* byte-swap */ ext rbuf2.16b, rbuf2.16b, rbuf2.16b, #8 /* byte-swap */ eor rr0.16b, rr0.16b, rr2.16b eor rr1.16b, rr1.16b, rr3.16b rbit rbuf.16b, rbuf.16b /* bit-swap */ eor rr0.16b, rr0.16b, rr6.16b eor rr1.16b, rr1.16b, rr7.16b rbit rbuf1.16b, rbuf1.16b /* bit-swap */ eor rr0.16b, rr0.16b, rr8.16b eor rr1.16b, rr1.16b, rr9.16b ld1 {rbuf3.16b-rbuf5.16b}, [x2], #(3*16) REDUCTION(rhash, rr0, rr1, rrconst, t0, t1, _(rbit rbuf2.16b, rbuf2.16b), /* bit-swap */ _(rev64 rbuf3.16b, rbuf3.16b), /* byte-swap */ _(rev64 rbuf4.16b, rbuf4.16b)) /* byte-swap */ rev64 rbuf5.16b, rbuf5.16b /* byte-swap */ ext rbuf3.16b, rbuf3.16b, rbuf3.16b, #8 /* byte-swap */ eor rhash.16b, rhash.16b, rbuf.16b ext rbuf4.16b, rbuf4.16b, rbuf4.16b, #8 /* byte-swap */ ext rbuf5.16b, rbuf5.16b, rbuf5.16b, #8 /* byte-swap */ rbit rbuf3.16b, rbuf3.16b /* bit-swap */ rbit rbuf4.16b, rbuf4.16b /* bit-swap */ rbit rbuf5.16b, rbuf5.16b /* bit-swap */ b.hs .Lpolyval_loop_6 .Lpolyval_end_6: /* (in1) * H⁵ => rr0:rr1 */ /* (in0 ^ hash) * H⁶ => rr2:rr3 */ /* (in2) * H⁴ => rr4:rr5 */ PMUL_128x128_3(rr0, rr1, rbuf1, rh5, t0, t1, rr2, rr3, rhash, rh6, t2, t3, rr4, rr5, rbuf2, rh4, t4, t5, __) eor rr0.16b, rr0.16b, rr2.16b eor rr1.16b, rr1.16b, rr3.16b eor rr0.16b, rr0.16b, rr4.16b eor rr1.16b, rr1.16b, rr5.16b /* (in3) * H³ => rhash:rbuf */ /* (in4) * H² => rr6:rr7 */ /* (in5) * H¹ => rr8:rr9 */ PMUL_128x128_3(rhash, rbuf, rbuf3, rh3, t0, t1, rr6, rr7, rbuf4, rh2, t2, t3, rr8, rr9, rbuf5, rh1, t4, t5, _(CLEAR_REG(rh4); CLEAR_REG(rh5); CLEAR_REG(rh6))) eor rr0.16b, rr0.16b, rhash.16b eor rr1.16b, rr1.16b, rbuf.16b eor rr0.16b, rr0.16b, rr6.16b eor rr1.16b, rr1.16b, rr7.16b eor rr0.16b, rr0.16b, rr8.16b eor rr1.16b, rr1.16b, rr9.16b REDUCTION(rhash, rr0, rr1, rrconst, t0, t1, _(CLEAR_REG(rh2); CLEAR_REG(rh3); CLEAR_REG(rr2); CLEAR_REG(rbuf2); CLEAR_REG(rbuf3)), _(CLEAR_REG(rr3); CLEAR_REG(rr4); CLEAR_REG(rr5); CLEAR_REG(rr6); CLEAR_REG(rr7)), _(CLEAR_REG(rr8); CLEAR_REG(rr9); CLEAR_REG(rbuf1); CLEAR_REG(rbuf2))) CLEAR_REG(rbuf4) CLEAR_REG(rbuf5) CLEAR_REG(t2) CLEAR_REG(t3) CLEAR_REG(t4) CLEAR_REG(t5) VPOP_ABI cbz x3, .Lpolyval_done .Lpolyval_less_than_6: /* Handle remaining blocks. 
*/ ld1 {rbuf.16b}, [x2], #16 sub x3, x3, #1 rev64 rbuf.16b, rbuf.16b /* byte-swap */ ext rbuf.16b, rbuf.16b, rbuf.16b, #8 /* byte-swap */ rbit rbuf.16b, rbuf.16b /* bit-swap */ eor rhash.16b, rhash.16b, rbuf.16b cbz x3, .Lpolyval_end .Lpolyval_loop: PMUL_128x128(rr0, rr1, rh1, rhash, t0, t1, _(ld1 {rbuf.16b}, [x2], #16)) REDUCTION(rhash, rr0, rr1, rrconst, t0, t1, _(sub x3, x3, #1; rev64 rbuf.16b, rbuf.16b), /* byte-swap */ _(ext rbuf.16b, rbuf.16b, rbuf.16b, #8), /* byte-swap */ _(rbit rbuf.16b, rbuf.16b)) /* bit-swap */ eor rhash.16b, rhash.16b, rbuf.16b cbnz x3, .Lpolyval_loop .Lpolyval_end: PMUL_128x128(rr0, rr1, rh1, rhash, t0, t1, _(CLEAR_REG(rbuf))) REDUCTION(rhash, rr0, rr1, rrconst, t0, t1, __, _(CLEAR_REG(rh1)), __) .Lpolyval_done: CLEAR_REG(rr1) CLEAR_REG(rr0) rbit rhash.16b, rhash.16b /* bit-swap */ CLEAR_REG(t0) CLEAR_REG(t1) st1 {rhash.2d}, [x1] CLEAR_REG(rhash) .Lpolyval_do_nothing: mov x0, #0 - ret + ret_spec_stop CFI_ENDPROC() ELF(.size _gcry_polyval_armv8_ce_pmull,.-_gcry_polyval_armv8_ce_pmull;) /* * void _gcry_ghash_setup_armv8_ce_pmull (void *gcm_key, void *gcm_table); */ .align 3 .globl _gcry_ghash_setup_armv8_ce_pmull ELF(.type _gcry_ghash_setup_armv8_ce_pmull,%function;) _gcry_ghash_setup_armv8_ce_pmull: /* input: * x0: gcm_key * x1: gcm_table */ CFI_STARTPROC() GET_DATA_POINTER(x2, .Lrconst) eor vZZ.16b, vZZ.16b, vZZ.16b /* H¹ */ ld1 {rh1.16b}, [x0] rbit rh1.16b, rh1.16b st1 {rh1.16b}, [x0] ld1r {rrconst.2d}, [x2] /* H² */ PMUL_128x128(rr0, rr1, rh1, rh1, t0, t1, __) REDUCTION(rh2, rr0, rr1, rrconst, t0, t1, __, __, __) /* H³ */ PMUL_128x128(rr0, rr1, rh2, rh1, t0, t1, __) REDUCTION(rh3, rr0, rr1, rrconst, t0, t1, __, __, __) /* H⁴ */ PMUL_128x128(rr0, rr1, rh2, rh2, t0, t1, __) REDUCTION(rh4, rr0, rr1, rrconst, t0, t1, __, __, __) /* H⁵ */ PMUL_128x128(rr0, rr1, rh2, rh3, t0, t1, __) REDUCTION(rh5, rr0, rr1, rrconst, t0, t1, __, __, __) /* H⁶ */ PMUL_128x128(rr0, rr1, rh3, rh3, t0, t1, __) REDUCTION(rh6, rr0, rr1, rrconst, t0, t1, __, __, __) st1 {rh2.16b-rh4.16b}, [x1], #(3*16) st1 {rh5.16b-rh6.16b}, [x1] - ret + ret_spec_stop CFI_ENDPROC() ELF(.size _gcry_ghash_setup_armv8_ce_pmull,.-_gcry_ghash_setup_armv8_ce_pmull;) #endif diff --git a/cipher/crc-armv8-aarch64-ce.S b/cipher/crc-armv8-aarch64-ce.S index 060abdfe..7ac884af 100644 --- a/cipher/crc-armv8-aarch64-ce.S +++ b/cipher/crc-armv8-aarch64-ce.S @@ -1,497 +1,497 @@ /* crc-armv8-aarch64-ce.S - ARMv8/CE PMULL accelerated CRC implementation * Copyright (C) 2019 Jussi Kivilinna * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . 
*/ #include "asm-common-aarch64.h" #if defined(__AARCH64EL__) && \ defined(HAVE_COMPATIBLE_GCC_AARCH64_PLATFORM_AS) && \ defined(HAVE_GCC_INLINE_ASM_AARCH64_CRYPTO) .cpu generic+simd+crypto .text /* Structure of crc32_consts_s */ #define consts_k(idx) ((idx) * 8) #define consts_my_p(idx) (consts_k(6) + (idx) * 8) /* Constants */ .align 6 .Lcrc32_constants: .Lcrc32_partial_fold_input_mask: .byte 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 .byte 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 .Lcrc32_refl_shuf_shift: .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff .byte 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 .byte 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f .Lcrc32_shuf_shift: .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff .Lcrc32_bswap_shuf: .byte 0x0f, 0x0e, 0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x08 .byte 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00 .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff /* * void _gcry_crc32r_armv8_ce_bulk (u32 *pcrc, const byte *inbuf, size_t inlen, * const struct crc32_consts_s *consts); */ .align 3 .globl _gcry_crc32r_armv8_ce_bulk ELF(.type _gcry_crc32r_armv8_ce_bulk,%function;) _gcry_crc32r_armv8_ce_bulk: /* input: * x0: pcrc * x1: inbuf * x2: inlen * x3: consts */ CFI_STARTPROC() GET_DATA_POINTER(x7, .Lcrc32_constants) add x9, x3, #consts_k(5 - 1) cmp x2, #128 b.lo .Lcrc32r_fold_by_one_setup eor v4.16b, v4.16b, v4.16b add x4, x3, #consts_k(1 - 1) ld1 {v4.s}[0], [x0] /* load pcrc */ ld1 {v0.16b-v3.16b}, [x1], #64 /* load 64 bytes of input */ sub x2, x2, #64 ld1 {v6.16b}, [x4] eor v0.16b, v0.16b, v4.16b add x4, x3, #consts_k(3 - 1) add x5, x3, #consts_my_p(0) .Lcrc32r_fold_by_four: /* Fold by 4. */ ld1 {v16.16b-v19.16b}, [x1], #64 /* load 64 bytes of input */ sub x2, x2, #64 pmull v20.1q, v0.1d, v6.1d pmull v21.1q, v1.1d, v6.1d pmull v22.1q, v2.1d, v6.1d pmull v23.1q, v3.1d, v6.1d cmp x2, #64 pmull2 v24.1q, v0.2d, v6.2d pmull2 v25.1q, v1.2d, v6.2d pmull2 v26.1q, v2.2d, v6.2d pmull2 v27.1q, v3.2d, v6.2d eor v0.16b, v20.16b, v16.16b eor v1.16b, v21.16b, v17.16b eor v2.16b, v22.16b, v18.16b eor v3.16b, v23.16b, v19.16b eor v0.16b, v0.16b, v24.16b eor v1.16b, v1.16b, v25.16b eor v2.16b, v2.16b, v26.16b eor v3.16b, v3.16b, v27.16b b.hs .Lcrc32r_fold_by_four ld1 {v6.16b}, [x4] ld1 {v5.16b}, [x5] cmp x2, #16 /* Fold 4 to 1. */ pmull v16.1q, v0.1d, v6.1d pmull2 v4.1q, v0.2d, v6.2d eor v0.16b, v16.16b, v1.16b eor v0.16b, v0.16b, v4.16b pmull v16.1q, v0.1d, v6.1d pmull2 v4.1q, v0.2d, v6.2d eor v0.16b, v16.16b, v2.16b eor v0.16b, v0.16b, v4.16b pmull v16.1q, v0.1d, v6.1d pmull2 v4.1q, v0.2d, v6.2d eor v0.16b, v16.16b, v3.16b eor v0.16b, v0.16b, v4.16b b.lo .Lcrc32r_fold_by_one_done b .Lcrc32r_fold_by_one .Lcrc32r_fold_by_one_setup: eor v1.16b, v1.16b, v1.16b add x4, x3, #consts_k(3 - 1) add x5, x3, #consts_my_p(0) sub x2, x2, #16 ld1 {v1.s}[0], [x0] /* load pcrc */ ld1 {v0.16b}, [x1], #16 /* load 16 bytes of input */ cmp x2, #16 ld1 {v6.16b}, [x4] /* load k3k4 */ ld1 {v5.16b}, [x5] /* load my_p */ eor v0.16b, v0.16b, v1.16b b.lo .Lcrc32r_fold_by_one_done .Lcrc32r_fold_by_one: sub x2, x2, #16 ld1 {v2.16b}, [x1], #16 /* load 16 bytes of input */ pmull v3.1q, v0.1d, v6.1d pmull2 v1.1q, v0.2d, v6.2d cmp x2, #16 eor v0.16b, v3.16b, v2.16b eor v0.16b, v0.16b, v1.16b b.hs .Lcrc32r_fold_by_one .Lcrc32r_fold_by_one_done: cmp x2, #0 b.eq .Lcrc32r_final_fold /* Partial fold. 
*/ add x4, x7, #.Lcrc32_refl_shuf_shift - .Lcrc32_constants add x5, x7, #.Lcrc32_refl_shuf_shift - .Lcrc32_constants + 16 add x6, x7, #.Lcrc32_partial_fold_input_mask - .Lcrc32_constants sub x8, x2, #16 add x4, x4, x2 add x5, x5, x2 add x6, x6, x2 add x8, x1, x8 /* Load last input and add padding zeros. */ ld1 {v4.16b}, [x4] eor x2, x2, x2 ld1 {v3.16b}, [x5] ld1 {v2.16b}, [x6] tbl v30.16b, {v0.16b}, v4.16b ld1 {v4.16b}, [x8] tbl v1.16b, {v0.16b}, v3.16b pmull v0.1q, v30.1d, v6.1d and v2.16b, v2.16b, v4.16b pmull2 v31.1q, v30.2d, v6.2d orr v2.16b, v2.16b, v1.16b eor v0.16b, v0.16b, v31.16b eor v0.16b, v0.16b, v2.16b .Lcrc32r_final_fold: /* Final fold. */ eor v2.16b, v2.16b, v2.16b /* zero reg */ ld1 {v7.16b}, [x9] /* reduce 128-bits to 96-bits */ ext v6.16b, v6.16b, v6.16b, #8 /* swap high and low parts */ mov v1.16b, v0.16b pmull v0.1q, v0.1d, v6.1d ext v6.16b, v5.16b, v5.16b, #8 /* swap high and low parts */ ext v1.16b, v1.16b, v2.16b, #8 /* high to low, high zeroed */ eor v3.16b, v0.16b, v1.16b /* reduce 96-bits to 64-bits */ eor v1.16b, v1.16b, v1.16b ext v0.16b, v3.16b, v2.16b, #4 /* [00][00][x2][x1] */ mov v1.s[0], v3.s[0] /* [00][00][00][x0] */ eor v3.16b, v3.16b, v3.16b pmull v1.1q, v1.1d, v7.1d /* [00][00][xx][xx] */ eor v0.16b, v0.16b, v1.16b /* top 64-bit are zero */ /* barrett reduction */ mov v3.s[1], v0.s[0] /* [00][00][x1][00] */ ext v0.16b, v2.16b, v0.16b, #12 /* [??][x1][??][00] */ pmull v1.1q, v3.1d, v5.1d /* [00][xx][xx][00] */ pmull v1.1q, v1.1d, v6.1d /* [00][xx][xx][00] */ eor v0.16b, v0.16b, v1.16b /* store CRC */ st1 {v0.s}[2], [x0] - ret + ret_spec_stop CFI_ENDPROC() ELF(.size _gcry_crc32r_armv8_ce_bulk,.-_gcry_crc32r_armv8_ce_bulk;) /* * void _gcry_crc32r_armv8_ce_reduction_4 (u32 *pcrc, u32 data, u32 crc, * const struct crc32_consts_s *consts); */ .align 3 .globl _gcry_crc32r_armv8_ce_reduction_4 ELF(.type _gcry_crc32r_armv8_ce_reduction_4,%function;) _gcry_crc32r_armv8_ce_reduction_4: /* input: * w0: data * w1: crc * x2: crc32 constants */ CFI_STARTPROC() eor v0.16b, v0.16b, v0.16b add x2, x2, #consts_my_p(0) eor v1.16b, v1.16b, v1.16b ld1 {v5.16b}, [x2] mov v0.s[0], w0 pmull v0.1q, v0.1d, v5.1d /* [00][00][xx][xx] */ mov v1.s[1], w1 mov v0.s[2], v0.s[0] /* [00][x0][x1][x0] */ pmull2 v0.1q, v0.2d, v5.2d /* [00][00][xx][xx] */ eor v0.16b, v0.16b, v1.16b mov w0, v0.s[1] - ret + ret_spec_stop CFI_ENDPROC() ELF(.size _gcry_crc32r_armv8_ce_reduction_4,.-_gcry_crc32r_armv8_ce_reduction_4;) /* * void _gcry_crc32_armv8_ce_bulk (u32 *pcrc, const byte *inbuf, size_t inlen, * const struct crc32_consts_s *consts); */ .align 3 .globl _gcry_crc32_armv8_ce_bulk ELF(.type _gcry_crc32_armv8_ce_bulk,%function;) _gcry_crc32_armv8_ce_bulk: /* input: * x0: pcrc * x1: inbuf * x2: inlen * x3: consts */ CFI_STARTPROC() GET_DATA_POINTER(x7, .Lcrc32_constants) add x4, x7, #.Lcrc32_bswap_shuf - .Lcrc32_constants cmp x2, #128 ld1 {v7.16b}, [x4] b.lo .Lcrc32_fold_by_one_setup eor v4.16b, v4.16b, v4.16b add x4, x3, #consts_k(1 - 1) ld1 {v4.s}[0], [x0] /* load pcrc */ ld1 {v0.16b-v3.16b}, [x1], #64 /* load 64 bytes of input */ sub x2, x2, #64 ld1 {v6.16b}, [x4] eor v0.16b, v0.16b, v4.16b ext v4.16b, v6.16b, v6.16b, #8 tbl v0.16b, { v0.16b }, v7.16b /* byte swap */ tbl v1.16b, { v1.16b }, v7.16b /* byte swap */ tbl v2.16b, { v2.16b }, v7.16b /* byte swap */ tbl v3.16b, { v3.16b }, v7.16b /* byte swap */ add x4, x3, #consts_k(3 - 1) add x5, x3, #consts_my_p(0) .Lcrc32_fold_by_four: /* Fold by 4. 
*/ ld1 {v16.16b-v19.16b}, [x1], #64 /* load 64 bytes of input */ sub x2, x2, #64 tbl v16.16b, { v16.16b }, v7.16b /* byte swap */ tbl v17.16b, { v17.16b }, v7.16b /* byte swap */ tbl v18.16b, { v18.16b }, v7.16b /* byte swap */ tbl v19.16b, { v19.16b }, v7.16b /* byte swap */ cmp x2, #64 pmull2 v20.1q, v0.2d, v4.2d pmull2 v21.1q, v1.2d, v4.2d pmull2 v22.1q, v2.2d, v4.2d pmull2 v23.1q, v3.2d, v4.2d pmull v24.1q, v0.1d, v4.1d pmull v25.1q, v1.1d, v4.1d pmull v26.1q, v2.1d, v4.1d pmull v27.1q, v3.1d, v4.1d eor v0.16b, v20.16b, v16.16b eor v1.16b, v21.16b, v17.16b eor v2.16b, v22.16b, v18.16b eor v3.16b, v23.16b, v19.16b eor v0.16b, v0.16b, v24.16b eor v1.16b, v1.16b, v25.16b eor v2.16b, v2.16b, v26.16b eor v3.16b, v3.16b, v27.16b b.hs .Lcrc32_fold_by_four ld1 {v6.16b}, [x4] ld1 {v5.16b}, [x5] ext v6.16b, v6.16b, v6.16b, #8 ext v5.16b, v5.16b, v5.16b, #8 cmp x2, #16 /* Fold 4 to 1. */ pmull2 v16.1q, v0.2d, v6.2d pmull v4.1q, v0.1d, v6.1d eor v0.16b, v16.16b, v1.16b eor v0.16b, v0.16b, v4.16b pmull2 v16.1q, v0.2d, v6.2d pmull v4.1q, v0.1d, v6.1d eor v0.16b, v16.16b, v2.16b eor v0.16b, v0.16b, v4.16b pmull2 v16.1q, v0.2d, v6.2d pmull v4.1q, v0.1d, v6.1d eor v0.16b, v16.16b, v3.16b eor v0.16b, v0.16b, v4.16b b.lo .Lcrc32_fold_by_one_done b .Lcrc32_fold_by_one .Lcrc32_fold_by_one_setup: eor v1.16b, v1.16b, v1.16b add x4, x3, #consts_k(3 - 1) add x5, x3, #consts_my_p(0) ld1 {v1.s}[0], [x0] /* load pcrc */ sub x2, x2, #16 ld1 {v0.16b}, [x1], #16 /* load 16 bytes of input */ ld1 {v6.16b}, [x4] /* load k3k4 */ ld1 {v5.16b}, [x5] /* load my_p */ eor v0.16b, v0.16b, v1.16b cmp x2, #16 ext v6.16b, v6.16b, v6.16b, #8 /* swap high and low parts */ ext v5.16b, v5.16b, v5.16b, #8 /* swap high and low parts */ tbl v0.16b, { v0.16b }, v7.16b /* byte swap */ b.lo .Lcrc32_fold_by_one_done .Lcrc32_fold_by_one: sub x2, x2, #16 ld1 {v2.16b}, [x1], #16 /* load 16 bytes of input */ pmull2 v3.1q, v0.2d, v6.2d tbl v2.16b, { v2.16b }, v7.16b /* byte swap */ pmull v1.1q, v0.1d, v6.1d cmp x2, #16 eor v0.16b, v3.16b, v2.16b eor v0.16b, v0.16b, v1.16b b.hs .Lcrc32_fold_by_one .Lcrc32_fold_by_one_done: cmp x2, #0 b.eq .Lcrc32_final_fold /* Partial fold. */ add x4, x7, #.Lcrc32_refl_shuf_shift - .Lcrc32_constants + 32 add x5, x7, #.Lcrc32_shuf_shift - .Lcrc32_constants + 16 add x6, x7, #.Lcrc32_partial_fold_input_mask - .Lcrc32_constants sub x8, x2, #16 sub x4, x4, x2 add x5, x5, x2 add x6, x6, x2 add x8, x1, x8 /* Load last input and add padding zeros. */ ld1 {v4.16b}, [x4] eor x2, x2, x2 ld1 {v3.16b}, [x5] ld1 {v2.16b}, [x6] tbl v30.16b, {v0.16b}, v4.16b ld1 {v4.16b}, [x8] tbl v1.16b, {v0.16b}, v3.16b and v2.16b, v2.16b, v4.16b pmull2 v0.1q, v30.2d, v6.2d orr v2.16b, v2.16b, v1.16b pmull v1.1q, v30.1d, v6.1d tbl v2.16b, {v2.16b}, v7.16b /* byte swap */ eor v0.16b, v0.16b, v1.16b eor v0.16b, v0.16b, v2.16b .Lcrc32_final_fold: /* Final fold. 
*/ eor v2.16b, v2.16b, v2.16b /* zero reg */ /* reduce 128-bits to 96-bits */ add x4, x3, #consts_k(4) ext v3.16b, v6.16b, v6.16b, #8 /* swap high and low parts */ eor v6.16b, v6.16b, v6.16b mov v1.16b, v0.16b pmull2 v0.1q, v0.2d, v3.2d ld1 {v6.d}[1], [x4] /* load k4 */ ext v1.16b, v2.16b, v1.16b, #8 /* low to high, low zeroed */ eor v3.16b, v0.16b, v1.16b /* bottom 32-bit are zero */ /* reduce 96-bits to 64-bits */ eor v0.16b, v0.16b, v0.16b eor v1.16b, v1.16b, v1.16b mov v0.s[1], v3.s[1] /* [00][00][x1][00] */ mov v1.s[2], v3.s[3] /* [00][x3][00][00] */ mov v0.s[2], v3.s[2] /* [00][x2][x1][00] */ eor v3.16b, v3.16b, v3.16b pmull2 v1.1q, v1.2d, v6.2d /* [00][xx][xx][00] */ eor v0.16b, v0.16b, v1.16b /* top and bottom 32-bit are zero */ /* barrett reduction */ mov v3.s[0], v0.s[1] /* [00][00][00][x1] */ pmull2 v0.1q, v0.2d, v5.2d /* [00][xx][xx][xx] */ ext v0.16b, v0.16b, v2.16b, #4 /* [00][00][xx][xx] */ pmull v0.1q, v0.1d, v5.1d eor v0.16b, v0.16b, v3.16b /* store CRC in input endian */ rev32 v0.8b, v0.8b /* byte swap */ st1 {v0.s}[0], [x0] - ret + ret_spec_stop CFI_ENDPROC() ELF(.size _gcry_crc32_armv8_ce_bulk,.-_gcry_crc32_armv8_ce_bulk;) /* * void _gcry_crc32_armv8_ce_reduction_4 (u32 *pcrc, u32 data, u32 crc, * const struct crc32_consts_s *consts); */ .align 3 .globl _gcry_crc32_armv8_ce_reduction_4 ELF(.type _gcry_crc32_armv8_ce_reduction_4,%function;) _gcry_crc32_armv8_ce_reduction_4: /* input: * w0: data * w1: crc * x2: crc32 constants */ CFI_STARTPROC() eor v0.16b, v0.16b, v0.16b add x2, x2, #consts_my_p(0) eor v1.16b, v1.16b, v1.16b ld1 {v5.16b}, [x2] mov v0.s[1], w0 pmull v0.1q, v0.1d, v5.1d /* [00][xx][xx][00] */ mov v1.s[0], w1 pmull2 v0.1q, v0.2d, v5.2d /* [00][00][xx][xx] */ eor v0.16b, v0.16b, v1.16b rev32 v0.8b, v0.8b /* Return in input endian */ mov w0, v0.s[0] - ret + ret_spec_stop CFI_ENDPROC() ELF(.size _gcry_crc32_armv8_ce_reduction_4,.-_gcry_crc32_armv8_ce_reduction_4;) #endif diff --git a/cipher/rijndael-aarch64.S b/cipher/rijndael-aarch64.S index e77dd4e0..184fcd20 100644 --- a/cipher/rijndael-aarch64.S +++ b/cipher/rijndael-aarch64.S @@ -1,514 +1,514 @@ /* rijndael-aarch64.S - ARMv8/AArch64 assembly implementation of AES cipher * * Copyright (C) 2016 Jussi Kivilinna * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . 
*/ #include "asm-common-aarch64.h" #if defined(__AARCH64EL__) #ifdef HAVE_COMPATIBLE_GCC_AARCH64_PLATFORM_AS .text /* register macros */ #define CTX x0 #define RDST x1 #define RSRC x2 #define NROUNDS w3 #define RTAB x4 #define RMASK w5 #define RA w8 #define RB w9 #define RC w10 #define RD w11 #define RNA w12 #define RNB w13 #define RNC w14 #define RND w15 #define RT0 w6 #define RT1 w7 #define RT2 w16 #define xRT0 x6 #define xRT1 x7 #define xRT2 x16 #define xw8 x8 #define xw9 x9 #define xw10 x10 #define xw11 x11 #define xw12 x12 #define xw13 x13 #define xw14 x14 #define xw15 x15 /*********************************************************************** * ARMv8/AArch64 assembly implementation of the AES cipher ***********************************************************************/ #define preload_first_key(round, ra) \ ldr ra, [CTX, #(((round) * 16) + 0 * 4)]; #define dummy(round, ra) /* nothing */ #define addroundkey(ra, rb, rc, rd, rna, rnb, rnc, rnd, preload_key) \ ldp rna, rnb, [CTX]; \ ldp rnc, rnd, [CTX, #8]; \ eor ra, ra, rna; \ eor rb, rb, rnb; \ eor rc, rc, rnc; \ preload_key(1, rna); \ eor rd, rd, rnd; #define do_encround(next_r, ra, rb, rc, rd, rna, rnb, rnc, rnd, preload_key) \ ldr rnb, [CTX, #(((next_r) * 16) + 1 * 4)]; \ \ and RT0, RMASK, ra, lsl#2; \ ldr rnc, [CTX, #(((next_r) * 16) + 2 * 4)]; \ and RT1, RMASK, ra, lsr#(8 - 2); \ ldr rnd, [CTX, #(((next_r) * 16) + 3 * 4)]; \ and RT2, RMASK, ra, lsr#(16 - 2); \ ldr RT0, [RTAB, xRT0]; \ and ra, RMASK, ra, lsr#(24 - 2); \ \ ldr RT1, [RTAB, xRT1]; \ eor rna, rna, RT0; \ ldr RT2, [RTAB, xRT2]; \ and RT0, RMASK, rd, lsl#2; \ ldr ra, [RTAB, x##ra]; \ \ eor rnd, rnd, RT1, ror #24; \ and RT1, RMASK, rd, lsr#(8 - 2); \ eor rnc, rnc, RT2, ror #16; \ and RT2, RMASK, rd, lsr#(16 - 2); \ eor rnb, rnb, ra, ror #8; \ ldr RT0, [RTAB, xRT0]; \ and rd, RMASK, rd, lsr#(24 - 2); \ \ ldr RT1, [RTAB, xRT1]; \ eor rnd, rnd, RT0; \ ldr RT2, [RTAB, xRT2]; \ and RT0, RMASK, rc, lsl#2; \ ldr rd, [RTAB, x##rd]; \ \ eor rnc, rnc, RT1, ror #24; \ and RT1, RMASK, rc, lsr#(8 - 2); \ eor rnb, rnb, RT2, ror #16; \ and RT2, RMASK, rc, lsr#(16 - 2); \ eor rna, rna, rd, ror #8; \ ldr RT0, [RTAB, xRT0]; \ and rc, RMASK, rc, lsr#(24 - 2); \ \ ldr RT1, [RTAB, xRT1]; \ eor rnc, rnc, RT0; \ ldr RT2, [RTAB, xRT2]; \ and RT0, RMASK, rb, lsl#2; \ ldr rc, [RTAB, x##rc]; \ \ eor rnb, rnb, RT1, ror #24; \ and RT1, RMASK, rb, lsr#(8 - 2); \ eor rna, rna, RT2, ror #16; \ and RT2, RMASK, rb, lsr#(16 - 2); \ eor rnd, rnd, rc, ror #8; \ ldr RT0, [RTAB, xRT0]; \ and rb, RMASK, rb, lsr#(24 - 2); \ \ ldr RT1, [RTAB, xRT1]; \ eor rnb, rnb, RT0; \ ldr RT2, [RTAB, xRT2]; \ eor rna, rna, RT1, ror #24; \ ldr rb, [RTAB, x##rb]; \ \ eor rnd, rnd, RT2, ror #16; \ preload_key((next_r) + 1, ra); \ eor rnc, rnc, rb, ror #8; #define do_lastencround(ra, rb, rc, rd, rna, rnb, rnc, rnd) \ and RT0, RMASK, ra, lsl#2; \ and RT1, RMASK, ra, lsr#(8 - 2); \ and RT2, RMASK, ra, lsr#(16 - 2); \ ldrb rna, [RTAB, xRT0]; \ and ra, RMASK, ra, lsr#(24 - 2); \ ldrb rnd, [RTAB, xRT1]; \ and RT0, RMASK, rd, lsl#2; \ ldrb rnc, [RTAB, xRT2]; \ ror rnd, rnd, #24; \ ldrb rnb, [RTAB, x##ra]; \ and RT1, RMASK, rd, lsr#(8 - 2); \ ror rnc, rnc, #16; \ and RT2, RMASK, rd, lsr#(16 - 2); \ ror rnb, rnb, #8; \ ldrb RT0, [RTAB, xRT0]; \ and rd, RMASK, rd, lsr#(24 - 2); \ ldrb RT1, [RTAB, xRT1]; \ \ orr rnd, rnd, RT0; \ ldrb RT2, [RTAB, xRT2]; \ and RT0, RMASK, rc, lsl#2; \ ldrb rd, [RTAB, x##rd]; \ orr rnc, rnc, RT1, ror #24; \ and RT1, RMASK, rc, lsr#(8 - 2); \ orr rnb, rnb, RT2, ror #16; \ and RT2, RMASK, rc, lsr#(16 - 2); \ 
orr rna, rna, rd, ror #8; \ ldrb RT0, [RTAB, xRT0]; \ and rc, RMASK, rc, lsr#(24 - 2); \ ldrb RT1, [RTAB, xRT1]; \ \ orr rnc, rnc, RT0; \ ldrb RT2, [RTAB, xRT2]; \ and RT0, RMASK, rb, lsl#2; \ ldrb rc, [RTAB, x##rc]; \ orr rnb, rnb, RT1, ror #24; \ and RT1, RMASK, rb, lsr#(8 - 2); \ orr rna, rna, RT2, ror #16; \ ldrb RT0, [RTAB, xRT0]; \ and RT2, RMASK, rb, lsr#(16 - 2); \ ldrb RT1, [RTAB, xRT1]; \ orr rnd, rnd, rc, ror #8; \ ldrb RT2, [RTAB, xRT2]; \ and rb, RMASK, rb, lsr#(24 - 2); \ ldrb rb, [RTAB, x##rb]; \ \ orr rnb, rnb, RT0; \ orr rna, rna, RT1, ror #24; \ orr rnd, rnd, RT2, ror #16; \ orr rnc, rnc, rb, ror #8; #define firstencround(round, ra, rb, rc, rd, rna, rnb, rnc, rnd) \ addroundkey(ra, rb, rc, rd, rna, rnb, rnc, rnd, preload_first_key); \ do_encround((round) + 1, ra, rb, rc, rd, rna, rnb, rnc, rnd, preload_first_key); #define encround(round, ra, rb, rc, rd, rna, rnb, rnc, rnd, preload_key) \ do_encround((round) + 1, ra, rb, rc, rd, rna, rnb, rnc, rnd, preload_key); #define lastencround(round, ra, rb, rc, rd, rna, rnb, rnc, rnd) \ add CTX, CTX, #(((round) + 1) * 16); \ add RTAB, RTAB, #1; \ do_lastencround(ra, rb, rc, rd, rna, rnb, rnc, rnd); \ addroundkey(rna, rnb, rnc, rnd, ra, rb, rc, rd, dummy); .globl _gcry_aes_arm_encrypt_block ELF(.type _gcry_aes_arm_encrypt_block,%function;) _gcry_aes_arm_encrypt_block: /* input: * %x0: keysched, CTX * %x1: dst * %x2: src * %w3: number of rounds.. 10, 12 or 14 * %x4: encryption table */ CFI_STARTPROC(); /* read input block */ /* aligned load */ ldp RA, RB, [RSRC]; ldp RC, RD, [RSRC, #8]; #ifndef __AARCH64EL__ rev RA, RA; rev RB, RB; rev RC, RC; rev RD, RD; #endif mov RMASK, #(0xff<<2); firstencround(0, RA, RB, RC, RD, RNA, RNB, RNC, RND); encround(1, RNA, RNB, RNC, RND, RA, RB, RC, RD, preload_first_key); encround(2, RA, RB, RC, RD, RNA, RNB, RNC, RND, preload_first_key); encround(3, RNA, RNB, RNC, RND, RA, RB, RC, RD, preload_first_key); encround(4, RA, RB, RC, RD, RNA, RNB, RNC, RND, preload_first_key); encround(5, RNA, RNB, RNC, RND, RA, RB, RC, RD, preload_first_key); encround(6, RA, RB, RC, RD, RNA, RNB, RNC, RND, preload_first_key); encround(7, RNA, RNB, RNC, RND, RA, RB, RC, RD, preload_first_key); cmp NROUNDS, #12; bge .Lenc_not_128; encround(8, RA, RB, RC, RD, RNA, RNB, RNC, RND, dummy); lastencround(9, RNA, RNB, RNC, RND, RA, RB, RC, RD); .Lenc_done: /* store output block */ /* aligned store */ #ifndef __AARCH64EL__ rev RA, RA; rev RB, RB; rev RC, RC; rev RD, RD; #endif /* write output block */ stp RA, RB, [RDST]; stp RC, RD, [RDST, #8]; mov x0, #(0); - ret; + ret_spec_stop; .ltorg .Lenc_not_128: beq .Lenc_192 encround(8, RA, RB, RC, RD, RNA, RNB, RNC, RND, preload_first_key); encround(9, RNA, RNB, RNC, RND, RA, RB, RC, RD, preload_first_key); encround(10, RA, RB, RC, RD, RNA, RNB, RNC, RND, preload_first_key); encround(11, RNA, RNB, RNC, RND, RA, RB, RC, RD, preload_first_key); encround(12, RA, RB, RC, RD, RNA, RNB, RNC, RND, dummy); lastencround(13, RNA, RNB, RNC, RND, RA, RB, RC, RD); b .Lenc_done; .ltorg .Lenc_192: encround(8, RA, RB, RC, RD, RNA, RNB, RNC, RND, preload_first_key); encround(9, RNA, RNB, RNC, RND, RA, RB, RC, RD, preload_first_key); encround(10, RA, RB, RC, RD, RNA, RNB, RNC, RND, dummy); lastencround(11, RNA, RNB, RNC, RND, RA, RB, RC, RD); b .Lenc_done; CFI_ENDPROC(); ELF(.size _gcry_aes_arm_encrypt_block,.-_gcry_aes_arm_encrypt_block;) #define addroundkey_dec(round, ra, rb, rc, rd, rna, rnb, rnc, rnd) \ ldr rna, [CTX, #(((round) * 16) + 0 * 4)]; \ ldr rnb, [CTX, #(((round) * 16) + 1 * 4)]; \ eor ra, ra, 
rna; \ ldr rnc, [CTX, #(((round) * 16) + 2 * 4)]; \ eor rb, rb, rnb; \ ldr rnd, [CTX, #(((round) * 16) + 3 * 4)]; \ eor rc, rc, rnc; \ preload_first_key((round) - 1, rna); \ eor rd, rd, rnd; #define do_decround(next_r, ra, rb, rc, rd, rna, rnb, rnc, rnd, preload_key) \ ldr rnb, [CTX, #(((next_r) * 16) + 1 * 4)]; \ \ and RT0, RMASK, ra, lsl#2; \ ldr rnc, [CTX, #(((next_r) * 16) + 2 * 4)]; \ and RT1, RMASK, ra, lsr#(8 - 2); \ ldr rnd, [CTX, #(((next_r) * 16) + 3 * 4)]; \ and RT2, RMASK, ra, lsr#(16 - 2); \ ldr RT0, [RTAB, xRT0]; \ and ra, RMASK, ra, lsr#(24 - 2); \ \ ldr RT1, [RTAB, xRT1]; \ eor rna, rna, RT0; \ ldr RT2, [RTAB, xRT2]; \ and RT0, RMASK, rb, lsl#2; \ ldr ra, [RTAB, x##ra]; \ \ eor rnb, rnb, RT1, ror #24; \ and RT1, RMASK, rb, lsr#(8 - 2); \ eor rnc, rnc, RT2, ror #16; \ and RT2, RMASK, rb, lsr#(16 - 2); \ eor rnd, rnd, ra, ror #8; \ ldr RT0, [RTAB, xRT0]; \ and rb, RMASK, rb, lsr#(24 - 2); \ \ ldr RT1, [RTAB, xRT1]; \ eor rnb, rnb, RT0; \ ldr RT2, [RTAB, xRT2]; \ and RT0, RMASK, rc, lsl#2; \ ldr rb, [RTAB, x##rb]; \ \ eor rnc, rnc, RT1, ror #24; \ and RT1, RMASK, rc, lsr#(8 - 2); \ eor rnd, rnd, RT2, ror #16; \ and RT2, RMASK, rc, lsr#(16 - 2); \ eor rna, rna, rb, ror #8; \ ldr RT0, [RTAB, xRT0]; \ and rc, RMASK, rc, lsr#(24 - 2); \ \ ldr RT1, [RTAB, xRT1]; \ eor rnc, rnc, RT0; \ ldr RT2, [RTAB, xRT2]; \ and RT0, RMASK, rd, lsl#2; \ ldr rc, [RTAB, x##rc]; \ \ eor rnd, rnd, RT1, ror #24; \ and RT1, RMASK, rd, lsr#(8 - 2); \ eor rna, rna, RT2, ror #16; \ and RT2, RMASK, rd, lsr#(16 - 2); \ eor rnb, rnb, rc, ror #8; \ ldr RT0, [RTAB, xRT0]; \ and rd, RMASK, rd, lsr#(24 - 2); \ \ ldr RT1, [RTAB, xRT1]; \ eor rnd, rnd, RT0; \ ldr RT2, [RTAB, xRT2]; \ eor rna, rna, RT1, ror #24; \ ldr rd, [RTAB, x##rd]; \ \ eor rnb, rnb, RT2, ror #16; \ preload_key((next_r) - 1, ra); \ eor rnc, rnc, rd, ror #8; #define do_lastdecround(ra, rb, rc, rd, rna, rnb, rnc, rnd) \ and RT0, RMASK, ra; \ and RT1, RMASK, ra, lsr#8; \ and RT2, RMASK, ra, lsr#16; \ ldrb rna, [RTAB, xRT0]; \ lsr ra, ra, #24; \ ldrb rnb, [RTAB, xRT1]; \ and RT0, RMASK, rb; \ ldrb rnc, [RTAB, xRT2]; \ ror rnb, rnb, #24; \ ldrb rnd, [RTAB, x##ra]; \ and RT1, RMASK, rb, lsr#8; \ ror rnc, rnc, #16; \ and RT2, RMASK, rb, lsr#16; \ ror rnd, rnd, #8; \ ldrb RT0, [RTAB, xRT0]; \ lsr rb, rb, #24; \ ldrb RT1, [RTAB, xRT1]; \ \ orr rnb, rnb, RT0; \ ldrb RT2, [RTAB, xRT2]; \ and RT0, RMASK, rc; \ ldrb rb, [RTAB, x##rb]; \ orr rnc, rnc, RT1, ror #24; \ and RT1, RMASK, rc, lsr#8; \ orr rnd, rnd, RT2, ror #16; \ and RT2, RMASK, rc, lsr#16; \ orr rna, rna, rb, ror #8; \ ldrb RT0, [RTAB, xRT0]; \ lsr rc, rc, #24; \ ldrb RT1, [RTAB, xRT1]; \ \ orr rnc, rnc, RT0; \ ldrb RT2, [RTAB, xRT2]; \ and RT0, RMASK, rd; \ ldrb rc, [RTAB, x##rc]; \ orr rnd, rnd, RT1, ror #24; \ and RT1, RMASK, rd, lsr#8; \ orr rna, rna, RT2, ror #16; \ ldrb RT0, [RTAB, xRT0]; \ and RT2, RMASK, rd, lsr#16; \ ldrb RT1, [RTAB, xRT1]; \ orr rnb, rnb, rc, ror #8; \ ldrb RT2, [RTAB, xRT2]; \ lsr rd, rd, #24; \ ldrb rd, [RTAB, x##rd]; \ \ orr rnd, rnd, RT0; \ orr rna, rna, RT1, ror #24; \ orr rnb, rnb, RT2, ror #16; \ orr rnc, rnc, rd, ror #8; #define firstdecround(round, ra, rb, rc, rd, rna, rnb, rnc, rnd) \ addroundkey_dec(((round) + 1), ra, rb, rc, rd, rna, rnb, rnc, rnd); \ do_decround(round, ra, rb, rc, rd, rna, rnb, rnc, rnd, preload_first_key); #define decround(round, ra, rb, rc, rd, rna, rnb, rnc, rnd, preload_key) \ do_decround(round, ra, rb, rc, rd, rna, rnb, rnc, rnd, preload_key); #define set_last_round_rmask(_, __) \ mov RMASK, #0xff; #define lastdecround(round, ra, rb, 
rc, rd, rna, rnb, rnc, rnd) \ add RTAB, RTAB, #(4 * 256); \ do_lastdecround(ra, rb, rc, rd, rna, rnb, rnc, rnd); \ addroundkey(rna, rnb, rnc, rnd, ra, rb, rc, rd, dummy); .globl _gcry_aes_arm_decrypt_block ELF(.type _gcry_aes_arm_decrypt_block,%function;) _gcry_aes_arm_decrypt_block: /* input: * %x0: keysched, CTX * %x1: dst * %x2: src * %w3: number of rounds.. 10, 12 or 14 * %x4: decryption table */ CFI_STARTPROC(); /* read input block */ /* aligned load */ ldp RA, RB, [RSRC]; ldp RC, RD, [RSRC, #8]; #ifndef __AARCH64EL__ rev RA, RA; rev RB, RB; rev RC, RC; rev RD, RD; #endif mov RMASK, #(0xff << 2); cmp NROUNDS, #12; bge .Ldec_256; firstdecround(9, RA, RB, RC, RD, RNA, RNB, RNC, RND); .Ldec_tail: decround(8, RNA, RNB, RNC, RND, RA, RB, RC, RD, preload_first_key); decround(7, RA, RB, RC, RD, RNA, RNB, RNC, RND, preload_first_key); decround(6, RNA, RNB, RNC, RND, RA, RB, RC, RD, preload_first_key); decround(5, RA, RB, RC, RD, RNA, RNB, RNC, RND, preload_first_key); decround(4, RNA, RNB, RNC, RND, RA, RB, RC, RD, preload_first_key); decround(3, RA, RB, RC, RD, RNA, RNB, RNC, RND, preload_first_key); decround(2, RNA, RNB, RNC, RND, RA, RB, RC, RD, preload_first_key); decround(1, RA, RB, RC, RD, RNA, RNB, RNC, RND, set_last_round_rmask); lastdecround(0, RNA, RNB, RNC, RND, RA, RB, RC, RD); /* store output block */ /* aligned store */ #ifndef __AARCH64EL__ rev RA, RA; rev RB, RB; rev RC, RC; rev RD, RD; #endif /* write output block */ stp RA, RB, [RDST]; stp RC, RD, [RDST, #8]; mov x0, #(0); - ret; + ret_spec_stop; .ltorg .Ldec_256: beq .Ldec_192; firstdecround(13, RA, RB, RC, RD, RNA, RNB, RNC, RND); decround(12, RNA, RNB, RNC, RND, RA, RB, RC, RD, preload_first_key); decround(11, RA, RB, RC, RD, RNA, RNB, RNC, RND, preload_first_key); decround(10, RNA, RNB, RNC, RND, RA, RB, RC, RD, preload_first_key); decround(9, RA, RB, RC, RD, RNA, RNB, RNC, RND, preload_first_key); b .Ldec_tail; .ltorg .Ldec_192: firstdecround(11, RA, RB, RC, RD, RNA, RNB, RNC, RND); decround(10, RNA, RNB, RNC, RND, RA, RB, RC, RD, preload_first_key); decround(9, RA, RB, RC, RD, RNA, RNB, RNC, RND, preload_first_key); b .Ldec_tail; CFI_ENDPROC(); ELF(.size _gcry_aes_arm_decrypt_block,.-_gcry_aes_arm_decrypt_block;) #endif /*HAVE_COMPATIBLE_GCC_AARCH64_PLATFORM_AS*/ #endif /*__AARCH64EL__ */ diff --git a/cipher/rijndael-armv8-aarch64-ce.S b/cipher/rijndael-armv8-aarch64-ce.S index 9f8d9d49..4fef0345 100644 --- a/cipher/rijndael-armv8-aarch64-ce.S +++ b/cipher/rijndael-armv8-aarch64-ce.S @@ -1,1921 +1,1921 @@ /* rijndael-armv8-aarch64-ce.S - ARMv8/CE accelerated AES * Copyright (C) 2016 Jussi Kivilinna * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . 
*/ #include "asm-common-aarch64.h" #if defined(__AARCH64EL__) && \ defined(HAVE_COMPATIBLE_GCC_AARCH64_PLATFORM_AS) && \ defined(HAVE_GCC_INLINE_ASM_AARCH64_CRYPTO) .cpu generic+simd+crypto .text /* Register macros */ #define vk0 v17 #define vk1 v18 #define vk2 v19 #define vk3 v20 #define vk4 v21 #define vk5 v22 #define vk6 v23 #define vk7 v24 #define vk8 v25 #define vk9 v26 #define vk10 v27 #define vk11 v28 #define vk12 v29 #define vk13 v30 #define vklast v31 /* Helper macros */ #define __ /*_*/ #define _(...) __VA_ARGS__ /* AES macros */ #define aes_preload_keys(keysched, nrounds) \ cmp nrounds, #12; \ ld1 {vk0.16b-vk3.16b}, [keysched], #64; \ ld1 {vk4.16b-vk7.16b}, [keysched], #64; \ ld1 {vk8.16b-vk10.16b}, [keysched], #48; \ mov vklast.16b, vk10.16b; \ b.lo 1f; \ ld1 {vk11.16b-vk12.16b}, [keysched], #32; \ mov vklast.16b, vk12.16b; \ b.eq 1f; \ ld1 {vk13.16b-vklast.16b}, [keysched]; \ 1: ; #define do_aes_one_part1(ed, mcimc, vb, vkfirst) \ aes##ed vb.16b, vkfirst.16b; \ aes##mcimc vb.16b, vb.16b; \ aes##ed vb.16b, vk1.16b; \ aes##mcimc vb.16b, vb.16b; #define do_aes_one_part2_128(ed, mcimc, vb, iop1, iop2) \ aes##ed vb.16b, vk2.16b; \ aes##mcimc vb.16b, vb.16b; \ aes##ed vb.16b, vk3.16b; \ aes##mcimc vb.16b, vb.16b; \ iop1; \ aes##ed vb.16b, vk4.16b; \ aes##mcimc vb.16b, vb.16b; \ aes##ed vb.16b, vk5.16b; \ aes##mcimc vb.16b, vb.16b; \ aes##ed vb.16b, vk6.16b; \ aes##mcimc vb.16b, vb.16b; \ iop2; \ aes##ed vb.16b, vk7.16b; \ aes##mcimc vb.16b, vb.16b; \ aes##ed vb.16b, vk8.16b; \ aes##mcimc vb.16b, vb.16b; \ aes##ed vb.16b, vk9.16b; #define do_aes_one_part2_192(ed, mcimc, vb, iop1, iop2) \ aes##ed vb.16b, vk2.16b; \ aes##mcimc vb.16b, vb.16b; \ aes##ed vb.16b, vk3.16b; \ aes##mcimc vb.16b, vb.16b; \ aes##ed vb.16b, vk4.16b; \ aes##mcimc vb.16b, vb.16b; \ aes##ed vb.16b, vk5.16b; \ aes##mcimc vb.16b, vb.16b; \ iop1; \ aes##ed vb.16b, vk6.16b; \ aes##mcimc vb.16b, vb.16b; \ aes##ed vb.16b, vk7.16b; \ aes##mcimc vb.16b, vb.16b; \ aes##ed vb.16b, vk8.16b; \ aes##mcimc vb.16b, vb.16b; \ iop2; \ aes##ed vb.16b, vk9.16b; \ aes##mcimc vb.16b, vb.16b; \ aes##ed vb.16b, vk10.16b; \ aes##mcimc vb.16b, vb.16b; \ aes##ed vb.16b, vk11.16b; #define do_aes_one_part2_256(ed, mcimc, vb, iop1, iop2) \ aes##ed vb.16b, vk2.16b; \ aes##mcimc vb.16b, vb.16b; \ aes##ed vb.16b, vk3.16b; \ aes##mcimc vb.16b, vb.16b; \ aes##ed vb.16b, vk4.16b; \ aes##mcimc vb.16b, vb.16b; \ aes##ed vb.16b, vk5.16b; \ aes##mcimc vb.16b, vb.16b; \ aes##ed vb.16b, vk6.16b; \ aes##mcimc vb.16b, vb.16b; \ aes##ed vb.16b, vk7.16b; \ aes##mcimc vb.16b, vb.16b; \ iop1; \ aes##ed vb.16b, vk8.16b; \ aes##mcimc vb.16b, vb.16b; \ aes##ed vb.16b, vk9.16b; \ aes##mcimc vb.16b, vb.16b; \ aes##ed vb.16b, vk10.16b; \ aes##mcimc vb.16b, vb.16b; \ iop2; \ aes##ed vb.16b, vk11.16b; \ aes##mcimc vb.16b, vb.16b; \ aes##ed vb.16b, vk12.16b; \ aes##mcimc vb.16b, vb.16b; \ aes##ed vb.16b, vk13.16b; #define do_aes_one128(ed, mcimc, vo, vb, vkfirst) \ do_aes_one_part1(ed, mcimc, vb, vkfirst); \ do_aes_one_part2_128(ed, mcimc, vb, __, __); \ eor vo.16b, vb.16b, vklast.16b; #define do_aes_one192(ed, mcimc, vo, vb, vkfirst) \ do_aes_one_part1(ed, mcimc, vb, vkfirst); \ do_aes_one_part2_192(ed, mcimc, vb, __, __); \ eor vo.16b, vb.16b, vklast.16b; #define do_aes_one256(ed, mcimc, vo, vb, vkfirst) \ do_aes_one_part1(ed, mcimc, vb, vkfirst); \ do_aes_one_part2_256(ed, mcimc, vb, __, __); \ eor vo.16b, vb.16b, vklast.16b; #define aes_round_4_multikey(ed, mcimc, b0, b1, b2, b3, key0, key1, key2, key3) \ aes##ed b0.16b, key0.16b; \ aes##mcimc b0.16b, b0.16b; \ 
aes##ed b1.16b, key1.16b; \ aes##mcimc b1.16b, b1.16b; \ aes##ed b2.16b, key2.16b; \ aes##mcimc b2.16b, b2.16b; \ aes##ed b3.16b, key3.16b; \ aes##mcimc b3.16b, b3.16b; #define aes_round_4(ed, mcimc, b0, b1, b2, b3, key) \ aes_round_4_multikey(ed, mcimc, b0, b1, b2, b3, key, key, key, key); #define aes_lastround_4(ed, o0, o1, o2, o3, b0, b1, b2, b3, key1, b0_key2, b1_key2, b2_key2, b3_key2) \ aes##ed b0.16b, key1.16b; \ aes##ed b1.16b, key1.16b; \ aes##ed b2.16b, key1.16b; \ aes##ed b3.16b, key1.16b; \ eor o0.16b, b0.16b, b0_key2.16b; \ eor o1.16b, b1.16b, b1_key2.16b; \ eor o2.16b, b2.16b, b2_key2.16b; \ eor o3.16b, b3.16b, b3_key2.16b; #define do_aes_4_part1_multikey(ed, mcimc, b0, b1, b2, b3, key0, key1, key2, key3) \ aes_round_4_multikey(ed, mcimc, b0, b1, b2, b3, key0, key1, key2, key3); \ aes_round_4(ed, mcimc, b0, b1, b2, b3, vk1); \ aes_round_4(ed, mcimc, b0, b1, b2, b3, vk2); \ aes_round_4(ed, mcimc, b0, b1, b2, b3, vk3); #define do_aes_4_part1(ed, mcimc, b0, b1, b2, b3, vkfirst) \ do_aes_4_part1_multikey(ed, mcimc, b0, b1, b2, b3, vkfirst, vkfirst, vkfirst, vkfirst); #define do_aes_4_part2_128(ed, mcimc, o0, o1, o2, o3, b0, b1, b2, b3, \ b0_key, b1_key, b2_key, b3_key) \ aes_round_4(ed, mcimc, b0, b1, b2, b3, vk4); \ aes_round_4(ed, mcimc, b0, b1, b2, b3, vk5); \ aes_round_4(ed, mcimc, b0, b1, b2, b3, vk6); \ aes_round_4(ed, mcimc, b0, b1, b2, b3, vk7); \ aes_round_4(ed, mcimc, b0, b1, b2, b3, vk8); \ aes_lastround_4(ed, o0, o1, o2, o3, b0, b1, b2, b3, vk9, b0_key, b1_key, b2_key, b3_key); #define do_aes_4_part2_192(ed, mcimc, o0, o1, o2, o3, b0, b1, b2, b3, \ b0_key, b1_key, b2_key, b3_key) \ aes_round_4(ed, mcimc, b0, b1, b2, b3, vk4); \ aes_round_4(ed, mcimc, b0, b1, b2, b3, vk5); \ aes_round_4(ed, mcimc, b0, b1, b2, b3, vk6); \ aes_round_4(ed, mcimc, b0, b1, b2, b3, vk7); \ aes_round_4(ed, mcimc, b0, b1, b2, b3, vk8); \ aes_round_4(ed, mcimc, b0, b1, b2, b3, vk9); \ aes_round_4(ed, mcimc, b0, b1, b2, b3, vk10); \ aes_lastround_4(ed, o0, o1, o2, o3, b0, b1, b2, b3, vk11, b0_key, b1_key, b2_key, b3_key); #define do_aes_4_part2_256(ed, mcimc, o0, o1, o2, o3, b0, b1, b2, b3, \ b0_key, b1_key, b2_key, b3_key) \ aes_round_4(ed, mcimc, b0, b1, b2, b3, vk4); \ aes_round_4(ed, mcimc, b0, b1, b2, b3, vk5); \ aes_round_4(ed, mcimc, b0, b1, b2, b3, vk6); \ aes_round_4(ed, mcimc, b0, b1, b2, b3, vk7); \ aes_round_4(ed, mcimc, b0, b1, b2, b3, vk8); \ aes_round_4(ed, mcimc, b0, b1, b2, b3, vk9); \ aes_round_4(ed, mcimc, b0, b1, b2, b3, vk10); \ aes_round_4(ed, mcimc, b0, b1, b2, b3, vk11); \ aes_round_4(ed, mcimc, b0, b1, b2, b3, vk12); \ aes_lastround_4(ed, o0, o1, o2, o3, b0, b1, b2, b3, vk13, b0_key, b1_key, b2_key, b3_key); #define do_aes_4_128(ed, mcimc, b0, b1, b2, b3) \ do_aes_4_part1(ed, mcimc, b0, b1, b2, b3, vk0); \ do_aes_4_part2_128(ed, mcimc, b0, b1, b2, b3, b0, b1, b2, b3, vklast, vklast, vklast, vklast); #define do_aes_4_192(ed, mcimc, b0, b1, b2, b3) \ do_aes_4_part1(ed, mcimc, b0, b1, b2, b3, vk0); \ do_aes_4_part2_192(ed, mcimc, b0, b1, b2, b3, b0, b1, b2, b3, vklast, vklast, vklast, vklast); #define do_aes_4_256(ed, mcimc, b0, b1, b2, b3) \ do_aes_4_part1(ed, mcimc, b0, b1, b2, b3, vk0); \ do_aes_4_part2_256(ed, mcimc, b0, b1, b2, b3, b0, b1, b2, b3, vklast, vklast, vklast, vklast); /* Other functional macros */ #define CLEAR_REG(reg) movi reg.16b, #0; #define aes_clear_keys(nrounds) \ CLEAR_REG(vk0); \ CLEAR_REG(vk1); \ CLEAR_REG(vk2); \ CLEAR_REG(vk3); \ CLEAR_REG(vk4); \ CLEAR_REG(vk5); \ CLEAR_REG(vk6); \ CLEAR_REG(vk7); \ CLEAR_REG(vk9); \ CLEAR_REG(vk8); \ 
CLEAR_REG(vk10); \ CLEAR_REG(vk11); \ CLEAR_REG(vk12); \ CLEAR_REG(vk13); \ CLEAR_REG(vklast); /* * unsigned int _gcry_aes_enc_armv8_ce(void *keysched, byte *dst, * const byte *src, * unsigned int nrounds); */ .align 3 .globl _gcry_aes_enc_armv8_ce ELF(.type _gcry_aes_enc_armv8_ce,%function;) _gcry_aes_enc_armv8_ce: /* input: * x0: keysched * x1: dst * x2: src * w3: nrounds */ CFI_STARTPROC(); aes_preload_keys(x0, w3); ld1 {v0.16b}, [x2] b.hi .Lenc1_256 b.eq .Lenc1_192 .Lenc1_128: do_aes_one128(e, mc, v0, v0, vk0); .Lenc1_tail: CLEAR_REG(vk0) CLEAR_REG(vk1) CLEAR_REG(vk2) CLEAR_REG(vk3) CLEAR_REG(vk4) CLEAR_REG(vk5) CLEAR_REG(vk6) CLEAR_REG(vk7) CLEAR_REG(vk8) CLEAR_REG(vk9) CLEAR_REG(vk10) CLEAR_REG(vklast) st1 {v0.16b}, [x1] CLEAR_REG(v0) mov x0, #0 - ret + ret_spec_stop .Lenc1_192: do_aes_one192(e, mc, v0, v0, vk0); CLEAR_REG(vk11) CLEAR_REG(vk12) b .Lenc1_tail .Lenc1_256: do_aes_one256(e, mc, v0, v0, vk0); CLEAR_REG(vk11) CLEAR_REG(vk12) CLEAR_REG(vk13) b .Lenc1_tail CFI_ENDPROC(); ELF(.size _gcry_aes_enc_armv8_ce,.-_gcry_aes_enc_armv8_ce;) /* * unsigned int _gcry_aes_dec_armv8_ce(void *keysched, byte *dst, * const byte *src, * unsigned int nrounds); */ .align 3 .globl _gcry_aes_dec_armv8_ce ELF(.type _gcry_aes_dec_armv8_ce,%function;) _gcry_aes_dec_armv8_ce: /* input: * x0: keysched * x1: dst * x2: src * w3: nrounds */ CFI_STARTPROC(); aes_preload_keys(x0, w3); ld1 {v0.16b}, [x2] b.hi .Ldec1_256 b.eq .Ldec1_192 .Ldec1_128: do_aes_one128(d, imc, v0, v0, vk0); .Ldec1_tail: CLEAR_REG(vk0) CLEAR_REG(vk1) CLEAR_REG(vk2) CLEAR_REG(vk3) CLEAR_REG(vk4) CLEAR_REG(vk5) CLEAR_REG(vk6) CLEAR_REG(vk7) CLEAR_REG(vk8) CLEAR_REG(vk9) CLEAR_REG(vk10) CLEAR_REG(vklast) st1 {v0.16b}, [x1] CLEAR_REG(v0) mov x0, #0 - ret + ret_spec_stop .Ldec1_192: do_aes_one192(d, imc, v0, v0, vk0); CLEAR_REG(vk11) CLEAR_REG(vk12) b .Ldec1_tail .Ldec1_256: do_aes_one256(d, imc, v0, v0, vk0); CLEAR_REG(vk11) CLEAR_REG(vk12) CLEAR_REG(vk13) b .Ldec1_tail CFI_ENDPROC(); ELF(.size _gcry_aes_dec_armv8_ce,.-_gcry_aes_dec_armv8_ce;) /* * void _gcry_aes_cbc_enc_armv8_ce (const void *keysched, * unsigned char *outbuf, * const unsigned char *inbuf, * unsigned char *iv, size_t nblocks, * int cbc_mac, unsigned int nrounds); */ .align 3 .globl _gcry_aes_cbc_enc_armv8_ce ELF(.type _gcry_aes_cbc_enc_armv8_ce,%function;) _gcry_aes_cbc_enc_armv8_ce: /* input: * x0: keysched * x1: outbuf * x2: inbuf * x3: iv * x4: nblocks * w5: cbc_mac * w6: nrounds */ CFI_STARTPROC(); cbz x4, .Lcbc_enc_skip cmp w5, #0 ld1 {v4.16b}, [x3] /* load IV */ csetm x5, eq aes_preload_keys(x0, w6); and x5, x5, #16 ld1 {v3.16b}, [x2], #16; /* load plaintext */ mov v0.16b, vk0.16b; sub x4, x4, #1; eor v16.16b, vk0.16b, vklast.16b; eor v4.16b, v4.16b, v3.16b; do_aes_one_part1(e, mc, v4, v0); b.eq .Lcbc_enc_entry_192 b.hi .Lcbc_enc_entry_256 #define CBC_ENC(bits) \ .Lcbc_enc_entry_##bits: \ cbz x4, .Lcbc_enc_done_##bits; \ \ .Lcbc_enc_loop_##bits: \ do_aes_one_part2_##bits(e, mc, v4, \ _(ld1 {v0.16b}, [x2], #16 /* load plaintext */), \ _(eor v0.16b, v0.16b, v16.16b)); \ sub x4, x4, #1; \ eor v3.16b, v4.16b, vklast.16b; \ do_aes_one_part1(e, mc, v4, v0); \ st1 {v3.16b}, [x1], x5; /* store ciphertext */ \ cbnz x4, .Lcbc_enc_loop_##bits; \ \ .Lcbc_enc_done_##bits: \ do_aes_one_part2_##bits(e, mc, v4, __, __); \ b .Lcbc_enc_done; CBC_ENC(128) CBC_ENC(192) CBC_ENC(256) #undef CBC_ENC .Lcbc_enc_done: eor v3.16b, v4.16b, vklast.16b; st1 {v3.16b}, [x1]; /* store ciphertext */ aes_clear_keys(w6) st1 {v3.16b}, [x3] /* store IV */ CLEAR_REG(v16) CLEAR_REG(v4) CLEAR_REG(v3) 
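	/* The loop above implements CBC chaining, C[i] = E_K(P[i] ^ C[i-1]),
	 * with one shortcut: since v16 = vk0 ^ vklast, XORing the next
	 * plaintext block with v16 merges the previous block's final
	 * AddRoundKey, the CBC XOR and round 0's AddRoundKey into the single
	 * value handed to do_aes_one_part1() as its first-round key.
	 * aes_clear_keys() and the CLEAR_REG() calls here zero the SIMD
	 * registers that held round keys, the IV/ciphertext and
	 * plaintext-derived data, so nothing key-dependent is left in vector
	 * registers on return. */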
CLEAR_REG(v0) .Lcbc_enc_skip: - ret + ret_spec_stop CFI_ENDPROC(); ELF(.size _gcry_aes_cbc_enc_armv8_ce,.-_gcry_aes_cbc_enc_armv8_ce;) /* * void _gcry_aes_cbc_dec_armv8_ce (const void *keysched, * unsigned char *outbuf, * const unsigned char *inbuf, * unsigned char *iv, unsigned int nrounds); */ .align 3 .globl _gcry_aes_cbc_dec_armv8_ce ELF(.type _gcry_aes_cbc_dec_armv8_ce,%function;) _gcry_aes_cbc_dec_armv8_ce: /* input: * x0: keysched * x1: outbuf * x2: inbuf * x3: iv * x4: nblocks * w5: nrounds */ CFI_STARTPROC(); cbz x4, .Lcbc_dec_skip add sp, sp, #-64; CFI_ADJUST_CFA_OFFSET(64); ld1 {v16.16b}, [x3] /* load IV */ aes_preload_keys(x0, w5); b.eq .Lcbc_dec_entry_192 b.hi .Lcbc_dec_entry_256 #define CBC_DEC(bits) \ .Lcbc_dec_entry_##bits: \ cmp x4, #4; \ b.lo .Lcbc_dec_loop_##bits; \ \ ld1 {v0.16b-v3.16b}, [x2], #64; /* load ciphertext */ \ cmp x4, #8; \ sub x4, x4, #4; \ eor v4.16b, v16.16b, vklast.16b; \ eor v5.16b, v0.16b, vklast.16b; \ eor v6.16b, v1.16b, vklast.16b; \ eor v7.16b, v2.16b, vklast.16b; \ mov v16.16b, v3.16b; /* next IV */ \ \ do_aes_4_part1(d, imc, v0, v1, v2, v3, vk0); \ b.lo .Lcbc_dec_done4_##bits; \ \ st1 {v8.16b-v11.16b}, [sp]; /* store callee saved registers */ \ \ .Lcbc_dec_loop4_##bits: \ do_aes_4_part2_##bits(d, imc, v8, v9, v10, v11, v0, v1, v2, v3, v4, v5, v6, v7); \ ld1 {v0.16b-v3.16b}, [x2], #64; /* load ciphertext */ \ cmp x4, #8; \ sub x4, x4, #4; \ eor v4.16b, v16.16b, vklast.16b; \ eor v5.16b, v0.16b, vklast.16b; \ eor v6.16b, v1.16b, vklast.16b; \ eor v7.16b, v2.16b, vklast.16b; \ mov v16.16b, v3.16b; /* next IV */ \ \ do_aes_4_part1(d, imc, v0, v1, v2, v3, vk0); \ st1 {v8.16b-v11.16b}, [x1], #64; /* store plaintext */ \ \ b.hs .Lcbc_dec_loop4_##bits; \ \ ld1 {v8.16b-v11.16b}, [sp]; /* restore callee saved registers */ \ \ .Lcbc_dec_done4_##bits: \ do_aes_4_part2_##bits(d, imc, v0, v1, v2, v3, v0, v1, v2, v3, v4, v5, v6, v7); \ \ CLEAR_REG(v4); \ CLEAR_REG(v5); \ CLEAR_REG(v6); \ CLEAR_REG(v7); \ st1 {v0.16b-v3.16b}, [x1], #64; /* store plaintext */ \ CLEAR_REG(v0); \ CLEAR_REG(v3); \ cbz x4, .Lcbc_dec_done; \ \ .Lcbc_dec_loop_##bits: \ ld1 {v1.16b}, [x2], #16; /* load ciphertext */ \ sub x4, x4, #1; \ eor v16.16b, v16.16b, vklast.16b; \ mov v2.16b, v1.16b; \ \ do_aes_one_part1(d, imc, v1, vk0); \ do_aes_one_part2_##bits(d, imc, v1, __, __); \ eor v1.16b, v1.16b, v16.16b; \ \ mov v16.16b, v2.16b; \ st1 {v1.16b}, [x1], #16; /* store plaintext */ \ \ cbnz x4, .Lcbc_dec_loop_##bits; \ b .Lcbc_dec_done; CBC_DEC(128) CBC_DEC(192) CBC_DEC(256) #undef CBC_DEC .Lcbc_dec_done: aes_clear_keys(w5) st1 {v16.16b}, [x3] /* store IV */ CLEAR_REG(v16) CLEAR_REG(v1) CLEAR_REG(v2) add sp, sp, #64; CFI_ADJUST_CFA_OFFSET(-64); .Lcbc_dec_skip: - ret + ret_spec_stop CFI_ENDPROC(); ELF(.size _gcry_aes_cbc_dec_armv8_ce,.-_gcry_aes_cbc_dec_armv8_ce;) /* * void _gcry_aes_ctr_enc_armv8_ce (const void *keysched, * unsigned char *outbuf, * const unsigned char *inbuf, * unsigned char *iv, unsigned int nrounds); */ .align 3 .globl _gcry_aes_ctr_enc_armv8_ce ELF(.type _gcry_aes_ctr_enc_armv8_ce,%function;) _gcry_aes_ctr_enc_armv8_ce: /* input: * r0: keysched * r1: outbuf * r2: inbuf * r3: iv * x4: nblocks * w5: nrounds */ CFI_STARTPROC(); cbz x4, .Lctr_enc_skip add x8, sp, #-64 add sp, sp, #-128; CFI_ADJUST_CFA_OFFSET(128); mov w6, #(1 << 24) movi v16.16b, #0 mov v16.S[3], w6 /* 1 */ /* load IV */ ldp x9, x10, [x3] ld1 {v0.16b}, [x3] rev x9, x9 rev x10, x10 mov x12, #(4 << 56) lsl x11, x10, #56 aes_preload_keys(x0, w5); b.eq .Lctr_enc_entry_192 b.hi .Lctr_enc_entry_256 #define 
CTR_ENC(bits) \ .Lctr_enc_entry_##bits: \ cmp x4, #4; \ b.lo .Lctr_enc_loop_##bits; \ \ st1 {v8.16b-v11.16b}, [sp]; /* store callee saved registers */ \ \ adds x11, x11, x12; \ add v9.4s, v16.4s, v16.4s; /* 2 */ \ add v10.4s, v16.4s, v9.4s; /* 3 */ \ add v11.4s, v9.4s, v9.4s; /* 4 */ \ mov x7, #1; \ sub x4, x4, #4; \ ld1 {v5.16b-v8.16b}, [x2], #64; /* preload ciphertext */ \ b.cs .Lctr_enc_carry4_##bits; \ \ mov v1.16b, v0.16b; \ add x10, x10, #4; \ add v2.16b, v0.16b, v16.16b; \ add v3.8h, v0.8h, v9.8h; \ add v4.4s, v0.4s, v10.4s; \ add v0.2d, v0.2d, v11.2d; \ \ .Lctr_enc_entry4_##bits##_carry_done: \ mov x7, #0; \ cmp x4, #4; \ do_aes_4_part1(e, mc, v1, v2, v3, v4, vk0); \ b.lo .Lctr_enc_done4_##bits; \ \ st1 {v12.16b-v15.16b}, [x8]; /* store callee saved registers */ \ \ .Lctr_enc_loop4_##bits: \ eor v5.16b, v5.16b, vklast.16b; \ eor v6.16b, v6.16b, vklast.16b; \ eor v7.16b, v7.16b, vklast.16b; \ eor v8.16b, v8.16b, vklast.16b; \ do_aes_4_part2_##bits(e, mc, v12, v13, v14, v15, v1, v2, v3, v4, v5, v6, v7, v8); \ ld1 {v5.16b-v8.16b}, [x2], #64; /* preload ciphertext */ \ adds x11, x11, x12; \ sub x4, x4, #4; \ b.cs .Lctr_enc_carry4_##bits; \ \ mov v1.16b, v0.16b; \ add x10, x10, #4; \ add v2.16b, v0.16b, v16.16b; \ add v3.8h, v0.8h, v9.8h; \ add v4.4s, v0.4s, v10.4s; \ add v0.2d, v0.2d, v11.2d; \ \ .Lctr_enc_loop4_##bits##_carry_done: \ cmp x4, #4; \ do_aes_4_part1(e, mc, v1, v2, v3, v4, vk0); \ st1 {v12.16b-v15.16b}, [x1], #64; /* store plaintext */ \ \ b.hs .Lctr_enc_loop4_##bits; \ \ ld1 {v12.16b-v15.16b}, [x8]; /* restore callee saved registers */ \ \ .Lctr_enc_done4_##bits: \ eor v5.16b, v5.16b, vklast.16b; \ eor v6.16b, v6.16b, vklast.16b; \ eor v7.16b, v7.16b, vklast.16b; \ eor v8.16b, v8.16b, vklast.16b; \ do_aes_4_part2_##bits(e, mc, v5, v6, v7, v8, v1, v2, v3, v4, v5, v6, v7, v8); \ \ st1 {v5.16b-v8.16b}, [x1], #64; /* store plaintext */ \ \ CLEAR_REG(v3); \ CLEAR_REG(v4); \ ld1 {v8.16b-v11.16b}, [sp]; /* restore callee saved registers */ \ CLEAR_REG(v5); \ CLEAR_REG(v6); \ CLEAR_REG(v7); \ cbz x4, .Lctr_enc_done; \ \ .Lctr_enc_loop_##bits: \ \ adds x10, x10, #1; \ mov v1.16b, v0.16b; \ adc x9, x9, xzr; \ dup v0.2d, x10; \ sub x4, x4, #1; \ ins v0.D[0], x9; \ ld1 {v2.16b}, [x2], #16; /* load ciphertext */ \ rev64 v0.16b, v0.16b; \ \ do_aes_one_part1(e, mc, v1, vk0); \ eor v2.16b, v2.16b, vklast.16b; \ do_aes_one_part2_##bits(e, mc, v1, __, __); \ \ eor v1.16b, v1.16b, v2.16b; \ st1 {v1.16b}, [x1], #16; /* store plaintext */ \ \ cbnz x4, .Lctr_enc_loop_##bits; \ b .Lctr_enc_done; \ \ .Lctr_enc_carry4_##bits: \ \ adds x13, x10, #1; \ mov v1.16b, v0.16b; \ adc x14, x9, xzr; \ dup v2.2d, x13; \ adds x13, x10, #2; \ ins v2.D[0], x14; \ adc x14, x9, xzr; \ rev64 v2.16b, v2.16b; \ dup v3.2d, x13; \ adds x13, x10, #3; \ ins v3.D[0], x14; \ adc x14, x9, xzr; \ rev64 v3.16b, v3.16b; \ dup v4.2d, x13; \ adds x10, x10, #4; \ ins v4.D[0], x14; \ adc x9, x9, xzr; \ rev64 v4.16b, v4.16b; \ dup v0.2d, x10; \ ins v0.D[0], x9; \ rev64 v0.16b, v0.16b; \ \ cbz x7, .Lctr_enc_loop4_##bits##_carry_done; \ b .Lctr_enc_entry4_##bits##_carry_done; CTR_ENC(128) CTR_ENC(192) CTR_ENC(256) #undef CTR_ENC .Lctr_enc_done: aes_clear_keys(w5) st1 {v0.16b}, [x3] /* store IV */ CLEAR_REG(v0) CLEAR_REG(v1) CLEAR_REG(v2) CLEAR_REG(v16) add sp, sp, #128; CFI_ADJUST_CFA_OFFSET(-128); .Lctr_enc_skip: - ret + ret_spec_stop CFI_ENDPROC(); ELF(.size _gcry_aes_ctr_enc_armv8_ce,.-_gcry_aes_ctr_enc_armv8_ce;) /* * void _gcry_aes_ctr32le_enc_armv8_ce (const void *keysched, * unsigned char *outbuf, * const unsigned char 
*inbuf, * unsigned char *iv, * unsigned int nrounds); */ .align 3 .globl _gcry_aes_ctr32le_enc_armv8_ce ELF(.type _gcry_aes_ctr32le_enc_armv8_ce,%function;) _gcry_aes_ctr32le_enc_armv8_ce: /* input: * r0: keysched * r1: outbuf * r2: inbuf * r3: iv * x4: nblocks * w5: nrounds */ CFI_STARTPROC(); cbz x4, .Lctr32le_enc_skip add x8, sp, #-64 add sp, sp, #-128; CFI_ADJUST_CFA_OFFSET(128); mov w6, #1 movi v16.16b, #0 mov v16.S[0], w6 /* load IV */ ld1 {v0.16b}, [x3] aes_preload_keys(x0, w5); b.eq .Lctr32le_enc_entry_192 b.hi .Lctr32le_enc_entry_256 #define CTR32LE_ENC(bits) \ .Lctr32le_enc_entry_##bits: \ cmp x4, #4; \ b.lo .Lctr32le_enc_loop_##bits; \ \ st1 {v8.16b-v11.16b}, [sp]; /* store callee saved registers */ \ add v9.4s, v16.4s, v16.4s; /* 2 */ \ cmp x4, #8; \ add v10.4s, v9.4s, v16.4s; /* 3 */ \ sub x4, x4, #4; \ add v11.4s, v9.4s, v9.4s; /* 4 */ \ \ ld1 {v5.16b-v8.16b}, [x2], #64; /* preload ciphertext */ \ \ mov v1.16b, v0.16b; \ add v2.4s, v0.4s, v16.4s; \ add v3.4s, v0.4s, v9.4s; \ add v4.4s, v0.4s, v10.4s; \ add v0.4s, v0.4s, v11.4s; \ \ do_aes_4_part1(e, mc, v1, v2, v3, v4, vk0); \ b.lo .Lctr32le_enc_done4_##bits; \ \ st1 {v12.16b-v15.16b}, [x8]; /* store callee saved registers */ \ \ .Lctr32le_enc_loop4_##bits: \ eor v5.16b, v5.16b, vklast.16b; \ eor v6.16b, v6.16b, vklast.16b; \ eor v7.16b, v7.16b, vklast.16b; \ eor v8.16b, v8.16b, vklast.16b; \ do_aes_4_part2_##bits(e, mc, v12, v13, v14, v15, v1, v2, v3, v4, v5, v6, v7, v8); \ ld1 {v5.16b-v8.16b}, [x2], #64; /* preload ciphertext */ \ \ cmp x4, #8; \ sub x4, x4, #4; \ \ mov v1.16b, v0.16b; \ add v2.4s, v0.4s, v16.4s; \ add v3.4s, v0.4s, v9.4s; \ add v4.4s, v0.4s, v10.4s; \ add v0.4s, v0.4s, v11.4s; \ \ do_aes_4_part1(e, mc, v1, v2, v3, v4, vk0); \ st1 {v12.16b-v15.16b}, [x1], #64; /* store plaintext */ \ \ b.hs .Lctr32le_enc_loop4_##bits; \ \ ld1 {v12.16b-v15.16b}, [x8]; /* restore callee saved registers */ \ \ .Lctr32le_enc_done4_##bits: \ eor v5.16b, v5.16b, vklast.16b; \ eor v6.16b, v6.16b, vklast.16b; \ eor v7.16b, v7.16b, vklast.16b; \ eor v8.16b, v8.16b, vklast.16b; \ do_aes_4_part2_##bits(e, mc, v5, v6, v7, v8, v1, v2, v3, v4, v5, v6, v7, v8); \ \ st1 {v5.16b-v8.16b}, [x1], #64; /* store plaintext */ \ CLEAR_REG(v3); \ CLEAR_REG(v4); \ ld1 {v8.16b-v11.16b}, [sp]; /* restore callee saved registers */ \ CLEAR_REG(v5); \ CLEAR_REG(v6); \ CLEAR_REG(v7); \ cbz x4, .Lctr32le_enc_done; \ \ .Lctr32le_enc_loop_##bits: \ \ mov v1.16b, v0.16b; \ ld1 {v2.16b}, [x2], #16; /* load ciphertext */ \ sub x4, x4, #1; \ add v0.4s, v0.4s, v16.4s; \ \ do_aes_one_part1(e, mc, v1, vk0); \ eor v2.16b, v2.16b, vklast.16b; \ do_aes_one_part2_##bits(e, mc, v1, __, __); \ \ eor v1.16b, v1.16b, v2.16b; \ st1 {v1.16b}, [x1], #16; /* store plaintext */ \ \ cbnz x4, .Lctr32le_enc_loop_##bits; \ b .Lctr32le_enc_done; CTR32LE_ENC(128) CTR32LE_ENC(192) CTR32LE_ENC(256) #undef CTR32LE_ENC .Lctr32le_enc_done: aes_clear_keys(w5) st1 {v0.16b}, [x3] /* store IV */ CLEAR_REG(v0) CLEAR_REG(v1) CLEAR_REG(v2) CLEAR_REG(v16) add sp, sp, #128; CFI_ADJUST_CFA_OFFSET(-128); .Lctr32le_enc_skip: - ret + ret_spec_stop CFI_ENDPROC(); ELF(.size _gcry_aes_ctr32le_enc_armv8_ce,.-_gcry_aes_ctr32le_enc_armv8_ce;) /* * void _gcry_aes_cfb_enc_armv8_ce (const void *keysched, * unsigned char *outbuf, * const unsigned char *inbuf, * unsigned char *iv, unsigned int nrounds); */ .align 3 .globl _gcry_aes_cfb_enc_armv8_ce ELF(.type _gcry_aes_cfb_enc_armv8_ce,%function;) _gcry_aes_cfb_enc_armv8_ce: /* input: * r0: keysched * r1: outbuf * r2: inbuf * r3: iv * x4: nblocks * w5: nrounds */ 
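	/* CFB-128 encryption computes C[i] = P[i] ^ E_K(C[i-1]) with
	 * C[-1] = IV, and writes the last ciphertext block back as the new
	 * IV.  A rough C sketch of that chaining, for illustration only
	 * ('cfb128_enc_ref' and the 'aes_encrypt_block' callback are
	 * placeholder names, not libgcrypt APIs):
	 *
	 *   #include <stddef.h>
	 *   #include <string.h>
	 *
	 *   // Hypothetical single-block encrypt callback, dst = E_K(src).
	 *   typedef void (*aes_encrypt_block) (const void *keysched,
	 *                                      unsigned char dst[16],
	 *                                      const unsigned char src[16]);
	 *
	 *   static void
	 *   cfb128_enc_ref (aes_encrypt_block enc, const void *keysched,
	 *                   unsigned char *outbuf, const unsigned char *inbuf,
	 *                   unsigned char iv[16], size_t nblocks)
	 *   {
	 *     unsigned char ek[16];
	 *     size_t i;
	 *
	 *     while (nblocks--)
	 *       {
	 *         enc (keysched, ek, iv);          // ek = E_K(previous C / IV)
	 *         for (i = 0; i < 16; i++)
	 *           outbuf[i] = inbuf[i] ^ ek[i];  // C[i] = P[i] ^ ek
	 *         memcpy (iv, outbuf, 16);         // chain C[i] as the next IV
	 *         inbuf += 16;
	 *         outbuf += 16;
	 *       }
	 *   }
	 *
	 * In the loop below, v4 = vk0 ^ vklast ^ P[i] is handed to
	 * do_aes_one_part1() as if it were the first round key, so the
	 * previous block's final AddRoundKey, the CFB feedback XOR and
	 * round 0's AddRoundKey collapse into a single eor and the next
	 * encryption can start before the ciphertext block is fully formed. */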
CFI_STARTPROC(); cbz x4, .Lcfb_enc_skip /* load IV */ ld1 {v0.16b}, [x3] aes_preload_keys(x0, w5); ld1 {v1.16b}, [x2], #16; /* load plaintext */ eor v3.16b, vk0.16b, vklast.16b; eor v0.16b, v0.16b, vklast.16b; sub x4, x4, #1; mov v4.16b, v3.16b; do_aes_one_part1(e, mc, v0, v4); b.eq .Lcfb_enc_entry_192 b.hi .Lcfb_enc_entry_256 #define CFB_ENC(bits) \ .Lcfb_enc_entry_##bits: \ cbz x4, .Lcfb_enc_done_##bits; \ \ .Lcfb_enc_loop_##bits: \ eor v2.16b, v1.16b, vklast.16b; \ do_aes_one_part2_##bits(e, mc, v0, \ _(eor v4.16b, v3.16b, v1.16b), \ _(ld1 {v1.16b}, [x2], #16 /* load plaintext */)); \ sub x4, x4, #1; \ eor v2.16b, v2.16b, v0.16b; \ do_aes_one_part1(e, mc, v0, v4); \ st1 {v2.16b}, [x1], #16; /* store ciphertext */ \ cbnz x4, .Lcfb_enc_loop_##bits; \ \ .Lcfb_enc_done_##bits: \ eor v2.16b, v1.16b, vklast.16b; \ do_aes_one_part2_##bits(e, mc, v0, __, __); \ b .Lcfb_enc_done; CFB_ENC(128) CFB_ENC(192) CFB_ENC(256) #undef CFB_ENC .Lcfb_enc_done: eor v2.16b, v2.16b, v0.16b; st1 {v2.16b}, [x1]; /* store ciphertext */ aes_clear_keys(w5) st1 {v2.16b}, [x3] /* store IV */ CLEAR_REG(v0) CLEAR_REG(v1) CLEAR_REG(v2) CLEAR_REG(v3) CLEAR_REG(v4) .Lcfb_enc_skip: - ret + ret_spec_stop CFI_ENDPROC(); ELF(.size _gcry_aes_cfb_enc_armv8_ce,.-_gcry_aes_cfb_enc_armv8_ce;) /* * void _gcry_aes_cfb_dec_armv8_ce (const void *keysched, * unsigned char *outbuf, * const unsigned char *inbuf, * unsigned char *iv, unsigned int nrounds); */ .align 3 .globl _gcry_aes_cfb_dec_armv8_ce ELF(.type _gcry_aes_cfb_dec_armv8_ce,%function;) _gcry_aes_cfb_dec_armv8_ce: /* input: * r0: keysched * r1: outbuf * r2: inbuf * r3: iv * x4: nblocks * w5: nrounds */ CFI_STARTPROC(); cbz x4, .Lcfb_dec_skip add sp, sp, #-64; CFI_ADJUST_CFA_OFFSET(64); /* load IV */ ld1 {v0.16b}, [x3] aes_preload_keys(x0, w5); b.eq .Lcfb_dec_entry_192 b.hi .Lcfb_dec_entry_256 #define CFB_DEC(bits) \ .Lcfb_dec_entry_##bits: \ cmp x4, #4; \ b.lo .Lcfb_dec_loop_##bits; \ \ ld1 {v2.16b-v5.16b}, [x2], #64; /* load ciphertext */ \ cmp x4, #8; \ mov v1.16b, v0.16b; \ sub x4, x4, #4; \ eor v6.16b, v2.16b, vklast.16b; \ eor v7.16b, v3.16b, vklast.16b; \ eor v16.16b, v4.16b, vklast.16b; \ mov v0.16b, v5.16b; /* next IV */ \ eor v5.16b, v5.16b, vklast.16b; \ \ do_aes_4_part1(e, mc, v1, v2, v3, v4, vk0); \ b.lo .Lcfb_dec_done4_##bits; \ \ st1 {v8.16b-v11.16b}, [sp]; /* store callee saved registers */ \ \ .Lcfb_dec_loop4_##bits: \ do_aes_4_part2_##bits(e, mc, v8, v9, v10, v11, v1, v2, v3, v4, v6, v7, v16, v5); \ ld1 {v2.16b-v5.16b}, [x2], #64; /* load ciphertext */ \ cmp x4, #8; \ mov v1.16b, v0.16b; \ sub x4, x4, #4; \ eor v6.16b, v2.16b, vklast.16b; \ eor v7.16b, v3.16b, vklast.16b; \ eor v16.16b, v4.16b, vklast.16b; \ mov v0.16b, v5.16b; /* next IV */ \ eor v5.16b, v5.16b, vklast.16b; \ \ do_aes_4_part1(e, mc, v1, v2, v3, v4, vk0); \ st1 {v8.16b-v11.16b}, [x1], #64; /* store plaintext */ \ \ b.hs .Lcfb_dec_loop4_##bits; \ \ ld1 {v8.16b-v11.16b}, [sp]; /* restore callee saved registers */ \ \ .Lcfb_dec_done4_##bits: \ do_aes_4_part2_##bits(e, mc, v1, v2, v3, v4, v1, v2, v3, v4, v6, v7, v16, v5); \ \ CLEAR_REG(v5); \ CLEAR_REG(v6); \ CLEAR_REG(v7); \ st1 {v1.16b-v4.16b}, [x1], #64; /* store plaintext */ \ CLEAR_REG(v3); \ CLEAR_REG(v4); \ cbz x4, .Lcfb_dec_done; \ \ .Lcfb_dec_loop_##bits: \ ld1 {v1.16b}, [x2], #16; /* load ciphertext */ \ sub x4, x4, #1; \ \ do_aes_one_part1(e, mc, v0, vk0); \ eor v2.16b, v1.16b, vklast.16b; \ do_aes_one_part2_##bits(e, mc, v0, __, __); \ eor v2.16b, v2.16b, v0.16b; \ \ mov v0.16b, v1.16b; \ st1 {v2.16b}, [x1], #16; /* store plaintext */ 
\ \ cbnz x4, .Lcfb_dec_loop_##bits; \ b .Lcfb_dec_done; CFB_DEC(128) CFB_DEC(192) CFB_DEC(256) #undef CFB_DEC .Lcfb_dec_done: aes_clear_keys(w5) st1 {v0.16b}, [x3] /* store IV */ CLEAR_REG(v0) CLEAR_REG(v1) CLEAR_REG(v2) CLEAR_REG(v16) add sp, sp, #64; CFI_ADJUST_CFA_OFFSET(-64); .Lcfb_dec_skip: - ret + ret_spec_stop CFI_ENDPROC(); ELF(.size _gcry_aes_cfb_dec_armv8_ce,.-_gcry_aes_cfb_dec_armv8_ce;) /* * void _gcry_aes_ocb_enc_armv8_ce (const void *keysched, * unsigned char *outbuf, * const unsigned char *inbuf, * unsigned char *offset, * unsigned char *checksum, * unsigned char *L_table, * size_t nblocks, * unsigned int nrounds, * unsigned int blkn); */ .align 3 .globl _gcry_aes_ocb_enc_armv8_ce ELF(.type _gcry_aes_ocb_enc_armv8_ce,%function;) _gcry_aes_ocb_enc_armv8_ce: /* input: * x0: keysched * x1: outbuf * x2: inbuf * x3: offset * x4: checksum * x5: Ltable * x6: nblocks (0 < nblocks) * w7: nrounds * %st+0: blkn => w12 */ CFI_STARTPROC(); ldr w12, [sp] ld1 {v0.16b}, [x3] /* load offset */ ld1 {v16.16b}, [x4] /* load checksum */ add x16, sp, #-64; add sp, sp, #-128; CFI_ADJUST_CFA_OFFSET(128); aes_preload_keys(x0, w7); st1 {v8.16b-v11.16b}, [sp]; /* store callee saved registers */ eor v0.16b, v0.16b, vk0.16b; /* offset ^ first key */ eor v9.16b, vk0.16b, vklast.16b; /* first key ^ last key */ b.eq .Locb_ecry_entry_192 b.hi .Locb_ecry_entry_256 #define OCB_CRYPT(bits, ed, mcimc) \ .Locb_##ed##cry_entry_##bits: \ /* Get number of blocks to align nblk to 4. */ \ neg x13, x12; \ add x12, x12, #1; /* Pre-increment nblk for ntz calculation */ \ and x13, x13, #(4-1); \ cmp x13, x6; \ csel x13, x6, x13, hi; \ cbz x13, .Locb_##ed##cry_alignment_ok_##bits; \ \ /* Number of blocks after alignment. */ \ sub x14, x6, x13; \ \ /* If number after alignment is less than 4, skip aligned handling \ * completely. */ \ cmp x14, #4; \ csel x13, x6, x13, lo; \ \ .Locb_##ed##cry_unaligned_entry_##bits: \ cmp x13, #4; \ \ .Locb_##ed##cry_loop1_##bits: \ \ /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ \ /* Checksum_i = Checksum_{i-1} xor P_i */ \ /* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i) */ \ \ rbit x8, x12; \ add x12, x12, #1; \ clz x8, x8; /* ntz(i) */ \ add x8, x5, x8, lsl #4; \ \ ld1 {v1.16b}, [x2], #16; /* load plaintext */ \ ld1 {v2.16b}, [x8]; /* load L_{ntz(i)} */ \ eor v0.16b, v0.16b, v2.16b; \ sub x13, x13, #1; \ ENC(eor v16.16b, v16.16b, v1.16b); \ sub x6, x6, #1; \ \ do_aes_one_part1(ed, mcimc, v1, v0); \ eor v2.16b, v0.16b, v9.16b; \ do_aes_one_part2_##bits(ed, mcimc, v1, __, __); \ eor v1.16b, v1.16b, v2.16b; \ st1 {v1.16b}, [x1], #16; /* store ciphertext */ \ DEC(eor v16.16b, v16.16b, v1.16b); \ \ cbnz x13, .Locb_##ed##cry_loop1_##bits; \ \ cbz x6, .Locb_##ed##cry_done; \ \ /* nblk is now aligned and we have 4 or more blocks. So jump directly to \ * aligned processing. */ \ b .Locb_##ed##cry_aligned_entry_##bits; \ \ .Locb_##ed##cry_alignment_ok_##bits: \ cbz x6, .Locb_##ed##cry_done; \ \ /* Short buffers do not benefit from L-array optimization. */ \ cmp x6, #4; \ mov x13, x6; \ b.lo .Locb_##ed##cry_unaligned_entry_##bits; \ \ .Locb_##ed##cry_aligned_entry_##bits: \ /* Prepare L-array optimization. 
\ * Since nblk is aligned to 4, offsets will have following construction: \ * - block1 = ntz{0} = offset ^ L[0] \ * - block2 = ntz{1} = offset ^ L[0] ^ L[1] \ * - block3 = ntz{0} = offset ^ L[1] \ * - block4 = ntz{x} = offset ^ L[1] ^ L[ntz{x}] \ */ \ ld1 {v10.16b-v11.16b}, [x5]; /* preload L[0] && L[1] */ \ mov x15, #4; \ \ st1 {v12.16b-v15.16b}, [x16]; /* store callee saved registers */ \ \ /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ \ /* Checksum_i = Checksum_{i-1} xor P_i */ \ /* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i) */ \ \ add x11, x12, #3; \ ld1 {v1.16b-v4.16b}, [x2], #64; /* load P_i+<0-3> */ \ rbit x11, x11; \ eor v6.16b, v10.16b, v11.16b; /* L[0] ^ L[1] */ \ ENC(eor v16.16b, v16.16b, v1.16b); /* Checksum_i+0 */ \ add x12, x12, #4; \ clz x11, x11; /* ntz(i+3) */ \ add x15, x15, #4; \ add x11, x5, x11, lsl #4; \ \ eor v5.16b, v0.16b, v10.16b; /* Offset_i+0 */ \ ENC(eor v16.16b, v16.16b, v2.16b); /* Checksum_i+1 */ \ ld1 {v8.16b}, [x11]; /* load L_{ntz(i+3)} */ \ ENC(eor v16.16b, v16.16b, v3.16b); /* Checksum_i+2 */ \ eor v6.16b, v0.16b, v6.16b; /* Offset_i+1 */ \ ENC(eor v16.16b, v16.16b, v4.16b); /* Checksum_i+3 */ \ eor v7.16b, v0.16b, v11.16b; /* Offset_i+2 */ \ eor v8.16b, v8.16b, v11.16b; /* L[1] ^ L[ntz{x}] */ \ cmp x15, x13; \ eor v0.16b, v0.16b, v8.16b; /* Offset_i+3 */ \ \ do_aes_4_part1_multikey(ed, mcimc, v1, v2, v3, v4, v5, v6, v7, v0); /* P_i+j xor Offset_i+j */ \ b.hi .Locb_##ed##cry_aligned_done4_##bits; \ \ .Locb_##ed##cry_aligned_loop4_##bits: \ add x11, x12, #3; \ eor v5.16b, v5.16b, v9.16b; \ eor v6.16b, v6.16b, v9.16b; \ rbit x11, x11; \ eor v7.16b, v7.16b, v9.16b; \ eor v8.16b, v0.16b, v9.16b; \ clz x11, x11; /* ntz(i+3) */ \ do_aes_4_part2_##bits(ed, mcimc, v12, v13, v14, v15, v1, v2, v3, v4, v5, v6, v7, v8); /* xor Offset_i+j */ \ \ /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ \ /* Checksum_i = Checksum_{i-1} xor P_i */ \ /* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i) */ \ \ add x12, x12, #4; \ ld1 {v1.16b-v4.16b}, [x2], #64; /* load P_i+<0-3> */ \ eor v6.16b, v10.16b, v11.16b; /* L[0] ^ L[1] */ \ add x15, x15, #4; \ DEC(eor v16.16b, v16.16b, v12.16b); /* Checksum_i+0 */ \ ENC(eor v16.16b, v16.16b, v1.16b); /* Checksum_i+0 */ \ add x11, x5, x11, lsl #4; \ \ eor v5.16b, v0.16b, v10.16b; /* Offset_i+0 */ \ ENC(eor v16.16b, v16.16b, v2.16b); /* Checksum_i+1 */ \ DEC(eor v16.16b, v16.16b, v13.16b); /* Checksum_1+2 */ \ ld1 {v8.16b}, [x11]; /* load L_{ntz(i+3)} */ \ ENC(eor v16.16b, v16.16b, v3.16b); /* Checksum_i+2 */ \ DEC(eor v16.16b, v16.16b, v14.16b); /* Checksum_i+0+3 */ \ eor v6.16b, v0.16b, v6.16b; /* Offset_i+1 */ \ ENC(eor v16.16b, v16.16b, v4.16b); /* Checksum_i+3 */ \ DEC(eor v16.16b, v16.16b, v15.16b); /* Checksum_i+0+1+2 */ \ eor v7.16b, v0.16b, v11.16b; /* Offset_i+2 */ \ eor v8.16b, v8.16b, v11.16b; /* L[1] ^ L[ntz{x}] */ \ cmp x15, x13; \ eor v0.16b, v0.16b, v8.16b; /* Offset_i+3 */ \ \ do_aes_4_part1_multikey(ed, mcimc, v1, v2, v3, v4, v5, v6, v7, v0); /* P_i+j xor Offset_i+j */ \ st1 {v12.16b-v15.16b}, [x1], #64; \ \ b.ls .Locb_##ed##cry_aligned_loop4_##bits; \ \ .Locb_##ed##cry_aligned_done4_##bits: \ eor v5.16b, v5.16b, v9.16b; \ eor v6.16b, v6.16b, v9.16b; \ eor v7.16b, v7.16b, v9.16b; \ eor v8.16b, v0.16b, v9.16b; \ do_aes_4_part2_##bits(ed, mcimc, v1, v2, v3, v4, v1, v2, v3, v4, v5, v6, v7, v8); /* xor Offset_i+j */ \ DEC(eor v16.16b, v16.16b, v1.16b); /* Checksum_i+0 */ \ DEC(eor v5.16b, v2.16b, v3.16b); /* Checksum_1+2 */ \ DEC(eor v16.16b, v16.16b, v4.16b); /* Checksum_i+0+3 */ \ st1 {v1.16b-v4.16b}, [x1], #64; \ DEC(eor 
v16.16b, v16.16b, v5.16b); /* Checksum_i+0+1+2 */ \ \ sub x15, x15, #4; \ CLEAR_REG(v3); \ CLEAR_REG(v4); \ ld1 {v12.16b-v15.16b}, [x16]; /* restore callee saved registers */ \ sub x13, x13, x15; \ sub x6, x6, x15; \ CLEAR_REG(v5); \ CLEAR_REG(v6); \ \ /* Handle tailing 1…3 blocks in unaligned loop. */ \ mov x13, x6; \ cbnz x6, .Locb_##ed##cry_unaligned_entry_##bits; \ \ b .Locb_##ed##cry_done; #define ENC(...) __VA_ARGS__ #define DEC(...) /*_*/ OCB_CRYPT(128, e, mc) OCB_CRYPT(192, e, mc) OCB_CRYPT(256, e, mc) #undef ENC #undef DEC .Locb_ecry_done: eor v0.16b, v0.16b, vk0.16b; /* restore offset */ ld1 {v8.16b-v11.16b}, [sp]; /* restore callee saved registers */ aes_clear_keys(w7) st1 {v16.16b}, [x4] /* store checksum */ st1 {v0.16b}, [x3] /* store offset */ CLEAR_REG(v0) CLEAR_REG(v1) CLEAR_REG(v2) CLEAR_REG(v7) CLEAR_REG(v16) add sp, sp, #128; CFI_ADJUST_CFA_OFFSET(-128); - ret + ret_spec_stop CFI_ENDPROC(); ELF(.size _gcry_aes_ocb_enc_armv8_ce,.-_gcry_aes_ocb_enc_armv8_ce;) /* * void _gcry_aes_ocb_dec_armv8_ce (const void *keysched, * unsigned char *outbuf, * const unsigned char *inbuf, * unsigned char *offset, * unsigned char *checksum, * unsigned char *L_table, * size_t nblocks, * unsigned int nrounds, * unsigned int blkn); */ .align 3 .globl _gcry_aes_ocb_dec_armv8_ce ELF(.type _gcry_aes_ocb_dec_armv8_ce,%function;) _gcry_aes_ocb_dec_armv8_ce: /* input: * x0: keysched * x1: outbuf * x2: inbuf * x3: offset * x4: checksum * x5: Ltable * x6: nblocks (0 < nblocks) * w7: nrounds * %st+0: blkn => w12 */ CFI_STARTPROC(); ldr w12, [sp] ld1 {v0.16b}, [x3] /* load offset */ ld1 {v16.16b}, [x4] /* load checksum */ add x16, sp, #-64; add sp, sp, #-128; CFI_ADJUST_CFA_OFFSET(128); aes_preload_keys(x0, w7); st1 {v8.16b-v11.16b}, [sp]; /* store callee saved registers */ eor v0.16b, v0.16b, vk0.16b; /* offset ^ first key */ eor v9.16b, vk0.16b, vklast.16b; /* first key ^ last key */ b.eq .Locb_dcry_entry_192 b.hi .Locb_dcry_entry_256 #define ENC(...) /*_*/ #define DEC(...) 
__VA_ARGS__ OCB_CRYPT(128, d, imc) OCB_CRYPT(192, d, imc) OCB_CRYPT(256, d, imc) #undef ENC #undef DEC #undef OCB_CRYPT .Locb_dcry_done: eor v0.16b, v0.16b, vk0.16b; /* restore offset */ ld1 {v8.16b-v11.16b}, [sp]; /* restore callee saved registers */ aes_clear_keys(w7) st1 {v16.16b}, [x4] /* store checksum */ st1 {v0.16b}, [x3] /* store offset */ CLEAR_REG(v0) CLEAR_REG(v1) CLEAR_REG(v2) CLEAR_REG(v16) add sp, sp, #128; CFI_ADJUST_CFA_OFFSET(-128); - ret + ret_spec_stop CFI_ENDPROC(); ELF(.size _gcry_aes_ocb_dec_armv8_ce,.-_gcry_aes_ocb_dec_armv8_ce;) /* * void _gcry_aes_ocb_auth_armv8_ce (const void *keysched, * const unsigned char *abuf, * unsigned char *offset, * unsigned char *checksum, * unsigned char *L_table, * size_t nblocks, * unsigned int nrounds, * unsigned int blkn); */ .align 3 .globl _gcry_aes_ocb_auth_armv8_ce ELF(.type _gcry_aes_ocb_auth_armv8_ce,%function;) _gcry_aes_ocb_auth_armv8_ce: /* input: * x0: keysched * x1: abuf * x2: offset => x3 * x3: checksum => x4 * x4: Ltable => x5 * x5: nblocks => x6 (0 < nblocks <= 32) * w6: nrounds => w7 * w7: blkn => w12 */ CFI_STARTPROC(); mov w12, w7 mov w7, w6 mov x6, x5 mov x5, x4 mov x4, x3 mov x3, x2 aes_preload_keys(x0, w7); ld1 {v0.16b}, [x3] /* load offset */ ld1 {v16.16b}, [x4] /* load checksum */ beq .Locb_auth_entry_192 bhi .Locb_auth_entry_256 #define OCB_AUTH(bits) \ .Locb_auth_entry_##bits: \ cmp x6, #4; \ add w12, w12, #1; \ b.lo .Locb_auth_loop_##bits; \ \ .Locb_auth_loop4_##bits: \ \ /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ \ /* Sum_i = Sum_{i-1} xor ENCIPHER(K, A_i xor Offset_i) */ \ \ add w9, w12, #1; \ add w10, w12, #2; \ add w11, w12, #3; \ rbit w8, w12; \ add w12, w12, #4; \ rbit w9, w9; \ rbit w10, w10; \ rbit w11, w11; \ clz w8, w8; /* ntz(i+0) */ \ clz w9, w9; /* ntz(i+1) */ \ clz w10, w10; /* ntz(i+2) */ \ clz w11, w11; /* ntz(i+3) */ \ add x8, x5, x8, lsl #4; \ ld1 {v1.16b-v4.16b}, [x1], #64; /* load A_i+<0-3> */ \ add x9, x5, x9, lsl #4; \ add x10, x5, x10, lsl #4; \ add x11, x5, x11, lsl #4; \ \ sub x6, x6, #4; \ \ ld1 {v5.16b}, [x8]; /* load L_{ntz(i+0)} */ \ ld1 {v6.16b}, [x9]; /* load L_{ntz(i+1)} */ \ ld1 {v7.16b}, [x10]; /* load L_{ntz(i+2)} */ \ eor v5.16b, v5.16b, v0.16b; /* Offset_i+0 */ \ ld1 {v0.16b}, [x11]; /* load L_{ntz(i+3)} */ \ eor v6.16b, v6.16b, v5.16b; /* Offset_i+1 */ \ eor v1.16b, v1.16b, v5.16b; /* A_i+0 xor Offset_i+0 */ \ eor v7.16b, v7.16b, v6.16b; /* Offset_i+2 */ \ eor v2.16b, v2.16b, v6.16b; /* A_i+1 xor Offset_i+1 */ \ eor v0.16b, v0.16b, v7.16b; /* Offset_i+3 */ \ cmp x6, #4; \ eor v3.16b, v3.16b, v7.16b; /* A_i+2 xor Offset_i+2 */ \ eor v4.16b, v4.16b, v0.16b; /* A_i+3 xor Offset_i+3 */ \ \ do_aes_4_##bits(e, mc, v1, v2, v3, v4); \ \ eor v1.16b, v1.16b, v2.16b; \ eor v16.16b, v16.16b, v3.16b; \ eor v1.16b, v1.16b, v4.16b; \ eor v16.16b, v16.16b, v1.16b; \ \ b.hs .Locb_auth_loop4_##bits; \ CLEAR_REG(v3); \ CLEAR_REG(v4); \ CLEAR_REG(v5); \ CLEAR_REG(v6); \ CLEAR_REG(v7); \ cbz x6, .Locb_auth_done; \ \ .Locb_auth_loop_##bits: \ \ /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ \ /* Sum_i = Sum_{i-1} xor ENCIPHER(K, A_i xor Offset_i) */ \ \ rbit w8, w12; \ add w12, w12, #1; \ clz w8, w8; /* ntz(i) */ \ add x8, x5, x8, lsl #4; \ \ ld1 {v1.16b}, [x1], #16; /* load aadtext */ \ ld1 {v2.16b}, [x8]; /* load L_{ntz(i)} */ \ sub x6, x6, #1; \ eor v0.16b, v0.16b, v2.16b; \ eor v1.16b, v1.16b, v0.16b; \ \ do_aes_one##bits(e, mc, v1, v1, vk0) \ \ eor v16.16b, v16.16b, v1.16b; \ \ cbnz x6, .Locb_auth_loop_##bits; \ b .Locb_auth_done; OCB_AUTH(128) OCB_AUTH(192) OCB_AUTH(256) #undef OCB_AUTH 
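As a cross-check of the indexing used in the loops above, here is a rough C model of a single OCB authentication block, ignoring the round-key folding the assembly does for pipelining. It assumes GCC/Clang's __builtin_ctzll, which yields the same ntz(i) value that the rbit+clz pair computes; ocb_auth_block_model, xor_16 and the encrypt_block callback are illustrative placeholders, not libgcrypt interfaces:

#include <stdint.h>

/* Placeholder for one AES block encryption with the preloaded key schedule. */
typedef void (*aes_enc_block_fn)(const void *keysched, uint8_t dst[16],
                                 const uint8_t src[16]);

static void
xor_16 (uint8_t *dst, const uint8_t *a, const uint8_t *b)
{
  int i;
  for (i = 0; i < 16; i++)
    dst[i] = a[i] ^ b[i];
}

/* One block: 'i' is the 1-based block number (blkn after the pre-increment,
 * so i > 0) and L points to the precomputed L_table entries. */
static void
ocb_auth_block_model (const void *keysched, aes_enc_block_fn encrypt_block,
                      uint8_t offset[16], uint8_t sum[16],
                      const uint8_t (*L)[16], uint64_t i,
                      const uint8_t abuf[16])
{
  uint8_t tmp[16];
  unsigned int ntz = (unsigned int)__builtin_ctzll (i); /* same as rbit+clz */

  xor_16 (offset, offset, L[ntz]);    /* Offset_i = Offset_{i-1} ^ L_{ntz(i)} */
  xor_16 (tmp, abuf, offset);         /* A_i ^ Offset_i */
  encrypt_block (keysched, tmp, tmp); /* ENCIPHER(K, A_i ^ Offset_i) */
  xor_16 (sum, sum, tmp);             /* Sum_i = Sum_{i-1} ^ ENCIPHER(...) */
}

In the aligned four-block paths of OCB_CRYPT above, ntz(i) is a known constant (0 or 1) for three out of every four blocks, which is exactly what the preloaded L[0]/L[1] optimization exploits; only the fourth block needs the rbit+clz lookup.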
.Locb_auth_done: aes_clear_keys(w7) st1 {v16.16b}, [x4] /* store checksum */ st1 {v0.16b}, [x3] /* store offset */ CLEAR_REG(v0) CLEAR_REG(v1) CLEAR_REG(v2) CLEAR_REG(v16) - ret + ret_spec_stop CFI_ENDPROC(); ELF(.size _gcry_aes_ocb_auth_armv8_ce,.-_gcry_aes_ocb_auth_armv8_ce;) /* * void _gcry_aes_xts_enc_armv8_ce (const void *keysched, * unsigned char *outbuf, * const unsigned char *inbuf, * unsigned char *tweak, * size_t nblocks, * unsigned int nrounds); */ .align 3 .globl _gcry_aes_xts_enc_armv8_ce ELF(.type _gcry_aes_xts_enc_armv8_ce,%function;) _gcry_aes_xts_enc_armv8_ce: /* input: * r0: keysched * r1: outbuf * r2: inbuf * r3: tweak * x4: nblocks * w5: nrounds */ CFI_STARTPROC(); cbz x4, .Lxts_enc_skip add x16, sp, #-64; add sp, sp, #-128; CFI_ADJUST_CFA_OFFSET(128); /* load tweak */ ld1 {v0.16b}, [x3] /* load gfmul mask */ mov x6, #0x87 mov x7, #0x01 mov v16.D[0], x6 mov v16.D[1], x7 aes_preload_keys(x0, w5); eor vklast.16b, vklast.16b, vk0.16b; b.eq .Lxts_ecry_entry_192 b.hi .Lxts_ecry_entry_256 #define XTS_CRYPT(bits, ed, mcimc) \ .Lxts_##ed##cry_entry_##bits: \ cmp x4, #4; \ b.lo .Lxts_##ed##cry_loop_##bits; \ \ st1 {v8.16b}, [sp]; /* store callee saved registers */ \ ext v4.16b, v0.16b, v0.16b, #8; \ mov v8.16b, v0.16b; \ \ sshr v2.2d, v4.2d, #63; \ add v5.2d, v0.2d, v0.2d; \ and v2.16b, v2.16b, v16.16b; \ add v4.2d, v4.2d, v4.2d; \ eor v5.16b, v5.16b, v2.16b; \ \ sshr v2.2d, v4.2d, #63; \ add v6.2d, v5.2d, v5.2d; \ and v2.16b, v2.16b, v16.16b; \ add v4.2d, v4.2d, v4.2d; \ eor v6.16b, v6.16b, v2.16b; \ \ sshr v2.2d, v4.2d, #63; \ add v7.2d, v6.2d, v6.2d; \ and v2.16b, v2.16b, v16.16b; \ add v4.2d, v4.2d, v4.2d; \ eor v7.16b, v7.16b, v2.16b; \ \ sshr v2.2d, v4.2d, #63; \ add v3.2d, v7.2d, v7.2d; \ and v2.16b, v2.16b, v16.16b; \ add v4.2d, v4.2d, v4.2d; \ eor v0.16b, v3.16b, v2.16b; \ ld1 {v1.16b-v4.16b}, [x2], #64; /* load plaintext */ \ cmp x4, #8; \ sub x4, x4, #4; \ \ eor v8.16b, v8.16b, vk0.16b; \ eor v5.16b, v5.16b, vk0.16b; \ eor v6.16b, v6.16b, vk0.16b; \ eor v7.16b, v7.16b, vk0.16b; \ \ do_aes_4_part1_multikey(ed, mcimc, v1, v2, v3, v4, v8, v5, v6, v7); \ b.lo .Lxts_##ed##cry_done4_##bits; \ \ st1 {v9.16b-v12.16b}, [x16]; /* store callee saved registers */ \ \ .Lxts_##ed##cry_loop4_##bits: \ eor v8.16b, v8.16b, vklast.16b; \ eor v5.16b, v5.16b, vklast.16b; \ eor v6.16b, v6.16b, vklast.16b; \ eor v7.16b, v7.16b, vklast.16b; \ do_aes_4_part2_##bits(ed, mcimc, v9, v10, v11, v12, v1, v2, v3, v4, v8, v5, v6, v7); \ \ ext v4.16b, v0.16b, v0.16b, #8; \ mov v8.16b, v0.16b; \ \ sshr v2.2d, v4.2d, #63; \ add v5.2d, v0.2d, v0.2d; \ and v2.16b, v2.16b, v16.16b; \ add v4.2d, v4.2d, v4.2d; \ eor v5.16b, v5.16b, v2.16b; \ \ sshr v2.2d, v4.2d, #63; \ add v6.2d, v5.2d, v5.2d; \ and v2.16b, v2.16b, v16.16b; \ add v4.2d, v4.2d, v4.2d; \ eor v6.16b, v6.16b, v2.16b; \ \ sshr v2.2d, v4.2d, #63; \ add v7.2d, v6.2d, v6.2d; \ and v2.16b, v2.16b, v16.16b; \ add v4.2d, v4.2d, v4.2d; \ eor v7.16b, v7.16b, v2.16b; \ \ sshr v2.2d, v4.2d, #63; \ add v3.2d, v7.2d, v7.2d; \ and v2.16b, v2.16b, v16.16b; \ add v4.2d, v4.2d, v4.2d; \ eor v0.16b, v3.16b, v2.16b; \ ld1 {v1.16b-v4.16b}, [x2], #64; /* load plaintext */ \ cmp x4, #8; \ sub x4, x4, #4; \ \ eor v8.16b, v8.16b, vk0.16b; \ eor v5.16b, v5.16b, vk0.16b; \ eor v6.16b, v6.16b, vk0.16b; \ eor v7.16b, v7.16b, vk0.16b; \ \ do_aes_4_part1_multikey(ed, mcimc, v1, v2, v3, v4, v8, v5, v6, v7); \ \ st1 {v9.16b-v12.16b}, [x1], #64; /* store plaintext */ \ \ b.hs .Lxts_##ed##cry_loop4_##bits; \ \ ld1 {v9.16b-v12.16b}, [x16]; /* restore callee saved registers */ \ 
\ .Lxts_##ed##cry_done4_##bits: \ eor v8.16b, v8.16b, vklast.16b; \ eor v5.16b, v5.16b, vklast.16b; \ eor v6.16b, v6.16b, vklast.16b; \ eor v7.16b, v7.16b, vklast.16b; \ do_aes_4_part2_##bits(ed, mcimc, v1, v2, v3, v4, v1, v2, v3, v4, v8, v5, v6, v7); \ \ st1 {v1.16b-v4.16b}, [x1], #64; /* store plaintext */ \ \ CLEAR_REG(v4); \ ld1 {v8.16b}, [sp]; /* restore callee saved registers */ \ CLEAR_REG(v5); \ CLEAR_REG(v6); \ CLEAR_REG(v7); \ cbz x4, .Lxts_##ed##cry_done; \ \ .Lxts_##ed##cry_loop_##bits: \ \ ld1 {v1.16b}, [x2], #16; /* load plaintext */ \ ext v3.16b, v0.16b, v0.16b, #8; \ eor v2.16b, v0.16b, vk0.16b; \ sshr v3.2d, v3.2d, #63; \ add v0.2d, v0.2d, v0.2d; \ and v3.16b, v3.16b, v16.16b; \ sub x4, x4, #1; \ eor v0.16b, v0.16b, v3.16b; \ \ do_aes_one_part1(ed, mcimc, v1, v2); \ eor v2.16b, v2.16b, vklast.16b; \ do_aes_one_part2_##bits(ed, mcimc, v1, __, __); \ eor v1.16b, v1.16b, v2.16b; \ \ st1 {v1.16b}, [x1], #16; /* store ciphertext */ \ \ cbnz x4, .Lxts_##ed##cry_loop_##bits; \ b .Lxts_##ed##cry_done; XTS_CRYPT(128, e, mc) XTS_CRYPT(192, e, mc) XTS_CRYPT(256, e, mc) .Lxts_ecry_done: aes_clear_keys(w5) st1 {v0.16b}, [x3] /* store tweak */ CLEAR_REG(v0) CLEAR_REG(v1) CLEAR_REG(v2) CLEAR_REG(v3) CLEAR_REG(v16) add sp, sp, 128; CFI_ADJUST_CFA_OFFSET(-128); .Lxts_enc_skip: - ret + ret_spec_stop CFI_ENDPROC(); ELF(.size _gcry_aes_xts_enc_armv8_ce,.-_gcry_aes_xts_enc_armv8_ce;) /* * void _gcry_aes_xts_dec_armv8_ce (const void *keysched, * unsigned char *outbuf, * const unsigned char *inbuf, * unsigned char *tweak, * size_t nblocks, * unsigned int nrounds); */ .align 3 .globl _gcry_aes_xts_dec_armv8_ce ELF(.type _gcry_aes_xts_dec_armv8_ce,%function;) _gcry_aes_xts_dec_armv8_ce: /* input: * r0: keysched * r1: outbuf * r2: inbuf * r3: tweak * x4: nblocks * w5: nrounds */ CFI_STARTPROC(); cbz x4, .Lxts_dec_skip add x16, sp, #-64; add sp, sp, #-128; CFI_ADJUST_CFA_OFFSET(128); /* load tweak */ ld1 {v0.16b}, [x3] /* load gfmul mask */ mov x6, #0x87 mov x7, #0x01 mov v16.D[0], x6 mov v16.D[1], x7 aes_preload_keys(x0, w5); eor vklast.16b, vklast.16b, vk0.16b; b.eq .Lxts_dcry_entry_192 b.hi .Lxts_dcry_entry_256 XTS_CRYPT(128, d, imc) XTS_CRYPT(192, d, imc) XTS_CRYPT(256, d, imc) #undef XTS_CRYPT .Lxts_dcry_done: aes_clear_keys(w5) st1 {v0.16b}, [x3] /* store tweak */ CLEAR_REG(v0) CLEAR_REG(v1) CLEAR_REG(v2) add sp, sp, 128; CFI_ADJUST_CFA_OFFSET(-128); .Lxts_dec_skip: - ret + ret_spec_stop CFI_ENDPROC(); ELF(.size _gcry_aes_xts_dec_armv8_ce,.-_gcry_aes_xts_dec_armv8_ce;) /* * u32 _gcry_aes_sbox4_armv8_ce(u32 in4b); */ .align 3 .globl _gcry_aes_sbox4_armv8_ce ELF(.type _gcry_aes_sbox4_armv8_ce,%function;) _gcry_aes_sbox4_armv8_ce: /* See "Gouvêa, C. P. L. & López, J. Implementing GCM on ARMv8. Topics in * Cryptology — CT-RSA 2015" for details. 
*/ CFI_STARTPROC(); movi v0.16b, #0x52 movi v1.16b, #0 mov v0.S[0], w0 aese v0.16b, v1.16b addv s0, v0.4s mov w0, v0.S[0] CLEAR_REG(v0) - ret + ret_spec_stop CFI_ENDPROC(); ELF(.size _gcry_aes_sbox4_armv8_ce,.-_gcry_aes_sbox4_armv8_ce;) /* * void _gcry_aes_invmixcol_armv8_ce(void *dst, const void *src); */ .align 3 .globl _gcry_aes_invmixcol_armv8_ce ELF(.type _gcry_aes_invmixcol_armv8_ce,%function;) _gcry_aes_invmixcol_armv8_ce: CFI_STARTPROC(); ld1 {v0.16b}, [x1] aesimc v0.16b, v0.16b st1 {v0.16b}, [x0] CLEAR_REG(v0) - ret + ret_spec_stop CFI_ENDPROC(); ELF(.size _gcry_aes_invmixcol_armv8_ce,.-_gcry_aes_invmixcol_armv8_ce;) #endif diff --git a/cipher/sha1-armv8-aarch64-ce.S b/cipher/sha1-armv8-aarch64-ce.S index 8ea1486b..ea26564b 100644 --- a/cipher/sha1-armv8-aarch64-ce.S +++ b/cipher/sha1-armv8-aarch64-ce.S @@ -1,201 +1,201 @@ /* sha1-armv8-aarch64-ce.S - ARM/CE accelerated SHA-1 transform function * Copyright (C) 2016 Jussi Kivilinna * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . */ #include "asm-common-aarch64.h" #if defined(__AARCH64EL__) && \ defined(HAVE_COMPATIBLE_GCC_AARCH64_PLATFORM_AS) && \ defined(HAVE_GCC_INLINE_ASM_AARCH64_CRYPTO) && defined(USE_SHA1) .cpu generic+simd+crypto .text /* Constants */ #define K1 0x5A827999 #define K2 0x6ED9EBA1 #define K3 0x8F1BBCDC #define K4 0xCA62C1D6 .align 4 gcry_sha1_aarch64_ce_K_VEC: .LK_VEC: .LK1: .long K1, K1, K1, K1 .LK2: .long K2, K2, K2, K2 .LK3: .long K3, K3, K3, K3 .LK4: .long K4, K4, K4, K4 /* Register macros */ #define sH4 s0 #define vH4 v0 #define vH0123 v1 #define qABCD q2 #define sABCD s2 #define vABCD v2 #define sE0 s3 #define vE0 v3 #define sE1 s4 #define vE1 v4 #define vT0 v5 #define vT1 v6 #define vW0 v16 #define vW1 v17 #define vW2 v18 #define vW3 v19 #define vK1 v20 #define vK2 v21 #define vK3 v22 #define vK4 v23 /* Round macros */ #define _(...) 
/*_*/ #define do_add(dst, src0, src1) add dst.4s, src0.4s, src1.4s; #define do_sha1su0(w0,w1,w2) sha1su0 w0.4s,w1.4s,w2.4s; #define do_sha1su1(w0,w3) sha1su1 w0.4s,w3.4s; #define do_rounds(f, e0, e1, t, k, w0, w1, w2, w3, add_fn, sha1su0_fn, sha1su1_fn) \ sha1su1_fn( v##w3, v##w2 ); \ sha1h e0, sABCD; \ sha1##f qABCD, e1, v##t.4s; \ add_fn( v##t, v##w2, v##k ); \ sha1su0_fn( v##w0, v##w1, v##w2 ); /* Other functional macros */ #define CLEAR_REG(reg) movi reg.16b, #0; /* * unsigned int * _gcry_sha1_transform_armv8_ce (void *ctx, const unsigned char *data, * size_t nblks) */ .align 3 .globl _gcry_sha1_transform_armv8_ce ELF(.type _gcry_sha1_transform_armv8_ce,%function;) _gcry_sha1_transform_armv8_ce: /* input: * x0: ctx, CTX * x1: data (64*nblks bytes) * x2: nblks */ CFI_STARTPROC(); cbz x2, .Ldo_nothing; GET_DATA_POINTER(x4, .LK_VEC); ld1 {vH0123.4s}, [x0] /* load h0,h1,h2,h3 */ ld1 {vK1.4s-vK4.4s}, [x4] /* load K1,K2,K3,K4 */ ldr sH4, [x0, #16] /* load h4 */ ld1 {vW0.16b-vW3.16b}, [x1], #64 mov vABCD.16b, vH0123.16b rev32 vW0.16b, vW0.16b rev32 vW1.16b, vW1.16b rev32 vW2.16b, vW2.16b do_add(vT0, vW0, vK1) rev32 vW3.16b, vW3.16b do_add(vT1, vW1, vK1) .Loop: do_rounds(c, sE1, sH4, T0, K1, W0, W1, W2, W3, do_add, do_sha1su0, _) sub x2, x2, #1 do_rounds(c, sE0, sE1, T1, K1, W1, W2, W3, W0, do_add, do_sha1su0, do_sha1su1) do_rounds(c, sE1, sE0, T0, K1, W2, W3, W0, W1, do_add, do_sha1su0, do_sha1su1) do_rounds(c, sE0, sE1, T1, K2, W3, W0, W1, W2, do_add, do_sha1su0, do_sha1su1) do_rounds(c, sE1, sE0, T0, K2, W0, W1, W2, W3, do_add, do_sha1su0, do_sha1su1) do_rounds(p, sE0, sE1, T1, K2, W1, W2, W3, W0, do_add, do_sha1su0, do_sha1su1) do_rounds(p, sE1, sE0, T0, K2, W2, W3, W0, W1, do_add, do_sha1su0, do_sha1su1) do_rounds(p, sE0, sE1, T1, K2, W3, W0, W1, W2, do_add, do_sha1su0, do_sha1su1) do_rounds(p, sE1, sE0, T0, K3, W0, W1, W2, W3, do_add, do_sha1su0, do_sha1su1) do_rounds(p, sE0, sE1, T1, K3, W1, W2, W3, W0, do_add, do_sha1su0, do_sha1su1) do_rounds(m, sE1, sE0, T0, K3, W2, W3, W0, W1, do_add, do_sha1su0, do_sha1su1) do_rounds(m, sE0, sE1, T1, K3, W3, W0, W1, W2, do_add, do_sha1su0, do_sha1su1) do_rounds(m, sE1, sE0, T0, K3, W0, W1, W2, W3, do_add, do_sha1su0, do_sha1su1) do_rounds(m, sE0, sE1, T1, K4, W1, W2, W3, W0, do_add, do_sha1su0, do_sha1su1) do_rounds(m, sE1, sE0, T0, K4, W2, W3, W0, W1, do_add, do_sha1su0, do_sha1su1) do_rounds(p, sE0, sE1, T1, K4, W3, W0, W1, W2, do_add, do_sha1su0, do_sha1su1) cbz x2, .Lend ld1 {vW0.16b-vW1.16b}, [x1], #32 /* preload */ do_rounds(p, sE1, sE0, T0, K4, _ , _ , W2, W3, do_add, _, do_sha1su1) rev32 vW0.16b, vW0.16b ld1 {vW2.16b}, [x1], #16 rev32 vW1.16b, vW1.16b do_rounds(p, sE0, sE1, T1, K4, _ , _ , W3, _ , do_add, _, _) ld1 {vW3.16b}, [x1], #16 rev32 vW2.16b, vW2.16b do_rounds(p, sE1, sE0, T0, _, _, _, _, _, _, _, _) rev32 vW3.16b, vW3.16b do_rounds(p, sE0, sE1, T1, _, _, _, _, _, _, _, _) do_add(vT0, vW0, vK1) add vH4.2s, vH4.2s, vE0.2s add vABCD.4s, vABCD.4s, vH0123.4s do_add(vT1, vW1, vK1) mov vH0123.16b, vABCD.16b b .Loop .Lend: do_rounds(p, sE1, sE0, T0, K4, _ , _ , W2, W3, do_add, _, do_sha1su1) do_rounds(p, sE0, sE1, T1, K4, _ , _ , W3, _ , do_add, _, _) do_rounds(p, sE1, sE0, T0, _, _, _, _, _, _, _, _) do_rounds(p, sE0, sE1, T1, _, _, _, _, _, _, _, _) add vH4.2s, vH4.2s, vE0.2s add vH0123.4s, vH0123.4s, vABCD.4s CLEAR_REG(vW0) CLEAR_REG(vW1) CLEAR_REG(vW2) CLEAR_REG(vW3) CLEAR_REG(vABCD) CLEAR_REG(vE1) CLEAR_REG(vE0) str sH4, [x0, #16] /* store h4 */ st1 {vH0123.4s}, [x0] /* store h0,h1,h2,h3 */ CLEAR_REG(vH0123) CLEAR_REG(vH4) 
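For readers more used to intrinsics, the following hypothetical helper shows roughly what one 'choose' group of do_rounds() above corresponds to in ACLE terms (compile for a crypto-capable target, e.g. -march=armv8-a+crypto; this is an illustration, not libgcrypt code). The parity and majority groups follow the same pattern with vsha1pq_u32 and vsha1mq_u32, and the message-schedule updates map to vsha1su0q_u32/vsha1su1q_u32:

#include <arm_neon.h>

/* Four SHA-1 rounds with the 'choose' function: abcd holds the A..D state
 * words, *e the E word, and wk one 4-word message group plus round constant.
 * The new E is derived from the current A lane before the rounds update it,
 * matching the sha1h/sha1c ordering in the macro above. */
static inline uint32x4_t
sha1_rounds4_c (uint32x4_t abcd, uint32_t *e, uint32x4_t wk)
{
  uint32_t e_next = vsha1h_u32 (vgetq_lane_u32 (abcd, 0)); /* sha1h */
  abcd = vsha1cq_u32 (abcd, *e, wk);                       /* sha1c */
  *e = e_next;
  return abcd;
}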
.Ldo_nothing: mov x0, #0 - ret + ret_spec_stop CFI_ENDPROC(); ELF(.size _gcry_sha1_transform_armv8_ce,.-_gcry_sha1_transform_armv8_ce;) #endif diff --git a/cipher/sha256-armv8-aarch64-ce.S b/cipher/sha256-armv8-aarch64-ce.S index 5c39e83e..d0fa6285 100644 --- a/cipher/sha256-armv8-aarch64-ce.S +++ b/cipher/sha256-armv8-aarch64-ce.S @@ -1,215 +1,215 @@ /* sha256-armv8-aarch64-ce.S - ARM/CE accelerated SHA-256 transform function * Copyright (C) 2016 Jussi Kivilinna * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . */ #include "asm-common-aarch64.h" #if defined(__AARCH64EL__) && \ defined(HAVE_COMPATIBLE_GCC_AARCH64_PLATFORM_AS) && \ defined(HAVE_GCC_INLINE_ASM_AARCH64_CRYPTO) && defined(USE_SHA256) .cpu generic+simd+crypto .text /* Constants */ .align 4 gcry_sha256_aarch64_ce_K: .LK: .long 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5 .long 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5 .long 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3 .long 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174 .long 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc .long 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da .long 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7 .long 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967 .long 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13 .long 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85 .long 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3 .long 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070 .long 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5 .long 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3 .long 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208 .long 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2 /* Register macros */ #define vH0123 v0 #define vH4567 v1 #define vABCD0 v2 #define qABCD0 q2 #define vABCD1 v3 #define qABCD1 q3 #define vEFGH v4 #define qEFGH q4 #define vT0 v5 #define vT1 v6 #define vW0 v16 #define vW1 v17 #define vW2 v18 #define vW3 v19 #define vK0 v20 #define vK1 v21 #define vK2 v22 #define vK3 v23 /* Round macros */ #define _(...) 
/*_*/ #define do_loadk(nk0, nk1) ld1 {nk0.16b-nk1.16b},[x3],#32; #define do_add(a, b) add a.4s, a.4s, b.4s; #define do_sha256su0(w0, w1) sha256su0 w0.4s, w1.4s; #define do_sha256su1(w0, w2, w3) sha256su1 w0.4s, w2.4s, w3.4s; #define do_rounds(k, nk0, nk1, w0, w1, w2, w3, loadk_fn, add_fn, su0_fn, su1_fn) \ loadk_fn( v##nk0, v##nk1 ); \ su0_fn( v##w0, v##w1 ); \ mov vABCD1.16b, vABCD0.16b; \ sha256h qABCD0, qEFGH, v##k.4s; \ sha256h2 qEFGH, qABCD1, v##k.4s; \ add_fn( v##nk0, v##w2 ); \ su1_fn( v##w0, v##w2, v##w3 ); /* Other functional macros */ #define CLEAR_REG(reg) movi reg.16b, #0; /* * unsigned int * _gcry_sha256_transform_armv8_ce (u32 state[8], const void *input_data, * size_t num_blks) */ .align 3 .globl _gcry_sha256_transform_armv8_ce ELF(.type _gcry_sha256_transform_armv8_ce,%function;) _gcry_sha256_transform_armv8_ce: /* input: * r0: ctx, CTX * r1: data (64*nblks bytes) * r2: nblks */ CFI_STARTPROC(); cbz x2, .Ldo_nothing; GET_DATA_POINTER(x3, .LK); mov x4, x3 ld1 {vH0123.4s-vH4567.4s}, [x0] /* load state */ ld1 {vW0.16b-vW1.16b}, [x1], #32 do_loadk(vK0, vK1) ld1 {vW2.16b-vW3.16b}, [x1], #32 mov vABCD0.16b, vH0123.16b mov vEFGH.16b, vH4567.16b rev32 vW0.16b, vW0.16b rev32 vW1.16b, vW1.16b rev32 vW2.16b, vW2.16b do_add(vK0, vW0) rev32 vW3.16b, vW3.16b do_add(vK1, vW1) .Loop: do_rounds(K0, K2, K3, W0, W1, W2, W3, do_loadk, do_add, do_sha256su0, do_sha256su1) sub x2,x2,#1 do_rounds(K1, K3, _ , W1, W2, W3, W0, _ , do_add, do_sha256su0, do_sha256su1) do_rounds(K2, K0, K1, W2, W3, W0, W1, do_loadk, do_add, do_sha256su0, do_sha256su1) do_rounds(K3, K1, _ , W3, W0, W1, W2, _ , do_add, do_sha256su0, do_sha256su1) do_rounds(K0, K2, K3, W0, W1, W2, W3, do_loadk, do_add, do_sha256su0, do_sha256su1) do_rounds(K1, K3, _ , W1, W2, W3, W0, _ , do_add, do_sha256su0, do_sha256su1) do_rounds(K2, K0, K1, W2, W3, W0, W1, do_loadk, do_add, do_sha256su0, do_sha256su1) do_rounds(K3, K1, _ , W3, W0, W1, W2, _ , do_add, do_sha256su0, do_sha256su1) do_rounds(K0, K2, K3, W0, W1, W2, W3, do_loadk, do_add, do_sha256su0, do_sha256su1) do_rounds(K1, K3, _ , W1, W2, W3, W0, _ , do_add, do_sha256su0, do_sha256su1) do_rounds(K2, K0, K1, W2, W3, W0, W1, do_loadk, do_add, do_sha256su0, do_sha256su1) do_rounds(K3, K1, _ , W3, W0, W1, W2, _ , do_add, do_sha256su0, do_sha256su1) cbz x2, .Lend do_rounds(K0, K2, K3, W0, _ , W2, W3, do_loadk, do_add, _, _) ld1 {vW0.16b}, [x1], #16 mov x3, x4 do_rounds(K1, K3, _ , W1, _ , W3, _ , _ , do_add, _, _) ld1 {vW1.16b}, [x1], #16 rev32 vW0.16b, vW0.16b do_rounds(K2, K0, K1, W2, _ , W0, _ , do_loadk, do_add, _, _) rev32 vW1.16b, vW1.16b ld1 {vW2.16b}, [x1], #16 do_rounds(K3, K1, _ , W3, _ , W1, _ , _ , do_add, _, _) ld1 {vW3.16b}, [x1], #16 do_add(vH0123, vABCD0) do_add(vH4567, vEFGH) rev32 vW2.16b, vW2.16b mov vABCD0.16b, vH0123.16b rev32 vW3.16b, vW3.16b mov vEFGH.16b, vH4567.16b b .Loop .Lend: do_rounds(K0, K2, K3, W0, _ , W2, W3, do_loadk, do_add, _, _) do_rounds(K1, K3, _ , W1, _ , W3, _ , _ , do_add, _, _) do_rounds(K2, _ , _ , W2, _ , _ , _ , _ , _, _, _) do_rounds(K3, _ , _ , W3, _ , _ , _ , _ , _, _, _) CLEAR_REG(vW0) CLEAR_REG(vW1) CLEAR_REG(vW2) CLEAR_REG(vW3) CLEAR_REG(vK0) CLEAR_REG(vK1) CLEAR_REG(vK2) CLEAR_REG(vK3) do_add(vH0123, vABCD0) do_add(vH4567, vEFGH) CLEAR_REG(vABCD0) CLEAR_REG(vABCD1) CLEAR_REG(vEFGH) st1 {vH0123.4s-vH4567.4s}, [x0] /* store state */ CLEAR_REG(vH0123) CLEAR_REG(vH4567) .Ldo_nothing: mov x0, #0 - ret + ret_spec_stop CFI_ENDPROC(); ELF(.size _gcry_sha256_transform_armv8_ce,.-_gcry_sha256_transform_armv8_ce;) #endif diff --git 
a/cipher/sm3-aarch64.S b/cipher/sm3-aarch64.S index 77dba2ba..3fb89006 100644 --- a/cipher/sm3-aarch64.S +++ b/cipher/sm3-aarch64.S @@ -1,657 +1,657 @@ /* sm3-aarch64.S - ARMv8/AArch64 accelerated SM3 transform function * * Copyright (C) 2021 Jussi Kivilinna * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . */ #include "asm-common-aarch64.h" #if defined(__AARCH64EL__) && \ defined(HAVE_COMPATIBLE_GCC_AARCH64_PLATFORM_AS) && \ defined(HAVE_GCC_INLINE_ASM_AARCH64_NEON) && \ defined(USE_SM3) .cpu generic+simd /* Constants */ .text .align 4 ELF(.type _gcry_sm3_aarch64_consts,@object) _gcry_sm3_aarch64_consts: .LKtable: .long 0x79cc4519, 0xf3988a32, 0xe7311465, 0xce6228cb .long 0x9cc45197, 0x3988a32f, 0x7311465e, 0xe6228cbc .long 0xcc451979, 0x988a32f3, 0x311465e7, 0x6228cbce .long 0xc451979c, 0x88a32f39, 0x11465e73, 0x228cbce6 .long 0x9d8a7a87, 0x3b14f50f, 0x7629ea1e, 0xec53d43c .long 0xd8a7a879, 0xb14f50f3, 0x629ea1e7, 0xc53d43ce .long 0x8a7a879d, 0x14f50f3b, 0x29ea1e76, 0x53d43cec .long 0xa7a879d8, 0x4f50f3b1, 0x9ea1e762, 0x3d43cec5 .long 0x7a879d8a, 0xf50f3b14, 0xea1e7629, 0xd43cec53 .long 0xa879d8a7, 0x50f3b14f, 0xa1e7629e, 0x43cec53d .long 0x879d8a7a, 0x0f3b14f5, 0x1e7629ea, 0x3cec53d4 .long 0x79d8a7a8, 0xf3b14f50, 0xe7629ea1, 0xcec53d43 .long 0x9d8a7a87, 0x3b14f50f, 0x7629ea1e, 0xec53d43c .long 0xd8a7a879, 0xb14f50f3, 0x629ea1e7, 0xc53d43ce .long 0x8a7a879d, 0x14f50f3b, 0x29ea1e76, 0x53d43cec .long 0xa7a879d8, 0x4f50f3b1, 0x9ea1e762, 0x3d43cec5 ELF(.size _gcry_sm3_aarch64_consts,.-_gcry_sm3_aarch64_consts) /* Context structure */ #define state_h0 0 #define state_h1 4 #define state_h2 8 #define state_h3 12 #define state_h4 16 #define state_h5 20 #define state_h6 24 #define state_h7 28 /* Stack structure */ #define STACK_W_SIZE (32 * 2 * 3) #define STACK_W (0) #define STACK_SIZE (STACK_W + STACK_W_SIZE) /* Register macros */ #define RSTATE x0 #define RDATA x1 #define RNBLKS x2 #define RKPTR x28 #define RFRAME x29 #define ra w3 #define rb w4 #define rc w5 #define rd w6 #define re w7 #define rf w8 #define rg w9 #define rh w10 #define t0 w11 #define t1 w12 #define t2 w13 #define t3 w14 #define t4 w15 #define t5 w16 #define t6 w17 #define k_even w19 #define k_odd w20 #define addr0 x21 #define addr1 x22 #define s0 w23 #define s1 w24 #define s2 w25 #define s3 w26 #define W0 v0 #define W1 v1 #define W2 v2 #define W3 v3 #define W4 v4 #define W5 v5 #define XTMP0 v6 #define XTMP1 v7 #define XTMP2 v16 #define XTMP3 v17 #define XTMP4 v18 #define XTMP5 v19 #define XTMP6 v20 /* Helper macros. */ #define _(...) /*_*/ #define clear_vec(x) \ movi x.8h, #0; #define rolw(o, a, n) \ ror o, a, #(32 - n); /* Round function macros. 
*/ #define GG1_1(x, y, z, o, t) \ eor o, x, y; #define GG1_2(x, y, z, o, t) \ eor o, o, z; #define GG1_3(x, y, z, o, t) #define FF1_1(x, y, z, o, t) GG1_1(x, y, z, o, t) #define FF1_2(x, y, z, o, t) #define FF1_3(x, y, z, o, t) GG1_2(x, y, z, o, t) #define GG2_1(x, y, z, o, t) \ bic o, z, x; #define GG2_2(x, y, z, o, t) \ and t, y, x; #define GG2_3(x, y, z, o, t) \ eor o, o, t; #define FF2_1(x, y, z, o, t) \ eor o, x, y; #define FF2_2(x, y, z, o, t) \ and t, x, y; \ and o, o, z; #define FF2_3(x, y, z, o, t) \ eor o, o, t; #define R(i, a, b, c, d, e, f, g, h, k, K_LOAD, round, widx, wtype, IOP, iop_param) \ K_LOAD(round); \ ldr t5, [sp, #(wtype##_W1_ADDR(round, widx))]; \ rolw(t0, a, 12); /* rol(a, 12) => t0 */ \ IOP(1, iop_param); \ FF##i##_1(a, b, c, t1, t2); \ ldr t6, [sp, #(wtype##_W1W2_ADDR(round, widx))]; \ add k, k, e; \ IOP(2, iop_param); \ GG##i##_1(e, f, g, t3, t4); \ FF##i##_2(a, b, c, t1, t2); \ IOP(3, iop_param); \ add k, k, t0; \ add h, h, t5; \ add d, d, t6; /* w1w2 + d => d */ \ IOP(4, iop_param); \ rolw(k, k, 7); /* rol (t0 + e + t), 7) => k */ \ GG##i##_2(e, f, g, t3, t4); \ add h, h, k; /* h + w1 + k => h */ \ IOP(5, iop_param); \ FF##i##_3(a, b, c, t1, t2); \ eor t0, t0, k; /* k ^ t0 => t0 */ \ GG##i##_3(e, f, g, t3, t4); \ add d, d, t1; /* FF(a,b,c) + d => d */ \ IOP(6, iop_param); \ add t3, t3, h; /* GG(e,f,g) + h => t3 */ \ rolw(b, b, 9); /* rol(b, 9) => b */ \ eor h, t3, t3, ror #(32-9); \ IOP(7, iop_param); \ add d, d, t0; /* t0 + d => d */ \ rolw(f, f, 19); /* rol(f, 19) => f */ \ IOP(8, iop_param); \ eor h, h, t3, ror #(32-17); /* P0(t3) => h */ \ #define R1(a, b, c, d, e, f, g, h, k, K_LOAD, round, widx, wtype, IOP, iop_param) \ R(1, ##a, ##b, ##c, ##d, ##e, ##f, ##g, ##h, ##k, K_LOAD, round, widx, wtype, IOP, iop_param) #define R2(a, b, c, d, e, f, g, h, k, K_LOAD, round, widx, wtype, IOP, iop_param) \ R(2, ##a, ##b, ##c, ##d, ##e, ##f, ##g, ##h, ##k, K_LOAD, round, widx, wtype, IOP, iop_param) #define KL(round) \ ldp k_even, k_odd, [RKPTR, #(4*(round))]; /* Input expansion macros. */ /* Byte-swapped input address. */ #define IW_W_ADDR(round, widx, offs) \ (STACK_W + ((round) / 4) * 64 + (offs) + ((widx) * 4)) /* Expanded input address. */ #define XW_W_ADDR(round, widx, offs) \ (STACK_W + ((((round) / 3) - 4) % 2) * 64 + (offs) + ((widx) * 4)) /* Rounds 1-12, byte-swapped input block addresses. */ #define IW_W1_ADDR(round, widx) IW_W_ADDR(round, widx, 32) #define IW_W1W2_ADDR(round, widx) IW_W_ADDR(round, widx, 48) /* Rounds 1-12, expanded input block addresses. */ #define XW_W1_ADDR(round, widx) XW_W_ADDR(round, widx, 0) #define XW_W1W2_ADDR(round, widx) XW_W_ADDR(round, widx, 16) /* Input block loading. * Interleaving within round function needed for in-order CPUs. 
*/ #define LOAD_W_VEC_1_1() \ add addr0, sp, #IW_W1_ADDR(0, 0); #define LOAD_W_VEC_1_2() \ add addr1, sp, #IW_W1_ADDR(4, 0); #define LOAD_W_VEC_1_3() \ ld1 {W0.16b}, [RDATA], #16; #define LOAD_W_VEC_1_4() \ ld1 {W1.16b}, [RDATA], #16; #define LOAD_W_VEC_1_5() \ ld1 {W2.16b}, [RDATA], #16; #define LOAD_W_VEC_1_6() \ ld1 {W3.16b}, [RDATA], #16; #define LOAD_W_VEC_1_7() \ rev32 XTMP0.16b, W0.16b; #define LOAD_W_VEC_1_8() \ rev32 XTMP1.16b, W1.16b; #define LOAD_W_VEC_2_1() \ rev32 XTMP2.16b, W2.16b; #define LOAD_W_VEC_2_2() \ rev32 XTMP3.16b, W3.16b; #define LOAD_W_VEC_2_3() \ eor XTMP4.16b, XTMP1.16b, XTMP0.16b; #define LOAD_W_VEC_2_4() \ eor XTMP5.16b, XTMP2.16b, XTMP1.16b; #define LOAD_W_VEC_2_5() \ st1 {XTMP0.16b}, [addr0], #16; #define LOAD_W_VEC_2_6() \ st1 {XTMP4.16b}, [addr0]; \ add addr0, sp, #IW_W1_ADDR(8, 0); #define LOAD_W_VEC_2_7() \ eor XTMP6.16b, XTMP3.16b, XTMP2.16b; #define LOAD_W_VEC_2_8() \ ext W0.16b, XTMP0.16b, XTMP0.16b, #8; /* W0: xx, w0, xx, xx */ #define LOAD_W_VEC_3_1() \ mov W2.16b, XTMP1.16b; /* W2: xx, w6, w5, w4 */ #define LOAD_W_VEC_3_2() \ st1 {XTMP1.16b}, [addr1], #16; #define LOAD_W_VEC_3_3() \ st1 {XTMP5.16b}, [addr1]; \ ext W1.16b, XTMP0.16b, XTMP0.16b, #4; /* W1: xx, w3, w2, w1 */ #define LOAD_W_VEC_3_4() \ ext W3.16b, XTMP1.16b, XTMP2.16b, #12; /* W3: xx, w9, w8, w7 */ #define LOAD_W_VEC_3_5() \ ext W4.16b, XTMP2.16b, XTMP3.16b, #8; /* W4: xx, w12, w11, w10 */ #define LOAD_W_VEC_3_6() \ st1 {XTMP2.16b}, [addr0], #16; #define LOAD_W_VEC_3_7() \ st1 {XTMP6.16b}, [addr0]; #define LOAD_W_VEC_3_8() \ ext W5.16b, XTMP3.16b, XTMP3.16b, #4; /* W5: xx, w15, w14, w13 */ #define LOAD_W_VEC_1(iop_num, ...) \ LOAD_W_VEC_1_##iop_num() #define LOAD_W_VEC_2(iop_num, ...) \ LOAD_W_VEC_2_##iop_num() #define LOAD_W_VEC_3(iop_num, ...) \ LOAD_W_VEC_3_##iop_num() /* Message scheduling. Note: 3 words per vector register. * Interleaving within round function needed for in-order CPUs. 
*/ #define SCHED_W_1_1(round, w0, w1, w2, w3, w4, w5) \ /* Load (w[i - 16]) => XTMP0 */ \ /* Load (w[i - 13]) => XTMP5 */ \ ext XTMP0.16b, w0.16b, w0.16b, #12; /* XTMP0: w0, xx, xx, xx */ #define SCHED_W_1_2(round, w0, w1, w2, w3, w4, w5) \ ext XTMP5.16b, w1.16b, w1.16b, #12; #define SCHED_W_1_3(round, w0, w1, w2, w3, w4, w5) \ ext XTMP0.16b, XTMP0.16b, w1.16b, #12; /* XTMP0: xx, w2, w1, w0 */ #define SCHED_W_1_4(round, w0, w1, w2, w3, w4, w5) \ ext XTMP5.16b, XTMP5.16b, w2.16b, #12; #define SCHED_W_1_5(round, w0, w1, w2, w3, w4, w5) \ /* w[i - 9] == w3 */ \ /* W3 ^ XTMP0 => XTMP0 */ \ eor XTMP0.16b, XTMP0.16b, w3.16b; #define SCHED_W_1_6(round, w0, w1, w2, w3, w4, w5) \ /* w[i - 3] == w5 */ \ /* rol(XMM5, 15) ^ XTMP0 => XTMP0 */ \ /* rol(XTMP5, 7) => XTMP1 */ \ add addr0, sp, #XW_W1_ADDR((round), 0); \ shl XTMP2.4s, w5.4s, #15; #define SCHED_W_1_7(round, w0, w1, w2, w3, w4, w5) \ shl XTMP1.4s, XTMP5.4s, #7; #define SCHED_W_1_8(round, w0, w1, w2, w3, w4, w5) \ sri XTMP2.4s, w5.4s, #(32-15); #define SCHED_W_2_1(round, w0, w1, w2, w3, w4, w5) \ sri XTMP1.4s, XTMP5.4s, #(32-7); #define SCHED_W_2_2(round, w0, w1, w2, w3, w4, w5) \ eor XTMP0.16b, XTMP0.16b, XTMP2.16b; #define SCHED_W_2_3(round, w0, w1, w2, w3, w4, w5) \ /* w[i - 6] == W4 */ \ /* W4 ^ XTMP1 => XTMP1 */ \ eor XTMP1.16b, XTMP1.16b, w4.16b; #define SCHED_W_2_4(round, w0, w1, w2, w3, w4, w5) \ /* P1(XTMP0) ^ XTMP1 => W0 */ \ shl XTMP3.4s, XTMP0.4s, #15; #define SCHED_W_2_5(round, w0, w1, w2, w3, w4, w5) \ shl XTMP4.4s, XTMP0.4s, #23; #define SCHED_W_2_6(round, w0, w1, w2, w3, w4, w5) \ eor w0.16b, XTMP1.16b, XTMP0.16b; #define SCHED_W_2_7(round, w0, w1, w2, w3, w4, w5) \ sri XTMP3.4s, XTMP0.4s, #(32-15); #define SCHED_W_2_8(round, w0, w1, w2, w3, w4, w5) \ sri XTMP4.4s, XTMP0.4s, #(32-23); #define SCHED_W_3_1(round, w0, w1, w2, w3, w4, w5) \ eor w0.16b, w0.16b, XTMP3.16b; #define SCHED_W_3_2(round, w0, w1, w2, w3, w4, w5) \ /* Load (w[i - 3]) => XTMP2 */ \ ext XTMP2.16b, w4.16b, w4.16b, #12; #define SCHED_W_3_3(round, w0, w1, w2, w3, w4, w5) \ eor w0.16b, w0.16b, XTMP4.16b; #define SCHED_W_3_4(round, w0, w1, w2, w3, w4, w5) \ ext XTMP2.16b, XTMP2.16b, w5.16b, #12; #define SCHED_W_3_5(round, w0, w1, w2, w3, w4, w5) \ /* W1 ^ W2 => XTMP3 */ \ eor XTMP3.16b, XTMP2.16b, w0.16b; #define SCHED_W_3_6(round, w0, w1, w2, w3, w4, w5) #define SCHED_W_3_7(round, w0, w1, w2, w3, w4, w5) \ st1 { XTMP2.16b-XTMP3.16b }, [addr0]; #define SCHED_W_3_8(round, w0, w1, w2, w3, w4, w5) #define SCHED_W_W0W1W2W3W4W5_1(iop_num, round) \ SCHED_W_1_##iop_num(round, W0, W1, W2, W3, W4, W5) #define SCHED_W_W0W1W2W3W4W5_2(iop_num, round) \ SCHED_W_2_##iop_num(round, W0, W1, W2, W3, W4, W5) #define SCHED_W_W0W1W2W3W4W5_3(iop_num, round) \ SCHED_W_3_##iop_num(round, W0, W1, W2, W3, W4, W5) #define SCHED_W_W1W2W3W4W5W0_1(iop_num, round) \ SCHED_W_1_##iop_num(round, W1, W2, W3, W4, W5, W0) #define SCHED_W_W1W2W3W4W5W0_2(iop_num, round) \ SCHED_W_2_##iop_num(round, W1, W2, W3, W4, W5, W0) #define SCHED_W_W1W2W3W4W5W0_3(iop_num, round) \ SCHED_W_3_##iop_num(round, W1, W2, W3, W4, W5, W0) #define SCHED_W_W2W3W4W5W0W1_1(iop_num, round) \ SCHED_W_1_##iop_num(round, W2, W3, W4, W5, W0, W1) #define SCHED_W_W2W3W4W5W0W1_2(iop_num, round) \ SCHED_W_2_##iop_num(round, W2, W3, W4, W5, W0, W1) #define SCHED_W_W2W3W4W5W0W1_3(iop_num, round) \ SCHED_W_3_##iop_num(round, W2, W3, W4, W5, W0, W1) #define SCHED_W_W3W4W5W0W1W2_1(iop_num, round) \ SCHED_W_1_##iop_num(round, W3, W4, W5, W0, W1, W2) #define SCHED_W_W3W4W5W0W1W2_2(iop_num, round) \ SCHED_W_2_##iop_num(round, W3, W4, W5, 
W0, W1, W2) #define SCHED_W_W3W4W5W0W1W2_3(iop_num, round) \ SCHED_W_3_##iop_num(round, W3, W4, W5, W0, W1, W2) #define SCHED_W_W4W5W0W1W2W3_1(iop_num, round) \ SCHED_W_1_##iop_num(round, W4, W5, W0, W1, W2, W3) #define SCHED_W_W4W5W0W1W2W3_2(iop_num, round) \ SCHED_W_2_##iop_num(round, W4, W5, W0, W1, W2, W3) #define SCHED_W_W4W5W0W1W2W3_3(iop_num, round) \ SCHED_W_3_##iop_num(round, W4, W5, W0, W1, W2, W3) #define SCHED_W_W5W0W1W2W3W4_1(iop_num, round) \ SCHED_W_1_##iop_num(round, W5, W0, W1, W2, W3, W4) #define SCHED_W_W5W0W1W2W3W4_2(iop_num, round) \ SCHED_W_2_##iop_num(round, W5, W0, W1, W2, W3, W4) #define SCHED_W_W5W0W1W2W3W4_3(iop_num, round) \ SCHED_W_3_##iop_num(round, W5, W0, W1, W2, W3, W4) /* * Transform nblks*64 bytes (nblks*16 32-bit words) at DATA. * * unsigned int * _gcry_sm3_transform_aarch64 (void *ctx, const unsigned char *data, * size_t nblks) */ .align 3 .globl _gcry_sm3_transform_aarch64 ELF(.type _gcry_sm3_transform_aarch64,%function;) _gcry_sm3_transform_aarch64: CFI_STARTPROC(); ldp ra, rb, [RSTATE, #0]; ldp rc, rd, [RSTATE, #8]; ldp re, rf, [RSTATE, #16]; ldp rg, rh, [RSTATE, #24]; stp x28, x29, [sp, #-16]!; CFI_ADJUST_CFA_OFFSET(16); CFI_REG_ON_STACK(28, 0); CFI_REG_ON_STACK(29, 8); stp x19, x20, [sp, #-16]!; CFI_ADJUST_CFA_OFFSET(16); CFI_REG_ON_STACK(19, 0); CFI_REG_ON_STACK(20, 8); stp x21, x22, [sp, #-16]!; CFI_ADJUST_CFA_OFFSET(16); CFI_REG_ON_STACK(21, 0); CFI_REG_ON_STACK(22, 8); stp x23, x24, [sp, #-16]!; CFI_ADJUST_CFA_OFFSET(16); CFI_REG_ON_STACK(23, 0); CFI_REG_ON_STACK(24, 8); stp x25, x26, [sp, #-16]!; CFI_ADJUST_CFA_OFFSET(16); CFI_REG_ON_STACK(25, 0); CFI_REG_ON_STACK(26, 8); mov RFRAME, sp; CFI_DEF_CFA_REGISTER(RFRAME); sub addr0, sp, #STACK_SIZE; GET_DATA_POINTER(RKPTR, .LKtable); and sp, addr0, #(~63); /* Preload first block. 
*/ LOAD_W_VEC_1(1, 0); LOAD_W_VEC_1(2, 0); LOAD_W_VEC_1(3, 0); LOAD_W_VEC_1(4, 0); LOAD_W_VEC_1(5, 0); LOAD_W_VEC_1(6, 0); LOAD_W_VEC_1(7, 0); LOAD_W_VEC_1(8, 0); LOAD_W_VEC_2(1, 0); LOAD_W_VEC_2(2, 0); LOAD_W_VEC_2(3, 0); LOAD_W_VEC_2(4, 0); LOAD_W_VEC_2(5, 0); LOAD_W_VEC_2(6, 0); LOAD_W_VEC_2(7, 0); LOAD_W_VEC_2(8, 0); LOAD_W_VEC_3(1, 0); LOAD_W_VEC_3(2, 0); LOAD_W_VEC_3(3, 0); LOAD_W_VEC_3(4, 0); LOAD_W_VEC_3(5, 0); LOAD_W_VEC_3(6, 0); LOAD_W_VEC_3(7, 0); LOAD_W_VEC_3(8, 0); .balign 16 .Loop: /* Transform 0-3 */ R1(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 0, 0, IW, _, 0); R1(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 1, 1, IW, _, 0); R1(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 2, 2, IW, _, 0); R1(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 3, 3, IW, _, 0); /* Transform 4-7 + Precalc 12-14 */ R1(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 4, 0, IW, _, 0); R1(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 5, 1, IW, _, 0); R1(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 6, 2, IW, SCHED_W_W0W1W2W3W4W5_1, 12); R1(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 7, 3, IW, SCHED_W_W0W1W2W3W4W5_2, 12); /* Transform 8-11 + Precalc 12-17 */ R1(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 8, 0, IW, SCHED_W_W0W1W2W3W4W5_3, 12); R1(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 9, 1, IW, SCHED_W_W1W2W3W4W5W0_1, 15); R1(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 10, 2, IW, SCHED_W_W1W2W3W4W5W0_2, 15); R1(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 11, 3, IW, SCHED_W_W1W2W3W4W5W0_3, 15); /* Transform 12-14 + Precalc 18-20 */ R1(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 12, 0, XW, SCHED_W_W2W3W4W5W0W1_1, 18); R1(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 13, 1, XW, SCHED_W_W2W3W4W5W0W1_2, 18); R1(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 14, 2, XW, SCHED_W_W2W3W4W5W0W1_3, 18); /* Transform 15-17 + Precalc 21-23 */ R1(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 15, 0, XW, SCHED_W_W3W4W5W0W1W2_1, 21); R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 16, 1, XW, SCHED_W_W3W4W5W0W1W2_2, 21); R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 17, 2, XW, SCHED_W_W3W4W5W0W1W2_3, 21); /* Transform 18-20 + Precalc 24-26 */ R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 18, 0, XW, SCHED_W_W4W5W0W1W2W3_1, 24) R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 19, 1, XW, SCHED_W_W4W5W0W1W2W3_2, 24) R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 20, 2, XW, SCHED_W_W4W5W0W1W2W3_3, 24) /* Transform 21-23 + Precalc 27-29 */ R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 21, 0, XW, SCHED_W_W5W0W1W2W3W4_1, 27) R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 22, 1, XW, SCHED_W_W5W0W1W2W3W4_2, 27) R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 23, 2, XW, SCHED_W_W5W0W1W2W3W4_3, 27) /* Transform 24-26 + Precalc 30-32 */ R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 24, 0, XW, SCHED_W_W0W1W2W3W4W5_1, 30) R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 25, 1, XW, SCHED_W_W0W1W2W3W4W5_2, 30) R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 26, 2, XW, SCHED_W_W0W1W2W3W4W5_3, 30) /* Transform 27-29 + Precalc 33-35 */ R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 27, 0, XW, SCHED_W_W1W2W3W4W5W0_1, 33) R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 28, 1, XW, SCHED_W_W1W2W3W4W5W0_2, 33) R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 29, 2, XW, SCHED_W_W1W2W3W4W5W0_3, 33) /* Transform 30-32 + Precalc 36-38 */ R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 30, 0, XW, SCHED_W_W2W3W4W5W0W1_1, 36) R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 31, 1, XW, SCHED_W_W2W3W4W5W0W1_2, 36) R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 32, 2, XW, SCHED_W_W2W3W4W5W0W1_3, 36) /* Transform 
33-35 + Precalc 39-41 */ R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 33, 0, XW, SCHED_W_W3W4W5W0W1W2_1, 39) R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 34, 1, XW, SCHED_W_W3W4W5W0W1W2_2, 39) R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 35, 2, XW, SCHED_W_W3W4W5W0W1W2_3, 39) /* Transform 36-38 + Precalc 42-44 */ R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 36, 0, XW, SCHED_W_W4W5W0W1W2W3_1, 42) R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 37, 1, XW, SCHED_W_W4W5W0W1W2W3_2, 42) R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 38, 2, XW, SCHED_W_W4W5W0W1W2W3_3, 42) /* Transform 39-41 + Precalc 45-47 */ R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 39, 0, XW, SCHED_W_W5W0W1W2W3W4_1, 45) R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 40, 1, XW, SCHED_W_W5W0W1W2W3W4_2, 45) R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 41, 2, XW, SCHED_W_W5W0W1W2W3W4_3, 45) /* Transform 42-44 + Precalc 48-50 */ R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 42, 0, XW, SCHED_W_W0W1W2W3W4W5_1, 48) R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 43, 1, XW, SCHED_W_W0W1W2W3W4W5_2, 48) R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 44, 2, XW, SCHED_W_W0W1W2W3W4W5_3, 48) /* Transform 45-47 + Precalc 51-53 */ R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 45, 0, XW, SCHED_W_W1W2W3W4W5W0_1, 51) R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 46, 1, XW, SCHED_W_W1W2W3W4W5W0_2, 51) R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 47, 2, XW, SCHED_W_W1W2W3W4W5W0_3, 51) /* Transform 48-50 + Precalc 54-56 */ R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 48, 0, XW, SCHED_W_W2W3W4W5W0W1_1, 54) R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 49, 1, XW, SCHED_W_W2W3W4W5W0W1_2, 54) R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 50, 2, XW, SCHED_W_W2W3W4W5W0W1_3, 54) /* Transform 51-53 + Precalc 57-59 */ R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 51, 0, XW, SCHED_W_W3W4W5W0W1W2_1, 57) R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 52, 1, XW, SCHED_W_W3W4W5W0W1W2_2, 57) R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 53, 2, XW, SCHED_W_W3W4W5W0W1W2_3, 57) /* Transform 54-56 + Precalc 60-62 */ R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 54, 0, XW, SCHED_W_W4W5W0W1W2W3_1, 60) R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 55, 1, XW, SCHED_W_W4W5W0W1W2W3_2, 60) R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 56, 2, XW, SCHED_W_W4W5W0W1W2W3_3, 60) /* Transform 57-59 + Precalc 63 */ R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 57, 0, XW, SCHED_W_W5W0W1W2W3W4_1, 63) R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 58, 1, XW, SCHED_W_W5W0W1W2W3W4_2, 63) R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 59, 2, XW, SCHED_W_W5W0W1W2W3W4_3, 63) /* Transform 60 */ R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 60, 0, XW, _, _); subs RNBLKS, RNBLKS, #1; b.eq .Lend; /* Transform 61-63 + Preload next block */ R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 61, 1, XW, LOAD_W_VEC_1, _); ldp s0, s1, [RSTATE, #0]; R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 62, 2, XW, LOAD_W_VEC_2, _); ldp s2, s3, [RSTATE, #8]; R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 63, 0, XW, LOAD_W_VEC_3, _); /* Update the chaining variables. 
*/ eor ra, ra, s0; eor rb, rb, s1; ldp s0, s1, [RSTATE, #16]; eor rc, rc, s2; ldp k_even, k_odd, [RSTATE, #24]; eor rd, rd, s3; eor re, re, s0; stp ra, rb, [RSTATE, #0]; eor rf, rf, s1; stp rc, rd, [RSTATE, #8]; eor rg, rg, k_even; stp re, rf, [RSTATE, #16]; eor rh, rh, k_odd; stp rg, rh, [RSTATE, #24]; b .Loop; .Lend: /* Transform 61-63 */ R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 61, 1, XW, _, _); ldp s0, s1, [RSTATE, #0]; R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 62, 2, XW, _, _); ldp s2, s3, [RSTATE, #8]; R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 63, 0, XW, _, _); /* Update the chaining variables. */ eor ra, ra, s0; clear_vec(W0); eor rb, rb, s1; clear_vec(W1); ldp s0, s1, [RSTATE, #16]; clear_vec(W2); eor rc, rc, s2; clear_vec(W3); ldp k_even, k_odd, [RSTATE, #24]; clear_vec(W4); eor rd, rd, s3; clear_vec(W5); eor re, re, s0; clear_vec(XTMP0); stp ra, rb, [RSTATE, #0]; clear_vec(XTMP1); eor rf, rf, s1; clear_vec(XTMP2); stp rc, rd, [RSTATE, #8]; clear_vec(XTMP3); eor rg, rg, k_even; clear_vec(XTMP4); stp re, rf, [RSTATE, #16]; clear_vec(XTMP5); eor rh, rh, k_odd; clear_vec(XTMP6); stp rg, rh, [RSTATE, #24]; /* Clear message expansion area */ add addr0, sp, #STACK_W; eor x0, x0, x0; // stack burned st1 {W0.16b-W3.16b}, [addr0], #64; st1 {W0.16b-W3.16b}, [addr0], #64; st1 {W0.16b-W3.16b}, [addr0]; mov sp, RFRAME; CFI_DEF_CFA_REGISTER(sp); ldp x25, x26, [sp], #16; CFI_ADJUST_CFA_OFFSET(-16); CFI_RESTORE(x25); CFI_RESTORE(x26); ldp x23, x24, [sp], #16; CFI_ADJUST_CFA_OFFSET(-16); CFI_RESTORE(x23); CFI_RESTORE(x24); ldp x21, x22, [sp], #16; CFI_ADJUST_CFA_OFFSET(-16); CFI_RESTORE(x21); CFI_RESTORE(x22); ldp x19, x20, [sp], #16; CFI_ADJUST_CFA_OFFSET(-16); CFI_RESTORE(x19); CFI_RESTORE(x20); ldp x28, x29, [sp], #16; CFI_ADJUST_CFA_OFFSET(-16); CFI_RESTORE(x28); CFI_RESTORE(x29); - ret + ret_spec_stop CFI_ENDPROC(); ELF(.size _gcry_sm3_transform_aarch64, .-_gcry_sm3_transform_aarch64;) #endif diff --git a/cipher/twofish-aarch64.S b/cipher/twofish-aarch64.S index 9f35b5cd..7941fe3a 100644 --- a/cipher/twofish-aarch64.S +++ b/cipher/twofish-aarch64.S @@ -1,321 +1,321 @@ /* twofish-aarch64.S - ARMv8/AArch64 assembly implementation of Twofish cipher * * Copyright (C) 2016 Jussi Kivilinna * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . 
*/ #include "asm-common-aarch64.h" #if defined(__AARCH64EL__) #ifdef HAVE_COMPATIBLE_GCC_AARCH64_PLATFORM_AS .text /* structure of TWOFISH_context: */ #define s0 0 #define s1 ((s0) + 4 * 256) #define s2 ((s1) + 4 * 256) #define s3 ((s2) + 4 * 256) #define w ((s3) + 4 * 256) #define k ((w) + 4 * 8) /* register macros */ #define CTX x0 #define RDST x1 #define RSRC x2 #define CTXs0 CTX #define CTXs1 x3 #define CTXs2 x4 #define CTXs3 x5 #define CTXw x17 #define RA w6 #define RB w7 #define RC w8 #define RD w9 #define RX w10 #define RY w11 #define xRX x10 #define xRY x11 #define RMASK w12 #define RT0 w13 #define RT1 w14 #define RT2 w15 #define RT3 w16 #define xRT0 x13 #define xRT1 x14 #define xRT2 x15 #define xRT3 x16 /* helper macros */ #ifndef __AARCH64EL__ /* bswap on big-endian */ #define host_to_le(reg) \ rev reg, reg; #define le_to_host(reg) \ rev reg, reg; #else /* nop on little-endian */ #define host_to_le(reg) /*_*/ #define le_to_host(reg) /*_*/ #endif #define ldr_input_aligned_le(rin, a, b, c, d) \ ldr a, [rin, #0]; \ ldr b, [rin, #4]; \ le_to_host(a); \ ldr c, [rin, #8]; \ le_to_host(b); \ ldr d, [rin, #12]; \ le_to_host(c); \ le_to_host(d); #define str_output_aligned_le(rout, a, b, c, d) \ le_to_host(a); \ le_to_host(b); \ str a, [rout, #0]; \ le_to_host(c); \ str b, [rout, #4]; \ le_to_host(d); \ str c, [rout, #8]; \ str d, [rout, #12]; /* unaligned word reads/writes allowed */ #define ldr_input_le(rin, ra, rb, rc, rd, rtmp) \ ldr_input_aligned_le(rin, ra, rb, rc, rd) #define str_output_le(rout, ra, rb, rc, rd, rtmp0, rtmp1) \ str_output_aligned_le(rout, ra, rb, rc, rd) /********************************************************************** 1-way twofish **********************************************************************/ #define encrypt_round(a, b, rc, rd, n, ror_a, adj_a) \ and RT0, RMASK, b, lsr#(8 - 2); \ and RY, RMASK, b, lsr#(16 - 2); \ and RT1, RMASK, b, lsr#(24 - 2); \ ldr RY, [CTXs3, xRY]; \ and RT2, RMASK, b, lsl#(2); \ ldr RT0, [CTXs2, xRT0]; \ and RT3, RMASK, a, lsr#(16 - 2 + (adj_a)); \ ldr RT1, [CTXs0, xRT1]; \ and RX, RMASK, a, lsr#(8 - 2 + (adj_a)); \ ldr RT2, [CTXs1, xRT2]; \ ldr RX, [CTXs1, xRX]; \ ror_a(a); \ \ eor RY, RY, RT0; \ ldr RT3, [CTXs2, xRT3]; \ and RT0, RMASK, a, lsl#(2); \ eor RY, RY, RT1; \ and RT1, RMASK, a, lsr#(24 - 2); \ eor RY, RY, RT2; \ ldr RT0, [CTXs0, xRT0]; \ eor RX, RX, RT3; \ ldr RT1, [CTXs3, xRT1]; \ eor RX, RX, RT0; \ \ ldr RT3, [CTXs3, #(k - s3 + 8 * (n) + 4)]; \ eor RX, RX, RT1; \ ldr RT2, [CTXs3, #(k - s3 + 8 * (n))]; \ \ add RT0, RX, RY, lsl #1; \ add RX, RX, RY; \ add RT0, RT0, RT3; \ add RX, RX, RT2; \ eor rd, RT0, rd, ror #31; \ eor rc, rc, RX; #define dummy(x) /*_*/ #define ror1(r) \ ror r, r, #1; #define decrypt_round(a, b, rc, rd, n, ror_b, adj_b) \ and RT3, RMASK, b, lsl#(2 - (adj_b)); \ and RT1, RMASK, b, lsr#(8 - 2 + (adj_b)); \ ror_b(b); \ and RT2, RMASK, a, lsl#(2); \ and RT0, RMASK, a, lsr#(8 - 2); \ \ ldr RY, [CTXs1, xRT3]; \ ldr RX, [CTXs0, xRT2]; \ and RT3, RMASK, b, lsr#(16 - 2); \ ldr RT1, [CTXs2, xRT1]; \ and RT2, RMASK, a, lsr#(16 - 2); \ ldr RT0, [CTXs1, xRT0]; \ \ ldr RT3, [CTXs3, xRT3]; \ eor RY, RY, RT1; \ \ and RT1, RMASK, b, lsr#(24 - 2); \ eor RX, RX, RT0; \ ldr RT2, [CTXs2, xRT2]; \ and RT0, RMASK, a, lsr#(24 - 2); \ \ ldr RT1, [CTXs0, xRT1]; \ \ eor RY, RY, RT3; \ ldr RT0, [CTXs3, xRT0]; \ eor RX, RX, RT2; \ eor RY, RY, RT1; \ \ ldr RT1, [CTXs3, #(k - s3 + 8 * (n) + 4)]; \ eor RX, RX, RT0; \ ldr RT2, [CTXs3, #(k - s3 + 8 * (n))]; \ \ add RT0, RX, RY, lsl #1; \ add RX, RX, RY; \ add RT0, RT0, RT1; \ add 
RX, RX, RT2; \ eor rd, rd, RT0; \ eor rc, RX, rc, ror #31; #define first_encrypt_cycle(nc) \ encrypt_round(RA, RB, RC, RD, (nc) * 2, dummy, 0); \ encrypt_round(RC, RD, RA, RB, (nc) * 2 + 1, ror1, 1); #define encrypt_cycle(nc) \ encrypt_round(RA, RB, RC, RD, (nc) * 2, ror1, 1); \ encrypt_round(RC, RD, RA, RB, (nc) * 2 + 1, ror1, 1); #define last_encrypt_cycle(nc) \ encrypt_round(RA, RB, RC, RD, (nc) * 2, ror1, 1); \ encrypt_round(RC, RD, RA, RB, (nc) * 2 + 1, ror1, 1); \ ror1(RA); #define first_decrypt_cycle(nc) \ decrypt_round(RC, RD, RA, RB, (nc) * 2 + 1, dummy, 0); \ decrypt_round(RA, RB, RC, RD, (nc) * 2, ror1, 1); #define decrypt_cycle(nc) \ decrypt_round(RC, RD, RA, RB, (nc) * 2 + 1, ror1, 1); \ decrypt_round(RA, RB, RC, RD, (nc) * 2, ror1, 1); #define last_decrypt_cycle(nc) \ decrypt_round(RC, RD, RA, RB, (nc) * 2 + 1, ror1, 1); \ decrypt_round(RA, RB, RC, RD, (nc) * 2, ror1, 1); \ ror1(RD); .globl _gcry_twofish_arm_encrypt_block ELF(.type _gcry_twofish_arm_encrypt_block,%function;) _gcry_twofish_arm_encrypt_block: /* input: * x0: ctx * x1: dst * x2: src */ CFI_STARTPROC(); add CTXw, CTX, #(w); ldr_input_le(RSRC, RA, RB, RC, RD, RT0); /* Input whitening */ ldp RT0, RT1, [CTXw, #(0*8)]; ldp RT2, RT3, [CTXw, #(1*8)]; add CTXs3, CTX, #(s3); add CTXs2, CTX, #(s2); add CTXs1, CTX, #(s1); mov RMASK, #(0xff << 2); eor RA, RA, RT0; eor RB, RB, RT1; eor RC, RC, RT2; eor RD, RD, RT3; first_encrypt_cycle(0); encrypt_cycle(1); encrypt_cycle(2); encrypt_cycle(3); encrypt_cycle(4); encrypt_cycle(5); encrypt_cycle(6); last_encrypt_cycle(7); /* Output whitening */ ldp RT0, RT1, [CTXw, #(2*8)]; ldp RT2, RT3, [CTXw, #(3*8)]; eor RC, RC, RT0; eor RD, RD, RT1; eor RA, RA, RT2; eor RB, RB, RT3; str_output_le(RDST, RC, RD, RA, RB, RT0, RT1); - ret; + ret_spec_stop; CFI_ENDPROC(); .ltorg ELF(.size _gcry_twofish_arm_encrypt_block,.-_gcry_twofish_arm_encrypt_block;) .globl _gcry_twofish_arm_decrypt_block ELF(.type _gcry_twofish_arm_decrypt_block,%function;) _gcry_twofish_arm_decrypt_block: /* input: * %r0: ctx * %r1: dst * %r2: src */ CFI_STARTPROC(); add CTXw, CTX, #(w); ldr_input_le(RSRC, RC, RD, RA, RB, RT0); /* Input whitening */ ldp RT0, RT1, [CTXw, #(2*8)]; ldp RT2, RT3, [CTXw, #(3*8)]; add CTXs3, CTX, #(s3); add CTXs2, CTX, #(s2); add CTXs1, CTX, #(s1); mov RMASK, #(0xff << 2); eor RC, RC, RT0; eor RD, RD, RT1; eor RA, RA, RT2; eor RB, RB, RT3; first_decrypt_cycle(7); decrypt_cycle(6); decrypt_cycle(5); decrypt_cycle(4); decrypt_cycle(3); decrypt_cycle(2); decrypt_cycle(1); last_decrypt_cycle(0); /* Output whitening */ ldp RT0, RT1, [CTXw, #(0*8)]; ldp RT2, RT3, [CTXw, #(1*8)]; eor RA, RA, RT0; eor RB, RB, RT1; eor RC, RC, RT2; eor RD, RD, RT3; str_output_le(RDST, RA, RB, RC, RD, RT0, RT1); - ret; + ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_twofish_arm_decrypt_block,.-_gcry_twofish_arm_decrypt_block;) #endif /*HAVE_COMPATIBLE_GCC_AARCH64_PLATFORM_AS*/ #endif /*__AARCH64EL__*/ diff --git a/mpi/aarch64/mpih-add1.S b/mpi/aarch64/mpih-add1.S index cc356bce..24859b17 100644 --- a/mpi/aarch64/mpih-add1.S +++ b/mpi/aarch64/mpih-add1.S @@ -1,74 +1,74 @@ /* ARM64 add_n -- Add two limb vectors of the same length > 0 and store * sum in a third limb vector. * * Copyright (C) 2013 Jussi Kivilinna * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. 
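The encrypt_round/decrypt_round macros in the Twofish hunk above implement the cipher's F function: four lookups into the key-dependent 8x32 s-box tables for each input word, a pseudo-Hadamard transform, round-key addition, and 1-bit rotates, with the rotate of the first word deferred to the next round via the ror_a/adj_a arguments. A minimal C sketch of one encryption round under those assumptions; twofish_g(), ROL32/ROR32 and the K[] layout are illustrative names, not the patch's API:

#include <stdint.h>

#define ROL32(x, n) (((x) << (n)) | ((x) >> (32 - (n))))
#define ROR32(x, n) (((x) >> (n)) | ((x) << (32 - (n))))

/* g(): XOR of four lookups into the key-dependent 8x32 s-box tables,
 * indexed by the bytes of x (the s[4][256] layout is an assumption). */
static uint32_t twofish_g(const uint32_t s[4][256], uint32_t x)
{
  return s[0][x & 0xff] ^ s[1][(x >> 8) & 0xff]
       ^ s[2][(x >> 16) & 0xff] ^ s[3][(x >> 24) & 0xff];
}

/* One encryption round; n is the round number and K[] stands for the
 * round-key area (the k offsets used by the macro). */
static void twofish_enc_round(const uint32_t s[4][256], const uint32_t K[],
                              uint32_t a, uint32_t b,
                              uint32_t *c, uint32_t *d, unsigned n)
{
  uint32_t t0 = twofish_g(s, a);
  uint32_t t1 = twofish_g(s, ROL32(b, 8));
  uint32_t f0 = t0 + t1 + K[2 * n];          /* pseudo-Hadamard transform */
  uint32_t f1 = t0 + 2 * t1 + K[2 * n + 1];  /* plus round-key addition   */
  *c = ROR32(*c ^ f0, 1);  /* the asm defers this rotate to the next round */
  *d = ROL32(*d, 1) ^ f1;
}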
* * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . * */ #include "sysdep.h" #include "asm-syntax.h" #include "asm-common-aarch64.h" /******************* * mpi_limb_t * _gcry_mpih_add_n( mpi_ptr_t res_ptr, x0 * mpi_ptr_t s1_ptr, x1 * mpi_ptr_t s2_ptr, x2 * mpi_size_t size) w3 */ .text .globl C_SYMBOL_NAME(_gcry_mpih_add_n) ELF(.type C_SYMBOL_NAME(_gcry_mpih_add_n),%function) C_SYMBOL_NAME(_gcry_mpih_add_n): CFI_STARTPROC() and w5, w3, #3; adds xzr, xzr, xzr; /* clear carry flag */ cbz w5, .Large_loop; .Loop: ldr x4, [x1], #8; sub w3, w3, #1; ldr x11, [x2], #8; and w5, w3, #3; adcs x4, x4, x11; str x4, [x0], #8; cbz w3, .Lend; cbnz w5, .Loop; .Large_loop: ldp x4, x6, [x1], #16; ldp x5, x7, [x2], #16; ldp x8, x10, [x1], #16; ldp x9, x11, [x2], #16; sub w3, w3, #4; adcs x4, x4, x5; adcs x6, x6, x7; adcs x8, x8, x9; adcs x10, x10, x11; stp x4, x6, [x0], #16; stp x8, x10, [x0], #16; cbnz w3, .Large_loop; .Lend: adc x0, xzr, xzr; - ret; + ret_spec_stop; CFI_ENDPROC() ELF(.size C_SYMBOL_NAME(_gcry_mpih_add_n),.-C_SYMBOL_NAME(_gcry_mpih_add_n);) diff --git a/mpi/aarch64/mpih-mul1.S b/mpi/aarch64/mpih-mul1.S index 0db54444..f34c13c5 100644 --- a/mpi/aarch64/mpih-mul1.S +++ b/mpi/aarch64/mpih-mul1.S @@ -1,99 +1,99 @@ /* ARM64 mul_1 -- Multiply a limb vector with a limb and store the result in * a second limb vector. * * Copyright (C) 2013 Jussi Kivilinna * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . 
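_gcry_mpih_add_n above is a limbwise add-with-carry: the .Large_loop processes four limbs per iteration with ldp/adcs/stp, while the leading .Loop handles sizes that are not a multiple of four, and the final adc materialises the carry flag as the return value. A portable C sketch of the same computation, not the patch's code (mpih_add_n_ref and the typedef are illustrative):

#include <stddef.h>
#include <stdint.h>

typedef uint64_t mpi_limb_t;

/* Limbwise addition with carry propagation; returns the carry out of the
 * most significant limb. */
static mpi_limb_t mpih_add_n_ref(mpi_limb_t *rp, const mpi_limb_t *s1,
                                 const mpi_limb_t *s2, size_t n)
{
  mpi_limb_t cy = 0;
  for (size_t i = 0; i < n; i++)
    {
      mpi_limb_t a = s1[i], b = s2[i];
      mpi_limb_t r = a + b;
      mpi_limb_t c1 = (r < a);      /* carry out of a + b             */
      r += cy;
      cy = c1 + (r < cy);           /* carry out of adding the old cy */
      rp[i] = r;
    }
  return cy;
}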
* */ #include "sysdep.h" #include "asm-syntax.h" #include "asm-common-aarch64.h" /******************* * mpi_limb_t * _gcry_mpih_mul_1( mpi_ptr_t res_ptr, x0 * mpi_ptr_t s1_ptr, x1 * mpi_size_t s1_size, w2 * mpi_limb_t s2_limb) x3 */ .text .globl C_SYMBOL_NAME(_gcry_mpih_mul_1) ELF(.type C_SYMBOL_NAME(_gcry_mpih_mul_1),%function) C_SYMBOL_NAME(_gcry_mpih_mul_1): CFI_STARTPROC() and w5, w2, #3; mov x4, xzr; cbz w5, .Large_loop; .Loop: ldr x5, [x1], #8; sub w2, w2, #1; mul x9, x5, x3; umulh x10, x5, x3; and w5, w2, #3; adds x4, x4, x9; str x4, [x0], #8; adc x4, x10, xzr; cbz w2, .Lend; cbnz w5, .Loop; .Large_loop: ldp x5, x6, [x1]; sub w2, w2, #4; mul x9, x5, x3; ldp x7, x8, [x1, #16]; umulh x10, x5, x3; add x1, x1, #32; adds x4, x4, x9; str x4, [x0], #8; mul x11, x6, x3; adc x4, x10, xzr; umulh x12, x6, x3; adds x4, x4, x11; str x4, [x0], #8; mul x13, x7, x3; adc x4, x12, xzr; umulh x14, x7, x3; adds x4, x4, x13; str x4, [x0], #8; mul x15, x8, x3; adc x4, x14, xzr; umulh x16, x8, x3; adds x4, x4, x15; str x4, [x0], #8; adc x4, x16, xzr; cbnz w2, .Large_loop; .Lend: mov x0, x4; - ret; + ret_spec_stop; CFI_ENDPROC() ELF(.size C_SYMBOL_NAME(_gcry_mpih_mul_1),.-C_SYMBOL_NAME(_gcry_mpih_mul_1);) diff --git a/mpi/aarch64/mpih-mul2.S b/mpi/aarch64/mpih-mul2.S index b4cc6eeb..1880999d 100644 --- a/mpi/aarch64/mpih-mul2.S +++ b/mpi/aarch64/mpih-mul2.S @@ -1,111 +1,111 @@ /* ARM64 mul_2 -- Multiply a limb vector with a limb and add the result to * a second limb vector. * * Copyright (C) 2013 Jussi Kivilinna * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . 
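In _gcry_mpih_mul_1 above, each mul/umulh pair forms the full 128-bit product of a limb with s2_limb; the low half plus the running carry is stored, and the high half becomes the next carry, which is returned at .Lend. A hedged C reference of that computation (names are illustrative; unsigned __int128 is a GCC/Clang extension standing in for mul/umulh):

#include <stddef.h>
#include <stdint.h>

typedef uint64_t mpi_limb_t;

/* rp[i] = low 64 bits of s1[i] * v plus carry; high halves carried on. */
static mpi_limb_t mpih_mul_1_ref(mpi_limb_t *rp, const mpi_limb_t *s1,
                                 size_t n, mpi_limb_t v)
{
  mpi_limb_t cy = 0;
  for (size_t i = 0; i < n; i++)
    {
      unsigned __int128 p = (unsigned __int128)s1[i] * v + cy;
      rp[i] = (mpi_limb_t)p;        /* mul:   low 64 bits  */
      cy = (mpi_limb_t)(p >> 64);   /* umulh: high 64 bits */
    }
  return cy;
}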
* */ #include "sysdep.h" #include "asm-syntax.h" #include "asm-common-aarch64.h" /******************* * mpi_limb_t * _gcry_mpih_addmul_1( mpi_ptr_t res_ptr, x0 * mpi_ptr_t s1_ptr, x1 * mpi_size_t s1_size, w2 * mpi_limb_t s2_limb) x3 */ .text .globl C_SYMBOL_NAME(_gcry_mpih_addmul_1) ELF(.type C_SYMBOL_NAME(_gcry_mpih_addmul_1),%function) C_SYMBOL_NAME(_gcry_mpih_addmul_1): CFI_STARTPROC() and w5, w2, #3; mov x6, xzr; mov x7, xzr; cbz w5, .Large_loop; .Loop: ldr x5, [x1], #8; mul x12, x5, x3; ldr x4, [x0]; umulh x13, x5, x3; sub w2, w2, #1; adds x12, x12, x4; and w5, w2, #3; adc x13, x13, x7; adds x12, x12, x6; str x12, [x0], #8; adc x6, x7, x13; cbz w2, .Lend; cbnz w5, .Loop; .Large_loop: ldp x5, x9, [x1], #16; sub w2, w2, #4; ldp x4, x8, [x0]; mul x12, x5, x3; umulh x13, x5, x3; adds x12, x12, x4; mul x14, x9, x3; adc x13, x13, x7; adds x12, x12, x6; umulh x15, x9, x3; str x12, [x0], #8; adc x6, x7, x13; adds x14, x14, x8; ldp x5, x9, [x1], #16; adc x15, x15, x7; adds x14, x14, x6; mul x12, x5, x3; str x14, [x0], #8; ldp x4, x8, [x0]; umulh x13, x5, x3; adc x6, x7, x15; adds x12, x12, x4; mul x14, x9, x3; adc x13, x13, x7; adds x12, x12, x6; umulh x15, x9, x3; str x12, [x0], #8; adc x6, x7, x13; adds x14, x14, x8; adc x15, x15, x7; adds x14, x14, x6; str x14, [x0], #8; adc x6, x7, x15; cbnz w2, .Large_loop; .Lend: mov x0, x6; - ret; + ret_spec_stop; CFI_ENDPROC() ELF(.size C_SYMBOL_NAME(_gcry_mpih_addmul_1),.-C_SYMBOL_NAME(_gcry_mpih_addmul_1);) diff --git a/mpi/aarch64/mpih-mul3.S b/mpi/aarch64/mpih-mul3.S index 47a189b6..e5faeddc 100644 --- a/mpi/aarch64/mpih-mul3.S +++ b/mpi/aarch64/mpih-mul3.S @@ -1,124 +1,124 @@ /* ARM mul_3 -- Multiply a limb vector with a limb and subtract the result * from a second limb vector. * * Copyright (C) 2013 Jussi Kivilinna * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . 
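_gcry_mpih_addmul_1 above multiplies each source limb by s2_limb and adds the product into the destination limb, chaining both the multiplication high halves and the addition carries through x6/x7; the value returned from .Lend is the limb that would be added at rp[n]. A hedged C reference, again using a 128-bit product in place of mul/umulh (illustrative names, not the patch's code):

#include <stddef.h>
#include <stdint.h>

typedef uint64_t mpi_limb_t;

/* rp[i] += s1[i] * v with carry propagation; returns the final carry. */
static mpi_limb_t mpih_addmul_1_ref(mpi_limb_t *rp, const mpi_limb_t *s1,
                                    size_t n, mpi_limb_t v)
{
  mpi_limb_t cy = 0;
  for (size_t i = 0; i < n; i++)
    {
      unsigned __int128 p = (unsigned __int128)s1[i] * v + rp[i] + cy;
      rp[i] = (mpi_limb_t)p;
      cy = (mpi_limb_t)(p >> 64);
    }
  return cy;
}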
* */ #include "sysdep.h" #include "asm-syntax.h" #include "asm-common-aarch64.h" /******************* * mpi_limb_t * _gcry_mpih_submul_1( mpi_ptr_t res_ptr, x0 * mpi_ptr_t s1_ptr, x1 * mpi_size_t s1_size, w2 * mpi_limb_t s2_limb) x3 */ .text .globl C_SYMBOL_NAME(_gcry_mpih_submul_1) ELF(.type C_SYMBOL_NAME(_gcry_mpih_submul_1),%function) C_SYMBOL_NAME(_gcry_mpih_submul_1): CFI_STARTPROC() and w5, w2, #3; mov x7, xzr; cbz w5, .Large_loop; subs xzr, xzr, xzr; .Loop: ldr x4, [x1], #8; cinc x7, x7, cc; ldr x5, [x0]; sub w2, w2, #1; mul x6, x4, x3; subs x5, x5, x7; umulh x4, x4, x3; and w10, w2, #3; cset x7, cc; subs x5, x5, x6; add x7, x7, x4; str x5, [x0], #8; cbz w2, .Loop_end; cbnz w10, .Loop; cinc x7, x7, cc; .Large_loop: ldp x4, x8, [x1], #16; sub w2, w2, #4; ldp x5, x9, [x0]; mul x6, x4, x3; subs x5, x5, x7; umulh x4, x4, x3; cset x7, cc; subs x5, x5, x6; mul x6, x8, x3; add x7, x7, x4; str x5, [x0], #8; cinc x7, x7, cc; umulh x8, x8, x3; subs x9, x9, x7; cset x7, cc; subs x9, x9, x6; ldp x4, x10, [x1], #16; str x9, [x0], #8; add x7, x7, x8; ldp x5, x9, [x0]; cinc x7, x7, cc; mul x6, x4, x3; subs x5, x5, x7; umulh x4, x4, x3; cset x7, cc; subs x5, x5, x6; mul x6, x10, x3; add x7, x7, x4; str x5, [x0], #8; cinc x7, x7, cc; umulh x10, x10, x3; subs x9, x9, x7; cset x7, cc; subs x9, x9, x6; add x7, x7, x10; str x9, [x0], #8; cinc x7, x7, cc; cbnz w2, .Large_loop; mov x0, x7; - ret; + ret_spec_stop; .Loop_end: cinc x0, x7, cc; - ret; + ret_spec_stop; CFI_ENDPROC() ELF(.size C_SYMBOL_NAME(_gcry_mpih_submul_1),.-C_SYMBOL_NAME(_gcry_mpih_submul_1);) diff --git a/mpi/aarch64/mpih-sub1.S b/mpi/aarch64/mpih-sub1.S index 16b6c004..46908286 100644 --- a/mpi/aarch64/mpih-sub1.S +++ b/mpi/aarch64/mpih-sub1.S @@ -1,74 +1,74 @@ /* ARM64 sub_n -- Subtract two limb vectors of the same length > 0 and store * sum in a third limb vector. * * Copyright (C) 2013 Jussi Kivilinna * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . * */ #include "sysdep.h" #include "asm-syntax.h" #include "asm-common-aarch64.h" /******************* * mpi_limb_t * _gcry_mpih_sub_n( mpi_ptr_t res_ptr, x0 * mpi_ptr_t s1_ptr, x1 * mpi_ptr_t s2_ptr, x2 * mpi_size_t size) w3 */ .text .globl C_SYMBOL_NAME(_gcry_mpih_sub_n) ELF(.type C_SYMBOL_NAME(_gcry_mpih_sub_n),%function) C_SYMBOL_NAME(_gcry_mpih_sub_n): CFI_STARTPROC() and w5, w3, #3; subs xzr, xzr, xzr; /* prepare carry flag for sub */ cbz w5, .Large_loop; .Loop: ldr x4, [x1], #8; sub w3, w3, #1; ldr x11, [x2], #8; and w5, w3, #3; sbcs x4, x4, x11; str x4, [x0], #8; cbz w3, .Lend; cbnz w5, .Loop; .Large_loop: ldp x4, x6, [x1], #16; ldp x5, x7, [x2], #16; ldp x8, x10, [x1], #16; ldp x9, x11, [x2], #16; sub w3, w3, #4; sbcs x4, x4, x5; sbcs x6, x6, x7; sbcs x8, x8, x9; sbcs x10, x10, x11; stp x4, x6, [x0], #16; stp x8, x10, [x0], #16; cbnz w3, .Large_loop; .Lend: cset x0, cc; - ret; + ret_spec_stop; CFI_ENDPROC() ELF(.size C_SYMBOL_NAME(_gcry_mpih_sub_n),.-C_SYMBOL_NAME(_gcry_mpih_sub_n);)
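The last two hunks follow the same pattern: _gcry_mpih_submul_1 subtracts each product s1[i] * s2_limb from the destination limb, tracking the combined borrow with cset/cinc on the carry flag, and _gcry_mpih_sub_n is a plain limbwise subtract-with-borrow built on sbcs, with the final borrow returned via cset. Hedged C references for both, using explicit comparisons where the assembly uses the flags (illustrative names, not the patch's code):

#include <stddef.h>
#include <stdint.h>

typedef uint64_t mpi_limb_t;

/* rp[i] -= s1[i] * v; returns the borrow that would apply at rp[n]. */
static mpi_limb_t mpih_submul_1_ref(mpi_limb_t *rp, const mpi_limb_t *s1,
                                    size_t n, mpi_limb_t v)
{
  mpi_limb_t cy = 0;                       /* borrow/carry to propagate */
  for (size_t i = 0; i < n; i++)
    {
      unsigned __int128 p = (unsigned __int128)s1[i] * v + cy;
      mpi_limb_t lo = (mpi_limb_t)p;
      cy = (mpi_limb_t)(p >> 64);
      mpi_limb_t r = rp[i] - lo;
      cy += (r > rp[i]);                   /* borrow out of the subtract */
      rp[i] = r;
    }
  return cy;
}

/* Limbwise subtraction with borrow; returns the final borrow (0 or 1). */
static mpi_limb_t mpih_sub_n_ref(mpi_limb_t *rp, const mpi_limb_t *s1,
                                 const mpi_limb_t *s2, size_t n)
{
  mpi_limb_t brw = 0;
  for (size_t i = 0; i < n; i++)
    {
      mpi_limb_t a = s1[i], b = s2[i];
      mpi_limb_t t = a - b;
      mpi_limb_t b1 = (a < b);             /* borrow from a - b          */
      rp[i] = t - brw;
      brw = b1 + (t < brw);                /* borrow from the old borrow */
    }
  return brw;
}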