diff --git a/cipher/asm-common-amd64.h b/cipher/asm-common-amd64.h index d9bbc01b..870fef9a 100644 --- a/cipher/asm-common-amd64.h +++ b/cipher/asm-common-amd64.h @@ -1,207 +1,213 @@ /* asm-common-amd64.h - Common macros for AMD64 assembly * * Copyright (C) 2018 Jussi Kivilinna * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . */ #ifndef GCRY_ASM_COMMON_AMD64_H #define GCRY_ASM_COMMON_AMD64_H #include #ifdef HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS # define ELF(...) __VA_ARGS__ #else # define ELF(...) /*_*/ #endif +#ifdef HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS +# define SECTION_RODATA .section .rdata +#else +# define SECTION_RODATA .section .rodata +#endif + #ifdef __PIC__ # define rRIP (%rip) #else # define rRIP #endif #ifdef __PIC__ # define RIP %rip #else # define RIP #endif #ifdef __PIC__ # define ADD_RIP +rip #else # define ADD_RIP #endif #if defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS) || !defined(__PIC__) # define GET_EXTERN_POINTER(name, reg) movabsq $name, reg #else # ifdef __code_model_large__ # define GET_EXTERN_POINTER(name, reg) \ pushq %r15; \ pushq %r14; \ 1: leaq 1b(%rip), reg; \ movabsq $_GLOBAL_OFFSET_TABLE_-1b, %r14; \ movabsq $name@GOT, %r15; \ addq %r14, reg; \ popq %r14; \ movq (reg, %r15), reg; \ popq %r15; # else # define GET_EXTERN_POINTER(name, reg) movq name@GOTPCREL(%rip), reg # endif #endif #ifdef HAVE_GCC_ASM_CFI_DIRECTIVES /* CFI directives to emit DWARF stack unwinding information. */ # define CFI_STARTPROC() .cfi_startproc # define CFI_ENDPROC() .cfi_endproc # define CFI_REMEMBER_STATE() .cfi_remember_state # define CFI_RESTORE_STATE() .cfi_restore_state # define CFI_ADJUST_CFA_OFFSET(off) .cfi_adjust_cfa_offset off # define CFI_REL_OFFSET(reg,off) .cfi_rel_offset reg, off # define CFI_DEF_CFA_REGISTER(reg) .cfi_def_cfa_register reg # define CFI_REGISTER(ro,rn) .cfi_register ro, rn # define CFI_RESTORE(reg) .cfi_restore reg # define CFI_PUSH(reg) \ CFI_ADJUST_CFA_OFFSET(8); CFI_REL_OFFSET(reg, 0) # define CFI_POP(reg) \ CFI_ADJUST_CFA_OFFSET(-8); CFI_RESTORE(reg) # define CFI_POP_TMP_REG() \ CFI_ADJUST_CFA_OFFSET(-8); # define CFI_LEAVE() \ CFI_ADJUST_CFA_OFFSET(-8); CFI_DEF_CFA_REGISTER(%rsp) /* CFA expressions are used for pointing CFA and registers to * %rsp relative offsets. */ # define DW_REGNO_rax 0 # define DW_REGNO_rdx 1 # define DW_REGNO_rcx 2 # define DW_REGNO_rbx 3 # define DW_REGNO_rsi 4 # define DW_REGNO_rdi 5 # define DW_REGNO_rbp 6 # define DW_REGNO_rsp 7 # define DW_REGNO_r8 8 # define DW_REGNO_r9 9 # define DW_REGNO_r10 10 # define DW_REGNO_r11 11 # define DW_REGNO_r12 12 # define DW_REGNO_r13 13 # define DW_REGNO_r14 14 # define DW_REGNO_r15 15 # define DW_REGNO(reg) DW_REGNO_ ## reg /* Fixed length encoding used for integers for now. 
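The DW_SLEB128_* helpers defined next emit a fixed-width LEB128 byte sequence (continuation bit 0x80 on every byte except the last), which is what the CFI_CFA_ON_STACK/CFI_REG_ON_STACK escapes splice into their DWARF expressions. A minimal C sketch of the same 4-byte encoding, for reference only (the function name is illustrative, not part of this header):

#include <stdint.h>

/* Encode 'value' exactly as DW_SLEB128_28BIT does: four LEB128 bytes,
 * continuation bit set on all but the last byte. */
static void dw_leb128_28bit(uint32_t value, uint8_t out[4])
{
  out[0] = 0x80 | (value & 0x7f);
  out[1] = 0x80 | ((value >> 7) & 0x7f);
  out[2] = 0x80 | ((value >> 14) & 0x7f);
  out[3] = 0x00 | ((value >> 21) & 0x7f);
}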
*/ # define DW_SLEB128_7BIT(value) \ 0x00|((value) & 0x7f) # define DW_SLEB128_28BIT(value) \ 0x80|((value)&0x7f), \ 0x80|(((value)>>7)&0x7f), \ 0x80|(((value)>>14)&0x7f), \ 0x00|(((value)>>21)&0x7f) # define CFI_CFA_ON_STACK(rsp_offs,cfa_depth) \ .cfi_escape \ 0x0f, /* DW_CFA_def_cfa_expression */ \ DW_SLEB128_7BIT(11), /* length */ \ 0x77, /* DW_OP_breg7, rsp + constant */ \ DW_SLEB128_28BIT(rsp_offs), \ 0x06, /* DW_OP_deref */ \ 0x23, /* DW_OP_plus_constu */ \ DW_SLEB128_28BIT((cfa_depth)+8) # define CFI_REG_ON_STACK(reg,rsp_offs) \ .cfi_escape \ 0x10, /* DW_CFA_expression */ \ DW_SLEB128_7BIT(DW_REGNO(reg)), \ DW_SLEB128_7BIT(5), /* length */ \ 0x77, /* DW_OP_breg7, rsp + constant */ \ DW_SLEB128_28BIT(rsp_offs) #else # define CFI_STARTPROC() # define CFI_ENDPROC() # define CFI_REMEMBER_STATE() # define CFI_RESTORE_STATE() # define CFI_ADJUST_CFA_OFFSET(off) # define CFI_REL_OFFSET(reg,off) # define CFI_DEF_CFA_REGISTER(reg) # define CFI_REGISTER(ro,rn) # define CFI_RESTORE(reg) # define CFI_PUSH(reg) # define CFI_POP(reg) # define CFI_POP_TMP_REG() # define CFI_LEAVE() # define CFI_CFA_ON_STACK(rsp_offs,cfa_depth) # define CFI_REG_ON_STACK(reg,rsp_offs) #endif #ifdef HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS # define ENTER_SYSV_FUNC_PARAMS_0_4 \ pushq %rdi; \ CFI_PUSH(%rdi); \ pushq %rsi; \ CFI_PUSH(%rsi); \ movq %rcx, %rdi; \ movq %rdx, %rsi; \ movq %r8, %rdx; \ movq %r9, %rcx; \ # define ENTER_SYSV_FUNC_PARAMS_5 \ ENTER_SYSV_FUNC_PARAMS_0_4; \ movq 0x38(%rsp), %r8; # define ENTER_SYSV_FUNC_PARAMS_6 \ ENTER_SYSV_FUNC_PARAMS_5; \ movq 0x40(%rsp), %r9; # define EXIT_SYSV_FUNC \ popq %rsi; \ CFI_POP(%rsi); \ popq %rdi; \ CFI_POP(%rdi); #else # define ENTER_SYSV_FUNC_PARAMS_0_4 # define ENTER_SYSV_FUNC_PARAMS_5 # define ENTER_SYSV_FUNC_PARAMS_6 # define EXIT_SYSV_FUNC #endif /* 'ret' instruction replacement for straight-line speculation mitigation. */ #define ret_spec_stop \ ret; int3; /* This prevents speculative execution on old AVX512 CPUs, to prevent * speculative execution to AVX512 code. The vpopcntb instruction is * available on newer CPUs that do not suffer from significant frequency * drop when 512-bit vectors are utilized. */ #define spec_stop_avx512 \ vpxord %ymm16, %ymm16, %ymm16; \ vpopcntb %xmm16, %xmm16; /* Supported only by newer AVX512 CPUs. */ \ vpxord %ymm16, %ymm16, %ymm16; #define spec_stop_avx512_intel_syntax \ vpxord ymm16, ymm16, ymm16; \ vpopcntb xmm16, xmm16; /* Supported only by newer AVX512 CPUs. */ \ vpxord ymm16, ymm16, ymm16; #endif /* GCRY_ASM_COMMON_AMD64_H */ diff --git a/cipher/blake2b-amd64-avx2.S b/cipher/blake2b-amd64-avx2.S index 3601b65f..43c2cce1 100644 --- a/cipher/blake2b-amd64-avx2.S +++ b/cipher/blake2b-amd64-avx2.S @@ -1,300 +1,301 @@ /* blake2b-amd64-avx2.S - AVX2 implementation of BLAKE2b * * Copyright (C) 2018 Jussi Kivilinna * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . 
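The ROUND/G1/G2 macros defined below apply the BLAKE2b mixing function G to four columns (and then four diagonals) of the state at once; per 64-bit word the operation is equivalent to this scalar C sketch, shown only for reference (function and variable names are illustrative):

#include <stdint.h>

static inline uint64_t ror64(uint64_t x, unsigned n)
{
  return (x >> n) | (x << (64 - n));
}

/* Scalar equivalent of one G1+G2 pair: BLAKE2b G with the rotation
 * amounts 32, 24, 16 and 63 implemented by the ROR_* macros. */
static void blake2b_g(uint64_t *a, uint64_t *b, uint64_t *c, uint64_t *d,
                      uint64_t m0, uint64_t m1)
{
  *a += m0 + *b;  *d = ror64(*d ^ *a, 32);
  *c += *d;       *b = ror64(*b ^ *c, 24);
  *a += m1 + *b;  *d = ror64(*d ^ *a, 16);
  *c += *d;       *b = ror64(*b ^ *c, 63);
}

In the AVX2 code the 32-, 24- and 16-bit rotations are done with shuffles (vpshufd/vpshufb) and the 63-bit rotation with a shift/add/xor, which is why separate ROR_* macros exist.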
*/ /* The code is based on public-domain/CC0 BLAKE2 reference implementation * by Samual Neves, at https://github.com/BLAKE2/BLAKE2/tree/master/sse * Copyright 2012, Samuel Neves */ #ifdef __x86_64 #include #if defined(HAVE_GCC_INLINE_ASM_AVX2) && \ (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) #include "asm-common-amd64.h" -.text - /* register macros */ #define RSTATE %rdi #define RINBLKS %rsi #define RNBLKS %rdx #define RIV %rcx /* state structure */ #define STATE_H 0 #define STATE_T (STATE_H + 8 * 8) #define STATE_F (STATE_T + 2 * 8) /* vector registers */ #define ROW1 %ymm0 #define ROW2 %ymm1 #define ROW3 %ymm2 #define ROW4 %ymm3 #define TMP1 %ymm4 #define TMP1x %xmm4 #define R16 %ymm5 #define R24 %ymm6 #define MA1 %ymm8 #define MA2 %ymm9 #define MA3 %ymm10 #define MA4 %ymm11 #define MA1x %xmm8 #define MA2x %xmm9 #define MA3x %xmm10 #define MA4x %xmm11 #define MB1 %ymm12 #define MB2 %ymm13 #define MB3 %ymm14 #define MB4 %ymm15 #define MB1x %xmm12 #define MB2x %xmm13 #define MB3x %xmm14 #define MB4x %xmm15 /********************************************************************** blake2b/AVX2 **********************************************************************/ #define GATHER_MSG(m1, m2, m3, m4, m1x, m2x, m3x, m4x, \ s0, s1, s2, s3, s4, s5, s6, s7, s8, \ s9, s10, s11, s12, s13, s14, s15) \ vmovq (s0)*8(RINBLKS), m1x; \ vmovq (s4)*8(RINBLKS), TMP1x; \ vpinsrq $1, (s2)*8(RINBLKS), m1x, m1x; \ vpinsrq $1, (s6)*8(RINBLKS), TMP1x, TMP1x; \ vinserti128 $1, TMP1x, m1, m1; \ vmovq (s1)*8(RINBLKS), m2x; \ vmovq (s5)*8(RINBLKS), TMP1x; \ vpinsrq $1, (s3)*8(RINBLKS), m2x, m2x; \ vpinsrq $1, (s7)*8(RINBLKS), TMP1x, TMP1x; \ vinserti128 $1, TMP1x, m2, m2; \ vmovq (s8)*8(RINBLKS), m3x; \ vmovq (s12)*8(RINBLKS), TMP1x; \ vpinsrq $1, (s10)*8(RINBLKS), m3x, m3x; \ vpinsrq $1, (s14)*8(RINBLKS), TMP1x, TMP1x; \ vinserti128 $1, TMP1x, m3, m3; \ vmovq (s9)*8(RINBLKS), m4x; \ vmovq (s13)*8(RINBLKS), TMP1x; \ vpinsrq $1, (s11)*8(RINBLKS), m4x, m4x; \ vpinsrq $1, (s15)*8(RINBLKS), TMP1x, TMP1x; \ vinserti128 $1, TMP1x, m4, m4; #define LOAD_MSG_0(m1, m2, m3, m4, m1x, m2x, m3x, m4x) \ GATHER_MSG(m1, m2, m3, m4, m1x, m2x, m3x, m4x, \ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15) #define LOAD_MSG_1(m1, m2, m3, m4, m1x, m2x, m3x, m4x) \ GATHER_MSG(m1, m2, m3, m4, m1x, m2x, m3x, m4x, \ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3) #define LOAD_MSG_2(m1, m2, m3, m4, m1x, m2x, m3x, m4x) \ GATHER_MSG(m1, m2, m3, m4, m1x, m2x, m3x, m4x, \ 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4) #define LOAD_MSG_3(m1, m2, m3, m4, m1x, m2x, m3x, m4x) \ GATHER_MSG(m1, m2, m3, m4, m1x, m2x, m3x, m4x, \ 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8) #define LOAD_MSG_4(m1, m2, m3, m4, m1x, m2x, m3x, m4x) \ GATHER_MSG(m1, m2, m3, m4, m1x, m2x, m3x, m4x, \ 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13) #define LOAD_MSG_5(m1, m2, m3, m4, m1x, m2x, m3x, m4x) \ GATHER_MSG(m1, m2, m3, m4, m1x, m2x, m3x, m4x, \ 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9) #define LOAD_MSG_6(m1, m2, m3, m4, m1x, m2x, m3x, m4x) \ GATHER_MSG(m1, m2, m3, m4, m1x, m2x, m3x, m4x, \ 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11) #define LOAD_MSG_7(m1, m2, m3, m4, m1x, m2x, m3x, m4x) \ GATHER_MSG(m1, m2, m3, m4, m1x, m2x, m3x, m4x, \ 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10) #define LOAD_MSG_8(m1, m2, m3, m4, m1x, m2x, m3x, m4x) \ GATHER_MSG(m1, m2, m3, m4, m1x, m2x, m3x, m4x, \ 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5) #define 
LOAD_MSG_9(m1, m2, m3, m4, m1x, m2x, m3x, m4x) \ GATHER_MSG(m1, m2, m3, m4, m1x, m2x, m3x, m4x, \ 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13 , 0) #define LOAD_MSG_10(m1, m2, m3, m4, m1x, m2x, m3x, m4x) \ LOAD_MSG_0(m1, m2, m3, m4, m1x, m2x, m3x, m4x) #define LOAD_MSG_11(m1, m2, m3, m4, m1x, m2x, m3x, m4x) \ LOAD_MSG_1(m1, m2, m3, m4, m1x, m2x, m3x, m4x) #define LOAD_MSG(r, m1, m2, m3, m4) \ LOAD_MSG_##r(m1, m2, m3, m4, m1##x, m2##x, m3##x, m4##x) #define ROR_32(in, out) vpshufd $0xb1, in, out; #define ROR_24(in, out) vpshufb R24, in, out; #define ROR_16(in, out) vpshufb R16, in, out; #define ROR_63(in, out) \ vpsrlq $63, in, TMP1; \ vpaddq in, in, out; \ vpxor TMP1, out, out; #define G(r1, r2, r3, r4, m, ROR_A, ROR_B) \ vpaddq m, r1, r1; \ vpaddq r2, r1, r1; \ vpxor r1, r4, r4; \ ROR_A(r4, r4); \ vpaddq r4, r3, r3; \ vpxor r3, r2, r2; \ ROR_B(r2, r2); #define G1(r1, r2, r3, r4, m) \ G(r1, r2, r3, r4, m, ROR_32, ROR_24); #define G2(r1, r2, r3, r4, m) \ G(r1, r2, r3, r4, m, ROR_16, ROR_63); #define MM_SHUFFLE(z,y,x,w) \ (((z) << 6) | ((y) << 4) | ((x) << 2) | (w)) #define DIAGONALIZE(r1, r2, r3, r4) \ vpermq $MM_SHUFFLE(0,3,2,1), r2, r2; \ vpermq $MM_SHUFFLE(1,0,3,2), r3, r3; \ vpermq $MM_SHUFFLE(2,1,0,3), r4, r4; #define UNDIAGONALIZE(r1, r2, r3, r4) \ vpermq $MM_SHUFFLE(2,1,0,3), r2, r2; \ vpermq $MM_SHUFFLE(1,0,3,2), r3, r3; \ vpermq $MM_SHUFFLE(0,3,2,1), r4, r4; #define ROUND(r, m1, m2, m3, m4) \ G1(ROW1, ROW2, ROW3, ROW4, m1); \ G2(ROW1, ROW2, ROW3, ROW4, m2); \ DIAGONALIZE(ROW1, ROW2, ROW3, ROW4); \ G1(ROW1, ROW2, ROW3, ROW4, m3); \ G2(ROW1, ROW2, ROW3, ROW4, m4); \ UNDIAGONALIZE(ROW1, ROW2, ROW3, ROW4); -blake2b_data: +SECTION_RODATA .align 32 +ELF(.type _blake2b_avx2_data,@object;) +_blake2b_avx2_data: .Liv: .quad 0x6a09e667f3bcc908, 0xbb67ae8584caa73b .quad 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1 .quad 0x510e527fade682d1, 0x9b05688c2b3e6c1f .quad 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179 .Lshuf_ror16: .byte 2, 3, 4, 5, 6, 7, 0, 1, 10, 11, 12, 13, 14, 15, 8, 9 .Lshuf_ror24: .byte 3, 4, 5, 6, 7, 0, 1, 2, 11, 12, 13, 14, 15, 8, 9, 10 +.text .align 64 .globl _gcry_blake2b_transform_amd64_avx2 ELF(.type _gcry_blake2b_transform_amd64_avx2,@function;) _gcry_blake2b_transform_amd64_avx2: /* input: * %rdi: state * %rsi: blks * %rdx: num_blks */ CFI_STARTPROC(); vzeroupper; addq $128, (STATE_T + 0)(RSTATE); adcq $0, (STATE_T + 8)(RSTATE); vbroadcasti128 .Lshuf_ror16 rRIP, R16; vbroadcasti128 .Lshuf_ror24 rRIP, R24; vmovdqa .Liv+(0 * 8) rRIP, ROW3; vmovdqa .Liv+(4 * 8) rRIP, ROW4; vmovdqu (STATE_H + 0 * 8)(RSTATE), ROW1; vmovdqu (STATE_H + 4 * 8)(RSTATE), ROW2; vpxor (STATE_T)(RSTATE), ROW4, ROW4; LOAD_MSG(0, MA1, MA2, MA3, MA4); LOAD_MSG(1, MB1, MB2, MB3, MB4); .Loop: ROUND(0, MA1, MA2, MA3, MA4); LOAD_MSG(2, MA1, MA2, MA3, MA4); ROUND(1, MB1, MB2, MB3, MB4); LOAD_MSG(3, MB1, MB2, MB3, MB4); ROUND(2, MA1, MA2, MA3, MA4); LOAD_MSG(4, MA1, MA2, MA3, MA4); ROUND(3, MB1, MB2, MB3, MB4); LOAD_MSG(5, MB1, MB2, MB3, MB4); ROUND(4, MA1, MA2, MA3, MA4); LOAD_MSG(6, MA1, MA2, MA3, MA4); ROUND(5, MB1, MB2, MB3, MB4); LOAD_MSG(7, MB1, MB2, MB3, MB4); ROUND(6, MA1, MA2, MA3, MA4); LOAD_MSG(8, MA1, MA2, MA3, MA4); ROUND(7, MB1, MB2, MB3, MB4); LOAD_MSG(9, MB1, MB2, MB3, MB4); ROUND(8, MA1, MA2, MA3, MA4); LOAD_MSG(10, MA1, MA2, MA3, MA4); ROUND(9, MB1, MB2, MB3, MB4); LOAD_MSG(11, MB1, MB2, MB3, MB4); sub $1, RNBLKS; jz .Loop_end; lea 128(RINBLKS), RINBLKS; addq $128, (STATE_T + 0)(RSTATE); adcq $0, (STATE_T + 8)(RSTATE); ROUND(10, MA1, MA2, MA3, MA4); LOAD_MSG(0, MA1, MA2, MA3, MA4); ROUND(11, MB1, 
MB2, MB3, MB4); LOAD_MSG(1, MB1, MB2, MB3, MB4); vpxor ROW3, ROW1, ROW1; vpxor ROW4, ROW2, ROW2; vmovdqa .Liv+(0 * 8) rRIP, ROW3; vmovdqa .Liv+(4 * 8) rRIP, ROW4; vpxor (STATE_H + 0 * 8)(RSTATE), ROW1, ROW1; vpxor (STATE_H + 4 * 8)(RSTATE), ROW2, ROW2; vmovdqu ROW1, (STATE_H + 0 * 8)(RSTATE); vmovdqu ROW2, (STATE_H + 4 * 8)(RSTATE); vpxor (STATE_T)(RSTATE), ROW4, ROW4; jmp .Loop; .Loop_end: ROUND(10, MA1, MA2, MA3, MA4); ROUND(11, MB1, MB2, MB3, MB4); vpxor ROW3, ROW1, ROW1; vpxor ROW4, ROW2, ROW2; vpxor (STATE_H + 0 * 8)(RSTATE), ROW1, ROW1; vpxor (STATE_H + 4 * 8)(RSTATE), ROW2, ROW2; vmovdqu ROW1, (STATE_H + 0 * 8)(RSTATE); vmovdqu ROW2, (STATE_H + 4 * 8)(RSTATE); xor %eax, %eax; vzeroall; ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_blake2b_transform_amd64_avx2, .-_gcry_blake2b_transform_amd64_avx2;) #endif /*defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS)*/ #endif /*__x86_64*/ diff --git a/cipher/blake2b-amd64-avx512.S b/cipher/blake2b-amd64-avx512.S index 18b0c3ad..fe938730 100644 --- a/cipher/blake2b-amd64-avx512.S +++ b/cipher/blake2b-amd64-avx512.S @@ -1,314 +1,316 @@ /* blake2b-amd64-avx512.S - AVX512 implementation of BLAKE2b * * Copyright (C) 2022 Jussi Kivilinna * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . 
*/ /* The code is based on public-domain/CC0 BLAKE2 reference implementation * by Samual Neves, at https://github.com/BLAKE2/BLAKE2/tree/master/sse * Copyright 2012, Samuel Neves */ #ifdef __x86_64 #include #if defined(HAVE_GCC_INLINE_ASM_AVX512) && \ (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) #include "asm-common-amd64.h" -.text - /* register macros */ #define RSTATE %rdi #define RINBLKS %rsi #define RNBLKS %rdx #define RIV %rcx /* state structure */ #define STATE_H 0 #define STATE_T (STATE_H + 8 * 8) #define STATE_F (STATE_T + 2 * 8) /* vector registers */ #define ROW1 %ymm0 #define ROW2 %ymm1 #define ROW3 %ymm2 #define ROW4 %ymm3 #define TMP1 %ymm4 #define TMP1x %xmm4 #define MA1 %ymm5 #define MA2 %ymm6 #define MA3 %ymm7 #define MA4 %ymm8 #define MA1x %xmm5 #define MA2x %xmm6 #define MA3x %xmm7 #define MA4x %xmm8 #define MB1 %ymm9 #define MB2 %ymm10 #define MB3 %ymm11 #define MB4 %ymm12 #define MB1x %xmm9 #define MB2x %xmm10 #define MB3x %xmm11 #define MB4x %xmm12 /********************************************************************** blake2b/AVX2 **********************************************************************/ #define GATHER_MSG(m1, m2, m3, m4, m1x, m2x, m3x, m4x, gather_masks) \ vmovdqa gather_masks + (4*4) * 0 rRIP, m2x; \ vmovdqa gather_masks + (4*4) * 1 rRIP, m3x; \ vmovdqa gather_masks + (4*4) * 2 rRIP, m4x; \ vmovdqa gather_masks + (4*4) * 3 rRIP, TMP1x; \ vpgatherdq (RINBLKS, m2x), m1 {%k1}; \ vpgatherdq (RINBLKS, m3x), m2 {%k2}; \ vpgatherdq (RINBLKS, m4x), m3 {%k3}; \ vpgatherdq (RINBLKS, TMP1x), m4 {%k4} #define GEN_GMASK(s0, s1, s2, s3, s4, s5, s6, s7, \ s8, s9, s10, s11, s12, s13, s14, s15) \ .long (s0)*8, (s2)*8, (s4)*8, (s6)*8, \ (s1)*8, (s3)*8, (s5)*8, (s7)*8, \ (s8)*8, (s10)*8, (s12)*8, (s14)*8, \ (s9)*8, (s11)*8, (s13)*8, (s15)*8 #define RESET_KMASKS() \ kmovw %k0, %k1; \ kmovw %k0, %k2; \ kmovw %k0, %k3; \ kmovw %k0, %k4 #define LOAD_MSG_0(m1, m2, m3, m4, m1x, m2x, m3x, m4x) \ GATHER_MSG(m1, m2, m3, m4, m1x, m2x, m3x, m4x, .Lgmask0); \ RESET_KMASKS() #define LOAD_MSG_1(m1, m2, m3, m4, m1x, m2x, m3x, m4x) \ GATHER_MSG(m1, m2, m3, m4, m1x, m2x, m3x, m4x, .Lgmask1); \ RESET_KMASKS() #define LOAD_MSG_2(m1, m2, m3, m4, m1x, m2x, m3x, m4x) \ GATHER_MSG(m1, m2, m3, m4, m1x, m2x, m3x, m4x, .Lgmask2); \ RESET_KMASKS() #define LOAD_MSG_3(m1, m2, m3, m4, m1x, m2x, m3x, m4x) \ GATHER_MSG(m1, m2, m3, m4, m1x, m2x, m3x, m4x, .Lgmask3); \ RESET_KMASKS() #define LOAD_MSG_4(m1, m2, m3, m4, m1x, m2x, m3x, m4x) \ GATHER_MSG(m1, m2, m3, m4, m1x, m2x, m3x, m4x, .Lgmask4); \ RESET_KMASKS() #define LOAD_MSG_5(m1, m2, m3, m4, m1x, m2x, m3x, m4x) \ GATHER_MSG(m1, m2, m3, m4, m1x, m2x, m3x, m4x, .Lgmask5); \ RESET_KMASKS() #define LOAD_MSG_6(m1, m2, m3, m4, m1x, m2x, m3x, m4x) \ GATHER_MSG(m1, m2, m3, m4, m1x, m2x, m3x, m4x, .Lgmask6); \ RESET_KMASKS() #define LOAD_MSG_7(m1, m2, m3, m4, m1x, m2x, m3x, m4x) \ GATHER_MSG(m1, m2, m3, m4, m1x, m2x, m3x, m4x, .Lgmask7); \ RESET_KMASKS() #define LOAD_MSG_8(m1, m2, m3, m4, m1x, m2x, m3x, m4x) \ GATHER_MSG(m1, m2, m3, m4, m1x, m2x, m3x, m4x, .Lgmask8); \ RESET_KMASKS() #define LOAD_MSG_9(m1, m2, m3, m4, m1x, m2x, m3x, m4x) \ GATHER_MSG(m1, m2, m3, m4, m1x, m2x, m3x, m4x, .Lgmask9); \ RESET_KMASKS() #define LOAD_MSG_10(m1, m2, m3, m4, m1x, m2x, m3x, m4x) \ GATHER_MSG(m1, m2, m3, m4, m1x, m2x, m3x, m4x, .Lgmask0); \ RESET_KMASKS() #define LOAD_MSG_11(m1, m2, m3, m4, m1x, m2x, m3x, m4x) \ GATHER_MSG(m1, m2, m3, m4, m1x, m2x, m3x, m4x, .Lgmask1); #define LOAD_MSG(r, m1, m2, m3, m4) \ 
LOAD_MSG_##r(m1, m2, m3, m4, m1##x, m2##x, m3##x, m4##x) #define ROR_32(in, out) vpshufd $0xb1, in, out #define ROR_24(in, out) vprorq $24, in, out #define ROR_16(in, out) vprorq $16, in, out #define ROR_63(in, out) vprorq $63, in, out #define G(r1, r2, r3, r4, m, ROR_A, ROR_B) \ vpaddq m, r1, r1; \ vpaddq r2, r1, r1; \ vpxor r1, r4, r4; \ ROR_A(r4, r4); \ vpaddq r4, r3, r3; \ vpxor r3, r2, r2; \ ROR_B(r2, r2) #define G1(r1, r2, r3, r4, m) \ G(r1, r2, r3, r4, m, ROR_32, ROR_24) #define G2(r1, r2, r3, r4, m) \ G(r1, r2, r3, r4, m, ROR_16, ROR_63) #define MM_SHUFFLE(z,y,x,w) \ (((z) << 6) | ((y) << 4) | ((x) << 2) | (w)) #define DIAGONALIZE(r1, r2, r3, r4) \ vpermq $MM_SHUFFLE(0,3,2,1), r2, r2; \ vpermq $MM_SHUFFLE(1,0,3,2), r3, r3; \ vpermq $MM_SHUFFLE(2,1,0,3), r4, r4 #define UNDIAGONALIZE(r1, r2, r3, r4) \ vpermq $MM_SHUFFLE(2,1,0,3), r2, r2; \ vpermq $MM_SHUFFLE(1,0,3,2), r3, r3; \ vpermq $MM_SHUFFLE(0,3,2,1), r4, r4 #define ROUND(r, m1, m2, m3, m4) \ G1(ROW1, ROW2, ROW3, ROW4, m1); \ G2(ROW1, ROW2, ROW3, ROW4, m2); \ DIAGONALIZE(ROW1, ROW2, ROW3, ROW4); \ G1(ROW1, ROW2, ROW3, ROW4, m3); \ G2(ROW1, ROW2, ROW3, ROW4, m4); \ UNDIAGONALIZE(ROW1, ROW2, ROW3, ROW4) -ELF(.type blake2b_data,@object;) -blake2b_data: +SECTION_RODATA + .align 32 +ELF(.type _blake2b_avx512_data,@object;) +_blake2b_avx512_data: .Liv: .quad 0x6a09e667f3bcc908, 0xbb67ae8584caa73b .quad 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1 .quad 0x510e527fade682d1, 0x9b05688c2b3e6c1f .quad 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179 .Lgmask0: GEN_GMASK(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15) .Lgmask1: GEN_GMASK(14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3) .Lgmask2: GEN_GMASK(11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4) .Lgmask3: GEN_GMASK(7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8) .Lgmask4: GEN_GMASK(9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13) .Lgmask5: GEN_GMASK(2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9) .Lgmask6: GEN_GMASK(12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11) .Lgmask7: GEN_GMASK(13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10) .Lgmask8: GEN_GMASK(6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5) .Lgmask9: GEN_GMASK(10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13 , 0) +.text + .align 64 .globl _gcry_blake2b_transform_amd64_avx512 ELF(.type _gcry_blake2b_transform_amd64_avx512,@function;) _gcry_blake2b_transform_amd64_avx512: /* input: * %rdi: state * %rsi: blks * %rdx: num_blks */ CFI_STARTPROC(); spec_stop_avx512; movl $0xf, %eax; kmovw %eax, %k0; xorl %eax, %eax; RESET_KMASKS(); addq $128, (STATE_T + 0)(RSTATE); adcq $0, (STATE_T + 8)(RSTATE); vmovdqa .Liv+(0 * 8) rRIP, ROW3; vmovdqa .Liv+(4 * 8) rRIP, ROW4; vmovdqu (STATE_H + 0 * 8)(RSTATE), ROW1; vmovdqu (STATE_H + 4 * 8)(RSTATE), ROW2; vpxor (STATE_T)(RSTATE), ROW4, ROW4; LOAD_MSG(0, MA1, MA2, MA3, MA4); LOAD_MSG(1, MB1, MB2, MB3, MB4); jmp .Loop; .align 64, 0xcc .Loop: ROUND(0, MA1, MA2, MA3, MA4); LOAD_MSG(2, MA1, MA2, MA3, MA4); ROUND(1, MB1, MB2, MB3, MB4); LOAD_MSG(3, MB1, MB2, MB3, MB4); ROUND(2, MA1, MA2, MA3, MA4); LOAD_MSG(4, MA1, MA2, MA3, MA4); ROUND(3, MB1, MB2, MB3, MB4); LOAD_MSG(5, MB1, MB2, MB3, MB4); ROUND(4, MA1, MA2, MA3, MA4); LOAD_MSG(6, MA1, MA2, MA3, MA4); ROUND(5, MB1, MB2, MB3, MB4); LOAD_MSG(7, MB1, MB2, MB3, MB4); ROUND(6, MA1, MA2, MA3, MA4); LOAD_MSG(8, MA1, MA2, MA3, MA4); ROUND(7, MB1, MB2, MB3, MB4); LOAD_MSG(9, MB1, MB2, MB3, MB4); ROUND(8, MA1, MA2, MA3, MA4); LOAD_MSG(10, MA1, MA2, MA3, MA4); ROUND(9, MB1, MB2, MB3, MB4); LOAD_MSG(11, MB1, MB2, MB3, MB4); 
sub $1, RNBLKS; jz .Loop_end; RESET_KMASKS(); lea 128(RINBLKS), RINBLKS; addq $128, (STATE_T + 0)(RSTATE); adcq $0, (STATE_T + 8)(RSTATE); ROUND(10, MA1, MA2, MA3, MA4); LOAD_MSG(0, MA1, MA2, MA3, MA4); ROUND(11, MB1, MB2, MB3, MB4); LOAD_MSG(1, MB1, MB2, MB3, MB4); vpternlogq $0x96, (STATE_H + 0 * 8)(RSTATE), ROW3, ROW1; vpternlogq $0x96, (STATE_H + 4 * 8)(RSTATE), ROW4, ROW2; vmovdqa .Liv+(0 * 8) rRIP, ROW3; vmovdqa .Liv+(4 * 8) rRIP, ROW4; vmovdqu ROW1, (STATE_H + 0 * 8)(RSTATE); vmovdqu ROW2, (STATE_H + 4 * 8)(RSTATE); vpxor (STATE_T)(RSTATE), ROW4, ROW4; jmp .Loop; .align 64, 0xcc .Loop_end: ROUND(10, MA1, MA2, MA3, MA4); ROUND(11, MB1, MB2, MB3, MB4); vpternlogq $0x96, (STATE_H + 0 * 8)(RSTATE), ROW3, ROW1; vpternlogq $0x96, (STATE_H + 4 * 8)(RSTATE), ROW4, ROW2; vmovdqu ROW1, (STATE_H + 0 * 8)(RSTATE); vmovdqu ROW2, (STATE_H + 4 * 8)(RSTATE); kxorw %k0, %k0, %k0; vzeroall; RESET_KMASKS(); ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_blake2b_transform_amd64_avx512, .-_gcry_blake2b_transform_amd64_avx512;) #endif /*defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS)*/ #endif /*__x86_64*/ diff --git a/cipher/blake2s-amd64-avx.S b/cipher/blake2s-amd64-avx.S index 5094b4c1..44b82ab2 100644 --- a/cipher/blake2s-amd64-avx.S +++ b/cipher/blake2s-amd64-avx.S @@ -1,278 +1,281 @@ /* blake2s-amd64-avx.S - AVX implementation of BLAKE2s * * Copyright (C) 2018 Jussi Kivilinna * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . 
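Likewise for BLAKE2s below: the G1/G2 macros use 32-bit rotations by 16, 12, 8 and 7, so per word one G1+G2 pair amounts to this scalar sketch (illustrative only):

#include <stdint.h>

static inline uint32_t ror32(uint32_t x, unsigned n)
{
  return (x >> n) | (x << (32 - n));
}

/* Scalar equivalent of one G1+G2 pair of the BLAKE2s round. */
static void blake2s_g(uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d,
                      uint32_t m0, uint32_t m1)
{
  *a += m0 + *b;  *d = ror32(*d ^ *a, 16);
  *c += *d;       *b = ror32(*b ^ *c, 12);
  *a += m1 + *b;  *d = ror32(*d ^ *a, 8);
  *c += *d;       *b = ror32(*b ^ *c, 7);
}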
*/ /* The code is based on public-domain/CC0 BLAKE2 reference implementation * by Samual Neves, at https://github.com/BLAKE2/BLAKE2/tree/master/sse * Copyright 2012, Samuel Neves */ #ifdef __x86_64 #include #if defined(HAVE_GCC_INLINE_ASM_AVX) && \ (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) #include "asm-common-amd64.h" -.text - /* register macros */ #define RSTATE %rdi #define RINBLKS %rsi #define RNBLKS %rdx #define RIV %rcx /* state structure */ #define STATE_H 0 #define STATE_T (STATE_H + 8 * 4) #define STATE_F (STATE_T + 2 * 4) /* vector registers */ #define ROW1 %xmm0 #define ROW2 %xmm1 #define ROW3 %xmm2 #define ROW4 %xmm3 #define TMP1 %xmm4 #define TMP1x %xmm4 #define R16 %xmm5 #define R8 %xmm6 #define MA1 %xmm8 #define MA2 %xmm9 #define MA3 %xmm10 #define MA4 %xmm11 #define MB1 %xmm12 #define MB2 %xmm13 #define MB3 %xmm14 #define MB4 %xmm15 /********************************************************************** blake2s/AVX **********************************************************************/ #define GATHER_MSG(m1, m2, m3, m4, \ s0, s1, s2, s3, s4, s5, s6, s7, s8, \ s9, s10, s11, s12, s13, s14, s15) \ vmovd (s0)*4(RINBLKS), m1; \ vmovd (s1)*4(RINBLKS), m2; \ vmovd (s8)*4(RINBLKS), m3; \ vmovd (s9)*4(RINBLKS), m4; \ vpinsrd $1, (s2)*4(RINBLKS), m1, m1; \ vpinsrd $1, (s3)*4(RINBLKS), m2, m2; \ vpinsrd $1, (s10)*4(RINBLKS), m3, m3; \ vpinsrd $1, (s11)*4(RINBLKS), m4, m4; \ vpinsrd $2, (s4)*4(RINBLKS), m1, m1; \ vpinsrd $2, (s5)*4(RINBLKS), m2, m2; \ vpinsrd $2, (s12)*4(RINBLKS), m3, m3; \ vpinsrd $2, (s13)*4(RINBLKS), m4, m4; \ vpinsrd $3, (s6)*4(RINBLKS), m1, m1; \ vpinsrd $3, (s7)*4(RINBLKS), m2, m2; \ vpinsrd $3, (s14)*4(RINBLKS), m3, m3; \ vpinsrd $3, (s15)*4(RINBLKS), m4, m4; #define LOAD_MSG_0(m1, m2, m3, m4) \ GATHER_MSG(m1, m2, m3, m4, \ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15) #define LOAD_MSG_1(m1, m2, m3, m4) \ GATHER_MSG(m1, m2, m3, m4, \ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3) #define LOAD_MSG_2(m1, m2, m3, m4) \ GATHER_MSG(m1, m2, m3, m4, \ 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4) #define LOAD_MSG_3(m1, m2, m3, m4) \ GATHER_MSG(m1, m2, m3, m4, \ 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8) #define LOAD_MSG_4(m1, m2, m3, m4) \ GATHER_MSG(m1, m2, m3, m4, \ 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13) #define LOAD_MSG_5(m1, m2, m3, m4) \ GATHER_MSG(m1, m2, m3, m4, \ 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9) #define LOAD_MSG_6(m1, m2, m3, m4) \ GATHER_MSG(m1, m2, m3, m4, \ 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11) #define LOAD_MSG_7(m1, m2, m3, m4) \ GATHER_MSG(m1, m2, m3, m4, \ 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10) #define LOAD_MSG_8(m1, m2, m3, m4) \ GATHER_MSG(m1, m2, m3, m4, \ 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5) #define LOAD_MSG_9(m1, m2, m3, m4) \ GATHER_MSG(m1, m2, m3, m4, \ 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13 , 0) #define LOAD_MSG(r, m1, m2, m3, m4) LOAD_MSG_##r(m1, m2, m3, m4) #define ROR_16(in, out) vpshufb R16, in, out; #define ROR_8(in, out) vpshufb R8, in, out; #define ROR_12(in, out) \ vpsrld $12, in, TMP1; \ vpslld $(32 - 12), in, out; \ vpxor TMP1, out, out; #define ROR_7(in, out) \ vpsrld $7, in, TMP1; \ vpslld $(32 - 7), in, out; \ vpxor TMP1, out, out; #define G(r1, r2, r3, r4, m, ROR_A, ROR_B) \ vpaddd m, r1, r1; \ vpaddd r2, r1, r1; \ vpxor r1, r4, r4; \ ROR_A(r4, r4); \ vpaddd r4, r3, r3; \ vpxor r3, r2, r2; \ ROR_B(r2, r2); #define G1(r1, r2, r3, r4, m) \ G(r1, r2, 
r3, r4, m, ROR_16, ROR_12); #define G2(r1, r2, r3, r4, m) \ G(r1, r2, r3, r4, m, ROR_8, ROR_7); #define MM_SHUFFLE(z,y,x,w) \ (((z) << 6) | ((y) << 4) | ((x) << 2) | (w)) #define DIAGONALIZE(r1, r2, r3, r4) \ vpshufd $MM_SHUFFLE(0,3,2,1), r2, r2; \ vpshufd $MM_SHUFFLE(1,0,3,2), r3, r3; \ vpshufd $MM_SHUFFLE(2,1,0,3), r4, r4; #define UNDIAGONALIZE(r1, r2, r3, r4) \ vpshufd $MM_SHUFFLE(2,1,0,3), r2, r2; \ vpshufd $MM_SHUFFLE(1,0,3,2), r3, r3; \ vpshufd $MM_SHUFFLE(0,3,2,1), r4, r4; #define ROUND(r, m1, m2, m3, m4) \ G1(ROW1, ROW2, ROW3, ROW4, m1); \ G2(ROW1, ROW2, ROW3, ROW4, m2); \ DIAGONALIZE(ROW1, ROW2, ROW3, ROW4); \ G1(ROW1, ROW2, ROW3, ROW4, m3); \ G2(ROW1, ROW2, ROW3, ROW4, m4); \ UNDIAGONALIZE(ROW1, ROW2, ROW3, ROW4); -blake2s_data: +SECTION_RODATA + .align 16 +ELF(.type _blake2s_avx_data,@object;) +_blake2s_avx_data: .Liv: .long 0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A .long 0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19 .Lshuf_ror16: .byte 2,3,0,1,6,7,4,5,10,11,8,9,14,15,12,13 .Lshuf_ror8: .byte 1,2,3,0,5,6,7,4,9,10,11,8,13,14,15,12 +.text + .align 64 .globl _gcry_blake2s_transform_amd64_avx ELF(.type _gcry_blake2s_transform_amd64_avx,@function;) _gcry_blake2s_transform_amd64_avx: /* input: * %rdi: state * %rsi: blks * %rdx: num_blks */ CFI_STARTPROC(); vzeroupper; addq $64, (STATE_T + 0)(RSTATE); vmovdqa .Lshuf_ror16 rRIP, R16; vmovdqa .Lshuf_ror8 rRIP, R8; vmovdqa .Liv+(0 * 4) rRIP, ROW3; vmovdqa .Liv+(4 * 4) rRIP, ROW4; vmovdqu (STATE_H + 0 * 4)(RSTATE), ROW1; vmovdqu (STATE_H + 4 * 4)(RSTATE), ROW2; vpxor (STATE_T)(RSTATE), ROW4, ROW4; LOAD_MSG(0, MA1, MA2, MA3, MA4); LOAD_MSG(1, MB1, MB2, MB3, MB4); .Loop: ROUND(0, MA1, MA2, MA3, MA4); LOAD_MSG(2, MA1, MA2, MA3, MA4); ROUND(1, MB1, MB2, MB3, MB4); LOAD_MSG(3, MB1, MB2, MB3, MB4); ROUND(2, MA1, MA2, MA3, MA4); LOAD_MSG(4, MA1, MA2, MA3, MA4); ROUND(3, MB1, MB2, MB3, MB4); LOAD_MSG(5, MB1, MB2, MB3, MB4); ROUND(4, MA1, MA2, MA3, MA4); LOAD_MSG(6, MA1, MA2, MA3, MA4); ROUND(5, MB1, MB2, MB3, MB4); LOAD_MSG(7, MB1, MB2, MB3, MB4); ROUND(6, MA1, MA2, MA3, MA4); LOAD_MSG(8, MA1, MA2, MA3, MA4); ROUND(7, MB1, MB2, MB3, MB4); LOAD_MSG(9, MB1, MB2, MB3, MB4); sub $1, RNBLKS; jz .Loop_end; lea 64(RINBLKS), RINBLKS; addq $64, (STATE_T + 0)(RSTATE); ROUND(8, MA1, MA2, MA3, MA4); LOAD_MSG(0, MA1, MA2, MA3, MA4); ROUND(9, MB1, MB2, MB3, MB4); LOAD_MSG(1, MB1, MB2, MB3, MB4); vpxor ROW3, ROW1, ROW1; vpxor ROW4, ROW2, ROW2; vmovdqa .Liv+(0 * 4) rRIP, ROW3; vmovdqa .Liv+(4 * 4) rRIP, ROW4; vpxor (STATE_H + 0 * 4)(RSTATE), ROW1, ROW1; vpxor (STATE_H + 4 * 4)(RSTATE), ROW2, ROW2; vmovdqu ROW1, (STATE_H + 0 * 4)(RSTATE); vmovdqu ROW2, (STATE_H + 4 * 4)(RSTATE); vpxor (STATE_T)(RSTATE), ROW4, ROW4; jmp .Loop; .Loop_end: ROUND(8, MA1, MA2, MA3, MA4); ROUND(9, MB1, MB2, MB3, MB4); vpxor ROW3, ROW1, ROW1; vpxor ROW4, ROW2, ROW2; vpxor (STATE_H + 0 * 4)(RSTATE), ROW1, ROW1; vpxor (STATE_H + 4 * 4)(RSTATE), ROW2, ROW2; vmovdqu ROW1, (STATE_H + 0 * 4)(RSTATE); vmovdqu ROW2, (STATE_H + 4 * 4)(RSTATE); xor %eax, %eax; vzeroall; ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_blake2s_transform_amd64_avx, .-_gcry_blake2s_transform_amd64_avx;) #endif /*defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS)*/ #endif /*__x86_64*/ diff --git a/cipher/blake2s-amd64-avx512.S b/cipher/blake2s-amd64-avx512.S index ddcdfd67..e2da2a18 100644 --- a/cipher/blake2s-amd64-avx512.S +++ b/cipher/blake2s-amd64-avx512.S @@ -1,263 +1,265 @@ /* blake2s-amd64-avx512.S - AVX512 implementation of BLAKE2s * * Copyright (C) 2022 Jussi Kivilinna * * This file is part of Libgcrypt. 
* * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . */ /* The code is based on public-domain/CC0 BLAKE2 reference implementation * by Samual Neves, at https://github.com/BLAKE2/BLAKE2/tree/master/sse * Copyright 2012, Samuel Neves */ #ifdef __x86_64 #include #if defined(HAVE_GCC_INLINE_ASM_AVX512) && \ (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) #include "asm-common-amd64.h" -.text - /* register macros */ #define RSTATE %rdi #define RINBLKS %rsi #define RNBLKS %rdx #define RIV %rcx /* state structure */ #define STATE_H 0 #define STATE_T (STATE_H + 8 * 4) #define STATE_F (STATE_T + 2 * 4) /* vector registers */ #define ROW1 %xmm0 #define ROW2 %xmm1 #define ROW3 %xmm2 #define ROW4 %xmm3 #define TMP1 %xmm4 #define TMP1x %xmm4 #define MA1 %xmm5 #define MA2 %xmm6 #define MA3 %xmm7 #define MA4 %xmm8 #define MB1 %xmm9 #define MB2 %xmm10 #define MB3 %xmm11 #define MB4 %xmm12 /********************************************************************** blake2s/AVX **********************************************************************/ /* On Intel tigerlake, vmovd+vpinsrd approach is faster than vpgatherdd. */ #define GATHER_MSG(m1, m2, m3, m4, \ s0, s1, s2, s3, s4, s5, s6, s7, s8, \ s9, s10, s11, s12, s13, s14, s15) \ vmovd (s0)*4(RINBLKS), m1; \ vmovd (s1)*4(RINBLKS), m2; \ vmovd (s8)*4(RINBLKS), m3; \ vmovd (s9)*4(RINBLKS), m4; \ vpinsrd $1, (s2)*4(RINBLKS), m1, m1; \ vpinsrd $1, (s3)*4(RINBLKS), m2, m2; \ vpinsrd $1, (s10)*4(RINBLKS), m3, m3; \ vpinsrd $1, (s11)*4(RINBLKS), m4, m4; \ vpinsrd $2, (s4)*4(RINBLKS), m1, m1; \ vpinsrd $2, (s5)*4(RINBLKS), m2, m2; \ vpinsrd $2, (s12)*4(RINBLKS), m3, m3; \ vpinsrd $2, (s13)*4(RINBLKS), m4, m4; \ vpinsrd $3, (s6)*4(RINBLKS), m1, m1; \ vpinsrd $3, (s7)*4(RINBLKS), m2, m2; \ vpinsrd $3, (s14)*4(RINBLKS), m3, m3; \ vpinsrd $3, (s15)*4(RINBLKS), m4, m4; #define LOAD_MSG_0(m1, m2, m3, m4) \ GATHER_MSG(m1, m2, m3, m4, \ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15) #define LOAD_MSG_1(m1, m2, m3, m4) \ GATHER_MSG(m1, m2, m3, m4, \ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3) #define LOAD_MSG_2(m1, m2, m3, m4) \ GATHER_MSG(m1, m2, m3, m4, \ 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4) #define LOAD_MSG_3(m1, m2, m3, m4) \ GATHER_MSG(m1, m2, m3, m4, \ 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8) #define LOAD_MSG_4(m1, m2, m3, m4) \ GATHER_MSG(m1, m2, m3, m4, \ 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13) #define LOAD_MSG_5(m1, m2, m3, m4) \ GATHER_MSG(m1, m2, m3, m4, \ 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9) #define LOAD_MSG_6(m1, m2, m3, m4) \ GATHER_MSG(m1, m2, m3, m4, \ 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11) #define LOAD_MSG_7(m1, m2, m3, m4) \ GATHER_MSG(m1, m2, m3, m4, \ 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10) #define LOAD_MSG_8(m1, m2, m3, m4) \ GATHER_MSG(m1, m2, m3, m4, \ 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5) #define LOAD_MSG_9(m1, m2, m3, m4) \ 
GATHER_MSG(m1, m2, m3, m4, \ 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13 , 0) #define LOAD_MSG(r, m1, m2, m3, m4) LOAD_MSG_##r(m1, m2, m3, m4) #define ROR_16(in, out) vprord $16, in, out; #define ROR_8(in, out) vprord $8, in, out; #define ROR_12(in, out) vprord $12, in, out; #define ROR_7(in, out) vprord $7, in, out; #define G(r1, r2, r3, r4, m, ROR_A, ROR_B) \ vpaddd m, r1, r1; \ vpaddd r2, r1, r1; \ vpxor r1, r4, r4; \ ROR_A(r4, r4); \ vpaddd r4, r3, r3; \ vpxor r3, r2, r2; \ ROR_B(r2, r2); #define G1(r1, r2, r3, r4, m) \ G(r1, r2, r3, r4, m, ROR_16, ROR_12); #define G2(r1, r2, r3, r4, m) \ G(r1, r2, r3, r4, m, ROR_8, ROR_7); #define MM_SHUFFLE(z,y,x,w) \ (((z) << 6) | ((y) << 4) | ((x) << 2) | (w)) #define DIAGONALIZE(r1, r2, r3, r4) \ vpshufd $MM_SHUFFLE(0,3,2,1), r2, r2; \ vpshufd $MM_SHUFFLE(1,0,3,2), r3, r3; \ vpshufd $MM_SHUFFLE(2,1,0,3), r4, r4; #define UNDIAGONALIZE(r1, r2, r3, r4) \ vpshufd $MM_SHUFFLE(2,1,0,3), r2, r2; \ vpshufd $MM_SHUFFLE(1,0,3,2), r3, r3; \ vpshufd $MM_SHUFFLE(0,3,2,1), r4, r4; #define ROUND(r, m1, m2, m3, m4) \ G1(ROW1, ROW2, ROW3, ROW4, m1); \ G2(ROW1, ROW2, ROW3, ROW4, m2); \ DIAGONALIZE(ROW1, ROW2, ROW3, ROW4); \ G1(ROW1, ROW2, ROW3, ROW4, m3); \ G2(ROW1, ROW2, ROW3, ROW4, m4); \ UNDIAGONALIZE(ROW1, ROW2, ROW3, ROW4); -ELF(.type blake2s_data,@object;) -blake2s_data: +SECTION_RODATA + +ELF(.type _blake2s_avx512_data,@object;) .align 16 +_blake2s_avx512_data: .Liv: .long 0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A .long 0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19 +.text + .align 64 .globl _gcry_blake2s_transform_amd64_avx512 ELF(.type _gcry_blake2s_transform_amd64_avx512,@function;) _gcry_blake2s_transform_amd64_avx512: /* input: * %rdi: state * %rsi: blks * %rdx: num_blks */ CFI_STARTPROC(); spec_stop_avx512; addq $64, (STATE_T + 0)(RSTATE); vmovdqa .Liv+(0 * 4) rRIP, ROW3; vmovdqa .Liv+(4 * 4) rRIP, ROW4; vmovdqu (STATE_H + 0 * 4)(RSTATE), ROW1; vmovdqu (STATE_H + 4 * 4)(RSTATE), ROW2; vpxor (STATE_T)(RSTATE), ROW4, ROW4; LOAD_MSG(0, MA1, MA2, MA3, MA4); LOAD_MSG(1, MB1, MB2, MB3, MB4); jmp .Loop; .align 64, 0xcc .Loop: ROUND(0, MA1, MA2, MA3, MA4); LOAD_MSG(2, MA1, MA2, MA3, MA4); ROUND(1, MB1, MB2, MB3, MB4); LOAD_MSG(3, MB1, MB2, MB3, MB4); ROUND(2, MA1, MA2, MA3, MA4); LOAD_MSG(4, MA1, MA2, MA3, MA4); ROUND(3, MB1, MB2, MB3, MB4); LOAD_MSG(5, MB1, MB2, MB3, MB4); ROUND(4, MA1, MA2, MA3, MA4); LOAD_MSG(6, MA1, MA2, MA3, MA4); ROUND(5, MB1, MB2, MB3, MB4); LOAD_MSG(7, MB1, MB2, MB3, MB4); ROUND(6, MA1, MA2, MA3, MA4); LOAD_MSG(8, MA1, MA2, MA3, MA4); ROUND(7, MB1, MB2, MB3, MB4); LOAD_MSG(9, MB1, MB2, MB3, MB4); sub $1, RNBLKS; jz .Loop_end; lea 64(RINBLKS), RINBLKS; addq $64, (STATE_T + 0)(RSTATE); ROUND(8, MA1, MA2, MA3, MA4); LOAD_MSG(0, MA1, MA2, MA3, MA4); ROUND(9, MB1, MB2, MB3, MB4); LOAD_MSG(1, MB1, MB2, MB3, MB4); vpternlogq $0x96, (STATE_H + 0 * 4)(RSTATE), ROW3, ROW1; vpternlogq $0x96, (STATE_H + 4 * 4)(RSTATE), ROW4, ROW2; vmovdqa .Liv+(0 * 4) rRIP, ROW3; vmovdqa .Liv+(4 * 4) rRIP, ROW4; vmovdqu ROW1, (STATE_H + 0 * 4)(RSTATE); vmovdqu ROW2, (STATE_H + 4 * 4)(RSTATE); vpxor (STATE_T)(RSTATE), ROW4, ROW4; jmp .Loop; .align 64, 0xcc .Loop_end: ROUND(8, MA1, MA2, MA3, MA4); ROUND(9, MB1, MB2, MB3, MB4); vpternlogq $0x96, (STATE_H + 0 * 4)(RSTATE), ROW3, ROW1; vpternlogq $0x96, (STATE_H + 4 * 4)(RSTATE), ROW4, ROW2; vmovdqu ROW1, (STATE_H + 0 * 4)(RSTATE); vmovdqu ROW2, (STATE_H + 4 * 4)(RSTATE); xorl %eax, %eax; vzeroall; ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_blake2s_transform_amd64_avx512, 
.-_gcry_blake2s_transform_amd64_avx512;) #endif /*defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS)*/ #endif /*__x86_64*/ diff --git a/cipher/poly1305-amd64-avx512.S b/cipher/poly1305-amd64-avx512.S index 9beed8ad..250db072 100644 --- a/cipher/poly1305-amd64-avx512.S +++ b/cipher/poly1305-amd64-avx512.S @@ -1,1624 +1,1626 @@ /* ;; ;; Copyright (c) 2021-2022, Intel Corporation ;; ;; Redistribution and use in source and binary forms, with or without ;; modification, are permitted provided that the following conditions are met: ;; ;; * Redistributions of source code must retain the above copyright notice, ;; this list of conditions and the following disclaimer. ;; * Redistributions in binary form must reproduce the above copyright ;; notice, this list of conditions and the following disclaimer in the ;; documentation and/or other materials provided with the distribution. ;; * Neither the name of Intel Corporation nor the names of its contributors ;; may be used to endorse or promote products derived from this software ;; without specific prior written permission. ;; ;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE ;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL ;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, ;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
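The constants that follow (.Lmask_44, .Lmask_42, .Lhigh_bit) support the radix-2^44 representation used throughout this file: each 130-bit quantity is held as limbs of 44, 44 and 42 bits. The message-loading code further below splits every 16-byte block accordingly and ORs in the 2^128 padding bit. A scalar sketch of that split, assuming the two little-endian 64-bit words of a block are in lo/hi (names are illustrative, not from the patch):

#include <stdint.h>

#define MASK_44 ((uint64_t)0xfffffffffff)  /* low 44 bits, as .Lmask_44 */

/* Split one 16-byte block (lo = bytes 0..7, hi = bytes 8..15, little endian)
 * into three limbs of 44/44/42 bits and set the 2^128 padding bit, which
 * lands at bit 40 of the top limb (the .Lhigh_bit constant). */
static void poly1305_split_block(uint64_t lo, uint64_t hi, uint64_t limb[3])
{
  limb[0] = lo & MASK_44;
  limb[1] = ((lo >> 44) | (hi << 20)) & MASK_44;
  limb[2] = (hi >> 24) | ((uint64_t)1 << 40);
}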
;; */ /* * From: * https://github.com/intel/intel-ipsec-mb/blob/f0cad21a644231c0f5d4af51f56061a5796343fb/lib/avx512/poly_fma_avx512.asm * * Conversion to GAS assembly and integration to libgcrypt * by Jussi Kivilinna */ #ifdef __x86_64 #include #if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && \ defined(HAVE_INTEL_SYNTAX_PLATFORM_AS) && \ defined(HAVE_GCC_INLINE_ASM_AVX512) #include "asm-common-amd64.h" .intel_syntax noprefix -.text +SECTION_RODATA ELF(.type _gcry_poly1305_avx512_consts,@object) _gcry_poly1305_avx512_consts: .align 64 .Lmask_44: .quad 0xfffffffffff, 0xfffffffffff, 0xfffffffffff, 0xfffffffffff .quad 0xfffffffffff, 0xfffffffffff, 0xfffffffffff, 0xfffffffffff .align 64 .Lmask_42: .quad 0x3ffffffffff, 0x3ffffffffff, 0x3ffffffffff, 0x3ffffffffff .quad 0x3ffffffffff, 0x3ffffffffff, 0x3ffffffffff, 0x3ffffffffff .align 64 .Lhigh_bit: .quad 0x10000000000, 0x10000000000, 0x10000000000, 0x10000000000 .quad 0x10000000000, 0x10000000000, 0x10000000000, 0x10000000000 .Lbyte_len_to_mask_table: .short 0x0000, 0x0001, 0x0003, 0x0007 .short 0x000f, 0x001f, 0x003f, 0x007f .short 0x00ff, 0x01ff, 0x03ff, 0x07ff .short 0x0fff, 0x1fff, 0x3fff, 0x7fff .short 0xffff .align 64 .Lbyte64_len_to_mask_table: .quad 0x0000000000000000, 0x0000000000000001 .quad 0x0000000000000003, 0x0000000000000007 .quad 0x000000000000000f, 0x000000000000001f .quad 0x000000000000003f, 0x000000000000007f .quad 0x00000000000000ff, 0x00000000000001ff .quad 0x00000000000003ff, 0x00000000000007ff .quad 0x0000000000000fff, 0x0000000000001fff .quad 0x0000000000003fff, 0x0000000000007fff .quad 0x000000000000ffff, 0x000000000001ffff .quad 0x000000000003ffff, 0x000000000007ffff .quad 0x00000000000fffff, 0x00000000001fffff .quad 0x00000000003fffff, 0x00000000007fffff .quad 0x0000000000ffffff, 0x0000000001ffffff .quad 0x0000000003ffffff, 0x0000000007ffffff .quad 0x000000000fffffff, 0x000000001fffffff .quad 0x000000003fffffff, 0x000000007fffffff .quad 0x00000000ffffffff, 0x00000001ffffffff .quad 0x00000003ffffffff, 0x00000007ffffffff .quad 0x0000000fffffffff, 0x0000001fffffffff .quad 0x0000003fffffffff, 0x0000007fffffffff .quad 0x000000ffffffffff, 0x000001ffffffffff .quad 0x000003ffffffffff, 0x000007ffffffffff .quad 0x00000fffffffffff, 0x00001fffffffffff .quad 0x00003fffffffffff, 0x00007fffffffffff .quad 0x0000ffffffffffff, 0x0001ffffffffffff .quad 0x0003ffffffffffff, 0x0007ffffffffffff .quad 0x000fffffffffffff, 0x001fffffffffffff .quad 0x003fffffffffffff, 0x007fffffffffffff .quad 0x00ffffffffffffff, 0x01ffffffffffffff .quad 0x03ffffffffffffff, 0x07ffffffffffffff .quad 0x0fffffffffffffff, 0x1fffffffffffffff .quad 0x3fffffffffffffff, 0x7fffffffffffffff .quad 0xffffffffffffffff .Lqword_high_bit_mask: .short 0, 0x1, 0x5, 0x15, 0x55, 0x57, 0x5f, 0x7f, 0xff ELF(.size _gcry_poly1305_avx512_consts,.-_gcry_poly1305_avx512_consts) #define raxd eax #define rbxd ebx #define rcxd ecx #define rdxd edx #define rsid esi #define rdid edi #define rbpd ebp #define rspd esp #define __DWORD(X) X##d #define DWORD(R) __DWORD(R) #define arg1 rdi #define arg2 rsi #define arg3 rdx #define arg4 rcx #define job arg1 #define gp1 rsi #define gp2 rcx /* ;; don't use rdx and rax - they are needed for multiply operation */ #define gp3 rbp #define gp4 r8 #define gp5 r9 #define gp6 r10 #define gp7 r11 #define gp8 r12 #define gp9 r13 #define gp10 r14 #define gp11 r15 #define len gp11 #define msg gp10 #define POLY1305_BLOCK_SIZE 16 #define STACK_r_save 0 #define STACK_r_save_size (6 * 64) #define STACK_gpr_save 
(STACK_r_save + STACK_r_save_size) #define STACK_gpr_save_size (8 * 8) #define STACK_rsp_save (STACK_gpr_save + STACK_gpr_save_size) #define STACK_rsp_save_size (1 * 8) #define STACK_SIZE (STACK_rsp_save + STACK_rsp_save_size) #define A2_ZERO(...) /**/ #define A2_ZERO_INVERT(...) __VA_ARGS__ #define A2_NOT_ZERO(...) __VA_ARGS__ #define A2_NOT_ZERO_INVERT(...) /**/ #define clear_zmm(vec) vpxord vec, vec, vec /* ;; ============================================================================= ;; ============================================================================= ;; Computes hash for message length being multiple of block size ;; ============================================================================= ;; Combining 64-bit x 64-bit multiplication with reduction steps ;; ;; NOTES: ;; 1) A2 here is only two bits so anything above is subject of reduction. ;; Constant C1 = R1 + (R1 >> 2) simplifies multiply with less operations ;; 2) Magic 5x comes from mod 2^130-5 property and incorporating ;; reduction into multiply phase. ;; See "Cheating at modular arithmetic" and "Poly1305's prime: 2^130 - 5" ;; paragraphs at https://loup-vaillant.fr/tutorials/poly1305-design for more details. ;; ;; Flow of the code below is as follows: ;; ;; A2 A1 A0 ;; x R1 R0 ;; ----------------------------- ;; A2×R0 A1×R0 A0×R0 ;; + A0×R1 ;; + 5xA2xR1 5xA1xR1 ;; ----------------------------- ;; [0|L2L] [L1H|L1L] [L0H|L0L] ;; ;; Registers: T3:T2 T1:A0 ;; ;; Completing the multiply and adding (with carry) 3x128-bit limbs into ;; 192-bits again (3x64-bits): ;; A0 = L0L ;; A1 = L0H + L1L ;; T3 = L1H + L2L ; A0 [in/out] GPR with accumulator bits 63:0 ; A1 [in/out] GPR with accumulator bits 127:64 ; A2 [in/out] GPR with accumulator bits 195:128 ; R0 [in] GPR with R constant bits 63:0 ; R1 [in] GPR with R constant bits 127:64 ; C1 [in] C1 = R1 + (R1 >> 2) ; T1 [clobbered] GPR register ; T2 [clobbered] GPR register ; T3 [clobbered] GPR register ; GP_RAX [clobbered] RAX register ; GP_RDX [clobbered] RDX register ; IF_A2 [in] Used if input A2 is not 0 */ #define POLY1305_MUL_REDUCE(A0, A1, A2, R0, R1, C1, T1, T2, T3, GP_RAX, GP_RDX, IF_A2) \ /* T3:T2 = (A0 * R1) */ \ mov GP_RAX, R1; \ mul A0; \ mov T2, GP_RAX; \ mov GP_RAX, R0; \ mov T3, GP_RDX; \ \ /* T1:A0 = (A0 * R0) */ \ mul A0; \ mov A0, GP_RAX; /* A0 not used in other operations */ \ mov GP_RAX, R0; \ mov T1, GP_RDX; \ \ /* T3:T2 += (A1 * R0) */ \ mul A1; \ add T2, GP_RAX; \ mov GP_RAX, C1; \ adc T3, GP_RDX; \ \ /* T1:A0 += (A1 * R1x5) */ \ mul A1; \ IF_A2(mov A1, A2); /* use A1 for A2 */ \ add A0, GP_RAX; \ adc T1, GP_RDX; \ \ /* NOTE: A2 is clamped to 2-bits, */ \ /* R1/R0 is clamped to 60-bits, */ \ /* their product is less than 2^64. */ \ \ IF_A2(/* T3:T2 += (A2 * R1x5) */); \ IF_A2(imul A1, C1); \ IF_A2(add T2, A1); \ IF_A2(mov A1, T1); /* T1:A0 => A1:A0 */ \ IF_A2(adc T3, 0); \ \ IF_A2(/* T3:A1 += (A2 * R0) */); \ IF_A2(imul A2, R0); \ IF_A2(add A1, T2); \ IF_A2(adc T3, A2); \ \ IF_A2##_INVERT(/* If A2 == 0, just move and add T1-T2 to A1 */); \ IF_A2##_INVERT(mov A1, T1); \ IF_A2##_INVERT(add A1, T2); \ IF_A2##_INVERT(adc T3, 0); \ \ /* At this point, 3 64-bit limbs are in T3:A1:A0 */ \ /* T3 can span over more than 2 bits so final partial reduction step is needed. 
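The partial reduction spelled out in the comments and code that follow relies on 2^130 ≡ 5 (mod 2^130 - 5): everything in the top limb above its low two bits is multiplied by 5 (k = 4*(T3>>2) + (T3>>2)) and added back at bit 0. A scalar C sketch of that step, assuming a compiler with unsigned __int128 (function and variable names are illustrative):

#include <stdint.h>

/* Partial reduction of the 192-bit accumulator t3:a1:a0 into A2:A1:A0,
 * leaving A2 at most 2 bits wide; mirrors the T1/T3 arithmetic below. */
static void poly1305_partial_reduce(uint64_t *a0, uint64_t *a1,
                                    uint64_t *a2, uint64_t t3)
{
  uint64_t k = (t3 & ~(uint64_t)3) + (t3 >> 2);   /* 5 * (t3 >> 2) */
  unsigned __int128 sum;

  *a2 = t3 & 3;
  sum = (unsigned __int128)*a0 + k;
  *a0 = (uint64_t)sum;
  sum = (unsigned __int128)*a1 + (uint64_t)(sum >> 64);
  *a1 = (uint64_t)sum;
  *a2 += (uint64_t)(sum >> 64);
}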
*/ \ \ /* Partial reduction (just to fit into 130 bits) */ \ /* A2 = T3 & 3 */ \ /* k = (T3 & ~3) + (T3 >> 2) */ \ /* Y x4 + Y x1 */ \ /* A2:A1:A0 += k */ \ \ /* Result will be in A2:A1:A0 */ \ mov T1, T3; \ mov DWORD(A2), DWORD(T3); \ and T1, ~3; \ shr T3, 2; \ and DWORD(A2), 3; \ add T1, T3; \ \ /* A2:A1:A0 += k (kept in T1) */ \ add A0, T1; \ adc A1, 0; \ adc DWORD(A2), 0 /* ;; ============================================================================= ;; ============================================================================= ;; Computes hash for 8 16-byte message blocks, ;; and adds new message blocks to accumulator. ;; ;; It first multiplies all 8 blocks with powers of R: ;; ;; a2 a1 a0 ;; × b2 b1 b0 ;; --------------------------------------- ;; a2×b0 a1×b0 a0×b0 ;; + a1×b1 a0×b1 5×a2×b1 ;; + a0×b2 5×a2×b2 5×a1×b2 ;; --------------------------------------- ;; p2 p1 p0 ;; ;; Then, it propagates the carry (higher bits after bit 43) from lower limbs into higher limbs, ;; multiplying by 5 in case of the carry of p2. ;; ;A0 [in/out] ZMM register containing 1st 44-bit limb of the 8 blocks ;A1 [in/out] ZMM register containing 2nd 44-bit limb of the 8 blocks ;A2 [in/out] ZMM register containing 3rd 44-bit limb of the 8 blocks ;R0 [in] ZMM register (R0) to include the 1st limb of R ;R1 [in] ZMM register (R1) to include the 2nd limb of R ;R2 [in] ZMM register (R2) to include the 3rd limb of R ;R1P [in] ZMM register (R1') to include the 2nd limb of R (multiplied by 5) ;R2P [in] ZMM register (R2') to include the 3rd limb of R (multiplied by 5) ;P0_L [clobbered] ZMM register to contain p[0] of the 8 blocks ;P0_H [clobbered] ZMM register to contain p[0] of the 8 blocks ;P1_L [clobbered] ZMM register to contain p[1] of the 8 blocks ;P1_H [clobbered] ZMM register to contain p[1] of the 8 blocks ;P2_L [clobbered] ZMM register to contain p[2] of the 8 blocks ;P2_H [clobbered] ZMM register to contain p[2] of the 8 blocks ;ZTMP1 [clobbered] Temporary ZMM register */ #define POLY1305_MUL_REDUCE_VEC(A0, A1, A2, R0, R1, R2, R1P, R2P, P0_L, P0_H, \ P1_L, P1_H, P2_L, P2_H, ZTMP1) \ /* ;; Reset accumulator */ \ vpxorq P0_L, P0_L, P0_L; \ vpxorq P0_H, P0_H, P0_H; \ vpxorq P1_L, P1_L, P1_L; \ vpxorq P1_H, P1_H, P1_H; \ vpxorq P2_L, P2_L, P2_L; \ vpxorq P2_H, P2_H, P2_H; \ \ /* ; Reset accumulator and calculate products */ \ vpmadd52luq P0_L, A2, R1P; \ vpmadd52huq P0_H, A2, R1P; \ vpmadd52luq P1_L, A2, R2P; \ vpmadd52huq P1_H, A2, R2P; \ vpmadd52luq P2_L, A2, R0; \ vpmadd52huq P2_H, A2, R0; \ \ vpmadd52luq P1_L, A0, R1; \ vpmadd52huq P1_H, A0, R1; \ vpmadd52luq P2_L, A0, R2; \ vpmadd52huq P2_H, A0, R2; \ vpmadd52luq P0_L, A0, R0; \ vpmadd52huq P0_H, A0, R0; \ \ vpmadd52luq P0_L, A1, R2P; \ vpmadd52huq P0_H, A1, R2P; \ vpmadd52luq P1_L, A1, R0; \ vpmadd52huq P1_H, A1, R0; \ vpmadd52luq P2_L, A1, R1; \ vpmadd52huq P2_H, A1, R1; \ \ /* ; Carry propagation (first pass) */ \ vpsrlq ZTMP1, P0_L, 44; \ vpandq A0, P0_L, [.Lmask_44 ADD_RIP]; /* ; Clear top 20 bits */ \ vpsllq P0_H, P0_H, 8; \ vpaddq P0_H, P0_H, ZTMP1; \ vpaddq P1_L, P1_L, P0_H; \ vpandq A1, P1_L, [.Lmask_44 ADD_RIP]; /* ; Clear top 20 bits */ \ vpsrlq ZTMP1, P1_L, 44; \ vpsllq P1_H, P1_H, 8; \ vpaddq P1_H, P1_H, ZTMP1; \ vpaddq P2_L, P2_L, P1_H; \ vpandq A2, P2_L, [.Lmask_42 ADD_RIP]; /* ; Clear top 22 bits */ \ vpsrlq ZTMP1, P2_L, 42; \ vpsllq P2_H, P2_H, 10; \ vpaddq P2_H, P2_H, ZTMP1; \ \ /* ; Carry propagation (second pass) */ \ \ /* ; Multiply by 5 the highest bits (above 130 bits) */ \ vpaddq A0, A0, P2_H; \ vpsllq P2_H, P2_H, 2; \ vpaddq 
A0, A0, P2_H; \ vpsrlq ZTMP1, A0, 44; \ vpandq A0, A0, [.Lmask_44 ADD_RIP]; \ vpaddq A1, A1, ZTMP1; /* ;; ============================================================================= ;; ============================================================================= ;; Computes hash for 16 16-byte message blocks, ;; and adds new message blocks to accumulator, ;; interleaving this computation with the loading and splatting ;; of new data. ;; ;; It first multiplies all 16 blocks with powers of R (8 blocks from A0-A2 ;; and 8 blocks from B0-B2, multiplied by R0-R2) ;; ;; a2 a1 a0 ;; × b2 b1 b0 ;; --------------------------------------- ;; a2×b0 a1×b0 a0×b0 ;; + a1×b1 a0×b1 5×a2×b1 ;; + a0×b2 5×a2×b2 5×a1×b2 ;; --------------------------------------- ;; p2 p1 p0 ;; ;; Then, it propagates the carry (higher bits after bit 43) ;; from lower limbs into higher limbs, ;; multiplying by 5 in case of the carry of p2, and adds ;; the results to A0-A2 and B0-B2. ;; ;; ============================================================================= ;A0 [in/out] ZMM register containing 1st 44-bit limb of blocks 1-8 ;A1 [in/out] ZMM register containing 2nd 44-bit limb of blocks 1-8 ;A2 [in/out] ZMM register containing 3rd 44-bit limb of blocks 1-8 ;B0 [in/out] ZMM register containing 1st 44-bit limb of blocks 9-16 ;B1 [in/out] ZMM register containing 2nd 44-bit limb of blocks 9-16 ;B2 [in/out] ZMM register containing 3rd 44-bit limb of blocks 9-16 ;R0 [in] ZMM register (R0) to include the 1st limb of R ;R1 [in] ZMM register (R1) to include the 2nd limb of R ;R2 [in] ZMM register (R2) to include the 3rd limb of R ;R1P [in] ZMM register (R1') to include the 2nd limb of R (multiplied by 5) ;R2P [in] ZMM register (R2') to include the 3rd limb of R (multiplied by 5) ;P0_L [clobbered] ZMM register to contain p[0] of the 8 blocks 1-8 ;P0_H [clobbered] ZMM register to contain p[0] of the 8 blocks 1-8 ;P1_L [clobbered] ZMM register to contain p[1] of the 8 blocks 1-8 ;P1_H [clobbered] ZMM register to contain p[1] of the 8 blocks 1-8 ;P2_L [clobbered] ZMM register to contain p[2] of the 8 blocks 1-8 ;P2_H [clobbered] ZMM register to contain p[2] of the 8 blocks 1-8 ;Q0_L [clobbered] ZMM register to contain p[0] of the 8 blocks 9-16 ;Q0_H [clobbered] ZMM register to contain p[0] of the 8 blocks 9-16 ;Q1_L [clobbered] ZMM register to contain p[1] of the 8 blocks 9-16 ;Q1_H [clobbered] ZMM register to contain p[1] of the 8 blocks 9-16 ;Q2_L [clobbered] ZMM register to contain p[2] of the 8 blocks 9-16 ;Q2_H [clobbered] ZMM register to contain p[2] of the 8 blocks 9-16 ;ZTMP1 [clobbered] Temporary ZMM register ;ZTMP2 [clobbered] Temporary ZMM register ;ZTMP3 [clobbered] Temporary ZMM register ;ZTMP4 [clobbered] Temporary ZMM register ;ZTMP5 [clobbered] Temporary ZMM register ;ZTMP6 [clobbered] Temporary ZMM register ;ZTMP7 [clobbered] Temporary ZMM register ;ZTMP8 [clobbered] Temporary ZMM register ;ZTMP9 [clobbered] Temporary ZMM register ;MSG [in/out] Pointer to message ;LEN [in/out] Length left of message */ #define POLY1305_MSG_MUL_REDUCE_VEC16(A0, A1, A2, B0, B1, B2, R0, R1, R2, R1P, \ R2P, P0_L, P0_H, P1_L, P1_H, P2_L, P2_H, \ Q0_L, Q0_H, Q1_L, Q1_H, Q2_L, Q2_H, \ ZTMP1, ZTMP2, ZTMP3, ZTMP4, ZTMP5, \ ZTMP6, ZTMP7, ZTMP8, ZTMP9, MSG, LEN) \ /* ;; Reset accumulator */ \ vpxorq P0_L, P0_L, P0_L; \ vpxorq P0_H, P0_H, P0_H; \ vpxorq P1_L, P1_L, P1_L; \ vpxorq P1_H, P1_H, P1_H; \ vpxorq P2_L, P2_L, P2_L; \ vpxorq P2_H, P2_H, P2_H; \ vpxorq Q0_L, Q0_L, Q0_L; \ vpxorq Q0_H, Q0_H, Q0_H; \ vpxorq Q1_L, Q1_L, Q1_L; \ vpxorq Q1_H, 
Q1_H, Q1_H; \ vpxorq Q2_L, Q2_L, Q2_L; \ vpxorq Q2_H, Q2_H, Q2_H; \ \ /* ;; This code interleaves hash computation with input loading/splatting */ \ \ /* ; Calculate products */ \ vpmadd52luq P0_L, A2, R1P; \ vpmadd52huq P0_H, A2, R1P; \ /* ;; input loading of new blocks */ \ add MSG, POLY1305_BLOCK_SIZE*16; \ sub LEN, POLY1305_BLOCK_SIZE*16; \ \ vpmadd52luq Q0_L, B2, R1P; \ vpmadd52huq Q0_H, B2, R1P; \ \ vpmadd52luq P1_L, A2, R2P; \ vpmadd52huq P1_H, A2, R2P; \ /* ; Load next block of data (128 bytes) */ \ vmovdqu64 ZTMP5, [MSG]; \ vmovdqu64 ZTMP2, [MSG + 64]; \ \ vpmadd52luq Q1_L, B2, R2P; \ vpmadd52huq Q1_H, B2, R2P; \ \ /* ; Interleave new blocks of data */ \ vpunpckhqdq ZTMP3, ZTMP5, ZTMP2; \ vpunpcklqdq ZTMP5, ZTMP5, ZTMP2; \ \ vpmadd52luq P0_L, A0, R0; \ vpmadd52huq P0_H, A0, R0; \ /* ; Highest 42-bit limbs of new blocks */ \ vpsrlq ZTMP6, ZTMP3, 24; \ vporq ZTMP6, ZTMP6, [.Lhigh_bit ADD_RIP]; /* ; Add 2^128 to all 8 final qwords of the message */ \ \ vpmadd52luq Q0_L, B0, R0; \ vpmadd52huq Q0_H, B0, R0; \ \ /* ; Middle 44-bit limbs of new blocks */ \ vpsrlq ZTMP2, ZTMP5, 44; \ vpsllq ZTMP4, ZTMP3, 20; \ \ vpmadd52luq P2_L, A2, R0; \ vpmadd52huq P2_H, A2, R0; \ vpternlogq ZTMP2, ZTMP4, [.Lmask_44 ADD_RIP], 0xA8; /* ; (A OR B AND C) */ \ \ /* ; Lowest 44-bit limbs of new blocks */ \ vpandq ZTMP5, ZTMP5, [.Lmask_44 ADD_RIP]; \ \ vpmadd52luq Q2_L, B2, R0; \ vpmadd52huq Q2_H, B2, R0; \ \ /* ; Load next block of data (128 bytes) */ \ vmovdqu64 ZTMP8, [MSG + 64*2]; \ vmovdqu64 ZTMP9, [MSG + 64*3]; \ \ vpmadd52luq P1_L, A0, R1; \ vpmadd52huq P1_H, A0, R1; \ /* ; Interleave new blocks of data */ \ vpunpckhqdq ZTMP3, ZTMP8, ZTMP9; \ vpunpcklqdq ZTMP8, ZTMP8, ZTMP9; \ \ vpmadd52luq Q1_L, B0, R1; \ vpmadd52huq Q1_H, B0, R1; \ \ /* ; Highest 42-bit limbs of new blocks */ \ vpsrlq ZTMP7, ZTMP3, 24; \ vporq ZTMP7, ZTMP7, [.Lhigh_bit ADD_RIP]; /* ; Add 2^128 to all 8 final qwords of the message */ \ \ vpmadd52luq P0_L, A1, R2P; \ vpmadd52huq P0_H, A1, R2P; \ \ /* ; Middle 44-bit limbs of new blocks */ \ vpsrlq ZTMP9, ZTMP8, 44; \ vpsllq ZTMP4, ZTMP3, 20; \ \ vpmadd52luq Q0_L, B1, R2P; \ vpmadd52huq Q0_H, B1, R2P; \ \ vpternlogq ZTMP9, ZTMP4, [.Lmask_44 ADD_RIP], 0xA8; /* ; (A OR B AND C) */ \ \ /* ; Lowest 44-bit limbs of new blocks */ \ vpandq ZTMP8, ZTMP8, [.Lmask_44 ADD_RIP]; \ \ vpmadd52luq P2_L, A0, R2; \ vpmadd52huq P2_H, A0, R2; \ /* ; Carry propagation (first pass) */ \ vpsrlq ZTMP1, P0_L, 44; \ vpsllq P0_H, P0_H, 8; \ vpmadd52luq Q2_L, B0, R2; \ vpmadd52huq Q2_H, B0, R2; \ \ vpsrlq ZTMP3, Q0_L, 44; \ vpsllq Q0_H, Q0_H, 8; \ \ vpmadd52luq P1_L, A1, R0; \ vpmadd52huq P1_H, A1, R0; \ /* ; Carry propagation (first pass) - continue */ \ vpandq A0, P0_L, [.Lmask_44 ADD_RIP]; /* ; Clear top 20 bits */ \ vpaddq P0_H, P0_H, ZTMP1; \ vpmadd52luq Q1_L, B1, R0; \ vpmadd52huq Q1_H, B1, R0; \ \ vpandq B0, Q0_L, [.Lmask_44 ADD_RIP]; /* ; Clear top 20 bits */ \ vpaddq Q0_H, Q0_H, ZTMP3; \ \ vpmadd52luq P2_L, A1, R1; \ vpmadd52huq P2_H, A1, R1; \ /* ; Carry propagation (first pass) - continue */ \ vpaddq P1_L, P1_L, P0_H; \ vpsllq P1_H, P1_H, 8; \ vpsrlq ZTMP1, P1_L, 44; \ vpmadd52luq Q2_L, B1, R1; \ vpmadd52huq Q2_H, B1, R1; \ \ vpandq A1, P1_L, [.Lmask_44 ADD_RIP]; /* ; Clear top 20 bits */ \ vpaddq Q1_L, Q1_L, Q0_H; \ vpsllq Q1_H, Q1_H, 8; \ vpsrlq ZTMP3, Q1_L, 44; \ vpandq B1, Q1_L, [.Lmask_44 ADD_RIP]; /* ; Clear top 20 bits */ \ \ vpaddq P2_L, P2_L, P1_H; /* ; P2_L += P1_H + P1_L[63:44] */ \ vpaddq P2_L, P2_L, ZTMP1; \ vpandq A2, P2_L, [.Lmask_42 ADD_RIP]; /* ; Clear top 22 bits */ \ vpaddq A2, A2, 
ZTMP6; /* ; Add highest bits from new blocks to accumulator */ \ vpsrlq ZTMP1, P2_L, 42; \ vpsllq P2_H, P2_H, 10; \ vpaddq P2_H, P2_H, ZTMP1; \ \ vpaddq Q2_L, Q2_L, Q1_H; /* ; Q2_L += P1_H + P1_L[63:44] */ \ vpaddq Q2_L, Q2_L, ZTMP3; \ vpandq B2, Q2_L, [.Lmask_42 ADD_RIP]; /* ; Clear top 22 bits */ \ vpaddq B2, B2, ZTMP7; /* ; Add highest bits from new blocks to accumulator */ \ vpsrlq ZTMP3, Q2_L, 42; \ vpsllq Q2_H, Q2_H, 10; \ vpaddq Q2_H, Q2_H, ZTMP3; \ \ /* ; Carry propagation (second pass) */ \ /* ; Multiply by 5 the highest bits (above 130 bits) */ \ vpaddq A0, A0, P2_H; \ vpsllq P2_H, P2_H, 2; \ vpaddq A0, A0, P2_H; \ vpaddq B0, B0, Q2_H; \ vpsllq Q2_H, Q2_H, 2; \ vpaddq B0, B0, Q2_H; \ \ vpsrlq ZTMP1, A0, 44; \ vpandq A0, A0, [.Lmask_44 ADD_RIP]; \ vpaddq A0, A0, ZTMP5; /* ; Add low 42-bit bits from new blocks to accumulator */ \ vpaddq A1, A1, ZTMP2; /* ; Add medium 42-bit bits from new blocks to accumulator */ \ vpaddq A1, A1, ZTMP1; \ vpsrlq ZTMP3, B0, 44; \ vpandq B0, B0, [.Lmask_44 ADD_RIP]; \ vpaddq B0, B0, ZTMP8; /* ; Add low 42-bit bits from new blocks to accumulator */ \ vpaddq B1, B1, ZTMP9; /* ; Add medium 42-bit bits from new blocks to accumulator */ \ vpaddq B1, B1, ZTMP3 /* ;; ============================================================================= ;; ============================================================================= ;; Computes hash for 16 16-byte message blocks. ;; ;; It first multiplies all 16 blocks with powers of R (8 blocks from A0-A2 ;; and 8 blocks from B0-B2, multiplied by R0-R2 and S0-S2) ;; ;; ;; a2 a1 a0 ;; × b2 b1 b0 ;; --------------------------------------- ;; a2×b0 a1×b0 a0×b0 ;; + a1×b1 a0×b1 5×a2×b1 ;; + a0×b2 5×a2×b2 5×a1×b2 ;; --------------------------------------- ;; p2 p1 p0 ;; ;; Then, it propagates the carry (higher bits after bit 43) from lower limbs into higher limbs, ;; multiplying by 5 in case of the carry of p2. 
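For readers following the limb arithmetic, the schoolbook product and the two-pass carry described above can be modelled in scalar C roughly as below. This is an illustrative sketch only (the function name, MASK44/MASK42 macros and the __int128 usage are mine, not part of the patch); the 4*5 factor applied to the R1'/R2' operands corresponds to 2^132 mod (2^130 - 5) = 20.

    #include <stdint.h>

    typedef unsigned __int128 u128;

    #define MASK44 ((1ULL << 44) - 1)   /* corresponds to .Lmask_44 */
    #define MASK42 ((1ULL << 42) - 1)   /* corresponds to .Lmask_42 */

    /* a = (a * r) mod 2^130 - 5, with a and r in radix-2^44 limbs
     * (third limb 42 bits).  Scalar model of one lane of the chain above. */
    static void poly1305_mul_reduce_44(uint64_t a[3], const uint64_t r[3])
    {
        /* R1' and R2' carry the wrap factor 20: x * 2^132 == 20 * x (mod p). */
        uint64_t r1p = 20 * r[1];
        uint64_t r2p = 20 * r[2];

        u128 p0 = (u128)a[0] * r[0] + (u128)a[1] * r2p + (u128)a[2] * r1p;
        u128 p1 = (u128)a[0] * r[1] + (u128)a[1] * r[0] + (u128)a[2] * r2p;
        u128 p2 = (u128)a[0] * r[2] + (u128)a[1] * r[1] + (u128)a[2] * r[0];

        /* Carry propagation, first pass: keep 44/44/42 bits per limb. */
        p1 += (uint64_t)(p0 >> 44);
        a[0] = (uint64_t)p0 & MASK44;
        p2 += (uint64_t)(p1 >> 44);
        a[1] = (uint64_t)p1 & MASK44;
        uint64_t carry = (uint64_t)(p2 >> 42);
        a[2] = (uint64_t)p2 & MASK42;

        /* Second pass: bits above 2^130 are folded back in, multiplied by 5. */
        a[0] += carry * 5;
        a[1] += a[0] >> 44;
        a[0] &= MASK44;
    }

The vectorized macro does the same for eight (or sixteen) lanes at once: the _L/_H register pairs hold the low and high 52 bits of each IFMA product, which is why the high halves are shifted left by 8 before being folded into the next limb (2^52 = 2^8 * 2^44).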
;; ;; ============================================================================= ;A0 [in/out] ZMM register containing 1st 44-bit limb of the 8 blocks ;A1 [in/out] ZMM register containing 2nd 44-bit limb of the 8 blocks ;A2 [in/out] ZMM register containing 3rd 44-bit limb of the 8 blocks ;B0 [in/out] ZMM register containing 1st 44-bit limb of the 8 blocks ;B1 [in/out] ZMM register containing 2nd 44-bit limb of the 8 blocks ;B2 [in/out] ZMM register containing 3rd 44-bit limb of the 8 blocks ;R0 [in] ZMM register (R0) to include the 1st limb in IDX ;R1 [in] ZMM register (R1) to include the 2nd limb in IDX ;R2 [in] ZMM register (R2) to include the 3rd limb in IDX ;R1P [in] ZMM register (R1') to include the 2nd limb (multiplied by 5) in IDX ;R2P [in] ZMM register (R2') to include the 3rd limb (multiplied by 5) in IDX ;S0 [in] ZMM register (R0) to include the 1st limb in IDX ;S1 [in] ZMM register (R1) to include the 2nd limb in IDX ;S2 [in] ZMM register (R2) to include the 3rd limb in IDX ;S1P [in] ZMM register (R1') to include the 2nd limb (multiplied by 5) in IDX ;S2P [in] ZMM register (R2') to include the 3rd limb (multiplied by 5) in IDX ;P0_L [clobbered] ZMM register to contain p[0] of the 8 blocks ;P0_H [clobbered] ZMM register to contain p[0] of the 8 blocks ;P1_L [clobbered] ZMM register to contain p[1] of the 8 blocks ;P1_H [clobbered] ZMM register to contain p[1] of the 8 blocks ;P2_L [clobbered] ZMM register to contain p[2] of the 8 blocks ;P2_H [clobbered] ZMM register to contain p[2] of the 8 blocks ;Q0_L [clobbered] ZMM register to contain p[0] of the 8 blocks ;Q0_H [clobbered] ZMM register to contain p[0] of the 8 blocks ;Q1_L [clobbered] ZMM register to contain p[1] of the 8 blocks ;Q1_H [clobbered] ZMM register to contain p[1] of the 8 blocks ;Q2_L [clobbered] ZMM register to contain p[2] of the 8 blocks ;Q2_H [clobbered] ZMM register to contain p[2] of the 8 blocks ;ZTMP1 [clobbered] Temporary ZMM register ;ZTMP2 [clobbered] Temporary ZMM register */ #define POLY1305_MUL_REDUCE_VEC16(A0, A1, A2, B0, B1, B2, R0, R1, R2, R1P, R2P,\ S0, S1, S2, S1P, S2P, P0_L, P0_H, P1_L, P1_H,\ P2_L, P2_H, Q0_L, Q0_H, Q1_L, Q1_H, Q2_L,\ Q2_H, ZTMP1, ZTMP2) \ /* ;; Reset accumulator */ \ vpxorq P0_L, P0_L, P0_L; \ vpxorq P0_H, P0_H, P0_H; \ vpxorq P1_L, P1_L, P1_L; \ vpxorq P1_H, P1_H, P1_H; \ vpxorq P2_L, P2_L, P2_L; \ vpxorq P2_H, P2_H, P2_H; \ vpxorq Q0_L, Q0_L, Q0_L; \ vpxorq Q0_H, Q0_H, Q0_H; \ vpxorq Q1_L, Q1_L, Q1_L; \ vpxorq Q1_H, Q1_H, Q1_H; \ vpxorq Q2_L, Q2_L, Q2_L; \ vpxorq Q2_H, Q2_H, Q2_H; \ \ /* ;; This code interleaves hash computation with input loading/splatting */ \ \ /* ; Calculate products */ \ vpmadd52luq P0_L, A2, R1P; \ vpmadd52huq P0_H, A2, R1P; \ \ vpmadd52luq Q0_L, B2, S1P; \ vpmadd52huq Q0_H, B2, S1P; \ \ vpmadd52luq P1_L, A2, R2P; \ vpmadd52huq P1_H, A2, R2P; \ \ vpmadd52luq Q1_L, B2, S2P; \ vpmadd52huq Q1_H, B2, S2P; \ \ vpmadd52luq P0_L, A0, R0; \ vpmadd52huq P0_H, A0, R0; \ \ vpmadd52luq Q0_L, B0, S0; \ vpmadd52huq Q0_H, B0, S0; \ \ vpmadd52luq P2_L, A2, R0; \ vpmadd52huq P2_H, A2, R0; \ vpmadd52luq Q2_L, B2, S0; \ vpmadd52huq Q2_H, B2, S0; \ \ vpmadd52luq P1_L, A0, R1; \ vpmadd52huq P1_H, A0, R1; \ vpmadd52luq Q1_L, B0, S1; \ vpmadd52huq Q1_H, B0, S1; \ \ vpmadd52luq P0_L, A1, R2P; \ vpmadd52huq P0_H, A1, R2P; \ \ vpmadd52luq Q0_L, B1, S2P; \ vpmadd52huq Q0_H, B1, S2P; \ \ vpmadd52luq P2_L, A0, R2; \ vpmadd52huq P2_H, A0, R2; \ \ vpmadd52luq Q2_L, B0, S2; \ vpmadd52huq Q2_H, B0, S2; \ \ /* ; Carry propagation (first pass) */ \ vpsrlq ZTMP1, P0_L, 44; \ vpsllq 
P0_H, P0_H, 8; \ vpsrlq ZTMP2, Q0_L, 44; \ vpsllq Q0_H, Q0_H, 8; \ \ vpmadd52luq P1_L, A1, R0; \ vpmadd52huq P1_H, A1, R0; \ vpmadd52luq Q1_L, B1, S0; \ vpmadd52huq Q1_H, B1, S0; \ \ /* ; Carry propagation (first pass) - continue */ \ vpandq A0, P0_L, [.Lmask_44 ADD_RIP]; /* ; Clear top 20 bits */ \ vpaddq P0_H, P0_H, ZTMP1; \ vpandq B0, Q0_L, [.Lmask_44 ADD_RIP]; /* ; Clear top 20 bits */ \ vpaddq Q0_H, Q0_H, ZTMP2; \ \ vpmadd52luq P2_L, A1, R1; \ vpmadd52huq P2_H, A1, R1; \ vpmadd52luq Q2_L, B1, S1; \ vpmadd52huq Q2_H, B1, S1; \ \ /* ; Carry propagation (first pass) - continue */ \ vpaddq P1_L, P1_L, P0_H; \ vpsllq P1_H, P1_H, 8; \ vpsrlq ZTMP1, P1_L, 44; \ vpandq A1, P1_L, [.Lmask_44 ADD_RIP]; /* ; Clear top 20 bits */ \ vpaddq Q1_L, Q1_L, Q0_H; \ vpsllq Q1_H, Q1_H, 8; \ vpsrlq ZTMP2, Q1_L, 44; \ vpandq B1, Q1_L, [.Lmask_44 ADD_RIP]; /* ; Clear top 20 bits */ \ \ vpaddq P2_L, P2_L, P1_H; /* ; P2_L += P1_H + P1_L[63:44] */ \ vpaddq P2_L, P2_L, ZTMP1; \ vpandq A2, P2_L, [.Lmask_42 ADD_RIP]; /* ; Clear top 22 bits */ \ vpsrlq ZTMP1, P2_L, 42; \ vpsllq P2_H, P2_H, 10; \ vpaddq P2_H, P2_H, ZTMP1; \ \ vpaddq Q2_L, Q2_L, Q1_H; /* ; Q2_L += P1_H + P1_L[63:44] */ \ vpaddq Q2_L, Q2_L, ZTMP2; \ vpandq B2, Q2_L, [.Lmask_42 ADD_RIP]; /* ; Clear top 22 bits */ \ vpsrlq ZTMP2, Q2_L, 42; \ vpsllq Q2_H, Q2_H, 10; \ vpaddq Q2_H, Q2_H, ZTMP2; \ \ /* ; Carry propagation (second pass) */ \ /* ; Multiply by 5 the highest bits (above 130 bits) */ \ vpaddq A0, A0, P2_H; \ vpsllq P2_H, P2_H, 2; \ vpaddq A0, A0, P2_H; \ vpaddq B0, B0, Q2_H; \ vpsllq Q2_H, Q2_H, 2; \ vpaddq B0, B0, Q2_H; \ \ vpsrlq ZTMP1, A0, 44; \ vpandq A0, A0, [.Lmask_44 ADD_RIP]; \ vpaddq A1, A1, ZTMP1; \ vpsrlq ZTMP2, B0, 44; \ vpandq B0, B0, [.Lmask_44 ADD_RIP]; \ vpaddq B1, B1, ZTMP2; /* ;; ============================================================================= ;; ============================================================================= ;; Shuffle data blocks, so they match the right power of R. ;; Powers of R are in this order: R^8 R^4 R^7 R^3 R^6 R^2 R^5 R ;; Data blocks are coming in this order: A0 A4 A1 A5 A2 A6 A3 A7 ;; Generally the computation is: A0*R^8 + A1*R^7 + A2*R^6 + A3*R^5 + ;; A4*R^4 + A5*R^3 + A6*R^2 + A7*R ;; When there are less data blocks, less powers of R are used, so data needs to ;; be shuffled. 
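Before the shuffling example that follows, it may help to see in plain C why the per-lane weighting with powers of R is equivalent to the sequential Poly1305 update. The sketch below uses a small toy modulus and a stand-in mulmod(); the names and the toy parameters are mine and it is illustrative only, not part of the patch.

    #include <assert.h>
    #include <stdint.h>

    /* Stand-in for the real multiply-reduce mod 2^130 - 5. */
    static uint64_t mulmod(uint64_t a, uint64_t b, uint64_t p)
    {
        return (uint64_t)((unsigned __int128)a * b % p);
    }

    /* Sequential reference: acc = (acc + m[i]) * r for each block. */
    static uint64_t sequential(uint64_t acc, const uint64_t m[8],
                               uint64_t r, uint64_t p)
    {
        for (int i = 0; i < 8; i++)
            acc = mulmod((acc + m[i]) % p, r, p);
        return acc;
    }

    /* Parallel form: lane i carries weight r^(8-i); the old accumulator is
     * folded into lane 0 (weight r^8), just as the code above adds the
     * spread accumulator to the first block of the first batch. */
    static uint64_t parallel8(uint64_t acc, const uint64_t m[8],
                              uint64_t r, uint64_t p)
    {
        uint64_t rpow[9] = { 1 };
        for (int k = 1; k <= 8; k++)
            rpow[k] = mulmod(rpow[k - 1], r, p);

        uint64_t sum = mulmod((m[0] + acc) % p, rpow[8], p);
        for (int i = 1; i < 8; i++)
            sum = (sum + mulmod(m[i], rpow[8 - i], p)) % p;
        return sum;
    }

    int main(void)
    {
        const uint64_t p = 1000003, r = 54321, acc = 7;
        const uint64_t m[8] = { 11, 22, 33, 44, 55, 66, 77, 88 };
        assert(sequential(acc, m, r, p) == parallel8(acc, m, r, p));
        return 0;
    }

In the real code the powers R..R^8 (and R^9..R^16 for the 16-block loop) are computed once up front and kept in ZMM registers or saved on the stack at STACK_r_save.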
Example: if 4 blocks are left, only A0-A3 are available and only ;; R-R^4 are used (A0*R^4 + A1*R^3 + A2*R^2 + A3*R), so A0-A3 need to be shifted ;; ============================================================================= ;A_L [in/out] 0-43 bits of input data ;A_M [in/out] 44-87 bits of input data ;A_H [in/out] 88-129 bits of input data ;TMP [clobbered] Temporary GP register ;N_BLOCKS [in] Number of remaining input blocks */ #define SHUFFLE_DATA_SMASK_1 0x39 #define SHUFFLE_DATA_KMASK_1 0xffff #define SHUFFLE_DATA_SMASK_2 0x4E #define SHUFFLE_DATA_KMASK_2 0xffff #define SHUFFLE_DATA_SMASK_3 0x93 #define SHUFFLE_DATA_KMASK_3 0xffff #define SHUFFLE_DATA_KMASK_4 0xffff #define SHUFFLE_DATA_SMASK_5 0x39 #define SHUFFLE_DATA_KMASK_5 0xfff0 #define SHUFFLE_DATA_SMASK_6 0x4E #define SHUFFLE_DATA_KMASK_6 0xff00 #define SHUFFLE_DATA_SMASK_7 0x93 #define SHUFFLE_DATA_KMASK_7 0xf000 #define SHUFFLE_DATA_BLOCKS_GENERIC(A_L, A_M, A_H, TMP, N_BLOCKS) \ mov TMP, SHUFFLE_DATA_KMASK_##N_BLOCKS; \ kmovq k1, TMP; \ vpshufd A_L{k1}, A_L, 0x4E; \ vpshufd A_M{k1}, A_M, 0x4E; \ vpshufd A_H{k1}, A_H, 0x4E; \ vshufi64x2 A_L, A_L, A_L, SHUFFLE_DATA_SMASK_##N_BLOCKS; \ vshufi64x2 A_M, A_M, A_M, SHUFFLE_DATA_SMASK_##N_BLOCKS; \ vshufi64x2 A_H, A_H, A_H, SHUFFLE_DATA_SMASK_##N_BLOCKS #define SHUFFLE_DATA_BLOCKS_1(A_L, A_M, A_H, TMP) \ SHUFFLE_DATA_BLOCKS_GENERIC(A_L, A_M, A_H, TMP, 1) #define SHUFFLE_DATA_BLOCKS_2(A_L, A_M, A_H, TMP) \ SHUFFLE_DATA_BLOCKS_GENERIC(A_L, A_M, A_H, TMP, 2) #define SHUFFLE_DATA_BLOCKS_3(A_L, A_M, A_H, TMP) \ SHUFFLE_DATA_BLOCKS_GENERIC(A_L, A_M, A_H, TMP, 3) #define SHUFFLE_DATA_BLOCKS_4(A_L, A_M, A_H, TMP) \ mov TMP, SHUFFLE_DATA_KMASK_4; \ kmovq k1, TMP; \ vpshufd A_L{k1}, A_L, 0x4E; \ vpshufd A_M{k1}, A_M, 0x4E; \ vpshufd A_H{k1}, A_H, 0x4E; #define SHUFFLE_DATA_BLOCKS_5(A_L, A_M, A_H, TMP) \ SHUFFLE_DATA_BLOCKS_GENERIC(A_L, A_M, A_H, TMP, 5) #define SHUFFLE_DATA_BLOCKS_6(A_L, A_M, A_H, TMP) \ SHUFFLE_DATA_BLOCKS_GENERIC(A_L, A_M, A_H, TMP, 6) #define SHUFFLE_DATA_BLOCKS_7(A_L, A_M, A_H, TMP) \ SHUFFLE_DATA_BLOCKS_GENERIC(A_L, A_M, A_H, TMP, 7) /* ;; ============================================================================= ;; ============================================================================= ;; Computes hash for message length being multiple of block size ;; ============================================================================= ;MSG [in/out] GPR pointer to input message (updated) ;LEN [in/out] GPR in: length in bytes / out: length mod 16 ;A0 [in/out] accumulator bits 63..0 ;A1 [in/out] accumulator bits 127..64 ;A2 [in/out] accumulator bits 195..128 ;R0 [in] R constant bits 63..0 ;R1 [in] R constant bits 127..64 ;T0 [clobbered] GPR register ;T1 [clobbered] GPR register ;T2 [clobbered] GPR register ;T3 [clobbered] GPR register ;GP_RAX [clobbered] RAX register ;GP_RDX [clobbered] RDX register */ #define POLY1305_BLOCKS(MSG, LEN, A0, A1, A2, R0, R1, T0, T1, T2, T3, \ GP_RAX, GP_RDX) \ /* ; Minimum of 256 bytes to run vectorized code */ \ cmp LEN, POLY1305_BLOCK_SIZE*16; \ jb .L_final_loop; \ \ /* ; Spread accumulator into 44-bit limbs in quadwords */ \ mov T0, A0; \ and T0, [.Lmask_44 ADD_RIP]; /* ;; First limb (A[43:0]) */ \ vmovq xmm5, T0; \ \ mov T0, A1; \ shrd A0, T0, 44; \ and A0, [.Lmask_44 ADD_RIP]; /* ;; Second limb (A[77:52]) */ \ vmovq xmm6, A0; \ \ shrd A1, A2, 24; \ and A1, [.Lmask_42 ADD_RIP]; /* ;; Third limb (A[129:88]) */ \ vmovq xmm7, A1; \ \ /* ; Load first block of data (128 bytes) */ \ vmovdqu64 zmm0, [MSG]; \ vmovdqu64 zmm1, [MSG + 64]; \ \ /* 
; Interleave the data to form 44-bit limbs */ \ /* ; */ \ /* ; zmm13 to have bits 0-43 of all 8 blocks in 8 qwords */ \ /* ; zmm14 to have bits 87-44 of all 8 blocks in 8 qwords */ \ /* ; zmm15 to have bits 127-88 of all 8 blocks in 8 qwords */ \ vpunpckhqdq zmm15, zmm0, zmm1; \ vpunpcklqdq zmm13, zmm0, zmm1; \ \ vpsrlq zmm14, zmm13, 44; \ vpsllq zmm18, zmm15, 20; \ vpternlogq zmm14, zmm18, [.Lmask_44 ADD_RIP], 0xA8; /* ; (A OR B AND C) */ \ \ vpandq zmm13, zmm13, [.Lmask_44 ADD_RIP]; \ vpsrlq zmm15, zmm15, 24; \ \ /* ; Add 2^128 to all 8 final qwords of the message */ \ vporq zmm15, zmm15, [.Lhigh_bit ADD_RIP]; \ \ vpaddq zmm13, zmm13, zmm5; \ vpaddq zmm14, zmm14, zmm6; \ vpaddq zmm15, zmm15, zmm7; \ \ /* ; Load next blocks of data (128 bytes) */ \ vmovdqu64 zmm0, [MSG + 64*2]; \ vmovdqu64 zmm1, [MSG + 64*3]; \ \ /* ; Interleave the data to form 44-bit limbs */ \ /* ; */ \ /* ; zmm13 to have bits 0-43 of all 8 blocks in 8 qwords */ \ /* ; zmm14 to have bits 87-44 of all 8 blocks in 8 qwords */ \ /* ; zmm15 to have bits 127-88 of all 8 blocks in 8 qwords */ \ vpunpckhqdq zmm18, zmm0, zmm1; \ vpunpcklqdq zmm16, zmm0, zmm1; \ \ vpsrlq zmm17, zmm16, 44; \ vpsllq zmm19, zmm18, 20; \ vpternlogq zmm17, zmm19, [.Lmask_44 ADD_RIP], 0xA8; /* ; (A OR B AND C) */ \ \ vpandq zmm16, zmm16, [.Lmask_44 ADD_RIP]; \ vpsrlq zmm18, zmm18, 24; \ \ /* ; Add 2^128 to all 8 final qwords of the message */ \ vporq zmm18, zmm18, [.Lhigh_bit ADD_RIP]; \ \ /* ; Use memory in stack to save powers of R, before loading them into ZMM registers */ \ /* ; The first 16*8 bytes will contain the 16 bytes of the 8 powers of R */ \ /* ; The last 64 bytes will contain the last 2 bits of powers of R, spread in 8 qwords, */ \ /* ; to be OR'd with the highest qwords (in zmm26) */ \ vmovq xmm3, R0; \ vpinsrq xmm3, xmm3, R1, 1; \ vinserti32x4 zmm1, zmm1, xmm3, 3; \ \ vpxorq zmm0, zmm0, zmm0; \ vpxorq zmm2, zmm2, zmm2; \ \ /* ; Calculate R^2 */ \ mov T0, R1; \ shr T0, 2; \ add T0, R1; /* ;; T0 = R1 + (R1 >> 2) */ \ \ mov A0, R0; \ mov A1, R1; \ \ POLY1305_MUL_REDUCE(A0, A1, A2, R0, R1, T0, T1, T2, T3, GP_RAX, GP_RDX, A2_ZERO); \ \ vmovq xmm3, A0; \ vpinsrq xmm3, xmm3, A1, 1; \ vinserti32x4 zmm1, zmm1, xmm3, 2; \ \ vmovq xmm4, A2; \ vinserti32x4 zmm2, zmm2, xmm4, 2; \ \ /* ; Calculate R^3 */ \ POLY1305_MUL_REDUCE(A0, A1, A2, R0, R1, T0, T1, T2, T3, GP_RAX, GP_RDX, A2_NOT_ZERO); \ \ vmovq xmm3, A0; \ vpinsrq xmm3, xmm3, A1, 1; \ vinserti32x4 zmm1, zmm1, xmm3, 1; \ \ vmovq xmm4, A2; \ vinserti32x4 zmm2, zmm2, xmm4, 1; \ \ /* ; Calculate R^4 */ \ POLY1305_MUL_REDUCE(A0, A1, A2, R0, R1, T0, T1, T2, T3, GP_RAX, GP_RDX, A2_NOT_ZERO); \ \ vmovq xmm3, A0; \ vpinsrq xmm3, xmm3, A1, 1; \ vinserti32x4 zmm1, zmm1, xmm3, 0; \ \ vmovq xmm4, A2; \ vinserti32x4 zmm2, zmm2, xmm4, 0; \ \ /* ; Move 2 MSbits to top 24 bits, to be OR'ed later */ \ vpsllq zmm2, zmm2, 40; \ \ vpunpckhqdq zmm21, zmm1, zmm0; \ vpunpcklqdq zmm19, zmm1, zmm0; \ \ vpsrlq zmm20, zmm19, 44; \ vpsllq zmm4, zmm21, 20; \ vpternlogq zmm20, zmm4, [.Lmask_44 ADD_RIP], 0xA8; /* ; (A OR B AND C) */ \ \ vpandq zmm19, zmm19, [.Lmask_44 ADD_RIP]; \ vpsrlq zmm21, zmm21, 24; \ \ /* ; zmm2 contains the 2 highest bits of the powers of R */ \ vporq zmm21, zmm21, zmm2; \ \ /* ; Broadcast 44-bit limbs of R^4 */ \ mov T0, A0; \ and T0, [.Lmask_44 ADD_RIP]; /* ;; First limb (R^4[43:0]) */ \ vpbroadcastq zmm22, T0; \ \ mov T0, A1; \ shrd A0, T0, 44; \ and A0, [.Lmask_44 ADD_RIP]; /* ;; Second limb (R^4[87:44]) */ \ vpbroadcastq zmm23, A0; \ \ shrd A1, A2, 24; \ and A1, [.Lmask_42 ADD_RIP]; /* ;; Third 
limb (R^4[129:88]) */ \ vpbroadcastq zmm24, A1; \ \ /* ; Generate 4*5*R^4 */ \ vpsllq zmm25, zmm23, 2; \ vpsllq zmm26, zmm24, 2; \ \ /* ; 5*R^4 */ \ vpaddq zmm25, zmm25, zmm23; \ vpaddq zmm26, zmm26, zmm24; \ \ /* ; 4*5*R^4 */ \ vpsllq zmm25, zmm25, 2; \ vpsllq zmm26, zmm26, 2; \ \ vpslldq zmm29, zmm19, 8; \ vpslldq zmm30, zmm20, 8; \ vpslldq zmm31, zmm21, 8; \ \ /* ; Calculate R^8-R^5 */ \ POLY1305_MUL_REDUCE_VEC(zmm19, zmm20, zmm21, \ zmm22, zmm23, zmm24, \ zmm25, zmm26, \ zmm5, zmm6, zmm7, zmm8, zmm9, zmm10, \ zmm11); \ \ /* ; Interleave powers of R: R^8 R^4 R^7 R^3 R^6 R^2 R^5 R */ \ vporq zmm19, zmm19, zmm29; \ vporq zmm20, zmm20, zmm30; \ vporq zmm21, zmm21, zmm31; \ \ /* ; Broadcast R^8 */ \ vpbroadcastq zmm22, xmm19; \ vpbroadcastq zmm23, xmm20; \ vpbroadcastq zmm24, xmm21; \ \ /* ; Generate 4*5*R^8 */ \ vpsllq zmm25, zmm23, 2; \ vpsllq zmm26, zmm24, 2; \ \ /* ; 5*R^8 */ \ vpaddq zmm25, zmm25, zmm23; \ vpaddq zmm26, zmm26, zmm24; \ \ /* ; 4*5*R^8 */ \ vpsllq zmm25, zmm25, 2; \ vpsllq zmm26, zmm26, 2; \ \ cmp LEN, POLY1305_BLOCK_SIZE*32; \ jb .L_len_256_511; \ \ /* ; Store R^8-R for later use */ \ vmovdqa64 [rsp + STACK_r_save], zmm19; \ vmovdqa64 [rsp + STACK_r_save + 64], zmm20; \ vmovdqa64 [rsp + STACK_r_save + 64*2], zmm21; \ \ /* ; Calculate R^16-R^9 */ \ POLY1305_MUL_REDUCE_VEC(zmm19, zmm20, zmm21, \ zmm22, zmm23, zmm24, \ zmm25, zmm26, \ zmm5, zmm6, zmm7, zmm8, zmm9, zmm10, \ zmm11); \ \ /* ; Store R^16-R^9 for later use */ \ vmovdqa64 [rsp + STACK_r_save + 64*3], zmm19; \ vmovdqa64 [rsp + STACK_r_save + 64*4], zmm20; \ vmovdqa64 [rsp + STACK_r_save + 64*5], zmm21; \ \ /* ; Broadcast R^16 */ \ vpbroadcastq zmm22, xmm19; \ vpbroadcastq zmm23, xmm20; \ vpbroadcastq zmm24, xmm21; \ \ /* ; Generate 4*5*R^16 */ \ vpsllq zmm25, zmm23, 2; \ vpsllq zmm26, zmm24, 2; \ \ /* ; 5*R^16 */ \ vpaddq zmm25, zmm25, zmm23; \ vpaddq zmm26, zmm26, zmm24; \ \ /* ; 4*5*R^16 */ \ vpsllq zmm25, zmm25, 2; \ vpsllq zmm26, zmm26, 2; \ \ mov T0, LEN; \ and T0, 0xffffffffffffff00; /* ; multiple of 256 bytes */ \ \ .L_poly1305_blocks_loop: \ cmp T0, POLY1305_BLOCK_SIZE*16; \ jbe .L_poly1305_blocks_loop_end; \ \ /* ; zmm13-zmm18 contain the 16 blocks of message plus the previous accumulator */ \ /* ; zmm22-24 contain the 5x44-bit limbs of the powers of R */ \ /* ; zmm25-26 contain the 5x44-bit limbs of the powers of R' (5*4*R) */ \ POLY1305_MSG_MUL_REDUCE_VEC16(zmm13, zmm14, zmm15, zmm16, zmm17, zmm18, \ zmm22, zmm23, zmm24, zmm25, zmm26, \ zmm5, zmm6, zmm7, zmm8, zmm9, zmm10, \ zmm19, zmm20, zmm21, zmm27, zmm28, zmm29, \ zmm30, zmm31, zmm11, zmm0, zmm1, \ zmm2, zmm3, zmm4, zmm12, MSG, T0); \ \ jmp .L_poly1305_blocks_loop; \ \ .L_poly1305_blocks_loop_end: \ \ /* ;; Need to multiply by r^16, r^15, r^14... 
r */ \ \ /* ; First multiply by r^16-r^9 */ \ \ /* ; Read R^16-R^9 */ \ vmovdqa64 zmm19, [rsp + STACK_r_save + 64*3]; \ vmovdqa64 zmm20, [rsp + STACK_r_save + 64*4]; \ vmovdqa64 zmm21, [rsp + STACK_r_save + 64*5]; \ /* ; Read R^8-R */ \ vmovdqa64 zmm22, [rsp + STACK_r_save]; \ vmovdqa64 zmm23, [rsp + STACK_r_save + 64]; \ vmovdqa64 zmm24, [rsp + STACK_r_save + 64*2]; \ \ /* ; zmm27 to have bits 87-44 of all 9-16th powers of R' in 8 qwords */ \ /* ; zmm28 to have bits 129-88 of all 9-16th powers of R' in 8 qwords */ \ vpsllq zmm0, zmm20, 2; \ vpaddq zmm27, zmm20, zmm0; /* ; R1' (R1*5) */ \ vpsllq zmm1, zmm21, 2; \ vpaddq zmm28, zmm21, zmm1; /* ; R2' (R2*5) */ \ \ /* ; 4*5*R */ \ vpsllq zmm27, zmm27, 2; \ vpsllq zmm28, zmm28, 2; \ \ /* ; Then multiply by r^8-r */ \ \ /* ; zmm25 to have bits 87-44 of all 1-8th powers of R' in 8 qwords */ \ /* ; zmm26 to have bits 129-88 of all 1-8th powers of R' in 8 qwords */ \ vpsllq zmm2, zmm23, 2; \ vpaddq zmm25, zmm23, zmm2; /* ; R1' (R1*5) */ \ vpsllq zmm3, zmm24, 2; \ vpaddq zmm26, zmm24, zmm3; /* ; R2' (R2*5) */ \ \ /* ; 4*5*R */ \ vpsllq zmm25, zmm25, 2; \ vpsllq zmm26, zmm26, 2; \ \ POLY1305_MUL_REDUCE_VEC16(zmm13, zmm14, zmm15, zmm16, zmm17, zmm18, \ zmm19, zmm20, zmm21, zmm27, zmm28, \ zmm22, zmm23, zmm24, zmm25, zmm26, \ zmm0, zmm1, zmm2, zmm3, zmm4, zmm5, zmm6, \ zmm7, zmm8, zmm9, zmm10, zmm11, zmm12, zmm29); \ \ /* ;; Add all blocks (horizontally) */ \ vpaddq zmm13, zmm13, zmm16; \ vpaddq zmm14, zmm14, zmm17; \ vpaddq zmm15, zmm15, zmm18; \ \ vextracti64x4 ymm0, zmm13, 1; \ vextracti64x4 ymm1, zmm14, 1; \ vextracti64x4 ymm2, zmm15, 1; \ \ vpaddq ymm13, ymm13, ymm0; \ vpaddq ymm14, ymm14, ymm1; \ vpaddq ymm15, ymm15, ymm2; \ \ vextracti32x4 xmm10, ymm13, 1; \ vextracti32x4 xmm11, ymm14, 1; \ vextracti32x4 xmm12, ymm15, 1; \ \ vpaddq xmm13, xmm13, xmm10; \ vpaddq xmm14, xmm14, xmm11; \ vpaddq xmm15, xmm15, xmm12; \ \ vpsrldq xmm10, xmm13, 8; \ vpsrldq xmm11, xmm14, 8; \ vpsrldq xmm12, xmm15, 8; \ \ /* ; Finish folding and clear second qword */ \ mov T0, 0xfd; \ kmovq k1, T0; \ vpaddq xmm13{k1}{z}, xmm13, xmm10; \ vpaddq xmm14{k1}{z}, xmm14, xmm11; \ vpaddq xmm15{k1}{z}, xmm15, xmm12; \ \ add MSG, POLY1305_BLOCK_SIZE*16; \ \ and LEN, (POLY1305_BLOCK_SIZE*16 - 1); /* ; Get remaining lengths (LEN < 256 bytes) */ \ \ .L_less_than_256: \ \ cmp LEN, POLY1305_BLOCK_SIZE*8; \ jb .L_less_than_128; \ \ /* ; Read next 128 bytes */ \ /* ; Load first block of data (128 bytes) */ \ vmovdqu64 zmm0, [MSG]; \ vmovdqu64 zmm1, [MSG + 64]; \ \ /* ; Interleave the data to form 44-bit limbs */ \ /* ; */ \ /* ; zmm13 to have bits 0-43 of all 8 blocks in 8 qwords */ \ /* ; zmm14 to have bits 87-44 of all 8 blocks in 8 qwords */ \ /* ; zmm15 to have bits 127-88 of all 8 blocks in 8 qwords */ \ vpunpckhqdq zmm5, zmm0, zmm1; \ vpunpcklqdq zmm3, zmm0, zmm1; \ \ vpsrlq zmm4, zmm3, 44; \ vpsllq zmm8, zmm5, 20; \ vpternlogq zmm4, zmm8, [.Lmask_44 ADD_RIP], 0xA8; /* ; (A OR B AND C) */ \ \ vpandq zmm3, zmm3, [.Lmask_44 ADD_RIP]; \ vpsrlq zmm5, zmm5, 24; \ \ /* ; Add 2^128 to all 8 final qwords of the message */ \ vporq zmm5, zmm5, [.Lhigh_bit ADD_RIP]; \ \ vpaddq zmm13, zmm13, zmm3; \ vpaddq zmm14, zmm14, zmm4; \ vpaddq zmm15, zmm15, zmm5; \ \ add MSG, POLY1305_BLOCK_SIZE*8; \ sub LEN, POLY1305_BLOCK_SIZE*8; \ \ POLY1305_MUL_REDUCE_VEC(zmm13, zmm14, zmm15, \ zmm22, zmm23, zmm24, \ zmm25, zmm26, \ zmm5, zmm6, zmm7, zmm8, zmm9, zmm10, \ zmm11); \ \ /* ;; Add all blocks (horizontally) */ \ vextracti64x4 ymm0, zmm13, 1; \ vextracti64x4 ymm1, zmm14, 1; \ vextracti64x4 ymm2, zmm15, 
1; \ \ vpaddq ymm13, ymm13, ymm0; \ vpaddq ymm14, ymm14, ymm1; \ vpaddq ymm15, ymm15, ymm2; \ \ vextracti32x4 xmm10, ymm13, 1; \ vextracti32x4 xmm11, ymm14, 1; \ vextracti32x4 xmm12, ymm15, 1; \ \ vpaddq xmm13, xmm13, xmm10; \ vpaddq xmm14, xmm14, xmm11; \ vpaddq xmm15, xmm15, xmm12; \ \ vpsrldq xmm10, xmm13, 8; \ vpsrldq xmm11, xmm14, 8; \ vpsrldq xmm12, xmm15, 8; \ \ /* ; Finish folding and clear second qword */ \ mov T0, 0xfd; \ kmovq k1, T0; \ vpaddq xmm13{k1}{z}, xmm13, xmm10; \ vpaddq xmm14{k1}{z}, xmm14, xmm11; \ vpaddq xmm15{k1}{z}, xmm15, xmm12; \ \ .L_less_than_128: \ cmp LEN, 32; /* ; If remaining bytes is <= 32, perform last blocks in scalar */ \ jbe .L_simd_to_gp; \ \ mov T0, LEN; \ and T0, 0x3f; \ lea T1, [.Lbyte64_len_to_mask_table ADD_RIP]; \ mov T1, [T1 + 8*T0]; \ \ /* ; Load default byte masks */ \ mov T2, 0xffffffffffffffff; \ xor T3, T3; \ \ cmp LEN, 64; \ cmovb T2, T1; /* ; Load mask for first 64 bytes */ \ cmovg T3, T1; /* ; Load mask for second 64 bytes */ \ \ kmovq k1, T2; \ kmovq k2, T3; \ vmovdqu8 zmm0{k1}{z}, [MSG]; \ vmovdqu8 zmm1{k2}{z}, [MSG + 64]; \ \ /* ; Pad last block message, if partial */ \ mov T0, LEN; \ and T0, 0x70; /* ; Multiple of 16 bytes */ \ /* ; Load last block of data (up to 112 bytes) */ \ shr T0, 3; /* ; Get number of full qwords */ \ \ /* ; Interleave the data to form 44-bit limbs */ \ /* ; */ \ /* ; zmm13 to have bits 0-43 of all 8 blocks in 8 qwords */ \ /* ; zmm14 to have bits 87-44 of all 8 blocks in 8 qwords */ \ /* ; zmm15 to have bits 127-88 of all 8 blocks in 8 qwords */ \ vpunpckhqdq zmm4, zmm0, zmm1; \ vpunpcklqdq zmm2, zmm0, zmm1; \ \ vpsrlq zmm3, zmm2, 44; \ vpsllq zmm28, zmm4, 20; \ vpternlogq zmm3, zmm28, [.Lmask_44 ADD_RIP], 0xA8; /* ; (A OR B AND C) */ \ \ vpandq zmm2, zmm2, [.Lmask_44 ADD_RIP]; \ vpsrlq zmm4, zmm4, 24; \ \ lea T1, [.Lqword_high_bit_mask ADD_RIP]; \ kmovb k1, [T1 + T0]; \ /* ; Add 2^128 to final qwords of the message (all full blocks and partial block, */ \ /* ; if "pad_to_16" is selected) */ \ vporq zmm4{k1}, zmm4, [.Lhigh_bit ADD_RIP]; \ \ vpaddq zmm13, zmm13, zmm2; \ vpaddq zmm14, zmm14, zmm3; \ vpaddq zmm15, zmm15, zmm4; \ \ mov T0, LEN; \ add T0, 15; \ shr T0, 4; /* ; Get number of 16-byte blocks (including partial blocks) */ \ xor LEN, LEN; /* ; All length will be consumed */ \ \ /* ; No need to shuffle data blocks (data is in the right order) */ \ cmp T0, 8; \ je .L_end_shuffle; \ \ cmp T0, 4; \ je .L_shuffle_blocks_4; \ jb .L_shuffle_blocks_3; \ \ /* ; Number of 16-byte blocks > 4 */ \ cmp T0, 6; \ je .L_shuffle_blocks_6; \ ja .L_shuffle_blocks_7; \ jmp .L_shuffle_blocks_5; \ \ .L_shuffle_blocks_3: \ SHUFFLE_DATA_BLOCKS_3(zmm13, zmm14, zmm15, T1); \ jmp .L_end_shuffle; \ .L_shuffle_blocks_4: \ SHUFFLE_DATA_BLOCKS_4(zmm13, zmm14, zmm15, T1); \ jmp .L_end_shuffle; \ .L_shuffle_blocks_5: \ SHUFFLE_DATA_BLOCKS_5(zmm13, zmm14, zmm15, T1); \ jmp .L_end_shuffle; \ .L_shuffle_blocks_6: \ SHUFFLE_DATA_BLOCKS_6(zmm13, zmm14, zmm15, T1); \ jmp .L_end_shuffle; \ .L_shuffle_blocks_7: \ SHUFFLE_DATA_BLOCKS_7(zmm13, zmm14, zmm15, T1); \ \ .L_end_shuffle: \ \ /* ; zmm13-zmm15 contain the 8 blocks of message plus the previous accumulator */ \ /* ; zmm22-24 contain the 3x44-bit limbs of the powers of R */ \ /* ; zmm25-26 contain the 3x44-bit limbs of the powers of R' (5*4*R) */ \ POLY1305_MUL_REDUCE_VEC(zmm13, zmm14, zmm15, \ zmm22, zmm23, zmm24, \ zmm25, zmm26, \ zmm5, zmm6, zmm7, zmm8, zmm9, zmm10, \ zmm11); \ \ /* ;; Add all blocks (horizontally) */ \ vextracti64x4 ymm0, zmm13, 1; \ vextracti64x4 ymm1, zmm14, 1; \ 
vextracti64x4 ymm2, zmm15, 1; \ \ vpaddq ymm13, ymm13, ymm0; \ vpaddq ymm14, ymm14, ymm1; \ vpaddq ymm15, ymm15, ymm2; \ \ vextracti32x4 xmm10, ymm13, 1; \ vextracti32x4 xmm11, ymm14, 1; \ vextracti32x4 xmm12, ymm15, 1; \ \ vpaddq xmm13, xmm13, xmm10; \ vpaddq xmm14, xmm14, xmm11; \ vpaddq xmm15, xmm15, xmm12; \ \ vpsrldq xmm10, xmm13, 8; \ vpsrldq xmm11, xmm14, 8; \ vpsrldq xmm12, xmm15, 8; \ \ vpaddq xmm13, xmm13, xmm10; \ vpaddq xmm14, xmm14, xmm11; \ vpaddq xmm15, xmm15, xmm12; \ \ .L_simd_to_gp: \ /* ; Carry propagation */ \ vpsrlq xmm0, xmm13, 44; \ vpandq xmm13, xmm13, [.Lmask_44 ADD_RIP]; /* ; Clear top 20 bits */ \ vpaddq xmm14, xmm14, xmm0; \ vpsrlq xmm0, xmm14, 44; \ vpandq xmm14, xmm14, [.Lmask_44 ADD_RIP]; /* ; Clear top 20 bits */ \ vpaddq xmm15, xmm15, xmm0; \ vpsrlq xmm0, xmm15, 42; \ vpandq xmm15, xmm15, [.Lmask_42 ADD_RIP]; /* ; Clear top 22 bits */ \ vpsllq xmm1, xmm0, 2; \ vpaddq xmm0, xmm0, xmm1; \ vpaddq xmm13, xmm13, xmm0; \ \ /* ; Put together A */ \ vmovq A0, xmm13; \ \ vmovq T0, xmm14; \ mov T1, T0; \ shl T1, 44; \ or A0, T1; \ \ shr T0, 20; \ vmovq A2, xmm15; \ mov A1, A2; \ shl A1, 24; \ or A1, T0; \ shr A2, 40; \ \ /* ; Clear powers of R */ \ vpxorq zmm0, zmm0, zmm0; \ vmovdqa64 [rsp + STACK_r_save], zmm0; \ vmovdqa64 [rsp + STACK_r_save + 64], zmm0; \ vmovdqa64 [rsp + STACK_r_save + 64*2], zmm0; \ vmovdqa64 [rsp + STACK_r_save + 64*3], zmm0; \ vmovdqa64 [rsp + STACK_r_save + 64*4], zmm0; \ vmovdqa64 [rsp + STACK_r_save + 64*5], zmm0; \ \ vzeroall; \ clear_zmm(ymm16); clear_zmm(ymm20); clear_zmm(ymm24); clear_zmm(ymm28); \ clear_zmm(ymm17); clear_zmm(ymm21); clear_zmm(ymm25); clear_zmm(ymm29); \ clear_zmm(ymm18); clear_zmm(ymm22); clear_zmm(ymm26); clear_zmm(ymm30); \ clear_zmm(ymm19); clear_zmm(ymm23); clear_zmm(ymm27); clear_zmm(ymm31); \ \ .L_final_loop: \ cmp LEN, POLY1305_BLOCK_SIZE; \ jb .L_poly1305_blocks_exit; \ \ /* ;; A += MSG[i] */ \ add A0, [MSG + 0]; \ adc A1, [MSG + 8]; \ adc A2, 1; /* ;; no padding bit */ \ \ mov T0, R1; \ shr T0, 2; \ add T0, R1; /* ;; T0 = R1 + (R1 >> 2) */ \ \ POLY1305_MUL_REDUCE(A0, A1, A2, R0, R1, \ T0, T1, T2, T3, GP_RAX, GP_RDX, A2_NOT_ZERO); \ \ add MSG, POLY1305_BLOCK_SIZE; \ sub LEN, POLY1305_BLOCK_SIZE; \ \ jmp .L_final_loop; \ \ .L_len_256_511: \ \ /* ; zmm13-zmm15 contain the 8 blocks of message plus the previous accumulator */ \ /* ; zmm22-24 contain the 3x44-bit limbs of the powers of R */ \ /* ; zmm25-26 contain the 3x44-bit limbs of the powers of R' (5*4*R) */ \ POLY1305_MUL_REDUCE_VEC(zmm13, zmm14, zmm15, \ zmm22, zmm23, zmm24, \ zmm25, zmm26, \ zmm5, zmm6, zmm7, zmm8, zmm9, zmm10, \ zmm11); \ \ /* ; Then multiply by r^8-r */ \ \ /* ; zmm19-zmm21 contains R^8-R, need to move it to zmm22-24, */ \ /* ; as it might be used in other part of the code */ \ vmovdqa64 zmm22, zmm19; \ vmovdqa64 zmm23, zmm20; \ vmovdqa64 zmm24, zmm21; \ \ /* ; zmm25 to have bits 87-44 of all 8 powers of R' in 8 qwords */ \ /* ; zmm26 to have bits 129-88 of all 8 powers of R' in 8 qwords */ \ vpsllq zmm0, zmm23, 2; \ vpaddq zmm25, zmm23, zmm0; /* ; R1' (R1*5) */ \ vpsllq zmm1, zmm24, 2; \ vpaddq zmm26, zmm24, zmm1; /* ; R2' (R2*5) */ \ \ /* ; 4*5*R^8 */ \ vpsllq zmm25, zmm25, 2; \ vpsllq zmm26, zmm26, 2; \ \ vpaddq zmm13, zmm13, zmm16; \ vpaddq zmm14, zmm14, zmm17; \ vpaddq zmm15, zmm15, zmm18; \ \ /* ; zmm13-zmm15 contain the 8 blocks of message plus the previous accumulator */ \ /* ; zmm22-24 contain the 3x44-bit limbs of the powers of R */ \ /* ; zmm25-26 contain the 3x44-bit limbs of the powers of R' (5*4*R) */ \ 
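The 44/44/42-bit splitting at the start of POLY1305_BLOCKS (the shrd sequence) and the recombination in .L_simd_to_gp above are plain bit rearrangements. A C sketch of both directions, with hypothetical names and not part of the patch, is:

    #include <stdint.h>

    #define MASK44 ((1ULL << 44) - 1)
    #define MASK42 ((1ULL << 42) - 1)

    /* Split an accumulator held as a0 (value bits 0..63), a1 (64..127),
     * a2 (128..129) into three 44/44/42-bit limbs, as the shrd sequence does. */
    static void u130_to_limbs44(uint64_t a0, uint64_t a1, uint64_t a2,
                                uint64_t l[3])
    {
        l[0] = a0 & MASK44;
        l[1] = ((a0 >> 44) | (a1 << 20)) & MASK44;
        l[2] = ((a1 >> 24) | (a2 << 40)) & MASK42;
    }

    /* Inverse step performed in .L_simd_to_gp once the limbs are fully
     * carry-propagated: pack the limbs back into 64-bit words. */
    static void limbs44_to_u130(const uint64_t l[3],
                                uint64_t *a0, uint64_t *a1, uint64_t *a2)
    {
        *a0 = l[0] | (l[1] << 44);           /* value bits   0..63  */
        *a1 = (l[1] >> 20) | (l[2] << 24);   /* value bits  64..127 */
        *a2 = l[2] >> 40;                    /* value bits 128..129 */
    }

Note that the packed form is only valid after the carry propagation performed immediately before it, which brings each limb back under its 44- or 42-bit bound.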
POLY1305_MUL_REDUCE_VEC(zmm13, zmm14, zmm15, \ zmm22, zmm23, zmm24, \ zmm25, zmm26, \ zmm5, zmm6, zmm7, zmm8, zmm9, zmm10, \ zmm11); \ \ /* ;; Add all blocks (horizontally) */ \ vextracti64x4 ymm0, zmm13, 1; \ vextracti64x4 ymm1, zmm14, 1; \ vextracti64x4 ymm2, zmm15, 1; \ \ vpaddq ymm13, ymm13, ymm0; \ vpaddq ymm14, ymm14, ymm1; \ vpaddq ymm15, ymm15, ymm2; \ \ vextracti32x4 xmm10, ymm13, 1; \ vextracti32x4 xmm11, ymm14, 1; \ vextracti32x4 xmm12, ymm15, 1; \ \ vpaddq xmm13, xmm13, xmm10; \ vpaddq xmm14, xmm14, xmm11; \ vpaddq xmm15, xmm15, xmm12; \ \ vpsrldq xmm10, xmm13, 8; \ vpsrldq xmm11, xmm14, 8; \ vpsrldq xmm12, xmm15, 8; \ \ /* ; Finish folding and clear second qword */ \ mov T0, 0xfd; \ kmovq k1, T0; \ vpaddq xmm13{k1}{z}, xmm13, xmm10; \ vpaddq xmm14{k1}{z}, xmm14, xmm11; \ vpaddq xmm15{k1}{z}, xmm15, xmm12; \ \ add MSG, POLY1305_BLOCK_SIZE*16; \ sub LEN, POLY1305_BLOCK_SIZE*16; \ \ jmp .L_less_than_256; \ .L_poly1305_blocks_exit: \ /* ;; ============================================================================= ;; ============================================================================= ;; Creates stack frame and saves registers ;; ============================================================================= */ #define FUNC_ENTRY() \ mov rax, rsp; \ CFI_DEF_CFA_REGISTER(rax); \ sub rsp, STACK_SIZE; \ and rsp, -64; \ \ mov [rsp + STACK_gpr_save + 8*0], rbx; \ mov [rsp + STACK_gpr_save + 8*1], rbp; \ mov [rsp + STACK_gpr_save + 8*2], r12; \ mov [rsp + STACK_gpr_save + 8*3], r13; \ mov [rsp + STACK_gpr_save + 8*4], r14; \ mov [rsp + STACK_gpr_save + 8*5], r15; \ mov [rsp + STACK_rsp_save], rax; \ CFI_CFA_ON_STACK(STACK_rsp_save, 0) /* ;; ============================================================================= ;; ============================================================================= ;; Restores registers and removes the stack frame ;; ============================================================================= */ #define FUNC_EXIT() \ mov rbx, [rsp + STACK_gpr_save + 8*0]; \ mov rbp, [rsp + STACK_gpr_save + 8*1]; \ mov r12, [rsp + STACK_gpr_save + 8*2]; \ mov r13, [rsp + STACK_gpr_save + 8*3]; \ mov r14, [rsp + STACK_gpr_save + 8*4]; \ mov r15, [rsp + STACK_gpr_save + 8*5]; \ mov rsp, [rsp + STACK_rsp_save]; \ CFI_DEF_CFA_REGISTER(rsp) +.text + /* ;; ============================================================================= ;; ============================================================================= ;; void poly1305_aead_update_fma_avx512(const void *msg, const uint64_t msg_len, ;; void *hash, const void *key) ;; arg1 - Input message ;; arg2 - Message length ;; arg3 - Input/output hash ;; arg4 - Poly1305 key */ .align 32 .globl _gcry_poly1305_amd64_avx512_blocks ELF(.type _gcry_poly1305_amd64_avx512_blocks,@function;) _gcry_poly1305_amd64_avx512_blocks: CFI_STARTPROC() spec_stop_avx512_intel_syntax; FUNC_ENTRY() #define _a0 gp3 #define _a0 gp3 #define _a1 gp4 #define _a2 gp5 #define _r0 gp6 #define _r1 gp7 #define _len arg2 #define _arg3 arg4 /* ; use rcx, arg3 = rdx */ /* ;; load R */ mov _r0, [arg4 + 0 * 8] mov _r1, [arg4 + 1 * 8] /* ;; load accumulator / current hash value */ /* ;; note: arg4 can't be used beyond this point */ mov _arg3, arg3 /* ; note: _arg3 = arg4 (linux) */ mov _a0, [_arg3 + 0 * 8] mov _a1, [_arg3 + 1 * 8] mov DWORD(_a2), [_arg3 + 2 * 8] /* ; note: _a2 = arg4 (win) */ POLY1305_BLOCKS(arg1, _len, _a0, _a1, _a2, _r0, _r1, gp10, gp11, gp8, gp9, rax, rdx) /* ;; save accumulator back */ mov [_arg3 + 0 * 8], _a0 mov [_arg3 + 1 * 8], _a1 
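At the interface level, the entry code above reads r as two little-endian 64-bit words from the key buffer and keeps the accumulator in the hash buffer as two 64-bit words plus a 32-bit word for the bits at and above 2^128. A hypothetical C view of that layout, and of the block addition done by the scalar tail loop (the add/adc chain), is sketched below; the struct and function names are mine, not part of the patch.

    #include <stdint.h>
    #include <string.h>

    /* Assumed layout of the 'hash' argument, matching the 64/64/32-bit
     * loads and stores around this point. */
    struct poly1305_avx512_hash {
        uint64_t a0;   /* accumulator bits   0..63  */
        uint64_t a1;   /* accumulator bits  64..127 */
        uint32_t a2;   /* accumulator bits 128 and up (small) */
    };

    /* r is loaded as two 64-bit words from the key buffer. */
    static void load_r(const uint8_t key[16], uint64_t *r0, uint64_t *r1)
    {
        memcpy(r0, key, 8);        /* little-endian host assumed (x86-64) */
        memcpy(r1, key + 8, 8);
    }

    /* One step of .L_final_loop: accumulator += block + 2^128, i.e.
     * "add A0, [MSG]; adc A1, [MSG + 8]; adc A2, 1" (the +1 lands at bit 128). */
    static void add_block(struct poly1305_avx512_hash *h, const uint8_t m[16])
    {
        uint64_t m0, m1;
        unsigned __int128 t;

        memcpy(&m0, m, 8);
        memcpy(&m1, m + 8, 8);

        t = (unsigned __int128)h->a0 + m0;
        h->a0 = (uint64_t)t;
        t = (unsigned __int128)h->a1 + m1 + (uint64_t)(t >> 64);
        h->a1 = (uint64_t)t;
        h->a2 = h->a2 + 1 + (uint32_t)(t >> 64);
    }

Each such addition is followed by a multiply-reduce by R, which is what POLY1305_MUL_REDUCE performs in the scalar loop.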
mov [_arg3 + 2 * 8], DWORD(_a2) FUNC_EXIT() xor eax, eax kxorw k1, k1, k1 kxorw k2, k2, k2 ret_spec_stop CFI_ENDPROC() ELF(.size _gcry_poly1305_amd64_avx512_blocks, .-_gcry_poly1305_amd64_avx512_blocks;) #endif #endif diff --git a/cipher/sha1-avx-amd64.S b/cipher/sha1-avx-amd64.S index acada960..e5e55684 100644 --- a/cipher/sha1-avx-amd64.S +++ b/cipher/sha1-avx-amd64.S @@ -1,429 +1,433 @@ /* sha1-avx-amd64.S - Intel AVX accelerated SHA-1 transform function * Copyright (C) 2013 Jussi Kivilinna * * Based on sha1.c: * Copyright (C) 1998, 2001, 2002, 2003, 2008 Free Software Foundation, Inc. * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . */ /* * Intel SSSE3 accelerated SHA-1 implementation based on white paper: * "Improving the Performance of the Secure Hash Algorithm (SHA-1)" * http://software.intel.com/en-us/articles/improving-the-performance-of-the-secure-hash-algorithm-1 */ #ifdef __x86_64__ #include #if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && \ defined(HAVE_GCC_INLINE_ASM_AVX) && defined(USE_SHA1) #include "asm-common-amd64.h" /* Context structure */ #define state_h0 0 #define state_h1 4 #define state_h2 8 #define state_h3 12 #define state_h4 16 /* Constants */ -.text +SECTION_RODATA + +ELF(.type _sha1_avx_consts,@object) +_sha1_avx_consts: #define K1 0x5A827999 #define K2 0x6ED9EBA1 #define K3 0x8F1BBCDC #define K4 0xCA62C1D6 .align 16 .LK_XMM: .LK1: .long K1, K1, K1, K1 .LK2: .long K2, K2, K2, K2 .LK3: .long K3, K3, K3, K3 .LK4: .long K4, K4, K4, K4 .Lbswap_shufb_ctl: .long 0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f /* Register macros */ #define RSTATE %r8 #define RDATA %r9 #define ROLDSTACK %r10 #define RNBLKS %r11 #define a %eax #define b %ebx #define c %ecx #define d %edx #define e %edi #define RT0 %esi #define RT1 %ebp #define Wtmp0 %xmm0 #define Wtmp1 %xmm1 #define W0 %xmm2 #define W1 %xmm3 #define W2 %xmm4 #define W3 %xmm5 #define W4 %xmm6 #define W5 %xmm7 #define W6 %xmm8 #define W7 %xmm9 #define BSWAP_REG %xmm10 /* Round function macros. */ #define WK(i) (((i) & 15) * 4)(%rsp) #define R_F1(a,b,c,d,e,i) \ movl c, RT0; \ addl WK(i), e; \ xorl d, RT0; \ movl a, RT1; \ andl b, RT0; \ shldl $30, b, b; \ xorl d, RT0; \ leal (RT0,e), e; \ shldl $5, RT1, RT1; \ addl RT1, e; #define R_F2(a,b,c,d,e,i) \ movl c, RT0; \ addl WK(i), e; \ xorl b, RT0; \ shldl $30, b, b; \ xorl d, RT0; \ movl a, RT1; \ leal (RT0,e), e; \ shldl $5, RT1, RT1; \ addl RT1, e; #define R_F3(a,b,c,d,e,i) \ movl c, RT0; \ movl b, RT1; \ xorl b, RT0; \ andl c, RT1; \ andl d, RT0; \ addl RT1, e; \ addl WK(i), e; \ shldl $30, b, b; \ movl a, RT1; \ leal (RT0,e), e; \ shldl $5, RT1, RT1; \ addl RT1, e; #define R_F4(a,b,c,d,e,i) R_F2(a,b,c,d,e,i) #define R(a,b,c,d,e,f,i) \ R_##f(a,b,c,d,e,i) /* Input expansion macros. 
*/ #define W_PRECALC_00_15_0(i, W, tmp0) \ vmovdqu (4*(i))(RDATA), tmp0; #define W_PRECALC_00_15_1(i, W, tmp0) \ vpshufb BSWAP_REG, tmp0, W; #define W_PRECALC_00_15_2(i, W, tmp0) \ vpaddd (.LK_XMM + ((i)/20)*16) rRIP, W, tmp0; #define W_PRECALC_00_15_3(i, W, tmp0) \ vmovdqa tmp0, WK(i&~3); #define W_PRECALC_16_31_0(i, W, W_m04, W_m08, W_m12, W_m16, tmp0, tmp1) \ vpalignr $8, W_m16, W_m12, W; \ vpsrldq $4, W_m04, tmp0; \ vpxor W_m08, W, W; #define W_PRECALC_16_31_1(i, W, W_m04, W_m08, W_m12, W_m16, tmp0, tmp1) \ vpxor W_m16, tmp0, tmp0; \ vpxor tmp0, W, W; \ vpslld $1, W, tmp0; \ vpslldq $12, W, tmp1; \ vpsrld $31, W, W; #define W_PRECALC_16_31_2(i, W, W_m04, W_m08, W_m12, W_m16, tmp0, tmp1) \ vpor W, tmp0, tmp0; \ vpsrld $30, tmp1, W; \ vpslld $2, tmp1, tmp1; #define W_PRECALC_16_31_3(i, W, W_m04, W_m08, W_m12, W_m16, tmp0, tmp1) \ vpxor W, tmp0, tmp0; \ vpxor tmp1, tmp0, W; \ vpaddd (.LK_XMM + ((i)/20)*16) rRIP, W, tmp0; \ vmovdqa tmp0, WK((i)&~3); #define W_PRECALC_32_79_0(i, W, W_m04, W_m08, W_m12, W_m16, W_m20, W_m24, W_m28, tmp0) \ vpxor W_m28, W, W; \ vpalignr $8, W_m08, W_m04, tmp0; #define W_PRECALC_32_79_1(i, W, W_m04, W_m08, W_m12, W_m16, W_m20, W_m24, W_m28, tmp0) \ vpxor W_m16, W, W; \ vpxor tmp0, W, W; #define W_PRECALC_32_79_2(i, W, W_m04, W_m08, W_m12, W_m16, W_m20, W_m24, W_m28, tmp0) \ vpsrld $30, W, tmp0; \ vpslld $2, W, W; #define W_PRECALC_32_79_3(i, W, W_m04, W_m08, W_m12, W_m16, W_m20, W_m24, W_m28, tmp0) \ vpor W, tmp0, W; \ vpaddd (.LK_XMM + ((i)/20)*16) rRIP, W, tmp0; \ vmovdqa tmp0, WK((i)&~3); +.text /* * Transform nblks*64 bytes (nblks*16 32-bit words) at DATA. * * unsigned int * _gcry_sha1_transform_amd64_avx (void *ctx, const unsigned char *data, * size_t nblks) */ .globl _gcry_sha1_transform_amd64_avx ELF(.type _gcry_sha1_transform_amd64_avx,@function) .align 16 _gcry_sha1_transform_amd64_avx: /* input: * %rdi: ctx, CTX * %rsi: data (64*nblks bytes) * %rdx: nblks */ CFI_STARTPROC(); xorl %eax, %eax; cmpq $0, %rdx; jz .Lret; vzeroupper; movq %rdx, RNBLKS; movq %rdi, RSTATE; movq %rsi, RDATA; pushq %rbx; CFI_PUSH(%rbx); pushq %rbp; CFI_PUSH(%rbp); movq %rsp, ROLDSTACK; CFI_DEF_CFA_REGISTER(ROLDSTACK); subq $(16*4), %rsp; andq $(~31), %rsp; /* Get the values of the chaining variables. */ movl state_h0(RSTATE), a; movl state_h1(RSTATE), b; movl state_h2(RSTATE), c; movl state_h3(RSTATE), d; movl state_h4(RSTATE), e; vmovdqa .Lbswap_shufb_ctl rRIP, BSWAP_REG; /* Precalc 0-15. */ W_PRECALC_00_15_0(0, W0, Wtmp0); W_PRECALC_00_15_1(1, W0, Wtmp0); W_PRECALC_00_15_2(2, W0, Wtmp0); W_PRECALC_00_15_3(3, W0, Wtmp0); W_PRECALC_00_15_0(4, W7, Wtmp0); W_PRECALC_00_15_1(5, W7, Wtmp0); W_PRECALC_00_15_2(6, W7, Wtmp0); W_PRECALC_00_15_3(7, W7, Wtmp0); W_PRECALC_00_15_0(8, W6, Wtmp0); W_PRECALC_00_15_1(9, W6, Wtmp0); W_PRECALC_00_15_2(10, W6, Wtmp0); W_PRECALC_00_15_3(11, W6, Wtmp0); W_PRECALC_00_15_0(12, W5, Wtmp0); W_PRECALC_00_15_1(13, W5, Wtmp0); W_PRECALC_00_15_2(14, W5, Wtmp0); W_PRECALC_00_15_3(15, W5, Wtmp0); .align 8 .Loop: addq $64, RDATA; /* Transform 0-15 + Precalc 16-31. 
*/ R( a, b, c, d, e, F1, 0 ); W_PRECALC_16_31_0(16, W4, W5, W6, W7, W0, Wtmp0, Wtmp1); R( e, a, b, c, d, F1, 1 ); W_PRECALC_16_31_1(17, W4, W5, W6, W7, W0, Wtmp0, Wtmp1); R( d, e, a, b, c, F1, 2 ); W_PRECALC_16_31_2(18, W4, W5, W6, W7, W0, Wtmp0, Wtmp1); R( c, d, e, a, b, F1, 3 ); W_PRECALC_16_31_3(19, W4, W5, W6, W7, W0, Wtmp0, Wtmp1); R( b, c, d, e, a, F1, 4 ); W_PRECALC_16_31_0(20, W3, W4, W5, W6, W7, Wtmp0, Wtmp1); R( a, b, c, d, e, F1, 5 ); W_PRECALC_16_31_1(21, W3, W4, W5, W6, W7, Wtmp0, Wtmp1); R( e, a, b, c, d, F1, 6 ); W_PRECALC_16_31_2(22, W3, W4, W5, W6, W7, Wtmp0, Wtmp1); R( d, e, a, b, c, F1, 7 ); W_PRECALC_16_31_3(23, W3, W4, W5, W6, W7, Wtmp0, Wtmp1); R( c, d, e, a, b, F1, 8 ); W_PRECALC_16_31_0(24, W2, W3, W4, W5, W6, Wtmp0, Wtmp1); R( b, c, d, e, a, F1, 9 ); W_PRECALC_16_31_1(25, W2, W3, W4, W5, W6, Wtmp0, Wtmp1); R( a, b, c, d, e, F1, 10 ); W_PRECALC_16_31_2(26, W2, W3, W4, W5, W6, Wtmp0, Wtmp1); R( e, a, b, c, d, F1, 11 ); W_PRECALC_16_31_3(27, W2, W3, W4, W5, W6, Wtmp0, Wtmp1); R( d, e, a, b, c, F1, 12 ); W_PRECALC_16_31_0(28, W1, W2, W3, W4, W5, Wtmp0, Wtmp1); R( c, d, e, a, b, F1, 13 ); W_PRECALC_16_31_1(29, W1, W2, W3, W4, W5, Wtmp0, Wtmp1); R( b, c, d, e, a, F1, 14 ); W_PRECALC_16_31_2(30, W1, W2, W3, W4, W5, Wtmp0, Wtmp1); R( a, b, c, d, e, F1, 15 ); W_PRECALC_16_31_3(31, W1, W2, W3, W4, W5, Wtmp0, Wtmp1); /* Transform 16-63 + Precalc 32-79. */ R( e, a, b, c, d, F1, 16 ); W_PRECALC_32_79_0(32, W0, W1, W2, W3, W4, W5, W6, W7, Wtmp0); R( d, e, a, b, c, F1, 17 ); W_PRECALC_32_79_1(33, W0, W1, W2, W3, W4, W5, W6, W7, Wtmp0); R( c, d, e, a, b, F1, 18 ); W_PRECALC_32_79_2(34, W0, W1, W2, W3, W4, W5, W6, W7, Wtmp0); R( b, c, d, e, a, F1, 19 ); W_PRECALC_32_79_3(35, W0, W1, W2, W3, W4, W5, W6, W7, Wtmp0); R( a, b, c, d, e, F2, 20 ); W_PRECALC_32_79_0(36, W7, W0, W1, W2, W3, W4, W5, W6, Wtmp0); R( e, a, b, c, d, F2, 21 ); W_PRECALC_32_79_1(37, W7, W0, W1, W2, W3, W4, W5, W6, Wtmp0); R( d, e, a, b, c, F2, 22 ); W_PRECALC_32_79_2(38, W7, W0, W1, W2, W3, W4, W5, W6, Wtmp0); R( c, d, e, a, b, F2, 23 ); W_PRECALC_32_79_3(39, W7, W0, W1, W2, W3, W4, W5, W6, Wtmp0); R( b, c, d, e, a, F2, 24 ); W_PRECALC_32_79_0(40, W6, W7, W0, W1, W2, W3, W4, W5, Wtmp0); R( a, b, c, d, e, F2, 25 ); W_PRECALC_32_79_1(41, W6, W7, W0, W1, W2, W3, W4, W5, Wtmp0); R( e, a, b, c, d, F2, 26 ); W_PRECALC_32_79_2(42, W6, W7, W0, W1, W2, W3, W4, W5, Wtmp0); R( d, e, a, b, c, F2, 27 ); W_PRECALC_32_79_3(43, W6, W7, W0, W1, W2, W3, W4, W5, Wtmp0); R( c, d, e, a, b, F2, 28 ); W_PRECALC_32_79_0(44, W5, W6, W7, W0, W1, W2, W3, W4, Wtmp0); R( b, c, d, e, a, F2, 29 ); W_PRECALC_32_79_1(45, W5, W6, W7, W0, W1, W2, W3, W4, Wtmp0); R( a, b, c, d, e, F2, 30 ); W_PRECALC_32_79_2(46, W5, W6, W7, W0, W1, W2, W3, W4, Wtmp0); R( e, a, b, c, d, F2, 31 ); W_PRECALC_32_79_3(47, W5, W6, W7, W0, W1, W2, W3, W4, Wtmp0); R( d, e, a, b, c, F2, 32 ); W_PRECALC_32_79_0(48, W4, W5, W6, W7, W0, W1, W2, W3, Wtmp0); R( c, d, e, a, b, F2, 33 ); W_PRECALC_32_79_1(49, W4, W5, W6, W7, W0, W1, W2, W3, Wtmp0); R( b, c, d, e, a, F2, 34 ); W_PRECALC_32_79_2(50, W4, W5, W6, W7, W0, W1, W2, W3, Wtmp0); R( a, b, c, d, e, F2, 35 ); W_PRECALC_32_79_3(51, W4, W5, W6, W7, W0, W1, W2, W3, Wtmp0); R( e, a, b, c, d, F2, 36 ); W_PRECALC_32_79_0(52, W3, W4, W5, W6, W7, W0, W1, W2, Wtmp0); R( d, e, a, b, c, F2, 37 ); W_PRECALC_32_79_1(53, W3, W4, W5, W6, W7, W0, W1, W2, Wtmp0); R( c, d, e, a, b, F2, 38 ); W_PRECALC_32_79_2(54, W3, W4, W5, W6, W7, W0, W1, W2, Wtmp0); R( b, c, d, e, a, F2, 39 ); W_PRECALC_32_79_3(55, W3, W4, W5, W6, W7, W0, W1, W2, Wtmp0); R( 
a, b, c, d, e, F3, 40 ); W_PRECALC_32_79_0(56, W2, W3, W4, W5, W6, W7, W0, W1, Wtmp0); R( e, a, b, c, d, F3, 41 ); W_PRECALC_32_79_1(57, W2, W3, W4, W5, W6, W7, W0, W1, Wtmp0); R( d, e, a, b, c, F3, 42 ); W_PRECALC_32_79_2(58, W2, W3, W4, W5, W6, W7, W0, W1, Wtmp0); R( c, d, e, a, b, F3, 43 ); W_PRECALC_32_79_3(59, W2, W3, W4, W5, W6, W7, W0, W1, Wtmp0); R( b, c, d, e, a, F3, 44 ); W_PRECALC_32_79_0(60, W1, W2, W3, W4, W5, W6, W7, W0, Wtmp0); R( a, b, c, d, e, F3, 45 ); W_PRECALC_32_79_1(61, W1, W2, W3, W4, W5, W6, W7, W0, Wtmp0); R( e, a, b, c, d, F3, 46 ); W_PRECALC_32_79_2(62, W1, W2, W3, W4, W5, W6, W7, W0, Wtmp0); R( d, e, a, b, c, F3, 47 ); W_PRECALC_32_79_3(63, W1, W2, W3, W4, W5, W6, W7, W0, Wtmp0); R( c, d, e, a, b, F3, 48 ); W_PRECALC_32_79_0(64, W0, W1, W2, W3, W4, W5, W6, W7, Wtmp0); R( b, c, d, e, a, F3, 49 ); W_PRECALC_32_79_1(65, W0, W1, W2, W3, W4, W5, W6, W7, Wtmp0); R( a, b, c, d, e, F3, 50 ); W_PRECALC_32_79_2(66, W0, W1, W2, W3, W4, W5, W6, W7, Wtmp0); R( e, a, b, c, d, F3, 51 ); W_PRECALC_32_79_3(67, W0, W1, W2, W3, W4, W5, W6, W7, Wtmp0); R( d, e, a, b, c, F3, 52 ); W_PRECALC_32_79_0(68, W7, W0, W1, W2, W3, W4, W5, W6, Wtmp0); R( c, d, e, a, b, F3, 53 ); W_PRECALC_32_79_1(69, W7, W0, W1, W2, W3, W4, W5, W6, Wtmp0); R( b, c, d, e, a, F3, 54 ); W_PRECALC_32_79_2(70, W7, W0, W1, W2, W3, W4, W5, W6, Wtmp0); R( a, b, c, d, e, F3, 55 ); W_PRECALC_32_79_3(71, W7, W0, W1, W2, W3, W4, W5, W6, Wtmp0); R( e, a, b, c, d, F3, 56 ); W_PRECALC_32_79_0(72, W6, W7, W0, W1, W2, W3, W4, W5, Wtmp0); R( d, e, a, b, c, F3, 57 ); W_PRECALC_32_79_1(73, W6, W7, W0, W1, W2, W3, W4, W5, Wtmp0); R( c, d, e, a, b, F3, 58 ); W_PRECALC_32_79_2(74, W6, W7, W0, W1, W2, W3, W4, W5, Wtmp0); R( b, c, d, e, a, F3, 59 ); W_PRECALC_32_79_3(75, W6, W7, W0, W1, W2, W3, W4, W5, Wtmp0); R( a, b, c, d, e, F4, 60 ); W_PRECALC_32_79_0(76, W5, W6, W7, W0, W1, W2, W3, W4, Wtmp0); R( e, a, b, c, d, F4, 61 ); W_PRECALC_32_79_1(77, W5, W6, W7, W0, W1, W2, W3, W4, Wtmp0); R( d, e, a, b, c, F4, 62 ); W_PRECALC_32_79_2(78, W5, W6, W7, W0, W1, W2, W3, W4, Wtmp0); R( c, d, e, a, b, F4, 63 ); W_PRECALC_32_79_3(79, W5, W6, W7, W0, W1, W2, W3, W4, Wtmp0); decq RNBLKS; jz .Lend; /* Transform 64-79 + Precalc 0-15 of next block. */ R( b, c, d, e, a, F4, 64 ); W_PRECALC_00_15_0(0, W0, Wtmp0); R( a, b, c, d, e, F4, 65 ); W_PRECALC_00_15_1(1, W0, Wtmp0); R( e, a, b, c, d, F4, 66 ); W_PRECALC_00_15_2(2, W0, Wtmp0); R( d, e, a, b, c, F4, 67 ); W_PRECALC_00_15_3(3, W0, Wtmp0); R( c, d, e, a, b, F4, 68 ); W_PRECALC_00_15_0(4, W7, Wtmp0); R( b, c, d, e, a, F4, 69 ); W_PRECALC_00_15_1(5, W7, Wtmp0); R( a, b, c, d, e, F4, 70 ); W_PRECALC_00_15_2(6, W7, Wtmp0); R( e, a, b, c, d, F4, 71 ); W_PRECALC_00_15_3(7, W7, Wtmp0); R( d, e, a, b, c, F4, 72 ); W_PRECALC_00_15_0(8, W6, Wtmp0); R( c, d, e, a, b, F4, 73 ); W_PRECALC_00_15_1(9, W6, Wtmp0); R( b, c, d, e, a, F4, 74 ); W_PRECALC_00_15_2(10, W6, Wtmp0); R( a, b, c, d, e, F4, 75 ); W_PRECALC_00_15_3(11, W6, Wtmp0); R( e, a, b, c, d, F4, 76 ); W_PRECALC_00_15_0(12, W5, Wtmp0); R( d, e, a, b, c, F4, 77 ); W_PRECALC_00_15_1(13, W5, Wtmp0); R( c, d, e, a, b, F4, 78 ); addl state_h0(RSTATE), a; W_PRECALC_00_15_2(14, W5, Wtmp0); R( b, c, d, e, a, F4, 79 ); W_PRECALC_00_15_3(15, W5, Wtmp0); /* Update the chaining variables. 
*/ addl state_h3(RSTATE), d; addl state_h2(RSTATE), c; addl state_h1(RSTATE), b; addl state_h4(RSTATE), e; movl d, state_h3(RSTATE); movl c, state_h2(RSTATE); movl b, state_h1(RSTATE); movl a, state_h0(RSTATE); movl e, state_h4(RSTATE); jmp .Loop; .align 16 .Lend: vzeroall; /* Transform 64-79 + burn stack */ R( b, c, d, e, a, F4, 64 ); R( a, b, c, d, e, F4, 65 ); R( e, a, b, c, d, F4, 66 ); R( d, e, a, b, c, F4, 67 ); R( c, d, e, a, b, F4, 68 ); R( b, c, d, e, a, F4, 69 ); R( a, b, c, d, e, F4, 70 ); R( e, a, b, c, d, F4, 71 ); R( d, e, a, b, c, F4, 72 ); R( c, d, e, a, b, F4, 73 ); R( b, c, d, e, a, F4, 74 ); R( a, b, c, d, e, F4, 75 ); R( e, a, b, c, d, F4, 76 ); vmovdqa %xmm0, (0*16)(%rsp); R( d, e, a, b, c, F4, 77 ); vmovdqa %xmm0, (1*16)(%rsp); R( c, d, e, a, b, F4, 78 ); vmovdqa %xmm0, (2*16)(%rsp); addl state_h0(RSTATE), a; R( b, c, d, e, a, F4, 79 ); /* 16*4/16-1 = 3 */ vmovdqa %xmm0, (3*16)(%rsp); /* Update the chaining variables. */ addl state_h3(RSTATE), d; addl state_h2(RSTATE), c; addl state_h1(RSTATE), b; addl state_h4(RSTATE), e; movl d, state_h3(RSTATE); movl c, state_h2(RSTATE); movl b, state_h1(RSTATE); movl a, state_h0(RSTATE); movl e, state_h4(RSTATE); movq ROLDSTACK, %rsp; CFI_REGISTER(ROLDSTACK, %rsp); CFI_DEF_CFA_REGISTER(%rsp); popq %rbp; CFI_POP(%rbp); popq %rbx; CFI_POP(%rbx); /* stack already burned */ xorl %eax, %eax; .Lret: ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_sha1_transform_amd64_avx, .-_gcry_sha1_transform_amd64_avx;) #endif #endif diff --git a/cipher/sha1-avx-bmi2-amd64.S b/cipher/sha1-avx-bmi2-amd64.S index 5f4b9e69..16a01bfd 100644 --- a/cipher/sha1-avx-bmi2-amd64.S +++ b/cipher/sha1-avx-bmi2-amd64.S @@ -1,441 +1,446 @@ /* sha1-avx-bmi2-amd64.S - Intel AVX/BMI2 accelerated SHA-1 transform function * Copyright (C) 2013 Jussi Kivilinna * * Based on sha1.c: * Copyright (C) 1998, 2001, 2002, 2003, 2008 Free Software Foundation, Inc. * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . 
*/ /* * Intel SSSE3 accelerated SHA-1 implementation based on white paper: * "Improving the Performance of the Secure Hash Algorithm (SHA-1)" * http://software.intel.com/en-us/articles/improving-the-performance-of-the-secure-hash-algorithm-1 */ #ifdef __x86_64__ #include #if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && \ defined(HAVE_GCC_INLINE_ASM_BMI2) && \ defined(HAVE_GCC_INLINE_ASM_AVX) && defined(USE_SHA1) #include "asm-common-amd64.h" /* Context structure */ #define state_h0 0 #define state_h1 4 #define state_h2 8 #define state_h3 12 #define state_h4 16 /* Constants */ -.text +SECTION_RODATA + +ELF(.type _sha1_avx_bmi2_consts,@object) +_sha1_avx_bmi2_consts: + .align 16 .Lbswap_shufb_ctl: .long 0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f .LK1: .long 0x5A827999 .LK2: .long 0x6ED9EBA1 .LK3: .long 0x8F1BBCDC .LK4: .long 0xCA62C1D6 /* Register macros */ #define RSTATE %r8 #define RDATA %r9 #define ROLDSTACK %r10 #define RNBLKS %r11 #define a %esi #define b %edi #define c %ebp #define d %edx #define e %ecx #define ne %ebx #define RT0 %eax #define RT1 %r12d #define Wtmp0 %xmm0 #define Wtmp1 %xmm1 #define W0 %xmm2 #define W1 %xmm3 #define W2 %xmm4 #define W3 %xmm5 #define W4 %xmm6 #define W5 %xmm7 #define W6 %xmm8 #define W7 %xmm9 #define BSWAP_REG %xmm10 #define K1 %xmm11 #define K2 %xmm12 #define K3 %xmm13 #define K4 %xmm14 /* Round function macros. */ #define WK(i) (((i) & 15) * 4)(%rsp) #define R_F1(a,b,c,d,e,i) \ movl c, RT0; \ andn d, b, RT1; \ addl WK(i), e; \ andl b, RT0; \ rorxl $2, b, b; \ addl RT1, e; \ addl ne, a; \ leal (RT0,e), ne; \ rorxl $27, a, e; #define R_F2(a,b,c,d,e,i) \ movl c, RT0; \ addl WK(i), e; \ xorl b, RT0; \ rorxl $2, b, b; \ xorl d, RT0; \ addl ne, a; \ leal (RT0,e), ne; \ rorxl $27, a, e; #define R_F3(a,b,c,d,e,i) \ movl c, RT0; \ movl b, RT1; \ addl WK(i), e; \ xorl b, RT0; \ andl c, RT1; \ andl d, RT0; \ addl RT1, e; \ rorxl $2, b, b; \ addl ne, a; \ leal (RT0,e), ne; \ rorxl $27, a, e; #define R_F4(a,b,c,d,e,i) R_F2(a,b,c,d,e,i) #define R(a,b,c,d,e,f,i) \ R_##f(a,b,c,d,e,i) /* Input expansion macros. 
*/ #define W_PRECALC_00_15_0(i, W, tmp0) \ vmovdqu (4*(i))(RDATA), tmp0; #define W_PRECALC_00_15_1(i, W, tmp0) \ vpshufb BSWAP_REG, tmp0, W; #define W_PRECALC_00_15_2(i, W, tmp0, K) \ vpaddd K, W, tmp0; #define W_PRECALC_00_15_3(i, W, tmp0) \ vmovdqa tmp0, WK(i&~3); #define W_PRECALC_16_31_0(i, W, W_m04, W_m08, W_m12, W_m16, tmp0, tmp1) \ vpalignr $8, W_m16, W_m12, W; \ vpsrldq $4, W_m04, tmp0; \ vpxor W_m08, W, W; #define W_PRECALC_16_31_1(i, W, W_m04, W_m08, W_m12, W_m16, tmp0, tmp1) \ vpxor W_m16, tmp0, tmp0; \ vpxor tmp0, W, W; \ vpslld $1, W, tmp0; \ vpslldq $12, W, tmp1; \ vpsrld $31, W, W; #define W_PRECALC_16_31_2(i, W, W_m04, W_m08, W_m12, W_m16, tmp0, tmp1) \ vpor W, tmp0, tmp0; \ vpsrld $30, tmp1, W; \ vpslld $2, tmp1, tmp1; #define W_PRECALC_16_31_3(i, W, W_m04, W_m08, W_m12, W_m16, tmp0, tmp1, K) \ vpxor W, tmp0, tmp0; \ vpxor tmp1, tmp0, W; \ vpaddd K, W, tmp0; \ vmovdqa tmp0, WK((i)&~3); #define W_PRECALC_32_79_0(i, W, W_m04, W_m08, W_m12, W_m16, W_m20, W_m24, W_m28, tmp0) \ vpxor W_m28, W, W; \ vpalignr $8, W_m08, W_m04, tmp0; #define W_PRECALC_32_79_1(i, W, W_m04, W_m08, W_m12, W_m16, W_m20, W_m24, W_m28, tmp0) \ vpxor W_m16, W, W; \ vpxor tmp0, W, W; #define W_PRECALC_32_79_2(i, W, W_m04, W_m08, W_m12, W_m16, W_m20, W_m24, W_m28, tmp0) \ vpsrld $30, W, tmp0; \ vpslld $2, W, W; #define W_PRECALC_32_79_3(i, W, W_m04, W_m08, W_m12, W_m16, W_m20, W_m24, W_m28, tmp0, K) \ vpor W, tmp0, W; \ vpaddd K, W, tmp0; \ vmovdqa tmp0, WK((i)&~3); +.text /* * Transform nblks*64 bytes (nblks*16 32-bit words) at DATA. * * unsigned int * _gcry_sha1_transform_amd64_avx_bmi2 (void *ctx, const unsigned char *data, * size_t nblks) */ .globl _gcry_sha1_transform_amd64_avx_bmi2 ELF(.type _gcry_sha1_transform_amd64_avx_bmi2,@function) .align 16 _gcry_sha1_transform_amd64_avx_bmi2: /* input: * %rdi: ctx, CTX * %rsi: data (64*nblks bytes) * %rdx: nblks */ CFI_STARTPROC(); xorl %eax, %eax; cmpq $0, %rdx; jz .Lret; vzeroupper; movq %rdx, RNBLKS; movq %rdi, RSTATE; movq %rsi, RDATA; pushq %rbx; CFI_PUSH(%rbx); pushq %rbp; CFI_PUSH(%rbp); pushq %r12; CFI_PUSH(%r12); movq %rsp, ROLDSTACK; CFI_DEF_CFA_REGISTER(ROLDSTACK); subq $(16*4), %rsp; andq $(~31), %rsp; /* Get the values of the chaining variables. */ movl state_h0(RSTATE), a; movl state_h1(RSTATE), b; movl state_h2(RSTATE), c; movl state_h3(RSTATE), d; movl state_h4(RSTATE), e; xorl ne, ne; vmovdqa .Lbswap_shufb_ctl rRIP, BSWAP_REG; vpbroadcastd .LK1 rRIP, K1; vpbroadcastd .LK2 rRIP, K2; vpbroadcastd .LK3 rRIP, K3; vpbroadcastd .LK4 rRIP, K4; /* Precalc 0-15. */ W_PRECALC_00_15_0(0, W0, Wtmp0); W_PRECALC_00_15_1(1, W0, Wtmp0); W_PRECALC_00_15_2(2, W0, Wtmp0, K1); W_PRECALC_00_15_3(3, W0, Wtmp0); W_PRECALC_00_15_0(4, W7, Wtmp0); W_PRECALC_00_15_1(5, W7, Wtmp0); W_PRECALC_00_15_2(6, W7, Wtmp0, K1); W_PRECALC_00_15_3(7, W7, Wtmp0); W_PRECALC_00_15_0(8, W6, Wtmp0); W_PRECALC_00_15_1(9, W6, Wtmp0); W_PRECALC_00_15_2(10, W6, Wtmp0, K1); W_PRECALC_00_15_3(11, W6, Wtmp0); W_PRECALC_00_15_0(12, W5, Wtmp0); W_PRECALC_00_15_1(13, W5, Wtmp0); W_PRECALC_00_15_2(14, W5, Wtmp0, K1); W_PRECALC_00_15_3(15, W5, Wtmp0); .align 8 .Loop: addq $64, RDATA; /* Transform 0-15 + Precalc 16-31. 
*/ R( a, b, c, d, e, F1, 0 ); W_PRECALC_16_31_0(16, W4, W5, W6, W7, W0, Wtmp0, Wtmp1); R( e, a, b, c, d, F1, 1 ); W_PRECALC_16_31_1(17, W4, W5, W6, W7, W0, Wtmp0, Wtmp1); R( d, e, a, b, c, F1, 2 ); W_PRECALC_16_31_2(18, W4, W5, W6, W7, W0, Wtmp0, Wtmp1); R( c, d, e, a, b, F1, 3 ); W_PRECALC_16_31_3(19, W4, W5, W6, W7, W0, Wtmp0, Wtmp1, K1); R( b, c, d, e, a, F1, 4 ); W_PRECALC_16_31_0(20, W3, W4, W5, W6, W7, Wtmp0, Wtmp1); R( a, b, c, d, e, F1, 5 ); W_PRECALC_16_31_1(21, W3, W4, W5, W6, W7, Wtmp0, Wtmp1); R( e, a, b, c, d, F1, 6 ); W_PRECALC_16_31_2(22, W3, W4, W5, W6, W7, Wtmp0, Wtmp1); R( d, e, a, b, c, F1, 7 ); W_PRECALC_16_31_3(23, W3, W4, W5, W6, W7, Wtmp0, Wtmp1, K2); R( c, d, e, a, b, F1, 8 ); W_PRECALC_16_31_0(24, W2, W3, W4, W5, W6, Wtmp0, Wtmp1); R( b, c, d, e, a, F1, 9 ); W_PRECALC_16_31_1(25, W2, W3, W4, W5, W6, Wtmp0, Wtmp1); R( a, b, c, d, e, F1, 10 ); W_PRECALC_16_31_2(26, W2, W3, W4, W5, W6, Wtmp0, Wtmp1); R( e, a, b, c, d, F1, 11 ); W_PRECALC_16_31_3(27, W2, W3, W4, W5, W6, Wtmp0, Wtmp1, K2); R( d, e, a, b, c, F1, 12 ); W_PRECALC_16_31_0(28, W1, W2, W3, W4, W5, Wtmp0, Wtmp1); R( c, d, e, a, b, F1, 13 ); W_PRECALC_16_31_1(29, W1, W2, W3, W4, W5, Wtmp0, Wtmp1); R( b, c, d, e, a, F1, 14 ); W_PRECALC_16_31_2(30, W1, W2, W3, W4, W5, Wtmp0, Wtmp1); R( a, b, c, d, e, F1, 15 ); W_PRECALC_16_31_3(31, W1, W2, W3, W4, W5, Wtmp0, Wtmp1, K2); /* Transform 16-63 + Precalc 32-79. */ R( e, a, b, c, d, F1, 16 ); W_PRECALC_32_79_0(32, W0, W1, W2, W3, W4, W5, W6, W7, Wtmp0); R( d, e, a, b, c, F1, 17 ); W_PRECALC_32_79_1(33, W0, W1, W2, W3, W4, W5, W6, W7, Wtmp0); R( c, d, e, a, b, F1, 18 ); W_PRECALC_32_79_2(34, W0, W1, W2, W3, W4, W5, W6, W7, Wtmp0); R( b, c, d, e, a, F1, 19 ); W_PRECALC_32_79_3(35, W0, W1, W2, W3, W4, W5, W6, W7, Wtmp0, K2); R( a, b, c, d, e, F2, 20 ); W_PRECALC_32_79_0(36, W7, W0, W1, W2, W3, W4, W5, W6, Wtmp0); R( e, a, b, c, d, F2, 21 ); W_PRECALC_32_79_1(37, W7, W0, W1, W2, W3, W4, W5, W6, Wtmp0); R( d, e, a, b, c, F2, 22 ); W_PRECALC_32_79_2(38, W7, W0, W1, W2, W3, W4, W5, W6, Wtmp0); R( c, d, e, a, b, F2, 23 ); W_PRECALC_32_79_3(39, W7, W0, W1, W2, W3, W4, W5, W6, Wtmp0, K2); R( b, c, d, e, a, F2, 24 ); W_PRECALC_32_79_0(40, W6, W7, W0, W1, W2, W3, W4, W5, Wtmp0); R( a, b, c, d, e, F2, 25 ); W_PRECALC_32_79_1(41, W6, W7, W0, W1, W2, W3, W4, W5, Wtmp0); R( e, a, b, c, d, F2, 26 ); W_PRECALC_32_79_2(42, W6, W7, W0, W1, W2, W3, W4, W5, Wtmp0); R( d, e, a, b, c, F2, 27 ); W_PRECALC_32_79_3(43, W6, W7, W0, W1, W2, W3, W4, W5, Wtmp0, K3); R( c, d, e, a, b, F2, 28 ); W_PRECALC_32_79_0(44, W5, W6, W7, W0, W1, W2, W3, W4, Wtmp0); R( b, c, d, e, a, F2, 29 ); W_PRECALC_32_79_1(45, W5, W6, W7, W0, W1, W2, W3, W4, Wtmp0); R( a, b, c, d, e, F2, 30 ); W_PRECALC_32_79_2(46, W5, W6, W7, W0, W1, W2, W3, W4, Wtmp0); R( e, a, b, c, d, F2, 31 ); W_PRECALC_32_79_3(47, W5, W6, W7, W0, W1, W2, W3, W4, Wtmp0, K3); R( d, e, a, b, c, F2, 32 ); W_PRECALC_32_79_0(48, W4, W5, W6, W7, W0, W1, W2, W3, Wtmp0); R( c, d, e, a, b, F2, 33 ); W_PRECALC_32_79_1(49, W4, W5, W6, W7, W0, W1, W2, W3, Wtmp0); R( b, c, d, e, a, F2, 34 ); W_PRECALC_32_79_2(50, W4, W5, W6, W7, W0, W1, W2, W3, Wtmp0); R( a, b, c, d, e, F2, 35 ); W_PRECALC_32_79_3(51, W4, W5, W6, W7, W0, W1, W2, W3, Wtmp0, K3); R( e, a, b, c, d, F2, 36 ); W_PRECALC_32_79_0(52, W3, W4, W5, W6, W7, W0, W1, W2, Wtmp0); R( d, e, a, b, c, F2, 37 ); W_PRECALC_32_79_1(53, W3, W4, W5, W6, W7, W0, W1, W2, Wtmp0); R( c, d, e, a, b, F2, 38 ); W_PRECALC_32_79_2(54, W3, W4, W5, W6, W7, W0, W1, W2, Wtmp0); R( b, c, d, e, a, F2, 39 ); W_PRECALC_32_79_3(55, W3, 
W4, W5, W6, W7, W0, W1, W2, Wtmp0, K3); R( a, b, c, d, e, F3, 40 ); W_PRECALC_32_79_0(56, W2, W3, W4, W5, W6, W7, W0, W1, Wtmp0); R( e, a, b, c, d, F3, 41 ); W_PRECALC_32_79_1(57, W2, W3, W4, W5, W6, W7, W0, W1, Wtmp0); R( d, e, a, b, c, F3, 42 ); W_PRECALC_32_79_2(58, W2, W3, W4, W5, W6, W7, W0, W1, Wtmp0); R( c, d, e, a, b, F3, 43 ); W_PRECALC_32_79_3(59, W2, W3, W4, W5, W6, W7, W0, W1, Wtmp0, K3); R( b, c, d, e, a, F3, 44 ); W_PRECALC_32_79_0(60, W1, W2, W3, W4, W5, W6, W7, W0, Wtmp0); R( a, b, c, d, e, F3, 45 ); W_PRECALC_32_79_1(61, W1, W2, W3, W4, W5, W6, W7, W0, Wtmp0); R( e, a, b, c, d, F3, 46 ); W_PRECALC_32_79_2(62, W1, W2, W3, W4, W5, W6, W7, W0, Wtmp0); R( d, e, a, b, c, F3, 47 ); W_PRECALC_32_79_3(63, W1, W2, W3, W4, W5, W6, W7, W0, Wtmp0, K4); R( c, d, e, a, b, F3, 48 ); W_PRECALC_32_79_0(64, W0, W1, W2, W3, W4, W5, W6, W7, Wtmp0); R( b, c, d, e, a, F3, 49 ); W_PRECALC_32_79_1(65, W0, W1, W2, W3, W4, W5, W6, W7, Wtmp0); R( a, b, c, d, e, F3, 50 ); W_PRECALC_32_79_2(66, W0, W1, W2, W3, W4, W5, W6, W7, Wtmp0); R( e, a, b, c, d, F3, 51 ); W_PRECALC_32_79_3(67, W0, W1, W2, W3, W4, W5, W6, W7, Wtmp0, K4); R( d, e, a, b, c, F3, 52 ); W_PRECALC_32_79_0(68, W7, W0, W1, W2, W3, W4, W5, W6, Wtmp0); R( c, d, e, a, b, F3, 53 ); W_PRECALC_32_79_1(69, W7, W0, W1, W2, W3, W4, W5, W6, Wtmp0); R( b, c, d, e, a, F3, 54 ); W_PRECALC_32_79_2(70, W7, W0, W1, W2, W3, W4, W5, W6, Wtmp0); R( a, b, c, d, e, F3, 55 ); W_PRECALC_32_79_3(71, W7, W0, W1, W2, W3, W4, W5, W6, Wtmp0, K4); R( e, a, b, c, d, F3, 56 ); W_PRECALC_32_79_0(72, W6, W7, W0, W1, W2, W3, W4, W5, Wtmp0); R( d, e, a, b, c, F3, 57 ); W_PRECALC_32_79_1(73, W6, W7, W0, W1, W2, W3, W4, W5, Wtmp0); R( c, d, e, a, b, F3, 58 ); W_PRECALC_32_79_2(74, W6, W7, W0, W1, W2, W3, W4, W5, Wtmp0); R( b, c, d, e, a, F3, 59 ); W_PRECALC_32_79_3(75, W6, W7, W0, W1, W2, W3, W4, W5, Wtmp0, K4); R( a, b, c, d, e, F4, 60 ); W_PRECALC_32_79_0(76, W5, W6, W7, W0, W1, W2, W3, W4, Wtmp0); R( e, a, b, c, d, F4, 61 ); W_PRECALC_32_79_1(77, W5, W6, W7, W0, W1, W2, W3, W4, Wtmp0); R( d, e, a, b, c, F4, 62 ); W_PRECALC_32_79_2(78, W5, W6, W7, W0, W1, W2, W3, W4, Wtmp0); R( c, d, e, a, b, F4, 63 ); W_PRECALC_32_79_3(79, W5, W6, W7, W0, W1, W2, W3, W4, Wtmp0, K4); decq RNBLKS; jz .Lend; /* Transform 64-79 + Precalc 0-15 of next block. */ R( b, c, d, e, a, F4, 64 ); W_PRECALC_00_15_0(0, W0, Wtmp0); R( a, b, c, d, e, F4, 65 ); W_PRECALC_00_15_1(1, W0, Wtmp0); R( e, a, b, c, d, F4, 66 ); W_PRECALC_00_15_2(2, W0, Wtmp0, K1); R( d, e, a, b, c, F4, 67 ); W_PRECALC_00_15_3(3, W0, Wtmp0); R( c, d, e, a, b, F4, 68 ); W_PRECALC_00_15_0(4, W7, Wtmp0); R( b, c, d, e, a, F4, 69 ); W_PRECALC_00_15_1(5, W7, Wtmp0); R( a, b, c, d, e, F4, 70 ); W_PRECALC_00_15_2(6, W7, Wtmp0, K1); R( e, a, b, c, d, F4, 71 ); W_PRECALC_00_15_3(7, W7, Wtmp0); R( d, e, a, b, c, F4, 72 ); W_PRECALC_00_15_0(8, W6, Wtmp0); R( c, d, e, a, b, F4, 73 ); W_PRECALC_00_15_1(9, W6, Wtmp0); R( b, c, d, e, a, F4, 74 ); W_PRECALC_00_15_2(10, W6, Wtmp0, K1); R( a, b, c, d, e, F4, 75 ); W_PRECALC_00_15_3(11, W6, Wtmp0); R( e, a, b, c, d, F4, 76 ); W_PRECALC_00_15_0(12, W5, Wtmp0); R( d, e, a, b, c, F4, 77 ); W_PRECALC_00_15_1(13, W5, Wtmp0); R( c, d, e, a, b, F4, 78 ); addl state_h0(RSTATE), a; W_PRECALC_00_15_2(14, W5, Wtmp0, K1); R( b, c, d, e, a, F4, 79 ); W_PRECALC_00_15_3(15, W5, Wtmp0); addl ne, a; xorl ne, ne; /* Update the chaining variables. 
*/ addl state_h3(RSTATE), d; addl state_h2(RSTATE), c; addl state_h1(RSTATE), b; addl state_h4(RSTATE), e; movl d, state_h3(RSTATE); movl c, state_h2(RSTATE); movl b, state_h1(RSTATE); movl a, state_h0(RSTATE); movl e, state_h4(RSTATE); jmp .Loop; .align 16 .Lend: vzeroall; /* Transform 64-79 + burn stack */ R( b, c, d, e, a, F4, 64 ); R( a, b, c, d, e, F4, 65 ); R( e, a, b, c, d, F4, 66 ); R( d, e, a, b, c, F4, 67 ); R( c, d, e, a, b, F4, 68 ); R( b, c, d, e, a, F4, 69 ); R( a, b, c, d, e, F4, 70 ); R( e, a, b, c, d, F4, 71 ); R( d, e, a, b, c, F4, 72 ); R( c, d, e, a, b, F4, 73 ); R( b, c, d, e, a, F4, 74 ); R( a, b, c, d, e, F4, 75 ); R( e, a, b, c, d, F4, 76 ); vmovdqa %xmm0, (0*16)(%rsp); R( d, e, a, b, c, F4, 77 ); vmovdqa %xmm0, (1*16)(%rsp); R( c, d, e, a, b, F4, 78 ); vmovdqa %xmm0, (2*16)(%rsp); addl state_h0(RSTATE), a; R( b, c, d, e, a, F4, 79 ); addl ne, a; xorl ne, ne; /* 16*4/16-1 = 3 */ vmovdqa %xmm0, (3*16)(%rsp); /* Update the chaining variables. */ addl state_h3(RSTATE), d; addl state_h2(RSTATE), c; addl state_h1(RSTATE), b; addl state_h4(RSTATE), e; movl d, state_h3(RSTATE); movl c, state_h2(RSTATE); movl b, state_h1(RSTATE); movl a, state_h0(RSTATE); movl e, state_h4(RSTATE); movq ROLDSTACK, %rsp; CFI_REGISTER(ROLDSTACK, %rsp); CFI_DEF_CFA_REGISTER(%rsp); popq %r12; CFI_POP(%r12); popq %rbp; CFI_POP(%rbp); popq %rbx; CFI_POP(%rbx); /* stack already burned */ xorl %eax, %eax; .Lret: ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_sha1_transform_amd64_avx_bmi2, .-_gcry_sha1_transform_amd64_avx_bmi2;) #endif #endif diff --git a/cipher/sha1-avx2-bmi2-amd64.S b/cipher/sha1-avx2-bmi2-amd64.S index ed52761b..06ff92f0 100644 --- a/cipher/sha1-avx2-bmi2-amd64.S +++ b/cipher/sha1-avx2-bmi2-amd64.S @@ -1,573 +1,578 @@ /* sha1-avx2-bmi2-amd64.S - Intel AVX2/BMI2 accelerated SHA-1 transform function * Copyright (C) 2019 Jussi Kivilinna * * Based on sha1.c: * Copyright (C) 1998, 2001, 2002, 2003, 2008 Free Software Foundation, Inc. * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . 
*/ /* * Intel SSSE3 accelerated SHA-1 implementation based on white paper: * "Improving the Performance of the Secure Hash Algorithm (SHA-1)" * http://software.intel.com/en-us/articles/improving-the-performance-of-the-secure-hash-algorithm-1 */ #ifdef __x86_64__ #include #if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && \ defined(HAVE_GCC_INLINE_ASM_BMI2) && defined(HAVE_GCC_INLINE_ASM_AVX) && \ defined(HAVE_GCC_INLINE_ASM_AVX2) && defined(USE_SHA1) #include "asm-common-amd64.h" /* Context structure */ #define state_h0 0 #define state_h1 4 #define state_h2 8 #define state_h3 12 #define state_h4 16 /* Constants */ +SECTION_RODATA + #define WK_STACK_WORDS (80 * 2) -.text +ELF(.type _sha1_avx2_bmi2_consts,@object) +_sha1_avx2_bmi2_consts: + .align 16 .Lbswap_shufb_ctl: .long 0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f .LK1: .long 0x5A827999 .LK2: .long 0x6ED9EBA1 .LK3: .long 0x8F1BBCDC .LK4: .long 0xCA62C1D6 /* Register macros */ #define RSTATE %r8 #define RDATA %r9 #define ROLDSTACK %r10 #define RNBLKS %r11 #define a %eax #define b %ebx #define c %ecx #define d %edx #define e %edi #define ne %r12d #define RT0 %esi #define RT1 %ebp #define Wtmp0 %ymm0 #define Wtmp1 %ymm1 #define Wtmp0x %xmm0 #define Wtmp1x %xmm1 #define W0 %ymm2 #define W1 %ymm3 #define W2 %ymm4 #define W3 %ymm5 #define W4 %ymm6 #define W5 %ymm7 #define W6 %ymm8 #define W7 %ymm9 #define BSWAP_REG %ymm10 #define K1 %ymm11 #define K2 %ymm12 #define K3 %ymm13 #define K4 %ymm14 /* Round function macros. */ #define WK(i,block) ((block) * 16 + ((i) / 4) * 32 + ((i) % 4) * 4)(%rsp) #define PRE_WK(i) ((i) * 4 * 2)(%rsp) #define R_F1(a,b,c,d,e,i,block) \ movl c, RT0; \ andn d, b, RT1; \ addl WK(i,block), e; \ andl b, RT0; \ leal (a,ne), a; \ rorxl $2, b, b; \ addl RT1, e; \ rorxl $27, a, ne; \ addl RT0, e; #define R_F2(a,b,c,d,e,i,block) \ addl WK(i,block), e; \ movl c, RT0; \ xorl b, RT0; \ leal (a,ne), a; \ rorxl $2, b, b; \ xorl d, RT0; \ addl RT0, e; \ rorxl $27, a, ne; #define R_F3(a,b,c,d,e,i,block) \ movl c, RT0; \ addl WK(i,block), e; \ movl b, RT1; \ xorl b, RT0; \ leal (a,ne), a; \ rorxl $2, b, b; \ andl c, RT1; \ addl RT1, e; \ andl d, RT0; \ rorxl $27, a, ne; \ addl RT0, e; #define R_F4(a,b,c,d,e,i,block) R_F2(a,b,c,d,e,i,block) #define R(a,b,c,d,e,f,i,block) \ R_##f(a,b,c,d,e,i,block) /* Input expansion macros. 
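Before the expansion macros, a short reference for the round macros defined just
above: R_F1..R_F4 each perform one SHA-1 round, reading the precomputed
W[t] + K[t] value from the stack (WK) and using BMI2 instructions for the bit
work; andn produces the "~b and d" half of the Ch function and rorx rotates
without touching the flags.  Roughly, one F1 round corresponds to the C sketch
below (illustrative only; rol32 and sha1_round_ch are placeholder names):

    #include <stdint.h>

    static inline uint32_t rol32(uint32_t x, unsigned r)
    {
        return (x << r) | (x >> (32 - r));
    }

    // One round for t = 0..19; wk is the precomputed W[t] + 0x5A827999.
    static void sha1_round_ch(uint32_t s[5], uint32_t wk)
    {
        uint32_t a = s[0], b = s[1], c = s[2], d = s[3], e = s[4];
        uint32_t ch = (b & c) | (~b & d);         // andn covers the ~b & d term
        uint32_t t  = rol32(a, 5) + ch + e + wk;  // rorx $27 yields rol(a, 5)
        s[4] = d; s[3] = c;
        s[2] = rol32(b, 30);                      // rorx $2 on b
        s[1] = a; s[0] = t;
    }

The assembly additionally defers the rol(a, 5) addition into the spare register
'ne' and applies it at the start of the following round, which shortens the
dependency chain; the sketch keeps the textbook ordering.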
*/ #define W_PRECALC_00_15_0(i, W, tmp0) \ vmovdqu (4*(i))(RDATA), tmp0##x; \ vinserti128 $1, (4*(i) + 64)(RDATA), tmp0, tmp0; #define W_PRECALC_00_15_1(i, W, tmp0) \ vpshufb BSWAP_REG, tmp0, W; #define W_PRECALC_00_15_2(i, W, tmp0, K) \ vpaddd K, W, tmp0; #define W_PRECALC_00_15_3(i, W, tmp0) \ vmovdqa tmp0, PRE_WK((i)&~3); #define W_PRECALC_16_31_0(i, W, W_m04, W_m08, W_m12, W_m16, tmp0, tmp1) \ vpalignr $8, W_m16, W_m12, W; \ vpsrldq $4, W_m04, tmp0; \ vpxor W_m08, W, W; #define W_PRECALC_16_31_1(i, W, W_m04, W_m08, W_m12, W_m16, tmp0, tmp1) \ vpxor W_m16, tmp0, tmp0; \ vpxor tmp0, W, W; \ vpslld $1, W, tmp0; \ vpslldq $12, W, tmp1; \ vpsrld $31, W, W; #define W_PRECALC_16_31_2(i, W, W_m04, W_m08, W_m12, W_m16, tmp0, tmp1) \ vpor W, tmp0, tmp0; \ vpsrld $30, tmp1, W; \ vpslld $2, tmp1, tmp1; #define W_PRECALC_16_31_3(i, W, W_m04, W_m08, W_m12, W_m16, tmp0, tmp1, K) \ vpxor W, tmp0, tmp0; \ vpxor tmp1, tmp0, W; \ vpaddd K, W, tmp0; \ vmovdqa tmp0, PRE_WK((i)&~3); #define W_PRECALC_32_79_0(i, W, W_m04, W_m08, W_m12, W_m16, W_m20, W_m24, W_m28, tmp0) \ vpxor W_m28, W, W; \ vpalignr $8, W_m08, W_m04, tmp0; #define W_PRECALC_32_79_1(i, W, W_m04, W_m08, W_m12, W_m16, W_m20, W_m24, W_m28, tmp0) \ vpxor W_m16, W, W; \ vpxor tmp0, W, W; #define W_PRECALC_32_79_2(i, W, W_m04, W_m08, W_m12, W_m16, W_m20, W_m24, W_m28, tmp0) \ vpsrld $30, W, tmp0; \ vpslld $2, W, W; #define W_PRECALC_32_79_3(i, W, W_m04, W_m08, W_m12, W_m16, W_m20, W_m24, W_m28, tmp0, K) \ vpor W, tmp0, W; \ vpaddd K, W, tmp0; \ vmovdqa tmp0, PRE_WK((i)&~3); +.text /* * Transform 2*nblks*64 bytes (2*nblks*16 32-bit words) at DATA. * * unsigned int * _gcry_sha1_transform_amd64_avx2_bmi2 (void *ctx, const unsigned char *data, * size_t nblks) */ .globl _gcry_sha1_transform_amd64_avx2_bmi2 ELF(.type _gcry_sha1_transform_amd64_avx2_bmi2,@function) .align 16 _gcry_sha1_transform_amd64_avx2_bmi2: /* input: * %rdi: ctx, CTX * %rsi: data (64*nblks bytes) * %rdx: nblks (multiple of 2, larger than 0) */ CFI_STARTPROC(); vzeroupper; movq %rdx, RNBLKS; movq %rdi, RSTATE; movq %rsi, RDATA; pushq %rbx; CFI_PUSH(%rbx); pushq %rbp; CFI_PUSH(%rbp); pushq %r12; CFI_PUSH(%r12); movq %rsp, ROLDSTACK; CFI_DEF_CFA_REGISTER(ROLDSTACK); subq $(WK_STACK_WORDS*4), %rsp; andq $(~63), %rsp; /* Get the values of the chaining variables. */ movl state_h0(RSTATE), a; movl state_h1(RSTATE), b; movl state_h2(RSTATE), c; movl state_h3(RSTATE), d; movl state_h4(RSTATE), e; xorl ne, ne; vbroadcasti128 .Lbswap_shufb_ctl rRIP, BSWAP_REG; vpbroadcastd .LK1 rRIP, K1; vpbroadcastd .LK2 rRIP, K2; vpbroadcastd .LK3 rRIP, K3; vpbroadcastd .LK4 rRIP, K4; /* Precalc 0-31 for block 1 & 2. 
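The block below performs the byte swap and the "+ K" addition for message words
0..31 of both input blocks before any rounds run.  Each ymm register holds four
W words of block 1 in its low 128-bit lane and the matching four words of
block 2 (64 bytes further into RDATA) in its high lane, which is what the
vinserti128 in W_PRECALC_00_15_0 sets up.  A scalar C sketch of the same
preparation for a single block (illustrative only; precalc_00_15 and its
arguments are placeholder names, and the byte swap assumes a little-endian host
with a GCC/clang-style __builtin_bswap32):

    #include <stdint.h>
    #include <string.h>

    // Load W[0..15] big-endian and store W[t] + K1 for the round code,
    // mirroring W_PRECALC_00_15_* (vpshufb byte-swaps, vpaddd adds K).
    static void precalc_00_15(uint32_t wk[16], const uint8_t data[64])
    {
        for (int t = 0; t < 16; t++) {
            uint32_t w;
            memcpy(&w, data + 4 * t, 4);
            wk[t] = __builtin_bswap32(w) + 0x5A827999;  // K1, rounds 0..19
        }
    }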
*/ W_PRECALC_00_15_0(0, W0, Wtmp0); W_PRECALC_00_15_1(1, W0, Wtmp0); W_PRECALC_00_15_2(2, W0, Wtmp0, K1); W_PRECALC_00_15_3(3, W0, Wtmp0); W_PRECALC_00_15_0(4, W7, Wtmp0); W_PRECALC_00_15_1(5, W7, Wtmp0); W_PRECALC_00_15_2(6, W7, Wtmp0, K1); W_PRECALC_00_15_3(7, W7, Wtmp0); W_PRECALC_00_15_0(8, W6, Wtmp0); W_PRECALC_00_15_1(9, W6, Wtmp0); W_PRECALC_00_15_2(10, W6, Wtmp0, K1); W_PRECALC_00_15_3(11, W6, Wtmp0); W_PRECALC_00_15_0(12, W5, Wtmp0); W_PRECALC_00_15_1(13, W5, Wtmp0); W_PRECALC_00_15_2(14, W5, Wtmp0, K1); W_PRECALC_00_15_3(15, W5, Wtmp0); W_PRECALC_16_31_0(16, W4, W5, W6, W7, W0, Wtmp0, Wtmp1); W_PRECALC_16_31_1(17, W4, W5, W6, W7, W0, Wtmp0, Wtmp1); W_PRECALC_16_31_2(18, W4, W5, W6, W7, W0, Wtmp0, Wtmp1); W_PRECALC_16_31_3(19, W4, W5, W6, W7, W0, Wtmp0, Wtmp1, K1); W_PRECALC_16_31_0(20, W3, W4, W5, W6, W7, Wtmp0, Wtmp1); W_PRECALC_16_31_1(21, W3, W4, W5, W6, W7, Wtmp0, Wtmp1); W_PRECALC_16_31_2(22, W3, W4, W5, W6, W7, Wtmp0, Wtmp1); W_PRECALC_16_31_3(23, W3, W4, W5, W6, W7, Wtmp0, Wtmp1, K2); W_PRECALC_16_31_0(24, W2, W3, W4, W5, W6, Wtmp0, Wtmp1); W_PRECALC_16_31_1(25, W2, W3, W4, W5, W6, Wtmp0, Wtmp1); W_PRECALC_16_31_2(26, W2, W3, W4, W5, W6, Wtmp0, Wtmp1); W_PRECALC_16_31_3(27, W2, W3, W4, W5, W6, Wtmp0, Wtmp1, K2); W_PRECALC_16_31_0(28, W1, W2, W3, W4, W5, Wtmp0, Wtmp1); W_PRECALC_16_31_1(29, W1, W2, W3, W4, W5, Wtmp0, Wtmp1); W_PRECALC_16_31_2(30, W1, W2, W3, W4, W5, Wtmp0, Wtmp1); W_PRECALC_16_31_3(31, W1, W2, W3, W4, W5, Wtmp0, Wtmp1, K2); .align 8 .Loop: addq $(2 * 64), RDATA; /* Transform 0-15 for block 1 + Precalc 32-47 for block 1 & 2. */ R( a, b, c, d, e, F1, 0, 0 ); W_PRECALC_32_79_0(32, W0, W1, W2, W3, W4, W5, W6, W7, Wtmp0); R( e, a, b, c, d, F1, 1, 0 ); W_PRECALC_32_79_1(33, W0, W1, W2, W3, W4, W5, W6, W7, Wtmp0); R( d, e, a, b, c, F1, 2, 0 ); W_PRECALC_32_79_2(34, W0, W1, W2, W3, W4, W5, W6, W7, Wtmp0); R( c, d, e, a, b, F1, 3, 0 ); W_PRECALC_32_79_3(35, W0, W1, W2, W3, W4, W5, W6, W7, Wtmp0, K2); R( b, c, d, e, a, F1, 4, 0 ); W_PRECALC_32_79_0(36, W7, W0, W1, W2, W3, W4, W5, W6, Wtmp0); R( a, b, c, d, e, F1, 5, 0 ); W_PRECALC_32_79_1(37, W7, W0, W1, W2, W3, W4, W5, W6, Wtmp0); R( e, a, b, c, d, F1, 6, 0 ); W_PRECALC_32_79_2(38, W7, W0, W1, W2, W3, W4, W5, W6, Wtmp0); R( d, e, a, b, c, F1, 7, 0 ); W_PRECALC_32_79_3(39, W7, W0, W1, W2, W3, W4, W5, W6, Wtmp0, K2); R( c, d, e, a, b, F1, 8, 0 ); W_PRECALC_32_79_0(40, W6, W7, W0, W1, W2, W3, W4, W5, Wtmp0); R( b, c, d, e, a, F1, 9, 0 ); W_PRECALC_32_79_1(41, W6, W7, W0, W1, W2, W3, W4, W5, Wtmp0); R( a, b, c, d, e, F1, 10, 0 ); W_PRECALC_32_79_2(42, W6, W7, W0, W1, W2, W3, W4, W5, Wtmp0); R( e, a, b, c, d, F1, 11, 0 ); W_PRECALC_32_79_3(43, W6, W7, W0, W1, W2, W3, W4, W5, Wtmp0, K3); R( d, e, a, b, c, F1, 12, 0 ); W_PRECALC_32_79_0(44, W5, W6, W7, W0, W1, W2, W3, W4, Wtmp0); R( c, d, e, a, b, F1, 13, 0 ); W_PRECALC_32_79_1(45, W5, W6, W7, W0, W1, W2, W3, W4, Wtmp0); R( b, c, d, e, a, F1, 14, 0 ); W_PRECALC_32_79_2(46, W5, W6, W7, W0, W1, W2, W3, W4, Wtmp0); R( a, b, c, d, e, F1, 15, 0 ); W_PRECALC_32_79_3(47, W5, W6, W7, W0, W1, W2, W3, W4, Wtmp0, K3); /* Transform 16-47 for block 1 + Precalc 48-79 for block 1 & 2. 
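The W_PRECALC_16_31_* macros used above implement the ordinary SHA-1 message
schedule, four words per macro group.  A scalar sketch of the recurrence
(illustrative only; expand_16_31 and rol32 are placeholder names):

    #include <stdint.h>

    static inline uint32_t rol32(uint32_t x, unsigned r)
    {
        return (x << r) | (x >> (32 - r));
    }

    // Scalar form of the t = 16..31 schedule; w holds the first 32 words.
    static void expand_16_31(uint32_t w[32])
    {
        for (int t = 16; t < 32; t++)
            w[t] = rol32(w[t - 3] ^ w[t - 8] ^ w[t - 14] ^ w[t - 16], 1);
    }

Because w[t - 3] for the last word of a four-word group is produced inside the
same group, the vector version cannot translate this loop literally; the
vpsrldq/vpslldq steps in the macros provide the fix-up for that missing lane.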
*/ R( e, a, b, c, d, F1, 16, 0 ); W_PRECALC_32_79_0(48, W4, W5, W6, W7, W0, W1, W2, W3, Wtmp0); R( d, e, a, b, c, F1, 17, 0 ); W_PRECALC_32_79_1(49, W4, W5, W6, W7, W0, W1, W2, W3, Wtmp0); R( c, d, e, a, b, F1, 18, 0 ); W_PRECALC_32_79_2(50, W4, W5, W6, W7, W0, W1, W2, W3, Wtmp0); R( b, c, d, e, a, F1, 19, 0 ); W_PRECALC_32_79_3(51, W4, W5, W6, W7, W0, W1, W2, W3, Wtmp0, K3); R( a, b, c, d, e, F2, 20, 0 ); W_PRECALC_32_79_0(52, W3, W4, W5, W6, W7, W0, W1, W2, Wtmp0); R( e, a, b, c, d, F2, 21, 0 ); W_PRECALC_32_79_1(53, W3, W4, W5, W6, W7, W0, W1, W2, Wtmp0); R( d, e, a, b, c, F2, 22, 0 ); W_PRECALC_32_79_2(54, W3, W4, W5, W6, W7, W0, W1, W2, Wtmp0); R( c, d, e, a, b, F2, 23, 0 ); W_PRECALC_32_79_3(55, W3, W4, W5, W6, W7, W0, W1, W2, Wtmp0, K3); R( b, c, d, e, a, F2, 24, 0 ); W_PRECALC_32_79_0(56, W2, W3, W4, W5, W6, W7, W0, W1, Wtmp0); R( a, b, c, d, e, F2, 25, 0 ); W_PRECALC_32_79_1(57, W2, W3, W4, W5, W6, W7, W0, W1, Wtmp0); R( e, a, b, c, d, F2, 26, 0 ); W_PRECALC_32_79_2(58, W2, W3, W4, W5, W6, W7, W0, W1, Wtmp0); R( d, e, a, b, c, F2, 27, 0 ); W_PRECALC_32_79_3(59, W2, W3, W4, W5, W6, W7, W0, W1, Wtmp0, K3); R( c, d, e, a, b, F2, 28, 0 ); W_PRECALC_32_79_0(60, W1, W2, W3, W4, W5, W6, W7, W0, Wtmp0); R( b, c, d, e, a, F2, 29, 0 ); W_PRECALC_32_79_1(61, W1, W2, W3, W4, W5, W6, W7, W0, Wtmp0); R( a, b, c, d, e, F2, 30, 0 ); W_PRECALC_32_79_2(62, W1, W2, W3, W4, W5, W6, W7, W0, Wtmp0); R( e, a, b, c, d, F2, 31, 0 ); W_PRECALC_32_79_3(63, W1, W2, W3, W4, W5, W6, W7, W0, Wtmp0, K4); R( d, e, a, b, c, F2, 32, 0 ); W_PRECALC_32_79_0(64, W0, W1, W2, W3, W4, W5, W6, W7, Wtmp0); R( c, d, e, a, b, F2, 33, 0 ); W_PRECALC_32_79_1(65, W0, W1, W2, W3, W4, W5, W6, W7, Wtmp0); R( b, c, d, e, a, F2, 34, 0 ); W_PRECALC_32_79_2(66, W0, W1, W2, W3, W4, W5, W6, W7, Wtmp0); R( a, b, c, d, e, F2, 35, 0 ); W_PRECALC_32_79_3(67, W0, W1, W2, W3, W4, W5, W6, W7, Wtmp0, K4); R( e, a, b, c, d, F2, 36, 0 ); W_PRECALC_32_79_0(68, W7, W0, W1, W2, W3, W4, W5, W6, Wtmp0); R( d, e, a, b, c, F2, 37, 0 ); W_PRECALC_32_79_1(69, W7, W0, W1, W2, W3, W4, W5, W6, Wtmp0); R( c, d, e, a, b, F2, 38, 0 ); W_PRECALC_32_79_2(70, W7, W0, W1, W2, W3, W4, W5, W6, Wtmp0); R( b, c, d, e, a, F2, 39, 0 ); W_PRECALC_32_79_3(71, W7, W0, W1, W2, W3, W4, W5, W6, Wtmp0, K4); R( a, b, c, d, e, F3, 40, 0 ); W_PRECALC_32_79_0(72, W6, W7, W0, W1, W2, W3, W4, W5, Wtmp0); R( e, a, b, c, d, F3, 41, 0 ); W_PRECALC_32_79_1(73, W6, W7, W0, W1, W2, W3, W4, W5, Wtmp0); R( d, e, a, b, c, F3, 42, 0 ); W_PRECALC_32_79_2(74, W6, W7, W0, W1, W2, W3, W4, W5, Wtmp0); R( c, d, e, a, b, F3, 43, 0 ); W_PRECALC_32_79_3(75, W6, W7, W0, W1, W2, W3, W4, W5, Wtmp0, K4); R( b, c, d, e, a, F3, 44, 0 ); W_PRECALC_32_79_0(76, W5, W6, W7, W0, W1, W2, W3, W4, Wtmp0); R( a, b, c, d, e, F3, 45, 0 ); W_PRECALC_32_79_1(77, W5, W6, W7, W0, W1, W2, W3, W4, Wtmp0); R( e, a, b, c, d, F3, 46, 0 ); W_PRECALC_32_79_2(78, W5, W6, W7, W0, W1, W2, W3, W4, Wtmp0); R( d, e, a, b, c, F3, 47, 0 ); W_PRECALC_32_79_3(79, W5, W6, W7, W0, W1, W2, W3, W4, Wtmp0, K4); /* Transform 48-79 for block 1. 
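From word 32 onwards the schedule switches to an equivalent recurrence that only
reaches at least four words back, so a whole four-word group can be computed
without an intra-group dependency; this is what the W_PRECALC_32_79_* macros
implement (XOR of the words 32, 28, 16 and 6 positions back, rotated left
by 2).  Scalar sketch, illustrative only, with placeholder names:

    #include <stdint.h>

    static inline uint32_t rol32(uint32_t x, unsigned r)
    {
        return (x << r) | (x >> (32 - r));
    }

    // For t >= 32 the usual recurrence can be rewritten as
    //   W[t] = rol32(W[t-6] ^ W[t-16] ^ W[t-28] ^ W[t-32], 2)
    static void expand_32_79(uint32_t w[80])
    {
        for (int t = 32; t < 80; t++)
            w[t] = rol32(w[t - 6] ^ w[t - 16] ^ w[t - 28] ^ w[t - 32], 2);
    }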
*/ R( c, d, e, a, b, F3, 48, 0 ); R( b, c, d, e, a, F3, 49, 0 ); R( a, b, c, d, e, F3, 50, 0 ); R( e, a, b, c, d, F3, 51, 0 ); R( d, e, a, b, c, F3, 52, 0 ); R( c, d, e, a, b, F3, 53, 0 ); R( b, c, d, e, a, F3, 54, 0 ); R( a, b, c, d, e, F3, 55, 0 ); R( e, a, b, c, d, F3, 56, 0 ); R( d, e, a, b, c, F3, 57, 0 ); R( c, d, e, a, b, F3, 58, 0 ); R( b, c, d, e, a, F3, 59, 0 ); R( a, b, c, d, e, F4, 60, 0 ); R( e, a, b, c, d, F4, 61, 0 ); R( d, e, a, b, c, F4, 62, 0 ); R( c, d, e, a, b, F4, 63, 0 ); R( b, c, d, e, a, F4, 64, 0 ); R( a, b, c, d, e, F4, 65, 0 ); R( e, a, b, c, d, F4, 66, 0 ); R( d, e, a, b, c, F4, 67, 0 ); R( c, d, e, a, b, F4, 68, 0 ); R( b, c, d, e, a, F4, 69, 0 ); R( a, b, c, d, e, F4, 70, 0 ); R( e, a, b, c, d, F4, 71, 0 ); R( d, e, a, b, c, F4, 72, 0 ); R( c, d, e, a, b, F4, 73, 0 ); R( b, c, d, e, a, F4, 74, 0 ); R( a, b, c, d, e, F4, 75, 0 ); R( e, a, b, c, d, F4, 76, 0 ); R( d, e, a, b, c, F4, 77, 0 ); R( c, d, e, a, b, F4, 78, 0 ); addl state_h0(RSTATE), a; R( b, c, d, e, a, F4, 79, 0 ); addl ne, a; xorl ne, ne; /* Update the chaining variables. */ addl state_h3(RSTATE), d; addl state_h2(RSTATE), c; addl state_h1(RSTATE), b; addl state_h4(RSTATE), e; movl d, state_h3(RSTATE); movl c, state_h2(RSTATE); movl b, state_h1(RSTATE); movl a, state_h0(RSTATE); movl e, state_h4(RSTATE); /* Transform 0-47 for block 2. */ R( a, b, c, d, e, F1, 0, 1 ); R( e, a, b, c, d, F1, 1, 1 ); R( d, e, a, b, c, F1, 2, 1 ); R( c, d, e, a, b, F1, 3, 1 ); R( b, c, d, e, a, F1, 4, 1 ); R( a, b, c, d, e, F1, 5, 1 ); R( e, a, b, c, d, F1, 6, 1 ); R( d, e, a, b, c, F1, 7, 1 ); R( c, d, e, a, b, F1, 8, 1 ); R( b, c, d, e, a, F1, 9, 1 ); R( a, b, c, d, e, F1, 10, 1 ); R( e, a, b, c, d, F1, 11, 1 ); R( d, e, a, b, c, F1, 12, 1 ); R( c, d, e, a, b, F1, 13, 1 ); R( b, c, d, e, a, F1, 14, 1 ); R( a, b, c, d, e, F1, 15, 1 ); R( e, a, b, c, d, F1, 16, 1 ); R( d, e, a, b, c, F1, 17, 1 ); R( c, d, e, a, b, F1, 18, 1 ); R( b, c, d, e, a, F1, 19, 1 ); R( a, b, c, d, e, F2, 20, 1 ); R( e, a, b, c, d, F2, 21, 1 ); R( d, e, a, b, c, F2, 22, 1 ); R( c, d, e, a, b, F2, 23, 1 ); R( b, c, d, e, a, F2, 24, 1 ); R( a, b, c, d, e, F2, 25, 1 ); R( e, a, b, c, d, F2, 26, 1 ); R( d, e, a, b, c, F2, 27, 1 ); R( c, d, e, a, b, F2, 28, 1 ); R( b, c, d, e, a, F2, 29, 1 ); R( a, b, c, d, e, F2, 30, 1 ); R( e, a, b, c, d, F2, 31, 1 ); R( d, e, a, b, c, F2, 32, 1 ); R( c, d, e, a, b, F2, 33, 1 ); R( b, c, d, e, a, F2, 34, 1 ); R( a, b, c, d, e, F2, 35, 1 ); R( e, a, b, c, d, F2, 36, 1 ); R( d, e, a, b, c, F2, 37, 1 ); R( c, d, e, a, b, F2, 38, 1 ); R( b, c, d, e, a, F2, 39, 1 ); R( a, b, c, d, e, F3, 40, 1 ); R( e, a, b, c, d, F3, 41, 1 ); R( d, e, a, b, c, F3, 42, 1 ); R( c, d, e, a, b, F3, 43, 1 ); R( b, c, d, e, a, F3, 44, 1 ); R( a, b, c, d, e, F3, 45, 1 ); R( e, a, b, c, d, F3, 46, 1 ); R( d, e, a, b, c, F3, 47, 1 ); addq $-2, RNBLKS; jz .Lend; /* Transform 48-79 for block 2 + Precalc 0-31 for next two blocks. 
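Since the schedule for two blocks is produced together, the main loop consumes
two 64-byte blocks per iteration (addq $-2, RNBLKS above); the rounds for the
second block simply re-read the stored W + K values from the second 16-byte half
of each 32-byte WK group on the stack.  A rough C sketch of that driver shape
(the stub functions and all names here are placeholders, not the real code
paths):

    #include <stddef.h>
    #include <stdint.h>

    static void precalc_two_blocks_stub(uint32_t wk[160], const uint8_t *data)
    { (void)wk; (void)data; }
    static void rounds_one_block_stub(uint32_t state[5],
                                      const uint32_t wk[160], int block)
    { (void)state; (void)wk; (void)block; }

    static void transform_2x(uint32_t state[5], const uint8_t *data,
                             size_t nblks)
    {
        uint32_t wk[160];                        // 80 rounds x 2 blocks of W + K
        precalc_two_blocks_stub(wk, data);       // blocks 1 and 2
        while (nblks) {
            data += 2 * 64;
            rounds_one_block_stub(state, wk, 0); // block 1, low lanes
            rounds_one_block_stub(state, wk, 1); // block 2, high lanes
            nblks -= 2;
            if (nblks)                           // overlapped with the rounds
                precalc_two_blocks_stub(wk, data);
        }
    }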
*/ R( c, d, e, a, b, F3, 48, 1 ); W_PRECALC_00_15_0(0, W0, Wtmp0); R( b, c, d, e, a, F3, 49, 1 ); W_PRECALC_00_15_1(1, W0, Wtmp0); R( a, b, c, d, e, F3, 50, 1 ); W_PRECALC_00_15_2(2, W0, Wtmp0, K1); R( e, a, b, c, d, F3, 51, 1 ); W_PRECALC_00_15_3(3, W0, Wtmp0); R( d, e, a, b, c, F3, 52, 1 ); W_PRECALC_00_15_0(4, W7, Wtmp0); R( c, d, e, a, b, F3, 53, 1 ); W_PRECALC_00_15_1(5, W7, Wtmp0); R( b, c, d, e, a, F3, 54, 1 ); W_PRECALC_00_15_2(6, W7, Wtmp0, K1); R( a, b, c, d, e, F3, 55, 1 ); W_PRECALC_00_15_3(7, W7, Wtmp0); R( e, a, b, c, d, F3, 56, 1 ); W_PRECALC_00_15_0(8, W6, Wtmp0); R( d, e, a, b, c, F3, 57, 1 ); W_PRECALC_00_15_1(9, W6, Wtmp0); R( c, d, e, a, b, F3, 58, 1 ); W_PRECALC_00_15_2(10, W6, Wtmp0, K1); R( b, c, d, e, a, F3, 59, 1 ); W_PRECALC_00_15_3(11, W6, Wtmp0); R( a, b, c, d, e, F4, 60, 1 ); W_PRECALC_00_15_0(12, W5, Wtmp0); R( e, a, b, c, d, F4, 61, 1 ); W_PRECALC_00_15_1(13, W5, Wtmp0); R( d, e, a, b, c, F4, 62, 1 ); W_PRECALC_00_15_2(14, W5, Wtmp0, K1); R( c, d, e, a, b, F4, 63, 1 ); W_PRECALC_00_15_3(15, W5, Wtmp0); R( b, c, d, e, a, F4, 64, 1 ); W_PRECALC_16_31_0(16, W4, W5, W6, W7, W0, Wtmp0, Wtmp1); R( a, b, c, d, e, F4, 65, 1 ); W_PRECALC_16_31_1(17, W4, W5, W6, W7, W0, Wtmp0, Wtmp1); R( e, a, b, c, d, F4, 66, 1 ); W_PRECALC_16_31_2(18, W4, W5, W6, W7, W0, Wtmp0, Wtmp1); R( d, e, a, b, c, F4, 67, 1 ); W_PRECALC_16_31_3(19, W4, W5, W6, W7, W0, Wtmp0, Wtmp1, K1); R( c, d, e, a, b, F4, 68, 1 ); W_PRECALC_16_31_0(20, W3, W4, W5, W6, W7, Wtmp0, Wtmp1); R( b, c, d, e, a, F4, 69, 1 ); W_PRECALC_16_31_1(21, W3, W4, W5, W6, W7, Wtmp0, Wtmp1); R( a, b, c, d, e, F4, 70, 1 ); W_PRECALC_16_31_2(22, W3, W4, W5, W6, W7, Wtmp0, Wtmp1); R( e, a, b, c, d, F4, 71, 1 ); W_PRECALC_16_31_3(23, W3, W4, W5, W6, W7, Wtmp0, Wtmp1, K2); R( d, e, a, b, c, F4, 72, 1 ); W_PRECALC_16_31_0(24, W2, W3, W4, W5, W6, Wtmp0, Wtmp1); R( c, d, e, a, b, F4, 73, 1 ); W_PRECALC_16_31_1(25, W2, W3, W4, W5, W6, Wtmp0, Wtmp1); R( b, c, d, e, a, F4, 74, 1 ); W_PRECALC_16_31_2(26, W2, W3, W4, W5, W6, Wtmp0, Wtmp1); R( a, b, c, d, e, F4, 75, 1 ); W_PRECALC_16_31_3(27, W2, W3, W4, W5, W6, Wtmp0, Wtmp1, K2); R( e, a, b, c, d, F4, 76, 1 ); W_PRECALC_16_31_0(28, W1, W2, W3, W4, W5, Wtmp0, Wtmp1); R( d, e, a, b, c, F4, 77, 1 ); W_PRECALC_16_31_1(29, W1, W2, W3, W4, W5, Wtmp0, Wtmp1); R( c, d, e, a, b, F4, 78, 1 ); W_PRECALC_16_31_2(30, W1, W2, W3, W4, W5, Wtmp0, Wtmp1); addl state_h0(RSTATE), a; W_PRECALC_16_31_3(31, W1, W2, W3, W4, W5, Wtmp0, Wtmp1, K2); R( b, c, d, e, a, F4, 79, 1 ); addl ne, a; xorl ne, ne; /* Update the chaining variables. 
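The feed-forward below mirrors the one after block 1.  Once the last two blocks
have been processed, the .Lend path further below first clears every vector
register with vzeroall and then overwrites the whole on-stack W + K area with
the zeroed ymm0, since that buffer is derived from the input data.  In C the
equivalent would be an explicit wipe that the compiler may not drop; a minimal
sketch with a placeholder name:

    #include <stddef.h>
    #include <stdint.h>

    // Overwrite a sensitive buffer; the volatile qualifier keeps the
    // stores from being optimized away like a plain memset might be.
    static void wipe(volatile uint32_t *buf, size_t nwords)
    {
        for (size_t i = 0; i < nwords; i++)
            buf[i] = 0;
    }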
*/ addl state_h3(RSTATE), d; addl state_h2(RSTATE), c; addl state_h1(RSTATE), b; addl state_h4(RSTATE), e; movl d, state_h3(RSTATE); movl c, state_h2(RSTATE); movl b, state_h1(RSTATE); movl a, state_h0(RSTATE); movl e, state_h4(RSTATE); jmp .Loop; .align 16 .Lend: vzeroall; /* Transform 48-79 for block 2 + burn stack */ R( c, d, e, a, b, F3, 48, 1 ); R( b, c, d, e, a, F3, 49, 1 ); R( a, b, c, d, e, F3, 50, 1 ); R( e, a, b, c, d, F3, 51, 1 ); R( d, e, a, b, c, F3, 52, 1 ); R( c, d, e, a, b, F3, 53, 1 ); R( b, c, d, e, a, F3, 54, 1 ); R( a, b, c, d, e, F3, 55, 1 ); R( e, a, b, c, d, F3, 56, 1 ); R( d, e, a, b, c, F3, 57, 1 ); R( c, d, e, a, b, F3, 58, 1 ); R( b, c, d, e, a, F3, 59, 1 ); R( a, b, c, d, e, F4, 60, 1 ); vmovdqa %ymm0, (0*32)(%rsp); R( e, a, b, c, d, F4, 61, 1 ); vmovdqa %ymm0, (1*32)(%rsp); R( d, e, a, b, c, F4, 62, 1 ); vmovdqa %ymm0, (2*32)(%rsp); R( c, d, e, a, b, F4, 63, 1 ); vmovdqa %ymm0, (3*32)(%rsp); R( b, c, d, e, a, F4, 64, 1 ); vmovdqa %ymm0, (4*32)(%rsp); R( a, b, c, d, e, F4, 65, 1 ); vmovdqa %ymm0, (5*32)(%rsp); R( e, a, b, c, d, F4, 66, 1 ); vmovdqa %ymm0, (6*32)(%rsp); R( d, e, a, b, c, F4, 67, 1 ); vmovdqa %ymm0, (7*32)(%rsp); R( c, d, e, a, b, F4, 68, 1 ); vmovdqa %ymm0, (8*32)(%rsp); R( b, c, d, e, a, F4, 69, 1 ); vmovdqa %ymm0, (9*32)(%rsp); R( a, b, c, d, e, F4, 70, 1 ); vmovdqa %ymm0, (10*32)(%rsp); R( e, a, b, c, d, F4, 71, 1 ); vmovdqa %ymm0, (11*32)(%rsp); R( d, e, a, b, c, F4, 72, 1 ); vmovdqa %ymm0, (12*32)(%rsp); R( c, d, e, a, b, F4, 73, 1 ); vmovdqa %ymm0, (13*32)(%rsp); R( b, c, d, e, a, F4, 74, 1 ); vmovdqa %ymm0, (14*32)(%rsp); R( a, b, c, d, e, F4, 75, 1 ); vmovdqa %ymm0, (15*32)(%rsp); R( e, a, b, c, d, F4, 76, 1 ); vmovdqa %ymm0, (16*32)(%rsp); R( d, e, a, b, c, F4, 77, 1 ); vmovdqa %ymm0, (17*32)(%rsp); R( c, d, e, a, b, F4, 78, 1 ); vmovdqa %ymm0, (18*32)(%rsp); addl state_h0(RSTATE), a; R( b, c, d, e, a, F4, 79, 1 ); addl ne, a; xorl ne, ne; /* WK_STACK_WORDS*4/32-1 = 19 */ vmovdqa %ymm0, (19*32)(%rsp); /* Update the chaining variables. */ addl state_h3(RSTATE), d; addl state_h2(RSTATE), c; addl state_h1(RSTATE), b; addl state_h4(RSTATE), e; movl d, state_h3(RSTATE); movl c, state_h2(RSTATE); movl b, state_h1(RSTATE); movl a, state_h0(RSTATE); movl e, state_h4(RSTATE); movq ROLDSTACK, %rsp; CFI_REGISTER(ROLDSTACK, %rsp); CFI_DEF_CFA_REGISTER(%rsp); popq %r12; CFI_POP(%r12); popq %rbp; CFI_POP(%rbp); popq %rbx; CFI_POP(%rbx); /* stack already burned */ xorl %eax, %eax; ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_sha1_transform_amd64_avx2_bmi2, .-_gcry_sha1_transform_amd64_avx2_bmi2;) #endif #endif diff --git a/cipher/sha1-ssse3-amd64.S b/cipher/sha1-ssse3-amd64.S index f09b1de1..53a24431 100644 --- a/cipher/sha1-ssse3-amd64.S +++ b/cipher/sha1-ssse3-amd64.S @@ -1,437 +1,442 @@ /* sha1-ssse3-amd64.S - Intel SSSE3 accelerated SHA-1 transform function * Copyright (C) 2013 Jussi Kivilinna * * Based on sha1.c: * Copyright (C) 1998, 2001, 2002, 2003, 2008 Free Software Foundation, Inc. * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . */ /* * Intel SSSE3 accelerated SHA-1 implementation based on white paper: * "Improving the Performance of the Secure Hash Algorithm (SHA-1)" * http://software.intel.com/en-us/articles/improving-the-performance-of-the-secure-hash-algorithm-1 */ #ifdef __x86_64__ #include #if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && \ defined(HAVE_GCC_INLINE_ASM_SSSE3) && defined(USE_SHA1) #include "asm-common-amd64.h" /* Context structure */ #define state_h0 0 #define state_h1 4 #define state_h2 8 #define state_h3 12 #define state_h4 16 /* Constants */ -.text +SECTION_RODATA + +ELF(.type _sha1_ssse3_consts,@object) +_sha1_ssse3_consts: + #define K1 0x5A827999 #define K2 0x6ED9EBA1 #define K3 0x8F1BBCDC #define K4 0xCA62C1D6 .align 16 .LK_XMM: .LK1: .long K1, K1, K1, K1 .LK2: .long K2, K2, K2, K2 .LK3: .long K3, K3, K3, K3 .LK4: .long K4, K4, K4, K4 .Lbswap_shufb_ctl: .long 0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f /* Register macros */ #define RSTATE %r8 #define RDATA %r9 #define ROLDSTACK %r10 #define RNBLKS %r11 #define a %eax #define b %ebx #define c %ecx #define d %edx #define e %edi #define RT0 %esi #define RT1 %ebp #define Wtmp0 %xmm0 #define Wtmp1 %xmm1 #define W0 %xmm2 #define W1 %xmm3 #define W2 %xmm4 #define W3 %xmm5 #define W4 %xmm6 #define W5 %xmm7 #define W6 %xmm8 #define W7 %xmm9 #define BSWAP_REG %xmm10 /* Round function macros. */ #define WK(i) (((i) & 15) * 4)(%rsp) #define R_F1(a,b,c,d,e,i) \ movl c, RT0; \ addl WK(i), e; \ xorl d, RT0; \ movl a, RT1; \ andl b, RT0; \ roll $30, b; \ xorl d, RT0; \ leal (RT0,e), e; \ roll $5, RT1; \ addl RT1, e; #define R_F2(a,b,c,d,e,i) \ movl c, RT0; \ addl WK(i), e; \ xorl b, RT0; \ roll $30, b; \ xorl d, RT0; \ movl a, RT1; \ leal (RT0,e), e; \ roll $5, RT1; \ addl RT1, e; #define R_F3(a,b,c,d,e,i) \ movl c, RT0; \ movl b, RT1; \ xorl b, RT0; \ andl c, RT1; \ andl d, RT0; \ addl RT1, e; \ addl WK(i), e; \ roll $30, b; \ movl a, RT1; \ leal (RT0,e), e; \ roll $5, RT1; \ addl RT1, e; #define R_F4(a,b,c,d,e,i) R_F2(a,b,c,d,e,i) #define R(a,b,c,d,e,f,i) \ R_##f(a,b,c,d,e,i) /* Input expansion macros. 
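One detail of the round macros defined just above is worth spelling out: R_F3
computes the majority function of rounds 40..59 with an addition in place of
the final OR, which works because the two partial terms can never have a set
bit in common.  A small C sketch showing both forms (illustrative only;
maj_reference and maj_as_scheduled are placeholder names):

    #include <stdint.h>

    // Textbook form: Maj(b,c,d) = (b & c) | (b & d) | (c & d).
    static uint32_t maj_reference(uint32_t b, uint32_t c, uint32_t d)
    {
        return (b & c) | (b & d) | (c & d);
    }

    // Form used by R_F3: (b & c) covers "both b and c set", while
    // (d & (b ^ c)) covers "exactly one of b, c set, plus d"; the terms
    // are disjoint, so + and | give the same result.
    static uint32_t maj_as_scheduled(uint32_t b, uint32_t c, uint32_t d)
    {
        return (b & c) + (d & (b ^ c));
    }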
*/ #define W_PRECALC_00_15_0(i, W, tmp0) \ movdqu (4*(i))(RDATA), tmp0; #define W_PRECALC_00_15_1(i, W, tmp0) \ pshufb BSWAP_REG, tmp0; \ movdqa tmp0, W; #define W_PRECALC_00_15_2(i, W, tmp0) \ paddd (.LK_XMM + ((i)/20)*16) rRIP, tmp0; #define W_PRECALC_00_15_3(i, W, tmp0) \ movdqa tmp0, WK(i&~3); #define W_PRECALC_16_31_0(i, W, W_m04, W_m08, W_m12, W_m16, tmp0, tmp1) \ movdqa W_m12, W; \ palignr $8, W_m16, W; \ movdqa W_m04, tmp0; \ psrldq $4, tmp0; \ pxor W_m08, W; #define W_PRECALC_16_31_1(i, W, W_m04, W_m08, W_m12, W_m16, tmp0, tmp1) \ pxor W_m16, tmp0; \ pxor tmp0, W; \ movdqa W, tmp1; \ movdqa W, tmp0; \ pslldq $12, tmp1; #define W_PRECALC_16_31_2(i, W, W_m04, W_m08, W_m12, W_m16, tmp0, tmp1) \ psrld $31, W; \ pslld $1, tmp0; \ por W, tmp0; \ movdqa tmp1, W; \ psrld $30, tmp1; \ pslld $2, W; #define W_PRECALC_16_31_3(i, W, W_m04, W_m08, W_m12, W_m16, tmp0, tmp1) \ pxor W, tmp0; \ pxor tmp1, tmp0; \ movdqa tmp0, W; \ paddd (.LK_XMM + ((i)/20)*16) rRIP, tmp0; \ movdqa tmp0, WK((i)&~3); #define W_PRECALC_32_79_0(i, W, W_m04, W_m08, W_m12, W_m16, W_m20, W_m24, W_m28, tmp0) \ movdqa W_m04, tmp0; \ pxor W_m28, W; \ palignr $8, W_m08, tmp0; #define W_PRECALC_32_79_1(i, W, W_m04, W_m08, W_m12, W_m16, W_m20, W_m24, W_m28, tmp0) \ pxor W_m16, W; \ pxor tmp0, W; \ movdqa W, tmp0; #define W_PRECALC_32_79_2(i, W, W_m04, W_m08, W_m12, W_m16, W_m20, W_m24, W_m28, tmp0) \ psrld $30, W; \ pslld $2, tmp0; \ por W, tmp0; #define W_PRECALC_32_79_3(i, W, W_m04, W_m08, W_m12, W_m16, W_m20, W_m24, W_m28, tmp0) \ movdqa tmp0, W; \ paddd (.LK_XMM + ((i)/20)*16) rRIP, tmp0; \ movdqa tmp0, WK((i)&~3); #define CLEAR_REG(reg) pxor reg, reg; +.text /* * Transform nblks*64 bytes (nblks*16 32-bit words) at DATA. * * unsigned int * _gcry_sha1_transform_amd64_ssse3 (void *ctx, const unsigned char *data, * size_t nblks) */ .globl _gcry_sha1_transform_amd64_ssse3 ELF(.type _gcry_sha1_transform_amd64_ssse3,@function) .align 16 _gcry_sha1_transform_amd64_ssse3: /* input: * %rdi: ctx, CTX * %rsi: data (64*nblks bytes) * %rdx: nblks */ CFI_STARTPROC(); xorl %eax, %eax; cmpq $0, %rdx; jz .Lret; movq %rdx, RNBLKS; movq %rdi, RSTATE; movq %rsi, RDATA; pushq %rbx; CFI_PUSH(%rbx); pushq %rbp; CFI_PUSH(%rbp); movq %rsp, ROLDSTACK; CFI_DEF_CFA_REGISTER(ROLDSTACK); subq $(16*4), %rsp; andq $(~31), %rsp; /* Get the values of the chaining variables. */ movl state_h0(RSTATE), a; movl state_h1(RSTATE), b; movl state_h2(RSTATE), c; movl state_h3(RSTATE), d; movl state_h4(RSTATE), e; movdqa .Lbswap_shufb_ctl rRIP, BSWAP_REG; /* Precalc 0-15. */ W_PRECALC_00_15_0(0, W0, Wtmp0); W_PRECALC_00_15_1(1, W0, Wtmp0); W_PRECALC_00_15_2(2, W0, Wtmp0); W_PRECALC_00_15_3(3, W0, Wtmp0); W_PRECALC_00_15_0(4, W7, Wtmp0); W_PRECALC_00_15_1(5, W7, Wtmp0); W_PRECALC_00_15_2(6, W7, Wtmp0); W_PRECALC_00_15_3(7, W7, Wtmp0); W_PRECALC_00_15_0(8, W6, Wtmp0); W_PRECALC_00_15_1(9, W6, Wtmp0); W_PRECALC_00_15_2(10, W6, Wtmp0); W_PRECALC_00_15_3(11, W6, Wtmp0); W_PRECALC_00_15_0(12, W5, Wtmp0); W_PRECALC_00_15_1(13, W5, Wtmp0); W_PRECALC_00_15_2(14, W5, Wtmp0); W_PRECALC_00_15_3(15, W5, Wtmp0); .align 8 .Loop: addq $64, RDATA; /* Transform 0-15 + Precalc 16-31. 
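The interleaving below is a 16-deep software pipeline: while round t executes,
the vector code is producing the W + K value for round t + 16 into the very
stack slot that round t has just consumed (WK(i) addresses ((i) & 15) words
into the ring).  A schematic C sketch of that shape (illustrative only;
next_wk_stub and pipeline_shape are placeholders, not the real schedule):

    #include <stdint.h>

    static uint32_t next_wk_stub(int t) { return (uint32_t)t; }

    static void pipeline_shape(uint32_t wk_ring[16])
    {
        for (int t = 0; t < 80; t++) {
            uint32_t wk = wk_ring[t & 15];       // consumed by round t
            (void)wk;                            // ... the scalar round uses it
            if (t + 16 < 80)                     // refill for round t + 16
                wk_ring[t & 15] = next_wk_stub(t + 16);
        }
    }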
*/ R( a, b, c, d, e, F1, 0 ); W_PRECALC_16_31_0(16, W4, W5, W6, W7, W0, Wtmp0, Wtmp1); R( e, a, b, c, d, F1, 1 ); W_PRECALC_16_31_1(17, W4, W5, W6, W7, W0, Wtmp0, Wtmp1); R( d, e, a, b, c, F1, 2 ); W_PRECALC_16_31_2(18, W4, W5, W6, W7, W0, Wtmp0, Wtmp1); R( c, d, e, a, b, F1, 3 ); W_PRECALC_16_31_3(19, W4, W5, W6, W7, W0, Wtmp0, Wtmp1); R( b, c, d, e, a, F1, 4 ); W_PRECALC_16_31_0(20, W3, W4, W5, W6, W7, Wtmp0, Wtmp1); R( a, b, c, d, e, F1, 5 ); W_PRECALC_16_31_1(21, W3, W4, W5, W6, W7, Wtmp0, Wtmp1); R( e, a, b, c, d, F1, 6 ); W_PRECALC_16_31_2(22, W3, W4, W5, W6, W7, Wtmp0, Wtmp1); R( d, e, a, b, c, F1, 7 ); W_PRECALC_16_31_3(23, W3, W4, W5, W6, W7, Wtmp0, Wtmp1); R( c, d, e, a, b, F1, 8 ); W_PRECALC_16_31_0(24, W2, W3, W4, W5, W6, Wtmp0, Wtmp1); R( b, c, d, e, a, F1, 9 ); W_PRECALC_16_31_1(25, W2, W3, W4, W5, W6, Wtmp0, Wtmp1); R( a, b, c, d, e, F1, 10 ); W_PRECALC_16_31_2(26, W2, W3, W4, W5, W6, Wtmp0, Wtmp1); R( e, a, b, c, d, F1, 11 ); W_PRECALC_16_31_3(27, W2, W3, W4, W5, W6, Wtmp0, Wtmp1); R( d, e, a, b, c, F1, 12 ); W_PRECALC_16_31_0(28, W1, W2, W3, W4, W5, Wtmp0, Wtmp1); R( c, d, e, a, b, F1, 13 ); W_PRECALC_16_31_1(29, W1, W2, W3, W4, W5, Wtmp0, Wtmp1); R( b, c, d, e, a, F1, 14 ); W_PRECALC_16_31_2(30, W1, W2, W3, W4, W5, Wtmp0, Wtmp1); R( a, b, c, d, e, F1, 15 ); W_PRECALC_16_31_3(31, W1, W2, W3, W4, W5, Wtmp0, Wtmp1); /* Transform 16-63 + Precalc 32-79. */ R( e, a, b, c, d, F1, 16 ); W_PRECALC_32_79_0(32, W0, W1, W2, W3, W4, W5, W6, W7, Wtmp0); R( d, e, a, b, c, F1, 17 ); W_PRECALC_32_79_1(33, W0, W1, W2, W3, W4, W5, W6, W7, Wtmp0); R( c, d, e, a, b, F1, 18 ); W_PRECALC_32_79_2(34, W0, W1, W2, W3, W4, W5, W6, W7, Wtmp0); R( b, c, d, e, a, F1, 19 ); W_PRECALC_32_79_3(35, W0, W1, W2, W3, W4, W5, W6, W7, Wtmp0); R( a, b, c, d, e, F2, 20 ); W_PRECALC_32_79_0(36, W7, W0, W1, W2, W3, W4, W5, W6, Wtmp0); R( e, a, b, c, d, F2, 21 ); W_PRECALC_32_79_1(37, W7, W0, W1, W2, W3, W4, W5, W6, Wtmp0); R( d, e, a, b, c, F2, 22 ); W_PRECALC_32_79_2(38, W7, W0, W1, W2, W3, W4, W5, W6, Wtmp0); R( c, d, e, a, b, F2, 23 ); W_PRECALC_32_79_3(39, W7, W0, W1, W2, W3, W4, W5, W6, Wtmp0); R( b, c, d, e, a, F2, 24 ); W_PRECALC_32_79_0(40, W6, W7, W0, W1, W2, W3, W4, W5, Wtmp0); R( a, b, c, d, e, F2, 25 ); W_PRECALC_32_79_1(41, W6, W7, W0, W1, W2, W3, W4, W5, Wtmp0); R( e, a, b, c, d, F2, 26 ); W_PRECALC_32_79_2(42, W6, W7, W0, W1, W2, W3, W4, W5, Wtmp0); R( d, e, a, b, c, F2, 27 ); W_PRECALC_32_79_3(43, W6, W7, W0, W1, W2, W3, W4, W5, Wtmp0); R( c, d, e, a, b, F2, 28 ); W_PRECALC_32_79_0(44, W5, W6, W7, W0, W1, W2, W3, W4, Wtmp0); R( b, c, d, e, a, F2, 29 ); W_PRECALC_32_79_1(45, W5, W6, W7, W0, W1, W2, W3, W4, Wtmp0); R( a, b, c, d, e, F2, 30 ); W_PRECALC_32_79_2(46, W5, W6, W7, W0, W1, W2, W3, W4, Wtmp0); R( e, a, b, c, d, F2, 31 ); W_PRECALC_32_79_3(47, W5, W6, W7, W0, W1, W2, W3, W4, Wtmp0); R( d, e, a, b, c, F2, 32 ); W_PRECALC_32_79_0(48, W4, W5, W6, W7, W0, W1, W2, W3, Wtmp0); R( c, d, e, a, b, F2, 33 ); W_PRECALC_32_79_1(49, W4, W5, W6, W7, W0, W1, W2, W3, Wtmp0); R( b, c, d, e, a, F2, 34 ); W_PRECALC_32_79_2(50, W4, W5, W6, W7, W0, W1, W2, W3, Wtmp0); R( a, b, c, d, e, F2, 35 ); W_PRECALC_32_79_3(51, W4, W5, W6, W7, W0, W1, W2, W3, Wtmp0); R( e, a, b, c, d, F2, 36 ); W_PRECALC_32_79_0(52, W3, W4, W5, W6, W7, W0, W1, W2, Wtmp0); R( d, e, a, b, c, F2, 37 ); W_PRECALC_32_79_1(53, W3, W4, W5, W6, W7, W0, W1, W2, Wtmp0); R( c, d, e, a, b, F2, 38 ); W_PRECALC_32_79_2(54, W3, W4, W5, W6, W7, W0, W1, W2, Wtmp0); R( b, c, d, e, a, F2, 39 ); W_PRECALC_32_79_3(55, W3, W4, W5, W6, W7, W0, W1, W2, Wtmp0); R( 
a, b, c, d, e, F3, 40 ); W_PRECALC_32_79_0(56, W2, W3, W4, W5, W6, W7, W0, W1, Wtmp0); R( e, a, b, c, d, F3, 41 ); W_PRECALC_32_79_1(57, W2, W3, W4, W5, W6, W7, W0, W1, Wtmp0); R( d, e, a, b, c, F3, 42 ); W_PRECALC_32_79_2(58, W2, W3, W4, W5, W6, W7, W0, W1, Wtmp0); R( c, d, e, a, b, F3, 43 ); W_PRECALC_32_79_3(59, W2, W3, W4, W5, W6, W7, W0, W1, Wtmp0); R( b, c, d, e, a, F3, 44 ); W_PRECALC_32_79_0(60, W1, W2, W3, W4, W5, W6, W7, W0, Wtmp0); R( a, b, c, d, e, F3, 45 ); W_PRECALC_32_79_1(61, W1, W2, W3, W4, W5, W6, W7, W0, Wtmp0); R( e, a, b, c, d, F3, 46 ); W_PRECALC_32_79_2(62, W1, W2, W3, W4, W5, W6, W7, W0, Wtmp0); R( d, e, a, b, c, F3, 47 ); W_PRECALC_32_79_3(63, W1, W2, W3, W4, W5, W6, W7, W0, Wtmp0); R( c, d, e, a, b, F3, 48 ); W_PRECALC_32_79_0(64, W0, W1, W2, W3, W4, W5, W6, W7, Wtmp0); R( b, c, d, e, a, F3, 49 ); W_PRECALC_32_79_1(65, W0, W1, W2, W3, W4, W5, W6, W7, Wtmp0); R( a, b, c, d, e, F3, 50 ); W_PRECALC_32_79_2(66, W0, W1, W2, W3, W4, W5, W6, W7, Wtmp0); R( e, a, b, c, d, F3, 51 ); W_PRECALC_32_79_3(67, W0, W1, W2, W3, W4, W5, W6, W7, Wtmp0); R( d, e, a, b, c, F3, 52 ); W_PRECALC_32_79_0(68, W7, W0, W1, W2, W3, W4, W5, W6, Wtmp0); R( c, d, e, a, b, F3, 53 ); W_PRECALC_32_79_1(69, W7, W0, W1, W2, W3, W4, W5, W6, Wtmp0); R( b, c, d, e, a, F3, 54 ); W_PRECALC_32_79_2(70, W7, W0, W1, W2, W3, W4, W5, W6, Wtmp0); R( a, b, c, d, e, F3, 55 ); W_PRECALC_32_79_3(71, W7, W0, W1, W2, W3, W4, W5, W6, Wtmp0); R( e, a, b, c, d, F3, 56 ); W_PRECALC_32_79_0(72, W6, W7, W0, W1, W2, W3, W4, W5, Wtmp0); R( d, e, a, b, c, F3, 57 ); W_PRECALC_32_79_1(73, W6, W7, W0, W1, W2, W3, W4, W5, Wtmp0); R( c, d, e, a, b, F3, 58 ); W_PRECALC_32_79_2(74, W6, W7, W0, W1, W2, W3, W4, W5, Wtmp0); R( b, c, d, e, a, F3, 59 ); W_PRECALC_32_79_3(75, W6, W7, W0, W1, W2, W3, W4, W5, Wtmp0); R( a, b, c, d, e, F4, 60 ); W_PRECALC_32_79_0(76, W5, W6, W7, W0, W1, W2, W3, W4, Wtmp0); R( e, a, b, c, d, F4, 61 ); W_PRECALC_32_79_1(77, W5, W6, W7, W0, W1, W2, W3, W4, Wtmp0); R( d, e, a, b, c, F4, 62 ); W_PRECALC_32_79_2(78, W5, W6, W7, W0, W1, W2, W3, W4, Wtmp0); R( c, d, e, a, b, F4, 63 ); W_PRECALC_32_79_3(79, W5, W6, W7, W0, W1, W2, W3, W4, Wtmp0); decq RNBLKS; jz .Lend; /* Transform 64-79 + Precalc 0-15 of next block. */ R( b, c, d, e, a, F4, 64 ); W_PRECALC_00_15_0(0, W0, Wtmp0); R( a, b, c, d, e, F4, 65 ); W_PRECALC_00_15_1(1, W0, Wtmp0); R( e, a, b, c, d, F4, 66 ); W_PRECALC_00_15_2(2, W0, Wtmp0); R( d, e, a, b, c, F4, 67 ); W_PRECALC_00_15_3(3, W0, Wtmp0); R( c, d, e, a, b, F4, 68 ); W_PRECALC_00_15_0(4, W7, Wtmp0); R( b, c, d, e, a, F4, 69 ); W_PRECALC_00_15_1(5, W7, Wtmp0); R( a, b, c, d, e, F4, 70 ); W_PRECALC_00_15_2(6, W7, Wtmp0); R( e, a, b, c, d, F4, 71 ); W_PRECALC_00_15_3(7, W7, Wtmp0); R( d, e, a, b, c, F4, 72 ); W_PRECALC_00_15_0(8, W6, Wtmp0); R( c, d, e, a, b, F4, 73 ); W_PRECALC_00_15_1(9, W6, Wtmp0); R( b, c, d, e, a, F4, 74 ); W_PRECALC_00_15_2(10, W6, Wtmp0); R( a, b, c, d, e, F4, 75 ); W_PRECALC_00_15_3(11, W6, Wtmp0); R( e, a, b, c, d, F4, 76 ); W_PRECALC_00_15_0(12, W5, Wtmp0); R( d, e, a, b, c, F4, 77 ); W_PRECALC_00_15_1(13, W5, Wtmp0); R( c, d, e, a, b, F4, 78 ); addl state_h0(RSTATE), a; W_PRECALC_00_15_2(14, W5, Wtmp0); R( b, c, d, e, a, F4, 79 ); W_PRECALC_00_15_3(15, W5, Wtmp0); /* Update the chaining variables. 
*/ addl state_h3(RSTATE), d; addl state_h2(RSTATE), c; addl state_h1(RSTATE), b; addl state_h4(RSTATE), e; movl d, state_h3(RSTATE); movl c, state_h2(RSTATE); movl b, state_h1(RSTATE); movl a, state_h0(RSTATE); movl e, state_h4(RSTATE); jmp .Loop; .align 16 .Lend: /* Transform 64-79 + Clear XMM registers + Burn stack. */ R( b, c, d, e, a, F4, 64 ); CLEAR_REG(BSWAP_REG); R( a, b, c, d, e, F4, 65 ); CLEAR_REG(Wtmp0); R( e, a, b, c, d, F4, 66 ); CLEAR_REG(Wtmp1); R( d, e, a, b, c, F4, 67 ); CLEAR_REG(W0); R( c, d, e, a, b, F4, 68 ); CLEAR_REG(W1); R( b, c, d, e, a, F4, 69 ); CLEAR_REG(W2); R( a, b, c, d, e, F4, 70 ); CLEAR_REG(W3); R( e, a, b, c, d, F4, 71 ); CLEAR_REG(W4); R( d, e, a, b, c, F4, 72 ); CLEAR_REG(W5); R( c, d, e, a, b, F4, 73 ); CLEAR_REG(W6); R( b, c, d, e, a, F4, 74 ); CLEAR_REG(W7); R( a, b, c, d, e, F4, 75 ); R( e, a, b, c, d, F4, 76 ); movdqa Wtmp0, (0*16)(%rsp); R( d, e, a, b, c, F4, 77 ); movdqa Wtmp0, (1*16)(%rsp); R( c, d, e, a, b, F4, 78 ); movdqa Wtmp0, (2*16)(%rsp); addl state_h0(RSTATE), a; R( b, c, d, e, a, F4, 79 ); /* 16*4/16-1 = 3 */ movdqa Wtmp0, (3*16)(%rsp); /* Update the chaining variables. */ addl state_h3(RSTATE), d; addl state_h2(RSTATE), c; addl state_h1(RSTATE), b; addl state_h4(RSTATE), e; movl d, state_h3(RSTATE); movl c, state_h2(RSTATE); movl b, state_h1(RSTATE); movl a, state_h0(RSTATE); movl e, state_h4(RSTATE); movq ROLDSTACK, %rsp; CFI_REGISTER(ROLDSTACK, %rsp); CFI_DEF_CFA_REGISTER(%rsp); popq %rbp; CFI_POP(%rbp); popq %rbx; CFI_POP(%rbx); /* stack already burned */ xorl %eax, %eax; .Lret: ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_sha1_transform_amd64_ssse3, .-_gcry_sha1_transform_amd64_ssse3;) #endif #endif diff --git a/cipher/sha256-avx-amd64.S b/cipher/sha256-avx-amd64.S index be8a799d..8cfd0880 100644 --- a/cipher/sha256-avx-amd64.S +++ b/cipher/sha256-avx-amd64.S @@ -1,506 +1,511 @@ /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; Copyright (c) 2012, Intel Corporation ; ; All rights reserved. ; ; Redistribution and use in source and binary forms, with or without ; modification, are permitted provided that the following conditions are ; met: ; ; * Redistributions of source code must retain the above copyright ; notice, this list of conditions and the following disclaimer. ; ; * Redistributions in binary form must reproduce the above copyright ; notice, this list of conditions and the following disclaimer in the ; documentation and/or other materials provided with the ; distribution. ; ; * Neither the name of the Intel Corporation nor the names of its ; contributors may be used to endorse or promote products derived from ; this software without specific prior written permission. ; ; ; THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY ; EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR ; PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR ; CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, ; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, ; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ; LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; ; This code is described in an Intel White-Paper: ; "Fast SHA-256 Implementations on Intel Architecture Processors" ; ; To find it, surf to http://www.intel.com/p/en_US/embedded ; and search for that title. ; The paper is expected to be released roughly at the end of April, 2012 ; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; This code schedules 1 blocks at a time, with 4 lanes per block ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ /* * Conversion to GAS assembly and integration to libgcrypt * by Jussi Kivilinna * * Note: Based on the SSSE3 implementation. */ #ifdef __x86_64 #include #if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && \ defined(HAVE_INTEL_SYNTAX_PLATFORM_AS) && \ defined(HAVE_GCC_INLINE_ASM_AVX) && defined(USE_SHA256) #include "asm-common-amd64.h" .intel_syntax noprefix #define VMOVDQ vmovdqu /* assume buffers not aligned */ #define ROR(p1, p2) \ /* shld is faster than ror on Intel Sandybridge */ \ shld p1, p1, (32 - p2); /*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; Define Macros*/ /* addm [mem], reg * Add reg to mem using reg-mem add and store */ #define addm(p1, p2) \ add p2, p1; \ mov p1, p2; /*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;*/ /* COPY_XMM_AND_BSWAP xmm, [mem], byte_flip_mask * Load xmm with mem and byte swap each dword */ #define COPY_XMM_AND_BSWAP(p1, p2, p3) \ VMOVDQ p1, p2; \ vpshufb p1, p1, p3; /*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;*/ #define X0 xmm4 #define X1 xmm5 #define X2 xmm6 #define X3 xmm7 #define XTMP0 xmm0 #define XTMP1 xmm1 #define XTMP2 xmm2 #define XTMP3 xmm3 #define XTMP4 xmm8 #define XFER xmm9 #define SHUF_00BA xmm10 /* shuffle xBxA -> 00BA */ #define SHUF_DC00 xmm11 /* shuffle xDxC -> DC00 */ #define BYTE_FLIP_MASK xmm12 #define NUM_BLKS rdx /* 3rd arg */ #define CTX rsi /* 2nd arg */ #define INP rdi /* 1st arg */ #define SRND rdi /* clobbers INP */ #define c ecx #define d r8d #define e edx #define TBL rbp #define a eax #define b ebx #define f r9d #define g r10d #define h r11d #define y0 r13d #define y1 r14d #define y2 r15d #define _INP_END_SIZE 8 #define _INP_SIZE 8 #define _XFER_SIZE 8 #define _XMM_SAVE_SIZE 0 /* STACK_SIZE plus pushes must be an odd multiple of 8 */ #define _ALIGN_SIZE 8 #define _INP_END 0 #define _INP (_INP_END + _INP_END_SIZE) #define _XFER (_INP + _INP_SIZE) #define _XMM_SAVE (_XFER + _XFER_SIZE + _ALIGN_SIZE) #define STACK_SIZE (_XMM_SAVE + _XMM_SAVE_SIZE) #define FOUR_ROUNDS_AND_SCHED_0(X0, X1, X2, X3, a, b, c, d, e, f, g, h) \ /* compute s0 four at a time and s1 two at a time */; \ /* compute W[-16] + W[-7] 4 at a time */; \ mov y0, e /* y0 = e */; \ ROR( y0, (25-11)) /* y0 = e >> (25-11) */; \ mov y1, a /* y1 = a */; \ vpalignr XTMP0, X3, X2, 4 /* XTMP0 = W[-7] */; \ ROR( y1, (22-13)) /* y1 = a >> (22-13) */; \ xor y0, e /* y0 = e ^ (e >> (25-11)) */; \ mov y2, f /* y2 = f */; \ ROR( y0, (11-6)) /* y0 = (e >> (11-6)) ^ (e >> (25-6)) */; \ xor y1, a /* y1 = a ^ (a >> (22-13) */; \ xor y2, g /* y2 = f^g */; \ vpaddd XTMP0, XTMP0, X0 /* XTMP0 = W[-7] + W[-16] */; \ xor y0, e /* y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) */; \ and y2, e /* y2 = (f^g)&e */; \ ROR( y1, (13-2)) /* y1 = (a >> (13-2)) ^ (a >> (22-2)) */; \ /* compute s0 */; \ vpalignr XTMP1, X1, X0, 4 /* XTMP1 = W[-15] */; \ xor y1, a /* y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) */; \ ROR( y0, 6) /* y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) */; \ xor y2, g /* y2 = CH = ((f^g)&e)^g */; \ ROR( y1, 2) /* 
y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) */; \ add y2, y0 /* y2 = S1 + CH */; \ add y2, [rsp + _XFER + 0*4] /* y2 = k + w + S1 + CH */; \ mov y0, a /* y0 = a */; \ add h, y2 /* h = h + S1 + CH + k + w */; \ mov y2, a /* y2 = a */; \ vpslld XTMP2, XTMP1, (32-7); \ or y0, c /* y0 = a|c */; \ add d, h /* d = d + h + S1 + CH + k + w */; \ and y2, c /* y2 = a&c */; \ vpsrld XTMP3, XTMP1, 7; \ and y0, b /* y0 = (a|c)&b */; \ add h, y1 /* h = h + S1 + CH + k + w + S0 */; \ vpor XTMP3, XTMP3, XTMP2 /* XTMP1 = W[-15] ror 7 */; \ or y0, y2 /* y0 = MAJ = (a|c)&b)|(a&c) */; \ lea h, [h + y0] /* h = h + S1 + CH + k + w + S0 + MAJ */ #define FOUR_ROUNDS_AND_SCHED_1(X0, X1, X2, X3, a, b, c, d, e, f, g, h) \ mov y0, e /* y0 = e */; \ mov y1, a /* y1 = a */; \ ROR( y0, (25-11)) /* y0 = e >> (25-11) */; \ xor y0, e /* y0 = e ^ (e >> (25-11)) */; \ mov y2, f /* y2 = f */; \ ROR( y1, (22-13)) /* y1 = a >> (22-13) */; \ vpslld XTMP2, XTMP1, (32-18); \ xor y1, a /* y1 = a ^ (a >> (22-13) */; \ ROR( y0, (11-6)) /* y0 = (e >> (11-6)) ^ (e >> (25-6)) */; \ xor y2, g /* y2 = f^g */; \ vpsrld XTMP4, XTMP1, 18; \ ROR( y1, (13-2)) /* y1 = (a >> (13-2)) ^ (a >> (22-2)) */; \ xor y0, e /* y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) */; \ and y2, e /* y2 = (f^g)&e */; \ ROR( y0, 6) /* y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) */; \ vpxor XTMP4, XTMP4, XTMP3; \ xor y1, a /* y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) */; \ xor y2, g /* y2 = CH = ((f^g)&e)^g */; \ vpsrld XTMP1, XTMP1, 3 /* XTMP4 = W[-15] >> 3 */; \ add y2, y0 /* y2 = S1 + CH */; \ add y2, [rsp + _XFER + 1*4] /* y2 = k + w + S1 + CH */; \ ROR( y1, 2) /* y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) */; \ vpxor XTMP1, XTMP1, XTMP2 /* XTMP1 = W[-15] ror 7 ^ W[-15] ror 18 */; \ mov y0, a /* y0 = a */; \ add h, y2 /* h = h + S1 + CH + k + w */; \ mov y2, a /* y2 = a */; \ vpxor XTMP1, XTMP1, XTMP4 /* XTMP1 = s0 */; \ or y0, c /* y0 = a|c */; \ add d, h /* d = d + h + S1 + CH + k + w */; \ and y2, c /* y2 = a&c */; \ /* compute low s1 */; \ vpshufd XTMP2, X3, 0b11111010 /* XTMP2 = W[-2] {BBAA} */; \ and y0, b /* y0 = (a|c)&b */; \ add h, y1 /* h = h + S1 + CH + k + w + S0 */; \ vpaddd XTMP0, XTMP0, XTMP1 /* XTMP0 = W[-16] + W[-7] + s0 */; \ or y0, y2 /* y0 = MAJ = (a|c)&b)|(a&c) */; \ lea h, [h + y0] /* h = h + S1 + CH + k + w + S0 + MAJ */ #define FOUR_ROUNDS_AND_SCHED_2(X0, X1, X2, X3, a, b, c, d, e, f, g, h) \ mov y0, e /* y0 = e */; \ mov y1, a /* y1 = a */; \ ROR( y0, (25-11)) /* y0 = e >> (25-11) */; \ xor y0, e /* y0 = e ^ (e >> (25-11)) */; \ ROR( y1, (22-13)) /* y1 = a >> (22-13) */; \ mov y2, f /* y2 = f */; \ xor y1, a /* y1 = a ^ (a >> (22-13) */; \ ROR( y0, (11-6)) /* y0 = (e >> (11-6)) ^ (e >> (25-6)) */; \ vpsrlq XTMP3, XTMP2, 17 /* XTMP2 = W[-2] ror 17 {xBxA} */; \ xor y2, g /* y2 = f^g */; \ vpsrlq XTMP4, XTMP2, 19 /* XTMP3 = W[-2] ror 19 {xBxA} */; \ xor y0, e /* y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) */; \ and y2, e /* y2 = (f^g)&e */; \ vpsrld XTMP2, XTMP2, 10 /* XTMP4 = W[-2] >> 10 {BBAA} */; \ ROR( y1, (13-2)) /* y1 = (a >> (13-2)) ^ (a >> (22-2)) */; \ xor y1, a /* y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) */; \ xor y2, g /* y2 = CH = ((f^g)&e)^g */; \ ROR( y0, 6) /* y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) */; \ vpxor XTMP2, XTMP2, XTMP3; \ add y2, y0 /* y2 = S1 + CH */; \ ROR( y1, 2) /* y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) */; \ add y2, [rsp + _XFER + 2*4] /* y2 = k + w + S1 + CH */; \ vpxor XTMP4, XTMP4, XTMP2 /* XTMP4 = s1 {xBxA} */; \ mov y0, a /* y0 = a */; \ add h, y2 /* h = h + S1 + CH + k + w */; \ mov y2, a /* y2 = a */; \ vpshufb XTMP4, XTMP4, SHUF_00BA /* XTMP4 
= s1 {00BA} */; \ or y0, c /* y0 = a|c */; \ add d, h /* d = d + h + S1 + CH + k + w */; \ and y2, c /* y2 = a&c */; \ vpaddd XTMP0, XTMP0, XTMP4 /* XTMP0 = {..., ..., W[1], W[0]} */; \ and y0, b /* y0 = (a|c)&b */; \ add h, y1 /* h = h + S1 + CH + k + w + S0 */; \ /* compute high s1 */; \ vpshufd XTMP2, XTMP0, 0b01010000 /* XTMP2 = W[-2] {DDCC} */; \ or y0, y2 /* y0 = MAJ = (a|c)&b)|(a&c) */; \ lea h, [h + y0] /* h = h + S1 + CH + k + w + S0 + MAJ */ #define FOUR_ROUNDS_AND_SCHED_3(X0, X1, X2, X3, a, b, c, d, e, f, g, h) \ mov y0, e /* y0 = e */; \ ROR( y0, (25-11)) /* y0 = e >> (25-11) */; \ mov y1, a /* y1 = a */; \ ROR( y1, (22-13)) /* y1 = a >> (22-13) */; \ xor y0, e /* y0 = e ^ (e >> (25-11)) */; \ mov y2, f /* y2 = f */; \ ROR( y0, (11-6)) /* y0 = (e >> (11-6)) ^ (e >> (25-6)) */; \ vpsrlq XTMP3, XTMP2, 17 /* XTMP2 = W[-2] ror 17 {xDxC} */; \ xor y1, a /* y1 = a ^ (a >> (22-13) */; \ xor y2, g /* y2 = f^g */; \ vpsrlq X0, XTMP2, 19 /* XTMP3 = W[-2] ror 19 {xDxC} */; \ xor y0, e /* y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) */; \ and y2, e /* y2 = (f^g)&e */; \ ROR( y1, (13-2)) /* y1 = (a >> (13-2)) ^ (a >> (22-2)) */; \ vpsrld XTMP2, XTMP2, 10 /* X0 = W[-2] >> 10 {DDCC} */; \ xor y1, a /* y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) */; \ ROR( y0, 6) /* y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) */; \ xor y2, g /* y2 = CH = ((f^g)&e)^g */; \ vpxor XTMP2, XTMP2, XTMP3; \ ROR( y1, 2) /* y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) */; \ add y2, y0 /* y2 = S1 + CH */; \ add y2, [rsp + _XFER + 3*4] /* y2 = k + w + S1 + CH */; \ vpxor X0, X0, XTMP2 /* X0 = s1 {xDxC} */; \ mov y0, a /* y0 = a */; \ add h, y2 /* h = h + S1 + CH + k + w */; \ mov y2, a /* y2 = a */; \ vpshufb X0, X0, SHUF_DC00 /* X0 = s1 {DC00} */; \ or y0, c /* y0 = a|c */; \ add d, h /* d = d + h + S1 + CH + k + w */; \ and y2, c /* y2 = a&c */; \ vpaddd X0, X0, XTMP0 /* X0 = {W[3], W[2], W[1], W[0]} */; \ and y0, b /* y0 = (a|c)&b */; \ add h, y1 /* h = h + S1 + CH + k + w + S0 */; \ or y0, y2 /* y0 = MAJ = (a|c)&b)|(a&c) */; \ lea h, [h + y0] /* h = h + S1 + CH + k + w + S0 + MAJ */ #define FOUR_ROUNDS_AND_SCHED(X0, X1, X2, X3, a, b, c, d, e, f, g, h) \ FOUR_ROUNDS_AND_SCHED_0(X0, X1, X2, X3, a, b, c, d, e, f, g, h); \ FOUR_ROUNDS_AND_SCHED_1(X0, X1, X2, X3, h, a, b, c, d, e, f, g); \ FOUR_ROUNDS_AND_SCHED_2(X0, X1, X2, X3, g, h, a, b, c, d, e, f); \ FOUR_ROUNDS_AND_SCHED_3(X0, X1, X2, X3, f, g, h, a, b, c, d, e); /* input is [rsp + _XFER + %1 * 4] */ #define DO_ROUND(i1, a, b, c, d, e, f, g, h) \ mov y0, e /* y0 = e */; \ ROR( y0, (25-11)) /* y0 = e >> (25-11) */; \ mov y1, a /* y1 = a */; \ xor y0, e /* y0 = e ^ (e >> (25-11)) */; \ ROR( y1, (22-13)) /* y1 = a >> (22-13) */; \ mov y2, f /* y2 = f */; \ xor y1, a /* y1 = a ^ (a >> (22-13) */; \ ROR( y0, (11-6)) /* y0 = (e >> (11-6)) ^ (e >> (25-6)) */; \ xor y2, g /* y2 = f^g */; \ xor y0, e /* y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) */; \ ROR( y1, (13-2)) /* y1 = (a >> (13-2)) ^ (a >> (22-2)) */; \ and y2, e /* y2 = (f^g)&e */; \ xor y1, a /* y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) */; \ ROR( y0, 6) /* y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) */; \ xor y2, g /* y2 = CH = ((f^g)&e)^g */; \ add y2, y0 /* y2 = S1 + CH */; \ ROR( y1, 2) /* y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) */; \ add y2, [rsp + _XFER + i1 * 4] /* y2 = k + w + S1 + CH */; \ mov y0, a /* y0 = a */; \ add h, y2 /* h = h + S1 + CH + k + w */; \ mov y2, a /* y2 = a */; \ or y0, c /* y0 = a|c */; \ add d, h /* d = d + h + S1 + CH + k + w */; \ and y2, c /* y2 = a&c */; \ and y0, b /* y0 = (a|c)&b */; \ add h, y1 /* h = h + S1 + CH + k + 
w + S0 */; \ or y0, y2 /* y0 = MAJ = (a|c)&b)|(a&c) */; \ lea h, [h + y0] /* h = h + S1 + CH + k + w + S0 + MAJ */ /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; void sha256_avx(void *input_data, UINT32 digest[8], UINT64 num_blks) ;; arg 1 : pointer to input data ;; arg 2 : pointer to digest ;; arg 3 : Num blocks */ .text .globl _gcry_sha256_transform_amd64_avx ELF(.type _gcry_sha256_transform_amd64_avx,@function;) .align 16 _gcry_sha256_transform_amd64_avx: CFI_STARTPROC() vzeroupper push rbx CFI_PUSH(rbx) push rbp CFI_PUSH(rbp) push r13 CFI_PUSH(r13) push r14 CFI_PUSH(r14) push r15 CFI_PUSH(r15) sub rsp, STACK_SIZE CFI_ADJUST_CFA_OFFSET(STACK_SIZE); shl NUM_BLKS, 6 /* convert to bytes */ jz .Ldone_hash add NUM_BLKS, INP /* pointer to end of data */ mov [rsp + _INP_END], NUM_BLKS /* load initial digest */ mov a,[4*0 + CTX] mov b,[4*1 + CTX] mov c,[4*2 + CTX] mov d,[4*3 + CTX] mov e,[4*4 + CTX] mov f,[4*5 + CTX] mov g,[4*6 + CTX] mov h,[4*7 + CTX] vmovdqa BYTE_FLIP_MASK, [.LPSHUFFLE_BYTE_FLIP_MASK ADD_RIP] vmovdqa SHUF_00BA, [.L_SHUF_00BA ADD_RIP] vmovdqa SHUF_DC00, [.L_SHUF_DC00 ADD_RIP] .Loop0: lea TBL, [.LK256 ADD_RIP] /* byte swap first 16 dwords */ COPY_XMM_AND_BSWAP(X0, [INP + 0*16], BYTE_FLIP_MASK) COPY_XMM_AND_BSWAP(X1, [INP + 1*16], BYTE_FLIP_MASK) COPY_XMM_AND_BSWAP(X2, [INP + 2*16], BYTE_FLIP_MASK) COPY_XMM_AND_BSWAP(X3, [INP + 3*16], BYTE_FLIP_MASK) mov [rsp + _INP], INP /* schedule 48 input dwords, by doing 3 rounds of 16 each */ mov SRND, 3 .align 16 .Loop1: vpaddd XFER, X0, [TBL + 0*16] vmovdqa [rsp + _XFER], XFER FOUR_ROUNDS_AND_SCHED(X0, X1, X2, X3, a, b, c, d, e, f, g, h) vpaddd XFER, X1, [TBL + 1*16] vmovdqa [rsp + _XFER], XFER FOUR_ROUNDS_AND_SCHED(X1, X2, X3, X0, e, f, g, h, a, b, c, d) vpaddd XFER, X2, [TBL + 2*16] vmovdqa [rsp + _XFER], XFER FOUR_ROUNDS_AND_SCHED(X2, X3, X0, X1, a, b, c, d, e, f, g, h) vpaddd XFER, X3, [TBL + 3*16] vmovdqa [rsp + _XFER], XFER add TBL, 4*16 FOUR_ROUNDS_AND_SCHED(X3, X0, X1, X2, e, f, g, h, a, b, c, d) sub SRND, 1 jne .Loop1 mov SRND, 2 .Loop2: vpaddd X0, X0, [TBL + 0*16] vmovdqa [rsp + _XFER], X0 DO_ROUND(0, a, b, c, d, e, f, g, h) DO_ROUND(1, h, a, b, c, d, e, f, g) DO_ROUND(2, g, h, a, b, c, d, e, f) DO_ROUND(3, f, g, h, a, b, c, d, e) vpaddd X1, X1, [TBL + 1*16] vmovdqa [rsp + _XFER], X1 add TBL, 2*16 DO_ROUND(0, e, f, g, h, a, b, c, d) DO_ROUND(1, d, e, f, g, h, a, b, c) DO_ROUND(2, c, d, e, f, g, h, a, b) DO_ROUND(3, b, c, d, e, f, g, h, a) vmovdqa X0, X2 vmovdqa X1, X3 sub SRND, 1 jne .Loop2 addm([4*0 + CTX],a) addm([4*1 + CTX],b) addm([4*2 + CTX],c) addm([4*3 + CTX],d) addm([4*4 + CTX],e) addm([4*5 + CTX],f) addm([4*6 + CTX],g) addm([4*7 + CTX],h) mov INP, [rsp + _INP] add INP, 64 cmp INP, [rsp + _INP_END] jne .Loop0 .Ldone_hash: vzeroall vmovdqa [rsp + _XFER], XFER xor eax, eax add rsp, STACK_SIZE CFI_ADJUST_CFA_OFFSET(-STACK_SIZE); pop r15 CFI_POP(r15) pop r14 CFI_POP(r14) pop r13 CFI_POP(r13) pop rbp CFI_POP(rbp) pop rbx CFI_POP(rbx) ret_spec_stop CFI_ENDPROC() +SECTION_RODATA + +ELF(.type _sha256_avx_consts,@object) +_sha256_avx_consts: + .align 16 .LK256: .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 .long 
0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 .LPSHUFFLE_BYTE_FLIP_MASK: .octa 0x0c0d0e0f08090a0b0405060700010203 /* shuffle xBxA -> 00BA */ .L_SHUF_00BA: .octa 0xFFFFFFFFFFFFFFFF0b0a090803020100 /* shuffle xDxC -> DC00 */ .L_SHUF_DC00: .octa 0x0b0a090803020100FFFFFFFFFFFFFFFF #endif #endif diff --git a/cipher/sha256-avx2-bmi2-amd64.S b/cipher/sha256-avx2-bmi2-amd64.S index 60ad442c..e2a5454c 100644 --- a/cipher/sha256-avx2-bmi2-amd64.S +++ b/cipher/sha256-avx2-bmi2-amd64.S @@ -1,527 +1,533 @@ /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; Copyright (c) 2012, Intel Corporation ; ; All rights reserved. ; ; Redistribution and use in source and binary forms, with or without ; modification, are permitted provided that the following conditions are ; met: ; ; * Redistributions of source code must retain the above copyright ; notice, this list of conditions and the following disclaimer. ; ; * Redistributions in binary form must reproduce the above copyright ; notice, this list of conditions and the following disclaimer in the ; documentation and/or other materials provided with the ; distribution. ; ; * Neither the name of the Intel Corporation nor the names of its ; contributors may be used to endorse or promote products derived from ; this software without specific prior written permission. ; ; ; THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY ; EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR ; PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR ; CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, ; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, ; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ; LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; ; This code is described in an Intel White-Paper: ; "Fast SHA-256 Implementations on Intel Architecture Processors" ; ; To find it, surf to http://www.intel.com/p/en_US/embedded ; and search for that title. 
; The paper is expected to be released roughly at the end of April, 2012 ; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; This code schedules 2 blocks at a time, with 4 lanes per block ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ /* * Conversion to GAS assembly and integration to libgcrypt * by Jussi Kivilinna */ #ifdef __x86_64 #include #if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && \ defined(HAVE_INTEL_SYNTAX_PLATFORM_AS) && \ defined(HAVE_GCC_INLINE_ASM_AVX2) && defined(HAVE_GCC_INLINE_ASM_BMI2) && \ defined(USE_SHA256) #include "asm-common-amd64.h" .intel_syntax noprefix #define VMOVDQ vmovdqu /* ; assume buffers not aligned */ /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; Define Macros */ /* addm [mem], reg */ /* Add reg to mem using reg-mem add and store */ #define addm(p1, p2) \ add p2, p1; \ mov p1, p2; /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ #define X0 ymm4 #define X1 ymm5 #define X2 ymm6 #define X3 ymm7 /* XMM versions of above */ #define XWORD0 xmm4 #define XWORD1 xmm5 #define XWORD2 xmm6 #define XWORD3 xmm7 #define XTMP0 ymm0 #define XTMP1 ymm1 #define XTMP2 ymm2 #define XTMP3 ymm3 #define XTMP4 ymm8 #define XFER ymm9 #define XTMP5 ymm11 #define SHUF_00BA ymm10 /* shuffle xBxA -> 00BA */ #define SHUF_DC00 ymm12 /* shuffle xDxC -> DC00 */ #define BYTE_FLIP_MASK ymm13 #define X_BYTE_FLIP_MASK xmm13 /* XMM version of BYTE_FLIP_MASK */ #define NUM_BLKS rdx /* 3rd arg */ #define CTX rsi /* 2nd arg */ #define INP rdi /* 1st arg */ #define c ecx #define d r8d #define e edx /* clobbers NUM_BLKS */ #define y3 edi /* clobbers INP */ #define TBL rbp #define SRND CTX /* SRND is same register as CTX */ #define a eax #define b ebx #define f r9d #define g r10d #define h r11d #define old_h r11d #define T1 r12d #define y0 r13d #define y1 r14d #define y2 r15d #define _XFER_SIZE 2*64*4 /* 2 blocks, 64 rounds, 4 bytes/round */ #define _XMM_SAVE_SIZE 0 #define _INP_END_SIZE 8 #define _INP_SIZE 8 #define _CTX_SIZE 8 #define _RSP_SIZE 8 #define _XFER 0 #define _XMM_SAVE _XFER + _XFER_SIZE #define _INP_END _XMM_SAVE + _XMM_SAVE_SIZE #define _INP _INP_END + _INP_END_SIZE #define _CTX _INP + _INP_SIZE #define _RSP _CTX + _CTX_SIZE #define STACK_SIZE _RSP + _RSP_SIZE #define ONE_ROUND_PART1(XFERIN, a, b, c, d, e, f, g, h) \ /* h += Sum1 (e) + Ch (e, f, g) + (k[t] + w[0]); */ \ /* d += h; */ \ /* h += Sum0 (a) + Maj (a, b, c); */ \ \ /* Ch(x, y, z) => ((x & y) + (~x & z)) */ \ /* Maj(x, y, z) => ((x & y) + (z & (x ^ y))) */ \ \ mov y3, e; \ add h, [XFERIN]; \ and y3, f; \ rorx y0, e, 25; \ rorx y1, e, 11; \ lea h, [h + y3]; \ andn y3, e, g; \ rorx T1, a, 13; \ xor y0, y1; \ lea h, [h + y3] #define ONE_ROUND_PART2(a, b, c, d, e, f, g, h) \ rorx y2, a, 22; \ rorx y1, e, 6; \ mov y3, a; \ xor T1, y2; \ xor y0, y1; \ xor y3, b; \ lea h, [h + y0]; \ mov y0, a; \ rorx y2, a, 2; \ add d, h; \ and y3, c; \ xor T1, y2; \ lea h, [h + y3]; \ lea h, [h + T1]; \ and y0, b; \ lea h, [h + y0] #define ONE_ROUND(XFER, a, b, c, d, e, f, g, h) \ ONE_ROUND_PART1(XFER, a, b, c, d, e, f, g, h); \ ONE_ROUND_PART2(a, b, c, d, e, f, g, h) #define FOUR_ROUNDS_AND_SCHED(XFERIN, XFEROUT, X0, X1, X2, X3, a, b, c, d, e, f, g, h) \ /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 0 ;;;;;;;;;;;;;;;;;;;;;;;;;;;; */; \ vpalignr XTMP0, X3, X2, 4 /* XTMP0 = W[-7] */; \ vpaddd XTMP0, XTMP0, X0 /* XTMP0 = W[-7] + W[-16]; y1 = (e >> 6); S1 */; \ vpalignr XTMP1, X1, X0, 4 /* XTMP1 = W[-15] */; \ vpsrld XTMP2, XTMP1, 7; \ vpslld XTMP3, XTMP1, 
(32-7); \ vpor XTMP3, XTMP3, XTMP2 /* XTMP3 = W[-15] ror 7 */; \ vpsrld XTMP2, XTMP1,18; \ \ ONE_ROUND(0*4+XFERIN, a, b, c, d, e, f, g, h); \ \ /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 1 ;;;;;;;;;;;;;;;;;;;;;;;;;;;; */; \ vpsrld XTMP4, XTMP1, 3 /* XTMP4 = W[-15] >> 3 */; \ vpslld XTMP1, XTMP1, (32-18); \ vpxor XTMP3, XTMP3, XTMP1; \ vpxor XTMP3, XTMP3, XTMP2 /* XTMP3 = W[-15] ror 7 ^ W[-15] ror 18 */; \ vpxor XTMP1, XTMP3, XTMP4 /* XTMP1 = s0 */; \ vpshufd XTMP2, X3, 0b11111010 /* XTMP2 = W[-2] {BBAA} */; \ vpaddd XTMP0, XTMP0, XTMP1 /* XTMP0 = W[-16] + W[-7] + s0 */; \ vpsrld XTMP4, XTMP2, 10 /* XTMP4 = W[-2] >> 10 {BBAA} */; \ \ ONE_ROUND(1*4+XFERIN, h, a, b, c, d, e, f, g); \ \ /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 2 ;;;;;;;;;;;;;;;;;;;;;;;;;;;; */; \ vpsrlq XTMP3, XTMP2, 19 /* XTMP3 = W[-2] ror 19 {xBxA} */; \ vpsrlq XTMP2, XTMP2, 17 /* XTMP2 = W[-2] ror 17 {xBxA} */; \ vpxor XTMP2, XTMP2, XTMP3; \ vpxor XTMP4, XTMP4, XTMP2 /* XTMP4 = s1 {xBxA} */; \ vpshufb XTMP4, XTMP4, SHUF_00BA /* XTMP4 = s1 {00BA} */; \ vpaddd XTMP0, XTMP0, XTMP4 /* XTMP0 = {..., ..., W[1], W[0]} */; \ vpshufd XTMP2, XTMP0, 0b1010000 /* XTMP2 = W[-2] {DDCC} */; \ \ ONE_ROUND(2*4+XFERIN, g, h, a, b, c, d, e, f); \ \ /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 3 ;;;;;;;;;;;;;;;;;;;;;;;;;;;; */; \ vpsrld XTMP5, XTMP2, 10 /* XTMP5 = W[-2] >> 10 {DDCC} */; \ vpsrlq XTMP3, XTMP2, 19 /* XTMP3 = W[-2] ror 19 {xDxC} */; \ vpsrlq XTMP2, XTMP2, 17 /* XTMP2 = W[-2] ror 17 {xDxC} */; \ vpxor XTMP2, XTMP2, XTMP3; \ vpxor XTMP5, XTMP5, XTMP2 /* XTMP5 = s1 {xDxC} */; \ vpshufb XTMP5, XTMP5, SHUF_DC00 /* XTMP5 = s1 {DC00} */; \ vpaddd X0, XTMP5, XTMP0 /* X0 = {W[3], W[2], W[1], W[0]} */; \ vpaddd XFER, X0, [TBL + XFEROUT]; \ \ ONE_ROUND_PART1(3*4+XFERIN, f, g, h, a, b, c, d, e); \ vmovdqa [rsp + _XFER + XFEROUT], XFER; \ ONE_ROUND_PART2(f, g, h, a, b, c, d, e); #define DO_4ROUNDS(XFERIN, a, b, c, d, e, f, g, h) \ ONE_ROUND(0*4+XFERIN, a, b, c, d, e, f, g, h); \ ONE_ROUND(1*4+XFERIN, h, a, b, c, d, e, f, g); \ ONE_ROUND(2*4+XFERIN, g, h, a, b, c, d, e, f); \ ONE_ROUND(3*4+XFERIN, f, g, h, a, b, c, d, e) /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; void sha256_rorx(void *input_data, UINT32 digest[8], UINT64 num_blks) ;; arg 1 : pointer to input data ;; arg 2 : pointer to digest ;; arg 3 : Num blocks */ .text .globl _gcry_sha256_transform_amd64_avx2 ELF(.type _gcry_sha256_transform_amd64_avx2,@function) .align 32 _gcry_sha256_transform_amd64_avx2: CFI_STARTPROC() xor eax, eax cmp rdx, 0 je .Lnowork push rbx CFI_PUSH(rbx) push rbp CFI_PUSH(rbp) push r12 CFI_PUSH(r12) push r13 CFI_PUSH(r13) push r14 CFI_PUSH(r14) push r15 CFI_PUSH(r15) vzeroupper vmovdqa BYTE_FLIP_MASK, [.LPSHUFFLE_BYTE_FLIP_MASK ADD_RIP] vmovdqa SHUF_00BA, [.L_SHUF_00BA ADD_RIP] vmovdqa SHUF_DC00, [.L_SHUF_DC00 ADD_RIP] mov rax, rsp CFI_DEF_CFA_REGISTER(rax); sub rsp, STACK_SIZE and rsp, ~63 mov [rsp + _RSP], rax CFI_CFA_ON_STACK(_RSP, 6 * 8) shl NUM_BLKS, 6 /* convert to bytes */ lea NUM_BLKS, [NUM_BLKS + INP - 64] /* pointer to last block */ mov [rsp + _INP_END], NUM_BLKS /* Check if only one block of input. Note: Loading initial digest * only uses 'mov' instruction and does not change condition * flags. 
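For reference, the rotation counts used above (25/11/6 and 13/22/2 in ONE_ROUND_PART1/PART2, and the 7/18/3 and 17/19/10 shift pairs in FOUR_ROUNDS_AND_SCHED) are the standard FIPS 180-4 SHA-256 round and message-schedule functions. A minimal scalar sketch in C of what the macros compute, not libgcrypt code (ror32, sha256_schedule and sha256_round are illustrative names):

  #include <stdint.h>

  static inline uint32_t ror32(uint32_t x, unsigned r)
  {
    return (x >> r) | (x << (32 - r));
  }

  /* Message schedule; the vector code above produces four W[t] per
   * FOUR_ROUNDS_AND_SCHED invocation. */
  static void sha256_schedule(uint32_t w[64])
  {
    int t;
    for (t = 16; t < 64; t++)
      {
        uint32_t s0 = ror32(w[t-15], 7) ^ ror32(w[t-15], 18) ^ (w[t-15] >> 3);
        uint32_t s1 = ror32(w[t-2], 17) ^ ror32(w[t-2], 19) ^ (w[t-2] >> 10);
        w[t] = w[t-16] + s0 + w[t-7] + s1;
      }
  }

  /* One round; ONE_ROUND_PART1/PART2 interleave exactly this with the
   * vector schedule, using rorx/andn to avoid flag dependencies.
   * 'kw' is the precomputed K[t] + W[t] loaded from the XFER area. */
  static void sha256_round(uint32_t s[8], uint32_t kw)
  {
    uint32_t a = s[0], b = s[1], c = s[2], d = s[3];
    uint32_t e = s[4], f = s[5], g = s[6], h = s[7];
    uint32_t sum1 = ror32(e, 6) ^ ror32(e, 11) ^ ror32(e, 25);
    uint32_t ch   = (e & f) ^ (~e & g);
    uint32_t sum0 = ror32(a, 2) ^ ror32(a, 13) ^ ror32(a, 22);
    uint32_t maj  = (a & b) ^ (a & c) ^ (b & c);
    uint32_t t1 = h + sum1 + ch + kw;
    uint32_t t2 = sum0 + maj;
    s[7] = g; s[6] = f; s[5] = e; s[4] = d + t1;
    s[3] = c; s[2] = b; s[1] = a; s[0] = t1 + t2;
  }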
*/ cmp NUM_BLKS, INP /* ; load initial digest */ mov a,[4*0 + CTX] mov b,[4*1 + CTX] mov c,[4*2 + CTX] mov d,[4*3 + CTX] mov e,[4*4 + CTX] mov f,[4*5 + CTX] mov g,[4*6 + CTX] mov h,[4*7 + CTX] mov [rsp + _CTX], CTX je .Ldo_last_block .Loop0: lea TBL, [.LK256 ADD_RIP] /* ; Load first 16 dwords from two blocks */ VMOVDQ XTMP0, [INP + 0*32] VMOVDQ XTMP1, [INP + 1*32] VMOVDQ XTMP2, [INP + 2*32] VMOVDQ XTMP3, [INP + 3*32] /* ; byte swap data */ vpshufb XTMP0, XTMP0, BYTE_FLIP_MASK vpshufb XTMP1, XTMP1, BYTE_FLIP_MASK vpshufb XTMP2, XTMP2, BYTE_FLIP_MASK vpshufb XTMP3, XTMP3, BYTE_FLIP_MASK /* ; transpose data into high/low halves */ vperm2i128 X0, XTMP0, XTMP2, 0x20 vperm2i128 X1, XTMP0, XTMP2, 0x31 vperm2i128 X2, XTMP1, XTMP3, 0x20 vperm2i128 X3, XTMP1, XTMP3, 0x31 .Last_block_enter: add INP, 64 mov [rsp + _INP], INP /* ; schedule 48 input dwords, by doing 3 rounds of 12 each */ xor SRND, SRND vpaddd XFER, X0, [TBL + 0*32] vmovdqa [rsp + _XFER + 0*32], XFER vpaddd XFER, X1, [TBL + 1*32] vmovdqa [rsp + _XFER + 1*32], XFER vpaddd XFER, X2, [TBL + 2*32] vmovdqa [rsp + _XFER + 2*32], XFER vpaddd XFER, X3, [TBL + 3*32] vmovdqa [rsp + _XFER + 3*32], XFER .align 16 .Loop1: FOUR_ROUNDS_AND_SCHED(rsp + _XFER + SRND + 0*32, SRND + 4*32, X0, X1, X2, X3, a, b, c, d, e, f, g, h) FOUR_ROUNDS_AND_SCHED(rsp + _XFER + SRND + 1*32, SRND + 5*32, X1, X2, X3, X0, e, f, g, h, a, b, c, d) FOUR_ROUNDS_AND_SCHED(rsp + _XFER + SRND + 2*32, SRND + 6*32, X2, X3, X0, X1, a, b, c, d, e, f, g, h) FOUR_ROUNDS_AND_SCHED(rsp + _XFER + SRND + 3*32, SRND + 7*32, X3, X0, X1, X2, e, f, g, h, a, b, c, d) add SRND, 4*32 cmp SRND, 3 * 4*32 jb .Loop1 /* ; Do last 16 rounds with no scheduling */ DO_4ROUNDS(rsp + _XFER + (3*4*32 + 0*32), a, b, c, d, e, f, g, h) DO_4ROUNDS(rsp + _XFER + (3*4*32 + 1*32), e, f, g, h, a, b, c, d) DO_4ROUNDS(rsp + _XFER + (3*4*32 + 2*32), a, b, c, d, e, f, g, h) DO_4ROUNDS(rsp + _XFER + (3*4*32 + 3*32), e, f, g, h, a, b, c, d) mov CTX, [rsp + _CTX] mov INP, [rsp + _INP] addm([4*0 + CTX],a) addm([4*1 + CTX],b) addm([4*2 + CTX],c) addm([4*3 + CTX],d) addm([4*4 + CTX],e) addm([4*5 + CTX],f) addm([4*6 + CTX],g) addm([4*7 + CTX],h) cmp INP, [rsp + _INP_END] ja .Ldone_hash /* ;;; Do second block using previously scheduled results */ xor SRND, SRND .align 16 .Loop3: DO_4ROUNDS(rsp + _XFER + SRND + 0*32 + 16, a, b, c, d, e, f, g, h) DO_4ROUNDS(rsp + _XFER + SRND + 1*32 + 16, e, f, g, h, a, b, c, d) add SRND, 2*32 cmp SRND, 4 * 4*32 jb .Loop3 mov CTX, [rsp + _CTX] mov INP, [rsp + _INP] add INP, 64 addm([4*0 + CTX],a) addm([4*1 + CTX],b) addm([4*2 + CTX],c) addm([4*3 + CTX],d) addm([4*4 + CTX],e) addm([4*5 + CTX],f) addm([4*6 + CTX],g) addm([4*7 + CTX],h) cmp INP, [rsp + _INP_END] jb .Loop0 ja .Ldone_hash .Ldo_last_block: /* ;;; do last block */ lea TBL, [.LK256 ADD_RIP] VMOVDQ XWORD0, [INP + 0*16] VMOVDQ XWORD1, [INP + 1*16] VMOVDQ XWORD2, [INP + 2*16] VMOVDQ XWORD3, [INP + 3*16] vpshufb XWORD0, XWORD0, X_BYTE_FLIP_MASK vpshufb XWORD1, XWORD1, X_BYTE_FLIP_MASK vpshufb XWORD2, XWORD2, X_BYTE_FLIP_MASK vpshufb XWORD3, XWORD3, X_BYTE_FLIP_MASK jmp .Last_block_enter .Lonly_one_block: /* ; load initial digest */ mov a,[4*0 + CTX] mov b,[4*1 + CTX] mov c,[4*2 + CTX] mov d,[4*3 + CTX] mov e,[4*4 + CTX] mov f,[4*5 + CTX] mov g,[4*6 + CTX] mov h,[4*7 + CTX] vmovdqa BYTE_FLIP_MASK, [.LPSHUFFLE_BYTE_FLIP_MASK ADD_RIP] vmovdqa SHUF_00BA, [.L_SHUF_00BA ADD_RIP] vmovdqa SHUF_DC00, [.L_SHUF_DC00 ADD_RIP] mov [rsp + _CTX], CTX jmp .Ldo_last_block .Ldone_hash: vzeroall /* burn stack */ vmovdqa [rsp + _XFER + 0 * 32], ymm0 vmovdqa 
[rsp + _XFER + 1 * 32], ymm0 vmovdqa [rsp + _XFER + 2 * 32], ymm0 vmovdqa [rsp + _XFER + 3 * 32], ymm0 vmovdqa [rsp + _XFER + 4 * 32], ymm0 vmovdqa [rsp + _XFER + 5 * 32], ymm0 vmovdqa [rsp + _XFER + 6 * 32], ymm0 vmovdqa [rsp + _XFER + 7 * 32], ymm0 vmovdqa [rsp + _XFER + 8 * 32], ymm0 vmovdqa [rsp + _XFER + 9 * 32], ymm0 vmovdqa [rsp + _XFER + 10 * 32], ymm0 vmovdqa [rsp + _XFER + 11 * 32], ymm0 vmovdqa [rsp + _XFER + 12 * 32], ymm0 vmovdqa [rsp + _XFER + 13 * 32], ymm0 vmovdqa [rsp + _XFER + 14 * 32], ymm0 vmovdqa [rsp + _XFER + 15 * 32], ymm0 xor eax, eax mov rsp, [rsp + _RSP] CFI_DEF_CFA_REGISTER(rsp) pop r15 CFI_POP(r15) pop r14 CFI_POP(r14) pop r13 CFI_POP(r13) pop r12 CFI_POP(r12) pop rbp CFI_POP(rbp) pop rbx CFI_POP(rbx) .Lnowork: ret_spec_stop CFI_ENDPROC() + +SECTION_RODATA + +ELF(.type _sha256_avx2_consts,@object) +_sha256_avx2_consts: + .align 64 .LK256: .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 .LPSHUFFLE_BYTE_FLIP_MASK: .octa 0x0c0d0e0f08090a0b0405060700010203,0x0c0d0e0f08090a0b0405060700010203 /* shuffle xBxA -> 00BA */ .L_SHUF_00BA: .octa 0xFFFFFFFFFFFFFFFF0b0a090803020100,0xFFFFFFFFFFFFFFFF0b0a090803020100 /* shuffle xDxC -> DC00 */ .L_SHUF_DC00: .octa 0x0b0a090803020100FFFFFFFFFFFFFFFF,0x0b0a090803020100FFFFFFFFFFFFFFFF #endif #endif diff --git a/cipher/sha256-ssse3-amd64.S b/cipher/sha256-ssse3-amd64.S index 401ff6f4..ab93647c 100644 --- a/cipher/sha256-ssse3-amd64.S +++ b/cipher/sha256-ssse3-amd64.S @@ -1,528 +1,533 @@ /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; Copyright (c) 2012, Intel Corporation ; ; All rights reserved. ; ; Redistribution and use in source and binary forms, with or without ; modification, are permitted provided that the following conditions are ; met: ; ; * Redistributions of source code must retain the above copyright ; notice, this list of conditions and the following disclaimer. 
; ; * Redistributions in binary form must reproduce the above copyright ; notice, this list of conditions and the following disclaimer in the ; documentation and/or other materials provided with the ; distribution. ; ; * Neither the name of the Intel Corporation nor the names of its ; contributors may be used to endorse or promote products derived from ; this software without specific prior written permission. ; ; ; THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY ; EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR ; PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR ; CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, ; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, ; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ; LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; ; This code is described in an Intel White-Paper: ; "Fast SHA-256 Implementations on Intel Architecture Processors" ; ; To find it, surf to http://www.intel.com/p/en_US/embedded ; and search for that title. ; The paper is expected to be released roughly at the end of April, 2012 ; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; This code schedules 1 blocks at a time, with 4 lanes per block ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ /* * Conversion to GAS assembly and integration to libgcrypt * by Jussi Kivilinna * * Note: original implementation was named as SHA256-SSE4. However, only SSSE3 * is required. 
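The scalar rounds below compute the choose and majority functions in the rearranged forms noted in their comments, CH = ((f^g)&e)^g and MAJ = ((a|c)&b)|(a&c). These are algebraic rewrites of the textbook definitions; a small illustrative C check of the equivalence (helper names are not from libgcrypt):

  #include <assert.h>
  #include <stdint.h>

  /* Textbook FIPS 180-4 definitions... */
  static uint32_t ch_ref(uint32_t e, uint32_t f, uint32_t g)
  { return (e & f) ^ (~e & g); }

  static uint32_t maj_ref(uint32_t a, uint32_t b, uint32_t c)
  { return (a & b) ^ (a & c) ^ (b & c); }

  /* ...and the forms used by FOUR_ROUNDS_AND_SCHED/DO_ROUND below. */
  static uint32_t ch_alt(uint32_t e, uint32_t f, uint32_t g)
  { return ((f ^ g) & e) ^ g; }

  static uint32_t maj_alt(uint32_t a, uint32_t b, uint32_t c)
  { return ((a | c) & b) | (a & c); }

  static void check_forms(uint32_t x, uint32_t y, uint32_t z)
  {
    assert(ch_ref(x, y, z) == ch_alt(x, y, z));
    assert(maj_ref(x, y, z) == maj_alt(x, y, z));
  }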
*/ #ifdef __x86_64 #include #if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && \ defined(HAVE_INTEL_SYNTAX_PLATFORM_AS) && \ defined(HAVE_GCC_INLINE_ASM_SSSE3) && defined(USE_SHA256) #include "asm-common-amd64.h" .intel_syntax noprefix #define MOVDQ movdqu /* assume buffers not aligned */ /*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; Define Macros*/ /* addm [mem], reg * Add reg to mem using reg-mem add and store */ #define addm(p1, p2) \ add p2, p1; \ mov p1, p2; /*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;*/ /* COPY_XMM_AND_BSWAP xmm, [mem], byte_flip_mask * Load xmm with mem and byte swap each dword */ #define COPY_XMM_AND_BSWAP(p1, p2, p3) \ MOVDQ p1, p2; \ pshufb p1, p3; /*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;*/ #define X0 xmm4 #define X1 xmm5 #define X2 xmm6 #define X3 xmm7 #define XTMP0 xmm0 #define XTMP1 xmm1 #define XTMP2 xmm2 #define XTMP3 xmm3 #define XTMP4 xmm8 #define XFER xmm9 #define SHUF_00BA xmm10 /* shuffle xBxA -> 00BA */ #define SHUF_DC00 xmm11 /* shuffle xDxC -> DC00 */ #define BYTE_FLIP_MASK xmm12 #define NUM_BLKS rdx /* 3rd arg */ #define CTX rsi /* 2nd arg */ #define INP rdi /* 1st arg */ #define SRND rdi /* clobbers INP */ #define c ecx #define d r8d #define e edx #define TBL rbp #define a eax #define b ebx #define f r9d #define g r10d #define h r11d #define y0 r13d #define y1 r14d #define y2 r15d #define _INP_END_SIZE 8 #define _INP_SIZE 8 #define _XFER_SIZE 8 #define _XMM_SAVE_SIZE 0 /* STACK_SIZE plus pushes must be an odd multiple of 8 */ #define _ALIGN_SIZE 8 #define _INP_END 0 #define _INP (_INP_END + _INP_END_SIZE) #define _XFER (_INP + _INP_SIZE) #define _XMM_SAVE (_XFER + _XFER_SIZE + _ALIGN_SIZE) #define STACK_SIZE (_XMM_SAVE + _XMM_SAVE_SIZE) #define FOUR_ROUNDS_AND_SCHED_0(X0, X1, X2, X3, a, b, c, d, e, f, g, h) \ /* compute s0 four at a time and s1 two at a time */; \ /* compute W[-16] + W[-7] 4 at a time */; \ movdqa XTMP0, X3; \ mov y0, e /* y0 = e */; \ ror y0, (25-11) /* y0 = e >> (25-11) */; \ mov y1, a /* y1 = a */; \ palignr XTMP0, X2, 4 /* XTMP0 = W[-7] */; \ ror y1, (22-13) /* y1 = a >> (22-13) */; \ xor y0, e /* y0 = e ^ (e >> (25-11)) */; \ mov y2, f /* y2 = f */; \ ror y0, (11-6) /* y0 = (e >> (11-6)) ^ (e >> (25-6)) */; \ movdqa XTMP1, X1; \ xor y1, a /* y1 = a ^ (a >> (22-13) */; \ xor y2, g /* y2 = f^g */; \ paddd XTMP0, X0 /* XTMP0 = W[-7] + W[-16] */; \ xor y0, e /* y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) */; \ and y2, e /* y2 = (f^g)&e */; \ ror y1, (13-2) /* y1 = (a >> (13-2)) ^ (a >> (22-2)) */; \ /* compute s0 */; \ palignr XTMP1, X0, 4 /* XTMP1 = W[-15] */; \ xor y1, a /* y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) */; \ ror y0, 6 /* y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) */; \ xor y2, g /* y2 = CH = ((f^g)&e)^g */; \ movdqa XTMP2, XTMP1 /* XTMP2 = W[-15] */; \ ror y1, 2 /* y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) */; \ add y2, y0 /* y2 = S1 + CH */; \ add y2, [rsp + _XFER + 0*4] /* y2 = k + w + S1 + CH */; \ movdqa XTMP3, XTMP1 /* XTMP3 = W[-15] */; \ mov y0, a /* y0 = a */; \ add h, y2 /* h = h + S1 + CH + k + w */; \ mov y2, a /* y2 = a */; \ pslld XTMP1, (32-7); \ or y0, c /* y0 = a|c */; \ add d, h /* d = d + h + S1 + CH + k + w */; \ and y2, c /* y2 = a&c */; \ psrld XTMP2, 7; \ and y0, b /* y0 = (a|c)&b */; \ add h, y1 /* h = h + S1 + CH + k + w + S0 */; \ por XTMP1, XTMP2 /* XTMP1 = W[-15] ror 7 */; \ or y0, y2 /* y0 = MAJ = (a|c)&b)|(a&c) */; \ lea h, [h + y0] /* h = h + S1 + CH + k + w + S0 + MAJ */ #define FOUR_ROUNDS_AND_SCHED_1(X0, X1, X2, X3, a, b, c, d, e, f, g, h) \ movdqa XTMP2, XTMP3 /* XTMP2 = 
W[-15] */; \ mov y0, e /* y0 = e */; \ mov y1, a /* y1 = a */; \ movdqa XTMP4, XTMP3 /* XTMP4 = W[-15] */; \ ror y0, (25-11) /* y0 = e >> (25-11) */; \ xor y0, e /* y0 = e ^ (e >> (25-11)) */; \ mov y2, f /* y2 = f */; \ ror y1, (22-13) /* y1 = a >> (22-13) */; \ pslld XTMP3, (32-18); \ xor y1, a /* y1 = a ^ (a >> (22-13) */; \ ror y0, (11-6) /* y0 = (e >> (11-6)) ^ (e >> (25-6)) */; \ xor y2, g /* y2 = f^g */; \ psrld XTMP2, 18; \ ror y1, (13-2) /* y1 = (a >> (13-2)) ^ (a >> (22-2)) */; \ xor y0, e /* y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) */; \ and y2, e /* y2 = (f^g)&e */; \ ror y0, 6 /* y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) */; \ pxor XTMP1, XTMP3; \ xor y1, a /* y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) */; \ xor y2, g /* y2 = CH = ((f^g)&e)^g */; \ psrld XTMP4, 3 /* XTMP4 = W[-15] >> 3 */; \ add y2, y0 /* y2 = S1 + CH */; \ add y2, [rsp + _XFER + 1*4] /* y2 = k + w + S1 + CH */; \ ror y1, 2 /* y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) */; \ pxor XTMP1, XTMP2 /* XTMP1 = W[-15] ror 7 ^ W[-15] ror 18 */; \ mov y0, a /* y0 = a */; \ add h, y2 /* h = h + S1 + CH + k + w */; \ mov y2, a /* y2 = a */; \ pxor XTMP1, XTMP4 /* XTMP1 = s0 */; \ or y0, c /* y0 = a|c */; \ add d, h /* d = d + h + S1 + CH + k + w */; \ and y2, c /* y2 = a&c */; \ /* compute low s1 */; \ pshufd XTMP2, X3, 0b11111010 /* XTMP2 = W[-2] {BBAA} */; \ and y0, b /* y0 = (a|c)&b */; \ add h, y1 /* h = h + S1 + CH + k + w + S0 */; \ paddd XTMP0, XTMP1 /* XTMP0 = W[-16] + W[-7] + s0 */; \ or y0, y2 /* y0 = MAJ = (a|c)&b)|(a&c) */; \ lea h, [h + y0] /* h = h + S1 + CH + k + w + S0 + MAJ */ #define FOUR_ROUNDS_AND_SCHED_2(X0, X1, X2, X3, a, b, c, d, e, f, g, h) \ movdqa XTMP3, XTMP2 /* XTMP3 = W[-2] {BBAA} */; \ mov y0, e /* y0 = e */; \ mov y1, a /* y1 = a */; \ ror y0, (25-11) /* y0 = e >> (25-11) */; \ movdqa XTMP4, XTMP2 /* XTMP4 = W[-2] {BBAA} */; \ xor y0, e /* y0 = e ^ (e >> (25-11)) */; \ ror y1, (22-13) /* y1 = a >> (22-13) */; \ mov y2, f /* y2 = f */; \ xor y1, a /* y1 = a ^ (a >> (22-13) */; \ ror y0, (11-6) /* y0 = (e >> (11-6)) ^ (e >> (25-6)) */; \ psrlq XTMP2, 17 /* XTMP2 = W[-2] ror 17 {xBxA} */; \ xor y2, g /* y2 = f^g */; \ psrlq XTMP3, 19 /* XTMP3 = W[-2] ror 19 {xBxA} */; \ xor y0, e /* y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) */; \ and y2, e /* y2 = (f^g)&e */; \ psrld XTMP4, 10 /* XTMP4 = W[-2] >> 10 {BBAA} */; \ ror y1, (13-2) /* y1 = (a >> (13-2)) ^ (a >> (22-2)) */; \ xor y1, a /* y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) */; \ xor y2, g /* y2 = CH = ((f^g)&e)^g */; \ ror y0, 6 /* y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) */; \ pxor XTMP2, XTMP3; \ add y2, y0 /* y2 = S1 + CH */; \ ror y1, 2 /* y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) */; \ add y2, [rsp + _XFER + 2*4] /* y2 = k + w + S1 + CH */; \ pxor XTMP4, XTMP2 /* XTMP4 = s1 {xBxA} */; \ mov y0, a /* y0 = a */; \ add h, y2 /* h = h + S1 + CH + k + w */; \ mov y2, a /* y2 = a */; \ pshufb XTMP4, SHUF_00BA /* XTMP4 = s1 {00BA} */; \ or y0, c /* y0 = a|c */; \ add d, h /* d = d + h + S1 + CH + k + w */; \ and y2, c /* y2 = a&c */; \ paddd XTMP0, XTMP4 /* XTMP0 = {..., ..., W[1], W[0]} */; \ and y0, b /* y0 = (a|c)&b */; \ add h, y1 /* h = h + S1 + CH + k + w + S0 */; \ /* compute high s1 */; \ pshufd XTMP2, XTMP0, 0b01010000 /* XTMP2 = W[-2] {DDCC} */; \ or y0, y2 /* y0 = MAJ = (a|c)&b)|(a&c) */; \ lea h, [h + y0] /* h = h + S1 + CH + k + w + S0 + MAJ */ #define FOUR_ROUNDS_AND_SCHED_3(X0, X1, X2, X3, a, b, c, d, e, f, g, h) \ movdqa XTMP3, XTMP2 /* XTMP3 = W[-2] {DDCC} */; \ mov y0, e /* y0 = e */; \ ror y0, (25-11) /* y0 = e >> (25-11) */; \ mov y1, a /* y1 = a */; \ 
movdqa X0, XTMP2 /* X0 = W[-2] {DDCC} */; \ ror y1, (22-13) /* y1 = a >> (22-13) */; \ xor y0, e /* y0 = e ^ (e >> (25-11)) */; \ mov y2, f /* y2 = f */; \ ror y0, (11-6) /* y0 = (e >> (11-6)) ^ (e >> (25-6)) */; \ psrlq XTMP2, 17 /* XTMP2 = W[-2] ror 17 {xDxC} */; \ xor y1, a /* y1 = a ^ (a >> (22-13) */; \ xor y2, g /* y2 = f^g */; \ psrlq XTMP3, 19 /* XTMP3 = W[-2] ror 19 {xDxC} */; \ xor y0, e /* y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) */; \ and y2, e /* y2 = (f^g)&e */; \ ror y1, (13-2) /* y1 = (a >> (13-2)) ^ (a >> (22-2)) */; \ psrld X0, 10 /* X0 = W[-2] >> 10 {DDCC} */; \ xor y1, a /* y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) */; \ ror y0, 6 /* y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) */; \ xor y2, g /* y2 = CH = ((f^g)&e)^g */; \ pxor XTMP2, XTMP3; \ ror y1, 2 /* y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) */; \ add y2, y0 /* y2 = S1 + CH */; \ add y2, [rsp + _XFER + 3*4] /* y2 = k + w + S1 + CH */; \ pxor X0, XTMP2 /* X0 = s1 {xDxC} */; \ mov y0, a /* y0 = a */; \ add h, y2 /* h = h + S1 + CH + k + w */; \ mov y2, a /* y2 = a */; \ pshufb X0, SHUF_DC00 /* X0 = s1 {DC00} */; \ or y0, c /* y0 = a|c */; \ add d, h /* d = d + h + S1 + CH + k + w */; \ and y2, c /* y2 = a&c */; \ paddd X0, XTMP0 /* X0 = {W[3], W[2], W[1], W[0]} */; \ and y0, b /* y0 = (a|c)&b */; \ add h, y1 /* h = h + S1 + CH + k + w + S0 */; \ or y0, y2 /* y0 = MAJ = (a|c)&b)|(a&c) */; \ lea h, [h + y0] /* h = h + S1 + CH + k + w + S0 + MAJ */ #define FOUR_ROUNDS_AND_SCHED(X0, X1, X2, X3, a, b, c, d, e, f, g, h) \ FOUR_ROUNDS_AND_SCHED_0(X0, X1, X2, X3, a, b, c, d, e, f, g, h); \ FOUR_ROUNDS_AND_SCHED_1(X0, X1, X2, X3, h, a, b, c, d, e, f, g); \ FOUR_ROUNDS_AND_SCHED_2(X0, X1, X2, X3, g, h, a, b, c, d, e, f); \ FOUR_ROUNDS_AND_SCHED_3(X0, X1, X2, X3, f, g, h, a, b, c, d, e); /* input is [rsp + _XFER + %1 * 4] */ #define DO_ROUND(i1, a, b, c, d, e, f, g, h) \ mov y0, e /* y0 = e */; \ ror y0, (25-11) /* y0 = e >> (25-11) */; \ mov y1, a /* y1 = a */; \ xor y0, e /* y0 = e ^ (e >> (25-11)) */; \ ror y1, (22-13) /* y1 = a >> (22-13) */; \ mov y2, f /* y2 = f */; \ xor y1, a /* y1 = a ^ (a >> (22-13) */; \ ror y0, (11-6) /* y0 = (e >> (11-6)) ^ (e >> (25-6)) */; \ xor y2, g /* y2 = f^g */; \ xor y0, e /* y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) */; \ ror y1, (13-2) /* y1 = (a >> (13-2)) ^ (a >> (22-2)) */; \ and y2, e /* y2 = (f^g)&e */; \ xor y1, a /* y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) */; \ ror y0, 6 /* y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) */; \ xor y2, g /* y2 = CH = ((f^g)&e)^g */; \ add y2, y0 /* y2 = S1 + CH */; \ ror y1, 2 /* y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) */; \ add y2, [rsp + _XFER + i1 * 4] /* y2 = k + w + S1 + CH */; \ mov y0, a /* y0 = a */; \ add h, y2 /* h = h + S1 + CH + k + w */; \ mov y2, a /* y2 = a */; \ or y0, c /* y0 = a|c */; \ add d, h /* d = d + h + S1 + CH + k + w */; \ and y2, c /* y2 = a&c */; \ and y0, b /* y0 = (a|c)&b */; \ add h, y1 /* h = h + S1 + CH + k + w + S0 */; \ or y0, y2 /* y0 = MAJ = (a|c)&b)|(a&c) */; \ lea h, [h + y0] /* h = h + S1 + CH + k + w + S0 + MAJ */ /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; void sha256_sse4(void *input_data, UINT32 digest[8], UINT64 num_blks) ;; arg 1 : pointer to input data ;; arg 2 : pointer to digest ;; arg 3 : Num blocks */ .text .globl _gcry_sha256_transform_amd64_ssse3 ELF(.type _gcry_sha256_transform_amd64_ssse3,@function;) .align 16 _gcry_sha256_transform_amd64_ssse3: CFI_STARTPROC() push rbx CFI_PUSH(rbx) push rbp CFI_PUSH(rbp) push r13 
CFI_PUSH(r13) push r14 CFI_PUSH(r14) push r15 CFI_PUSH(r15) sub rsp, STACK_SIZE CFI_ADJUST_CFA_OFFSET(STACK_SIZE); shl NUM_BLKS, 6 /* convert to bytes */ jz .Ldone_hash add NUM_BLKS, INP /* pointer to end of data */ mov [rsp + _INP_END], NUM_BLKS /* load initial digest */ mov a,[4*0 + CTX] mov b,[4*1 + CTX] mov c,[4*2 + CTX] mov d,[4*3 + CTX] mov e,[4*4 + CTX] mov f,[4*5 + CTX] mov g,[4*6 + CTX] mov h,[4*7 + CTX] movdqa BYTE_FLIP_MASK, [.LPSHUFFLE_BYTE_FLIP_MASK ADD_RIP] movdqa SHUF_00BA, [.L_SHUF_00BA ADD_RIP] movdqa SHUF_DC00, [.L_SHUF_DC00 ADD_RIP] .Loop0: lea TBL, [.LK256 ADD_RIP] /* byte swap first 16 dwords */ COPY_XMM_AND_BSWAP(X0, [INP + 0*16], BYTE_FLIP_MASK) COPY_XMM_AND_BSWAP(X1, [INP + 1*16], BYTE_FLIP_MASK) COPY_XMM_AND_BSWAP(X2, [INP + 2*16], BYTE_FLIP_MASK) COPY_XMM_AND_BSWAP(X3, [INP + 3*16], BYTE_FLIP_MASK) mov [rsp + _INP], INP /* schedule 48 input dwords, by doing 3 rounds of 16 each */ mov SRND, 3 .align 16 .Loop1: movdqa XFER, [TBL + 0*16] paddd XFER, X0 movdqa [rsp + _XFER], XFER FOUR_ROUNDS_AND_SCHED(X0, X1, X2, X3, a, b, c, d, e, f, g, h) movdqa XFER, [TBL + 1*16] paddd XFER, X1 movdqa [rsp + _XFER], XFER FOUR_ROUNDS_AND_SCHED(X1, X2, X3, X0, e, f, g, h, a, b, c, d) movdqa XFER, [TBL + 2*16] paddd XFER, X2 movdqa [rsp + _XFER], XFER FOUR_ROUNDS_AND_SCHED(X2, X3, X0, X1, a, b, c, d, e, f, g, h) movdqa XFER, [TBL + 3*16] paddd XFER, X3 movdqa [rsp + _XFER], XFER add TBL, 4*16 FOUR_ROUNDS_AND_SCHED(X3, X0, X1, X2, e, f, g, h, a, b, c, d) sub SRND, 1 jne .Loop1 mov SRND, 2 .Loop2: paddd X0, [TBL + 0*16] movdqa [rsp + _XFER], X0 DO_ROUND(0, a, b, c, d, e, f, g, h) DO_ROUND(1, h, a, b, c, d, e, f, g) DO_ROUND(2, g, h, a, b, c, d, e, f) DO_ROUND(3, f, g, h, a, b, c, d, e) paddd X1, [TBL + 1*16] movdqa [rsp + _XFER], X1 add TBL, 2*16 DO_ROUND(0, e, f, g, h, a, b, c, d) DO_ROUND(1, d, e, f, g, h, a, b, c) DO_ROUND(2, c, d, e, f, g, h, a, b) DO_ROUND(3, b, c, d, e, f, g, h, a) movdqa X0, X2 movdqa X1, X3 sub SRND, 1 jne .Loop2 addm([4*0 + CTX],a) addm([4*1 + CTX],b) addm([4*2 + CTX],c) addm([4*3 + CTX],d) addm([4*4 + CTX],e) addm([4*5 + CTX],f) addm([4*6 + CTX],g) addm([4*7 + CTX],h) mov INP, [rsp + _INP] add INP, 64 cmp INP, [rsp + _INP_END] jne .Loop0 pxor xmm0, xmm0 pxor xmm1, xmm1 pxor xmm2, xmm2 pxor xmm3, xmm3 pxor xmm4, xmm4 pxor xmm5, xmm5 pxor xmm6, xmm6 pxor xmm7, xmm7 pxor xmm8, xmm8 pxor xmm9, xmm9 pxor xmm10, xmm10 pxor xmm11, xmm11 pxor xmm12, xmm12 .Ldone_hash: pxor XFER, XFER movdqa [rsp + _XFER], XFER xor eax, eax add rsp, STACK_SIZE CFI_ADJUST_CFA_OFFSET(-STACK_SIZE); pop r15 CFI_POP(r15) pop r14 CFI_POP(r14) pop r13 CFI_POP(r13) pop rbp CFI_POP(rbp) pop rbx CFI_POP(rbx) ret_spec_stop CFI_ENDPROC() +SECTION_RODATA + +ELF(.type _sha256_ssse3_consts,@object) +_sha256_ssse3_consts: + .align 16 .LK256: .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 .long 
0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 .LPSHUFFLE_BYTE_FLIP_MASK: .octa 0x0c0d0e0f08090a0b0405060700010203 /* shuffle xBxA -> 00BA */ .L_SHUF_00BA: .octa 0xFFFFFFFFFFFFFFFF0b0a090803020100 /* shuffle xDxC -> DC00 */ .L_SHUF_DC00: .octa 0x0b0a090803020100FFFFFFFFFFFFFFFF #endif #endif diff --git a/cipher/sha512-avx-amd64.S b/cipher/sha512-avx-amd64.S index bfc4435d..1bd38060 100644 --- a/cipher/sha512-avx-amd64.S +++ b/cipher/sha512-avx-amd64.S @@ -1,461 +1,466 @@ /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; Copyright (c) 2012, Intel Corporation ; ; All rights reserved. ; ; Redistribution and use in source and binary forms, with or without ; modification, are permitted provided that the following conditions are ; met: ; ; * Redistributions of source code must retain the above copyright ; notice, this list of conditions and the following disclaimer. ; ; * Redistributions in binary form must reproduce the above copyright ; notice, this list of conditions and the following disclaimer in the ; documentation and/or other materials provided with the ; distribution. ; ; * Neither the name of the Intel Corporation nor the names of its ; contributors may be used to endorse or promote products derived from ; this software without specific prior written permission. ; ; ; THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY ; EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR ; PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR ; CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, ; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, ; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ; LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ /* * Conversion to GAS assembly and integration to libgcrypt * by Jussi Kivilinna */ #ifdef __x86_64 #include #if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && \ defined(HAVE_INTEL_SYNTAX_PLATFORM_AS) && \ defined(HAVE_GCC_INLINE_ASM_AVX) && defined(USE_SHA512) #include "asm-common-amd64.h" .intel_syntax noprefix .text /* Virtual Registers */ #define msg rdi /* ARG1 */ #define digest rsi /* ARG2 */ #define msglen rdx /* ARG3 */ #define T1 rcx #define T2 r8 #define a_64 r9 #define b_64 r10 #define c_64 r11 #define d_64 r12 #define e_64 r13 #define f_64 r14 #define g_64 r15 #define h_64 rbx #define tmp0 rax /* ; Local variables (stack frame) ; Note: frame_size must be an odd multiple of 8 bytes to XMM align RSP */ #define frame_W 0 /* Message Schedule */ #define frame_W_size (80 * 8) #define frame_WK ((frame_W) + (frame_W_size)) /* W[t] + K[t] | W[t+1] + K[t+1] */ #define frame_WK_size (2 * 8) #define frame_GPRSAVE ((frame_WK) + (frame_WK_size)) #define frame_GPRSAVE_size (5 * 8) #define frame_size ((frame_GPRSAVE) + (frame_GPRSAVE_size)) /* Useful QWORD "arrays" for simpler memory references */ #define MSG(i) msg + 8*(i) /* Input message (arg1) */ #define DIGEST(i) digest + 8*(i) /* Output Digest (arg2) */ #define K_t(i) .LK512 + 8*(i) ADD_RIP /* SHA Constants (static mem) */ #define W_t(i) rsp + frame_W + 8*(i) /* Message Schedule (stack frame) */ #define WK_2(i) rsp + frame_WK + 8*((i) % 2) /* W[t]+K[t] (stack frame) */ /* MSG, DIGEST, K_t, W_t are arrays */ /* WK_2(t) points to 1 of 2 qwords at frame.WK depdending on t being odd/even */ #define RORQ(p1, p2) \ /* shld is faster than ror on Intel Sandybridge */ \ shld p1, p1, (64 - p2) #define SHA512_Round(t, a, b, c, d, e, f, g, h) \ /* Compute Round %%t */; \ mov T1, f /* T1 = f */; \ mov tmp0, e /* tmp = e */; \ xor T1, g /* T1 = f ^ g */; \ RORQ( tmp0, 23) /* 41 ; tmp = e ror 23 */; \ and T1, e /* T1 = (f ^ g) & e */; \ xor tmp0, e /* tmp = (e ror 23) ^ e */; \ xor T1, g /* T1 = ((f ^ g) & e) ^ g = CH(e,f,g) */; \ add T1, [WK_2(t)] /* W[t] + K[t] from message scheduler */; \ RORQ( tmp0, 4) /* 18 ; tmp = ((e ror 23) ^ e) ror 4 */; \ xor tmp0, e /* tmp = (((e ror 23) ^ e) ror 4) ^ e */; \ mov T2, a /* T2 = a */; \ add T1, h /* T1 = CH(e,f,g) + W[t] + K[t] + h */; \ RORQ( tmp0, 14) /* 14 ; tmp = ((((e ror23)^e)ror4)^e)ror14 = S1(e) */; \ add T1, tmp0 /* T1 = CH(e,f,g) + W[t] + K[t] + S1(e) */; \ mov tmp0, a /* tmp = a */; \ xor T2, c /* T2 = a ^ c */; \ and tmp0, c /* tmp = a & c */; \ and T2, b /* T2 = (a ^ c) & b */; \ xor T2, tmp0 /* T2 = ((a ^ c) & b) ^ (a & c) = Maj(a,b,c) */; \ mov tmp0, a /* tmp = a */; \ RORQ( tmp0, 5) /* 39 ; tmp = a ror 5 */; \ xor tmp0, a /* tmp = (a ror 5) ^ a */; \ add d, T1 /* e(next_state) = d + T1 */; \ RORQ( tmp0, 6) /* 34 ; tmp = ((a ror 5) ^ a) ror 6 */; \ xor tmp0, a /* tmp = (((a ror 5) ^ a) ror 6) ^ a */; \ lea h, [T1 + T2] /* a(next_state) = T1 + Maj(a,b,c) */; \ RORQ( tmp0, 28) /* 28 ; tmp = ((((a ror5)^a)ror6)^a)ror28 = S0(a) */; \ add h, tmp0 /* a(next_state) = T1 + Maj(a,b,c) S0(a) */ #define SHA512_2Sched_2Round_avx_PART1(t, a, b, c, d, e, f, g, h) \ /* \ ; Compute rounds %%t-2 and %%t-1 \ ; Compute message schedule QWORDS %%t and %%t+1 \ ; \ ; Two rounds are computed based on the values for K[t-2]+W[t-2] and \ ; K[t-1]+W[t-1] which were previously stored at WK_2 by the message \ ; scheduler. 
\ ; The two new schedule QWORDS are stored at [W_t(%%t)] and [W_t(%%t+1)]. \ ; They are then added to their respective SHA512 constants at \ ; [K_t(%%t)] and [K_t(%%t+1)] and stored at dqword [WK_2(%%t)] \ ; For brievity, the comments following vectored instructions only refer to \ ; the first of a pair of QWORDS. \ ; Eg. XMM4=W[t-2] really means XMM4={W[t-2]|W[t-1]} \ ; The computation of the message schedule and the rounds are tightly \ ; stitched to take advantage of instruction-level parallelism. \ ; For clarity, integer instructions (for the rounds calculation) are indented \ ; by one tab. Vectored instructions (for the message scheduler) are indented \ ; by two tabs. \ */ \ \ vmovdqa xmm4, [W_t(t-2)] /* XMM4 = W[t-2] */; \ vmovdqu xmm5, [W_t(t-15)] /* XMM5 = W[t-15] */; \ mov T1, f; \ vpsrlq xmm0, xmm4, 61 /* XMM0 = W[t-2]>>61 */; \ mov tmp0, e; \ vpsrlq xmm6, xmm5, 1 /* XMM6 = W[t-15]>>1 */; \ xor T1, g; \ RORQ( tmp0, 23) /* 41 */; \ vpsrlq xmm1, xmm4, 19 /* XMM1 = W[t-2]>>19 */; \ and T1, e; \ xor tmp0, e; \ vpxor xmm0, xmm0, xmm1 /* XMM0 = W[t-2]>>61 ^ W[t-2]>>19 */; \ xor T1, g; \ add T1, [WK_2(t)]; \ vpsrlq xmm7, xmm5, 8 /* XMM7 = W[t-15]>>8 */; \ RORQ( tmp0, 4) /* 18 */; \ vpsrlq xmm2, xmm4, 6 /* XMM2 = W[t-2]>>6 */; \ xor tmp0, e; \ mov T2, a; \ add T1, h; \ vpxor xmm6, xmm6, xmm7 /* XMM6 = W[t-15]>>1 ^ W[t-15]>>8 */; \ RORQ( tmp0, 14) /* 14 */; \ add T1, tmp0; \ vpsrlq xmm8, xmm5, 7 /* XMM8 = W[t-15]>>7 */; \ mov tmp0, a; \ xor T2, c; \ vpsllq xmm3, xmm4, (64-61) /* XMM3 = W[t-2]<<3 */; \ and tmp0, c; \ and T2, b; \ vpxor xmm2, xmm2, xmm3 /* XMM2 = W[t-2]>>6 ^ W[t-2]<<3 */; \ xor T2, tmp0; \ mov tmp0, a; \ vpsllq xmm9, xmm5, (64-1) /* XMM9 = W[t-15]<<63 */; \ RORQ( tmp0, 5) /* 39 */; \ vpxor xmm8, xmm8, xmm9 /* XMM8 = W[t-15]>>7 ^ W[t-15]<<63 */; \ xor tmp0, a; \ add d, T1; \ RORQ( tmp0, 6) /* 34 */; \ xor tmp0, a; \ vpxor xmm6, xmm6, xmm8 /* XMM6 = W[t-15]>>1 ^ W[t-15]>>8 ^ W[t-15]>>7 ^ W[t-15]<<63 */; \ lea h, [T1 + T2]; \ RORQ( tmp0, 28) /* 28 */; \ vpsllq xmm4, xmm4, (64-19) /* XMM4 = W[t-2]<<25 */; \ add h, tmp0 #define SHA512_2Sched_2Round_avx_PART2(t, a, b, c, d, e, f, g, h) \ vpxor xmm0, xmm0, xmm4 /* XMM0 = W[t-2]>>61 ^ W[t-2]>>19 ^ W[t-2]<<25 */; \ mov T1, f; \ vpxor xmm0, xmm0, xmm2 /* XMM0 = s1(W[t-2]) */; \ mov tmp0, e; \ xor T1, g; \ vpaddq xmm0, xmm0, [W_t(t-16)] /* XMM0 = s1(W[t-2]) + W[t-16] */; \ vmovdqu xmm1, [W_t(t- 7)] /* XMM1 = W[t-7] */; \ RORQ( tmp0, 23) /* 41 */; \ and T1, e; \ xor tmp0, e; \ xor T1, g; \ vpsllq xmm5, xmm5, (64-8) /* XMM5 = W[t-15]<<56 */; \ add T1, [WK_2(t+1)]; \ vpxor xmm6, xmm6, xmm5 /* XMM6 = s0(W[t-15]) */; \ RORQ( tmp0, 4) /* 18 */; \ vpaddq xmm0, xmm0, xmm6 /* XMM0 = s1(W[t-2]) + W[t-16] + s0(W[t-15]) */; \ xor tmp0, e; \ vpaddq xmm0, xmm0, xmm1 /* XMM0 = W[t] = s1(W[t-2]) + W[t-7] + s0(W[t-15]) + W[t-16] */; \ mov T2, a; \ add T1, h; \ RORQ( tmp0, 14) /* 14 */; \ add T1, tmp0; \ vmovdqa [W_t(t)], xmm0 /* Store W[t] */; \ vpaddq xmm0, xmm0, [K_t(t)] /* Compute W[t]+K[t] */; \ vmovdqa [WK_2(t)], xmm0 /* Store W[t]+K[t] for next rounds */; \ mov tmp0, a; \ xor T2, c; \ and tmp0, c; \ and T2, b; \ xor T2, tmp0; \ mov tmp0, a; \ RORQ( tmp0, 5) /* 39 */; \ xor tmp0, a; \ add d, T1; \ RORQ( tmp0, 6) /* 34 */; \ xor tmp0, a; \ lea h, [T1 + T2]; \ RORQ( tmp0, 28) /* 28 */; \ add h, tmp0 #define SHA512_2Sched_2Round_avx(t, a, b, c, d, e, f, g, h) \ SHA512_2Sched_2Round_avx_PART1(t, a, b, c, d, e, f, g, h); \ SHA512_2Sched_2Round_avx_PART2(t, h, a, b, c, d, e, f, g) /* 
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; void sha512_avx(const void* M, void* D, uint64_t L); ; Purpose: Updates the SHA512 digest stored at D with the message stored in M. ; The size of the message pointed to by M must be an integer multiple of SHA512 ; message blocks. ; L is the message length in SHA512 blocks */ .globl _gcry_sha512_transform_amd64_avx ELF(.type _gcry_sha512_transform_amd64_avx,@function;) .align 16 _gcry_sha512_transform_amd64_avx: CFI_STARTPROC() xor eax, eax cmp msglen, 0 je .Lnowork vzeroupper /* Allocate Stack Space */ sub rsp, frame_size CFI_ADJUST_CFA_OFFSET(frame_size); /* Save GPRs */ mov [rsp + frame_GPRSAVE + 8 * 0], rbx mov [rsp + frame_GPRSAVE + 8 * 1], r12 mov [rsp + frame_GPRSAVE + 8 * 2], r13 mov [rsp + frame_GPRSAVE + 8 * 3], r14 mov [rsp + frame_GPRSAVE + 8 * 4], r15 CFI_REL_OFFSET(rbx, frame_GPRSAVE + 8 * 0); CFI_REL_OFFSET(r12, frame_GPRSAVE + 8 * 1); CFI_REL_OFFSET(r13, frame_GPRSAVE + 8 * 2); CFI_REL_OFFSET(r14, frame_GPRSAVE + 8 * 3); CFI_REL_OFFSET(r15, frame_GPRSAVE + 8 * 4); .Lupdateblock: /* Load state variables */ mov a_64, [DIGEST(0)] mov b_64, [DIGEST(1)] mov c_64, [DIGEST(2)] mov d_64, [DIGEST(3)] mov e_64, [DIGEST(4)] mov f_64, [DIGEST(5)] mov g_64, [DIGEST(6)] mov h_64, [DIGEST(7)] /* BSWAP 2 QWORDS */ vmovdqa xmm1, [.LXMM_QWORD_BSWAP ADD_RIP] vmovdqu xmm0, [MSG(0)] vpshufb xmm0, xmm0, xmm1 /* BSWAP */ vmovdqa [W_t(0)], xmm0 /* Store Scheduled Pair */ vpaddq xmm0, xmm0, [K_t(0)] /* Compute W[t]+K[t] */ vmovdqa [WK_2(0)], xmm0 /* Store into WK for rounds */ #define T_2_14(t, a, b, c, d, e, f, g, h) \ /* BSWAP 2 QWORDS, Compute 2 Rounds */; \ vmovdqu xmm0, [MSG(t)]; \ vpshufb xmm0, xmm0, xmm1 /* BSWAP */; \ SHA512_Round(((t) - 2), a##_64, b##_64, c##_64, d##_64, \ e##_64, f##_64, g##_64, h##_64); \ vmovdqa [W_t(t)], xmm0 /* Store Scheduled Pair */; \ vpaddq xmm0, xmm0, [K_t(t)] /* Compute W[t]+K[t] */; \ SHA512_Round(((t) - 1), h##_64, a##_64, b##_64, c##_64, \ d##_64, e##_64, f##_64, g##_64); \ vmovdqa [WK_2(t)], xmm0 /* W[t]+K[t] into WK */ #define T_16_78(t, a, b, c, d, e, f, g, h) \ SHA512_2Sched_2Round_avx((t), a##_64, b##_64, c##_64, d##_64, \ e##_64, f##_64, g##_64, h##_64) #define T_80(t, a, b, c, d, e, f, g, h) \ /* Compute 2 Rounds */; \ SHA512_Round((t - 2), a##_64, b##_64, c##_64, d##_64, \ e##_64, f##_64, g##_64, h##_64); \ SHA512_Round((t - 1), h##_64, a##_64, b##_64, c##_64, \ d##_64, e##_64, f##_64, g##_64) T_2_14(2, a, b, c, d, e, f, g, h) T_2_14(4, g, h, a, b, c, d, e, f) T_2_14(6, e, f, g, h, a, b, c, d) T_2_14(8, c, d, e, f, g, h, a, b) T_2_14(10, a, b, c, d, e, f, g, h) T_2_14(12, g, h, a, b, c, d, e, f) T_2_14(14, e, f, g, h, a, b, c, d) T_16_78(16, c, d, e, f, g, h, a, b) T_16_78(18, a, b, c, d, e, f, g, h) T_16_78(20, g, h, a, b, c, d, e, f) T_16_78(22, e, f, g, h, a, b, c, d) T_16_78(24, c, d, e, f, g, h, a, b) T_16_78(26, a, b, c, d, e, f, g, h) T_16_78(28, g, h, a, b, c, d, e, f) T_16_78(30, e, f, g, h, a, b, c, d) T_16_78(32, c, d, e, f, g, h, a, b) T_16_78(34, a, b, c, d, e, f, g, h) T_16_78(36, g, h, a, b, c, d, e, f) T_16_78(38, e, f, g, h, a, b, c, d) T_16_78(40, c, d, e, f, g, h, a, b) T_16_78(42, a, b, c, d, e, f, g, h) T_16_78(44, g, h, a, b, c, d, e, f) T_16_78(46, e, f, g, h, a, b, c, d) T_16_78(48, c, d, e, f, g, h, a, b) T_16_78(50, a, b, c, d, e, f, g, h) T_16_78(52, g, h, a, b, c, d, e, f) T_16_78(54, e, f, g, h, a, b, c, d) T_16_78(56, c, d, e, f, g, h, a, b) T_16_78(58, a, b, c, d, e, f, g, h) T_16_78(60, g, h, a, b, c, d, e, f) T_16_78(62, e, f, g, h, a, b, c, d) 
T_16_78(64, c, d, e, f, g, h, a, b) T_16_78(66, a, b, c, d, e, f, g, h) T_16_78(68, g, h, a, b, c, d, e, f) T_16_78(70, e, f, g, h, a, b, c, d) T_16_78(72, c, d, e, f, g, h, a, b) T_16_78(74, a, b, c, d, e, f, g, h) T_16_78(76, g, h, a, b, c, d, e, f) T_16_78(78, e, f, g, h, a, b, c, d) T_80(80, c, d, e, f, g, h, a, b) /* Update digest */ add [DIGEST(0)], a_64 add [DIGEST(1)], b_64 add [DIGEST(2)], c_64 add [DIGEST(3)], d_64 add [DIGEST(4)], e_64 add [DIGEST(5)], f_64 add [DIGEST(6)], g_64 add [DIGEST(7)], h_64 /* Advance to next message block */ add msg, 16*8 dec msglen jnz .Lupdateblock /* Restore GPRs */ mov rbx, [rsp + frame_GPRSAVE + 8 * 0] mov r12, [rsp + frame_GPRSAVE + 8 * 1] mov r13, [rsp + frame_GPRSAVE + 8 * 2] mov r14, [rsp + frame_GPRSAVE + 8 * 3] mov r15, [rsp + frame_GPRSAVE + 8 * 4] CFI_RESTORE(rbx) CFI_RESTORE(r12) CFI_RESTORE(r13) CFI_RESTORE(r14) CFI_RESTORE(r15) vzeroall /* Burn stack */ mov eax, 0 .Lerase_stack: vmovdqu [rsp + rax], ymm0 add eax, 32 cmp eax, frame_W_size jne .Lerase_stack vmovdqu [rsp + frame_WK], xmm0 xor eax, eax /* Restore Stack Pointer */ add rsp, frame_size CFI_ADJUST_CFA_OFFSET(-frame_size); .Lnowork: ret_spec_stop CFI_ENDPROC() /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;; Binary Data */ +SECTION_RODATA + +ELF(.type _sha512_avx_consts,@object) +_sha512_avx_consts: + .align 16 /* Mask for byte-swapping a couple of qwords in an XMM register using (v)pshufb. */ .LXMM_QWORD_BSWAP: .octa 0x08090a0b0c0d0e0f0001020304050607 /* K[t] used in SHA512 hashing */ .LK512: .quad 0x428a2f98d728ae22,0x7137449123ef65cd .quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc .quad 0x3956c25bf348b538,0x59f111f1b605d019 .quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118 .quad 0xd807aa98a3030242,0x12835b0145706fbe .quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2 .quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1 .quad 0x9bdc06a725c71235,0xc19bf174cf692694 .quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3 .quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65 .quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483 .quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5 .quad 0x983e5152ee66dfab,0xa831c66d2db43210 .quad 0xb00327c898fb213f,0xbf597fc7beef0ee4 .quad 0xc6e00bf33da88fc2,0xd5a79147930aa725 .quad 0x06ca6351e003826f,0x142929670a0e6e70 .quad 0x27b70a8546d22ffc,0x2e1b21385c26c926 .quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df .quad 0x650a73548baf63de,0x766a0abb3c77b2a8 .quad 0x81c2c92e47edaee6,0x92722c851482353b .quad 0xa2bfe8a14cf10364,0xa81a664bbc423001 .quad 0xc24b8b70d0f89791,0xc76c51a30654be30 .quad 0xd192e819d6ef5218,0xd69906245565a910 .quad 0xf40e35855771202a,0x106aa07032bbd1b8 .quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53 .quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8 .quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb .quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3 .quad 0x748f82ee5defb2fc,0x78a5636f43172f60 .quad 0x84c87814a1f0ab72,0x8cc702081a6439ec .quad 0x90befffa23631e28,0xa4506cebde82bde9 .quad 0xbef9a3f7b2c67915,0xc67178f2e372532b .quad 0xca273eceea26619c,0xd186b8c721c0c207 .quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178 .quad 0x06f067aa72176fba,0x0a637dc5a2c898a6 .quad 0x113f9804bef90dae,0x1b710b35131c471b .quad 0x28db77f523047d84,0x32caab7b40c72493 .quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c .quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a .quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817 #endif #endif diff --git a/cipher/sha512-avx2-bmi2-amd64.S b/cipher/sha512-avx2-bmi2-amd64.S index a431e196..7b60bf1d 100644 --- a/cipher/sha512-avx2-bmi2-amd64.S +++ b/cipher/sha512-avx2-bmi2-amd64.S @@ -1,502 +1,507 @@ /* 
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; Copyright (c) 2012, Intel Corporation ; ; All rights reserved. ; ; Redistribution and use in source and binary forms, with or without ; modification, are permitted provided that the following conditions are ; met: ; ; * Redistributions of source code must retain the above copyright ; notice, this list of conditions and the following disclaimer. ; ; * Redistributions in binary form must reproduce the above copyright ; notice, this list of conditions and the following disclaimer in the ; documentation and/or other materials provided with the ; distribution. ; ; * Neither the name of the Intel Corporation nor the names of its ; contributors may be used to endorse or promote products derived from ; this software without specific prior written permission. ; ; ; THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY ; EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR ; PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR ; CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, ; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, ; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ; LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; This code schedules 1 blocks at a time, with 4 lanes per block ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ /* * Conversion to GAS assembly and integration to libgcrypt * by Jussi Kivilinna */ #ifdef __x86_64 #include #if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && \ defined(HAVE_INTEL_SYNTAX_PLATFORM_AS) && \ defined(HAVE_GCC_INLINE_ASM_AVX2) && defined(HAVE_GCC_INLINE_ASM_BMI2) && \ defined(USE_SHA512) #include "asm-common-amd64.h" .intel_syntax noprefix .text /* Virtual Registers */ #define Y_0 ymm4 #define Y_1 ymm5 #define Y_2 ymm6 #define Y_3 ymm7 #define YTMP0 ymm0 #define YTMP1 ymm1 #define YTMP2 ymm2 #define YTMP3 ymm3 #define YTMP4 ymm8 #define XFER YTMP0 #define BYTE_FLIP_MASK ymm9 #define MASK_YMM_LO ymm10 #define MASK_YMM_LOx xmm10 #define INP rdi /* 1st arg */ #define CTX rsi /* 2nd arg */ #define NUM_BLKS rdx /* 3rd arg */ #define c rcx #define d r8 #define e rdx #define y3 rdi #define TBL rbp #define a rax #define b rbx #define f r9 #define g r10 #define h r11 #define T1 r12 #define y0 r13 #define y1 r14 #define y2 r15 #define y4 r12 /* Local variables (stack frame) */ #define frame_XFER 0 #define frame_XFER_size (4*4*8) #define frame_SRND (frame_XFER + frame_XFER_size) #define frame_SRND_size (1*8) #define frame_INP (frame_SRND + frame_SRND_size) #define frame_INP_size (1*8) #define frame_NBLKS (frame_INP + frame_INP_size) #define frame_NBLKS_size (1*8) #define frame_RSPSAVE (frame_NBLKS + frame_NBLKS_size) #define frame_RSPSAVE_size (1*8) #define frame_GPRSAVE (frame_RSPSAVE + frame_RSPSAVE_size) #define frame_GPRSAVE_size (6*8) #define frame_size (frame_GPRSAVE + frame_GPRSAVE_size) #define VMOVDQ vmovdqu /*; assume buffers not aligned */ /* addm [mem], reg */ /* Add reg to mem using reg-mem add and store */ #define addm(p1, p2) \ add p2, p1; \ 
mov p1, p2; /* COPY_YMM_AND_BSWAP ymm, [mem], byte_flip_mask */ /* Load ymm with mem and byte swap each dword */ #define COPY_YMM_AND_BSWAP(p1, p2, p3) \ VMOVDQ p1, p2; \ vpshufb p1, p1, p3 /* %macro MY_VPALIGNR YDST, YSRC1, YSRC2, RVAL */ /* YDST = {YSRC1, YSRC2} >> RVAL*8 */ #define MY_VPALIGNR(YDST, YSRC1, YSRC2, RVAL) \ vperm2i128 YDST, YSRC1, YSRC2, 0x3 /* YDST = {YS1_LO, YS2_HI} */; \ vpalignr YDST, YDST, YSRC2, RVAL /* YDST = {YDS1, YS2} >> RVAL*8 */ #define ONE_ROUND_PART1(XFERIN, a, b, c, d, e, f, g, h) \ /* h += Sum1 (e) + Ch (e, f, g) + (k[t] + w[0]); \ * d += h; \ * h += Sum0 (a) + Maj (a, b, c); \ * \ * Ch(x, y, z) => ((x & y) + (~x & z)) \ * Maj(x, y, z) => ((x & y) + (z & (x ^ y))) \ */ \ \ mov y3, e; \ add h, [XFERIN]; \ and y3, f; \ rorx y0, e, 41; \ rorx y1, e, 18; \ lea h, [h + y3]; \ andn y3, e, g; \ rorx T1, a, 34; \ xor y0, y1; \ lea h, [h + y3] #define ONE_ROUND_PART2(a, b, c, d, e, f, g, h) \ rorx y2, a, 39; \ rorx y1, e, 14; \ mov y3, a; \ xor T1, y2; \ xor y0, y1; \ xor y3, b; \ lea h, [h + y0]; \ mov y0, a; \ rorx y2, a, 28; \ add d, h; \ and y3, c; \ xor T1, y2; \ lea h, [h + y3]; \ lea h, [h + T1]; \ and y0, b; \ lea h, [h + y0] #define ONE_ROUND(XFERIN, a, b, c, d, e, f, g, h) \ ONE_ROUND_PART1(XFERIN, a, b, c, d, e, f, g, h); \ ONE_ROUND_PART2(a, b, c, d, e, f, g, h) #define FOUR_ROUNDS_AND_SCHED(X, Y_0, Y_1, Y_2, Y_3, a, b, c, d, e, f, g, h) \ /*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 0 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */; \ /* Extract w[t-7] */; \ MY_VPALIGNR( YTMP0, Y_3, Y_2, 8) /* YTMP0 = W[-7] */; \ /* Calculate w[t-16] + w[t-7] */; \ vpaddq YTMP0, YTMP0, Y_0 /* YTMP0 = W[-7] + W[-16] */; \ /* Extract w[t-15] */; \ MY_VPALIGNR( YTMP1, Y_1, Y_0, 8) /* YTMP1 = W[-15] */; \ \ /* Calculate sigma0 */; \ \ /* Calculate w[t-15] ror 1 */; \ vpsrlq YTMP2, YTMP1, 1; \ vpsllq YTMP3, YTMP1, (64-1); \ vpor YTMP3, YTMP3, YTMP2 /* YTMP3 = W[-15] ror 1 */; \ /* Calculate w[t-15] shr 7 */; \ vpsrlq YTMP4, YTMP1, 7 /* YTMP4 = W[-15] >> 7 */; \ \ ONE_ROUND(rsp+frame_XFER+0*8+X*32, a, b, c, d, e, f, g, h); \ \ /*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 1 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */; \ /* Calculate w[t-15] ror 8 */; \ vpsrlq YTMP2, YTMP1, 8; \ vpsllq YTMP1, YTMP1, (64-8); \ vpor YTMP1, YTMP1, YTMP2 /* YTMP1 = W[-15] ror 8 */; \ /* XOR the three components */; \ vpxor YTMP3, YTMP3, YTMP4 /* YTMP3 = W[-15] ror 1 ^ W[-15] >> 7 */; \ vpxor YTMP1, YTMP3, YTMP1 /* YTMP1 = s0 */; \ \ /* Add three components, w[t-16], w[t-7] and sigma0 */; \ vpaddq YTMP0, YTMP0, YTMP1 /* YTMP0 = W[-16] + W[-7] + s0 */; \ /* Move to appropriate lanes for calculating w[16] and w[17] */; \ vperm2i128 Y_0, YTMP0, YTMP0, 0x0 /* Y_0 = W[-16] + W[-7] + s0 {BABA} */; \ /* Move to appropriate lanes for calculating w[18] and w[19] */; \ vpand YTMP0, YTMP0, MASK_YMM_LO /* YTMP0 = W[-16] + W[-7] + s0 {DC00} */; \ \ /* Calculate w[16] and w[17] in both 128 bit lanes */; \ \ /* Calculate sigma1 for w[16] and w[17] on both 128 bit lanes */; \ vperm2i128 YTMP2, Y_3, Y_3, 0x11 /* YTMP2 = W[-2] {BABA} */; \ vpsrlq YTMP4, YTMP2, 6 /* YTMP4 = W[-2] >> 6 {BABA} */; \ \ ONE_ROUND(rsp+frame_XFER+1*8+X*32, h, a, b, c, d, e, f, g); \ \ /*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 2 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */; \ vpsrlq YTMP3, YTMP2, 19 /* YTMP3 = W[-2] >> 19 {BABA} */; \ vpsllq YTMP1, YTMP2, (64-19) /* YTMP1 = W[-2] << 19 {BABA} */; \ vpor YTMP3, YTMP3, YTMP1 /* YTMP3 = W[-2] ror 19 {BABA} */; \ vpxor YTMP4, YTMP4, YTMP3 /* YTMP4 = W[-2] ror 19 ^ W[-2] >> 6 {BABA} */; \ vpsrlq 
YTMP3, YTMP2, 61 /* YTMP3 = W[-2] >> 61 {BABA} */; \ vpsllq YTMP1, YTMP2, (64-61) /* YTMP1 = W[-2] << 61 {BABA} */; \ vpor YTMP3, YTMP3, YTMP1 /* YTMP3 = W[-2] ror 61 {BABA} */; \ vpxor YTMP4, YTMP4, YTMP3 /* YTMP4 = s1 = (W[-2] ror 19) ^ (W[-2] ror 61) ^ (W[-2] >> 6) {BABA} */; \ \ /* Add sigma1 to the other compunents to get w[16] and w[17] */; \ vpaddq Y_0, Y_0, YTMP4 /* Y_0 = {W[1], W[0], W[1], W[0]} */; \ \ /* Calculate sigma1 for w[18] and w[19] for upper 128 bit lane */; \ vpsrlq YTMP4, Y_0, 6 /* YTMP4 = W[-2] >> 6 {DC--} */; \ \ ONE_ROUND(rsp+frame_XFER+2*8+X*32, g, h, a, b, c, d, e, f); \ \ /*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 3 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */; \ vpsrlq YTMP3, Y_0, 19 /* YTMP3 = W[-2] >> 19 {DC--} */; \ vpsllq YTMP1, Y_0, (64-19) /* YTMP1 = W[-2] << 19 {DC--} */; \ vpor YTMP3, YTMP3, YTMP1 /* YTMP3 = W[-2] ror 19 {DC--} */; \ vpxor YTMP4, YTMP4, YTMP3 /* YTMP4 = W[-2] ror 19 ^ W[-2] >> 6 {DC--} */; \ vpsrlq YTMP3, Y_0, 61 /* YTMP3 = W[-2] >> 61 {DC--} */; \ vpsllq YTMP1, Y_0, (64-61) /* YTMP1 = W[-2] << 61 {DC--} */; \ vpor YTMP3, YTMP3, YTMP1 /* YTMP3 = W[-2] ror 61 {DC--} */; \ vpxor YTMP4, YTMP4, YTMP3 /* YTMP4 = s1 = (W[-2] ror 19) ^ (W[-2] ror 61) ^ (W[-2] >> 6) {DC--} */; \ \ /* Add the sigma0 + w[t-7] + w[t-16] for w[18] and w[19] to newly calculated sigma1 to get w[18] and w[19] */; \ vpaddq YTMP2, YTMP0, YTMP4 /* YTMP2 = {W[3], W[2], --, --} */; \ \ /* Form w[19, w[18], w17], w[16] */; \ vpblendd Y_0, Y_0, YTMP2, 0xF0 /* Y_0 = {W[3], W[2], W[1], W[0]} */; \ \ ONE_ROUND_PART1(rsp+frame_XFER+3*8+X*32, f, g, h, a, b, c, d, e); \ vpaddq XFER, Y_0, [TBL + (4+X)*32]; \ vmovdqa [rsp + frame_XFER + X*32], XFER; \ ONE_ROUND_PART2(f, g, h, a, b, c, d, e) #define DO_4ROUNDS(X, a, b, c, d, e, f, g, h) \ ONE_ROUND(rsp+frame_XFER+0*8+X*32, a, b, c, d, e, f, g, h); \ ONE_ROUND(rsp+frame_XFER+1*8+X*32, h, a, b, c, d, e, f, g); \ ONE_ROUND(rsp+frame_XFER+2*8+X*32, g, h, a, b, c, d, e, f); \ ONE_ROUND(rsp+frame_XFER+3*8+X*32, f, g, h, a, b, c, d, e) /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; void sha512_rorx(const void* M, void* D, uint64_t L); ; Purpose: Updates the SHA512 digest stored at D with the message stored in M. ; The size of the message pointed to by M must be an integer multiple of SHA512 ; message blocks. 
; L is the message length in SHA512 blocks */ .globl _gcry_sha512_transform_amd64_avx2 ELF(.type _gcry_sha512_transform_amd64_avx2,@function;) .align 16 _gcry_sha512_transform_amd64_avx2: CFI_STARTPROC() xor eax, eax cmp rdx, 0 je .Lnowork vzeroupper /* Allocate Stack Space */ mov rax, rsp CFI_DEF_CFA_REGISTER(rax); sub rsp, frame_size and rsp, ~(0x40 - 1) mov [rsp + frame_RSPSAVE], rax CFI_CFA_ON_STACK(frame_RSPSAVE, 0) /* Save GPRs */ mov [rsp + frame_GPRSAVE + 8 * 0], rbp mov [rsp + frame_GPRSAVE + 8 * 1], rbx mov [rsp + frame_GPRSAVE + 8 * 2], r12 mov [rsp + frame_GPRSAVE + 8 * 3], r13 mov [rsp + frame_GPRSAVE + 8 * 4], r14 mov [rsp + frame_GPRSAVE + 8 * 5], r15 CFI_REG_ON_STACK(rbp, frame_GPRSAVE + 8 * 0) CFI_REG_ON_STACK(rbx, frame_GPRSAVE + 8 * 1) CFI_REG_ON_STACK(r12, frame_GPRSAVE + 8 * 2) CFI_REG_ON_STACK(r13, frame_GPRSAVE + 8 * 3) CFI_REG_ON_STACK(r14, frame_GPRSAVE + 8 * 4) CFI_REG_ON_STACK(r15, frame_GPRSAVE + 8 * 5) mov [rsp + frame_NBLKS], NUM_BLKS /*; load initial digest */ mov a,[8*0 + CTX] mov b,[8*1 + CTX] mov c,[8*2 + CTX] mov d,[8*3 + CTX] mov e,[8*4 + CTX] mov f,[8*5 + CTX] mov g,[8*6 + CTX] mov h,[8*7 + CTX] vmovdqa BYTE_FLIP_MASK, [.LPSHUFFLE_BYTE_FLIP_MASK ADD_RIP] vmovdqa MASK_YMM_LO, [.LMASK_YMM_LO ADD_RIP] lea TBL,[.LK512 ADD_RIP] /*; byte swap first 16 dwords */ COPY_YMM_AND_BSWAP(Y_0, [INP + 0*32], BYTE_FLIP_MASK) COPY_YMM_AND_BSWAP(Y_1, [INP + 1*32], BYTE_FLIP_MASK) COPY_YMM_AND_BSWAP(Y_2, [INP + 2*32], BYTE_FLIP_MASK) COPY_YMM_AND_BSWAP(Y_3, [INP + 3*32], BYTE_FLIP_MASK) add INP, 128 mov [rsp + frame_INP], INP vpaddq XFER, Y_0, [TBL + 0*32] vmovdqa [rsp + frame_XFER + 0*32], XFER vpaddq XFER, Y_1, [TBL + 1*32] vmovdqa [rsp + frame_XFER + 1*32], XFER vpaddq XFER, Y_2, [TBL + 2*32] vmovdqa [rsp + frame_XFER + 2*32], XFER vpaddq XFER, Y_3, [TBL + 3*32] vmovdqa [rsp + frame_XFER + 3*32], XFER /*; schedule 64 input dwords, by doing 12 rounds of 4 each */ mov qword ptr [rsp + frame_SRND], 4 .align 16 .Loop0: FOUR_ROUNDS_AND_SCHED(0, Y_0, Y_1, Y_2, Y_3, a, b, c, d, e, f, g, h) FOUR_ROUNDS_AND_SCHED(1, Y_1, Y_2, Y_3, Y_0, e, f, g, h, a, b, c, d) FOUR_ROUNDS_AND_SCHED(2, Y_2, Y_3, Y_0, Y_1, a, b, c, d, e, f, g, h) FOUR_ROUNDS_AND_SCHED(3, Y_3, Y_0, Y_1, Y_2, e, f, g, h, a, b, c, d) add TBL, 4*32 sub qword ptr [rsp + frame_SRND], 1 jne .Loop0 sub qword ptr [rsp + frame_NBLKS], 1 je .Ldone_hash mov INP, [rsp + frame_INP] lea TBL,[.LK512 ADD_RIP] /* load next block and byte swap */ COPY_YMM_AND_BSWAP(Y_0, [INP + 0*32], BYTE_FLIP_MASK) COPY_YMM_AND_BSWAP(Y_1, [INP + 1*32], BYTE_FLIP_MASK) COPY_YMM_AND_BSWAP(Y_2, [INP + 2*32], BYTE_FLIP_MASK) COPY_YMM_AND_BSWAP(Y_3, [INP + 3*32], BYTE_FLIP_MASK) add INP, 128 mov [rsp + frame_INP], INP DO_4ROUNDS(0, a, b, c, d, e, f, g, h) vpaddq XFER, Y_0, [TBL + 0*32] vmovdqa [rsp + frame_XFER + 0*32], XFER DO_4ROUNDS(1, e, f, g, h, a, b, c, d) vpaddq XFER, Y_1, [TBL + 1*32] vmovdqa [rsp + frame_XFER + 1*32], XFER DO_4ROUNDS(2, a, b, c, d, e, f, g, h) vpaddq XFER, Y_2, [TBL + 2*32] vmovdqa [rsp + frame_XFER + 2*32], XFER DO_4ROUNDS(3, e, f, g, h, a, b, c, d) vpaddq XFER, Y_3, [TBL + 3*32] vmovdqa [rsp + frame_XFER + 3*32], XFER addm([8*0 + CTX],a) addm([8*1 + CTX],b) addm([8*2 + CTX],c) addm([8*3 + CTX],d) addm([8*4 + CTX],e) addm([8*5 + CTX],f) addm([8*6 + CTX],g) addm([8*7 + CTX],h) /*; schedule 64 input dwords, by doing 12 rounds of 4 each */ mov qword ptr [rsp + frame_SRND],4 jmp .Loop0 .Ldone_hash: vzeroall DO_4ROUNDS(0, a, b, c, d, e, f, g, h) vmovdqa [rsp + frame_XFER + 0*32], ymm0 /* burn stack */ DO_4ROUNDS(1, e, f, g, h, a, b, 
c, d) vmovdqa [rsp + frame_XFER + 1*32], ymm0 /* burn stack */ DO_4ROUNDS(2, a, b, c, d, e, f, g, h) vmovdqa [rsp + frame_XFER + 2*32], ymm0 /* burn stack */ DO_4ROUNDS(3, e, f, g, h, a, b, c, d) vmovdqa [rsp + frame_XFER + 3*32], ymm0 /* burn stack */ addm([8*0 + CTX],a) xor eax, eax /* burn stack */ addm([8*1 + CTX],b) addm([8*2 + CTX],c) addm([8*3 + CTX],d) addm([8*4 + CTX],e) addm([8*5 + CTX],f) addm([8*6 + CTX],g) addm([8*7 + CTX],h) /* Restore GPRs */ mov rbp, [rsp + frame_GPRSAVE + 8 * 0] mov rbx, [rsp + frame_GPRSAVE + 8 * 1] mov r12, [rsp + frame_GPRSAVE + 8 * 2] mov r13, [rsp + frame_GPRSAVE + 8 * 3] mov r14, [rsp + frame_GPRSAVE + 8 * 4] mov r15, [rsp + frame_GPRSAVE + 8 * 5] CFI_RESTORE(rbp) CFI_RESTORE(rbx) CFI_RESTORE(r12) CFI_RESTORE(r13) CFI_RESTORE(r14) CFI_RESTORE(r15) /* Restore Stack Pointer */ mov rsp, [rsp + frame_RSPSAVE] CFI_DEF_CFA_REGISTER(rsp) .Lnowork: ret_spec_stop CFI_ENDPROC() /*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ /*;; Binary Data */ +SECTION_RODATA + +ELF(.type _sha512_avx2_consts,@object) +_sha512_avx2_consts: + .align 64 /* K[t] used in SHA512 hashing */ .LK512: .quad 0x428a2f98d728ae22,0x7137449123ef65cd .quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc .quad 0x3956c25bf348b538,0x59f111f1b605d019 .quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118 .quad 0xd807aa98a3030242,0x12835b0145706fbe .quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2 .quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1 .quad 0x9bdc06a725c71235,0xc19bf174cf692694 .quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3 .quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65 .quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483 .quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5 .quad 0x983e5152ee66dfab,0xa831c66d2db43210 .quad 0xb00327c898fb213f,0xbf597fc7beef0ee4 .quad 0xc6e00bf33da88fc2,0xd5a79147930aa725 .quad 0x06ca6351e003826f,0x142929670a0e6e70 .quad 0x27b70a8546d22ffc,0x2e1b21385c26c926 .quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df .quad 0x650a73548baf63de,0x766a0abb3c77b2a8 .quad 0x81c2c92e47edaee6,0x92722c851482353b .quad 0xa2bfe8a14cf10364,0xa81a664bbc423001 .quad 0xc24b8b70d0f89791,0xc76c51a30654be30 .quad 0xd192e819d6ef5218,0xd69906245565a910 .quad 0xf40e35855771202a,0x106aa07032bbd1b8 .quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53 .quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8 .quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb .quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3 .quad 0x748f82ee5defb2fc,0x78a5636f43172f60 .quad 0x84c87814a1f0ab72,0x8cc702081a6439ec .quad 0x90befffa23631e28,0xa4506cebde82bde9 .quad 0xbef9a3f7b2c67915,0xc67178f2e372532b .quad 0xca273eceea26619c,0xd186b8c721c0c207 .quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178 .quad 0x06f067aa72176fba,0x0a637dc5a2c898a6 .quad 0x113f9804bef90dae,0x1b710b35131c471b .quad 0x28db77f523047d84,0x32caab7b40c72493 .quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c .quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a .quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817 .align 32 /* Mask for byte-swapping a couple of qwords in an XMM register using (v)pshufb. 
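For reference, the rotation counts used in this file (41/18/14 and 39/34/28 in ONE_ROUND_PART1/PART2, and the 19/61/6 and 1/8/7 shift pairs in FOUR_ROUNDS_AND_SCHED) are the standard FIPS 180-4 SHA-512 functions. A minimal scalar sketch in C, not libgcrypt code (ror64 is an illustrative helper):

  #include <stdint.h>

  static inline uint64_t ror64(uint64_t x, unsigned r)
  {
    return (x >> r) | (x << (64 - r));
  }

  /* Big Sigma functions used in every round. */
  #define Sum0(a) (ror64((a), 28) ^ ror64((a), 34) ^ ror64((a), 39))
  #define Sum1(e) (ror64((e), 14) ^ ror64((e), 18) ^ ror64((e), 41))

  /* Small sigma functions used by the message schedule,
   *   W[t] = sigma1(W[t-2]) + W[t-7] + sigma0(W[t-15]) + W[t-16],
   * which the vector code above evaluates two 64-bit words per lane. */
  #define sigma0(x) (ror64((x), 1) ^ ror64((x), 8) ^ ((x) >> 7))
  #define sigma1(x) (ror64((x), 19) ^ ror64((x), 61) ^ ((x) >> 6))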
*/ .LPSHUFFLE_BYTE_FLIP_MASK: .octa 0x08090a0b0c0d0e0f0001020304050607 .octa 0x18191a1b1c1d1e1f1011121314151617 .LMASK_YMM_LO: .octa 0x00000000000000000000000000000000 .octa 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF #endif #endif diff --git a/cipher/sha512-avx512-amd64.S b/cipher/sha512-avx512-amd64.S index 431fb3e9..61c72e5d 100644 --- a/cipher/sha512-avx512-amd64.S +++ b/cipher/sha512-avx512-amd64.S @@ -1,463 +1,465 @@ /* sha512-avx512-amd64.c - amd64/AVX512 implementation of SHA-512 transform * Copyright (C) 2022 Jussi Kivilinna * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . */ /* * Based on implementation from file "sha512-avx2-bmi2-amd64.S": ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; Copyright (c) 2012, Intel Corporation ; ; All rights reserved. ; ; Redistribution and use in source and binary forms, with or without ; modification, are permitted provided that the following conditions are ; met: ; ; * Redistributions of source code must retain the above copyright ; notice, this list of conditions and the following disclaimer. ; ; * Redistributions in binary form must reproduce the above copyright ; notice, this list of conditions and the following disclaimer in the ; documentation and/or other materials provided with the ; distribution. ; ; * Neither the name of the Intel Corporation nor the names of its ; contributors may be used to endorse or promote products derived from ; this software without specific prior written permission. ; ; ; THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY ; EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR ; PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR ; CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, ; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, ; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ; LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; This code schedules 1 blocks at a time, with 4 lanes per block ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ #ifdef __x86_64 #include #if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && \ defined(HAVE_INTEL_SYNTAX_PLATFORM_AS) && \ defined(HAVE_GCC_INLINE_ASM_AVX512) && \ defined(USE_SHA512) #include "asm-common-amd64.h" .intel_syntax noprefix .text /* Virtual Registers */ #define Y_0 ymm0 #define Y_1 ymm1 #define Y_2 ymm2 #define Y_3 ymm3 #define YTMP0 ymm4 #define YTMP1 ymm5 #define YTMP2 ymm6 #define YTMP3 ymm7 #define YTMP4 ymm8 #define XFER YTMP0 #define BYTE_FLIP_MASK ymm9 #define PERM_VPALIGNR_8 ymm10 #define MASK_DC_00 k1 #define INP rdi /* 1st arg */ #define CTX rsi /* 2nd arg */ #define NUM_BLKS rdx /* 3rd arg */ #define SRND r8d #define RSP_SAVE r9 #define TBL rcx #define a xmm11 #define b xmm12 #define c xmm13 #define d xmm14 #define e xmm15 #define f xmm16 #define g xmm17 #define h xmm18 #define y0 xmm19 #define y1 xmm20 #define y2 xmm21 #define y3 xmm22 /* Local variables (stack frame) */ #define frame_XFER 0 #define frame_XFER_size (4*4*8) #define frame_size (frame_XFER + frame_XFER_size) #define clear_reg(x) vpxorq x,x,x /* addm [mem], reg */ /* Add reg to mem using reg-mem add and store */ #define addm(p1, p2) \ vmovq y0, p1; \ vpaddq p2, p2, y0; \ vmovq p1, p2; /* COPY_YMM_AND_BSWAP ymm, [mem], byte_flip_mask */ /* Load ymm with mem and byte swap each dword */ #define COPY_YMM_AND_BSWAP(p1, p2, p3) \ vmovdqu p1, p2; \ vpshufb p1, p1, p3 /* %macro MY_VPALIGNR YDST, YSRC1, YSRC2, RVAL */ /* YDST = {YSRC1, YSRC2} >> RVAL*8 */ #define MY_VPALIGNR(YDST_SRC1, YSRC2, RVAL) \ vpermt2q YDST_SRC1, PERM_VPALIGNR_##RVAL, YSRC2; #define ONE_ROUND_PART1(XFERIN, a, b, c, d, e, f, g, h) \ /* h += Sum1 (e) + Ch (e, f, g) + (k[t] + w[0]); \ * d += h; \ * h += Sum0 (a) + Maj (a, b, c); \ * \ * Ch(x, y, z) => ((x & y) + (~x & z)) \ * Maj(x, y, z) => ((x & y) + (z & (x ^ y))) \ */ \ \ vmovq y3, [XFERIN]; \ vmovdqa64 y2, e; \ vpaddq h, h, y3; \ vprorq y0, e, 41; \ vpternlogq y2, f, g, 0xca; /* Ch (e, f, g) */ \ vprorq y1, e, 18; \ vprorq y3, e, 14; \ vpaddq h, h, y2; \ vpternlogq y0, y1, y3, 0x96; /* Sum1 (e) */ \ vpaddq h, h, y0; /* h += Sum1 (e) + Ch (e, f, g) + (k[t] + w[0]) */ \ vpaddq d, d, h; /* d += h */ #define ONE_ROUND_PART2(a, b, c, d, e, f, g, h) \ vmovdqa64 y1, a; \ vprorq y0, a, 39; \ vpternlogq y1, b, c, 0xe8; /* Maj (a, b, c) */ \ vprorq y2, a, 34; \ vprorq y3, a, 28; \ vpternlogq y0, y2, y3, 0x96; /* Sum0 (a) */ \ vpaddq h, h, y1; \ vpaddq h, h, y0; /* h += Sum0 (a) + Maj (a, b, c) */ #define FOUR_ROUNDS_AND_SCHED(X, Y_0, Y_1, Y_2, Y_3, a, b, c, d, e, f, g, h) \ /*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 0 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */; \ vmovdqa YTMP0, Y_3; \ vmovdqa YTMP1, Y_1; \ /* Extract w[t-7] */; \ vpermt2q YTMP0, PERM_VPALIGNR_8, Y_2 /* YTMP0 = W[-7] */; \ /* Calculate w[t-16] + w[t-7] */; \ vpaddq YTMP0, YTMP0, Y_0 /* YTMP0 = W[-7] + W[-16] */; \ /* Extract w[t-15] */; \ vpermt2q YTMP1, PERM_VPALIGNR_8, Y_0 /* YTMP1 = W[-15] */; \ ONE_ROUND_PART1(rsp+frame_XFER+0*8+X*32, a, b, c, d, e, f, g, h); \ \ /* Calculate sigma0 */; \ \ /* Calculate w[t-15] ror 1 */; \ vprorq YTMP3, YTMP1, 1; /* YTMP3 = W[-15] ror 1 */; \ /* Calculate w[t-15] shr 7 */; \ vpsrlq YTMP4, YTMP1, 7 /* YTMP4 = W[-15] >> 7 */; \ \ ONE_ROUND_PART2(a, b, c, d, e, f, g, h); \ \ /*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 1 
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */; \ /* Calculate w[t-15] ror 8 */; \ vprorq YTMP1, YTMP1, 8 /* YTMP1 = W[-15] ror 8 */; \ /* XOR the three components */; \ vpternlogq YTMP1, YTMP3, YTMP4, 0x96 /* YTMP1 = s0 = W[-15] ror 1 ^ W[-15] >> 7 ^ W[-15] ror 8 */; \ \ /* Add three components, w[t-16], w[t-7] and sigma0 */; \ vpaddq YTMP0, YTMP0, YTMP1 /* YTMP0 = W[-16] + W[-7] + s0 */; \ ONE_ROUND_PART1(rsp+frame_XFER+1*8+X*32, h, a, b, c, d, e, f, g); \ /* Move to appropriate lanes for calculating w[16] and w[17] */; \ vshufi64x2 Y_0, YTMP0, YTMP0, 0x0 /* Y_0 = W[-16] + W[-7] + s0 {BABA} */; \ \ /* Calculate w[16] and w[17] in both 128 bit lanes */; \ \ /* Calculate sigma1 for w[16] and w[17] on both 128 bit lanes */; \ vshufi64x2 YTMP2, Y_3, Y_3, 0b11 /* YTMP2 = W[-2] {BABA} */; \ vpsrlq YTMP4, YTMP2, 6 /* YTMP4 = W[-2] >> 6 {BABA} */; \ \ ONE_ROUND_PART2(h, a, b, c, d, e, f, g); \ \ /*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 2 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */; \ vprorq YTMP3, YTMP2, 19 /* YTMP3 = W[-2] ror 19 {BABA} */; \ vprorq YTMP1, YTMP2, 61 /* YTMP3 = W[-2] ror 61 {BABA} */; \ vpternlogq YTMP4, YTMP3, YTMP1, 0x96 /* YTMP4 = s1 = (W[-2] ror 19) ^ (W[-2] ror 61) ^ (W[-2] >> 6) {BABA} */; \ \ ONE_ROUND_PART1(rsp+frame_XFER+2*8+X*32, g, h, a, b, c, d, e, f); \ /* Add sigma1 to the other compunents to get w[16] and w[17] */; \ vpaddq Y_0, Y_0, YTMP4 /* Y_0 = {W[1], W[0], W[1], W[0]} */; \ \ /* Calculate sigma1 for w[18] and w[19] for upper 128 bit lane */; \ vpsrlq YTMP4, Y_0, 6 /* YTMP4 = W[-2] >> 6 {DC--} */; \ \ ONE_ROUND_PART2(g, h, a, b, c, d, e, f); \ \ /*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 3 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */; \ vprorq YTMP3, Y_0, 19 /* YTMP3 = W[-2] ror 19 {DC--} */; \ vprorq YTMP1, Y_0, 61 /* YTMP1 = W[-2] ror 61 {DC--} */; \ vpternlogq YTMP4, YTMP3, YTMP1, 0x96 /* YTMP4 = s1 = (W[-2] ror 19) ^ (W[-2] ror 61) ^ (W[-2] >> 6) {DC--} */; \ \ ONE_ROUND_PART1(rsp+frame_XFER+3*8+X*32, f, g, h, a, b, c, d, e); \ /* Add the sigma0 + w[t-7] + w[t-16] for w[18] and w[19] to newly calculated sigma1 to get w[18] and w[19] */; \ /* Form w[19, w[18], w17], w[16] */; \ vpaddq Y_0{MASK_DC_00}, YTMP0, YTMP4 /* YTMP2 = {W[3], W[2], W[1], W[0]} */; \ \ vpaddq XFER, Y_0, [TBL + (4+X)*32]; \ vmovdqa [rsp + frame_XFER + X*32], XFER; \ ONE_ROUND_PART2(f, g, h, a, b, c, d, e) #define ONE_ROUND(XFERIN, a, b, c, d, e, f, g, h) \ ONE_ROUND_PART1(XFERIN, a, b, c, d, e, f, g, h); \ ONE_ROUND_PART2(a, b, c, d, e, f, g, h) #define DO_4ROUNDS(X, a, b, c, d, e, f, g, h) \ ONE_ROUND(rsp+frame_XFER+0*8+X*32, a, b, c, d, e, f, g, h); \ ONE_ROUND(rsp+frame_XFER+1*8+X*32, h, a, b, c, d, e, f, g); \ ONE_ROUND(rsp+frame_XFER+2*8+X*32, g, h, a, b, c, d, e, f); \ ONE_ROUND(rsp+frame_XFER+3*8+X*32, f, g, h, a, b, c, d, e) /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; void sha512_avx512(const void* M, void* D, uint64_t L); ; Purpose: Updates the SHA512 digest stored at D with the message stored in M. ; The size of the message pointed to by M must be an integer multiple of SHA512 ; message blocks. ; L is the message length in SHA512 blocks */ .globl _gcry_sha512_transform_amd64_avx512 ELF(.type _gcry_sha512_transform_amd64_avx512,@function;) .align 16 _gcry_sha512_transform_amd64_avx512: CFI_STARTPROC() xor eax, eax cmp rdx, 0 je .Lnowork spec_stop_avx512_intel_syntax; /* Setup mask register for DC:BA merging. 
*/ mov eax, 0b1100 kmovd MASK_DC_00, eax /* Allocate Stack Space */ mov RSP_SAVE, rsp CFI_DEF_CFA_REGISTER(RSP_SAVE); sub rsp, frame_size and rsp, ~(0x40 - 1) /*; load initial digest */ vmovq a,[8*0 + CTX] vmovq b,[8*1 + CTX] vmovq c,[8*2 + CTX] vmovq d,[8*3 + CTX] vmovq e,[8*4 + CTX] vmovq f,[8*5 + CTX] vmovq g,[8*6 + CTX] vmovq h,[8*7 + CTX] vmovdqa BYTE_FLIP_MASK, [.LPSHUFFLE_BYTE_FLIP_MASK ADD_RIP] vpmovzxbq PERM_VPALIGNR_8, [.LPERM_VPALIGNR_8 ADD_RIP] lea TBL,[.LK512 ADD_RIP] /*; byte swap first 16 dwords */ COPY_YMM_AND_BSWAP(Y_0, [INP + 0*32], BYTE_FLIP_MASK) COPY_YMM_AND_BSWAP(Y_1, [INP + 1*32], BYTE_FLIP_MASK) COPY_YMM_AND_BSWAP(Y_2, [INP + 2*32], BYTE_FLIP_MASK) COPY_YMM_AND_BSWAP(Y_3, [INP + 3*32], BYTE_FLIP_MASK) lea INP, [INP + 128] vpaddq XFER, Y_0, [TBL + 0*32] vmovdqa [rsp + frame_XFER + 0*32], XFER vpaddq XFER, Y_1, [TBL + 1*32] vmovdqa [rsp + frame_XFER + 1*32], XFER vpaddq XFER, Y_2, [TBL + 2*32] vmovdqa [rsp + frame_XFER + 2*32], XFER vpaddq XFER, Y_3, [TBL + 3*32] vmovdqa [rsp + frame_XFER + 3*32], XFER /*; schedule 64 input dwords, by doing 12 rounds of 4 each */ mov SRND, 4 .align 16 .Loop0: FOUR_ROUNDS_AND_SCHED(0, Y_0, Y_1, Y_2, Y_3, a, b, c, d, e, f, g, h) FOUR_ROUNDS_AND_SCHED(1, Y_1, Y_2, Y_3, Y_0, e, f, g, h, a, b, c, d) FOUR_ROUNDS_AND_SCHED(2, Y_2, Y_3, Y_0, Y_1, a, b, c, d, e, f, g, h) FOUR_ROUNDS_AND_SCHED(3, Y_3, Y_0, Y_1, Y_2, e, f, g, h, a, b, c, d) lea TBL, [TBL + 4*32] sub SRND, 1 jne .Loop0 sub NUM_BLKS, 1 je .Ldone_hash lea TBL, [.LK512 ADD_RIP] /* load next block and byte swap */ COPY_YMM_AND_BSWAP(Y_0, [INP + 0*32], BYTE_FLIP_MASK) COPY_YMM_AND_BSWAP(Y_1, [INP + 1*32], BYTE_FLIP_MASK) COPY_YMM_AND_BSWAP(Y_2, [INP + 2*32], BYTE_FLIP_MASK) COPY_YMM_AND_BSWAP(Y_3, [INP + 3*32], BYTE_FLIP_MASK) lea INP, [INP + 128] DO_4ROUNDS(0, a, b, c, d, e, f, g, h) vpaddq XFER, Y_0, [TBL + 0*32] vmovdqa [rsp + frame_XFER + 0*32], XFER DO_4ROUNDS(1, e, f, g, h, a, b, c, d) vpaddq XFER, Y_1, [TBL + 1*32] vmovdqa [rsp + frame_XFER + 1*32], XFER DO_4ROUNDS(2, a, b, c, d, e, f, g, h) vpaddq XFER, Y_2, [TBL + 2*32] vmovdqa [rsp + frame_XFER + 2*32], XFER DO_4ROUNDS(3, e, f, g, h, a, b, c, d) vpaddq XFER, Y_3, [TBL + 3*32] vmovdqa [rsp + frame_XFER + 3*32], XFER addm([8*0 + CTX],a) addm([8*1 + CTX],b) addm([8*2 + CTX],c) addm([8*3 + CTX],d) addm([8*4 + CTX],e) addm([8*5 + CTX],f) addm([8*6 + CTX],g) addm([8*7 + CTX],h) /*; schedule 64 input dwords, by doing 12 rounds of 4 each */ mov SRND, 4 jmp .Loop0 .Ldone_hash: DO_4ROUNDS(0, a, b, c, d, e, f, g, h) DO_4ROUNDS(1, e, f, g, h, a, b, c, d) DO_4ROUNDS(2, a, b, c, d, e, f, g, h) DO_4ROUNDS(3, e, f, g, h, a, b, c, d) addm([8*0 + CTX],a) xor eax, eax /* burn stack */ addm([8*1 + CTX],b) addm([8*2 + CTX],c) addm([8*3 + CTX],d) addm([8*4 + CTX],e) addm([8*5 + CTX],f) addm([8*6 + CTX],g) addm([8*7 + CTX],h) kxord MASK_DC_00, MASK_DC_00, MASK_DC_00 vzeroall vmovdqa [rsp + frame_XFER + 0*32], ymm0 /* burn stack */ vmovdqa [rsp + frame_XFER + 1*32], ymm0 /* burn stack */ vmovdqa [rsp + frame_XFER + 2*32], ymm0 /* burn stack */ vmovdqa [rsp + frame_XFER + 3*32], ymm0 /* burn stack */ clear_reg(ymm16); clear_reg(ymm17); clear_reg(ymm18); clear_reg(ymm19); clear_reg(ymm20); clear_reg(ymm21); clear_reg(ymm22); /* Restore Stack Pointer */ mov rsp, RSP_SAVE CFI_DEF_CFA_REGISTER(rsp) .Lnowork: ret_spec_stop CFI_ENDPROC() ELF(.size _gcry_sha512_transform_amd64_avx512,.-_gcry_sha512_transform_amd64_avx512) /*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ /*;; Binary Data */ +SECTION_RODATA + ELF(.type 
_gcry_sha512_avx512_consts,@object) _gcry_sha512_avx512_consts: .align 64 /* K[t] used in SHA512 hashing */ .LK512: .quad 0x428a2f98d728ae22,0x7137449123ef65cd .quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc .quad 0x3956c25bf348b538,0x59f111f1b605d019 .quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118 .quad 0xd807aa98a3030242,0x12835b0145706fbe .quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2 .quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1 .quad 0x9bdc06a725c71235,0xc19bf174cf692694 .quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3 .quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65 .quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483 .quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5 .quad 0x983e5152ee66dfab,0xa831c66d2db43210 .quad 0xb00327c898fb213f,0xbf597fc7beef0ee4 .quad 0xc6e00bf33da88fc2,0xd5a79147930aa725 .quad 0x06ca6351e003826f,0x142929670a0e6e70 .quad 0x27b70a8546d22ffc,0x2e1b21385c26c926 .quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df .quad 0x650a73548baf63de,0x766a0abb3c77b2a8 .quad 0x81c2c92e47edaee6,0x92722c851482353b .quad 0xa2bfe8a14cf10364,0xa81a664bbc423001 .quad 0xc24b8b70d0f89791,0xc76c51a30654be30 .quad 0xd192e819d6ef5218,0xd69906245565a910 .quad 0xf40e35855771202a,0x106aa07032bbd1b8 .quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53 .quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8 .quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb .quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3 .quad 0x748f82ee5defb2fc,0x78a5636f43172f60 .quad 0x84c87814a1f0ab72,0x8cc702081a6439ec .quad 0x90befffa23631e28,0xa4506cebde82bde9 .quad 0xbef9a3f7b2c67915,0xc67178f2e372532b .quad 0xca273eceea26619c,0xd186b8c721c0c207 .quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178 .quad 0x06f067aa72176fba,0x0a637dc5a2c898a6 .quad 0x113f9804bef90dae,0x1b710b35131c471b .quad 0x28db77f523047d84,0x32caab7b40c72493 .quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c .quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a .quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817 /* Mask for byte-swapping a couple of qwords in an XMM register using (v)pshufb. */ .align 32 .LPSHUFFLE_BYTE_FLIP_MASK: .octa 0x08090a0b0c0d0e0f0001020304050607 .octa 0x18191a1b1c1d1e1f1011121314151617 .align 4 .LPERM_VPALIGNR_8: .byte 5, 6, 7, 0 ELF(.size _gcry_sha512_avx512_consts,.-_gcry_sha512_avx512_consts) #endif #endif diff --git a/cipher/sha512-ssse3-amd64.S b/cipher/sha512-ssse3-amd64.S index 9cc30892..bfd3bb54 100644 --- a/cipher/sha512-ssse3-amd64.S +++ b/cipher/sha512-ssse3-amd64.S @@ -1,467 +1,472 @@ /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; Copyright (c) 2012, Intel Corporation ; ; All rights reserved. ; ; Redistribution and use in source and binary forms, with or without ; modification, are permitted provided that the following conditions are ; met: ; ; * Redistributions of source code must retain the above copyright ; notice, this list of conditions and the following disclaimer. ; ; * Redistributions in binary form must reproduce the above copyright ; notice, this list of conditions and the following disclaimer in the ; documentation and/or other materials provided with the ; distribution. ; ; * Neither the name of the Intel Corporation nor the names of its ; contributors may be used to endorse or promote products derived from ; this software without specific prior written permission. ; ; ; THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY ; EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR ; PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL INTEL CORPORATION OR ; CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, ; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, ; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ; LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ /* * Conversion to GAS assembly and integration to libgcrypt * by Jussi Kivilinna * * Note: original implementation was named as SHA512-SSE4. However, only SSSE3 * is required. */ #ifdef __x86_64 #include #if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && \ defined(HAVE_INTEL_SYNTAX_PLATFORM_AS) && \ defined(HAVE_GCC_INLINE_ASM_SSSE3) && defined(USE_SHA512) #include "asm-common-amd64.h" .intel_syntax noprefix .text /* Virtual Registers */ #define msg rdi /* ARG1 */ #define digest rsi /* ARG2 */ #define msglen rdx /* ARG3 */ #define T1 rcx #define T2 r8 #define a_64 r9 #define b_64 r10 #define c_64 r11 #define d_64 r12 #define e_64 r13 #define f_64 r14 #define g_64 r15 #define h_64 rbx #define tmp0 rax /* ; Local variables (stack frame) ; Note: frame_size must be an odd multiple of 8 bytes to XMM align RSP */ #define frame_W 0 /* Message Schedule */ #define frame_W_size (80 * 8) #define frame_WK ((frame_W) + (frame_W_size)) /* W[t] + K[t] | W[t+1] + K[t+1] */ #define frame_WK_size (2 * 8) #define frame_GPRSAVE ((frame_WK) + (frame_WK_size)) #define frame_GPRSAVE_size (5 * 8) #define frame_size ((frame_GPRSAVE) + (frame_GPRSAVE_size)) /* Useful QWORD "arrays" for simpler memory references */ #define MSG(i) msg + 8*(i) /* Input message (arg1) */ #define DIGEST(i) digest + 8*(i) /* Output Digest (arg2) */ #define K_t(i) .LK512 + 8*(i) ADD_RIP /* SHA Constants (static mem) */ #define W_t(i) rsp + frame_W + 8*(i) /* Message Schedule (stack frame) */ #define WK_2(i) rsp + frame_WK + 8*((i) % 2) /* W[t]+K[t] (stack frame) */ /* MSG, DIGEST, K_t, W_t are arrays */ /* WK_2(t) points to 1 of 2 qwords at frame.WK depdending on t being odd/even */ #define SHA512_Round(t, a, b, c, d, e, f, g, h) \ /* Compute Round %%t */; \ mov T1, f /* T1 = f */; \ mov tmp0, e /* tmp = e */; \ xor T1, g /* T1 = f ^ g */; \ ror tmp0, 23 /* 41 ; tmp = e ror 23 */; \ and T1, e /* T1 = (f ^ g) & e */; \ xor tmp0, e /* tmp = (e ror 23) ^ e */; \ xor T1, g /* T1 = ((f ^ g) & e) ^ g = CH(e,f,g) */; \ add T1, [WK_2(t)] /* W[t] + K[t] from message scheduler */; \ ror tmp0, 4 /* 18 ; tmp = ((e ror 23) ^ e) ror 4 */; \ xor tmp0, e /* tmp = (((e ror 23) ^ e) ror 4) ^ e */; \ mov T2, a /* T2 = a */; \ add T1, h /* T1 = CH(e,f,g) + W[t] + K[t] + h */; \ ror tmp0, 14 /* 14 ; tmp = ((((e ror23)^e)ror4)^e)ror14 = S1(e) */; \ add T1, tmp0 /* T1 = CH(e,f,g) + W[t] + K[t] + S1(e) */; \ mov tmp0, a /* tmp = a */; \ xor T2, c /* T2 = a ^ c */; \ and tmp0, c /* tmp = a & c */; \ and T2, b /* T2 = (a ^ c) & b */; \ xor T2, tmp0 /* T2 = ((a ^ c) & b) ^ (a & c) = Maj(a,b,c) */; \ mov tmp0, a /* tmp = a */; \ ror tmp0, 5 /* 39 ; tmp = a ror 5 */; \ xor tmp0, a /* tmp = (a ror 5) ^ a */; \ add d, T1 /* e(next_state) = d + T1 */; \ ror tmp0, 6 /* 34 ; tmp = ((a ror 5) ^ a) ror 6 */; \ xor tmp0, a /* tmp = (((a ror 5) ^ a) ror 6) ^ a */; \ lea h, [T1 + T2] /* a(next_state) = T1 + Maj(a,b,c) */; \ ror 
tmp0, 28 /* 28 ; tmp = ((((a ror5)^a)ror6)^a)ror28 = S0(a) */; \ add h, tmp0 /* a(next_state) = T1 + Maj(a,b,c) S0(a) */ #define SHA512_2Sched_2Round_sse_PART1(t, a, b, c, d, e, f, g, h) \ /* \ ; Compute rounds %%t-2 and %%t-1 \ ; Compute message schedule QWORDS %%t and %%t+1 \ ; \ ; Two rounds are computed based on the values for K[t-2]+W[t-2] and \ ; K[t-1]+W[t-1] which were previously stored at WK_2 by the message \ ; scheduler. \ ; The two new schedule QWORDS are stored at [W_t(%%t)] and [W_t(%%t+1)]. \ ; They are then added to their respective SHA512 constants at \ ; [K_t(%%t)] and [K_t(%%t+1)] and stored at dqword [WK_2(%%t)] \ ; For brievity, the comments following vectored instructions only refer to \ ; the first of a pair of QWORDS. \ ; Eg. XMM2=W[t-2] really means XMM2={W[t-2]|W[t-1]} \ ; The computation of the message schedule and the rounds are tightly \ ; stitched to take advantage of instruction-level parallelism. \ ; For clarity, integer instructions (for the rounds calculation) are indented \ ; by one tab. Vectored instructions (for the message scheduler) are indented \ ; by two tabs. \ */ \ \ mov T1, f; \ movdqa xmm2, [W_t(t-2)] /* XMM2 = W[t-2] */; \ xor T1, g; \ and T1, e; \ movdqa xmm0, xmm2 /* XMM0 = W[t-2] */; \ xor T1, g; \ add T1, [WK_2(t)]; \ movdqu xmm5, [W_t(t-15)] /* XMM5 = W[t-15] */; \ mov tmp0, e; \ ror tmp0, 23 /* 41 */; \ movdqa xmm3, xmm5 /* XMM3 = W[t-15] */; \ xor tmp0, e; \ ror tmp0, 4 /* 18 */; \ psrlq xmm0, 61 - 19 /* XMM0 = W[t-2] >> 42 */; \ xor tmp0, e; \ ror tmp0, 14 /* 14 */; \ psrlq xmm3, (8 - 7) /* XMM3 = W[t-15] >> 1 */; \ add T1, tmp0; \ add T1, h; \ pxor xmm0, xmm2 /* XMM0 = (W[t-2] >> 42) ^ W[t-2] */; \ mov T2, a; \ xor T2, c; \ pxor xmm3, xmm5 /* XMM3 = (W[t-15] >> 1) ^ W[t-15] */; \ and T2, b; \ mov tmp0, a; \ psrlq xmm0, 19 - 6 /* XMM0 = ((W[t-2]>>42)^W[t-2])>>13 */; \ and tmp0, c; \ xor T2, tmp0; \ psrlq xmm3, (7 - 1) /* XMM3 = ((W[t-15]>>1)^W[t-15])>>6 */; \ mov tmp0, a; \ ror tmp0, 5 /* 39 */; \ pxor xmm0, xmm2 /* XMM0 = (((W[t-2]>>42)^W[t-2])>>13)^W[t-2] */; \ xor tmp0, a; \ ror tmp0, 6 /* 34 */; \ pxor xmm3, xmm5 /* XMM3 = (((W[t-15]>>1)^W[t-15])>>6)^W[t-15] */; \ xor tmp0, a; \ ror tmp0, 28 /* 28 */; \ psrlq xmm0, 6 /* XMM0 = ((((W[t-2]>>42)^W[t-2])>>13)^W[t-2])>>6 */; \ add T2, tmp0; \ add d, T1; \ psrlq xmm3, 1 /* XMM3 = (((W[t-15]>>1)^W[t-15])>>6)^W[t-15]>>1 */; \ lea h, [T1 + T2] #define SHA512_2Sched_2Round_sse_PART2(t, a, b, c, d, e, f, g, h) \ movdqa xmm1, xmm2 /* XMM1 = W[t-2] */; \ mov T1, f; \ xor T1, g; \ movdqa xmm4, xmm5 /* XMM4 = W[t-15] */; \ and T1, e; \ xor T1, g; \ psllq xmm1, (64 - 19) - (64 - 61) /* XMM1 = W[t-2] << 42 */; \ add T1, [WK_2(t+1)]; \ mov tmp0, e; \ psllq xmm4, (64 - 1) - (64 - 8) /* XMM4 = W[t-15] << 7 */; \ ror tmp0, 23 /* 41 */; \ xor tmp0, e; \ pxor xmm1, xmm2 /* XMM1 = (W[t-2] << 42)^W[t-2] */; \ ror tmp0, 4 /* 18 */; \ xor tmp0, e; \ pxor xmm4, xmm5 /* XMM4 = (W[t-15]<<7)^W[t-15] */; \ ror tmp0, 14 /* 14 */; \ add T1, tmp0; \ psllq xmm1, (64 - 61) /* XMM1 = ((W[t-2] << 42)^W[t-2])<<3 */; \ add T1, h; \ mov T2, a; \ psllq xmm4, (64 - 8) /* XMM4 = ((W[t-15]<<7)^W[t-15])<<56 */; \ xor T2, c; \ and T2, b; \ pxor xmm0, xmm1 /* XMM0 = s1(W[t-2]) */; \ mov tmp0, a; \ and tmp0, c; \ movdqu xmm1, [W_t(t- 7)] /* XMM1 = W[t-7] */; \ xor T2, tmp0; \ pxor xmm3, xmm4 /* XMM3 = s0(W[t-15]) */; \ mov tmp0, a; \ paddq xmm0, xmm3 /* XMM0 = s1(W[t-2]) + s0(W[t-15]) */; \ ror tmp0, 5 /* 39 */; \ paddq xmm0, [W_t(t-16)] /* XMM0 = s1(W[t-2]) + s0(W[t-15]) + W[t-16] */; \ xor tmp0, a; \ paddq xmm0, xmm1 /* XMM0 = 
s1(W[t-2]) + W[t-7] + s0(W[t-15]) + W[t-16] */; \ ror tmp0, 6 /* 34 */; \ movdqa [W_t(t)], xmm0 /* Store scheduled qwords */; \ xor tmp0, a; \ paddq xmm0, [K_t(t)] /* Compute W[t]+K[t] */; \ ror tmp0, 28 /* 28 */; \ movdqa [WK_2(t)], xmm0 /* Store W[t]+K[t] for next rounds */; \ add T2, tmp0; \ add d, T1; \ lea h, [T1 + T2] #define SHA512_2Sched_2Round_sse(t, a, b, c, d, e, f, g, h) \ SHA512_2Sched_2Round_sse_PART1(t, a, b, c, d, e, f, g, h); \ SHA512_2Sched_2Round_sse_PART2(t, h, a, b, c, d, e, f, g) /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; void sha512_sse4(const void* M, void* D, uint64_t L); ; Purpose: Updates the SHA512 digest stored at D with the message stored in M. ; The size of the message pointed to by M must be an integer multiple of SHA512 ; message blocks. ; L is the message length in SHA512 blocks. */ .globl _gcry_sha512_transform_amd64_ssse3 ELF(.type _gcry_sha512_transform_amd64_ssse3,@function;) .align 16 _gcry_sha512_transform_amd64_ssse3: CFI_STARTPROC() xor eax, eax cmp msglen, 0 je .Lnowork /* Allocate Stack Space */ sub rsp, frame_size CFI_ADJUST_CFA_OFFSET(frame_size); /* Save GPRs */ mov [rsp + frame_GPRSAVE + 8 * 0], rbx mov [rsp + frame_GPRSAVE + 8 * 1], r12 mov [rsp + frame_GPRSAVE + 8 * 2], r13 mov [rsp + frame_GPRSAVE + 8 * 3], r14 mov [rsp + frame_GPRSAVE + 8 * 4], r15 CFI_REL_OFFSET(rbx, frame_GPRSAVE + 8 * 0); CFI_REL_OFFSET(r12, frame_GPRSAVE + 8 * 1); CFI_REL_OFFSET(r13, frame_GPRSAVE + 8 * 2); CFI_REL_OFFSET(r14, frame_GPRSAVE + 8 * 3); CFI_REL_OFFSET(r15, frame_GPRSAVE + 8 * 4); .Lupdateblock: /* Load state variables */ mov a_64, [DIGEST(0)] mov b_64, [DIGEST(1)] mov c_64, [DIGEST(2)] mov d_64, [DIGEST(3)] mov e_64, [DIGEST(4)] mov f_64, [DIGEST(5)] mov g_64, [DIGEST(6)] mov h_64, [DIGEST(7)] /* BSWAP 2 QWORDS */ movdqa xmm1, [.LXMM_QWORD_BSWAP ADD_RIP] movdqu xmm0, [MSG(0)] pshufb xmm0, xmm1 /* BSWAP */ movdqa [W_t(0)], xmm0 /* Store Scheduled Pair */ paddq xmm0, [K_t(0)] /* Compute W[t]+K[t] */ movdqa [WK_2(0)], xmm0 /* Store into WK for rounds */ #define T_2_14(t, a, b, c, d, e, f, g, h) \ /* BSWAP 2 QWORDS; Compute 2 Rounds */; \ movdqu xmm0, [MSG(t)]; \ pshufb xmm0, xmm1 /* BSWAP */; \ SHA512_Round(((t) - 2), a##_64, b##_64, c##_64, d##_64, \ e##_64, f##_64, g##_64, h##_64); \ movdqa [W_t(t)], xmm0 /* Store Scheduled Pair */; \ paddq xmm0, [K_t(t)] /* Compute W[t]+K[t] */; \ SHA512_Round(((t) - 1), h##_64, a##_64, b##_64, c##_64, \ d##_64, e##_64, f##_64, g##_64); \ movdqa [WK_2(t)], xmm0 /* Store W[t]+K[t] into WK */ #define T_16_78(t, a, b, c, d, e, f, g, h) \ SHA512_2Sched_2Round_sse((t), a##_64, b##_64, c##_64, d##_64, \ e##_64, f##_64, g##_64, h##_64) #define T_80(t, a, b, c, d, e, f, g, h) \ /* Compute 2 Rounds */; \ SHA512_Round((t - 2), a##_64, b##_64, c##_64, d##_64, \ e##_64, f##_64, g##_64, h##_64); \ SHA512_Round((t - 1), h##_64, a##_64, b##_64, c##_64, \ d##_64, e##_64, f##_64, g##_64) T_2_14(2, a, b, c, d, e, f, g, h) T_2_14(4, g, h, a, b, c, d, e, f) T_2_14(6, e, f, g, h, a, b, c, d) T_2_14(8, c, d, e, f, g, h, a, b) T_2_14(10, a, b, c, d, e, f, g, h) T_2_14(12, g, h, a, b, c, d, e, f) T_2_14(14, e, f, g, h, a, b, c, d) T_16_78(16, c, d, e, f, g, h, a, b) T_16_78(18, a, b, c, d, e, f, g, h) T_16_78(20, g, h, a, b, c, d, e, f) T_16_78(22, e, f, g, h, a, b, c, d) T_16_78(24, c, d, e, f, g, h, a, b) T_16_78(26, a, b, c, d, e, f, g, h) T_16_78(28, g, h, a, b, c, d, e, f) T_16_78(30, e, f, g, h, a, b, c, d) T_16_78(32, c, d, e, f, g, h, a, b) T_16_78(34, a, b, c, d, e, f, g, h) T_16_78(36, g, h, a, b, c, 
d, e, f) T_16_78(38, e, f, g, h, a, b, c, d) T_16_78(40, c, d, e, f, g, h, a, b) T_16_78(42, a, b, c, d, e, f, g, h) T_16_78(44, g, h, a, b, c, d, e, f) T_16_78(46, e, f, g, h, a, b, c, d) T_16_78(48, c, d, e, f, g, h, a, b) T_16_78(50, a, b, c, d, e, f, g, h) T_16_78(52, g, h, a, b, c, d, e, f) T_16_78(54, e, f, g, h, a, b, c, d) T_16_78(56, c, d, e, f, g, h, a, b) T_16_78(58, a, b, c, d, e, f, g, h) T_16_78(60, g, h, a, b, c, d, e, f) T_16_78(62, e, f, g, h, a, b, c, d) T_16_78(64, c, d, e, f, g, h, a, b) T_16_78(66, a, b, c, d, e, f, g, h) T_16_78(68, g, h, a, b, c, d, e, f) T_16_78(70, e, f, g, h, a, b, c, d) T_16_78(72, c, d, e, f, g, h, a, b) T_16_78(74, a, b, c, d, e, f, g, h) T_16_78(76, g, h, a, b, c, d, e, f) T_16_78(78, e, f, g, h, a, b, c, d) T_80(80, c, d, e, f, g, h, a, b) /* Update digest */ add [DIGEST(0)], a_64 add [DIGEST(1)], b_64 add [DIGEST(2)], c_64 add [DIGEST(3)], d_64 add [DIGEST(4)], e_64 add [DIGEST(5)], f_64 add [DIGEST(6)], g_64 add [DIGEST(7)], h_64 /* Advance to next message block */ add msg, 16*8 dec msglen jnz .Lupdateblock /* Restore GPRs */ mov rbx, [rsp + frame_GPRSAVE + 8 * 0] mov r12, [rsp + frame_GPRSAVE + 8 * 1] mov r13, [rsp + frame_GPRSAVE + 8 * 2] mov r14, [rsp + frame_GPRSAVE + 8 * 3] mov r15, [rsp + frame_GPRSAVE + 8 * 4] CFI_RESTORE(rbx) CFI_RESTORE(r12) CFI_RESTORE(r13) CFI_RESTORE(r14) CFI_RESTORE(r15) pxor xmm0, xmm0 pxor xmm1, xmm1 pxor xmm2, xmm2 pxor xmm3, xmm3 pxor xmm4, xmm4 pxor xmm5, xmm5 /* Burn stack */ mov eax, 0 .Lerase_stack: movdqu [rsp + rax], xmm0 add eax, 16 cmp eax, frame_W_size jne .Lerase_stack movdqu [rsp + frame_WK], xmm0 xor eax, eax /* Restore Stack Pointer */ add rsp, frame_size CFI_ADJUST_CFA_OFFSET(-frame_size); .Lnowork: ret_spec_stop CFI_ENDPROC() /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;; Binary Data */ +SECTION_RODATA + +ELF(.type _sha512_ssse3_consts,@object) +_sha512_ssse3_consts: + .align 16 /* Mask for byte-swapping a couple of qwords in an XMM register using (v)pshufb. 
*/ .LXMM_QWORD_BSWAP: .octa 0x08090a0b0c0d0e0f0001020304050607 /* K[t] used in SHA512 hashing */ .LK512: .quad 0x428a2f98d728ae22,0x7137449123ef65cd .quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc .quad 0x3956c25bf348b538,0x59f111f1b605d019 .quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118 .quad 0xd807aa98a3030242,0x12835b0145706fbe .quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2 .quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1 .quad 0x9bdc06a725c71235,0xc19bf174cf692694 .quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3 .quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65 .quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483 .quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5 .quad 0x983e5152ee66dfab,0xa831c66d2db43210 .quad 0xb00327c898fb213f,0xbf597fc7beef0ee4 .quad 0xc6e00bf33da88fc2,0xd5a79147930aa725 .quad 0x06ca6351e003826f,0x142929670a0e6e70 .quad 0x27b70a8546d22ffc,0x2e1b21385c26c926 .quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df .quad 0x650a73548baf63de,0x766a0abb3c77b2a8 .quad 0x81c2c92e47edaee6,0x92722c851482353b .quad 0xa2bfe8a14cf10364,0xa81a664bbc423001 .quad 0xc24b8b70d0f89791,0xc76c51a30654be30 .quad 0xd192e819d6ef5218,0xd69906245565a910 .quad 0xf40e35855771202a,0x106aa07032bbd1b8 .quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53 .quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8 .quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb .quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3 .quad 0x748f82ee5defb2fc,0x78a5636f43172f60 .quad 0x84c87814a1f0ab72,0x8cc702081a6439ec .quad 0x90befffa23631e28,0xa4506cebde82bde9 .quad 0xbef9a3f7b2c67915,0xc67178f2e372532b .quad 0xca273eceea26619c,0xd186b8c721c0c207 .quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178 .quad 0x06f067aa72176fba,0x0a637dc5a2c898a6 .quad 0x113f9804bef90dae,0x1b710b35131c471b .quad 0x28db77f523047d84,0x32caab7b40c72493 .quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c .quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a .quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817 #endif #endif diff --git a/cipher/sm3-avx-bmi2-amd64.S b/cipher/sm3-avx-bmi2-amd64.S index d9b6206a..ef923165 100644 --- a/cipher/sm3-avx-bmi2-amd64.S +++ b/cipher/sm3-avx-bmi2-amd64.S @@ -1,553 +1,555 @@ /* sm3-avx-bmi2-amd64.S - Intel AVX/BMI2 accelerated SM3 transform function * Copyright (C) 2021 Jussi Kivilinna * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . 
*/ #ifdef __x86_64__ #include #if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && \ defined(HAVE_GCC_INLINE_ASM_AVX) && defined(HAVE_GCC_INLINE_ASM_BMI2) && \ defined(USE_SM3) #include "asm-common-amd64.h" /* Context structure */ #define state_h0 0 #define state_h1 4 #define state_h2 8 #define state_h3 12 #define state_h4 16 #define state_h5 20 #define state_h6 24 #define state_h7 28 /* Constants */ -.text +SECTION_RODATA .align 16 ELF(.type _gcry_sm3_avx2_consts,@object) _gcry_sm3_avx2_consts: .Lbe32mask: .long 0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f ELF(.size _gcry_sm3_avx2_consts,.-_gcry_sm3_avx2_consts) /* Round constant macros */ #define K0 2043430169 /* 0x79cc4519 */ #define K1 -208106958 /* 0xf3988a32 */ #define K2 -416213915 /* 0xe7311465 */ #define K3 -832427829 /* 0xce6228cb */ #define K4 -1664855657 /* 0x9cc45197 */ #define K5 965255983 /* 0x3988a32f */ #define K6 1930511966 /* 0x7311465e */ #define K7 -433943364 /* 0xe6228cbc */ #define K8 -867886727 /* 0xcc451979 */ #define K9 -1735773453 /* 0x988a32f3 */ #define K10 823420391 /* 0x311465e7 */ #define K11 1646840782 /* 0x6228cbce */ #define K12 -1001285732 /* 0xc451979c */ #define K13 -2002571463 /* 0x88a32f39 */ #define K14 289824371 /* 0x11465e73 */ #define K15 579648742 /* 0x228cbce6 */ #define K16 -1651869049 /* 0x9d8a7a87 */ #define K17 991229199 /* 0x3b14f50f */ #define K18 1982458398 /* 0x7629ea1e */ #define K19 -330050500 /* 0xec53d43c */ #define K20 -660100999 /* 0xd8a7a879 */ #define K21 -1320201997 /* 0xb14f50f3 */ #define K22 1654563303 /* 0x629ea1e7 */ #define K23 -985840690 /* 0xc53d43ce */ #define K24 -1971681379 /* 0x8a7a879d */ #define K25 351604539 /* 0x14f50f3b */ #define K26 703209078 /* 0x29ea1e76 */ #define K27 1406418156 /* 0x53d43cec */ #define K28 -1482130984 /* 0xa7a879d8 */ #define K29 1330705329 /* 0x4f50f3b1 */ #define K30 -1633556638 /* 0x9ea1e762 */ #define K31 1027854021 /* 0x3d43cec5 */ #define K32 2055708042 /* 0x7a879d8a */ #define K33 -183551212 /* 0xf50f3b14 */ #define K34 -367102423 /* 0xea1e7629 */ #define K35 -734204845 /* 0xd43cec53 */ #define K36 -1468409689 /* 0xa879d8a7 */ #define K37 1358147919 /* 0x50f3b14f */ #define K38 -1578671458 /* 0xa1e7629e */ #define K39 1137624381 /* 0x43cec53d */ #define K40 -2019718534 /* 0x879d8a7a */ #define K41 255530229 /* 0x0f3b14f5 */ #define K42 511060458 /* 0x1e7629ea */ #define K43 1022120916 /* 0x3cec53d4 */ #define K44 2044241832 /* 0x79d8a7a8 */ #define K45 -206483632 /* 0xf3b14f50 */ #define K46 -412967263 /* 0xe7629ea1 */ #define K47 -825934525 /* 0xcec53d43 */ #define K48 -1651869049 /* 0x9d8a7a87 */ #define K49 991229199 /* 0x3b14f50f */ #define K50 1982458398 /* 0x7629ea1e */ #define K51 -330050500 /* 0xec53d43c */ #define K52 -660100999 /* 0xd8a7a879 */ #define K53 -1320201997 /* 0xb14f50f3 */ #define K54 1654563303 /* 0x629ea1e7 */ #define K55 -985840690 /* 0xc53d43ce */ #define K56 -1971681379 /* 0x8a7a879d */ #define K57 351604539 /* 0x14f50f3b */ #define K58 703209078 /* 0x29ea1e76 */ #define K59 1406418156 /* 0x53d43cec */ #define K60 -1482130984 /* 0xa7a879d8 */ #define K61 1330705329 /* 0x4f50f3b1 */ #define K62 -1633556638 /* 0x9ea1e762 */ #define K63 1027854021 /* 0x3d43cec5 */ /* Register macros */ #define RSTATE %rdi #define RDATA %rsi #define RNBLKS %rdx #define t0 %eax #define t1 %ebx #define t2 %ecx #define a %r8d #define b %r9d #define c %r10d #define d %r11d #define e %r12d #define f %r13d #define g %r14d #define h %r15d #define W0 %xmm0 #define W1 %xmm1 
#define W2 %xmm2 #define W3 %xmm3 #define W4 %xmm4 #define W5 %xmm5 #define XTMP0 %xmm6 #define XTMP1 %xmm7 #define XTMP2 %xmm8 #define XTMP3 %xmm9 #define XTMP4 %xmm10 #define XTMP5 %xmm11 #define XTMP6 %xmm12 #define BSWAP_REG %xmm15 /* Stack structure */ #define STACK_W_SIZE (32 * 2 * 3) #define STACK_REG_SAVE_SIZE (64) #define STACK_W (0) #define STACK_REG_SAVE (STACK_W + STACK_W_SIZE) #define STACK_SIZE (STACK_REG_SAVE + STACK_REG_SAVE_SIZE) /* Instruction helpers. */ #define roll2(v, reg) \ roll $(v), reg; #define roll3mov(v, src, dst) \ movl src, dst; \ roll $(v), dst; #define roll3(v, src, dst) \ rorxl $(32-(v)), src, dst; #define addl2(a, out) \ leal (a, out), out; /* Round function macros. */ #define GG1(x, y, z, o, t) \ movl x, o; \ xorl y, o; \ xorl z, o; #define FF1(x, y, z, o, t) GG1(x, y, z, o, t) #define GG2(x, y, z, o, t) \ andnl z, x, o; \ movl y, t; \ andl x, t; \ addl2(t, o); #define FF2(x, y, z, o, t) \ movl y, o; \ xorl x, o; \ movl y, t; \ andl x, t; \ andl z, o; \ xorl t, o; #define R(i, a, b, c, d, e, f, g, h, round, widx, wtype) \ /* rol(a, 12) => t0 */ \ roll3mov(12, a, t0); /* rorxl here would reduce perf by 6% on zen3 */ \ /* rol (t0 + e + t), 7) => t1 */ \ leal K##round(t0, e, 1), t1; \ roll2(7, t1); \ /* h + w1 => h */ \ addl wtype##_W1_ADDR(round, widx), h; \ /* h + t1 => h */ \ addl2(t1, h); \ /* t1 ^ t0 => t0 */ \ xorl t1, t0; \ /* w1w2 + d => d */ \ addl wtype##_W1W2_ADDR(round, widx), d; \ /* FF##i(a,b,c) => t1 */ \ FF##i(a, b, c, t1, t2); \ /* d + t1 => d */ \ addl2(t1, d); \ /* GG#i(e,f,g) => t2 */ \ GG##i(e, f, g, t2, t1); \ /* h + t2 => h */ \ addl2(t2, h); \ /* rol (f, 19) => f */ \ roll2(19, f); \ /* d + t0 => d */ \ addl2(t0, d); \ /* rol (b, 9) => b */ \ roll2(9, b); \ /* P0(h) => h */ \ roll3(9, h, t2); \ roll3(17, h, t1); \ xorl t2, h; \ xorl t1, h; #define R1(a, b, c, d, e, f, g, h, round, widx, wtype) \ R(1, a, b, c, d, e, f, g, h, round, widx, wtype) #define R2(a, b, c, d, e, f, g, h, round, widx, wtype) \ R(2, a, b, c, d, e, f, g, h, round, widx, wtype) /* Input expansion macros. */ /* Byte-swapped input address. */ #define IW_W_ADDR(round, widx, offs) \ (STACK_W + ((round) / 4) * 64 + (offs) + ((widx) * 4))(%rsp) /* Expanded input address. */ #define XW_W_ADDR(round, widx, offs) \ (STACK_W + ((((round) / 3) - 4) % 2) * 64 + (offs) + ((widx) * 4))(%rsp) /* Rounds 1-12, byte-swapped input block addresses. */ #define IW_W1_ADDR(round, widx) IW_W_ADDR(round, widx, 0) #define IW_W1W2_ADDR(round, widx) IW_W_ADDR(round, widx, 32) /* Rounds 1-12, expanded input block addresses. */ #define XW_W1_ADDR(round, widx) XW_W_ADDR(round, widx, 0) #define XW_W1W2_ADDR(round, widx) XW_W_ADDR(round, widx, 32) /* Input block loading. 
*/ #define LOAD_W_XMM_1() \ vmovdqu 0*16(RDATA), XTMP0; /* XTMP0: w3, w2, w1, w0 */ \ vmovdqu 1*16(RDATA), XTMP1; /* XTMP1: w7, w6, w5, w4 */ \ vmovdqu 2*16(RDATA), XTMP2; /* XTMP2: w11, w10, w9, w8 */ \ vmovdqu 3*16(RDATA), XTMP3; /* XTMP3: w15, w14, w13, w12 */\ vpshufb BSWAP_REG, XTMP0, XTMP0; \ vpshufb BSWAP_REG, XTMP1, XTMP1; \ vpshufb BSWAP_REG, XTMP2, XTMP2; \ vpshufb BSWAP_REG, XTMP3, XTMP3; \ vpxor XTMP0, XTMP1, XTMP4; \ vpxor XTMP1, XTMP2, XTMP5; \ vpxor XTMP2, XTMP3, XTMP6; \ leaq 64(RDATA), RDATA; \ vmovdqa XTMP0, IW_W1_ADDR(0, 0); \ vmovdqa XTMP4, IW_W1W2_ADDR(0, 0); \ vmovdqa XTMP1, IW_W1_ADDR(4, 0); \ vmovdqa XTMP5, IW_W1W2_ADDR(4, 0); #define LOAD_W_XMM_2() \ vmovdqa XTMP2, IW_W1_ADDR(8, 0); \ vmovdqa XTMP6, IW_W1W2_ADDR(8, 0); #define LOAD_W_XMM_3() \ vpshufd $0b00000000, XTMP0, W0; /* W0: xx, w0, xx, xx */ \ vpshufd $0b11111001, XTMP0, W1; /* W1: xx, w3, w2, w1 */ \ vmovdqa XTMP1, W2; /* W2: xx, w6, w5, w4 */ \ vpalignr $12, XTMP1, XTMP2, W3; /* W3: xx, w9, w8, w7 */ \ vpalignr $8, XTMP2, XTMP3, W4; /* W4: xx, w12, w11, w10 */ \ vpshufd $0b11111001, XTMP3, W5; /* W5: xx, w15, w14, w13 */ /* Message scheduling. Note: 3 words per XMM register. */ #define SCHED_W_0(round, w0, w1, w2, w3, w4, w5) \ /* Load (w[i - 16]) => XTMP0 */ \ vpshufd $0b10111111, w0, XTMP0; \ vpalignr $12, XTMP0, w1, XTMP0; /* XTMP0: xx, w2, w1, w0 */ \ /* Load (w[i - 13]) => XTMP1 */ \ vpshufd $0b10111111, w1, XTMP1; \ vpalignr $12, XTMP1, w2, XTMP1; \ /* w[i - 9] == w3 */ \ /* XMM3 ^ XTMP0 => XTMP0 */ \ vpxor w3, XTMP0, XTMP0; #define SCHED_W_1(round, w0, w1, w2, w3, w4, w5) \ /* w[i - 3] == w5 */ \ /* rol(XMM5, 15) ^ XTMP0 => XTMP0 */ \ vpslld $15, w5, XTMP2; \ vpsrld $(32-15), w5, XTMP3; \ vpxor XTMP2, XTMP3, XTMP3; \ vpxor XTMP3, XTMP0, XTMP0; \ /* rol(XTMP1, 7) => XTMP1 */ \ vpslld $7, XTMP1, XTMP5; \ vpsrld $(32-7), XTMP1, XTMP1; \ vpxor XTMP5, XTMP1, XTMP1; \ /* XMM4 ^ XTMP1 => XTMP1 */ \ vpxor w4, XTMP1, XTMP1; \ /* w[i - 6] == XMM4 */ \ /* P1(XTMP0) ^ XTMP1 => XMM0 */ \ vpslld $15, XTMP0, XTMP5; \ vpsrld $(32-15), XTMP0, XTMP6; \ vpslld $23, XTMP0, XTMP2; \ vpsrld $(32-23), XTMP0, XTMP3; \ vpxor XTMP0, XTMP1, XTMP1; \ vpxor XTMP6, XTMP5, XTMP5; \ vpxor XTMP3, XTMP2, XTMP2; \ vpxor XTMP2, XTMP5, XTMP5; \ vpxor XTMP5, XTMP1, w0; #define SCHED_W_2(round, w0, w1, w2, w3, w4, w5) \ /* W1 in XMM12 */ \ vpshufd $0b10111111, w4, XTMP4; \ vpalignr $12, XTMP4, w5, XTMP4; \ vmovdqa XTMP4, XW_W1_ADDR((round), 0); \ /* W1 ^ W2 => XTMP1 */ \ vpxor w0, XTMP4, XTMP1; \ vmovdqa XTMP1, XW_W1W2_ADDR((round), 0); +.text + /* * Transform nblks*64 bytes (nblks*16 32-bit words) at DATA. 
* * unsigned int * _gcry_sm3_transform_amd64_avx_bmi2 (void *ctx, const unsigned char *data, * size_t nblks) */ .globl _gcry_sm3_transform_amd64_avx_bmi2 ELF(.type _gcry_sm3_transform_amd64_avx_bmi2,@function) .align 16 _gcry_sm3_transform_amd64_avx_bmi2: /* input: * %rdi: ctx, CTX * %rsi: data (64*nblks bytes) * %rdx: nblks */ CFI_STARTPROC(); vzeroupper; pushq %rbp; CFI_PUSH(%rbp); movq %rsp, %rbp; CFI_DEF_CFA_REGISTER(%rbp); movq %rdx, RNBLKS; subq $STACK_SIZE, %rsp; andq $(~63), %rsp; movq %rbx, (STACK_REG_SAVE + 0 * 8)(%rsp); CFI_REL_OFFSET(%rbx, STACK_REG_SAVE + 0 * 8); movq %r15, (STACK_REG_SAVE + 1 * 8)(%rsp); CFI_REL_OFFSET(%r15, STACK_REG_SAVE + 1 * 8); movq %r14, (STACK_REG_SAVE + 2 * 8)(%rsp); CFI_REL_OFFSET(%r14, STACK_REG_SAVE + 2 * 8); movq %r13, (STACK_REG_SAVE + 3 * 8)(%rsp); CFI_REL_OFFSET(%r13, STACK_REG_SAVE + 3 * 8); movq %r12, (STACK_REG_SAVE + 4 * 8)(%rsp); CFI_REL_OFFSET(%r12, STACK_REG_SAVE + 4 * 8); vmovdqa .Lbe32mask rRIP, BSWAP_REG; /* Get the values of the chaining variables. */ movl state_h0(RSTATE), a; movl state_h1(RSTATE), b; movl state_h2(RSTATE), c; movl state_h3(RSTATE), d; movl state_h4(RSTATE), e; movl state_h5(RSTATE), f; movl state_h6(RSTATE), g; movl state_h7(RSTATE), h; .align 16 .Loop: /* Load data part1. */ LOAD_W_XMM_1(); leaq -1(RNBLKS), RNBLKS; /* Transform 0-3 + Load data part2. */ R1(a, b, c, d, e, f, g, h, 0, 0, IW); LOAD_W_XMM_2(); R1(d, a, b, c, h, e, f, g, 1, 1, IW); R1(c, d, a, b, g, h, e, f, 2, 2, IW); R1(b, c, d, a, f, g, h, e, 3, 3, IW); LOAD_W_XMM_3(); /* Transform 4-7 + Precalc 12-14. */ R1(a, b, c, d, e, f, g, h, 4, 0, IW); R1(d, a, b, c, h, e, f, g, 5, 1, IW); R1(c, d, a, b, g, h, e, f, 6, 2, IW); SCHED_W_0(12, W0, W1, W2, W3, W4, W5); R1(b, c, d, a, f, g, h, e, 7, 3, IW); SCHED_W_1(12, W0, W1, W2, W3, W4, W5); /* Transform 8-11 + Precalc 12-17. 
*/ R1(a, b, c, d, e, f, g, h, 8, 0, IW); SCHED_W_2(12, W0, W1, W2, W3, W4, W5); R1(d, a, b, c, h, e, f, g, 9, 1, IW); SCHED_W_0(15, W1, W2, W3, W4, W5, W0); R1(c, d, a, b, g, h, e, f, 10, 2, IW); SCHED_W_1(15, W1, W2, W3, W4, W5, W0); R1(b, c, d, a, f, g, h, e, 11, 3, IW); SCHED_W_2(15, W1, W2, W3, W4, W5, W0); /* Transform 12-14 + Precalc 18-20 */ R1(a, b, c, d, e, f, g, h, 12, 0, XW); SCHED_W_0(18, W2, W3, W4, W5, W0, W1); R1(d, a, b, c, h, e, f, g, 13, 1, XW); SCHED_W_1(18, W2, W3, W4, W5, W0, W1); R1(c, d, a, b, g, h, e, f, 14, 2, XW); SCHED_W_2(18, W2, W3, W4, W5, W0, W1); /* Transform 15-17 + Precalc 21-23 */ R1(b, c, d, a, f, g, h, e, 15, 0, XW); SCHED_W_0(21, W3, W4, W5, W0, W1, W2); R2(a, b, c, d, e, f, g, h, 16, 1, XW); SCHED_W_1(21, W3, W4, W5, W0, W1, W2); R2(d, a, b, c, h, e, f, g, 17, 2, XW); SCHED_W_2(21, W3, W4, W5, W0, W1, W2); /* Transform 18-20 + Precalc 24-26 */ R2(c, d, a, b, g, h, e, f, 18, 0, XW); SCHED_W_0(24, W4, W5, W0, W1, W2, W3); R2(b, c, d, a, f, g, h, e, 19, 1, XW); SCHED_W_1(24, W4, W5, W0, W1, W2, W3); R2(a, b, c, d, e, f, g, h, 20, 2, XW); SCHED_W_2(24, W4, W5, W0, W1, W2, W3); /* Transform 21-23 + Precalc 27-29 */ R2(d, a, b, c, h, e, f, g, 21, 0, XW); SCHED_W_0(27, W5, W0, W1, W2, W3, W4); R2(c, d, a, b, g, h, e, f, 22, 1, XW); SCHED_W_1(27, W5, W0, W1, W2, W3, W4); R2(b, c, d, a, f, g, h, e, 23, 2, XW); SCHED_W_2(27, W5, W0, W1, W2, W3, W4); /* Transform 24-26 + Precalc 30-32 */ R2(a, b, c, d, e, f, g, h, 24, 0, XW); SCHED_W_0(30, W0, W1, W2, W3, W4, W5); R2(d, a, b, c, h, e, f, g, 25, 1, XW); SCHED_W_1(30, W0, W1, W2, W3, W4, W5); R2(c, d, a, b, g, h, e, f, 26, 2, XW); SCHED_W_2(30, W0, W1, W2, W3, W4, W5); /* Transform 27-29 + Precalc 33-35 */ R2(b, c, d, a, f, g, h, e, 27, 0, XW); SCHED_W_0(33, W1, W2, W3, W4, W5, W0); R2(a, b, c, d, e, f, g, h, 28, 1, XW); SCHED_W_1(33, W1, W2, W3, W4, W5, W0); R2(d, a, b, c, h, e, f, g, 29, 2, XW); SCHED_W_2(33, W1, W2, W3, W4, W5, W0); /* Transform 30-32 + Precalc 36-38 */ R2(c, d, a, b, g, h, e, f, 30, 0, XW); SCHED_W_0(36, W2, W3, W4, W5, W0, W1); R2(b, c, d, a, f, g, h, e, 31, 1, XW); SCHED_W_1(36, W2, W3, W4, W5, W0, W1); R2(a, b, c, d, e, f, g, h, 32, 2, XW); SCHED_W_2(36, W2, W3, W4, W5, W0, W1); /* Transform 33-35 + Precalc 39-41 */ R2(d, a, b, c, h, e, f, g, 33, 0, XW); SCHED_W_0(39, W3, W4, W5, W0, W1, W2); R2(c, d, a, b, g, h, e, f, 34, 1, XW); SCHED_W_1(39, W3, W4, W5, W0, W1, W2); R2(b, c, d, a, f, g, h, e, 35, 2, XW); SCHED_W_2(39, W3, W4, W5, W0, W1, W2); /* Transform 36-38 + Precalc 42-44 */ R2(a, b, c, d, e, f, g, h, 36, 0, XW); SCHED_W_0(42, W4, W5, W0, W1, W2, W3); R2(d, a, b, c, h, e, f, g, 37, 1, XW); SCHED_W_1(42, W4, W5, W0, W1, W2, W3); R2(c, d, a, b, g, h, e, f, 38, 2, XW); SCHED_W_2(42, W4, W5, W0, W1, W2, W3); /* Transform 39-41 + Precalc 45-47 */ R2(b, c, d, a, f, g, h, e, 39, 0, XW); SCHED_W_0(45, W5, W0, W1, W2, W3, W4); R2(a, b, c, d, e, f, g, h, 40, 1, XW); SCHED_W_1(45, W5, W0, W1, W2, W3, W4); R2(d, a, b, c, h, e, f, g, 41, 2, XW); SCHED_W_2(45, W5, W0, W1, W2, W3, W4); /* Transform 42-44 + Precalc 48-50 */ R2(c, d, a, b, g, h, e, f, 42, 0, XW); SCHED_W_0(48, W0, W1, W2, W3, W4, W5); R2(b, c, d, a, f, g, h, e, 43, 1, XW); SCHED_W_1(48, W0, W1, W2, W3, W4, W5); R2(a, b, c, d, e, f, g, h, 44, 2, XW); SCHED_W_2(48, W0, W1, W2, W3, W4, W5); /* Transform 45-47 + Precalc 51-53 */ R2(d, a, b, c, h, e, f, g, 45, 0, XW); SCHED_W_0(51, W1, W2, W3, W4, W5, W0); R2(c, d, a, b, g, h, e, f, 46, 1, XW); SCHED_W_1(51, W1, W2, W3, W4, W5, W0); R2(b, c, d, a, f, g, h, e, 47, 2, XW); SCHED_W_2(51, 
W1, W2, W3, W4, W5, W0); /* Transform 48-50 + Precalc 54-56 */ R2(a, b, c, d, e, f, g, h, 48, 0, XW); SCHED_W_0(54, W2, W3, W4, W5, W0, W1); R2(d, a, b, c, h, e, f, g, 49, 1, XW); SCHED_W_1(54, W2, W3, W4, W5, W0, W1); R2(c, d, a, b, g, h, e, f, 50, 2, XW); SCHED_W_2(54, W2, W3, W4, W5, W0, W1); /* Transform 51-53 + Precalc 57-59 */ R2(b, c, d, a, f, g, h, e, 51, 0, XW); SCHED_W_0(57, W3, W4, W5, W0, W1, W2); R2(a, b, c, d, e, f, g, h, 52, 1, XW); SCHED_W_1(57, W3, W4, W5, W0, W1, W2); R2(d, a, b, c, h, e, f, g, 53, 2, XW); SCHED_W_2(57, W3, W4, W5, W0, W1, W2); /* Transform 54-56 + Precalc 60-62 */ R2(c, d, a, b, g, h, e, f, 54, 0, XW); SCHED_W_0(60, W4, W5, W0, W1, W2, W3); R2(b, c, d, a, f, g, h, e, 55, 1, XW); SCHED_W_1(60, W4, W5, W0, W1, W2, W3); R2(a, b, c, d, e, f, g, h, 56, 2, XW); SCHED_W_2(60, W4, W5, W0, W1, W2, W3); /* Transform 57-59 + Precalc 63 */ R2(d, a, b, c, h, e, f, g, 57, 0, XW); SCHED_W_0(63, W5, W0, W1, W2, W3, W4); R2(c, d, a, b, g, h, e, f, 58, 1, XW); R2(b, c, d, a, f, g, h, e, 59, 2, XW); SCHED_W_1(63, W5, W0, W1, W2, W3, W4); /* Transform 60-62 + Precalc 63 */ R2(a, b, c, d, e, f, g, h, 60, 0, XW); R2(d, a, b, c, h, e, f, g, 61, 1, XW); SCHED_W_2(63, W5, W0, W1, W2, W3, W4); R2(c, d, a, b, g, h, e, f, 62, 2, XW); /* Transform 63 */ R2(b, c, d, a, f, g, h, e, 63, 0, XW); /* Update the chaining variables. */ xorl state_h0(RSTATE), a; xorl state_h1(RSTATE), b; xorl state_h2(RSTATE), c; xorl state_h3(RSTATE), d; movl a, state_h0(RSTATE); movl b, state_h1(RSTATE); movl c, state_h2(RSTATE); movl d, state_h3(RSTATE); xorl state_h4(RSTATE), e; xorl state_h5(RSTATE), f; xorl state_h6(RSTATE), g; xorl state_h7(RSTATE), h; movl e, state_h4(RSTATE); movl f, state_h5(RSTATE); movl g, state_h6(RSTATE); movl h, state_h7(RSTATE); cmpq $0, RNBLKS; jne .Loop; vzeroall; movq (STACK_REG_SAVE + 0 * 8)(%rsp), %rbx; CFI_RESTORE(%rbx); movq (STACK_REG_SAVE + 1 * 8)(%rsp), %r15; CFI_RESTORE(%r15); movq (STACK_REG_SAVE + 2 * 8)(%rsp), %r14; CFI_RESTORE(%r14); movq (STACK_REG_SAVE + 3 * 8)(%rsp), %r13; CFI_RESTORE(%r13); movq (STACK_REG_SAVE + 4 * 8)(%rsp), %r12; CFI_RESTORE(%r12); vmovdqa %xmm0, IW_W1_ADDR(0, 0); vmovdqa %xmm0, IW_W1W2_ADDR(0, 0); vmovdqa %xmm0, IW_W1_ADDR(4, 0); vmovdqa %xmm0, IW_W1W2_ADDR(4, 0); vmovdqa %xmm0, IW_W1_ADDR(8, 0); vmovdqa %xmm0, IW_W1W2_ADDR(8, 0); xorl %eax, %eax; /* stack burned */ leave; CFI_LEAVE(); ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_sm3_transform_amd64_avx_bmi2, .-_gcry_sm3_transform_amd64_avx_bmi2;) #endif #endif
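
/*
 * Reviewer note (not part of the patch): the AVX2, AVX-512 and SSSE3 kernels
 * above all compute the same FIPS 180-4 SHA-512 round and message schedule;
 * the macros only differ in how Ch/Maj/Sum are mapped onto instructions
 * (e.g. vpternlogq immediates 0xca and 0xe8 in ONE_ROUND_PART1/PART2, or the
 * chained "ror 23 / ror 4 / ror 14" decomposition in SHA512_Round).  The
 * scalar C sketch below is purely illustrative reference code under those
 * assumptions; helper names such as ror64() and sha512_round() are invented
 * here and do not exist in libgcrypt.
 */

#include <stdint.h>

/* Rotate-right on 64-bit words; the shift counts below are the standard
 * SHA-512 constants and match the comments in the round and schedule
 * macros above. */
static inline uint64_t ror64(uint64_t x, unsigned n)
{
  return (x >> n) | (x << (64 - n));
}

#define CH(e, f, g)   (((e) & (f)) ^ (~(e) & (g)))        /* vpternlogq imm 0xca */
#define MAJ(a, b, c)  (((a) & (b)) ^ ((c) & ((a) ^ (b)))) /* vpternlogq imm 0xe8 */
#define SUM0(a)   (ror64(a, 28) ^ ror64(a, 34) ^ ror64(a, 39))
#define SUM1(e)   (ror64(e, 14) ^ ror64(e, 18) ^ ror64(e, 41))
#define SIGMA0(w) (ror64(w, 1)  ^ ror64(w, 8)  ^ ((w) >> 7))
#define SIGMA1(w) (ror64(w, 19) ^ ror64(w, 61) ^ ((w) >> 6))

/* One round, t = 0..79.  'wk' is W[t] + K[t], i.e. the value the assembly
 * pre-adds into the frame_XFER / WK_2 stack slots before the round runs. */
static inline void sha512_round(uint64_t s[8], uint64_t wk)
{
  uint64_t a = s[0], b = s[1], c = s[2], d = s[3];
  uint64_t e = s[4], f = s[5], g = s[6], h = s[7];
  uint64_t t1 = h + SUM1(e) + CH(e, f, g) + wk;
  uint64_t t2 = SUM0(a) + MAJ(a, b, c);

  /* Rotate the working state: new e = d + t1, new a = t1 + t2.  The
   * unrolled assembly avoids this shuffle by permuting register names
   * from one round to the next instead. */
  s[7] = g; s[6] = f; s[5] = e; s[4] = d + t1;
  s[3] = c; s[2] = b; s[1] = a; s[0] = t1 + t2;
}

/* Message schedule for t >= 16, as computed by FOUR_ROUNDS_AND_SCHED and
 * SHA512_2Sched_2Round_sse:
 *   W[t] = sigma1(W[t-2]) + W[t-7] + sigma0(W[t-15]) + W[t-16]. */
static inline uint64_t sha512_schedule(const uint64_t w[80], int t)
{
  return SIGMA1(w[t - 2]) + w[t - 7] + SIGMA0(w[t - 15]) + w[t - 16];
}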