diff --git a/cipher/sha256-avx-amd64.S b/cipher/sha256-avx-amd64.S index 6953855b..b8b01b15 100644 --- a/cipher/sha256-avx-amd64.S +++ b/cipher/sha256-avx-amd64.S @@ -1,527 +1,528 @@ /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; Copyright (c) 2012, Intel Corporation ; ; All rights reserved. ; ; Redistribution and use in source and binary forms, with or without ; modification, are permitted provided that the following conditions are ; met: ; ; * Redistributions of source code must retain the above copyright ; notice, this list of conditions and the following disclaimer. ; ; * Redistributions in binary form must reproduce the above copyright ; notice, this list of conditions and the following disclaimer in the ; documentation and/or other materials provided with the ; distribution. ; ; * Neither the name of the Intel Corporation nor the names of its ; contributors may be used to endorse or promote products derived from ; this software without specific prior written permission. ; ; ; THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY ; EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR ; PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR ; CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, ; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, ; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ; LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; ; This code is described in an Intel White-Paper: ; "Fast SHA-256 Implementations on Intel Architecture Processors" ; ; To find it, surf to http://www.intel.com/p/en_US/embedded ; and search for that title. ; The paper is expected to be released roughly at the end of April, 2012 ; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; This code schedules 1 block at a time, with 4 lanes per block ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ /* * Conversion to GAS assembly and integration to libgcrypt * by Jussi Kivilinna * * Note: Based on the SSSE3 implementation. */ #ifdef __x86_64 #include <config.h> #if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && \ defined(HAVE_INTEL_SYNTAX_PLATFORM_AS) && \ defined(HAVE_GCC_INLINE_ASM_AVX) && defined(USE_SHA256) #ifdef __PIC__ # define ADD_RIP +rip #else # define ADD_RIP #endif #ifdef HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS # define ELF(...) __VA_ARGS__ #else # define ELF(...)
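/*
 * Note: the ROR macro defined below implements a rotate-right by N
 * through 'shld reg, reg, 32-N'.  Double-shifting a register against
 * itself with SHLD is a left-rotate, and rotating left by (32-N)
 * equals rotating right by N.  A minimal C model of that identity,
 * illustrative only and not part of the build:
 *
 *   #include <stdint.h>
 *
 *   static uint32_t ror32(uint32_t x, unsigned n)
 *   {
 *     return (x >> n) | (x << (32 - n));  // plain rotate-right
 *   }
 *
 *   static uint32_t shld_self(uint32_t x, unsigned k)
 *   {
 *     return (x << k) | (x >> (32 - k));  // shld x,x,k == rotl(x,k)
 *   }
 *
 *   // For any n in 1..31: ror32(x, n) == shld_self(x, 32 - n).
 */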
/*_*/ #endif .intel_syntax noprefix #define VMOVDQ vmovdqu /* assume buffers not aligned */ .macro ROR p1 p2 /* shld is faster than ror on Intel Sandybridge */ shld \p1, \p1, (32 - \p2) .endm /*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; Define Macros*/ /* addm [mem], reg * Add reg to mem using reg-mem add and store */ .macro addm p1 p2 add \p2, \p1 mov \p1, \p2 .endm /*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;*/ /* COPY_XMM_AND_BSWAP xmm, [mem], byte_flip_mask * Load xmm with mem and byte swap each dword */ .macro COPY_XMM_AND_BSWAP p1 p2 p3 VMOVDQ \p1, \p2 vpshufb \p1, \p1, \p3 .endm /*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;*/ X0 = xmm4 X1 = xmm5 X2 = xmm6 X3 = xmm7 XTMP0 = xmm0 XTMP1 = xmm1 XTMP2 = xmm2 XTMP3 = xmm3 XTMP4 = xmm8 XFER = xmm9 SHUF_00BA = xmm10 /* shuffle xBxA -> 00BA */ SHUF_DC00 = xmm11 /* shuffle xDxC -> DC00 */ BYTE_FLIP_MASK = xmm12 NUM_BLKS = rdx /* 3rd arg */ CTX = rsi /* 2nd arg */ INP = rdi /* 1st arg */ SRND = rdi /* clobbers INP */ c = ecx d = r8d e = edx TBL = rbp a = eax b = ebx f = r9d g = r10d h = r11d y0 = r13d y1 = r14d y2 = r15d #define _INP_END_SIZE 8 #define _INP_SIZE 8 #define _XFER_SIZE 8 #define _XMM_SAVE_SIZE 0 /* STACK_SIZE plus pushes must be an odd multiple of 8 */ #define _ALIGN_SIZE 8 #define _INP_END 0 #define _INP (_INP_END + _INP_END_SIZE) #define _XFER (_INP + _INP_SIZE) #define _XMM_SAVE (_XFER + _XFER_SIZE + _ALIGN_SIZE) #define STACK_SIZE (_XMM_SAVE + _XMM_SAVE_SIZE) /* rotate_Xs * Rotate values of symbols X0...X3 */ .macro rotate_Xs X_ = X0 X0 = X1 X1 = X2 X2 = X3 X3 = X_ .endm /* ROTATE_ARGS * Rotate values of symbols a...h */ .macro ROTATE_ARGS TMP_ = h h = g g = f f = e e = d d = c c = b b = a a = TMP_ .endm .macro FOUR_ROUNDS_AND_SCHED /* compute s0 four at a time and s1 two at a time * compute W[-16] + W[-7] 4 at a time */ mov y0, e /* y0 = e */ ROR y0, (25-11) /* y0 = e >> (25-11) */ mov y1, a /* y1 = a */ vpalignr XTMP0, X3, X2, 4 /* XTMP0 = W[-7] */ ROR y1, (22-13) /* y1 = a >> (22-13) */ xor y0, e /* y0 = e ^ (e >> (25-11)) */ mov y2, f /* y2 = f */ ROR y0, (11-6) /* y0 = (e >> (11-6)) ^ (e >> (25-6)) */ xor y1, a /* y1 = a ^ (a >> (22-13) */ xor y2, g /* y2 = f^g */ vpaddd XTMP0, XTMP0, X0 /* XTMP0 = W[-7] + W[-16] */ xor y0, e /* y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) */ and y2, e /* y2 = (f^g)&e */ ROR y1, (13-2) /* y1 = (a >> (13-2)) ^ (a >> (22-2)) */ /* compute s0 */ vpalignr XTMP1, X1, X0, 4 /* XTMP1 = W[-15] */ xor y1, a /* y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) */ ROR y0, 6 /* y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) */ xor y2, g /* y2 = CH = ((f^g)&e)^g */ ROR y1, 2 /* y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) */ add y2, y0 /* y2 = S1 + CH */ add y2, [rsp + _XFER + 0*4] /* y2 = k + w + S1 + CH */ mov y0, a /* y0 = a */ add h, y2 /* h = h + S1 + CH + k + w */ mov y2, a /* y2 = a */ vpslld XTMP2, XTMP1, (32-7) or y0, c /* y0 = a|c */ add d, h /* d = d + h + S1 + CH + k + w */ and y2, c /* y2 = a&c */ vpsrld XTMP3, XTMP1, 7 and y0, b /* y0 = (a|c)&b */ add h, y1 /* h = h + S1 + CH + k + w + S0 */ vpor XTMP3, XTMP3, XTMP2 /* XTMP1 = W[-15] ror 7 */ or y0, y2 /* y0 = MAJ = (a|c)&b)|(a&c) */ lea h, [h + y0] /* h = h + S1 + CH + k + w + S0 + MAJ */ ROTATE_ARGS mov y0, e /* y0 = e */ mov y1, a /* y1 = a */ ROR y0, (25-11) /* y0 = e >> (25-11) */ xor y0, e /* y0 = e ^ (e >> (25-11)) */ mov y2, f /* y2 = f */ ROR y1, (22-13) /* y1 = a >> (22-13) */ vpslld XTMP2, XTMP1, (32-18) xor y1, a /* y1 = a ^ (a >> (22-13) */ ROR y0, (11-6) /* y0 = (e >> (11-6)) ^ (e >> (25-6)) */ xor y2, g /* y2 = f^g */ vpsrld XTMP4, XTMP1, 18 ROR y1, (13-2) /* y1 = (a >> (13-2)) ^ (a >> 
(22-2)) */ xor y0, e /* y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) */ and y2, e /* y2 = (f^g)&e */ ROR y0, 6 /* y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) */ vpxor XTMP4, XTMP4, XTMP3 xor y1, a /* y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) */ xor y2, g /* y2 = CH = ((f^g)&e)^g */ vpsrld XTMP1, XTMP1, 3 /* XTMP4 = W[-15] >> 3 */ add y2, y0 /* y2 = S1 + CH */ add y2, [rsp + _XFER + 1*4] /* y2 = k + w + S1 + CH */ ROR y1, 2 /* y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) */ vpxor XTMP1, XTMP1, XTMP2 /* XTMP1 = W[-15] ror 7 ^ W[-15] ror 18 */ mov y0, a /* y0 = a */ add h, y2 /* h = h + S1 + CH + k + w */ mov y2, a /* y2 = a */ vpxor XTMP1, XTMP1, XTMP4 /* XTMP1 = s0 */ or y0, c /* y0 = a|c */ add d, h /* d = d + h + S1 + CH + k + w */ and y2, c /* y2 = a&c */ /* compute low s1 */ vpshufd XTMP2, X3, 0b11111010 /* XTMP2 = W[-2] {BBAA} */ and y0, b /* y0 = (a|c)&b */ add h, y1 /* h = h + S1 + CH + k + w + S0 */ vpaddd XTMP0, XTMP0, XTMP1 /* XTMP0 = W[-16] + W[-7] + s0 */ or y0, y2 /* y0 = MAJ = (a|c)&b)|(a&c) */ lea h, [h + y0] /* h = h + S1 + CH + k + w + S0 + MAJ */ ROTATE_ARGS mov y0, e /* y0 = e */ mov y1, a /* y1 = a */ ROR y0, (25-11) /* y0 = e >> (25-11) */ xor y0, e /* y0 = e ^ (e >> (25-11)) */ ROR y1, (22-13) /* y1 = a >> (22-13) */ mov y2, f /* y2 = f */ xor y1, a /* y1 = a ^ (a >> (22-13) */ ROR y0, (11-6) /* y0 = (e >> (11-6)) ^ (e >> (25-6)) */ vpsrlq XTMP3, XTMP2, 17 /* XTMP2 = W[-2] ror 17 {xBxA} */ xor y2, g /* y2 = f^g */ vpsrlq XTMP4, XTMP2, 19 /* XTMP3 = W[-2] ror 19 {xBxA} */ xor y0, e /* y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) */ and y2, e /* y2 = (f^g)&e */ vpsrld XTMP2, XTMP2, 10 /* XTMP4 = W[-2] >> 10 {BBAA} */ ROR y1, (13-2) /* y1 = (a >> (13-2)) ^ (a >> (22-2)) */ xor y1, a /* y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) */ xor y2, g /* y2 = CH = ((f^g)&e)^g */ ROR y0, 6 /* y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) */ vpxor XTMP2, XTMP2, XTMP3 add y2, y0 /* y2 = S1 + CH */ ROR y1, 2 /* y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) */ add y2, [rsp + _XFER + 2*4] /* y2 = k + w + S1 + CH */ vpxor XTMP4, XTMP4, XTMP2 /* XTMP4 = s1 {xBxA} */ mov y0, a /* y0 = a */ add h, y2 /* h = h + S1 + CH + k + w */ mov y2, a /* y2 = a */ vpshufb XTMP4, XTMP4, SHUF_00BA /* XTMP4 = s1 {00BA} */ or y0, c /* y0 = a|c */ add d, h /* d = d + h + S1 + CH + k + w */ and y2, c /* y2 = a&c */ vpaddd XTMP0, XTMP0, XTMP4 /* XTMP0 = {..., ..., W[1], W[0]} */ and y0, b /* y0 = (a|c)&b */ add h, y1 /* h = h + S1 + CH + k + w + S0 */ /* compute high s1 */ vpshufd XTMP2, XTMP0, 0b01010000 /* XTMP2 = W[-2] {DDCC} */ or y0, y2 /* y0 = MAJ = (a|c)&b)|(a&c) */ lea h, [h + y0] /* h = h + S1 + CH + k + w + S0 + MAJ */ ROTATE_ARGS mov y0, e /* y0 = e */ ROR y0, (25-11) /* y0 = e >> (25-11) */ mov y1, a /* y1 = a */ ROR y1, (22-13) /* y1 = a >> (22-13) */ xor y0, e /* y0 = e ^ (e >> (25-11)) */ mov y2, f /* y2 = f */ ROR y0, (11-6) /* y0 = (e >> (11-6)) ^ (e >> (25-6)) */ vpsrlq XTMP3, XTMP2, 17 /* XTMP2 = W[-2] ror 17 {xDxC} */ xor y1, a /* y1 = a ^ (a >> (22-13) */ xor y2, g /* y2 = f^g */ vpsrlq X0, XTMP2, 19 /* XTMP3 = W[-2] ror 19 {xDxC} */ xor y0, e /* y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) */ and y2, e /* y2 = (f^g)&e */ ROR y1, (13-2) /* y1 = (a >> (13-2)) ^ (a >> (22-2)) */ vpsrld XTMP2, XTMP2, 10 /* X0 = W[-2] >> 10 {DDCC} */ xor y1, a /* y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) */ ROR y0, 6 /* y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) */ xor y2, g /* y2 = CH = ((f^g)&e)^g */ vpxor XTMP2, XTMP2, XTMP3 ROR y1, 2 /* y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) */ add y2, y0 /* y2 = S1 + CH */ add y2, [rsp + _XFER + 3*4] /* y2 = k + w + S1 + CH */ vpxor X0, X0, 
XTMP2 /* X0 = s1 {xDxC} */ mov y0, a /* y0 = a */ add h, y2 /* h = h + S1 + CH + k + w */ mov y2, a /* y2 = a */ vpshufb X0, X0, SHUF_DC00 /* X0 = s1 {DC00} */ or y0, c /* y0 = a|c */ add d, h /* d = d + h + S1 + CH + k + w */ and y2, c /* y2 = a&c */ vpaddd X0, X0, XTMP0 /* X0 = {W[3], W[2], W[1], W[0]} */ and y0, b /* y0 = (a|c)&b */ add h, y1 /* h = h + S1 + CH + k + w + S0 */ or y0, y2 /* y0 = MAJ = (a|c)&b)|(a&c) */ lea h, [h + y0] /* h = h + S1 + CH + k + w + S0 + MAJ */ ROTATE_ARGS rotate_Xs .endm /* input is [rsp + _XFER + %1 * 4] */ .macro DO_ROUND i1 mov y0, e /* y0 = e */ ROR y0, (25-11) /* y0 = e >> (25-11) */ mov y1, a /* y1 = a */ xor y0, e /* y0 = e ^ (e >> (25-11)) */ ROR y1, (22-13) /* y1 = a >> (22-13) */ mov y2, f /* y2 = f */ xor y1, a /* y1 = a ^ (a >> (22-13) */ ROR y0, (11-6) /* y0 = (e >> (11-6)) ^ (e >> (25-6)) */ xor y2, g /* y2 = f^g */ xor y0, e /* y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) */ ROR y1, (13-2) /* y1 = (a >> (13-2)) ^ (a >> (22-2)) */ and y2, e /* y2 = (f^g)&e */ xor y1, a /* y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) */ ROR y0, 6 /* y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) */ xor y2, g /* y2 = CH = ((f^g)&e)^g */ add y2, y0 /* y2 = S1 + CH */ ROR y1, 2 /* y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) */ add y2, [rsp + _XFER + \i1 * 4] /* y2 = k + w + S1 + CH */ mov y0, a /* y0 = a */ add h, y2 /* h = h + S1 + CH + k + w */ mov y2, a /* y2 = a */ or y0, c /* y0 = a|c */ add d, h /* d = d + h + S1 + CH + k + w */ and y2, c /* y2 = a&c */ and y0, b /* y0 = (a|c)&b */ add h, y1 /* h = h + S1 + CH + k + w + S0 */ or y0, y2 /* y0 = MAJ = (a|c)&b)|(a&c) */ lea h, [h + y0] /* h = h + S1 + CH + k + w + S0 + MAJ */ ROTATE_ARGS .endm /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; void sha256_avx(void *input_data, UINT32 digest[8], UINT64 num_blks) ;; arg 1 : pointer to input data ;; arg 2 : pointer to digest ;; arg 3 : Num blocks */ .text .globl _gcry_sha256_transform_amd64_avx ELF(.type _gcry_sha256_transform_amd64_avx,@function;) .align 16 _gcry_sha256_transform_amd64_avx: vzeroupper push rbx push rbp push r13 push r14 push r15 sub rsp, STACK_SIZE shl NUM_BLKS, 6 /* convert to bytes */ jz .Ldone_hash add NUM_BLKS, INP /* pointer to end of data */ mov [rsp + _INP_END], NUM_BLKS /* load initial digest */ mov a,[4*0 + CTX] mov b,[4*1 + CTX] mov c,[4*2 + CTX] mov d,[4*3 + CTX] mov e,[4*4 + CTX] mov f,[4*5 + CTX] mov g,[4*6 + CTX] mov h,[4*7 + CTX] vmovdqa BYTE_FLIP_MASK, [.LPSHUFFLE_BYTE_FLIP_MASK ADD_RIP] vmovdqa SHUF_00BA, [.L_SHUF_00BA ADD_RIP] vmovdqa SHUF_DC00, [.L_SHUF_DC00 ADD_RIP] .Loop0: lea TBL, [.LK256 ADD_RIP] /* byte swap first 16 dwords */ COPY_XMM_AND_BSWAP X0, [INP + 0*16], BYTE_FLIP_MASK COPY_XMM_AND_BSWAP X1, [INP + 1*16], BYTE_FLIP_MASK COPY_XMM_AND_BSWAP X2, [INP + 2*16], BYTE_FLIP_MASK COPY_XMM_AND_BSWAP X3, [INP + 3*16], BYTE_FLIP_MASK mov [rsp + _INP], INP /* schedule 48 input dwords, by doing 3 rounds of 16 each */ mov SRND, 3 .align 16 .Loop1: vpaddd XFER, X0, [TBL + 0*16] vmovdqa [rsp + _XFER], XFER FOUR_ROUNDS_AND_SCHED vpaddd XFER, X0, [TBL + 1*16] vmovdqa [rsp + _XFER], XFER FOUR_ROUNDS_AND_SCHED vpaddd XFER, X0, [TBL + 2*16] vmovdqa [rsp + _XFER], XFER FOUR_ROUNDS_AND_SCHED vpaddd XFER, X0, [TBL + 3*16] vmovdqa [rsp + _XFER], XFER add TBL, 4*16 FOUR_ROUNDS_AND_SCHED sub SRND, 1 jne .Loop1 mov SRND, 2 .Loop2: vpaddd X0, X0, [TBL + 0*16] vmovdqa [rsp + _XFER], X0 DO_ROUND 0 DO_ROUND 1 DO_ROUND 2 DO_ROUND 3 vpaddd X1, X1, [TBL + 1*16] vmovdqa [rsp + 
_XFER], X1 add TBL, 2*16 DO_ROUND 0 DO_ROUND 1 DO_ROUND 2 DO_ROUND 3 vmovdqa X0, X2 vmovdqa X1, X3 sub SRND, 1 jne .Loop2 addm [4*0 + CTX],a addm [4*1 + CTX],b addm [4*2 + CTX],c addm [4*3 + CTX],d addm [4*4 + CTX],e addm [4*5 + CTX],f addm [4*6 + CTX],g addm [4*7 + CTX],h mov INP, [rsp + _INP] add INP, 64 cmp INP, [rsp + _INP_END] jne .Loop0 +.Ldone_hash: vzeroall -.Ldone_hash: + vmovdqa [rsp + _XFER], XFER + xor eax, eax + add rsp, STACK_SIZE pop r15 pop r14 pop r13 pop rbp pop rbx - mov eax, STACK_SIZE + 5*8 - ret .align 16 .LK256: .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 .LPSHUFFLE_BYTE_FLIP_MASK: .octa 0x0c0d0e0f08090a0b0405060700010203 /* shuffle xBxA -> 00BA */ .L_SHUF_00BA: .octa 0xFFFFFFFFFFFFFFFF0b0a090803020100 /* shuffle xDxC -> DC00 */ .L_SHUF_DC00: .octa 0x0b0a090803020100FFFFFFFFFFFFFFFF #endif #endif diff --git a/cipher/sha256-avx2-bmi2-amd64.S b/cipher/sha256-avx2-bmi2-amd64.S index 85e663fe..598f9382 100644 --- a/cipher/sha256-avx2-bmi2-amd64.S +++ b/cipher/sha256-avx2-bmi2-amd64.S @@ -1,813 +1,829 @@ /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; Copyright (c) 2012, Intel Corporation ; ; All rights reserved. ; ; Redistribution and use in source and binary forms, with or without ; modification, are permitted provided that the following conditions are ; met: ; ; * Redistributions of source code must retain the above copyright ; notice, this list of conditions and the following disclaimer. ; ; * Redistributions in binary form must reproduce the above copyright ; notice, this list of conditions and the following disclaimer in the ; documentation and/or other materials provided with the ; distribution. ; ; * Neither the name of the Intel Corporation nor the names of its ; contributors may be used to endorse or promote products derived from ; this software without specific prior written permission. ; ; ; THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY ; EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR ; PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR ; CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, ; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, ; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ; LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; ; This code is described in an Intel White-Paper: ; "Fast SHA-256 Implementations on Intel Architecture Processors" ; ; To find it, surf to http://www.intel.com/p/en_US/embedded ; and search for that title. ; The paper is expected to be released roughly at the end of April, 2012 ; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; This code schedules 2 blocks at a time, with 4 lanes per block ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ /* * Conversion to GAS assembly and integration to libgcrypt * by Jussi Kivilinna */ #ifdef __x86_64 #include <config.h> #if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && \ defined(HAVE_INTEL_SYNTAX_PLATFORM_AS) && \ defined(HAVE_GCC_INLINE_ASM_AVX2) && defined(HAVE_GCC_INLINE_ASM_BMI2) && \ defined(USE_SHA256) #ifdef __PIC__ # define ADD_RIP +rip #else # define ADD_RIP #endif #ifdef HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS # define ELF(...) __VA_ARGS__ #else # define ELF(...) /*_*/ #endif .intel_syntax noprefix #define VMOVDQ vmovdqu /* ; assume buffers not aligned */ /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; Define Macros */ /* addm [mem], reg */ /* Add reg to mem using reg-mem add and store */ .macro addm p1 p2 add \p2, \p1 mov \p1, \p2 .endm /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ X0 = ymm4 X1 = ymm5 X2 = ymm6 X3 = ymm7 /* XMM versions of above */ XWORD0 = xmm4 XWORD1 = xmm5 XWORD2 = xmm6 XWORD3 = xmm7 XTMP0 = ymm0 XTMP1 = ymm1 XTMP2 = ymm2 XTMP3 = ymm3 XTMP4 = ymm8 XFER = ymm9 XTMP5 = ymm11 SHUF_00BA = ymm10 /* shuffle xBxA -> 00BA */ SHUF_DC00 = ymm12 /* shuffle xDxC -> DC00 */ BYTE_FLIP_MASK = ymm13 X_BYTE_FLIP_MASK = xmm13 /* XMM version of BYTE_FLIP_MASK */ NUM_BLKS = rdx /* 3rd arg */ CTX = rsi /* 2nd arg */ INP = rdi /* 1st arg */ c = ecx d = r8d e = edx /* clobbers NUM_BLKS */ y3 = edi /* clobbers INP */ TBL = rbp SRND = CTX /* SRND is same register as CTX */ a = eax b = ebx f = r9d g = r10d h = r11d old_h = r11d T1 = r12d y0 = r13d y1 = r14d y2 = r15d _XFER_SIZE = 2*64*4 /* 2 blocks, 64 rounds, 4 bytes/round */ _XMM_SAVE_SIZE = 0 _INP_END_SIZE = 8 _INP_SIZE = 8 _CTX_SIZE = 8 _RSP_SIZE = 8 _XFER = 0 _XMM_SAVE = _XFER + _XFER_SIZE _INP_END = _XMM_SAVE + _XMM_SAVE_SIZE _INP = _INP_END + _INP_END_SIZE _CTX = _INP + _INP_SIZE _RSP = _CTX + _CTX_SIZE STACK_SIZE = _RSP + _RSP_SIZE /* rotate_Xs */ /* Rotate values of symbols X0...X3 */ .macro rotate_Xs X_ = X0 X0 = X1 X1 = X2 X2 = X3 X3 = X_ .endm /* ROTATE_ARGS */ /* Rotate values of symbols a...h */ .macro ROTATE_ARGS old_h = h TMP_ = h h = g g = f f = e e = d d = c c = b b = a a = TMP_ .endm .macro FOUR_ROUNDS_AND_SCHED XFER /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 0 ;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ mov y3, a /* y3 = a ; MAJA */ rorx y0, e, 25 /* y0 = e >> 25 ; S1A */ rorx y1, e, 11 /* y1 = e >> 11 ; S1B */ add h, [\XFER+0*4] /* h = k + w + h ; -- */ or y3, c /* y3 = a|c ; MAJA */ vpalignr XTMP0, X3, X2, 4 /* XTMP0 = W[-7] */ mov y2, f /* y2 = f ; CH */ rorx T1, a, 13 /* T1 = a >> 13 ; S0B */ xor y0, y1 /* y0 = (e>>25) ^ (e>>11) ; S1 */ xor y2, g /* y2 = f^g ; CH */ vpaddd XTMP0, XTMP0, X0 /* XTMP0 = W[-7] + W[-16]; y1 = (e >> 6) ; S1 */ rorx y1, e, 6 /* y1 = (e >> 6) ; S1 */ and y2, e /* y2 = (f^g)&e ; CH */ xor y0, y1 /* y0 = (e>>25) ^ (e>>11) ^ (e>>6) ; S1 */ rorx y1, a, 22 /* y1 = a >> 22 ; S0A */ add d, h /* d = k + w + h + d ; -- */ and y3, b /* y3 = (a|c)&b ; MAJA */ vpalignr
XTMP1, X1, X0, 4 /* XTMP1 = W[-15] */ xor y1, T1 /* y1 = (a>>22) ^ (a>>13) ; S0 */ rorx T1, a, 2 /* T1 = (a >> 2) ; S0 */ xor y2, g /* y2 = CH = ((f^g)&e)^g ; CH */ vpsrld XTMP2, XTMP1, 7 xor y1, T1 /* y1 = (a>>22) ^ (a>>13) ^ (a>>2) ; S0 */ mov T1, a /* T1 = a ; MAJB */ and T1, c /* T1 = a&c ; MAJB */ add y2, y0 /* y2 = S1 + CH ; -- */ vpslld XTMP3, XTMP1, (32-7) or y3, T1 /* y3 = MAJ = (a|c)&b)|(a&c) ; MAJ */ add h, y1 /* h = k + w + h + S0 ; -- */ add d, y2 /* d = k + w + h + d + S1 + CH = d + t1 ; -- */ vpor XTMP3, XTMP3, XTMP2 /* XTMP3 = W[-15] ror 7 */ vpsrld XTMP2, XTMP1,18 add h, y2 /* h = k + w + h + S0 + S1 + CH = t1 + S0; -- */ lea h, [h + y3] /* h = t1 + S0 + MAJ ; -- */ ROTATE_ARGS /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 1 ;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ mov y3, a /* y3 = a ; MAJA */ rorx y0, e, 25 /* y0 = e >> 25 ; S1A */ rorx y1, e, 11 /* y1 = e >> 11 ; S1B */ add h, [\XFER+1*4] /* h = k + w + h ; -- */ or y3, c /* y3 = a|c ; MAJA */ vpsrld XTMP4, XTMP1, 3 /* XTMP4 = W[-15] >> 3 */ mov y2, f /* y2 = f ; CH */ rorx T1, a, 13 /* T1 = a >> 13 ; S0B */ xor y0, y1 /* y0 = (e>>25) ^ (e>>11) ; S1 */ xor y2, g /* y2 = f^g ; CH */ rorx y1, e, 6 /* y1 = (e >> 6) ; S1 */ xor y0, y1 /* y0 = (e>>25) ^ (e>>11) ^ (e>>6) ; S1 */ rorx y1, a, 22 /* y1 = a >> 22 ; S0A */ and y2, e /* y2 = (f^g)&e ; CH */ add d, h /* d = k + w + h + d ; -- */ vpslld XTMP1, XTMP1, (32-18) and y3, b /* y3 = (a|c)&b ; MAJA */ xor y1, T1 /* y1 = (a>>22) ^ (a>>13) ; S0 */ vpxor XTMP3, XTMP3, XTMP1 rorx T1, a, 2 /* T1 = (a >> 2) ; S0 */ xor y2, g /* y2 = CH = ((f^g)&e)^g ; CH */ vpxor XTMP3, XTMP3, XTMP2 /* XTMP3 = W[-15] ror 7 ^ W[-15] ror 18 */ xor y1, T1 /* y1 = (a>>22) ^ (a>>13) ^ (a>>2) ; S0 */ mov T1, a /* T1 = a ; MAJB */ and T1, c /* T1 = a&c ; MAJB */ add y2, y0 /* y2 = S1 + CH ; -- */ vpxor XTMP1, XTMP3, XTMP4 /* XTMP1 = s0 */ vpshufd XTMP2, X3, 0b11111010 /* XTMP2 = W[-2] {BBAA} */ or y3, T1 /* y3 = MAJ = (a|c)&b)|(a&c) ; MAJ */ add h, y1 /* h = k + w + h + S0 ; -- */ vpaddd XTMP0, XTMP0, XTMP1 /* XTMP0 = W[-16] + W[-7] + s0 */ add d, y2 /* d = k + w + h + d + S1 + CH = d + t1 ; -- */ add h, y2 /* h = k + w + h + S0 + S1 + CH = t1 + S0; -- */ lea h, [h + y3] /* h = t1 + S0 + MAJ ; -- */ vpsrld XTMP4, XTMP2, 10 /* XTMP4 = W[-2] >> 10 {BBAA} */ ROTATE_ARGS /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 2 ;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ mov y3, a /* y3 = a ; MAJA */ rorx y0, e, 25 /* y0 = e >> 25 ; S1A */ add h, [\XFER+2*4] /* h = k + w + h ; -- */ vpsrlq XTMP3, XTMP2, 19 /* XTMP3 = W[-2] ror 19 {xBxA} */ rorx y1, e, 11 /* y1 = e >> 11 ; S1B */ or y3, c /* y3 = a|c ; MAJA */ mov y2, f /* y2 = f ; CH */ xor y2, g /* y2 = f^g ; CH */ rorx T1, a, 13 /* T1 = a >> 13 ; S0B */ xor y0, y1 /* y0 = (e>>25) ^ (e>>11) ; S1 */ vpsrlq XTMP2, XTMP2, 17 /* XTMP2 = W[-2] ror 17 {xBxA} */ and y2, e /* y2 = (f^g)&e ; CH */ rorx y1, e, 6 /* y1 = (e >> 6) ; S1 */ vpxor XTMP2, XTMP2, XTMP3 add d, h /* d = k + w + h + d ; -- */ and y3, b /* y3 = (a|c)&b ; MAJA */ xor y0, y1 /* y0 = (e>>25) ^ (e>>11) ^ (e>>6) ; S1 */ rorx y1, a, 22 /* y1 = a >> 22 ; S0A */ vpxor XTMP4, XTMP4, XTMP2 /* XTMP4 = s1 {xBxA} */ xor y2, g /* y2 = CH = ((f^g)&e)^g ; CH */ vpshufb XTMP4, XTMP4, SHUF_00BA /* XTMP4 = s1 {00BA} */ xor y1, T1 /* y1 = (a>>22) ^ (a>>13) ; S0 */ rorx T1, a, 2 /* T1 = (a >> 2) ; S0 */ vpaddd XTMP0, XTMP0, XTMP4 /* XTMP0 = {..., ..., W[1], W[0]} */ xor y1, T1 /* y1 = (a>>22) ^ (a>>13) ^ (a>>2) ; S0 */ mov T1, a /* T1 = a ; MAJB */ and T1, c /* T1 = a&c ; MAJB */ add y2, y0 /* y2 = S1 + CH ; -- */ vpshufd XTMP2, XTMP0, 0b1010000 
/* XTMP2 = W[-2] {DDCC} */ or y3, T1 /* y3 = MAJ = (a|c)&b)|(a&c) ; MAJ */ add h, y1 /* h = k + w + h + S0 ; -- */ add d, y2 /* d = k + w + h + d + S1 + CH = d + t1 ; -- */ add h, y2 /* h = k + w + h + S0 + S1 + CH = t1 + S0; -- */ lea h, [h + y3] /* h = t1 + S0 + MAJ ; -- */ ROTATE_ARGS /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 3 ;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ mov y3, a /* y3 = a ; MAJA */ rorx y0, e, 25 /* y0 = e >> 25 ; S1A */ rorx y1, e, 11 /* y1 = e >> 11 ; S1B */ add h, [\XFER+3*4] /* h = k + w + h ; -- */ or y3, c /* y3 = a|c ; MAJA */ vpsrld XTMP5, XTMP2, 10 /* XTMP5 = W[-2] >> 10 {DDCC} */ mov y2, f /* y2 = f ; CH */ rorx T1, a, 13 /* T1 = a >> 13 ; S0B */ xor y0, y1 /* y0 = (e>>25) ^ (e>>11) ; S1 */ xor y2, g /* y2 = f^g ; CH */ vpsrlq XTMP3, XTMP2, 19 /* XTMP3 = W[-2] ror 19 {xDxC} */ rorx y1, e, 6 /* y1 = (e >> 6) ; S1 */ and y2, e /* y2 = (f^g)&e ; CH */ add d, h /* d = k + w + h + d ; -- */ and y3, b /* y3 = (a|c)&b ; MAJA */ vpsrlq XTMP2, XTMP2, 17 /* XTMP2 = W[-2] ror 17 {xDxC} */ xor y0, y1 /* y0 = (e>>25) ^ (e>>11) ^ (e>>6) ; S1 */ xor y2, g /* y2 = CH = ((f^g)&e)^g ; CH */ vpxor XTMP2, XTMP2, XTMP3 rorx y1, a, 22 /* y1 = a >> 22 ; S0A */ add y2, y0 /* y2 = S1 + CH ; -- */ vpxor XTMP5, XTMP5, XTMP2 /* XTMP5 = s1 {xDxC} */ xor y1, T1 /* y1 = (a>>22) ^ (a>>13) ; S0 */ add d, y2 /* d = k + w + h + d + S1 + CH = d + t1 ; -- */ rorx T1, a, 2 /* T1 = (a >> 2) ; S0 */ vpshufb XTMP5, XTMP5, SHUF_DC00 /* XTMP5 = s1 {DC00} */ vpaddd X0, XTMP5, XTMP0 /* X0 = {W[3], W[2], W[1], W[0]} */ xor y1, T1 /* y1 = (a>>22) ^ (a>>13) ^ (a>>2) ; S0 */ mov T1, a /* T1 = a ; MAJB */ and T1, c /* T1 = a&c ; MAJB */ or y3, T1 /* y3 = MAJ = (a|c)&b)|(a&c) ; MAJ */ add h, y1 /* h = k + w + h + S0 ; -- */ add h, y2 /* h = k + w + h + S0 + S1 + CH = t1 + S0; -- */ lea h, [h + y3] /* h = t1 + S0 + MAJ ; -- */ ROTATE_ARGS rotate_Xs .endm .macro DO_4ROUNDS XFER /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 0 ;;;;;;;;;;;;;;;;;;;;;;;;;;; */ mov y2, f /* y2 = f ; CH */ rorx y0, e, 25 /* y0 = e >> 25 ; S1A */ rorx y1, e, 11 /* y1 = e >> 11 ; S1B */ xor y2, g /* y2 = f^g ; CH */ xor y0, y1 /* y0 = (e>>25) ^ (e>>11) ; S1 */ rorx y1, e, 6 /* y1 = (e >> 6) ; S1 */ and y2, e /* y2 = (f^g)&e ; CH */ xor y0, y1 /* y0 = (e>>25) ^ (e>>11) ^ (e>>6) ; S1 */ rorx T1, a, 13 /* T1 = a >> 13 ; S0B */ xor y2, g /* y2 = CH = ((f^g)&e)^g ; CH */ rorx y1, a, 22 /* y1 = a >> 22 ; S0A */ mov y3, a /* y3 = a ; MAJA */ xor y1, T1 /* y1 = (a>>22) ^ (a>>13) ; S0 */ rorx T1, a, 2 /* T1 = (a >> 2) ; S0 */ add h, [\XFER + 4*0] /* h = k + w + h ; -- */ or y3, c /* y3 = a|c ; MAJA */ xor y1, T1 /* y1 = (a>>22) ^ (a>>13) ^ (a>>2) ; S0 */ mov T1, a /* T1 = a ; MAJB */ and y3, b /* y3 = (a|c)&b ; MAJA */ and T1, c /* T1 = a&c ; MAJB */ add y2, y0 /* y2 = S1 + CH ; -- */ add d, h /* d = k + w + h + d ; -- */ or y3, T1 /* y3 = MAJ = (a|c)&b)|(a&c) ; MAJ */ add h, y1 /* h = k + w + h + S0 ; -- */ add d, y2 /* d = k + w + h + d + S1 + CH = d + t1 ; -- */ /* add h, y2 ; h = k + w + h + S0 + S1 + CH = t1 + S0; -- */ /* lea h, [h + y3] ; h = t1 + S0 + MAJ ; -- */ ROTATE_ARGS /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 1 ;;;;;;;;;;;;;;;;;;;;;;;;;;; */ add old_h, y2 /* h = k + w + h + S0 + S1 + CH = t1 + S0; -- */ mov y2, f /* y2 = f ; CH */ rorx y0, e, 25 /* y0 = e >> 25 ; S1A */ rorx y1, e, 11 /* y1 = e >> 11 ; S1B */ xor y2, g /* y2 = f^g ; CH */ xor y0, y1 /* y0 = (e>>25) ^ (e>>11) ; S1 */ rorx y1, e, 6 /* y1 = (e >> 6) ; S1 */ and y2, e /* y2 = (f^g)&e ; CH */ add old_h, y3 /* h = t1 + S0 + MAJ ; -- */ xor y0, y1 /* y0 = (e>>25) ^ (e>>11) 
^ (e>>6) ; S1 */ rorx T1, a, 13 /* T1 = a >> 13 ; S0B */ xor y2, g /* y2 = CH = ((f^g)&e)^g ; CH */ rorx y1, a, 22 /* y1 = a >> 22 ; S0A */ mov y3, a /* y3 = a ; MAJA */ xor y1, T1 /* y1 = (a>>22) ^ (a>>13) ; S0 */ rorx T1, a, 2 /* T1 = (a >> 2) ; S0 */ add h, [\XFER + 4*1] /* h = k + w + h ; -- */ or y3, c /* y3 = a|c ; MAJA */ xor y1, T1 /* y1 = (a>>22) ^ (a>>13) ^ (a>>2) ; S0 */ mov T1, a /* T1 = a ; MAJB */ and y3, b /* y3 = (a|c)&b ; MAJA */ and T1, c /* T1 = a&c ; MAJB */ add y2, y0 /* y2 = S1 + CH ; -- */ add d, h /* d = k + w + h + d ; -- */ or y3, T1 /* y3 = MAJ = (a|c)&b)|(a&c) ; MAJ */ add h, y1 /* h = k + w + h + S0 ; -- */ add d, y2 /* d = k + w + h + d + S1 + CH = d + t1 ; -- */ /* add h, y2 ; h = k + w + h + S0 + S1 + CH = t1 + S0; -- */ /* lea h, [h + y3] ; h = t1 + S0 + MAJ ; -- */ ROTATE_ARGS /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 2 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ add old_h, y2 /* h = k + w + h + S0 + S1 + CH = t1 + S0; -- */ mov y2, f /* y2 = f ; CH */ rorx y0, e, 25 /* y0 = e >> 25 ; S1A */ rorx y1, e, 11 /* y1 = e >> 11 ; S1B */ xor y2, g /* y2 = f^g ; CH */ xor y0, y1 /* y0 = (e>>25) ^ (e>>11) ; S1 */ rorx y1, e, 6 /* y1 = (e >> 6) ; S1 */ and y2, e /* y2 = (f^g)&e ; CH */ add old_h, y3 /* h = t1 + S0 + MAJ ; -- */ xor y0, y1 /* y0 = (e>>25) ^ (e>>11) ^ (e>>6) ; S1 */ rorx T1, a, 13 /* T1 = a >> 13 ; S0B */ xor y2, g /* y2 = CH = ((f^g)&e)^g ; CH */ rorx y1, a, 22 /* y1 = a >> 22 ; S0A */ mov y3, a /* y3 = a ; MAJA */ xor y1, T1 /* y1 = (a>>22) ^ (a>>13) ; S0 */ rorx T1, a, 2 /* T1 = (a >> 2) ; S0 */ add h, [\XFER + 4*2] /* h = k + w + h ; -- */ or y3, c /* y3 = a|c ; MAJA */ xor y1, T1 /* y1 = (a>>22) ^ (a>>13) ^ (a>>2) ; S0 */ mov T1, a /* T1 = a ; MAJB */ and y3, b /* y3 = (a|c)&b ; MAJA */ and T1, c /* T1 = a&c ; MAJB */ add y2, y0 /* y2 = S1 + CH ; -- */ add d, h /* d = k + w + h + d ; -- */ or y3, T1 /* y3 = MAJ = (a|c)&b)|(a&c) ; MAJ */ add h, y1 /* h = k + w + h + S0 ; -- */ add d, y2 /* d = k + w + h + d + S1 + CH = d + t1 ; -- */ /* add h, y2 ; h = k + w + h + S0 + S1 + CH = t1 + S0; -- */ /* lea h, [h + y3] ; h = t1 + S0 + MAJ ; -- */ ROTATE_ARGS /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 3 ;;;;;;;;;;;;;;;;;;;;;;;;;;; */ add old_h, y2 /* h = k + w + h + S0 + S1 + CH = t1 + S0; -- */ mov y2, f /* y2 = f ; CH */ rorx y0, e, 25 /* y0 = e >> 25 ; S1A */ rorx y1, e, 11 /* y1 = e >> 11 ; S1B */ xor y2, g /* y2 = f^g ; CH */ xor y0, y1 /* y0 = (e>>25) ^ (e>>11) ; S1 */ rorx y1, e, 6 /* y1 = (e >> 6) ; S1 */ and y2, e /* y2 = (f^g)&e ; CH */ add old_h, y3 /* h = t1 + S0 + MAJ ; -- */ xor y0, y1 /* y0 = (e>>25) ^ (e>>11) ^ (e>>6) ; S1 */ rorx T1, a, 13 /* T1 = a >> 13 ; S0B */ xor y2, g /* y2 = CH = ((f^g)&e)^g ; CH */ rorx y1, a, 22 /* y1 = a >> 22 ; S0A */ mov y3, a /* y3 = a ; MAJA */ xor y1, T1 /* y1 = (a>>22) ^ (a>>13) ; S0 */ rorx T1, a, 2 /* T1 = (a >> 2) ; S0 */ add h, [\XFER + 4*3] /* h = k + w + h ; -- */ or y3, c /* y3 = a|c ; MAJA */ xor y1, T1 /* y1 = (a>>22) ^ (a>>13) ^ (a>>2) ; S0 */ mov T1, a /* T1 = a ; MAJB */ and y3, b /* y3 = (a|c)&b ; MAJA */ and T1, c /* T1 = a&c ; MAJB */ add y2, y0 /* y2 = S1 + CH ; -- */ add d, h /* d = k + w + h + d ; -- */ or y3, T1 /* y3 = MAJ = (a|c)&b)|(a&c) ; MAJ */ add h, y1 /* h = k + w + h + S0 ; -- */ add d, y2 /* d = k + w + h + d + S1 + CH = d + t1 ; -- */ add h, y2 /* h = k + w + h + S0 + S1 + CH = t1 + S0; -- */ lea h, [h + y3] /* h = t1 + S0 + MAJ ; -- */ ROTATE_ARGS .endm /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; 
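; Note on the boolean forms used by the round macros above: Ch(e,f,g)
; is computed as ((f ^ g) & e) ^ g and Maj(a,b,c) as ((a | c) & b) | (a & c),
; which need fewer instructions than the textbook (e & f) ^ (~e & g) and
; (a & b) ^ (a & c) ^ (b & c).  A one-line C check of each identity,
; illustrative only (a..g being 32-bit unsigned words):
;
;   assert((((f ^ g) & e) ^ g) == ((e & f) ^ (~e & g)));
;   assert((((a | c) & b) | (a & c)) == ((a & b) ^ (a & c) ^ (b & c)));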
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; void sha256_rorx(void *input_data, UINT32 digest[8], UINT64 num_blks) ;; arg 1 : pointer to input data ;; arg 2 : pointer to digest ;; arg 3 : Num blocks */ .text .globl _gcry_sha256_transform_amd64_avx2 ELF(.type _gcry_sha256_transform_amd64_avx2,@function) .align 32 _gcry_sha256_transform_amd64_avx2: push rbx push rbp push r12 push r13 push r14 push r15 vzeroupper mov rax, rsp sub rsp, STACK_SIZE and rsp, -32 mov [rsp + _RSP], rax shl NUM_BLKS, 6 /* convert to bytes */ jz .Ldone_hash lea NUM_BLKS, [NUM_BLKS + INP - 64] /* pointer to last block */ mov [rsp + _INP_END], NUM_BLKS cmp INP, NUM_BLKS je .Lonly_one_block /* ; load initial digest */ mov a,[4*0 + CTX] mov b,[4*1 + CTX] mov c,[4*2 + CTX] mov d,[4*3 + CTX] mov e,[4*4 + CTX] mov f,[4*5 + CTX] mov g,[4*6 + CTX] mov h,[4*7 + CTX] vmovdqa BYTE_FLIP_MASK, [.LPSHUFFLE_BYTE_FLIP_MASK ADD_RIP] vmovdqa SHUF_00BA, [.L_SHUF_00BA ADD_RIP] vmovdqa SHUF_DC00, [.L_SHUF_DC00 ADD_RIP] mov [rsp + _CTX], CTX .Loop0: lea TBL, [.LK256 ADD_RIP] /* ; Load first 16 dwords from two blocks */ VMOVDQ XTMP0, [INP + 0*32] VMOVDQ XTMP1, [INP + 1*32] VMOVDQ XTMP2, [INP + 2*32] VMOVDQ XTMP3, [INP + 3*32] /* ; byte swap data */ vpshufb XTMP0, XTMP0, BYTE_FLIP_MASK vpshufb XTMP1, XTMP1, BYTE_FLIP_MASK vpshufb XTMP2, XTMP2, BYTE_FLIP_MASK vpshufb XTMP3, XTMP3, BYTE_FLIP_MASK /* ; transpose data into high/low halves */ vperm2i128 X0, XTMP0, XTMP2, 0x20 vperm2i128 X1, XTMP0, XTMP2, 0x31 vperm2i128 X2, XTMP1, XTMP3, 0x20 vperm2i128 X3, XTMP1, XTMP3, 0x31 .Last_block_enter: add INP, 64 mov [rsp + _INP], INP /* ; schedule 48 input dwords, by doing 3 rounds of 12 each */ xor SRND, SRND .align 16 .Loop1: vpaddd XFER, X0, [TBL + SRND + 0*32] vmovdqa [rsp + _XFER + SRND + 0*32], XFER FOUR_ROUNDS_AND_SCHED rsp + _XFER + SRND + 0*32 vpaddd XFER, X0, [TBL + SRND + 1*32] vmovdqa [rsp + _XFER + SRND + 1*32], XFER FOUR_ROUNDS_AND_SCHED rsp + _XFER + SRND + 1*32 vpaddd XFER, X0, [TBL + SRND + 2*32] vmovdqa [rsp + _XFER + SRND + 2*32], XFER FOUR_ROUNDS_AND_SCHED rsp + _XFER + SRND + 2*32 vpaddd XFER, X0, [TBL + SRND + 3*32] vmovdqa [rsp + _XFER + SRND + 3*32], XFER FOUR_ROUNDS_AND_SCHED rsp + _XFER + SRND + 3*32 add SRND, 4*32 cmp SRND, 3 * 4*32 jb .Loop1 .Loop2: /* ; Do last 16 rounds with no scheduling */ vpaddd XFER, X0, [TBL + SRND + 0*32] vmovdqa [rsp + _XFER + SRND + 0*32], XFER DO_4ROUNDS rsp + _XFER + SRND + 0*32 vpaddd XFER, X1, [TBL + SRND + 1*32] vmovdqa [rsp + _XFER + SRND + 1*32], XFER DO_4ROUNDS rsp + _XFER + SRND + 1*32 add SRND, 2*32 vmovdqa X0, X2 vmovdqa X1, X3 cmp SRND, 4 * 4*32 jb .Loop2 mov CTX, [rsp + _CTX] mov INP, [rsp + _INP] addm [4*0 + CTX],a addm [4*1 + CTX],b addm [4*2 + CTX],c addm [4*3 + CTX],d addm [4*4 + CTX],e addm [4*5 + CTX],f addm [4*6 + CTX],g addm [4*7 + CTX],h cmp INP, [rsp + _INP_END] ja .Ldone_hash /* ;;; Do second block using previously scheduled results */ xor SRND, SRND .align 16 .Loop3: DO_4ROUNDS rsp + _XFER + SRND + 0*32 + 16 DO_4ROUNDS rsp + _XFER + SRND + 1*32 + 16 add SRND, 2*32 cmp SRND, 4 * 4*32 jb .Loop3 mov CTX, [rsp + _CTX] mov INP, [rsp + _INP] add INP, 64 addm [4*0 + CTX],a addm [4*1 + CTX],b addm [4*2 + CTX],c addm [4*3 + CTX],d addm [4*4 + CTX],e addm [4*5 + CTX],f addm [4*6 + CTX],g addm [4*7 + CTX],h cmp INP, [rsp + _INP_END] jb .Loop0 ja .Ldone_hash .Ldo_last_block: /* ;;; do last block */ lea TBL, [.LK256 ADD_RIP] VMOVDQ XWORD0, [INP + 0*16] VMOVDQ XWORD1, [INP + 1*16] VMOVDQ XWORD2, [INP + 2*16] VMOVDQ XWORD3, [INP + 3*16] vpshufb XWORD0, 
XWORD0, X_BYTE_FLIP_MASK vpshufb XWORD1, XWORD1, X_BYTE_FLIP_MASK vpshufb XWORD2, XWORD2, X_BYTE_FLIP_MASK vpshufb XWORD3, XWORD3, X_BYTE_FLIP_MASK jmp .Last_block_enter .Lonly_one_block: /* ; load initial digest */ mov a,[4*0 + CTX] mov b,[4*1 + CTX] mov c,[4*2 + CTX] mov d,[4*3 + CTX] mov e,[4*4 + CTX] mov f,[4*5 + CTX] mov g,[4*6 + CTX] mov h,[4*7 + CTX] vmovdqa BYTE_FLIP_MASK, [.LPSHUFFLE_BYTE_FLIP_MASK ADD_RIP] vmovdqa SHUF_00BA, [.L_SHUF_00BA ADD_RIP] vmovdqa SHUF_DC00, [.L_SHUF_DC00 ADD_RIP] mov [rsp + _CTX], CTX jmp .Ldo_last_block .Ldone_hash: - mov rsp, [rsp + _RSP] - vzeroall + /* burn stack */ + vmovdqa [rsp + _XFER + 0 * 32], ymm0 + vmovdqa [rsp + _XFER + 1 * 32], ymm0 + vmovdqa [rsp + _XFER + 2 * 32], ymm0 + vmovdqa [rsp + _XFER + 3 * 32], ymm0 + vmovdqa [rsp + _XFER + 4 * 32], ymm0 + vmovdqa [rsp + _XFER + 5 * 32], ymm0 + vmovdqa [rsp + _XFER + 6 * 32], ymm0 + vmovdqa [rsp + _XFER + 7 * 32], ymm0 + vmovdqa [rsp + _XFER + 8 * 32], ymm0 + vmovdqa [rsp + _XFER + 9 * 32], ymm0 + vmovdqa [rsp + _XFER + 10 * 32], ymm0 + vmovdqa [rsp + _XFER + 11 * 32], ymm0 + vmovdqa [rsp + _XFER + 12 * 32], ymm0 + vmovdqa [rsp + _XFER + 13 * 32], ymm0 + vmovdqa [rsp + _XFER + 14 * 32], ymm0 + vmovdqa [rsp + _XFER + 15 * 32], ymm0 + xor eax, eax + + mov rsp, [rsp + _RSP] + pop r15 pop r14 pop r13 pop r12 pop rbp pop rbx - /* stack burn depth */ - mov eax, STACK_SIZE + 6*8 + 31 - ret .align 64 .LK256: .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 .LPSHUFFLE_BYTE_FLIP_MASK: .octa 0x0c0d0e0f08090a0b0405060700010203,0x0c0d0e0f08090a0b0405060700010203 /* shuffle xBxA -> 00BA */ .L_SHUF_00BA: .octa 0xFFFFFFFFFFFFFFFF0b0a090803020100,0xFFFFFFFFFFFFFFFF0b0a090803020100 /* shuffle xDxC -> DC00 */ .L_SHUF_DC00: .octa 0x0b0a090803020100FFFFFFFFFFFFFFFF,0x0b0a090803020100FFFFFFFFFFFFFFFF #endif #endif diff --git a/cipher/sha256-ssse3-amd64.S b/cipher/sha256-ssse3-amd64.S index a9213e41..ca5c9fd1 100644 --- a/cipher/sha256-ssse3-amd64.S +++ b/cipher/sha256-ssse3-amd64.S @@ -1,547 +1,549 
@@ /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; Copyright (c) 2012, Intel Corporation ; ; All rights reserved. ; ; Redistribution and use in source and binary forms, with or without ; modification, are permitted provided that the following conditions are ; met: ; ; * Redistributions of source code must retain the above copyright ; notice, this list of conditions and the following disclaimer. ; ; * Redistributions in binary form must reproduce the above copyright ; notice, this list of conditions and the following disclaimer in the ; documentation and/or other materials provided with the ; distribution. ; ; * Neither the name of the Intel Corporation nor the names of its ; contributors may be used to endorse or promote products derived from ; this software without specific prior written permission. ; ; ; THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY ; EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR ; PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR ; CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, ; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, ; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ; LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; ; This code is described in an Intel White-Paper: ; "Fast SHA-256 Implementations on Intel Architecture Processors" ; ; To find it, surf to http://www.intel.com/p/en_US/embedded ; and search for that title. ; The paper is expected to be released roughly at the end of April, 2012 ; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; This code schedules 1 block at a time, with 4 lanes per block ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ /* * Conversion to GAS assembly and integration to libgcrypt * by Jussi Kivilinna * * Note: original implementation was named as SHA256-SSE4. However, only SSSE3 * is required. */ #ifdef __x86_64 #include <config.h> #if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && \ defined(HAVE_INTEL_SYNTAX_PLATFORM_AS) && \ defined(HAVE_GCC_INLINE_ASM_SSSE3) && defined(USE_SHA256) #ifdef __PIC__ # define ADD_RIP +rip #else # define ADD_RIP #endif #ifdef HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS # define ELF(...) __VA_ARGS__ #else # define ELF(...)
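/*
 * Note: the DO_ROUND and FOUR_ROUNDS_AND_SCHED macros below build the
 * SHA-256 Sigma functions from staged rotates: rotating the partial
 * XOR result re-rotates every term already accumulated, so
 * S1(e) = ror(e,6) ^ ror(e,11) ^ ror(e,25) costs only three rotates
 * and two XORs.  A minimal C model of the trick, illustrative only:
 *
 *   #include <stdint.h>
 *
 *   static uint32_t ror32(uint32_t x, unsigned n)
 *   {
 *     return (x >> n) | (x << (32 - n));
 *   }
 *
 *   static uint32_t big_sigma1(uint32_t e)
 *   {
 *     uint32_t y = ror32(e, 25 - 11);  // e ror 14
 *     y ^= e;                          // (e ror 14) ^ e
 *     y = ror32(y, 11 - 6);            // (e ror 19) ^ (e ror 5)
 *     y ^= e;                          // (e ror 19) ^ (e ror 5) ^ e
 *     return ror32(y, 6);              // (e ror 25) ^ (e ror 11) ^ (e ror 6)
 *   }
 *
 *   // Equal to ror32(e,6) ^ ror32(e,11) ^ ror32(e,25) because rotation
 *   // distributes over XOR; S0(a) uses offsets 2, 13, 22 the same way.
 */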
/*_*/ #endif .intel_syntax noprefix #define MOVDQ movdqu /* assume buffers not aligned */ /*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; Define Macros*/ /* addm [mem], reg * Add reg to mem using reg-mem add and store */ .macro addm p1 p2 add \p2, \p1 mov \p1, \p2 .endm /*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;*/ /* COPY_XMM_AND_BSWAP xmm, [mem], byte_flip_mask * Load xmm with mem and byte swap each dword */ .macro COPY_XMM_AND_BSWAP p1 p2 p3 MOVDQ \p1, \p2 pshufb \p1, \p3 .endm /*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;*/ X0 = xmm4 X1 = xmm5 X2 = xmm6 X3 = xmm7 XTMP0 = xmm0 XTMP1 = xmm1 XTMP2 = xmm2 XTMP3 = xmm3 XTMP4 = xmm8 XFER = xmm9 SHUF_00BA = xmm10 /* shuffle xBxA -> 00BA */ SHUF_DC00 = xmm11 /* shuffle xDxC -> DC00 */ BYTE_FLIP_MASK = xmm12 NUM_BLKS = rdx /* 3rd arg */ CTX = rsi /* 2nd arg */ INP = rdi /* 1st arg */ SRND = rdi /* clobbers INP */ c = ecx d = r8d e = edx TBL = rbp a = eax b = ebx f = r9d g = r10d h = r11d y0 = r13d y1 = r14d y2 = r15d #define _INP_END_SIZE 8 #define _INP_SIZE 8 #define _XFER_SIZE 8 #define _XMM_SAVE_SIZE 0 /* STACK_SIZE plus pushes must be an odd multiple of 8 */ #define _ALIGN_SIZE 8 #define _INP_END 0 #define _INP (_INP_END + _INP_END_SIZE) #define _XFER (_INP + _INP_SIZE) #define _XMM_SAVE (_XFER + _XFER_SIZE + _ALIGN_SIZE) #define STACK_SIZE (_XMM_SAVE + _XMM_SAVE_SIZE) /* rotate_Xs * Rotate values of symbols X0...X3 */ .macro rotate_Xs X_ = X0 X0 = X1 X1 = X2 X2 = X3 X3 = X_ .endm /* ROTATE_ARGS * Rotate values of symbols a...h */ .macro ROTATE_ARGS TMP_ = h h = g g = f f = e e = d d = c c = b b = a a = TMP_ .endm .macro FOUR_ROUNDS_AND_SCHED /* compute s0 four at a time and s1 two at a time * compute W[-16] + W[-7] 4 at a time */ movdqa XTMP0, X3 mov y0, e /* y0 = e */ ror y0, (25-11) /* y0 = e >> (25-11) */ mov y1, a /* y1 = a */ palignr XTMP0, X2, 4 /* XTMP0 = W[-7] */ ror y1, (22-13) /* y1 = a >> (22-13) */ xor y0, e /* y0 = e ^ (e >> (25-11)) */ mov y2, f /* y2 = f */ ror y0, (11-6) /* y0 = (e >> (11-6)) ^ (e >> (25-6)) */ movdqa XTMP1, X1 xor y1, a /* y1 = a ^ (a >> (22-13) */ xor y2, g /* y2 = f^g */ paddd XTMP0, X0 /* XTMP0 = W[-7] + W[-16] */ xor y0, e /* y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) */ and y2, e /* y2 = (f^g)&e */ ror y1, (13-2) /* y1 = (a >> (13-2)) ^ (a >> (22-2)) */ /* compute s0 */ palignr XTMP1, X0, 4 /* XTMP1 = W[-15] */ xor y1, a /* y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) */ ror y0, 6 /* y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) */ xor y2, g /* y2 = CH = ((f^g)&e)^g */ movdqa XTMP2, XTMP1 /* XTMP2 = W[-15] */ ror y1, 2 /* y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) */ add y2, y0 /* y2 = S1 + CH */ add y2, [rsp + _XFER + 0*4] /* y2 = k + w + S1 + CH */ movdqa XTMP3, XTMP1 /* XTMP3 = W[-15] */ mov y0, a /* y0 = a */ add h, y2 /* h = h + S1 + CH + k + w */ mov y2, a /* y2 = a */ pslld XTMP1, (32-7) or y0, c /* y0 = a|c */ add d, h /* d = d + h + S1 + CH + k + w */ and y2, c /* y2 = a&c */ psrld XTMP2, 7 and y0, b /* y0 = (a|c)&b */ add h, y1 /* h = h + S1 + CH + k + w + S0 */ por XTMP1, XTMP2 /* XTMP1 = W[-15] ror 7 */ or y0, y2 /* y0 = MAJ = (a|c)&b)|(a&c) */ lea h, [h + y0] /* h = h + S1 + CH + k + w + S0 + MAJ */ ROTATE_ARGS movdqa XTMP2, XTMP3 /* XTMP2 = W[-15] */ mov y0, e /* y0 = e */ mov y1, a /* y1 = a */ movdqa XTMP4, XTMP3 /* XTMP4 = W[-15] */ ror y0, (25-11) /* y0 = e >> (25-11) */ xor y0, e /* y0 = e ^ (e >> (25-11)) */ mov y2, f /* y2 = f */ ror y1, (22-13) /* y1 = a >> (22-13) */ pslld XTMP3, (32-18) xor y1, a /* y1 = a ^ (a >> (22-13) */ ror y0, (11-6) /* y0 = (e >> (11-6)) ^ (e >> (25-6)) */ xor y2, g /* y2 = f^g */ psrld XTMP2, 18 ror y1, (13-2) 
/* y1 = (a >> (13-2)) ^ (a >> (22-2)) */ xor y0, e /* y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) */ and y2, e /* y2 = (f^g)&e */ ror y0, 6 /* y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) */ pxor XTMP1, XTMP3 xor y1, a /* y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) */ xor y2, g /* y2 = CH = ((f^g)&e)^g */ psrld XTMP4, 3 /* XTMP4 = W[-15] >> 3 */ add y2, y0 /* y2 = S1 + CH */ add y2, [rsp + _XFER + 1*4] /* y2 = k + w + S1 + CH */ ror y1, 2 /* y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) */ pxor XTMP1, XTMP2 /* XTMP1 = W[-15] ror 7 ^ W[-15] ror 18 */ mov y0, a /* y0 = a */ add h, y2 /* h = h + S1 + CH + k + w */ mov y2, a /* y2 = a */ pxor XTMP1, XTMP4 /* XTMP1 = s0 */ or y0, c /* y0 = a|c */ add d, h /* d = d + h + S1 + CH + k + w */ and y2, c /* y2 = a&c */ /* compute low s1 */ pshufd XTMP2, X3, 0b11111010 /* XTMP2 = W[-2] {BBAA} */ and y0, b /* y0 = (a|c)&b */ add h, y1 /* h = h + S1 + CH + k + w + S0 */ paddd XTMP0, XTMP1 /* XTMP0 = W[-16] + W[-7] + s0 */ or y0, y2 /* y0 = MAJ = (a|c)&b)|(a&c) */ lea h, [h + y0] /* h = h + S1 + CH + k + w + S0 + MAJ */ ROTATE_ARGS movdqa XTMP3, XTMP2 /* XTMP3 = W[-2] {BBAA} */ mov y0, e /* y0 = e */ mov y1, a /* y1 = a */ ror y0, (25-11) /* y0 = e >> (25-11) */ movdqa XTMP4, XTMP2 /* XTMP4 = W[-2] {BBAA} */ xor y0, e /* y0 = e ^ (e >> (25-11)) */ ror y1, (22-13) /* y1 = a >> (22-13) */ mov y2, f /* y2 = f */ xor y1, a /* y1 = a ^ (a >> (22-13) */ ror y0, (11-6) /* y0 = (e >> (11-6)) ^ (e >> (25-6)) */ psrlq XTMP2, 17 /* XTMP2 = W[-2] ror 17 {xBxA} */ xor y2, g /* y2 = f^g */ psrlq XTMP3, 19 /* XTMP3 = W[-2] ror 19 {xBxA} */ xor y0, e /* y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) */ and y2, e /* y2 = (f^g)&e */ psrld XTMP4, 10 /* XTMP4 = W[-2] >> 10 {BBAA} */ ror y1, (13-2) /* y1 = (a >> (13-2)) ^ (a >> (22-2)) */ xor y1, a /* y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) */ xor y2, g /* y2 = CH = ((f^g)&e)^g */ ror y0, 6 /* y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) */ pxor XTMP2, XTMP3 add y2, y0 /* y2 = S1 + CH */ ror y1, 2 /* y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) */ add y2, [rsp + _XFER + 2*4] /* y2 = k + w + S1 + CH */ pxor XTMP4, XTMP2 /* XTMP4 = s1 {xBxA} */ mov y0, a /* y0 = a */ add h, y2 /* h = h + S1 + CH + k + w */ mov y2, a /* y2 = a */ pshufb XTMP4, SHUF_00BA /* XTMP4 = s1 {00BA} */ or y0, c /* y0 = a|c */ add d, h /* d = d + h + S1 + CH + k + w */ and y2, c /* y2 = a&c */ paddd XTMP0, XTMP4 /* XTMP0 = {..., ..., W[1], W[0]} */ and y0, b /* y0 = (a|c)&b */ add h, y1 /* h = h + S1 + CH + k + w + S0 */ /* compute high s1 */ pshufd XTMP2, XTMP0, 0b01010000 /* XTMP2 = W[-2] {DDCC} */ or y0, y2 /* y0 = MAJ = (a|c)&b)|(a&c) */ lea h, [h + y0] /* h = h + S1 + CH + k + w + S0 + MAJ */ ROTATE_ARGS movdqa XTMP3, XTMP2 /* XTMP3 = W[-2] {DDCC} */ mov y0, e /* y0 = e */ ror y0, (25-11) /* y0 = e >> (25-11) */ mov y1, a /* y1 = a */ movdqa X0, XTMP2 /* X0 = W[-2] {DDCC} */ ror y1, (22-13) /* y1 = a >> (22-13) */ xor y0, e /* y0 = e ^ (e >> (25-11)) */ mov y2, f /* y2 = f */ ror y0, (11-6) /* y0 = (e >> (11-6)) ^ (e >> (25-6)) */ psrlq XTMP2, 17 /* XTMP2 = W[-2] ror 17 {xDxC} */ xor y1, a /* y1 = a ^ (a >> (22-13) */ xor y2, g /* y2 = f^g */ psrlq XTMP3, 19 /* XTMP3 = W[-2] ror 19 {xDxC} */ xor y0, e /* y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) */ and y2, e /* y2 = (f^g)&e */ ror y1, (13-2) /* y1 = (a >> (13-2)) ^ (a >> (22-2)) */ psrld X0, 10 /* X0 = W[-2] >> 10 {DDCC} */ xor y1, a /* y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) */ ror y0, 6 /* y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) */ xor y2, g /* y2 = CH = ((f^g)&e)^g */ pxor XTMP2, XTMP3 ror y1, 2 /* y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) */ add y2, y0 /* y2 
= S1 + CH */ add y2, [rsp + _XFER + 3*4] /* y2 = k + w + S1 + CH */ pxor X0, XTMP2 /* X0 = s1 {xDxC} */ mov y0, a /* y0 = a */ add h, y2 /* h = h + S1 + CH + k + w */ mov y2, a /* y2 = a */ pshufb X0, SHUF_DC00 /* X0 = s1 {DC00} */ or y0, c /* y0 = a|c */ add d, h /* d = d + h + S1 + CH + k + w */ and y2, c /* y2 = a&c */ paddd X0, XTMP0 /* X0 = {W[3], W[2], W[1], W[0]} */ and y0, b /* y0 = (a|c)&b */ add h, y1 /* h = h + S1 + CH + k + w + S0 */ or y0, y2 /* y0 = MAJ = (a|c)&b)|(a&c) */ lea h, [h + y0] /* h = h + S1 + CH + k + w + S0 + MAJ */ ROTATE_ARGS rotate_Xs .endm /* input is [rsp + _XFER + %1 * 4] */ .macro DO_ROUND i1 mov y0, e /* y0 = e */ ror y0, (25-11) /* y0 = e >> (25-11) */ mov y1, a /* y1 = a */ xor y0, e /* y0 = e ^ (e >> (25-11)) */ ror y1, (22-13) /* y1 = a >> (22-13) */ mov y2, f /* y2 = f */ xor y1, a /* y1 = a ^ (a >> (22-13) */ ror y0, (11-6) /* y0 = (e >> (11-6)) ^ (e >> (25-6)) */ xor y2, g /* y2 = f^g */ xor y0, e /* y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) */ ror y1, (13-2) /* y1 = (a >> (13-2)) ^ (a >> (22-2)) */ and y2, e /* y2 = (f^g)&e */ xor y1, a /* y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) */ ror y0, 6 /* y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) */ xor y2, g /* y2 = CH = ((f^g)&e)^g */ add y2, y0 /* y2 = S1 + CH */ ror y1, 2 /* y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) */ add y2, [rsp + _XFER + \i1 * 4] /* y2 = k + w + S1 + CH */ mov y0, a /* y0 = a */ add h, y2 /* h = h + S1 + CH + k + w */ mov y2, a /* y2 = a */ or y0, c /* y0 = a|c */ add d, h /* d = d + h + S1 + CH + k + w */ and y2, c /* y2 = a&c */ and y0, b /* y0 = (a|c)&b */ add h, y1 /* h = h + S1 + CH + k + w + S0 */ or y0, y2 /* y0 = MAJ = (a|c)&b)|(a&c) */ lea h, [h + y0] /* h = h + S1 + CH + k + w + S0 + MAJ */ ROTATE_ARGS .endm /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; void sha256_sse4(void *input_data, UINT32 digest[8], UINT64 num_blks) ;; arg 1 : pointer to input data ;; arg 2 : pointer to digest ;; arg 3 : Num blocks */ .text .globl _gcry_sha256_transform_amd64_ssse3 ELF(.type _gcry_sha256_transform_amd64_ssse3,@function;) .align 16 _gcry_sha256_transform_amd64_ssse3: push rbx push rbp push r13 push r14 push r15 sub rsp, STACK_SIZE shl NUM_BLKS, 6 /* convert to bytes */ jz .Ldone_hash add NUM_BLKS, INP /* pointer to end of data */ mov [rsp + _INP_END], NUM_BLKS /* load initial digest */ mov a,[4*0 + CTX] mov b,[4*1 + CTX] mov c,[4*2 + CTX] mov d,[4*3 + CTX] mov e,[4*4 + CTX] mov f,[4*5 + CTX] mov g,[4*6 + CTX] mov h,[4*7 + CTX] movdqa BYTE_FLIP_MASK, [.LPSHUFFLE_BYTE_FLIP_MASK ADD_RIP] movdqa SHUF_00BA, [.L_SHUF_00BA ADD_RIP] movdqa SHUF_DC00, [.L_SHUF_DC00 ADD_RIP] .Loop0: lea TBL, [.LK256 ADD_RIP] /* byte swap first 16 dwords */ COPY_XMM_AND_BSWAP X0, [INP + 0*16], BYTE_FLIP_MASK COPY_XMM_AND_BSWAP X1, [INP + 1*16], BYTE_FLIP_MASK COPY_XMM_AND_BSWAP X2, [INP + 2*16], BYTE_FLIP_MASK COPY_XMM_AND_BSWAP X3, [INP + 3*16], BYTE_FLIP_MASK mov [rsp + _INP], INP /* schedule 48 input dwords, by doing 3 rounds of 16 each */ mov SRND, 3 .align 16 .Loop1: movdqa XFER, [TBL + 0*16] paddd XFER, X0 movdqa [rsp + _XFER], XFER FOUR_ROUNDS_AND_SCHED movdqa XFER, [TBL + 1*16] paddd XFER, X0 movdqa [rsp + _XFER], XFER FOUR_ROUNDS_AND_SCHED movdqa XFER, [TBL + 2*16] paddd XFER, X0 movdqa [rsp + _XFER], XFER FOUR_ROUNDS_AND_SCHED movdqa XFER, [TBL + 3*16] paddd XFER, X0 movdqa [rsp + _XFER], XFER add TBL, 4*16 FOUR_ROUNDS_AND_SCHED sub SRND, 1 jne .Loop1 mov SRND, 2 .Loop2: paddd X0, [TBL + 0*16] movdqa [rsp + 
_XFER], X0 DO_ROUND 0 DO_ROUND 1 DO_ROUND 2 DO_ROUND 3 paddd X1, [TBL + 1*16] movdqa [rsp + _XFER], X1 add TBL, 2*16 DO_ROUND 0 DO_ROUND 1 DO_ROUND 2 DO_ROUND 3 movdqa X0, X2 movdqa X1, X3 sub SRND, 1 jne .Loop2 addm [4*0 + CTX],a addm [4*1 + CTX],b addm [4*2 + CTX],c addm [4*3 + CTX],d addm [4*4 + CTX],e addm [4*5 + CTX],f addm [4*6 + CTX],g addm [4*7 + CTX],h mov INP, [rsp + _INP] add INP, 64 cmp INP, [rsp + _INP_END] jne .Loop0 pxor xmm0, xmm0 pxor xmm1, xmm1 pxor xmm2, xmm2 pxor xmm3, xmm3 pxor xmm4, xmm4 pxor xmm5, xmm5 pxor xmm6, xmm6 pxor xmm7, xmm7 pxor xmm8, xmm8 pxor xmm9, xmm9 pxor xmm10, xmm10 pxor xmm11, xmm11 pxor xmm12, xmm12 .Ldone_hash: + pxor XFER, XFER + movdqa [rsp + _XFER], XFER + xor eax, eax + add rsp, STACK_SIZE pop r15 pop r14 pop r13 pop rbp pop rbx - mov eax, STACK_SIZE + 5*8 - ret .align 16 .LK256: .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 .LPSHUFFLE_BYTE_FLIP_MASK: .octa 0x0c0d0e0f08090a0b0405060700010203 /* shuffle xBxA -> 00BA */ .L_SHUF_00BA: .octa 0xFFFFFFFFFFFFFFFF0b0a090803020100 /* shuffle xDxC -> DC00 */ .L_SHUF_DC00: .octa 0x0b0a090803020100FFFFFFFFFFFFFFFF #endif #endif diff --git a/cipher/sha512-avx-amd64.S b/cipher/sha512-avx-amd64.S index 446a8b4e..534351e4 100644 --- a/cipher/sha512-avx-amd64.S +++ b/cipher/sha512-avx-amd64.S @@ -1,421 +1,427 @@ /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; Copyright (c) 2012, Intel Corporation ; ; All rights reserved. ; ; Redistribution and use in source and binary forms, with or without ; modification, are permitted provided that the following conditions are ; met: ; ; * Redistributions of source code must retain the above copyright ; notice, this list of conditions and the following disclaimer. ; ; * Redistributions in binary form must reproduce the above copyright ; notice, this list of conditions and the following disclaimer in the ; documentation and/or other materials provided with the ; distribution. ; ; * Neither the name of the Intel Corporation nor the names of its ; contributors may be used to endorse or promote products derived from ; this software without specific prior written permission. ; ; ; THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY ; EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR ; PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL INTEL CORPORATION OR ; CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, ; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, ; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ; LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ /* * Conversion to GAS assembly and integration to libgcrypt * by Jussi Kivilinna */ #ifdef __x86_64 #include #if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && \ defined(HAVE_INTEL_SYNTAX_PLATFORM_AS) && \ defined(HAVE_GCC_INLINE_ASM_AVX) && defined(USE_SHA512) #ifdef __PIC__ # define ADD_RIP +rip #else # define ADD_RIP #endif #ifdef HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS # define ELF(...) __VA_ARGS__ #else # define ELF(...) /*_*/ #endif .intel_syntax noprefix .text /* Virtual Registers */ msg = rdi /* ARG1 */ digest = rsi /* ARG2 */ msglen = rdx /* ARG3 */ T1 = rcx T2 = r8 a_64 = r9 b_64 = r10 c_64 = r11 d_64 = r12 e_64 = r13 f_64 = r14 g_64 = r15 h_64 = rbx tmp0 = rax /* ; Local variables (stack frame) ; Note: frame_size must be an odd multiple of 8 bytes to XMM align RSP */ frame_W = 0 /* Message Schedule */ frame_W_size = (80 * 8) frame_WK = ((frame_W) + (frame_W_size)) /* W[t] + K[t] | W[t+1] + K[t+1] */ frame_WK_size = (2 * 8) frame_GPRSAVE = ((frame_WK) + (frame_WK_size)) frame_GPRSAVE_size = (5 * 8) frame_size = ((frame_GPRSAVE) + (frame_GPRSAVE_size)) /* Useful QWORD "arrays" for simpler memory references */ #define MSG(i) msg + 8*(i) /* Input message (arg1) */ #define DIGEST(i) digest + 8*(i) /* Output Digest (arg2) */ #define K_t(i) .LK512 + 8*(i) ADD_RIP /* SHA Constants (static mem) */ #define W_t(i) rsp + frame_W + 8*(i) /* Message Schedule (stack frame) */ #define WK_2(i) rsp + frame_WK + 8*((i) % 2) /* W[t]+K[t] (stack frame) */ /* MSG, DIGEST, K_t, W_t are arrays */ /* WK_2(t) points to 1 of 2 qwords at frame.WK depdending on t being odd/even */ .macro RotateState /* Rotate symbles a..h right */ __TMP = h_64 h_64 = g_64 g_64 = f_64 f_64 = e_64 e_64 = d_64 d_64 = c_64 c_64 = b_64 b_64 = a_64 a_64 = __TMP .endm .macro RORQ p1 p2 /* shld is faster than ror on Intel Sandybridge */ shld \p1, \p1, (64 - \p2) .endm .macro SHA512_Round t /* Compute Round %%t */ mov T1, f_64 /* T1 = f */ mov tmp0, e_64 /* tmp = e */ xor T1, g_64 /* T1 = f ^ g */ RORQ tmp0, 23 /* 41 ; tmp = e ror 23 */ and T1, e_64 /* T1 = (f ^ g) & e */ xor tmp0, e_64 /* tmp = (e ror 23) ^ e */ xor T1, g_64 /* T1 = ((f ^ g) & e) ^ g = CH(e,f,g) */ add T1, [WK_2(\t)] /* W[t] + K[t] from message scheduler */ RORQ tmp0, 4 /* 18 ; tmp = ((e ror 23) ^ e) ror 4 */ xor tmp0, e_64 /* tmp = (((e ror 23) ^ e) ror 4) ^ e */ mov T2, a_64 /* T2 = a */ add T1, h_64 /* T1 = CH(e,f,g) + W[t] + K[t] + h */ RORQ tmp0, 14 /* 14 ; tmp = ((((e ror23)^e)ror4)^e)ror14 = S1(e) */ add T1, tmp0 /* T1 = CH(e,f,g) + W[t] + K[t] + S1(e) */ mov tmp0, a_64 /* tmp = a */ xor T2, c_64 /* T2 = a ^ c */ and tmp0, c_64 /* tmp = a & c */ and T2, b_64 /* T2 = (a ^ c) & b */ xor T2, tmp0 /* T2 = ((a ^ c) & b) ^ (a & c) = Maj(a,b,c) */ mov tmp0, a_64 /* tmp = a */ RORQ tmp0, 5 /* 39 ; tmp = a ror 5 */ xor tmp0, a_64 /* tmp = (a ror 5) ^ a */ add d_64, T1 /* e(next_state) = d + T1 */ RORQ tmp0, 6 /* 34 ; tmp 
.macro SHA512_2Sched_2Round_avx t /* ; Compute rounds %%t-2 and %%t-1 ; Compute message schedule QWORDS %%t and %%t+1 ; Two rounds are computed based on the values for K[t-2]+W[t-2] and ; K[t-1]+W[t-1] which were previously stored at WK_2 by the message ; scheduler. ; The two new schedule QWORDS are stored at [W_t(%%t)] and [W_t(%%t+1)]. ; They are then added to their respective SHA512 constants at ; [K_t(%%t)] and [K_t(%%t+1)] and stored at dqword [WK_2(%%t)] ; For brevity, the comments following vectored instructions only refer to ; the first of a pair of QWORDS. ; E.g. XMM4=W[t-2] really means XMM4={W[t-2]|W[t-1]} ; The computation of the message schedule and the rounds are tightly ; stitched to take advantage of instruction-level parallelism. ; For clarity, integer instructions (for the rounds calculation) are indented ; by one tab. Vectored instructions (for the message scheduler) are indented ; by two tabs. */ vmovdqa xmm4, [W_t(\t-2)] /* XMM4 = W[t-2] */ vmovdqu xmm5, [W_t(\t-15)] /* XMM5 = W[t-15] */ mov T1, f_64 vpsrlq xmm0, xmm4, 61 /* XMM0 = W[t-2]>>61 */ mov tmp0, e_64 vpsrlq xmm6, xmm5, 1 /* XMM6 = W[t-15]>>1 */ xor T1, g_64 RORQ tmp0, 23 /* 41 */ vpsrlq xmm1, xmm4, 19 /* XMM1 = W[t-2]>>19 */ and T1, e_64 xor tmp0, e_64 vpxor xmm0, xmm0, xmm1 /* XMM0 = W[t-2]>>61 ^ W[t-2]>>19 */ xor T1, g_64 add T1, [WK_2(\t)] vpsrlq xmm7, xmm5, 8 /* XMM7 = W[t-15]>>8 */ RORQ tmp0, 4 /* 18 */ vpsrlq xmm2, xmm4, 6 /* XMM2 = W[t-2]>>6 */ xor tmp0, e_64 mov T2, a_64 add T1, h_64 vpxor xmm6, xmm6, xmm7 /* XMM6 = W[t-15]>>1 ^ W[t-15]>>8 */ RORQ tmp0, 14 /* 14 */ add T1, tmp0 vpsrlq xmm8, xmm5, 7 /* XMM8 = W[t-15]>>7 */ mov tmp0, a_64 xor T2, c_64 vpsllq xmm3, xmm4, (64-61) /* XMM3 = W[t-2]<<3 */ and tmp0, c_64 and T2, b_64 vpxor xmm2, xmm2, xmm3 /* XMM2 = W[t-2]>>6 ^ W[t-2]<<3 */ xor T2, tmp0 mov tmp0, a_64 vpsllq xmm9, xmm5, (64-1) /* XMM9 = W[t-15]<<63 */ RORQ tmp0, 5 /* 39 */ vpxor xmm8, xmm8, xmm9 /* XMM8 = W[t-15]>>7 ^ W[t-15]<<63 */ xor tmp0, a_64 add d_64, T1 RORQ tmp0, 6 /* 34 */ xor tmp0, a_64 vpxor xmm6, xmm6, xmm8 /* XMM6 = W[t-15]>>1 ^ W[t-15]>>8 ^ W[t-15]>>7 ^ W[t-15]<<63 */ lea h_64, [T1 + T2] RORQ tmp0, 28 /* 28 */ vpsllq xmm4, xmm4, (64-19) /* XMM4 = W[t-2]<<45 */ add h_64, tmp0 RotateState vpxor xmm0, xmm0, xmm4 /* XMM0 = W[t-2]>>61 ^ W[t-2]>>19 ^ W[t-2]<<45 */ mov T1, f_64 vpxor xmm0, xmm0, xmm2 /* XMM0 = s1(W[t-2]) */ mov tmp0, e_64 xor T1, g_64 vpaddq xmm0, xmm0, [W_t(\t-16)] /* XMM0 = s1(W[t-2]) + W[t-16] */ vmovdqu xmm1, [W_t(\t-7)] /* XMM1 = W[t-7] */ RORQ tmp0, 23 /* 41 */ and T1, e_64 xor tmp0, e_64 xor T1, g_64 vpsllq xmm5, xmm5, (64-8) /* XMM5 = W[t-15]<<56 */ add T1, [WK_2(\t+1)] vpxor xmm6, xmm6, xmm5 /* XMM6 = s0(W[t-15]) */ RORQ tmp0, 4 /* 18 */ vpaddq xmm0, xmm0, xmm6 /* XMM0 = s1(W[t-2]) + W[t-16] + s0(W[t-15]) */ xor tmp0, e_64 vpaddq xmm0, xmm0, xmm1 /* XMM0 = W[t] = s1(W[t-2]) + W[t-7] + s0(W[t-15]) + W[t-16] */ mov T2, a_64 add T1, h_64 RORQ tmp0, 14 /* 14 */ add T1, tmp0 vmovdqa [W_t(\t)], xmm0 /* Store W[t] */ vpaddq xmm0, xmm0, [K_t(\t)] /* Compute W[t]+K[t] */ vmovdqa [WK_2(\t)], xmm0 /* Store W[t]+K[t] for next rounds */ mov tmp0, a_64 xor T2, c_64 and tmp0, c_64 and T2, b_64 xor T2, tmp0 mov tmp0, a_64 RORQ tmp0, 5 /* 39 */ xor tmp0, a_64 add d_64, T1 RORQ tmp0, 6 /* 34 */ xor
tmp0, a_64 lea h_64, [T1 + T2] RORQ tmp0, 28 /* 28 */ add h_64, tmp0 RotateState .endm /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; void sha512_avx(const void* M, void* D, uint64_t L); ; Purpose: Updates the SHA512 digest stored at D with the message stored in M. ; The size of the message pointed to by M must be an integer multiple of SHA512 ; message blocks. ; L is the message length in SHA512 blocks */ .globl _gcry_sha512_transform_amd64_avx ELF(.type _gcry_sha512_transform_amd64_avx,@function;) .align 16 _gcry_sha512_transform_amd64_avx: xor eax, eax cmp msglen, 0 je .Lnowork vzeroupper /* Allocate Stack Space */ sub rsp, frame_size /* Save GPRs */ mov [rsp + frame_GPRSAVE + 8 * 0], rbx mov [rsp + frame_GPRSAVE + 8 * 1], r12 mov [rsp + frame_GPRSAVE + 8 * 2], r13 mov [rsp + frame_GPRSAVE + 8 * 3], r14 mov [rsp + frame_GPRSAVE + 8 * 4], r15 .Lupdateblock: /* Load state variables */ mov a_64, [DIGEST(0)] mov b_64, [DIGEST(1)] mov c_64, [DIGEST(2)] mov d_64, [DIGEST(3)] mov e_64, [DIGEST(4)] mov f_64, [DIGEST(5)] mov g_64, [DIGEST(6)] mov h_64, [DIGEST(7)] t = 0 .rept 80/2 + 1 /* (80 rounds) / (2 rounds/iteration) + (1 iteration) */ /* +1 iteration because the scheduler leads hashing by 1 iteration */ .if t < 2 /* BSWAP 2 QWORDS */ vmovdqa xmm1, [.LXMM_QWORD_BSWAP ADD_RIP] vmovdqu xmm0, [MSG(t)] vpshufb xmm0, xmm0, xmm1 /* BSWAP */ vmovdqa [W_t(t)], xmm0 /* Store Scheduled Pair */ vpaddq xmm0, xmm0, [K_t(t)] /* Compute W[t]+K[t] */ vmovdqa [WK_2(t)], xmm0 /* Store into WK for rounds */ .elseif t < 16 /* BSWAP 2 QWORDS, Compute 2 Rounds */ vmovdqu xmm0, [MSG(t)] vpshufb xmm0, xmm0, xmm1 /* BSWAP */ SHA512_Round (t - 2) /* Round t-2 */ vmovdqa [W_t(t)], xmm0 /* Store Scheduled Pair */ vpaddq xmm0, xmm0, [K_t(t)] /* Compute W[t]+K[t] */ SHA512_Round (t - 1) /* Round t-1 */ vmovdqa [WK_2(t)], xmm0 /* W[t]+K[t] into WK */ .elseif t < 79 /* Schedule 2 QWORDS; Compute 2 Rounds */ SHA512_2Sched_2Round_avx t .else /* Compute 2 Rounds */ SHA512_Round (t - 2) SHA512_Round (t - 1) .endif t = ((t)+2) .endr /* Update digest */ add [DIGEST(0)], a_64 add [DIGEST(1)], b_64 add [DIGEST(2)], c_64 add [DIGEST(3)], d_64 add [DIGEST(4)], e_64 add [DIGEST(5)], f_64 add [DIGEST(6)], g_64 add [DIGEST(7)], h_64 /* Advance to next message block */ add msg, 16*8 dec msglen jnz .Lupdateblock /* Restore GPRs */ mov rbx, [rsp + frame_GPRSAVE + 8 * 0] mov r12, [rsp + frame_GPRSAVE + 8 * 1] mov r13, [rsp + frame_GPRSAVE + 8 * 2] mov r14, [rsp + frame_GPRSAVE + 8 * 3] mov r15, [rsp + frame_GPRSAVE + 8 * 4] - /* Restore Stack Pointer */ - add rsp, frame_size - vzeroall - /* Return stack burn depth */ - mov rax, frame_size + /* Burn stack */ + t = 0 + .rept frame_W_size / 32 + vmovups [rsp + frame_W + (t) * 32], ymm0 + t = ((t)+1) + .endr + vmovdqu [rsp + frame_WK], xmm0 + xor eax, eax + + /* Restore Stack Pointer */ + add rsp, frame_size .Lnowork: ret /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;; Binary Data */ .align 16 /* Mask for byte-swapping a couple of qwords in an XMM register using (v)pshufb. 
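   In C terms, the effect of (v)pshufb with this mask is the following (an
   illustrative sketch, not part of the build): output byte i of the
   destination is input byte mask[i], and the .octa below encodes, in
   little-endian byte order, mask[] = {7,6,5,4,3,2,1,0, 15,14,13,12,11,10,9,8},
   so both qwords of the register are byte-swapped:

     #include <stdint.h>

     static void bswap_2qwords(uint8_t out[16], const uint8_t in[16])
     {
       static const uint8_t mask[16] =
         { 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 };
       int i;
       for (i = 0; i < 16; i++)
         out[i] = in[mask[i]];   // pshufb: per-byte table lookup
     }

   This converts the big-endian message qwords to host order before they
   enter the message schedule.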
*/ .LXMM_QWORD_BSWAP: .octa 0x08090a0b0c0d0e0f0001020304050607 /* K[t] used in SHA512 hashing */ .LK512: .quad 0x428a2f98d728ae22,0x7137449123ef65cd .quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc .quad 0x3956c25bf348b538,0x59f111f1b605d019 .quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118 .quad 0xd807aa98a3030242,0x12835b0145706fbe .quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2 .quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1 .quad 0x9bdc06a725c71235,0xc19bf174cf692694 .quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3 .quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65 .quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483 .quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5 .quad 0x983e5152ee66dfab,0xa831c66d2db43210 .quad 0xb00327c898fb213f,0xbf597fc7beef0ee4 .quad 0xc6e00bf33da88fc2,0xd5a79147930aa725 .quad 0x06ca6351e003826f,0x142929670a0e6e70 .quad 0x27b70a8546d22ffc,0x2e1b21385c26c926 .quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df .quad 0x650a73548baf63de,0x766a0abb3c77b2a8 .quad 0x81c2c92e47edaee6,0x92722c851482353b .quad 0xa2bfe8a14cf10364,0xa81a664bbc423001 .quad 0xc24b8b70d0f89791,0xc76c51a30654be30 .quad 0xd192e819d6ef5218,0xd69906245565a910 .quad 0xf40e35855771202a,0x106aa07032bbd1b8 .quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53 .quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8 .quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb .quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3 .quad 0x748f82ee5defb2fc,0x78a5636f43172f60 .quad 0x84c87814a1f0ab72,0x8cc702081a6439ec .quad 0x90befffa23631e28,0xa4506cebde82bde9 .quad 0xbef9a3f7b2c67915,0xc67178f2e372532b .quad 0xca273eceea26619c,0xd186b8c721c0c207 .quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178 .quad 0x06f067aa72176fba,0x0a637dc5a2c898a6 .quad 0x113f9804bef90dae,0x1b710b35131c471b .quad 0x28db77f523047d84,0x32caab7b40c72493 .quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c .quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a .quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817 #endif #endif diff --git a/cipher/sha512-avx2-bmi2-amd64.S b/cipher/sha512-avx2-bmi2-amd64.S index 05bef64c..914f920a 100644 --- a/cipher/sha512-avx2-bmi2-amd64.S +++ b/cipher/sha512-avx2-bmi2-amd64.S @@ -1,792 +1,793 @@ /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; Copyright (c) 2012, Intel Corporation ; ; All rights reserved. ; ; Redistribution and use in source and binary forms, with or without ; modification, are permitted provided that the following conditions are ; met: ; ; * Redistributions of source code must retain the above copyright ; notice, this list of conditions and the following disclaimer. ; ; * Redistributions in binary form must reproduce the above copyright ; notice, this list of conditions and the following disclaimer in the ; documentation and/or other materials provided with the ; distribution. ; ; * Neither the name of the Intel Corporation nor the names of its ; contributors may be used to endorse or promote products derived from ; this software without specific prior written permission. ; ; ; THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY ; EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR ; PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL INTEL CORPORATION OR ; CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, ; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, ; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ; LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; This code schedules 1 block at a time, with 4 lanes per block ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ /* * Conversion to GAS assembly and integration to libgcrypt * by Jussi Kivilinna */ #ifdef __x86_64 #include <config.h> #if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && \ defined(HAVE_INTEL_SYNTAX_PLATFORM_AS) && \ defined(HAVE_GCC_INLINE_ASM_AVX2) && defined(HAVE_GCC_INLINE_ASM_BMI2) && \ defined(USE_SHA512) #ifdef __PIC__ # define ADD_RIP +rip #else # define ADD_RIP #endif #ifdef HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS # define ELF(...) __VA_ARGS__ #else # define ELF(...) /*_*/ #endif .intel_syntax noprefix .text /* Virtual Registers */ Y_0 = ymm4 Y_1 = ymm5 Y_2 = ymm6 Y_3 = ymm7 YTMP0 = ymm0 YTMP1 = ymm1 YTMP2 = ymm2 YTMP3 = ymm3 YTMP4 = ymm8 XFER = YTMP0 BYTE_FLIP_MASK = ymm9 INP = rdi /* 1st arg */ CTX = rsi /* 2nd arg */ NUM_BLKS = rdx /* 3rd arg */ c = rcx d = r8 e = rdx y3 = rdi TBL = rbp a = rax b = rbx f = r9 g = r10 h = r11 old_h = r11 T1 = r12 y0 = r13 y1 = r14 y2 = r15 y4 = r12 /* Local variables (stack frame) */ #define frame_XFER 0 #define frame_XFER_size (4*8) #define frame_SRND (frame_XFER + frame_XFER_size) #define frame_SRND_size (1*8) #define frame_INP (frame_SRND + frame_SRND_size) #define frame_INP_size (1*8) #define frame_INPEND (frame_INP + frame_INP_size) #define frame_INPEND_size (1*8) #define frame_RSPSAVE (frame_INPEND + frame_INPEND_size) #define frame_RSPSAVE_size (1*8) #define frame_GPRSAVE (frame_RSPSAVE + frame_RSPSAVE_size) #define frame_GPRSAVE_size (6*8) #define frame_size (frame_GPRSAVE + frame_GPRSAVE_size) #define VMOVDQ vmovdqu /*; assume buffers not aligned */ /* addm [mem], reg */ /* Add reg to mem using reg-mem add and store */ .macro addm p1 p2 add \p2, \p1 mov \p1, \p2 .endm /* COPY_YMM_AND_BSWAP ymm, [mem], byte_flip_mask */ /* Load ymm with mem and byte swap each qword */ .macro COPY_YMM_AND_BSWAP p1 p2 p3 VMOVDQ \p1, \p2 vpshufb \p1, \p1, \p3 .endm /* rotate_Ys */ /* Rotate values of symbols Y0...Y3 */ .macro rotate_Ys __Y_ = Y_0 Y_0 = Y_1 Y_1 = Y_2 Y_2 = Y_3 Y_3 = __Y_ .endm /* RotateState */ .macro RotateState /* Rotate symbols a..h right */ old_h = h __TMP_ = h h = g g = f f = e e = d d = c c = b b = a a = __TMP_ .endm /* %macro MY_VPALIGNR YDST, YSRC1, YSRC2, RVAL */ /* YDST = {YSRC1, YSRC2} >> RVAL*8 */ .macro MY_VPALIGNR YDST, YSRC1, YSRC2, RVAL vperm2f128 \YDST, \YSRC1, \YSRC2, 0x3 /* YDST = {YS1_LO, YS2_HI} */ vpalignr \YDST, \YDST, \YSRC2, \RVAL /* YDST = {YS1, YS2} >> RVAL*8 */ .endm .macro FOUR_ROUNDS_AND_SCHED /*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 0 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ /* Extract w[t-7] */ MY_VPALIGNR YTMP0, Y_3, Y_2, 8 /* YTMP0 = W[-7] */ /* Calculate w[t-16] + w[t-7] */ vpaddq YTMP0, YTMP0, Y_0 /* YTMP0 = W[-7] + W[-16] */ /* Extract w[t-15] */ MY_VPALIGNR YTMP1, Y_1, Y_0, 8 /* YTMP1 = W[-15] */ /* Calculate sigma0 */
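/*
 * The message-schedule sigmas computed by this macro, in C terms (a sketch
 * for reference only, not part of the build):
 *
 *   static uint64_t ror64(uint64_t x, unsigned n)
 *   { return (x >> n) | (x << (64 - n)); }
 *
 *   static uint64_t sigma0(uint64_t x)            // "s0", applied to W[t-15]
 *   { return ror64(x, 1) ^ ror64(x, 8) ^ (x >> 7); }
 *
 *   static uint64_t sigma1(uint64_t x)            // "s1", applied to W[t-2]
 *   { return ror64(x, 19) ^ ror64(x, 61) ^ (x >> 6); }
 *
 *   // W[t] = sigma1(W[t-2]) + W[t-7] + sigma0(W[t-15]) + W[t-16]
 *
 * AVX2 has no vector rotate, so each ror64 is emulated as
 * (x >> n) | (x << (64-n)) with vpsrlq/vpsllq/vpor; the four rounds of this
 * macro are interleaved with the computation of four new W[] qwords.
 */
/*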
Calculate w[t-15] ror 1 */ vpsrlq YTMP2, YTMP1, 1 vpsllq YTMP3, YTMP1, (64-1) vpor YTMP3, YTMP3, YTMP2 /* YTMP3 = W[-15] ror 1 */ /* Calculate w[t-15] shr 7 */ vpsrlq YTMP4, YTMP1, 7 /* YTMP4 = W[-15] >> 7 */ mov y3, a /* y3 = a ; MAJA */ rorx y0, e, 41 /* y0 = e >> 41 ; S1A */ rorx y1, e, 18 /* y1 = e >> 18 ; S1B */ add h, [rsp+frame_XFER+0*8] /* h = k + w + h ; -- */ or y3, c /* y3 = a|c ; MAJA */ mov y2, f /* y2 = f ; CH */ rorx T1, a, 34 /* T1 = a >> 34 ; S0B */ xor y0, y1 /* y0 = (e>>41) ^ (e>>18) ; S1 */ xor y2, g /* y2 = f^g ; CH */ rorx y1, e, 14 /* y1 = (e >> 14) ; S1 */ and y2, e /* y2 = (f^g)&e ; CH */ xor y0, y1 /* y0 = (e>>41) ^ (e>>18) ^ (e>>14) ; S1 */ rorx y1, a, 39 /* y1 = a >> 39 ; S0A */ add d, h /* d = k + w + h + d ; -- */ and y3, b /* y3 = (a|c)&b ; MAJA */ xor y1, T1 /* y1 = (a>>39) ^ (a>>34) ; S0 */ rorx T1, a, 28 /* T1 = (a >> 28) ; S0 */ xor y2, g /* y2 = CH = ((f^g)&e)^g ; CH */ xor y1, T1 /* y1 = (a>>39) ^ (a>>34) ^ (a>>28) ; S0 */ mov T1, a /* T1 = a ; MAJB */ and T1, c /* T1 = a&c ; MAJB */ add y2, y0 /* y2 = S1 + CH ; -- */ or y3, T1 /* y3 = MAJ = (a|c)&b)|(a&c) ; MAJ */ add h, y1 /* h = k + w + h + S0 ; -- */ add d, y2 /* d = k + w + h + d + S1 + CH = d + t1 ; -- */ add h, y2 /* h = k + w + h + S0 + S1 + CH = t1 + S0 ; -- */ add h, y3 /* h = t1 + S0 + MAJ ; -- */ RotateState /*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 1 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ /*;;;;;;;;;;;;;;;;;;;;;;;;; */ /* Calculate w[t-15] ror 8 */ vpsrlq YTMP2, YTMP1, 8 vpsllq YTMP1, YTMP1, (64-8) vpor YTMP1, YTMP1, YTMP2 /* YTMP1 = W[-15] ror 8 */ /* XOR the three components */ vpxor YTMP3, YTMP3, YTMP4 /* YTMP3 = W[-15] ror 1 ^ W[-15] >> 7 */ vpxor YTMP1, YTMP3, YTMP1 /* YTMP1 = s0 */ /* Add three components, w[t-16], w[t-7] and sigma0 */ vpaddq YTMP0, YTMP0, YTMP1 /* YTMP0 = W[-16] + W[-7] + s0 */ /* Move to appropriate lanes for calculating w[16] and w[17] */ vperm2f128 Y_0, YTMP0, YTMP0, 0x0 /* Y_0 = W[-16] + W[-7] + s0 {BABA} */ /* Move to appropriate lanes for calculating w[18] and w[19] */ vpand YTMP0, YTMP0, [.LMASK_YMM_LO ADD_RIP] /* YTMP0 = W[-16] + W[-7] + s0 {DC00} */ /* Calculate w[16] and w[17] in both 128 bit lanes */ /* Calculate sigma1 for w[16] and w[17] on both 128 bit lanes */ vperm2f128 YTMP2, Y_3, Y_3, 0x11 /* YTMP2 = W[-2] {BABA} */ vpsrlq YTMP4, YTMP2, 6 /* YTMP4 = W[-2] >> 6 {BABA} */ mov y3, a /* y3 = a ; MAJA */ rorx y0, e, 41 /* y0 = e >> 41 ; S1A */ rorx y1, e, 18 /* y1 = e >> 18 ; S1B */ add h, [rsp+frame_XFER+1*8] /* h = k + w + h ; -- */ or y3, c /* y3 = a|c ; MAJA */ mov y2, f /* y2 = f ; CH */ rorx T1, a, 34 /* T1 = a >> 34 ; S0B */ xor y0, y1 /* y0 = (e>>41) ^ (e>>18) ; S1 */ xor y2, g /* y2 = f^g ; CH */ rorx y1, e, 14 /* y1 = (e >> 14) ; S1 */ xor y0, y1 /* y0 = (e>>41) ^ (e>>18) ^ (e>>14) ; S1 */ rorx y1, a, 39 /* y1 = a >> 39 ; S0A */ and y2, e /* y2 = (f^g)&e ; CH */ add d, h /* d = k + w + h + d ; -- */ and y3, b /* y3 = (a|c)&b ; MAJA */ xor y1, T1 /* y1 = (a>>39) ^ (a>>34) ; S0 */ rorx T1, a, 28 /* T1 = (a >> 28) ; S0 */ xor y2, g /* y2 = CH = ((f^g)&e)^g ; CH */ xor y1, T1 /* y1 = (a>>39) ^ (a>>34) ^ (a>>28) ; S0 */ mov T1, a /* T1 = a ; MAJB */ and T1, c /* T1 = a&c ; MAJB */ add y2, y0 /* y2 = S1 + CH ; -- */ or y3, T1 /* y3 = MAJ = (a|c)&b)|(a&c) ; MAJ */ add h, y1 /* h = k + w + h + S0 ; -- */ add d, y2 /* d = k + w + h + d + S1 + CH = d + t1 ; -- */ add h, y2 /* h = k + w + h + S0 + S1 + CH = t1 + S0 ; -- */ add h, y3 /* h = t1 + S0 + MAJ ; -- */ RotateState /*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 2 
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ /*;;;;;;;;;;;;;;;;;;;;;;;;; */ vpsrlq YTMP3, YTMP2, 19 /* YTMP3 = W[-2] >> 19 {BABA} */ vpsllq YTMP1, YTMP2, (64-19) /* YTMP1 = W[-2] << 45 {BABA} */ vpor YTMP3, YTMP3, YTMP1 /* YTMP3 = W[-2] ror 19 {BABA} */ vpxor YTMP4, YTMP4, YTMP3 /* YTMP4 = W[-2] ror 19 ^ W[-2] >> 6 {BABA} */ vpsrlq YTMP3, YTMP2, 61 /* YTMP3 = W[-2] >> 61 {BABA} */ vpsllq YTMP1, YTMP2, (64-61) /* YTMP1 = W[-2] << 3 {BABA} */ vpor YTMP3, YTMP3, YTMP1 /* YTMP3 = W[-2] ror 61 {BABA} */ vpxor YTMP4, YTMP4, YTMP3 /* YTMP4 = s1 = (W[-2] ror 19) ^ (W[-2] ror 61) ^ (W[-2] >> 6) {BABA} */ /* Add sigma1 to the other components to get w[16] and w[17] */ vpaddq Y_0, Y_0, YTMP4 /* Y_0 = {W[1], W[0], W[1], W[0]} */ /* Calculate sigma1 for w[18] and w[19] for upper 128 bit lane */ vpsrlq YTMP4, Y_0, 6 /* YTMP4 = W[-2] >> 6 {DC--} */ mov y3, a /* y3 = a ; MAJA */ rorx y0, e, 41 /* y0 = e >> 41 ; S1A */ add h, [rsp+frame_XFER+2*8] /* h = k + w + h ; -- */ rorx y1, e, 18 /* y1 = e >> 18 ; S1B */ or y3, c /* y3 = a|c ; MAJA */ mov y2, f /* y2 = f ; CH */ xor y2, g /* y2 = f^g ; CH */ rorx T1, a, 34 /* T1 = a >> 34 ; S0B */ xor y0, y1 /* y0 = (e>>41) ^ (e>>18) ; S1 */ and y2, e /* y2 = (f^g)&e ; CH */ rorx y1, e, 14 /* y1 = (e >> 14) ; S1 */ add d, h /* d = k + w + h + d ; -- */ and y3, b /* y3 = (a|c)&b ; MAJA */ xor y0, y1 /* y0 = (e>>41) ^ (e>>18) ^ (e>>14) ; S1 */ rorx y1, a, 39 /* y1 = a >> 39 ; S0A */ xor y2, g /* y2 = CH = ((f^g)&e)^g ; CH */ xor y1, T1 /* y1 = (a>>39) ^ (a>>34) ; S0 */ rorx T1, a, 28 /* T1 = (a >> 28) ; S0 */ xor y1, T1 /* y1 = (a>>39) ^ (a>>34) ^ (a>>28) ; S0 */ mov T1, a /* T1 = a ; MAJB */ and T1, c /* T1 = a&c ; MAJB */ add y2, y0 /* y2 = S1 + CH ; -- */ or y3, T1 /* y3 = MAJ = (a|c)&b)|(a&c) ; MAJ */ add h, y1 /* h = k + w + h + S0 ; -- */ add d, y2 /* d = k + w + h + d + S1 + CH = d + t1 ; -- */ add h, y2 /* h = k + w + h + S0 + S1 + CH = t1 + S0 ; -- */ add h, y3 /* h = t1 + S0 + MAJ ; -- */ RotateState /*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 3 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ /*;;;;;;;;;;;;;;;;;;;;;;;;; */ vpsrlq YTMP3, Y_0, 19 /* YTMP3 = W[-2] >> 19 {DC--} */ vpsllq YTMP1, Y_0, (64-19) /* YTMP1 = W[-2] << 45 {DC--} */ vpor YTMP3, YTMP3, YTMP1 /* YTMP3 = W[-2] ror 19 {DC--} */ vpxor YTMP4, YTMP4, YTMP3 /* YTMP4 = W[-2] ror 19 ^ W[-2] >> 6 {DC--} */ vpsrlq YTMP3, Y_0, 61 /* YTMP3 = W[-2] >> 61 {DC--} */ vpsllq YTMP1, Y_0, (64-61) /* YTMP1 = W[-2] << 3 {DC--} */ vpor YTMP3, YTMP3, YTMP1 /* YTMP3 = W[-2] ror 61 {DC--} */ vpxor YTMP4, YTMP4, YTMP3 /* YTMP4 = s1 = (W[-2] ror 19) ^ (W[-2] ror 61) ^ (W[-2] >> 6) {DC--} */ /* Add the sigma0 + w[t-7] + w[t-16] for w[18] and w[19] to newly calculated sigma1 to get w[18] and w[19] */ vpaddq YTMP2, YTMP0, YTMP4 /* YTMP2 = {W[3], W[2], --, --} */ /* Form w[19], w[18], w[17], w[16] */ vpblendd Y_0, Y_0, YTMP2, 0xF0 /* Y_0 = {W[3], W[2], W[1], W[0]} */ /* vperm2f128 Y_0, Y_0, YTMP2, 0x30 */ mov y3, a /* y3 = a ; MAJA */ rorx y0, e, 41 /* y0 = e >> 41 ; S1A */ rorx y1, e, 18 /* y1 = e >> 18 ; S1B */ add h, [rsp+frame_XFER+3*8] /* h = k + w + h ; -- */ or y3, c /* y3 = a|c ; MAJA */ mov y2, f /* y2 = f ; CH */ rorx T1, a, 34 /* T1 = a >> 34 ; S0B */ xor y0, y1 /* y0 = (e>>41) ^ (e>>18) ; S1 */ xor y2, g /* y2 = f^g ; CH */ rorx y1, e, 14 /* y1 = (e >> 14) ; S1 */ and y2, e /* y2 = (f^g)&e ; CH */ add d, h /* d = k + w + h + d ; -- */ and y3, b /* y3 = (a|c)&b ; MAJA */ xor y0, y1 /* y0 = (e>>41) ^ (e>>18) ^ (e>>14) ; S1 */ xor y2, g /* y2 = CH = ((f^g)&e)^g ; CH */ rorx y1, a, 39 /* y1 = a >> 39 ; S0A */ add
y2, y0 /* y2 = S1 + CH ; -- */ xor y1, T1 /* y1 = (a>>39) ^ (a>>34) ; S0 */ add d, y2 /* d = k + w + h + d + S1 + CH = d + t1 ; -- */ rorx T1, a, 28 /* T1 = (a >> 28) ; S0 */ xor y1, T1 /* y1 = (a>>39) ^ (a>>34) ^ (a>>28) ; S0 */ mov T1, a /* T1 = a ; MAJB */ and T1, c /* T1 = a&c ; MAJB */ or y3, T1 /* y3 = MAJ = (a|c)&b)|(a&c) ; MAJ */ add h, y1 /* h = k + w + h + S0 ; -- */ add h, y2 /* h = k + w + h + S0 + S1 + CH = t1 + S0 ; -- */ add h, y3 /* h = t1 + S0 + MAJ ; -- */ RotateState rotate_Ys .endm .macro DO_4ROUNDS /*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 0 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ mov y2, f /* y2 = f ; CH */ rorx y0, e, 41 /* y0 = e >> 41 ; S1A */ rorx y1, e, 18 /* y1 = e >> 18 ; S1B */ xor y2, g /* y2 = f^g ; CH */ xor y0, y1 /* y0 = (e>>41) ^ (e>>18) ; S1 */ rorx y1, e, 14 /* y1 = (e >> 14) ; S1 */ and y2, e /* y2 = (f^g)&e ; CH */ xor y0, y1 /* y0 = (e>>41) ^ (e>>18) ^ (e>>14) ; S1 */ rorx T1, a, 34 /* T1 = a >> 34 ; S0B */ xor y2, g /* y2 = CH = ((f^g)&e)^g ; CH */ rorx y1, a, 39 /* y1 = a >> 39 ; S0A */ mov y3, a /* y3 = a ; MAJA */ xor y1, T1 /* y1 = (a>>39) ^ (a>>34) ; S0 */ rorx T1, a, 28 /* T1 = (a >> 28) ; S0 */ add h, [rsp + frame_XFER + 8*0] /* h = k + w + h ; -- */ or y3, c /* y3 = a|c ; MAJA */ xor y1, T1 /* y1 = (a>>39) ^ (a>>34) ^ (a>>28) ; S0 */ mov T1, a /* T1 = a ; MAJB */ and y3, b /* y3 = (a|c)&b ; MAJA */ and T1, c /* T1 = a&c ; MAJB */ add y2, y0 /* y2 = S1 + CH ; -- */ add d, h /* d = k + w + h + d ; -- */ or y3, T1 /* y3 = MAJ = (a|c)&b)|(a&c) ; MAJ */ add h, y1 /* h = k + w + h + S0 ; -- */ add d, y2 /* d = k + w + h + d + S1 + CH = d + t1 ; -- */ /*add h, y2 ; h = k + w + h + S0 + S1 + CH = t1 + S0 ; -- */ /*add h, y3 ; h = t1 + S0 + MAJ ; -- */ RotateState /*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 1 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ add old_h, y2 /* h = k + w + h + S0 + S1 + CH = t1 + S0 ; -- */ mov y2, f /* y2 = f ; CH */ rorx y0, e, 41 /* y0 = e >> 41 ; S1A */ rorx y1, e, 18 /* y1 = e >> 18 ; S1B */ xor y2, g /* y2 = f^g ; CH */ xor y0, y1 /* y0 = (e>>41) ^ (e>>18) ; S1 */ rorx y1, e, 14 /* y1 = (e >> 14) ; S1 */ and y2, e /* y2 = (f^g)&e ; CH */ add old_h, y3 /* h = t1 + S0 + MAJ ; -- */ xor y0, y1 /* y0 = (e>>41) ^ (e>>18) ^ (e>>14) ; S1 */ rorx T1, a, 34 /* T1 = a >> 34 ; S0B */ xor y2, g /* y2 = CH = ((f^g)&e)^g ; CH */ rorx y1, a, 39 /* y1 = a >> 39 ; S0A */ mov y3, a /* y3 = a ; MAJA */ xor y1, T1 /* y1 = (a>>39) ^ (a>>34) ; S0 */ rorx T1, a, 28 /* T1 = (a >> 28) ; S0 */ add h, [rsp + frame_XFER + 8*1] /* h = k + w + h ; -- */ or y3, c /* y3 = a|c ; MAJA */ xor y1, T1 /* y1 = (a>>39) ^ (a>>34) ^ (a>>28) ; S0 */ mov T1, a /* T1 = a ; MAJB */ and y3, b /* y3 = (a|c)&b ; MAJA */ and T1, c /* T1 = a&c ; MAJB */ add y2, y0 /* y2 = S1 + CH ; -- */ add d, h /* d = k + w + h + d ; -- */ or y3, T1 /* y3 = MAJ = (a|c)&b)|(a&c) ; MAJ */ add h, y1 /* h = k + w + h + S0 ; -- */ add d, y2 /* d = k + w + h + d + S1 + CH = d + t1 ; -- */ /*add h, y2 ; h = k + w + h + S0 + S1 + CH = t1 + S0 ; -- */ /*add h, y3 ; h = t1 + S0 + MAJ ; -- */ RotateState /*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 2 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ add old_h, y2 /* h = k + w + h + S0 + S1 + CH = t1 + S0 ; -- */ mov y2, f /* y2 = f ; CH */ rorx y0, e, 41 /* y0 = e >> 41 ; S1A */ rorx y1, e, 18 /* y1 = e >> 18 ; S1B */ xor y2, g /* y2 = f^g ; CH */ xor y0, y1 /* y0 = (e>>41) ^ (e>>18) ; S1 */ rorx y1, e, 14 /* y1 = (e >> 14) ; S1 */ and y2, e /* y2 = (f^g)&e ; CH */ add old_h, y3 /* h = t1 + S0 + MAJ ; -- */ xor y0, y1 /* y0 = (e>>41) ^ 
(e>>18) ^ (e>>14) ; S1 */ rorx T1, a, 34 /* T1 = a >> 34 ; S0B */ xor y2, g /* y2 = CH = ((f^g)&e)^g ; CH */ rorx y1, a, 39 /* y1 = a >> 39 ; S0A */ mov y3, a /* y3 = a ; MAJA */ xor y1, T1 /* y1 = (a>>39) ^ (a>>34) ; S0 */ rorx T1, a, 28 /* T1 = (a >> 28) ; S0 */ add h, [rsp + frame_XFER + 8*2] /* h = k + w + h ; -- */ or y3, c /* y3 = a|c ; MAJA */ xor y1, T1 /* y1 = (a>>39) ^ (a>>34) ^ (a>>28) ; S0 */ mov T1, a /* T1 = a ; MAJB */ and y3, b /* y3 = (a|c)&b ; MAJA */ and T1, c /* T1 = a&c ; MAJB */ add y2, y0 /* y2 = S1 + CH ; -- */ add d, h /* d = k + w + h + d ; -- */ or y3, T1 /* y3 = MAJ = (a|c)&b)|(a&c) ; MAJ */ add h, y1 /* h = k + w + h + S0 ; -- */ add d, y2 /* d = k + w + h + d + S1 + CH = d + t1 ; -- */ /*add h, y2 ; h = k + w + h + S0 + S1 + CH = t1 + S0 ; -- */ /*add h, y3 ; h = t1 + S0 + MAJ ; -- */ RotateState /*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 3 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ add old_h, y2 /* h = k + w + h + S0 + S1 + CH = t1 + S0 ; -- */ mov y2, f /* y2 = f ; CH */ rorx y0, e, 41 /* y0 = e >> 41 ; S1A */ rorx y1, e, 18 /* y1 = e >> 18 ; S1B */ xor y2, g /* y2 = f^g ; CH */ xor y0, y1 /* y0 = (e>>41) ^ (e>>18) ; S1 */ rorx y1, e, 14 /* y1 = (e >> 14) ; S1 */ and y2, e /* y2 = (f^g)&e ; CH */ add old_h, y3 /* h = t1 + S0 + MAJ ; -- */ xor y0, y1 /* y0 = (e>>41) ^ (e>>18) ^ (e>>14) ; S1 */ rorx T1, a, 34 /* T1 = a >> 34 ; S0B */ xor y2, g /* y2 = CH = ((f^g)&e)^g ; CH */ rorx y1, a, 39 /* y1 = a >> 39 ; S0A */ mov y3, a /* y3 = a ; MAJA */ xor y1, T1 /* y1 = (a>>39) ^ (a>>34) ; S0 */ rorx T1, a, 28 /* T1 = (a >> 28) ; S0 */ add h, [rsp + frame_XFER + 8*3] /* h = k + w + h ; -- */ or y3, c /* y3 = a|c ; MAJA */ xor y1, T1 /* y1 = (a>>39) ^ (a>>34) ^ (a>>28) ; S0 */ mov T1, a /* T1 = a ; MAJB */ and y3, b /* y3 = (a|c)&b ; MAJA */ and T1, c /* T1 = a&c ; MAJB */ add y2, y0 /* y2 = S1 + CH ; -- */ add d, h /* d = k + w + h + d ; -- */ or y3, T1 /* y3 = MAJ = (a|c)&b)|(a&c) ; MAJ */ add h, y1 /* h = k + w + h + S0 ; -- */ add d, y2 /* d = k + w + h + d + S1 + CH = d + t1 ; -- */ add h, y2 /* h = k + w + h + S0 + S1 + CH = t1 + S0 ; -- */ add h, y3 /* h = t1 + S0 + MAJ ; -- */ RotateState .endm /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; void sha512_rorx(const void* M, void* D, uint64_t L); ; Purpose: Updates the SHA512 digest stored at D with the message stored in M. ; The size of the message pointed to by M must be an integer multiple of SHA512 ; message blocks. 
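; Implementation note: the rounds in this function use the BMI2 rorx
; instruction, which rotates without touching flags and writes to a freely
; chosen destination register, so the three terms of each big sigma can be
; computed independently rather than through a serial ror/xor chain.
; In C terms (a sketch only, not code from this file):
;   S1(e) = ror64(e, 41) ^ ror64(e, 18) ^ ror64(e, 14)
;   S0(a) = ror64(a, 39) ^ ror64(a, 34) ^ ror64(a, 28)
; where ror64(x, n) = (x >> n) | (x << (64 - n)); the "e >> 41" style
; comments in the round code are shorthand for these rotates.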
; L is the message length in SHA512 blocks */ .globl _gcry_sha512_transform_amd64_avx2 ELF(.type _gcry_sha512_transform_amd64_avx2,@function;) .align 16 _gcry_sha512_transform_amd64_avx2: xor eax, eax cmp rdx, 0 je .Lnowork vzeroupper /* Allocate Stack Space */ mov rax, rsp sub rsp, frame_size and rsp, ~(0x20 - 1) mov [rsp + frame_RSPSAVE], rax /* Save GPRs */ mov [rsp + frame_GPRSAVE + 8 * 0], rbp mov [rsp + frame_GPRSAVE + 8 * 1], rbx mov [rsp + frame_GPRSAVE + 8 * 2], r12 mov [rsp + frame_GPRSAVE + 8 * 3], r13 mov [rsp + frame_GPRSAVE + 8 * 4], r14 mov [rsp + frame_GPRSAVE + 8 * 5], r15 vpblendd xmm0, xmm0, xmm1, 0xf0 vpblendd ymm0, ymm0, ymm1, 0xf0 shl NUM_BLKS, 7 /* convert to bytes */ jz .Ldone_hash add NUM_BLKS, INP /* pointer to end of data */ mov [rsp + frame_INPEND], NUM_BLKS /*; load initial digest */ mov a,[8*0 + CTX] mov b,[8*1 + CTX] mov c,[8*2 + CTX] mov d,[8*3 + CTX] mov e,[8*4 + CTX] mov f,[8*5 + CTX] mov g,[8*6 + CTX] mov h,[8*7 + CTX] vmovdqa BYTE_FLIP_MASK, [.LPSHUFFLE_BYTE_FLIP_MASK ADD_RIP] .Loop0: lea TBL,[.LK512 ADD_RIP] /*; byte swap first 16 dwords */ COPY_YMM_AND_BSWAP Y_0, [INP + 0*32], BYTE_FLIP_MASK COPY_YMM_AND_BSWAP Y_1, [INP + 1*32], BYTE_FLIP_MASK COPY_YMM_AND_BSWAP Y_2, [INP + 2*32], BYTE_FLIP_MASK COPY_YMM_AND_BSWAP Y_3, [INP + 3*32], BYTE_FLIP_MASK mov [rsp + frame_INP], INP /*; schedule 64 input dwords, by doing 12 rounds of 4 each */ movq [rsp + frame_SRND],4 .align 16 .Loop1: vpaddq XFER, Y_0, [TBL + 0*32] vmovdqa [rsp + frame_XFER], XFER FOUR_ROUNDS_AND_SCHED vpaddq XFER, Y_0, [TBL + 1*32] vmovdqa [rsp + frame_XFER], XFER FOUR_ROUNDS_AND_SCHED vpaddq XFER, Y_0, [TBL + 2*32] vmovdqa [rsp + frame_XFER], XFER FOUR_ROUNDS_AND_SCHED vpaddq XFER, Y_0, [TBL + 3*32] vmovdqa [rsp + frame_XFER], XFER add TBL, 4*32 FOUR_ROUNDS_AND_SCHED subq [rsp + frame_SRND], 1 jne .Loop1 movq [rsp + frame_SRND], 2 .Loop2: vpaddq XFER, Y_0, [TBL + 0*32] vmovdqa [rsp + frame_XFER], XFER DO_4ROUNDS vpaddq XFER, Y_1, [TBL + 1*32] vmovdqa [rsp + frame_XFER], XFER add TBL, 2*32 DO_4ROUNDS vmovdqa Y_0, Y_2 vmovdqa Y_1, Y_3 subq [rsp + frame_SRND], 1 jne .Loop2 addm [8*0 + CTX],a addm [8*1 + CTX],b addm [8*2 + CTX],c addm [8*3 + CTX],d addm [8*4 + CTX],e addm [8*5 + CTX],f addm [8*6 + CTX],g addm [8*7 + CTX],h mov INP, [rsp + frame_INP] add INP, 128 cmp INP, [rsp + frame_INPEND] jne .Loop0 .Ldone_hash: + vzeroall /* Restore GPRs */ mov rbp, [rsp + frame_GPRSAVE + 8 * 0] mov rbx, [rsp + frame_GPRSAVE + 8 * 1] mov r12, [rsp + frame_GPRSAVE + 8 * 2] mov r13, [rsp + frame_GPRSAVE + 8 * 3] mov r14, [rsp + frame_GPRSAVE + 8 * 4] mov r15, [rsp + frame_GPRSAVE + 8 * 5] + /* Burn stack */ + vmovdqa [rsp + frame_XFER], XFER + xor eax, eax + /* Restore Stack Pointer */ mov rsp, [rsp + frame_RSPSAVE] - - vzeroall - - mov eax, frame_size + 31 .Lnowork: ret /*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ /*;; Binary Data */ .align 64 /* K[t] used in SHA512 hashing */ .LK512: .quad 0x428a2f98d728ae22,0x7137449123ef65cd .quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc .quad 0x3956c25bf348b538,0x59f111f1b605d019 .quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118 .quad 0xd807aa98a3030242,0x12835b0145706fbe .quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2 .quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1 .quad 0x9bdc06a725c71235,0xc19bf174cf692694 .quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3 .quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65 .quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483 .quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5 .quad 0x983e5152ee66dfab,0xa831c66d2db43210 .quad 
0xb00327c898fb213f,0xbf597fc7beef0ee4 .quad 0xc6e00bf33da88fc2,0xd5a79147930aa725 .quad 0x06ca6351e003826f,0x142929670a0e6e70 .quad 0x27b70a8546d22ffc,0x2e1b21385c26c926 .quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df .quad 0x650a73548baf63de,0x766a0abb3c77b2a8 .quad 0x81c2c92e47edaee6,0x92722c851482353b .quad 0xa2bfe8a14cf10364,0xa81a664bbc423001 .quad 0xc24b8b70d0f89791,0xc76c51a30654be30 .quad 0xd192e819d6ef5218,0xd69906245565a910 .quad 0xf40e35855771202a,0x106aa07032bbd1b8 .quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53 .quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8 .quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb .quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3 .quad 0x748f82ee5defb2fc,0x78a5636f43172f60 .quad 0x84c87814a1f0ab72,0x8cc702081a6439ec .quad 0x90befffa23631e28,0xa4506cebde82bde9 .quad 0xbef9a3f7b2c67915,0xc67178f2e372532b .quad 0xca273eceea26619c,0xd186b8c721c0c207 .quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178 .quad 0x06f067aa72176fba,0x0a637dc5a2c898a6 .quad 0x113f9804bef90dae,0x1b710b35131c471b .quad 0x28db77f523047d84,0x32caab7b40c72493 .quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c .quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a .quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817 .align 32 /* Mask for byte-swapping a couple of qwords in an XMM register using (v)pshufb. */ .LPSHUFFLE_BYTE_FLIP_MASK: .octa 0x08090a0b0c0d0e0f0001020304050607 .octa 0x18191a1b1c1d1e1f1011121314151617 .LMASK_YMM_LO: .octa 0x00000000000000000000000000000000 .octa 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF #endif #endif diff --git a/cipher/sha512-ssse3-amd64.S b/cipher/sha512-ssse3-amd64.S index 51193b36..8e950e0e 100644 --- a/cipher/sha512-ssse3-amd64.S +++ b/cipher/sha512-ssse3-amd64.S @@ -1,426 +1,432 @@ /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; Copyright (c) 2012, Intel Corporation ; ; All rights reserved. ; ; Redistribution and use in source and binary forms, with or without ; modification, are permitted provided that the following conditions are ; met: ; ; * Redistributions of source code must retain the above copyright ; notice, this list of conditions and the following disclaimer. ; ; * Redistributions in binary form must reproduce the above copyright ; notice, this list of conditions and the following disclaimer in the ; documentation and/or other materials provided with the ; distribution. ; ; * Neither the name of the Intel Corporation nor the names of its ; contributors may be used to endorse or promote products derived from ; this software without specific prior written permission. ; ; ; THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY ; EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR ; PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR ; CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, ; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, ; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ; LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ /* * Conversion to GAS assembly and integration to libgcrypt * by Jussi Kivilinna * * Note: original implementation was named as SHA512-SSE4. However, only SSSE3 * is required. 
*/ #ifdef __x86_64 #include <config.h> #if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && \ defined(HAVE_INTEL_SYNTAX_PLATFORM_AS) && \ defined(HAVE_GCC_INLINE_ASM_SSSE3) && defined(USE_SHA512) #ifdef __PIC__ # define ADD_RIP +rip #else # define ADD_RIP #endif #ifdef HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS # define ELF(...) __VA_ARGS__ #else # define ELF(...) /*_*/ #endif .intel_syntax noprefix .text /* Virtual Registers */ msg = rdi /* ARG1 */ digest = rsi /* ARG2 */ msglen = rdx /* ARG3 */ T1 = rcx T2 = r8 a_64 = r9 b_64 = r10 c_64 = r11 d_64 = r12 e_64 = r13 f_64 = r14 g_64 = r15 h_64 = rbx tmp0 = rax /* ; Local variables (stack frame) ; Note: frame_size must be an odd multiple of 8 bytes to XMM align RSP */ frame_W = 0 /* Message Schedule */ frame_W_size = (80 * 8) frame_WK = ((frame_W) + (frame_W_size)) /* W[t] + K[t] | W[t+1] + K[t+1] */ frame_WK_size = (2 * 8) frame_GPRSAVE = ((frame_WK) + (frame_WK_size)) frame_GPRSAVE_size = (5 * 8) frame_size = ((frame_GPRSAVE) + (frame_GPRSAVE_size)) /* Useful QWORD "arrays" for simpler memory references */ #define MSG(i) msg + 8*(i) /* Input message (arg1) */ #define DIGEST(i) digest + 8*(i) /* Output Digest (arg2) */ #define K_t(i) .LK512 + 8*(i) ADD_RIP /* SHA Constants (static mem) */ #define W_t(i) rsp + frame_W + 8*(i) /* Message Schedule (stack frame) */ #define WK_2(i) rsp + frame_WK + 8*((i) % 2) /* W[t]+K[t] (stack frame) */ /* MSG, DIGEST, K_t, W_t are arrays */ /* WK_2(t) points to 1 of 2 qwords at frame.WK depending on t being odd/even */ .macro RotateState /* Rotate symbols a..h right */ __TMP = h_64 h_64 = g_64 g_64 = f_64 f_64 = e_64 e_64 = d_64 d_64 = c_64 c_64 = b_64 b_64 = a_64 a_64 = __TMP .endm .macro SHA512_Round t /* Compute Round %%t */ mov T1, f_64 /* T1 = f */ mov tmp0, e_64 /* tmp = e */ xor T1, g_64 /* T1 = f ^ g */ ror tmp0, 23 /* 41 ; tmp = e ror 23 */ and T1, e_64 /* T1 = (f ^ g) & e */ xor tmp0, e_64 /* tmp = (e ror 23) ^ e */ xor T1, g_64 /* T1 = ((f ^ g) & e) ^ g = CH(e,f,g) */ add T1, [WK_2(\t)] /* W[t] + K[t] from message scheduler */ ror tmp0, 4 /* 18 ; tmp = ((e ror 23) ^ e) ror 4 */ xor tmp0, e_64 /* tmp = (((e ror 23) ^ e) ror 4) ^ e */ mov T2, a_64 /* T2 = a */ add T1, h_64 /* T1 = CH(e,f,g) + W[t] + K[t] + h */ ror tmp0, 14 /* 14 ; tmp = ((((e ror23)^e)ror4)^e)ror14 = S1(e) */ add T1, tmp0 /* T1 = CH(e,f,g) + W[t] + K[t] + S1(e) */ mov tmp0, a_64 /* tmp = a */ xor T2, c_64 /* T2 = a ^ c */ and tmp0, c_64 /* tmp = a & c */ and T2, b_64 /* T2 = (a ^ c) & b */ xor T2, tmp0 /* T2 = ((a ^ c) & b) ^ (a & c) = Maj(a,b,c) */ mov tmp0, a_64 /* tmp = a */ ror tmp0, 5 /* 39 ; tmp = a ror 5 */ xor tmp0, a_64 /* tmp = (a ror 5) ^ a */ add d_64, T1 /* e(next_state) = d + T1 */ ror tmp0, 6 /* 34 ; tmp = ((a ror 5) ^ a) ror 6 */ xor tmp0, a_64 /* tmp = (((a ror 5) ^ a) ror 6) ^ a */ lea h_64, [T1 + T2] /* a(next_state) = T1 + Maj(a,b,c) */ ror tmp0, 28 /* 28 ; tmp = ((((a ror5)^a)ror6)^a)ror28 = S0(a) */ add h_64, tmp0 /* a(next_state) = T1 + Maj(a,b,c) + S0(a) */ RotateState .endm
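/*
 * The scheduler macro below produces two message-schedule qwords per
 * invocation, W[t] and W[t+1], and stores W[t]+K[t] and W[t+1]+K[t+1] into
 * the WK_2 scratch slots consumed by the next two rounds. In C terms (an
 * illustrative sketch, not part of the build):
 *
 *   for (t = 16; t < 80; t += 2) {
 *     w[t]   = s1(w[t-2]) + w[t-7] + s0(w[t-15]) + w[t-16];
 *     w[t+1] = s1(w[t-1]) + w[t-6] + s0(w[t-14]) + w[t-15];
 *     wk[0]  = w[t]   + K[t];      // one 16-byte store covers both slots
 *     wk[1]  = w[t+1] + K[t+1];
 *   }
 *
 * Both halves of each XMM register are processed at once, and since SSE2
 * has no 64-bit rotate instruction, the rotates inside s0/s1 are built from
 * psrlq/psllq/pxor chains factored so that one shift feeds the next, e.g.
 * ((((x>>42)^x)>>13)^x)>>6 = x>>61 ^ x>>19 ^ x>>6, which needs fewer
 * instructions than three independent emulated rotates.
 */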
.macro SHA512_2Sched_2Round_sse t /* ; Compute rounds %%t-2 and %%t-1 ; Compute message schedule QWORDS %%t and %%t+1 ; Two rounds are computed based on the values for K[t-2]+W[t-2] and ; K[t-1]+W[t-1] which were previously stored at WK_2 by the message ; scheduler. ; The two new schedule QWORDS are stored at [W_t(%%t)] and [W_t(%%t+1)]. ; They are then added to their respective SHA512 constants at ; [K_t(%%t)] and [K_t(%%t+1)] and stored at dqword [WK_2(%%t)] ; For brevity, the comments following vectored instructions only refer to ; the first of a pair of QWORDS. ; E.g. XMM2=W[t-2] really means XMM2={W[t-2]|W[t-1]} ; The computation of the message schedule and the rounds are tightly ; stitched to take advantage of instruction-level parallelism. ; For clarity, integer instructions (for the rounds calculation) are indented ; by one tab. Vectored instructions (for the message scheduler) are indented ; by two tabs. */ mov T1, f_64 movdqa xmm2, [W_t(\t-2)] /* XMM2 = W[t-2] */ xor T1, g_64 and T1, e_64 movdqa xmm0, xmm2 /* XMM0 = W[t-2] */ xor T1, g_64 add T1, [WK_2(\t)] movdqu xmm5, [W_t(\t-15)] /* XMM5 = W[t-15] */ mov tmp0, e_64 ror tmp0, 23 /* 41 */ movdqa xmm3, xmm5 /* XMM3 = W[t-15] */ xor tmp0, e_64 ror tmp0, 4 /* 18 */ psrlq xmm0, 61 - 19 /* XMM0 = W[t-2] >> 42 */ xor tmp0, e_64 ror tmp0, 14 /* 14 */ psrlq xmm3, (8 - 7) /* XMM3 = W[t-15] >> 1 */ add T1, tmp0 add T1, h_64 pxor xmm0, xmm2 /* XMM0 = (W[t-2] >> 42) ^ W[t-2] */ mov T2, a_64 xor T2, c_64 pxor xmm3, xmm5 /* XMM3 = (W[t-15] >> 1) ^ W[t-15] */ and T2, b_64 mov tmp0, a_64 psrlq xmm0, 19 - 6 /* XMM0 = ((W[t-2]>>42)^W[t-2])>>13 */ and tmp0, c_64 xor T2, tmp0 psrlq xmm3, (7 - 1) /* XMM3 = ((W[t-15]>>1)^W[t-15])>>6 */ mov tmp0, a_64 ror tmp0, 5 /* 39 */ pxor xmm0, xmm2 /* XMM0 = (((W[t-2]>>42)^W[t-2])>>13)^W[t-2] */ xor tmp0, a_64 ror tmp0, 6 /* 34 */ pxor xmm3, xmm5 /* XMM3 = (((W[t-15]>>1)^W[t-15])>>6)^W[t-15] */ xor tmp0, a_64 ror tmp0, 28 /* 28 */ psrlq xmm0, 6 /* XMM0 = ((((W[t-2]>>42)^W[t-2])>>13)^W[t-2])>>6 */ add T2, tmp0 add d_64, T1 psrlq xmm3, 1 /* XMM3 = ((((W[t-15]>>1)^W[t-15])>>6)^W[t-15])>>1 */ lea h_64, [T1 + T2] RotateState movdqa xmm1, xmm2 /* XMM1 = W[t-2] */ mov T1, f_64 xor T1, g_64 movdqa xmm4, xmm5 /* XMM4 = W[t-15] */ and T1, e_64 xor T1, g_64 psllq xmm1, (64 - 19) - (64 - 61) /* XMM1 = W[t-2] << 42 */ add T1, [WK_2(\t+1)] mov tmp0, e_64 psllq xmm4, (64 - 1) - (64 - 8) /* XMM4 = W[t-15] << 7 */ ror tmp0, 23 /* 41 */ xor tmp0, e_64 pxor xmm1, xmm2 /* XMM1 = (W[t-2] << 42)^W[t-2] */ ror tmp0, 4 /* 18 */ xor tmp0, e_64 pxor xmm4, xmm5 /* XMM4 = (W[t-15]<<7)^W[t-15] */ ror tmp0, 14 /* 14 */ add T1, tmp0 psllq xmm1, (64 - 61) /* XMM1 = ((W[t-2] << 42)^W[t-2])<<3 */ add T1, h_64 mov T2, a_64 psllq xmm4, (64 - 8) /* XMM4 = ((W[t-15]<<7)^W[t-15])<<56 */ xor T2, c_64 and T2, b_64 pxor xmm0, xmm1 /* XMM0 = s1(W[t-2]) */ mov tmp0, a_64 and tmp0, c_64 movdqu xmm1, [W_t(\t-7)] /* XMM1 = W[t-7] */ xor T2, tmp0 pxor xmm3, xmm4 /* XMM3 = s0(W[t-15]) */ mov tmp0, a_64 paddq xmm0, xmm3 /* XMM0 = s1(W[t-2]) + s0(W[t-15]) */ ror tmp0, 5 /* 39 */ paddq xmm0, [W_t(\t-16)] /* XMM0 = s1(W[t-2]) + s0(W[t-15]) + W[t-16] */ xor tmp0, a_64 paddq xmm0, xmm1 /* XMM0 = s1(W[t-2]) + W[t-7] + s0(W[t-15]) + W[t-16] */ ror tmp0, 6 /* 34 */ movdqa [W_t(\t)], xmm0 /* Store scheduled qwords */ xor tmp0, a_64 paddq xmm0, [K_t(\t)] /* Compute W[t]+K[t] */ ror tmp0, 28 /* 28 */ movdqa [WK_2(\t)], xmm0 /* Store W[t]+K[t] for next rounds */ add T2, tmp0 add d_64, T1 lea h_64, [T1 + T2] RotateState .endm /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; void sha512_sse4(const void* M, void* D, uint64_t L); ; Purpose: Updates the SHA512 digest stored at D with the message stored in M. ; The size of the message pointed to by M must be an integer multiple of SHA512 ; message blocks. ; L is the message length in SHA512 blocks.
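; Note on the change below: the transform now wipes its own stack frame
; before returning. xmm0 is zeroed by the pxor block and then stored over
; the whole W[] schedule area and the WK scratch qwords, and the function
; returns 0 in rax, where it previously returned frame_size as the stack
; burn depth for the caller. Roughly, in C terms (a sketch only):
;   memset(frame.W, 0, 80 * 8);   // frame_W: the message schedule
;   memset(frame.WK, 0, 2 * 8);   // frame_WK: the W[t]+K[t] scratch
;   return 0;                     // caller no longer burns frame_size bytes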
*/ .globl _gcry_sha512_transform_amd64_ssse3 ELF(.type _gcry_sha512_transform_amd64_ssse3,@function;) .align 16 _gcry_sha512_transform_amd64_ssse3: xor eax, eax cmp msglen, 0 je .Lnowork /* Allocate Stack Space */ sub rsp, frame_size /* Save GPRs */ mov [rsp + frame_GPRSAVE + 8 * 0], rbx mov [rsp + frame_GPRSAVE + 8 * 1], r12 mov [rsp + frame_GPRSAVE + 8 * 2], r13 mov [rsp + frame_GPRSAVE + 8 * 3], r14 mov [rsp + frame_GPRSAVE + 8 * 4], r15 .Lupdateblock: /* Load state variables */ mov a_64, [DIGEST(0)] mov b_64, [DIGEST(1)] mov c_64, [DIGEST(2)] mov d_64, [DIGEST(3)] mov e_64, [DIGEST(4)] mov f_64, [DIGEST(5)] mov g_64, [DIGEST(6)] mov h_64, [DIGEST(7)] t = 0 .rept 80/2 + 1 /* (80 rounds) / (2 rounds/iteration) + (1 iteration) */ /* +1 iteration because the scheduler leads hashing by 1 iteration */ .if t < 2 /* BSWAP 2 QWORDS */ movdqa xmm1, [.LXMM_QWORD_BSWAP ADD_RIP] movdqu xmm0, [MSG(t)] pshufb xmm0, xmm1 /* BSWAP */ movdqa [W_t(t)], xmm0 /* Store Scheduled Pair */ paddq xmm0, [K_t(t)] /* Compute W[t]+K[t] */ movdqa [WK_2(t)], xmm0 /* Store into WK for rounds */ .elseif t < 16 /* BSWAP 2 QWORDS; Compute 2 Rounds */ movdqu xmm0, [MSG(t)] pshufb xmm0, xmm1 /* BSWAP */ SHA512_Round (t - 2) /* Round t-2 */ movdqa [W_t(t)], xmm0 /* Store Scheduled Pair */ paddq xmm0, [K_t(t)] /* Compute W[t]+K[t] */ SHA512_Round (t - 1) /* Round t-1 */ movdqa [WK_2(t)], xmm0 /* Store W[t]+K[t] into WK */ .elseif t < 79 /* Schedule 2 QWORDS; Compute 2 Rounds */ SHA512_2Sched_2Round_sse t .else /* Compute 2 Rounds */ SHA512_Round (t - 2) SHA512_Round (t - 1) .endif t = (t)+2 .endr /* Update digest */ add [DIGEST(0)], a_64 add [DIGEST(1)], b_64 add [DIGEST(2)], c_64 add [DIGEST(3)], d_64 add [DIGEST(4)], e_64 add [DIGEST(5)], f_64 add [DIGEST(6)], g_64 add [DIGEST(7)], h_64 /* Advance to next message block */ add msg, 16*8 dec msglen jnz .Lupdateblock /* Restore GPRs */ mov rbx, [rsp + frame_GPRSAVE + 8 * 0] mov r12, [rsp + frame_GPRSAVE + 8 * 1] mov r13, [rsp + frame_GPRSAVE + 8 * 2] mov r14, [rsp + frame_GPRSAVE + 8 * 3] mov r15, [rsp + frame_GPRSAVE + 8 * 4] - /* Restore Stack Pointer */ - add rsp, frame_size - pxor xmm0, xmm0 pxor xmm1, xmm1 pxor xmm2, xmm2 pxor xmm3, xmm3 pxor xmm4, xmm4 pxor xmm5, xmm5 - /* Return stack burn depth */ - mov rax, frame_size + /* Burn stack */ + t = 0 + .rept frame_W_size / 16 + movdqu [rsp + frame_W + (t) * 16], xmm0 + t = ((t)+1) + .endr + movdqu [rsp + frame_WK], xmm0 + xor eax, eax + + /* Restore Stack Pointer */ + add rsp, frame_size .Lnowork: ret /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;; Binary Data */ .align 16 /* Mask for byte-swapping a couple of qwords in an XMM register using (v)pshufb. 
*/ .LXMM_QWORD_BSWAP: .octa 0x08090a0b0c0d0e0f0001020304050607 /* K[t] used in SHA512 hashing */ .LK512: .quad 0x428a2f98d728ae22,0x7137449123ef65cd .quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc .quad 0x3956c25bf348b538,0x59f111f1b605d019 .quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118 .quad 0xd807aa98a3030242,0x12835b0145706fbe .quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2 .quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1 .quad 0x9bdc06a725c71235,0xc19bf174cf692694 .quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3 .quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65 .quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483 .quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5 .quad 0x983e5152ee66dfab,0xa831c66d2db43210 .quad 0xb00327c898fb213f,0xbf597fc7beef0ee4 .quad 0xc6e00bf33da88fc2,0xd5a79147930aa725 .quad 0x06ca6351e003826f,0x142929670a0e6e70 .quad 0x27b70a8546d22ffc,0x2e1b21385c26c926 .quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df .quad 0x650a73548baf63de,0x766a0abb3c77b2a8 .quad 0x81c2c92e47edaee6,0x92722c851482353b .quad 0xa2bfe8a14cf10364,0xa81a664bbc423001 .quad 0xc24b8b70d0f89791,0xc76c51a30654be30 .quad 0xd192e819d6ef5218,0xd69906245565a910 .quad 0xf40e35855771202a,0x106aa07032bbd1b8 .quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53 .quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8 .quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb .quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3 .quad 0x748f82ee5defb2fc,0x78a5636f43172f60 .quad 0x84c87814a1f0ab72,0x8cc702081a6439ec .quad 0x90befffa23631e28,0xa4506cebde82bde9 .quad 0xbef9a3f7b2c67915,0xc67178f2e372532b .quad 0xca273eceea26619c,0xd186b8c721c0c207 .quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178 .quad 0x06f067aa72176fba,0x0a637dc5a2c898a6 .quad 0x113f9804bef90dae,0x1b710b35131c471b .quad 0x28db77f523047d84,0x32caab7b40c72493 .quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c .quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a .quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817 #endif #endif
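For local testing of this change, a small harness along the following lines can be used. It is hypothetical and not part of the patch: the prototype follows the header comment above plus the burn-depth return in rax, and the reference value is the first digest word of SHA-512("abc") from FIPS 180-4.

    #include <stdint.h>
    #include <stdio.h>

    /* hypothetical declaration for the assembly routine above */
    extern uint64_t _gcry_sha512_transform_amd64_ssse3(const void *msg,
                                                       uint64_t digest[8],
                                                       uint64_t nblks);

    int main(void)
    {
      /* SHA-512 initial hash values (FIPS 180-4) */
      uint64_t h[8] = {
        0x6a09e667f3bcc908ULL, 0xbb67ae8584caa73bULL,
        0x3c6ef372fe94f82bULL, 0xa54ff53a5f1d36f1ULL,
        0x510e527fade682d1ULL, 0x9b05688c2b3e6c1fULL,
        0x1f83d9abfb41bd6bULL, 0x5be0cd19137e2179ULL
      };
      /* one padded block containing the message "abc" */
      uint8_t block[128] = { 0 };
      block[0] = 'a'; block[1] = 'b'; block[2] = 'c'; block[3] = 0x80;
      block[127] = 24;  /* message length in bits */

      uint64_t burn = _gcry_sha512_transform_amd64_ssse3(block, h, 1);

      /* with this patch the transform burns its own stack and returns 0 */
      printf("burn depth: %llu\n", (unsigned long long)burn);
      /* expected first digest word: ddaf35a193617aba */
      printf("h[0] = %016llx\n", (unsigned long long)h[0]);
      return 0;
    }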