diff --git a/cipher/sha256-avx2-bmi2-amd64.S b/cipher/sha256-avx2-bmi2-amd64.S index 598f9382..5fc402cd 100644 --- a/cipher/sha256-avx2-bmi2-amd64.S +++ b/cipher/sha256-avx2-bmi2-amd64.S @@ -1,829 +1,568 @@ /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; Copyright (c) 2012, Intel Corporation ; ; All rights reserved. ; ; Redistribution and use in source and binary forms, with or without ; modification, are permitted provided that the following conditions are ; met: ; ; * Redistributions of source code must retain the above copyright ; notice, this list of conditions and the following disclaimer. ; ; * Redistributions in binary form must reproduce the above copyright ; notice, this list of conditions and the following disclaimer in the ; documentation and/or other materials provided with the ; distribution. ; ; * Neither the name of the Intel Corporation nor the names of its ; contributors may be used to endorse or promote products derived from ; this software without specific prior written permission. ; ; ; THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY ; EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR ; PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR ; CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, ; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, ; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ; LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; ; This code is described in an Intel White-Paper: ; "Fast SHA-256 Implementations on Intel Architecture Processors" ; ; To find it, surf to http://www.intel.com/p/en_US/embedded ; and search for that title. ; The paper is expected to be released roughly at the end of April, 2012 ; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; This code schedules 2 blocks at a time, with 4 lanes per block ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ /* * Conversion to GAS assembly and integration to libgcrypt * by Jussi Kivilinna */ #ifdef __x86_64 #include #if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && \ defined(HAVE_INTEL_SYNTAX_PLATFORM_AS) && \ defined(HAVE_GCC_INLINE_ASM_AVX2) && defined(HAVE_GCC_INLINE_ASM_BMI2) && \ defined(USE_SHA256) #ifdef __PIC__ # define ADD_RIP +rip #else # define ADD_RIP #endif #ifdef HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS # define ELF(...) __VA_ARGS__ #else # define ELF(...) 
/*_*/ #endif .intel_syntax noprefix #define VMOVDQ vmovdqu /* ; assume buffers not aligned */ /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; Define Macros */ /* addm [mem], reg */ /* Add reg to mem using reg-mem add and store */ .macro addm p1 p2 add \p2, \p1 mov \p1, \p2 .endm /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ X0 = ymm4 X1 = ymm5 X2 = ymm6 X3 = ymm7 /* XMM versions of above */ XWORD0 = xmm4 XWORD1 = xmm5 XWORD2 = xmm6 XWORD3 = xmm7 XTMP0 = ymm0 XTMP1 = ymm1 XTMP2 = ymm2 XTMP3 = ymm3 XTMP4 = ymm8 XFER = ymm9 XTMP5 = ymm11 SHUF_00BA = ymm10 /* shuffle xBxA -> 00BA */ SHUF_DC00 = ymm12 /* shuffle xDxC -> DC00 */ BYTE_FLIP_MASK = ymm13 X_BYTE_FLIP_MASK = xmm13 /* XMM version of BYTE_FLIP_MASK */ NUM_BLKS = rdx /* 3rd arg */ CTX = rsi /* 2nd arg */ INP = rdi /* 1st arg */ c = ecx d = r8d e = edx /* clobbers NUM_BLKS */ y3 = edi /* clobbers INP */ TBL = rbp SRND = CTX /* SRND is same register as CTX */ a = eax b = ebx f = r9d g = r10d h = r11d old_h = r11d T1 = r12d y0 = r13d y1 = r14d y2 = r15d _XFER_SIZE = 2*64*4 /* 2 blocks, 64 rounds, 4 bytes/round */ _XMM_SAVE_SIZE = 0 _INP_END_SIZE = 8 _INP_SIZE = 8 _CTX_SIZE = 8 _RSP_SIZE = 8 _XFER = 0 _XMM_SAVE = _XFER + _XFER_SIZE _INP_END = _XMM_SAVE + _XMM_SAVE_SIZE _INP = _INP_END + _INP_END_SIZE _CTX = _INP + _INP_SIZE _RSP = _CTX + _CTX_SIZE STACK_SIZE = _RSP + _RSP_SIZE /* rotate_Xs */ /* Rotate values of symbols X0...X3 */ .macro rotate_Xs X_ = X0 X0 = X1 X1 = X2 X2 = X3 X3 = X_ .endm /* ROTATE_ARGS */ /* Rotate values of symbols a...h */ .macro ROTATE_ARGS old_h = h TMP_ = h h = g g = f f = e e = d d = c c = b b = a a = TMP_ .endm -.macro FOUR_ROUNDS_AND_SCHED XFER -/* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 0 ;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ - - mov y3, a /* y3 = a ; MAJA */ - rorx y0, e, 25 /* y0 = e >> 25 ; S1A */ - rorx y1, e, 11 /* y1 = e >> 11 ; S1B */ - - add h, [\XFER+0*4] /* h = k + w + h ; -- */ - or y3, c /* y3 = a|c ; MAJA */ - vpalignr XTMP0, X3, X2, 4 /* XTMP0 = W[-7] */ - mov y2, f /* y2 = f ; CH */ - rorx T1, a, 13 /* T1 = a >> 13 ; S0B */ +.macro ONE_ROUND_PART1 XFER + /* h += Sum1 (e) + Ch (e, f, g) + (k[t] + w[0]); + * d += h; + * h += Sum0 (a) + Maj (a, b, c); + * + * Ch(x, y, z) => ((x & y) + (~x & z)) + * Maj(x, y, z) => ((x & y) + (z & (x ^ y))) + */ + + mov y3, e + add h, [\XFER] + and y3, f + rorx y0, e, 25 + rorx y1, e, 11 + lea h, [h + y3] + andn y3, e, g + rorx T1, a, 13 + xor y0, y1 + lea h, [h + y3] +.endm +.macro ONE_ROUND_PART2 + rorx y2, a, 22 + rorx y1, e, 6 + mov y3, a + xor T1, y2 + xor y0, y1 + xor y3, b + lea h, [h + y0] + mov y0, a + rorx y2, a, 2 + add d, h + and y3, c + xor T1, y2 + lea h, [h + y3] + lea h, [h + T1] + and y0, b + lea h, [h + y0] +.endm - xor y0, y1 /* y0 = (e>>25) ^ (e>>11) ; S1 */ - xor y2, g /* y2 = f^g ; CH */ - vpaddd XTMP0, XTMP0, X0 /* XTMP0 = W[-7] + W[-16]; y1 = (e >> 6) ; S1 */ - rorx y1, e, 6 /* y1 = (e >> 6) ; S1 */ +.macro ONE_ROUND XFER + ONE_ROUND_PART1 \XFER + ONE_ROUND_PART2 +.endm - and y2, e /* y2 = (f^g)&e ; CH */ - xor y0, y1 /* y0 = (e>>25) ^ (e>>11) ^ (e>>6) ; S1 */ - rorx y1, a, 22 /* y1 = a >> 22 ; S0A */ - add d, h /* d = k + w + h + d ; -- */ +.macro FOUR_ROUNDS_AND_SCHED XFER, XFEROUT +/* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 0 ;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ - and y3, b /* y3 = (a|c)&b ; MAJA */ + vpalignr XTMP0, X3, X2, 4 /* XTMP0 = W[-7] */ + vpaddd XTMP0, XTMP0, X0 /* XTMP0 = W[-7] + W[-16]; y1 = (e >> 6); S1 */ vpalignr XTMP1, X1, X0, 4 /* XTMP1 = W[-15] */ - xor y1, T1 /* y1 = (a>>22) ^ (a>>13) ; S0 */ - rorx T1, a, 2 /* T1 = (a >> 2) ; S0 */ - - xor y2, g 
/* y2 = CH = ((f^g)&e)^g ; CH */ vpsrld XTMP2, XTMP1, 7 - xor y1, T1 /* y1 = (a>>22) ^ (a>>13) ^ (a>>2) ; S0 */ - mov T1, a /* T1 = a ; MAJB */ - and T1, c /* T1 = a&c ; MAJB */ - - add y2, y0 /* y2 = S1 + CH ; -- */ vpslld XTMP3, XTMP1, (32-7) - or y3, T1 /* y3 = MAJ = (a|c)&b)|(a&c) ; MAJ */ - add h, y1 /* h = k + w + h + S0 ; -- */ - - add d, y2 /* d = k + w + h + d + S1 + CH = d + t1 ; -- */ vpor XTMP3, XTMP3, XTMP2 /* XTMP3 = W[-15] ror 7 */ - vpsrld XTMP2, XTMP1,18 - add h, y2 /* h = k + w + h + S0 + S1 + CH = t1 + S0; -- */ - lea h, [h + y3] /* h = t1 + S0 + MAJ ; -- */ - -ROTATE_ARGS + ONE_ROUND 0*4+\XFER + ROTATE_ARGS /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 1 ;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ - - mov y3, a /* y3 = a ; MAJA */ - rorx y0, e, 25 /* y0 = e >> 25 ; S1A */ - rorx y1, e, 11 /* y1 = e >> 11 ; S1B */ - add h, [\XFER+1*4] /* h = k + w + h ; -- */ - or y3, c /* y3 = a|c ; MAJA */ - - vpsrld XTMP4, XTMP1, 3 /* XTMP4 = W[-15] >> 3 */ - mov y2, f /* y2 = f ; CH */ - rorx T1, a, 13 /* T1 = a >> 13 ; S0B */ - xor y0, y1 /* y0 = (e>>25) ^ (e>>11) ; S1 */ - xor y2, g /* y2 = f^g ; CH */ - - - rorx y1, e, 6 /* y1 = (e >> 6) ; S1 */ - xor y0, y1 /* y0 = (e>>25) ^ (e>>11) ^ (e>>6) ; S1 */ - rorx y1, a, 22 /* y1 = a >> 22 ; S0A */ - and y2, e /* y2 = (f^g)&e ; CH */ - add d, h /* d = k + w + h + d ; -- */ - vpslld XTMP1, XTMP1, (32-18) - and y3, b /* y3 = (a|c)&b ; MAJA */ - xor y1, T1 /* y1 = (a>>22) ^ (a>>13) ; S0 */ - vpxor XTMP3, XTMP3, XTMP1 - rorx T1, a, 2 /* T1 = (a >> 2) ; S0 */ - xor y2, g /* y2 = CH = ((f^g)&e)^g ; CH */ - vpxor XTMP3, XTMP3, XTMP2 /* XTMP3 = W[-15] ror 7 ^ W[-15] ror 18 */ - xor y1, T1 /* y1 = (a>>22) ^ (a>>13) ^ (a>>2) ; S0 */ - mov T1, a /* T1 = a ; MAJB */ - and T1, c /* T1 = a&c ; MAJB */ - add y2, y0 /* y2 = S1 + CH ; -- */ - vpxor XTMP1, XTMP3, XTMP4 /* XTMP1 = s0 */ vpshufd XTMP2, X3, 0b11111010 /* XTMP2 = W[-2] {BBAA} */ - or y3, T1 /* y3 = MAJ = (a|c)&b)|(a&c) ; MAJ */ - add h, y1 /* h = k + w + h + S0 ; -- */ - vpaddd XTMP0, XTMP0, XTMP1 /* XTMP0 = W[-16] + W[-7] + s0 */ - add d, y2 /* d = k + w + h + d + S1 + CH = d + t1 ; -- */ - add h, y2 /* h = k + w + h + S0 + S1 + CH = t1 + S0; -- */ - lea h, [h + y3] /* h = t1 + S0 + MAJ ; -- */ - vpsrld XTMP4, XTMP2, 10 /* XTMP4 = W[-2] >> 10 {BBAA} */ - -ROTATE_ARGS + ONE_ROUND 1*4+\XFER + ROTATE_ARGS /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 2 ;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ - mov y3, a /* y3 = a ; MAJA */ - rorx y0, e, 25 /* y0 = e >> 25 ; S1A */ - add h, [\XFER+2*4] /* h = k + w + h ; -- */ - vpsrlq XTMP3, XTMP2, 19 /* XTMP3 = W[-2] ror 19 {xBxA} */ - rorx y1, e, 11 /* y1 = e >> 11 ; S1B */ - or y3, c /* y3 = a|c ; MAJA */ - mov y2, f /* y2 = f ; CH */ - xor y2, g /* y2 = f^g ; CH */ - - rorx T1, a, 13 /* T1 = a >> 13 ; S0B */ - xor y0, y1 /* y0 = (e>>25) ^ (e>>11) ; S1 */ vpsrlq XTMP2, XTMP2, 17 /* XTMP2 = W[-2] ror 17 {xBxA} */ - and y2, e /* y2 = (f^g)&e ; CH */ - - rorx y1, e, 6 /* y1 = (e >> 6) ; S1 */ vpxor XTMP2, XTMP2, XTMP3 - add d, h /* d = k + w + h + d ; -- */ - and y3, b /* y3 = (a|c)&b ; MAJA */ - - xor y0, y1 /* y0 = (e>>25) ^ (e>>11) ^ (e>>6) ; S1 */ - rorx y1, a, 22 /* y1 = a >> 22 ; S0A */ vpxor XTMP4, XTMP4, XTMP2 /* XTMP4 = s1 {xBxA} */ - xor y2, g /* y2 = CH = ((f^g)&e)^g ; CH */ - vpshufb XTMP4, XTMP4, SHUF_00BA /* XTMP4 = s1 {00BA} */ - xor y1, T1 /* y1 = (a>>22) ^ (a>>13) ; S0 */ - rorx T1, a, 2 /* T1 = (a >> 2) ; S0 */ vpaddd XTMP0, XTMP0, XTMP4 /* XTMP0 = {..., ..., W[1], W[0]} */ - - xor y1, T1 /* y1 = (a>>22) ^ (a>>13) ^ (a>>2) ; S0 */ - mov T1, a /* T1 = a ; MAJB */ 
- and T1, c /* T1 = a&c ; MAJB */ - add y2, y0 /* y2 = S1 + CH ; -- */ vpshufd XTMP2, XTMP0, 0b1010000 /* XTMP2 = W[-2] {DDCC} */ - or y3, T1 /* y3 = MAJ = (a|c)&b)|(a&c) ; MAJ */ - add h, y1 /* h = k + w + h + S0 ; -- */ - add d, y2 /* d = k + w + h + d + S1 + CH = d + t1 ; -- */ - add h, y2 /* h = k + w + h + S0 + S1 + CH = t1 + S0; -- */ - - lea h, [h + y3] /* h = t1 + S0 + MAJ ; -- */ - - -ROTATE_ARGS + ONE_ROUND 2*4+\XFER + ROTATE_ARGS /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 3 ;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ - mov y3, a /* y3 = a ; MAJA */ - rorx y0, e, 25 /* y0 = e >> 25 ; S1A */ - rorx y1, e, 11 /* y1 = e >> 11 ; S1B */ - add h, [\XFER+3*4] /* h = k + w + h ; -- */ - or y3, c /* y3 = a|c ; MAJA */ - - vpsrld XTMP5, XTMP2, 10 /* XTMP5 = W[-2] >> 10 {DDCC} */ - mov y2, f /* y2 = f ; CH */ - rorx T1, a, 13 /* T1 = a >> 13 ; S0B */ - xor y0, y1 /* y0 = (e>>25) ^ (e>>11) ; S1 */ - xor y2, g /* y2 = f^g ; CH */ - - vpsrlq XTMP3, XTMP2, 19 /* XTMP3 = W[-2] ror 19 {xDxC} */ - rorx y1, e, 6 /* y1 = (e >> 6) ; S1 */ - and y2, e /* y2 = (f^g)&e ; CH */ - add d, h /* d = k + w + h + d ; -- */ - and y3, b /* y3 = (a|c)&b ; MAJA */ - vpsrlq XTMP2, XTMP2, 17 /* XTMP2 = W[-2] ror 17 {xDxC} */ - xor y0, y1 /* y0 = (e>>25) ^ (e>>11) ^ (e>>6) ; S1 */ - xor y2, g /* y2 = CH = ((f^g)&e)^g ; CH */ - vpxor XTMP2, XTMP2, XTMP3 - rorx y1, a, 22 /* y1 = a >> 22 ; S0A */ - add y2, y0 /* y2 = S1 + CH ; -- */ - vpxor XTMP5, XTMP5, XTMP2 /* XTMP5 = s1 {xDxC} */ - xor y1, T1 /* y1 = (a>>22) ^ (a>>13) ; S0 */ - add d, y2 /* d = k + w + h + d + S1 + CH = d + t1 ; -- */ - - rorx T1, a, 2 /* T1 = (a >> 2) ; S0 */ vpshufb XTMP5, XTMP5, SHUF_DC00 /* XTMP5 = s1 {DC00} */ - vpaddd X0, XTMP5, XTMP0 /* X0 = {W[3], W[2], W[1], W[0]} */ - xor y1, T1 /* y1 = (a>>22) ^ (a>>13) ^ (a>>2) ; S0 */ - mov T1, a /* T1 = a ; MAJB */ - and T1, c /* T1 = a&c ; MAJB */ - or y3, T1 /* y3 = MAJ = (a|c)&b)|(a&c) ; MAJ */ - - add h, y1 /* h = k + w + h + S0 ; -- */ - add h, y2 /* h = k + w + h + S0 + S1 + CH = t1 + S0; -- */ - lea h, [h + y3] /* h = t1 + S0 + MAJ ; -- */ + vpaddd XFER, X0, [TBL + \XFEROUT] -ROTATE_ARGS -rotate_Xs + ONE_ROUND_PART1 3*4+\XFER + vmovdqa [rsp + _XFER + \XFEROUT], XFER + ONE_ROUND_PART2 + ROTATE_ARGS + rotate_Xs .endm .macro DO_4ROUNDS XFER /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 0 ;;;;;;;;;;;;;;;;;;;;;;;;;;; */ - mov y2, f /* y2 = f ; CH */ - rorx y0, e, 25 /* y0 = e >> 25 ; S1A */ - rorx y1, e, 11 /* y1 = e >> 11 ; S1B */ - xor y2, g /* y2 = f^g ; CH */ - - xor y0, y1 /* y0 = (e>>25) ^ (e>>11) ; S1 */ - rorx y1, e, 6 /* y1 = (e >> 6) ; S1 */ - and y2, e /* y2 = (f^g)&e ; CH */ - - xor y0, y1 /* y0 = (e>>25) ^ (e>>11) ^ (e>>6) ; S1 */ - rorx T1, a, 13 /* T1 = a >> 13 ; S0B */ - xor y2, g /* y2 = CH = ((f^g)&e)^g ; CH */ - rorx y1, a, 22 /* y1 = a >> 22 ; S0A */ - mov y3, a /* y3 = a ; MAJA */ - - xor y1, T1 /* y1 = (a>>22) ^ (a>>13) ; S0 */ - rorx T1, a, 2 /* T1 = (a >> 2) ; S0 */ - add h, [\XFER + 4*0] /* h = k + w + h ; -- */ - or y3, c /* y3 = a|c ; MAJA */ - - xor y1, T1 /* y1 = (a>>22) ^ (a>>13) ^ (a>>2) ; S0 */ - mov T1, a /* T1 = a ; MAJB */ - and y3, b /* y3 = (a|c)&b ; MAJA */ - and T1, c /* T1 = a&c ; MAJB */ - add y2, y0 /* y2 = S1 + CH ; -- */ - - - add d, h /* d = k + w + h + d ; -- */ - or y3, T1 /* y3 = MAJ = (a|c)&b)|(a&c) ; MAJ */ - add h, y1 /* h = k + w + h + S0 ; -- */ - - add d, y2 /* d = k + w + h + d + S1 + CH = d + t1 ; -- */ - - - /* add h, y2 ; h = k + w + h + S0 + S1 + CH = t1 + S0; -- */ - - /* lea h, [h + y3] ; h = t1 + S0 + MAJ ; -- */ - + ONE_ROUND 0*4+\XFER ROTATE_ARGS 
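/* Illustrative sketch, not from the patch: a rough C model of the round that
 * ONE_ROUND_PART1/ONE_ROUND_PART2 implement with rorx/andn/lea; ror32 and
 * sha256_round are made-up names, not libgcrypt functions. PART1 adds
 * (k[t]+w[t]) and Ch(e,f,g) into h; PART2 adds Sum1(e), does d += h, then
 * adds Sum0(a) and Maj(a,b,c). Writing Ch and Maj with '+' instead of XOR
 * works because the two terms of each sum are bitwise disjoint, which is
 * what allows the lea/andn forms to replace the old mov/xor/and/or chains.
 *
 *   #include <stdint.h>
 *
 *   static inline uint32_t ror32(uint32_t x, unsigned r)
 *   {
 *     return (x >> r) | (x << (32 - r));
 *   }
 *
 *   static void sha256_round(uint32_t s[8], uint32_t k_plus_w)
 *   {
 *     uint32_t a = s[0], b = s[1], c = s[2], d = s[3];
 *     uint32_t e = s[4], f = s[5], g = s[6], h = s[7];
 *     uint32_t sum1 = ror32(e, 6) ^ ror32(e, 11) ^ ror32(e, 25);
 *     uint32_t ch   = (e & f) + (~e & g);         // '~e & g' is one andn
 *     uint32_t sum0 = ror32(a, 2) ^ ror32(a, 13) ^ ror32(a, 22);
 *     uint32_t maj  = (a & b) + (c & (a ^ b));
 *     h += k_plus_w + ch;                         // ONE_ROUND_PART1
 *     h += sum1;                                  // ONE_ROUND_PART2
 *     d += h;
 *     h += sum0 + maj;
 *     s[0] = h; s[1] = a; s[2] = b; s[3] = c;     // ROTATE_ARGS
 *     s[4] = d; s[5] = e; s[6] = f; s[7] = g;
 *   }
 */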
/* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 1 ;;;;;;;;;;;;;;;;;;;;;;;;;;; */ - add old_h, y2 /* h = k + w + h + S0 + S1 + CH = t1 + S0; -- */ - mov y2, f /* y2 = f ; CH */ - rorx y0, e, 25 /* y0 = e >> 25 ; S1A */ - rorx y1, e, 11 /* y1 = e >> 11 ; S1B */ - xor y2, g /* y2 = f^g ; CH */ - - xor y0, y1 /* y0 = (e>>25) ^ (e>>11) ; S1 */ - rorx y1, e, 6 /* y1 = (e >> 6) ; S1 */ - and y2, e /* y2 = (f^g)&e ; CH */ - add old_h, y3 /* h = t1 + S0 + MAJ ; -- */ - - xor y0, y1 /* y0 = (e>>25) ^ (e>>11) ^ (e>>6) ; S1 */ - rorx T1, a, 13 /* T1 = a >> 13 ; S0B */ - xor y2, g /* y2 = CH = ((f^g)&e)^g ; CH */ - rorx y1, a, 22 /* y1 = a >> 22 ; S0A */ - mov y3, a /* y3 = a ; MAJA */ - - xor y1, T1 /* y1 = (a>>22) ^ (a>>13) ; S0 */ - rorx T1, a, 2 /* T1 = (a >> 2) ; S0 */ - add h, [\XFER + 4*1] /* h = k + w + h ; -- */ - or y3, c /* y3 = a|c ; MAJA */ - - xor y1, T1 /* y1 = (a>>22) ^ (a>>13) ^ (a>>2) ; S0 */ - mov T1, a /* T1 = a ; MAJB */ - and y3, b /* y3 = (a|c)&b ; MAJA */ - and T1, c /* T1 = a&c ; MAJB */ - add y2, y0 /* y2 = S1 + CH ; -- */ - - - add d, h /* d = k + w + h + d ; -- */ - or y3, T1 /* y3 = MAJ = (a|c)&b)|(a&c) ; MAJ */ - add h, y1 /* h = k + w + h + S0 ; -- */ - - add d, y2 /* d = k + w + h + d + S1 + CH = d + t1 ; -- */ - - - /* add h, y2 ; h = k + w + h + S0 + S1 + CH = t1 + S0; -- */ - - /* lea h, [h + y3] ; h = t1 + S0 + MAJ ; -- */ - + ONE_ROUND 1*4+\XFER ROTATE_ARGS /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 2 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ - add old_h, y2 /* h = k + w + h + S0 + S1 + CH = t1 + S0; -- */ - mov y2, f /* y2 = f ; CH */ - rorx y0, e, 25 /* y0 = e >> 25 ; S1A */ - rorx y1, e, 11 /* y1 = e >> 11 ; S1B */ - xor y2, g /* y2 = f^g ; CH */ - - xor y0, y1 /* y0 = (e>>25) ^ (e>>11) ; S1 */ - rorx y1, e, 6 /* y1 = (e >> 6) ; S1 */ - and y2, e /* y2 = (f^g)&e ; CH */ - add old_h, y3 /* h = t1 + S0 + MAJ ; -- */ - - xor y0, y1 /* y0 = (e>>25) ^ (e>>11) ^ (e>>6) ; S1 */ - rorx T1, a, 13 /* T1 = a >> 13 ; S0B */ - xor y2, g /* y2 = CH = ((f^g)&e)^g ; CH */ - rorx y1, a, 22 /* y1 = a >> 22 ; S0A */ - mov y3, a /* y3 = a ; MAJA */ - - xor y1, T1 /* y1 = (a>>22) ^ (a>>13) ; S0 */ - rorx T1, a, 2 /* T1 = (a >> 2) ; S0 */ - add h, [\XFER + 4*2] /* h = k + w + h ; -- */ - or y3, c /* y3 = a|c ; MAJA */ - - xor y1, T1 /* y1 = (a>>22) ^ (a>>13) ^ (a>>2) ; S0 */ - mov T1, a /* T1 = a ; MAJB */ - and y3, b /* y3 = (a|c)&b ; MAJA */ - and T1, c /* T1 = a&c ; MAJB */ - add y2, y0 /* y2 = S1 + CH ; -- */ - - - add d, h /* d = k + w + h + d ; -- */ - or y3, T1 /* y3 = MAJ = (a|c)&b)|(a&c) ; MAJ */ - add h, y1 /* h = k + w + h + S0 ; -- */ - - add d, y2 /* d = k + w + h + d + S1 + CH = d + t1 ; -- */ - - - /* add h, y2 ; h = k + w + h + S0 + S1 + CH = t1 + S0; -- */ - - /* lea h, [h + y3] ; h = t1 + S0 + MAJ ; -- */ - + ONE_ROUND 2*4+\XFER ROTATE_ARGS /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 3 ;;;;;;;;;;;;;;;;;;;;;;;;;;; */ - add old_h, y2 /* h = k + w + h + S0 + S1 + CH = t1 + S0; -- */ - mov y2, f /* y2 = f ; CH */ - rorx y0, e, 25 /* y0 = e >> 25 ; S1A */ - rorx y1, e, 11 /* y1 = e >> 11 ; S1B */ - xor y2, g /* y2 = f^g ; CH */ - - xor y0, y1 /* y0 = (e>>25) ^ (e>>11) ; S1 */ - rorx y1, e, 6 /* y1 = (e >> 6) ; S1 */ - and y2, e /* y2 = (f^g)&e ; CH */ - add old_h, y3 /* h = t1 + S0 + MAJ ; -- */ - - xor y0, y1 /* y0 = (e>>25) ^ (e>>11) ^ (e>>6) ; S1 */ - rorx T1, a, 13 /* T1 = a >> 13 ; S0B */ - xor y2, g /* y2 = CH = ((f^g)&e)^g ; CH */ - rorx y1, a, 22 /* y1 = a >> 22 ; S0A */ - mov y3, a /* y3 = a ; MAJA */ - - xor y1, T1 /* y1 = (a>>22) ^ (a>>13) ; S0 */ - rorx T1, a, 2 /* 
T1 = (a >> 2) ; S0 */ - add h, [\XFER + 4*3] /* h = k + w + h ; -- */ - or y3, c /* y3 = a|c ; MAJA */ - - xor y1, T1 /* y1 = (a>>22) ^ (a>>13) ^ (a>>2) ; S0 */ - mov T1, a /* T1 = a ; MAJB */ - and y3, b /* y3 = (a|c)&b ; MAJA */ - and T1, c /* T1 = a&c ; MAJB */ - add y2, y0 /* y2 = S1 + CH ; -- */ - - - add d, h /* d = k + w + h + d ; -- */ - or y3, T1 /* y3 = MAJ = (a|c)&b)|(a&c) ; MAJ */ - add h, y1 /* h = k + w + h + S0 ; -- */ - - add d, y2 /* d = k + w + h + d + S1 + CH = d + t1 ; -- */ - - - add h, y2 /* h = k + w + h + S0 + S1 + CH = t1 + S0; -- */ - - lea h, [h + y3] /* h = t1 + S0 + MAJ ; -- */ - + ONE_ROUND 3*4+\XFER ROTATE_ARGS .endm /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; void sha256_rorx(void *input_data, UINT32 digest[8], UINT64 num_blks) ;; arg 1 : pointer to input data ;; arg 2 : pointer to digest ;; arg 3 : Num blocks */ .text .globl _gcry_sha256_transform_amd64_avx2 ELF(.type _gcry_sha256_transform_amd64_avx2,@function) .align 32 _gcry_sha256_transform_amd64_avx2: + xor eax, eax + + cmp rdx, 0 + je .Lnowork + push rbx push rbp push r12 push r13 push r14 push r15 vzeroupper + vmovdqa BYTE_FLIP_MASK, [.LPSHUFFLE_BYTE_FLIP_MASK ADD_RIP] + vmovdqa SHUF_00BA, [.L_SHUF_00BA ADD_RIP] + vmovdqa SHUF_DC00, [.L_SHUF_DC00 ADD_RIP] + mov rax, rsp sub rsp, STACK_SIZE - and rsp, -32 + and rsp, ~63 mov [rsp + _RSP], rax shl NUM_BLKS, 6 /* convert to bytes */ - jz .Ldone_hash lea NUM_BLKS, [NUM_BLKS + INP - 64] /* pointer to last block */ mov [rsp + _INP_END], NUM_BLKS - cmp INP, NUM_BLKS - je .Lonly_one_block - /* ; load initial digest */ mov a,[4*0 + CTX] mov b,[4*1 + CTX] mov c,[4*2 + CTX] mov d,[4*3 + CTX] mov e,[4*4 + CTX] mov f,[4*5 + CTX] mov g,[4*6 + CTX] mov h,[4*7 + CTX] - vmovdqa BYTE_FLIP_MASK, [.LPSHUFFLE_BYTE_FLIP_MASK ADD_RIP] - vmovdqa SHUF_00BA, [.L_SHUF_00BA ADD_RIP] - vmovdqa SHUF_DC00, [.L_SHUF_DC00 ADD_RIP] - mov [rsp + _CTX], CTX .Loop0: lea TBL, [.LK256 ADD_RIP] /* ; Load first 16 dwords from two blocks */ VMOVDQ XTMP0, [INP + 0*32] VMOVDQ XTMP1, [INP + 1*32] VMOVDQ XTMP2, [INP + 2*32] VMOVDQ XTMP3, [INP + 3*32] /* ; byte swap data */ vpshufb XTMP0, XTMP0, BYTE_FLIP_MASK vpshufb XTMP1, XTMP1, BYTE_FLIP_MASK vpshufb XTMP2, XTMP2, BYTE_FLIP_MASK vpshufb XTMP3, XTMP3, BYTE_FLIP_MASK /* ; transpose data into high/low halves */ vperm2i128 X0, XTMP0, XTMP2, 0x20 vperm2i128 X1, XTMP0, XTMP2, 0x31 vperm2i128 X2, XTMP1, XTMP3, 0x20 vperm2i128 X3, XTMP1, XTMP3, 0x31 .Last_block_enter: add INP, 64 mov [rsp + _INP], INP /* ; schedule 48 input dwords, by doing 3 rounds of 12 each */ xor SRND, SRND + vpaddd XFER, X0, [TBL + 0*32] + vmovdqa [rsp + _XFER + 0*32], XFER + vpaddd XFER, X1, [TBL + 1*32] + vmovdqa [rsp + _XFER + 1*32], XFER + vpaddd XFER, X2, [TBL + 2*32] + vmovdqa [rsp + _XFER + 2*32], XFER + vpaddd XFER, X3, [TBL + 3*32] + vmovdqa [rsp + _XFER + 3*32], XFER + .align 16 .Loop1: - vpaddd XFER, X0, [TBL + SRND + 0*32] - vmovdqa [rsp + _XFER + SRND + 0*32], XFER - FOUR_ROUNDS_AND_SCHED rsp + _XFER + SRND + 0*32 - - vpaddd XFER, X0, [TBL + SRND + 1*32] - vmovdqa [rsp + _XFER + SRND + 1*32], XFER - FOUR_ROUNDS_AND_SCHED rsp + _XFER + SRND + 1*32 - - vpaddd XFER, X0, [TBL + SRND + 2*32] - vmovdqa [rsp + _XFER + SRND + 2*32], XFER - FOUR_ROUNDS_AND_SCHED rsp + _XFER + SRND + 2*32 - - vpaddd XFER, X0, [TBL + SRND + 3*32] - vmovdqa [rsp + _XFER + SRND + 3*32], XFER - FOUR_ROUNDS_AND_SCHED rsp + _XFER + SRND + 3*32 + FOUR_ROUNDS_AND_SCHED rsp + _XFER + SRND + 0*32, 
SRND + 4*32 + FOUR_ROUNDS_AND_SCHED rsp + _XFER + SRND + 1*32, SRND + 5*32 + FOUR_ROUNDS_AND_SCHED rsp + _XFER + SRND + 2*32, SRND + 6*32 + FOUR_ROUNDS_AND_SCHED rsp + _XFER + SRND + 3*32, SRND + 7*32 add SRND, 4*32 cmp SRND, 3 * 4*32 jb .Loop1 -.Loop2: /* ; Do last 16 rounds with no scheduling */ - vpaddd XFER, X0, [TBL + SRND + 0*32] - vmovdqa [rsp + _XFER + SRND + 0*32], XFER - DO_4ROUNDS rsp + _XFER + SRND + 0*32 - vpaddd XFER, X1, [TBL + SRND + 1*32] - vmovdqa [rsp + _XFER + SRND + 1*32], XFER - DO_4ROUNDS rsp + _XFER + SRND + 1*32 - add SRND, 2*32 - - vmovdqa X0, X2 - vmovdqa X1, X3 - - cmp SRND, 4 * 4*32 - jb .Loop2 + DO_4ROUNDS rsp + _XFER + (3*4*32 + 0*32) + DO_4ROUNDS rsp + _XFER + (3*4*32 + 1*32) + DO_4ROUNDS rsp + _XFER + (3*4*32 + 2*32) + DO_4ROUNDS rsp + _XFER + (3*4*32 + 3*32) mov CTX, [rsp + _CTX] mov INP, [rsp + _INP] addm [4*0 + CTX],a addm [4*1 + CTX],b addm [4*2 + CTX],c addm [4*3 + CTX],d addm [4*4 + CTX],e addm [4*5 + CTX],f addm [4*6 + CTX],g addm [4*7 + CTX],h cmp INP, [rsp + _INP_END] ja .Ldone_hash /* ;;; Do second block using previously scheduled results */ xor SRND, SRND .align 16 .Loop3: DO_4ROUNDS rsp + _XFER + SRND + 0*32 + 16 DO_4ROUNDS rsp + _XFER + SRND + 1*32 + 16 add SRND, 2*32 cmp SRND, 4 * 4*32 jb .Loop3 mov CTX, [rsp + _CTX] mov INP, [rsp + _INP] add INP, 64 addm [4*0 + CTX],a addm [4*1 + CTX],b addm [4*2 + CTX],c addm [4*3 + CTX],d addm [4*4 + CTX],e addm [4*5 + CTX],f addm [4*6 + CTX],g addm [4*7 + CTX],h cmp INP, [rsp + _INP_END] jb .Loop0 ja .Ldone_hash .Ldo_last_block: /* ;;; do last block */ lea TBL, [.LK256 ADD_RIP] VMOVDQ XWORD0, [INP + 0*16] VMOVDQ XWORD1, [INP + 1*16] VMOVDQ XWORD2, [INP + 2*16] VMOVDQ XWORD3, [INP + 3*16] vpshufb XWORD0, XWORD0, X_BYTE_FLIP_MASK vpshufb XWORD1, XWORD1, X_BYTE_FLIP_MASK vpshufb XWORD2, XWORD2, X_BYTE_FLIP_MASK vpshufb XWORD3, XWORD3, X_BYTE_FLIP_MASK jmp .Last_block_enter .Lonly_one_block: /* ; load initial digest */ mov a,[4*0 + CTX] mov b,[4*1 + CTX] mov c,[4*2 + CTX] mov d,[4*3 + CTX] mov e,[4*4 + CTX] mov f,[4*5 + CTX] mov g,[4*6 + CTX] mov h,[4*7 + CTX] vmovdqa BYTE_FLIP_MASK, [.LPSHUFFLE_BYTE_FLIP_MASK ADD_RIP] vmovdqa SHUF_00BA, [.L_SHUF_00BA ADD_RIP] vmovdqa SHUF_DC00, [.L_SHUF_DC00 ADD_RIP] mov [rsp + _CTX], CTX jmp .Ldo_last_block .Ldone_hash: vzeroall /* burn stack */ vmovdqa [rsp + _XFER + 0 * 32], ymm0 vmovdqa [rsp + _XFER + 1 * 32], ymm0 vmovdqa [rsp + _XFER + 2 * 32], ymm0 vmovdqa [rsp + _XFER + 3 * 32], ymm0 vmovdqa [rsp + _XFER + 4 * 32], ymm0 vmovdqa [rsp + _XFER + 5 * 32], ymm0 vmovdqa [rsp + _XFER + 6 * 32], ymm0 vmovdqa [rsp + _XFER + 7 * 32], ymm0 vmovdqa [rsp + _XFER + 8 * 32], ymm0 vmovdqa [rsp + _XFER + 9 * 32], ymm0 vmovdqa [rsp + _XFER + 10 * 32], ymm0 vmovdqa [rsp + _XFER + 11 * 32], ymm0 vmovdqa [rsp + _XFER + 12 * 32], ymm0 vmovdqa [rsp + _XFER + 13 * 32], ymm0 vmovdqa [rsp + _XFER + 14 * 32], ymm0 vmovdqa [rsp + _XFER + 15 * 32], ymm0 xor eax, eax mov rsp, [rsp + _RSP] pop r15 pop r14 pop r13 pop r12 pop rbp pop rbx +.Lnowork: ret .align 64 .LK256: .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc .long 
0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 .LPSHUFFLE_BYTE_FLIP_MASK: .octa 0x0c0d0e0f08090a0b0405060700010203,0x0c0d0e0f08090a0b0405060700010203 /* shuffle xBxA -> 00BA */ .L_SHUF_00BA: .octa 0xFFFFFFFFFFFFFFFF0b0a090803020100,0xFFFFFFFFFFFFFFFF0b0a090803020100 /* shuffle xDxC -> DC00 */ .L_SHUF_DC00: .octa 0x0b0a090803020100FFFFFFFFFFFFFFFF,0x0b0a090803020100FFFFFFFFFFFFFFFF #endif #endif diff --git a/cipher/sha512-avx2-bmi2-amd64.S b/cipher/sha512-avx2-bmi2-amd64.S index 914f920a..32cfceb0 100644 --- a/cipher/sha512-avx2-bmi2-amd64.S +++ b/cipher/sha512-avx2-bmi2-amd64.S @@ -1,793 +1,560 @@ /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; Copyright (c) 2012, Intel Corporation ; ; All rights reserved. ; ; Redistribution and use in source and binary forms, with or without ; modification, are permitted provided that the following conditions are ; met: ; ; * Redistributions of source code must retain the above copyright ; notice, this list of conditions and the following disclaimer. ; ; * Redistributions in binary form must reproduce the above copyright ; notice, this list of conditions and the following disclaimer in the ; documentation and/or other materials provided with the ; distribution. ; ; * Neither the name of the Intel Corporation nor the names of its ; contributors may be used to endorse or promote products derived from ; this software without specific prior written permission. ; ; ; THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY ; EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR ; PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR ; CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, ; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, ; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ; LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; This code schedules 1 blocks at a time, with 4 lanes per block ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ /* * Conversion to GAS assembly and integration to libgcrypt * by Jussi Kivilinna */ #ifdef __x86_64 #include #if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && \ defined(HAVE_INTEL_SYNTAX_PLATFORM_AS) && \ defined(HAVE_GCC_INLINE_ASM_AVX2) && defined(HAVE_GCC_INLINE_ASM_BMI2) && \ defined(USE_SHA512) #ifdef __PIC__ # define ADD_RIP +rip #else # define ADD_RIP #endif #ifdef HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS # define ELF(...) __VA_ARGS__ #else # define ELF(...) /*_*/ #endif .intel_syntax noprefix .text /* Virtual Registers */ Y_0 = ymm4 Y_1 = ymm5 Y_2 = ymm6 Y_3 = ymm7 YTMP0 = ymm0 YTMP1 = ymm1 YTMP2 = ymm2 YTMP3 = ymm3 YTMP4 = ymm8 XFER = YTMP0 BYTE_FLIP_MASK = ymm9 +MASK_YMM_LO = ymm10 +MASK_YMM_LOx = xmm10 INP = rdi /* 1st arg */ CTX = rsi /* 2nd arg */ NUM_BLKS = rdx /* 3rd arg */ c = rcx d = r8 e = rdx y3 = rdi TBL = rbp a = rax b = rbx f = r9 g = r10 h = r11 -old_h = r11 +old_h = rax T1 = r12 y0 = r13 y1 = r14 y2 = r15 y4 = r12 /* Local variables (stack frame) */ #define frame_XFER 0 -#define frame_XFER_size (4*8) +#define frame_XFER_size (4*4*8) #define frame_SRND (frame_XFER + frame_XFER_size) #define frame_SRND_size (1*8) #define frame_INP (frame_SRND + frame_SRND_size) #define frame_INP_size (1*8) -#define frame_INPEND (frame_INP + frame_INP_size) -#define frame_INPEND_size (1*8) -#define frame_RSPSAVE (frame_INPEND + frame_INPEND_size) +#define frame_NBLKS (frame_INP + frame_INP_size) +#define frame_NBLKS_size (1*8) +#define frame_RSPSAVE (frame_NBLKS + frame_NBLKS_size) #define frame_RSPSAVE_size (1*8) #define frame_GPRSAVE (frame_RSPSAVE + frame_RSPSAVE_size) #define frame_GPRSAVE_size (6*8) #define frame_size (frame_GPRSAVE + frame_GPRSAVE_size) #define VMOVDQ vmovdqu /*; assume buffers not aligned */ /* addm [mem], reg */ /* Add reg to mem using reg-mem add and store */ .macro addm p1 p2 add \p2, \p1 mov \p1, \p2 .endm /* COPY_YMM_AND_BSWAP ymm, [mem], byte_flip_mask */ /* Load ymm with mem and byte swap each dword */ .macro COPY_YMM_AND_BSWAP p1 p2 p3 VMOVDQ \p1, \p2 vpshufb \p1, \p1, \p3 .endm /* rotate_Ys */ /* Rotate values of symbols Y0...Y3 */ .macro rotate_Ys __Y_ = Y_0 Y_0 = Y_1 Y_1 = Y_2 Y_2 = Y_3 Y_3 = __Y_ .endm /* RotateState */ .macro RotateState /* Rotate symbles a..h right */ old_h = h __TMP_ = h h = g g = f f = e e = d d = c c = b b = a a = __TMP_ .endm /* %macro MY_VPALIGNR YDST, YSRC1, YSRC2, RVAL */ /* YDST = {YSRC1, YSRC2} >> RVAL*8 */ .macro MY_VPALIGNR YDST, YSRC1, YSRC2, RVAL vperm2f128 \YDST, \YSRC1, \YSRC2, 0x3 /* YDST = {YS1_LO, YS2_HI} */ vpalignr \YDST, \YDST, \YSRC2, \RVAL /* YDST = {YDS1, YS2} >> RVAL*8 */ .endm -.macro FOUR_ROUNDS_AND_SCHED +.macro ONE_ROUND_PART1 XFER + /* h += Sum1 (e) + Ch (e, f, g) + (k[t] + w[0]); + * d += h; + * h += Sum0 (a) + Maj (a, b, c); + * + * Ch(x, y, z) => ((x & y) + (~x & z)) + * Maj(x, y, z) => ((x & y) + (z & (x ^ y))) + */ + + mov y3, e + add h, [\XFER] + and y3, f + rorx y0, e, 41 + rorx y1, e, 18 + lea h, [h + y3] + andn y3, e, g + rorx T1, a, 34 + xor y0, y1 + lea h, [h + y3] +.endm +.macro ONE_ROUND_PART2 + rorx y2, a, 39 + rorx y1, e, 14 + mov y3, a + xor T1, y2 + xor y0, y1 + xor y3, b + lea h, [h + y0] + mov y0, a + rorx y2, a, 28 + add d, h + and y3, c + xor T1, y2 + lea h, [h + y3] + lea h, [h + T1] + and y0, b + lea h, [h + y0] 
+.endm + +.macro ONE_ROUND XFER + ONE_ROUND_PART1 \XFER + ONE_ROUND_PART2 +.endm + +.macro FOUR_ROUNDS_AND_SCHED X /*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 0 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ /* Extract w[t-7] */ MY_VPALIGNR YTMP0, Y_3, Y_2, 8 /* YTMP0 = W[-7] */ /* Calculate w[t-16] + w[t-7] */ vpaddq YTMP0, YTMP0, Y_0 /* YTMP0 = W[-7] + W[-16] */ /* Extract w[t-15] */ MY_VPALIGNR YTMP1, Y_1, Y_0, 8 /* YTMP1 = W[-15] */ /* Calculate sigma0 */ /* Calculate w[t-15] ror 1 */ vpsrlq YTMP2, YTMP1, 1 vpsllq YTMP3, YTMP1, (64-1) vpor YTMP3, YTMP3, YTMP2 /* YTMP3 = W[-15] ror 1 */ /* Calculate w[t-15] shr 7 */ vpsrlq YTMP4, YTMP1, 7 /* YTMP4 = W[-15] >> 7 */ - mov y3, a /* y3 = a ; MAJA */ - rorx y0, e, 41 /* y0 = e >> 41 ; S1A */ - rorx y1, e, 18 /* y1 = e >> 18 ; S1B */ - - add h, [rsp+frame_XFER+0*8] /* h = k + w + h ; -- */ - or y3, c /* y3 = a|c ; MAJA */ - mov y2, f /* y2 = f ; CH */ - rorx T1, a, 34 /* T1 = a >> 34 ; S0B */ - - xor y0, y1 /* y0 = (e>>41) ^ (e>>18) ; S1 */ - xor y2, g /* y2 = f^g ; CH */ - rorx y1, e, 14 /* y1 = (e >> 14) ; S1 */ - - and y2, e /* y2 = (f^g)&e ; CH */ - xor y0, y1 /* y0 = (e>>41) ^ (e>>18) ^ (e>>14) ; S1 */ - rorx y1, a, 39 /* y1 = a >> 39 ; S0A */ - add d, h /* d = k + w + h + d ; -- */ - - and y3, b /* y3 = (a|c)&b ; MAJA */ - xor y1, T1 /* y1 = (a>>39) ^ (a>>34) ; S0 */ - rorx T1, a, 28 /* T1 = (a >> 28) ; S0 */ - - xor y2, g /* y2 = CH = ((f^g)&e)^g ; CH */ - xor y1, T1 /* y1 = (a>>39) ^ (a>>34) ^ (a>>28) ; S0 */ - mov T1, a /* T1 = a ; MAJB */ - and T1, c /* T1 = a&c ; MAJB */ - - add y2, y0 /* y2 = S1 + CH ; -- */ - or y3, T1 /* y3 = MAJ = (a|c)&b)|(a&c) ; MAJ */ - add h, y1 /* h = k + w + h + S0 ; -- */ - - add d, y2 /* d = k + w + h + d + S1 + CH = d + t1 ; -- */ - - add h, y2 /* h = k + w + h + S0 + S1 + CH = t1 + S0 ; -- */ - add h, y3 /* h = t1 + S0 + MAJ ; -- */ - -RotateState + ONE_ROUND rsp+frame_XFER+0*8+\X*32 + RotateState /*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 1 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ /*;;;;;;;;;;;;;;;;;;;;;;;;; */ /* Calculate w[t-15] ror 8 */ vpsrlq YTMP2, YTMP1, 8 vpsllq YTMP1, YTMP1, (64-8) vpor YTMP1, YTMP1, YTMP2 /* YTMP1 = W[-15] ror 8 */ /* XOR the three components */ vpxor YTMP3, YTMP3, YTMP4 /* YTMP3 = W[-15] ror 1 ^ W[-15] >> 7 */ vpxor YTMP1, YTMP3, YTMP1 /* YTMP1 = s0 */ /* Add three components, w[t-16], w[t-7] and sigma0 */ vpaddq YTMP0, YTMP0, YTMP1 /* YTMP0 = W[-16] + W[-7] + s0 */ /* Move to appropriate lanes for calculating w[16] and w[17] */ vperm2f128 Y_0, YTMP0, YTMP0, 0x0 /* Y_0 = W[-16] + W[-7] + s0 {BABA} */ /* Move to appropriate lanes for calculating w[18] and w[19] */ - vpand YTMP0, YTMP0, [.LMASK_YMM_LO ADD_RIP] /* YTMP0 = W[-16] + W[-7] + s0 {DC00} */ + vpand YTMP0, YTMP0, MASK_YMM_LO /* YTMP0 = W[-16] + W[-7] + s0 {DC00} */ /* Calculate w[16] and w[17] in both 128 bit lanes */ /* Calculate sigma1 for w[16] and w[17] on both 128 bit lanes */ vperm2f128 YTMP2, Y_3, Y_3, 0x11 /* YTMP2 = W[-2] {BABA} */ vpsrlq YTMP4, YTMP2, 6 /* YTMP4 = W[-2] >> 6 {BABA} */ - - mov y3, a /* y3 = a ; MAJA */ - rorx y0, e, 41 /* y0 = e >> 41 ; S1A */ - rorx y1, e, 18 /* y1 = e >> 18 ; S1B */ - add h, [rsp+frame_XFER+1*8] /* h = k + w + h ; -- */ - or y3, c /* y3 = a|c ; MAJA */ - - - mov y2, f /* y2 = f ; CH */ - rorx T1, a, 34 /* T1 = a >> 34 ; S0B */ - xor y0, y1 /* y0 = (e>>41) ^ (e>>18) ; S1 */ - xor y2, g /* y2 = f^g ; CH */ - - - rorx y1, e, 14 /* y1 = (e >> 14) ; S1 */ - xor y0, y1 /* y0 = (e>>41) ^ (e>>18) ^ (e>>14) ; S1 */ - rorx y1, a, 39 /* y1 = a >> 39 ; S0A */ - and y2, e /* y2 = 
(f^g)&e ; CH */ - add d, h /* d = k + w + h + d ; -- */ - - and y3, b /* y3 = (a|c)&b ; MAJA */ - xor y1, T1 /* y1 = (a>>39) ^ (a>>34) ; S0 */ - - rorx T1, a, 28 /* T1 = (a >> 28) ; S0 */ - xor y2, g /* y2 = CH = ((f^g)&e)^g ; CH */ - - xor y1, T1 /* y1 = (a>>39) ^ (a>>34) ^ (a>>28) ; S0 */ - mov T1, a /* T1 = a ; MAJB */ - and T1, c /* T1 = a&c ; MAJB */ - add y2, y0 /* y2 = S1 + CH ; -- */ - - or y3, T1 /* y3 = MAJ = (a|c)&b)|(a&c) ; MAJ */ - add h, y1 /* h = k + w + h + S0 ; -- */ - - add d, y2 /* d = k + w + h + d + S1 + CH = d + t1 ; -- */ - add h, y2 /* h = k + w + h + S0 + S1 + CH = t1 + S0 ; -- */ - add h, y3 /* h = t1 + S0 + MAJ ; -- */ - -RotateState - - - + ONE_ROUND rsp+frame_XFER+1*8+\X*32 + RotateState /*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 2 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ /*;;;;;;;;;;;;;;;;;;;;;;;;; */ vpsrlq YTMP3, YTMP2, 19 /* YTMP3 = W[-2] >> 19 {BABA} */ vpsllq YTMP1, YTMP2, (64-19) /* YTMP1 = W[-2] << 19 {BABA} */ vpor YTMP3, YTMP3, YTMP1 /* YTMP3 = W[-2] ror 19 {BABA} */ vpxor YTMP4, YTMP4, YTMP3 /* YTMP4 = W[-2] ror 19 ^ W[-2] >> 6 {BABA} */ vpsrlq YTMP3, YTMP2, 61 /* YTMP3 = W[-2] >> 61 {BABA} */ vpsllq YTMP1, YTMP2, (64-61) /* YTMP1 = W[-2] << 61 {BABA} */ vpor YTMP3, YTMP3, YTMP1 /* YTMP3 = W[-2] ror 61 {BABA} */ vpxor YTMP4, YTMP4, YTMP3 /* YTMP4 = s1 = (W[-2] ror 19) ^ (W[-2] ror 61) ^ (W[-2] >> 6) {BABA} */ /* Add sigma1 to the other compunents to get w[16] and w[17] */ vpaddq Y_0, Y_0, YTMP4 /* Y_0 = {W[1], W[0], W[1], W[0]} */ /* Calculate sigma1 for w[18] and w[19] for upper 128 bit lane */ vpsrlq YTMP4, Y_0, 6 /* YTMP4 = W[-2] >> 6 {DC--} */ - mov y3, a /* y3 = a ; MAJA */ - rorx y0, e, 41 /* y0 = e >> 41 ; S1A */ - add h, [rsp+frame_XFER+2*8] /* h = k + w + h ; -- */ - - rorx y1, e, 18 /* y1 = e >> 18 ; S1B */ - or y3, c /* y3 = a|c ; MAJA */ - mov y2, f /* y2 = f ; CH */ - xor y2, g /* y2 = f^g ; CH */ - - rorx T1, a, 34 /* T1 = a >> 34 ; S0B */ - xor y0, y1 /* y0 = (e>>41) ^ (e>>18) ; S1 */ - and y2, e /* y2 = (f^g)&e ; CH */ - - rorx y1, e, 14 /* y1 = (e >> 14) ; S1 */ - add d, h /* d = k + w + h + d ; -- */ - and y3, b /* y3 = (a|c)&b ; MAJA */ - - xor y0, y1 /* y0 = (e>>41) ^ (e>>18) ^ (e>>14) ; S1 */ - rorx y1, a, 39 /* y1 = a >> 39 ; S0A */ - xor y2, g /* y2 = CH = ((f^g)&e)^g ; CH */ - - xor y1, T1 /* y1 = (a>>39) ^ (a>>34) ; S0 */ - rorx T1, a, 28 /* T1 = (a >> 28) ; S0 */ - - xor y1, T1 /* y1 = (a>>39) ^ (a>>34) ^ (a>>28) ; S0 */ - mov T1, a /* T1 = a ; MAJB */ - and T1, c /* T1 = a&c ; MAJB */ - add y2, y0 /* y2 = S1 + CH ; -- */ - - or y3, T1 /* y3 = MAJ = (a|c)&b)|(a&c) ; MAJ */ - add h, y1 /* h = k + w + h + S0 ; -- */ - add d, y2 /* d = k + w + h + d + S1 + CH = d + t1 ; -- */ - add h, y2 /* h = k + w + h + S0 + S1 + CH = t1 + S0 ; -- */ - - add h, y3 /* h = t1 + S0 + MAJ ; -- */ - -RotateState + ONE_ROUND rsp+frame_XFER+2*8+\X*32 + RotateState /*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 3 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ /*;;;;;;;;;;;;;;;;;;;;;;;;; */ vpsrlq YTMP3, Y_0, 19 /* YTMP3 = W[-2] >> 19 {DC--} */ vpsllq YTMP1, Y_0, (64-19) /* YTMP1 = W[-2] << 19 {DC--} */ vpor YTMP3, YTMP3, YTMP1 /* YTMP3 = W[-2] ror 19 {DC--} */ vpxor YTMP4, YTMP4, YTMP3 /* YTMP4 = W[-2] ror 19 ^ W[-2] >> 6 {DC--} */ vpsrlq YTMP3, Y_0, 61 /* YTMP3 = W[-2] >> 61 {DC--} */ vpsllq YTMP1, Y_0, (64-61) /* YTMP1 = W[-2] << 61 {DC--} */ vpor YTMP3, YTMP3, YTMP1 /* YTMP3 = W[-2] ror 61 {DC--} */ vpxor YTMP4, YTMP4, YTMP3 /* YTMP4 = s1 = (W[-2] ror 19) ^ (W[-2] ror 61) ^ (W[-2] >> 6) {DC--} */ /* Add the sigma0 + w[t-7] + w[t-16] for w[18] 
and w[19] to newly calculated sigma1 to get w[18] and w[19] */ vpaddq YTMP2, YTMP0, YTMP4 /* YTMP2 = {W[3], W[2], --, --} */ /* Form w[19, w[18], w17], w[16] */ vpblendd Y_0, Y_0, YTMP2, 0xF0 /* Y_0 = {W[3], W[2], W[1], W[0]} */ -/* vperm2f128 Y_0, Y_0, YTMP2, 0x30 */ - - mov y3, a /* y3 = a ; MAJA */ - rorx y0, e, 41 /* y0 = e >> 41 ; S1A */ - rorx y1, e, 18 /* y1 = e >> 18 ; S1B */ - add h, [rsp+frame_XFER+3*8] /* h = k + w + h ; -- */ - or y3, c /* y3 = a|c ; MAJA */ - - - mov y2, f /* y2 = f ; CH */ - rorx T1, a, 34 /* T1 = a >> 34 ; S0B */ - xor y0, y1 /* y0 = (e>>41) ^ (e>>18) ; S1 */ - xor y2, g /* y2 = f^g ; CH */ - - rorx y1, e, 14 /* y1 = (e >> 14) ; S1 */ - and y2, e /* y2 = (f^g)&e ; CH */ - add d, h /* d = k + w + h + d ; -- */ - and y3, b /* y3 = (a|c)&b ; MAJA */ - - xor y0, y1 /* y0 = (e>>41) ^ (e>>18) ^ (e>>14) ; S1 */ - xor y2, g /* y2 = CH = ((f^g)&e)^g ; CH */ - - rorx y1, a, 39 /* y1 = a >> 39 ; S0A */ - add y2, y0 /* y2 = S1 + CH ; -- */ - - xor y1, T1 /* y1 = (a>>39) ^ (a>>34) ; S0 */ - add d, y2 /* d = k + w + h + d + S1 + CH = d + t1 ; -- */ - - rorx T1, a, 28 /* T1 = (a >> 28) ; S0 */ - - xor y1, T1 /* y1 = (a>>39) ^ (a>>34) ^ (a>>28) ; S0 */ - mov T1, a /* T1 = a ; MAJB */ - and T1, c /* T1 = a&c ; MAJB */ - or y3, T1 /* y3 = MAJ = (a|c)&b)|(a&c) ; MAJ */ - - add h, y1 /* h = k + w + h + S0 ; -- */ - add h, y2 /* h = k + w + h + S0 + S1 + CH = t1 + S0 ; -- */ - add h, y3 /* h = t1 + S0 + MAJ ; -- */ - -RotateState - -rotate_Ys + ONE_ROUND_PART1 rsp+frame_XFER+3*8+\X*32 + vpaddq XFER, Y_0, [TBL + (4+\X)*32] + vmovdqa [rsp + frame_XFER + \X*32], XFER + ONE_ROUND_PART2 + RotateState + rotate_Ys .endm -.macro DO_4ROUNDS +.macro DO_4ROUNDS X /*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 0 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ - mov y2, f /* y2 = f ; CH */ - rorx y0, e, 41 /* y0 = e >> 41 ; S1A */ - rorx y1, e, 18 /* y1 = e >> 18 ; S1B */ - xor y2, g /* y2 = f^g ; CH */ - - xor y0, y1 /* y0 = (e>>41) ^ (e>>18) ; S1 */ - rorx y1, e, 14 /* y1 = (e >> 14) ; S1 */ - and y2, e /* y2 = (f^g)&e ; CH */ - - xor y0, y1 /* y0 = (e>>41) ^ (e>>18) ^ (e>>14) ; S1 */ - rorx T1, a, 34 /* T1 = a >> 34 ; S0B */ - xor y2, g /* y2 = CH = ((f^g)&e)^g ; CH */ - rorx y1, a, 39 /* y1 = a >> 39 ; S0A */ - mov y3, a /* y3 = a ; MAJA */ - - xor y1, T1 /* y1 = (a>>39) ^ (a>>34) ; S0 */ - rorx T1, a, 28 /* T1 = (a >> 28) ; S0 */ - add h, [rsp + frame_XFER + 8*0] /* h = k + w + h ; -- */ - or y3, c /* y3 = a|c ; MAJA */ - - xor y1, T1 /* y1 = (a>>39) ^ (a>>34) ^ (a>>28) ; S0 */ - mov T1, a /* T1 = a ; MAJB */ - and y3, b /* y3 = (a|c)&b ; MAJA */ - and T1, c /* T1 = a&c ; MAJB */ - add y2, y0 /* y2 = S1 + CH ; -- */ - - - add d, h /* d = k + w + h + d ; -- */ - or y3, T1 /* y3 = MAJ = (a|c)&b)|(a&c) ; MAJ */ - add h, y1 /* h = k + w + h + S0 ; -- */ - - add d, y2 /* d = k + w + h + d + S1 + CH = d + t1 ; -- */ - - - /*add h, y2 ; h = k + w + h + S0 + S1 + CH = t1 + S0 ; -- */ - - /*add h, y3 ; h = t1 + S0 + MAJ ; -- */ - + ONE_ROUND rsp+frame_XFER+0*8+\X*32 RotateState /*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 1 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ - add old_h, y2 /* h = k + w + h + S0 + S1 + CH = t1 + S0 ; -- */ - mov y2, f /* y2 = f ; CH */ - rorx y0, e, 41 /* y0 = e >> 41 ; S1A */ - rorx y1, e, 18 /* y1 = e >> 18 ; S1B */ - xor y2, g /* y2 = f^g ; CH */ - - xor y0, y1 /* y0 = (e>>41) ^ (e>>18) ; S1 */ - rorx y1, e, 14 /* y1 = (e >> 14) ; S1 */ - and y2, e /* y2 = (f^g)&e ; CH */ - add old_h, y3 /* h = t1 + S0 + MAJ ; -- */ - - xor y0, y1 /* y0 = (e>>41) ^ (e>>18) ^ (e>>14) ; S1 
*/ - rorx T1, a, 34 /* T1 = a >> 34 ; S0B */ - xor y2, g /* y2 = CH = ((f^g)&e)^g ; CH */ - rorx y1, a, 39 /* y1 = a >> 39 ; S0A */ - mov y3, a /* y3 = a ; MAJA */ - - xor y1, T1 /* y1 = (a>>39) ^ (a>>34) ; S0 */ - rorx T1, a, 28 /* T1 = (a >> 28) ; S0 */ - add h, [rsp + frame_XFER + 8*1] /* h = k + w + h ; -- */ - or y3, c /* y3 = a|c ; MAJA */ - - xor y1, T1 /* y1 = (a>>39) ^ (a>>34) ^ (a>>28) ; S0 */ - mov T1, a /* T1 = a ; MAJB */ - and y3, b /* y3 = (a|c)&b ; MAJA */ - and T1, c /* T1 = a&c ; MAJB */ - add y2, y0 /* y2 = S1 + CH ; -- */ - - - add d, h /* d = k + w + h + d ; -- */ - or y3, T1 /* y3 = MAJ = (a|c)&b)|(a&c) ; MAJ */ - add h, y1 /* h = k + w + h + S0 ; -- */ - - add d, y2 /* d = k + w + h + d + S1 + CH = d + t1 ; -- */ - - - /*add h, y2 ; h = k + w + h + S0 + S1 + CH = t1 + S0 ; -- */ - - /*add h, y3 ; h = t1 + S0 + MAJ ; -- */ - + ONE_ROUND rsp+frame_XFER+1*8+\X*32 RotateState /*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 2 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ - add old_h, y2 /* h = k + w + h + S0 + S1 + CH = t1 + S0 ; -- */ - mov y2, f /* y2 = f ; CH */ - rorx y0, e, 41 /* y0 = e >> 41 ; S1A */ - rorx y1, e, 18 /* y1 = e >> 18 ; S1B */ - xor y2, g /* y2 = f^g ; CH */ - - xor y0, y1 /* y0 = (e>>41) ^ (e>>18) ; S1 */ - rorx y1, e, 14 /* y1 = (e >> 14) ; S1 */ - and y2, e /* y2 = (f^g)&e ; CH */ - add old_h, y3 /* h = t1 + S0 + MAJ ; -- */ - - xor y0, y1 /* y0 = (e>>41) ^ (e>>18) ^ (e>>14) ; S1 */ - rorx T1, a, 34 /* T1 = a >> 34 ; S0B */ - xor y2, g /* y2 = CH = ((f^g)&e)^g ; CH */ - rorx y1, a, 39 /* y1 = a >> 39 ; S0A */ - mov y3, a /* y3 = a ; MAJA */ - - xor y1, T1 /* y1 = (a>>39) ^ (a>>34) ; S0 */ - rorx T1, a, 28 /* T1 = (a >> 28) ; S0 */ - add h, [rsp + frame_XFER + 8*2] /* h = k + w + h ; -- */ - or y3, c /* y3 = a|c ; MAJA */ - - xor y1, T1 /* y1 = (a>>39) ^ (a>>34) ^ (a>>28) ; S0 */ - mov T1, a /* T1 = a ; MAJB */ - and y3, b /* y3 = (a|c)&b ; MAJA */ - and T1, c /* T1 = a&c ; MAJB */ - add y2, y0 /* y2 = S1 + CH ; -- */ - - - add d, h /* d = k + w + h + d ; -- */ - or y3, T1 /* y3 = MAJ = (a|c)&b)|(a&c) ; MAJ */ - add h, y1 /* h = k + w + h + S0 ; -- */ - - add d, y2 /* d = k + w + h + d + S1 + CH = d + t1 ; -- */ - - - /*add h, y2 ; h = k + w + h + S0 + S1 + CH = t1 + S0 ; -- */ - - /*add h, y3 ; h = t1 + S0 + MAJ ; -- */ - + ONE_ROUND rsp+frame_XFER+2*8+\X*32 RotateState /*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 3 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ - add old_h, y2 /* h = k + w + h + S0 + S1 + CH = t1 + S0 ; -- */ - mov y2, f /* y2 = f ; CH */ - rorx y0, e, 41 /* y0 = e >> 41 ; S1A */ - rorx y1, e, 18 /* y1 = e >> 18 ; S1B */ - xor y2, g /* y2 = f^g ; CH */ - - xor y0, y1 /* y0 = (e>>41) ^ (e>>18) ; S1 */ - rorx y1, e, 14 /* y1 = (e >> 14) ; S1 */ - and y2, e /* y2 = (f^g)&e ; CH */ - add old_h, y3 /* h = t1 + S0 + MAJ ; -- */ - - xor y0, y1 /* y0 = (e>>41) ^ (e>>18) ^ (e>>14) ; S1 */ - rorx T1, a, 34 /* T1 = a >> 34 ; S0B */ - xor y2, g /* y2 = CH = ((f^g)&e)^g ; CH */ - rorx y1, a, 39 /* y1 = a >> 39 ; S0A */ - mov y3, a /* y3 = a ; MAJA */ - - xor y1, T1 /* y1 = (a>>39) ^ (a>>34) ; S0 */ - rorx T1, a, 28 /* T1 = (a >> 28) ; S0 */ - add h, [rsp + frame_XFER + 8*3] /* h = k + w + h ; -- */ - or y3, c /* y3 = a|c ; MAJA */ - - xor y1, T1 /* y1 = (a>>39) ^ (a>>34) ^ (a>>28) ; S0 */ - mov T1, a /* T1 = a ; MAJB */ - and y3, b /* y3 = (a|c)&b ; MAJA */ - and T1, c /* T1 = a&c ; MAJB */ - add y2, y0 /* y2 = S1 + CH ; -- */ - - - add d, h /* d = k + w + h + d ; -- */ - or y3, T1 /* y3 = MAJ = (a|c)&b)|(a&c) ; MAJ */ - add h, y1 /* h = k + w + h + 
S0 ; -- */ - - add d, y2 /* d = k + w + h + d + S1 + CH = d + t1 ; -- */ - - - add h, y2 /* h = k + w + h + S0 + S1 + CH = t1 + S0 ; -- */ - - add h, y3 /* h = t1 + S0 + MAJ ; -- */ - + ONE_ROUND rsp+frame_XFER+3*8+\X*32 RotateState .endm /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; void sha512_rorx(const void* M, void* D, uint64_t L); ; Purpose: Updates the SHA512 digest stored at D with the message stored in M. ; The size of the message pointed to by M must be an integer multiple of SHA512 ; message blocks. ; L is the message length in SHA512 blocks */ .globl _gcry_sha512_transform_amd64_avx2 ELF(.type _gcry_sha512_transform_amd64_avx2,@function;) .align 16 _gcry_sha512_transform_amd64_avx2: xor eax, eax cmp rdx, 0 je .Lnowork vzeroupper /* Allocate Stack Space */ mov rax, rsp sub rsp, frame_size - and rsp, ~(0x20 - 1) + and rsp, ~(0x40 - 1) mov [rsp + frame_RSPSAVE], rax /* Save GPRs */ mov [rsp + frame_GPRSAVE + 8 * 0], rbp mov [rsp + frame_GPRSAVE + 8 * 1], rbx mov [rsp + frame_GPRSAVE + 8 * 2], r12 mov [rsp + frame_GPRSAVE + 8 * 3], r13 mov [rsp + frame_GPRSAVE + 8 * 4], r14 mov [rsp + frame_GPRSAVE + 8 * 5], r15 - vpblendd xmm0, xmm0, xmm1, 0xf0 - vpblendd ymm0, ymm0, ymm1, 0xf0 - - shl NUM_BLKS, 7 /* convert to bytes */ - jz .Ldone_hash - add NUM_BLKS, INP /* pointer to end of data */ - mov [rsp + frame_INPEND], NUM_BLKS + mov [rsp + frame_NBLKS], NUM_BLKS /*; load initial digest */ mov a,[8*0 + CTX] mov b,[8*1 + CTX] mov c,[8*2 + CTX] mov d,[8*3 + CTX] mov e,[8*4 + CTX] mov f,[8*5 + CTX] mov g,[8*6 + CTX] mov h,[8*7 + CTX] vmovdqa BYTE_FLIP_MASK, [.LPSHUFFLE_BYTE_FLIP_MASK ADD_RIP] + vmovdqa MASK_YMM_LO, [.LMASK_YMM_LO ADD_RIP] -.Loop0: lea TBL,[.LK512 ADD_RIP] /*; byte swap first 16 dwords */ COPY_YMM_AND_BSWAP Y_0, [INP + 0*32], BYTE_FLIP_MASK COPY_YMM_AND_BSWAP Y_1, [INP + 1*32], BYTE_FLIP_MASK COPY_YMM_AND_BSWAP Y_2, [INP + 2*32], BYTE_FLIP_MASK COPY_YMM_AND_BSWAP Y_3, [INP + 3*32], BYTE_FLIP_MASK + add INP, 128 mov [rsp + frame_INP], INP + vpaddq XFER, Y_0, [TBL + 0*32] + vmovdqa [rsp + frame_XFER + 0*32], XFER + vpaddq XFER, Y_1, [TBL + 1*32] + vmovdqa [rsp + frame_XFER + 1*32], XFER + vpaddq XFER, Y_2, [TBL + 2*32] + vmovdqa [rsp + frame_XFER + 2*32], XFER + vpaddq XFER, Y_3, [TBL + 3*32] + vmovdqa [rsp + frame_XFER + 3*32], XFER + /*; schedule 64 input dwords, by doing 12 rounds of 4 each */ movq [rsp + frame_SRND],4 .align 16 -.Loop1: - vpaddq XFER, Y_0, [TBL + 0*32] - vmovdqa [rsp + frame_XFER], XFER - FOUR_ROUNDS_AND_SCHED +.Loop0: + FOUR_ROUNDS_AND_SCHED 0 + FOUR_ROUNDS_AND_SCHED 1 + FOUR_ROUNDS_AND_SCHED 2 + FOUR_ROUNDS_AND_SCHED 3 + add TBL, 4*32 - vpaddq XFER, Y_0, [TBL + 1*32] - vmovdqa [rsp + frame_XFER], XFER - FOUR_ROUNDS_AND_SCHED + subq [rsp + frame_SRND], 1 + jne .Loop0 - vpaddq XFER, Y_0, [TBL + 2*32] - vmovdqa [rsp + frame_XFER], XFER - FOUR_ROUNDS_AND_SCHED + subq [rsp + frame_NBLKS], 1 + je .Ldone_hash - vpaddq XFER, Y_0, [TBL + 3*32] - vmovdqa [rsp + frame_XFER], XFER - add TBL, 4*32 - FOUR_ROUNDS_AND_SCHED + mov INP, [rsp + frame_INP] - subq [rsp + frame_SRND], 1 - jne .Loop1 + lea TBL,[.LK512 ADD_RIP] - movq [rsp + frame_SRND], 2 -.Loop2: - vpaddq XFER, Y_0, [TBL + 0*32] - vmovdqa [rsp + frame_XFER], XFER - DO_4ROUNDS - vpaddq XFER, Y_1, [TBL + 1*32] - vmovdqa [rsp + frame_XFER], XFER - add TBL, 2*32 - DO_4ROUNDS + /* load next block and byte swap */ + COPY_YMM_AND_BSWAP Y_0, [INP + 0*32], BYTE_FLIP_MASK + COPY_YMM_AND_BSWAP Y_1, [INP + 1*32], 
BYTE_FLIP_MASK + COPY_YMM_AND_BSWAP Y_2, [INP + 2*32], BYTE_FLIP_MASK + COPY_YMM_AND_BSWAP Y_3, [INP + 3*32], BYTE_FLIP_MASK - vmovdqa Y_0, Y_2 - vmovdqa Y_1, Y_3 + add INP, 128 + mov [rsp + frame_INP], INP - subq [rsp + frame_SRND], 1 - jne .Loop2 + DO_4ROUNDS 0 + vpaddq XFER, Y_0, [TBL + 0*32] + vmovdqa [rsp + frame_XFER + 0*32], XFER + DO_4ROUNDS 1 + vpaddq XFER, Y_1, [TBL + 1*32] + vmovdqa [rsp + frame_XFER + 1*32], XFER + DO_4ROUNDS 2 + vpaddq XFER, Y_2, [TBL + 2*32] + vmovdqa [rsp + frame_XFER + 2*32], XFER + DO_4ROUNDS 3 + vpaddq XFER, Y_3, [TBL + 3*32] + vmovdqa [rsp + frame_XFER + 3*32], XFER addm [8*0 + CTX],a addm [8*1 + CTX],b addm [8*2 + CTX],c addm [8*3 + CTX],d addm [8*4 + CTX],e addm [8*5 + CTX],f addm [8*6 + CTX],g addm [8*7 + CTX],h - mov INP, [rsp + frame_INP] - add INP, 128 - cmp INP, [rsp + frame_INPEND] - jne .Loop0 + /*; schedule 64 input dwords, by doing 12 rounds of 4 each */ + movq [rsp + frame_SRND],4 + + jmp .Loop0 .Ldone_hash: vzeroall + DO_4ROUNDS 0 + vmovdqa [rsp + frame_XFER + 0*32], ymm0 /* burn stack */ + DO_4ROUNDS 1 + vmovdqa [rsp + frame_XFER + 1*32], ymm0 /* burn stack */ + DO_4ROUNDS 2 + vmovdqa [rsp + frame_XFER + 2*32], ymm0 /* burn stack */ + DO_4ROUNDS 3 + vmovdqa [rsp + frame_XFER + 3*32], ymm0 /* burn stack */ + + addm [8*0 + CTX],a + xor eax, eax /* burn stack */ + addm [8*1 + CTX],b + addm [8*2 + CTX],c + addm [8*3 + CTX],d + addm [8*4 + CTX],e + addm [8*5 + CTX],f + addm [8*6 + CTX],g + addm [8*7 + CTX],h + /* Restore GPRs */ mov rbp, [rsp + frame_GPRSAVE + 8 * 0] mov rbx, [rsp + frame_GPRSAVE + 8 * 1] mov r12, [rsp + frame_GPRSAVE + 8 * 2] mov r13, [rsp + frame_GPRSAVE + 8 * 3] mov r14, [rsp + frame_GPRSAVE + 8 * 4] mov r15, [rsp + frame_GPRSAVE + 8 * 5] - /* Burn stack */ - vmovdqa [rsp + frame_XFER], XFER - xor eax, eax - /* Restore Stack Pointer */ mov rsp, [rsp + frame_RSPSAVE] .Lnowork: ret /*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ /*;; Binary Data */ .align 64 /* K[t] used in SHA512 hashing */ .LK512: .quad 0x428a2f98d728ae22,0x7137449123ef65cd .quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc .quad 0x3956c25bf348b538,0x59f111f1b605d019 .quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118 .quad 0xd807aa98a3030242,0x12835b0145706fbe .quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2 .quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1 .quad 0x9bdc06a725c71235,0xc19bf174cf692694 .quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3 .quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65 .quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483 .quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5 .quad 0x983e5152ee66dfab,0xa831c66d2db43210 .quad 0xb00327c898fb213f,0xbf597fc7beef0ee4 .quad 0xc6e00bf33da88fc2,0xd5a79147930aa725 .quad 0x06ca6351e003826f,0x142929670a0e6e70 .quad 0x27b70a8546d22ffc,0x2e1b21385c26c926 .quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df .quad 0x650a73548baf63de,0x766a0abb3c77b2a8 .quad 0x81c2c92e47edaee6,0x92722c851482353b .quad 0xa2bfe8a14cf10364,0xa81a664bbc423001 .quad 0xc24b8b70d0f89791,0xc76c51a30654be30 .quad 0xd192e819d6ef5218,0xd69906245565a910 .quad 0xf40e35855771202a,0x106aa07032bbd1b8 .quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53 .quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8 .quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb .quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3 .quad 0x748f82ee5defb2fc,0x78a5636f43172f60 .quad 0x84c87814a1f0ab72,0x8cc702081a6439ec .quad 0x90befffa23631e28,0xa4506cebde82bde9 .quad 0xbef9a3f7b2c67915,0xc67178f2e372532b .quad 0xca273eceea26619c,0xd186b8c721c0c207 .quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178 .quad 
0x06f067aa72176fba,0x0a637dc5a2c898a6 .quad 0x113f9804bef90dae,0x1b710b35131c471b .quad 0x28db77f523047d84,0x32caab7b40c72493 .quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c .quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a .quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817 .align 32 /* Mask for byte-swapping a couple of qwords in an XMM register using (v)pshufb. */ .LPSHUFFLE_BYTE_FLIP_MASK: .octa 0x08090a0b0c0d0e0f0001020304050607 .octa 0x18191a1b1c1d1e1f1011121314151617 .LMASK_YMM_LO: .octa 0x00000000000000000000000000000000 .octa 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF #endif #endif
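For reference when reading the SHA-512 FOUR_ROUNDS_AND_SCHED changes above: each invocation now both runs four rounds (the same ONE_ROUND_PART1/ONE_ROUND_PART2 split as in the SHA-256 file, with rotate counts 14/18/41 for Sum1 and 28/34/39 for Sum0) and produces the next four message-schedule words, storing them pre-added with the round constants into the frame_XFER slots. A rough scalar C sketch of the schedule recurrence that the vector code computes is below; ror64 and sha512_next_w are illustrative names only, not libgcrypt code, and w[] is assumed to hold the 16 most recent schedule words with t >= 16.

  #include <stdint.h>

  static inline uint64_t ror64(uint64_t x, unsigned r)
  {
    return (x >> r) | (x << (64 - r));
  }

  /* W[t] = W[t-16] + sigma0(W[t-15]) + W[t-7] + sigma1(W[t-2]);
   * sigma0 = ror 1 ^ ror 8 ^ shr 7, sigma1 = ror 19 ^ ror 61 ^ shr 6,
   * matching the vpsrlq/vpsllq/vpor sequences in FOUR_ROUNDS_AND_SCHED. */
  static uint64_t sha512_next_w(uint64_t w[16], unsigned t)
  {
    uint64_t w15 = w[(t - 15) % 16];
    uint64_t w2  = w[(t - 2) % 16];
    uint64_t s0  = ror64(w15, 1) ^ ror64(w15, 8) ^ (w15 >> 7);
    uint64_t s1  = ror64(w2, 19) ^ ror64(w2, 61) ^ (w2 >> 6);
    w[t % 16] += s0 + w[(t - 7) % 16] + s1;   /* slot t%16 held W[t-16] */
    return w[t % 16];
  }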