diff --git a/cipher/camellia-aesni-avx-amd64.S b/cipher/camellia-aesni-avx-amd64.S index 5a3a3cbc..8022934f 100644 --- a/cipher/camellia-aesni-avx-amd64.S +++ b/cipher/camellia-aesni-avx-amd64.S @@ -1,2594 +1,2591 @@ /* camellia-avx-aesni-amd64.S - AES-NI/AVX implementation of Camellia cipher * * Copyright (C) 2013-2015 Jussi Kivilinna * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . */ #ifdef __x86_64 #include #if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && \ defined(ENABLE_AESNI_SUPPORT) && defined(ENABLE_AVX_SUPPORT) #ifdef __PIC__ # define RIP (%rip) #else # define RIP #endif #ifdef HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS # define ELF(...) __VA_ARGS__ #else # define ELF(...) /*_*/ #endif #define CAMELLIA_TABLE_BYTE_LEN 272 /* struct CAMELLIA_context: */ #define key_table 0 #define key_bitlength CAMELLIA_TABLE_BYTE_LEN /* register macros */ #define CTX %rdi #define RIO %r8 /********************************************************************** helper macros **********************************************************************/ #define filter_8bit(x, lo_t, hi_t, mask4bit, tmp0) \ vpand x, mask4bit, tmp0; \ vpandn x, mask4bit, x; \ vpsrld $4, x, x; \ \ vpshufb tmp0, lo_t, tmp0; \ vpshufb x, hi_t, x; \ vpxor tmp0, x, x; /********************************************************************** 16-way camellia **********************************************************************/ /* * IN: * x0..x7: byte-sliced AB state * mem_cd: register pointer storing CD state * key: index for key material * OUT: * x0..x7: new byte-sliced CD state */ #define roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2, t3, t4, t5, t6, \ t7, mem_cd, key) \ /* \ * S-function with AES subbytes \ */ \ vmovdqa .Linv_shift_row RIP, t4; \ vbroadcastss .L0f0f0f0f RIP, t7; \ vmovdqa .Lpre_tf_lo_s1 RIP, t0; \ vmovdqa .Lpre_tf_hi_s1 RIP, t1; \ \ /* AES inverse shift rows */ \ vpshufb t4, x0, x0; \ vpshufb t4, x7, x7; \ vpshufb t4, x1, x1; \ vpshufb t4, x4, x4; \ vpshufb t4, x2, x2; \ vpshufb t4, x5, x5; \ vpshufb t4, x3, x3; \ vpshufb t4, x6, x6; \ \ /* prefilter sboxes 1, 2 and 3 */ \ vmovdqa .Lpre_tf_lo_s4 RIP, t2; \ vmovdqa .Lpre_tf_hi_s4 RIP, t3; \ filter_8bit(x0, t0, t1, t7, t6); \ filter_8bit(x7, t0, t1, t7, t6); \ filter_8bit(x1, t0, t1, t7, t6); \ filter_8bit(x4, t0, t1, t7, t6); \ filter_8bit(x2, t0, t1, t7, t6); \ filter_8bit(x5, t0, t1, t7, t6); \ \ /* prefilter sbox 4 */ \ vpxor t4, t4, t4; \ filter_8bit(x3, t2, t3, t7, t6); \ filter_8bit(x6, t2, t3, t7, t6); \ \ /* AES subbytes + AES shift rows */ \ vmovdqa .Lpost_tf_lo_s1 RIP, t0; \ vmovdqa .Lpost_tf_hi_s1 RIP, t1; \ vaesenclast t4, x0, x0; \ vaesenclast t4, x7, x7; \ vaesenclast t4, x1, x1; \ vaesenclast t4, x4, x4; \ vaesenclast t4, x2, x2; \ vaesenclast t4, x5, x5; \ vaesenclast t4, x3, x3; \ vaesenclast t4, x6, x6; \ \ /* postfilter sboxes 1 and 4 */ \ vmovdqa .Lpost_tf_lo_s3 RIP, t2; \ vmovdqa 
.Lpost_tf_hi_s3 RIP, t3; \ filter_8bit(x0, t0, t1, t7, t6); \ filter_8bit(x7, t0, t1, t7, t6); \ filter_8bit(x3, t0, t1, t7, t6); \ filter_8bit(x6, t0, t1, t7, t6); \ \ /* postfilter sbox 3 */ \ vmovdqa .Lpost_tf_lo_s2 RIP, t4; \ vmovdqa .Lpost_tf_hi_s2 RIP, t5; \ filter_8bit(x2, t2, t3, t7, t6); \ filter_8bit(x5, t2, t3, t7, t6); \ \ vpxor t6, t6, t6; \ vmovq key, t0; \ \ /* postfilter sbox 2 */ \ filter_8bit(x1, t4, t5, t7, t2); \ filter_8bit(x4, t4, t5, t7, t2); \ \ vpsrldq $5, t0, t5; \ vpsrldq $1, t0, t1; \ vpsrldq $2, t0, t2; \ vpsrldq $3, t0, t3; \ vpsrldq $4, t0, t4; \ vpshufb t6, t0, t0; \ vpshufb t6, t1, t1; \ vpshufb t6, t2, t2; \ vpshufb t6, t3, t3; \ vpshufb t6, t4, t4; \ vpsrldq $2, t5, t7; \ vpshufb t6, t7, t7; \ \ /* P-function */ \ vpxor x5, x0, x0; \ vpxor x6, x1, x1; \ vpxor x7, x2, x2; \ vpxor x4, x3, x3; \ \ vpxor x2, x4, x4; \ vpxor x3, x5, x5; \ vpxor x0, x6, x6; \ vpxor x1, x7, x7; \ \ vpxor x7, x0, x0; \ vpxor x4, x1, x1; \ vpxor x5, x2, x2; \ vpxor x6, x3, x3; \ \ vpxor x3, x4, x4; \ vpxor x0, x5, x5; \ vpxor x1, x6, x6; \ vpxor x2, x7, x7; /* note: high and low parts swapped */ \ \ /* Add key material and result to CD (x becomes new CD) */ \ \ vpxor t3, x4, x4; \ vpxor 0 * 16(mem_cd), x4, x4; \ \ vpxor t2, x5, x5; \ vpxor 1 * 16(mem_cd), x5, x5; \ \ vpsrldq $1, t5, t3; \ vpshufb t6, t5, t5; \ vpshufb t6, t3, t6; \ \ vpxor t1, x6, x6; \ vpxor 2 * 16(mem_cd), x6, x6; \ \ vpxor t0, x7, x7; \ vpxor 3 * 16(mem_cd), x7, x7; \ \ vpxor t7, x0, x0; \ vpxor 4 * 16(mem_cd), x0, x0; \ \ vpxor t6, x1, x1; \ vpxor 5 * 16(mem_cd), x1, x1; \ \ vpxor t5, x2, x2; \ vpxor 6 * 16(mem_cd), x2, x2; \ \ vpxor t4, x3, x3; \ vpxor 7 * 16(mem_cd), x3, x3; /* * IN/OUT: * x0..x7: byte-sliced AB state preloaded * mem_ab: byte-sliced AB state in memory * mem_cb: byte-sliced CD state in memory */ #define two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, i, dir, store_ab) \ roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_cd, (key_table + (i) * 8)(CTX)); \ \ vmovdqu x4, 0 * 16(mem_cd); \ vmovdqu x5, 1 * 16(mem_cd); \ vmovdqu x6, 2 * 16(mem_cd); \ vmovdqu x7, 3 * 16(mem_cd); \ vmovdqu x0, 4 * 16(mem_cd); \ vmovdqu x1, 5 * 16(mem_cd); \ vmovdqu x2, 6 * 16(mem_cd); \ vmovdqu x3, 7 * 16(mem_cd); \ \ roundsm16(x4, x5, x6, x7, x0, x1, x2, x3, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, (key_table + ((i) + (dir)) * 8)(CTX)); \ \ store_ab(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab); #define dummy_store(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab) /* do nothing */ #define store_ab_state(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab) \ /* Store new AB state */ \ vmovdqu x0, 0 * 16(mem_ab); \ vmovdqu x1, 1 * 16(mem_ab); \ vmovdqu x2, 2 * 16(mem_ab); \ vmovdqu x3, 3 * 16(mem_ab); \ vmovdqu x4, 4 * 16(mem_ab); \ vmovdqu x5, 5 * 16(mem_ab); \ vmovdqu x6, 6 * 16(mem_ab); \ vmovdqu x7, 7 * 16(mem_ab); #define enc_rounds16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, i) \ two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 2, 1, store_ab_state); \ two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 4, 1, store_ab_state); \ two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 6, 1, dummy_store); #define dec_rounds16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, i) \ two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ 
y6, y7, mem_ab, mem_cd, (i) + 7, -1, store_ab_state); \ two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 5, -1, store_ab_state); \ two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 3, -1, dummy_store); /* * IN: * v0..3: byte-sliced 32-bit integers * OUT: * v0..3: (IN <<< 1) */ #define rol32_1_16(v0, v1, v2, v3, t0, t1, t2, zero) \ vpcmpgtb v0, zero, t0; \ vpaddb v0, v0, v0; \ vpabsb t0, t0; \ \ vpcmpgtb v1, zero, t1; \ vpaddb v1, v1, v1; \ vpabsb t1, t1; \ \ vpcmpgtb v2, zero, t2; \ vpaddb v2, v2, v2; \ vpabsb t2, t2; \ \ vpor t0, v1, v1; \ \ vpcmpgtb v3, zero, t0; \ vpaddb v3, v3, v3; \ vpabsb t0, t0; \ \ vpor t1, v2, v2; \ vpor t2, v3, v3; \ vpor t0, v0, v0; /* * IN: * r: byte-sliced AB state in memory * l: byte-sliced CD state in memory * OUT: * x0..x7: new byte-sliced CD state */ #define fls16(l, l0, l1, l2, l3, l4, l5, l6, l7, r, t0, t1, t2, t3, tt0, \ tt1, tt2, tt3, kll, klr, krl, krr) \ /* \ * t0 = kll; \ * t0 &= ll; \ * lr ^= rol32(t0, 1); \ */ \ vpxor tt0, tt0, tt0; \ vmovd kll, t0; \ vpshufb tt0, t0, t3; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t2; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t1; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t0; \ \ vpand l0, t0, t0; \ vpand l1, t1, t1; \ vpand l2, t2, t2; \ vpand l3, t3, t3; \ \ rol32_1_16(t3, t2, t1, t0, tt1, tt2, tt3, tt0); \ \ vpxor l4, t0, l4; \ vmovdqu l4, 4 * 16(l); \ vpxor l5, t1, l5; \ vmovdqu l5, 5 * 16(l); \ vpxor l6, t2, l6; \ vmovdqu l6, 6 * 16(l); \ vpxor l7, t3, l7; \ vmovdqu l7, 7 * 16(l); \ \ /* \ * t2 = krr; \ * t2 |= rr; \ * rl ^= t2; \ */ \ \ vmovd krr, t0; \ vpshufb tt0, t0, t3; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t2; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t1; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t0; \ \ vpor 4 * 16(r), t0, t0; \ vpor 5 * 16(r), t1, t1; \ vpor 6 * 16(r), t2, t2; \ vpor 7 * 16(r), t3, t3; \ \ vpxor 0 * 16(r), t0, t0; \ vpxor 1 * 16(r), t1, t1; \ vpxor 2 * 16(r), t2, t2; \ vpxor 3 * 16(r), t3, t3; \ vmovdqu t0, 0 * 16(r); \ vmovdqu t1, 1 * 16(r); \ vmovdqu t2, 2 * 16(r); \ vmovdqu t3, 3 * 16(r); \ \ /* \ * t2 = krl; \ * t2 &= rl; \ * rr ^= rol32(t2, 1); \ */ \ vmovd krl, t0; \ vpshufb tt0, t0, t3; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t2; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t1; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t0; \ \ vpand 0 * 16(r), t0, t0; \ vpand 1 * 16(r), t1, t1; \ vpand 2 * 16(r), t2, t2; \ vpand 3 * 16(r), t3, t3; \ \ rol32_1_16(t3, t2, t1, t0, tt1, tt2, tt3, tt0); \ \ vpxor 4 * 16(r), t0, t0; \ vpxor 5 * 16(r), t1, t1; \ vpxor 6 * 16(r), t2, t2; \ vpxor 7 * 16(r), t3, t3; \ vmovdqu t0, 4 * 16(r); \ vmovdqu t1, 5 * 16(r); \ vmovdqu t2, 6 * 16(r); \ vmovdqu t3, 7 * 16(r); \ \ /* \ * t0 = klr; \ * t0 |= lr; \ * ll ^= t0; \ */ \ \ vmovd klr, t0; \ vpshufb tt0, t0, t3; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t2; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t1; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t0; \ \ vpor l4, t0, t0; \ vpor l5, t1, t1; \ vpor l6, t2, t2; \ vpor l7, t3, t3; \ \ vpxor l0, t0, l0; \ vmovdqu l0, 0 * 16(l); \ vpxor l1, t1, l1; \ vmovdqu l1, 1 * 16(l); \ vpxor l2, t2, l2; \ vmovdqu l2, 2 * 16(l); \ vpxor l3, t3, l3; \ vmovdqu l3, 3 * 16(l); #define transpose_4x4(x0, x1, x2, x3, t1, t2) \ vpunpckhdq x1, x0, t2; \ vpunpckldq x1, x0, x0; \ \ vpunpckldq x3, x2, t1; \ vpunpckhdq x3, x2, x2; \ \ vpunpckhqdq t1, x0, x1; \ vpunpcklqdq t1, x0, x0; \ \ vpunpckhqdq x2, t2, x3; \ vpunpcklqdq x2, t2, x2; #define byteslice_16x16b_fast(a0, b0, c0, d0, a1, b1, c1, d1, 
a2, b2, c2, d2, \ a3, b3, c3, d3, st0, st1) \ vmovdqu d2, st0; \ vmovdqu d3, st1; \ transpose_4x4(a0, a1, a2, a3, d2, d3); \ transpose_4x4(b0, b1, b2, b3, d2, d3); \ vmovdqu st0, d2; \ vmovdqu st1, d3; \ \ vmovdqu a0, st0; \ vmovdqu a1, st1; \ transpose_4x4(c0, c1, c2, c3, a0, a1); \ transpose_4x4(d0, d1, d2, d3, a0, a1); \ \ vmovdqu .Lshufb_16x16b RIP, a0; \ vmovdqu st1, a1; \ vpshufb a0, a2, a2; \ vpshufb a0, a3, a3; \ vpshufb a0, b0, b0; \ vpshufb a0, b1, b1; \ vpshufb a0, b2, b2; \ vpshufb a0, b3, b3; \ vpshufb a0, a1, a1; \ vpshufb a0, c0, c0; \ vpshufb a0, c1, c1; \ vpshufb a0, c2, c2; \ vpshufb a0, c3, c3; \ vpshufb a0, d0, d0; \ vpshufb a0, d1, d1; \ vpshufb a0, d2, d2; \ vpshufb a0, d3, d3; \ vmovdqu d3, st1; \ vmovdqu st0, d3; \ vpshufb a0, d3, a0; \ vmovdqu d2, st0; \ \ transpose_4x4(a0, b0, c0, d0, d2, d3); \ transpose_4x4(a1, b1, c1, d1, d2, d3); \ vmovdqu st0, d2; \ vmovdqu st1, d3; \ \ vmovdqu b0, st0; \ vmovdqu b1, st1; \ transpose_4x4(a2, b2, c2, d2, b0, b1); \ transpose_4x4(a3, b3, c3, d3, b0, b1); \ vmovdqu st0, b0; \ vmovdqu st1, b1; \ /* does not adjust output bytes inside vectors */ #define transpose_8x8b(a, b, c, d, e, f, g, h, t0, t1, t2, t3, t4) \ vpunpcklbw a, b, t0; \ vpunpckhbw a, b, b; \ \ vpunpcklbw c, d, t1; \ vpunpckhbw c, d, d; \ \ vpunpcklbw e, f, t2; \ vpunpckhbw e, f, f; \ \ vpunpcklbw g, h, t3; \ vpunpckhbw g, h, h; \ \ vpunpcklwd t0, t1, g; \ vpunpckhwd t0, t1, t0; \ \ vpunpcklwd b, d, t1; \ vpunpckhwd b, d, e; \ \ vpunpcklwd t2, t3, c; \ vpunpckhwd t2, t3, t2; \ \ vpunpcklwd f, h, t3; \ vpunpckhwd f, h, b; \ \ vpunpcklwd e, b, t4; \ vpunpckhwd e, b, b; \ \ vpunpcklwd t1, t3, e; \ vpunpckhwd t1, t3, f; \ \ vmovdqa .Ltranspose_8x8_shuf RIP, t3; \ \ vpunpcklwd g, c, d; \ vpunpckhwd g, c, c; \ \ vpunpcklwd t0, t2, t1; \ vpunpckhwd t0, t2, h; \ \ vpunpckhqdq b, h, a; \ vpshufb t3, a, a; \ vpunpcklqdq b, h, b; \ vpshufb t3, b, b; \ \ vpunpckhqdq e, d, g; \ vpshufb t3, g, g; \ vpunpcklqdq e, d, h; \ vpshufb t3, h, h; \ \ vpunpckhqdq f, c, e; \ vpshufb t3, e, e; \ vpunpcklqdq f, c, f; \ vpshufb t3, f, f; \ \ vpunpckhqdq t4, t1, c; \ vpshufb t3, c, c; \ vpunpcklqdq t4, t1, d; \ vpshufb t3, d, d; /* load blocks to registers and apply pre-whitening */ #define inpack16_pre(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, rio, key) \ vmovq key, x0; \ vpshufb .Lpack_bswap RIP, x0, x0; \ \ vpxor 0 * 16(rio), x0, y7; \ vpxor 1 * 16(rio), x0, y6; \ vpxor 2 * 16(rio), x0, y5; \ vpxor 3 * 16(rio), x0, y4; \ vpxor 4 * 16(rio), x0, y3; \ vpxor 5 * 16(rio), x0, y2; \ vpxor 6 * 16(rio), x0, y1; \ vpxor 7 * 16(rio), x0, y0; \ vpxor 8 * 16(rio), x0, x7; \ vpxor 9 * 16(rio), x0, x6; \ vpxor 10 * 16(rio), x0, x5; \ vpxor 11 * 16(rio), x0, x4; \ vpxor 12 * 16(rio), x0, x3; \ vpxor 13 * 16(rio), x0, x2; \ vpxor 14 * 16(rio), x0, x1; \ vpxor 15 * 16(rio), x0, x0; /* byteslice pre-whitened blocks and store to temporary memory */ #define inpack16_post(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd) \ byteslice_16x16b_fast(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, \ y4, y5, y6, y7, (mem_ab), (mem_cd)); \ \ vmovdqu x0, 0 * 16(mem_ab); \ vmovdqu x1, 1 * 16(mem_ab); \ vmovdqu x2, 2 * 16(mem_ab); \ vmovdqu x3, 3 * 16(mem_ab); \ vmovdqu x4, 4 * 16(mem_ab); \ vmovdqu x5, 5 * 16(mem_ab); \ vmovdqu x6, 6 * 16(mem_ab); \ vmovdqu x7, 7 * 16(mem_ab); \ vmovdqu y0, 0 * 16(mem_cd); \ vmovdqu y1, 1 * 16(mem_cd); \ vmovdqu y2, 2 * 16(mem_cd); \ vmovdqu y3, 3 * 16(mem_cd); \ vmovdqu y4, 4 * 16(mem_cd); \ vmovdqu y5, 5 * 16(mem_cd); \ vmovdqu 
y6, 6 * 16(mem_cd); \ vmovdqu y7, 7 * 16(mem_cd); /* de-byteslice, apply post-whitening and store blocks */ #define outunpack16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, \ y5, y6, y7, key, stack_tmp0, stack_tmp1) \ byteslice_16x16b_fast(y0, y4, x0, x4, y1, y5, x1, x5, y2, y6, x2, x6, \ y3, y7, x3, x7, stack_tmp0, stack_tmp1); \ \ vmovdqu x0, stack_tmp0; \ \ vmovq key, x0; \ vpshufb .Lpack_bswap RIP, x0, x0; \ \ vpxor x0, y7, y7; \ vpxor x0, y6, y6; \ vpxor x0, y5, y5; \ vpxor x0, y4, y4; \ vpxor x0, y3, y3; \ vpxor x0, y2, y2; \ vpxor x0, y1, y1; \ vpxor x0, y0, y0; \ vpxor x0, x7, x7; \ vpxor x0, x6, x6; \ vpxor x0, x5, x5; \ vpxor x0, x4, x4; \ vpxor x0, x3, x3; \ vpxor x0, x2, x2; \ vpxor x0, x1, x1; \ vpxor stack_tmp0, x0, x0; #define write_output(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, rio) \ vmovdqu x0, 0 * 16(rio); \ vmovdqu x1, 1 * 16(rio); \ vmovdqu x2, 2 * 16(rio); \ vmovdqu x3, 3 * 16(rio); \ vmovdqu x4, 4 * 16(rio); \ vmovdqu x5, 5 * 16(rio); \ vmovdqu x6, 6 * 16(rio); \ vmovdqu x7, 7 * 16(rio); \ vmovdqu y0, 8 * 16(rio); \ vmovdqu y1, 9 * 16(rio); \ vmovdqu y2, 10 * 16(rio); \ vmovdqu y3, 11 * 16(rio); \ vmovdqu y4, 12 * 16(rio); \ vmovdqu y5, 13 * 16(rio); \ vmovdqu y6, 14 * 16(rio); \ vmovdqu y7, 15 * 16(rio); -.data +.text .align 16 #define SHUFB_BYTES(idx) \ 0 + (idx), 4 + (idx), 8 + (idx), 12 + (idx) .Lshufb_16x16b: .byte SHUFB_BYTES(0), SHUFB_BYTES(1), SHUFB_BYTES(2), SHUFB_BYTES(3); .Lpack_bswap: .long 0x00010203 .long 0x04050607 .long 0x80808080 .long 0x80808080 /* For CTR-mode IV byteswap */ .Lbswap128_mask: .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 /* * pre-SubByte transform * * pre-lookup for sbox1, sbox2, sbox3: * swap_bitendianness( * isom_map_camellia_to_aes( * camellia_f( * swap_bitendianess(in) * ) * ) * ) * * (note: '⊕ 0xc5' inside camellia_f()) */ .Lpre_tf_lo_s1: .byte 0x45, 0xe8, 0x40, 0xed, 0x2e, 0x83, 0x2b, 0x86 .byte 0x4b, 0xe6, 0x4e, 0xe3, 0x20, 0x8d, 0x25, 0x88 .Lpre_tf_hi_s1: .byte 0x00, 0x51, 0xf1, 0xa0, 0x8a, 0xdb, 0x7b, 0x2a .byte 0x09, 0x58, 0xf8, 0xa9, 0x83, 0xd2, 0x72, 0x23 /* * pre-SubByte transform * * pre-lookup for sbox4: * swap_bitendianness( * isom_map_camellia_to_aes( * camellia_f( * swap_bitendianess(in <<< 1) * ) * ) * ) * * (note: '⊕ 0xc5' inside camellia_f()) */ .Lpre_tf_lo_s4: .byte 0x45, 0x40, 0x2e, 0x2b, 0x4b, 0x4e, 0x20, 0x25 .byte 0x14, 0x11, 0x7f, 0x7a, 0x1a, 0x1f, 0x71, 0x74 .Lpre_tf_hi_s4: .byte 0x00, 0xf1, 0x8a, 0x7b, 0x09, 0xf8, 0x83, 0x72 .byte 0xad, 0x5c, 0x27, 0xd6, 0xa4, 0x55, 0x2e, 0xdf /* * post-SubByte transform * * post-lookup for sbox1, sbox4: * swap_bitendianness( * camellia_h( * isom_map_aes_to_camellia( * swap_bitendianness( * aes_inverse_affine_transform(in) * ) * ) * ) * ) * * (note: '⊕ 0x6e' inside camellia_h()) */ .Lpost_tf_lo_s1: .byte 0x3c, 0xcc, 0xcf, 0x3f, 0x32, 0xc2, 0xc1, 0x31 .byte 0xdc, 0x2c, 0x2f, 0xdf, 0xd2, 0x22, 0x21, 0xd1 .Lpost_tf_hi_s1: .byte 0x00, 0xf9, 0x86, 0x7f, 0xd7, 0x2e, 0x51, 0xa8 .byte 0xa4, 0x5d, 0x22, 0xdb, 0x73, 0x8a, 0xf5, 0x0c /* * post-SubByte transform * * post-lookup for sbox2: * swap_bitendianness( * camellia_h( * isom_map_aes_to_camellia( * swap_bitendianness( * aes_inverse_affine_transform(in) * ) * ) * ) * ) <<< 1 * * (note: '⊕ 0x6e' inside camellia_h()) */ .Lpost_tf_lo_s2: .byte 0x78, 0x99, 0x9f, 0x7e, 0x64, 0x85, 0x83, 0x62 .byte 0xb9, 0x58, 0x5e, 0xbf, 0xa5, 0x44, 0x42, 0xa3 .Lpost_tf_hi_s2: .byte 0x00, 0xf3, 0x0d, 0xfe, 0xaf, 0x5c, 0xa2, 0x51 .byte 0x49, 0xba, 0x44, 0xb7, 0xe6, 0x15, 0xeb, 0x18 /* * post-SubByte 
transform * * post-lookup for sbox3: * swap_bitendianness( * camellia_h( * isom_map_aes_to_camellia( * swap_bitendianness( * aes_inverse_affine_transform(in) * ) * ) * ) * ) >>> 1 * * (note: '⊕ 0x6e' inside camellia_h()) */ .Lpost_tf_lo_s3: .byte 0x1e, 0x66, 0xe7, 0x9f, 0x19, 0x61, 0xe0, 0x98 .byte 0x6e, 0x16, 0x97, 0xef, 0x69, 0x11, 0x90, 0xe8 .Lpost_tf_hi_s3: .byte 0x00, 0xfc, 0x43, 0xbf, 0xeb, 0x17, 0xa8, 0x54 .byte 0x52, 0xae, 0x11, 0xed, 0xb9, 0x45, 0xfa, 0x06 /* For isolating SubBytes from AESENCLAST, inverse shift row */ .Linv_shift_row: .byte 0x00, 0x0d, 0x0a, 0x07, 0x04, 0x01, 0x0e, 0x0b .byte 0x08, 0x05, 0x02, 0x0f, 0x0c, 0x09, 0x06, 0x03 /* shuffle mask for 8x8 byte transpose */ .Ltranspose_8x8_shuf: .byte 0, 1, 4, 5, 2, 3, 6, 7, 8+0, 8+1, 8+4, 8+5, 8+2, 8+3, 8+6, 8+7 .align 4 /* 4-bit mask */ .L0f0f0f0f: .long 0x0f0f0f0f -.text .align 8 ELF(.type __camellia_enc_blk16,@function;) __camellia_enc_blk16: /* input: * %rdi: ctx, CTX * %rax: temporary storage, 256 bytes * %xmm0..%xmm15: 16 plaintext blocks * output: * %xmm0..%xmm15: 16 encrypted blocks, order swapped: * 7, 8, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 */ leaq 8 * 16(%rax), %rcx; inpack16_post(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rax, %rcx); enc_rounds16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rax, %rcx, 0); fls16(%rax, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %rcx, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, ((key_table + (8) * 8) + 0)(CTX), ((key_table + (8) * 8) + 4)(CTX), ((key_table + (8) * 8) + 8)(CTX), ((key_table + (8) * 8) + 12)(CTX)); enc_rounds16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rax, %rcx, 8); fls16(%rax, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %rcx, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, ((key_table + (16) * 8) + 0)(CTX), ((key_table + (16) * 8) + 4)(CTX), ((key_table + (16) * 8) + 8)(CTX), ((key_table + (16) * 8) + 12)(CTX)); enc_rounds16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rax, %rcx, 16); movl $24, %r8d; cmpl $128, key_bitlength(CTX); jne .Lenc_max32; .Lenc_done: /* load CD for output */ vmovdqu 0 * 16(%rcx), %xmm8; vmovdqu 1 * 16(%rcx), %xmm9; vmovdqu 2 * 16(%rcx), %xmm10; vmovdqu 3 * 16(%rcx), %xmm11; vmovdqu 4 * 16(%rcx), %xmm12; vmovdqu 5 * 16(%rcx), %xmm13; vmovdqu 6 * 16(%rcx), %xmm14; vmovdqu 7 * 16(%rcx), %xmm15; outunpack16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 16(%rax)); ret; .align 8 .Lenc_max32: movl $32, %r8d; fls16(%rax, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %rcx, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, ((key_table + (24) * 8) + 0)(CTX), ((key_table + (24) * 8) + 4)(CTX), ((key_table + (24) * 8) + 8)(CTX), ((key_table + (24) * 8) + 12)(CTX)); enc_rounds16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rax, %rcx, 24); jmp .Lenc_done; ELF(.size __camellia_enc_blk16,.-__camellia_enc_blk16;) .align 8 ELF(.type __camellia_dec_blk16,@function;) __camellia_dec_blk16: /* input: * %rdi: ctx, CTX * %rax: temporary storage, 256 bytes * %r8d: 24 for 16 byte key, 32 for larger 
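A note on the sbox prefilter/postfilter tables above (.Lpre_tf_*, .Lpost_tf_*): they are consumed by the filter_8bit macro defined earlier, which evaluates a byte mapping with two 16-entry vpshufb lookups, one per nibble, merged with an XOR. The split is valid because the mappings involved (bit-endianness swaps, the AES/Camellia field isomorphism, and the XOR constants noted in the comments) are affine over GF(2). A minimal scalar sketch of the same idea; the helper and table names here are placeholders, not symbols from this patch:

#include <stdint.h>

/* Scalar model of the filter_8bit() macro: split the byte into nibbles,
 * look each nibble up in its own 16-entry table and XOR the results.
 * This works for any byte mapping that is affine over GF(2), with the
 * additive constant folded into one of the tables. */
static uint8_t filter_8bit_scalar(uint8_t x,
                                  const uint8_t lo_t[16],
                                  const uint8_t hi_t[16])
{
    return (uint8_t)(lo_t[x & 0x0f] ^ hi_t[x >> 4]);
}

In the vector code, vpshufb performs sixteen of these lookups per instruction, with the broadcast .L0f0f0f0f mask isolating the low nibbles.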
* %xmm0..%xmm15: 16 encrypted blocks * output: * %xmm0..%xmm15: 16 plaintext blocks, order swapped: * 7, 8, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 */ leaq 8 * 16(%rax), %rcx; inpack16_post(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rax, %rcx); cmpl $32, %r8d; je .Ldec_max32; .Ldec_max24: dec_rounds16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rax, %rcx, 16); fls16(%rax, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %rcx, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, ((key_table + (16) * 8) + 8)(CTX), ((key_table + (16) * 8) + 12)(CTX), ((key_table + (16) * 8) + 0)(CTX), ((key_table + (16) * 8) + 4)(CTX)); dec_rounds16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rax, %rcx, 8); fls16(%rax, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %rcx, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, ((key_table + (8) * 8) + 8)(CTX), ((key_table + (8) * 8) + 12)(CTX), ((key_table + (8) * 8) + 0)(CTX), ((key_table + (8) * 8) + 4)(CTX)); dec_rounds16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rax, %rcx, 0); /* load CD for output */ vmovdqu 0 * 16(%rcx), %xmm8; vmovdqu 1 * 16(%rcx), %xmm9; vmovdqu 2 * 16(%rcx), %xmm10; vmovdqu 3 * 16(%rcx), %xmm11; vmovdqu 4 * 16(%rcx), %xmm12; vmovdqu 5 * 16(%rcx), %xmm13; vmovdqu 6 * 16(%rcx), %xmm14; vmovdqu 7 * 16(%rcx), %xmm15; outunpack16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, (key_table)(CTX), (%rax), 1 * 16(%rax)); ret; .align 8 .Ldec_max32: dec_rounds16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rax, %rcx, 24); fls16(%rax, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %rcx, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, ((key_table + (24) * 8) + 8)(CTX), ((key_table + (24) * 8) + 12)(CTX), ((key_table + (24) * 8) + 0)(CTX), ((key_table + (24) * 8) + 4)(CTX)); jmp .Ldec_max24; ELF(.size __camellia_dec_blk16,.-__camellia_dec_blk16;) #define inc_le128(x, minus_one, tmp) \ vpcmpeqq minus_one, x, tmp; \ vpsubq minus_one, x, x; \ vpslldq $8, tmp, tmp; \ vpsubq tmp, x, x; .align 8 .globl _gcry_camellia_aesni_avx_ctr_enc ELF(.type _gcry_camellia_aesni_avx_ctr_enc,@function;) _gcry_camellia_aesni_avx_ctr_enc: /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) * %rdx: src (16 blocks) * %rcx: iv (big endian, 128bit) */ pushq %rbp; movq %rsp, %rbp; vzeroupper; subq $(16 * 16), %rsp; andq $~31, %rsp; movq %rsp, %rax; vmovdqa .Lbswap128_mask RIP, %xmm14; /* load IV and byteswap */ vmovdqu (%rcx), %xmm15; vmovdqu %xmm15, 15 * 16(%rax); vpshufb %xmm14, %xmm15, %xmm0; /* be => le */ vpcmpeqd %xmm15, %xmm15, %xmm15; vpsrldq $8, %xmm15, %xmm15; /* low: -1, high: 0 */ /* construct IVs */ inc_le128(%xmm0, %xmm15, %xmm13); vpshufb %xmm14, %xmm0, %xmm13; vmovdqu %xmm13, 14 * 16(%rax); inc_le128(%xmm0, %xmm15, %xmm13); vpshufb %xmm14, %xmm0, %xmm13; vmovdqu %xmm13, 13 * 16(%rax); inc_le128(%xmm0, %xmm15, %xmm13); vpshufb %xmm14, %xmm0, %xmm12; inc_le128(%xmm0, %xmm15, %xmm13); vpshufb %xmm14, %xmm0, %xmm11; inc_le128(%xmm0, %xmm15, %xmm13); vpshufb %xmm14, %xmm0, %xmm10; inc_le128(%xmm0, %xmm15, %xmm13); vpshufb %xmm14, %xmm0, %xmm9; inc_le128(%xmm0, 
%xmm15, %xmm13); vpshufb %xmm14, %xmm0, %xmm8; inc_le128(%xmm0, %xmm15, %xmm13); vpshufb %xmm14, %xmm0, %xmm7; inc_le128(%xmm0, %xmm15, %xmm13); vpshufb %xmm14, %xmm0, %xmm6; inc_le128(%xmm0, %xmm15, %xmm13); vpshufb %xmm14, %xmm0, %xmm5; inc_le128(%xmm0, %xmm15, %xmm13); vpshufb %xmm14, %xmm0, %xmm4; inc_le128(%xmm0, %xmm15, %xmm13); vpshufb %xmm14, %xmm0, %xmm3; inc_le128(%xmm0, %xmm15, %xmm13); vpshufb %xmm14, %xmm0, %xmm2; inc_le128(%xmm0, %xmm15, %xmm13); vpshufb %xmm14, %xmm0, %xmm1; inc_le128(%xmm0, %xmm15, %xmm13); vmovdqa %xmm0, %xmm13; vpshufb %xmm14, %xmm0, %xmm0; inc_le128(%xmm13, %xmm15, %xmm14); vpshufb .Lbswap128_mask RIP, %xmm13, %xmm13; /* le => be */ vmovdqu %xmm13, (%rcx); /* inpack16_pre: */ vmovq (key_table)(CTX), %xmm15; vpshufb .Lpack_bswap RIP, %xmm15, %xmm15; vpxor %xmm0, %xmm15, %xmm0; vpxor %xmm1, %xmm15, %xmm1; vpxor %xmm2, %xmm15, %xmm2; vpxor %xmm3, %xmm15, %xmm3; vpxor %xmm4, %xmm15, %xmm4; vpxor %xmm5, %xmm15, %xmm5; vpxor %xmm6, %xmm15, %xmm6; vpxor %xmm7, %xmm15, %xmm7; vpxor %xmm8, %xmm15, %xmm8; vpxor %xmm9, %xmm15, %xmm9; vpxor %xmm10, %xmm15, %xmm10; vpxor %xmm11, %xmm15, %xmm11; vpxor %xmm12, %xmm15, %xmm12; vpxor 13 * 16(%rax), %xmm15, %xmm13; vpxor 14 * 16(%rax), %xmm15, %xmm14; vpxor 15 * 16(%rax), %xmm15, %xmm15; call __camellia_enc_blk16; vpxor 0 * 16(%rdx), %xmm7, %xmm7; vpxor 1 * 16(%rdx), %xmm6, %xmm6; vpxor 2 * 16(%rdx), %xmm5, %xmm5; vpxor 3 * 16(%rdx), %xmm4, %xmm4; vpxor 4 * 16(%rdx), %xmm3, %xmm3; vpxor 5 * 16(%rdx), %xmm2, %xmm2; vpxor 6 * 16(%rdx), %xmm1, %xmm1; vpxor 7 * 16(%rdx), %xmm0, %xmm0; vpxor 8 * 16(%rdx), %xmm15, %xmm15; vpxor 9 * 16(%rdx), %xmm14, %xmm14; vpxor 10 * 16(%rdx), %xmm13, %xmm13; vpxor 11 * 16(%rdx), %xmm12, %xmm12; vpxor 12 * 16(%rdx), %xmm11, %xmm11; vpxor 13 * 16(%rdx), %xmm10, %xmm10; vpxor 14 * 16(%rdx), %xmm9, %xmm9; vpxor 15 * 16(%rdx), %xmm8, %xmm8; write_output(%xmm7, %xmm6, %xmm5, %xmm4, %xmm3, %xmm2, %xmm1, %xmm0, %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9, %xmm8, %rsi); vzeroall; leave; ret; ELF(.size _gcry_camellia_aesni_avx_ctr_enc,.-_gcry_camellia_aesni_avx_ctr_enc;) .align 8 .globl _gcry_camellia_aesni_avx_cbc_dec ELF(.type _gcry_camellia_aesni_avx_cbc_dec,@function;) _gcry_camellia_aesni_avx_cbc_dec: /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) * %rdx: src (16 blocks) * %rcx: iv */ pushq %rbp; movq %rsp, %rbp; vzeroupper; movq %rcx, %r9; cmpl $128, key_bitlength(CTX); movl $32, %r8d; movl $24, %eax; cmovel %eax, %r8d; /* max */ inpack16_pre(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rdx, (key_table)(CTX, %r8, 8)); subq $(16 * 16), %rsp; andq $~31, %rsp; movq %rsp, %rax; call __camellia_dec_blk16; /* XOR output with IV */ vpxor (%r9), %xmm7, %xmm7; vpxor (0 * 16)(%rdx), %xmm6, %xmm6; vpxor (1 * 16)(%rdx), %xmm5, %xmm5; vpxor (2 * 16)(%rdx), %xmm4, %xmm4; vpxor (3 * 16)(%rdx), %xmm3, %xmm3; vpxor (4 * 16)(%rdx), %xmm2, %xmm2; vpxor (5 * 16)(%rdx), %xmm1, %xmm1; vpxor (6 * 16)(%rdx), %xmm0, %xmm0; vpxor (7 * 16)(%rdx), %xmm15, %xmm15; vpxor (8 * 16)(%rdx), %xmm14, %xmm14; vpxor (9 * 16)(%rdx), %xmm13, %xmm13; vpxor (10 * 16)(%rdx), %xmm12, %xmm12; vpxor (11 * 16)(%rdx), %xmm11, %xmm11; vpxor (12 * 16)(%rdx), %xmm10, %xmm10; vpxor (13 * 16)(%rdx), %xmm9, %xmm9; vpxor (14 * 16)(%rdx), %xmm8, %xmm8; movq (15 * 16 + 0)(%rdx), %r10; movq (15 * 16 + 8)(%rdx), %r11; write_output(%xmm7, %xmm6, %xmm5, %xmm4, %xmm3, %xmm2, %xmm1, %xmm0, %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9, %xmm8, %rsi); /* store new 
IV */ movq %r10, (0)(%r9); movq %r11, (8)(%r9); vzeroall; leave; ret; ELF(.size _gcry_camellia_aesni_avx_cbc_dec,.-_gcry_camellia_aesni_avx_cbc_dec;) .align 8 .globl _gcry_camellia_aesni_avx_cfb_dec ELF(.type _gcry_camellia_aesni_avx_cfb_dec,@function;) _gcry_camellia_aesni_avx_cfb_dec: /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) * %rdx: src (16 blocks) * %rcx: iv */ pushq %rbp; movq %rsp, %rbp; vzeroupper; subq $(16 * 16), %rsp; andq $~31, %rsp; movq %rsp, %rax; /* inpack16_pre: */ vmovq (key_table)(CTX), %xmm0; vpshufb .Lpack_bswap RIP, %xmm0, %xmm0; vpxor (%rcx), %xmm0, %xmm15; vmovdqu 15 * 16(%rdx), %xmm1; vmovdqu %xmm1, (%rcx); /* store new IV */ vpxor 0 * 16(%rdx), %xmm0, %xmm14; vpxor 1 * 16(%rdx), %xmm0, %xmm13; vpxor 2 * 16(%rdx), %xmm0, %xmm12; vpxor 3 * 16(%rdx), %xmm0, %xmm11; vpxor 4 * 16(%rdx), %xmm0, %xmm10; vpxor 5 * 16(%rdx), %xmm0, %xmm9; vpxor 6 * 16(%rdx), %xmm0, %xmm8; vpxor 7 * 16(%rdx), %xmm0, %xmm7; vpxor 8 * 16(%rdx), %xmm0, %xmm6; vpxor 9 * 16(%rdx), %xmm0, %xmm5; vpxor 10 * 16(%rdx), %xmm0, %xmm4; vpxor 11 * 16(%rdx), %xmm0, %xmm3; vpxor 12 * 16(%rdx), %xmm0, %xmm2; vpxor 13 * 16(%rdx), %xmm0, %xmm1; vpxor 14 * 16(%rdx), %xmm0, %xmm0; call __camellia_enc_blk16; vpxor 0 * 16(%rdx), %xmm7, %xmm7; vpxor 1 * 16(%rdx), %xmm6, %xmm6; vpxor 2 * 16(%rdx), %xmm5, %xmm5; vpxor 3 * 16(%rdx), %xmm4, %xmm4; vpxor 4 * 16(%rdx), %xmm3, %xmm3; vpxor 5 * 16(%rdx), %xmm2, %xmm2; vpxor 6 * 16(%rdx), %xmm1, %xmm1; vpxor 7 * 16(%rdx), %xmm0, %xmm0; vpxor 8 * 16(%rdx), %xmm15, %xmm15; vpxor 9 * 16(%rdx), %xmm14, %xmm14; vpxor 10 * 16(%rdx), %xmm13, %xmm13; vpxor 11 * 16(%rdx), %xmm12, %xmm12; vpxor 12 * 16(%rdx), %xmm11, %xmm11; vpxor 13 * 16(%rdx), %xmm10, %xmm10; vpxor 14 * 16(%rdx), %xmm9, %xmm9; vpxor 15 * 16(%rdx), %xmm8, %xmm8; write_output(%xmm7, %xmm6, %xmm5, %xmm4, %xmm3, %xmm2, %xmm1, %xmm0, %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9, %xmm8, %rsi); vzeroall; leave; ret; ELF(.size _gcry_camellia_aesni_avx_cfb_dec,.-_gcry_camellia_aesni_avx_cfb_dec;) .align 8 .globl _gcry_camellia_aesni_avx_ocb_enc ELF(.type _gcry_camellia_aesni_avx_ocb_enc,@function;) _gcry_camellia_aesni_avx_ocb_enc: /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) * %rdx: src (16 blocks) * %rcx: offset * %r8 : checksum * %r9 : L pointers (void *L[16]) */ pushq %rbp; movq %rsp, %rbp; vzeroupper; subq $(16 * 16 + 4 * 8), %rsp; andq $~31, %rsp; movq %rsp, %rax; movq %r10, (16 * 16 + 0 * 8)(%rax); movq %r11, (16 * 16 + 1 * 8)(%rax); movq %r12, (16 * 16 + 2 * 8)(%rax); movq %r13, (16 * 16 + 3 * 8)(%rax); vmovdqu (%rcx), %xmm14; vmovdqu (%r8), %xmm15; /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ /* Checksum_i = Checksum_{i-1} xor P_i */ /* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i) */ #define OCB_INPUT(n, lreg, xreg) \ vmovdqu (n * 16)(%rdx), xreg; \ vpxor (lreg), %xmm14, %xmm14; \ vpxor xreg, %xmm15, %xmm15; \ vpxor xreg, %xmm14, xreg; \ vmovdqu %xmm14, (n * 16)(%rsi); movq (0 * 8)(%r9), %r10; movq (1 * 8)(%r9), %r11; movq (2 * 8)(%r9), %r12; movq (3 * 8)(%r9), %r13; OCB_INPUT(0, %r10, %xmm0); vmovdqu %xmm0, (15 * 16)(%rax); OCB_INPUT(1, %r11, %xmm0); vmovdqu %xmm0, (14 * 16)(%rax); OCB_INPUT(2, %r12, %xmm13); OCB_INPUT(3, %r13, %xmm12); movq (4 * 8)(%r9), %r10; movq (5 * 8)(%r9), %r11; movq (6 * 8)(%r9), %r12; movq (7 * 8)(%r9), %r13; OCB_INPUT(4, %r10, %xmm11); OCB_INPUT(5, %r11, %xmm10); OCB_INPUT(6, %r12, %xmm9); OCB_INPUT(7, %r13, %xmm8); movq (8 * 8)(%r9), %r10; movq (9 * 8)(%r9), %r11; movq (10 * 8)(%r9), %r12; movq (11 * 8)(%r9), %r13; OCB_INPUT(8, %r10, %xmm7); OCB_INPUT(9, 
%r11, %xmm6); OCB_INPUT(10, %r12, %xmm5); OCB_INPUT(11, %r13, %xmm4); movq (12 * 8)(%r9), %r10; movq (13 * 8)(%r9), %r11; movq (14 * 8)(%r9), %r12; movq (15 * 8)(%r9), %r13; OCB_INPUT(12, %r10, %xmm3); OCB_INPUT(13, %r11, %xmm2); OCB_INPUT(14, %r12, %xmm1); OCB_INPUT(15, %r13, %xmm0); #undef OCB_INPUT vmovdqu %xmm14, (%rcx); vmovdqu %xmm15, (%r8); /* inpack16_pre: */ vmovq (key_table)(CTX), %xmm15; vpshufb .Lpack_bswap RIP, %xmm15, %xmm15; vpxor %xmm0, %xmm15, %xmm0; vpxor %xmm1, %xmm15, %xmm1; vpxor %xmm2, %xmm15, %xmm2; vpxor %xmm3, %xmm15, %xmm3; vpxor %xmm4, %xmm15, %xmm4; vpxor %xmm5, %xmm15, %xmm5; vpxor %xmm6, %xmm15, %xmm6; vpxor %xmm7, %xmm15, %xmm7; vpxor %xmm8, %xmm15, %xmm8; vpxor %xmm9, %xmm15, %xmm9; vpxor %xmm10, %xmm15, %xmm10; vpxor %xmm11, %xmm15, %xmm11; vpxor %xmm12, %xmm15, %xmm12; vpxor %xmm13, %xmm15, %xmm13; vpxor 14 * 16(%rax), %xmm15, %xmm14; vpxor 15 * 16(%rax), %xmm15, %xmm15; call __camellia_enc_blk16; vpxor 0 * 16(%rsi), %xmm7, %xmm7; vpxor 1 * 16(%rsi), %xmm6, %xmm6; vpxor 2 * 16(%rsi), %xmm5, %xmm5; vpxor 3 * 16(%rsi), %xmm4, %xmm4; vpxor 4 * 16(%rsi), %xmm3, %xmm3; vpxor 5 * 16(%rsi), %xmm2, %xmm2; vpxor 6 * 16(%rsi), %xmm1, %xmm1; vpxor 7 * 16(%rsi), %xmm0, %xmm0; vpxor 8 * 16(%rsi), %xmm15, %xmm15; vpxor 9 * 16(%rsi), %xmm14, %xmm14; vpxor 10 * 16(%rsi), %xmm13, %xmm13; vpxor 11 * 16(%rsi), %xmm12, %xmm12; vpxor 12 * 16(%rsi), %xmm11, %xmm11; vpxor 13 * 16(%rsi), %xmm10, %xmm10; vpxor 14 * 16(%rsi), %xmm9, %xmm9; vpxor 15 * 16(%rsi), %xmm8, %xmm8; write_output(%xmm7, %xmm6, %xmm5, %xmm4, %xmm3, %xmm2, %xmm1, %xmm0, %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9, %xmm8, %rsi); vzeroall; movq (16 * 16 + 0 * 8)(%rax), %r10; movq (16 * 16 + 1 * 8)(%rax), %r11; movq (16 * 16 + 2 * 8)(%rax), %r12; movq (16 * 16 + 3 * 8)(%rax), %r13; leave; ret; ELF(.size _gcry_camellia_aesni_avx_ocb_enc,.-_gcry_camellia_aesni_avx_ocb_enc;) .align 8 .globl _gcry_camellia_aesni_avx_ocb_dec ELF(.type _gcry_camellia_aesni_avx_ocb_dec,@function;) _gcry_camellia_aesni_avx_ocb_dec: /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) * %rdx: src (16 blocks) * %rcx: offset * %r8 : checksum * %r9 : L pointers (void *L[16]) */ pushq %rbp; movq %rsp, %rbp; vzeroupper; subq $(16 * 16 + 4 * 8), %rsp; andq $~31, %rsp; movq %rsp, %rax; movq %r10, (16 * 16 + 0 * 8)(%rax); movq %r11, (16 * 16 + 1 * 8)(%rax); movq %r12, (16 * 16 + 2 * 8)(%rax); movq %r13, (16 * 16 + 3 * 8)(%rax); vmovdqu (%rcx), %xmm15; /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ /* P_i = Offset_i xor DECIPHER(K, C_i xor Offset_i) */ #define OCB_INPUT(n, lreg, xreg) \ vmovdqu (n * 16)(%rdx), xreg; \ vpxor (lreg), %xmm15, %xmm15; \ vpxor xreg, %xmm15, xreg; \ vmovdqu %xmm15, (n * 16)(%rsi); movq (0 * 8)(%r9), %r10; movq (1 * 8)(%r9), %r11; movq (2 * 8)(%r9), %r12; movq (3 * 8)(%r9), %r13; OCB_INPUT(0, %r10, %xmm0); vmovdqu %xmm0, (15 * 16)(%rax); OCB_INPUT(1, %r11, %xmm14); OCB_INPUT(2, %r12, %xmm13); OCB_INPUT(3, %r13, %xmm12); movq (4 * 8)(%r9), %r10; movq (5 * 8)(%r9), %r11; movq (6 * 8)(%r9), %r12; movq (7 * 8)(%r9), %r13; OCB_INPUT(4, %r10, %xmm11); OCB_INPUT(5, %r11, %xmm10); OCB_INPUT(6, %r12, %xmm9); OCB_INPUT(7, %r13, %xmm8); movq (8 * 8)(%r9), %r10; movq (9 * 8)(%r9), %r11; movq (10 * 8)(%r9), %r12; movq (11 * 8)(%r9), %r13; OCB_INPUT(8, %r10, %xmm7); OCB_INPUT(9, %r11, %xmm6); OCB_INPUT(10, %r12, %xmm5); OCB_INPUT(11, %r13, %xmm4); movq (12 * 8)(%r9), %r10; movq (13 * 8)(%r9), %r11; movq (14 * 8)(%r9), %r12; movq (15 * 8)(%r9), %r13; OCB_INPUT(12, %r10, %xmm3); OCB_INPUT(13, %r11, %xmm2); OCB_INPUT(14, %r12, 
%xmm1); OCB_INPUT(15, %r13, %xmm0); #undef OCB_INPUT vmovdqu %xmm15, (%rcx); movq %r8, %r10; cmpl $128, key_bitlength(CTX); movl $32, %r8d; movl $24, %r9d; cmovel %r9d, %r8d; /* max */ /* inpack16_pre: */ vmovq (key_table)(CTX, %r8, 8), %xmm15; vpshufb .Lpack_bswap RIP, %xmm15, %xmm15; vpxor %xmm0, %xmm15, %xmm0; vpxor %xmm1, %xmm15, %xmm1; vpxor %xmm2, %xmm15, %xmm2; vpxor %xmm3, %xmm15, %xmm3; vpxor %xmm4, %xmm15, %xmm4; vpxor %xmm5, %xmm15, %xmm5; vpxor %xmm6, %xmm15, %xmm6; vpxor %xmm7, %xmm15, %xmm7; vpxor %xmm8, %xmm15, %xmm8; vpxor %xmm9, %xmm15, %xmm9; vpxor %xmm10, %xmm15, %xmm10; vpxor %xmm11, %xmm15, %xmm11; vpxor %xmm12, %xmm15, %xmm12; vpxor %xmm13, %xmm15, %xmm13; vpxor %xmm14, %xmm15, %xmm14; vpxor 15 * 16(%rax), %xmm15, %xmm15; call __camellia_dec_blk16; vpxor 0 * 16(%rsi), %xmm7, %xmm7; vpxor 1 * 16(%rsi), %xmm6, %xmm6; vpxor 2 * 16(%rsi), %xmm5, %xmm5; vpxor 3 * 16(%rsi), %xmm4, %xmm4; vpxor 4 * 16(%rsi), %xmm3, %xmm3; vpxor 5 * 16(%rsi), %xmm2, %xmm2; vpxor 6 * 16(%rsi), %xmm1, %xmm1; vpxor 7 * 16(%rsi), %xmm0, %xmm0; vmovdqu %xmm7, (7 * 16)(%rax); vpxor 8 * 16(%rsi), %xmm15, %xmm15; vpxor 9 * 16(%rsi), %xmm14, %xmm14; vpxor 10 * 16(%rsi), %xmm13, %xmm13; vpxor 11 * 16(%rsi), %xmm12, %xmm12; vpxor 12 * 16(%rsi), %xmm11, %xmm11; vpxor 13 * 16(%rsi), %xmm10, %xmm10; vpxor 14 * 16(%rsi), %xmm9, %xmm9; vpxor 15 * 16(%rsi), %xmm8, %xmm8; /* Checksum_i = Checksum_{i-1} xor P_i */ vpxor (%r10), %xmm7, %xmm7; vpxor %xmm6, %xmm7, %xmm7; vpxor %xmm5, %xmm7, %xmm7; vpxor %xmm4, %xmm7, %xmm7; vpxor %xmm3, %xmm7, %xmm7; vpxor %xmm2, %xmm7, %xmm7; vpxor %xmm1, %xmm7, %xmm7; vpxor %xmm0, %xmm7, %xmm7; vpxor %xmm15, %xmm7, %xmm7; vpxor %xmm14, %xmm7, %xmm7; vpxor %xmm13, %xmm7, %xmm7; vpxor %xmm12, %xmm7, %xmm7; vpxor %xmm11, %xmm7, %xmm7; vpxor %xmm10, %xmm7, %xmm7; vpxor %xmm9, %xmm7, %xmm7; vpxor %xmm8, %xmm7, %xmm7; vmovdqu %xmm7, (%r10); vmovdqu (7 * 16)(%rax), %xmm7; write_output(%xmm7, %xmm6, %xmm5, %xmm4, %xmm3, %xmm2, %xmm1, %xmm0, %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9, %xmm8, %rsi); vzeroall; movq (16 * 16 + 0 * 8)(%rax), %r10; movq (16 * 16 + 1 * 8)(%rax), %r11; movq (16 * 16 + 2 * 8)(%rax), %r12; movq (16 * 16 + 3 * 8)(%rax), %r13; leave; ret; ELF(.size _gcry_camellia_aesni_avx_ocb_dec,.-_gcry_camellia_aesni_avx_ocb_dec;) .align 8 .globl _gcry_camellia_aesni_avx_ocb_auth ELF(.type _gcry_camellia_aesni_avx_ocb_auth,@function;) _gcry_camellia_aesni_avx_ocb_auth: /* input: * %rdi: ctx, CTX * %rsi: abuf (16 blocks) * %rdx: offset * %rcx: checksum * %r8 : L pointers (void *L[16]) */ pushq %rbp; movq %rsp, %rbp; vzeroupper; subq $(16 * 16 + 4 * 8), %rsp; andq $~31, %rsp; movq %rsp, %rax; movq %r10, (16 * 16 + 0 * 8)(%rax); movq %r11, (16 * 16 + 1 * 8)(%rax); movq %r12, (16 * 16 + 2 * 8)(%rax); movq %r13, (16 * 16 + 3 * 8)(%rax); vmovdqu (%rdx), %xmm15; /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ /* Sum_i = Sum_{i-1} xor ENCIPHER(K, A_i xor Offset_i) */ #define OCB_INPUT(n, lreg, xreg) \ vmovdqu (n * 16)(%rsi), xreg; \ vpxor (lreg), %xmm15, %xmm15; \ vpxor xreg, %xmm15, xreg; movq (0 * 8)(%r8), %r10; movq (1 * 8)(%r8), %r11; movq (2 * 8)(%r8), %r12; movq (3 * 8)(%r8), %r13; OCB_INPUT(0, %r10, %xmm0); vmovdqu %xmm0, (15 * 16)(%rax); OCB_INPUT(1, %r11, %xmm14); OCB_INPUT(2, %r12, %xmm13); OCB_INPUT(3, %r13, %xmm12); movq (4 * 8)(%r8), %r10; movq (5 * 8)(%r8), %r11; movq (6 * 8)(%r8), %r12; movq (7 * 8)(%r8), %r13; OCB_INPUT(4, %r10, %xmm11); OCB_INPUT(5, %r11, %xmm10); OCB_INPUT(6, %r12, %xmm9); OCB_INPUT(7, %r13, %xmm8); movq (8 * 8)(%r8), %r10; movq (9 * 8)(%r8), 
%r11; movq (10 * 8)(%r8), %r12; movq (11 * 8)(%r8), %r13; OCB_INPUT(8, %r10, %xmm7); OCB_INPUT(9, %r11, %xmm6); OCB_INPUT(10, %r12, %xmm5); OCB_INPUT(11, %r13, %xmm4); movq (12 * 8)(%r8), %r10; movq (13 * 8)(%r8), %r11; movq (14 * 8)(%r8), %r12; movq (15 * 8)(%r8), %r13; OCB_INPUT(12, %r10, %xmm3); OCB_INPUT(13, %r11, %xmm2); OCB_INPUT(14, %r12, %xmm1); OCB_INPUT(15, %r13, %xmm0); #undef OCB_INPUT vmovdqu %xmm15, (%rdx); movq %rcx, %r10; /* inpack16_pre: */ vmovq (key_table)(CTX), %xmm15; vpshufb .Lpack_bswap RIP, %xmm15, %xmm15; vpxor %xmm0, %xmm15, %xmm0; vpxor %xmm1, %xmm15, %xmm1; vpxor %xmm2, %xmm15, %xmm2; vpxor %xmm3, %xmm15, %xmm3; vpxor %xmm4, %xmm15, %xmm4; vpxor %xmm5, %xmm15, %xmm5; vpxor %xmm6, %xmm15, %xmm6; vpxor %xmm7, %xmm15, %xmm7; vpxor %xmm8, %xmm15, %xmm8; vpxor %xmm9, %xmm15, %xmm9; vpxor %xmm10, %xmm15, %xmm10; vpxor %xmm11, %xmm15, %xmm11; vpxor %xmm12, %xmm15, %xmm12; vpxor %xmm13, %xmm15, %xmm13; vpxor %xmm14, %xmm15, %xmm14; vpxor 15 * 16(%rax), %xmm15, %xmm15; call __camellia_enc_blk16; vpxor %xmm7, %xmm6, %xmm6; vpxor %xmm5, %xmm4, %xmm4; vpxor %xmm3, %xmm2, %xmm2; vpxor %xmm1, %xmm0, %xmm0; vpxor %xmm15, %xmm14, %xmm14; vpxor %xmm13, %xmm12, %xmm12; vpxor %xmm11, %xmm10, %xmm10; vpxor %xmm9, %xmm8, %xmm8; vpxor %xmm6, %xmm4, %xmm4; vpxor %xmm2, %xmm0, %xmm0; vpxor %xmm14, %xmm12, %xmm12; vpxor %xmm10, %xmm8, %xmm8; vpxor %xmm4, %xmm0, %xmm0; vpxor %xmm12, %xmm8, %xmm8; vpxor %xmm0, %xmm8, %xmm0; vpxor (%r10), %xmm0, %xmm0; vmovdqu %xmm0, (%r10); vzeroall; movq (16 * 16 + 0 * 8)(%rax), %r10; movq (16 * 16 + 1 * 8)(%rax), %r11; movq (16 * 16 + 2 * 8)(%rax), %r12; movq (16 * 16 + 3 * 8)(%rax), %r13; leave; ret; ELF(.size _gcry_camellia_aesni_avx_ocb_auth,.-_gcry_camellia_aesni_avx_ocb_auth;) /* * IN: * ab: 64-bit AB state * cd: 64-bit CD state */ #define camellia_f(ab, x, t0, t1, t2, t3, t4, inv_shift_row, sbox4mask, \ _0f0f0f0fmask, pre_s1lo_mask, pre_s1hi_mask, key) \ vmovq key, t0; \ vpxor x, x, t3; \ \ vpxor ab, t0, x; \ \ /* \ * S-function with AES subbytes \ */ \ \ /* input rotation for sbox4 (<<< 1) */ \ vpand x, sbox4mask, t0; \ vpandn x, sbox4mask, x; \ vpaddw t0, t0, t1; \ vpsrlw $7, t0, t0; \ vpor t0, t1, t0; \ vpand sbox4mask, t0, t0; \ vpor t0, x, x; \ \ vmovdqa .Lpost_tf_lo_s1 RIP, t0; \ vmovdqa .Lpost_tf_hi_s1 RIP, t1; \ \ /* prefilter sboxes */ \ filter_8bit(x, pre_s1lo_mask, pre_s1hi_mask, _0f0f0f0fmask, t2); \ \ /* AES subbytes + AES shift rows + AES inv shift rows */ \ vaesenclast t3, x, x; \ \ /* postfilter sboxes */ \ filter_8bit(x, t0, t1, _0f0f0f0fmask, t2); \ \ /* output rotation for sbox2 (<<< 1) */ \ /* output rotation for sbox3 (>>> 1) */ \ vpshufb inv_shift_row, x, t1; \ vpshufb .Lsp0044440444044404mask RIP, x, t4; \ vpshufb .Lsp1110111010011110mask RIP, x, x; \ vpaddb t1, t1, t2; \ vpsrlw $7, t1, t0; \ vpsllw $7, t1, t3; \ vpor t0, t2, t0; \ vpsrlw $1, t1, t1; \ vpshufb .Lsp0222022222000222mask RIP, t0, t0; \ vpor t1, t3, t1; \ \ vpxor x, t4, t4; \ vpshufb .Lsp3033303303303033mask RIP, t1, t1; \ vpxor t4, t0, t0; \ vpxor t1, t0, t0; \ vpsrldq $8, t0, x; \ vpxor t0, x, x; #define vec_rol128(in, out, nrol, t0) \ vpshufd $0x4e, in, out; \ vpsllq $(nrol), in, t0; \ vpsrlq $(64-(nrol)), out, out; \ vpaddd t0, out, out; #define vec_ror128(in, out, nror, t0) \ vpshufd $0x4e, in, out; \ vpsrlq $(nror), in, t0; \ vpsllq $(64-(nror)), out, out; \ vpaddd t0, out, out; -.data .align 16 .Linv_shift_row_and_unpcklbw: .byte 0x00, 0xff, 0x0d, 0xff, 0x0a, 0xff, 0x07, 0xff .byte 0x04, 0xff, 0x01, 0xff, 0x0e, 0xff, 0x0b, 0xff .Lsp0044440444044404mask: 
.long 0xffff0404, 0x0404ff04; .long 0x0d0dff0d, 0x0d0dff0d; .Lsp1110111010011110mask: .long 0x000000ff, 0x000000ff; .long 0x0bffff0b, 0x0b0b0bff; .Lsp0222022222000222mask: .long 0xff060606, 0xff060606; .long 0x0c0cffff, 0xff0c0c0c; .Lsp3033303303303033mask: .long 0x04ff0404, 0x04ff0404; .long 0xff0a0aff, 0x0aff0a0a; .Lsbox4_input_mask: .byte 0x00, 0xff, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00; .Lsigma1: .long 0x3BCC908B, 0xA09E667F; .Lsigma2: .long 0x4CAA73B2, 0xB67AE858; .Lsigma3: .long 0xE94F82BE, 0xC6EF372F; .Lsigma4: .long 0xF1D36F1C, 0x54FF53A5; .Lsigma5: .long 0xDE682D1D, 0x10E527FA; .Lsigma6: .long 0xB3E6C1FD, 0xB05688C2; -.text .align 8 ELF(.type __camellia_avx_setup128,@function;) __camellia_avx_setup128: /* input: * %rdi: ctx, CTX; subkey storage at key_table(CTX) * %xmm0: key */ #define cmll_sub(n, ctx) (key_table+((n)*8))(ctx) #define KL128 %xmm0 #define KA128 %xmm2 vpshufb .Lbswap128_mask RIP, KL128, KL128; vmovdqa .Linv_shift_row_and_unpcklbw RIP, %xmm11; vmovq .Lsbox4_input_mask RIP, %xmm12; vbroadcastss .L0f0f0f0f RIP, %xmm13; vmovdqa .Lpre_tf_lo_s1 RIP, %xmm14; vmovdqa .Lpre_tf_hi_s1 RIP, %xmm15; /* * Generate KA */ vpsrldq $8, KL128, %xmm2; vmovdqa KL128, %xmm3; vpslldq $8, %xmm3, %xmm3; vpsrldq $8, %xmm3, %xmm3; camellia_f(%xmm2, %xmm4, %xmm1, %xmm5, %xmm6, %xmm7, %xmm8, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, .Lsigma1 RIP); vpxor %xmm4, %xmm3, %xmm3; camellia_f(%xmm3, %xmm2, %xmm1, %xmm5, %xmm6, %xmm7, %xmm8, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, .Lsigma2 RIP); camellia_f(%xmm2, %xmm3, %xmm1, %xmm5, %xmm6, %xmm7, %xmm8, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, .Lsigma3 RIP); vpxor %xmm4, %xmm3, %xmm3; camellia_f(%xmm3, %xmm4, %xmm1, %xmm5, %xmm6, %xmm7, %xmm8, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, .Lsigma4 RIP); vpslldq $8, %xmm3, %xmm3; vpxor %xmm4, %xmm2, %xmm2; vpsrldq $8, %xmm3, %xmm3; vpslldq $8, %xmm2, KA128; vpor %xmm3, KA128, KA128; /* * Generate subkeys */ vmovdqu KA128, cmll_sub(24, CTX); vec_rol128(KL128, %xmm3, 15, %xmm15); vec_rol128(KA128, %xmm4, 15, %xmm15); vec_rol128(KA128, %xmm5, 30, %xmm15); vec_rol128(KL128, %xmm6, 45, %xmm15); vec_rol128(KA128, %xmm7, 45, %xmm15); vec_rol128(KL128, %xmm8, 60, %xmm15); vec_rol128(KA128, %xmm9, 60, %xmm15); vec_ror128(KL128, %xmm10, 128-77, %xmm15); /* absorb kw2 to other subkeys */ vpslldq $8, KL128, %xmm15; vpsrldq $8, %xmm15, %xmm15; vpxor %xmm15, KA128, KA128; vpxor %xmm15, %xmm3, %xmm3; vpxor %xmm15, %xmm4, %xmm4; /* subl(1) ^= subr(1) & ~subr(9); */ vpandn %xmm15, %xmm5, %xmm13; vpslldq $12, %xmm13, %xmm13; vpsrldq $8, %xmm13, %xmm13; vpxor %xmm13, %xmm15, %xmm15; /* dw = subl(1) & subl(9), subr(1) ^= CAMELLIA_RL1(dw); */ vpand %xmm15, %xmm5, %xmm14; vpslld $1, %xmm14, %xmm11; vpsrld $31, %xmm14, %xmm14; vpaddd %xmm11, %xmm14, %xmm14; vpslldq $8, %xmm14, %xmm14; vpsrldq $12, %xmm14, %xmm14; vpxor %xmm14, %xmm15, %xmm15; vpxor %xmm15, %xmm6, %xmm6; vpxor %xmm15, %xmm8, %xmm8; vpxor %xmm15, %xmm9, %xmm9; /* subl(1) ^= subr(1) & ~subr(17); */ vpandn %xmm15, %xmm10, %xmm13; vpslldq $12, %xmm13, %xmm13; vpsrldq $8, %xmm13, %xmm13; vpxor %xmm13, %xmm15, %xmm15; /* dw = subl(1) & subl(17), subr(1) ^= CAMELLIA_RL1(dw); */ vpand %xmm15, %xmm10, %xmm14; vpslld $1, %xmm14, %xmm11; vpsrld $31, %xmm14, %xmm14; vpaddd %xmm11, %xmm14, %xmm14; vpslldq $8, %xmm14, %xmm14; vpsrldq $12, %xmm14, %xmm14; vpxor %xmm14, %xmm15, %xmm15; vpshufd $0x1b, KL128, KL128; vpshufd $0x1b, KA128, KA128; vpshufd $0x1b, %xmm3, %xmm3; vpshufd $0x1b, %xmm4, %xmm4; vpshufd $0x1b, %xmm5, %xmm5; vpshufd $0x1b, %xmm6, %xmm6; vpshufd $0x1b, %xmm7, %xmm7; 
vpshufd $0x1b, %xmm8, %xmm8; vpshufd $0x1b, %xmm9, %xmm9; vpshufd $0x1b, %xmm10, %xmm10; vmovdqu KL128, cmll_sub(0, CTX); vpshufd $0x1b, KL128, KL128; vmovdqu KA128, cmll_sub(2, CTX); vmovdqu %xmm3, cmll_sub(4, CTX); vmovdqu %xmm4, cmll_sub(6, CTX); vmovdqu %xmm5, cmll_sub(8, CTX); vmovdqu %xmm6, cmll_sub(10, CTX); vpsrldq $8, %xmm8, %xmm8; vmovq %xmm7, cmll_sub(12, CTX); vmovq %xmm8, cmll_sub(13, CTX); vmovdqu %xmm9, cmll_sub(14, CTX); vmovdqu %xmm10, cmll_sub(16, CTX); vmovdqu cmll_sub(24, CTX), KA128; vec_ror128(KL128, %xmm3, 128 - 94, %xmm7); vec_ror128(KA128, %xmm4, 128 - 94, %xmm7); vec_ror128(KL128, %xmm5, 128 - 111, %xmm7); vec_ror128(KA128, %xmm6, 128 - 111, %xmm7); vpxor %xmm15, %xmm3, %xmm3; vpxor %xmm15, %xmm4, %xmm4; vpxor %xmm15, %xmm5, %xmm5; vpslldq $8, %xmm15, %xmm15; vpxor %xmm15, %xmm6, %xmm6; /* absorb kw4 to other subkeys */ vpslldq $8, %xmm6, %xmm15; vpxor %xmm15, %xmm5, %xmm5; vpxor %xmm15, %xmm4, %xmm4; vpxor %xmm15, %xmm3, %xmm3; /* subl(25) ^= subr(25) & ~subr(16); */ vpshufd $0x1b, cmll_sub(16, CTX), %xmm10; vpandn %xmm15, %xmm10, %xmm13; vpslldq $4, %xmm13, %xmm13; vpxor %xmm13, %xmm15, %xmm15; /* dw = subl(25) & subl(16), subr(25) ^= CAMELLIA_RL1(dw); */ vpand %xmm15, %xmm10, %xmm14; vpslld $1, %xmm14, %xmm11; vpsrld $31, %xmm14, %xmm14; vpaddd %xmm11, %xmm14, %xmm14; vpsrldq $12, %xmm14, %xmm14; vpslldq $8, %xmm14, %xmm14; vpxor %xmm14, %xmm15, %xmm15; vpshufd $0x1b, %xmm3, %xmm3; vpshufd $0x1b, %xmm4, %xmm4; vpshufd $0x1b, %xmm5, %xmm5; vpshufd $0x1b, %xmm6, %xmm6; vmovdqu %xmm3, cmll_sub(18, CTX); vmovdqu %xmm4, cmll_sub(20, CTX); vmovdqu %xmm5, cmll_sub(22, CTX); vmovdqu %xmm6, cmll_sub(24, CTX); vpshufd $0x1b, cmll_sub(14, CTX), %xmm3; vpshufd $0x1b, cmll_sub(12, CTX), %xmm4; vpshufd $0x1b, cmll_sub(10, CTX), %xmm5; vpshufd $0x1b, cmll_sub(8, CTX), %xmm6; vpxor %xmm15, %xmm3, %xmm3; vpxor %xmm15, %xmm4, %xmm4; vpxor %xmm15, %xmm5, %xmm5; /* subl(25) ^= subr(25) & ~subr(8); */ vpandn %xmm15, %xmm6, %xmm13; vpslldq $4, %xmm13, %xmm13; vpxor %xmm13, %xmm15, %xmm15; /* dw = subl(25) & subl(8), subr(25) ^= CAMELLIA_RL1(dw); */ vpand %xmm15, %xmm6, %xmm14; vpslld $1, %xmm14, %xmm11; vpsrld $31, %xmm14, %xmm14; vpaddd %xmm11, %xmm14, %xmm14; vpsrldq $12, %xmm14, %xmm14; vpslldq $8, %xmm14, %xmm14; vpxor %xmm14, %xmm15, %xmm15; vpshufd $0x1b, %xmm3, %xmm3; vpshufd $0x1b, %xmm4, %xmm4; vpshufd $0x1b, %xmm5, %xmm5; vmovdqu %xmm3, cmll_sub(14, CTX); vmovdqu %xmm4, cmll_sub(12, CTX); vmovdqu %xmm5, cmll_sub(10, CTX); vpshufd $0x1b, cmll_sub(6, CTX), %xmm6; vpshufd $0x1b, cmll_sub(4, CTX), %xmm4; vpshufd $0x1b, cmll_sub(2, CTX), %xmm2; vpshufd $0x1b, cmll_sub(0, CTX), %xmm0; vpxor %xmm15, %xmm6, %xmm6; vpxor %xmm15, %xmm4, %xmm4; vpxor %xmm15, %xmm2, %xmm2; vpxor %xmm15, %xmm0, %xmm0; vpshufd $0x1b, %xmm6, %xmm6; vpshufd $0x1b, %xmm4, %xmm4; vpshufd $0x1b, %xmm2, %xmm2; vpshufd $0x1b, %xmm0, %xmm0; vpsrldq $8, %xmm2, %xmm3; vpsrldq $8, %xmm4, %xmm5; vpsrldq $8, %xmm6, %xmm7; /* * key XOR is end of F-function. 
*/ vpxor %xmm2, %xmm0, %xmm0; vpxor %xmm4, %xmm2, %xmm2; vmovq %xmm0, cmll_sub(0, CTX); vmovq %xmm3, cmll_sub(2, CTX); vpxor %xmm5, %xmm3, %xmm3; vpxor %xmm6, %xmm4, %xmm4; vpxor %xmm7, %xmm5, %xmm5; vmovq %xmm2, cmll_sub(3, CTX); vmovq %xmm3, cmll_sub(4, CTX); vmovq %xmm4, cmll_sub(5, CTX); vmovq %xmm5, cmll_sub(6, CTX); vmovq cmll_sub(7, CTX), %xmm7; vmovq cmll_sub(8, CTX), %xmm8; vmovq cmll_sub(9, CTX), %xmm9; vmovq cmll_sub(10, CTX), %xmm10; /* tl = subl(10) ^ (subr(10) & ~subr(8)); */ vpandn %xmm10, %xmm8, %xmm15; vpsrldq $4, %xmm15, %xmm15; vpxor %xmm15, %xmm10, %xmm0; /* dw = tl & subl(8), tr = subr(10) ^ CAMELLIA_RL1(dw); */ vpand %xmm8, %xmm0, %xmm15; vpslld $1, %xmm15, %xmm14; vpsrld $31, %xmm15, %xmm15; vpaddd %xmm14, %xmm15, %xmm15; vpslldq $12, %xmm15, %xmm15; vpsrldq $8, %xmm15, %xmm15; vpxor %xmm15, %xmm0, %xmm0; vpxor %xmm0, %xmm6, %xmm6; vmovq %xmm6, cmll_sub(7, CTX); vmovq cmll_sub(11, CTX), %xmm11; vmovq cmll_sub(12, CTX), %xmm12; vmovq cmll_sub(13, CTX), %xmm13; vmovq cmll_sub(14, CTX), %xmm14; vmovq cmll_sub(15, CTX), %xmm15; /* tl = subl(7) ^ (subr(7) & ~subr(9)); */ vpandn %xmm7, %xmm9, %xmm1; vpsrldq $4, %xmm1, %xmm1; vpxor %xmm1, %xmm7, %xmm0; /* dw = tl & subl(9), tr = subr(7) ^ CAMELLIA_RL1(dw); */ vpand %xmm9, %xmm0, %xmm1; vpslld $1, %xmm1, %xmm2; vpsrld $31, %xmm1, %xmm1; vpaddd %xmm2, %xmm1, %xmm1; vpslldq $12, %xmm1, %xmm1; vpsrldq $8, %xmm1, %xmm1; vpxor %xmm1, %xmm0, %xmm0; vpxor %xmm11, %xmm0, %xmm0; vpxor %xmm12, %xmm10, %xmm10; vpxor %xmm13, %xmm11, %xmm11; vpxor %xmm14, %xmm12, %xmm12; vpxor %xmm15, %xmm13, %xmm13; vmovq %xmm0, cmll_sub(10, CTX); vmovq %xmm10, cmll_sub(11, CTX); vmovq %xmm11, cmll_sub(12, CTX); vmovq %xmm12, cmll_sub(13, CTX); vmovq %xmm13, cmll_sub(14, CTX); vmovq cmll_sub(16, CTX), %xmm6; vmovq cmll_sub(17, CTX), %xmm7; vmovq cmll_sub(18, CTX), %xmm8; vmovq cmll_sub(19, CTX), %xmm9; vmovq cmll_sub(20, CTX), %xmm10; /* tl = subl(18) ^ (subr(18) & ~subr(16)); */ vpandn %xmm8, %xmm6, %xmm1; vpsrldq $4, %xmm1, %xmm1; vpxor %xmm1, %xmm8, %xmm0; /* dw = tl & subl(16), tr = subr(18) ^ CAMELLIA_RL1(dw); */ vpand %xmm6, %xmm0, %xmm1; vpslld $1, %xmm1, %xmm2; vpsrld $31, %xmm1, %xmm1; vpaddd %xmm2, %xmm1, %xmm1; vpslldq $12, %xmm1, %xmm1; vpsrldq $8, %xmm1, %xmm1; vpxor %xmm1, %xmm0, %xmm0; vpxor %xmm14, %xmm0, %xmm0; vmovq %xmm0, cmll_sub(15, CTX); /* tl = subl(15) ^ (subr(15) & ~subr(17)); */ vpandn %xmm15, %xmm7, %xmm1; vpsrldq $4, %xmm1, %xmm1; vpxor %xmm1, %xmm15, %xmm0; /* dw = tl & subl(17), tr = subr(15) ^ CAMELLIA_RL1(dw); */ vpand %xmm7, %xmm0, %xmm1; vpslld $1, %xmm1, %xmm2; vpsrld $31, %xmm1, %xmm1; vpaddd %xmm2, %xmm1, %xmm1; vpslldq $12, %xmm1, %xmm1; vpsrldq $8, %xmm1, %xmm1; vpxor %xmm1, %xmm0, %xmm0; vmovq cmll_sub(21, CTX), %xmm1; vmovq cmll_sub(22, CTX), %xmm2; vmovq cmll_sub(23, CTX), %xmm3; vmovq cmll_sub(24, CTX), %xmm4; vpxor %xmm9, %xmm0, %xmm0; vpxor %xmm10, %xmm8, %xmm8; vpxor %xmm1, %xmm9, %xmm9; vpxor %xmm2, %xmm10, %xmm10; vpxor %xmm3, %xmm1, %xmm1; vpxor %xmm4, %xmm3, %xmm3; vmovq %xmm0, cmll_sub(18, CTX); vmovq %xmm8, cmll_sub(19, CTX); vmovq %xmm9, cmll_sub(20, CTX); vmovq %xmm10, cmll_sub(21, CTX); vmovq %xmm1, cmll_sub(22, CTX); vmovq %xmm2, cmll_sub(23, CTX); vmovq %xmm3, cmll_sub(24, CTX); /* kw2 and kw4 are unused now. 
*/ movq $0, cmll_sub(1, CTX); movq $0, cmll_sub(25, CTX); vzeroall; ret; ELF(.size __camellia_avx_setup128,.-__camellia_avx_setup128;) .align 8 ELF(.type __camellia_avx_setup256,@function;) __camellia_avx_setup256: /* input: * %rdi: ctx, CTX; subkey storage at key_table(CTX) * %xmm0 & %xmm1: key */ #define KL128 %xmm0 #define KR128 %xmm1 #define KA128 %xmm2 #define KB128 %xmm3 vpshufb .Lbswap128_mask RIP, KL128, KL128; vpshufb .Lbswap128_mask RIP, KR128, KR128; vmovdqa .Linv_shift_row_and_unpcklbw RIP, %xmm11; vmovq .Lsbox4_input_mask RIP, %xmm12; vbroadcastss .L0f0f0f0f RIP, %xmm13; vmovdqa .Lpre_tf_lo_s1 RIP, %xmm14; vmovdqa .Lpre_tf_hi_s1 RIP, %xmm15; /* * Generate KA */ vpxor KL128, KR128, %xmm3; vpsrldq $8, KR128, %xmm6; vpsrldq $8, %xmm3, %xmm2; vpslldq $8, %xmm3, %xmm3; vpsrldq $8, %xmm3, %xmm3; camellia_f(%xmm2, %xmm4, %xmm5, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, .Lsigma1 RIP); vpxor %xmm4, %xmm3, %xmm3; camellia_f(%xmm3, %xmm2, %xmm5, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, .Lsigma2 RIP); vpxor %xmm6, %xmm2, %xmm2; camellia_f(%xmm2, %xmm3, %xmm5, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, .Lsigma3 RIP); vpxor %xmm4, %xmm3, %xmm3; vpxor KR128, %xmm3, %xmm3; camellia_f(%xmm3, %xmm4, %xmm5, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, .Lsigma4 RIP); vpslldq $8, %xmm3, %xmm3; vpxor %xmm4, %xmm2, %xmm2; vpsrldq $8, %xmm3, %xmm3; vpslldq $8, %xmm2, KA128; vpor %xmm3, KA128, KA128; /* * Generate KB */ vpxor KA128, KR128, %xmm3; vpsrldq $8, %xmm3, %xmm4; vpslldq $8, %xmm3, %xmm3; vpsrldq $8, %xmm3, %xmm3; camellia_f(%xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, .Lsigma5 RIP); vpxor %xmm5, %xmm3, %xmm3; camellia_f(%xmm3, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, .Lsigma6 RIP); vpslldq $8, %xmm3, %xmm3; vpxor %xmm5, %xmm4, %xmm4; vpsrldq $8, %xmm3, %xmm3; vpslldq $8, %xmm4, %xmm4; vpor %xmm3, %xmm4, KB128; /* * Generate subkeys */ vmovdqu KB128, cmll_sub(32, CTX); vec_rol128(KR128, %xmm4, 15, %xmm15); vec_rol128(KA128, %xmm5, 15, %xmm15); vec_rol128(KR128, %xmm6, 30, %xmm15); vec_rol128(KB128, %xmm7, 30, %xmm15); vec_rol128(KL128, %xmm8, 45, %xmm15); vec_rol128(KA128, %xmm9, 45, %xmm15); vec_rol128(KL128, %xmm10, 60, %xmm15); vec_rol128(KR128, %xmm11, 60, %xmm15); vec_rol128(KB128, %xmm12, 60, %xmm15); /* absorb kw2 to other subkeys */ vpslldq $8, KL128, %xmm15; vpsrldq $8, %xmm15, %xmm15; vpxor %xmm15, KB128, KB128; vpxor %xmm15, %xmm4, %xmm4; vpxor %xmm15, %xmm5, %xmm5; /* subl(1) ^= subr(1) & ~subr(9); */ vpandn %xmm15, %xmm6, %xmm13; vpslldq $12, %xmm13, %xmm13; vpsrldq $8, %xmm13, %xmm13; vpxor %xmm13, %xmm15, %xmm15; /* dw = subl(1) & subl(9), subr(1) ^= CAMELLIA_RL1(dw); */ vpand %xmm15, %xmm6, %xmm14; vpslld $1, %xmm14, %xmm13; vpsrld $31, %xmm14, %xmm14; vpaddd %xmm13, %xmm14, %xmm14; vpslldq $8, %xmm14, %xmm14; vpsrldq $12, %xmm14, %xmm14; vpxor %xmm14, %xmm15, %xmm15; vpxor %xmm15, %xmm7, %xmm7; vpxor %xmm15, %xmm8, %xmm8; vpxor %xmm15, %xmm9, %xmm9; vpshufd $0x1b, KL128, KL128; vpshufd $0x1b, KB128, KB128; vpshufd $0x1b, %xmm4, %xmm4; vpshufd $0x1b, %xmm5, %xmm5; vpshufd $0x1b, %xmm6, %xmm6; vpshufd $0x1b, %xmm7, %xmm7; vpshufd $0x1b, %xmm8, %xmm8; vpshufd $0x1b, %xmm9, %xmm9; vmovdqu KL128, cmll_sub(0, CTX); vpshufd $0x1b, KL128, KL128; vmovdqu KB128, cmll_sub(2, CTX); vmovdqu %xmm4, cmll_sub(4, CTX); vmovdqu %xmm5, cmll_sub(6, CTX); vmovdqu %xmm6, cmll_sub(8, CTX); vmovdqu %xmm7, 
cmll_sub(10, CTX); vmovdqu %xmm8, cmll_sub(12, CTX); vmovdqu %xmm9, cmll_sub(14, CTX); vmovdqu cmll_sub(32, CTX), KB128; /* subl(1) ^= subr(1) & ~subr(17); */ vpandn %xmm15, %xmm10, %xmm13; vpslldq $12, %xmm13, %xmm13; vpsrldq $8, %xmm13, %xmm13; vpxor %xmm13, %xmm15, %xmm15; /* dw = subl(1) & subl(17), subr(1) ^= CAMELLIA_RL1(dw); */ vpand %xmm15, %xmm10, %xmm14; vpslld $1, %xmm14, %xmm13; vpsrld $31, %xmm14, %xmm14; vpaddd %xmm13, %xmm14, %xmm14; vpslldq $8, %xmm14, %xmm14; vpsrldq $12, %xmm14, %xmm14; vpxor %xmm14, %xmm15, %xmm15; vpxor %xmm15, %xmm11, %xmm11; vpxor %xmm15, %xmm12, %xmm12; vec_ror128(KL128, %xmm4, 128-77, %xmm14); vec_ror128(KA128, %xmm5, 128-77, %xmm14); vec_ror128(KR128, %xmm6, 128-94, %xmm14); vec_ror128(KA128, %xmm7, 128-94, %xmm14); vec_ror128(KL128, %xmm8, 128-111, %xmm14); vec_ror128(KB128, %xmm9, 128-111, %xmm14); vpxor %xmm15, %xmm4, %xmm4; vpshufd $0x1b, %xmm10, %xmm10; vpshufd $0x1b, %xmm11, %xmm11; vpshufd $0x1b, %xmm12, %xmm12; vpshufd $0x1b, %xmm4, %xmm4; vmovdqu %xmm10, cmll_sub(16, CTX); vmovdqu %xmm11, cmll_sub(18, CTX); vmovdqu %xmm12, cmll_sub(20, CTX); vmovdqu %xmm4, cmll_sub(22, CTX); /* subl(1) ^= subr(1) & ~subr(25); */ vpandn %xmm15, %xmm5, %xmm13; vpslldq $12, %xmm13, %xmm13; vpsrldq $8, %xmm13, %xmm13; vpxor %xmm13, %xmm15, %xmm15; /* dw = subl(1) & subl(25), subr(1) ^= CAMELLIA_RL1(dw); */ vpand %xmm15, %xmm5, %xmm14; vpslld $1, %xmm14, %xmm13; vpsrld $31, %xmm14, %xmm14; vpaddd %xmm13, %xmm14, %xmm14; vpslldq $8, %xmm14, %xmm14; vpsrldq $12, %xmm14, %xmm14; vpxor %xmm14, %xmm15, %xmm15; vpxor %xmm15, %xmm6, %xmm6; vpxor %xmm15, %xmm7, %xmm7; vpxor %xmm15, %xmm8, %xmm8; vpslldq $8, %xmm15, %xmm15; vpxor %xmm15, %xmm9, %xmm9; /* absorb kw4 to other subkeys */ vpslldq $8, %xmm9, %xmm15; vpxor %xmm15, %xmm8, %xmm8; vpxor %xmm15, %xmm7, %xmm7; vpxor %xmm15, %xmm6, %xmm6; /* subl(33) ^= subr(33) & ~subr(24); */ vpandn %xmm15, %xmm5, %xmm14; vpslldq $4, %xmm14, %xmm14; vpxor %xmm14, %xmm15, %xmm15; /* dw = subl(33) & subl(24), subr(33) ^= CAMELLIA_RL1(dw); */ vpand %xmm15, %xmm5, %xmm14; vpslld $1, %xmm14, %xmm13; vpsrld $31, %xmm14, %xmm14; vpaddd %xmm13, %xmm14, %xmm14; vpsrldq $12, %xmm14, %xmm14; vpslldq $8, %xmm14, %xmm14; vpxor %xmm14, %xmm15, %xmm15; vpshufd $0x1b, %xmm5, %xmm5; vpshufd $0x1b, %xmm6, %xmm6; vpshufd $0x1b, %xmm7, %xmm7; vpshufd $0x1b, %xmm8, %xmm8; vpshufd $0x1b, %xmm9, %xmm9; vmovdqu %xmm5, cmll_sub(24, CTX); vmovdqu %xmm6, cmll_sub(26, CTX); vmovdqu %xmm7, cmll_sub(28, CTX); vmovdqu %xmm8, cmll_sub(30, CTX); vmovdqu %xmm9, cmll_sub(32, CTX); vpshufd $0x1b, cmll_sub(22, CTX), %xmm0; vpshufd $0x1b, cmll_sub(20, CTX), %xmm1; vpshufd $0x1b, cmll_sub(18, CTX), %xmm2; vpshufd $0x1b, cmll_sub(16, CTX), %xmm3; vpshufd $0x1b, cmll_sub(14, CTX), %xmm4; vpshufd $0x1b, cmll_sub(12, CTX), %xmm5; vpshufd $0x1b, cmll_sub(10, CTX), %xmm6; vpshufd $0x1b, cmll_sub(8, CTX), %xmm7; vpxor %xmm15, %xmm0, %xmm0; vpxor %xmm15, %xmm1, %xmm1; vpxor %xmm15, %xmm2, %xmm2; /* subl(33) ^= subr(33) & ~subr(24); */ vpandn %xmm15, %xmm3, %xmm14; vpslldq $4, %xmm14, %xmm14; vpxor %xmm14, %xmm15, %xmm15; /* dw = subl(33) & subl(24), subr(33) ^= CAMELLIA_RL1(dw); */ vpand %xmm15, %xmm3, %xmm14; vpslld $1, %xmm14, %xmm13; vpsrld $31, %xmm14, %xmm14; vpaddd %xmm13, %xmm14, %xmm14; vpsrldq $12, %xmm14, %xmm14; vpslldq $8, %xmm14, %xmm14; vpxor %xmm14, %xmm15, %xmm15; vpxor %xmm15, %xmm4, %xmm4; vpxor %xmm15, %xmm5, %xmm5; vpxor %xmm15, %xmm6, %xmm6; vpshufd $0x1b, %xmm0, %xmm0; vpshufd $0x1b, %xmm1, %xmm1; vpshufd $0x1b, %xmm2, %xmm2; vpshufd $0x1b, %xmm4, %xmm4; 
vpshufd $0x1b, %xmm5, %xmm5; vpshufd $0x1b, %xmm6, %xmm6; vmovdqu %xmm0, cmll_sub(22, CTX); vmovdqu %xmm1, cmll_sub(20, CTX); vmovdqu %xmm2, cmll_sub(18, CTX); vmovdqu %xmm4, cmll_sub(14, CTX); vmovdqu %xmm5, cmll_sub(12, CTX); vmovdqu %xmm6, cmll_sub(10, CTX); vpshufd $0x1b, cmll_sub(6, CTX), %xmm6; vpshufd $0x1b, cmll_sub(4, CTX), %xmm4; vpshufd $0x1b, cmll_sub(2, CTX), %xmm2; vpshufd $0x1b, cmll_sub(0, CTX), %xmm0; /* subl(33) ^= subr(33) & ~subr(24); */ vpandn %xmm15, %xmm7, %xmm14; vpslldq $4, %xmm14, %xmm14; vpxor %xmm14, %xmm15, %xmm15; /* dw = subl(33) & subl(24), subr(33) ^= CAMELLIA_RL1(dw); */ vpand %xmm15, %xmm7, %xmm14; vpslld $1, %xmm14, %xmm13; vpsrld $31, %xmm14, %xmm14; vpaddd %xmm13, %xmm14, %xmm14; vpsrldq $12, %xmm14, %xmm14; vpslldq $8, %xmm14, %xmm14; vpxor %xmm14, %xmm15, %xmm15; vpxor %xmm15, %xmm6, %xmm6; vpxor %xmm15, %xmm4, %xmm4; vpxor %xmm15, %xmm2, %xmm2; vpxor %xmm15, %xmm0, %xmm0; vpshufd $0x1b, %xmm6, %xmm6; vpshufd $0x1b, %xmm4, %xmm4; vpshufd $0x1b, %xmm2, %xmm2; vpshufd $0x1b, %xmm0, %xmm0; vpsrldq $8, %xmm2, %xmm3; vpsrldq $8, %xmm4, %xmm5; vpsrldq $8, %xmm6, %xmm7; /* * key XOR is end of F-function. */ vpxor %xmm2, %xmm0, %xmm0; vpxor %xmm4, %xmm2, %xmm2; vmovq %xmm0, cmll_sub(0, CTX); vmovq %xmm3, cmll_sub(2, CTX); vpxor %xmm5, %xmm3, %xmm3; vpxor %xmm6, %xmm4, %xmm4; vpxor %xmm7, %xmm5, %xmm5; vmovq %xmm2, cmll_sub(3, CTX); vmovq %xmm3, cmll_sub(4, CTX); vmovq %xmm4, cmll_sub(5, CTX); vmovq %xmm5, cmll_sub(6, CTX); vmovq cmll_sub(7, CTX), %xmm7; vmovq cmll_sub(8, CTX), %xmm8; vmovq cmll_sub(9, CTX), %xmm9; vmovq cmll_sub(10, CTX), %xmm10; /* tl = subl(10) ^ (subr(10) & ~subr(8)); */ vpandn %xmm10, %xmm8, %xmm15; vpsrldq $4, %xmm15, %xmm15; vpxor %xmm15, %xmm10, %xmm0; /* dw = tl & subl(8), tr = subr(10) ^ CAMELLIA_RL1(dw); */ vpand %xmm8, %xmm0, %xmm15; vpslld $1, %xmm15, %xmm14; vpsrld $31, %xmm15, %xmm15; vpaddd %xmm14, %xmm15, %xmm15; vpslldq $12, %xmm15, %xmm15; vpsrldq $8, %xmm15, %xmm15; vpxor %xmm15, %xmm0, %xmm0; vpxor %xmm0, %xmm6, %xmm6; vmovq %xmm6, cmll_sub(7, CTX); vmovq cmll_sub(11, CTX), %xmm11; vmovq cmll_sub(12, CTX), %xmm12; vmovq cmll_sub(13, CTX), %xmm13; vmovq cmll_sub(14, CTX), %xmm14; vmovq cmll_sub(15, CTX), %xmm15; /* tl = subl(7) ^ (subr(7) & ~subr(9)); */ vpandn %xmm7, %xmm9, %xmm1; vpsrldq $4, %xmm1, %xmm1; vpxor %xmm1, %xmm7, %xmm0; /* dw = tl & subl(9), tr = subr(7) ^ CAMELLIA_RL1(dw); */ vpand %xmm9, %xmm0, %xmm1; vpslld $1, %xmm1, %xmm2; vpsrld $31, %xmm1, %xmm1; vpaddd %xmm2, %xmm1, %xmm1; vpslldq $12, %xmm1, %xmm1; vpsrldq $8, %xmm1, %xmm1; vpxor %xmm1, %xmm0, %xmm0; vpxor %xmm11, %xmm0, %xmm0; vpxor %xmm12, %xmm10, %xmm10; vpxor %xmm13, %xmm11, %xmm11; vpxor %xmm14, %xmm12, %xmm12; vpxor %xmm15, %xmm13, %xmm13; vmovq %xmm0, cmll_sub(10, CTX); vmovq %xmm10, cmll_sub(11, CTX); vmovq %xmm11, cmll_sub(12, CTX); vmovq %xmm12, cmll_sub(13, CTX); vmovq %xmm13, cmll_sub(14, CTX); vmovq cmll_sub(16, CTX), %xmm6; vmovq cmll_sub(17, CTX), %xmm7; vmovq cmll_sub(18, CTX), %xmm8; vmovq cmll_sub(19, CTX), %xmm9; vmovq cmll_sub(20, CTX), %xmm10; /* tl = subl(18) ^ (subr(18) & ~subr(16)); */ vpandn %xmm8, %xmm6, %xmm1; vpsrldq $4, %xmm1, %xmm1; vpxor %xmm1, %xmm8, %xmm0; /* dw = tl & subl(16), tr = subr(18) ^ CAMELLIA_RL1(dw); */ vpand %xmm6, %xmm0, %xmm1; vpslld $1, %xmm1, %xmm2; vpsrld $31, %xmm1, %xmm1; vpaddd %xmm2, %xmm1, %xmm1; vpslldq $12, %xmm1, %xmm1; vpsrldq $8, %xmm1, %xmm1; vpxor %xmm1, %xmm0, %xmm0; vpxor %xmm14, %xmm0, %xmm0; vmovq %xmm0, cmll_sub(15, CTX); /* tl = subl(15) ^ (subr(15) & ~subr(17)); */ vpandn %xmm15, 
%xmm7, %xmm1; vpsrldq $4, %xmm1, %xmm1; vpxor %xmm1, %xmm15, %xmm0; /* dw = tl & subl(17), tr = subr(15) ^ CAMELLIA_RL1(dw); */ vpand %xmm7, %xmm0, %xmm1; vpslld $1, %xmm1, %xmm2; vpsrld $31, %xmm1, %xmm1; vpaddd %xmm2, %xmm1, %xmm1; vpslldq $12, %xmm1, %xmm1; vpsrldq $8, %xmm1, %xmm1; vpxor %xmm1, %xmm0, %xmm0; vmovq cmll_sub(21, CTX), %xmm1; vmovq cmll_sub(22, CTX), %xmm2; vmovq cmll_sub(23, CTX), %xmm3; vmovq cmll_sub(24, CTX), %xmm4; vpxor %xmm9, %xmm0, %xmm0; vpxor %xmm10, %xmm8, %xmm8; vpxor %xmm1, %xmm9, %xmm9; vpxor %xmm2, %xmm10, %xmm10; vpxor %xmm3, %xmm1, %xmm1; vmovq %xmm0, cmll_sub(18, CTX); vmovq %xmm8, cmll_sub(19, CTX); vmovq %xmm9, cmll_sub(20, CTX); vmovq %xmm10, cmll_sub(21, CTX); vmovq %xmm1, cmll_sub(22, CTX); vmovq cmll_sub(25, CTX), %xmm5; vmovq cmll_sub(26, CTX), %xmm6; vmovq cmll_sub(27, CTX), %xmm7; vmovq cmll_sub(28, CTX), %xmm8; vmovq cmll_sub(29, CTX), %xmm9; vmovq cmll_sub(30, CTX), %xmm10; vmovq cmll_sub(31, CTX), %xmm11; vmovq cmll_sub(32, CTX), %xmm12; /* tl = subl(26) ^ (subr(26) & ~subr(24)); */ vpandn %xmm6, %xmm4, %xmm15; vpsrldq $4, %xmm15, %xmm15; vpxor %xmm15, %xmm6, %xmm0; /* dw = tl & subl(26), tr = subr(24) ^ CAMELLIA_RL1(dw); */ vpand %xmm4, %xmm0, %xmm15; vpslld $1, %xmm15, %xmm14; vpsrld $31, %xmm15, %xmm15; vpaddd %xmm14, %xmm15, %xmm15; vpslldq $12, %xmm15, %xmm15; vpsrldq $8, %xmm15, %xmm15; vpxor %xmm15, %xmm0, %xmm0; vpxor %xmm0, %xmm2, %xmm2; vmovq %xmm2, cmll_sub(23, CTX); /* tl = subl(23) ^ (subr(23) & ~subr(25)); */ vpandn %xmm3, %xmm5, %xmm15; vpsrldq $4, %xmm15, %xmm15; vpxor %xmm15, %xmm3, %xmm0; /* dw = tl & subl(26), tr = subr(24) ^ CAMELLIA_RL1(dw); */ vpand %xmm5, %xmm0, %xmm15; vpslld $1, %xmm15, %xmm14; vpsrld $31, %xmm15, %xmm15; vpaddd %xmm14, %xmm15, %xmm15; vpslldq $12, %xmm15, %xmm15; vpsrldq $8, %xmm15, %xmm15; vpxor %xmm15, %xmm0, %xmm0; vpxor %xmm7, %xmm0, %xmm0; vpxor %xmm8, %xmm6, %xmm6; vpxor %xmm9, %xmm7, %xmm7; vpxor %xmm10, %xmm8, %xmm8; vpxor %xmm11, %xmm9, %xmm9; vpxor %xmm12, %xmm11, %xmm11; vmovq %xmm0, cmll_sub(26, CTX); vmovq %xmm6, cmll_sub(27, CTX); vmovq %xmm7, cmll_sub(28, CTX); vmovq %xmm8, cmll_sub(29, CTX); vmovq %xmm9, cmll_sub(30, CTX); vmovq %xmm10, cmll_sub(31, CTX); vmovq %xmm11, cmll_sub(32, CTX); /* kw2 and kw4 are unused now. */ movq $0, cmll_sub(1, CTX); movq $0, cmll_sub(33, CTX); vzeroall; ret; ELF(.size __camellia_avx_setup256,.-__camellia_avx_setup256;) .align 8 .globl _gcry_camellia_aesni_avx_keygen ELF(.type _gcry_camellia_aesni_avx_keygen,@function;) _gcry_camellia_aesni_avx_keygen: /* input: * %rdi: ctx, CTX * %rsi: key * %rdx: keylen */ vzeroupper; vmovdqu (%rsi), %xmm0; cmpl $24, %edx; jb __camellia_avx_setup128; je .Lprepare_key192; vmovdqu 16(%rsi), %xmm1; jmp __camellia_avx_setup256; .Lprepare_key192: vpcmpeqd %xmm2, %xmm2, %xmm2; vmovq 16(%rsi), %xmm1; vpxor %xmm1, %xmm2, %xmm2; vpslldq $8, %xmm2, %xmm2; vpor %xmm2, %xmm1, %xmm1; jmp __camellia_avx_setup256; ELF(.size _gcry_camellia_aesni_avx_keygen,.-_gcry_camellia_aesni_avx_keygen;) #endif /*defined(ENABLE_AESNI_SUPPORT) && defined(ENABLE_AVX_SUPPORT)*/ #endif /*__x86_64*/ diff --git a/cipher/camellia-aesni-avx2-amd64.S b/cipher/camellia-aesni-avx2-amd64.S index 26381df0..897e4aee 100644 --- a/cipher/camellia-aesni-avx2-amd64.S +++ b/cipher/camellia-aesni-avx2-amd64.S @@ -1,1763 +1,1762 @@ /* camellia-avx2-aesni-amd64.S - AES-NI/AVX2 implementation of Camellia cipher * * Copyright (C) 2013-2015 Jussi Kivilinna * * This file is part of Libgcrypt. 
* * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . */ #ifdef __x86_64 #include #if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && \ defined(ENABLE_AESNI_SUPPORT) && defined(ENABLE_AVX2_SUPPORT) #ifdef __PIC__ # define RIP (%rip) #else # define RIP #endif #ifdef HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS # define ELF(...) __VA_ARGS__ #else # define ELF(...) /*_*/ #endif #define CAMELLIA_TABLE_BYTE_LEN 272 /* struct CAMELLIA_context: */ #define key_table 0 #define key_bitlength CAMELLIA_TABLE_BYTE_LEN /* register macros */ #define CTX %rdi #define RIO %r8 /********************************************************************** helper macros **********************************************************************/ #define filter_8bit(x, lo_t, hi_t, mask4bit, tmp0) \ vpand x, mask4bit, tmp0; \ vpandn x, mask4bit, x; \ vpsrld $4, x, x; \ \ vpshufb tmp0, lo_t, tmp0; \ vpshufb x, hi_t, x; \ vpxor tmp0, x, x; #define ymm0_x xmm0 #define ymm1_x xmm1 #define ymm2_x xmm2 #define ymm3_x xmm3 #define ymm4_x xmm4 #define ymm5_x xmm5 #define ymm6_x xmm6 #define ymm7_x xmm7 #define ymm8_x xmm8 #define ymm9_x xmm9 #define ymm10_x xmm10 #define ymm11_x xmm11 #define ymm12_x xmm12 #define ymm13_x xmm13 #define ymm14_x xmm14 #define ymm15_x xmm15 /********************************************************************** 32-way camellia **********************************************************************/ /* * IN: * x0..x7: byte-sliced AB state * mem_cd: register pointer storing CD state * key: index for key material * OUT: * x0..x7: new byte-sliced CD state */ #define roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2, t3, t4, t5, t6, \ t7, mem_cd, key) \ /* \ * S-function with AES subbytes \ */ \ vbroadcasti128 .Linv_shift_row RIP, t4; \ vpbroadcastd .L0f0f0f0f RIP, t7; \ vbroadcasti128 .Lpre_tf_lo_s1 RIP, t5; \ vbroadcasti128 .Lpre_tf_hi_s1 RIP, t6; \ vbroadcasti128 .Lpre_tf_lo_s4 RIP, t2; \ vbroadcasti128 .Lpre_tf_hi_s4 RIP, t3; \ \ /* AES inverse shift rows */ \ vpshufb t4, x0, x0; \ vpshufb t4, x7, x7; \ vpshufb t4, x3, x3; \ vpshufb t4, x6, x6; \ vpshufb t4, x2, x2; \ vpshufb t4, x5, x5; \ vpshufb t4, x1, x1; \ vpshufb t4, x4, x4; \ \ /* prefilter sboxes 1, 2 and 3 */ \ /* prefilter sbox 4 */ \ filter_8bit(x0, t5, t6, t7, t4); \ filter_8bit(x7, t5, t6, t7, t4); \ vextracti128 $1, x0, t0##_x; \ vextracti128 $1, x7, t1##_x; \ filter_8bit(x3, t2, t3, t7, t4); \ filter_8bit(x6, t2, t3, t7, t4); \ vextracti128 $1, x3, t3##_x; \ vextracti128 $1, x6, t2##_x; \ filter_8bit(x2, t5, t6, t7, t4); \ filter_8bit(x5, t5, t6, t7, t4); \ filter_8bit(x1, t5, t6, t7, t4); \ filter_8bit(x4, t5, t6, t7, t4); \ \ vpxor t4##_x, t4##_x, t4##_x; \ \ /* AES subbytes + AES shift rows */ \ vextracti128 $1, x2, t6##_x; \ vextracti128 $1, x5, t5##_x; \ vaesenclast t4##_x, x0##_x, x0##_x; \ vaesenclast t4##_x, t0##_x, t0##_x; \ vaesenclast t4##_x, x7##_x, x7##_x; \ vaesenclast t4##_x, t1##_x, t1##_x; \ vaesenclast t4##_x, 
x3##_x, x3##_x; \ vaesenclast t4##_x, t3##_x, t3##_x; \ vaesenclast t4##_x, x6##_x, x6##_x; \ vaesenclast t4##_x, t2##_x, t2##_x; \ vinserti128 $1, t0##_x, x0, x0; \ vinserti128 $1, t1##_x, x7, x7; \ vinserti128 $1, t3##_x, x3, x3; \ vinserti128 $1, t2##_x, x6, x6; \ vextracti128 $1, x1, t3##_x; \ vextracti128 $1, x4, t2##_x; \ vbroadcasti128 .Lpost_tf_lo_s1 RIP, t0; \ vbroadcasti128 .Lpost_tf_hi_s1 RIP, t1; \ vaesenclast t4##_x, x2##_x, x2##_x; \ vaesenclast t4##_x, t6##_x, t6##_x; \ vaesenclast t4##_x, x5##_x, x5##_x; \ vaesenclast t4##_x, t5##_x, t5##_x; \ vaesenclast t4##_x, x1##_x, x1##_x; \ vaesenclast t4##_x, t3##_x, t3##_x; \ vaesenclast t4##_x, x4##_x, x4##_x; \ vaesenclast t4##_x, t2##_x, t2##_x; \ vinserti128 $1, t6##_x, x2, x2; \ vinserti128 $1, t5##_x, x5, x5; \ vinserti128 $1, t3##_x, x1, x1; \ vinserti128 $1, t2##_x, x4, x4; \ \ /* postfilter sboxes 1 and 4 */ \ vbroadcasti128 .Lpost_tf_lo_s3 RIP, t2; \ vbroadcasti128 .Lpost_tf_hi_s3 RIP, t3; \ filter_8bit(x0, t0, t1, t7, t4); \ filter_8bit(x7, t0, t1, t7, t4); \ filter_8bit(x3, t0, t1, t7, t6); \ filter_8bit(x6, t0, t1, t7, t6); \ \ /* postfilter sbox 3 */ \ vbroadcasti128 .Lpost_tf_lo_s2 RIP, t4; \ vbroadcasti128 .Lpost_tf_hi_s2 RIP, t5; \ filter_8bit(x2, t2, t3, t7, t6); \ filter_8bit(x5, t2, t3, t7, t6); \ \ vpbroadcastq key, t0; /* higher 64-bit duplicate ignored */ \ \ /* postfilter sbox 2 */ \ filter_8bit(x1, t4, t5, t7, t2); \ filter_8bit(x4, t4, t5, t7, t2); \ vpxor t7, t7, t7; \ \ vpsrldq $1, t0, t1; \ vpsrldq $2, t0, t2; \ vpshufb t7, t1, t1; \ vpsrldq $3, t0, t3; \ \ /* P-function */ \ vpxor x5, x0, x0; \ vpxor x6, x1, x1; \ vpxor x7, x2, x2; \ vpxor x4, x3, x3; \ \ vpshufb t7, t2, t2; \ vpsrldq $4, t0, t4; \ vpshufb t7, t3, t3; \ vpsrldq $5, t0, t5; \ vpshufb t7, t4, t4; \ \ vpxor x2, x4, x4; \ vpxor x3, x5, x5; \ vpxor x0, x6, x6; \ vpxor x1, x7, x7; \ \ vpsrldq $6, t0, t6; \ vpshufb t7, t5, t5; \ vpshufb t7, t6, t6; \ \ vpxor x7, x0, x0; \ vpxor x4, x1, x1; \ vpxor x5, x2, x2; \ vpxor x6, x3, x3; \ \ vpxor x3, x4, x4; \ vpxor x0, x5, x5; \ vpxor x1, x6, x6; \ vpxor x2, x7, x7; /* note: high and low parts swapped */ \ \ /* Add key material and result to CD (x becomes new CD) */ \ \ vpxor t6, x1, x1; \ vpxor 5 * 32(mem_cd), x1, x1; \ \ vpsrldq $7, t0, t6; \ vpshufb t7, t0, t0; \ vpshufb t7, t6, t7; \ \ vpxor t7, x0, x0; \ vpxor 4 * 32(mem_cd), x0, x0; \ \ vpxor t5, x2, x2; \ vpxor 6 * 32(mem_cd), x2, x2; \ \ vpxor t4, x3, x3; \ vpxor 7 * 32(mem_cd), x3, x3; \ \ vpxor t3, x4, x4; \ vpxor 0 * 32(mem_cd), x4, x4; \ \ vpxor t2, x5, x5; \ vpxor 1 * 32(mem_cd), x5, x5; \ \ vpxor t1, x6, x6; \ vpxor 2 * 32(mem_cd), x6, x6; \ \ vpxor t0, x7, x7; \ vpxor 3 * 32(mem_cd), x7, x7; /* * IN/OUT: * x0..x7: byte-sliced AB state preloaded * mem_ab: byte-sliced AB state in memory * mem_cb: byte-sliced CD state in memory */ #define two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, i, dir, store_ab) \ roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_cd, (key_table + (i) * 8)(CTX)); \ \ vmovdqu x0, 4 * 32(mem_cd); \ vmovdqu x1, 5 * 32(mem_cd); \ vmovdqu x2, 6 * 32(mem_cd); \ vmovdqu x3, 7 * 32(mem_cd); \ vmovdqu x4, 0 * 32(mem_cd); \ vmovdqu x5, 1 * 32(mem_cd); \ vmovdqu x6, 2 * 32(mem_cd); \ vmovdqu x7, 3 * 32(mem_cd); \ \ roundsm32(x4, x5, x6, x7, x0, x1, x2, x3, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, (key_table + ((i) + (dir)) * 8)(CTX)); \ \ store_ab(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab); #define dummy_store(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab) 
/* do nothing */ #define store_ab_state(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab) \ /* Store new AB state */ \ vmovdqu x4, 4 * 32(mem_ab); \ vmovdqu x5, 5 * 32(mem_ab); \ vmovdqu x6, 6 * 32(mem_ab); \ vmovdqu x7, 7 * 32(mem_ab); \ vmovdqu x0, 0 * 32(mem_ab); \ vmovdqu x1, 1 * 32(mem_ab); \ vmovdqu x2, 2 * 32(mem_ab); \ vmovdqu x3, 3 * 32(mem_ab); #define enc_rounds32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, i) \ two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 2, 1, store_ab_state); \ two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 4, 1, store_ab_state); \ two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 6, 1, dummy_store); #define dec_rounds32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, i) \ two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 7, -1, store_ab_state); \ two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 5, -1, store_ab_state); \ two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 3, -1, dummy_store); /* * IN: * v0..3: byte-sliced 32-bit integers * OUT: * v0..3: (IN <<< 1) */ #define rol32_1_32(v0, v1, v2, v3, t0, t1, t2, zero) \ vpcmpgtb v0, zero, t0; \ vpaddb v0, v0, v0; \ vpabsb t0, t0; \ \ vpcmpgtb v1, zero, t1; \ vpaddb v1, v1, v1; \ vpabsb t1, t1; \ \ vpcmpgtb v2, zero, t2; \ vpaddb v2, v2, v2; \ vpabsb t2, t2; \ \ vpor t0, v1, v1; \ \ vpcmpgtb v3, zero, t0; \ vpaddb v3, v3, v3; \ vpabsb t0, t0; \ \ vpor t1, v2, v2; \ vpor t2, v3, v3; \ vpor t0, v0, v0; /* * IN: * r: byte-sliced AB state in memory * l: byte-sliced CD state in memory * OUT: * x0..x7: new byte-sliced CD state */ #define fls32(l, l0, l1, l2, l3, l4, l5, l6, l7, r, t0, t1, t2, t3, tt0, \ tt1, tt2, tt3, kll, klr, krl, krr) \ /* \ * t0 = kll; \ * t0 &= ll; \ * lr ^= rol32(t0, 1); \ */ \ vpbroadcastd kll, t0; /* only lowest 32-bit used */ \ vpxor tt0, tt0, tt0; \ vpshufb tt0, t0, t3; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t2; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t1; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t0; \ \ vpand l0, t0, t0; \ vpand l1, t1, t1; \ vpand l2, t2, t2; \ vpand l3, t3, t3; \ \ rol32_1_32(t3, t2, t1, t0, tt1, tt2, tt3, tt0); \ \ vpxor l4, t0, l4; \ vpbroadcastd krr, t0; /* only lowest 32-bit used */ \ vmovdqu l4, 4 * 32(l); \ vpxor l5, t1, l5; \ vmovdqu l5, 5 * 32(l); \ vpxor l6, t2, l6; \ vmovdqu l6, 6 * 32(l); \ vpxor l7, t3, l7; \ vmovdqu l7, 7 * 32(l); \ \ /* \ * t2 = krr; \ * t2 |= rr; \ * rl ^= t2; \ */ \ \ vpshufb tt0, t0, t3; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t2; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t1; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t0; \ \ vpor 4 * 32(r), t0, t0; \ vpor 5 * 32(r), t1, t1; \ vpor 6 * 32(r), t2, t2; \ vpor 7 * 32(r), t3, t3; \ \ vpxor 0 * 32(r), t0, t0; \ vpxor 1 * 32(r), t1, t1; \ vpxor 2 * 32(r), t2, t2; \ vpxor 3 * 32(r), t3, t3; \ vmovdqu t0, 0 * 32(r); \ vpbroadcastd krl, t0; /* only lowest 32-bit used */ \ vmovdqu t1, 1 * 32(r); \ vmovdqu t2, 2 * 32(r); \ vmovdqu t3, 3 * 32(r); \ \ /* \ * t2 = krl; \ * t2 &= rl; \ * rr ^= rol32(t2, 1); \ */ \ vpshufb tt0, t0, t3; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t2; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t1; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t0; \ \ vpand 0 * 32(r), t0, t0; \ vpand 1 * 32(r), t1, t1; \ 
vpand 2 * 32(r), t2, t2; \ vpand 3 * 32(r), t3, t3; \ \ rol32_1_32(t3, t2, t1, t0, tt1, tt2, tt3, tt0); \ \ vpxor 4 * 32(r), t0, t0; \ vpxor 5 * 32(r), t1, t1; \ vpxor 6 * 32(r), t2, t2; \ vpxor 7 * 32(r), t3, t3; \ vmovdqu t0, 4 * 32(r); \ vpbroadcastd klr, t0; /* only lowest 32-bit used */ \ vmovdqu t1, 5 * 32(r); \ vmovdqu t2, 6 * 32(r); \ vmovdqu t3, 7 * 32(r); \ \ /* \ * t0 = klr; \ * t0 |= lr; \ * ll ^= t0; \ */ \ \ vpshufb tt0, t0, t3; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t2; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t1; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t0; \ \ vpor l4, t0, t0; \ vpor l5, t1, t1; \ vpor l6, t2, t2; \ vpor l7, t3, t3; \ \ vpxor l0, t0, l0; \ vmovdqu l0, 0 * 32(l); \ vpxor l1, t1, l1; \ vmovdqu l1, 1 * 32(l); \ vpxor l2, t2, l2; \ vmovdqu l2, 2 * 32(l); \ vpxor l3, t3, l3; \ vmovdqu l3, 3 * 32(l); #define transpose_4x4(x0, x1, x2, x3, t1, t2) \ vpunpckhdq x1, x0, t2; \ vpunpckldq x1, x0, x0; \ \ vpunpckldq x3, x2, t1; \ vpunpckhdq x3, x2, x2; \ \ vpunpckhqdq t1, x0, x1; \ vpunpcklqdq t1, x0, x0; \ \ vpunpckhqdq x2, t2, x3; \ vpunpcklqdq x2, t2, x2; #define byteslice_16x16b_fast(a0, b0, c0, d0, a1, b1, c1, d1, a2, b2, c2, d2, \ a3, b3, c3, d3, st0, st1) \ vmovdqu d2, st0; \ vmovdqu d3, st1; \ transpose_4x4(a0, a1, a2, a3, d2, d3); \ transpose_4x4(b0, b1, b2, b3, d2, d3); \ vmovdqu st0, d2; \ vmovdqu st1, d3; \ \ vmovdqu a0, st0; \ vmovdqu a1, st1; \ transpose_4x4(c0, c1, c2, c3, a0, a1); \ transpose_4x4(d0, d1, d2, d3, a0, a1); \ \ vbroadcasti128 .Lshufb_16x16b RIP, a0; \ vmovdqu st1, a1; \ vpshufb a0, a2, a2; \ vpshufb a0, a3, a3; \ vpshufb a0, b0, b0; \ vpshufb a0, b1, b1; \ vpshufb a0, b2, b2; \ vpshufb a0, b3, b3; \ vpshufb a0, a1, a1; \ vpshufb a0, c0, c0; \ vpshufb a0, c1, c1; \ vpshufb a0, c2, c2; \ vpshufb a0, c3, c3; \ vpshufb a0, d0, d0; \ vpshufb a0, d1, d1; \ vpshufb a0, d2, d2; \ vpshufb a0, d3, d3; \ vmovdqu d3, st1; \ vmovdqu st0, d3; \ vpshufb a0, d3, a0; \ vmovdqu d2, st0; \ \ transpose_4x4(a0, b0, c0, d0, d2, d3); \ transpose_4x4(a1, b1, c1, d1, d2, d3); \ vmovdqu st0, d2; \ vmovdqu st1, d3; \ \ vmovdqu b0, st0; \ vmovdqu b1, st1; \ transpose_4x4(a2, b2, c2, d2, b0, b1); \ transpose_4x4(a3, b3, c3, d3, b0, b1); \ vmovdqu st0, b0; \ vmovdqu st1, b1; \ /* does not adjust output bytes inside vectors */ /* load blocks to registers and apply pre-whitening */ #define inpack32_pre(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, rio, key) \ vpbroadcastq key, x0; \ vpshufb .Lpack_bswap RIP, x0, x0; \ \ vpxor 0 * 32(rio), x0, y7; \ vpxor 1 * 32(rio), x0, y6; \ vpxor 2 * 32(rio), x0, y5; \ vpxor 3 * 32(rio), x0, y4; \ vpxor 4 * 32(rio), x0, y3; \ vpxor 5 * 32(rio), x0, y2; \ vpxor 6 * 32(rio), x0, y1; \ vpxor 7 * 32(rio), x0, y0; \ vpxor 8 * 32(rio), x0, x7; \ vpxor 9 * 32(rio), x0, x6; \ vpxor 10 * 32(rio), x0, x5; \ vpxor 11 * 32(rio), x0, x4; \ vpxor 12 * 32(rio), x0, x3; \ vpxor 13 * 32(rio), x0, x2; \ vpxor 14 * 32(rio), x0, x1; \ vpxor 15 * 32(rio), x0, x0; /* byteslice pre-whitened blocks and store to temporary memory */ #define inpack32_post(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd) \ byteslice_16x16b_fast(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, \ y4, y5, y6, y7, (mem_ab), (mem_cd)); \ \ vmovdqu x0, 0 * 32(mem_ab); \ vmovdqu x1, 1 * 32(mem_ab); \ vmovdqu x2, 2 * 32(mem_ab); \ vmovdqu x3, 3 * 32(mem_ab); \ vmovdqu x4, 4 * 32(mem_ab); \ vmovdqu x5, 5 * 32(mem_ab); \ vmovdqu x6, 6 * 32(mem_ab); \ vmovdqu x7, 7 * 32(mem_ab); \ vmovdqu y0, 0 * 32(mem_cd); \ vmovdqu y1, 1 * 
32(mem_cd); \ vmovdqu y2, 2 * 32(mem_cd); \ vmovdqu y3, 3 * 32(mem_cd); \ vmovdqu y4, 4 * 32(mem_cd); \ vmovdqu y5, 5 * 32(mem_cd); \ vmovdqu y6, 6 * 32(mem_cd); \ vmovdqu y7, 7 * 32(mem_cd); /* de-byteslice, apply post-whitening and store blocks */ #define outunpack32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, \ y5, y6, y7, key, stack_tmp0, stack_tmp1) \ byteslice_16x16b_fast(y0, y4, x0, x4, y1, y5, x1, x5, y2, y6, x2, x6, \ y3, y7, x3, x7, stack_tmp0, stack_tmp1); \ \ vmovdqu x0, stack_tmp0; \ \ vpbroadcastq key, x0; \ vpshufb .Lpack_bswap RIP, x0, x0; \ \ vpxor x0, y7, y7; \ vpxor x0, y6, y6; \ vpxor x0, y5, y5; \ vpxor x0, y4, y4; \ vpxor x0, y3, y3; \ vpxor x0, y2, y2; \ vpxor x0, y1, y1; \ vpxor x0, y0, y0; \ vpxor x0, x7, x7; \ vpxor x0, x6, x6; \ vpxor x0, x5, x5; \ vpxor x0, x4, x4; \ vpxor x0, x3, x3; \ vpxor x0, x2, x2; \ vpxor x0, x1, x1; \ vpxor stack_tmp0, x0, x0; #define write_output(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, rio) \ vmovdqu x0, 0 * 32(rio); \ vmovdqu x1, 1 * 32(rio); \ vmovdqu x2, 2 * 32(rio); \ vmovdqu x3, 3 * 32(rio); \ vmovdqu x4, 4 * 32(rio); \ vmovdqu x5, 5 * 32(rio); \ vmovdqu x6, 6 * 32(rio); \ vmovdqu x7, 7 * 32(rio); \ vmovdqu y0, 8 * 32(rio); \ vmovdqu y1, 9 * 32(rio); \ vmovdqu y2, 10 * 32(rio); \ vmovdqu y3, 11 * 32(rio); \ vmovdqu y4, 12 * 32(rio); \ vmovdqu y5, 13 * 32(rio); \ vmovdqu y6, 14 * 32(rio); \ vmovdqu y7, 15 * 32(rio); -.data +.text .align 32 #define SHUFB_BYTES(idx) \ 0 + (idx), 4 + (idx), 8 + (idx), 12 + (idx) .Lshufb_16x16b: .byte SHUFB_BYTES(0), SHUFB_BYTES(1), SHUFB_BYTES(2), SHUFB_BYTES(3) .byte SHUFB_BYTES(0), SHUFB_BYTES(1), SHUFB_BYTES(2), SHUFB_BYTES(3) .Lpack_bswap: .long 0x00010203, 0x04050607, 0x80808080, 0x80808080 .long 0x00010203, 0x04050607, 0x80808080, 0x80808080 /* For CTR-mode IV byteswap */ .Lbswap128_mask: .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 /* * pre-SubByte transform * * pre-lookup for sbox1, sbox2, sbox3: * swap_bitendianness( * isom_map_camellia_to_aes( * camellia_f( * swap_bitendianess(in) * ) * ) * ) * * (note: '⊕ 0xc5' inside camellia_f()) */ .Lpre_tf_lo_s1: .byte 0x45, 0xe8, 0x40, 0xed, 0x2e, 0x83, 0x2b, 0x86 .byte 0x4b, 0xe6, 0x4e, 0xe3, 0x20, 0x8d, 0x25, 0x88 .Lpre_tf_hi_s1: .byte 0x00, 0x51, 0xf1, 0xa0, 0x8a, 0xdb, 0x7b, 0x2a .byte 0x09, 0x58, 0xf8, 0xa9, 0x83, 0xd2, 0x72, 0x23 /* * pre-SubByte transform * * pre-lookup for sbox4: * swap_bitendianness( * isom_map_camellia_to_aes( * camellia_f( * swap_bitendianess(in <<< 1) * ) * ) * ) * * (note: '⊕ 0xc5' inside camellia_f()) */ .Lpre_tf_lo_s4: .byte 0x45, 0x40, 0x2e, 0x2b, 0x4b, 0x4e, 0x20, 0x25 .byte 0x14, 0x11, 0x7f, 0x7a, 0x1a, 0x1f, 0x71, 0x74 .Lpre_tf_hi_s4: .byte 0x00, 0xf1, 0x8a, 0x7b, 0x09, 0xf8, 0x83, 0x72 .byte 0xad, 0x5c, 0x27, 0xd6, 0xa4, 0x55, 0x2e, 0xdf /* * post-SubByte transform * * post-lookup for sbox1, sbox4: * swap_bitendianness( * camellia_h( * isom_map_aes_to_camellia( * swap_bitendianness( * aes_inverse_affine_transform(in) * ) * ) * ) * ) * * (note: '⊕ 0x6e' inside camellia_h()) */ .Lpost_tf_lo_s1: .byte 0x3c, 0xcc, 0xcf, 0x3f, 0x32, 0xc2, 0xc1, 0x31 .byte 0xdc, 0x2c, 0x2f, 0xdf, 0xd2, 0x22, 0x21, 0xd1 .Lpost_tf_hi_s1: .byte 0x00, 0xf9, 0x86, 0x7f, 0xd7, 0x2e, 0x51, 0xa8 .byte 0xa4, 0x5d, 0x22, 0xdb, 0x73, 0x8a, 0xf5, 0x0c /* * post-SubByte transform * * post-lookup for sbox2: * swap_bitendianness( * camellia_h( * isom_map_aes_to_camellia( * swap_bitendianness( * aes_inverse_affine_transform(in) * ) * ) * ) * ) <<< 1 * * (note: '⊕ 0x6e' inside camellia_h()) */ 
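/*
 * The filter_8bit() macro that consumes these .Lpre_tf_lo_s1 / .Lpre_tf_hi_s1
 * and .Lpost_tf tables is the usual vpshufb nibble-split trick: each input
 * byte is looked up through two 16-entry tables, one indexed by its low
 * nibble and one by its high nibble, and the two results are XORed.  Per
 * byte, the effect is roughly the C below (a sketch only; filter_8bit_ref is
 * an illustrative name, not a symbol from this file):
 *
 *   #include <stdint.h>
 *
 *   // Reference for what filter_8bit() computes on every byte of a vector,
 *   // given the 16-byte low- and high-nibble tables defined above.
 *   static uint8_t filter_8bit_ref(uint8_t x,
 *                                  const uint8_t lo_t[16],
 *                                  const uint8_t hi_t[16])
 *   {
 *     return lo_t[x & 0x0f] ^ hi_t[x >> 4];
 *   }
 */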
.Lpost_tf_lo_s2: .byte 0x78, 0x99, 0x9f, 0x7e, 0x64, 0x85, 0x83, 0x62 .byte 0xb9, 0x58, 0x5e, 0xbf, 0xa5, 0x44, 0x42, 0xa3 .Lpost_tf_hi_s2: .byte 0x00, 0xf3, 0x0d, 0xfe, 0xaf, 0x5c, 0xa2, 0x51 .byte 0x49, 0xba, 0x44, 0xb7, 0xe6, 0x15, 0xeb, 0x18 /* * post-SubByte transform * * post-lookup for sbox3: * swap_bitendianness( * camellia_h( * isom_map_aes_to_camellia( * swap_bitendianness( * aes_inverse_affine_transform(in) * ) * ) * ) * ) >>> 1 * * (note: '⊕ 0x6e' inside camellia_h()) */ .Lpost_tf_lo_s3: .byte 0x1e, 0x66, 0xe7, 0x9f, 0x19, 0x61, 0xe0, 0x98 .byte 0x6e, 0x16, 0x97, 0xef, 0x69, 0x11, 0x90, 0xe8 .Lpost_tf_hi_s3: .byte 0x00, 0xfc, 0x43, 0xbf, 0xeb, 0x17, 0xa8, 0x54 .byte 0x52, 0xae, 0x11, 0xed, 0xb9, 0x45, 0xfa, 0x06 /* For isolating SubBytes from AESENCLAST, inverse shift row */ .Linv_shift_row: .byte 0x00, 0x0d, 0x0a, 0x07, 0x04, 0x01, 0x0e, 0x0b .byte 0x08, 0x05, 0x02, 0x0f, 0x0c, 0x09, 0x06, 0x03 .align 4 /* 4-bit mask */ .L0f0f0f0f: .long 0x0f0f0f0f -.text .align 8 ELF(.type __camellia_enc_blk32,@function;) __camellia_enc_blk32: /* input: * %rdi: ctx, CTX * %rax: temporary storage, 512 bytes * %ymm0..%ymm15: 32 plaintext blocks * output: * %ymm0..%ymm15: 32 encrypted blocks, order swapped: * 7, 8, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 */ leaq 8 * 32(%rax), %rcx; inpack32_post(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, %rax, %rcx); enc_rounds32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, %rax, %rcx, 0); fls32(%rax, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %rcx, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, ((key_table + (8) * 8) + 0)(CTX), ((key_table + (8) * 8) + 4)(CTX), ((key_table + (8) * 8) + 8)(CTX), ((key_table + (8) * 8) + 12)(CTX)); enc_rounds32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, %rax, %rcx, 8); fls32(%rax, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %rcx, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, ((key_table + (16) * 8) + 0)(CTX), ((key_table + (16) * 8) + 4)(CTX), ((key_table + (16) * 8) + 8)(CTX), ((key_table + (16) * 8) + 12)(CTX)); enc_rounds32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, %rax, %rcx, 16); movl $24, %r8d; cmpl $128, key_bitlength(CTX); jne .Lenc_max32; .Lenc_done: /* load CD for output */ vmovdqu 0 * 32(%rcx), %ymm8; vmovdqu 1 * 32(%rcx), %ymm9; vmovdqu 2 * 32(%rcx), %ymm10; vmovdqu 3 * 32(%rcx), %ymm11; vmovdqu 4 * 32(%rcx), %ymm12; vmovdqu 5 * 32(%rcx), %ymm13; vmovdqu 6 * 32(%rcx), %ymm14; vmovdqu 7 * 32(%rcx), %ymm15; outunpack32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 32(%rax)); ret; .align 8 .Lenc_max32: movl $32, %r8d; fls32(%rax, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %rcx, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, ((key_table + (24) * 8) + 0)(CTX), ((key_table + (24) * 8) + 4)(CTX), ((key_table + (24) * 8) + 8)(CTX), ((key_table + (24) * 8) + 12)(CTX)); enc_rounds32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, %rax, %rcx, 24); jmp .Lenc_done; ELF(.size __camellia_enc_blk32,.-__camellia_enc_blk32;) .align 8 ELF(.type 
__camellia_dec_blk32,@function;) __camellia_dec_blk32: /* input: * %rdi: ctx, CTX * %rax: temporary storage, 512 bytes * %r8d: 24 for 16 byte key, 32 for larger * %ymm0..%ymm15: 16 encrypted blocks * output: * %ymm0..%ymm15: 16 plaintext blocks, order swapped: * 7, 8, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 */ leaq 8 * 32(%rax), %rcx; inpack32_post(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, %rax, %rcx); cmpl $32, %r8d; je .Ldec_max32; .Ldec_max24: dec_rounds32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, %rax, %rcx, 16); fls32(%rax, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %rcx, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, ((key_table + (16) * 8) + 8)(CTX), ((key_table + (16) * 8) + 12)(CTX), ((key_table + (16) * 8) + 0)(CTX), ((key_table + (16) * 8) + 4)(CTX)); dec_rounds32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, %rax, %rcx, 8); fls32(%rax, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %rcx, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, ((key_table + (8) * 8) + 8)(CTX), ((key_table + (8) * 8) + 12)(CTX), ((key_table + (8) * 8) + 0)(CTX), ((key_table + (8) * 8) + 4)(CTX)); dec_rounds32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, %rax, %rcx, 0); /* load CD for output */ vmovdqu 0 * 32(%rcx), %ymm8; vmovdqu 1 * 32(%rcx), %ymm9; vmovdqu 2 * 32(%rcx), %ymm10; vmovdqu 3 * 32(%rcx), %ymm11; vmovdqu 4 * 32(%rcx), %ymm12; vmovdqu 5 * 32(%rcx), %ymm13; vmovdqu 6 * 32(%rcx), %ymm14; vmovdqu 7 * 32(%rcx), %ymm15; outunpack32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax)); ret; .align 8 .Ldec_max32: dec_rounds32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, %rax, %rcx, 24); fls32(%rax, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %rcx, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, ((key_table + (24) * 8) + 8)(CTX), ((key_table + (24) * 8) + 12)(CTX), ((key_table + (24) * 8) + 0)(CTX), ((key_table + (24) * 8) + 4)(CTX)); jmp .Ldec_max24; ELF(.size __camellia_dec_blk32,.-__camellia_dec_blk32;) #define inc_le128(x, minus_one, tmp) \ vpcmpeqq minus_one, x, tmp; \ vpsubq minus_one, x, x; \ vpslldq $8, tmp, tmp; \ vpsubq tmp, x, x; .align 8 .globl _gcry_camellia_aesni_avx2_ctr_enc ELF(.type _gcry_camellia_aesni_avx2_ctr_enc,@function;) _gcry_camellia_aesni_avx2_ctr_enc: /* input: * %rdi: ctx, CTX * %rsi: dst (32 blocks) * %rdx: src (32 blocks) * %rcx: iv (big endian, 128bit) */ pushq %rbp; movq %rsp, %rbp; movq 8(%rcx), %r11; bswapq %r11; vzeroupper; subq $(16 * 32), %rsp; andq $~63, %rsp; movq %rsp, %rax; vpcmpeqd %ymm15, %ymm15, %ymm15; vpsrldq $8, %ymm15, %ymm15; /* ab: -1:0 ; cd: -1:0 */ /* load IV and byteswap */ vmovdqu (%rcx), %xmm0; vpshufb .Lbswap128_mask RIP, %xmm0, %xmm0; vmovdqa %xmm0, %xmm1; inc_le128(%xmm0, %xmm15, %xmm14); vbroadcasti128 .Lbswap128_mask RIP, %ymm14; vinserti128 $1, %xmm0, %ymm1, %ymm0; vpshufb %ymm14, %ymm0, %ymm13; vmovdqu %ymm13, 15 * 32(%rax); /* check need for handling 64-bit overflow and carry */ cmpq $(0xffffffffffffffff - 32), %r11; ja .Lload_ctr_carry; /* construct IVs */ vpaddq %ymm15, %ymm15, 
%ymm15; /* ab: -2:0 ; cd: -2:0 */ vpsubq %ymm15, %ymm0, %ymm0; vpshufb %ymm14, %ymm0, %ymm13; vmovdqu %ymm13, 14 * 32(%rax); vpsubq %ymm15, %ymm0, %ymm0; vpshufb %ymm14, %ymm0, %ymm13; vmovdqu %ymm13, 13 * 32(%rax); vpsubq %ymm15, %ymm0, %ymm0; vpshufb %ymm14, %ymm0, %ymm12; vpsubq %ymm15, %ymm0, %ymm0; vpshufb %ymm14, %ymm0, %ymm11; vpsubq %ymm15, %ymm0, %ymm0; vpshufb %ymm14, %ymm0, %ymm10; vpsubq %ymm15, %ymm0, %ymm0; vpshufb %ymm14, %ymm0, %ymm9; vpsubq %ymm15, %ymm0, %ymm0; vpshufb %ymm14, %ymm0, %ymm8; vpsubq %ymm15, %ymm0, %ymm0; vpshufb %ymm14, %ymm0, %ymm7; vpsubq %ymm15, %ymm0, %ymm0; vpshufb %ymm14, %ymm0, %ymm6; vpsubq %ymm15, %ymm0, %ymm0; vpshufb %ymm14, %ymm0, %ymm5; vpsubq %ymm15, %ymm0, %ymm0; vpshufb %ymm14, %ymm0, %ymm4; vpsubq %ymm15, %ymm0, %ymm0; vpshufb %ymm14, %ymm0, %ymm3; vpsubq %ymm15, %ymm0, %ymm0; vpshufb %ymm14, %ymm0, %ymm2; vpsubq %ymm15, %ymm0, %ymm0; vpshufb %ymm14, %ymm0, %ymm1; vpsubq %ymm15, %ymm0, %ymm0; /* +30 ; +31 */ vpsubq %xmm15, %xmm0, %xmm13; /* +32 */ vpshufb %ymm14, %ymm0, %ymm0; vpshufb %xmm14, %xmm13, %xmm13; vmovdqu %xmm13, (%rcx); jmp .Lload_ctr_done; .align 4 .Lload_ctr_carry: /* construct IVs */ inc_le128(%ymm0, %ymm15, %ymm13); /* ab: le1 ; cd: le2 */ inc_le128(%ymm0, %ymm15, %ymm13); /* ab: le2 ; cd: le3 */ vpshufb %ymm14, %ymm0, %ymm13; vmovdqu %ymm13, 14 * 32(%rax); inc_le128(%ymm0, %ymm15, %ymm13); inc_le128(%ymm0, %ymm15, %ymm13); vpshufb %ymm14, %ymm0, %ymm13; vmovdqu %ymm13, 13 * 32(%rax); inc_le128(%ymm0, %ymm15, %ymm13); inc_le128(%ymm0, %ymm15, %ymm13); vpshufb %ymm14, %ymm0, %ymm12; inc_le128(%ymm0, %ymm15, %ymm13); inc_le128(%ymm0, %ymm15, %ymm13); vpshufb %ymm14, %ymm0, %ymm11; inc_le128(%ymm0, %ymm15, %ymm13); inc_le128(%ymm0, %ymm15, %ymm13); vpshufb %ymm14, %ymm0, %ymm10; inc_le128(%ymm0, %ymm15, %ymm13); inc_le128(%ymm0, %ymm15, %ymm13); vpshufb %ymm14, %ymm0, %ymm9; inc_le128(%ymm0, %ymm15, %ymm13); inc_le128(%ymm0, %ymm15, %ymm13); vpshufb %ymm14, %ymm0, %ymm8; inc_le128(%ymm0, %ymm15, %ymm13); inc_le128(%ymm0, %ymm15, %ymm13); vpshufb %ymm14, %ymm0, %ymm7; inc_le128(%ymm0, %ymm15, %ymm13); inc_le128(%ymm0, %ymm15, %ymm13); vpshufb %ymm14, %ymm0, %ymm6; inc_le128(%ymm0, %ymm15, %ymm13); inc_le128(%ymm0, %ymm15, %ymm13); vpshufb %ymm14, %ymm0, %ymm5; inc_le128(%ymm0, %ymm15, %ymm13); inc_le128(%ymm0, %ymm15, %ymm13); vpshufb %ymm14, %ymm0, %ymm4; inc_le128(%ymm0, %ymm15, %ymm13); inc_le128(%ymm0, %ymm15, %ymm13); vpshufb %ymm14, %ymm0, %ymm3; inc_le128(%ymm0, %ymm15, %ymm13); inc_le128(%ymm0, %ymm15, %ymm13); vpshufb %ymm14, %ymm0, %ymm2; inc_le128(%ymm0, %ymm15, %ymm13); inc_le128(%ymm0, %ymm15, %ymm13); vpshufb %ymm14, %ymm0, %ymm1; inc_le128(%ymm0, %ymm15, %ymm13); inc_le128(%ymm0, %ymm15, %ymm13); vextracti128 $1, %ymm0, %xmm13; vpshufb %ymm14, %ymm0, %ymm0; inc_le128(%xmm13, %xmm15, %xmm14); vpshufb .Lbswap128_mask RIP, %xmm13, %xmm13; vmovdqu %xmm13, (%rcx); .align 4 .Lload_ctr_done: /* inpack16_pre: */ vpbroadcastq (key_table)(CTX), %ymm15; vpshufb .Lpack_bswap RIP, %ymm15, %ymm15; vpxor %ymm0, %ymm15, %ymm0; vpxor %ymm1, %ymm15, %ymm1; vpxor %ymm2, %ymm15, %ymm2; vpxor %ymm3, %ymm15, %ymm3; vpxor %ymm4, %ymm15, %ymm4; vpxor %ymm5, %ymm15, %ymm5; vpxor %ymm6, %ymm15, %ymm6; vpxor %ymm7, %ymm15, %ymm7; vpxor %ymm8, %ymm15, %ymm8; vpxor %ymm9, %ymm15, %ymm9; vpxor %ymm10, %ymm15, %ymm10; vpxor %ymm11, %ymm15, %ymm11; vpxor %ymm12, %ymm15, %ymm12; vpxor 13 * 32(%rax), %ymm15, %ymm13; vpxor 14 * 32(%rax), %ymm15, %ymm14; vpxor 15 * 32(%rax), %ymm15, %ymm15; call __camellia_enc_blk32; vpxor 0 * 32(%rdx), %ymm7, 
%ymm7; vpxor 1 * 32(%rdx), %ymm6, %ymm6; vpxor 2 * 32(%rdx), %ymm5, %ymm5; vpxor 3 * 32(%rdx), %ymm4, %ymm4; vpxor 4 * 32(%rdx), %ymm3, %ymm3; vpxor 5 * 32(%rdx), %ymm2, %ymm2; vpxor 6 * 32(%rdx), %ymm1, %ymm1; vpxor 7 * 32(%rdx), %ymm0, %ymm0; vpxor 8 * 32(%rdx), %ymm15, %ymm15; vpxor 9 * 32(%rdx), %ymm14, %ymm14; vpxor 10 * 32(%rdx), %ymm13, %ymm13; vpxor 11 * 32(%rdx), %ymm12, %ymm12; vpxor 12 * 32(%rdx), %ymm11, %ymm11; vpxor 13 * 32(%rdx), %ymm10, %ymm10; vpxor 14 * 32(%rdx), %ymm9, %ymm9; vpxor 15 * 32(%rdx), %ymm8, %ymm8; leaq 32 * 16(%rdx), %rdx; write_output(%ymm7, %ymm6, %ymm5, %ymm4, %ymm3, %ymm2, %ymm1, %ymm0, %ymm15, %ymm14, %ymm13, %ymm12, %ymm11, %ymm10, %ymm9, %ymm8, %rsi); vzeroall; leave; ret; ELF(.size _gcry_camellia_aesni_avx2_ctr_enc,.-_gcry_camellia_aesni_avx2_ctr_enc;) .align 8 .globl _gcry_camellia_aesni_avx2_cbc_dec ELF(.type _gcry_camellia_aesni_avx2_cbc_dec,@function;) _gcry_camellia_aesni_avx2_cbc_dec: /* input: * %rdi: ctx, CTX * %rsi: dst (32 blocks) * %rdx: src (32 blocks) * %rcx: iv */ pushq %rbp; movq %rsp, %rbp; vzeroupper; movq %rcx, %r9; cmpl $128, key_bitlength(CTX); movl $32, %r8d; movl $24, %eax; cmovel %eax, %r8d; /* max */ subq $(16 * 32), %rsp; andq $~63, %rsp; movq %rsp, %rax; inpack32_pre(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, %rdx, (key_table)(CTX, %r8, 8)); call __camellia_dec_blk32; /* XOR output with IV */ vmovdqu %ymm8, (%rax); vmovdqu (%r9), %xmm8; vinserti128 $1, (%rdx), %ymm8, %ymm8; vpxor %ymm8, %ymm7, %ymm7; vmovdqu (%rax), %ymm8; vpxor (0 * 32 + 16)(%rdx), %ymm6, %ymm6; vpxor (1 * 32 + 16)(%rdx), %ymm5, %ymm5; vpxor (2 * 32 + 16)(%rdx), %ymm4, %ymm4; vpxor (3 * 32 + 16)(%rdx), %ymm3, %ymm3; vpxor (4 * 32 + 16)(%rdx), %ymm2, %ymm2; vpxor (5 * 32 + 16)(%rdx), %ymm1, %ymm1; vpxor (6 * 32 + 16)(%rdx), %ymm0, %ymm0; vpxor (7 * 32 + 16)(%rdx), %ymm15, %ymm15; vpxor (8 * 32 + 16)(%rdx), %ymm14, %ymm14; vpxor (9 * 32 + 16)(%rdx), %ymm13, %ymm13; vpxor (10 * 32 + 16)(%rdx), %ymm12, %ymm12; vpxor (11 * 32 + 16)(%rdx), %ymm11, %ymm11; vpxor (12 * 32 + 16)(%rdx), %ymm10, %ymm10; vpxor (13 * 32 + 16)(%rdx), %ymm9, %ymm9; vpxor (14 * 32 + 16)(%rdx), %ymm8, %ymm8; movq (15 * 32 + 16 + 0)(%rdx), %rax; movq (15 * 32 + 16 + 8)(%rdx), %rcx; write_output(%ymm7, %ymm6, %ymm5, %ymm4, %ymm3, %ymm2, %ymm1, %ymm0, %ymm15, %ymm14, %ymm13, %ymm12, %ymm11, %ymm10, %ymm9, %ymm8, %rsi); /* store new IV */ movq %rax, (0)(%r9); movq %rcx, (8)(%r9); vzeroall; leave; ret; ELF(.size _gcry_camellia_aesni_avx2_cbc_dec,.-_gcry_camellia_aesni_avx2_cbc_dec;) .align 8 .globl _gcry_camellia_aesni_avx2_cfb_dec ELF(.type _gcry_camellia_aesni_avx2_cfb_dec,@function;) _gcry_camellia_aesni_avx2_cfb_dec: /* input: * %rdi: ctx, CTX * %rsi: dst (32 blocks) * %rdx: src (32 blocks) * %rcx: iv */ pushq %rbp; movq %rsp, %rbp; vzeroupper; subq $(16 * 32), %rsp; andq $~63, %rsp; movq %rsp, %rax; /* inpack16_pre: */ vpbroadcastq (key_table)(CTX), %ymm0; vpshufb .Lpack_bswap RIP, %ymm0, %ymm0; vmovdqu (%rcx), %xmm15; vinserti128 $1, (%rdx), %ymm15, %ymm15; vpxor %ymm15, %ymm0, %ymm15; vmovdqu (15 * 32 + 16)(%rdx), %xmm1; vmovdqu %xmm1, (%rcx); /* store new IV */ vpxor (0 * 32 + 16)(%rdx), %ymm0, %ymm14; vpxor (1 * 32 + 16)(%rdx), %ymm0, %ymm13; vpxor (2 * 32 + 16)(%rdx), %ymm0, %ymm12; vpxor (3 * 32 + 16)(%rdx), %ymm0, %ymm11; vpxor (4 * 32 + 16)(%rdx), %ymm0, %ymm10; vpxor (5 * 32 + 16)(%rdx), %ymm0, %ymm9; vpxor (6 * 32 + 16)(%rdx), %ymm0, %ymm8; vpxor (7 * 32 + 16)(%rdx), %ymm0, %ymm7; vpxor (8 * 32 + 16)(%rdx), 
%ymm0, %ymm6; vpxor (9 * 32 + 16)(%rdx), %ymm0, %ymm5; vpxor (10 * 32 + 16)(%rdx), %ymm0, %ymm4; vpxor (11 * 32 + 16)(%rdx), %ymm0, %ymm3; vpxor (12 * 32 + 16)(%rdx), %ymm0, %ymm2; vpxor (13 * 32 + 16)(%rdx), %ymm0, %ymm1; vpxor (14 * 32 + 16)(%rdx), %ymm0, %ymm0; call __camellia_enc_blk32; vpxor 0 * 32(%rdx), %ymm7, %ymm7; vpxor 1 * 32(%rdx), %ymm6, %ymm6; vpxor 2 * 32(%rdx), %ymm5, %ymm5; vpxor 3 * 32(%rdx), %ymm4, %ymm4; vpxor 4 * 32(%rdx), %ymm3, %ymm3; vpxor 5 * 32(%rdx), %ymm2, %ymm2; vpxor 6 * 32(%rdx), %ymm1, %ymm1; vpxor 7 * 32(%rdx), %ymm0, %ymm0; vpxor 8 * 32(%rdx), %ymm15, %ymm15; vpxor 9 * 32(%rdx), %ymm14, %ymm14; vpxor 10 * 32(%rdx), %ymm13, %ymm13; vpxor 11 * 32(%rdx), %ymm12, %ymm12; vpxor 12 * 32(%rdx), %ymm11, %ymm11; vpxor 13 * 32(%rdx), %ymm10, %ymm10; vpxor 14 * 32(%rdx), %ymm9, %ymm9; vpxor 15 * 32(%rdx), %ymm8, %ymm8; write_output(%ymm7, %ymm6, %ymm5, %ymm4, %ymm3, %ymm2, %ymm1, %ymm0, %ymm15, %ymm14, %ymm13, %ymm12, %ymm11, %ymm10, %ymm9, %ymm8, %rsi); vzeroall; leave; ret; ELF(.size _gcry_camellia_aesni_avx2_cfb_dec,.-_gcry_camellia_aesni_avx2_cfb_dec;) .align 8 .globl _gcry_camellia_aesni_avx2_ocb_enc ELF(.type _gcry_camellia_aesni_avx2_ocb_enc,@function;) _gcry_camellia_aesni_avx2_ocb_enc: /* input: * %rdi: ctx, CTX * %rsi: dst (32 blocks) * %rdx: src (32 blocks) * %rcx: offset * %r8 : checksum * %r9 : L pointers (void *L[32]) */ pushq %rbp; movq %rsp, %rbp; vzeroupper; subq $(16 * 32 + 4 * 8), %rsp; andq $~63, %rsp; movq %rsp, %rax; movq %r10, (16 * 32 + 0 * 8)(%rax); movq %r11, (16 * 32 + 1 * 8)(%rax); movq %r12, (16 * 32 + 2 * 8)(%rax); movq %r13, (16 * 32 + 3 * 8)(%rax); vmovdqu (%rcx), %xmm14; vmovdqu (%r8), %xmm13; /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ /* Checksum_i = Checksum_{i-1} xor P_i */ /* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i) */ #define OCB_INPUT(n, l0reg, l1reg, yreg) \ vmovdqu (n * 32)(%rdx), yreg; \ vpxor (l0reg), %xmm14, %xmm15; \ vpxor (l1reg), %xmm15, %xmm14; \ vinserti128 $1, %xmm14, %ymm15, %ymm15; \ vpxor yreg, %ymm13, %ymm13; \ vpxor yreg, %ymm15, yreg; \ vmovdqu %ymm15, (n * 32)(%rsi); movq (0 * 8)(%r9), %r10; movq (1 * 8)(%r9), %r11; movq (2 * 8)(%r9), %r12; movq (3 * 8)(%r9), %r13; OCB_INPUT(0, %r10, %r11, %ymm0); vmovdqu %ymm0, (15 * 32)(%rax); OCB_INPUT(1, %r12, %r13, %ymm0); vmovdqu %ymm0, (14 * 32)(%rax); movq (4 * 8)(%r9), %r10; movq (5 * 8)(%r9), %r11; movq (6 * 8)(%r9), %r12; movq (7 * 8)(%r9), %r13; OCB_INPUT(2, %r10, %r11, %ymm0); vmovdqu %ymm0, (13 * 32)(%rax); OCB_INPUT(3, %r12, %r13, %ymm12); movq (8 * 8)(%r9), %r10; movq (9 * 8)(%r9), %r11; movq (10 * 8)(%r9), %r12; movq (11 * 8)(%r9), %r13; OCB_INPUT(4, %r10, %r11, %ymm11); OCB_INPUT(5, %r12, %r13, %ymm10); movq (12 * 8)(%r9), %r10; movq (13 * 8)(%r9), %r11; movq (14 * 8)(%r9), %r12; movq (15 * 8)(%r9), %r13; OCB_INPUT(6, %r10, %r11, %ymm9); OCB_INPUT(7, %r12, %r13, %ymm8); movq (16 * 8)(%r9), %r10; movq (17 * 8)(%r9), %r11; movq (18 * 8)(%r9), %r12; movq (19 * 8)(%r9), %r13; OCB_INPUT(8, %r10, %r11, %ymm7); OCB_INPUT(9, %r12, %r13, %ymm6); movq (20 * 8)(%r9), %r10; movq (21 * 8)(%r9), %r11; movq (22 * 8)(%r9), %r12; movq (23 * 8)(%r9), %r13; OCB_INPUT(10, %r10, %r11, %ymm5); OCB_INPUT(11, %r12, %r13, %ymm4); movq (24 * 8)(%r9), %r10; movq (25 * 8)(%r9), %r11; movq (26 * 8)(%r9), %r12; movq (27 * 8)(%r9), %r13; OCB_INPUT(12, %r10, %r11, %ymm3); OCB_INPUT(13, %r12, %r13, %ymm2); movq (28 * 8)(%r9), %r10; movq (29 * 8)(%r9), %r11; movq (30 * 8)(%r9), %r12; movq (31 * 8)(%r9), %r13; OCB_INPUT(14, %r10, %r11, %ymm1); OCB_INPUT(15, %r12, %r13, %ymm0); #undef 
OCB_INPUT vextracti128 $1, %ymm13, %xmm15; vmovdqu %xmm14, (%rcx); vpxor %xmm13, %xmm15, %xmm15; vmovdqu %xmm15, (%r8); /* inpack16_pre: */ vpbroadcastq (key_table)(CTX), %ymm15; vpshufb .Lpack_bswap RIP, %ymm15, %ymm15; vpxor %ymm0, %ymm15, %ymm0; vpxor %ymm1, %ymm15, %ymm1; vpxor %ymm2, %ymm15, %ymm2; vpxor %ymm3, %ymm15, %ymm3; vpxor %ymm4, %ymm15, %ymm4; vpxor %ymm5, %ymm15, %ymm5; vpxor %ymm6, %ymm15, %ymm6; vpxor %ymm7, %ymm15, %ymm7; vpxor %ymm8, %ymm15, %ymm8; vpxor %ymm9, %ymm15, %ymm9; vpxor %ymm10, %ymm15, %ymm10; vpxor %ymm11, %ymm15, %ymm11; vpxor %ymm12, %ymm15, %ymm12; vpxor 13 * 32(%rax), %ymm15, %ymm13; vpxor 14 * 32(%rax), %ymm15, %ymm14; vpxor 15 * 32(%rax), %ymm15, %ymm15; call __camellia_enc_blk32; vpxor 0 * 32(%rsi), %ymm7, %ymm7; vpxor 1 * 32(%rsi), %ymm6, %ymm6; vpxor 2 * 32(%rsi), %ymm5, %ymm5; vpxor 3 * 32(%rsi), %ymm4, %ymm4; vpxor 4 * 32(%rsi), %ymm3, %ymm3; vpxor 5 * 32(%rsi), %ymm2, %ymm2; vpxor 6 * 32(%rsi), %ymm1, %ymm1; vpxor 7 * 32(%rsi), %ymm0, %ymm0; vpxor 8 * 32(%rsi), %ymm15, %ymm15; vpxor 9 * 32(%rsi), %ymm14, %ymm14; vpxor 10 * 32(%rsi), %ymm13, %ymm13; vpxor 11 * 32(%rsi), %ymm12, %ymm12; vpxor 12 * 32(%rsi), %ymm11, %ymm11; vpxor 13 * 32(%rsi), %ymm10, %ymm10; vpxor 14 * 32(%rsi), %ymm9, %ymm9; vpxor 15 * 32(%rsi), %ymm8, %ymm8; write_output(%ymm7, %ymm6, %ymm5, %ymm4, %ymm3, %ymm2, %ymm1, %ymm0, %ymm15, %ymm14, %ymm13, %ymm12, %ymm11, %ymm10, %ymm9, %ymm8, %rsi); vzeroall; movq (16 * 32 + 0 * 8)(%rax), %r10; movq (16 * 32 + 1 * 8)(%rax), %r11; movq (16 * 32 + 2 * 8)(%rax), %r12; movq (16 * 32 + 3 * 8)(%rax), %r13; leave; ret; ELF(.size _gcry_camellia_aesni_avx2_ocb_enc,.-_gcry_camellia_aesni_avx2_ocb_enc;) .align 8 .globl _gcry_camellia_aesni_avx2_ocb_dec ELF(.type _gcry_camellia_aesni_avx2_ocb_dec,@function;) _gcry_camellia_aesni_avx2_ocb_dec: /* input: * %rdi: ctx, CTX * %rsi: dst (32 blocks) * %rdx: src (32 blocks) * %rcx: offset * %r8 : checksum * %r9 : L pointers (void *L[32]) */ pushq %rbp; movq %rsp, %rbp; vzeroupper; subq $(16 * 32 + 4 * 8), %rsp; andq $~63, %rsp; movq %rsp, %rax; movq %r10, (16 * 32 + 0 * 8)(%rax); movq %r11, (16 * 32 + 1 * 8)(%rax); movq %r12, (16 * 32 + 2 * 8)(%rax); movq %r13, (16 * 32 + 3 * 8)(%rax); vmovdqu (%rcx), %xmm14; /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ /* P_i = Offset_i xor DECIPHER(K, C_i xor Offset_i) */ #define OCB_INPUT(n, l0reg, l1reg, yreg) \ vmovdqu (n * 32)(%rdx), yreg; \ vpxor (l0reg), %xmm14, %xmm15; \ vpxor (l1reg), %xmm15, %xmm14; \ vinserti128 $1, %xmm14, %ymm15, %ymm15; \ vpxor yreg, %ymm15, yreg; \ vmovdqu %ymm15, (n * 32)(%rsi); movq (0 * 8)(%r9), %r10; movq (1 * 8)(%r9), %r11; movq (2 * 8)(%r9), %r12; movq (3 * 8)(%r9), %r13; OCB_INPUT(0, %r10, %r11, %ymm0); vmovdqu %ymm0, (15 * 32)(%rax); OCB_INPUT(1, %r12, %r13, %ymm0); vmovdqu %ymm0, (14 * 32)(%rax); movq (4 * 8)(%r9), %r10; movq (5 * 8)(%r9), %r11; movq (6 * 8)(%r9), %r12; movq (7 * 8)(%r9), %r13; OCB_INPUT(2, %r10, %r11, %ymm13); OCB_INPUT(3, %r12, %r13, %ymm12); movq (8 * 8)(%r9), %r10; movq (9 * 8)(%r9), %r11; movq (10 * 8)(%r9), %r12; movq (11 * 8)(%r9), %r13; OCB_INPUT(4, %r10, %r11, %ymm11); OCB_INPUT(5, %r12, %r13, %ymm10); movq (12 * 8)(%r9), %r10; movq (13 * 8)(%r9), %r11; movq (14 * 8)(%r9), %r12; movq (15 * 8)(%r9), %r13; OCB_INPUT(6, %r10, %r11, %ymm9); OCB_INPUT(7, %r12, %r13, %ymm8); movq (16 * 8)(%r9), %r10; movq (17 * 8)(%r9), %r11; movq (18 * 8)(%r9), %r12; movq (19 * 8)(%r9), %r13; OCB_INPUT(8, %r10, %r11, %ymm7); OCB_INPUT(9, %r12, %r13, %ymm6); movq (20 * 8)(%r9), %r10; movq (21 * 8)(%r9), %r11; movq (22 * 
8)(%r9), %r12; movq (23 * 8)(%r9), %r13; OCB_INPUT(10, %r10, %r11, %ymm5); OCB_INPUT(11, %r12, %r13, %ymm4); movq (24 * 8)(%r9), %r10; movq (25 * 8)(%r9), %r11; movq (26 * 8)(%r9), %r12; movq (27 * 8)(%r9), %r13; OCB_INPUT(12, %r10, %r11, %ymm3); OCB_INPUT(13, %r12, %r13, %ymm2); movq (28 * 8)(%r9), %r10; movq (29 * 8)(%r9), %r11; movq (30 * 8)(%r9), %r12; movq (31 * 8)(%r9), %r13; OCB_INPUT(14, %r10, %r11, %ymm1); OCB_INPUT(15, %r12, %r13, %ymm0); #undef OCB_INPUT vmovdqu %xmm14, (%rcx); movq %r8, %r10; cmpl $128, key_bitlength(CTX); movl $32, %r8d; movl $24, %r9d; cmovel %r9d, %r8d; /* max */ /* inpack16_pre: */ vpbroadcastq (key_table)(CTX, %r8, 8), %ymm15; vpshufb .Lpack_bswap RIP, %ymm15, %ymm15; vpxor %ymm0, %ymm15, %ymm0; vpxor %ymm1, %ymm15, %ymm1; vpxor %ymm2, %ymm15, %ymm2; vpxor %ymm3, %ymm15, %ymm3; vpxor %ymm4, %ymm15, %ymm4; vpxor %ymm5, %ymm15, %ymm5; vpxor %ymm6, %ymm15, %ymm6; vpxor %ymm7, %ymm15, %ymm7; vpxor %ymm8, %ymm15, %ymm8; vpxor %ymm9, %ymm15, %ymm9; vpxor %ymm10, %ymm15, %ymm10; vpxor %ymm11, %ymm15, %ymm11; vpxor %ymm12, %ymm15, %ymm12; vpxor %ymm13, %ymm15, %ymm13; vpxor 14 * 32(%rax), %ymm15, %ymm14; vpxor 15 * 32(%rax), %ymm15, %ymm15; call __camellia_dec_blk32; vpxor 0 * 32(%rsi), %ymm7, %ymm7; vpxor 1 * 32(%rsi), %ymm6, %ymm6; vpxor 2 * 32(%rsi), %ymm5, %ymm5; vpxor 3 * 32(%rsi), %ymm4, %ymm4; vpxor 4 * 32(%rsi), %ymm3, %ymm3; vpxor 5 * 32(%rsi), %ymm2, %ymm2; vpxor 6 * 32(%rsi), %ymm1, %ymm1; vpxor 7 * 32(%rsi), %ymm0, %ymm0; vmovdqu %ymm7, (7 * 32)(%rax); vmovdqu %ymm6, (6 * 32)(%rax); vpxor 8 * 32(%rsi), %ymm15, %ymm15; vpxor 9 * 32(%rsi), %ymm14, %ymm14; vpxor 10 * 32(%rsi), %ymm13, %ymm13; vpxor 11 * 32(%rsi), %ymm12, %ymm12; vpxor 12 * 32(%rsi), %ymm11, %ymm11; vpxor 13 * 32(%rsi), %ymm10, %ymm10; vpxor 14 * 32(%rsi), %ymm9, %ymm9; vpxor 15 * 32(%rsi), %ymm8, %ymm8; /* Checksum_i = Checksum_{i-1} xor P_i */ vpxor %ymm5, %ymm7, %ymm7; vpxor %ymm4, %ymm6, %ymm6; vpxor %ymm3, %ymm7, %ymm7; vpxor %ymm2, %ymm6, %ymm6; vpxor %ymm1, %ymm7, %ymm7; vpxor %ymm0, %ymm6, %ymm6; vpxor %ymm15, %ymm7, %ymm7; vpxor %ymm14, %ymm6, %ymm6; vpxor %ymm13, %ymm7, %ymm7; vpxor %ymm12, %ymm6, %ymm6; vpxor %ymm11, %ymm7, %ymm7; vpxor %ymm10, %ymm6, %ymm6; vpxor %ymm9, %ymm7, %ymm7; vpxor %ymm8, %ymm6, %ymm6; vpxor %ymm7, %ymm6, %ymm7; vextracti128 $1, %ymm7, %xmm6; vpxor %xmm6, %xmm7, %xmm7; vpxor (%r10), %xmm7, %xmm7; vmovdqu %xmm7, (%r10); vmovdqu 7 * 32(%rax), %ymm7; vmovdqu 6 * 32(%rax), %ymm6; write_output(%ymm7, %ymm6, %ymm5, %ymm4, %ymm3, %ymm2, %ymm1, %ymm0, %ymm15, %ymm14, %ymm13, %ymm12, %ymm11, %ymm10, %ymm9, %ymm8, %rsi); vzeroall; movq (16 * 32 + 0 * 8)(%rax), %r10; movq (16 * 32 + 1 * 8)(%rax), %r11; movq (16 * 32 + 2 * 8)(%rax), %r12; movq (16 * 32 + 3 * 8)(%rax), %r13; leave; ret; ELF(.size _gcry_camellia_aesni_avx2_ocb_dec,.-_gcry_camellia_aesni_avx2_ocb_dec;) .align 8 .globl _gcry_camellia_aesni_avx2_ocb_auth ELF(.type _gcry_camellia_aesni_avx2_ocb_auth,@function;) _gcry_camellia_aesni_avx2_ocb_auth: /* input: * %rdi: ctx, CTX * %rsi: abuf (16 blocks) * %rdx: offset * %rcx: checksum * %r8 : L pointers (void *L[16]) */ pushq %rbp; movq %rsp, %rbp; vzeroupper; subq $(16 * 32 + 4 * 8), %rsp; andq $~63, %rsp; movq %rsp, %rax; movq %r10, (16 * 32 + 0 * 8)(%rax); movq %r11, (16 * 32 + 1 * 8)(%rax); movq %r12, (16 * 32 + 2 * 8)(%rax); movq %r13, (16 * 32 + 3 * 8)(%rax); vmovdqu (%rdx), %xmm14; /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ /* Checksum_i = Checksum_{i-1} xor P_i */ /* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i) */ #define OCB_INPUT(n, l0reg, 
l1reg, yreg) \ vmovdqu (n * 32)(%rsi), yreg; \ vpxor (l0reg), %xmm14, %xmm15; \ vpxor (l1reg), %xmm15, %xmm14; \ vinserti128 $1, %xmm14, %ymm15, %ymm15; \ vpxor yreg, %ymm15, yreg; movq (0 * 8)(%r8), %r10; movq (1 * 8)(%r8), %r11; movq (2 * 8)(%r8), %r12; movq (3 * 8)(%r8), %r13; OCB_INPUT(0, %r10, %r11, %ymm0); vmovdqu %ymm0, (15 * 32)(%rax); OCB_INPUT(1, %r12, %r13, %ymm0); vmovdqu %ymm0, (14 * 32)(%rax); movq (4 * 8)(%r8), %r10; movq (5 * 8)(%r8), %r11; movq (6 * 8)(%r8), %r12; movq (7 * 8)(%r8), %r13; OCB_INPUT(2, %r10, %r11, %ymm13); OCB_INPUT(3, %r12, %r13, %ymm12); movq (8 * 8)(%r8), %r10; movq (9 * 8)(%r8), %r11; movq (10 * 8)(%r8), %r12; movq (11 * 8)(%r8), %r13; OCB_INPUT(4, %r10, %r11, %ymm11); OCB_INPUT(5, %r12, %r13, %ymm10); movq (12 * 8)(%r8), %r10; movq (13 * 8)(%r8), %r11; movq (14 * 8)(%r8), %r12; movq (15 * 8)(%r8), %r13; OCB_INPUT(6, %r10, %r11, %ymm9); OCB_INPUT(7, %r12, %r13, %ymm8); movq (16 * 8)(%r8), %r10; movq (17 * 8)(%r8), %r11; movq (18 * 8)(%r8), %r12; movq (19 * 8)(%r8), %r13; OCB_INPUT(8, %r10, %r11, %ymm7); OCB_INPUT(9, %r12, %r13, %ymm6); movq (20 * 8)(%r8), %r10; movq (21 * 8)(%r8), %r11; movq (22 * 8)(%r8), %r12; movq (23 * 8)(%r8), %r13; OCB_INPUT(10, %r10, %r11, %ymm5); OCB_INPUT(11, %r12, %r13, %ymm4); movq (24 * 8)(%r8), %r10; movq (25 * 8)(%r8), %r11; movq (26 * 8)(%r8), %r12; movq (27 * 8)(%r8), %r13; OCB_INPUT(12, %r10, %r11, %ymm3); OCB_INPUT(13, %r12, %r13, %ymm2); movq (28 * 8)(%r8), %r10; movq (29 * 8)(%r8), %r11; movq (30 * 8)(%r8), %r12; movq (31 * 8)(%r8), %r13; OCB_INPUT(14, %r10, %r11, %ymm1); OCB_INPUT(15, %r12, %r13, %ymm0); #undef OCB_INPUT vmovdqu %xmm14, (%rdx); movq %rcx, %r10; /* inpack16_pre: */ vpbroadcastq (key_table)(CTX), %ymm15; vpshufb .Lpack_bswap RIP, %ymm15, %ymm15; vpxor %ymm0, %ymm15, %ymm0; vpxor %ymm1, %ymm15, %ymm1; vpxor %ymm2, %ymm15, %ymm2; vpxor %ymm3, %ymm15, %ymm3; vpxor %ymm4, %ymm15, %ymm4; vpxor %ymm5, %ymm15, %ymm5; vpxor %ymm6, %ymm15, %ymm6; vpxor %ymm7, %ymm15, %ymm7; vpxor %ymm8, %ymm15, %ymm8; vpxor %ymm9, %ymm15, %ymm9; vpxor %ymm10, %ymm15, %ymm10; vpxor %ymm11, %ymm15, %ymm11; vpxor %ymm12, %ymm15, %ymm12; vpxor %ymm13, %ymm15, %ymm13; vpxor 14 * 32(%rax), %ymm15, %ymm14; vpxor 15 * 32(%rax), %ymm15, %ymm15; call __camellia_enc_blk32; vpxor %ymm7, %ymm6, %ymm6; vpxor %ymm5, %ymm4, %ymm4; vpxor %ymm3, %ymm2, %ymm2; vpxor %ymm1, %ymm0, %ymm0; vpxor %ymm15, %ymm14, %ymm14; vpxor %ymm13, %ymm12, %ymm12; vpxor %ymm11, %ymm10, %ymm10; vpxor %ymm9, %ymm8, %ymm8; vpxor %ymm6, %ymm4, %ymm4; vpxor %ymm2, %ymm0, %ymm0; vpxor %ymm14, %ymm12, %ymm12; vpxor %ymm10, %ymm8, %ymm8; vpxor %ymm4, %ymm0, %ymm0; vpxor %ymm12, %ymm8, %ymm8; vpxor %ymm0, %ymm8, %ymm0; vextracti128 $1, %ymm0, %xmm1; vpxor (%r10), %xmm0, %xmm0; vpxor %xmm0, %xmm1, %xmm0; vmovdqu %xmm0, (%r10); vzeroall; movq (16 * 32 + 0 * 8)(%rax), %r10; movq (16 * 32 + 1 * 8)(%rax), %r11; movq (16 * 32 + 2 * 8)(%rax), %r12; movq (16 * 32 + 3 * 8)(%rax), %r13; leave; ret; ELF(.size _gcry_camellia_aesni_avx2_ocb_auth,.-_gcry_camellia_aesni_avx2_ocb_auth;) #endif /*defined(ENABLE_AESNI_SUPPORT) && defined(ENABLE_AVX2_SUPPORT)*/ #endif /*__x86_64*/ diff --git a/cipher/chacha20-avx2-amd64.S b/cipher/chacha20-avx2-amd64.S index 12bed35b..8c085bad 100644 --- a/cipher/chacha20-avx2-amd64.S +++ b/cipher/chacha20-avx2-amd64.S @@ -1,957 +1,956 @@ /* chacha20-avx2-amd64.S - AMD64/AVX2 implementation of ChaCha20 * * Copyright (C) 2014 Jussi Kivilinna * * This file is part of Libgcrypt. 
* * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . */ /* * Based on public domain implementation by Andrew Moon at * https://github.com/floodyberry/chacha-opt */ #ifdef __x86_64__ #include #if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && \ defined(ENABLE_AVX2_SUPPORT) && USE_CHACHA20 #ifdef __PIC__ # define RIP (%rip) #else # define RIP #endif #ifdef HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS # define ELF(...) __VA_ARGS__ #else # define ELF(...) /*_*/ #endif .text .align 8 .globl _gcry_chacha20_amd64_avx2_blocks ELF(.type _gcry_chacha20_amd64_avx2_blocks,@function;) _gcry_chacha20_amd64_avx2_blocks: .Lchacha_blocks_avx2_local: vzeroupper pushq %rbx pushq %rbp pushq %r12 pushq %r13 pushq %r14 movq %rsp, %rbp andq $~63, %rsp subq $512, %rsp leaq .LC RIP, %rax vmovdqu 0(%rax), %xmm6 vmovdqu 16(%rax), %xmm7 vmovdqu 0(%rdi), %xmm8 vmovdqu 16(%rdi), %xmm9 vmovdqu 32(%rdi), %xmm10 vmovdqu 48(%rdi), %xmm11 movl $20, %eax movq $1, %r9 vmovdqa %xmm8, 0(%rsp) vmovdqa %xmm9, 16(%rsp) vmovdqa %xmm10, 32(%rsp) vmovdqa %xmm11, 48(%rsp) movq %rax, 64(%rsp) vmovdqa %xmm6, 448(%rsp) vmovdqa %xmm6, 464(%rsp) vmovdqa %xmm7, 480(%rsp) vmovdqa %xmm7, 496(%rsp) cmpq $512, %rcx jae .Lchacha_blocks_avx2_atleast512 cmp $256, %rcx jae .Lchacha_blocks_avx2_atleast256 jmp .Lchacha_blocks_avx2_below256 .p2align 6,,63 .Lchacha_blocks_avx2_atleast512: movq 48(%rsp), %rax leaq 1(%rax), %r8 leaq 2(%rax), %r9 leaq 3(%rax), %r10 leaq 4(%rax), %rbx leaq 5(%rax), %r11 leaq 6(%rax), %r12 leaq 7(%rax), %r13 leaq 8(%rax), %r14 movl %eax, 128(%rsp) movl %r8d, 4+128(%rsp) movl %r9d, 8+128(%rsp) movl %r10d, 12+128(%rsp) movl %ebx, 16+128(%rsp) movl %r11d, 20+128(%rsp) movl %r12d, 24+128(%rsp) movl %r13d, 28+128(%rsp) shrq $32, %rax shrq $32, %r8 shrq $32, %r9 shrq $32, %r10 shrq $32, %rbx shrq $32, %r11 shrq $32, %r12 shrq $32, %r13 movl %eax, 160(%rsp) movl %r8d, 4+160(%rsp) movl %r9d, 8+160(%rsp) movl %r10d, 12+160(%rsp) movl %ebx, 16+160(%rsp) movl %r11d, 20+160(%rsp) movl %r12d, 24+160(%rsp) movl %r13d, 28+160(%rsp) movq %r14, 48(%rsp) movq 64(%rsp), %rax vpbroadcastd 0(%rsp), %ymm0 vpbroadcastd 4+0(%rsp), %ymm1 vpbroadcastd 8+0(%rsp), %ymm2 vpbroadcastd 12+0(%rsp), %ymm3 vpbroadcastd 16(%rsp), %ymm4 vpbroadcastd 4+16(%rsp), %ymm5 vpbroadcastd 8+16(%rsp), %ymm6 vpbroadcastd 12+16(%rsp), %ymm7 vpbroadcastd 32(%rsp), %ymm8 vpbroadcastd 4+32(%rsp), %ymm9 vpbroadcastd 8+32(%rsp), %ymm10 vpbroadcastd 12+32(%rsp), %ymm11 vpbroadcastd 8+48(%rsp), %ymm14 vpbroadcastd 12+48(%rsp), %ymm15 vmovdqa 128(%rsp), %ymm12 vmovdqa 160(%rsp), %ymm13 .Lchacha_blocks_avx2_mainloop1: vpaddd %ymm0, %ymm4, %ymm0 vpaddd %ymm1, %ymm5, %ymm1 vpxor %ymm12, %ymm0, %ymm12 vpxor %ymm13, %ymm1, %ymm13 vpaddd %ymm2, %ymm6, %ymm2 vpaddd %ymm3, %ymm7, %ymm3 vpxor %ymm14, %ymm2, %ymm14 vpxor %ymm15, %ymm3, %ymm15 vpshufb 448(%rsp), %ymm12, %ymm12 vpshufb 448(%rsp), %ymm13, %ymm13 vpaddd %ymm8, %ymm12, %ymm8 vpaddd %ymm9, %ymm13, %ymm9 vpshufb 448(%rsp), 
%ymm14, %ymm14 vpshufb 448(%rsp), %ymm15, %ymm15 vpaddd %ymm10, %ymm14, %ymm10 vpaddd %ymm11, %ymm15, %ymm11 vmovdqa %ymm12, 96(%rsp) vpxor %ymm4, %ymm8, %ymm4 vpxor %ymm5, %ymm9, %ymm5 vpslld $ 12, %ymm4, %ymm12 vpsrld $20, %ymm4, %ymm4 vpxor %ymm4, %ymm12, %ymm4 vpslld $ 12, %ymm5, %ymm12 vpsrld $20, %ymm5, %ymm5 vpxor %ymm5, %ymm12, %ymm5 vpxor %ymm6, %ymm10, %ymm6 vpxor %ymm7, %ymm11, %ymm7 vpslld $ 12, %ymm6, %ymm12 vpsrld $20, %ymm6, %ymm6 vpxor %ymm6, %ymm12, %ymm6 vpslld $ 12, %ymm7, %ymm12 vpsrld $20, %ymm7, %ymm7 vpxor %ymm7, %ymm12, %ymm7 vpaddd %ymm0, %ymm4, %ymm0 vpaddd %ymm1, %ymm5, %ymm1 vpxor 96(%rsp), %ymm0, %ymm12 vpxor %ymm13, %ymm1, %ymm13 vpaddd %ymm2, %ymm6, %ymm2 vpaddd %ymm3, %ymm7, %ymm3 vpxor %ymm14, %ymm2, %ymm14 vpxor %ymm15, %ymm3, %ymm15 vpshufb 480(%rsp), %ymm12, %ymm12 vpshufb 480(%rsp), %ymm13, %ymm13 vpaddd %ymm8, %ymm12, %ymm8 vpaddd %ymm9, %ymm13, %ymm9 vpshufb 480(%rsp), %ymm14, %ymm14 vpshufb 480(%rsp), %ymm15, %ymm15 vpaddd %ymm10, %ymm14, %ymm10 vpaddd %ymm11, %ymm15, %ymm11 vmovdqa %ymm12, 96(%rsp) vpxor %ymm4, %ymm8, %ymm4 vpxor %ymm5, %ymm9, %ymm5 vpslld $ 7, %ymm4, %ymm12 vpsrld $25, %ymm4, %ymm4 vpxor %ymm4, %ymm12, %ymm4 vpslld $ 7, %ymm5, %ymm12 vpsrld $25, %ymm5, %ymm5 vpxor %ymm5, %ymm12, %ymm5 vpxor %ymm6, %ymm10, %ymm6 vpxor %ymm7, %ymm11, %ymm7 vpslld $ 7, %ymm6, %ymm12 vpsrld $25, %ymm6, %ymm6 vpxor %ymm6, %ymm12, %ymm6 vpslld $ 7, %ymm7, %ymm12 vpsrld $25, %ymm7, %ymm7 vpxor %ymm7, %ymm12, %ymm7 vpaddd %ymm0, %ymm5, %ymm0 vpaddd %ymm1, %ymm6, %ymm1 vpxor %ymm15, %ymm0, %ymm15 vpxor 96(%rsp), %ymm1, %ymm12 vpaddd %ymm2, %ymm7, %ymm2 vpaddd %ymm3, %ymm4, %ymm3 vpxor %ymm13, %ymm2, %ymm13 vpxor %ymm14, %ymm3, %ymm14 vpshufb 448(%rsp), %ymm15, %ymm15 vpshufb 448(%rsp), %ymm12, %ymm12 vpaddd %ymm10, %ymm15, %ymm10 vpaddd %ymm11, %ymm12, %ymm11 vpshufb 448(%rsp), %ymm13, %ymm13 vpshufb 448(%rsp), %ymm14, %ymm14 vpaddd %ymm8, %ymm13, %ymm8 vpaddd %ymm9, %ymm14, %ymm9 vmovdqa %ymm15, 96(%rsp) vpxor %ymm5, %ymm10, %ymm5 vpxor %ymm6, %ymm11, %ymm6 vpslld $ 12, %ymm5, %ymm15 vpsrld $20, %ymm5, %ymm5 vpxor %ymm5, %ymm15, %ymm5 vpslld $ 12, %ymm6, %ymm15 vpsrld $20, %ymm6, %ymm6 vpxor %ymm6, %ymm15, %ymm6 vpxor %ymm7, %ymm8, %ymm7 vpxor %ymm4, %ymm9, %ymm4 vpslld $ 12, %ymm7, %ymm15 vpsrld $20, %ymm7, %ymm7 vpxor %ymm7, %ymm15, %ymm7 vpslld $ 12, %ymm4, %ymm15 vpsrld $20, %ymm4, %ymm4 vpxor %ymm4, %ymm15, %ymm4 vpaddd %ymm0, %ymm5, %ymm0 vpaddd %ymm1, %ymm6, %ymm1 vpxor 96(%rsp), %ymm0, %ymm15 vpxor %ymm12, %ymm1, %ymm12 vpaddd %ymm2, %ymm7, %ymm2 vpaddd %ymm3, %ymm4, %ymm3 vpxor %ymm13, %ymm2, %ymm13 vpxor %ymm14, %ymm3, %ymm14 vpshufb 480(%rsp), %ymm15, %ymm15 vpshufb 480(%rsp), %ymm12, %ymm12 vpaddd %ymm10, %ymm15, %ymm10 vpaddd %ymm11, %ymm12, %ymm11 vpshufb 480(%rsp), %ymm13, %ymm13 vpshufb 480(%rsp), %ymm14, %ymm14 vpaddd %ymm8, %ymm13, %ymm8 vpaddd %ymm9, %ymm14, %ymm9 vmovdqa %ymm15, 96(%rsp) vpxor %ymm5, %ymm10, %ymm5 vpxor %ymm6, %ymm11, %ymm6 vpslld $ 7, %ymm5, %ymm15 vpsrld $25, %ymm5, %ymm5 vpxor %ymm5, %ymm15, %ymm5 vpslld $ 7, %ymm6, %ymm15 vpsrld $25, %ymm6, %ymm6 vpxor %ymm6, %ymm15, %ymm6 vpxor %ymm7, %ymm8, %ymm7 vpxor %ymm4, %ymm9, %ymm4 vpslld $ 7, %ymm7, %ymm15 vpsrld $25, %ymm7, %ymm7 vpxor %ymm7, %ymm15, %ymm7 vpslld $ 7, %ymm4, %ymm15 vpsrld $25, %ymm4, %ymm4 vpxor %ymm4, %ymm15, %ymm4 vmovdqa 96(%rsp), %ymm15 subq $2, %rax jnz .Lchacha_blocks_avx2_mainloop1 vmovdqa %ymm8, 192(%rsp) vmovdqa %ymm9, 224(%rsp) vmovdqa %ymm10, 256(%rsp) vmovdqa %ymm11, 288(%rsp) vmovdqa %ymm12, 320(%rsp) vmovdqa %ymm13, 352(%rsp) vmovdqa %ymm14, 
384(%rsp) vmovdqa %ymm15, 416(%rsp) vpbroadcastd 0(%rsp), %ymm8 vpbroadcastd 4+0(%rsp), %ymm9 vpbroadcastd 8+0(%rsp), %ymm10 vpbroadcastd 12+0(%rsp), %ymm11 vpbroadcastd 16(%rsp), %ymm12 vpbroadcastd 4+16(%rsp), %ymm13 vpbroadcastd 8+16(%rsp), %ymm14 vpbroadcastd 12+16(%rsp), %ymm15 vpaddd %ymm8, %ymm0, %ymm0 vpaddd %ymm9, %ymm1, %ymm1 vpaddd %ymm10, %ymm2, %ymm2 vpaddd %ymm11, %ymm3, %ymm3 vpaddd %ymm12, %ymm4, %ymm4 vpaddd %ymm13, %ymm5, %ymm5 vpaddd %ymm14, %ymm6, %ymm6 vpaddd %ymm15, %ymm7, %ymm7 vpunpckldq %ymm1, %ymm0, %ymm8 vpunpckldq %ymm3, %ymm2, %ymm9 vpunpckhdq %ymm1, %ymm0, %ymm12 vpunpckhdq %ymm3, %ymm2, %ymm13 vpunpckldq %ymm5, %ymm4, %ymm10 vpunpckldq %ymm7, %ymm6, %ymm11 vpunpckhdq %ymm5, %ymm4, %ymm14 vpunpckhdq %ymm7, %ymm6, %ymm15 vpunpcklqdq %ymm9, %ymm8, %ymm0 vpunpcklqdq %ymm11, %ymm10, %ymm1 vpunpckhqdq %ymm9, %ymm8, %ymm2 vpunpckhqdq %ymm11, %ymm10, %ymm3 vpunpcklqdq %ymm13, %ymm12, %ymm4 vpunpcklqdq %ymm15, %ymm14, %ymm5 vpunpckhqdq %ymm13, %ymm12, %ymm6 vpunpckhqdq %ymm15, %ymm14, %ymm7 vperm2i128 $0x20, %ymm1, %ymm0, %ymm8 vperm2i128 $0x20, %ymm3, %ymm2, %ymm9 vperm2i128 $0x31, %ymm1, %ymm0, %ymm12 vperm2i128 $0x31, %ymm3, %ymm2, %ymm13 vperm2i128 $0x20, %ymm5, %ymm4, %ymm10 vperm2i128 $0x20, %ymm7, %ymm6, %ymm11 vperm2i128 $0x31, %ymm5, %ymm4, %ymm14 vperm2i128 $0x31, %ymm7, %ymm6, %ymm15 andq %rsi, %rsi jz .Lchacha_blocks_avx2_noinput1 vpxor 0(%rsi), %ymm8, %ymm8 vpxor 64(%rsi), %ymm9, %ymm9 vpxor 128(%rsi), %ymm10, %ymm10 vpxor 192(%rsi), %ymm11, %ymm11 vpxor 256(%rsi), %ymm12, %ymm12 vpxor 320(%rsi), %ymm13, %ymm13 vpxor 384(%rsi), %ymm14, %ymm14 vpxor 448(%rsi), %ymm15, %ymm15 vmovdqu %ymm8, 0(%rdx) vmovdqu %ymm9, 64(%rdx) vmovdqu %ymm10, 128(%rdx) vmovdqu %ymm11, 192(%rdx) vmovdqu %ymm12, 256(%rdx) vmovdqu %ymm13, 320(%rdx) vmovdqu %ymm14, 384(%rdx) vmovdqu %ymm15, 448(%rdx) vmovdqa 192(%rsp), %ymm0 vmovdqa 224(%rsp), %ymm1 vmovdqa 256(%rsp), %ymm2 vmovdqa 288(%rsp), %ymm3 vmovdqa 320(%rsp), %ymm4 vmovdqa 352(%rsp), %ymm5 vmovdqa 384(%rsp), %ymm6 vmovdqa 416(%rsp), %ymm7 vpbroadcastd 32(%rsp), %ymm8 vpbroadcastd 4+32(%rsp), %ymm9 vpbroadcastd 8+32(%rsp), %ymm10 vpbroadcastd 12+32(%rsp), %ymm11 vmovdqa 128(%rsp), %ymm12 vmovdqa 160(%rsp), %ymm13 vpbroadcastd 8+48(%rsp), %ymm14 vpbroadcastd 12+48(%rsp), %ymm15 vpaddd %ymm8, %ymm0, %ymm0 vpaddd %ymm9, %ymm1, %ymm1 vpaddd %ymm10, %ymm2, %ymm2 vpaddd %ymm11, %ymm3, %ymm3 vpaddd %ymm12, %ymm4, %ymm4 vpaddd %ymm13, %ymm5, %ymm5 vpaddd %ymm14, %ymm6, %ymm6 vpaddd %ymm15, %ymm7, %ymm7 vpunpckldq %ymm1, %ymm0, %ymm8 vpunpckldq %ymm3, %ymm2, %ymm9 vpunpckhdq %ymm1, %ymm0, %ymm12 vpunpckhdq %ymm3, %ymm2, %ymm13 vpunpckldq %ymm5, %ymm4, %ymm10 vpunpckldq %ymm7, %ymm6, %ymm11 vpunpckhdq %ymm5, %ymm4, %ymm14 vpunpckhdq %ymm7, %ymm6, %ymm15 vpunpcklqdq %ymm9, %ymm8, %ymm0 vpunpcklqdq %ymm11, %ymm10, %ymm1 vpunpckhqdq %ymm9, %ymm8, %ymm2 vpunpckhqdq %ymm11, %ymm10, %ymm3 vpunpcklqdq %ymm13, %ymm12, %ymm4 vpunpcklqdq %ymm15, %ymm14, %ymm5 vpunpckhqdq %ymm13, %ymm12, %ymm6 vpunpckhqdq %ymm15, %ymm14, %ymm7 vperm2i128 $0x20, %ymm1, %ymm0, %ymm8 vperm2i128 $0x20, %ymm3, %ymm2, %ymm9 vperm2i128 $0x31, %ymm1, %ymm0, %ymm12 vperm2i128 $0x31, %ymm3, %ymm2, %ymm13 vperm2i128 $0x20, %ymm5, %ymm4, %ymm10 vperm2i128 $0x20, %ymm7, %ymm6, %ymm11 vperm2i128 $0x31, %ymm5, %ymm4, %ymm14 vperm2i128 $0x31, %ymm7, %ymm6, %ymm15 vpxor 32(%rsi), %ymm8, %ymm8 vpxor 96(%rsi), %ymm9, %ymm9 vpxor 160(%rsi), %ymm10, %ymm10 vpxor 224(%rsi), %ymm11, %ymm11 vpxor 288(%rsi), %ymm12, %ymm12 vpxor 352(%rsi), %ymm13, %ymm13 vpxor 416(%rsi), %ymm14, %ymm14 
vpxor 480(%rsi), %ymm15, %ymm15 vmovdqu %ymm8, 32(%rdx) vmovdqu %ymm9, 96(%rdx) vmovdqu %ymm10, 160(%rdx) vmovdqu %ymm11, 224(%rdx) vmovdqu %ymm12, 288(%rdx) vmovdqu %ymm13, 352(%rdx) vmovdqu %ymm14, 416(%rdx) vmovdqu %ymm15, 480(%rdx) addq $512, %rsi jmp .Lchacha_blocks_avx2_mainloop1_cont .Lchacha_blocks_avx2_noinput1: vmovdqu %ymm8, 0(%rdx) vmovdqu %ymm9, 64(%rdx) vmovdqu %ymm10, 128(%rdx) vmovdqu %ymm11, 192(%rdx) vmovdqu %ymm12, 256(%rdx) vmovdqu %ymm13, 320(%rdx) vmovdqu %ymm14, 384(%rdx) vmovdqu %ymm15, 448(%rdx) vmovdqa 192(%rsp), %ymm0 vmovdqa 224(%rsp), %ymm1 vmovdqa 256(%rsp), %ymm2 vmovdqa 288(%rsp), %ymm3 vmovdqa 320(%rsp), %ymm4 vmovdqa 352(%rsp), %ymm5 vmovdqa 384(%rsp), %ymm6 vmovdqa 416(%rsp), %ymm7 vpbroadcastd 32(%rsp), %ymm8 vpbroadcastd 4+32(%rsp), %ymm9 vpbroadcastd 8+32(%rsp), %ymm10 vpbroadcastd 12+32(%rsp), %ymm11 vmovdqa 128(%rsp), %ymm12 vmovdqa 160(%rsp), %ymm13 vpbroadcastd 8+48(%rsp), %ymm14 vpbroadcastd 12+48(%rsp), %ymm15 vpaddd %ymm8, %ymm0, %ymm0 vpaddd %ymm9, %ymm1, %ymm1 vpaddd %ymm10, %ymm2, %ymm2 vpaddd %ymm11, %ymm3, %ymm3 vpaddd %ymm12, %ymm4, %ymm4 vpaddd %ymm13, %ymm5, %ymm5 vpaddd %ymm14, %ymm6, %ymm6 vpaddd %ymm15, %ymm7, %ymm7 vpunpckldq %ymm1, %ymm0, %ymm8 vpunpckldq %ymm3, %ymm2, %ymm9 vpunpckhdq %ymm1, %ymm0, %ymm12 vpunpckhdq %ymm3, %ymm2, %ymm13 vpunpckldq %ymm5, %ymm4, %ymm10 vpunpckldq %ymm7, %ymm6, %ymm11 vpunpckhdq %ymm5, %ymm4, %ymm14 vpunpckhdq %ymm7, %ymm6, %ymm15 vpunpcklqdq %ymm9, %ymm8, %ymm0 vpunpcklqdq %ymm11, %ymm10, %ymm1 vpunpckhqdq %ymm9, %ymm8, %ymm2 vpunpckhqdq %ymm11, %ymm10, %ymm3 vpunpcklqdq %ymm13, %ymm12, %ymm4 vpunpcklqdq %ymm15, %ymm14, %ymm5 vpunpckhqdq %ymm13, %ymm12, %ymm6 vpunpckhqdq %ymm15, %ymm14, %ymm7 vperm2i128 $0x20, %ymm1, %ymm0, %ymm8 vperm2i128 $0x20, %ymm3, %ymm2, %ymm9 vperm2i128 $0x31, %ymm1, %ymm0, %ymm12 vperm2i128 $0x31, %ymm3, %ymm2, %ymm13 vperm2i128 $0x20, %ymm5, %ymm4, %ymm10 vperm2i128 $0x20, %ymm7, %ymm6, %ymm11 vperm2i128 $0x31, %ymm5, %ymm4, %ymm14 vperm2i128 $0x31, %ymm7, %ymm6, %ymm15 vmovdqu %ymm8, 32(%rdx) vmovdqu %ymm9, 96(%rdx) vmovdqu %ymm10, 160(%rdx) vmovdqu %ymm11, 224(%rdx) vmovdqu %ymm12, 288(%rdx) vmovdqu %ymm13, 352(%rdx) vmovdqu %ymm14, 416(%rdx) vmovdqu %ymm15, 480(%rdx) .Lchacha_blocks_avx2_mainloop1_cont: addq $512, %rdx subq $512, %rcx cmp $512, %rcx jae .Lchacha_blocks_avx2_atleast512 cmp $256, %rcx jb .Lchacha_blocks_avx2_below256_fixup .Lchacha_blocks_avx2_atleast256: movq 48(%rsp), %rax leaq 1(%rax), %r8 leaq 2(%rax), %r9 leaq 3(%rax), %r10 leaq 4(%rax), %rbx movl %eax, 128(%rsp) movl %r8d, 4+128(%rsp) movl %r9d, 8+128(%rsp) movl %r10d, 12+128(%rsp) shrq $32, %rax shrq $32, %r8 shrq $32, %r9 shrq $32, %r10 movl %eax, 160(%rsp) movl %r8d, 4+160(%rsp) movl %r9d, 8+160(%rsp) movl %r10d, 12+160(%rsp) movq %rbx, 48(%rsp) movq 64(%rsp), %rax vpbroadcastd 0(%rsp), %xmm0 vpbroadcastd 4+0(%rsp), %xmm1 vpbroadcastd 8+0(%rsp), %xmm2 vpbroadcastd 12+0(%rsp), %xmm3 vpbroadcastd 16(%rsp), %xmm4 vpbroadcastd 4+16(%rsp), %xmm5 vpbroadcastd 8+16(%rsp), %xmm6 vpbroadcastd 12+16(%rsp), %xmm7 vpbroadcastd 32(%rsp), %xmm8 vpbroadcastd 4+32(%rsp), %xmm9 vpbroadcastd 8+32(%rsp), %xmm10 vpbroadcastd 12+32(%rsp), %xmm11 vmovdqa 128(%rsp), %xmm12 vmovdqa 160(%rsp), %xmm13 vpbroadcastd 8+48(%rsp), %xmm14 vpbroadcastd 12+48(%rsp), %xmm15 .Lchacha_blocks_avx2_mainloop2: vpaddd %xmm0, %xmm4, %xmm0 vpaddd %xmm1, %xmm5, %xmm1 vpxor %xmm12, %xmm0, %xmm12 vpxor %xmm13, %xmm1, %xmm13 vpaddd %xmm2, %xmm6, %xmm2 vpaddd %xmm3, %xmm7, %xmm3 vpxor %xmm14, %xmm2, %xmm14 vpxor %xmm15, %xmm3, %xmm15 vpshufb 
448(%rsp), %xmm12, %xmm12 vpshufb 448(%rsp), %xmm13, %xmm13 vpaddd %xmm8, %xmm12, %xmm8 vpaddd %xmm9, %xmm13, %xmm9 vpshufb 448(%rsp), %xmm14, %xmm14 vpshufb 448(%rsp), %xmm15, %xmm15 vpaddd %xmm10, %xmm14, %xmm10 vpaddd %xmm11, %xmm15, %xmm11 vmovdqa %xmm12, 96(%rsp) vpxor %xmm4, %xmm8, %xmm4 vpxor %xmm5, %xmm9, %xmm5 vpslld $ 12, %xmm4, %xmm12 vpsrld $20, %xmm4, %xmm4 vpxor %xmm4, %xmm12, %xmm4 vpslld $ 12, %xmm5, %xmm12 vpsrld $20, %xmm5, %xmm5 vpxor %xmm5, %xmm12, %xmm5 vpxor %xmm6, %xmm10, %xmm6 vpxor %xmm7, %xmm11, %xmm7 vpslld $ 12, %xmm6, %xmm12 vpsrld $20, %xmm6, %xmm6 vpxor %xmm6, %xmm12, %xmm6 vpslld $ 12, %xmm7, %xmm12 vpsrld $20, %xmm7, %xmm7 vpxor %xmm7, %xmm12, %xmm7 vpaddd %xmm0, %xmm4, %xmm0 vpaddd %xmm1, %xmm5, %xmm1 vpxor 96(%rsp), %xmm0, %xmm12 vpxor %xmm13, %xmm1, %xmm13 vpaddd %xmm2, %xmm6, %xmm2 vpaddd %xmm3, %xmm7, %xmm3 vpxor %xmm14, %xmm2, %xmm14 vpxor %xmm15, %xmm3, %xmm15 vpshufb 480(%rsp), %xmm12, %xmm12 vpshufb 480(%rsp), %xmm13, %xmm13 vpaddd %xmm8, %xmm12, %xmm8 vpaddd %xmm9, %xmm13, %xmm9 vpshufb 480(%rsp), %xmm14, %xmm14 vpshufb 480(%rsp), %xmm15, %xmm15 vpaddd %xmm10, %xmm14, %xmm10 vpaddd %xmm11, %xmm15, %xmm11 vmovdqa %xmm12, 96(%rsp) vpxor %xmm4, %xmm8, %xmm4 vpxor %xmm5, %xmm9, %xmm5 vpslld $ 7, %xmm4, %xmm12 vpsrld $25, %xmm4, %xmm4 vpxor %xmm4, %xmm12, %xmm4 vpslld $ 7, %xmm5, %xmm12 vpsrld $25, %xmm5, %xmm5 vpxor %xmm5, %xmm12, %xmm5 vpxor %xmm6, %xmm10, %xmm6 vpxor %xmm7, %xmm11, %xmm7 vpslld $ 7, %xmm6, %xmm12 vpsrld $25, %xmm6, %xmm6 vpxor %xmm6, %xmm12, %xmm6 vpslld $ 7, %xmm7, %xmm12 vpsrld $25, %xmm7, %xmm7 vpxor %xmm7, %xmm12, %xmm7 vpaddd %xmm0, %xmm5, %xmm0 vpaddd %xmm1, %xmm6, %xmm1 vpxor %xmm15, %xmm0, %xmm15 vpxor 96(%rsp), %xmm1, %xmm12 vpaddd %xmm2, %xmm7, %xmm2 vpaddd %xmm3, %xmm4, %xmm3 vpxor %xmm13, %xmm2, %xmm13 vpxor %xmm14, %xmm3, %xmm14 vpshufb 448(%rsp), %xmm15, %xmm15 vpshufb 448(%rsp), %xmm12, %xmm12 vpaddd %xmm10, %xmm15, %xmm10 vpaddd %xmm11, %xmm12, %xmm11 vpshufb 448(%rsp), %xmm13, %xmm13 vpshufb 448(%rsp), %xmm14, %xmm14 vpaddd %xmm8, %xmm13, %xmm8 vpaddd %xmm9, %xmm14, %xmm9 vmovdqa %xmm15, 96(%rsp) vpxor %xmm5, %xmm10, %xmm5 vpxor %xmm6, %xmm11, %xmm6 vpslld $ 12, %xmm5, %xmm15 vpsrld $20, %xmm5, %xmm5 vpxor %xmm5, %xmm15, %xmm5 vpslld $ 12, %xmm6, %xmm15 vpsrld $20, %xmm6, %xmm6 vpxor %xmm6, %xmm15, %xmm6 vpxor %xmm7, %xmm8, %xmm7 vpxor %xmm4, %xmm9, %xmm4 vpslld $ 12, %xmm7, %xmm15 vpsrld $20, %xmm7, %xmm7 vpxor %xmm7, %xmm15, %xmm7 vpslld $ 12, %xmm4, %xmm15 vpsrld $20, %xmm4, %xmm4 vpxor %xmm4, %xmm15, %xmm4 vpaddd %xmm0, %xmm5, %xmm0 vpaddd %xmm1, %xmm6, %xmm1 vpxor 96(%rsp), %xmm0, %xmm15 vpxor %xmm12, %xmm1, %xmm12 vpaddd %xmm2, %xmm7, %xmm2 vpaddd %xmm3, %xmm4, %xmm3 vpxor %xmm13, %xmm2, %xmm13 vpxor %xmm14, %xmm3, %xmm14 vpshufb 480(%rsp), %xmm15, %xmm15 vpshufb 480(%rsp), %xmm12, %xmm12 vpaddd %xmm10, %xmm15, %xmm10 vpaddd %xmm11, %xmm12, %xmm11 vpshufb 480(%rsp), %xmm13, %xmm13 vpshufb 480(%rsp), %xmm14, %xmm14 vpaddd %xmm8, %xmm13, %xmm8 vpaddd %xmm9, %xmm14, %xmm9 vmovdqa %xmm15, 96(%rsp) vpxor %xmm5, %xmm10, %xmm5 vpxor %xmm6, %xmm11, %xmm6 vpslld $ 7, %xmm5, %xmm15 vpsrld $25, %xmm5, %xmm5 vpxor %xmm5, %xmm15, %xmm5 vpslld $ 7, %xmm6, %xmm15 vpsrld $25, %xmm6, %xmm6 vpxor %xmm6, %xmm15, %xmm6 vpxor %xmm7, %xmm8, %xmm7 vpxor %xmm4, %xmm9, %xmm4 vpslld $ 7, %xmm7, %xmm15 vpsrld $25, %xmm7, %xmm7 vpxor %xmm7, %xmm15, %xmm7 vpslld $ 7, %xmm4, %xmm15 vpsrld $25, %xmm4, %xmm4 vpxor %xmm4, %xmm15, %xmm4 vmovdqa 96(%rsp), %xmm15 subq $2, %rax jnz .Lchacha_blocks_avx2_mainloop2 vmovdqa %xmm8, 192(%rsp) vmovdqa 
%xmm9, 208(%rsp) vmovdqa %xmm10, 224(%rsp) vmovdqa %xmm11, 240(%rsp) vmovdqa %xmm12, 256(%rsp) vmovdqa %xmm13, 272(%rsp) vmovdqa %xmm14, 288(%rsp) vmovdqa %xmm15, 304(%rsp) vpbroadcastd 0(%rsp), %xmm8 vpbroadcastd 4+0(%rsp), %xmm9 vpbroadcastd 8+0(%rsp), %xmm10 vpbroadcastd 12+0(%rsp), %xmm11 vpbroadcastd 16(%rsp), %xmm12 vpbroadcastd 4+16(%rsp), %xmm13 vpbroadcastd 8+16(%rsp), %xmm14 vpbroadcastd 12+16(%rsp), %xmm15 vpaddd %xmm8, %xmm0, %xmm0 vpaddd %xmm9, %xmm1, %xmm1 vpaddd %xmm10, %xmm2, %xmm2 vpaddd %xmm11, %xmm3, %xmm3 vpaddd %xmm12, %xmm4, %xmm4 vpaddd %xmm13, %xmm5, %xmm5 vpaddd %xmm14, %xmm6, %xmm6 vpaddd %xmm15, %xmm7, %xmm7 vpunpckldq %xmm1, %xmm0, %xmm8 vpunpckldq %xmm3, %xmm2, %xmm9 vpunpckhdq %xmm1, %xmm0, %xmm12 vpunpckhdq %xmm3, %xmm2, %xmm13 vpunpckldq %xmm5, %xmm4, %xmm10 vpunpckldq %xmm7, %xmm6, %xmm11 vpunpckhdq %xmm5, %xmm4, %xmm14 vpunpckhdq %xmm7, %xmm6, %xmm15 vpunpcklqdq %xmm9, %xmm8, %xmm0 vpunpcklqdq %xmm11, %xmm10, %xmm1 vpunpckhqdq %xmm9, %xmm8, %xmm2 vpunpckhqdq %xmm11, %xmm10, %xmm3 vpunpcklqdq %xmm13, %xmm12, %xmm4 vpunpcklqdq %xmm15, %xmm14, %xmm5 vpunpckhqdq %xmm13, %xmm12, %xmm6 vpunpckhqdq %xmm15, %xmm14, %xmm7 andq %rsi, %rsi jz .Lchacha_blocks_avx2_noinput2 vpxor 0(%rsi), %xmm0, %xmm0 vpxor 16(%rsi), %xmm1, %xmm1 vpxor 64(%rsi), %xmm2, %xmm2 vpxor 80(%rsi), %xmm3, %xmm3 vpxor 128(%rsi), %xmm4, %xmm4 vpxor 144(%rsi), %xmm5, %xmm5 vpxor 192(%rsi), %xmm6, %xmm6 vpxor 208(%rsi), %xmm7, %xmm7 vmovdqu %xmm0, 0(%rdx) vmovdqu %xmm1, 16(%rdx) vmovdqu %xmm2, 64(%rdx) vmovdqu %xmm3, 80(%rdx) vmovdqu %xmm4, 128(%rdx) vmovdqu %xmm5, 144(%rdx) vmovdqu %xmm6, 192(%rdx) vmovdqu %xmm7, 208(%rdx) vmovdqa 192(%rsp), %xmm0 vmovdqa 208(%rsp), %xmm1 vmovdqa 224(%rsp), %xmm2 vmovdqa 240(%rsp), %xmm3 vmovdqa 256(%rsp), %xmm4 vmovdqa 272(%rsp), %xmm5 vmovdqa 288(%rsp), %xmm6 vmovdqa 304(%rsp), %xmm7 vpbroadcastd 32(%rsp), %xmm8 vpbroadcastd 4+32(%rsp), %xmm9 vpbroadcastd 8+32(%rsp), %xmm10 vpbroadcastd 12+32(%rsp), %xmm11 vmovdqa 128(%rsp), %xmm12 vmovdqa 160(%rsp), %xmm13 vpbroadcastd 8+48(%rsp), %xmm14 vpbroadcastd 12+48(%rsp), %xmm15 vpaddd %xmm8, %xmm0, %xmm0 vpaddd %xmm9, %xmm1, %xmm1 vpaddd %xmm10, %xmm2, %xmm2 vpaddd %xmm11, %xmm3, %xmm3 vpaddd %xmm12, %xmm4, %xmm4 vpaddd %xmm13, %xmm5, %xmm5 vpaddd %xmm14, %xmm6, %xmm6 vpaddd %xmm15, %xmm7, %xmm7 vpunpckldq %xmm1, %xmm0, %xmm8 vpunpckldq %xmm3, %xmm2, %xmm9 vpunpckhdq %xmm1, %xmm0, %xmm12 vpunpckhdq %xmm3, %xmm2, %xmm13 vpunpckldq %xmm5, %xmm4, %xmm10 vpunpckldq %xmm7, %xmm6, %xmm11 vpunpckhdq %xmm5, %xmm4, %xmm14 vpunpckhdq %xmm7, %xmm6, %xmm15 vpunpcklqdq %xmm9, %xmm8, %xmm0 vpunpcklqdq %xmm11, %xmm10, %xmm1 vpunpckhqdq %xmm9, %xmm8, %xmm2 vpunpckhqdq %xmm11, %xmm10, %xmm3 vpunpcklqdq %xmm13, %xmm12, %xmm4 vpunpcklqdq %xmm15, %xmm14, %xmm5 vpunpckhqdq %xmm13, %xmm12, %xmm6 vpunpckhqdq %xmm15, %xmm14, %xmm7 vpxor 32(%rsi), %xmm0, %xmm0 vpxor 48(%rsi), %xmm1, %xmm1 vpxor 96(%rsi), %xmm2, %xmm2 vpxor 112(%rsi), %xmm3, %xmm3 vpxor 160(%rsi), %xmm4, %xmm4 vpxor 176(%rsi), %xmm5, %xmm5 vpxor 224(%rsi), %xmm6, %xmm6 vpxor 240(%rsi), %xmm7, %xmm7 vmovdqu %xmm0, 32(%rdx) vmovdqu %xmm1, 48(%rdx) vmovdqu %xmm2, 96(%rdx) vmovdqu %xmm3, 112(%rdx) vmovdqu %xmm4, 160(%rdx) vmovdqu %xmm5, 176(%rdx) vmovdqu %xmm6, 224(%rdx) vmovdqu %xmm7, 240(%rdx) addq $256, %rsi jmp .Lchacha_blocks_avx2_mainloop2_cont .Lchacha_blocks_avx2_noinput2: vmovdqu %xmm0, 0(%rdx) vmovdqu %xmm1, 16(%rdx) vmovdqu %xmm2, 64(%rdx) vmovdqu %xmm3, 80(%rdx) vmovdqu %xmm4, 128(%rdx) vmovdqu %xmm5, 144(%rdx) vmovdqu %xmm6, 192(%rdx) vmovdqu %xmm7, 208(%rdx) vmovdqa 
192(%rsp), %xmm0 vmovdqa 208(%rsp), %xmm1 vmovdqa 224(%rsp), %xmm2 vmovdqa 240(%rsp), %xmm3 vmovdqa 256(%rsp), %xmm4 vmovdqa 272(%rsp), %xmm5 vmovdqa 288(%rsp), %xmm6 vmovdqa 304(%rsp), %xmm7 vpbroadcastd 32(%rsp), %xmm8 vpbroadcastd 4+32(%rsp), %xmm9 vpbroadcastd 8+32(%rsp), %xmm10 vpbroadcastd 12+32(%rsp), %xmm11 vmovdqa 128(%rsp), %xmm12 vmovdqa 160(%rsp), %xmm13 vpbroadcastd 8+48(%rsp), %xmm14 vpbroadcastd 12+48(%rsp), %xmm15 vpaddd %xmm8, %xmm0, %xmm0 vpaddd %xmm9, %xmm1, %xmm1 vpaddd %xmm10, %xmm2, %xmm2 vpaddd %xmm11, %xmm3, %xmm3 vpaddd %xmm12, %xmm4, %xmm4 vpaddd %xmm13, %xmm5, %xmm5 vpaddd %xmm14, %xmm6, %xmm6 vpaddd %xmm15, %xmm7, %xmm7 vpunpckldq %xmm1, %xmm0, %xmm8 vpunpckldq %xmm3, %xmm2, %xmm9 vpunpckhdq %xmm1, %xmm0, %xmm12 vpunpckhdq %xmm3, %xmm2, %xmm13 vpunpckldq %xmm5, %xmm4, %xmm10 vpunpckldq %xmm7, %xmm6, %xmm11 vpunpckhdq %xmm5, %xmm4, %xmm14 vpunpckhdq %xmm7, %xmm6, %xmm15 vpunpcklqdq %xmm9, %xmm8, %xmm0 vpunpcklqdq %xmm11, %xmm10, %xmm1 vpunpckhqdq %xmm9, %xmm8, %xmm2 vpunpckhqdq %xmm11, %xmm10, %xmm3 vpunpcklqdq %xmm13, %xmm12, %xmm4 vpunpcklqdq %xmm15, %xmm14, %xmm5 vpunpckhqdq %xmm13, %xmm12, %xmm6 vpunpckhqdq %xmm15, %xmm14, %xmm7 vmovdqu %xmm0, 32(%rdx) vmovdqu %xmm1, 48(%rdx) vmovdqu %xmm2, 96(%rdx) vmovdqu %xmm3, 112(%rdx) vmovdqu %xmm4, 160(%rdx) vmovdqu %xmm5, 176(%rdx) vmovdqu %xmm6, 224(%rdx) vmovdqu %xmm7, 240(%rdx) .Lchacha_blocks_avx2_mainloop2_cont: addq $256, %rdx subq $256, %rcx cmp $256, %rcx jae .Lchacha_blocks_avx2_atleast256 .Lchacha_blocks_avx2_below256_fixup: vmovdqa 448(%rsp), %xmm6 vmovdqa 480(%rsp), %xmm7 vmovdqa 0(%rsp), %xmm8 vmovdqa 16(%rsp), %xmm9 vmovdqa 32(%rsp), %xmm10 vmovdqa 48(%rsp), %xmm11 movq $1, %r9 .Lchacha_blocks_avx2_below256: vmovq %r9, %xmm5 andq %rcx, %rcx jz .Lchacha_blocks_avx2_done cmpq $64, %rcx jae .Lchacha_blocks_avx2_above63 movq %rdx, %r9 andq %rsi, %rsi jz .Lchacha_blocks_avx2_noinput3 movq %rcx, %r10 movq %rsp, %rdx addq %r10, %rsi addq %r10, %rdx negq %r10 .Lchacha_blocks_avx2_copyinput: movb (%rsi, %r10), %al movb %al, (%rdx, %r10) incq %r10 jnz .Lchacha_blocks_avx2_copyinput movq %rsp, %rsi .Lchacha_blocks_avx2_noinput3: movq %rsp, %rdx .Lchacha_blocks_avx2_above63: vmovdqa %xmm8, %xmm0 vmovdqa %xmm9, %xmm1 vmovdqa %xmm10, %xmm2 vmovdqa %xmm11, %xmm3 movq 64(%rsp), %rax .Lchacha_blocks_avx2_mainloop3: vpaddd %xmm0, %xmm1, %xmm0 vpxor %xmm3, %xmm0, %xmm3 vpshufb %xmm6, %xmm3, %xmm3 vpaddd %xmm2, %xmm3, %xmm2 vpxor %xmm1, %xmm2, %xmm1 vpslld $12, %xmm1, %xmm4 vpsrld $20, %xmm1, %xmm1 vpxor %xmm1, %xmm4, %xmm1 vpaddd %xmm0, %xmm1, %xmm0 vpxor %xmm3, %xmm0, %xmm3 vpshufb %xmm7, %xmm3, %xmm3 vpshufd $0x93, %xmm0, %xmm0 vpaddd %xmm2, %xmm3, %xmm2 vpshufd $0x4e, %xmm3, %xmm3 vpxor %xmm1, %xmm2, %xmm1 vpshufd $0x39, %xmm2, %xmm2 vpslld $7, %xmm1, %xmm4 vpsrld $25, %xmm1, %xmm1 vpxor %xmm1, %xmm4, %xmm1 vpaddd %xmm0, %xmm1, %xmm0 vpxor %xmm3, %xmm0, %xmm3 vpshufb %xmm6, %xmm3, %xmm3 vpaddd %xmm2, %xmm3, %xmm2 vpxor %xmm1, %xmm2, %xmm1 vpslld $12, %xmm1, %xmm4 vpsrld $20, %xmm1, %xmm1 vpxor %xmm1, %xmm4, %xmm1 vpaddd %xmm0, %xmm1, %xmm0 vpxor %xmm3, %xmm0, %xmm3 vpshufb %xmm7, %xmm3, %xmm3 vpshufd $0x39, %xmm0, %xmm0 vpaddd %xmm2, %xmm3, %xmm2 vpshufd $0x4e, %xmm3, %xmm3 vpxor %xmm1, %xmm2, %xmm1 vpshufd $0x93, %xmm2, %xmm2 vpslld $7, %xmm1, %xmm4 vpsrld $25, %xmm1, %xmm1 vpxor %xmm1, %xmm4, %xmm1 subq $2, %rax jnz .Lchacha_blocks_avx2_mainloop3 vpaddd %xmm0, %xmm8, %xmm0 vpaddd %xmm1, %xmm9, %xmm1 vpaddd %xmm2, %xmm10, %xmm2 vpaddd %xmm3, %xmm11, %xmm3 andq %rsi, %rsi jz .Lchacha_blocks_avx2_noinput4 vpxor 0(%rsi), 
%xmm0, %xmm0
vpxor 16(%rsi), %xmm1, %xmm1
vpxor 32(%rsi), %xmm2, %xmm2
vpxor 48(%rsi), %xmm3, %xmm3
addq $64, %rsi
.Lchacha_blocks_avx2_noinput4:
vmovdqu %xmm0, 0(%rdx)
vmovdqu %xmm1, 16(%rdx)
vmovdqu %xmm2, 32(%rdx)
vmovdqu %xmm3, 48(%rdx)
vpaddq %xmm11, %xmm5, %xmm11
cmpq $64, %rcx
jbe .Lchacha_blocks_avx2_mainloop3_finishup
addq $64, %rdx
subq $64, %rcx
jmp .Lchacha_blocks_avx2_below256
.Lchacha_blocks_avx2_mainloop3_finishup:
cmpq $64, %rcx
je .Lchacha_blocks_avx2_done
addq %rcx, %r9
addq %rcx, %rdx
negq %rcx
.Lchacha_blocks_avx2_copyoutput:
movb (%rdx, %rcx), %al
movb %al, (%r9, %rcx)
incq %rcx
jnz .Lchacha_blocks_avx2_copyoutput
.Lchacha_blocks_avx2_done:
vmovdqu %xmm11, 48(%rdi)
movq %rbp, %rsp
popq %r14
popq %r13
popq %r12
popq %rbp
popq %rbx
vzeroall
movl $(63 + 512), %eax
ret
ELF(.size _gcry_chacha20_amd64_avx2_blocks,.-_gcry_chacha20_amd64_avx2_blocks;)
-.data
.align 16
.LC:
.byte 2,3,0,1,6,7,4,5,10,11,8,9,14,15,12,13 /* pshufb rotate by 16 */
.byte 3,0,1,2,7,4,5,6,11,8,9,10,15,12,13,14 /* pshufb rotate by 8 */
#endif /*defined(USE_CHACHA20)*/
#endif /*__x86_64*/
diff --git a/cipher/chacha20-ssse3-amd64.S b/cipher/chacha20-ssse3-amd64.S
index a1a843fa..c04010e7 100644
--- a/cipher/chacha20-ssse3-amd64.S
+++ b/cipher/chacha20-ssse3-amd64.S
@@ -1,633 +1,632 @@
/* chacha20-ssse3-amd64.S - AMD64/SSSE3 implementation of ChaCha20
 *
 * Copyright (C) 2014 Jussi Kivilinna
 *
 * This file is part of Libgcrypt.
 *
 * Libgcrypt is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as
 * published by the Free Software Foundation; either version 2.1 of
 * the License, or (at your option) any later version.
 *
 * Libgcrypt is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see .
 */
/*
 * Based on public domain implementation by Andrew Moon at
 * https://github.com/floodyberry/chacha-opt
 */
#ifdef __x86_64__
#include
#if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \
    defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && \
    defined(HAVE_GCC_INLINE_ASM_SSSE3) && USE_CHACHA20
#ifdef __PIC__
# define RIP (%rip)
#else
# define RIP
#endif
#ifdef HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS
# define ELF(...) __VA_ARGS__
#else
# define ELF(...)
/*_*/ #endif .text .align 8 .globl _gcry_chacha20_amd64_ssse3_blocks ELF(.type _gcry_chacha20_amd64_ssse3_blocks,@function;) _gcry_chacha20_amd64_ssse3_blocks: .Lchacha_blocks_ssse3_local: pushq %rbx pushq %rbp movq %rsp, %rbp andq $~63, %rsp subq $512, %rsp leaq .LC RIP, %rax movdqa 0(%rax), %xmm6 movdqa 16(%rax), %xmm7 movdqu 0(%rdi), %xmm8 movdqu 16(%rdi), %xmm9 movdqu 32(%rdi), %xmm10 movdqu 48(%rdi), %xmm11 movl $20, %eax movq $1, %r9 movdqa %xmm8, 0(%rsp) movdqa %xmm9, 16(%rsp) movdqa %xmm10, 32(%rsp) movdqa %xmm11, 48(%rsp) movdqa %xmm6, 80(%rsp) movdqa %xmm7, 96(%rsp) movq %rax, 64(%rsp) cmpq $256, %rcx jb .Lchacha_blocks_ssse3_below256 pshufd $0x00, %xmm8, %xmm0 pshufd $0x55, %xmm8, %xmm1 pshufd $0xaa, %xmm8, %xmm2 pshufd $0xff, %xmm8, %xmm3 movdqa %xmm0, 128(%rsp) movdqa %xmm1, 144(%rsp) movdqa %xmm2, 160(%rsp) movdqa %xmm3, 176(%rsp) pshufd $0x00, %xmm9, %xmm0 pshufd $0x55, %xmm9, %xmm1 pshufd $0xaa, %xmm9, %xmm2 pshufd $0xff, %xmm9, %xmm3 movdqa %xmm0, 192(%rsp) movdqa %xmm1, 208(%rsp) movdqa %xmm2, 224(%rsp) movdqa %xmm3, 240(%rsp) pshufd $0x00, %xmm10, %xmm0 pshufd $0x55, %xmm10, %xmm1 pshufd $0xaa, %xmm10, %xmm2 pshufd $0xff, %xmm10, %xmm3 movdqa %xmm0, 256(%rsp) movdqa %xmm1, 272(%rsp) movdqa %xmm2, 288(%rsp) movdqa %xmm3, 304(%rsp) pshufd $0xaa, %xmm11, %xmm0 pshufd $0xff, %xmm11, %xmm1 movdqa %xmm0, 352(%rsp) movdqa %xmm1, 368(%rsp) jmp .Lchacha_blocks_ssse3_atleast256 .p2align 6,,63 # align to 4 mod 64 nop;nop;nop;nop; .Lchacha_blocks_ssse3_atleast256: movq 48(%rsp), %rax leaq 1(%rax), %r8 leaq 2(%rax), %r9 leaq 3(%rax), %r10 leaq 4(%rax), %rbx movl %eax, 320(%rsp) movl %r8d, 4+320(%rsp) movl %r9d, 8+320(%rsp) movl %r10d, 12+320(%rsp) shrq $32, %rax shrq $32, %r8 shrq $32, %r9 shrq $32, %r10 movl %eax, 336(%rsp) movl %r8d, 4+336(%rsp) movl %r9d, 8+336(%rsp) movl %r10d, 12+336(%rsp) movq %rbx, 48(%rsp) movq 64(%rsp), %rax movdqa 128(%rsp), %xmm0 movdqa 144(%rsp), %xmm1 movdqa 160(%rsp), %xmm2 movdqa 176(%rsp), %xmm3 movdqa 192(%rsp), %xmm4 movdqa 208(%rsp), %xmm5 movdqa 224(%rsp), %xmm6 movdqa 240(%rsp), %xmm7 movdqa 256(%rsp), %xmm8 movdqa 272(%rsp), %xmm9 movdqa 288(%rsp), %xmm10 movdqa 304(%rsp), %xmm11 movdqa 320(%rsp), %xmm12 movdqa 336(%rsp), %xmm13 movdqa 352(%rsp), %xmm14 movdqa 368(%rsp), %xmm15 .Lchacha_blocks_ssse3_mainloop1: paddd %xmm4, %xmm0 paddd %xmm5, %xmm1 pxor %xmm0, %xmm12 pxor %xmm1, %xmm13 paddd %xmm6, %xmm2 paddd %xmm7, %xmm3 pxor %xmm2, %xmm14 pxor %xmm3, %xmm15 pshufb 80(%rsp), %xmm12 pshufb 80(%rsp), %xmm13 paddd %xmm12, %xmm8 paddd %xmm13, %xmm9 pshufb 80(%rsp), %xmm14 pshufb 80(%rsp), %xmm15 paddd %xmm14, %xmm10 paddd %xmm15, %xmm11 movdqa %xmm12, 112(%rsp) pxor %xmm8, %xmm4 pxor %xmm9, %xmm5 movdqa %xmm4, %xmm12 pslld $ 12, %xmm4 psrld $20, %xmm12 pxor %xmm12, %xmm4 movdqa %xmm5, %xmm12 pslld $ 12, %xmm5 psrld $20, %xmm12 pxor %xmm12, %xmm5 pxor %xmm10, %xmm6 pxor %xmm11, %xmm7 movdqa %xmm6, %xmm12 pslld $ 12, %xmm6 psrld $20, %xmm12 pxor %xmm12, %xmm6 movdqa %xmm7, %xmm12 pslld $ 12, %xmm7 psrld $20, %xmm12 pxor %xmm12, %xmm7 movdqa 112(%rsp), %xmm12 paddd %xmm4, %xmm0 paddd %xmm5, %xmm1 pxor %xmm0, %xmm12 pxor %xmm1, %xmm13 paddd %xmm6, %xmm2 paddd %xmm7, %xmm3 pxor %xmm2, %xmm14 pxor %xmm3, %xmm15 pshufb 96(%rsp), %xmm12 pshufb 96(%rsp), %xmm13 paddd %xmm12, %xmm8 paddd %xmm13, %xmm9 pshufb 96(%rsp), %xmm14 pshufb 96(%rsp), %xmm15 paddd %xmm14, %xmm10 paddd %xmm15, %xmm11 movdqa %xmm12, 112(%rsp) pxor %xmm8, %xmm4 pxor %xmm9, %xmm5 movdqa %xmm4, %xmm12 pslld $ 7, %xmm4 psrld $25, %xmm12 pxor %xmm12, %xmm4 movdqa %xmm5, %xmm12 pslld $ 7, 
%xmm5 psrld $25, %xmm12 pxor %xmm12, %xmm5 pxor %xmm10, %xmm6 pxor %xmm11, %xmm7 movdqa %xmm6, %xmm12 pslld $ 7, %xmm6 psrld $25, %xmm12 pxor %xmm12, %xmm6 movdqa %xmm7, %xmm12 pslld $ 7, %xmm7 psrld $25, %xmm12 pxor %xmm12, %xmm7 movdqa 112(%rsp), %xmm12 paddd %xmm5, %xmm0 paddd %xmm6, %xmm1 pxor %xmm0, %xmm15 pxor %xmm1, %xmm12 paddd %xmm7, %xmm2 paddd %xmm4, %xmm3 pxor %xmm2, %xmm13 pxor %xmm3, %xmm14 pshufb 80(%rsp), %xmm15 pshufb 80(%rsp), %xmm12 paddd %xmm15, %xmm10 paddd %xmm12, %xmm11 pshufb 80(%rsp), %xmm13 pshufb 80(%rsp), %xmm14 paddd %xmm13, %xmm8 paddd %xmm14, %xmm9 movdqa %xmm15, 112(%rsp) pxor %xmm10, %xmm5 pxor %xmm11, %xmm6 movdqa %xmm5, %xmm15 pslld $ 12, %xmm5 psrld $20, %xmm15 pxor %xmm15, %xmm5 movdqa %xmm6, %xmm15 pslld $ 12, %xmm6 psrld $20, %xmm15 pxor %xmm15, %xmm6 pxor %xmm8, %xmm7 pxor %xmm9, %xmm4 movdqa %xmm7, %xmm15 pslld $ 12, %xmm7 psrld $20, %xmm15 pxor %xmm15, %xmm7 movdqa %xmm4, %xmm15 pslld $ 12, %xmm4 psrld $20, %xmm15 pxor %xmm15, %xmm4 movdqa 112(%rsp), %xmm15 paddd %xmm5, %xmm0 paddd %xmm6, %xmm1 pxor %xmm0, %xmm15 pxor %xmm1, %xmm12 paddd %xmm7, %xmm2 paddd %xmm4, %xmm3 pxor %xmm2, %xmm13 pxor %xmm3, %xmm14 pshufb 96(%rsp), %xmm15 pshufb 96(%rsp), %xmm12 paddd %xmm15, %xmm10 paddd %xmm12, %xmm11 pshufb 96(%rsp), %xmm13 pshufb 96(%rsp), %xmm14 paddd %xmm13, %xmm8 paddd %xmm14, %xmm9 movdqa %xmm15, 112(%rsp) pxor %xmm10, %xmm5 pxor %xmm11, %xmm6 movdqa %xmm5, %xmm15 pslld $ 7, %xmm5 psrld $25, %xmm15 pxor %xmm15, %xmm5 movdqa %xmm6, %xmm15 pslld $ 7, %xmm6 psrld $25, %xmm15 pxor %xmm15, %xmm6 pxor %xmm8, %xmm7 pxor %xmm9, %xmm4 movdqa %xmm7, %xmm15 pslld $ 7, %xmm7 psrld $25, %xmm15 pxor %xmm15, %xmm7 movdqa %xmm4, %xmm15 pslld $ 7, %xmm4 psrld $25, %xmm15 pxor %xmm15, %xmm4 subq $2, %rax movdqa 112(%rsp), %xmm15 jnz .Lchacha_blocks_ssse3_mainloop1 paddd 128(%rsp), %xmm0 paddd 144(%rsp), %xmm1 paddd 160(%rsp), %xmm2 paddd 176(%rsp), %xmm3 paddd 192(%rsp), %xmm4 paddd 208(%rsp), %xmm5 paddd 224(%rsp), %xmm6 paddd 240(%rsp), %xmm7 paddd 256(%rsp), %xmm8 paddd 272(%rsp), %xmm9 paddd 288(%rsp), %xmm10 paddd 304(%rsp), %xmm11 paddd 320(%rsp), %xmm12 paddd 336(%rsp), %xmm13 paddd 352(%rsp), %xmm14 paddd 368(%rsp), %xmm15 movdqa %xmm8, 384(%rsp) movdqa %xmm9, 400(%rsp) movdqa %xmm10, 416(%rsp) movdqa %xmm11, 432(%rsp) movdqa %xmm12, 448(%rsp) movdqa %xmm13, 464(%rsp) movdqa %xmm14, 480(%rsp) movdqa %xmm15, 496(%rsp) movdqa %xmm0, %xmm8 movdqa %xmm2, %xmm9 movdqa %xmm4, %xmm10 movdqa %xmm6, %xmm11 punpckhdq %xmm1, %xmm0 punpckhdq %xmm3, %xmm2 punpckhdq %xmm5, %xmm4 punpckhdq %xmm7, %xmm6 punpckldq %xmm1, %xmm8 punpckldq %xmm3, %xmm9 punpckldq %xmm5, %xmm10 punpckldq %xmm7, %xmm11 movdqa %xmm0, %xmm1 movdqa %xmm4, %xmm3 movdqa %xmm8, %xmm5 movdqa %xmm10, %xmm7 punpckhqdq %xmm2, %xmm0 punpckhqdq %xmm6, %xmm4 punpckhqdq %xmm9, %xmm8 punpckhqdq %xmm11, %xmm10 punpcklqdq %xmm2, %xmm1 punpcklqdq %xmm6, %xmm3 punpcklqdq %xmm9, %xmm5 punpcklqdq %xmm11, %xmm7 andq %rsi, %rsi jz .Lchacha_blocks_ssse3_noinput1 movdqu 0(%rsi), %xmm2 movdqu 16(%rsi), %xmm6 movdqu 64(%rsi), %xmm9 movdqu 80(%rsi), %xmm11 movdqu 128(%rsi), %xmm12 movdqu 144(%rsi), %xmm13 movdqu 192(%rsi), %xmm14 movdqu 208(%rsi), %xmm15 pxor %xmm2, %xmm5 pxor %xmm6, %xmm7 pxor %xmm9, %xmm8 pxor %xmm11, %xmm10 pxor %xmm12, %xmm1 pxor %xmm13, %xmm3 pxor %xmm14, %xmm0 pxor %xmm15, %xmm4 movdqu %xmm5, 0(%rdx) movdqu %xmm7, 16(%rdx) movdqu %xmm8, 64(%rdx) movdqu %xmm10, 80(%rdx) movdqu %xmm1, 128(%rdx) movdqu %xmm3, 144(%rdx) movdqu %xmm0, 192(%rdx) movdqu %xmm4, 208(%rdx) movdqa 384(%rsp), %xmm0 movdqa 
400(%rsp), %xmm1 movdqa 416(%rsp), %xmm2 movdqa 432(%rsp), %xmm3 movdqa 448(%rsp), %xmm4 movdqa 464(%rsp), %xmm5 movdqa 480(%rsp), %xmm6 movdqa 496(%rsp), %xmm7 movdqa %xmm0, %xmm8 movdqa %xmm2, %xmm9 movdqa %xmm4, %xmm10 movdqa %xmm6, %xmm11 punpckldq %xmm1, %xmm8 punpckldq %xmm3, %xmm9 punpckhdq %xmm1, %xmm0 punpckhdq %xmm3, %xmm2 punpckldq %xmm5, %xmm10 punpckldq %xmm7, %xmm11 punpckhdq %xmm5, %xmm4 punpckhdq %xmm7, %xmm6 movdqa %xmm8, %xmm1 movdqa %xmm0, %xmm3 movdqa %xmm10, %xmm5 movdqa %xmm4, %xmm7 punpcklqdq %xmm9, %xmm1 punpcklqdq %xmm11, %xmm5 punpckhqdq %xmm9, %xmm8 punpckhqdq %xmm11, %xmm10 punpcklqdq %xmm2, %xmm3 punpcklqdq %xmm6, %xmm7 punpckhqdq %xmm2, %xmm0 punpckhqdq %xmm6, %xmm4 movdqu 32(%rsi), %xmm2 movdqu 48(%rsi), %xmm6 movdqu 96(%rsi), %xmm9 movdqu 112(%rsi), %xmm11 movdqu 160(%rsi), %xmm12 movdqu 176(%rsi), %xmm13 movdqu 224(%rsi), %xmm14 movdqu 240(%rsi), %xmm15 pxor %xmm2, %xmm1 pxor %xmm6, %xmm5 pxor %xmm9, %xmm8 pxor %xmm11, %xmm10 pxor %xmm12, %xmm3 pxor %xmm13, %xmm7 pxor %xmm14, %xmm0 pxor %xmm15, %xmm4 movdqu %xmm1, 32(%rdx) movdqu %xmm5, 48(%rdx) movdqu %xmm8, 96(%rdx) movdqu %xmm10, 112(%rdx) movdqu %xmm3, 160(%rdx) movdqu %xmm7, 176(%rdx) movdqu %xmm0, 224(%rdx) movdqu %xmm4, 240(%rdx) addq $256, %rsi jmp .Lchacha_blocks_ssse3_mainloop_cont .Lchacha_blocks_ssse3_noinput1: movdqu %xmm5, 0(%rdx) movdqu %xmm7, 16(%rdx) movdqu %xmm8, 64(%rdx) movdqu %xmm10, 80(%rdx) movdqu %xmm1, 128(%rdx) movdqu %xmm3, 144(%rdx) movdqu %xmm0, 192(%rdx) movdqu %xmm4, 208(%rdx) movdqa 384(%rsp), %xmm0 movdqa 400(%rsp), %xmm1 movdqa 416(%rsp), %xmm2 movdqa 432(%rsp), %xmm3 movdqa 448(%rsp), %xmm4 movdqa 464(%rsp), %xmm5 movdqa 480(%rsp), %xmm6 movdqa 496(%rsp), %xmm7 movdqa %xmm0, %xmm8 movdqa %xmm2, %xmm9 movdqa %xmm4, %xmm10 movdqa %xmm6, %xmm11 punpckldq %xmm1, %xmm8 punpckldq %xmm3, %xmm9 punpckhdq %xmm1, %xmm0 punpckhdq %xmm3, %xmm2 punpckldq %xmm5, %xmm10 punpckldq %xmm7, %xmm11 punpckhdq %xmm5, %xmm4 punpckhdq %xmm7, %xmm6 movdqa %xmm8, %xmm1 movdqa %xmm0, %xmm3 movdqa %xmm10, %xmm5 movdqa %xmm4, %xmm7 punpcklqdq %xmm9, %xmm1 punpcklqdq %xmm11, %xmm5 punpckhqdq %xmm9, %xmm8 punpckhqdq %xmm11, %xmm10 punpcklqdq %xmm2, %xmm3 punpcklqdq %xmm6, %xmm7 punpckhqdq %xmm2, %xmm0 punpckhqdq %xmm6, %xmm4 movdqu %xmm1, 32(%rdx) movdqu %xmm5, 48(%rdx) movdqu %xmm8, 96(%rdx) movdqu %xmm10, 112(%rdx) movdqu %xmm3, 160(%rdx) movdqu %xmm7, 176(%rdx) movdqu %xmm0, 224(%rdx) movdqu %xmm4, 240(%rdx) .Lchacha_blocks_ssse3_mainloop_cont: addq $256, %rdx subq $256, %rcx cmp $256, %rcx jae .Lchacha_blocks_ssse3_atleast256 movdqa 80(%rsp), %xmm6 movdqa 96(%rsp), %xmm7 movdqa 0(%rsp), %xmm8 movdqa 16(%rsp), %xmm9 movdqa 32(%rsp), %xmm10 movdqa 48(%rsp), %xmm11 movq $1, %r9 .Lchacha_blocks_ssse3_below256: movq %r9, %xmm5 andq %rcx, %rcx jz .Lchacha_blocks_ssse3_done cmpq $64, %rcx jae .Lchacha_blocks_ssse3_above63 movq %rdx, %r9 andq %rsi, %rsi jz .Lchacha_blocks_ssse3_noinput2 movq %rcx, %r10 movq %rsp, %rdx addq %r10, %rsi addq %r10, %rdx negq %r10 .Lchacha_blocks_ssse3_copyinput: movb (%rsi, %r10), %al movb %al, (%rdx, %r10) incq %r10 jnz .Lchacha_blocks_ssse3_copyinput movq %rsp, %rsi .Lchacha_blocks_ssse3_noinput2: movq %rsp, %rdx .Lchacha_blocks_ssse3_above63: movdqa %xmm8, %xmm0 movdqa %xmm9, %xmm1 movdqa %xmm10, %xmm2 movdqa %xmm11, %xmm3 movq 64(%rsp), %rax .Lchacha_blocks_ssse3_mainloop2: paddd %xmm1, %xmm0 pxor %xmm0, %xmm3 pshufb %xmm6, %xmm3 paddd %xmm3, %xmm2 pxor %xmm2, %xmm1 movdqa %xmm1, %xmm4 pslld $12, %xmm4 psrld $20, %xmm1 pxor %xmm4, %xmm1 paddd %xmm1, %xmm0 pxor %xmm0, %xmm3 
pshufb %xmm7, %xmm3
pshufd $0x93, %xmm0, %xmm0
paddd %xmm3, %xmm2
pshufd $0x4e, %xmm3, %xmm3
pxor %xmm2, %xmm1
pshufd $0x39, %xmm2, %xmm2
movdqa %xmm1, %xmm4
pslld $7, %xmm4
psrld $25, %xmm1
pxor %xmm4, %xmm1
paddd %xmm1, %xmm0
pxor %xmm0, %xmm3
pshufb %xmm6, %xmm3
paddd %xmm3, %xmm2
pxor %xmm2, %xmm1
movdqa %xmm1, %xmm4
pslld $12, %xmm4
psrld $20, %xmm1
pxor %xmm4, %xmm1
paddd %xmm1, %xmm0
pxor %xmm0, %xmm3
pshufb %xmm7, %xmm3
pshufd $0x39, %xmm0, %xmm0
paddd %xmm3, %xmm2
pshufd $0x4e, %xmm3, %xmm3
pxor %xmm2, %xmm1
pshufd $0x93, %xmm2, %xmm2
movdqa %xmm1, %xmm4
pslld $7, %xmm4
psrld $25, %xmm1
pxor %xmm4, %xmm1
subq $2, %rax
jnz .Lchacha_blocks_ssse3_mainloop2
paddd %xmm8, %xmm0
paddd %xmm9, %xmm1
paddd %xmm10, %xmm2
paddd %xmm11, %xmm3
andq %rsi, %rsi
jz .Lchacha_blocks_ssse3_noinput3
movdqu 0(%rsi), %xmm12
movdqu 16(%rsi), %xmm13
movdqu 32(%rsi), %xmm14
movdqu 48(%rsi), %xmm15
pxor %xmm12, %xmm0
pxor %xmm13, %xmm1
pxor %xmm14, %xmm2
pxor %xmm15, %xmm3
addq $64, %rsi
.Lchacha_blocks_ssse3_noinput3:
movdqu %xmm0, 0(%rdx)
movdqu %xmm1, 16(%rdx)
movdqu %xmm2, 32(%rdx)
movdqu %xmm3, 48(%rdx)
paddq %xmm5, %xmm11
cmpq $64, %rcx
jbe .Lchacha_blocks_ssse3_mainloop2_finishup
addq $64, %rdx
subq $64, %rcx
jmp .Lchacha_blocks_ssse3_below256
.Lchacha_blocks_ssse3_mainloop2_finishup:
cmpq $64, %rcx
je .Lchacha_blocks_ssse3_done
addq %rcx, %r9
addq %rcx, %rdx
negq %rcx
.Lchacha_blocks_ssse3_copyoutput:
movb (%rdx, %rcx), %al
movb %al, (%r9, %rcx)
incq %rcx
jnz .Lchacha_blocks_ssse3_copyoutput
.Lchacha_blocks_ssse3_done:
movdqu %xmm11, 48(%rdi)
movq %rbp, %rsp
pxor %xmm15, %xmm15
pxor %xmm7, %xmm7
pxor %xmm14, %xmm14
pxor %xmm6, %xmm6
pxor %xmm13, %xmm13
pxor %xmm5, %xmm5
pxor %xmm12, %xmm12
pxor %xmm4, %xmm4
popq %rbp
popq %rbx
movl $(63 + 512 + 16), %eax
pxor %xmm11, %xmm11
pxor %xmm3, %xmm3
pxor %xmm10, %xmm10
pxor %xmm2, %xmm2
pxor %xmm9, %xmm9
pxor %xmm1, %xmm1
pxor %xmm8, %xmm8
pxor %xmm0, %xmm0
ret
ELF(.size _gcry_chacha20_amd64_ssse3_blocks,.-_gcry_chacha20_amd64_ssse3_blocks;)
-.data
.align 16;
.LC:
.byte 2,3,0,1,6,7,4,5,10,11,8,9,14,15,12,13 /* pshufb rotate by 16 */
.byte 3,0,1,2,7,4,5,6,11,8,9,10,15,12,13,14 /* pshufb rotate by 8 */
#endif /*defined(USE_CHACHA20)*/
#endif /*__x86_64*/
diff --git a/cipher/des-amd64.S b/cipher/des-amd64.S
index 307d2112..1b7cfba8 100644
--- a/cipher/des-amd64.S
+++ b/cipher/des-amd64.S
@@ -1,1037 +1,1036 @@
/* des-amd64.S - AMD64 assembly implementation of 3DES cipher
 *
 * Copyright (C) 2014 Jussi Kivilinna
 *
 * This file is part of Libgcrypt.
 *
 * Libgcrypt is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as
 * published by the Free Software Foundation; either version 2.1 of
 * the License, or (at your option) any later version.
 *
 * Libgcrypt is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see .
 */
#ifdef __x86_64
#include
#if defined(USE_DES) && (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \
    defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS))
#ifdef __PIC__
# define RIP (%rip)
#else
# define RIP
#endif
#ifdef HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS
# define ELF(...) __VA_ARGS__
#else
# define ELF(...)
/*_*/ #endif .text #define s1 0 #define s2 ((s1) + (64*8)) #define s3 ((s2) + (64*8)) #define s4 ((s3) + (64*8)) #define s5 ((s4) + (64*8)) #define s6 ((s5) + (64*8)) #define s7 ((s6) + (64*8)) #define s8 ((s7) + (64*8)) /* register macros */ #define CTX %rdi #define SBOXES %rbp #define RL0 %r8 #define RL1 %r9 #define RL2 %r10 #define RL0d %r8d #define RL1d %r9d #define RL2d %r10d #define RR0 %r11 #define RR1 %r12 #define RR2 %r13 #define RR0d %r11d #define RR1d %r12d #define RR2d %r13d #define RW0 %rax #define RW1 %rbx #define RW2 %rcx #define RW0d %eax #define RW1d %ebx #define RW2d %ecx #define RW0bl %al #define RW1bl %bl #define RW2bl %cl #define RW0bh %ah #define RW1bh %bh #define RW2bh %ch #define RT0 %r15 #define RT1 %rsi #define RT2 %r14 #define RT3 %rdx #define RT0d %r15d #define RT1d %esi #define RT2d %r14d #define RT3d %edx /*********************************************************************** * 1-way 3DES ***********************************************************************/ #define do_permutation(a, b, offset, mask) \ movl a, RT0d; \ shrl $(offset), RT0d; \ xorl b, RT0d; \ andl $(mask), RT0d; \ xorl RT0d, b; \ shll $(offset), RT0d; \ xorl RT0d, a; #define expand_to_64bits(val, mask) \ movl val##d, RT0d; \ rorl $4, RT0d; \ shlq $32, RT0; \ orq RT0, val; \ andq mask, val; #define compress_to_64bits(val) \ movq val, RT0; \ shrq $32, RT0; \ roll $4, RT0d; \ orl RT0d, val##d; #define initial_permutation(left, right) \ do_permutation(left##d, right##d, 4, 0x0f0f0f0f); \ do_permutation(left##d, right##d, 16, 0x0000ffff); \ do_permutation(right##d, left##d, 2, 0x33333333); \ do_permutation(right##d, left##d, 8, 0x00ff00ff); \ movabs $0x3f3f3f3f3f3f3f3f, RT3; \ movl left##d, RW0d; \ roll $1, right##d; \ xorl right##d, RW0d; \ andl $0xaaaaaaaa, RW0d; \ xorl RW0d, left##d; \ xorl RW0d, right##d; \ roll $1, left##d; \ expand_to_64bits(right, RT3); \ expand_to_64bits(left, RT3); #define final_permutation(left, right) \ compress_to_64bits(right); \ compress_to_64bits(left); \ movl right##d, RW0d; \ rorl $1, left##d; \ xorl left##d, RW0d; \ andl $0xaaaaaaaa, RW0d; \ xorl RW0d, right##d; \ xorl RW0d, left##d; \ rorl $1, right##d; \ do_permutation(right##d, left##d, 8, 0x00ff00ff); \ do_permutation(right##d, left##d, 2, 0x33333333); \ do_permutation(left##d, right##d, 16, 0x0000ffff); \ do_permutation(left##d, right##d, 4, 0x0f0f0f0f); #define round1(n, from, to, load_next_key) \ xorq from, RW0; \ \ movzbl RW0bl, RT0d; \ movzbl RW0bh, RT1d; \ shrq $16, RW0; \ movzbl RW0bl, RT2d; \ movzbl RW0bh, RT3d; \ shrq $16, RW0; \ movq s8(SBOXES, RT0, 8), RT0; \ xorq s6(SBOXES, RT1, 8), to; \ movzbl RW0bl, RL1d; \ movzbl RW0bh, RT1d; \ shrl $16, RW0d; \ xorq s4(SBOXES, RT2, 8), RT0; \ xorq s2(SBOXES, RT3, 8), to; \ movzbl RW0bl, RT2d; \ movzbl RW0bh, RT3d; \ xorq s7(SBOXES, RL1, 8), RT0; \ xorq s5(SBOXES, RT1, 8), to; \ xorq s3(SBOXES, RT2, 8), RT0; \ load_next_key(n, RW0); \ xorq RT0, to; \ xorq s1(SBOXES, RT3, 8), to; \ #define load_next_key(n, RWx) \ movq (((n) + 1) * 8)(CTX), RWx; #define dummy2(a, b) /*_*/ #define read_block(io, left, right) \ movl (io), left##d; \ movl 4(io), right##d; \ bswapl left##d; \ bswapl right##d; #define write_block(io, left, right) \ bswapl left##d; \ bswapl right##d; \ movl left##d, (io); \ movl right##d, 4(io); .align 8 .globl _gcry_3des_amd64_crypt_block ELF(.type _gcry_3des_amd64_crypt_block,@function;) _gcry_3des_amd64_crypt_block: /* input: * %rdi: round keys, CTX * %rsi: dst * %rdx: src */ pushq %rbp; pushq %rbx; pushq %r12; pushq %r13; pushq %r14; pushq %r15; 
pushq %rsi; /*dst*/ leaq .L_s1 RIP, SBOXES; read_block(%rdx, RL0, RR0); initial_permutation(RL0, RR0); movq (CTX), RW0; round1(0, RR0, RL0, load_next_key); round1(1, RL0, RR0, load_next_key); round1(2, RR0, RL0, load_next_key); round1(3, RL0, RR0, load_next_key); round1(4, RR0, RL0, load_next_key); round1(5, RL0, RR0, load_next_key); round1(6, RR0, RL0, load_next_key); round1(7, RL0, RR0, load_next_key); round1(8, RR0, RL0, load_next_key); round1(9, RL0, RR0, load_next_key); round1(10, RR0, RL0, load_next_key); round1(11, RL0, RR0, load_next_key); round1(12, RR0, RL0, load_next_key); round1(13, RL0, RR0, load_next_key); round1(14, RR0, RL0, load_next_key); round1(15, RL0, RR0, load_next_key); round1(16+0, RL0, RR0, load_next_key); round1(16+1, RR0, RL0, load_next_key); round1(16+2, RL0, RR0, load_next_key); round1(16+3, RR0, RL0, load_next_key); round1(16+4, RL0, RR0, load_next_key); round1(16+5, RR0, RL0, load_next_key); round1(16+6, RL0, RR0, load_next_key); round1(16+7, RR0, RL0, load_next_key); round1(16+8, RL0, RR0, load_next_key); round1(16+9, RR0, RL0, load_next_key); round1(16+10, RL0, RR0, load_next_key); round1(16+11, RR0, RL0, load_next_key); round1(16+12, RL0, RR0, load_next_key); round1(16+13, RR0, RL0, load_next_key); round1(16+14, RL0, RR0, load_next_key); round1(16+15, RR0, RL0, load_next_key); round1(32+0, RR0, RL0, load_next_key); round1(32+1, RL0, RR0, load_next_key); round1(32+2, RR0, RL0, load_next_key); round1(32+3, RL0, RR0, load_next_key); round1(32+4, RR0, RL0, load_next_key); round1(32+5, RL0, RR0, load_next_key); round1(32+6, RR0, RL0, load_next_key); round1(32+7, RL0, RR0, load_next_key); round1(32+8, RR0, RL0, load_next_key); round1(32+9, RL0, RR0, load_next_key); round1(32+10, RR0, RL0, load_next_key); round1(32+11, RL0, RR0, load_next_key); round1(32+12, RR0, RL0, load_next_key); round1(32+13, RL0, RR0, load_next_key); round1(32+14, RR0, RL0, load_next_key); round1(32+15, RL0, RR0, dummy2); popq RW2; /*dst*/ final_permutation(RR0, RL0); write_block(RW2, RR0, RL0); popq %r15; popq %r14; popq %r13; popq %r12; popq %rbx; popq %rbp; ret; ELF(.size _gcry_3des_amd64_crypt_block,.-_gcry_3des_amd64_crypt_block;) /*********************************************************************** * 3-way 3DES ***********************************************************************/ #define expand_to_64bits(val, mask) \ movl val##d, RT0d; \ rorl $4, RT0d; \ shlq $32, RT0; \ orq RT0, val; \ andq mask, val; #define compress_to_64bits(val) \ movq val, RT0; \ shrq $32, RT0; \ roll $4, RT0d; \ orl RT0d, val##d; #define initial_permutation3(left, right) \ do_permutation(left##0d, right##0d, 4, 0x0f0f0f0f); \ do_permutation(left##0d, right##0d, 16, 0x0000ffff); \ do_permutation(left##1d, right##1d, 4, 0x0f0f0f0f); \ do_permutation(left##1d, right##1d, 16, 0x0000ffff); \ do_permutation(left##2d, right##2d, 4, 0x0f0f0f0f); \ do_permutation(left##2d, right##2d, 16, 0x0000ffff); \ \ do_permutation(right##0d, left##0d, 2, 0x33333333); \ do_permutation(right##0d, left##0d, 8, 0x00ff00ff); \ do_permutation(right##1d, left##1d, 2, 0x33333333); \ do_permutation(right##1d, left##1d, 8, 0x00ff00ff); \ do_permutation(right##2d, left##2d, 2, 0x33333333); \ do_permutation(right##2d, left##2d, 8, 0x00ff00ff); \ \ movabs $0x3f3f3f3f3f3f3f3f, RT3; \ \ movl left##0d, RW0d; \ roll $1, right##0d; \ xorl right##0d, RW0d; \ andl $0xaaaaaaaa, RW0d; \ xorl RW0d, left##0d; \ xorl RW0d, right##0d; \ roll $1, left##0d; \ expand_to_64bits(right##0, RT3); \ expand_to_64bits(left##0, RT3); \ movl left##1d, RW1d; \ 
roll $1, right##1d; \ xorl right##1d, RW1d; \ andl $0xaaaaaaaa, RW1d; \ xorl RW1d, left##1d; \ xorl RW1d, right##1d; \ roll $1, left##1d; \ expand_to_64bits(right##1, RT3); \ expand_to_64bits(left##1, RT3); \ movl left##2d, RW2d; \ roll $1, right##2d; \ xorl right##2d, RW2d; \ andl $0xaaaaaaaa, RW2d; \ xorl RW2d, left##2d; \ xorl RW2d, right##2d; \ roll $1, left##2d; \ expand_to_64bits(right##2, RT3); \ expand_to_64bits(left##2, RT3); #define final_permutation3(left, right) \ compress_to_64bits(right##0); \ compress_to_64bits(left##0); \ movl right##0d, RW0d; \ rorl $1, left##0d; \ xorl left##0d, RW0d; \ andl $0xaaaaaaaa, RW0d; \ xorl RW0d, right##0d; \ xorl RW0d, left##0d; \ rorl $1, right##0d; \ compress_to_64bits(right##1); \ compress_to_64bits(left##1); \ movl right##1d, RW1d; \ rorl $1, left##1d; \ xorl left##1d, RW1d; \ andl $0xaaaaaaaa, RW1d; \ xorl RW1d, right##1d; \ xorl RW1d, left##1d; \ rorl $1, right##1d; \ compress_to_64bits(right##2); \ compress_to_64bits(left##2); \ movl right##2d, RW2d; \ rorl $1, left##2d; \ xorl left##2d, RW2d; \ andl $0xaaaaaaaa, RW2d; \ xorl RW2d, right##2d; \ xorl RW2d, left##2d; \ rorl $1, right##2d; \ \ do_permutation(right##0d, left##0d, 8, 0x00ff00ff); \ do_permutation(right##0d, left##0d, 2, 0x33333333); \ do_permutation(right##1d, left##1d, 8, 0x00ff00ff); \ do_permutation(right##1d, left##1d, 2, 0x33333333); \ do_permutation(right##2d, left##2d, 8, 0x00ff00ff); \ do_permutation(right##2d, left##2d, 2, 0x33333333); \ \ do_permutation(left##0d, right##0d, 16, 0x0000ffff); \ do_permutation(left##0d, right##0d, 4, 0x0f0f0f0f); \ do_permutation(left##1d, right##1d, 16, 0x0000ffff); \ do_permutation(left##1d, right##1d, 4, 0x0f0f0f0f); \ do_permutation(left##2d, right##2d, 16, 0x0000ffff); \ do_permutation(left##2d, right##2d, 4, 0x0f0f0f0f); #define round3(n, from, to, load_next_key, do_movq) \ xorq from##0, RW0; \ movzbl RW0bl, RT3d; \ movzbl RW0bh, RT1d; \ shrq $16, RW0; \ xorq s8(SBOXES, RT3, 8), to##0; \ xorq s6(SBOXES, RT1, 8), to##0; \ movzbl RW0bl, RT3d; \ movzbl RW0bh, RT1d; \ shrq $16, RW0; \ xorq s4(SBOXES, RT3, 8), to##0; \ xorq s2(SBOXES, RT1, 8), to##0; \ movzbl RW0bl, RT3d; \ movzbl RW0bh, RT1d; \ shrl $16, RW0d; \ xorq s7(SBOXES, RT3, 8), to##0; \ xorq s5(SBOXES, RT1, 8), to##0; \ movzbl RW0bl, RT3d; \ movzbl RW0bh, RT1d; \ load_next_key(n, RW0); \ xorq s3(SBOXES, RT3, 8), to##0; \ xorq s1(SBOXES, RT1, 8), to##0; \ xorq from##1, RW1; \ movzbl RW1bl, RT3d; \ movzbl RW1bh, RT1d; \ shrq $16, RW1; \ xorq s8(SBOXES, RT3, 8), to##1; \ xorq s6(SBOXES, RT1, 8), to##1; \ movzbl RW1bl, RT3d; \ movzbl RW1bh, RT1d; \ shrq $16, RW1; \ xorq s4(SBOXES, RT3, 8), to##1; \ xorq s2(SBOXES, RT1, 8), to##1; \ movzbl RW1bl, RT3d; \ movzbl RW1bh, RT1d; \ shrl $16, RW1d; \ xorq s7(SBOXES, RT3, 8), to##1; \ xorq s5(SBOXES, RT1, 8), to##1; \ movzbl RW1bl, RT3d; \ movzbl RW1bh, RT1d; \ do_movq(RW0, RW1); \ xorq s3(SBOXES, RT3, 8), to##1; \ xorq s1(SBOXES, RT1, 8), to##1; \ xorq from##2, RW2; \ movzbl RW2bl, RT3d; \ movzbl RW2bh, RT1d; \ shrq $16, RW2; \ xorq s8(SBOXES, RT3, 8), to##2; \ xorq s6(SBOXES, RT1, 8), to##2; \ movzbl RW2bl, RT3d; \ movzbl RW2bh, RT1d; \ shrq $16, RW2; \ xorq s4(SBOXES, RT3, 8), to##2; \ xorq s2(SBOXES, RT1, 8), to##2; \ movzbl RW2bl, RT3d; \ movzbl RW2bh, RT1d; \ shrl $16, RW2d; \ xorq s7(SBOXES, RT3, 8), to##2; \ xorq s5(SBOXES, RT1, 8), to##2; \ movzbl RW2bl, RT3d; \ movzbl RW2bh, RT1d; \ do_movq(RW0, RW2); \ xorq s3(SBOXES, RT3, 8), to##2; \ xorq s1(SBOXES, RT1, 8), to##2; #define __movq(src, dst) \ movq src, dst; #define 
read_block(io, left, right) \ movl (io), left##d; \ movl 4(io), right##d; \ bswapl left##d; \ bswapl right##d; #define write_block(io, left, right) \ bswapl left##d; \ bswapl right##d; \ movl left##d, (io); \ movl right##d, 4(io); .align 8 ELF(.type _gcry_3des_amd64_crypt_blk3,@function;) _gcry_3des_amd64_crypt_blk3: /* input: * %rdi: round keys, CTX * RL0d, RR0d, RL1d, RR1d, RL2d, RR2d: 3 input blocks * RR0d, RL0d, RR1d, RL1d, RR2d, RL2d: 3 output blocks */ leaq .L_s1 RIP, SBOXES; initial_permutation3(RL, RR); movq 0(CTX), RW0; movq RW0, RW1; movq RW0, RW2; round3(0, RR, RL, load_next_key, __movq); round3(1, RL, RR, load_next_key, __movq); round3(2, RR, RL, load_next_key, __movq); round3(3, RL, RR, load_next_key, __movq); round3(4, RR, RL, load_next_key, __movq); round3(5, RL, RR, load_next_key, __movq); round3(6, RR, RL, load_next_key, __movq); round3(7, RL, RR, load_next_key, __movq); round3(8, RR, RL, load_next_key, __movq); round3(9, RL, RR, load_next_key, __movq); round3(10, RR, RL, load_next_key, __movq); round3(11, RL, RR, load_next_key, __movq); round3(12, RR, RL, load_next_key, __movq); round3(13, RL, RR, load_next_key, __movq); round3(14, RR, RL, load_next_key, __movq); round3(15, RL, RR, load_next_key, __movq); round3(16+0, RL, RR, load_next_key, __movq); round3(16+1, RR, RL, load_next_key, __movq); round3(16+2, RL, RR, load_next_key, __movq); round3(16+3, RR, RL, load_next_key, __movq); round3(16+4, RL, RR, load_next_key, __movq); round3(16+5, RR, RL, load_next_key, __movq); round3(16+6, RL, RR, load_next_key, __movq); round3(16+7, RR, RL, load_next_key, __movq); round3(16+8, RL, RR, load_next_key, __movq); round3(16+9, RR, RL, load_next_key, __movq); round3(16+10, RL, RR, load_next_key, __movq); round3(16+11, RR, RL, load_next_key, __movq); round3(16+12, RL, RR, load_next_key, __movq); round3(16+13, RR, RL, load_next_key, __movq); round3(16+14, RL, RR, load_next_key, __movq); round3(16+15, RR, RL, load_next_key, __movq); round3(32+0, RR, RL, load_next_key, __movq); round3(32+1, RL, RR, load_next_key, __movq); round3(32+2, RR, RL, load_next_key, __movq); round3(32+3, RL, RR, load_next_key, __movq); round3(32+4, RR, RL, load_next_key, __movq); round3(32+5, RL, RR, load_next_key, __movq); round3(32+6, RR, RL, load_next_key, __movq); round3(32+7, RL, RR, load_next_key, __movq); round3(32+8, RR, RL, load_next_key, __movq); round3(32+9, RL, RR, load_next_key, __movq); round3(32+10, RR, RL, load_next_key, __movq); round3(32+11, RL, RR, load_next_key, __movq); round3(32+12, RR, RL, load_next_key, __movq); round3(32+13, RL, RR, load_next_key, __movq); round3(32+14, RR, RL, load_next_key, __movq); round3(32+15, RL, RR, dummy2, dummy2); final_permutation3(RR, RL); ret; ELF(.size _gcry_3des_amd64_crypt_blk3,.-_gcry_3des_amd64_crypt_blk3;) .align 8 .globl _gcry_3des_amd64_cbc_dec ELF(.type _gcry_3des_amd64_cbc_dec,@function;) _gcry_3des_amd64_cbc_dec: /* input: * %rdi: ctx, CTX * %rsi: dst (3 blocks) * %rdx: src (3 blocks) * %rcx: iv (64bit) */ pushq %rbp; pushq %rbx; pushq %r12; pushq %r13; pushq %r14; pushq %r15; pushq %rsi; /*dst*/ pushq %rdx; /*src*/ pushq %rcx; /*iv*/ /* load input */ movl 0 * 4(%rdx), RL0d; movl 1 * 4(%rdx), RR0d; movl 2 * 4(%rdx), RL1d; movl 3 * 4(%rdx), RR1d; movl 4 * 4(%rdx), RL2d; movl 5 * 4(%rdx), RR2d; bswapl RL0d; bswapl RR0d; bswapl RL1d; bswapl RR1d; bswapl RL2d; bswapl RR2d; call _gcry_3des_amd64_crypt_blk3; popq %rcx; /*iv*/ popq %rdx; /*src*/ popq %rsi; /*dst*/ bswapl RR0d; bswapl RL0d; bswapl RR1d; bswapl RL1d; bswapl RR2d; bswapl RL2d; movq 2 * 
8(%rdx), RT0; xorl 0 * 4(%rcx), RR0d; xorl 1 * 4(%rcx), RL0d; xorl 0 * 4(%rdx), RR1d; xorl 1 * 4(%rdx), RL1d; xorl 2 * 4(%rdx), RR2d; xorl 3 * 4(%rdx), RL2d; movq RT0, (%rcx); /* store new IV */ movl RR0d, 0 * 4(%rsi); movl RL0d, 1 * 4(%rsi); movl RR1d, 2 * 4(%rsi); movl RL1d, 3 * 4(%rsi); movl RR2d, 4 * 4(%rsi); movl RL2d, 5 * 4(%rsi); popq %r15; popq %r14; popq %r13; popq %r12; popq %rbx; popq %rbp; ret; ELF(.size _gcry_3des_amd64_cbc_dec,.-_gcry_3des_amd64_cbc_dec;) .align 8 .globl _gcry_3des_amd64_ctr_enc ELF(.type _gcry_3des_amd64_ctr_enc,@function;) _gcry_3des_amd64_ctr_enc: /* input: * %rdi: ctx, CTX * %rsi: dst (3 blocks) * %rdx: src (3 blocks) * %rcx: iv (64bit) */ pushq %rbp; pushq %rbx; pushq %r12; pushq %r13; pushq %r14; pushq %r15; pushq %rsi; /*dst*/ pushq %rdx; /*src*/ movq %rcx, RW2; /* load IV and byteswap */ movq (RW2), RT0; bswapq RT0; movq RT0, RR0; /* construct IVs */ leaq 1(RT0), RR1; leaq 2(RT0), RR2; leaq 3(RT0), RT0; movq RR0, RL0; movq RR1, RL1; movq RR2, RL2; bswapq RT0; shrq $32, RL0; shrq $32, RL1; shrq $32, RL2; /* store new IV */ movq RT0, (RW2); call _gcry_3des_amd64_crypt_blk3; popq %rdx; /*src*/ popq %rsi; /*dst*/ bswapl RR0d; bswapl RL0d; bswapl RR1d; bswapl RL1d; bswapl RR2d; bswapl RL2d; xorl 0 * 4(%rdx), RR0d; xorl 1 * 4(%rdx), RL0d; xorl 2 * 4(%rdx), RR1d; xorl 3 * 4(%rdx), RL1d; xorl 4 * 4(%rdx), RR2d; xorl 5 * 4(%rdx), RL2d; movl RR0d, 0 * 4(%rsi); movl RL0d, 1 * 4(%rsi); movl RR1d, 2 * 4(%rsi); movl RL1d, 3 * 4(%rsi); movl RR2d, 4 * 4(%rsi); movl RL2d, 5 * 4(%rsi); popq %r15; popq %r14; popq %r13; popq %r12; popq %rbx; popq %rbp; ret; ELF(.size _gcry_3des_amd64_cbc_dec,.-_gcry_3des_amd64_cbc_dec;) .align 8 .globl _gcry_3des_amd64_cfb_dec ELF(.type _gcry_3des_amd64_cfb_dec,@function;) _gcry_3des_amd64_cfb_dec: /* input: * %rdi: ctx, CTX * %rsi: dst (3 blocks) * %rdx: src (3 blocks) * %rcx: iv (64bit) */ pushq %rbp; pushq %rbx; pushq %r12; pushq %r13; pushq %r14; pushq %r15; pushq %rsi; /*dst*/ pushq %rdx; /*src*/ movq %rcx, RW2; /* Load input */ movl 0 * 4(RW2), RL0d; movl 1 * 4(RW2), RR0d; movl 0 * 4(%rdx), RL1d; movl 1 * 4(%rdx), RR1d; movl 2 * 4(%rdx), RL2d; movl 3 * 4(%rdx), RR2d; bswapl RL0d; bswapl RR0d; bswapl RL1d; bswapl RR1d; bswapl RL2d; bswapl RR2d; /* Update IV */ movq 4 * 4(%rdx), RW0; movq RW0, (RW2); call _gcry_3des_amd64_crypt_blk3; popq %rdx; /*src*/ popq %rsi; /*dst*/ bswapl RR0d; bswapl RL0d; bswapl RR1d; bswapl RL1d; bswapl RR2d; bswapl RL2d; xorl 0 * 4(%rdx), RR0d; xorl 1 * 4(%rdx), RL0d; xorl 2 * 4(%rdx), RR1d; xorl 3 * 4(%rdx), RL1d; xorl 4 * 4(%rdx), RR2d; xorl 5 * 4(%rdx), RL2d; movl RR0d, 0 * 4(%rsi); movl RL0d, 1 * 4(%rsi); movl RR1d, 2 * 4(%rsi); movl RL1d, 3 * 4(%rsi); movl RR2d, 4 * 4(%rsi); movl RL2d, 5 * 4(%rsi); popq %r15; popq %r14; popq %r13; popq %r12; popq %rbx; popq %rbp; ret; ELF(.size _gcry_3des_amd64_cfb_dec,.-_gcry_3des_amd64_cfb_dec;) -.data .align 16 .L_s1: .quad 0x0010100001010400, 0x0000000000000000 .quad 0x0000100000010000, 0x0010100001010404 .quad 0x0010100001010004, 0x0000100000010404 .quad 0x0000000000000004, 0x0000100000010000 .quad 0x0000000000000400, 0x0010100001010400 .quad 0x0010100001010404, 0x0000000000000400 .quad 0x0010000001000404, 0x0010100001010004 .quad 0x0010000001000000, 0x0000000000000004 .quad 0x0000000000000404, 0x0010000001000400 .quad 0x0010000001000400, 0x0000100000010400 .quad 0x0000100000010400, 0x0010100001010000 .quad 0x0010100001010000, 0x0010000001000404 .quad 0x0000100000010004, 0x0010000001000004 .quad 0x0010000001000004, 0x0000100000010004 .quad 0x0000000000000000, 
0x0000000000000404 .quad 0x0000100000010404, 0x0010000001000000 .quad 0x0000100000010000, 0x0010100001010404 .quad 0x0000000000000004, 0x0010100001010000 .quad 0x0010100001010400, 0x0010000001000000 .quad 0x0010000001000000, 0x0000000000000400 .quad 0x0010100001010004, 0x0000100000010000 .quad 0x0000100000010400, 0x0010000001000004 .quad 0x0000000000000400, 0x0000000000000004 .quad 0x0010000001000404, 0x0000100000010404 .quad 0x0010100001010404, 0x0000100000010004 .quad 0x0010100001010000, 0x0010000001000404 .quad 0x0010000001000004, 0x0000000000000404 .quad 0x0000100000010404, 0x0010100001010400 .quad 0x0000000000000404, 0x0010000001000400 .quad 0x0010000001000400, 0x0000000000000000 .quad 0x0000100000010004, 0x0000100000010400 .quad 0x0000000000000000, 0x0010100001010004 .L_s2: .quad 0x0801080200100020, 0x0800080000000000 .quad 0x0000080000000000, 0x0001080200100020 .quad 0x0001000000100000, 0x0000000200000020 .quad 0x0801000200100020, 0x0800080200000020 .quad 0x0800000200000020, 0x0801080200100020 .quad 0x0801080000100000, 0x0800000000000000 .quad 0x0800080000000000, 0x0001000000100000 .quad 0x0000000200000020, 0x0801000200100020 .quad 0x0001080000100000, 0x0001000200100020 .quad 0x0800080200000020, 0x0000000000000000 .quad 0x0800000000000000, 0x0000080000000000 .quad 0x0001080200100020, 0x0801000000100000 .quad 0x0001000200100020, 0x0800000200000020 .quad 0x0000000000000000, 0x0001080000100000 .quad 0x0000080200000020, 0x0801080000100000 .quad 0x0801000000100000, 0x0000080200000020 .quad 0x0000000000000000, 0x0001080200100020 .quad 0x0801000200100020, 0x0001000000100000 .quad 0x0800080200000020, 0x0801000000100000 .quad 0x0801080000100000, 0x0000080000000000 .quad 0x0801000000100000, 0x0800080000000000 .quad 0x0000000200000020, 0x0801080200100020 .quad 0x0001080200100020, 0x0000000200000020 .quad 0x0000080000000000, 0x0800000000000000 .quad 0x0000080200000020, 0x0801080000100000 .quad 0x0001000000100000, 0x0800000200000020 .quad 0x0001000200100020, 0x0800080200000020 .quad 0x0800000200000020, 0x0001000200100020 .quad 0x0001080000100000, 0x0000000000000000 .quad 0x0800080000000000, 0x0000080200000020 .quad 0x0800000000000000, 0x0801000200100020 .quad 0x0801080200100020, 0x0001080000100000 .L_s3: .quad 0x0000002000000208, 0x0000202008020200 .quad 0x0000000000000000, 0x0000200008020008 .quad 0x0000002008000200, 0x0000000000000000 .quad 0x0000202000020208, 0x0000002008000200 .quad 0x0000200000020008, 0x0000000008000008 .quad 0x0000000008000008, 0x0000200000020000 .quad 0x0000202008020208, 0x0000200000020008 .quad 0x0000200008020000, 0x0000002000000208 .quad 0x0000000008000000, 0x0000000000000008 .quad 0x0000202008020200, 0x0000002000000200 .quad 0x0000202000020200, 0x0000200008020000 .quad 0x0000200008020008, 0x0000202000020208 .quad 0x0000002008000208, 0x0000202000020200 .quad 0x0000200000020000, 0x0000002008000208 .quad 0x0000000000000008, 0x0000202008020208 .quad 0x0000002000000200, 0x0000000008000000 .quad 0x0000202008020200, 0x0000000008000000 .quad 0x0000200000020008, 0x0000002000000208 .quad 0x0000200000020000, 0x0000202008020200 .quad 0x0000002008000200, 0x0000000000000000 .quad 0x0000002000000200, 0x0000200000020008 .quad 0x0000202008020208, 0x0000002008000200 .quad 0x0000000008000008, 0x0000002000000200 .quad 0x0000000000000000, 0x0000200008020008 .quad 0x0000002008000208, 0x0000200000020000 .quad 0x0000000008000000, 0x0000202008020208 .quad 0x0000000000000008, 0x0000202000020208 .quad 0x0000202000020200, 0x0000000008000008 .quad 0x0000200008020000, 0x0000002008000208 .quad 
0x0000002000000208, 0x0000200008020000 .quad 0x0000202000020208, 0x0000000000000008 .quad 0x0000200008020008, 0x0000202000020200 .L_s4: .quad 0x1008020000002001, 0x1000020800002001 .quad 0x1000020800002001, 0x0000000800000000 .quad 0x0008020800002000, 0x1008000800000001 .quad 0x1008000000000001, 0x1000020000002001 .quad 0x0000000000000000, 0x0008020000002000 .quad 0x0008020000002000, 0x1008020800002001 .quad 0x1000000800000001, 0x0000000000000000 .quad 0x0008000800000000, 0x1008000000000001 .quad 0x1000000000000001, 0x0000020000002000 .quad 0x0008000000000000, 0x1008020000002001 .quad 0x0000000800000000, 0x0008000000000000 .quad 0x1000020000002001, 0x0000020800002000 .quad 0x1008000800000001, 0x1000000000000001 .quad 0x0000020800002000, 0x0008000800000000 .quad 0x0000020000002000, 0x0008020800002000 .quad 0x1008020800002001, 0x1000000800000001 .quad 0x0008000800000000, 0x1008000000000001 .quad 0x0008020000002000, 0x1008020800002001 .quad 0x1000000800000001, 0x0000000000000000 .quad 0x0000000000000000, 0x0008020000002000 .quad 0x0000020800002000, 0x0008000800000000 .quad 0x1008000800000001, 0x1000000000000001 .quad 0x1008020000002001, 0x1000020800002001 .quad 0x1000020800002001, 0x0000000800000000 .quad 0x1008020800002001, 0x1000000800000001 .quad 0x1000000000000001, 0x0000020000002000 .quad 0x1008000000000001, 0x1000020000002001 .quad 0x0008020800002000, 0x1008000800000001 .quad 0x1000020000002001, 0x0000020800002000 .quad 0x0008000000000000, 0x1008020000002001 .quad 0x0000000800000000, 0x0008000000000000 .quad 0x0000020000002000, 0x0008020800002000 .L_s5: .quad 0x0000001000000100, 0x0020001002080100 .quad 0x0020000002080000, 0x0420001002000100 .quad 0x0000000000080000, 0x0000001000000100 .quad 0x0400000000000000, 0x0020000002080000 .quad 0x0400001000080100, 0x0000000000080000 .quad 0x0020001002000100, 0x0400001000080100 .quad 0x0420001002000100, 0x0420000002080000 .quad 0x0000001000080100, 0x0400000000000000 .quad 0x0020000002000000, 0x0400000000080000 .quad 0x0400000000080000, 0x0000000000000000 .quad 0x0400001000000100, 0x0420001002080100 .quad 0x0420001002080100, 0x0020001002000100 .quad 0x0420000002080000, 0x0400001000000100 .quad 0x0000000000000000, 0x0420000002000000 .quad 0x0020001002080100, 0x0020000002000000 .quad 0x0420000002000000, 0x0000001000080100 .quad 0x0000000000080000, 0x0420001002000100 .quad 0x0000001000000100, 0x0020000002000000 .quad 0x0400000000000000, 0x0020000002080000 .quad 0x0420001002000100, 0x0400001000080100 .quad 0x0020001002000100, 0x0400000000000000 .quad 0x0420000002080000, 0x0020001002080100 .quad 0x0400001000080100, 0x0000001000000100 .quad 0x0020000002000000, 0x0420000002080000 .quad 0x0420001002080100, 0x0000001000080100 .quad 0x0420000002000000, 0x0420001002080100 .quad 0x0020000002080000, 0x0000000000000000 .quad 0x0400000000080000, 0x0420000002000000 .quad 0x0000001000080100, 0x0020001002000100 .quad 0x0400001000000100, 0x0000000000080000 .quad 0x0000000000000000, 0x0400000000080000 .quad 0x0020001002080100, 0x0400001000000100 .L_s6: .quad 0x0200000120000010, 0x0204000020000000 .quad 0x0000040000000000, 0x0204040120000010 .quad 0x0204000020000000, 0x0000000100000010 .quad 0x0204040120000010, 0x0004000000000000 .quad 0x0200040020000000, 0x0004040100000010 .quad 0x0004000000000000, 0x0200000120000010 .quad 0x0004000100000010, 0x0200040020000000 .quad 0x0200000020000000, 0x0000040100000010 .quad 0x0000000000000000, 0x0004000100000010 .quad 0x0200040120000010, 0x0000040000000000 .quad 0x0004040000000000, 0x0200040120000010 .quad 0x0000000100000010, 
0x0204000120000010 .quad 0x0204000120000010, 0x0000000000000000 .quad 0x0004040100000010, 0x0204040020000000 .quad 0x0000040100000010, 0x0004040000000000 .quad 0x0204040020000000, 0x0200000020000000 .quad 0x0200040020000000, 0x0000000100000010 .quad 0x0204000120000010, 0x0004040000000000 .quad 0x0204040120000010, 0x0004000000000000 .quad 0x0000040100000010, 0x0200000120000010 .quad 0x0004000000000000, 0x0200040020000000 .quad 0x0200000020000000, 0x0000040100000010 .quad 0x0200000120000010, 0x0204040120000010 .quad 0x0004040000000000, 0x0204000020000000 .quad 0x0004040100000010, 0x0204040020000000 .quad 0x0000000000000000, 0x0204000120000010 .quad 0x0000000100000010, 0x0000040000000000 .quad 0x0204000020000000, 0x0004040100000010 .quad 0x0000040000000000, 0x0004000100000010 .quad 0x0200040120000010, 0x0000000000000000 .quad 0x0204040020000000, 0x0200000020000000 .quad 0x0004000100000010, 0x0200040120000010 .L_s7: .quad 0x0002000000200000, 0x2002000004200002 .quad 0x2000000004000802, 0x0000000000000000 .quad 0x0000000000000800, 0x2000000004000802 .quad 0x2002000000200802, 0x0002000004200800 .quad 0x2002000004200802, 0x0002000000200000 .quad 0x0000000000000000, 0x2000000004000002 .quad 0x2000000000000002, 0x0000000004000000 .quad 0x2002000004200002, 0x2000000000000802 .quad 0x0000000004000800, 0x2002000000200802 .quad 0x2002000000200002, 0x0000000004000800 .quad 0x2000000004000002, 0x0002000004200000 .quad 0x0002000004200800, 0x2002000000200002 .quad 0x0002000004200000, 0x0000000000000800 .quad 0x2000000000000802, 0x2002000004200802 .quad 0x0002000000200800, 0x2000000000000002 .quad 0x0000000004000000, 0x0002000000200800 .quad 0x0000000004000000, 0x0002000000200800 .quad 0x0002000000200000, 0x2000000004000802 .quad 0x2000000004000802, 0x2002000004200002 .quad 0x2002000004200002, 0x2000000000000002 .quad 0x2002000000200002, 0x0000000004000000 .quad 0x0000000004000800, 0x0002000000200000 .quad 0x0002000004200800, 0x2000000000000802 .quad 0x2002000000200802, 0x0002000004200800 .quad 0x2000000000000802, 0x2000000004000002 .quad 0x2002000004200802, 0x0002000004200000 .quad 0x0002000000200800, 0x0000000000000000 .quad 0x2000000000000002, 0x2002000004200802 .quad 0x0000000000000000, 0x2002000000200802 .quad 0x0002000004200000, 0x0000000000000800 .quad 0x2000000004000002, 0x0000000004000800 .quad 0x0000000000000800, 0x2002000000200002 .L_s8: .quad 0x0100010410001000, 0x0000010000001000 .quad 0x0000000000040000, 0x0100010410041000 .quad 0x0100000010000000, 0x0100010410001000 .quad 0x0000000400000000, 0x0100000010000000 .quad 0x0000000400040000, 0x0100000010040000 .quad 0x0100010410041000, 0x0000010000041000 .quad 0x0100010010041000, 0x0000010400041000 .quad 0x0000010000001000, 0x0000000400000000 .quad 0x0100000010040000, 0x0100000410000000 .quad 0x0100010010001000, 0x0000010400001000 .quad 0x0000010000041000, 0x0000000400040000 .quad 0x0100000410040000, 0x0100010010041000 .quad 0x0000010400001000, 0x0000000000000000 .quad 0x0000000000000000, 0x0100000410040000 .quad 0x0100000410000000, 0x0100010010001000 .quad 0x0000010400041000, 0x0000000000040000 .quad 0x0000010400041000, 0x0000000000040000 .quad 0x0100010010041000, 0x0000010000001000 .quad 0x0000000400000000, 0x0100000410040000 .quad 0x0000010000001000, 0x0000010400041000 .quad 0x0100010010001000, 0x0000000400000000 .quad 0x0100000410000000, 0x0100000010040000 .quad 0x0100000410040000, 0x0100000010000000 .quad 0x0000000000040000, 0x0100010410001000 .quad 0x0000000000000000, 0x0100010410041000 .quad 0x0000000400040000, 0x0100000410000000 .quad 
0x0100000010040000, 0x0100010010001000 .quad 0x0100010410001000, 0x0000000000000000 .quad 0x0100010410041000, 0x0000010000041000 .quad 0x0000010000041000, 0x0000010400001000 .quad 0x0000010400001000, 0x0000000400040000 .quad 0x0100000010000000, 0x0100010010041000 #endif #endif diff --git a/cipher/serpent-avx2-amd64.S b/cipher/serpent-avx2-amd64.S index 2902dab5..8d60a159 100644 --- a/cipher/serpent-avx2-amd64.S +++ b/cipher/serpent-avx2-amd64.S @@ -1,1124 +1,1123 @@ /* serpent-avx2-amd64.S - AVX2 implementation of Serpent cipher * * Copyright (C) 2013-2015 Jussi Kivilinna * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . */ #ifdef __x86_64 #include #if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && defined(USE_SERPENT) && \ defined(ENABLE_AVX2_SUPPORT) #ifdef HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS # define ELF(...) __VA_ARGS__ #else # define ELF(...) /*_*/ #endif #ifdef __PIC__ # define RIP (%rip) #else # define RIP #endif /* struct serpent_context: */ #define ctx_keys 0 /* register macros */ #define CTX %rdi /* vector registers */ #define RA0 %ymm0 #define RA1 %ymm1 #define RA2 %ymm2 #define RA3 %ymm3 #define RA4 %ymm4 #define RB0 %ymm5 #define RB1 %ymm6 #define RB2 %ymm7 #define RB3 %ymm8 #define RB4 %ymm9 #define RNOT %ymm10 #define RTMP0 %ymm11 #define RTMP1 %ymm12 #define RTMP2 %ymm13 #define RTMP3 %ymm14 #define RTMP4 %ymm15 #define RNOTx %xmm10 #define RTMP0x %xmm11 #define RTMP1x %xmm12 #define RTMP2x %xmm13 #define RTMP3x %xmm14 #define RTMP4x %xmm15 /********************************************************************** helper macros **********************************************************************/ /* vector 32-bit rotation to left */ #define vec_rol(reg, nleft, tmp) \ vpslld $(nleft), reg, tmp; \ vpsrld $(32 - (nleft)), reg, reg; \ vpor tmp, reg, reg; /* vector 32-bit rotation to right */ #define vec_ror(reg, nright, tmp) \ vec_rol(reg, 32 - nright, tmp) /* 4x4 32-bit integer matrix transpose */ #define transpose_4x4(x0, x1, x2, x3, t1, t2, t3) \ vpunpckhdq x1, x0, t2; \ vpunpckldq x1, x0, x0; \ \ vpunpckldq x3, x2, t1; \ vpunpckhdq x3, x2, x2; \ \ vpunpckhqdq t1, x0, x1; \ vpunpcklqdq t1, x0, x0; \ \ vpunpckhqdq x2, t2, x3; \ vpunpcklqdq x2, t2, x2; /********************************************************************** 16-way serpent **********************************************************************/ /* * These are the S-Boxes of Serpent from following research paper. * * D. A. Osvik, “Speeding up Serpent,” in Third AES Candidate Conference, * (New York, New York, USA), p. 317–329, National Institute of Standards and * Technology, 2000. 
* * Paper is also available at: http://www.ii.uib.no/~osvik/pub/aes3.pdf * */ #define SBOX0(r0, r1, r2, r3, r4) \ vpxor r0, r3, r3; vmovdqa r1, r4; \ vpand r3, r1, r1; vpxor r2, r4, r4; \ vpxor r0, r1, r1; vpor r3, r0, r0; \ vpxor r4, r0, r0; vpxor r3, r4, r4; \ vpxor r2, r3, r3; vpor r1, r2, r2; \ vpxor r4, r2, r2; vpxor RNOT, r4, r4; \ vpor r1, r4, r4; vpxor r3, r1, r1; \ vpxor r4, r1, r1; vpor r0, r3, r3; \ vpxor r3, r1, r1; vpxor r3, r4, r4; #define SBOX0_INVERSE(r0, r1, r2, r3, r4) \ vpxor RNOT, r2, r2; vmovdqa r1, r4; \ vpor r0, r1, r1; vpxor RNOT, r4, r4; \ vpxor r2, r1, r1; vpor r4, r2, r2; \ vpxor r3, r1, r1; vpxor r4, r0, r0; \ vpxor r0, r2, r2; vpand r3, r0, r0; \ vpxor r0, r4, r4; vpor r1, r0, r0; \ vpxor r2, r0, r0; vpxor r4, r3, r3; \ vpxor r1, r2, r2; vpxor r0, r3, r3; \ vpxor r1, r3, r3; \ vpand r3, r2, r2; \ vpxor r2, r4, r4; #define SBOX1(r0, r1, r2, r3, r4) \ vpxor RNOT, r0, r0; vpxor RNOT, r2, r2; \ vmovdqa r0, r4; vpand r1, r0, r0; \ vpxor r0, r2, r2; vpor r3, r0, r0; \ vpxor r2, r3, r3; vpxor r0, r1, r1; \ vpxor r4, r0, r0; vpor r1, r4, r4; \ vpxor r3, r1, r1; vpor r0, r2, r2; \ vpand r4, r2, r2; vpxor r1, r0, r0; \ vpand r2, r1, r1; \ vpxor r0, r1, r1; vpand r2, r0, r0; \ vpxor r4, r0, r0; #define SBOX1_INVERSE(r0, r1, r2, r3, r4) \ vmovdqa r1, r4; vpxor r3, r1, r1; \ vpand r1, r3, r3; vpxor r2, r4, r4; \ vpxor r0, r3, r3; vpor r1, r0, r0; \ vpxor r3, r2, r2; vpxor r4, r0, r0; \ vpor r2, r0, r0; vpxor r3, r1, r1; \ vpxor r1, r0, r0; vpor r3, r1, r1; \ vpxor r0, r1, r1; vpxor RNOT, r4, r4; \ vpxor r1, r4, r4; vpor r0, r1, r1; \ vpxor r0, r1, r1; \ vpor r4, r1, r1; \ vpxor r1, r3, r3; #define SBOX2(r0, r1, r2, r3, r4) \ vmovdqa r0, r4; vpand r2, r0, r0; \ vpxor r3, r0, r0; vpxor r1, r2, r2; \ vpxor r0, r2, r2; vpor r4, r3, r3; \ vpxor r1, r3, r3; vpxor r2, r4, r4; \ vmovdqa r3, r1; vpor r4, r3, r3; \ vpxor r0, r3, r3; vpand r1, r0, r0; \ vpxor r0, r4, r4; vpxor r3, r1, r1; \ vpxor r4, r1, r1; vpxor RNOT, r4, r4; #define SBOX2_INVERSE(r0, r1, r2, r3, r4) \ vpxor r3, r2, r2; vpxor r0, r3, r3; \ vmovdqa r3, r4; vpand r2, r3, r3; \ vpxor r1, r3, r3; vpor r2, r1, r1; \ vpxor r4, r1, r1; vpand r3, r4, r4; \ vpxor r3, r2, r2; vpand r0, r4, r4; \ vpxor r2, r4, r4; vpand r1, r2, r2; \ vpor r0, r2, r2; vpxor RNOT, r3, r3; \ vpxor r3, r2, r2; vpxor r3, r0, r0; \ vpand r1, r0, r0; vpxor r4, r3, r3; \ vpxor r0, r3, r3; #define SBOX3(r0, r1, r2, r3, r4) \ vmovdqa r0, r4; vpor r3, r0, r0; \ vpxor r1, r3, r3; vpand r4, r1, r1; \ vpxor r2, r4, r4; vpxor r3, r2, r2; \ vpand r0, r3, r3; vpor r1, r4, r4; \ vpxor r4, r3, r3; vpxor r1, r0, r0; \ vpand r0, r4, r4; vpxor r3, r1, r1; \ vpxor r2, r4, r4; vpor r0, r1, r1; \ vpxor r2, r1, r1; vpxor r3, r0, r0; \ vmovdqa r1, r2; vpor r3, r1, r1; \ vpxor r0, r1, r1; #define SBOX3_INVERSE(r0, r1, r2, r3, r4) \ vmovdqa r2, r4; vpxor r1, r2, r2; \ vpxor r2, r0, r0; vpand r2, r4, r4; \ vpxor r0, r4, r4; vpand r1, r0, r0; \ vpxor r3, r1, r1; vpor r4, r3, r3; \ vpxor r3, r2, r2; vpxor r3, r0, r0; \ vpxor r4, r1, r1; vpand r2, r3, r3; \ vpxor r1, r3, r3; vpxor r0, r1, r1; \ vpor r2, r1, r1; vpxor r3, r0, r0; \ vpxor r4, r1, r1; \ vpxor r1, r0, r0; #define SBOX4(r0, r1, r2, r3, r4) \ vpxor r3, r1, r1; vpxor RNOT, r3, r3; \ vpxor r3, r2, r2; vpxor r0, r3, r3; \ vmovdqa r1, r4; vpand r3, r1, r1; \ vpxor r2, r1, r1; vpxor r3, r4, r4; \ vpxor r4, r0, r0; vpand r4, r2, r2; \ vpxor r0, r2, r2; vpand r1, r0, r0; \ vpxor r0, r3, r3; vpor r1, r4, r4; \ vpxor r0, r4, r4; vpor r3, r0, r0; \ vpxor r2, r0, r0; vpand r3, r2, r2; \ vpxor RNOT, r0, r0; vpxor r2, r4, r4; 
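/* Illustration (a sketch, not part of the original file): the SBOX*
 * macros above are bit-sliced boolean forms in the style of the Osvik
 * paper cited above.  Serpent's S-boxes are 4-bit wide, so bit i of
 * r0..r3 forms one S-box input; a single pass over 32-bit words thus
 * evaluates 32 S-boxes per block, and with eight blocks packed into
 * each ymm register every AVX2 logic instruction covers 256 of them.
 * A hypothetical scalar C model of SBOX0, with uint32_t words standing
 * in for the ymm registers and ~ standing in for the vpxor-with-RNOT
 * idiom, would read:
 *
 *   #include <stdint.h>
 *
 *   static void sbox0_bitsliced(uint32_t *r0, uint32_t *r1,
 *                               uint32_t *r2, uint32_t *r3,
 *                               uint32_t *r4)
 *   {
 *     *r3 ^= *r0;  *r4  = *r1;
 *     *r1 &= *r3;  *r4 ^= *r2;
 *     *r1 ^= *r0;  *r0 |= *r3;
 *     *r0 ^= *r4;  *r4 ^= *r3;
 *     *r3 ^= *r2;  *r2 |= *r1;
 *     *r2 ^= *r4;  *r4  = ~*r4;
 *     *r4 |= *r1;  *r1 ^= *r3;
 *     *r1 ^= *r4;  *r3 |= *r0;
 *     *r1 ^= *r3;  *r4 ^= *r3;
 *   }
 *
 * The results are left in a permuted register order, which is why the
 * ROUND macros below pass both an input and an output register list. */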
#define SBOX4_INVERSE(r0, r1, r2, r3, r4) \ vmovdqa r2, r4; vpand r3, r2, r2; \ vpxor r1, r2, r2; vpor r3, r1, r1; \ vpand r0, r1, r1; vpxor r2, r4, r4; \ vpxor r1, r4, r4; vpand r2, r1, r1; \ vpxor RNOT, r0, r0; vpxor r4, r3, r3; \ vpxor r3, r1, r1; vpand r0, r3, r3; \ vpxor r2, r3, r3; vpxor r1, r0, r0; \ vpand r0, r2, r2; vpxor r0, r3, r3; \ vpxor r4, r2, r2; \ vpor r3, r2, r2; vpxor r0, r3, r3; \ vpxor r1, r2, r2; #define SBOX5(r0, r1, r2, r3, r4) \ vpxor r1, r0, r0; vpxor r3, r1, r1; \ vpxor RNOT, r3, r3; vmovdqa r1, r4; \ vpand r0, r1, r1; vpxor r3, r2, r2; \ vpxor r2, r1, r1; vpor r4, r2, r2; \ vpxor r3, r4, r4; vpand r1, r3, r3; \ vpxor r0, r3, r3; vpxor r1, r4, r4; \ vpxor r2, r4, r4; vpxor r0, r2, r2; \ vpand r3, r0, r0; vpxor RNOT, r2, r2; \ vpxor r4, r0, r0; vpor r3, r4, r4; \ vpxor r4, r2, r2; #define SBOX5_INVERSE(r0, r1, r2, r3, r4) \ vpxor RNOT, r1, r1; vmovdqa r3, r4; \ vpxor r1, r2, r2; vpor r0, r3, r3; \ vpxor r2, r3, r3; vpor r1, r2, r2; \ vpand r0, r2, r2; vpxor r3, r4, r4; \ vpxor r4, r2, r2; vpor r0, r4, r4; \ vpxor r1, r4, r4; vpand r2, r1, r1; \ vpxor r3, r1, r1; vpxor r2, r4, r4; \ vpand r4, r3, r3; vpxor r1, r4, r4; \ vpxor r4, r3, r3; vpxor RNOT, r4, r4; \ vpxor r0, r3, r3; #define SBOX6(r0, r1, r2, r3, r4) \ vpxor RNOT, r2, r2; vmovdqa r3, r4; \ vpand r0, r3, r3; vpxor r4, r0, r0; \ vpxor r2, r3, r3; vpor r4, r2, r2; \ vpxor r3, r1, r1; vpxor r0, r2, r2; \ vpor r1, r0, r0; vpxor r1, r2, r2; \ vpxor r0, r4, r4; vpor r3, r0, r0; \ vpxor r2, r0, r0; vpxor r3, r4, r4; \ vpxor r0, r4, r4; vpxor RNOT, r3, r3; \ vpand r4, r2, r2; \ vpxor r3, r2, r2; #define SBOX6_INVERSE(r0, r1, r2, r3, r4) \ vpxor r2, r0, r0; vmovdqa r2, r4; \ vpand r0, r2, r2; vpxor r3, r4, r4; \ vpxor RNOT, r2, r2; vpxor r1, r3, r3; \ vpxor r3, r2, r2; vpor r0, r4, r4; \ vpxor r2, r0, r0; vpxor r4, r3, r3; \ vpxor r1, r4, r4; vpand r3, r1, r1; \ vpxor r0, r1, r1; vpxor r3, r0, r0; \ vpor r2, r0, r0; vpxor r1, r3, r3; \ vpxor r0, r4, r4; #define SBOX7(r0, r1, r2, r3, r4) \ vmovdqa r1, r4; vpor r2, r1, r1; \ vpxor r3, r1, r1; vpxor r2, r4, r4; \ vpxor r1, r2, r2; vpor r4, r3, r3; \ vpand r0, r3, r3; vpxor r2, r4, r4; \ vpxor r1, r3, r3; vpor r4, r1, r1; \ vpxor r0, r1, r1; vpor r4, r0, r0; \ vpxor r2, r0, r0; vpxor r4, r1, r1; \ vpxor r1, r2, r2; vpand r0, r1, r1; \ vpxor r4, r1, r1; vpxor RNOT, r2, r2; \ vpor r0, r2, r2; \ vpxor r2, r4, r4; #define SBOX7_INVERSE(r0, r1, r2, r3, r4) \ vmovdqa r2, r4; vpxor r0, r2, r2; \ vpand r3, r0, r0; vpor r3, r4, r4; \ vpxor RNOT, r2, r2; vpxor r1, r3, r3; \ vpor r0, r1, r1; vpxor r2, r0, r0; \ vpand r4, r2, r2; vpand r4, r3, r3; \ vpxor r2, r1, r1; vpxor r0, r2, r2; \ vpor r2, r0, r0; vpxor r1, r4, r4; \ vpxor r3, r0, r0; vpxor r4, r3, r3; \ vpor r0, r4, r4; vpxor r2, r3, r3; \ vpxor r2, r4, r4; /* Apply SBOX number WHICH to to the block. */ #define SBOX(which, r0, r1, r2, r3, r4) \ SBOX##which (r0, r1, r2, r3, r4) /* Apply inverse SBOX number WHICH to to the block. */ #define SBOX_INVERSE(which, r0, r1, r2, r3, r4) \ SBOX##which##_INVERSE (r0, r1, r2, r3, r4) /* XOR round key into block state in r0,r1,r2,r3. r4 used as temporary. */ #define BLOCK_XOR_KEY(r0, r1, r2, r3, r4, round) \ vpbroadcastd (ctx_keys + (round) * 16 + 0 * 4)(CTX), r4; \ vpxor r4, r0, r0; \ vpbroadcastd (ctx_keys + (round) * 16 + 1 * 4)(CTX), r4; \ vpxor r4, r1, r1; \ vpbroadcastd (ctx_keys + (round) * 16 + 2 * 4)(CTX), r4; \ vpxor r4, r2, r2; \ vpbroadcastd (ctx_keys + (round) * 16 + 3 * 4)(CTX), r4; \ vpxor r4, r3, r3; /* Apply the linear transformation to BLOCK. 
*/ #define LINEAR_TRANSFORMATION(r0, r1, r2, r3, r4) \ vec_rol(r0, 13, r4); \ vec_rol(r2, 3, r4); \ vpxor r0, r1, r1; \ vpxor r2, r1, r1; \ vpslld $3, r0, r4; \ vpxor r2, r3, r3; \ vpxor r4, r3, r3; \ vec_rol(r1, 1, r4); \ vec_rol(r3, 7, r4); \ vpxor r1, r0, r0; \ vpxor r3, r0, r0; \ vpslld $7, r1, r4; \ vpxor r3, r2, r2; \ vpxor r4, r2, r2; \ vec_rol(r0, 5, r4); \ vec_rol(r2, 22, r4); /* Apply the inverse linear transformation to BLOCK. */ #define LINEAR_TRANSFORMATION_INVERSE(r0, r1, r2, r3, r4) \ vec_ror(r2, 22, r4); \ vec_ror(r0, 5, r4); \ vpslld $7, r1, r4; \ vpxor r3, r2, r2; \ vpxor r4, r2, r2; \ vpxor r1, r0, r0; \ vpxor r3, r0, r0; \ vec_ror(r3, 7, r4); \ vec_ror(r1, 1, r4); \ vpslld $3, r0, r4; \ vpxor r2, r3, r3; \ vpxor r4, r3, r3; \ vpxor r0, r1, r1; \ vpxor r2, r1, r1; \ vec_ror(r2, 3, r4); \ vec_ror(r0, 13, r4); /* Apply a Serpent round to sixteen parallel blocks. This macro increments `round'. */ #define ROUND(round, which, a0, a1, a2, a3, a4, na0, na1, na2, na3, na4, \ b0, b1, b2, b3, b4, nb0, nb1, nb2, nb3, nb4) \ BLOCK_XOR_KEY (a0, a1, a2, a3, a4, round); \ SBOX (which, a0, a1, a2, a3, a4); \ BLOCK_XOR_KEY (b0, b1, b2, b3, b4, round); \ SBOX (which, b0, b1, b2, b3, b4); \ LINEAR_TRANSFORMATION (na0, na1, na2, na3, na4); \ LINEAR_TRANSFORMATION (nb0, nb1, nb2, nb3, nb4); /* Apply the last Serpent round to sixteen parallel blocks. This macro increments `round'. */ #define ROUND_LAST(round, which, a0, a1, a2, a3, a4, na0, na1, na2, na3, na4, \ b0, b1, b2, b3, b4, nb0, nb1, nb2, nb3, nb4) \ BLOCK_XOR_KEY (a0, a1, a2, a3, a4, round); \ SBOX (which, a0, a1, a2, a3, a4); \ BLOCK_XOR_KEY (b0, b1, b2, b3, b4, round); \ SBOX (which, b0, b1, b2, b3, b4); \ BLOCK_XOR_KEY (na0, na1, na2, na3, na4, ((round) + 1)); \ BLOCK_XOR_KEY (nb0, nb1, nb2, nb3, nb4, ((round) + 1)); /* Apply an inverse Serpent round to sixteen parallel blocks. This macro increments `round'. */ #define ROUND_INVERSE(round, which, a0, a1, a2, a3, a4, \ na0, na1, na2, na3, na4, \ b0, b1, b2, b3, b4, \ nb0, nb1, nb2, nb3, nb4) \ LINEAR_TRANSFORMATION_INVERSE (a0, a1, a2, a3, a4); \ LINEAR_TRANSFORMATION_INVERSE (b0, b1, b2, b3, b4); \ SBOX_INVERSE (which, a0, a1, a2, a3, a4); \ BLOCK_XOR_KEY (na0, na1, na2, na3, na4, round); \ SBOX_INVERSE (which, b0, b1, b2, b3, b4); \ BLOCK_XOR_KEY (nb0, nb1, nb2, nb3, nb4, round); /* Apply the first inverse Serpent round to sixteen parallel blocks. This macro increments `round'. 
*/ #define ROUND_FIRST_INVERSE(round, which, a0, a1, a2, a3, a4, \ na0, na1, na2, na3, na4, \ b0, b1, b2, b3, b4, \ nb0, nb1, nb2, nb3, nb4) \ BLOCK_XOR_KEY (a0, a1, a2, a3, a4, ((round) + 1)); \ BLOCK_XOR_KEY (b0, b1, b2, b3, b4, ((round) + 1)); \ SBOX_INVERSE (which, a0, a1, a2, a3, a4); \ BLOCK_XOR_KEY (na0, na1, na2, na3, na4, round); \ SBOX_INVERSE (which, b0, b1, b2, b3, b4); \ BLOCK_XOR_KEY (nb0, nb1, nb2, nb3, nb4, round); .text .align 8 ELF(.type __serpent_enc_blk16,@function;) __serpent_enc_blk16: /* input: * %rdi: ctx, CTX * RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: sixteen parallel * plaintext blocks * output: * RA4, RA1, RA2, RA0, RB4, RB1, RB2, RB0: sixteen parallel * ciphertext blocks */ vpcmpeqd RNOT, RNOT, RNOT; transpose_4x4(RA0, RA1, RA2, RA3, RA4, RTMP0, RTMP1); transpose_4x4(RB0, RB1, RB2, RB3, RB4, RTMP0, RTMP1); ROUND (0, 0, RA0, RA1, RA2, RA3, RA4, RA1, RA4, RA2, RA0, RA3, RB0, RB1, RB2, RB3, RB4, RB1, RB4, RB2, RB0, RB3); ROUND (1, 1, RA1, RA4, RA2, RA0, RA3, RA2, RA1, RA0, RA4, RA3, RB1, RB4, RB2, RB0, RB3, RB2, RB1, RB0, RB4, RB3); ROUND (2, 2, RA2, RA1, RA0, RA4, RA3, RA0, RA4, RA1, RA3, RA2, RB2, RB1, RB0, RB4, RB3, RB0, RB4, RB1, RB3, RB2); ROUND (3, 3, RA0, RA4, RA1, RA3, RA2, RA4, RA1, RA3, RA2, RA0, RB0, RB4, RB1, RB3, RB2, RB4, RB1, RB3, RB2, RB0); ROUND (4, 4, RA4, RA1, RA3, RA2, RA0, RA1, RA0, RA4, RA2, RA3, RB4, RB1, RB3, RB2, RB0, RB1, RB0, RB4, RB2, RB3); ROUND (5, 5, RA1, RA0, RA4, RA2, RA3, RA0, RA2, RA1, RA4, RA3, RB1, RB0, RB4, RB2, RB3, RB0, RB2, RB1, RB4, RB3); ROUND (6, 6, RA0, RA2, RA1, RA4, RA3, RA0, RA2, RA3, RA1, RA4, RB0, RB2, RB1, RB4, RB3, RB0, RB2, RB3, RB1, RB4); ROUND (7, 7, RA0, RA2, RA3, RA1, RA4, RA4, RA1, RA2, RA0, RA3, RB0, RB2, RB3, RB1, RB4, RB4, RB1, RB2, RB0, RB3); ROUND (8, 0, RA4, RA1, RA2, RA0, RA3, RA1, RA3, RA2, RA4, RA0, RB4, RB1, RB2, RB0, RB3, RB1, RB3, RB2, RB4, RB0); ROUND (9, 1, RA1, RA3, RA2, RA4, RA0, RA2, RA1, RA4, RA3, RA0, RB1, RB3, RB2, RB4, RB0, RB2, RB1, RB4, RB3, RB0); ROUND (10, 2, RA2, RA1, RA4, RA3, RA0, RA4, RA3, RA1, RA0, RA2, RB2, RB1, RB4, RB3, RB0, RB4, RB3, RB1, RB0, RB2); ROUND (11, 3, RA4, RA3, RA1, RA0, RA2, RA3, RA1, RA0, RA2, RA4, RB4, RB3, RB1, RB0, RB2, RB3, RB1, RB0, RB2, RB4); ROUND (12, 4, RA3, RA1, RA0, RA2, RA4, RA1, RA4, RA3, RA2, RA0, RB3, RB1, RB0, RB2, RB4, RB1, RB4, RB3, RB2, RB0); ROUND (13, 5, RA1, RA4, RA3, RA2, RA0, RA4, RA2, RA1, RA3, RA0, RB1, RB4, RB3, RB2, RB0, RB4, RB2, RB1, RB3, RB0); ROUND (14, 6, RA4, RA2, RA1, RA3, RA0, RA4, RA2, RA0, RA1, RA3, RB4, RB2, RB1, RB3, RB0, RB4, RB2, RB0, RB1, RB3); ROUND (15, 7, RA4, RA2, RA0, RA1, RA3, RA3, RA1, RA2, RA4, RA0, RB4, RB2, RB0, RB1, RB3, RB3, RB1, RB2, RB4, RB0); ROUND (16, 0, RA3, RA1, RA2, RA4, RA0, RA1, RA0, RA2, RA3, RA4, RB3, RB1, RB2, RB4, RB0, RB1, RB0, RB2, RB3, RB4); ROUND (17, 1, RA1, RA0, RA2, RA3, RA4, RA2, RA1, RA3, RA0, RA4, RB1, RB0, RB2, RB3, RB4, RB2, RB1, RB3, RB0, RB4); ROUND (18, 2, RA2, RA1, RA3, RA0, RA4, RA3, RA0, RA1, RA4, RA2, RB2, RB1, RB3, RB0, RB4, RB3, RB0, RB1, RB4, RB2); ROUND (19, 3, RA3, RA0, RA1, RA4, RA2, RA0, RA1, RA4, RA2, RA3, RB3, RB0, RB1, RB4, RB2, RB0, RB1, RB4, RB2, RB3); ROUND (20, 4, RA0, RA1, RA4, RA2, RA3, RA1, RA3, RA0, RA2, RA4, RB0, RB1, RB4, RB2, RB3, RB1, RB3, RB0, RB2, RB4); ROUND (21, 5, RA1, RA3, RA0, RA2, RA4, RA3, RA2, RA1, RA0, RA4, RB1, RB3, RB0, RB2, RB4, RB3, RB2, RB1, RB0, RB4); ROUND (22, 6, RA3, RA2, RA1, RA0, RA4, RA3, RA2, RA4, RA1, RA0, RB3, RB2, RB1, RB0, RB4, RB3, RB2, RB4, RB1, RB0); ROUND (23, 7, RA3, RA2, RA4, RA1, RA0, RA0, RA1, RA2, RA3, RA4, RB3, RB2, 
RB4, RB1, RB0, RB0, RB1, RB2, RB3, RB4); ROUND (24, 0, RA0, RA1, RA2, RA3, RA4, RA1, RA4, RA2, RA0, RA3, RB0, RB1, RB2, RB3, RB4, RB1, RB4, RB2, RB0, RB3); ROUND (25, 1, RA1, RA4, RA2, RA0, RA3, RA2, RA1, RA0, RA4, RA3, RB1, RB4, RB2, RB0, RB3, RB2, RB1, RB0, RB4, RB3); ROUND (26, 2, RA2, RA1, RA0, RA4, RA3, RA0, RA4, RA1, RA3, RA2, RB2, RB1, RB0, RB4, RB3, RB0, RB4, RB1, RB3, RB2); ROUND (27, 3, RA0, RA4, RA1, RA3, RA2, RA4, RA1, RA3, RA2, RA0, RB0, RB4, RB1, RB3, RB2, RB4, RB1, RB3, RB2, RB0); ROUND (28, 4, RA4, RA1, RA3, RA2, RA0, RA1, RA0, RA4, RA2, RA3, RB4, RB1, RB3, RB2, RB0, RB1, RB0, RB4, RB2, RB3); ROUND (29, 5, RA1, RA0, RA4, RA2, RA3, RA0, RA2, RA1, RA4, RA3, RB1, RB0, RB4, RB2, RB3, RB0, RB2, RB1, RB4, RB3); ROUND (30, 6, RA0, RA2, RA1, RA4, RA3, RA0, RA2, RA3, RA1, RA4, RB0, RB2, RB1, RB4, RB3, RB0, RB2, RB3, RB1, RB4); ROUND_LAST (31, 7, RA0, RA2, RA3, RA1, RA4, RA4, RA1, RA2, RA0, RA3, RB0, RB2, RB3, RB1, RB4, RB4, RB1, RB2, RB0, RB3); transpose_4x4(RA4, RA1, RA2, RA0, RA3, RTMP0, RTMP1); transpose_4x4(RB4, RB1, RB2, RB0, RB3, RTMP0, RTMP1); ret; ELF(.size __serpent_enc_blk16,.-__serpent_enc_blk16;) .align 8 ELF(.type __serpent_dec_blk16,@function;) __serpent_dec_blk16: /* input: * %rdi: ctx, CTX * RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: sixteen parallel * ciphertext blocks * output: * RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: sixteen parallel * plaintext blocks */ vpcmpeqd RNOT, RNOT, RNOT; transpose_4x4(RA0, RA1, RA2, RA3, RA4, RTMP0, RTMP1); transpose_4x4(RB0, RB1, RB2, RB3, RB4, RTMP0, RTMP1); ROUND_FIRST_INVERSE (31, 7, RA0, RA1, RA2, RA3, RA4, RA3, RA0, RA1, RA4, RA2, RB0, RB1, RB2, RB3, RB4, RB3, RB0, RB1, RB4, RB2); ROUND_INVERSE (30, 6, RA3, RA0, RA1, RA4, RA2, RA0, RA1, RA2, RA4, RA3, RB3, RB0, RB1, RB4, RB2, RB0, RB1, RB2, RB4, RB3); ROUND_INVERSE (29, 5, RA0, RA1, RA2, RA4, RA3, RA1, RA3, RA4, RA2, RA0, RB0, RB1, RB2, RB4, RB3, RB1, RB3, RB4, RB2, RB0); ROUND_INVERSE (28, 4, RA1, RA3, RA4, RA2, RA0, RA1, RA2, RA4, RA0, RA3, RB1, RB3, RB4, RB2, RB0, RB1, RB2, RB4, RB0, RB3); ROUND_INVERSE (27, 3, RA1, RA2, RA4, RA0, RA3, RA4, RA2, RA0, RA1, RA3, RB1, RB2, RB4, RB0, RB3, RB4, RB2, RB0, RB1, RB3); ROUND_INVERSE (26, 2, RA4, RA2, RA0, RA1, RA3, RA2, RA3, RA0, RA1, RA4, RB4, RB2, RB0, RB1, RB3, RB2, RB3, RB0, RB1, RB4); ROUND_INVERSE (25, 1, RA2, RA3, RA0, RA1, RA4, RA4, RA2, RA1, RA0, RA3, RB2, RB3, RB0, RB1, RB4, RB4, RB2, RB1, RB0, RB3); ROUND_INVERSE (24, 0, RA4, RA2, RA1, RA0, RA3, RA4, RA3, RA2, RA0, RA1, RB4, RB2, RB1, RB0, RB3, RB4, RB3, RB2, RB0, RB1); ROUND_INVERSE (23, 7, RA4, RA3, RA2, RA0, RA1, RA0, RA4, RA3, RA1, RA2, RB4, RB3, RB2, RB0, RB1, RB0, RB4, RB3, RB1, RB2); ROUND_INVERSE (22, 6, RA0, RA4, RA3, RA1, RA2, RA4, RA3, RA2, RA1, RA0, RB0, RB4, RB3, RB1, RB2, RB4, RB3, RB2, RB1, RB0); ROUND_INVERSE (21, 5, RA4, RA3, RA2, RA1, RA0, RA3, RA0, RA1, RA2, RA4, RB4, RB3, RB2, RB1, RB0, RB3, RB0, RB1, RB2, RB4); ROUND_INVERSE (20, 4, RA3, RA0, RA1, RA2, RA4, RA3, RA2, RA1, RA4, RA0, RB3, RB0, RB1, RB2, RB4, RB3, RB2, RB1, RB4, RB0); ROUND_INVERSE (19, 3, RA3, RA2, RA1, RA4, RA0, RA1, RA2, RA4, RA3, RA0, RB3, RB2, RB1, RB4, RB0, RB1, RB2, RB4, RB3, RB0); ROUND_INVERSE (18, 2, RA1, RA2, RA4, RA3, RA0, RA2, RA0, RA4, RA3, RA1, RB1, RB2, RB4, RB3, RB0, RB2, RB0, RB4, RB3, RB1); ROUND_INVERSE (17, 1, RA2, RA0, RA4, RA3, RA1, RA1, RA2, RA3, RA4, RA0, RB2, RB0, RB4, RB3, RB1, RB1, RB2, RB3, RB4, RB0); ROUND_INVERSE (16, 0, RA1, RA2, RA3, RA4, RA0, RA1, RA0, RA2, RA4, RA3, RB1, RB2, RB3, RB4, RB0, RB1, RB0, RB2, RB4, RB3); ROUND_INVERSE (15, 7, RA1, RA0, RA2, RA4, 
RA3, RA4, RA1, RA0, RA3, RA2, RB1, RB0, RB2, RB4, RB3, RB4, RB1, RB0, RB3, RB2); ROUND_INVERSE (14, 6, RA4, RA1, RA0, RA3, RA2, RA1, RA0, RA2, RA3, RA4, RB4, RB1, RB0, RB3, RB2, RB1, RB0, RB2, RB3, RB4); ROUND_INVERSE (13, 5, RA1, RA0, RA2, RA3, RA4, RA0, RA4, RA3, RA2, RA1, RB1, RB0, RB2, RB3, RB4, RB0, RB4, RB3, RB2, RB1); ROUND_INVERSE (12, 4, RA0, RA4, RA3, RA2, RA1, RA0, RA2, RA3, RA1, RA4, RB0, RB4, RB3, RB2, RB1, RB0, RB2, RB3, RB1, RB4); ROUND_INVERSE (11, 3, RA0, RA2, RA3, RA1, RA4, RA3, RA2, RA1, RA0, RA4, RB0, RB2, RB3, RB1, RB4, RB3, RB2, RB1, RB0, RB4); ROUND_INVERSE (10, 2, RA3, RA2, RA1, RA0, RA4, RA2, RA4, RA1, RA0, RA3, RB3, RB2, RB1, RB0, RB4, RB2, RB4, RB1, RB0, RB3); ROUND_INVERSE (9, 1, RA2, RA4, RA1, RA0, RA3, RA3, RA2, RA0, RA1, RA4, RB2, RB4, RB1, RB0, RB3, RB3, RB2, RB0, RB1, RB4); ROUND_INVERSE (8, 0, RA3, RA2, RA0, RA1, RA4, RA3, RA4, RA2, RA1, RA0, RB3, RB2, RB0, RB1, RB4, RB3, RB4, RB2, RB1, RB0); ROUND_INVERSE (7, 7, RA3, RA4, RA2, RA1, RA0, RA1, RA3, RA4, RA0, RA2, RB3, RB4, RB2, RB1, RB0, RB1, RB3, RB4, RB0, RB2); ROUND_INVERSE (6, 6, RA1, RA3, RA4, RA0, RA2, RA3, RA4, RA2, RA0, RA1, RB1, RB3, RB4, RB0, RB2, RB3, RB4, RB2, RB0, RB1); ROUND_INVERSE (5, 5, RA3, RA4, RA2, RA0, RA1, RA4, RA1, RA0, RA2, RA3, RB3, RB4, RB2, RB0, RB1, RB4, RB1, RB0, RB2, RB3); ROUND_INVERSE (4, 4, RA4, RA1, RA0, RA2, RA3, RA4, RA2, RA0, RA3, RA1, RB4, RB1, RB0, RB2, RB3, RB4, RB2, RB0, RB3, RB1); ROUND_INVERSE (3, 3, RA4, RA2, RA0, RA3, RA1, RA0, RA2, RA3, RA4, RA1, RB4, RB2, RB0, RB3, RB1, RB0, RB2, RB3, RB4, RB1); ROUND_INVERSE (2, 2, RA0, RA2, RA3, RA4, RA1, RA2, RA1, RA3, RA4, RA0, RB0, RB2, RB3, RB4, RB1, RB2, RB1, RB3, RB4, RB0); ROUND_INVERSE (1, 1, RA2, RA1, RA3, RA4, RA0, RA0, RA2, RA4, RA3, RA1, RB2, RB1, RB3, RB4, RB0, RB0, RB2, RB4, RB3, RB1); ROUND_INVERSE (0, 0, RA0, RA2, RA4, RA3, RA1, RA0, RA1, RA2, RA3, RA4, RB0, RB2, RB4, RB3, RB1, RB0, RB1, RB2, RB3, RB4); transpose_4x4(RA0, RA1, RA2, RA3, RA4, RTMP0, RTMP1); transpose_4x4(RB0, RB1, RB2, RB3, RB4, RTMP0, RTMP1); ret; ELF(.size __serpent_dec_blk16,.-__serpent_dec_blk16;) #define inc_le128(x, minus_one, tmp) \ vpcmpeqq minus_one, x, tmp; \ vpsubq minus_one, x, x; \ vpslldq $8, tmp, tmp; \ vpsubq tmp, x, x; .align 8 .globl _gcry_serpent_avx2_ctr_enc ELF(.type _gcry_serpent_avx2_ctr_enc,@function;) _gcry_serpent_avx2_ctr_enc: /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) * %rdx: src (16 blocks) * %rcx: iv (big endian, 128bit) */ movq 8(%rcx), %rax; bswapq %rax; vzeroupper; vbroadcasti128 .Lbswap128_mask RIP, RTMP3; vpcmpeqd RNOT, RNOT, RNOT; vpsrldq $8, RNOT, RNOT; /* ab: -1:0 ; cd: -1:0 */ vpaddq RNOT, RNOT, RTMP2; /* ab: -2:0 ; cd: -2:0 */ /* load IV and byteswap */ vmovdqu (%rcx), RTMP4x; vpshufb RTMP3x, RTMP4x, RTMP4x; vmovdqa RTMP4x, RTMP0x; inc_le128(RTMP4x, RNOTx, RTMP1x); vinserti128 $1, RTMP4x, RTMP0, RTMP0; vpshufb RTMP3, RTMP0, RA0; /* +1 ; +0 */ /* check need for handling 64-bit overflow and carry */ cmpq $(0xffffffffffffffff - 16), %rax; ja .Lhandle_ctr_carry; /* construct IVs */ vpsubq RTMP2, RTMP0, RTMP0; /* +3 ; +2 */ vpshufb RTMP3, RTMP0, RA1; vpsubq RTMP2, RTMP0, RTMP0; /* +5 ; +4 */ vpshufb RTMP3, RTMP0, RA2; vpsubq RTMP2, RTMP0, RTMP0; /* +7 ; +6 */ vpshufb RTMP3, RTMP0, RA3; vpsubq RTMP2, RTMP0, RTMP0; /* +9 ; +8 */ vpshufb RTMP3, RTMP0, RB0; vpsubq RTMP2, RTMP0, RTMP0; /* +11 ; +10 */ vpshufb RTMP3, RTMP0, RB1; vpsubq RTMP2, RTMP0, RTMP0; /* +13 ; +12 */ vpshufb RTMP3, RTMP0, RB2; vpsubq RTMP2, RTMP0, RTMP0; /* +15 ; +14 */ vpshufb RTMP3, RTMP0, RB3; vpsubq RTMP2, RTMP0, RTMP0; /* +16 */ 
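	/* The counters above are kept in little-endian order after the
	 * .Lbswap128_mask shuffle, so subtracting RTMP2 (low quadword -2,
	 * high quadword 0 in each 128-bit half) advances both counters in
	 * a ymm register by two.  This fast path is only taken when %rax,
	 * the byte-swapped low 64 bits of the IV, cannot wrap within the
	 * next 16 blocks; otherwise .Lhandle_ctr_carry below steps one
	 * counter at a time with inc_le128, which detects a low quadword
	 * of all ones via vpcmpeqq and carries into the high quadword. */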
vpshufb RTMP3x, RTMP0x, RTMP0x; jmp .Lctr_carry_done; .Lhandle_ctr_carry: /* construct IVs */ inc_le128(RTMP0, RNOT, RTMP1); inc_le128(RTMP0, RNOT, RTMP1); vpshufb RTMP3, RTMP0, RA1; /* +3 ; +2 */ inc_le128(RTMP0, RNOT, RTMP1); inc_le128(RTMP0, RNOT, RTMP1); vpshufb RTMP3, RTMP0, RA2; /* +5 ; +4 */ inc_le128(RTMP0, RNOT, RTMP1); inc_le128(RTMP0, RNOT, RTMP1); vpshufb RTMP3, RTMP0, RA3; /* +7 ; +6 */ inc_le128(RTMP0, RNOT, RTMP1); inc_le128(RTMP0, RNOT, RTMP1); vpshufb RTMP3, RTMP0, RB0; /* +9 ; +8 */ inc_le128(RTMP0, RNOT, RTMP1); inc_le128(RTMP0, RNOT, RTMP1); vpshufb RTMP3, RTMP0, RB1; /* +11 ; +10 */ inc_le128(RTMP0, RNOT, RTMP1); inc_le128(RTMP0, RNOT, RTMP1); vpshufb RTMP3, RTMP0, RB2; /* +13 ; +12 */ inc_le128(RTMP0, RNOT, RTMP1); inc_le128(RTMP0, RNOT, RTMP1); vpshufb RTMP3, RTMP0, RB3; /* +15 ; +14 */ inc_le128(RTMP0, RNOT, RTMP1); vextracti128 $1, RTMP0, RTMP0x; vpshufb RTMP3x, RTMP0x, RTMP0x; /* +16 */ .align 4 .Lctr_carry_done: /* store new IV */ vmovdqu RTMP0x, (%rcx); call __serpent_enc_blk16; vpxor (0 * 32)(%rdx), RA4, RA4; vpxor (1 * 32)(%rdx), RA1, RA1; vpxor (2 * 32)(%rdx), RA2, RA2; vpxor (3 * 32)(%rdx), RA0, RA0; vpxor (4 * 32)(%rdx), RB4, RB4; vpxor (5 * 32)(%rdx), RB1, RB1; vpxor (6 * 32)(%rdx), RB2, RB2; vpxor (7 * 32)(%rdx), RB0, RB0; vmovdqu RA4, (0 * 32)(%rsi); vmovdqu RA1, (1 * 32)(%rsi); vmovdqu RA2, (2 * 32)(%rsi); vmovdqu RA0, (3 * 32)(%rsi); vmovdqu RB4, (4 * 32)(%rsi); vmovdqu RB1, (5 * 32)(%rsi); vmovdqu RB2, (6 * 32)(%rsi); vmovdqu RB0, (7 * 32)(%rsi); vzeroall; ret ELF(.size _gcry_serpent_avx2_ctr_enc,.-_gcry_serpent_avx2_ctr_enc;) .align 8 .globl _gcry_serpent_avx2_cbc_dec ELF(.type _gcry_serpent_avx2_cbc_dec,@function;) _gcry_serpent_avx2_cbc_dec: /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) * %rdx: src (16 blocks) * %rcx: iv */ vzeroupper; vmovdqu (0 * 32)(%rdx), RA0; vmovdqu (1 * 32)(%rdx), RA1; vmovdqu (2 * 32)(%rdx), RA2; vmovdqu (3 * 32)(%rdx), RA3; vmovdqu (4 * 32)(%rdx), RB0; vmovdqu (5 * 32)(%rdx), RB1; vmovdqu (6 * 32)(%rdx), RB2; vmovdqu (7 * 32)(%rdx), RB3; call __serpent_dec_blk16; vmovdqu (%rcx), RNOTx; vinserti128 $1, (%rdx), RNOT, RNOT; vpxor RNOT, RA0, RA0; vpxor (0 * 32 + 16)(%rdx), RA1, RA1; vpxor (1 * 32 + 16)(%rdx), RA2, RA2; vpxor (2 * 32 + 16)(%rdx), RA3, RA3; vpxor (3 * 32 + 16)(%rdx), RB0, RB0; vpxor (4 * 32 + 16)(%rdx), RB1, RB1; vpxor (5 * 32 + 16)(%rdx), RB2, RB2; vpxor (6 * 32 + 16)(%rdx), RB3, RB3; vmovdqu (7 * 32 + 16)(%rdx), RNOTx; vmovdqu RNOTx, (%rcx); /* store new IV */ vmovdqu RA0, (0 * 32)(%rsi); vmovdqu RA1, (1 * 32)(%rsi); vmovdqu RA2, (2 * 32)(%rsi); vmovdqu RA3, (3 * 32)(%rsi); vmovdqu RB0, (4 * 32)(%rsi); vmovdqu RB1, (5 * 32)(%rsi); vmovdqu RB2, (6 * 32)(%rsi); vmovdqu RB3, (7 * 32)(%rsi); vzeroall; ret ELF(.size _gcry_serpent_avx2_cbc_dec,.-_gcry_serpent_avx2_cbc_dec;) .align 8 .globl _gcry_serpent_avx2_cfb_dec ELF(.type _gcry_serpent_avx2_cfb_dec,@function;) _gcry_serpent_avx2_cfb_dec: /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) * %rdx: src (16 blocks) * %rcx: iv */ vzeroupper; /* Load input */ vmovdqu (%rcx), RNOTx; vinserti128 $1, (%rdx), RNOT, RA0; vmovdqu (0 * 32 + 16)(%rdx), RA1; vmovdqu (1 * 32 + 16)(%rdx), RA2; vmovdqu (2 * 32 + 16)(%rdx), RA3; vmovdqu (3 * 32 + 16)(%rdx), RB0; vmovdqu (4 * 32 + 16)(%rdx), RB1; vmovdqu (5 * 32 + 16)(%rdx), RB2; vmovdqu (6 * 32 + 16)(%rdx), RB3; /* Update IV */ vmovdqu (7 * 32 + 16)(%rdx), RNOTx; vmovdqu RNOTx, (%rcx); call __serpent_enc_blk16; vpxor (0 * 32)(%rdx), RA4, RA4; vpxor (1 * 32)(%rdx), RA1, RA1; vpxor (2 * 32)(%rdx), RA2, RA2; vpxor (3 * 
32)(%rdx), RA0, RA0; vpxor (4 * 32)(%rdx), RB4, RB4; vpxor (5 * 32)(%rdx), RB1, RB1; vpxor (6 * 32)(%rdx), RB2, RB2; vpxor (7 * 32)(%rdx), RB0, RB0; vmovdqu RA4, (0 * 32)(%rsi); vmovdqu RA1, (1 * 32)(%rsi); vmovdqu RA2, (2 * 32)(%rsi); vmovdqu RA0, (3 * 32)(%rsi); vmovdqu RB4, (4 * 32)(%rsi); vmovdqu RB1, (5 * 32)(%rsi); vmovdqu RB2, (6 * 32)(%rsi); vmovdqu RB0, (7 * 32)(%rsi); vzeroall; ret ELF(.size _gcry_serpent_avx2_cfb_dec,.-_gcry_serpent_avx2_cfb_dec;) .align 8 .globl _gcry_serpent_avx2_ocb_enc ELF(.type _gcry_serpent_avx2_ocb_enc,@function;) _gcry_serpent_avx2_ocb_enc: /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) * %rdx: src (16 blocks) * %rcx: offset * %r8 : checksum * %r9 : L pointers (void *L[16]) */ vzeroupper; subq $(4 * 8), %rsp; movq %r10, (0 * 8)(%rsp); movq %r11, (1 * 8)(%rsp); movq %r12, (2 * 8)(%rsp); movq %r13, (3 * 8)(%rsp); vmovdqu (%rcx), RTMP0x; vmovdqu (%r8), RTMP1x; /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ /* Checksum_i = Checksum_{i-1} xor P_i */ /* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i) */ #define OCB_INPUT(n, l0reg, l1reg, yreg) \ vmovdqu (n * 32)(%rdx), yreg; \ vpxor (l0reg), RTMP0x, RNOTx; \ vpxor (l1reg), RNOTx, RTMP0x; \ vinserti128 $1, RTMP0x, RNOT, RNOT; \ vpxor yreg, RTMP1, RTMP1; \ vpxor yreg, RNOT, yreg; \ vmovdqu RNOT, (n * 32)(%rsi); movq (0 * 8)(%r9), %r10; movq (1 * 8)(%r9), %r11; movq (2 * 8)(%r9), %r12; movq (3 * 8)(%r9), %r13; OCB_INPUT(0, %r10, %r11, RA0); OCB_INPUT(1, %r12, %r13, RA1); movq (4 * 8)(%r9), %r10; movq (5 * 8)(%r9), %r11; movq (6 * 8)(%r9), %r12; movq (7 * 8)(%r9), %r13; OCB_INPUT(2, %r10, %r11, RA2); OCB_INPUT(3, %r12, %r13, RA3); movq (8 * 8)(%r9), %r10; movq (9 * 8)(%r9), %r11; movq (10 * 8)(%r9), %r12; movq (11 * 8)(%r9), %r13; OCB_INPUT(4, %r10, %r11, RB0); OCB_INPUT(5, %r12, %r13, RB1); movq (12 * 8)(%r9), %r10; movq (13 * 8)(%r9), %r11; movq (14 * 8)(%r9), %r12; movq (15 * 8)(%r9), %r13; OCB_INPUT(6, %r10, %r11, RB2); OCB_INPUT(7, %r12, %r13, RB3); #undef OCB_INPUT vextracti128 $1, RTMP1, RNOTx; vmovdqu RTMP0x, (%rcx); vpxor RNOTx, RTMP1x, RTMP1x; vmovdqu RTMP1x, (%r8); movq (0 * 8)(%rsp), %r10; movq (1 * 8)(%rsp), %r11; movq (2 * 8)(%rsp), %r12; movq (3 * 8)(%rsp), %r13; call __serpent_enc_blk16; addq $(4 * 8), %rsp; vpxor (0 * 32)(%rsi), RA4, RA4; vpxor (1 * 32)(%rsi), RA1, RA1; vpxor (2 * 32)(%rsi), RA2, RA2; vpxor (3 * 32)(%rsi), RA0, RA0; vpxor (4 * 32)(%rsi), RB4, RB4; vpxor (5 * 32)(%rsi), RB1, RB1; vpxor (6 * 32)(%rsi), RB2, RB2; vpxor (7 * 32)(%rsi), RB0, RB0; vmovdqu RA4, (0 * 32)(%rsi); vmovdqu RA1, (1 * 32)(%rsi); vmovdqu RA2, (2 * 32)(%rsi); vmovdqu RA0, (3 * 32)(%rsi); vmovdqu RB4, (4 * 32)(%rsi); vmovdqu RB1, (5 * 32)(%rsi); vmovdqu RB2, (6 * 32)(%rsi); vmovdqu RB0, (7 * 32)(%rsi); vzeroall; ret; ELF(.size _gcry_serpent_avx2_ocb_enc,.-_gcry_serpent_avx2_ocb_enc;) .align 8 .globl _gcry_serpent_avx2_ocb_dec ELF(.type _gcry_serpent_avx2_ocb_dec,@function;) _gcry_serpent_avx2_ocb_dec: /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) * %rdx: src (16 blocks) * %rcx: offset * %r8 : checksum * %r9 : L pointers (void *L[16]) */ vzeroupper; subq $(4 * 8), %rsp; movq %r10, (0 * 8)(%rsp); movq %r11, (1 * 8)(%rsp); movq %r12, (2 * 8)(%rsp); movq %r13, (3 * 8)(%rsp); vmovdqu (%rcx), RTMP0x; /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ /* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i) */ #define OCB_INPUT(n, l0reg, l1reg, yreg) \ vmovdqu (n * 32)(%rdx), yreg; \ vpxor (l0reg), RTMP0x, RNOTx; \ vpxor (l1reg), RNOTx, RTMP0x; \ vinserti128 $1, RTMP0x, RNOT, RNOT; \ vpxor yreg, RNOT, yreg; \ vmovdqu RNOT, 
(n * 32)(%rsi); movq (0 * 8)(%r9), %r10; movq (1 * 8)(%r9), %r11; movq (2 * 8)(%r9), %r12; movq (3 * 8)(%r9), %r13; OCB_INPUT(0, %r10, %r11, RA0); OCB_INPUT(1, %r12, %r13, RA1); movq (4 * 8)(%r9), %r10; movq (5 * 8)(%r9), %r11; movq (6 * 8)(%r9), %r12; movq (7 * 8)(%r9), %r13; OCB_INPUT(2, %r10, %r11, RA2); OCB_INPUT(3, %r12, %r13, RA3); movq (8 * 8)(%r9), %r10; movq (9 * 8)(%r9), %r11; movq (10 * 8)(%r9), %r12; movq (11 * 8)(%r9), %r13; OCB_INPUT(4, %r10, %r11, RB0); OCB_INPUT(5, %r12, %r13, RB1); movq (12 * 8)(%r9), %r10; movq (13 * 8)(%r9), %r11; movq (14 * 8)(%r9), %r12; movq (15 * 8)(%r9), %r13; OCB_INPUT(6, %r10, %r11, RB2); OCB_INPUT(7, %r12, %r13, RB3); #undef OCB_INPUT vmovdqu RTMP0x, (%rcx); movq (0 * 8)(%rsp), %r10; movq (1 * 8)(%rsp), %r11; movq (2 * 8)(%rsp), %r12; movq (3 * 8)(%rsp), %r13; call __serpent_dec_blk16; addq $(4 * 8), %rsp; vmovdqu (%r8), RTMP1x; vpxor (0 * 32)(%rsi), RA0, RA0; vpxor (1 * 32)(%rsi), RA1, RA1; vpxor (2 * 32)(%rsi), RA2, RA2; vpxor (3 * 32)(%rsi), RA3, RA3; vpxor (4 * 32)(%rsi), RB0, RB0; vpxor (5 * 32)(%rsi), RB1, RB1; vpxor (6 * 32)(%rsi), RB2, RB2; vpxor (7 * 32)(%rsi), RB3, RB3; /* Checksum_i = Checksum_{i-1} xor P_i */ vmovdqu RA0, (0 * 32)(%rsi); vpxor RA0, RTMP1, RTMP1; vmovdqu RA1, (1 * 32)(%rsi); vpxor RA1, RTMP1, RTMP1; vmovdqu RA2, (2 * 32)(%rsi); vpxor RA2, RTMP1, RTMP1; vmovdqu RA3, (3 * 32)(%rsi); vpxor RA3, RTMP1, RTMP1; vmovdqu RB0, (4 * 32)(%rsi); vpxor RB0, RTMP1, RTMP1; vmovdqu RB1, (5 * 32)(%rsi); vpxor RB1, RTMP1, RTMP1; vmovdqu RB2, (6 * 32)(%rsi); vpxor RB2, RTMP1, RTMP1; vmovdqu RB3, (7 * 32)(%rsi); vpxor RB3, RTMP1, RTMP1; vextracti128 $1, RTMP1, RNOTx; vpxor RNOTx, RTMP1x, RTMP1x; vmovdqu RTMP1x, (%r8); vzeroall; ret; ELF(.size _gcry_serpent_avx2_ocb_dec,.-_gcry_serpent_avx2_ocb_dec;) .align 8 .globl _gcry_serpent_avx2_ocb_auth ELF(.type _gcry_serpent_avx2_ocb_auth,@function;) _gcry_serpent_avx2_ocb_auth: /* input: * %rdi: ctx, CTX * %rsi: abuf (16 blocks) * %rdx: offset * %rcx: checksum * %r8 : L pointers (void *L[16]) */ vzeroupper; subq $(4 * 8), %rsp; movq %r10, (0 * 8)(%rsp); movq %r11, (1 * 8)(%rsp); movq %r12, (2 * 8)(%rsp); movq %r13, (3 * 8)(%rsp); vmovdqu (%rdx), RTMP0x; /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ /* Sum_i = Sum_{i-1} xor ENCIPHER(K, A_i xor Offset_i) */ #define OCB_INPUT(n, l0reg, l1reg, yreg) \ vmovdqu (n * 32)(%rsi), yreg; \ vpxor (l0reg), RTMP0x, RNOTx; \ vpxor (l1reg), RNOTx, RTMP0x; \ vinserti128 $1, RTMP0x, RNOT, RNOT; \ vpxor yreg, RNOT, yreg; movq (0 * 8)(%r8), %r10; movq (1 * 8)(%r8), %r11; movq (2 * 8)(%r8), %r12; movq (3 * 8)(%r8), %r13; OCB_INPUT(0, %r10, %r11, RA0); OCB_INPUT(1, %r12, %r13, RA1); movq (4 * 8)(%r8), %r10; movq (5 * 8)(%r8), %r11; movq (6 * 8)(%r8), %r12; movq (7 * 8)(%r8), %r13; OCB_INPUT(2, %r10, %r11, RA2); OCB_INPUT(3, %r12, %r13, RA3); movq (8 * 8)(%r8), %r10; movq (9 * 8)(%r8), %r11; movq (10 * 8)(%r8), %r12; movq (11 * 8)(%r8), %r13; OCB_INPUT(4, %r10, %r11, RB0); OCB_INPUT(5, %r12, %r13, RB1); movq (12 * 8)(%r8), %r10; movq (13 * 8)(%r8), %r11; movq (14 * 8)(%r8), %r12; movq (15 * 8)(%r8), %r13; OCB_INPUT(6, %r10, %r11, RB2); OCB_INPUT(7, %r12, %r13, RB3); #undef OCB_INPUT vmovdqu RTMP0x, (%rdx); movq (0 * 8)(%rsp), %r10; movq (1 * 8)(%rsp), %r11; movq (2 * 8)(%rsp), %r12; movq (3 * 8)(%rsp), %r13; call __serpent_enc_blk16; addq $(4 * 8), %rsp; vpxor RA4, RB4, RA4; vpxor RA1, RB1, RA1; vpxor RA2, RB2, RA2; vpxor RA0, RB0, RA0; vpxor RA4, RA1, RA1; vpxor RA2, RA0, RA0; vpxor RA1, RA0, RTMP1; vextracti128 $1, RTMP1, RNOTx; vpxor (%rcx), RTMP1x, RTMP1x; vpxor 
RNOTx, RTMP1x, RTMP1x; vmovdqu RTMP1x, (%rcx); vzeroall; ret; ELF(.size _gcry_serpent_avx2_ocb_auth,.-_gcry_serpent_avx2_ocb_auth;) -.data .align 16 /* For CTR-mode IV byteswap */ .Lbswap128_mask: .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 #endif /*defined(USE_SERPENT) && defined(ENABLE_AVX2_SUPPORT)*/ #endif /*__x86_64*/ diff --git a/cipher/sha1-avx-amd64.S b/cipher/sha1-avx-amd64.S index 3b3a6d11..b14603bf 100644 --- a/cipher/sha1-avx-amd64.S +++ b/cipher/sha1-avx-amd64.S @@ -1,427 +1,426 @@ /* sha1-avx-amd64.S - Intel AVX accelerated SHA-1 transform function * Copyright (C) 2013 Jussi Kivilinna * * Based on sha1.c: * Copyright (C) 1998, 2001, 2002, 2003, 2008 Free Software Foundation, Inc. * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . */ /* * Intel SSSE3 accelerated SHA-1 implementation based on white paper: * "Improving the Performance of the Secure Hash Algorithm (SHA-1)" * http://software.intel.com/en-us/articles/improving-the-performance-of-the-secure-hash-algorithm-1 */ #ifdef __x86_64__ #include #if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && \ defined(HAVE_GCC_INLINE_ASM_AVX) && defined(USE_SHA1) #ifdef __PIC__ # define RIP (%rip) #else # define RIP #endif #ifdef HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS # define ELF(...) __VA_ARGS__ #else # define ELF(...) /*_*/ #endif /* Context structure */ #define state_h0 0 #define state_h1 4 #define state_h2 8 #define state_h3 12 #define state_h4 16 /* Constants */ -.data +.text #define K1 0x5A827999 #define K2 0x6ED9EBA1 #define K3 0x8F1BBCDC #define K4 0xCA62C1D6 .align 16 .LK_XMM: .LK1: .long K1, K1, K1, K1 .LK2: .long K2, K2, K2, K2 .LK3: .long K3, K3, K3, K3 .LK4: .long K4, K4, K4, K4 .Lbswap_shufb_ctl: .long 0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f /* Register macros */ #define RSTATE %r8 #define RDATA %r9 #define ROLDSTACK %r10 #define RNBLKS %r11 #define a %eax #define b %ebx #define c %ecx #define d %edx #define e %edi #define RT0 %esi #define RT1 %ebp #define Wtmp0 %xmm0 #define Wtmp1 %xmm1 #define W0 %xmm2 #define W1 %xmm3 #define W2 %xmm4 #define W3 %xmm5 #define W4 %xmm6 #define W5 %xmm7 #define W6 %xmm8 #define W7 %xmm9 #define BSWAP_REG %xmm10 /* Round function macros. 
*/ #define WK(i) (((i) & 15) * 4)(%rsp) #define R_F1(a,b,c,d,e,i) \ movl c, RT0; \ addl WK(i), e; \ xorl d, RT0; \ movl a, RT1; \ andl b, RT0; \ shldl $30, b, b; \ xorl d, RT0; \ leal (RT0,e), e; \ shldl $5, RT1, RT1; \ addl RT1, e; #define R_F2(a,b,c,d,e,i) \ movl c, RT0; \ addl WK(i), e; \ xorl b, RT0; \ shldl $30, b, b; \ xorl d, RT0; \ movl a, RT1; \ leal (RT0,e), e; \ shldl $5, RT1, RT1; \ addl RT1, e; #define R_F3(a,b,c,d,e,i) \ movl c, RT0; \ movl b, RT1; \ xorl b, RT0; \ andl c, RT1; \ andl d, RT0; \ addl RT1, e; \ addl WK(i), e; \ shldl $30, b, b; \ movl a, RT1; \ leal (RT0,e), e; \ shldl $5, RT1, RT1; \ addl RT1, e; #define R_F4(a,b,c,d,e,i) R_F2(a,b,c,d,e,i) #define R(a,b,c,d,e,f,i) \ R_##f(a,b,c,d,e,i) /* Input expansion macros. */ #define W_PRECALC_00_15_0(i, W, tmp0) \ vmovdqu (4*(i))(RDATA), tmp0; #define W_PRECALC_00_15_1(i, W, tmp0) \ vpshufb BSWAP_REG, tmp0, W; #define W_PRECALC_00_15_2(i, W, tmp0) \ vpaddd (.LK_XMM + ((i)/20)*16) RIP, W, tmp0; #define W_PRECALC_00_15_3(i, W, tmp0) \ vmovdqa tmp0, WK(i&~3); #define W_PRECALC_16_31_0(i, W, W_m04, W_m08, W_m12, W_m16, tmp0, tmp1) \ vpalignr $8, W_m16, W_m12, W; \ vpsrldq $4, W_m04, tmp0; \ vpxor W_m08, W, W; #define W_PRECALC_16_31_1(i, W, W_m04, W_m08, W_m12, W_m16, tmp0, tmp1) \ vpxor W_m16, tmp0, tmp0; \ vpxor tmp0, W, W; \ vpslld $1, W, tmp0; \ vpslldq $12, W, tmp1; \ vpsrld $31, W, W; #define W_PRECALC_16_31_2(i, W, W_m04, W_m08, W_m12, W_m16, tmp0, tmp1) \ vpor W, tmp0, tmp0; \ vpsrld $30, tmp1, W; \ vpslld $2, tmp1, tmp1; #define W_PRECALC_16_31_3(i, W, W_m04, W_m08, W_m12, W_m16, tmp0, tmp1) \ vpxor W, tmp0, tmp0; \ vpxor tmp1, tmp0, W; \ vpaddd (.LK_XMM + ((i)/20)*16) RIP, W, tmp0; \ vmovdqa tmp0, WK((i)&~3); #define W_PRECALC_32_79_0(i, W, W_m04, W_m08, W_m12, W_m16, W_m20, W_m24, W_m28, tmp0) \ vpxor W_m28, W, W; \ vpalignr $8, W_m08, W_m04, tmp0; #define W_PRECALC_32_79_1(i, W, W_m04, W_m08, W_m12, W_m16, W_m20, W_m24, W_m28, tmp0) \ vpxor W_m16, W, W; \ vpxor tmp0, W, W; #define W_PRECALC_32_79_2(i, W, W_m04, W_m08, W_m12, W_m16, W_m20, W_m24, W_m28, tmp0) \ vpsrld $30, W, tmp0; \ vpslld $2, W, W; #define W_PRECALC_32_79_3(i, W, W_m04, W_m08, W_m12, W_m16, W_m20, W_m24, W_m28, tmp0) \ vpor W, tmp0, W; \ vpaddd (.LK_XMM + ((i)/20)*16) RIP, W, tmp0; \ vmovdqa tmp0, WK((i)&~3); /* * Transform nblks*64 bytes (nblks*16 32-bit words) at DATA. * * unsigned int * _gcry_sha1_transform_amd64_avx (void *ctx, const unsigned char *data, * size_t nblks) */ -.text .globl _gcry_sha1_transform_amd64_avx ELF(.type _gcry_sha1_transform_amd64_avx,@function) .align 16 _gcry_sha1_transform_amd64_avx: /* input: * %rdi: ctx, CTX * %rsi: data (64*nblks bytes) * %rdx: nblks */ xorl %eax, %eax; cmpq $0, %rdx; jz .Lret; vzeroupper; movq %rdx, RNBLKS; movq %rdi, RSTATE; movq %rsi, RDATA; pushq %rbx; pushq %rbp; movq %rsp, ROLDSTACK; subq $(16*4), %rsp; andq $(~31), %rsp; /* Get the values of the chaining variables. */ movl state_h0(RSTATE), a; movl state_h1(RSTATE), b; movl state_h2(RSTATE), c; movl state_h3(RSTATE), d; movl state_h4(RSTATE), e; movdqa .Lbswap_shufb_ctl RIP, BSWAP_REG; /* Precalc 0-15. 
*/ W_PRECALC_00_15_0(0, W0, Wtmp0); W_PRECALC_00_15_1(1, W0, Wtmp0); W_PRECALC_00_15_2(2, W0, Wtmp0); W_PRECALC_00_15_3(3, W0, Wtmp0); W_PRECALC_00_15_0(4, W7, Wtmp0); W_PRECALC_00_15_1(5, W7, Wtmp0); W_PRECALC_00_15_2(6, W7, Wtmp0); W_PRECALC_00_15_3(7, W7, Wtmp0); W_PRECALC_00_15_0(8, W6, Wtmp0); W_PRECALC_00_15_1(9, W6, Wtmp0); W_PRECALC_00_15_2(10, W6, Wtmp0); W_PRECALC_00_15_3(11, W6, Wtmp0); W_PRECALC_00_15_0(12, W5, Wtmp0); W_PRECALC_00_15_1(13, W5, Wtmp0); W_PRECALC_00_15_2(14, W5, Wtmp0); W_PRECALC_00_15_3(15, W5, Wtmp0); .align 8 .Loop: addq $64, RDATA; /* Transform 0-15 + Precalc 16-31. */ R( a, b, c, d, e, F1, 0 ); W_PRECALC_16_31_0(16, W4, W5, W6, W7, W0, Wtmp0, Wtmp1); R( e, a, b, c, d, F1, 1 ); W_PRECALC_16_31_1(17, W4, W5, W6, W7, W0, Wtmp0, Wtmp1); R( d, e, a, b, c, F1, 2 ); W_PRECALC_16_31_2(18, W4, W5, W6, W7, W0, Wtmp0, Wtmp1); R( c, d, e, a, b, F1, 3 ); W_PRECALC_16_31_3(19, W4, W5, W6, W7, W0, Wtmp0, Wtmp1); R( b, c, d, e, a, F1, 4 ); W_PRECALC_16_31_0(20, W3, W4, W5, W6, W7, Wtmp0, Wtmp1); R( a, b, c, d, e, F1, 5 ); W_PRECALC_16_31_1(21, W3, W4, W5, W6, W7, Wtmp0, Wtmp1); R( e, a, b, c, d, F1, 6 ); W_PRECALC_16_31_2(22, W3, W4, W5, W6, W7, Wtmp0, Wtmp1); R( d, e, a, b, c, F1, 7 ); W_PRECALC_16_31_3(23, W3, W4, W5, W6, W7, Wtmp0, Wtmp1); R( c, d, e, a, b, F1, 8 ); W_PRECALC_16_31_0(24, W2, W3, W4, W5, W6, Wtmp0, Wtmp1); R( b, c, d, e, a, F1, 9 ); W_PRECALC_16_31_1(25, W2, W3, W4, W5, W6, Wtmp0, Wtmp1); R( a, b, c, d, e, F1, 10 ); W_PRECALC_16_31_2(26, W2, W3, W4, W5, W6, Wtmp0, Wtmp1); R( e, a, b, c, d, F1, 11 ); W_PRECALC_16_31_3(27, W2, W3, W4, W5, W6, Wtmp0, Wtmp1); R( d, e, a, b, c, F1, 12 ); W_PRECALC_16_31_0(28, W1, W2, W3, W4, W5, Wtmp0, Wtmp1); R( c, d, e, a, b, F1, 13 ); W_PRECALC_16_31_1(29, W1, W2, W3, W4, W5, Wtmp0, Wtmp1); R( b, c, d, e, a, F1, 14 ); W_PRECALC_16_31_2(30, W1, W2, W3, W4, W5, Wtmp0, Wtmp1); R( a, b, c, d, e, F1, 15 ); W_PRECALC_16_31_3(31, W1, W2, W3, W4, W5, Wtmp0, Wtmp1); /* Transform 16-63 + Precalc 32-79. 
*/ R( e, a, b, c, d, F1, 16 ); W_PRECALC_32_79_0(32, W0, W1, W2, W3, W4, W5, W6, W7, Wtmp0); R( d, e, a, b, c, F1, 17 ); W_PRECALC_32_79_1(33, W0, W1, W2, W3, W4, W5, W6, W7, Wtmp0); R( c, d, e, a, b, F1, 18 ); W_PRECALC_32_79_2(34, W0, W1, W2, W3, W4, W5, W6, W7, Wtmp0); R( b, c, d, e, a, F1, 19 ); W_PRECALC_32_79_3(35, W0, W1, W2, W3, W4, W5, W6, W7, Wtmp0); R( a, b, c, d, e, F2, 20 ); W_PRECALC_32_79_0(36, W7, W0, W1, W2, W3, W4, W5, W6, Wtmp0); R( e, a, b, c, d, F2, 21 ); W_PRECALC_32_79_1(37, W7, W0, W1, W2, W3, W4, W5, W6, Wtmp0); R( d, e, a, b, c, F2, 22 ); W_PRECALC_32_79_2(38, W7, W0, W1, W2, W3, W4, W5, W6, Wtmp0); R( c, d, e, a, b, F2, 23 ); W_PRECALC_32_79_3(39, W7, W0, W1, W2, W3, W4, W5, W6, Wtmp0); R( b, c, d, e, a, F2, 24 ); W_PRECALC_32_79_0(40, W6, W7, W0, W1, W2, W3, W4, W5, Wtmp0); R( a, b, c, d, e, F2, 25 ); W_PRECALC_32_79_1(41, W6, W7, W0, W1, W2, W3, W4, W5, Wtmp0); R( e, a, b, c, d, F2, 26 ); W_PRECALC_32_79_2(42, W6, W7, W0, W1, W2, W3, W4, W5, Wtmp0); R( d, e, a, b, c, F2, 27 ); W_PRECALC_32_79_3(43, W6, W7, W0, W1, W2, W3, W4, W5, Wtmp0); R( c, d, e, a, b, F2, 28 ); W_PRECALC_32_79_0(44, W5, W6, W7, W0, W1, W2, W3, W4, Wtmp0); R( b, c, d, e, a, F2, 29 ); W_PRECALC_32_79_1(45, W5, W6, W7, W0, W1, W2, W3, W4, Wtmp0); R( a, b, c, d, e, F2, 30 ); W_PRECALC_32_79_2(46, W5, W6, W7, W0, W1, W2, W3, W4, Wtmp0); R( e, a, b, c, d, F2, 31 ); W_PRECALC_32_79_3(47, W5, W6, W7, W0, W1, W2, W3, W4, Wtmp0); R( d, e, a, b, c, F2, 32 ); W_PRECALC_32_79_0(48, W4, W5, W6, W7, W0, W1, W2, W3, Wtmp0); R( c, d, e, a, b, F2, 33 ); W_PRECALC_32_79_1(49, W4, W5, W6, W7, W0, W1, W2, W3, Wtmp0); R( b, c, d, e, a, F2, 34 ); W_PRECALC_32_79_2(50, W4, W5, W6, W7, W0, W1, W2, W3, Wtmp0); R( a, b, c, d, e, F2, 35 ); W_PRECALC_32_79_3(51, W4, W5, W6, W7, W0, W1, W2, W3, Wtmp0); R( e, a, b, c, d, F2, 36 ); W_PRECALC_32_79_0(52, W3, W4, W5, W6, W7, W0, W1, W2, Wtmp0); R( d, e, a, b, c, F2, 37 ); W_PRECALC_32_79_1(53, W3, W4, W5, W6, W7, W0, W1, W2, Wtmp0); R( c, d, e, a, b, F2, 38 ); W_PRECALC_32_79_2(54, W3, W4, W5, W6, W7, W0, W1, W2, Wtmp0); R( b, c, d, e, a, F2, 39 ); W_PRECALC_32_79_3(55, W3, W4, W5, W6, W7, W0, W1, W2, Wtmp0); R( a, b, c, d, e, F3, 40 ); W_PRECALC_32_79_0(56, W2, W3, W4, W5, W6, W7, W0, W1, Wtmp0); R( e, a, b, c, d, F3, 41 ); W_PRECALC_32_79_1(57, W2, W3, W4, W5, W6, W7, W0, W1, Wtmp0); R( d, e, a, b, c, F3, 42 ); W_PRECALC_32_79_2(58, W2, W3, W4, W5, W6, W7, W0, W1, Wtmp0); R( c, d, e, a, b, F3, 43 ); W_PRECALC_32_79_3(59, W2, W3, W4, W5, W6, W7, W0, W1, Wtmp0); R( b, c, d, e, a, F3, 44 ); W_PRECALC_32_79_0(60, W1, W2, W3, W4, W5, W6, W7, W0, Wtmp0); R( a, b, c, d, e, F3, 45 ); W_PRECALC_32_79_1(61, W1, W2, W3, W4, W5, W6, W7, W0, Wtmp0); R( e, a, b, c, d, F3, 46 ); W_PRECALC_32_79_2(62, W1, W2, W3, W4, W5, W6, W7, W0, Wtmp0); R( d, e, a, b, c, F3, 47 ); W_PRECALC_32_79_3(63, W1, W2, W3, W4, W5, W6, W7, W0, Wtmp0); R( c, d, e, a, b, F3, 48 ); W_PRECALC_32_79_0(64, W0, W1, W2, W3, W4, W5, W6, W7, Wtmp0); R( b, c, d, e, a, F3, 49 ); W_PRECALC_32_79_1(65, W0, W1, W2, W3, W4, W5, W6, W7, Wtmp0); R( a, b, c, d, e, F3, 50 ); W_PRECALC_32_79_2(66, W0, W1, W2, W3, W4, W5, W6, W7, Wtmp0); R( e, a, b, c, d, F3, 51 ); W_PRECALC_32_79_3(67, W0, W1, W2, W3, W4, W5, W6, W7, Wtmp0); R( d, e, a, b, c, F3, 52 ); W_PRECALC_32_79_0(68, W7, W0, W1, W2, W3, W4, W5, W6, Wtmp0); R( c, d, e, a, b, F3, 53 ); W_PRECALC_32_79_1(69, W7, W0, W1, W2, W3, W4, W5, W6, Wtmp0); R( b, c, d, e, a, F3, 54 ); W_PRECALC_32_79_2(70, W7, W0, W1, W2, W3, W4, W5, W6, Wtmp0); R( a, b, c, d, e, F3, 55 ); 
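	/* Throughout this loop each scalar round R() is paired with one
	 * step of the vectorized message-schedule precalculation for a
	 * later round (the W_PRECALC indices run 16 ahead), so the
	 * integer SHA-1 rounds and the XMM schedule work can overlap. */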
W_PRECALC_32_79_3(71, W7, W0, W1, W2, W3, W4, W5, W6, Wtmp0); R( e, a, b, c, d, F3, 56 ); W_PRECALC_32_79_0(72, W6, W7, W0, W1, W2, W3, W4, W5, Wtmp0); R( d, e, a, b, c, F3, 57 ); W_PRECALC_32_79_1(73, W6, W7, W0, W1, W2, W3, W4, W5, Wtmp0); R( c, d, e, a, b, F3, 58 ); W_PRECALC_32_79_2(74, W6, W7, W0, W1, W2, W3, W4, W5, Wtmp0); R( b, c, d, e, a, F3, 59 ); W_PRECALC_32_79_3(75, W6, W7, W0, W1, W2, W3, W4, W5, Wtmp0); R( a, b, c, d, e, F4, 60 ); W_PRECALC_32_79_0(76, W5, W6, W7, W0, W1, W2, W3, W4, Wtmp0); R( e, a, b, c, d, F4, 61 ); W_PRECALC_32_79_1(77, W5, W6, W7, W0, W1, W2, W3, W4, Wtmp0); R( d, e, a, b, c, F4, 62 ); W_PRECALC_32_79_2(78, W5, W6, W7, W0, W1, W2, W3, W4, Wtmp0); R( c, d, e, a, b, F4, 63 ); W_PRECALC_32_79_3(79, W5, W6, W7, W0, W1, W2, W3, W4, Wtmp0); decq RNBLKS; jz .Lend; /* Transform 64-79 + Precalc 0-15 of next block. */ R( b, c, d, e, a, F4, 64 ); W_PRECALC_00_15_0(0, W0, Wtmp0); R( a, b, c, d, e, F4, 65 ); W_PRECALC_00_15_1(1, W0, Wtmp0); R( e, a, b, c, d, F4, 66 ); W_PRECALC_00_15_2(2, W0, Wtmp0); R( d, e, a, b, c, F4, 67 ); W_PRECALC_00_15_3(3, W0, Wtmp0); R( c, d, e, a, b, F4, 68 ); W_PRECALC_00_15_0(4, W7, Wtmp0); R( b, c, d, e, a, F4, 69 ); W_PRECALC_00_15_1(5, W7, Wtmp0); R( a, b, c, d, e, F4, 70 ); W_PRECALC_00_15_2(6, W7, Wtmp0); R( e, a, b, c, d, F4, 71 ); W_PRECALC_00_15_3(7, W7, Wtmp0); R( d, e, a, b, c, F4, 72 ); W_PRECALC_00_15_0(8, W6, Wtmp0); R( c, d, e, a, b, F4, 73 ); W_PRECALC_00_15_1(9, W6, Wtmp0); R( b, c, d, e, a, F4, 74 ); W_PRECALC_00_15_2(10, W6, Wtmp0); R( a, b, c, d, e, F4, 75 ); W_PRECALC_00_15_3(11, W6, Wtmp0); R( e, a, b, c, d, F4, 76 ); W_PRECALC_00_15_0(12, W5, Wtmp0); R( d, e, a, b, c, F4, 77 ); W_PRECALC_00_15_1(13, W5, Wtmp0); R( c, d, e, a, b, F4, 78 ); addl state_h0(RSTATE), a; W_PRECALC_00_15_2(14, W5, Wtmp0); R( b, c, d, e, a, F4, 79 ); W_PRECALC_00_15_3(15, W5, Wtmp0); /* Update the chaining variables. */ addl state_h3(RSTATE), d; addl state_h2(RSTATE), c; addl state_h1(RSTATE), b; addl state_h4(RSTATE), e; movl d, state_h3(RSTATE); movl c, state_h2(RSTATE); movl b, state_h1(RSTATE); movl a, state_h0(RSTATE); movl e, state_h4(RSTATE); jmp .Loop; .align 16 .Lend: vzeroall; /* Transform 64-79. */ R( b, c, d, e, a, F4, 64 ); R( a, b, c, d, e, F4, 65 ); R( e, a, b, c, d, F4, 66 ); R( d, e, a, b, c, F4, 67 ); R( c, d, e, a, b, F4, 68 ); R( b, c, d, e, a, F4, 69 ); R( a, b, c, d, e, F4, 70 ); R( e, a, b, c, d, F4, 71 ); R( d, e, a, b, c, F4, 72 ); R( c, d, e, a, b, F4, 73 ); R( b, c, d, e, a, F4, 74 ); R( a, b, c, d, e, F4, 75 ); R( e, a, b, c, d, F4, 76 ); R( d, e, a, b, c, F4, 77 ); R( c, d, e, a, b, F4, 78 ); addl state_h0(RSTATE), a; R( b, c, d, e, a, F4, 79 ); /* Update the chaining variables. */ addl state_h3(RSTATE), d; addl state_h2(RSTATE), c; addl state_h1(RSTATE), b; addl state_h4(RSTATE), e; movl d, state_h3(RSTATE); movl c, state_h2(RSTATE); movl b, state_h1(RSTATE); movl a, state_h0(RSTATE); movl e, state_h4(RSTATE); movq ROLDSTACK, %rsp; popq %rbp; popq %rbx; /* burn_stack */ movl $(16*4 + 2*8 + 31), %eax; .Lret: ret; #endif #endif diff --git a/cipher/sha1-avx-bmi2-amd64.S b/cipher/sha1-avx-bmi2-amd64.S index 22bcbb3c..b267693f 100644 --- a/cipher/sha1-avx-bmi2-amd64.S +++ b/cipher/sha1-avx-bmi2-amd64.S @@ -1,425 +1,424 @@ /* sha1-avx-bmi2-amd64.S - Intel AVX/BMI2 accelerated SHA-1 transform function * Copyright (C) 2013 Jussi Kivilinna * * Based on sha1.c: * Copyright (C) 1998, 2001, 2002, 2003, 2008 Free Software Foundation, Inc. * * This file is part of Libgcrypt. 
* * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . */ /* * Intel SSSE3 accelerated SHA-1 implementation based on white paper: * "Improving the Performance of the Secure Hash Algorithm (SHA-1)" * http://software.intel.com/en-us/articles/improving-the-performance-of-the-secure-hash-algorithm-1 */ #ifdef __x86_64__ #include #if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && \ defined(HAVE_GCC_INLINE_ASM_BMI2) && \ defined(HAVE_GCC_INLINE_ASM_AVX) && defined(USE_SHA1) #ifdef __PIC__ # define RIP (%rip) #else # define RIP #endif #ifdef HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS # define ELF(...) __VA_ARGS__ #else # define ELF(...) /*_*/ #endif /* Context structure */ #define state_h0 0 #define state_h1 4 #define state_h2 8 #define state_h3 12 #define state_h4 16 /* Constants */ -.data +.text #define K1 0x5A827999 #define K2 0x6ED9EBA1 #define K3 0x8F1BBCDC #define K4 0xCA62C1D6 .align 16 .LK_XMM: .LK1: .long K1, K1, K1, K1 .LK2: .long K2, K2, K2, K2 .LK3: .long K3, K3, K3, K3 .LK4: .long K4, K4, K4, K4 .Lbswap_shufb_ctl: .long 0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f /* Register macros */ #define RSTATE %r8 #define RDATA %r9 #define ROLDSTACK %r10 #define RNBLKS %r11 #define a %eax #define b %ebx #define c %ecx #define d %edx #define e %edi #define RT0 %esi #define RT1 %ebp #define Wtmp0 %xmm0 #define Wtmp1 %xmm1 #define W0 %xmm2 #define W1 %xmm3 #define W2 %xmm4 #define W3 %xmm5 #define W4 %xmm6 #define W5 %xmm7 #define W6 %xmm8 #define W7 %xmm9 #define BSWAP_REG %xmm10 /* Round function macros. */ #define WK(i) (((i) & 15) * 4)(%rsp) #define R_F1(a,b,c,d,e,i) \ movl c, RT0; \ andn d, b, RT1; \ addl WK(i), e; \ andl b, RT0; \ rorxl $2, b, b; \ addl RT1, e; \ leal (RT0,e), e; \ rorxl $27, a, RT1; \ addl RT1, e; #define R_F2(a,b,c,d,e,i) \ movl c, RT0; \ addl WK(i), e; \ xorl b, RT0; \ rorxl $2, b, b; \ xorl d, RT0; \ leal (RT0,e), e; \ rorxl $27, a, RT1; \ addl RT1, e; #define R_F3(a,b,c,d,e,i) \ movl c, RT0; \ movl b, RT1; \ xorl b, RT0; \ andl c, RT1; \ andl d, RT0; \ addl RT1, e; \ addl WK(i), e; \ rorxl $2, b, b; \ leal (RT0,e), e; \ rorxl $27, a, RT1; \ addl RT1, e; #define R_F4(a,b,c,d,e,i) R_F2(a,b,c,d,e,i) #define R(a,b,c,d,e,f,i) \ R_##f(a,b,c,d,e,i) /* Input expansion macros. 
*/ #define W_PRECALC_00_15_0(i, W, tmp0) \ vmovdqu (4*(i))(RDATA), tmp0; #define W_PRECALC_00_15_1(i, W, tmp0) \ vpshufb BSWAP_REG, tmp0, W; #define W_PRECALC_00_15_2(i, W, tmp0) \ vpaddd (.LK_XMM + ((i)/20)*16) RIP, W, tmp0; #define W_PRECALC_00_15_3(i, W, tmp0) \ vmovdqa tmp0, WK(i&~3); #define W_PRECALC_16_31_0(i, W, W_m04, W_m08, W_m12, W_m16, tmp0, tmp1) \ vpalignr $8, W_m16, W_m12, W; \ vpsrldq $4, W_m04, tmp0; \ vpxor W_m08, W, W; #define W_PRECALC_16_31_1(i, W, W_m04, W_m08, W_m12, W_m16, tmp0, tmp1) \ vpxor W_m16, tmp0, tmp0; \ vpxor tmp0, W, W; \ vpslld $1, W, tmp0; \ vpslldq $12, W, tmp1; \ vpsrld $31, W, W; #define W_PRECALC_16_31_2(i, W, W_m04, W_m08, W_m12, W_m16, tmp0, tmp1) \ vpor W, tmp0, tmp0; \ vpsrld $30, tmp1, W; \ vpslld $2, tmp1, tmp1; #define W_PRECALC_16_31_3(i, W, W_m04, W_m08, W_m12, W_m16, tmp0, tmp1) \ vpxor W, tmp0, tmp0; \ vpxor tmp1, tmp0, W; \ vpaddd (.LK_XMM + ((i)/20)*16) RIP, W, tmp0; \ vmovdqa tmp0, WK((i)&~3); #define W_PRECALC_32_79_0(i, W, W_m04, W_m08, W_m12, W_m16, W_m20, W_m24, W_m28, tmp0) \ vpxor W_m28, W, W; \ vpalignr $8, W_m08, W_m04, tmp0; #define W_PRECALC_32_79_1(i, W, W_m04, W_m08, W_m12, W_m16, W_m20, W_m24, W_m28, tmp0) \ vpxor W_m16, W, W; \ vpxor tmp0, W, W; #define W_PRECALC_32_79_2(i, W, W_m04, W_m08, W_m12, W_m16, W_m20, W_m24, W_m28, tmp0) \ vpsrld $30, W, tmp0; \ vpslld $2, W, W; #define W_PRECALC_32_79_3(i, W, W_m04, W_m08, W_m12, W_m16, W_m20, W_m24, W_m28, tmp0) \ vpor W, tmp0, W; \ vpaddd (.LK_XMM + ((i)/20)*16) RIP, W, tmp0; \ vmovdqa tmp0, WK((i)&~3); /* * Transform nblks*64 bytes (nblks*16 32-bit words) at DATA. * * unsigned int * _gcry_sha1_transform_amd64_avx_bmi2 (void *ctx, const unsigned char *data, * size_t nblks) */ -.text .globl _gcry_sha1_transform_amd64_avx_bmi2 ELF(.type _gcry_sha1_transform_amd64_avx_bmi2,@function) .align 16 _gcry_sha1_transform_amd64_avx_bmi2: /* input: * %rdi: ctx, CTX * %rsi: data (64*nblks bytes) * %rdx: nblks */ xorl %eax, %eax; cmpq $0, %rdx; jz .Lret; vzeroupper; movq %rdx, RNBLKS; movq %rdi, RSTATE; movq %rsi, RDATA; pushq %rbx; pushq %rbp; movq %rsp, ROLDSTACK; subq $(16*4), %rsp; andq $(~31), %rsp; /* Get the values of the chaining variables. */ movl state_h0(RSTATE), a; movl state_h1(RSTATE), b; movl state_h2(RSTATE), c; movl state_h3(RSTATE), d; movl state_h4(RSTATE), e; movdqa .Lbswap_shufb_ctl RIP, BSWAP_REG; /* Precalc 0-15. */ W_PRECALC_00_15_0(0, W0, Wtmp0); W_PRECALC_00_15_1(1, W0, Wtmp0); W_PRECALC_00_15_2(2, W0, Wtmp0); W_PRECALC_00_15_3(3, W0, Wtmp0); W_PRECALC_00_15_0(4, W7, Wtmp0); W_PRECALC_00_15_1(5, W7, Wtmp0); W_PRECALC_00_15_2(6, W7, Wtmp0); W_PRECALC_00_15_3(7, W7, Wtmp0); W_PRECALC_00_15_0(8, W6, Wtmp0); W_PRECALC_00_15_1(9, W6, Wtmp0); W_PRECALC_00_15_2(10, W6, Wtmp0); W_PRECALC_00_15_3(11, W6, Wtmp0); W_PRECALC_00_15_0(12, W5, Wtmp0); W_PRECALC_00_15_1(13, W5, Wtmp0); W_PRECALC_00_15_2(14, W5, Wtmp0); W_PRECALC_00_15_3(15, W5, Wtmp0); .align 8 .Loop: addq $64, RDATA; /* Transform 0-15 + Precalc 16-31. 
*/ R( a, b, c, d, e, F1, 0 ); W_PRECALC_16_31_0(16, W4, W5, W6, W7, W0, Wtmp0, Wtmp1); R( e, a, b, c, d, F1, 1 ); W_PRECALC_16_31_1(17, W4, W5, W6, W7, W0, Wtmp0, Wtmp1); R( d, e, a, b, c, F1, 2 ); W_PRECALC_16_31_2(18, W4, W5, W6, W7, W0, Wtmp0, Wtmp1); R( c, d, e, a, b, F1, 3 ); W_PRECALC_16_31_3(19, W4, W5, W6, W7, W0, Wtmp0, Wtmp1); R( b, c, d, e, a, F1, 4 ); W_PRECALC_16_31_0(20, W3, W4, W5, W6, W7, Wtmp0, Wtmp1); R( a, b, c, d, e, F1, 5 ); W_PRECALC_16_31_1(21, W3, W4, W5, W6, W7, Wtmp0, Wtmp1); R( e, a, b, c, d, F1, 6 ); W_PRECALC_16_31_2(22, W3, W4, W5, W6, W7, Wtmp0, Wtmp1); R( d, e, a, b, c, F1, 7 ); W_PRECALC_16_31_3(23, W3, W4, W5, W6, W7, Wtmp0, Wtmp1); R( c, d, e, a, b, F1, 8 ); W_PRECALC_16_31_0(24, W2, W3, W4, W5, W6, Wtmp0, Wtmp1); R( b, c, d, e, a, F1, 9 ); W_PRECALC_16_31_1(25, W2, W3, W4, W5, W6, Wtmp0, Wtmp1); R( a, b, c, d, e, F1, 10 ); W_PRECALC_16_31_2(26, W2, W3, W4, W5, W6, Wtmp0, Wtmp1); R( e, a, b, c, d, F1, 11 ); W_PRECALC_16_31_3(27, W2, W3, W4, W5, W6, Wtmp0, Wtmp1); R( d, e, a, b, c, F1, 12 ); W_PRECALC_16_31_0(28, W1, W2, W3, W4, W5, Wtmp0, Wtmp1); R( c, d, e, a, b, F1, 13 ); W_PRECALC_16_31_1(29, W1, W2, W3, W4, W5, Wtmp0, Wtmp1); R( b, c, d, e, a, F1, 14 ); W_PRECALC_16_31_2(30, W1, W2, W3, W4, W5, Wtmp0, Wtmp1); R( a, b, c, d, e, F1, 15 ); W_PRECALC_16_31_3(31, W1, W2, W3, W4, W5, Wtmp0, Wtmp1); /* Transform 16-63 + Precalc 32-79. */ R( e, a, b, c, d, F1, 16 ); W_PRECALC_32_79_0(32, W0, W1, W2, W3, W4, W5, W6, W7, Wtmp0); R( d, e, a, b, c, F1, 17 ); W_PRECALC_32_79_1(33, W0, W1, W2, W3, W4, W5, W6, W7, Wtmp0); R( c, d, e, a, b, F1, 18 ); W_PRECALC_32_79_2(34, W0, W1, W2, W3, W4, W5, W6, W7, Wtmp0); R( b, c, d, e, a, F1, 19 ); W_PRECALC_32_79_3(35, W0, W1, W2, W3, W4, W5, W6, W7, Wtmp0); R( a, b, c, d, e, F2, 20 ); W_PRECALC_32_79_0(36, W7, W0, W1, W2, W3, W4, W5, W6, Wtmp0); R( e, a, b, c, d, F2, 21 ); W_PRECALC_32_79_1(37, W7, W0, W1, W2, W3, W4, W5, W6, Wtmp0); R( d, e, a, b, c, F2, 22 ); W_PRECALC_32_79_2(38, W7, W0, W1, W2, W3, W4, W5, W6, Wtmp0); R( c, d, e, a, b, F2, 23 ); W_PRECALC_32_79_3(39, W7, W0, W1, W2, W3, W4, W5, W6, Wtmp0); R( b, c, d, e, a, F2, 24 ); W_PRECALC_32_79_0(40, W6, W7, W0, W1, W2, W3, W4, W5, Wtmp0); R( a, b, c, d, e, F2, 25 ); W_PRECALC_32_79_1(41, W6, W7, W0, W1, W2, W3, W4, W5, Wtmp0); R( e, a, b, c, d, F2, 26 ); W_PRECALC_32_79_2(42, W6, W7, W0, W1, W2, W3, W4, W5, Wtmp0); R( d, e, a, b, c, F2, 27 ); W_PRECALC_32_79_3(43, W6, W7, W0, W1, W2, W3, W4, W5, Wtmp0); R( c, d, e, a, b, F2, 28 ); W_PRECALC_32_79_0(44, W5, W6, W7, W0, W1, W2, W3, W4, Wtmp0); R( b, c, d, e, a, F2, 29 ); W_PRECALC_32_79_1(45, W5, W6, W7, W0, W1, W2, W3, W4, Wtmp0); R( a, b, c, d, e, F2, 30 ); W_PRECALC_32_79_2(46, W5, W6, W7, W0, W1, W2, W3, W4, Wtmp0); R( e, a, b, c, d, F2, 31 ); W_PRECALC_32_79_3(47, W5, W6, W7, W0, W1, W2, W3, W4, Wtmp0); R( d, e, a, b, c, F2, 32 ); W_PRECALC_32_79_0(48, W4, W5, W6, W7, W0, W1, W2, W3, Wtmp0); R( c, d, e, a, b, F2, 33 ); W_PRECALC_32_79_1(49, W4, W5, W6, W7, W0, W1, W2, W3, Wtmp0); R( b, c, d, e, a, F2, 34 ); W_PRECALC_32_79_2(50, W4, W5, W6, W7, W0, W1, W2, W3, Wtmp0); R( a, b, c, d, e, F2, 35 ); W_PRECALC_32_79_3(51, W4, W5, W6, W7, W0, W1, W2, W3, Wtmp0); R( e, a, b, c, d, F2, 36 ); W_PRECALC_32_79_0(52, W3, W4, W5, W6, W7, W0, W1, W2, Wtmp0); R( d, e, a, b, c, F2, 37 ); W_PRECALC_32_79_1(53, W3, W4, W5, W6, W7, W0, W1, W2, Wtmp0); R( c, d, e, a, b, F2, 38 ); W_PRECALC_32_79_2(54, W3, W4, W5, W6, W7, W0, W1, W2, Wtmp0); R( b, c, d, e, a, F2, 39 ); W_PRECALC_32_79_3(55, W3, W4, W5, W6, W7, W0, W1, W2, Wtmp0); R( 
a, b, c, d, e, F3, 40 ); W_PRECALC_32_79_0(56, W2, W3, W4, W5, W6, W7, W0, W1, Wtmp0); R( e, a, b, c, d, F3, 41 ); W_PRECALC_32_79_1(57, W2, W3, W4, W5, W6, W7, W0, W1, Wtmp0); R( d, e, a, b, c, F3, 42 ); W_PRECALC_32_79_2(58, W2, W3, W4, W5, W6, W7, W0, W1, Wtmp0); R( c, d, e, a, b, F3, 43 ); W_PRECALC_32_79_3(59, W2, W3, W4, W5, W6, W7, W0, W1, Wtmp0); R( b, c, d, e, a, F3, 44 ); W_PRECALC_32_79_0(60, W1, W2, W3, W4, W5, W6, W7, W0, Wtmp0); R( a, b, c, d, e, F3, 45 ); W_PRECALC_32_79_1(61, W1, W2, W3, W4, W5, W6, W7, W0, Wtmp0); R( e, a, b, c, d, F3, 46 ); W_PRECALC_32_79_2(62, W1, W2, W3, W4, W5, W6, W7, W0, Wtmp0); R( d, e, a, b, c, F3, 47 ); W_PRECALC_32_79_3(63, W1, W2, W3, W4, W5, W6, W7, W0, Wtmp0); R( c, d, e, a, b, F3, 48 ); W_PRECALC_32_79_0(64, W0, W1, W2, W3, W4, W5, W6, W7, Wtmp0); R( b, c, d, e, a, F3, 49 ); W_PRECALC_32_79_1(65, W0, W1, W2, W3, W4, W5, W6, W7, Wtmp0); R( a, b, c, d, e, F3, 50 ); W_PRECALC_32_79_2(66, W0, W1, W2, W3, W4, W5, W6, W7, Wtmp0); R( e, a, b, c, d, F3, 51 ); W_PRECALC_32_79_3(67, W0, W1, W2, W3, W4, W5, W6, W7, Wtmp0); R( d, e, a, b, c, F3, 52 ); W_PRECALC_32_79_0(68, W7, W0, W1, W2, W3, W4, W5, W6, Wtmp0); R( c, d, e, a, b, F3, 53 ); W_PRECALC_32_79_1(69, W7, W0, W1, W2, W3, W4, W5, W6, Wtmp0); R( b, c, d, e, a, F3, 54 ); W_PRECALC_32_79_2(70, W7, W0, W1, W2, W3, W4, W5, W6, Wtmp0); R( a, b, c, d, e, F3, 55 ); W_PRECALC_32_79_3(71, W7, W0, W1, W2, W3, W4, W5, W6, Wtmp0); R( e, a, b, c, d, F3, 56 ); W_PRECALC_32_79_0(72, W6, W7, W0, W1, W2, W3, W4, W5, Wtmp0); R( d, e, a, b, c, F3, 57 ); W_PRECALC_32_79_1(73, W6, W7, W0, W1, W2, W3, W4, W5, Wtmp0); R( c, d, e, a, b, F3, 58 ); W_PRECALC_32_79_2(74, W6, W7, W0, W1, W2, W3, W4, W5, Wtmp0); R( b, c, d, e, a, F3, 59 ); W_PRECALC_32_79_3(75, W6, W7, W0, W1, W2, W3, W4, W5, Wtmp0); R( a, b, c, d, e, F4, 60 ); W_PRECALC_32_79_0(76, W5, W6, W7, W0, W1, W2, W3, W4, Wtmp0); R( e, a, b, c, d, F4, 61 ); W_PRECALC_32_79_1(77, W5, W6, W7, W0, W1, W2, W3, W4, Wtmp0); R( d, e, a, b, c, F4, 62 ); W_PRECALC_32_79_2(78, W5, W6, W7, W0, W1, W2, W3, W4, Wtmp0); R( c, d, e, a, b, F4, 63 ); W_PRECALC_32_79_3(79, W5, W6, W7, W0, W1, W2, W3, W4, Wtmp0); decq RNBLKS; jz .Lend; /* Transform 64-79 + Precalc 0-15 of next block. */ R( b, c, d, e, a, F4, 64 ); W_PRECALC_00_15_0(0, W0, Wtmp0); R( a, b, c, d, e, F4, 65 ); W_PRECALC_00_15_1(1, W0, Wtmp0); R( e, a, b, c, d, F4, 66 ); W_PRECALC_00_15_2(2, W0, Wtmp0); R( d, e, a, b, c, F4, 67 ); W_PRECALC_00_15_3(3, W0, Wtmp0); R( c, d, e, a, b, F4, 68 ); W_PRECALC_00_15_0(4, W7, Wtmp0); R( b, c, d, e, a, F4, 69 ); W_PRECALC_00_15_1(5, W7, Wtmp0); R( a, b, c, d, e, F4, 70 ); W_PRECALC_00_15_2(6, W7, Wtmp0); R( e, a, b, c, d, F4, 71 ); W_PRECALC_00_15_3(7, W7, Wtmp0); R( d, e, a, b, c, F4, 72 ); W_PRECALC_00_15_0(8, W6, Wtmp0); R( c, d, e, a, b, F4, 73 ); W_PRECALC_00_15_1(9, W6, Wtmp0); R( b, c, d, e, a, F4, 74 ); W_PRECALC_00_15_2(10, W6, Wtmp0); R( a, b, c, d, e, F4, 75 ); W_PRECALC_00_15_3(11, W6, Wtmp0); R( e, a, b, c, d, F4, 76 ); W_PRECALC_00_15_0(12, W5, Wtmp0); R( d, e, a, b, c, F4, 77 ); W_PRECALC_00_15_1(13, W5, Wtmp0); R( c, d, e, a, b, F4, 78 ); addl state_h0(RSTATE), a; W_PRECALC_00_15_2(14, W5, Wtmp0); R( b, c, d, e, a, F4, 79 ); W_PRECALC_00_15_3(15, W5, Wtmp0); /* Update the chaining variables. 
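Taken together, each block runs 80 rounds in four groups of 20 (F1/K1 through F4/K4), with the schedule of the following block interleaved into rounds 64..79 whenever more data remains, and ends by folding the working variables back into state_h0..state_h4. A compact, self-contained scalar model of one block (illustrative only, not code from this file):

#include <stdint.h>

static inline uint32_t rol32 (uint32_t x, unsigned int n)
{
  return (x << n) | (x >> (32 - n));
}

/* One 80-round block: four groups of 20 rounds followed by the
   feed-forward into the chaining variables.  wk[i] = W[i] + K as
   prepared by the precalc macros. */
static void sha1_compress (uint32_t h[5], const uint32_t wk[80])
{
  uint32_t a = h[0], b = h[1], c = h[2], d = h[3], e = h[4];
  int i;

  for (i = 0; i < 80; i++)
    {
      uint32_t f, t;

      if (i < 20)
        f = (b & c) | (~b & d);
      else if (i < 40)
        f = b ^ c ^ d;
      else if (i < 60)
        f = (b & c) | (b & d) | (c & d);
      else
        f = b ^ c ^ d;

      t = rol32 (a, 5) + f + e + wk[i];
      e = d; d = c; c = rol32 (b, 30); b = a; a = t;
    }

  h[0] += a; h[1] += b; h[2] += c; h[3] += d; h[4] += e;
}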
*/ addl state_h3(RSTATE), d; addl state_h2(RSTATE), c; addl state_h1(RSTATE), b; addl state_h4(RSTATE), e; movl d, state_h3(RSTATE); movl c, state_h2(RSTATE); movl b, state_h1(RSTATE); movl a, state_h0(RSTATE); movl e, state_h4(RSTATE); jmp .Loop; .align 16 .Lend: vzeroall; /* Transform 64-79. */ R( b, c, d, e, a, F4, 64 ); R( a, b, c, d, e, F4, 65 ); R( e, a, b, c, d, F4, 66 ); R( d, e, a, b, c, F4, 67 ); R( c, d, e, a, b, F4, 68 ); R( b, c, d, e, a, F4, 69 ); R( a, b, c, d, e, F4, 70 ); R( e, a, b, c, d, F4, 71 ); R( d, e, a, b, c, F4, 72 ); R( c, d, e, a, b, F4, 73 ); R( b, c, d, e, a, F4, 74 ); R( a, b, c, d, e, F4, 75 ); R( e, a, b, c, d, F4, 76 ); R( d, e, a, b, c, F4, 77 ); R( c, d, e, a, b, F4, 78 ); addl state_h0(RSTATE), a; R( b, c, d, e, a, F4, 79 ); /* Update the chaining variables. */ addl state_h3(RSTATE), d; addl state_h2(RSTATE), c; addl state_h1(RSTATE), b; addl state_h4(RSTATE), e; movl d, state_h3(RSTATE); movl c, state_h2(RSTATE); movl b, state_h1(RSTATE); movl a, state_h0(RSTATE); movl e, state_h4(RSTATE); movq ROLDSTACK, %rsp; popq %rbp; popq %rbx; /* burn_stack */ movl $(16*4 + 2*8 + 31), %eax; .Lret: ret; #endif #endif diff --git a/cipher/sha1-ssse3-amd64.S b/cipher/sha1-ssse3-amd64.S index 98a19e60..2b439476 100644 --- a/cipher/sha1-ssse3-amd64.S +++ b/cipher/sha1-ssse3-amd64.S @@ -1,435 +1,434 @@ /* sha1-ssse3-amd64.S - Intel SSSE3 accelerated SHA-1 transform function * Copyright (C) 2013 Jussi Kivilinna * * Based on sha1.c: * Copyright (C) 1998, 2001, 2002, 2003, 2008 Free Software Foundation, Inc. * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . */ /* * Intel SSSE3 accelerated SHA-1 implementation based on white paper: * "Improving the Performance of the Secure Hash Algorithm (SHA-1)" * http://software.intel.com/en-us/articles/improving-the-performance-of-the-secure-hash-algorithm-1 */ #ifdef __x86_64__ #include #if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && \ defined(HAVE_GCC_INLINE_ASM_SSSE3) && defined(USE_SHA1) #ifdef __PIC__ # define RIP (%rip) #else # define RIP #endif #ifdef HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS # define ELF(...) __VA_ARGS__ #else # define ELF(...) 
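Both SHA-1 entry points in this patch return, in %eax, the amount of stack they touched (the `/* burn_stack */` constant above: 16*4 bytes of WK slots, 2*8 bytes of saved registers, plus up to 31 bytes of alignment slack), so the generic hash code can wipe that region afterwards. A hypothetical caller sketch; the transform prototype is the one given in the files' comments, and `_gcry_burn_stack` is assumed here to be libgcrypt's internal stack-wiping helper with this signature:

#include <stddef.h>

/* Prototype as documented in the comment above the function. */
unsigned int _gcry_sha1_transform_amd64_avx_bmi2 (void *ctx,
                                                  const unsigned char *data,
                                                  size_t nblks);

/* Assumed libgcrypt helper; the signature is an assumption, not taken
   from this patch. */
void _gcry_burn_stack (unsigned int bytes);

static void
transform_blocks (void *ctx, const unsigned char *data, size_t nblks)
{
  unsigned int burn;

  burn = _gcry_sha1_transform_amd64_avx_bmi2 (ctx, data, nblks);
  _gcry_burn_stack (burn);   /* wipe the WK slots and spilled registers */
}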
/*_*/ #endif /* Context structure */ #define state_h0 0 #define state_h1 4 #define state_h2 8 #define state_h3 12 #define state_h4 16 /* Constants */ -.data +.text #define K1 0x5A827999 #define K2 0x6ED9EBA1 #define K3 0x8F1BBCDC #define K4 0xCA62C1D6 .align 16 .LK_XMM: .LK1: .long K1, K1, K1, K1 .LK2: .long K2, K2, K2, K2 .LK3: .long K3, K3, K3, K3 .LK4: .long K4, K4, K4, K4 .Lbswap_shufb_ctl: .long 0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f /* Register macros */ #define RSTATE %r8 #define RDATA %r9 #define ROLDSTACK %r10 #define RNBLKS %r11 #define a %eax #define b %ebx #define c %ecx #define d %edx #define e %edi #define RT0 %esi #define RT1 %ebp #define Wtmp0 %xmm0 #define Wtmp1 %xmm1 #define W0 %xmm2 #define W1 %xmm3 #define W2 %xmm4 #define W3 %xmm5 #define W4 %xmm6 #define W5 %xmm7 #define W6 %xmm8 #define W7 %xmm9 #define BSWAP_REG %xmm10 /* Round function macros. */ #define WK(i) (((i) & 15) * 4)(%rsp) #define R_F1(a,b,c,d,e,i) \ movl c, RT0; \ addl WK(i), e; \ xorl d, RT0; \ movl a, RT1; \ andl b, RT0; \ roll $30, b; \ xorl d, RT0; \ leal (RT0,e), e; \ roll $5, RT1; \ addl RT1, e; #define R_F2(a,b,c,d,e,i) \ movl c, RT0; \ addl WK(i), e; \ xorl b, RT0; \ roll $30, b; \ xorl d, RT0; \ movl a, RT1; \ leal (RT0,e), e; \ roll $5, RT1; \ addl RT1, e; #define R_F3(a,b,c,d,e,i) \ movl c, RT0; \ movl b, RT1; \ xorl b, RT0; \ andl c, RT1; \ andl d, RT0; \ addl RT1, e; \ addl WK(i), e; \ roll $30, b; \ movl a, RT1; \ leal (RT0,e), e; \ roll $5, RT1; \ addl RT1, e; #define R_F4(a,b,c,d,e,i) R_F2(a,b,c,d,e,i) #define R(a,b,c,d,e,f,i) \ R_##f(a,b,c,d,e,i) /* Input expansion macros. */ #define W_PRECALC_00_15_0(i, W, tmp0) \ movdqu (4*(i))(RDATA), tmp0; #define W_PRECALC_00_15_1(i, W, tmp0) \ pshufb BSWAP_REG, tmp0; \ movdqa tmp0, W; #define W_PRECALC_00_15_2(i, W, tmp0) \ paddd (.LK_XMM + ((i)/20)*16) RIP, tmp0; #define W_PRECALC_00_15_3(i, W, tmp0) \ movdqa tmp0, WK(i&~3); #define W_PRECALC_16_31_0(i, W, W_m04, W_m08, W_m12, W_m16, tmp0, tmp1) \ movdqa W_m12, W; \ palignr $8, W_m16, W; \ movdqa W_m04, tmp0; \ psrldq $4, tmp0; \ pxor W_m08, W; #define W_PRECALC_16_31_1(i, W, W_m04, W_m08, W_m12, W_m16, tmp0, tmp1) \ pxor W_m16, tmp0; \ pxor tmp0, W; \ movdqa W, tmp1; \ movdqa W, tmp0; \ pslldq $12, tmp1; #define W_PRECALC_16_31_2(i, W, W_m04, W_m08, W_m12, W_m16, tmp0, tmp1) \ psrld $31, W; \ pslld $1, tmp0; \ por W, tmp0; \ movdqa tmp1, W; \ psrld $30, tmp1; \ pslld $2, W; #define W_PRECALC_16_31_3(i, W, W_m04, W_m08, W_m12, W_m16, tmp0, tmp1) \ pxor W, tmp0; \ pxor tmp1, tmp0; \ movdqa tmp0, W; \ paddd (.LK_XMM + ((i)/20)*16) RIP, tmp0; \ movdqa tmp0, WK((i)&~3); #define W_PRECALC_32_79_0(i, W, W_m04, W_m08, W_m12, W_m16, W_m20, W_m24, W_m28, tmp0) \ movdqa W_m04, tmp0; \ pxor W_m28, W; \ palignr $8, W_m08, tmp0; #define W_PRECALC_32_79_1(i, W, W_m04, W_m08, W_m12, W_m16, W_m20, W_m24, W_m28, tmp0) \ pxor W_m16, W; \ pxor tmp0, W; \ movdqa W, tmp0; #define W_PRECALC_32_79_2(i, W, W_m04, W_m08, W_m12, W_m16, W_m20, W_m24, W_m28, tmp0) \ psrld $30, W; \ pslld $2, tmp0; \ por W, tmp0; #define W_PRECALC_32_79_3(i, W, W_m04, W_m08, W_m12, W_m16, W_m20, W_m24, W_m28, tmp0) \ movdqa tmp0, W; \ paddd (.LK_XMM + ((i)/20)*16) RIP, tmp0; \ movdqa tmp0, WK((i)&~3); #define CLEAR_REG(reg) pxor reg, reg; /* * Transform nblks*64 bytes (nblks*16 32-bit words) at DATA. 
* * unsigned int * _gcry_sha1_transform_amd64_ssse3 (void *ctx, const unsigned char *data, * size_t nblks) */ -.text .globl _gcry_sha1_transform_amd64_ssse3 ELF(.type _gcry_sha1_transform_amd64_ssse3,@function) .align 16 _gcry_sha1_transform_amd64_ssse3: /* input: * %rdi: ctx, CTX * %rsi: data (64*nblks bytes) * %rdx: nblks */ xorl %eax, %eax; cmpq $0, %rdx; jz .Lret; movq %rdx, RNBLKS; movq %rdi, RSTATE; movq %rsi, RDATA; pushq %rbx; pushq %rbp; movq %rsp, ROLDSTACK; subq $(16*4), %rsp; andq $(~31), %rsp; /* Get the values of the chaining variables. */ movl state_h0(RSTATE), a; movl state_h1(RSTATE), b; movl state_h2(RSTATE), c; movl state_h3(RSTATE), d; movl state_h4(RSTATE), e; movdqa .Lbswap_shufb_ctl RIP, BSWAP_REG; /* Precalc 0-15. */ W_PRECALC_00_15_0(0, W0, Wtmp0); W_PRECALC_00_15_1(1, W0, Wtmp0); W_PRECALC_00_15_2(2, W0, Wtmp0); W_PRECALC_00_15_3(3, W0, Wtmp0); W_PRECALC_00_15_0(4, W7, Wtmp0); W_PRECALC_00_15_1(5, W7, Wtmp0); W_PRECALC_00_15_2(6, W7, Wtmp0); W_PRECALC_00_15_3(7, W7, Wtmp0); W_PRECALC_00_15_0(8, W6, Wtmp0); W_PRECALC_00_15_1(9, W6, Wtmp0); W_PRECALC_00_15_2(10, W6, Wtmp0); W_PRECALC_00_15_3(11, W6, Wtmp0); W_PRECALC_00_15_0(12, W5, Wtmp0); W_PRECALC_00_15_1(13, W5, Wtmp0); W_PRECALC_00_15_2(14, W5, Wtmp0); W_PRECALC_00_15_3(15, W5, Wtmp0); .align 8 .Loop: addq $64, RDATA; /* Transform 0-15 + Precalc 16-31. */ R( a, b, c, d, e, F1, 0 ); W_PRECALC_16_31_0(16, W4, W5, W6, W7, W0, Wtmp0, Wtmp1); R( e, a, b, c, d, F1, 1 ); W_PRECALC_16_31_1(17, W4, W5, W6, W7, W0, Wtmp0, Wtmp1); R( d, e, a, b, c, F1, 2 ); W_PRECALC_16_31_2(18, W4, W5, W6, W7, W0, Wtmp0, Wtmp1); R( c, d, e, a, b, F1, 3 ); W_PRECALC_16_31_3(19, W4, W5, W6, W7, W0, Wtmp0, Wtmp1); R( b, c, d, e, a, F1, 4 ); W_PRECALC_16_31_0(20, W3, W4, W5, W6, W7, Wtmp0, Wtmp1); R( a, b, c, d, e, F1, 5 ); W_PRECALC_16_31_1(21, W3, W4, W5, W6, W7, Wtmp0, Wtmp1); R( e, a, b, c, d, F1, 6 ); W_PRECALC_16_31_2(22, W3, W4, W5, W6, W7, Wtmp0, Wtmp1); R( d, e, a, b, c, F1, 7 ); W_PRECALC_16_31_3(23, W3, W4, W5, W6, W7, Wtmp0, Wtmp1); R( c, d, e, a, b, F1, 8 ); W_PRECALC_16_31_0(24, W2, W3, W4, W5, W6, Wtmp0, Wtmp1); R( b, c, d, e, a, F1, 9 ); W_PRECALC_16_31_1(25, W2, W3, W4, W5, W6, Wtmp0, Wtmp1); R( a, b, c, d, e, F1, 10 ); W_PRECALC_16_31_2(26, W2, W3, W4, W5, W6, Wtmp0, Wtmp1); R( e, a, b, c, d, F1, 11 ); W_PRECALC_16_31_3(27, W2, W3, W4, W5, W6, Wtmp0, Wtmp1); R( d, e, a, b, c, F1, 12 ); W_PRECALC_16_31_0(28, W1, W2, W3, W4, W5, Wtmp0, Wtmp1); R( c, d, e, a, b, F1, 13 ); W_PRECALC_16_31_1(29, W1, W2, W3, W4, W5, Wtmp0, Wtmp1); R( b, c, d, e, a, F1, 14 ); W_PRECALC_16_31_2(30, W1, W2, W3, W4, W5, Wtmp0, Wtmp1); R( a, b, c, d, e, F1, 15 ); W_PRECALC_16_31_3(31, W1, W2, W3, W4, W5, Wtmp0, Wtmp1); /* Transform 16-63 + Precalc 32-79. 
*/ R( e, a, b, c, d, F1, 16 ); W_PRECALC_32_79_0(32, W0, W1, W2, W3, W4, W5, W6, W7, Wtmp0); R( d, e, a, b, c, F1, 17 ); W_PRECALC_32_79_1(33, W0, W1, W2, W3, W4, W5, W6, W7, Wtmp0); R( c, d, e, a, b, F1, 18 ); W_PRECALC_32_79_2(34, W0, W1, W2, W3, W4, W5, W6, W7, Wtmp0); R( b, c, d, e, a, F1, 19 ); W_PRECALC_32_79_3(35, W0, W1, W2, W3, W4, W5, W6, W7, Wtmp0); R( a, b, c, d, e, F2, 20 ); W_PRECALC_32_79_0(36, W7, W0, W1, W2, W3, W4, W5, W6, Wtmp0); R( e, a, b, c, d, F2, 21 ); W_PRECALC_32_79_1(37, W7, W0, W1, W2, W3, W4, W5, W6, Wtmp0); R( d, e, a, b, c, F2, 22 ); W_PRECALC_32_79_2(38, W7, W0, W1, W2, W3, W4, W5, W6, Wtmp0); R( c, d, e, a, b, F2, 23 ); W_PRECALC_32_79_3(39, W7, W0, W1, W2, W3, W4, W5, W6, Wtmp0); R( b, c, d, e, a, F2, 24 ); W_PRECALC_32_79_0(40, W6, W7, W0, W1, W2, W3, W4, W5, Wtmp0); R( a, b, c, d, e, F2, 25 ); W_PRECALC_32_79_1(41, W6, W7, W0, W1, W2, W3, W4, W5, Wtmp0); R( e, a, b, c, d, F2, 26 ); W_PRECALC_32_79_2(42, W6, W7, W0, W1, W2, W3, W4, W5, Wtmp0); R( d, e, a, b, c, F2, 27 ); W_PRECALC_32_79_3(43, W6, W7, W0, W1, W2, W3, W4, W5, Wtmp0); R( c, d, e, a, b, F2, 28 ); W_PRECALC_32_79_0(44, W5, W6, W7, W0, W1, W2, W3, W4, Wtmp0); R( b, c, d, e, a, F2, 29 ); W_PRECALC_32_79_1(45, W5, W6, W7, W0, W1, W2, W3, W4, Wtmp0); R( a, b, c, d, e, F2, 30 ); W_PRECALC_32_79_2(46, W5, W6, W7, W0, W1, W2, W3, W4, Wtmp0); R( e, a, b, c, d, F2, 31 ); W_PRECALC_32_79_3(47, W5, W6, W7, W0, W1, W2, W3, W4, Wtmp0); R( d, e, a, b, c, F2, 32 ); W_PRECALC_32_79_0(48, W4, W5, W6, W7, W0, W1, W2, W3, Wtmp0); R( c, d, e, a, b, F2, 33 ); W_PRECALC_32_79_1(49, W4, W5, W6, W7, W0, W1, W2, W3, Wtmp0); R( b, c, d, e, a, F2, 34 ); W_PRECALC_32_79_2(50, W4, W5, W6, W7, W0, W1, W2, W3, Wtmp0); R( a, b, c, d, e, F2, 35 ); W_PRECALC_32_79_3(51, W4, W5, W6, W7, W0, W1, W2, W3, Wtmp0); R( e, a, b, c, d, F2, 36 ); W_PRECALC_32_79_0(52, W3, W4, W5, W6, W7, W0, W1, W2, Wtmp0); R( d, e, a, b, c, F2, 37 ); W_PRECALC_32_79_1(53, W3, W4, W5, W6, W7, W0, W1, W2, Wtmp0); R( c, d, e, a, b, F2, 38 ); W_PRECALC_32_79_2(54, W3, W4, W5, W6, W7, W0, W1, W2, Wtmp0); R( b, c, d, e, a, F2, 39 ); W_PRECALC_32_79_3(55, W3, W4, W5, W6, W7, W0, W1, W2, Wtmp0); R( a, b, c, d, e, F3, 40 ); W_PRECALC_32_79_0(56, W2, W3, W4, W5, W6, W7, W0, W1, Wtmp0); R( e, a, b, c, d, F3, 41 ); W_PRECALC_32_79_1(57, W2, W3, W4, W5, W6, W7, W0, W1, Wtmp0); R( d, e, a, b, c, F3, 42 ); W_PRECALC_32_79_2(58, W2, W3, W4, W5, W6, W7, W0, W1, Wtmp0); R( c, d, e, a, b, F3, 43 ); W_PRECALC_32_79_3(59, W2, W3, W4, W5, W6, W7, W0, W1, Wtmp0); R( b, c, d, e, a, F3, 44 ); W_PRECALC_32_79_0(60, W1, W2, W3, W4, W5, W6, W7, W0, Wtmp0); R( a, b, c, d, e, F3, 45 ); W_PRECALC_32_79_1(61, W1, W2, W3, W4, W5, W6, W7, W0, Wtmp0); R( e, a, b, c, d, F3, 46 ); W_PRECALC_32_79_2(62, W1, W2, W3, W4, W5, W6, W7, W0, Wtmp0); R( d, e, a, b, c, F3, 47 ); W_PRECALC_32_79_3(63, W1, W2, W3, W4, W5, W6, W7, W0, Wtmp0); R( c, d, e, a, b, F3, 48 ); W_PRECALC_32_79_0(64, W0, W1, W2, W3, W4, W5, W6, W7, Wtmp0); R( b, c, d, e, a, F3, 49 ); W_PRECALC_32_79_1(65, W0, W1, W2, W3, W4, W5, W6, W7, Wtmp0); R( a, b, c, d, e, F3, 50 ); W_PRECALC_32_79_2(66, W0, W1, W2, W3, W4, W5, W6, W7, Wtmp0); R( e, a, b, c, d, F3, 51 ); W_PRECALC_32_79_3(67, W0, W1, W2, W3, W4, W5, W6, W7, Wtmp0); R( d, e, a, b, c, F3, 52 ); W_PRECALC_32_79_0(68, W7, W0, W1, W2, W3, W4, W5, W6, Wtmp0); R( c, d, e, a, b, F3, 53 ); W_PRECALC_32_79_1(69, W7, W0, W1, W2, W3, W4, W5, W6, Wtmp0); R( b, c, d, e, a, F3, 54 ); W_PRECALC_32_79_2(70, W7, W0, W1, W2, W3, W4, W5, W6, Wtmp0); R( a, b, c, d, e, F3, 55 ); 
W_PRECALC_32_79_3(71, W7, W0, W1, W2, W3, W4, W5, W6, Wtmp0); R( e, a, b, c, d, F3, 56 ); W_PRECALC_32_79_0(72, W6, W7, W0, W1, W2, W3, W4, W5, Wtmp0); R( d, e, a, b, c, F3, 57 ); W_PRECALC_32_79_1(73, W6, W7, W0, W1, W2, W3, W4, W5, Wtmp0); R( c, d, e, a, b, F3, 58 ); W_PRECALC_32_79_2(74, W6, W7, W0, W1, W2, W3, W4, W5, Wtmp0); R( b, c, d, e, a, F3, 59 ); W_PRECALC_32_79_3(75, W6, W7, W0, W1, W2, W3, W4, W5, Wtmp0); R( a, b, c, d, e, F4, 60 ); W_PRECALC_32_79_0(76, W5, W6, W7, W0, W1, W2, W3, W4, Wtmp0); R( e, a, b, c, d, F4, 61 ); W_PRECALC_32_79_1(77, W5, W6, W7, W0, W1, W2, W3, W4, Wtmp0); R( d, e, a, b, c, F4, 62 ); W_PRECALC_32_79_2(78, W5, W6, W7, W0, W1, W2, W3, W4, Wtmp0); R( c, d, e, a, b, F4, 63 ); W_PRECALC_32_79_3(79, W5, W6, W7, W0, W1, W2, W3, W4, Wtmp0); decq RNBLKS; jz .Lend; /* Transform 64-79 + Precalc 0-15 of next block. */ R( b, c, d, e, a, F4, 64 ); W_PRECALC_00_15_0(0, W0, Wtmp0); R( a, b, c, d, e, F4, 65 ); W_PRECALC_00_15_1(1, W0, Wtmp0); R( e, a, b, c, d, F4, 66 ); W_PRECALC_00_15_2(2, W0, Wtmp0); R( d, e, a, b, c, F4, 67 ); W_PRECALC_00_15_3(3, W0, Wtmp0); R( c, d, e, a, b, F4, 68 ); W_PRECALC_00_15_0(4, W7, Wtmp0); R( b, c, d, e, a, F4, 69 ); W_PRECALC_00_15_1(5, W7, Wtmp0); R( a, b, c, d, e, F4, 70 ); W_PRECALC_00_15_2(6, W7, Wtmp0); R( e, a, b, c, d, F4, 71 ); W_PRECALC_00_15_3(7, W7, Wtmp0); R( d, e, a, b, c, F4, 72 ); W_PRECALC_00_15_0(8, W6, Wtmp0); R( c, d, e, a, b, F4, 73 ); W_PRECALC_00_15_1(9, W6, Wtmp0); R( b, c, d, e, a, F4, 74 ); W_PRECALC_00_15_2(10, W6, Wtmp0); R( a, b, c, d, e, F4, 75 ); W_PRECALC_00_15_3(11, W6, Wtmp0); R( e, a, b, c, d, F4, 76 ); W_PRECALC_00_15_0(12, W5, Wtmp0); R( d, e, a, b, c, F4, 77 ); W_PRECALC_00_15_1(13, W5, Wtmp0); R( c, d, e, a, b, F4, 78 ); addl state_h0(RSTATE), a; W_PRECALC_00_15_2(14, W5, Wtmp0); R( b, c, d, e, a, F4, 79 ); W_PRECALC_00_15_3(15, W5, Wtmp0); /* Update the chaining variables. */ addl state_h3(RSTATE), d; addl state_h2(RSTATE), c; addl state_h1(RSTATE), b; addl state_h4(RSTATE), e; movl d, state_h3(RSTATE); movl c, state_h2(RSTATE); movl b, state_h1(RSTATE); movl a, state_h0(RSTATE); movl e, state_h4(RSTATE); jmp .Loop; .align 16 .Lend: /* Transform 64-79 + Clear XMM registers. */ R( b, c, d, e, a, F4, 64 ); CLEAR_REG(BSWAP_REG); R( a, b, c, d, e, F4, 65 ); CLEAR_REG(Wtmp0); R( e, a, b, c, d, F4, 66 ); CLEAR_REG(Wtmp1); R( d, e, a, b, c, F4, 67 ); CLEAR_REG(W0); R( c, d, e, a, b, F4, 68 ); CLEAR_REG(W1); R( b, c, d, e, a, F4, 69 ); CLEAR_REG(W2); R( a, b, c, d, e, F4, 70 ); CLEAR_REG(W3); R( e, a, b, c, d, F4, 71 ); CLEAR_REG(W4); R( d, e, a, b, c, F4, 72 ); CLEAR_REG(W5); R( c, d, e, a, b, F4, 73 ); CLEAR_REG(W6); R( b, c, d, e, a, F4, 74 ); CLEAR_REG(W7); R( a, b, c, d, e, F4, 75 ); R( e, a, b, c, d, F4, 76 ); R( d, e, a, b, c, F4, 77 ); R( c, d, e, a, b, F4, 78 ); addl state_h0(RSTATE), a; R( b, c, d, e, a, F4, 79 ); /* Update the chaining variables. 
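In the .Lend path above, the SSSE3 code interleaves the last sixteen rounds with CLEAR_REG (pxor reg,reg) so no message or schedule material survives in the XMM registers, while the AVX variants rely on a single vzeroall; the stack copies are covered by the burn_stack value instead. A minimal C analogue of that zeroization, using only standard C (a volatile write loop so the compiler is less likely to elide it):

#include <stddef.h>
#include <stdint.h>

/* Wipe sensitive temporaries, e.g. a local w[80] schedule array in a C
   implementation; mirrors what CLEAR_REG/vzeroall do for registers. */
static void
wipe (void *p, size_t n)
{
  volatile uint8_t *v = (volatile uint8_t *)p;

  while (n--)
    *v++ = 0;
}

/* usage: uint32_t wk[80]; ... ; wipe (wk, sizeof wk); */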
*/ addl state_h3(RSTATE), d; addl state_h2(RSTATE), c; addl state_h1(RSTATE), b; addl state_h4(RSTATE), e; movl d, state_h3(RSTATE); movl c, state_h2(RSTATE); movl b, state_h1(RSTATE); movl a, state_h0(RSTATE); movl e, state_h4(RSTATE); movq ROLDSTACK, %rsp; popq %rbp; popq %rbx; /* burn_stack */ movl $(16*4 + 2*8 + 31), %eax; .Lret: ret; #endif #endif diff --git a/cipher/sha256-avx-amd64.S b/cipher/sha256-avx-amd64.S index 8bf26bd7..6953855b 100644 --- a/cipher/sha256-avx-amd64.S +++ b/cipher/sha256-avx-amd64.S @@ -1,528 +1,527 @@ /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; Copyright (c) 2012, Intel Corporation ; ; All rights reserved. ; ; Redistribution and use in source and binary forms, with or without ; modification, are permitted provided that the following conditions are ; met: ; ; * Redistributions of source code must retain the above copyright ; notice, this list of conditions and the following disclaimer. ; ; * Redistributions in binary form must reproduce the above copyright ; notice, this list of conditions and the following disclaimer in the ; documentation and/or other materials provided with the ; distribution. ; ; * Neither the name of the Intel Corporation nor the names of its ; contributors may be used to endorse or promote products derived from ; this software without specific prior written permission. ; ; ; THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY ; EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR ; PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR ; CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, ; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, ; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ; LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; ; This code is described in an Intel White-Paper: ; "Fast SHA-256 Implementations on Intel Architecture Processors" ; ; To find it, surf to http://www.intel.com/p/en_US/embedded ; and search for that title. ; The paper is expected to be released roughly at the end of April, 2012 ; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; This code schedules 1 blocks at a time, with 4 lanes per block ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ /* * Conversion to GAS assembly and integration to libgcrypt * by Jussi Kivilinna * * Note: Based on the SSSE3 implementation. */ #ifdef __x86_64 #include #if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && \ defined(HAVE_INTEL_SYNTAX_PLATFORM_AS) && \ defined(HAVE_GCC_INLINE_ASM_AVX) && defined(USE_SHA256) #ifdef __PIC__ # define ADD_RIP +rip #else # define ADD_RIP #endif #ifdef HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS # define ELF(...) __VA_ARGS__ #else # define ELF(...) 
/*_*/ #endif .intel_syntax noprefix #define VMOVDQ vmovdqu /* assume buffers not aligned */ .macro ROR p1 p2 /* shld is faster than ror on Intel Sandybridge */ shld \p1, \p1, (32 - \p2) .endm /*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; Define Macros*/ /* addm [mem], reg * Add reg to mem using reg-mem add and store */ .macro addm p1 p2 add \p2, \p1 mov \p1, \p2 .endm /*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;*/ /* COPY_XMM_AND_BSWAP xmm, [mem], byte_flip_mask * Load xmm with mem and byte swap each dword */ .macro COPY_XMM_AND_BSWAP p1 p2 p3 VMOVDQ \p1, \p2 vpshufb \p1, \p1, \p3 .endm /*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;*/ X0 = xmm4 X1 = xmm5 X2 = xmm6 X3 = xmm7 XTMP0 = xmm0 XTMP1 = xmm1 XTMP2 = xmm2 XTMP3 = xmm3 XTMP4 = xmm8 XFER = xmm9 SHUF_00BA = xmm10 /* shuffle xBxA -> 00BA */ SHUF_DC00 = xmm11 /* shuffle xDxC -> DC00 */ BYTE_FLIP_MASK = xmm12 NUM_BLKS = rdx /* 3rd arg */ CTX = rsi /* 2nd arg */ INP = rdi /* 1st arg */ SRND = rdi /* clobbers INP */ c = ecx d = r8d e = edx TBL = rbp a = eax b = ebx f = r9d g = r10d h = r11d y0 = r13d y1 = r14d y2 = r15d #define _INP_END_SIZE 8 #define _INP_SIZE 8 #define _XFER_SIZE 8 #define _XMM_SAVE_SIZE 0 /* STACK_SIZE plus pushes must be an odd multiple of 8 */ #define _ALIGN_SIZE 8 #define _INP_END 0 #define _INP (_INP_END + _INP_END_SIZE) #define _XFER (_INP + _INP_SIZE) #define _XMM_SAVE (_XFER + _XFER_SIZE + _ALIGN_SIZE) #define STACK_SIZE (_XMM_SAVE + _XMM_SAVE_SIZE) /* rotate_Xs * Rotate values of symbols X0...X3 */ .macro rotate_Xs X_ = X0 X0 = X1 X1 = X2 X2 = X3 X3 = X_ .endm /* ROTATE_ARGS * Rotate values of symbols a...h */ .macro ROTATE_ARGS TMP_ = h h = g g = f f = e e = d d = c c = b b = a a = TMP_ .endm .macro FOUR_ROUNDS_AND_SCHED /* compute s0 four at a time and s1 two at a time * compute W[-16] + W[-7] 4 at a time */ mov y0, e /* y0 = e */ ROR y0, (25-11) /* y0 = e >> (25-11) */ mov y1, a /* y1 = a */ vpalignr XTMP0, X3, X2, 4 /* XTMP0 = W[-7] */ ROR y1, (22-13) /* y1 = a >> (22-13) */ xor y0, e /* y0 = e ^ (e >> (25-11)) */ mov y2, f /* y2 = f */ ROR y0, (11-6) /* y0 = (e >> (11-6)) ^ (e >> (25-6)) */ xor y1, a /* y1 = a ^ (a >> (22-13) */ xor y2, g /* y2 = f^g */ vpaddd XTMP0, XTMP0, X0 /* XTMP0 = W[-7] + W[-16] */ xor y0, e /* y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) */ and y2, e /* y2 = (f^g)&e */ ROR y1, (13-2) /* y1 = (a >> (13-2)) ^ (a >> (22-2)) */ /* compute s0 */ vpalignr XTMP1, X1, X0, 4 /* XTMP1 = W[-15] */ xor y1, a /* y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) */ ROR y0, 6 /* y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) */ xor y2, g /* y2 = CH = ((f^g)&e)^g */ ROR y1, 2 /* y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) */ add y2, y0 /* y2 = S1 + CH */ add y2, [rsp + _XFER + 0*4] /* y2 = k + w + S1 + CH */ mov y0, a /* y0 = a */ add h, y2 /* h = h + S1 + CH + k + w */ mov y2, a /* y2 = a */ vpslld XTMP2, XTMP1, (32-7) or y0, c /* y0 = a|c */ add d, h /* d = d + h + S1 + CH + k + w */ and y2, c /* y2 = a&c */ vpsrld XTMP3, XTMP1, 7 and y0, b /* y0 = (a|c)&b */ add h, y1 /* h = h + S1 + CH + k + w + S0 */ vpor XTMP3, XTMP3, XTMP2 /* XTMP1 = W[-15] ror 7 */ or y0, y2 /* y0 = MAJ = (a|c)&b)|(a&c) */ lea h, [h + y0] /* h = h + S1 + CH + k + w + S0 + MAJ */ ROTATE_ARGS mov y0, e /* y0 = e */ mov y1, a /* y1 = a */ ROR y0, (25-11) /* y0 = e >> (25-11) */ xor y0, e /* y0 = e ^ (e >> (25-11)) */ mov y2, f /* y2 = f */ ROR y1, (22-13) /* y1 = a >> (22-13) */ vpslld XTMP2, XTMP1, (32-18) xor y1, a /* y1 = a ^ (a >> (22-13) */ ROR y0, (11-6) /* y0 = (e >> (11-6)) ^ (e >> (25-6)) */ xor y2, g /* y2 = f^g */ vpsrld XTMP4, XTMP1, 18 ROR y1, (13-2) /* y1 = (a >> (13-2)) ^ (a >> 
(22-2)) */ xor y0, e /* y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) */ and y2, e /* y2 = (f^g)&e */ ROR y0, 6 /* y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) */ vpxor XTMP4, XTMP4, XTMP3 xor y1, a /* y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) */ xor y2, g /* y2 = CH = ((f^g)&e)^g */ vpsrld XTMP1, XTMP1, 3 /* XTMP4 = W[-15] >> 3 */ add y2, y0 /* y2 = S1 + CH */ add y2, [rsp + _XFER + 1*4] /* y2 = k + w + S1 + CH */ ROR y1, 2 /* y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) */ vpxor XTMP1, XTMP1, XTMP2 /* XTMP1 = W[-15] ror 7 ^ W[-15] ror 18 */ mov y0, a /* y0 = a */ add h, y2 /* h = h + S1 + CH + k + w */ mov y2, a /* y2 = a */ vpxor XTMP1, XTMP1, XTMP4 /* XTMP1 = s0 */ or y0, c /* y0 = a|c */ add d, h /* d = d + h + S1 + CH + k + w */ and y2, c /* y2 = a&c */ /* compute low s1 */ vpshufd XTMP2, X3, 0b11111010 /* XTMP2 = W[-2] {BBAA} */ and y0, b /* y0 = (a|c)&b */ add h, y1 /* h = h + S1 + CH + k + w + S0 */ vpaddd XTMP0, XTMP0, XTMP1 /* XTMP0 = W[-16] + W[-7] + s0 */ or y0, y2 /* y0 = MAJ = (a|c)&b)|(a&c) */ lea h, [h + y0] /* h = h + S1 + CH + k + w + S0 + MAJ */ ROTATE_ARGS mov y0, e /* y0 = e */ mov y1, a /* y1 = a */ ROR y0, (25-11) /* y0 = e >> (25-11) */ xor y0, e /* y0 = e ^ (e >> (25-11)) */ ROR y1, (22-13) /* y1 = a >> (22-13) */ mov y2, f /* y2 = f */ xor y1, a /* y1 = a ^ (a >> (22-13) */ ROR y0, (11-6) /* y0 = (e >> (11-6)) ^ (e >> (25-6)) */ vpsrlq XTMP3, XTMP2, 17 /* XTMP2 = W[-2] ror 17 {xBxA} */ xor y2, g /* y2 = f^g */ vpsrlq XTMP4, XTMP2, 19 /* XTMP3 = W[-2] ror 19 {xBxA} */ xor y0, e /* y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) */ and y2, e /* y2 = (f^g)&e */ vpsrld XTMP2, XTMP2, 10 /* XTMP4 = W[-2] >> 10 {BBAA} */ ROR y1, (13-2) /* y1 = (a >> (13-2)) ^ (a >> (22-2)) */ xor y1, a /* y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) */ xor y2, g /* y2 = CH = ((f^g)&e)^g */ ROR y0, 6 /* y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) */ vpxor XTMP2, XTMP2, XTMP3 add y2, y0 /* y2 = S1 + CH */ ROR y1, 2 /* y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) */ add y2, [rsp + _XFER + 2*4] /* y2 = k + w + S1 + CH */ vpxor XTMP4, XTMP4, XTMP2 /* XTMP4 = s1 {xBxA} */ mov y0, a /* y0 = a */ add h, y2 /* h = h + S1 + CH + k + w */ mov y2, a /* y2 = a */ vpshufb XTMP4, XTMP4, SHUF_00BA /* XTMP4 = s1 {00BA} */ or y0, c /* y0 = a|c */ add d, h /* d = d + h + S1 + CH + k + w */ and y2, c /* y2 = a&c */ vpaddd XTMP0, XTMP0, XTMP4 /* XTMP0 = {..., ..., W[1], W[0]} */ and y0, b /* y0 = (a|c)&b */ add h, y1 /* h = h + S1 + CH + k + w + S0 */ /* compute high s1 */ vpshufd XTMP2, XTMP0, 0b01010000 /* XTMP2 = W[-2] {DDCC} */ or y0, y2 /* y0 = MAJ = (a|c)&b)|(a&c) */ lea h, [h + y0] /* h = h + S1 + CH + k + w + S0 + MAJ */ ROTATE_ARGS mov y0, e /* y0 = e */ ROR y0, (25-11) /* y0 = e >> (25-11) */ mov y1, a /* y1 = a */ ROR y1, (22-13) /* y1 = a >> (22-13) */ xor y0, e /* y0 = e ^ (e >> (25-11)) */ mov y2, f /* y2 = f */ ROR y0, (11-6) /* y0 = (e >> (11-6)) ^ (e >> (25-6)) */ vpsrlq XTMP3, XTMP2, 17 /* XTMP2 = W[-2] ror 17 {xDxC} */ xor y1, a /* y1 = a ^ (a >> (22-13) */ xor y2, g /* y2 = f^g */ vpsrlq X0, XTMP2, 19 /* XTMP3 = W[-2] ror 19 {xDxC} */ xor y0, e /* y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) */ and y2, e /* y2 = (f^g)&e */ ROR y1, (13-2) /* y1 = (a >> (13-2)) ^ (a >> (22-2)) */ vpsrld XTMP2, XTMP2, 10 /* X0 = W[-2] >> 10 {DDCC} */ xor y1, a /* y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) */ ROR y0, 6 /* y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) */ xor y2, g /* y2 = CH = ((f^g)&e)^g */ vpxor XTMP2, XTMP2, XTMP3 ROR y1, 2 /* y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) */ add y2, y0 /* y2 = S1 + CH */ add y2, [rsp + _XFER + 3*4] /* y2 = k + w + S1 + CH */ vpxor X0, X0, 
XTMP2 /* X0 = s1 {xDxC} */ mov y0, a /* y0 = a */ add h, y2 /* h = h + S1 + CH + k + w */ mov y2, a /* y2 = a */ vpshufb X0, X0, SHUF_DC00 /* X0 = s1 {DC00} */ or y0, c /* y0 = a|c */ add d, h /* d = d + h + S1 + CH + k + w */ and y2, c /* y2 = a&c */ vpaddd X0, X0, XTMP0 /* X0 = {W[3], W[2], W[1], W[0]} */ and y0, b /* y0 = (a|c)&b */ add h, y1 /* h = h + S1 + CH + k + w + S0 */ or y0, y2 /* y0 = MAJ = (a|c)&b)|(a&c) */ lea h, [h + y0] /* h = h + S1 + CH + k + w + S0 + MAJ */ ROTATE_ARGS rotate_Xs .endm /* input is [rsp + _XFER + %1 * 4] */ .macro DO_ROUND i1 mov y0, e /* y0 = e */ ROR y0, (25-11) /* y0 = e >> (25-11) */ mov y1, a /* y1 = a */ xor y0, e /* y0 = e ^ (e >> (25-11)) */ ROR y1, (22-13) /* y1 = a >> (22-13) */ mov y2, f /* y2 = f */ xor y1, a /* y1 = a ^ (a >> (22-13) */ ROR y0, (11-6) /* y0 = (e >> (11-6)) ^ (e >> (25-6)) */ xor y2, g /* y2 = f^g */ xor y0, e /* y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) */ ROR y1, (13-2) /* y1 = (a >> (13-2)) ^ (a >> (22-2)) */ and y2, e /* y2 = (f^g)&e */ xor y1, a /* y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) */ ROR y0, 6 /* y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) */ xor y2, g /* y2 = CH = ((f^g)&e)^g */ add y2, y0 /* y2 = S1 + CH */ ROR y1, 2 /* y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) */ add y2, [rsp + _XFER + \i1 * 4] /* y2 = k + w + S1 + CH */ mov y0, a /* y0 = a */ add h, y2 /* h = h + S1 + CH + k + w */ mov y2, a /* y2 = a */ or y0, c /* y0 = a|c */ add d, h /* d = d + h + S1 + CH + k + w */ and y2, c /* y2 = a&c */ and y0, b /* y0 = (a|c)&b */ add h, y1 /* h = h + S1 + CH + k + w + S0 */ or y0, y2 /* y0 = MAJ = (a|c)&b)|(a&c) */ lea h, [h + y0] /* h = h + S1 + CH + k + w + S0 + MAJ */ ROTATE_ARGS .endm /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; void sha256_avx(void *input_data, UINT32 digest[8], UINT64 num_blks) ;; arg 1 : pointer to input data ;; arg 2 : pointer to digest ;; arg 3 : Num blocks */ .text .globl _gcry_sha256_transform_amd64_avx ELF(.type _gcry_sha256_transform_amd64_avx,@function;) .align 16 _gcry_sha256_transform_amd64_avx: vzeroupper push rbx push rbp push r13 push r14 push r15 sub rsp, STACK_SIZE shl NUM_BLKS, 6 /* convert to bytes */ jz .Ldone_hash add NUM_BLKS, INP /* pointer to end of data */ mov [rsp + _INP_END], NUM_BLKS /* load initial digest */ mov a,[4*0 + CTX] mov b,[4*1 + CTX] mov c,[4*2 + CTX] mov d,[4*3 + CTX] mov e,[4*4 + CTX] mov f,[4*5 + CTX] mov g,[4*6 + CTX] mov h,[4*7 + CTX] vmovdqa BYTE_FLIP_MASK, [.LPSHUFFLE_BYTE_FLIP_MASK ADD_RIP] vmovdqa SHUF_00BA, [.L_SHUF_00BA ADD_RIP] vmovdqa SHUF_DC00, [.L_SHUF_DC00 ADD_RIP] .Loop0: lea TBL, [.LK256 ADD_RIP] /* byte swap first 16 dwords */ COPY_XMM_AND_BSWAP X0, [INP + 0*16], BYTE_FLIP_MASK COPY_XMM_AND_BSWAP X1, [INP + 1*16], BYTE_FLIP_MASK COPY_XMM_AND_BSWAP X2, [INP + 2*16], BYTE_FLIP_MASK COPY_XMM_AND_BSWAP X3, [INP + 3*16], BYTE_FLIP_MASK mov [rsp + _INP], INP /* schedule 48 input dwords, by doing 3 rounds of 16 each */ mov SRND, 3 .align 16 .Loop1: vpaddd XFER, X0, [TBL + 0*16] vmovdqa [rsp + _XFER], XFER FOUR_ROUNDS_AND_SCHED vpaddd XFER, X0, [TBL + 1*16] vmovdqa [rsp + _XFER], XFER FOUR_ROUNDS_AND_SCHED vpaddd XFER, X0, [TBL + 2*16] vmovdqa [rsp + _XFER], XFER FOUR_ROUNDS_AND_SCHED vpaddd XFER, X0, [TBL + 3*16] vmovdqa [rsp + _XFER], XFER add TBL, 4*16 FOUR_ROUNDS_AND_SCHED sub SRND, 1 jne .Loop1 mov SRND, 2 .Loop2: vpaddd X0, X0, [TBL + 0*16] vmovdqa [rsp + _XFER], X0 DO_ROUND 0 DO_ROUND 1 DO_ROUND 2 DO_ROUND 3 vpaddd X1, X1, [TBL + 1*16] vmovdqa [rsp + 
_XFER], X1 add TBL, 2*16 DO_ROUND 0 DO_ROUND 1 DO_ROUND 2 DO_ROUND 3 vmovdqa X0, X2 vmovdqa X1, X3 sub SRND, 1 jne .Loop2 addm [4*0 + CTX],a addm [4*1 + CTX],b addm [4*2 + CTX],c addm [4*3 + CTX],d addm [4*4 + CTX],e addm [4*5 + CTX],f addm [4*6 + CTX],g addm [4*7 + CTX],h mov INP, [rsp + _INP] add INP, 64 cmp INP, [rsp + _INP_END] jne .Loop0 vzeroall .Ldone_hash: add rsp, STACK_SIZE pop r15 pop r14 pop r13 pop rbp pop rbx mov eax, STACK_SIZE + 5*8 ret -.data .align 16 .LK256: .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 .LPSHUFFLE_BYTE_FLIP_MASK: .octa 0x0c0d0e0f08090a0b0405060700010203 /* shuffle xBxA -> 00BA */ .L_SHUF_00BA: .octa 0xFFFFFFFFFFFFFFFF0b0a090803020100 /* shuffle xDxC -> DC00 */ .L_SHUF_DC00: .octa 0x0b0a090803020100FFFFFFFFFFFFFFFF #endif #endif diff --git a/cipher/sha256-avx2-bmi2-amd64.S b/cipher/sha256-avx2-bmi2-amd64.S index 74b60631..85e663fe 100644 --- a/cipher/sha256-avx2-bmi2-amd64.S +++ b/cipher/sha256-avx2-bmi2-amd64.S @@ -1,814 +1,813 @@ /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; Copyright (c) 2012, Intel Corporation ; ; All rights reserved. ; ; Redistribution and use in source and binary forms, with or without ; modification, are permitted provided that the following conditions are ; met: ; ; * Redistributions of source code must retain the above copyright ; notice, this list of conditions and the following disclaimer. ; ; * Redistributions in binary form must reproduce the above copyright ; notice, this list of conditions and the following disclaimer in the ; documentation and/or other materials provided with the ; distribution. ; ; * Neither the name of the Intel Corporation nor the names of its ; contributors may be used to endorse or promote products derived from ; this software without specific prior written permission. ; ; ; THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY ; EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR ; PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR ; CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, ; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, ; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ; LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
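The DO_ROUND and FOUR_ROUNDS_AND_SCHED macros of the AVX implementation above (and their AVX2/BMI2 counterparts below) interleave the vector message schedule with the scalar round computation; the ROR macro writes the rotate as `shld x, x, 32-n`, which is the same rotation and, per its comment, faster than `ror` on Sandy Bridge. A scalar C sketch of the round the register comments describe, with Maj computed as ((a|c)&b)|(a&c) exactly as in the y0/y2/y3 comments (names are mine):

#include <stdint.h>

static inline uint32_t ror32 (uint32_t x, unsigned int n)
{
  return (x >> n) | (x << (32 - n));
}

/* One SHA-256 round; wk = K[i] + W[i], taken from the _XFER stack slot. */
static void sha256_round (uint32_t s[8], uint32_t wk)
{
  uint32_t a = s[0], b = s[1], c = s[2], d = s[3];
  uint32_t e = s[4], f = s[5], g = s[6], h = s[7];

  uint32_t S1  = ror32 (e, 6) ^ ror32 (e, 11) ^ ror32 (e, 25);
  uint32_t ch  = (e & f) ^ (~e & g);         /* ((f^g)&e)^g in the asm */
  uint32_t S0  = ror32 (a, 2) ^ ror32 (a, 13) ^ ror32 (a, 22);
  uint32_t maj = ((a | c) & b) | (a & c);    /* == Maj(a,b,c) */

  uint32_t t1 = h + S1 + ch + wk;
  uint32_t t2 = S0 + maj;

  s[7] = g; s[6] = f; s[5] = e; s[4] = d + t1;
  s[3] = c; s[2] = b; s[1] = a; s[0] = t1 + t2;
}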
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; ; This code is described in an Intel White-Paper: ; "Fast SHA-256 Implementations on Intel Architecture Processors" ; ; To find it, surf to http://www.intel.com/p/en_US/embedded ; and search for that title. ; The paper is expected to be released roughly at the end of April, 2012 ; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; This code schedules 2 blocks at a time, with 4 lanes per block ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ /* * Conversion to GAS assembly and integration to libgcrypt * by Jussi Kivilinna */ #ifdef __x86_64 #include #if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && \ defined(HAVE_INTEL_SYNTAX_PLATFORM_AS) && \ defined(HAVE_GCC_INLINE_ASM_AVX2) && defined(HAVE_GCC_INLINE_ASM_BMI2) && \ defined(USE_SHA256) #ifdef __PIC__ # define ADD_RIP +rip #else # define ADD_RIP #endif #ifdef HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS # define ELF(...) __VA_ARGS__ #else # define ELF(...) /*_*/ #endif .intel_syntax noprefix #define VMOVDQ vmovdqu /* ; assume buffers not aligned */ /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; Define Macros */ /* addm [mem], reg */ /* Add reg to mem using reg-mem add and store */ .macro addm p1 p2 add \p2, \p1 mov \p1, \p2 .endm /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ X0 = ymm4 X1 = ymm5 X2 = ymm6 X3 = ymm7 /* XMM versions of above */ XWORD0 = xmm4 XWORD1 = xmm5 XWORD2 = xmm6 XWORD3 = xmm7 XTMP0 = ymm0 XTMP1 = ymm1 XTMP2 = ymm2 XTMP3 = ymm3 XTMP4 = ymm8 XFER = ymm9 XTMP5 = ymm11 SHUF_00BA = ymm10 /* shuffle xBxA -> 00BA */ SHUF_DC00 = ymm12 /* shuffle xDxC -> DC00 */ BYTE_FLIP_MASK = ymm13 X_BYTE_FLIP_MASK = xmm13 /* XMM version of BYTE_FLIP_MASK */ NUM_BLKS = rdx /* 3rd arg */ CTX = rsi /* 2nd arg */ INP = rdi /* 1st arg */ c = ecx d = r8d e = edx /* clobbers NUM_BLKS */ y3 = edi /* clobbers INP */ TBL = rbp SRND = CTX /* SRND is same register as CTX */ a = eax b = ebx f = r9d g = r10d h = r11d old_h = r11d T1 = r12d y0 = r13d y1 = r14d y2 = r15d _XFER_SIZE = 2*64*4 /* 2 blocks, 64 rounds, 4 bytes/round */ _XMM_SAVE_SIZE = 0 _INP_END_SIZE = 8 _INP_SIZE = 8 _CTX_SIZE = 8 _RSP_SIZE = 8 _XFER = 0 _XMM_SAVE = _XFER + _XFER_SIZE _INP_END = _XMM_SAVE + _XMM_SAVE_SIZE _INP = _INP_END + _INP_END_SIZE _CTX = _INP + _INP_SIZE _RSP = _CTX + _CTX_SIZE STACK_SIZE = _RSP + _RSP_SIZE /* rotate_Xs */ /* Rotate values of symbols X0...X3 */ .macro rotate_Xs X_ = X0 X0 = X1 X1 = X2 X2 = X3 X3 = X_ .endm /* ROTATE_ARGS */ /* Rotate values of symbols a...h */ .macro ROTATE_ARGS old_h = h TMP_ = h h = g g = f f = e e = d d = c c = b b = a a = TMP_ .endm .macro FOUR_ROUNDS_AND_SCHED XFER /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 0 ;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ mov y3, a /* y3 = a ; MAJA */ rorx y0, e, 25 /* y0 = e >> 25 ; S1A */ rorx y1, e, 11 /* y1 = e >> 11 ; S1B */ add h, [\XFER+0*4] /* h = k + w + h ; -- */ or y3, c /* y3 = a|c ; MAJA */ vpalignr XTMP0, X3, X2, 4 /* XTMP0 = W[-7] */ mov y2, f /* y2 = f ; CH */ rorx T1, a, 13 /* T1 = a >> 13 ; S0B */ xor y0, y1 /* y0 = (e>>25) ^ (e>>11) ; S1 */ xor y2, g /* y2 = f^g ; CH */ vpaddd XTMP0, XTMP0, X0 /* XTMP0 = W[-7] + W[-16]; y1 = (e >> 6) ; S1 */ rorx y1, e, 6 /* y1 = (e >> 6) ; S1 */ and y2, e /* y2 = (f^g)&e ; CH */ xor y0, y1 /* y0 = (e>>25) ^ (e>>11) ^ (e>>6) ; S1 */ rorx y1, a, 22 /* y1 = a >> 22 ; S0A */ add d, h /* d = k + w + h + d ; -- */ and y3, b /* y3 = (a|c)&b ; MAJA */ vpalignr 
XTMP1, X1, X0, 4 /* XTMP1 = W[-15] */ xor y1, T1 /* y1 = (a>>22) ^ (a>>13) ; S0 */ rorx T1, a, 2 /* T1 = (a >> 2) ; S0 */ xor y2, g /* y2 = CH = ((f^g)&e)^g ; CH */ vpsrld XTMP2, XTMP1, 7 xor y1, T1 /* y1 = (a>>22) ^ (a>>13) ^ (a>>2) ; S0 */ mov T1, a /* T1 = a ; MAJB */ and T1, c /* T1 = a&c ; MAJB */ add y2, y0 /* y2 = S1 + CH ; -- */ vpslld XTMP3, XTMP1, (32-7) or y3, T1 /* y3 = MAJ = (a|c)&b)|(a&c) ; MAJ */ add h, y1 /* h = k + w + h + S0 ; -- */ add d, y2 /* d = k + w + h + d + S1 + CH = d + t1 ; -- */ vpor XTMP3, XTMP3, XTMP2 /* XTMP3 = W[-15] ror 7 */ vpsrld XTMP2, XTMP1,18 add h, y2 /* h = k + w + h + S0 + S1 + CH = t1 + S0; -- */ lea h, [h + y3] /* h = t1 + S0 + MAJ ; -- */ ROTATE_ARGS /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 1 ;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ mov y3, a /* y3 = a ; MAJA */ rorx y0, e, 25 /* y0 = e >> 25 ; S1A */ rorx y1, e, 11 /* y1 = e >> 11 ; S1B */ add h, [\XFER+1*4] /* h = k + w + h ; -- */ or y3, c /* y3 = a|c ; MAJA */ vpsrld XTMP4, XTMP1, 3 /* XTMP4 = W[-15] >> 3 */ mov y2, f /* y2 = f ; CH */ rorx T1, a, 13 /* T1 = a >> 13 ; S0B */ xor y0, y1 /* y0 = (e>>25) ^ (e>>11) ; S1 */ xor y2, g /* y2 = f^g ; CH */ rorx y1, e, 6 /* y1 = (e >> 6) ; S1 */ xor y0, y1 /* y0 = (e>>25) ^ (e>>11) ^ (e>>6) ; S1 */ rorx y1, a, 22 /* y1 = a >> 22 ; S0A */ and y2, e /* y2 = (f^g)&e ; CH */ add d, h /* d = k + w + h + d ; -- */ vpslld XTMP1, XTMP1, (32-18) and y3, b /* y3 = (a|c)&b ; MAJA */ xor y1, T1 /* y1 = (a>>22) ^ (a>>13) ; S0 */ vpxor XTMP3, XTMP3, XTMP1 rorx T1, a, 2 /* T1 = (a >> 2) ; S0 */ xor y2, g /* y2 = CH = ((f^g)&e)^g ; CH */ vpxor XTMP3, XTMP3, XTMP2 /* XTMP3 = W[-15] ror 7 ^ W[-15] ror 18 */ xor y1, T1 /* y1 = (a>>22) ^ (a>>13) ^ (a>>2) ; S0 */ mov T1, a /* T1 = a ; MAJB */ and T1, c /* T1 = a&c ; MAJB */ add y2, y0 /* y2 = S1 + CH ; -- */ vpxor XTMP1, XTMP3, XTMP4 /* XTMP1 = s0 */ vpshufd XTMP2, X3, 0b11111010 /* XTMP2 = W[-2] {BBAA} */ or y3, T1 /* y3 = MAJ = (a|c)&b)|(a&c) ; MAJ */ add h, y1 /* h = k + w + h + S0 ; -- */ vpaddd XTMP0, XTMP0, XTMP1 /* XTMP0 = W[-16] + W[-7] + s0 */ add d, y2 /* d = k + w + h + d + S1 + CH = d + t1 ; -- */ add h, y2 /* h = k + w + h + S0 + S1 + CH = t1 + S0; -- */ lea h, [h + y3] /* h = t1 + S0 + MAJ ; -- */ vpsrld XTMP4, XTMP2, 10 /* XTMP4 = W[-2] >> 10 {BBAA} */ ROTATE_ARGS /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 2 ;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ mov y3, a /* y3 = a ; MAJA */ rorx y0, e, 25 /* y0 = e >> 25 ; S1A */ add h, [\XFER+2*4] /* h = k + w + h ; -- */ vpsrlq XTMP3, XTMP2, 19 /* XTMP3 = W[-2] ror 19 {xBxA} */ rorx y1, e, 11 /* y1 = e >> 11 ; S1B */ or y3, c /* y3 = a|c ; MAJA */ mov y2, f /* y2 = f ; CH */ xor y2, g /* y2 = f^g ; CH */ rorx T1, a, 13 /* T1 = a >> 13 ; S0B */ xor y0, y1 /* y0 = (e>>25) ^ (e>>11) ; S1 */ vpsrlq XTMP2, XTMP2, 17 /* XTMP2 = W[-2] ror 17 {xBxA} */ and y2, e /* y2 = (f^g)&e ; CH */ rorx y1, e, 6 /* y1 = (e >> 6) ; S1 */ vpxor XTMP2, XTMP2, XTMP3 add d, h /* d = k + w + h + d ; -- */ and y3, b /* y3 = (a|c)&b ; MAJA */ xor y0, y1 /* y0 = (e>>25) ^ (e>>11) ^ (e>>6) ; S1 */ rorx y1, a, 22 /* y1 = a >> 22 ; S0A */ vpxor XTMP4, XTMP4, XTMP2 /* XTMP4 = s1 {xBxA} */ xor y2, g /* y2 = CH = ((f^g)&e)^g ; CH */ vpshufb XTMP4, XTMP4, SHUF_00BA /* XTMP4 = s1 {00BA} */ xor y1, T1 /* y1 = (a>>22) ^ (a>>13) ; S0 */ rorx T1, a, 2 /* T1 = (a >> 2) ; S0 */ vpaddd XTMP0, XTMP0, XTMP4 /* XTMP0 = {..., ..., W[1], W[0]} */ xor y1, T1 /* y1 = (a>>22) ^ (a>>13) ^ (a>>2) ; S0 */ mov T1, a /* T1 = a ; MAJB */ and T1, c /* T1 = a&c ; MAJB */ add y2, y0 /* y2 = S1 + CH ; -- */ vpshufd XTMP2, XTMP0, 0b1010000 
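Each FOUR_ROUNDS_AND_SCHED invocation also advances the message schedule by four words: σ0 of W[-15] is built with the vpslld/vpsrld/vpor sequences, and σ1 of W[-2] is handled in two halves ({BBAA}, then {DDCC}) because the 17/19-bit rotates are emulated with 64-bit vpsrlq shifts and because the upper two new words depend on the lower two just produced. A scalar C sketch of the recurrence being computed (names are mine):

#include <stdint.h>

static inline uint32_t ror32 (uint32_t x, unsigned int n)
{
  return (x >> n) | (x << (32 - n));
}

/* SHA-256 message schedule: the vector code computes four consecutive
   w[i] per invocation and stores them, already summed with K, into the
   _XFER stack area.  w[0..15] are the byte-swapped input words. */
static void sha256_schedule (uint32_t w[64])
{
  for (int i = 16; i < 64; i++)
    {
      uint32_t s0 = ror32 (w[i-15], 7) ^ ror32 (w[i-15], 18) ^ (w[i-15] >> 3);
      uint32_t s1 = ror32 (w[i-2], 17) ^ ror32 (w[i-2], 19) ^ (w[i-2] >> 10);

      w[i] = w[i-16] + s0 + w[i-7] + s1;
    }
}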
/* XTMP2 = W[-2] {DDCC} */ or y3, T1 /* y3 = MAJ = (a|c)&b)|(a&c) ; MAJ */ add h, y1 /* h = k + w + h + S0 ; -- */ add d, y2 /* d = k + w + h + d + S1 + CH = d + t1 ; -- */ add h, y2 /* h = k + w + h + S0 + S1 + CH = t1 + S0; -- */ lea h, [h + y3] /* h = t1 + S0 + MAJ ; -- */ ROTATE_ARGS /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 3 ;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ mov y3, a /* y3 = a ; MAJA */ rorx y0, e, 25 /* y0 = e >> 25 ; S1A */ rorx y1, e, 11 /* y1 = e >> 11 ; S1B */ add h, [\XFER+3*4] /* h = k + w + h ; -- */ or y3, c /* y3 = a|c ; MAJA */ vpsrld XTMP5, XTMP2, 10 /* XTMP5 = W[-2] >> 10 {DDCC} */ mov y2, f /* y2 = f ; CH */ rorx T1, a, 13 /* T1 = a >> 13 ; S0B */ xor y0, y1 /* y0 = (e>>25) ^ (e>>11) ; S1 */ xor y2, g /* y2 = f^g ; CH */ vpsrlq XTMP3, XTMP2, 19 /* XTMP3 = W[-2] ror 19 {xDxC} */ rorx y1, e, 6 /* y1 = (e >> 6) ; S1 */ and y2, e /* y2 = (f^g)&e ; CH */ add d, h /* d = k + w + h + d ; -- */ and y3, b /* y3 = (a|c)&b ; MAJA */ vpsrlq XTMP2, XTMP2, 17 /* XTMP2 = W[-2] ror 17 {xDxC} */ xor y0, y1 /* y0 = (e>>25) ^ (e>>11) ^ (e>>6) ; S1 */ xor y2, g /* y2 = CH = ((f^g)&e)^g ; CH */ vpxor XTMP2, XTMP2, XTMP3 rorx y1, a, 22 /* y1 = a >> 22 ; S0A */ add y2, y0 /* y2 = S1 + CH ; -- */ vpxor XTMP5, XTMP5, XTMP2 /* XTMP5 = s1 {xDxC} */ xor y1, T1 /* y1 = (a>>22) ^ (a>>13) ; S0 */ add d, y2 /* d = k + w + h + d + S1 + CH = d + t1 ; -- */ rorx T1, a, 2 /* T1 = (a >> 2) ; S0 */ vpshufb XTMP5, XTMP5, SHUF_DC00 /* XTMP5 = s1 {DC00} */ vpaddd X0, XTMP5, XTMP0 /* X0 = {W[3], W[2], W[1], W[0]} */ xor y1, T1 /* y1 = (a>>22) ^ (a>>13) ^ (a>>2) ; S0 */ mov T1, a /* T1 = a ; MAJB */ and T1, c /* T1 = a&c ; MAJB */ or y3, T1 /* y3 = MAJ = (a|c)&b)|(a&c) ; MAJ */ add h, y1 /* h = k + w + h + S0 ; -- */ add h, y2 /* h = k + w + h + S0 + S1 + CH = t1 + S0; -- */ lea h, [h + y3] /* h = t1 + S0 + MAJ ; -- */ ROTATE_ARGS rotate_Xs .endm .macro DO_4ROUNDS XFER /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 0 ;;;;;;;;;;;;;;;;;;;;;;;;;;; */ mov y2, f /* y2 = f ; CH */ rorx y0, e, 25 /* y0 = e >> 25 ; S1A */ rorx y1, e, 11 /* y1 = e >> 11 ; S1B */ xor y2, g /* y2 = f^g ; CH */ xor y0, y1 /* y0 = (e>>25) ^ (e>>11) ; S1 */ rorx y1, e, 6 /* y1 = (e >> 6) ; S1 */ and y2, e /* y2 = (f^g)&e ; CH */ xor y0, y1 /* y0 = (e>>25) ^ (e>>11) ^ (e>>6) ; S1 */ rorx T1, a, 13 /* T1 = a >> 13 ; S0B */ xor y2, g /* y2 = CH = ((f^g)&e)^g ; CH */ rorx y1, a, 22 /* y1 = a >> 22 ; S0A */ mov y3, a /* y3 = a ; MAJA */ xor y1, T1 /* y1 = (a>>22) ^ (a>>13) ; S0 */ rorx T1, a, 2 /* T1 = (a >> 2) ; S0 */ add h, [\XFER + 4*0] /* h = k + w + h ; -- */ or y3, c /* y3 = a|c ; MAJA */ xor y1, T1 /* y1 = (a>>22) ^ (a>>13) ^ (a>>2) ; S0 */ mov T1, a /* T1 = a ; MAJB */ and y3, b /* y3 = (a|c)&b ; MAJA */ and T1, c /* T1 = a&c ; MAJB */ add y2, y0 /* y2 = S1 + CH ; -- */ add d, h /* d = k + w + h + d ; -- */ or y3, T1 /* y3 = MAJ = (a|c)&b)|(a&c) ; MAJ */ add h, y1 /* h = k + w + h + S0 ; -- */ add d, y2 /* d = k + w + h + d + S1 + CH = d + t1 ; -- */ /* add h, y2 ; h = k + w + h + S0 + S1 + CH = t1 + S0; -- */ /* lea h, [h + y3] ; h = t1 + S0 + MAJ ; -- */ ROTATE_ARGS /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 1 ;;;;;;;;;;;;;;;;;;;;;;;;;;; */ add old_h, y2 /* h = k + w + h + S0 + S1 + CH = t1 + S0; -- */ mov y2, f /* y2 = f ; CH */ rorx y0, e, 25 /* y0 = e >> 25 ; S1A */ rorx y1, e, 11 /* y1 = e >> 11 ; S1B */ xor y2, g /* y2 = f^g ; CH */ xor y0, y1 /* y0 = (e>>25) ^ (e>>11) ; S1 */ rorx y1, e, 6 /* y1 = (e >> 6) ; S1 */ and y2, e /* y2 = (f^g)&e ; CH */ add old_h, y3 /* h = t1 + S0 + MAJ ; -- */ xor y0, y1 /* y0 = (e>>25) ^ (e>>11) 
^ (e>>6) ; S1 */ rorx T1, a, 13 /* T1 = a >> 13 ; S0B */ xor y2, g /* y2 = CH = ((f^g)&e)^g ; CH */ rorx y1, a, 22 /* y1 = a >> 22 ; S0A */ mov y3, a /* y3 = a ; MAJA */ xor y1, T1 /* y1 = (a>>22) ^ (a>>13) ; S0 */ rorx T1, a, 2 /* T1 = (a >> 2) ; S0 */ add h, [\XFER + 4*1] /* h = k + w + h ; -- */ or y3, c /* y3 = a|c ; MAJA */ xor y1, T1 /* y1 = (a>>22) ^ (a>>13) ^ (a>>2) ; S0 */ mov T1, a /* T1 = a ; MAJB */ and y3, b /* y3 = (a|c)&b ; MAJA */ and T1, c /* T1 = a&c ; MAJB */ add y2, y0 /* y2 = S1 + CH ; -- */ add d, h /* d = k + w + h + d ; -- */ or y3, T1 /* y3 = MAJ = (a|c)&b)|(a&c) ; MAJ */ add h, y1 /* h = k + w + h + S0 ; -- */ add d, y2 /* d = k + w + h + d + S1 + CH = d + t1 ; -- */ /* add h, y2 ; h = k + w + h + S0 + S1 + CH = t1 + S0; -- */ /* lea h, [h + y3] ; h = t1 + S0 + MAJ ; -- */ ROTATE_ARGS /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 2 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ add old_h, y2 /* h = k + w + h + S0 + S1 + CH = t1 + S0; -- */ mov y2, f /* y2 = f ; CH */ rorx y0, e, 25 /* y0 = e >> 25 ; S1A */ rorx y1, e, 11 /* y1 = e >> 11 ; S1B */ xor y2, g /* y2 = f^g ; CH */ xor y0, y1 /* y0 = (e>>25) ^ (e>>11) ; S1 */ rorx y1, e, 6 /* y1 = (e >> 6) ; S1 */ and y2, e /* y2 = (f^g)&e ; CH */ add old_h, y3 /* h = t1 + S0 + MAJ ; -- */ xor y0, y1 /* y0 = (e>>25) ^ (e>>11) ^ (e>>6) ; S1 */ rorx T1, a, 13 /* T1 = a >> 13 ; S0B */ xor y2, g /* y2 = CH = ((f^g)&e)^g ; CH */ rorx y1, a, 22 /* y1 = a >> 22 ; S0A */ mov y3, a /* y3 = a ; MAJA */ xor y1, T1 /* y1 = (a>>22) ^ (a>>13) ; S0 */ rorx T1, a, 2 /* T1 = (a >> 2) ; S0 */ add h, [\XFER + 4*2] /* h = k + w + h ; -- */ or y3, c /* y3 = a|c ; MAJA */ xor y1, T1 /* y1 = (a>>22) ^ (a>>13) ^ (a>>2) ; S0 */ mov T1, a /* T1 = a ; MAJB */ and y3, b /* y3 = (a|c)&b ; MAJA */ and T1, c /* T1 = a&c ; MAJB */ add y2, y0 /* y2 = S1 + CH ; -- */ add d, h /* d = k + w + h + d ; -- */ or y3, T1 /* y3 = MAJ = (a|c)&b)|(a&c) ; MAJ */ add h, y1 /* h = k + w + h + S0 ; -- */ add d, y2 /* d = k + w + h + d + S1 + CH = d + t1 ; -- */ /* add h, y2 ; h = k + w + h + S0 + S1 + CH = t1 + S0; -- */ /* lea h, [h + y3] ; h = t1 + S0 + MAJ ; -- */ ROTATE_ARGS /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 3 ;;;;;;;;;;;;;;;;;;;;;;;;;;; */ add old_h, y2 /* h = k + w + h + S0 + S1 + CH = t1 + S0; -- */ mov y2, f /* y2 = f ; CH */ rorx y0, e, 25 /* y0 = e >> 25 ; S1A */ rorx y1, e, 11 /* y1 = e >> 11 ; S1B */ xor y2, g /* y2 = f^g ; CH */ xor y0, y1 /* y0 = (e>>25) ^ (e>>11) ; S1 */ rorx y1, e, 6 /* y1 = (e >> 6) ; S1 */ and y2, e /* y2 = (f^g)&e ; CH */ add old_h, y3 /* h = t1 + S0 + MAJ ; -- */ xor y0, y1 /* y0 = (e>>25) ^ (e>>11) ^ (e>>6) ; S1 */ rorx T1, a, 13 /* T1 = a >> 13 ; S0B */ xor y2, g /* y2 = CH = ((f^g)&e)^g ; CH */ rorx y1, a, 22 /* y1 = a >> 22 ; S0A */ mov y3, a /* y3 = a ; MAJA */ xor y1, T1 /* y1 = (a>>22) ^ (a>>13) ; S0 */ rorx T1, a, 2 /* T1 = (a >> 2) ; S0 */ add h, [\XFER + 4*3] /* h = k + w + h ; -- */ or y3, c /* y3 = a|c ; MAJA */ xor y1, T1 /* y1 = (a>>22) ^ (a>>13) ^ (a>>2) ; S0 */ mov T1, a /* T1 = a ; MAJB */ and y3, b /* y3 = (a|c)&b ; MAJA */ and T1, c /* T1 = a&c ; MAJB */ add y2, y0 /* y2 = S1 + CH ; -- */ add d, h /* d = k + w + h + d ; -- */ or y3, T1 /* y3 = MAJ = (a|c)&b)|(a&c) ; MAJ */ add h, y1 /* h = k + w + h + S0 ; -- */ add d, y2 /* d = k + w + h + d + S1 + CH = d + t1 ; -- */ add h, y2 /* h = k + w + h + S0 + S1 + CH = t1 + S0; -- */ lea h, [h + y3] /* h = t1 + S0 + MAJ ; -- */ ROTATE_ARGS .endm /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; 
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; void sha256_rorx(void *input_data, UINT32 digest[8], UINT64 num_blks) ;; arg 1 : pointer to input data ;; arg 2 : pointer to digest ;; arg 3 : Num blocks */ .text .globl _gcry_sha256_transform_amd64_avx2 ELF(.type _gcry_sha256_transform_amd64_avx2,@function) .align 32 _gcry_sha256_transform_amd64_avx2: push rbx push rbp push r12 push r13 push r14 push r15 vzeroupper mov rax, rsp sub rsp, STACK_SIZE and rsp, -32 mov [rsp + _RSP], rax shl NUM_BLKS, 6 /* convert to bytes */ jz .Ldone_hash lea NUM_BLKS, [NUM_BLKS + INP - 64] /* pointer to last block */ mov [rsp + _INP_END], NUM_BLKS cmp INP, NUM_BLKS je .Lonly_one_block /* ; load initial digest */ mov a,[4*0 + CTX] mov b,[4*1 + CTX] mov c,[4*2 + CTX] mov d,[4*3 + CTX] mov e,[4*4 + CTX] mov f,[4*5 + CTX] mov g,[4*6 + CTX] mov h,[4*7 + CTX] vmovdqa BYTE_FLIP_MASK, [.LPSHUFFLE_BYTE_FLIP_MASK ADD_RIP] vmovdqa SHUF_00BA, [.L_SHUF_00BA ADD_RIP] vmovdqa SHUF_DC00, [.L_SHUF_DC00 ADD_RIP] mov [rsp + _CTX], CTX .Loop0: lea TBL, [.LK256 ADD_RIP] /* ; Load first 16 dwords from two blocks */ VMOVDQ XTMP0, [INP + 0*32] VMOVDQ XTMP1, [INP + 1*32] VMOVDQ XTMP2, [INP + 2*32] VMOVDQ XTMP3, [INP + 3*32] /* ; byte swap data */ vpshufb XTMP0, XTMP0, BYTE_FLIP_MASK vpshufb XTMP1, XTMP1, BYTE_FLIP_MASK vpshufb XTMP2, XTMP2, BYTE_FLIP_MASK vpshufb XTMP3, XTMP3, BYTE_FLIP_MASK /* ; transpose data into high/low halves */ vperm2i128 X0, XTMP0, XTMP2, 0x20 vperm2i128 X1, XTMP0, XTMP2, 0x31 vperm2i128 X2, XTMP1, XTMP3, 0x20 vperm2i128 X3, XTMP1, XTMP3, 0x31 .Last_block_enter: add INP, 64 mov [rsp + _INP], INP /* ; schedule 48 input dwords, by doing 3 rounds of 12 each */ xor SRND, SRND .align 16 .Loop1: vpaddd XFER, X0, [TBL + SRND + 0*32] vmovdqa [rsp + _XFER + SRND + 0*32], XFER FOUR_ROUNDS_AND_SCHED rsp + _XFER + SRND + 0*32 vpaddd XFER, X0, [TBL + SRND + 1*32] vmovdqa [rsp + _XFER + SRND + 1*32], XFER FOUR_ROUNDS_AND_SCHED rsp + _XFER + SRND + 1*32 vpaddd XFER, X0, [TBL + SRND + 2*32] vmovdqa [rsp + _XFER + SRND + 2*32], XFER FOUR_ROUNDS_AND_SCHED rsp + _XFER + SRND + 2*32 vpaddd XFER, X0, [TBL + SRND + 3*32] vmovdqa [rsp + _XFER + SRND + 3*32], XFER FOUR_ROUNDS_AND_SCHED rsp + _XFER + SRND + 3*32 add SRND, 4*32 cmp SRND, 3 * 4*32 jb .Loop1 .Loop2: /* ; Do last 16 rounds with no scheduling */ vpaddd XFER, X0, [TBL + SRND + 0*32] vmovdqa [rsp + _XFER + SRND + 0*32], XFER DO_4ROUNDS rsp + _XFER + SRND + 0*32 vpaddd XFER, X1, [TBL + SRND + 1*32] vmovdqa [rsp + _XFER + SRND + 1*32], XFER DO_4ROUNDS rsp + _XFER + SRND + 1*32 add SRND, 2*32 vmovdqa X0, X2 vmovdqa X1, X3 cmp SRND, 4 * 4*32 jb .Loop2 mov CTX, [rsp + _CTX] mov INP, [rsp + _INP] addm [4*0 + CTX],a addm [4*1 + CTX],b addm [4*2 + CTX],c addm [4*3 + CTX],d addm [4*4 + CTX],e addm [4*5 + CTX],f addm [4*6 + CTX],g addm [4*7 + CTX],h cmp INP, [rsp + _INP_END] ja .Ldone_hash /* ;;; Do second block using previously scheduled results */ xor SRND, SRND .align 16 .Loop3: DO_4ROUNDS rsp + _XFER + SRND + 0*32 + 16 DO_4ROUNDS rsp + _XFER + SRND + 1*32 + 16 add SRND, 2*32 cmp SRND, 4 * 4*32 jb .Loop3 mov CTX, [rsp + _CTX] mov INP, [rsp + _INP] add INP, 64 addm [4*0 + CTX],a addm [4*1 + CTX],b addm [4*2 + CTX],c addm [4*3 + CTX],d addm [4*4 + CTX],e addm [4*5 + CTX],f addm [4*6 + CTX],g addm [4*7 + CTX],h cmp INP, [rsp + _INP_END] jb .Loop0 ja .Ldone_hash .Ldo_last_block: /* ;;; do last block */ lea TBL, [.LK256 ADD_RIP] VMOVDQ XWORD0, [INP + 0*16] VMOVDQ XWORD1, [INP + 1*16] VMOVDQ XWORD2, [INP + 2*16] VMOVDQ XWORD3, [INP + 3*16] vpshufb XWORD0, 
XWORD0, X_BYTE_FLIP_MASK vpshufb XWORD1, XWORD1, X_BYTE_FLIP_MASK vpshufb XWORD2, XWORD2, X_BYTE_FLIP_MASK vpshufb XWORD3, XWORD3, X_BYTE_FLIP_MASK jmp .Last_block_enter .Lonly_one_block: /* ; load initial digest */ mov a,[4*0 + CTX] mov b,[4*1 + CTX] mov c,[4*2 + CTX] mov d,[4*3 + CTX] mov e,[4*4 + CTX] mov f,[4*5 + CTX] mov g,[4*6 + CTX] mov h,[4*7 + CTX] vmovdqa BYTE_FLIP_MASK, [.LPSHUFFLE_BYTE_FLIP_MASK ADD_RIP] vmovdqa SHUF_00BA, [.L_SHUF_00BA ADD_RIP] vmovdqa SHUF_DC00, [.L_SHUF_DC00 ADD_RIP] mov [rsp + _CTX], CTX jmp .Ldo_last_block .Ldone_hash: mov rsp, [rsp + _RSP] vzeroall pop r15 pop r14 pop r13 pop r12 pop rbp pop rbx /* stack burn depth */ mov eax, STACK_SIZE + 6*8 + 31 ret -.data .align 64 .LK256: .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 .LPSHUFFLE_BYTE_FLIP_MASK: .octa 0x0c0d0e0f08090a0b0405060700010203,0x0c0d0e0f08090a0b0405060700010203 /* shuffle xBxA -> 00BA */ .L_SHUF_00BA: .octa 0xFFFFFFFFFFFFFFFF0b0a090803020100,0xFFFFFFFFFFFFFFFF0b0a090803020100 /* shuffle xDxC -> DC00 */ .L_SHUF_DC00: .octa 0x0b0a090803020100FFFFFFFFFFFFFFFF,0x0b0a090803020100FFFFFFFFFFFFFFFF #endif #endif diff --git a/cipher/sha256-ssse3-amd64.S b/cipher/sha256-ssse3-amd64.S index 9ec87e46..a9213e41 100644 --- a/cipher/sha256-ssse3-amd64.S +++ b/cipher/sha256-ssse3-amd64.S @@ -1,548 +1,547 @@ /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; Copyright (c) 2012, Intel Corporation ; ; All rights reserved. ; ; Redistribution and use in source and binary forms, with or without ; modification, are permitted provided that the following conditions are ; met: ; ; * Redistributions of source code must retain the above copyright ; notice, this list of conditions and the following disclaimer. ; ; * Redistributions in binary form must reproduce the above copyright ; notice, this list of conditions and the following disclaimer in the ; documentation and/or other materials provided with the ; distribution. 
; ; * Neither the name of the Intel Corporation nor the names of its ; contributors may be used to endorse or promote products derived from ; this software without specific prior written permission. ; ; ; THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY ; EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR ; PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR ; CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, ; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, ; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ; LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; ; This code is described in an Intel White-Paper: ; "Fast SHA-256 Implementations on Intel Architecture Processors" ; ; To find it, surf to http://www.intel.com/p/en_US/embedded ; and search for that title. ; The paper is expected to be released roughly at the end of April, 2012 ; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; This code schedules 1 blocks at a time, with 4 lanes per block ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ /* * Conversion to GAS assembly and integration to libgcrypt * by Jussi Kivilinna * * Note: original implementation was named as SHA256-SSE4. However, only SSSE3 * is required. */ #ifdef __x86_64 #include #if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && \ defined(HAVE_INTEL_SYNTAX_PLATFORM_AS) && \ defined(HAVE_GCC_INLINE_ASM_SSSE3) && defined(USE_SHA256) #ifdef __PIC__ # define ADD_RIP +rip #else # define ADD_RIP #endif #ifdef HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS # define ELF(...) __VA_ARGS__ #else # define ELF(...) 
/*_*/ #endif .intel_syntax noprefix #define MOVDQ movdqu /* assume buffers not aligned */ /*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; Define Macros*/ /* addm [mem], reg * Add reg to mem using reg-mem add and store */ .macro addm p1 p2 add \p2, \p1 mov \p1, \p2 .endm /*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;*/ /* COPY_XMM_AND_BSWAP xmm, [mem], byte_flip_mask * Load xmm with mem and byte swap each dword */ .macro COPY_XMM_AND_BSWAP p1 p2 p3 MOVDQ \p1, \p2 pshufb \p1, \p3 .endm /*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;*/ X0 = xmm4 X1 = xmm5 X2 = xmm6 X3 = xmm7 XTMP0 = xmm0 XTMP1 = xmm1 XTMP2 = xmm2 XTMP3 = xmm3 XTMP4 = xmm8 XFER = xmm9 SHUF_00BA = xmm10 /* shuffle xBxA -> 00BA */ SHUF_DC00 = xmm11 /* shuffle xDxC -> DC00 */ BYTE_FLIP_MASK = xmm12 NUM_BLKS = rdx /* 3rd arg */ CTX = rsi /* 2nd arg */ INP = rdi /* 1st arg */ SRND = rdi /* clobbers INP */ c = ecx d = r8d e = edx TBL = rbp a = eax b = ebx f = r9d g = r10d h = r11d y0 = r13d y1 = r14d y2 = r15d #define _INP_END_SIZE 8 #define _INP_SIZE 8 #define _XFER_SIZE 8 #define _XMM_SAVE_SIZE 0 /* STACK_SIZE plus pushes must be an odd multiple of 8 */ #define _ALIGN_SIZE 8 #define _INP_END 0 #define _INP (_INP_END + _INP_END_SIZE) #define _XFER (_INP + _INP_SIZE) #define _XMM_SAVE (_XFER + _XFER_SIZE + _ALIGN_SIZE) #define STACK_SIZE (_XMM_SAVE + _XMM_SAVE_SIZE) /* rotate_Xs * Rotate values of symbols X0...X3 */ .macro rotate_Xs X_ = X0 X0 = X1 X1 = X2 X2 = X3 X3 = X_ .endm /* ROTATE_ARGS * Rotate values of symbols a...h */ .macro ROTATE_ARGS TMP_ = h h = g g = f f = e e = d d = c c = b b = a a = TMP_ .endm .macro FOUR_ROUNDS_AND_SCHED /* compute s0 four at a time and s1 two at a time * compute W[-16] + W[-7] 4 at a time */ movdqa XTMP0, X3 mov y0, e /* y0 = e */ ror y0, (25-11) /* y0 = e >> (25-11) */ mov y1, a /* y1 = a */ palignr XTMP0, X2, 4 /* XTMP0 = W[-7] */ ror y1, (22-13) /* y1 = a >> (22-13) */ xor y0, e /* y0 = e ^ (e >> (25-11)) */ mov y2, f /* y2 = f */ ror y0, (11-6) /* y0 = (e >> (11-6)) ^ (e >> (25-6)) */ movdqa XTMP1, X1 xor y1, a /* y1 = a ^ (a >> (22-13) */ xor y2, g /* y2 = f^g */ paddd XTMP0, X0 /* XTMP0 = W[-7] + W[-16] */ xor y0, e /* y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) */ and y2, e /* y2 = (f^g)&e */ ror y1, (13-2) /* y1 = (a >> (13-2)) ^ (a >> (22-2)) */ /* compute s0 */ palignr XTMP1, X0, 4 /* XTMP1 = W[-15] */ xor y1, a /* y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) */ ror y0, 6 /* y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) */ xor y2, g /* y2 = CH = ((f^g)&e)^g */ movdqa XTMP2, XTMP1 /* XTMP2 = W[-15] */ ror y1, 2 /* y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) */ add y2, y0 /* y2 = S1 + CH */ add y2, [rsp + _XFER + 0*4] /* y2 = k + w + S1 + CH */ movdqa XTMP3, XTMP1 /* XTMP3 = W[-15] */ mov y0, a /* y0 = a */ add h, y2 /* h = h + S1 + CH + k + w */ mov y2, a /* y2 = a */ pslld XTMP1, (32-7) or y0, c /* y0 = a|c */ add d, h /* d = d + h + S1 + CH + k + w */ and y2, c /* y2 = a&c */ psrld XTMP2, 7 and y0, b /* y0 = (a|c)&b */ add h, y1 /* h = h + S1 + CH + k + w + S0 */ por XTMP1, XTMP2 /* XTMP1 = W[-15] ror 7 */ or y0, y2 /* y0 = MAJ = (a|c)&b)|(a&c) */ lea h, [h + y0] /* h = h + S1 + CH + k + w + S0 + MAJ */ ROTATE_ARGS movdqa XTMP2, XTMP3 /* XTMP2 = W[-15] */ mov y0, e /* y0 = e */ mov y1, a /* y1 = a */ movdqa XTMP4, XTMP3 /* XTMP4 = W[-15] */ ror y0, (25-11) /* y0 = e >> (25-11) */ xor y0, e /* y0 = e ^ (e >> (25-11)) */ mov y2, f /* y2 = f */ ror y1, (22-13) /* y1 = a >> (22-13) */ pslld XTMP3, (32-18) xor y1, a /* y1 = a ^ (a >> (22-13) */ ror y0, (11-6) /* y0 = (e >> (11-6)) ^ (e >> (25-6)) */ xor y2, g /* y2 = f^g */ psrld XTMP2, 18 ror y1, (13-2) 
/* y1 = (a >> (13-2)) ^ (a >> (22-2)) */ xor y0, e /* y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) */ and y2, e /* y2 = (f^g)&e */ ror y0, 6 /* y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) */ pxor XTMP1, XTMP3 xor y1, a /* y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) */ xor y2, g /* y2 = CH = ((f^g)&e)^g */ psrld XTMP4, 3 /* XTMP4 = W[-15] >> 3 */ add y2, y0 /* y2 = S1 + CH */ add y2, [rsp + _XFER + 1*4] /* y2 = k + w + S1 + CH */ ror y1, 2 /* y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) */ pxor XTMP1, XTMP2 /* XTMP1 = W[-15] ror 7 ^ W[-15] ror 18 */ mov y0, a /* y0 = a */ add h, y2 /* h = h + S1 + CH + k + w */ mov y2, a /* y2 = a */ pxor XTMP1, XTMP4 /* XTMP1 = s0 */ or y0, c /* y0 = a|c */ add d, h /* d = d + h + S1 + CH + k + w */ and y2, c /* y2 = a&c */ /* compute low s1 */ pshufd XTMP2, X3, 0b11111010 /* XTMP2 = W[-2] {BBAA} */ and y0, b /* y0 = (a|c)&b */ add h, y1 /* h = h + S1 + CH + k + w + S0 */ paddd XTMP0, XTMP1 /* XTMP0 = W[-16] + W[-7] + s0 */ or y0, y2 /* y0 = MAJ = (a|c)&b)|(a&c) */ lea h, [h + y0] /* h = h + S1 + CH + k + w + S0 + MAJ */ ROTATE_ARGS movdqa XTMP3, XTMP2 /* XTMP3 = W[-2] {BBAA} */ mov y0, e /* y0 = e */ mov y1, a /* y1 = a */ ror y0, (25-11) /* y0 = e >> (25-11) */ movdqa XTMP4, XTMP2 /* XTMP4 = W[-2] {BBAA} */ xor y0, e /* y0 = e ^ (e >> (25-11)) */ ror y1, (22-13) /* y1 = a >> (22-13) */ mov y2, f /* y2 = f */ xor y1, a /* y1 = a ^ (a >> (22-13) */ ror y0, (11-6) /* y0 = (e >> (11-6)) ^ (e >> (25-6)) */ psrlq XTMP2, 17 /* XTMP2 = W[-2] ror 17 {xBxA} */ xor y2, g /* y2 = f^g */ psrlq XTMP3, 19 /* XTMP3 = W[-2] ror 19 {xBxA} */ xor y0, e /* y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) */ and y2, e /* y2 = (f^g)&e */ psrld XTMP4, 10 /* XTMP4 = W[-2] >> 10 {BBAA} */ ror y1, (13-2) /* y1 = (a >> (13-2)) ^ (a >> (22-2)) */ xor y1, a /* y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) */ xor y2, g /* y2 = CH = ((f^g)&e)^g */ ror y0, 6 /* y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) */ pxor XTMP2, XTMP3 add y2, y0 /* y2 = S1 + CH */ ror y1, 2 /* y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) */ add y2, [rsp + _XFER + 2*4] /* y2 = k + w + S1 + CH */ pxor XTMP4, XTMP2 /* XTMP4 = s1 {xBxA} */ mov y0, a /* y0 = a */ add h, y2 /* h = h + S1 + CH + k + w */ mov y2, a /* y2 = a */ pshufb XTMP4, SHUF_00BA /* XTMP4 = s1 {00BA} */ or y0, c /* y0 = a|c */ add d, h /* d = d + h + S1 + CH + k + w */ and y2, c /* y2 = a&c */ paddd XTMP0, XTMP4 /* XTMP0 = {..., ..., W[1], W[0]} */ and y0, b /* y0 = (a|c)&b */ add h, y1 /* h = h + S1 + CH + k + w + S0 */ /* compute high s1 */ pshufd XTMP2, XTMP0, 0b01010000 /* XTMP2 = W[-2] {DDCC} */ or y0, y2 /* y0 = MAJ = (a|c)&b)|(a&c) */ lea h, [h + y0] /* h = h + S1 + CH + k + w + S0 + MAJ */ ROTATE_ARGS movdqa XTMP3, XTMP2 /* XTMP3 = W[-2] {DDCC} */ mov y0, e /* y0 = e */ ror y0, (25-11) /* y0 = e >> (25-11) */ mov y1, a /* y1 = a */ movdqa X0, XTMP2 /* X0 = W[-2] {DDCC} */ ror y1, (22-13) /* y1 = a >> (22-13) */ xor y0, e /* y0 = e ^ (e >> (25-11)) */ mov y2, f /* y2 = f */ ror y0, (11-6) /* y0 = (e >> (11-6)) ^ (e >> (25-6)) */ psrlq XTMP2, 17 /* XTMP2 = W[-2] ror 17 {xDxC} */ xor y1, a /* y1 = a ^ (a >> (22-13) */ xor y2, g /* y2 = f^g */ psrlq XTMP3, 19 /* XTMP3 = W[-2] ror 19 {xDxC} */ xor y0, e /* y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) */ and y2, e /* y2 = (f^g)&e */ ror y1, (13-2) /* y1 = (a >> (13-2)) ^ (a >> (22-2)) */ psrld X0, 10 /* X0 = W[-2] >> 10 {DDCC} */ xor y1, a /* y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) */ ror y0, 6 /* y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) */ xor y2, g /* y2 = CH = ((f^g)&e)^g */ pxor XTMP2, XTMP3 ror y1, 2 /* y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) */ add y2, y0 /* y2 
= S1 + CH */ add y2, [rsp + _XFER + 3*4] /* y2 = k + w + S1 + CH */ pxor X0, XTMP2 /* X0 = s1 {xDxC} */ mov y0, a /* y0 = a */ add h, y2 /* h = h + S1 + CH + k + w */ mov y2, a /* y2 = a */ pshufb X0, SHUF_DC00 /* X0 = s1 {DC00} */ or y0, c /* y0 = a|c */ add d, h /* d = d + h + S1 + CH + k + w */ and y2, c /* y2 = a&c */ paddd X0, XTMP0 /* X0 = {W[3], W[2], W[1], W[0]} */ and y0, b /* y0 = (a|c)&b */ add h, y1 /* h = h + S1 + CH + k + w + S0 */ or y0, y2 /* y0 = MAJ = (a|c)&b)|(a&c) */ lea h, [h + y0] /* h = h + S1 + CH + k + w + S0 + MAJ */ ROTATE_ARGS rotate_Xs .endm /* input is [rsp + _XFER + %1 * 4] */ .macro DO_ROUND i1 mov y0, e /* y0 = e */ ror y0, (25-11) /* y0 = e >> (25-11) */ mov y1, a /* y1 = a */ xor y0, e /* y0 = e ^ (e >> (25-11)) */ ror y1, (22-13) /* y1 = a >> (22-13) */ mov y2, f /* y2 = f */ xor y1, a /* y1 = a ^ (a >> (22-13) */ ror y0, (11-6) /* y0 = (e >> (11-6)) ^ (e >> (25-6)) */ xor y2, g /* y2 = f^g */ xor y0, e /* y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) */ ror y1, (13-2) /* y1 = (a >> (13-2)) ^ (a >> (22-2)) */ and y2, e /* y2 = (f^g)&e */ xor y1, a /* y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) */ ror y0, 6 /* y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) */ xor y2, g /* y2 = CH = ((f^g)&e)^g */ add y2, y0 /* y2 = S1 + CH */ ror y1, 2 /* y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) */ add y2, [rsp + _XFER + \i1 * 4] /* y2 = k + w + S1 + CH */ mov y0, a /* y0 = a */ add h, y2 /* h = h + S1 + CH + k + w */ mov y2, a /* y2 = a */ or y0, c /* y0 = a|c */ add d, h /* d = d + h + S1 + CH + k + w */ and y2, c /* y2 = a&c */ and y0, b /* y0 = (a|c)&b */ add h, y1 /* h = h + S1 + CH + k + w + S0 */ or y0, y2 /* y0 = MAJ = (a|c)&b)|(a&c) */ lea h, [h + y0] /* h = h + S1 + CH + k + w + S0 + MAJ */ ROTATE_ARGS .endm /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; void sha256_sse4(void *input_data, UINT32 digest[8], UINT64 num_blks) ;; arg 1 : pointer to input data ;; arg 2 : pointer to digest ;; arg 3 : Num blocks */ .text .globl _gcry_sha256_transform_amd64_ssse3 ELF(.type _gcry_sha256_transform_amd64_ssse3,@function;) .align 16 _gcry_sha256_transform_amd64_ssse3: push rbx push rbp push r13 push r14 push r15 sub rsp, STACK_SIZE shl NUM_BLKS, 6 /* convert to bytes */ jz .Ldone_hash add NUM_BLKS, INP /* pointer to end of data */ mov [rsp + _INP_END], NUM_BLKS /* load initial digest */ mov a,[4*0 + CTX] mov b,[4*1 + CTX] mov c,[4*2 + CTX] mov d,[4*3 + CTX] mov e,[4*4 + CTX] mov f,[4*5 + CTX] mov g,[4*6 + CTX] mov h,[4*7 + CTX] movdqa BYTE_FLIP_MASK, [.LPSHUFFLE_BYTE_FLIP_MASK ADD_RIP] movdqa SHUF_00BA, [.L_SHUF_00BA ADD_RIP] movdqa SHUF_DC00, [.L_SHUF_DC00 ADD_RIP] .Loop0: lea TBL, [.LK256 ADD_RIP] /* byte swap first 16 dwords */ COPY_XMM_AND_BSWAP X0, [INP + 0*16], BYTE_FLIP_MASK COPY_XMM_AND_BSWAP X1, [INP + 1*16], BYTE_FLIP_MASK COPY_XMM_AND_BSWAP X2, [INP + 2*16], BYTE_FLIP_MASK COPY_XMM_AND_BSWAP X3, [INP + 3*16], BYTE_FLIP_MASK mov [rsp + _INP], INP /* schedule 48 input dwords, by doing 3 rounds of 16 each */ mov SRND, 3 .align 16 .Loop1: movdqa XFER, [TBL + 0*16] paddd XFER, X0 movdqa [rsp + _XFER], XFER FOUR_ROUNDS_AND_SCHED movdqa XFER, [TBL + 1*16] paddd XFER, X0 movdqa [rsp + _XFER], XFER FOUR_ROUNDS_AND_SCHED movdqa XFER, [TBL + 2*16] paddd XFER, X0 movdqa [rsp + _XFER], XFER FOUR_ROUNDS_AND_SCHED movdqa XFER, [TBL + 3*16] paddd XFER, X0 movdqa [rsp + _XFER], XFER add TBL, 4*16 FOUR_ROUNDS_AND_SCHED sub SRND, 1 jne .Loop1 mov SRND, 2 .Loop2: paddd X0, [TBL + 0*16] movdqa [rsp + 
_XFER], X0 DO_ROUND 0 DO_ROUND 1 DO_ROUND 2 DO_ROUND 3 paddd X1, [TBL + 1*16] movdqa [rsp + _XFER], X1 add TBL, 2*16 DO_ROUND 0 DO_ROUND 1 DO_ROUND 2 DO_ROUND 3 movdqa X0, X2 movdqa X1, X3 sub SRND, 1 jne .Loop2 addm [4*0 + CTX],a addm [4*1 + CTX],b addm [4*2 + CTX],c addm [4*3 + CTX],d addm [4*4 + CTX],e addm [4*5 + CTX],f addm [4*6 + CTX],g addm [4*7 + CTX],h mov INP, [rsp + _INP] add INP, 64 cmp INP, [rsp + _INP_END] jne .Loop0 pxor xmm0, xmm0 pxor xmm1, xmm1 pxor xmm2, xmm2 pxor xmm3, xmm3 pxor xmm4, xmm4 pxor xmm5, xmm5 pxor xmm6, xmm6 pxor xmm7, xmm7 pxor xmm8, xmm8 pxor xmm9, xmm9 pxor xmm10, xmm10 pxor xmm11, xmm11 pxor xmm12, xmm12 .Ldone_hash: add rsp, STACK_SIZE pop r15 pop r14 pop r13 pop rbp pop rbx mov eax, STACK_SIZE + 5*8 ret -.data .align 16 .LK256: .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 .LPSHUFFLE_BYTE_FLIP_MASK: .octa 0x0c0d0e0f08090a0b0405060700010203 /* shuffle xBxA -> 00BA */ .L_SHUF_00BA: .octa 0xFFFFFFFFFFFFFFFF0b0a090803020100 /* shuffle xDxC -> DC00 */ .L_SHUF_DC00: .octa 0x0b0a090803020100FFFFFFFFFFFFFFFF #endif #endif diff --git a/cipher/sha512-avx-amd64.S b/cipher/sha512-avx-amd64.S index 699c271b..446a8b4e 100644 --- a/cipher/sha512-avx-amd64.S +++ b/cipher/sha512-avx-amd64.S @@ -1,423 +1,421 @@ /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; Copyright (c) 2012, Intel Corporation ; ; All rights reserved. ; ; Redistribution and use in source and binary forms, with or without ; modification, are permitted provided that the following conditions are ; met: ; ; * Redistributions of source code must retain the above copyright ; notice, this list of conditions and the following disclaimer. ; ; * Redistributions in binary form must reproduce the above copyright ; notice, this list of conditions and the following disclaimer in the ; documentation and/or other materials provided with the ; distribution. ; ; * Neither the name of the Intel Corporation nor the names of its ; contributors may be used to endorse or promote products derived from ; this software without specific prior written permission. ; ; ; THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY ; EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR ; PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL INTEL CORPORATION OR ; CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, ; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, ; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ; LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ /* * Conversion to GAS assembly and integration to libgcrypt * by Jussi Kivilinna */ #ifdef __x86_64 #include #if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && \ defined(HAVE_INTEL_SYNTAX_PLATFORM_AS) && \ defined(HAVE_GCC_INLINE_ASM_AVX) && defined(USE_SHA512) #ifdef __PIC__ # define ADD_RIP +rip #else # define ADD_RIP #endif #ifdef HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS # define ELF(...) __VA_ARGS__ #else # define ELF(...) /*_*/ #endif .intel_syntax noprefix .text /* Virtual Registers */ msg = rdi /* ARG1 */ digest = rsi /* ARG2 */ msglen = rdx /* ARG3 */ T1 = rcx T2 = r8 a_64 = r9 b_64 = r10 c_64 = r11 d_64 = r12 e_64 = r13 f_64 = r14 g_64 = r15 h_64 = rbx tmp0 = rax /* ; Local variables (stack frame) ; Note: frame_size must be an odd multiple of 8 bytes to XMM align RSP */ frame_W = 0 /* Message Schedule */ frame_W_size = (80 * 8) frame_WK = ((frame_W) + (frame_W_size)) /* W[t] + K[t] | W[t+1] + K[t+1] */ frame_WK_size = (2 * 8) frame_GPRSAVE = ((frame_WK) + (frame_WK_size)) frame_GPRSAVE_size = (5 * 8) frame_size = ((frame_GPRSAVE) + (frame_GPRSAVE_size)) /* Useful QWORD "arrays" for simpler memory references */ #define MSG(i) msg + 8*(i) /* Input message (arg1) */ #define DIGEST(i) digest + 8*(i) /* Output Digest (arg2) */ #define K_t(i) .LK512 + 8*(i) ADD_RIP /* SHA Constants (static mem) */ #define W_t(i) rsp + frame_W + 8*(i) /* Message Schedule (stack frame) */ #define WK_2(i) rsp + frame_WK + 8*((i) % 2) /* W[t]+K[t] (stack frame) */ /* MSG, DIGEST, K_t, W_t are arrays */ /* WK_2(t) points to 1 of 2 qwords at frame.WK depdending on t being odd/even */ .macro RotateState /* Rotate symbles a..h right */ __TMP = h_64 h_64 = g_64 g_64 = f_64 f_64 = e_64 e_64 = d_64 d_64 = c_64 c_64 = b_64 b_64 = a_64 a_64 = __TMP .endm .macro RORQ p1 p2 /* shld is faster than ror on Intel Sandybridge */ shld \p1, \p1, (64 - \p2) .endm .macro SHA512_Round t /* Compute Round %%t */ mov T1, f_64 /* T1 = f */ mov tmp0, e_64 /* tmp = e */ xor T1, g_64 /* T1 = f ^ g */ RORQ tmp0, 23 /* 41 ; tmp = e ror 23 */ and T1, e_64 /* T1 = (f ^ g) & e */ xor tmp0, e_64 /* tmp = (e ror 23) ^ e */ xor T1, g_64 /* T1 = ((f ^ g) & e) ^ g = CH(e,f,g) */ add T1, [WK_2(\t)] /* W[t] + K[t] from message scheduler */ RORQ tmp0, 4 /* 18 ; tmp = ((e ror 23) ^ e) ror 4 */ xor tmp0, e_64 /* tmp = (((e ror 23) ^ e) ror 4) ^ e */ mov T2, a_64 /* T2 = a */ add T1, h_64 /* T1 = CH(e,f,g) + W[t] + K[t] + h */ RORQ tmp0, 14 /* 14 ; tmp = ((((e ror23)^e)ror4)^e)ror14 = S1(e) */ add T1, tmp0 /* T1 = CH(e,f,g) + W[t] + K[t] + S1(e) */ mov tmp0, a_64 /* tmp = a */ xor T2, c_64 /* T2 = a ^ c */ and tmp0, c_64 /* tmp = a & c */ and T2, b_64 /* T2 = (a ^ c) & b */ xor T2, tmp0 /* T2 = ((a ^ c) & b) ^ (a & c) = Maj(a,b,c) */ mov tmp0, a_64 /* tmp = a */ RORQ tmp0, 5 /* 39 ; tmp = a ror 5 */ xor tmp0, a_64 /* tmp = (a ror 5) ^ a */ add d_64, T1 /* e(next_state) = d + T1 */ RORQ tmp0, 6 /* 34 ; tmp 
= ((a ror 5) ^ a) ror 6 */ xor tmp0, a_64 /* tmp = (((a ror 5) ^ a) ror 6) ^ a */ lea h_64, [T1 + T2] /* a(next_state) = T1 + Maj(a,b,c) */ RORQ tmp0, 28 /* 28 ; tmp = ((((a ror5)^a)ror6)^a)ror28 = S0(a) */ add h_64, tmp0 /* a(next_state) = T1 + Maj(a,b,c) S0(a) */ RotateState .endm .macro SHA512_2Sched_2Round_avx t /* ; Compute rounds %%t-2 and %%t-1 ; Compute message schedule QWORDS %%t and %%t+1 ; Two rounds are computed based on the values for K[t-2]+W[t-2] and ; K[t-1]+W[t-1] which were previously stored at WK_2 by the message ; scheduler. ; The two new schedule QWORDS are stored at [W_t(%%t)] and [W_t(%%t+1)]. ; They are then added to their respective SHA512 constants at ; [K_t(%%t)] and [K_t(%%t+1)] and stored at dqword [WK_2(%%t)] ; For brievity, the comments following vectored instructions only refer to ; the first of a pair of QWORDS. ; Eg. XMM4=W[t-2] really means XMM4={W[t-2]|W[t-1]} ; The computation of the message schedule and the rounds are tightly ; stitched to take advantage of instruction-level parallelism. ; For clarity, integer instructions (for the rounds calculation) are indented ; by one tab. Vectored instructions (for the message scheduler) are indented ; by two tabs. */ vmovdqa xmm4, [W_t(\t-2)] /* XMM4 = W[t-2] */ vmovdqu xmm5, [W_t(\t-15)] /* XMM5 = W[t-15] */ mov T1, f_64 vpsrlq xmm0, xmm4, 61 /* XMM0 = W[t-2]>>61 */ mov tmp0, e_64 vpsrlq xmm6, xmm5, 1 /* XMM6 = W[t-15]>>1 */ xor T1, g_64 RORQ tmp0, 23 /* 41 */ vpsrlq xmm1, xmm4, 19 /* XMM1 = W[t-2]>>19 */ and T1, e_64 xor tmp0, e_64 vpxor xmm0, xmm0, xmm1 /* XMM0 = W[t-2]>>61 ^ W[t-2]>>19 */ xor T1, g_64 add T1, [WK_2(\t)]; vpsrlq xmm7, xmm5, 8 /* XMM7 = W[t-15]>>8 */ RORQ tmp0, 4 /* 18 */ vpsrlq xmm2, xmm4, 6 /* XMM2 = W[t-2]>>6 */ xor tmp0, e_64 mov T2, a_64 add T1, h_64 vpxor xmm6, xmm6, xmm7 /* XMM6 = W[t-15]>>1 ^ W[t-15]>>8 */ RORQ tmp0, 14 /* 14 */ add T1, tmp0 vpsrlq xmm8, xmm5, 7 /* XMM8 = W[t-15]>>7 */ mov tmp0, a_64 xor T2, c_64 vpsllq xmm3, xmm4, (64-61) /* XMM3 = W[t-2]<<3 */ and tmp0, c_64 and T2, b_64 vpxor xmm2, xmm2, xmm3 /* XMM2 = W[t-2]>>6 ^ W[t-2]<<3 */ xor T2, tmp0 mov tmp0, a_64 vpsllq xmm9, xmm5, (64-1) /* XMM9 = W[t-15]<<63 */ RORQ tmp0, 5 /* 39 */ vpxor xmm8, xmm8, xmm9 /* XMM8 = W[t-15]>>7 ^ W[t-15]<<63 */ xor tmp0, a_64 add d_64, T1 RORQ tmp0, 6 /* 34 */ xor tmp0, a_64 vpxor xmm6, xmm6, xmm8 /* XMM6 = W[t-15]>>1 ^ W[t-15]>>8 ^ W[t-15]>>7 ^ W[t-15]<<63 */ lea h_64, [T1 + T2] RORQ tmp0, 28 /* 28 */ vpsllq xmm4, xmm4, (64-19) /* XMM4 = W[t-2]<<25 */ add h_64, tmp0 RotateState vpxor xmm0, xmm0, xmm4 /* XMM0 = W[t-2]>>61 ^ W[t-2]>>19 ^ W[t-2]<<25 */ mov T1, f_64 vpxor xmm0, xmm0, xmm2 /* XMM0 = s1(W[t-2]) */ mov tmp0, e_64 xor T1, g_64 vpaddq xmm0, xmm0, [W_t(\t-16)] /* XMM0 = s1(W[t-2]) + W[t-16] */ vmovdqu xmm1, [W_t(\t- 7)] /* XMM1 = W[t-7] */ RORQ tmp0, 23 /* 41 */ and T1, e_64 xor tmp0, e_64 xor T1, g_64 vpsllq xmm5, xmm5, (64-8) /* XMM5 = W[t-15]<<56 */ add T1, [WK_2(\t+1)] vpxor xmm6, xmm6, xmm5 /* XMM6 = s0(W[t-15]) */ RORQ tmp0, 4 /* 18 */ vpaddq xmm0, xmm0, xmm6 /* XMM0 = s1(W[t-2]) + W[t-16] + s0(W[t-15]) */ xor tmp0, e_64 vpaddq xmm0, xmm0, xmm1 /* XMM0 = W[t] = s1(W[t-2]) + W[t-7] + s0(W[t-15]) + W[t-16] */ mov T2, a_64 add T1, h_64 RORQ tmp0, 14 /* 14 */ add T1, tmp0 vmovdqa [W_t(\t)], xmm0 /* Store W[t] */ vpaddq xmm0, xmm0, [K_t(t)] /* Compute W[t]+K[t] */ vmovdqa [WK_2(t)], xmm0 /* Store W[t]+K[t] for next rounds */ mov tmp0, a_64 xor T2, c_64 and tmp0, c_64 and T2, b_64 xor T2, tmp0 mov tmp0, a_64 RORQ tmp0, 5 /* 39 */ xor tmp0, a_64 add d_64, T1 RORQ tmp0, 6 /* 34 */ xor 
tmp0, a_64 lea h_64, [T1 + T2] RORQ tmp0, 28 /* 28 */ add h_64, tmp0 RotateState .endm /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; void sha512_avx(const void* M, void* D, uint64_t L); ; Purpose: Updates the SHA512 digest stored at D with the message stored in M. ; The size of the message pointed to by M must be an integer multiple of SHA512 ; message blocks. ; L is the message length in SHA512 blocks */ .globl _gcry_sha512_transform_amd64_avx ELF(.type _gcry_sha512_transform_amd64_avx,@function;) .align 16 _gcry_sha512_transform_amd64_avx: xor eax, eax cmp msglen, 0 je .Lnowork vzeroupper /* Allocate Stack Space */ sub rsp, frame_size /* Save GPRs */ mov [rsp + frame_GPRSAVE + 8 * 0], rbx mov [rsp + frame_GPRSAVE + 8 * 1], r12 mov [rsp + frame_GPRSAVE + 8 * 2], r13 mov [rsp + frame_GPRSAVE + 8 * 3], r14 mov [rsp + frame_GPRSAVE + 8 * 4], r15 .Lupdateblock: /* Load state variables */ mov a_64, [DIGEST(0)] mov b_64, [DIGEST(1)] mov c_64, [DIGEST(2)] mov d_64, [DIGEST(3)] mov e_64, [DIGEST(4)] mov f_64, [DIGEST(5)] mov g_64, [DIGEST(6)] mov h_64, [DIGEST(7)] t = 0 .rept 80/2 + 1 /* (80 rounds) / (2 rounds/iteration) + (1 iteration) */ /* +1 iteration because the scheduler leads hashing by 1 iteration */ .if t < 2 /* BSWAP 2 QWORDS */ vmovdqa xmm1, [.LXMM_QWORD_BSWAP ADD_RIP] vmovdqu xmm0, [MSG(t)] vpshufb xmm0, xmm0, xmm1 /* BSWAP */ vmovdqa [W_t(t)], xmm0 /* Store Scheduled Pair */ vpaddq xmm0, xmm0, [K_t(t)] /* Compute W[t]+K[t] */ vmovdqa [WK_2(t)], xmm0 /* Store into WK for rounds */ .elseif t < 16 /* BSWAP 2 QWORDS, Compute 2 Rounds */ vmovdqu xmm0, [MSG(t)] vpshufb xmm0, xmm0, xmm1 /* BSWAP */ SHA512_Round (t - 2) /* Round t-2 */ vmovdqa [W_t(t)], xmm0 /* Store Scheduled Pair */ vpaddq xmm0, xmm0, [K_t(t)] /* Compute W[t]+K[t] */ SHA512_Round (t - 1) /* Round t-1 */ vmovdqa [WK_2(t)], xmm0 /* W[t]+K[t] into WK */ .elseif t < 79 /* Schedule 2 QWORDS; Compute 2 Rounds */ SHA512_2Sched_2Round_avx t .else /* Compute 2 Rounds */ SHA512_Round (t - 2) SHA512_Round (t - 1) .endif t = ((t)+2) .endr /* Update digest */ add [DIGEST(0)], a_64 add [DIGEST(1)], b_64 add [DIGEST(2)], c_64 add [DIGEST(3)], d_64 add [DIGEST(4)], e_64 add [DIGEST(5)], f_64 add [DIGEST(6)], g_64 add [DIGEST(7)], h_64 /* Advance to next message block */ add msg, 16*8 dec msglen jnz .Lupdateblock /* Restore GPRs */ mov rbx, [rsp + frame_GPRSAVE + 8 * 0] mov r12, [rsp + frame_GPRSAVE + 8 * 1] mov r13, [rsp + frame_GPRSAVE + 8 * 2] mov r14, [rsp + frame_GPRSAVE + 8 * 3] mov r15, [rsp + frame_GPRSAVE + 8 * 4] /* Restore Stack Pointer */ add rsp, frame_size vzeroall /* Return stack burn depth */ mov rax, frame_size .Lnowork: ret /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;; Binary Data */ -.data - .align 16 /* Mask for byte-swapping a couple of qwords in an XMM register using (v)pshufb. 
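   Each destination byte i of a qword takes source byte 7-i, so the byte
   order of both 64-bit lanes is reversed (big-endian load of the message
   qwords).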
*/ .LXMM_QWORD_BSWAP: .octa 0x08090a0b0c0d0e0f0001020304050607 /* K[t] used in SHA512 hashing */ .LK512: .quad 0x428a2f98d728ae22,0x7137449123ef65cd .quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc .quad 0x3956c25bf348b538,0x59f111f1b605d019 .quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118 .quad 0xd807aa98a3030242,0x12835b0145706fbe .quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2 .quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1 .quad 0x9bdc06a725c71235,0xc19bf174cf692694 .quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3 .quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65 .quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483 .quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5 .quad 0x983e5152ee66dfab,0xa831c66d2db43210 .quad 0xb00327c898fb213f,0xbf597fc7beef0ee4 .quad 0xc6e00bf33da88fc2,0xd5a79147930aa725 .quad 0x06ca6351e003826f,0x142929670a0e6e70 .quad 0x27b70a8546d22ffc,0x2e1b21385c26c926 .quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df .quad 0x650a73548baf63de,0x766a0abb3c77b2a8 .quad 0x81c2c92e47edaee6,0x92722c851482353b .quad 0xa2bfe8a14cf10364,0xa81a664bbc423001 .quad 0xc24b8b70d0f89791,0xc76c51a30654be30 .quad 0xd192e819d6ef5218,0xd69906245565a910 .quad 0xf40e35855771202a,0x106aa07032bbd1b8 .quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53 .quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8 .quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb .quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3 .quad 0x748f82ee5defb2fc,0x78a5636f43172f60 .quad 0x84c87814a1f0ab72,0x8cc702081a6439ec .quad 0x90befffa23631e28,0xa4506cebde82bde9 .quad 0xbef9a3f7b2c67915,0xc67178f2e372532b .quad 0xca273eceea26619c,0xd186b8c721c0c207 .quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178 .quad 0x06f067aa72176fba,0x0a637dc5a2c898a6 .quad 0x113f9804bef90dae,0x1b710b35131c471b .quad 0x28db77f523047d84,0x32caab7b40c72493 .quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c .quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a .quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817 #endif #endif diff --git a/cipher/sha512-avx2-bmi2-amd64.S b/cipher/sha512-avx2-bmi2-amd64.S index 02f95af6..05bef64c 100644 --- a/cipher/sha512-avx2-bmi2-amd64.S +++ b/cipher/sha512-avx2-bmi2-amd64.S @@ -1,794 +1,792 @@ /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; Copyright (c) 2012, Intel Corporation ; ; All rights reserved. ; ; Redistribution and use in source and binary forms, with or without ; modification, are permitted provided that the following conditions are ; met: ; ; * Redistributions of source code must retain the above copyright ; notice, this list of conditions and the following disclaimer. ; ; * Redistributions in binary form must reproduce the above copyright ; notice, this list of conditions and the following disclaimer in the ; documentation and/or other materials provided with the ; distribution. ; ; * Neither the name of the Intel Corporation nor the names of its ; contributors may be used to endorse or promote products derived from ; this software without specific prior written permission. ; ; ; THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY ; EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR ; PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL INTEL CORPORATION OR ; CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, ; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, ; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ; LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; This code schedules 1 blocks at a time, with 4 lanes per block ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ /* * Conversion to GAS assembly and integration to libgcrypt * by Jussi Kivilinna */ #ifdef __x86_64 #include #if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && \ defined(HAVE_INTEL_SYNTAX_PLATFORM_AS) && \ defined(HAVE_GCC_INLINE_ASM_AVX2) && defined(HAVE_GCC_INLINE_ASM_BMI2) && \ defined(USE_SHA512) #ifdef __PIC__ # define ADD_RIP +rip #else # define ADD_RIP #endif #ifdef HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS # define ELF(...) __VA_ARGS__ #else # define ELF(...) /*_*/ #endif .intel_syntax noprefix .text /* Virtual Registers */ Y_0 = ymm4 Y_1 = ymm5 Y_2 = ymm6 Y_3 = ymm7 YTMP0 = ymm0 YTMP1 = ymm1 YTMP2 = ymm2 YTMP3 = ymm3 YTMP4 = ymm8 XFER = YTMP0 BYTE_FLIP_MASK = ymm9 INP = rdi /* 1st arg */ CTX = rsi /* 2nd arg */ NUM_BLKS = rdx /* 3rd arg */ c = rcx d = r8 e = rdx y3 = rdi TBL = rbp a = rax b = rbx f = r9 g = r10 h = r11 old_h = r11 T1 = r12 y0 = r13 y1 = r14 y2 = r15 y4 = r12 /* Local variables (stack frame) */ #define frame_XFER 0 #define frame_XFER_size (4*8) #define frame_SRND (frame_XFER + frame_XFER_size) #define frame_SRND_size (1*8) #define frame_INP (frame_SRND + frame_SRND_size) #define frame_INP_size (1*8) #define frame_INPEND (frame_INP + frame_INP_size) #define frame_INPEND_size (1*8) #define frame_RSPSAVE (frame_INPEND + frame_INPEND_size) #define frame_RSPSAVE_size (1*8) #define frame_GPRSAVE (frame_RSPSAVE + frame_RSPSAVE_size) #define frame_GPRSAVE_size (6*8) #define frame_size (frame_GPRSAVE + frame_GPRSAVE_size) #define VMOVDQ vmovdqu /*; assume buffers not aligned */ /* addm [mem], reg */ /* Add reg to mem using reg-mem add and store */ .macro addm p1 p2 add \p2, \p1 mov \p1, \p2 .endm /* COPY_YMM_AND_BSWAP ymm, [mem], byte_flip_mask */ /* Load ymm with mem and byte swap each dword */ .macro COPY_YMM_AND_BSWAP p1 p2 p3 VMOVDQ \p1, \p2 vpshufb \p1, \p1, \p3 .endm /* rotate_Ys */ /* Rotate values of symbols Y0...Y3 */ .macro rotate_Ys __Y_ = Y_0 Y_0 = Y_1 Y_1 = Y_2 Y_2 = Y_3 Y_3 = __Y_ .endm /* RotateState */ .macro RotateState /* Rotate symbles a..h right */ old_h = h __TMP_ = h h = g g = f f = e e = d d = c c = b b = a a = __TMP_ .endm /* %macro MY_VPALIGNR YDST, YSRC1, YSRC2, RVAL */ /* YDST = {YSRC1, YSRC2} >> RVAL*8 */ .macro MY_VPALIGNR YDST, YSRC1, YSRC2, RVAL vperm2f128 \YDST, \YSRC1, \YSRC2, 0x3 /* YDST = {YS1_LO, YS2_HI} */ vpalignr \YDST, \YDST, \YSRC2, \RVAL /* YDST = {YDS1, YS2} >> RVAL*8 */ .endm .macro FOUR_ROUNDS_AND_SCHED /*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 0 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ /* Extract w[t-7] */ MY_VPALIGNR YTMP0, Y_3, Y_2, 8 /* YTMP0 = W[-7] */ /* Calculate w[t-16] + w[t-7] */ vpaddq YTMP0, YTMP0, Y_0 /* YTMP0 = W[-7] + W[-16] */ /* Extract w[t-15] */ MY_VPALIGNR YTMP1, Y_1, Y_0, 8 /* YTMP1 = W[-15] */ /* Calculate sigma0 */ /* 
Calculate w[t-15] ror 1 */ vpsrlq YTMP2, YTMP1, 1 vpsllq YTMP3, YTMP1, (64-1) vpor YTMP3, YTMP3, YTMP2 /* YTMP3 = W[-15] ror 1 */ /* Calculate w[t-15] shr 7 */ vpsrlq YTMP4, YTMP1, 7 /* YTMP4 = W[-15] >> 7 */ mov y3, a /* y3 = a ; MAJA */ rorx y0, e, 41 /* y0 = e >> 41 ; S1A */ rorx y1, e, 18 /* y1 = e >> 18 ; S1B */ add h, [rsp+frame_XFER+0*8] /* h = k + w + h ; -- */ or y3, c /* y3 = a|c ; MAJA */ mov y2, f /* y2 = f ; CH */ rorx T1, a, 34 /* T1 = a >> 34 ; S0B */ xor y0, y1 /* y0 = (e>>41) ^ (e>>18) ; S1 */ xor y2, g /* y2 = f^g ; CH */ rorx y1, e, 14 /* y1 = (e >> 14) ; S1 */ and y2, e /* y2 = (f^g)&e ; CH */ xor y0, y1 /* y0 = (e>>41) ^ (e>>18) ^ (e>>14) ; S1 */ rorx y1, a, 39 /* y1 = a >> 39 ; S0A */ add d, h /* d = k + w + h + d ; -- */ and y3, b /* y3 = (a|c)&b ; MAJA */ xor y1, T1 /* y1 = (a>>39) ^ (a>>34) ; S0 */ rorx T1, a, 28 /* T1 = (a >> 28) ; S0 */ xor y2, g /* y2 = CH = ((f^g)&e)^g ; CH */ xor y1, T1 /* y1 = (a>>39) ^ (a>>34) ^ (a>>28) ; S0 */ mov T1, a /* T1 = a ; MAJB */ and T1, c /* T1 = a&c ; MAJB */ add y2, y0 /* y2 = S1 + CH ; -- */ or y3, T1 /* y3 = MAJ = (a|c)&b)|(a&c) ; MAJ */ add h, y1 /* h = k + w + h + S0 ; -- */ add d, y2 /* d = k + w + h + d + S1 + CH = d + t1 ; -- */ add h, y2 /* h = k + w + h + S0 + S1 + CH = t1 + S0 ; -- */ add h, y3 /* h = t1 + S0 + MAJ ; -- */ RotateState /*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 1 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ /*;;;;;;;;;;;;;;;;;;;;;;;;; */ /* Calculate w[t-15] ror 8 */ vpsrlq YTMP2, YTMP1, 8 vpsllq YTMP1, YTMP1, (64-8) vpor YTMP1, YTMP1, YTMP2 /* YTMP1 = W[-15] ror 8 */ /* XOR the three components */ vpxor YTMP3, YTMP3, YTMP4 /* YTMP3 = W[-15] ror 1 ^ W[-15] >> 7 */ vpxor YTMP1, YTMP3, YTMP1 /* YTMP1 = s0 */ /* Add three components, w[t-16], w[t-7] and sigma0 */ vpaddq YTMP0, YTMP0, YTMP1 /* YTMP0 = W[-16] + W[-7] + s0 */ /* Move to appropriate lanes for calculating w[16] and w[17] */ vperm2f128 Y_0, YTMP0, YTMP0, 0x0 /* Y_0 = W[-16] + W[-7] + s0 {BABA} */ /* Move to appropriate lanes for calculating w[18] and w[19] */ vpand YTMP0, YTMP0, [.LMASK_YMM_LO ADD_RIP] /* YTMP0 = W[-16] + W[-7] + s0 {DC00} */ /* Calculate w[16] and w[17] in both 128 bit lanes */ /* Calculate sigma1 for w[16] and w[17] on both 128 bit lanes */ vperm2f128 YTMP2, Y_3, Y_3, 0x11 /* YTMP2 = W[-2] {BABA} */ vpsrlq YTMP4, YTMP2, 6 /* YTMP4 = W[-2] >> 6 {BABA} */ mov y3, a /* y3 = a ; MAJA */ rorx y0, e, 41 /* y0 = e >> 41 ; S1A */ rorx y1, e, 18 /* y1 = e >> 18 ; S1B */ add h, [rsp+frame_XFER+1*8] /* h = k + w + h ; -- */ or y3, c /* y3 = a|c ; MAJA */ mov y2, f /* y2 = f ; CH */ rorx T1, a, 34 /* T1 = a >> 34 ; S0B */ xor y0, y1 /* y0 = (e>>41) ^ (e>>18) ; S1 */ xor y2, g /* y2 = f^g ; CH */ rorx y1, e, 14 /* y1 = (e >> 14) ; S1 */ xor y0, y1 /* y0 = (e>>41) ^ (e>>18) ^ (e>>14) ; S1 */ rorx y1, a, 39 /* y1 = a >> 39 ; S0A */ and y2, e /* y2 = (f^g)&e ; CH */ add d, h /* d = k + w + h + d ; -- */ and y3, b /* y3 = (a|c)&b ; MAJA */ xor y1, T1 /* y1 = (a>>39) ^ (a>>34) ; S0 */ rorx T1, a, 28 /* T1 = (a >> 28) ; S0 */ xor y2, g /* y2 = CH = ((f^g)&e)^g ; CH */ xor y1, T1 /* y1 = (a>>39) ^ (a>>34) ^ (a>>28) ; S0 */ mov T1, a /* T1 = a ; MAJB */ and T1, c /* T1 = a&c ; MAJB */ add y2, y0 /* y2 = S1 + CH ; -- */ or y3, T1 /* y3 = MAJ = (a|c)&b)|(a&c) ; MAJ */ add h, y1 /* h = k + w + h + S0 ; -- */ add d, y2 /* d = k + w + h + d + S1 + CH = d + t1 ; -- */ add h, y2 /* h = k + w + h + S0 + S1 + CH = t1 + S0 ; -- */ add h, y3 /* h = t1 + S0 + MAJ ; -- */ RotateState /*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 2 
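;;
;; (sigma1 reference for the vector code below:
;;    s1(x) = (x ror 19) ^ (x ror 61) ^ (x >> 6)
;;  each 64-bit rotate is built from a vpsrlq/vpsllq/vpor triple, since AVX2
;;  provides no 64-bit vector rotate instruction)
;;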
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ /*;;;;;;;;;;;;;;;;;;;;;;;;; */ vpsrlq YTMP3, YTMP2, 19 /* YTMP3 = W[-2] >> 19 {BABA} */ vpsllq YTMP1, YTMP2, (64-19) /* YTMP1 = W[-2] << 19 {BABA} */ vpor YTMP3, YTMP3, YTMP1 /* YTMP3 = W[-2] ror 19 {BABA} */ vpxor YTMP4, YTMP4, YTMP3 /* YTMP4 = W[-2] ror 19 ^ W[-2] >> 6 {BABA} */ vpsrlq YTMP3, YTMP2, 61 /* YTMP3 = W[-2] >> 61 {BABA} */ vpsllq YTMP1, YTMP2, (64-61) /* YTMP1 = W[-2] << 61 {BABA} */ vpor YTMP3, YTMP3, YTMP1 /* YTMP3 = W[-2] ror 61 {BABA} */ vpxor YTMP4, YTMP4, YTMP3 /* YTMP4 = s1 = (W[-2] ror 19) ^ (W[-2] ror 61) ^ (W[-2] >> 6) {BABA} */ /* Add sigma1 to the other compunents to get w[16] and w[17] */ vpaddq Y_0, Y_0, YTMP4 /* Y_0 = {W[1], W[0], W[1], W[0]} */ /* Calculate sigma1 for w[18] and w[19] for upper 128 bit lane */ vpsrlq YTMP4, Y_0, 6 /* YTMP4 = W[-2] >> 6 {DC--} */ mov y3, a /* y3 = a ; MAJA */ rorx y0, e, 41 /* y0 = e >> 41 ; S1A */ add h, [rsp+frame_XFER+2*8] /* h = k + w + h ; -- */ rorx y1, e, 18 /* y1 = e >> 18 ; S1B */ or y3, c /* y3 = a|c ; MAJA */ mov y2, f /* y2 = f ; CH */ xor y2, g /* y2 = f^g ; CH */ rorx T1, a, 34 /* T1 = a >> 34 ; S0B */ xor y0, y1 /* y0 = (e>>41) ^ (e>>18) ; S1 */ and y2, e /* y2 = (f^g)&e ; CH */ rorx y1, e, 14 /* y1 = (e >> 14) ; S1 */ add d, h /* d = k + w + h + d ; -- */ and y3, b /* y3 = (a|c)&b ; MAJA */ xor y0, y1 /* y0 = (e>>41) ^ (e>>18) ^ (e>>14) ; S1 */ rorx y1, a, 39 /* y1 = a >> 39 ; S0A */ xor y2, g /* y2 = CH = ((f^g)&e)^g ; CH */ xor y1, T1 /* y1 = (a>>39) ^ (a>>34) ; S0 */ rorx T1, a, 28 /* T1 = (a >> 28) ; S0 */ xor y1, T1 /* y1 = (a>>39) ^ (a>>34) ^ (a>>28) ; S0 */ mov T1, a /* T1 = a ; MAJB */ and T1, c /* T1 = a&c ; MAJB */ add y2, y0 /* y2 = S1 + CH ; -- */ or y3, T1 /* y3 = MAJ = (a|c)&b)|(a&c) ; MAJ */ add h, y1 /* h = k + w + h + S0 ; -- */ add d, y2 /* d = k + w + h + d + S1 + CH = d + t1 ; -- */ add h, y2 /* h = k + w + h + S0 + S1 + CH = t1 + S0 ; -- */ add h, y3 /* h = t1 + S0 + MAJ ; -- */ RotateState /*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 3 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ /*;;;;;;;;;;;;;;;;;;;;;;;;; */ vpsrlq YTMP3, Y_0, 19 /* YTMP3 = W[-2] >> 19 {DC--} */ vpsllq YTMP1, Y_0, (64-19) /* YTMP1 = W[-2] << 19 {DC--} */ vpor YTMP3, YTMP3, YTMP1 /* YTMP3 = W[-2] ror 19 {DC--} */ vpxor YTMP4, YTMP4, YTMP3 /* YTMP4 = W[-2] ror 19 ^ W[-2] >> 6 {DC--} */ vpsrlq YTMP3, Y_0, 61 /* YTMP3 = W[-2] >> 61 {DC--} */ vpsllq YTMP1, Y_0, (64-61) /* YTMP1 = W[-2] << 61 {DC--} */ vpor YTMP3, YTMP3, YTMP1 /* YTMP3 = W[-2] ror 61 {DC--} */ vpxor YTMP4, YTMP4, YTMP3 /* YTMP4 = s1 = (W[-2] ror 19) ^ (W[-2] ror 61) ^ (W[-2] >> 6) {DC--} */ /* Add the sigma0 + w[t-7] + w[t-16] for w[18] and w[19] to newly calculated sigma1 to get w[18] and w[19] */ vpaddq YTMP2, YTMP0, YTMP4 /* YTMP2 = {W[3], W[2], --, --} */ /* Form w[19, w[18], w17], w[16] */ vpblendd Y_0, Y_0, YTMP2, 0xF0 /* Y_0 = {W[3], W[2], W[1], W[0]} */ /* vperm2f128 Y_0, Y_0, YTMP2, 0x30 */ mov y3, a /* y3 = a ; MAJA */ rorx y0, e, 41 /* y0 = e >> 41 ; S1A */ rorx y1, e, 18 /* y1 = e >> 18 ; S1B */ add h, [rsp+frame_XFER+3*8] /* h = k + w + h ; -- */ or y3, c /* y3 = a|c ; MAJA */ mov y2, f /* y2 = f ; CH */ rorx T1, a, 34 /* T1 = a >> 34 ; S0B */ xor y0, y1 /* y0 = (e>>41) ^ (e>>18) ; S1 */ xor y2, g /* y2 = f^g ; CH */ rorx y1, e, 14 /* y1 = (e >> 14) ; S1 */ and y2, e /* y2 = (f^g)&e ; CH */ add d, h /* d = k + w + h + d ; -- */ and y3, b /* y3 = (a|c)&b ; MAJA */ xor y0, y1 /* y0 = (e>>41) ^ (e>>18) ^ (e>>14) ; S1 */ xor y2, g /* y2 = CH = ((f^g)&e)^g ; CH */ rorx y1, a, 39 /* y1 = a >> 39 ; S0A */ add 
y2, y0 /* y2 = S1 + CH ; -- */ xor y1, T1 /* y1 = (a>>39) ^ (a>>34) ; S0 */ add d, y2 /* d = k + w + h + d + S1 + CH = d + t1 ; -- */ rorx T1, a, 28 /* T1 = (a >> 28) ; S0 */ xor y1, T1 /* y1 = (a>>39) ^ (a>>34) ^ (a>>28) ; S0 */ mov T1, a /* T1 = a ; MAJB */ and T1, c /* T1 = a&c ; MAJB */ or y3, T1 /* y3 = MAJ = (a|c)&b)|(a&c) ; MAJ */ add h, y1 /* h = k + w + h + S0 ; -- */ add h, y2 /* h = k + w + h + S0 + S1 + CH = t1 + S0 ; -- */ add h, y3 /* h = t1 + S0 + MAJ ; -- */ RotateState rotate_Ys .endm .macro DO_4ROUNDS /*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 0 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ mov y2, f /* y2 = f ; CH */ rorx y0, e, 41 /* y0 = e >> 41 ; S1A */ rorx y1, e, 18 /* y1 = e >> 18 ; S1B */ xor y2, g /* y2 = f^g ; CH */ xor y0, y1 /* y0 = (e>>41) ^ (e>>18) ; S1 */ rorx y1, e, 14 /* y1 = (e >> 14) ; S1 */ and y2, e /* y2 = (f^g)&e ; CH */ xor y0, y1 /* y0 = (e>>41) ^ (e>>18) ^ (e>>14) ; S1 */ rorx T1, a, 34 /* T1 = a >> 34 ; S0B */ xor y2, g /* y2 = CH = ((f^g)&e)^g ; CH */ rorx y1, a, 39 /* y1 = a >> 39 ; S0A */ mov y3, a /* y3 = a ; MAJA */ xor y1, T1 /* y1 = (a>>39) ^ (a>>34) ; S0 */ rorx T1, a, 28 /* T1 = (a >> 28) ; S0 */ add h, [rsp + frame_XFER + 8*0] /* h = k + w + h ; -- */ or y3, c /* y3 = a|c ; MAJA */ xor y1, T1 /* y1 = (a>>39) ^ (a>>34) ^ (a>>28) ; S0 */ mov T1, a /* T1 = a ; MAJB */ and y3, b /* y3 = (a|c)&b ; MAJA */ and T1, c /* T1 = a&c ; MAJB */ add y2, y0 /* y2 = S1 + CH ; -- */ add d, h /* d = k + w + h + d ; -- */ or y3, T1 /* y3 = MAJ = (a|c)&b)|(a&c) ; MAJ */ add h, y1 /* h = k + w + h + S0 ; -- */ add d, y2 /* d = k + w + h + d + S1 + CH = d + t1 ; -- */ /*add h, y2 ; h = k + w + h + S0 + S1 + CH = t1 + S0 ; -- */ /*add h, y3 ; h = t1 + S0 + MAJ ; -- */ RotateState /*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 1 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ add old_h, y2 /* h = k + w + h + S0 + S1 + CH = t1 + S0 ; -- */ mov y2, f /* y2 = f ; CH */ rorx y0, e, 41 /* y0 = e >> 41 ; S1A */ rorx y1, e, 18 /* y1 = e >> 18 ; S1B */ xor y2, g /* y2 = f^g ; CH */ xor y0, y1 /* y0 = (e>>41) ^ (e>>18) ; S1 */ rorx y1, e, 14 /* y1 = (e >> 14) ; S1 */ and y2, e /* y2 = (f^g)&e ; CH */ add old_h, y3 /* h = t1 + S0 + MAJ ; -- */ xor y0, y1 /* y0 = (e>>41) ^ (e>>18) ^ (e>>14) ; S1 */ rorx T1, a, 34 /* T1 = a >> 34 ; S0B */ xor y2, g /* y2 = CH = ((f^g)&e)^g ; CH */ rorx y1, a, 39 /* y1 = a >> 39 ; S0A */ mov y3, a /* y3 = a ; MAJA */ xor y1, T1 /* y1 = (a>>39) ^ (a>>34) ; S0 */ rorx T1, a, 28 /* T1 = (a >> 28) ; S0 */ add h, [rsp + frame_XFER + 8*1] /* h = k + w + h ; -- */ or y3, c /* y3 = a|c ; MAJA */ xor y1, T1 /* y1 = (a>>39) ^ (a>>34) ^ (a>>28) ; S0 */ mov T1, a /* T1 = a ; MAJB */ and y3, b /* y3 = (a|c)&b ; MAJA */ and T1, c /* T1 = a&c ; MAJB */ add y2, y0 /* y2 = S1 + CH ; -- */ add d, h /* d = k + w + h + d ; -- */ or y3, T1 /* y3 = MAJ = (a|c)&b)|(a&c) ; MAJ */ add h, y1 /* h = k + w + h + S0 ; -- */ add d, y2 /* d = k + w + h + d + S1 + CH = d + t1 ; -- */ /*add h, y2 ; h = k + w + h + S0 + S1 + CH = t1 + S0 ; -- */ /*add h, y3 ; h = t1 + S0 + MAJ ; -- */ RotateState /*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 2 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ add old_h, y2 /* h = k + w + h + S0 + S1 + CH = t1 + S0 ; -- */ mov y2, f /* y2 = f ; CH */ rorx y0, e, 41 /* y0 = e >> 41 ; S1A */ rorx y1, e, 18 /* y1 = e >> 18 ; S1B */ xor y2, g /* y2 = f^g ; CH */ xor y0, y1 /* y0 = (e>>41) ^ (e>>18) ; S1 */ rorx y1, e, 14 /* y1 = (e >> 14) ; S1 */ and y2, e /* y2 = (f^g)&e ; CH */ add old_h, y3 /* h = t1 + S0 + MAJ ; -- */ xor y0, y1 /* y0 = (e>>41) ^ 
(e>>18) ^ (e>>14) ; S1 */ rorx T1, a, 34 /* T1 = a >> 34 ; S0B */ xor y2, g /* y2 = CH = ((f^g)&e)^g ; CH */ rorx y1, a, 39 /* y1 = a >> 39 ; S0A */ mov y3, a /* y3 = a ; MAJA */ xor y1, T1 /* y1 = (a>>39) ^ (a>>34) ; S0 */ rorx T1, a, 28 /* T1 = (a >> 28) ; S0 */ add h, [rsp + frame_XFER + 8*2] /* h = k + w + h ; -- */ or y3, c /* y3 = a|c ; MAJA */ xor y1, T1 /* y1 = (a>>39) ^ (a>>34) ^ (a>>28) ; S0 */ mov T1, a /* T1 = a ; MAJB */ and y3, b /* y3 = (a|c)&b ; MAJA */ and T1, c /* T1 = a&c ; MAJB */ add y2, y0 /* y2 = S1 + CH ; -- */ add d, h /* d = k + w + h + d ; -- */ or y3, T1 /* y3 = MAJ = (a|c)&b)|(a&c) ; MAJ */ add h, y1 /* h = k + w + h + S0 ; -- */ add d, y2 /* d = k + w + h + d + S1 + CH = d + t1 ; -- */ /*add h, y2 ; h = k + w + h + S0 + S1 + CH = t1 + S0 ; -- */ /*add h, y3 ; h = t1 + S0 + MAJ ; -- */ RotateState /*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 3 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ add old_h, y2 /* h = k + w + h + S0 + S1 + CH = t1 + S0 ; -- */ mov y2, f /* y2 = f ; CH */ rorx y0, e, 41 /* y0 = e >> 41 ; S1A */ rorx y1, e, 18 /* y1 = e >> 18 ; S1B */ xor y2, g /* y2 = f^g ; CH */ xor y0, y1 /* y0 = (e>>41) ^ (e>>18) ; S1 */ rorx y1, e, 14 /* y1 = (e >> 14) ; S1 */ and y2, e /* y2 = (f^g)&e ; CH */ add old_h, y3 /* h = t1 + S0 + MAJ ; -- */ xor y0, y1 /* y0 = (e>>41) ^ (e>>18) ^ (e>>14) ; S1 */ rorx T1, a, 34 /* T1 = a >> 34 ; S0B */ xor y2, g /* y2 = CH = ((f^g)&e)^g ; CH */ rorx y1, a, 39 /* y1 = a >> 39 ; S0A */ mov y3, a /* y3 = a ; MAJA */ xor y1, T1 /* y1 = (a>>39) ^ (a>>34) ; S0 */ rorx T1, a, 28 /* T1 = (a >> 28) ; S0 */ add h, [rsp + frame_XFER + 8*3] /* h = k + w + h ; -- */ or y3, c /* y3 = a|c ; MAJA */ xor y1, T1 /* y1 = (a>>39) ^ (a>>34) ^ (a>>28) ; S0 */ mov T1, a /* T1 = a ; MAJB */ and y3, b /* y3 = (a|c)&b ; MAJA */ and T1, c /* T1 = a&c ; MAJB */ add y2, y0 /* y2 = S1 + CH ; -- */ add d, h /* d = k + w + h + d ; -- */ or y3, T1 /* y3 = MAJ = (a|c)&b)|(a&c) ; MAJ */ add h, y1 /* h = k + w + h + S0 ; -- */ add d, y2 /* d = k + w + h + d + S1 + CH = d + t1 ; -- */ add h, y2 /* h = k + w + h + S0 + S1 + CH = t1 + S0 ; -- */ add h, y3 /* h = t1 + S0 + MAJ ; -- */ RotateState .endm /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; void sha512_rorx(const void* M, void* D, uint64_t L); ; Purpose: Updates the SHA512 digest stored at D with the message stored in M. ; The size of the message pointed to by M must be an integer multiple of SHA512 ; message blocks. 
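; (Each SHA512 message block is 128 bytes; the main loop below advances the
;  input pointer by 128 per block.)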
; L is the message length in SHA512 blocks */ .globl _gcry_sha512_transform_amd64_avx2 ELF(.type _gcry_sha512_transform_amd64_avx2,@function;) .align 16 _gcry_sha512_transform_amd64_avx2: xor eax, eax cmp rdx, 0 je .Lnowork vzeroupper /* Allocate Stack Space */ mov rax, rsp sub rsp, frame_size and rsp, ~(0x20 - 1) mov [rsp + frame_RSPSAVE], rax /* Save GPRs */ mov [rsp + frame_GPRSAVE + 8 * 0], rbp mov [rsp + frame_GPRSAVE + 8 * 1], rbx mov [rsp + frame_GPRSAVE + 8 * 2], r12 mov [rsp + frame_GPRSAVE + 8 * 3], r13 mov [rsp + frame_GPRSAVE + 8 * 4], r14 mov [rsp + frame_GPRSAVE + 8 * 5], r15 vpblendd xmm0, xmm0, xmm1, 0xf0 vpblendd ymm0, ymm0, ymm1, 0xf0 shl NUM_BLKS, 7 /* convert to bytes */ jz .Ldone_hash add NUM_BLKS, INP /* pointer to end of data */ mov [rsp + frame_INPEND], NUM_BLKS /*; load initial digest */ mov a,[8*0 + CTX] mov b,[8*1 + CTX] mov c,[8*2 + CTX] mov d,[8*3 + CTX] mov e,[8*4 + CTX] mov f,[8*5 + CTX] mov g,[8*6 + CTX] mov h,[8*7 + CTX] vmovdqa BYTE_FLIP_MASK, [.LPSHUFFLE_BYTE_FLIP_MASK ADD_RIP] .Loop0: lea TBL,[.LK512 ADD_RIP] /*; byte swap first 16 dwords */ COPY_YMM_AND_BSWAP Y_0, [INP + 0*32], BYTE_FLIP_MASK COPY_YMM_AND_BSWAP Y_1, [INP + 1*32], BYTE_FLIP_MASK COPY_YMM_AND_BSWAP Y_2, [INP + 2*32], BYTE_FLIP_MASK COPY_YMM_AND_BSWAP Y_3, [INP + 3*32], BYTE_FLIP_MASK mov [rsp + frame_INP], INP /*; schedule 64 input dwords, by doing 12 rounds of 4 each */ movq [rsp + frame_SRND],4 .align 16 .Loop1: vpaddq XFER, Y_0, [TBL + 0*32] vmovdqa [rsp + frame_XFER], XFER FOUR_ROUNDS_AND_SCHED vpaddq XFER, Y_0, [TBL + 1*32] vmovdqa [rsp + frame_XFER], XFER FOUR_ROUNDS_AND_SCHED vpaddq XFER, Y_0, [TBL + 2*32] vmovdqa [rsp + frame_XFER], XFER FOUR_ROUNDS_AND_SCHED vpaddq XFER, Y_0, [TBL + 3*32] vmovdqa [rsp + frame_XFER], XFER add TBL, 4*32 FOUR_ROUNDS_AND_SCHED subq [rsp + frame_SRND], 1 jne .Loop1 movq [rsp + frame_SRND], 2 .Loop2: vpaddq XFER, Y_0, [TBL + 0*32] vmovdqa [rsp + frame_XFER], XFER DO_4ROUNDS vpaddq XFER, Y_1, [TBL + 1*32] vmovdqa [rsp + frame_XFER], XFER add TBL, 2*32 DO_4ROUNDS vmovdqa Y_0, Y_2 vmovdqa Y_1, Y_3 subq [rsp + frame_SRND], 1 jne .Loop2 addm [8*0 + CTX],a addm [8*1 + CTX],b addm [8*2 + CTX],c addm [8*3 + CTX],d addm [8*4 + CTX],e addm [8*5 + CTX],f addm [8*6 + CTX],g addm [8*7 + CTX],h mov INP, [rsp + frame_INP] add INP, 128 cmp INP, [rsp + frame_INPEND] jne .Loop0 .Ldone_hash: /* Restore GPRs */ mov rbp, [rsp + frame_GPRSAVE + 8 * 0] mov rbx, [rsp + frame_GPRSAVE + 8 * 1] mov r12, [rsp + frame_GPRSAVE + 8 * 2] mov r13, [rsp + frame_GPRSAVE + 8 * 3] mov r14, [rsp + frame_GPRSAVE + 8 * 4] mov r15, [rsp + frame_GPRSAVE + 8 * 5] /* Restore Stack Pointer */ mov rsp, [rsp + frame_RSPSAVE] vzeroall mov eax, frame_size + 31 .Lnowork: ret /*;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ /*;; Binary Data */ -.data - .align 64 /* K[t] used in SHA512 hashing */ .LK512: .quad 0x428a2f98d728ae22,0x7137449123ef65cd .quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc .quad 0x3956c25bf348b538,0x59f111f1b605d019 .quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118 .quad 0xd807aa98a3030242,0x12835b0145706fbe .quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2 .quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1 .quad 0x9bdc06a725c71235,0xc19bf174cf692694 .quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3 .quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65 .quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483 .quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5 .quad 0x983e5152ee66dfab,0xa831c66d2db43210 .quad 0xb00327c898fb213f,0xbf597fc7beef0ee4 .quad 0xc6e00bf33da88fc2,0xd5a79147930aa725 .quad 
0x06ca6351e003826f,0x142929670a0e6e70 .quad 0x27b70a8546d22ffc,0x2e1b21385c26c926 .quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df .quad 0x650a73548baf63de,0x766a0abb3c77b2a8 .quad 0x81c2c92e47edaee6,0x92722c851482353b .quad 0xa2bfe8a14cf10364,0xa81a664bbc423001 .quad 0xc24b8b70d0f89791,0xc76c51a30654be30 .quad 0xd192e819d6ef5218,0xd69906245565a910 .quad 0xf40e35855771202a,0x106aa07032bbd1b8 .quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53 .quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8 .quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb .quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3 .quad 0x748f82ee5defb2fc,0x78a5636f43172f60 .quad 0x84c87814a1f0ab72,0x8cc702081a6439ec .quad 0x90befffa23631e28,0xa4506cebde82bde9 .quad 0xbef9a3f7b2c67915,0xc67178f2e372532b .quad 0xca273eceea26619c,0xd186b8c721c0c207 .quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178 .quad 0x06f067aa72176fba,0x0a637dc5a2c898a6 .quad 0x113f9804bef90dae,0x1b710b35131c471b .quad 0x28db77f523047d84,0x32caab7b40c72493 .quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c .quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a .quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817 .align 32 /* Mask for byte-swapping a couple of qwords in an XMM register using (v)pshufb. */ .LPSHUFFLE_BYTE_FLIP_MASK: .octa 0x08090a0b0c0d0e0f0001020304050607 .octa 0x18191a1b1c1d1e1f1011121314151617 .LMASK_YMM_LO: .octa 0x00000000000000000000000000000000 .octa 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF #endif #endif diff --git a/cipher/sha512-ssse3-amd64.S b/cipher/sha512-ssse3-amd64.S index c721bcf2..51193b36 100644 --- a/cipher/sha512-ssse3-amd64.S +++ b/cipher/sha512-ssse3-amd64.S @@ -1,428 +1,426 @@ /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; Copyright (c) 2012, Intel Corporation ; ; All rights reserved. ; ; Redistribution and use in source and binary forms, with or without ; modification, are permitted provided that the following conditions are ; met: ; ; * Redistributions of source code must retain the above copyright ; notice, this list of conditions and the following disclaimer. ; ; * Redistributions in binary form must reproduce the above copyright ; notice, this list of conditions and the following disclaimer in the ; documentation and/or other materials provided with the ; distribution. ; ; * Neither the name of the Intel Corporation nor the names of its ; contributors may be used to endorse or promote products derived from ; this software without specific prior written permission. ; ; ; THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY ; EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR ; PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR ; CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, ; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, ; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ; LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; */ /* * Conversion to GAS assembly and integration to libgcrypt * by Jussi Kivilinna * * Note: original implementation was named as SHA512-SSE4. However, only SSSE3 * is required. 
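 * (The SSSE3 requirement comes from pshufb, used for the big-endian byte
 * swap of the message words; the remaining vector instructions are plain
 * SSE2.)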
*/ #ifdef __x86_64 #include #if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && \ defined(HAVE_INTEL_SYNTAX_PLATFORM_AS) && \ defined(HAVE_GCC_INLINE_ASM_SSSE3) && defined(USE_SHA512) #ifdef __PIC__ # define ADD_RIP +rip #else # define ADD_RIP #endif #ifdef HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS # define ELF(...) __VA_ARGS__ #else # define ELF(...) /*_*/ #endif .intel_syntax noprefix .text /* Virtual Registers */ msg = rdi /* ARG1 */ digest = rsi /* ARG2 */ msglen = rdx /* ARG3 */ T1 = rcx T2 = r8 a_64 = r9 b_64 = r10 c_64 = r11 d_64 = r12 e_64 = r13 f_64 = r14 g_64 = r15 h_64 = rbx tmp0 = rax /* ; Local variables (stack frame) ; Note: frame_size must be an odd multiple of 8 bytes to XMM align RSP */ frame_W = 0 /* Message Schedule */ frame_W_size = (80 * 8) frame_WK = ((frame_W) + (frame_W_size)) /* W[t] + K[t] | W[t+1] + K[t+1] */ frame_WK_size = (2 * 8) frame_GPRSAVE = ((frame_WK) + (frame_WK_size)) frame_GPRSAVE_size = (5 * 8) frame_size = ((frame_GPRSAVE) + (frame_GPRSAVE_size)) /* Useful QWORD "arrays" for simpler memory references */ #define MSG(i) msg + 8*(i) /* Input message (arg1) */ #define DIGEST(i) digest + 8*(i) /* Output Digest (arg2) */ #define K_t(i) .LK512 + 8*(i) ADD_RIP /* SHA Constants (static mem) */ #define W_t(i) rsp + frame_W + 8*(i) /* Message Schedule (stack frame) */ #define WK_2(i) rsp + frame_WK + 8*((i) % 2) /* W[t]+K[t] (stack frame) */ /* MSG, DIGEST, K_t, W_t are arrays */ /* WK_2(t) points to 1 of 2 qwords at frame.WK depdending on t being odd/even */ .macro RotateState /* Rotate symbles a..h right */ __TMP = h_64 h_64 = g_64 g_64 = f_64 f_64 = e_64 e_64 = d_64 d_64 = c_64 c_64 = b_64 b_64 = a_64 a_64 = __TMP .endm .macro SHA512_Round t /* Compute Round %%t */ mov T1, f_64 /* T1 = f */ mov tmp0, e_64 /* tmp = e */ xor T1, g_64 /* T1 = f ^ g */ ror tmp0, 23 /* 41 ; tmp = e ror 23 */ and T1, e_64 /* T1 = (f ^ g) & e */ xor tmp0, e_64 /* tmp = (e ror 23) ^ e */ xor T1, g_64 /* T1 = ((f ^ g) & e) ^ g = CH(e,f,g) */ add T1, [WK_2(\t)] /* W[t] + K[t] from message scheduler */ ror tmp0, 4 /* 18 ; tmp = ((e ror 23) ^ e) ror 4 */ xor tmp0, e_64 /* tmp = (((e ror 23) ^ e) ror 4) ^ e */ mov T2, a_64 /* T2 = a */ add T1, h_64 /* T1 = CH(e,f,g) + W[t] + K[t] + h */ ror tmp0, 14 /* 14 ; tmp = ((((e ror23)^e)ror4)^e)ror14 = S1(e) */ add T1, tmp0 /* T1 = CH(e,f,g) + W[t] + K[t] + S1(e) */ mov tmp0, a_64 /* tmp = a */ xor T2, c_64 /* T2 = a ^ c */ and tmp0, c_64 /* tmp = a & c */ and T2, b_64 /* T2 = (a ^ c) & b */ xor T2, tmp0 /* T2 = ((a ^ c) & b) ^ (a & c) = Maj(a,b,c) */ mov tmp0, a_64 /* tmp = a */ ror tmp0, 5 /* 39 ; tmp = a ror 5 */ xor tmp0, a_64 /* tmp = (a ror 5) ^ a */ add d_64, T1 /* e(next_state) = d + T1 */ ror tmp0, 6 /* 34 ; tmp = ((a ror 5) ^ a) ror 6 */ xor tmp0, a_64 /* tmp = (((a ror 5) ^ a) ror 6) ^ a */ lea h_64, [T1 + T2] /* a(next_state) = T1 + Maj(a,b,c) */ ror tmp0, 28 /* 28 ; tmp = ((((a ror5)^a)ror6)^a)ror28 = S0(a) */ add h_64, tmp0 /* a(next_state) = T1 + Maj(a,b,c) S0(a) */ RotateState .endm .macro SHA512_2Sched_2Round_sse t /* ; Compute rounds %%t-2 and %%t-1 ; Compute message schedule QWORDS %%t and %%t+1 ; Two rounds are computed based on the values for K[t-2]+W[t-2] and ; K[t-1]+W[t-1] which were previously stored at WK_2 by the message ; scheduler. ; The two new schedule QWORDS are stored at [W_t(%%t)] and [W_t(%%t+1)]. 
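; (For reference, the recurrence evaluated here is the standard SHA512
;  message schedule:
;    W[t]  = s1(W[t-2]) + W[t-7] + s0(W[t-15]) + W[t-16]
;    s0(x) = (x ror 1)  ^ (x ror 8)  ^ (x >> 7)
;    s1(x) = (x ror 19) ^ (x ror 61) ^ (x >> 6) )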
; They are then added to their respective SHA512 constants at ; [K_t(%%t)] and [K_t(%%t+1)] and stored at dqword [WK_2(%%t)] ; For brevity, the comments following vectored instructions only refer to ; the first of a pair of QWORDS. ; E.g. XMM2=W[t-2] really means XMM2={W[t-2]|W[t-1]} ; The computation of the message schedule and the rounds is tightly ; stitched to take advantage of instruction-level parallelism. ; For clarity, integer instructions (for the rounds calculation) are indented ; by one tab. Vectored instructions (for the message scheduler) are indented ; by two tabs. ; (A plain-C reference sketch of this round and scheduling math is given in ; the comment just before the transform entry point below.) */ mov T1, f_64 movdqa xmm2, [W_t(\t-2)] /* XMM2 = W[t-2] */ xor T1, g_64 and T1, e_64 movdqa xmm0, xmm2 /* XMM0 = W[t-2] */ xor T1, g_64 add T1, [WK_2(\t)] movdqu xmm5, [W_t(\t-15)] /* XMM5 = W[t-15] */ mov tmp0, e_64 ror tmp0, 23 /* 41 */ movdqa xmm3, xmm5 /* XMM3 = W[t-15] */ xor tmp0, e_64 ror tmp0, 4 /* 18 */ psrlq xmm0, 61 - 19 /* XMM0 = W[t-2] >> 42 */ xor tmp0, e_64 ror tmp0, 14 /* 14 */ psrlq xmm3, (8 - 7) /* XMM3 = W[t-15] >> 1 */ add T1, tmp0 add T1, h_64 pxor xmm0, xmm2 /* XMM0 = (W[t-2] >> 42) ^ W[t-2] */ mov T2, a_64 xor T2, c_64 pxor xmm3, xmm5 /* XMM3 = (W[t-15] >> 1) ^ W[t-15] */ and T2, b_64 mov tmp0, a_64 psrlq xmm0, 19 - 6 /* XMM0 = ((W[t-2]>>42)^W[t-2])>>13 */ and tmp0, c_64 xor T2, tmp0 psrlq xmm3, (7 - 1) /* XMM3 = ((W[t-15]>>1)^W[t-15])>>6 */ mov tmp0, a_64 ror tmp0, 5 /* 39 */ pxor xmm0, xmm2 /* XMM0 = (((W[t-2]>>42)^W[t-2])>>13)^W[t-2] */ xor tmp0, a_64 ror tmp0, 6 /* 34 */ pxor xmm3, xmm5 /* XMM3 = (((W[t-15]>>1)^W[t-15])>>6)^W[t-15] */ xor tmp0, a_64 ror tmp0, 28 /* 28 */ psrlq xmm0, 6 /* XMM0 = ((((W[t-2]>>42)^W[t-2])>>13)^W[t-2])>>6 */ add T2, tmp0 add d_64, T1 psrlq xmm3, 1 /* XMM3 = (((W[t-15]>>1)^W[t-15])>>6)^W[t-15]>>1 */ lea h_64, [T1 + T2] RotateState movdqa xmm1, xmm2 /* XMM1 = W[t-2] */ mov T1, f_64 xor T1, g_64 movdqa xmm4, xmm5 /* XMM4 = W[t-15] */ and T1, e_64 xor T1, g_64 psllq xmm1, (64 - 19) - (64 - 61) /* XMM1 = W[t-2] << 42 */ add T1, [WK_2(\t+1)] mov tmp0, e_64 psllq xmm4, (64 - 1) - (64 - 8) /* XMM4 = W[t-15] << 7 */ ror tmp0, 23 /* 41 */ xor tmp0, e_64 pxor xmm1, xmm2 /* XMM1 = (W[t-2] << 42)^W[t-2] */ ror tmp0, 4 /* 18 */ xor tmp0, e_64 pxor xmm4, xmm5 /* XMM4 = (W[t-15]<<7)^W[t-15] */ ror tmp0, 14 /* 14 */ add T1, tmp0 psllq xmm1, (64 - 61) /* XMM1 = ((W[t-2] << 42)^W[t-2])<<3 */ add T1, h_64 mov T2, a_64 psllq xmm4, (64 - 8) /* XMM4 = ((W[t-15]<<7)^W[t-15])<<56 */ xor T2, c_64 and T2, b_64 pxor xmm0, xmm1 /* XMM0 = s1(W[t-2]) */ mov tmp0, a_64 and tmp0, c_64 movdqu xmm1, [W_t(\t- 7)] /* XMM1 = W[t-7] */ xor T2, tmp0 pxor xmm3, xmm4 /* XMM3 = s0(W[t-15]) */ mov tmp0, a_64 paddq xmm0, xmm3 /* XMM0 = s1(W[t-2]) + s0(W[t-15]) */ ror tmp0, 5 /* 39 */ paddq xmm0, [W_t(\t-16)] /* XMM0 = s1(W[t-2]) + s0(W[t-15]) + W[t-16] */ xor tmp0, a_64 paddq xmm0, xmm1 /* XMM0 = s1(W[t-2]) + W[t-7] + s0(W[t-15]) + W[t-16] */ ror tmp0, 6 /* 34 */ movdqa [W_t(\t)], xmm0 /* Store scheduled qwords */ xor tmp0, a_64 paddq xmm0, [K_t(\t)] /* Compute W[t]+K[t] */ ror tmp0, 28 /* 28 */ movdqa [WK_2(\t)], xmm0 /* Store W[t]+K[t] for next rounds */ add T2, tmp0 add d_64, T1 lea h_64, [T1 + T2] RotateState .endm /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ; void sha512_sse4(const void* M, void* D, uint64_t L); ; Purpose: Updates the SHA512 digest stored at D with the message stored in M. ; The size of the message pointed to by M must be an integer multiple of SHA512 ; message blocks. ; L is the message length in SHA512 blocks.
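;
; For reference only (not part of the build): a plain-C sketch of the math that
; the SHA512_Round and SHA512_2Sched_2Round_sse macros above implement. Helper
; names (ror64, sha512_round_ref, sha512_sched_ref) are ad-hoc illustrations,
; not libgcrypt API; the psrlq/psllq/pxor chains in the vector code exist only
; because SSE2 has no 64-bit rotate instruction.
;
;   #include <stdint.h>
;
;   static inline uint64_t ror64(uint64_t x, unsigned n)
;   { return (x >> n) | (x << (64 - n)); }
;
;   // One round t; wk is the precomputed W[t] + K[t] qword (see WK_2 above).
;   static void sha512_round_ref(uint64_t s[8], uint64_t wk)
;   {
;     uint64_t a = s[0], b = s[1], c = s[2], d = s[3];
;     uint64_t e = s[4], f = s[5], g = s[6], h = s[7];
;     uint64_t ch   = ((f ^ g) & e) ^ g;                           // CH(e,f,g)
;     uint64_t maj  = ((a ^ c) & b) ^ (a & c);                     // Maj(a,b,c)
;     uint64_t sum1 = ror64(e, 14) ^ ror64(e, 18) ^ ror64(e, 41);  // S1(e)
;     uint64_t sum0 = ror64(a, 28) ^ ror64(a, 34) ^ ror64(a, 39);  // S0(a)
;     uint64_t t1 = h + sum1 + ch + wk;
;     uint64_t t2 = sum0 + maj;
;     s[7] = g; s[6] = f; s[5] = e; s[4] = d + t1;                 // RotateState
;     s[3] = c; s[2] = b; s[1] = a; s[0] = t1 + t2;
;   }
;
;   // Message-schedule recurrence for t = 16..79; the scheduler above computes
;   // two of these per iteration, one per 64-bit XMM lane.
;   static uint64_t sha512_sched_ref(const uint64_t W[80], int t)
;   {
;     uint64_t s0 = ror64(W[t-15], 1) ^ ror64(W[t-15], 8) ^ (W[t-15] >> 7);
;     uint64_t s1 = ror64(W[t-2], 19) ^ ror64(W[t-2], 61) ^ (W[t-2] >> 6);
;     return s1 + W[t-7] + s0 + W[t-16];
;   }
;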
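;
; A hypothetical caller-side view of this routine, for illustration only; the
; actual declaration and calling code live in libgcrypt's sha512.c and may be
; spelled differently (parameter names and the return type here are assumed):
;
;   #include <stdint.h>
;
;   // msg: nblks * 128 bytes of big-endian message blocks.
;   // digest: the eight current 64-bit state words a..h.
;   // Return value: the stack burn depth in bytes (see 'mov rax, frame_size'
;   // below), telling the caller how much stack to wipe afterwards.
;   extern uint64_t _gcry_sha512_transform_amd64_ssse3(const void *msg,
;                                                      void *digest,
;                                                      uint64_t nblks);
;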
*/ .globl _gcry_sha512_transform_amd64_ssse3 ELF(.type _gcry_sha512_transform_amd64_ssse3,@function;) .align 16 _gcry_sha512_transform_amd64_ssse3: xor eax, eax cmp msglen, 0 je .Lnowork /* Allocate Stack Space */ sub rsp, frame_size /* Save GPRs */ mov [rsp + frame_GPRSAVE + 8 * 0], rbx mov [rsp + frame_GPRSAVE + 8 * 1], r12 mov [rsp + frame_GPRSAVE + 8 * 2], r13 mov [rsp + frame_GPRSAVE + 8 * 3], r14 mov [rsp + frame_GPRSAVE + 8 * 4], r15 .Lupdateblock: /* Load state variables */ mov a_64, [DIGEST(0)] mov b_64, [DIGEST(1)] mov c_64, [DIGEST(2)] mov d_64, [DIGEST(3)] mov e_64, [DIGEST(4)] mov f_64, [DIGEST(5)] mov g_64, [DIGEST(6)] mov h_64, [DIGEST(7)] t = 0 .rept 80/2 + 1 /* (80 rounds) / (2 rounds/iteration) + (1 iteration) */ /* +1 iteration because the scheduler leads hashing by 1 iteration */ .if t < 2 /* BSWAP 2 QWORDS */ movdqa xmm1, [.LXMM_QWORD_BSWAP ADD_RIP] movdqu xmm0, [MSG(t)] pshufb xmm0, xmm1 /* BSWAP */ movdqa [W_t(t)], xmm0 /* Store Scheduled Pair */ paddq xmm0, [K_t(t)] /* Compute W[t]+K[t] */ movdqa [WK_2(t)], xmm0 /* Store into WK for rounds */ .elseif t < 16 /* BSWAP 2 QWORDS; Compute 2 Rounds */ movdqu xmm0, [MSG(t)] pshufb xmm0, xmm1 /* BSWAP */ SHA512_Round (t - 2) /* Round t-2 */ movdqa [W_t(t)], xmm0 /* Store Scheduled Pair */ paddq xmm0, [K_t(t)] /* Compute W[t]+K[t] */ SHA512_Round (t - 1) /* Round t-1 */ movdqa [WK_2(t)], xmm0 /* Store W[t]+K[t] into WK */ .elseif t < 79 /* Schedule 2 QWORDS; Compute 2 Rounds */ SHA512_2Sched_2Round_sse t .else /* Compute 2 Rounds */ SHA512_Round (t - 2) SHA512_Round (t - 1) .endif t = (t)+2 .endr /* Update digest */ add [DIGEST(0)], a_64 add [DIGEST(1)], b_64 add [DIGEST(2)], c_64 add [DIGEST(3)], d_64 add [DIGEST(4)], e_64 add [DIGEST(5)], f_64 add [DIGEST(6)], g_64 add [DIGEST(7)], h_64 /* Advance to next message block */ add msg, 16*8 dec msglen jnz .Lupdateblock /* Restore GPRs */ mov rbx, [rsp + frame_GPRSAVE + 8 * 0] mov r12, [rsp + frame_GPRSAVE + 8 * 1] mov r13, [rsp + frame_GPRSAVE + 8 * 2] mov r14, [rsp + frame_GPRSAVE + 8 * 3] mov r15, [rsp + frame_GPRSAVE + 8 * 4] /* Restore Stack Pointer */ add rsp, frame_size pxor xmm0, xmm0 pxor xmm1, xmm1 pxor xmm2, xmm2 pxor xmm3, xmm3 pxor xmm4, xmm4 pxor xmm5, xmm5 /* Return stack burn depth */ mov rax, frame_size .Lnowork: ret /* ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;; Binary Data */ -.data - .align 16 /* Mask for byte-swapping a couple of qwords in an XMM register using (v)pshufb. 
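   In C terms this pshufb amounts to a per-qword byte swap of each 16-byte
   message chunk, roughly (GCC/Clang builtin shown for illustration only):
   w0 = __builtin_bswap64(p[0]); w1 = __builtin_bswap64(p[1]); where p points
   at the two 64-bit lanes being loaded into the schedule.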
*/ .LXMM_QWORD_BSWAP: .octa 0x08090a0b0c0d0e0f0001020304050607 /* K[t] used in SHA512 hashing */ .LK512: .quad 0x428a2f98d728ae22,0x7137449123ef65cd .quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc .quad 0x3956c25bf348b538,0x59f111f1b605d019 .quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118 .quad 0xd807aa98a3030242,0x12835b0145706fbe .quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2 .quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1 .quad 0x9bdc06a725c71235,0xc19bf174cf692694 .quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3 .quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65 .quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483 .quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5 .quad 0x983e5152ee66dfab,0xa831c66d2db43210 .quad 0xb00327c898fb213f,0xbf597fc7beef0ee4 .quad 0xc6e00bf33da88fc2,0xd5a79147930aa725 .quad 0x06ca6351e003826f,0x142929670a0e6e70 .quad 0x27b70a8546d22ffc,0x2e1b21385c26c926 .quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df .quad 0x650a73548baf63de,0x766a0abb3c77b2a8 .quad 0x81c2c92e47edaee6,0x92722c851482353b .quad 0xa2bfe8a14cf10364,0xa81a664bbc423001 .quad 0xc24b8b70d0f89791,0xc76c51a30654be30 .quad 0xd192e819d6ef5218,0xd69906245565a910 .quad 0xf40e35855771202a,0x106aa07032bbd1b8 .quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53 .quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8 .quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb .quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3 .quad 0x748f82ee5defb2fc,0x78a5636f43172f60 .quad 0x84c87814a1f0ab72,0x8cc702081a6439ec .quad 0x90befffa23631e28,0xa4506cebde82bde9 .quad 0xbef9a3f7b2c67915,0xc67178f2e372532b .quad 0xca273eceea26619c,0xd186b8c721c0c207 .quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178 .quad 0x06f067aa72176fba,0x0a637dc5a2c898a6 .quad 0x113f9804bef90dae,0x1b710b35131c471b .quad 0x28db77f523047d84,0x32caab7b40c72493 .quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c .quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a .quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817 #endif #endif