diff --git a/cipher/camellia-aesni-avx-amd64.S b/cipher/camellia-aesni-avx-amd64.S index e15e445b..1f241e03 100644 --- a/cipher/camellia-aesni-avx-amd64.S +++ b/cipher/camellia-aesni-avx-amd64.S @@ -1,2618 +1,2627 @@ /* camellia-avx-aesni-amd64.S - AES-NI/AVX implementation of Camellia cipher * * Copyright (C) 2013-2015,2020 Jussi Kivilinna * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . */ #include #ifdef __x86_64 #if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && \ defined(ENABLE_AESNI_SUPPORT) && defined(ENABLE_AVX_SUPPORT) #include "asm-common-amd64.h" #define CAMELLIA_TABLE_BYTE_LEN 272 /* struct CAMELLIA_context: */ #define key_table 0 #define key_bitlength CAMELLIA_TABLE_BYTE_LEN /* register macros */ #define CTX %rdi /********************************************************************** helper macros **********************************************************************/ #define filter_8bit(x, lo_t, hi_t, mask4bit, tmp0) \ vpand x, mask4bit, tmp0; \ vpandn x, mask4bit, x; \ vpsrld $4, x, x; \ \ vpshufb tmp0, lo_t, tmp0; \ vpshufb x, hi_t, x; \ vpxor tmp0, x, x; /********************************************************************** 16-way camellia **********************************************************************/ /* * IN: * x0..x7: byte-sliced AB state * mem_cd: register pointer storing CD state * key: index for key material * OUT: * x0..x7: new byte-sliced CD state */ #define roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2, t3, t4, t5, t6, \ t7, mem_cd, key) \ /* \ * S-function with AES subbytes \ */ \ vmovdqa .Linv_shift_row rRIP, t4; \ vbroadcastss .L0f0f0f0f rRIP, t7; \ vmovdqa .Lpre_tf_lo_s1 rRIP, t0; \ vmovdqa .Lpre_tf_hi_s1 rRIP, t1; \ \ /* AES inverse shift rows */ \ vpshufb t4, x0, x0; \ vpshufb t4, x7, x7; \ vpshufb t4, x1, x1; \ vpshufb t4, x4, x4; \ vpshufb t4, x2, x2; \ vpshufb t4, x5, x5; \ vpshufb t4, x3, x3; \ vpshufb t4, x6, x6; \ \ /* prefilter sboxes 1, 2 and 3 */ \ vmovdqa .Lpre_tf_lo_s4 rRIP, t2; \ vmovdqa .Lpre_tf_hi_s4 rRIP, t3; \ filter_8bit(x0, t0, t1, t7, t6); \ filter_8bit(x7, t0, t1, t7, t6); \ filter_8bit(x1, t0, t1, t7, t6); \ filter_8bit(x4, t0, t1, t7, t6); \ filter_8bit(x2, t0, t1, t7, t6); \ filter_8bit(x5, t0, t1, t7, t6); \ \ /* prefilter sbox 4 */ \ vpxor t4, t4, t4; \ filter_8bit(x3, t2, t3, t7, t6); \ filter_8bit(x6, t2, t3, t7, t6); \ \ /* AES subbytes + AES shift rows */ \ vmovdqa .Lpost_tf_lo_s1 rRIP, t0; \ vmovdqa .Lpost_tf_hi_s1 rRIP, t1; \ vaesenclast t4, x0, x0; \ vaesenclast t4, x7, x7; \ vaesenclast t4, x1, x1; \ vaesenclast t4, x4, x4; \ vaesenclast t4, x2, x2; \ vaesenclast t4, x5, x5; \ vaesenclast t4, x3, x3; \ vaesenclast t4, x6, x6; \ \ /* postfilter sboxes 1 and 4 */ \ vmovdqa .Lpost_tf_lo_s3 rRIP, t2; \ vmovdqa .Lpost_tf_hi_s3 rRIP, t3; \ filter_8bit(x0, t0, t1, t7, t6); \ filter_8bit(x7, t0, t1, t7, t6); \ filter_8bit(x3, t0, t1, t7, t6); \ filter_8bit(x6, t0, 
t1, t7, t6); \ \ /* postfilter sbox 3 */ \ vmovdqa .Lpost_tf_lo_s2 rRIP, t4; \ vmovdqa .Lpost_tf_hi_s2 rRIP, t5; \ filter_8bit(x2, t2, t3, t7, t6); \ filter_8bit(x5, t2, t3, t7, t6); \ \ vpxor t6, t6, t6; \ vmovq key, t0; \ \ /* postfilter sbox 2 */ \ filter_8bit(x1, t4, t5, t7, t2); \ filter_8bit(x4, t4, t5, t7, t2); \ \ vpsrldq $5, t0, t5; \ vpsrldq $1, t0, t1; \ vpsrldq $2, t0, t2; \ vpsrldq $3, t0, t3; \ vpsrldq $4, t0, t4; \ vpshufb t6, t0, t0; \ vpshufb t6, t1, t1; \ vpshufb t6, t2, t2; \ vpshufb t6, t3, t3; \ vpshufb t6, t4, t4; \ vpsrldq $2, t5, t7; \ vpshufb t6, t7, t7; \ \ /* P-function */ \ vpxor x5, x0, x0; \ vpxor x6, x1, x1; \ vpxor x7, x2, x2; \ vpxor x4, x3, x3; \ \ vpxor x2, x4, x4; \ vpxor x3, x5, x5; \ vpxor x0, x6, x6; \ vpxor x1, x7, x7; \ \ vpxor x7, x0, x0; \ vpxor x4, x1, x1; \ vpxor x5, x2, x2; \ vpxor x6, x3, x3; \ \ vpxor x3, x4, x4; \ vpxor x0, x5, x5; \ vpxor x1, x6, x6; \ vpxor x2, x7, x7; /* note: high and low parts swapped */ \ \ /* Add key material and result to CD (x becomes new CD) */ \ \ vpxor t3, x4, x4; \ vpxor 0 * 16(mem_cd), x4, x4; \ \ vpxor t2, x5, x5; \ vpxor 1 * 16(mem_cd), x5, x5; \ \ vpsrldq $1, t5, t3; \ vpshufb t6, t5, t5; \ vpshufb t6, t3, t6; \ \ vpxor t1, x6, x6; \ vpxor 2 * 16(mem_cd), x6, x6; \ \ vpxor t0, x7, x7; \ vpxor 3 * 16(mem_cd), x7, x7; \ \ vpxor t7, x0, x0; \ vpxor 4 * 16(mem_cd), x0, x0; \ \ vpxor t6, x1, x1; \ vpxor 5 * 16(mem_cd), x1, x1; \ \ vpxor t5, x2, x2; \ vpxor 6 * 16(mem_cd), x2, x2; \ \ vpxor t4, x3, x3; \ vpxor 7 * 16(mem_cd), x3, x3; /* * IN/OUT: * x0..x7: byte-sliced AB state preloaded * mem_ab: byte-sliced AB state in memory * mem_cb: byte-sliced CD state in memory */ #define two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, i, dir, store_ab) \ roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_cd, (key_table + (i) * 8)(CTX)); \ \ vmovdqu x4, 0 * 16(mem_cd); \ vmovdqu x5, 1 * 16(mem_cd); \ vmovdqu x6, 2 * 16(mem_cd); \ vmovdqu x7, 3 * 16(mem_cd); \ vmovdqu x0, 4 * 16(mem_cd); \ vmovdqu x1, 5 * 16(mem_cd); \ vmovdqu x2, 6 * 16(mem_cd); \ vmovdqu x3, 7 * 16(mem_cd); \ \ roundsm16(x4, x5, x6, x7, x0, x1, x2, x3, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, (key_table + ((i) + (dir)) * 8)(CTX)); \ \ store_ab(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab); #define dummy_store(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab) /* do nothing */ #define store_ab_state(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab) \ /* Store new AB state */ \ vmovdqu x0, 0 * 16(mem_ab); \ vmovdqu x1, 1 * 16(mem_ab); \ vmovdqu x2, 2 * 16(mem_ab); \ vmovdqu x3, 3 * 16(mem_ab); \ vmovdqu x4, 4 * 16(mem_ab); \ vmovdqu x5, 5 * 16(mem_ab); \ vmovdqu x6, 6 * 16(mem_ab); \ vmovdqu x7, 7 * 16(mem_ab); #define enc_rounds16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, i) \ two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 2, 1, store_ab_state); \ two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 4, 1, store_ab_state); \ two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 6, 1, dummy_store); #define dec_rounds16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, i) \ two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 7, -1, store_ab_state); \ two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, 
mem_cd, (i) + 5, -1, store_ab_state); \ two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 3, -1, dummy_store); /* * IN: * v0..3: byte-sliced 32-bit integers * OUT: * v0..3: (IN <<< 1) */ #define rol32_1_16(v0, v1, v2, v3, t0, t1, t2, zero) \ vpcmpgtb v0, zero, t0; \ vpaddb v0, v0, v0; \ vpabsb t0, t0; \ \ vpcmpgtb v1, zero, t1; \ vpaddb v1, v1, v1; \ vpabsb t1, t1; \ \ vpcmpgtb v2, zero, t2; \ vpaddb v2, v2, v2; \ vpabsb t2, t2; \ \ vpor t0, v1, v1; \ \ vpcmpgtb v3, zero, t0; \ vpaddb v3, v3, v3; \ vpabsb t0, t0; \ \ vpor t1, v2, v2; \ vpor t2, v3, v3; \ vpor t0, v0, v0; /* * IN: * r: byte-sliced AB state in memory * l: byte-sliced CD state in memory * OUT: * x0..x7: new byte-sliced CD state */ #define fls16(l, l0, l1, l2, l3, l4, l5, l6, l7, r, t0, t1, t2, t3, tt0, \ tt1, tt2, tt3, kll, klr, krl, krr) \ /* \ * t0 = kll; \ * t0 &= ll; \ * lr ^= rol32(t0, 1); \ */ \ vpxor tt0, tt0, tt0; \ vmovd kll, t0; \ vpshufb tt0, t0, t3; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t2; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t1; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t0; \ \ vpand l0, t0, t0; \ vpand l1, t1, t1; \ vpand l2, t2, t2; \ vpand l3, t3, t3; \ \ rol32_1_16(t3, t2, t1, t0, tt1, tt2, tt3, tt0); \ \ vpxor l4, t0, l4; \ vmovdqu l4, 4 * 16(l); \ vpxor l5, t1, l5; \ vmovdqu l5, 5 * 16(l); \ vpxor l6, t2, l6; \ vmovdqu l6, 6 * 16(l); \ vpxor l7, t3, l7; \ vmovdqu l7, 7 * 16(l); \ \ /* \ * t2 = krr; \ * t2 |= rr; \ * rl ^= t2; \ */ \ \ vmovd krr, t0; \ vpshufb tt0, t0, t3; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t2; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t1; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t0; \ \ vpor 4 * 16(r), t0, t0; \ vpor 5 * 16(r), t1, t1; \ vpor 6 * 16(r), t2, t2; \ vpor 7 * 16(r), t3, t3; \ \ vpxor 0 * 16(r), t0, t0; \ vpxor 1 * 16(r), t1, t1; \ vpxor 2 * 16(r), t2, t2; \ vpxor 3 * 16(r), t3, t3; \ vmovdqu t0, 0 * 16(r); \ vmovdqu t1, 1 * 16(r); \ vmovdqu t2, 2 * 16(r); \ vmovdqu t3, 3 * 16(r); \ \ /* \ * t2 = krl; \ * t2 &= rl; \ * rr ^= rol32(t2, 1); \ */ \ vmovd krl, t0; \ vpshufb tt0, t0, t3; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t2; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t1; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t0; \ \ vpand 0 * 16(r), t0, t0; \ vpand 1 * 16(r), t1, t1; \ vpand 2 * 16(r), t2, t2; \ vpand 3 * 16(r), t3, t3; \ \ rol32_1_16(t3, t2, t1, t0, tt1, tt2, tt3, tt0); \ \ vpxor 4 * 16(r), t0, t0; \ vpxor 5 * 16(r), t1, t1; \ vpxor 6 * 16(r), t2, t2; \ vpxor 7 * 16(r), t3, t3; \ vmovdqu t0, 4 * 16(r); \ vmovdqu t1, 5 * 16(r); \ vmovdqu t2, 6 * 16(r); \ vmovdqu t3, 7 * 16(r); \ \ /* \ * t0 = klr; \ * t0 |= lr; \ * ll ^= t0; \ */ \ \ vmovd klr, t0; \ vpshufb tt0, t0, t3; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t2; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t1; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t0; \ \ vpor l4, t0, t0; \ vpor l5, t1, t1; \ vpor l6, t2, t2; \ vpor l7, t3, t3; \ \ vpxor l0, t0, l0; \ vmovdqu l0, 0 * 16(l); \ vpxor l1, t1, l1; \ vmovdqu l1, 1 * 16(l); \ vpxor l2, t2, l2; \ vmovdqu l2, 2 * 16(l); \ vpxor l3, t3, l3; \ vmovdqu l3, 3 * 16(l); #define transpose_4x4(x0, x1, x2, x3, t1, t2) \ vpunpckhdq x1, x0, t2; \ vpunpckldq x1, x0, x0; \ \ vpunpckldq x3, x2, t1; \ vpunpckhdq x3, x2, x2; \ \ vpunpckhqdq t1, x0, x1; \ vpunpcklqdq t1, x0, x0; \ \ vpunpckhqdq x2, t2, x3; \ vpunpcklqdq x2, t2, x2; #define byteslice_16x16b_fast(a0, b0, c0, d0, a1, b1, c1, d1, a2, b2, c2, d2, \ a3, b3, c3, d3, st0, st1) \ vmovdqu d2, st0; \ vmovdqu d3, st1; \ transpose_4x4(a0, a1, a2, a3, d2, d3); \ transpose_4x4(b0, 
b1, b2, b3, d2, d3); \ vmovdqu st0, d2; \ vmovdqu st1, d3; \ \ vmovdqu a0, st0; \ vmovdqu a1, st1; \ transpose_4x4(c0, c1, c2, c3, a0, a1); \ transpose_4x4(d0, d1, d2, d3, a0, a1); \ \ vmovdqu .Lshufb_16x16b rRIP, a0; \ vmovdqu st1, a1; \ vpshufb a0, a2, a2; \ vpshufb a0, a3, a3; \ vpshufb a0, b0, b0; \ vpshufb a0, b1, b1; \ vpshufb a0, b2, b2; \ vpshufb a0, b3, b3; \ vpshufb a0, a1, a1; \ vpshufb a0, c0, c0; \ vpshufb a0, c1, c1; \ vpshufb a0, c2, c2; \ vpshufb a0, c3, c3; \ vpshufb a0, d0, d0; \ vpshufb a0, d1, d1; \ vpshufb a0, d2, d2; \ vpshufb a0, d3, d3; \ vmovdqu d3, st1; \ vmovdqu st0, d3; \ vpshufb a0, d3, a0; \ vmovdqu d2, st0; \ \ transpose_4x4(a0, b0, c0, d0, d2, d3); \ transpose_4x4(a1, b1, c1, d1, d2, d3); \ vmovdqu st0, d2; \ vmovdqu st1, d3; \ \ vmovdqu b0, st0; \ vmovdqu b1, st1; \ transpose_4x4(a2, b2, c2, d2, b0, b1); \ transpose_4x4(a3, b3, c3, d3, b0, b1); \ vmovdqu st0, b0; \ vmovdqu st1, b1; \ /* does not adjust output bytes inside vectors */ #define transpose_8x8b(a, b, c, d, e, f, g, h, t0, t1, t2, t3, t4) \ vpunpcklbw a, b, t0; \ vpunpckhbw a, b, b; \ \ vpunpcklbw c, d, t1; \ vpunpckhbw c, d, d; \ \ vpunpcklbw e, f, t2; \ vpunpckhbw e, f, f; \ \ vpunpcklbw g, h, t3; \ vpunpckhbw g, h, h; \ \ vpunpcklwd t0, t1, g; \ vpunpckhwd t0, t1, t0; \ \ vpunpcklwd b, d, t1; \ vpunpckhwd b, d, e; \ \ vpunpcklwd t2, t3, c; \ vpunpckhwd t2, t3, t2; \ \ vpunpcklwd f, h, t3; \ vpunpckhwd f, h, b; \ \ vpunpcklwd e, b, t4; \ vpunpckhwd e, b, b; \ \ vpunpcklwd t1, t3, e; \ vpunpckhwd t1, t3, f; \ \ vmovdqa .Ltranspose_8x8_shuf rRIP, t3; \ \ vpunpcklwd g, c, d; \ vpunpckhwd g, c, c; \ \ vpunpcklwd t0, t2, t1; \ vpunpckhwd t0, t2, h; \ \ vpunpckhqdq b, h, a; \ vpshufb t3, a, a; \ vpunpcklqdq b, h, b; \ vpshufb t3, b, b; \ \ vpunpckhqdq e, d, g; \ vpshufb t3, g, g; \ vpunpcklqdq e, d, h; \ vpshufb t3, h, h; \ \ vpunpckhqdq f, c, e; \ vpshufb t3, e, e; \ vpunpcklqdq f, c, f; \ vpshufb t3, f, f; \ \ vpunpckhqdq t4, t1, c; \ vpshufb t3, c, c; \ vpunpcklqdq t4, t1, d; \ vpshufb t3, d, d; /* load blocks to registers and apply pre-whitening */ #define inpack16_pre(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, rio, key) \ vmovq key, x0; \ vpshufb .Lpack_bswap rRIP, x0, x0; \ \ vpxor 0 * 16(rio), x0, y7; \ vpxor 1 * 16(rio), x0, y6; \ vpxor 2 * 16(rio), x0, y5; \ vpxor 3 * 16(rio), x0, y4; \ vpxor 4 * 16(rio), x0, y3; \ vpxor 5 * 16(rio), x0, y2; \ vpxor 6 * 16(rio), x0, y1; \ vpxor 7 * 16(rio), x0, y0; \ vpxor 8 * 16(rio), x0, x7; \ vpxor 9 * 16(rio), x0, x6; \ vpxor 10 * 16(rio), x0, x5; \ vpxor 11 * 16(rio), x0, x4; \ vpxor 12 * 16(rio), x0, x3; \ vpxor 13 * 16(rio), x0, x2; \ vpxor 14 * 16(rio), x0, x1; \ vpxor 15 * 16(rio), x0, x0; /* byteslice pre-whitened blocks and store to temporary memory */ #define inpack16_post(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd) \ byteslice_16x16b_fast(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, \ y4, y5, y6, y7, (mem_ab), (mem_cd)); \ \ vmovdqu x0, 0 * 16(mem_ab); \ vmovdqu x1, 1 * 16(mem_ab); \ vmovdqu x2, 2 * 16(mem_ab); \ vmovdqu x3, 3 * 16(mem_ab); \ vmovdqu x4, 4 * 16(mem_ab); \ vmovdqu x5, 5 * 16(mem_ab); \ vmovdqu x6, 6 * 16(mem_ab); \ vmovdqu x7, 7 * 16(mem_ab); \ vmovdqu y0, 0 * 16(mem_cd); \ vmovdqu y1, 1 * 16(mem_cd); \ vmovdqu y2, 2 * 16(mem_cd); \ vmovdqu y3, 3 * 16(mem_cd); \ vmovdqu y4, 4 * 16(mem_cd); \ vmovdqu y5, 5 * 16(mem_cd); \ vmovdqu y6, 6 * 16(mem_cd); \ vmovdqu y7, 7 * 16(mem_cd); /* de-byteslice, apply post-whitening and store blocks */ #define outunpack16(x0, x1, x2, 
x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, \ y5, y6, y7, key, stack_tmp0, stack_tmp1) \ byteslice_16x16b_fast(y0, y4, x0, x4, y1, y5, x1, x5, y2, y6, x2, x6, \ y3, y7, x3, x7, stack_tmp0, stack_tmp1); \ \ vmovdqu x0, stack_tmp0; \ \ vmovq key, x0; \ vpshufb .Lpack_bswap rRIP, x0, x0; \ \ vpxor x0, y7, y7; \ vpxor x0, y6, y6; \ vpxor x0, y5, y5; \ vpxor x0, y4, y4; \ vpxor x0, y3, y3; \ vpxor x0, y2, y2; \ vpxor x0, y1, y1; \ vpxor x0, y0, y0; \ vpxor x0, x7, x7; \ vpxor x0, x6, x6; \ vpxor x0, x5, x5; \ vpxor x0, x4, x4; \ vpxor x0, x3, x3; \ vpxor x0, x2, x2; \ vpxor x0, x1, x1; \ vpxor stack_tmp0, x0, x0; #define write_output(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, rio) \ vmovdqu x0, 0 * 16(rio); \ vmovdqu x1, 1 * 16(rio); \ vmovdqu x2, 2 * 16(rio); \ vmovdqu x3, 3 * 16(rio); \ vmovdqu x4, 4 * 16(rio); \ vmovdqu x5, 5 * 16(rio); \ vmovdqu x6, 6 * 16(rio); \ vmovdqu x7, 7 * 16(rio); \ vmovdqu y0, 8 * 16(rio); \ vmovdqu y1, 9 * 16(rio); \ vmovdqu y2, 10 * 16(rio); \ vmovdqu y3, 11 * 16(rio); \ vmovdqu y4, 12 * 16(rio); \ vmovdqu y5, 13 * 16(rio); \ vmovdqu y6, 14 * 16(rio); \ vmovdqu y7, 15 * 16(rio); -.text +SECTION_RODATA + +ELF(.type _camellia_aesni_avx_data,@object;) +_camellia_aesni_avx_data: .align 16 #define SHUFB_BYTES(idx) \ 0 + (idx), 4 + (idx), 8 + (idx), 12 + (idx) .Lshufb_16x16b: .byte SHUFB_BYTES(0), SHUFB_BYTES(1), SHUFB_BYTES(2), SHUFB_BYTES(3); .Lpack_bswap: .long 0x00010203 .long 0x04050607 .long 0x80808080 .long 0x80808080 /* For CTR-mode IV byteswap */ .Lbswap128_mask: .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 /* * pre-SubByte transform * * pre-lookup for sbox1, sbox2, sbox3: * swap_bitendianness( * isom_map_camellia_to_aes( * camellia_f( * swap_bitendianess(in) * ) * ) * ) * * (note: '⊕ 0xc5' inside camellia_f()) */ .Lpre_tf_lo_s1: .byte 0x45, 0xe8, 0x40, 0xed, 0x2e, 0x83, 0x2b, 0x86 .byte 0x4b, 0xe6, 0x4e, 0xe3, 0x20, 0x8d, 0x25, 0x88 .Lpre_tf_hi_s1: .byte 0x00, 0x51, 0xf1, 0xa0, 0x8a, 0xdb, 0x7b, 0x2a .byte 0x09, 0x58, 0xf8, 0xa9, 0x83, 0xd2, 0x72, 0x23 /* * pre-SubByte transform * * pre-lookup for sbox4: * swap_bitendianness( * isom_map_camellia_to_aes( * camellia_f( * swap_bitendianess(in <<< 1) * ) * ) * ) * * (note: '⊕ 0xc5' inside camellia_f()) */ .Lpre_tf_lo_s4: .byte 0x45, 0x40, 0x2e, 0x2b, 0x4b, 0x4e, 0x20, 0x25 .byte 0x14, 0x11, 0x7f, 0x7a, 0x1a, 0x1f, 0x71, 0x74 .Lpre_tf_hi_s4: .byte 0x00, 0xf1, 0x8a, 0x7b, 0x09, 0xf8, 0x83, 0x72 .byte 0xad, 0x5c, 0x27, 0xd6, 0xa4, 0x55, 0x2e, 0xdf /* * post-SubByte transform * * post-lookup for sbox1, sbox4: * swap_bitendianness( * camellia_h( * isom_map_aes_to_camellia( * swap_bitendianness( * aes_inverse_affine_transform(in) * ) * ) * ) * ) * * (note: '⊕ 0x6e' inside camellia_h()) */ .Lpost_tf_lo_s1: .byte 0x3c, 0xcc, 0xcf, 0x3f, 0x32, 0xc2, 0xc1, 0x31 .byte 0xdc, 0x2c, 0x2f, 0xdf, 0xd2, 0x22, 0x21, 0xd1 .Lpost_tf_hi_s1: .byte 0x00, 0xf9, 0x86, 0x7f, 0xd7, 0x2e, 0x51, 0xa8 .byte 0xa4, 0x5d, 0x22, 0xdb, 0x73, 0x8a, 0xf5, 0x0c /* * post-SubByte transform * * post-lookup for sbox2: * swap_bitendianness( * camellia_h( * isom_map_aes_to_camellia( * swap_bitendianness( * aes_inverse_affine_transform(in) * ) * ) * ) * ) <<< 1 * * (note: '⊕ 0x6e' inside camellia_h()) */ .Lpost_tf_lo_s2: .byte 0x78, 0x99, 0x9f, 0x7e, 0x64, 0x85, 0x83, 0x62 .byte 0xb9, 0x58, 0x5e, 0xbf, 0xa5, 0x44, 0x42, 0xa3 .Lpost_tf_hi_s2: .byte 0x00, 0xf3, 0x0d, 0xfe, 0xaf, 0x5c, 0xa2, 0x51 .byte 0x49, 0xba, 0x44, 0xb7, 0xe6, 0x15, 0xeb, 0x18 /* * post-SubByte transform * * post-lookup for sbox3: * swap_bitendianness( * 
camellia_h( * isom_map_aes_to_camellia( * swap_bitendianness( * aes_inverse_affine_transform(in) * ) * ) * ) * ) >>> 1 * * (note: '⊕ 0x6e' inside camellia_h()) */ .Lpost_tf_lo_s3: .byte 0x1e, 0x66, 0xe7, 0x9f, 0x19, 0x61, 0xe0, 0x98 .byte 0x6e, 0x16, 0x97, 0xef, 0x69, 0x11, 0x90, 0xe8 .Lpost_tf_hi_s3: .byte 0x00, 0xfc, 0x43, 0xbf, 0xeb, 0x17, 0xa8, 0x54 .byte 0x52, 0xae, 0x11, 0xed, 0xb9, 0x45, 0xfa, 0x06 /* For isolating SubBytes from AESENCLAST, inverse shift row */ .Linv_shift_row: .byte 0x00, 0x0d, 0x0a, 0x07, 0x04, 0x01, 0x0e, 0x0b .byte 0x08, 0x05, 0x02, 0x0f, 0x0c, 0x09, 0x06, 0x03 /* shuffle mask for 8x8 byte transpose */ .Ltranspose_8x8_shuf: .byte 0, 1, 4, 5, 2, 3, 6, 7, 8+0, 8+1, 8+4, 8+5, 8+2, 8+3, 8+6, 8+7 .align 4 /* 4-bit mask */ .L0f0f0f0f: .long 0x0f0f0f0f +.text .align 16 ELF(.type __camellia_enc_blk16,@function;) __camellia_enc_blk16: /* input: * %rdi: ctx, CTX * %rax: temporary storage, 256 bytes * %r8d: 24 for 16 byte key, 32 for larger * %xmm0..%xmm15: 16 plaintext blocks * output: * %xmm0..%xmm15: 16 encrypted blocks, order swapped: * 7, 8, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 */ CFI_STARTPROC(); leaq 8 * 16(%rax), %rcx; leaq (-8 * 8)(CTX, %r8, 8), %r8; inpack16_post(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rax, %rcx); .align 8 .Lenc_loop: enc_rounds16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rax, %rcx, 0); cmpq %r8, CTX; je .Lenc_done; leaq (8 * 8)(CTX), CTX; fls16(%rax, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %rcx, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, ((key_table) + 0)(CTX), ((key_table) + 4)(CTX), ((key_table) + 8)(CTX), ((key_table) + 12)(CTX)); jmp .Lenc_loop; .align 8 .Lenc_done: /* load CD for output */ vmovdqu 0 * 16(%rcx), %xmm8; vmovdqu 1 * 16(%rcx), %xmm9; vmovdqu 2 * 16(%rcx), %xmm10; vmovdqu 3 * 16(%rcx), %xmm11; vmovdqu 4 * 16(%rcx), %xmm12; vmovdqu 5 * 16(%rcx), %xmm13; vmovdqu 6 * 16(%rcx), %xmm14; vmovdqu 7 * 16(%rcx), %xmm15; outunpack16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, ((key_table) + 8 * 8)(%r8), (%rax), 1 * 16(%rax)); ret_spec_stop; CFI_ENDPROC(); ELF(.size __camellia_enc_blk16,.-__camellia_enc_blk16;) .align 16 ELF(.type __camellia_dec_blk16,@function;) __camellia_dec_blk16: /* input: * %rdi: ctx, CTX * %rax: temporary storage, 256 bytes * %r8d: 24 for 16 byte key, 32 for larger * %xmm0..%xmm15: 16 encrypted blocks * output: * %xmm0..%xmm15: 16 plaintext blocks, order swapped: * 7, 8, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 */ CFI_STARTPROC(); movq %r8, %rcx; movq CTX, %r8 leaq (-8 * 8)(CTX, %rcx, 8), CTX; leaq 8 * 16(%rax), %rcx; inpack16_post(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rax, %rcx); .align 8 .Ldec_loop: dec_rounds16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rax, %rcx, 0); cmpq %r8, CTX; je .Ldec_done; fls16(%rax, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %rcx, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, ((key_table) + 8)(CTX), ((key_table) + 12)(CTX), ((key_table) + 0)(CTX), ((key_table) + 4)(CTX)); leaq (-8 * 8)(CTX), CTX; jmp .Ldec_loop; .align 8 .Ldec_done: /* load CD for output */ vmovdqu 0 * 16(%rcx), %xmm8; vmovdqu 1 * 16(%rcx), %xmm9; vmovdqu 
2 * 16(%rcx), %xmm10; vmovdqu 3 * 16(%rcx), %xmm11; vmovdqu 4 * 16(%rcx), %xmm12; vmovdqu 5 * 16(%rcx), %xmm13; vmovdqu 6 * 16(%rcx), %xmm14; vmovdqu 7 * 16(%rcx), %xmm15; outunpack16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, (key_table)(CTX), (%rax), 1 * 16(%rax)); ret_spec_stop; CFI_ENDPROC(); ELF(.size __camellia_dec_blk16,.-__camellia_dec_blk16;) #define inc_le128(x, minus_one, tmp) \ vpcmpeqq minus_one, x, tmp; \ vpsubq minus_one, x, x; \ vpslldq $8, tmp, tmp; \ vpsubq tmp, x, x; .align 16 .globl _gcry_camellia_aesni_avx_ctr_enc ELF(.type _gcry_camellia_aesni_avx_ctr_enc,@function;) _gcry_camellia_aesni_avx_ctr_enc: /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) * %rdx: src (16 blocks) * %rcx: iv (big endian, 128bit) */ CFI_STARTPROC(); pushq %rbp; CFI_PUSH(%rbp); movq %rsp, %rbp; CFI_DEF_CFA_REGISTER(%rbp); vzeroupper; cmpl $128, key_bitlength(CTX); movl $32, %r8d; movl $24, %eax; cmovel %eax, %r8d; /* max */ subq $(16 * 16), %rsp; andq $~31, %rsp; movq %rsp, %rax; vmovdqa .Lbswap128_mask rRIP, %xmm14; /* load IV and byteswap */ vmovdqu (%rcx), %xmm15; vmovdqu %xmm15, 15 * 16(%rax); vpshufb %xmm14, %xmm15, %xmm0; /* be => le */ vpcmpeqd %xmm15, %xmm15, %xmm15; vpsrldq $8, %xmm15, %xmm15; /* low: -1, high: 0 */ /* construct IVs */ inc_le128(%xmm0, %xmm15, %xmm13); vpshufb %xmm14, %xmm0, %xmm13; vmovdqu %xmm13, 14 * 16(%rax); inc_le128(%xmm0, %xmm15, %xmm13); vpshufb %xmm14, %xmm0, %xmm13; vmovdqu %xmm13, 13 * 16(%rax); inc_le128(%xmm0, %xmm15, %xmm13); vpshufb %xmm14, %xmm0, %xmm12; inc_le128(%xmm0, %xmm15, %xmm13); vpshufb %xmm14, %xmm0, %xmm11; inc_le128(%xmm0, %xmm15, %xmm13); vpshufb %xmm14, %xmm0, %xmm10; inc_le128(%xmm0, %xmm15, %xmm13); vpshufb %xmm14, %xmm0, %xmm9; inc_le128(%xmm0, %xmm15, %xmm13); vpshufb %xmm14, %xmm0, %xmm8; inc_le128(%xmm0, %xmm15, %xmm13); vpshufb %xmm14, %xmm0, %xmm7; inc_le128(%xmm0, %xmm15, %xmm13); vpshufb %xmm14, %xmm0, %xmm6; inc_le128(%xmm0, %xmm15, %xmm13); vpshufb %xmm14, %xmm0, %xmm5; inc_le128(%xmm0, %xmm15, %xmm13); vpshufb %xmm14, %xmm0, %xmm4; inc_le128(%xmm0, %xmm15, %xmm13); vpshufb %xmm14, %xmm0, %xmm3; inc_le128(%xmm0, %xmm15, %xmm13); vpshufb %xmm14, %xmm0, %xmm2; inc_le128(%xmm0, %xmm15, %xmm13); vpshufb %xmm14, %xmm0, %xmm1; inc_le128(%xmm0, %xmm15, %xmm13); vmovdqa %xmm0, %xmm13; vpshufb %xmm14, %xmm0, %xmm0; inc_le128(%xmm13, %xmm15, %xmm14); vpshufb .Lbswap128_mask rRIP, %xmm13, %xmm13; /* le => be */ vmovdqu %xmm13, (%rcx); /* inpack16_pre: */ vmovq (key_table)(CTX), %xmm15; vpshufb .Lpack_bswap rRIP, %xmm15, %xmm15; vpxor %xmm0, %xmm15, %xmm0; vpxor %xmm1, %xmm15, %xmm1; vpxor %xmm2, %xmm15, %xmm2; vpxor %xmm3, %xmm15, %xmm3; vpxor %xmm4, %xmm15, %xmm4; vpxor %xmm5, %xmm15, %xmm5; vpxor %xmm6, %xmm15, %xmm6; vpxor %xmm7, %xmm15, %xmm7; vpxor %xmm8, %xmm15, %xmm8; vpxor %xmm9, %xmm15, %xmm9; vpxor %xmm10, %xmm15, %xmm10; vpxor %xmm11, %xmm15, %xmm11; vpxor %xmm12, %xmm15, %xmm12; vpxor 13 * 16(%rax), %xmm15, %xmm13; vpxor 14 * 16(%rax), %xmm15, %xmm14; vpxor 15 * 16(%rax), %xmm15, %xmm15; call __camellia_enc_blk16; vpxor 0 * 16(%rdx), %xmm7, %xmm7; vpxor 1 * 16(%rdx), %xmm6, %xmm6; vpxor 2 * 16(%rdx), %xmm5, %xmm5; vpxor 3 * 16(%rdx), %xmm4, %xmm4; vpxor 4 * 16(%rdx), %xmm3, %xmm3; vpxor 5 * 16(%rdx), %xmm2, %xmm2; vpxor 6 * 16(%rdx), %xmm1, %xmm1; vpxor 7 * 16(%rdx), %xmm0, %xmm0; vpxor 8 * 16(%rdx), %xmm15, %xmm15; vpxor 9 * 16(%rdx), %xmm14, %xmm14; vpxor 10 * 16(%rdx), %xmm13, %xmm13; vpxor 11 * 16(%rdx), %xmm12, %xmm12; vpxor 12 * 16(%rdx), 
%xmm11, %xmm11; vpxor 13 * 16(%rdx), %xmm10, %xmm10; vpxor 14 * 16(%rdx), %xmm9, %xmm9; vpxor 15 * 16(%rdx), %xmm8, %xmm8; write_output(%xmm7, %xmm6, %xmm5, %xmm4, %xmm3, %xmm2, %xmm1, %xmm0, %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9, %xmm8, %rsi); vzeroall; leave; CFI_LEAVE(); ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_camellia_aesni_avx_ctr_enc,.-_gcry_camellia_aesni_avx_ctr_enc;) .align 16 .globl _gcry_camellia_aesni_avx_cbc_dec ELF(.type _gcry_camellia_aesni_avx_cbc_dec,@function;) _gcry_camellia_aesni_avx_cbc_dec: /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) * %rdx: src (16 blocks) * %rcx: iv */ CFI_STARTPROC(); pushq %rbp; CFI_PUSH(%rbp); movq %rsp, %rbp; CFI_DEF_CFA_REGISTER(%rbp); vzeroupper; movq %rcx, %r9; cmpl $128, key_bitlength(CTX); movl $32, %r8d; movl $24, %eax; cmovel %eax, %r8d; /* max */ inpack16_pre(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rdx, (key_table)(CTX, %r8, 8)); subq $(16 * 16), %rsp; andq $~31, %rsp; movq %rsp, %rax; call __camellia_dec_blk16; /* XOR output with IV */ vpxor (%r9), %xmm7, %xmm7; vpxor (0 * 16)(%rdx), %xmm6, %xmm6; vpxor (1 * 16)(%rdx), %xmm5, %xmm5; vpxor (2 * 16)(%rdx), %xmm4, %xmm4; vpxor (3 * 16)(%rdx), %xmm3, %xmm3; vpxor (4 * 16)(%rdx), %xmm2, %xmm2; vpxor (5 * 16)(%rdx), %xmm1, %xmm1; vpxor (6 * 16)(%rdx), %xmm0, %xmm0; vpxor (7 * 16)(%rdx), %xmm15, %xmm15; vpxor (8 * 16)(%rdx), %xmm14, %xmm14; vpxor (9 * 16)(%rdx), %xmm13, %xmm13; vpxor (10 * 16)(%rdx), %xmm12, %xmm12; vpxor (11 * 16)(%rdx), %xmm11, %xmm11; vpxor (12 * 16)(%rdx), %xmm10, %xmm10; vpxor (13 * 16)(%rdx), %xmm9, %xmm9; vpxor (14 * 16)(%rdx), %xmm8, %xmm8; movq (15 * 16 + 0)(%rdx), %r10; movq (15 * 16 + 8)(%rdx), %r11; write_output(%xmm7, %xmm6, %xmm5, %xmm4, %xmm3, %xmm2, %xmm1, %xmm0, %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9, %xmm8, %rsi); /* store new IV */ movq %r10, (0)(%r9); movq %r11, (8)(%r9); vzeroall; leave; CFI_LEAVE(); ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_camellia_aesni_avx_cbc_dec,.-_gcry_camellia_aesni_avx_cbc_dec;) .align 16 .globl _gcry_camellia_aesni_avx_cfb_dec ELF(.type _gcry_camellia_aesni_avx_cfb_dec,@function;) _gcry_camellia_aesni_avx_cfb_dec: /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) * %rdx: src (16 blocks) * %rcx: iv */ CFI_STARTPROC(); pushq %rbp; CFI_PUSH(%rbp); movq %rsp, %rbp; CFI_DEF_CFA_REGISTER(%rbp); vzeroupper; cmpl $128, key_bitlength(CTX); movl $32, %r8d; movl $24, %eax; cmovel %eax, %r8d; /* max */ subq $(16 * 16), %rsp; andq $~31, %rsp; movq %rsp, %rax; /* inpack16_pre: */ vmovq (key_table)(CTX), %xmm0; vpshufb .Lpack_bswap rRIP, %xmm0, %xmm0; vpxor (%rcx), %xmm0, %xmm15; vmovdqu 15 * 16(%rdx), %xmm1; vmovdqu %xmm1, (%rcx); /* store new IV */ vpxor 0 * 16(%rdx), %xmm0, %xmm14; vpxor 1 * 16(%rdx), %xmm0, %xmm13; vpxor 2 * 16(%rdx), %xmm0, %xmm12; vpxor 3 * 16(%rdx), %xmm0, %xmm11; vpxor 4 * 16(%rdx), %xmm0, %xmm10; vpxor 5 * 16(%rdx), %xmm0, %xmm9; vpxor 6 * 16(%rdx), %xmm0, %xmm8; vpxor 7 * 16(%rdx), %xmm0, %xmm7; vpxor 8 * 16(%rdx), %xmm0, %xmm6; vpxor 9 * 16(%rdx), %xmm0, %xmm5; vpxor 10 * 16(%rdx), %xmm0, %xmm4; vpxor 11 * 16(%rdx), %xmm0, %xmm3; vpxor 12 * 16(%rdx), %xmm0, %xmm2; vpxor 13 * 16(%rdx), %xmm0, %xmm1; vpxor 14 * 16(%rdx), %xmm0, %xmm0; call __camellia_enc_blk16; vpxor 0 * 16(%rdx), %xmm7, %xmm7; vpxor 1 * 16(%rdx), %xmm6, %xmm6; vpxor 2 * 16(%rdx), %xmm5, %xmm5; vpxor 3 * 16(%rdx), %xmm4, %xmm4; vpxor 4 * 16(%rdx), %xmm3, %xmm3; vpxor 5 * 16(%rdx), %xmm2, %xmm2; vpxor 6 * 16(%rdx), %xmm1, 
%xmm1; vpxor 7 * 16(%rdx), %xmm0, %xmm0; vpxor 8 * 16(%rdx), %xmm15, %xmm15; vpxor 9 * 16(%rdx), %xmm14, %xmm14; vpxor 10 * 16(%rdx), %xmm13, %xmm13; vpxor 11 * 16(%rdx), %xmm12, %xmm12; vpxor 12 * 16(%rdx), %xmm11, %xmm11; vpxor 13 * 16(%rdx), %xmm10, %xmm10; vpxor 14 * 16(%rdx), %xmm9, %xmm9; vpxor 15 * 16(%rdx), %xmm8, %xmm8; write_output(%xmm7, %xmm6, %xmm5, %xmm4, %xmm3, %xmm2, %xmm1, %xmm0, %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9, %xmm8, %rsi); vzeroall; leave; CFI_LEAVE(); ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_camellia_aesni_avx_cfb_dec,.-_gcry_camellia_aesni_avx_cfb_dec;) .align 16 .globl _gcry_camellia_aesni_avx_ocb_enc ELF(.type _gcry_camellia_aesni_avx_ocb_enc,@function;) _gcry_camellia_aesni_avx_ocb_enc: /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) * %rdx: src (16 blocks) * %rcx: offset * %r8 : checksum * %r9 : L pointers (void *L[16]) */ CFI_STARTPROC(); pushq %rbp; CFI_PUSH(%rbp); movq %rsp, %rbp; CFI_DEF_CFA_REGISTER(%rbp); vzeroupper; subq $(16 * 16 + 4 * 8), %rsp; andq $~31, %rsp; movq %rsp, %rax; movq %r10, (16 * 16 + 0 * 8)(%rsp); movq %r11, (16 * 16 + 1 * 8)(%rsp); movq %r12, (16 * 16 + 2 * 8)(%rsp); movq %r13, (16 * 16 + 3 * 8)(%rsp); CFI_REG_ON_STACK(r10, 16 * 16 + 0 * 8); CFI_REG_ON_STACK(r11, 16 * 16 + 1 * 8); CFI_REG_ON_STACK(r12, 16 * 16 + 2 * 8); CFI_REG_ON_STACK(r13, 16 * 16 + 3 * 8); vmovdqu (%rcx), %xmm14; vmovdqu (%r8), %xmm15; /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ /* Checksum_i = Checksum_{i-1} xor P_i */ /* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i) */ #define OCB_INPUT(n, lreg, xreg) \ vmovdqu (n * 16)(%rdx), xreg; \ vpxor (lreg), %xmm14, %xmm14; \ vpxor xreg, %xmm15, %xmm15; \ vpxor xreg, %xmm14, xreg; \ vmovdqu %xmm14, (n * 16)(%rsi); movq (0 * 8)(%r9), %r10; movq (1 * 8)(%r9), %r11; movq (2 * 8)(%r9), %r12; movq (3 * 8)(%r9), %r13; OCB_INPUT(0, %r10, %xmm0); vmovdqu %xmm0, (15 * 16)(%rax); OCB_INPUT(1, %r11, %xmm0); vmovdqu %xmm0, (14 * 16)(%rax); OCB_INPUT(2, %r12, %xmm13); OCB_INPUT(3, %r13, %xmm12); movq (4 * 8)(%r9), %r10; movq (5 * 8)(%r9), %r11; movq (6 * 8)(%r9), %r12; movq (7 * 8)(%r9), %r13; OCB_INPUT(4, %r10, %xmm11); OCB_INPUT(5, %r11, %xmm10); OCB_INPUT(6, %r12, %xmm9); OCB_INPUT(7, %r13, %xmm8); movq (8 * 8)(%r9), %r10; movq (9 * 8)(%r9), %r11; movq (10 * 8)(%r9), %r12; movq (11 * 8)(%r9), %r13; OCB_INPUT(8, %r10, %xmm7); OCB_INPUT(9, %r11, %xmm6); OCB_INPUT(10, %r12, %xmm5); OCB_INPUT(11, %r13, %xmm4); movq (12 * 8)(%r9), %r10; movq (13 * 8)(%r9), %r11; movq (14 * 8)(%r9), %r12; movq (15 * 8)(%r9), %r13; OCB_INPUT(12, %r10, %xmm3); OCB_INPUT(13, %r11, %xmm2); OCB_INPUT(14, %r12, %xmm1); OCB_INPUT(15, %r13, %xmm0); #undef OCB_INPUT vmovdqu %xmm14, (%rcx); vmovdqu %xmm15, (%r8); cmpl $128, key_bitlength(CTX); movl $32, %r8d; movl $24, %r10d; cmovel %r10d, %r8d; /* max */ /* inpack16_pre: */ vmovq (key_table)(CTX), %xmm15; vpshufb .Lpack_bswap rRIP, %xmm15, %xmm15; vpxor %xmm0, %xmm15, %xmm0; vpxor %xmm1, %xmm15, %xmm1; vpxor %xmm2, %xmm15, %xmm2; vpxor %xmm3, %xmm15, %xmm3; vpxor %xmm4, %xmm15, %xmm4; vpxor %xmm5, %xmm15, %xmm5; vpxor %xmm6, %xmm15, %xmm6; vpxor %xmm7, %xmm15, %xmm7; vpxor %xmm8, %xmm15, %xmm8; vpxor %xmm9, %xmm15, %xmm9; vpxor %xmm10, %xmm15, %xmm10; vpxor %xmm11, %xmm15, %xmm11; vpxor %xmm12, %xmm15, %xmm12; vpxor %xmm13, %xmm15, %xmm13; vpxor 14 * 16(%rax), %xmm15, %xmm14; vpxor 15 * 16(%rax), %xmm15, %xmm15; call __camellia_enc_blk16; vpxor 0 * 16(%rsi), %xmm7, %xmm7; vpxor 1 * 16(%rsi), %xmm6, %xmm6; vpxor 2 * 16(%rsi), %xmm5, %xmm5; vpxor 3 * 16(%rsi), %xmm4, %xmm4; vpxor 4 * 
16(%rsi), %xmm3, %xmm3; vpxor 5 * 16(%rsi), %xmm2, %xmm2; vpxor 6 * 16(%rsi), %xmm1, %xmm1; vpxor 7 * 16(%rsi), %xmm0, %xmm0; vpxor 8 * 16(%rsi), %xmm15, %xmm15; vpxor 9 * 16(%rsi), %xmm14, %xmm14; vpxor 10 * 16(%rsi), %xmm13, %xmm13; vpxor 11 * 16(%rsi), %xmm12, %xmm12; vpxor 12 * 16(%rsi), %xmm11, %xmm11; vpxor 13 * 16(%rsi), %xmm10, %xmm10; vpxor 14 * 16(%rsi), %xmm9, %xmm9; vpxor 15 * 16(%rsi), %xmm8, %xmm8; write_output(%xmm7, %xmm6, %xmm5, %xmm4, %xmm3, %xmm2, %xmm1, %xmm0, %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9, %xmm8, %rsi); vzeroall; movq (16 * 16 + 0 * 8)(%rsp), %r10; movq (16 * 16 + 1 * 8)(%rsp), %r11; movq (16 * 16 + 2 * 8)(%rsp), %r12; movq (16 * 16 + 3 * 8)(%rsp), %r13; CFI_RESTORE(%r10); CFI_RESTORE(%r11); CFI_RESTORE(%r12); CFI_RESTORE(%r13); leave; CFI_LEAVE(); ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_camellia_aesni_avx_ocb_enc,.-_gcry_camellia_aesni_avx_ocb_enc;) .align 16 .globl _gcry_camellia_aesni_avx_ocb_dec ELF(.type _gcry_camellia_aesni_avx_ocb_dec,@function;) _gcry_camellia_aesni_avx_ocb_dec: /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) * %rdx: src (16 blocks) * %rcx: offset * %r8 : checksum * %r9 : L pointers (void *L[16]) */ CFI_STARTPROC(); pushq %rbp; CFI_PUSH(%rbp); movq %rsp, %rbp; CFI_DEF_CFA_REGISTER(%rbp); vzeroupper; subq $(16 * 16 + 4 * 8), %rsp; andq $~31, %rsp; movq %rsp, %rax; movq %r10, (16 * 16 + 0 * 8)(%rsp); movq %r11, (16 * 16 + 1 * 8)(%rsp); movq %r12, (16 * 16 + 2 * 8)(%rsp); movq %r13, (16 * 16 + 3 * 8)(%rsp); CFI_REG_ON_STACK(r10, 16 * 16 + 0 * 8); CFI_REG_ON_STACK(r11, 16 * 16 + 1 * 8); CFI_REG_ON_STACK(r12, 16 * 16 + 2 * 8); CFI_REG_ON_STACK(r13, 16 * 16 + 3 * 8); vmovdqu (%rcx), %xmm15; /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ /* P_i = Offset_i xor DECIPHER(K, C_i xor Offset_i) */ #define OCB_INPUT(n, lreg, xreg) \ vmovdqu (n * 16)(%rdx), xreg; \ vpxor (lreg), %xmm15, %xmm15; \ vpxor xreg, %xmm15, xreg; \ vmovdqu %xmm15, (n * 16)(%rsi); movq (0 * 8)(%r9), %r10; movq (1 * 8)(%r9), %r11; movq (2 * 8)(%r9), %r12; movq (3 * 8)(%r9), %r13; OCB_INPUT(0, %r10, %xmm0); vmovdqu %xmm0, (15 * 16)(%rax); OCB_INPUT(1, %r11, %xmm14); OCB_INPUT(2, %r12, %xmm13); OCB_INPUT(3, %r13, %xmm12); movq (4 * 8)(%r9), %r10; movq (5 * 8)(%r9), %r11; movq (6 * 8)(%r9), %r12; movq (7 * 8)(%r9), %r13; OCB_INPUT(4, %r10, %xmm11); OCB_INPUT(5, %r11, %xmm10); OCB_INPUT(6, %r12, %xmm9); OCB_INPUT(7, %r13, %xmm8); movq (8 * 8)(%r9), %r10; movq (9 * 8)(%r9), %r11; movq (10 * 8)(%r9), %r12; movq (11 * 8)(%r9), %r13; OCB_INPUT(8, %r10, %xmm7); OCB_INPUT(9, %r11, %xmm6); OCB_INPUT(10, %r12, %xmm5); OCB_INPUT(11, %r13, %xmm4); movq (12 * 8)(%r9), %r10; movq (13 * 8)(%r9), %r11; movq (14 * 8)(%r9), %r12; movq (15 * 8)(%r9), %r13; OCB_INPUT(12, %r10, %xmm3); OCB_INPUT(13, %r11, %xmm2); OCB_INPUT(14, %r12, %xmm1); OCB_INPUT(15, %r13, %xmm0); #undef OCB_INPUT vmovdqu %xmm15, (%rcx); movq %r8, %r10; cmpl $128, key_bitlength(CTX); movl $32, %r8d; movl $24, %r9d; cmovel %r9d, %r8d; /* max */ /* inpack16_pre: */ vmovq (key_table)(CTX, %r8, 8), %xmm15; vpshufb .Lpack_bswap rRIP, %xmm15, %xmm15; vpxor %xmm0, %xmm15, %xmm0; vpxor %xmm1, %xmm15, %xmm1; vpxor %xmm2, %xmm15, %xmm2; vpxor %xmm3, %xmm15, %xmm3; vpxor %xmm4, %xmm15, %xmm4; vpxor %xmm5, %xmm15, %xmm5; vpxor %xmm6, %xmm15, %xmm6; vpxor %xmm7, %xmm15, %xmm7; vpxor %xmm8, %xmm15, %xmm8; vpxor %xmm9, %xmm15, %xmm9; vpxor %xmm10, %xmm15, %xmm10; vpxor %xmm11, %xmm15, %xmm11; vpxor %xmm12, %xmm15, %xmm12; vpxor %xmm13, %xmm15, %xmm13; vpxor %xmm14, %xmm15, %xmm14; vpxor 15 * 16(%rax), %xmm15, %xmm15; call 
__camellia_dec_blk16; vpxor 0 * 16(%rsi), %xmm7, %xmm7; vpxor 1 * 16(%rsi), %xmm6, %xmm6; vpxor 2 * 16(%rsi), %xmm5, %xmm5; vpxor 3 * 16(%rsi), %xmm4, %xmm4; vpxor 4 * 16(%rsi), %xmm3, %xmm3; vpxor 5 * 16(%rsi), %xmm2, %xmm2; vpxor 6 * 16(%rsi), %xmm1, %xmm1; vpxor 7 * 16(%rsi), %xmm0, %xmm0; vmovdqu %xmm7, (7 * 16)(%rax); vpxor 8 * 16(%rsi), %xmm15, %xmm15; vpxor 9 * 16(%rsi), %xmm14, %xmm14; vpxor 10 * 16(%rsi), %xmm13, %xmm13; vpxor 11 * 16(%rsi), %xmm12, %xmm12; vpxor 12 * 16(%rsi), %xmm11, %xmm11; vpxor 13 * 16(%rsi), %xmm10, %xmm10; vpxor 14 * 16(%rsi), %xmm9, %xmm9; vpxor 15 * 16(%rsi), %xmm8, %xmm8; /* Checksum_i = Checksum_{i-1} xor P_i */ vpxor (%r10), %xmm7, %xmm7; vpxor %xmm6, %xmm7, %xmm7; vpxor %xmm5, %xmm7, %xmm7; vpxor %xmm4, %xmm7, %xmm7; vpxor %xmm3, %xmm7, %xmm7; vpxor %xmm2, %xmm7, %xmm7; vpxor %xmm1, %xmm7, %xmm7; vpxor %xmm0, %xmm7, %xmm7; vpxor %xmm15, %xmm7, %xmm7; vpxor %xmm14, %xmm7, %xmm7; vpxor %xmm13, %xmm7, %xmm7; vpxor %xmm12, %xmm7, %xmm7; vpxor %xmm11, %xmm7, %xmm7; vpxor %xmm10, %xmm7, %xmm7; vpxor %xmm9, %xmm7, %xmm7; vpxor %xmm8, %xmm7, %xmm7; vmovdqu %xmm7, (%r10); vmovdqu (7 * 16)(%rax), %xmm7; write_output(%xmm7, %xmm6, %xmm5, %xmm4, %xmm3, %xmm2, %xmm1, %xmm0, %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9, %xmm8, %rsi); vzeroall; movq (16 * 16 + 0 * 8)(%rsp), %r10; movq (16 * 16 + 1 * 8)(%rsp), %r11; movq (16 * 16 + 2 * 8)(%rsp), %r12; movq (16 * 16 + 3 * 8)(%rsp), %r13; CFI_RESTORE(%r10); CFI_RESTORE(%r11); CFI_RESTORE(%r12); CFI_RESTORE(%r13); leave; CFI_LEAVE(); ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_camellia_aesni_avx_ocb_dec,.-_gcry_camellia_aesni_avx_ocb_dec;) .align 16 .globl _gcry_camellia_aesni_avx_ocb_auth ELF(.type _gcry_camellia_aesni_avx_ocb_auth,@function;) _gcry_camellia_aesni_avx_ocb_auth: /* input: * %rdi: ctx, CTX * %rsi: abuf (16 blocks) * %rdx: offset * %rcx: checksum * %r8 : L pointers (void *L[16]) */ CFI_STARTPROC(); pushq %rbp; CFI_PUSH(%rbp); movq %rsp, %rbp; CFI_DEF_CFA_REGISTER(%rbp); vzeroupper; subq $(16 * 16 + 4 * 8), %rsp; andq $~31, %rsp; movq %rsp, %rax; movq %r10, (16 * 16 + 0 * 8)(%rsp); movq %r11, (16 * 16 + 1 * 8)(%rsp); movq %r12, (16 * 16 + 2 * 8)(%rsp); movq %r13, (16 * 16 + 3 * 8)(%rsp); CFI_REG_ON_STACK(r10, 16 * 16 + 0 * 8); CFI_REG_ON_STACK(r11, 16 * 16 + 1 * 8); CFI_REG_ON_STACK(r12, 16 * 16 + 2 * 8); CFI_REG_ON_STACK(r13, 16 * 16 + 3 * 8); vmovdqu (%rdx), %xmm15; /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ /* Sum_i = Sum_{i-1} xor ENCIPHER(K, A_i xor Offset_i) */ #define OCB_INPUT(n, lreg, xreg) \ vmovdqu (n * 16)(%rsi), xreg; \ vpxor (lreg), %xmm15, %xmm15; \ vpxor xreg, %xmm15, xreg; movq (0 * 8)(%r8), %r10; movq (1 * 8)(%r8), %r11; movq (2 * 8)(%r8), %r12; movq (3 * 8)(%r8), %r13; OCB_INPUT(0, %r10, %xmm0); vmovdqu %xmm0, (15 * 16)(%rax); OCB_INPUT(1, %r11, %xmm14); OCB_INPUT(2, %r12, %xmm13); OCB_INPUT(3, %r13, %xmm12); movq (4 * 8)(%r8), %r10; movq (5 * 8)(%r8), %r11; movq (6 * 8)(%r8), %r12; movq (7 * 8)(%r8), %r13; OCB_INPUT(4, %r10, %xmm11); OCB_INPUT(5, %r11, %xmm10); OCB_INPUT(6, %r12, %xmm9); OCB_INPUT(7, %r13, %xmm8); movq (8 * 8)(%r8), %r10; movq (9 * 8)(%r8), %r11; movq (10 * 8)(%r8), %r12; movq (11 * 8)(%r8), %r13; OCB_INPUT(8, %r10, %xmm7); OCB_INPUT(9, %r11, %xmm6); OCB_INPUT(10, %r12, %xmm5); OCB_INPUT(11, %r13, %xmm4); movq (12 * 8)(%r8), %r10; movq (13 * 8)(%r8), %r11; movq (14 * 8)(%r8), %r12; movq (15 * 8)(%r8), %r13; OCB_INPUT(12, %r10, %xmm3); OCB_INPUT(13, %r11, %xmm2); OCB_INPUT(14, %r12, %xmm1); OCB_INPUT(15, %r13, %xmm0); #undef OCB_INPUT cmpl $128, 
key_bitlength(CTX); movl $32, %r8d; movl $24, %r10d; cmovel %r10d, %r8d; /* max */ vmovdqu %xmm15, (%rdx); movq %rcx, %r10; /* inpack16_pre: */ vmovq (key_table)(CTX), %xmm15; vpshufb .Lpack_bswap rRIP, %xmm15, %xmm15; vpxor %xmm0, %xmm15, %xmm0; vpxor %xmm1, %xmm15, %xmm1; vpxor %xmm2, %xmm15, %xmm2; vpxor %xmm3, %xmm15, %xmm3; vpxor %xmm4, %xmm15, %xmm4; vpxor %xmm5, %xmm15, %xmm5; vpxor %xmm6, %xmm15, %xmm6; vpxor %xmm7, %xmm15, %xmm7; vpxor %xmm8, %xmm15, %xmm8; vpxor %xmm9, %xmm15, %xmm9; vpxor %xmm10, %xmm15, %xmm10; vpxor %xmm11, %xmm15, %xmm11; vpxor %xmm12, %xmm15, %xmm12; vpxor %xmm13, %xmm15, %xmm13; vpxor %xmm14, %xmm15, %xmm14; vpxor 15 * 16(%rax), %xmm15, %xmm15; call __camellia_enc_blk16; vpxor %xmm7, %xmm6, %xmm6; vpxor %xmm5, %xmm4, %xmm4; vpxor %xmm3, %xmm2, %xmm2; vpxor %xmm1, %xmm0, %xmm0; vpxor %xmm15, %xmm14, %xmm14; vpxor %xmm13, %xmm12, %xmm12; vpxor %xmm11, %xmm10, %xmm10; vpxor %xmm9, %xmm8, %xmm8; vpxor %xmm6, %xmm4, %xmm4; vpxor %xmm2, %xmm0, %xmm0; vpxor %xmm14, %xmm12, %xmm12; vpxor %xmm10, %xmm8, %xmm8; vpxor %xmm4, %xmm0, %xmm0; vpxor %xmm12, %xmm8, %xmm8; vpxor %xmm0, %xmm8, %xmm0; vpxor (%r10), %xmm0, %xmm0; vmovdqu %xmm0, (%r10); vzeroall; movq (16 * 16 + 0 * 8)(%rsp), %r10; movq (16 * 16 + 1 * 8)(%rsp), %r11; movq (16 * 16 + 2 * 8)(%rsp), %r12; movq (16 * 16 + 3 * 8)(%rsp), %r13; CFI_RESTORE(%r10); CFI_RESTORE(%r11); CFI_RESTORE(%r12); CFI_RESTORE(%r13); leave; CFI_LEAVE(); ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_camellia_aesni_avx_ocb_auth,.-_gcry_camellia_aesni_avx_ocb_auth;) /* * IN: * ab: 64-bit AB state * cd: 64-bit CD state */ #define camellia_f(ab, x, t0, t1, t2, t3, t4, inv_shift_row, sbox4mask, \ _0f0f0f0fmask, pre_s1lo_mask, pre_s1hi_mask, key) \ vmovq key, t0; \ vpxor x, x, t3; \ \ vpxor ab, t0, x; \ \ /* \ * S-function with AES subbytes \ */ \ \ /* input rotation for sbox4 (<<< 1) */ \ vpand x, sbox4mask, t0; \ vpandn x, sbox4mask, x; \ vpaddw t0, t0, t1; \ vpsrlw $7, t0, t0; \ vpor t0, t1, t0; \ vpand sbox4mask, t0, t0; \ vpor t0, x, x; \ \ vmovdqa .Lpost_tf_lo_s1 rRIP, t0; \ vmovdqa .Lpost_tf_hi_s1 rRIP, t1; \ \ /* prefilter sboxes */ \ filter_8bit(x, pre_s1lo_mask, pre_s1hi_mask, _0f0f0f0fmask, t2); \ \ /* AES subbytes + AES shift rows + AES inv shift rows */ \ vaesenclast t3, x, x; \ \ /* postfilter sboxes */ \ filter_8bit(x, t0, t1, _0f0f0f0fmask, t2); \ \ /* output rotation for sbox2 (<<< 1) */ \ /* output rotation for sbox3 (>>> 1) */ \ vpshufb inv_shift_row, x, t1; \ vpshufb .Lsp0044440444044404mask rRIP, x, t4; \ vpshufb .Lsp1110111010011110mask rRIP, x, x; \ vpaddb t1, t1, t2; \ vpsrlw $7, t1, t0; \ vpsllw $7, t1, t3; \ vpor t0, t2, t0; \ vpsrlw $1, t1, t1; \ vpshufb .Lsp0222022222000222mask rRIP, t0, t0; \ vpor t1, t3, t1; \ \ vpxor x, t4, t4; \ vpshufb .Lsp3033303303303033mask rRIP, t1, t1; \ vpxor t4, t0, t0; \ vpxor t1, t0, t0; \ vpsrldq $8, t0, x; \ vpxor t0, x, x; #define vec_rol128(in, out, nrol, t0) \ vpshufd $0x4e, in, out; \ vpsllq $(nrol), in, t0; \ vpsrlq $(64-(nrol)), out, out; \ vpaddd t0, out, out; #define vec_ror128(in, out, nror, t0) \ vpshufd $0x4e, in, out; \ vpsrlq $(nror), in, t0; \ vpsllq $(64-(nror)), out, out; \ vpaddd t0, out, out; +SECTION_RODATA + +ELF(.type _camellia_aesni_avx_keysetup_data,@object;) +_camellia_aesni_avx_keysetup_data: .align 16 .Linv_shift_row_and_unpcklbw: .byte 0x00, 0xff, 0x0d, 0xff, 0x0a, 0xff, 0x07, 0xff .byte 0x04, 0xff, 0x01, 0xff, 0x0e, 0xff, 0x0b, 0xff .Lsp0044440444044404mask: .long 0xffff0404, 0x0404ff04; .long 0x0d0dff0d, 0x0d0dff0d; .Lsp1110111010011110mask: .long 
0x000000ff, 0x000000ff; .long 0x0bffff0b, 0x0b0b0bff; .Lsp0222022222000222mask: .long 0xff060606, 0xff060606; .long 0x0c0cffff, 0xff0c0c0c; .Lsp3033303303303033mask: .long 0x04ff0404, 0x04ff0404; .long 0xff0a0aff, 0x0aff0a0a; .Lsbox4_input_mask: .byte 0x00, 0xff, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00; .Lsigma1: .long 0x3BCC908B, 0xA09E667F; .Lsigma2: .long 0x4CAA73B2, 0xB67AE858; .Lsigma3: .long 0xE94F82BE, 0xC6EF372F; .Lsigma4: .long 0xF1D36F1C, 0x54FF53A5; .Lsigma5: .long 0xDE682D1D, 0x10E527FA; .Lsigma6: .long 0xB3E6C1FD, 0xB05688C2; +.text .align 16 ELF(.type __camellia_avx_setup128,@function;) __camellia_avx_setup128: /* input: * %rdi: ctx, CTX; subkey storage at key_table(CTX) * %xmm0: key */ CFI_STARTPROC(); #define cmll_sub(n, ctx) (key_table+((n)*8))(ctx) #define KL128 %xmm0 #define KA128 %xmm2 vpshufb .Lbswap128_mask rRIP, KL128, KL128; vmovdqa .Linv_shift_row_and_unpcklbw rRIP, %xmm11; vmovq .Lsbox4_input_mask rRIP, %xmm12; vbroadcastss .L0f0f0f0f rRIP, %xmm13; vmovdqa .Lpre_tf_lo_s1 rRIP, %xmm14; vmovdqa .Lpre_tf_hi_s1 rRIP, %xmm15; /* * Generate KA */ vpsrldq $8, KL128, %xmm2; vmovdqa KL128, %xmm3; vpslldq $8, %xmm3, %xmm3; vpsrldq $8, %xmm3, %xmm3; camellia_f(%xmm2, %xmm4, %xmm1, %xmm5, %xmm6, %xmm7, %xmm8, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, .Lsigma1 rRIP); vpxor %xmm4, %xmm3, %xmm3; camellia_f(%xmm3, %xmm2, %xmm1, %xmm5, %xmm6, %xmm7, %xmm8, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, .Lsigma2 rRIP); camellia_f(%xmm2, %xmm3, %xmm1, %xmm5, %xmm6, %xmm7, %xmm8, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, .Lsigma3 rRIP); vpxor %xmm4, %xmm3, %xmm3; camellia_f(%xmm3, %xmm4, %xmm1, %xmm5, %xmm6, %xmm7, %xmm8, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, .Lsigma4 rRIP); vpslldq $8, %xmm3, %xmm3; vpxor %xmm4, %xmm2, %xmm2; vpsrldq $8, %xmm3, %xmm3; vpslldq $8, %xmm2, KA128; vpor %xmm3, KA128, KA128; /* * Generate subkeys */ vmovdqu KA128, cmll_sub(24, CTX); vec_rol128(KL128, %xmm3, 15, %xmm15); vec_rol128(KA128, %xmm4, 15, %xmm15); vec_rol128(KA128, %xmm5, 30, %xmm15); vec_rol128(KL128, %xmm6, 45, %xmm15); vec_rol128(KA128, %xmm7, 45, %xmm15); vec_rol128(KL128, %xmm8, 60, %xmm15); vec_rol128(KA128, %xmm9, 60, %xmm15); vec_ror128(KL128, %xmm10, 128-77, %xmm15); /* absorb kw2 to other subkeys */ vpslldq $8, KL128, %xmm15; vpsrldq $8, %xmm15, %xmm15; vpxor %xmm15, KA128, KA128; vpxor %xmm15, %xmm3, %xmm3; vpxor %xmm15, %xmm4, %xmm4; /* subl(1) ^= subr(1) & ~subr(9); */ vpandn %xmm15, %xmm5, %xmm13; vpslldq $12, %xmm13, %xmm13; vpsrldq $8, %xmm13, %xmm13; vpxor %xmm13, %xmm15, %xmm15; /* dw = subl(1) & subl(9), subr(1) ^= CAMELLIA_RL1(dw); */ vpand %xmm15, %xmm5, %xmm14; vpslld $1, %xmm14, %xmm11; vpsrld $31, %xmm14, %xmm14; vpaddd %xmm11, %xmm14, %xmm14; vpslldq $8, %xmm14, %xmm14; vpsrldq $12, %xmm14, %xmm14; vpxor %xmm14, %xmm15, %xmm15; vpxor %xmm15, %xmm6, %xmm6; vpxor %xmm15, %xmm8, %xmm8; vpxor %xmm15, %xmm9, %xmm9; /* subl(1) ^= subr(1) & ~subr(17); */ vpandn %xmm15, %xmm10, %xmm13; vpslldq $12, %xmm13, %xmm13; vpsrldq $8, %xmm13, %xmm13; vpxor %xmm13, %xmm15, %xmm15; /* dw = subl(1) & subl(17), subr(1) ^= CAMELLIA_RL1(dw); */ vpand %xmm15, %xmm10, %xmm14; vpslld $1, %xmm14, %xmm11; vpsrld $31, %xmm14, %xmm14; vpaddd %xmm11, %xmm14, %xmm14; vpslldq $8, %xmm14, %xmm14; vpsrldq $12, %xmm14, %xmm14; vpxor %xmm14, %xmm15, %xmm15; vpshufd $0x1b, KL128, KL128; vpshufd $0x1b, KA128, KA128; vpshufd $0x1b, %xmm3, %xmm3; vpshufd $0x1b, %xmm4, %xmm4; vpshufd $0x1b, %xmm5, %xmm5; vpshufd $0x1b, %xmm6, %xmm6; vpshufd $0x1b, %xmm7, %xmm7; vpshufd $0x1b, %xmm8, %xmm8; vpshufd $0x1b, %xmm9, %xmm9; vpshufd 
$0x1b, %xmm10, %xmm10; vmovdqu KL128, cmll_sub(0, CTX); vpshufd $0x1b, KL128, KL128; vmovdqu KA128, cmll_sub(2, CTX); vmovdqu %xmm3, cmll_sub(4, CTX); vmovdqu %xmm4, cmll_sub(6, CTX); vmovdqu %xmm5, cmll_sub(8, CTX); vmovdqu %xmm6, cmll_sub(10, CTX); vpsrldq $8, %xmm8, %xmm8; vmovq %xmm7, cmll_sub(12, CTX); vmovq %xmm8, cmll_sub(13, CTX); vmovdqu %xmm9, cmll_sub(14, CTX); vmovdqu %xmm10, cmll_sub(16, CTX); vmovdqu cmll_sub(24, CTX), KA128; vec_ror128(KL128, %xmm3, 128 - 94, %xmm7); vec_ror128(KA128, %xmm4, 128 - 94, %xmm7); vec_ror128(KL128, %xmm5, 128 - 111, %xmm7); vec_ror128(KA128, %xmm6, 128 - 111, %xmm7); vpxor %xmm15, %xmm3, %xmm3; vpxor %xmm15, %xmm4, %xmm4; vpxor %xmm15, %xmm5, %xmm5; vpslldq $8, %xmm15, %xmm15; vpxor %xmm15, %xmm6, %xmm6; /* absorb kw4 to other subkeys */ vpslldq $8, %xmm6, %xmm15; vpxor %xmm15, %xmm5, %xmm5; vpxor %xmm15, %xmm4, %xmm4; vpxor %xmm15, %xmm3, %xmm3; /* subl(25) ^= subr(25) & ~subr(16); */ vpshufd $0x1b, cmll_sub(16, CTX), %xmm10; vpandn %xmm15, %xmm10, %xmm13; vpslldq $4, %xmm13, %xmm13; vpxor %xmm13, %xmm15, %xmm15; /* dw = subl(25) & subl(16), subr(25) ^= CAMELLIA_RL1(dw); */ vpand %xmm15, %xmm10, %xmm14; vpslld $1, %xmm14, %xmm11; vpsrld $31, %xmm14, %xmm14; vpaddd %xmm11, %xmm14, %xmm14; vpsrldq $12, %xmm14, %xmm14; vpslldq $8, %xmm14, %xmm14; vpxor %xmm14, %xmm15, %xmm15; vpshufd $0x1b, %xmm3, %xmm3; vpshufd $0x1b, %xmm4, %xmm4; vpshufd $0x1b, %xmm5, %xmm5; vpshufd $0x1b, %xmm6, %xmm6; vmovdqu %xmm3, cmll_sub(18, CTX); vmovdqu %xmm4, cmll_sub(20, CTX); vmovdqu %xmm5, cmll_sub(22, CTX); vmovdqu %xmm6, cmll_sub(24, CTX); vpshufd $0x1b, cmll_sub(14, CTX), %xmm3; vpshufd $0x1b, cmll_sub(12, CTX), %xmm4; vpshufd $0x1b, cmll_sub(10, CTX), %xmm5; vpshufd $0x1b, cmll_sub(8, CTX), %xmm6; vpxor %xmm15, %xmm3, %xmm3; vpxor %xmm15, %xmm4, %xmm4; vpxor %xmm15, %xmm5, %xmm5; /* subl(25) ^= subr(25) & ~subr(8); */ vpandn %xmm15, %xmm6, %xmm13; vpslldq $4, %xmm13, %xmm13; vpxor %xmm13, %xmm15, %xmm15; /* dw = subl(25) & subl(8), subr(25) ^= CAMELLIA_RL1(dw); */ vpand %xmm15, %xmm6, %xmm14; vpslld $1, %xmm14, %xmm11; vpsrld $31, %xmm14, %xmm14; vpaddd %xmm11, %xmm14, %xmm14; vpsrldq $12, %xmm14, %xmm14; vpslldq $8, %xmm14, %xmm14; vpxor %xmm14, %xmm15, %xmm15; vpshufd $0x1b, %xmm3, %xmm3; vpshufd $0x1b, %xmm4, %xmm4; vpshufd $0x1b, %xmm5, %xmm5; vmovdqu %xmm3, cmll_sub(14, CTX); vmovdqu %xmm4, cmll_sub(12, CTX); vmovdqu %xmm5, cmll_sub(10, CTX); vpshufd $0x1b, cmll_sub(6, CTX), %xmm6; vpshufd $0x1b, cmll_sub(4, CTX), %xmm4; vpshufd $0x1b, cmll_sub(2, CTX), %xmm2; vpshufd $0x1b, cmll_sub(0, CTX), %xmm0; vpxor %xmm15, %xmm6, %xmm6; vpxor %xmm15, %xmm4, %xmm4; vpxor %xmm15, %xmm2, %xmm2; vpxor %xmm15, %xmm0, %xmm0; vpshufd $0x1b, %xmm6, %xmm6; vpshufd $0x1b, %xmm4, %xmm4; vpshufd $0x1b, %xmm2, %xmm2; vpshufd $0x1b, %xmm0, %xmm0; vpsrldq $8, %xmm2, %xmm3; vpsrldq $8, %xmm4, %xmm5; vpsrldq $8, %xmm6, %xmm7; /* * key XOR is end of F-function. 
*/ vpxor %xmm2, %xmm0, %xmm0; vpxor %xmm4, %xmm2, %xmm2; vmovq %xmm0, cmll_sub(0, CTX); vmovq %xmm3, cmll_sub(2, CTX); vpxor %xmm5, %xmm3, %xmm3; vpxor %xmm6, %xmm4, %xmm4; vpxor %xmm7, %xmm5, %xmm5; vmovq %xmm2, cmll_sub(3, CTX); vmovq %xmm3, cmll_sub(4, CTX); vmovq %xmm4, cmll_sub(5, CTX); vmovq %xmm5, cmll_sub(6, CTX); vmovq cmll_sub(7, CTX), %xmm7; vmovq cmll_sub(8, CTX), %xmm8; vmovq cmll_sub(9, CTX), %xmm9; vmovq cmll_sub(10, CTX), %xmm10; /* tl = subl(10) ^ (subr(10) & ~subr(8)); */ vpandn %xmm10, %xmm8, %xmm15; vpsrldq $4, %xmm15, %xmm15; vpxor %xmm15, %xmm10, %xmm0; /* dw = tl & subl(8), tr = subr(10) ^ CAMELLIA_RL1(dw); */ vpand %xmm8, %xmm0, %xmm15; vpslld $1, %xmm15, %xmm14; vpsrld $31, %xmm15, %xmm15; vpaddd %xmm14, %xmm15, %xmm15; vpslldq $12, %xmm15, %xmm15; vpsrldq $8, %xmm15, %xmm15; vpxor %xmm15, %xmm0, %xmm0; vpxor %xmm0, %xmm6, %xmm6; vmovq %xmm6, cmll_sub(7, CTX); vmovq cmll_sub(11, CTX), %xmm11; vmovq cmll_sub(12, CTX), %xmm12; vmovq cmll_sub(13, CTX), %xmm13; vmovq cmll_sub(14, CTX), %xmm14; vmovq cmll_sub(15, CTX), %xmm15; /* tl = subl(7) ^ (subr(7) & ~subr(9)); */ vpandn %xmm7, %xmm9, %xmm1; vpsrldq $4, %xmm1, %xmm1; vpxor %xmm1, %xmm7, %xmm0; /* dw = tl & subl(9), tr = subr(7) ^ CAMELLIA_RL1(dw); */ vpand %xmm9, %xmm0, %xmm1; vpslld $1, %xmm1, %xmm2; vpsrld $31, %xmm1, %xmm1; vpaddd %xmm2, %xmm1, %xmm1; vpslldq $12, %xmm1, %xmm1; vpsrldq $8, %xmm1, %xmm1; vpxor %xmm1, %xmm0, %xmm0; vpxor %xmm11, %xmm0, %xmm0; vpxor %xmm12, %xmm10, %xmm10; vpxor %xmm13, %xmm11, %xmm11; vpxor %xmm14, %xmm12, %xmm12; vpxor %xmm15, %xmm13, %xmm13; vmovq %xmm0, cmll_sub(10, CTX); vmovq %xmm10, cmll_sub(11, CTX); vmovq %xmm11, cmll_sub(12, CTX); vmovq %xmm12, cmll_sub(13, CTX); vmovq %xmm13, cmll_sub(14, CTX); vmovq cmll_sub(16, CTX), %xmm6; vmovq cmll_sub(17, CTX), %xmm7; vmovq cmll_sub(18, CTX), %xmm8; vmovq cmll_sub(19, CTX), %xmm9; vmovq cmll_sub(20, CTX), %xmm10; /* tl = subl(18) ^ (subr(18) & ~subr(16)); */ vpandn %xmm8, %xmm6, %xmm1; vpsrldq $4, %xmm1, %xmm1; vpxor %xmm1, %xmm8, %xmm0; /* dw = tl & subl(16), tr = subr(18) ^ CAMELLIA_RL1(dw); */ vpand %xmm6, %xmm0, %xmm1; vpslld $1, %xmm1, %xmm2; vpsrld $31, %xmm1, %xmm1; vpaddd %xmm2, %xmm1, %xmm1; vpslldq $12, %xmm1, %xmm1; vpsrldq $8, %xmm1, %xmm1; vpxor %xmm1, %xmm0, %xmm0; vpxor %xmm14, %xmm0, %xmm0; vmovq %xmm0, cmll_sub(15, CTX); /* tl = subl(15) ^ (subr(15) & ~subr(17)); */ vpandn %xmm15, %xmm7, %xmm1; vpsrldq $4, %xmm1, %xmm1; vpxor %xmm1, %xmm15, %xmm0; /* dw = tl & subl(17), tr = subr(15) ^ CAMELLIA_RL1(dw); */ vpand %xmm7, %xmm0, %xmm1; vpslld $1, %xmm1, %xmm2; vpsrld $31, %xmm1, %xmm1; vpaddd %xmm2, %xmm1, %xmm1; vpslldq $12, %xmm1, %xmm1; vpsrldq $8, %xmm1, %xmm1; vpxor %xmm1, %xmm0, %xmm0; vmovq cmll_sub(21, CTX), %xmm1; vmovq cmll_sub(22, CTX), %xmm2; vmovq cmll_sub(23, CTX), %xmm3; vmovq cmll_sub(24, CTX), %xmm4; vpxor %xmm9, %xmm0, %xmm0; vpxor %xmm10, %xmm8, %xmm8; vpxor %xmm1, %xmm9, %xmm9; vpxor %xmm2, %xmm10, %xmm10; vpxor %xmm3, %xmm1, %xmm1; vpxor %xmm4, %xmm3, %xmm3; vmovq %xmm0, cmll_sub(18, CTX); vmovq %xmm8, cmll_sub(19, CTX); vmovq %xmm9, cmll_sub(20, CTX); vmovq %xmm10, cmll_sub(21, CTX); vmovq %xmm1, cmll_sub(22, CTX); vmovq %xmm2, cmll_sub(23, CTX); vmovq %xmm3, cmll_sub(24, CTX); /* kw2 and kw4 are unused now. 
*/ movq $0, cmll_sub(1, CTX); movq $0, cmll_sub(25, CTX); vzeroall; ret_spec_stop; CFI_ENDPROC(); ELF(.size __camellia_avx_setup128,.-__camellia_avx_setup128;) .align 16 ELF(.type __camellia_avx_setup256,@function;) __camellia_avx_setup256: /* input: * %rdi: ctx, CTX; subkey storage at key_table(CTX) * %xmm0 & %xmm1: key */ CFI_STARTPROC(); #define KL128 %xmm0 #define KR128 %xmm1 #define KA128 %xmm2 #define KB128 %xmm3 vpshufb .Lbswap128_mask rRIP, KL128, KL128; vpshufb .Lbswap128_mask rRIP, KR128, KR128; vmovdqa .Linv_shift_row_and_unpcklbw rRIP, %xmm11; vmovq .Lsbox4_input_mask rRIP, %xmm12; vbroadcastss .L0f0f0f0f rRIP, %xmm13; vmovdqa .Lpre_tf_lo_s1 rRIP, %xmm14; vmovdqa .Lpre_tf_hi_s1 rRIP, %xmm15; /* * Generate KA */ vpxor KL128, KR128, %xmm3; vpsrldq $8, KR128, %xmm6; vpsrldq $8, %xmm3, %xmm2; vpslldq $8, %xmm3, %xmm3; vpsrldq $8, %xmm3, %xmm3; camellia_f(%xmm2, %xmm4, %xmm5, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, .Lsigma1 rRIP); vpxor %xmm4, %xmm3, %xmm3; camellia_f(%xmm3, %xmm2, %xmm5, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, .Lsigma2 rRIP); vpxor %xmm6, %xmm2, %xmm2; camellia_f(%xmm2, %xmm3, %xmm5, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, .Lsigma3 rRIP); vpxor %xmm4, %xmm3, %xmm3; vpxor KR128, %xmm3, %xmm3; camellia_f(%xmm3, %xmm4, %xmm5, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, .Lsigma4 rRIP); vpslldq $8, %xmm3, %xmm3; vpxor %xmm4, %xmm2, %xmm2; vpsrldq $8, %xmm3, %xmm3; vpslldq $8, %xmm2, KA128; vpor %xmm3, KA128, KA128; /* * Generate KB */ vpxor KA128, KR128, %xmm3; vpsrldq $8, %xmm3, %xmm4; vpslldq $8, %xmm3, %xmm3; vpsrldq $8, %xmm3, %xmm3; camellia_f(%xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, .Lsigma5 rRIP); vpxor %xmm5, %xmm3, %xmm3; camellia_f(%xmm3, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, .Lsigma6 rRIP); vpslldq $8, %xmm3, %xmm3; vpxor %xmm5, %xmm4, %xmm4; vpsrldq $8, %xmm3, %xmm3; vpslldq $8, %xmm4, %xmm4; vpor %xmm3, %xmm4, KB128; /* * Generate subkeys */ vmovdqu KB128, cmll_sub(32, CTX); vec_rol128(KR128, %xmm4, 15, %xmm15); vec_rol128(KA128, %xmm5, 15, %xmm15); vec_rol128(KR128, %xmm6, 30, %xmm15); vec_rol128(KB128, %xmm7, 30, %xmm15); vec_rol128(KL128, %xmm8, 45, %xmm15); vec_rol128(KA128, %xmm9, 45, %xmm15); vec_rol128(KL128, %xmm10, 60, %xmm15); vec_rol128(KR128, %xmm11, 60, %xmm15); vec_rol128(KB128, %xmm12, 60, %xmm15); /* absorb kw2 to other subkeys */ vpslldq $8, KL128, %xmm15; vpsrldq $8, %xmm15, %xmm15; vpxor %xmm15, KB128, KB128; vpxor %xmm15, %xmm4, %xmm4; vpxor %xmm15, %xmm5, %xmm5; /* subl(1) ^= subr(1) & ~subr(9); */ vpandn %xmm15, %xmm6, %xmm13; vpslldq $12, %xmm13, %xmm13; vpsrldq $8, %xmm13, %xmm13; vpxor %xmm13, %xmm15, %xmm15; /* dw = subl(1) & subl(9), subr(1) ^= CAMELLIA_RL1(dw); */ vpand %xmm15, %xmm6, %xmm14; vpslld $1, %xmm14, %xmm13; vpsrld $31, %xmm14, %xmm14; vpaddd %xmm13, %xmm14, %xmm14; vpslldq $8, %xmm14, %xmm14; vpsrldq $12, %xmm14, %xmm14; vpxor %xmm14, %xmm15, %xmm15; vpxor %xmm15, %xmm7, %xmm7; vpxor %xmm15, %xmm8, %xmm8; vpxor %xmm15, %xmm9, %xmm9; vpshufd $0x1b, KL128, KL128; vpshufd $0x1b, KB128, KB128; vpshufd $0x1b, %xmm4, %xmm4; vpshufd $0x1b, %xmm5, %xmm5; vpshufd $0x1b, %xmm6, %xmm6; vpshufd $0x1b, %xmm7, %xmm7; vpshufd $0x1b, %xmm8, %xmm8; vpshufd $0x1b, %xmm9, %xmm9; vmovdqu KL128, cmll_sub(0, CTX); vpshufd $0x1b, KL128, KL128; vmovdqu KB128, cmll_sub(2, CTX); vmovdqu %xmm4, cmll_sub(4, CTX); vmovdqu %xmm5, cmll_sub(6, 
CTX); vmovdqu %xmm6, cmll_sub(8, CTX); vmovdqu %xmm7, cmll_sub(10, CTX); vmovdqu %xmm8, cmll_sub(12, CTX); vmovdqu %xmm9, cmll_sub(14, CTX); vmovdqu cmll_sub(32, CTX), KB128; /* subl(1) ^= subr(1) & ~subr(17); */ vpandn %xmm15, %xmm10, %xmm13; vpslldq $12, %xmm13, %xmm13; vpsrldq $8, %xmm13, %xmm13; vpxor %xmm13, %xmm15, %xmm15; /* dw = subl(1) & subl(17), subr(1) ^= CAMELLIA_RL1(dw); */ vpand %xmm15, %xmm10, %xmm14; vpslld $1, %xmm14, %xmm13; vpsrld $31, %xmm14, %xmm14; vpaddd %xmm13, %xmm14, %xmm14; vpslldq $8, %xmm14, %xmm14; vpsrldq $12, %xmm14, %xmm14; vpxor %xmm14, %xmm15, %xmm15; vpxor %xmm15, %xmm11, %xmm11; vpxor %xmm15, %xmm12, %xmm12; vec_ror128(KL128, %xmm4, 128-77, %xmm14); vec_ror128(KA128, %xmm5, 128-77, %xmm14); vec_ror128(KR128, %xmm6, 128-94, %xmm14); vec_ror128(KA128, %xmm7, 128-94, %xmm14); vec_ror128(KL128, %xmm8, 128-111, %xmm14); vec_ror128(KB128, %xmm9, 128-111, %xmm14); vpxor %xmm15, %xmm4, %xmm4; vpshufd $0x1b, %xmm10, %xmm10; vpshufd $0x1b, %xmm11, %xmm11; vpshufd $0x1b, %xmm12, %xmm12; vpshufd $0x1b, %xmm4, %xmm4; vmovdqu %xmm10, cmll_sub(16, CTX); vmovdqu %xmm11, cmll_sub(18, CTX); vmovdqu %xmm12, cmll_sub(20, CTX); vmovdqu %xmm4, cmll_sub(22, CTX); /* subl(1) ^= subr(1) & ~subr(25); */ vpandn %xmm15, %xmm5, %xmm13; vpslldq $12, %xmm13, %xmm13; vpsrldq $8, %xmm13, %xmm13; vpxor %xmm13, %xmm15, %xmm15; /* dw = subl(1) & subl(25), subr(1) ^= CAMELLIA_RL1(dw); */ vpand %xmm15, %xmm5, %xmm14; vpslld $1, %xmm14, %xmm13; vpsrld $31, %xmm14, %xmm14; vpaddd %xmm13, %xmm14, %xmm14; vpslldq $8, %xmm14, %xmm14; vpsrldq $12, %xmm14, %xmm14; vpxor %xmm14, %xmm15, %xmm15; vpxor %xmm15, %xmm6, %xmm6; vpxor %xmm15, %xmm7, %xmm7; vpxor %xmm15, %xmm8, %xmm8; vpslldq $8, %xmm15, %xmm15; vpxor %xmm15, %xmm9, %xmm9; /* absorb kw4 to other subkeys */ vpslldq $8, %xmm9, %xmm15; vpxor %xmm15, %xmm8, %xmm8; vpxor %xmm15, %xmm7, %xmm7; vpxor %xmm15, %xmm6, %xmm6; /* subl(33) ^= subr(33) & ~subr(24); */ vpandn %xmm15, %xmm5, %xmm14; vpslldq $4, %xmm14, %xmm14; vpxor %xmm14, %xmm15, %xmm15; /* dw = subl(33) & subl(24), subr(33) ^= CAMELLIA_RL1(dw); */ vpand %xmm15, %xmm5, %xmm14; vpslld $1, %xmm14, %xmm13; vpsrld $31, %xmm14, %xmm14; vpaddd %xmm13, %xmm14, %xmm14; vpsrldq $12, %xmm14, %xmm14; vpslldq $8, %xmm14, %xmm14; vpxor %xmm14, %xmm15, %xmm15; vpshufd $0x1b, %xmm5, %xmm5; vpshufd $0x1b, %xmm6, %xmm6; vpshufd $0x1b, %xmm7, %xmm7; vpshufd $0x1b, %xmm8, %xmm8; vpshufd $0x1b, %xmm9, %xmm9; vmovdqu %xmm5, cmll_sub(24, CTX); vmovdqu %xmm6, cmll_sub(26, CTX); vmovdqu %xmm7, cmll_sub(28, CTX); vmovdqu %xmm8, cmll_sub(30, CTX); vmovdqu %xmm9, cmll_sub(32, CTX); vpshufd $0x1b, cmll_sub(22, CTX), %xmm0; vpshufd $0x1b, cmll_sub(20, CTX), %xmm1; vpshufd $0x1b, cmll_sub(18, CTX), %xmm2; vpshufd $0x1b, cmll_sub(16, CTX), %xmm3; vpshufd $0x1b, cmll_sub(14, CTX), %xmm4; vpshufd $0x1b, cmll_sub(12, CTX), %xmm5; vpshufd $0x1b, cmll_sub(10, CTX), %xmm6; vpshufd $0x1b, cmll_sub(8, CTX), %xmm7; vpxor %xmm15, %xmm0, %xmm0; vpxor %xmm15, %xmm1, %xmm1; vpxor %xmm15, %xmm2, %xmm2; /* subl(33) ^= subr(33) & ~subr(24); */ vpandn %xmm15, %xmm3, %xmm14; vpslldq $4, %xmm14, %xmm14; vpxor %xmm14, %xmm15, %xmm15; /* dw = subl(33) & subl(24), subr(33) ^= CAMELLIA_RL1(dw); */ vpand %xmm15, %xmm3, %xmm14; vpslld $1, %xmm14, %xmm13; vpsrld $31, %xmm14, %xmm14; vpaddd %xmm13, %xmm14, %xmm14; vpsrldq $12, %xmm14, %xmm14; vpslldq $8, %xmm14, %xmm14; vpxor %xmm14, %xmm15, %xmm15; vpxor %xmm15, %xmm4, %xmm4; vpxor %xmm15, %xmm5, %xmm5; vpxor %xmm15, %xmm6, %xmm6; vpshufd $0x1b, %xmm0, %xmm0; vpshufd $0x1b, %xmm1, %xmm1; 
vpshufd $0x1b, %xmm2, %xmm2; vpshufd $0x1b, %xmm4, %xmm4; vpshufd $0x1b, %xmm5, %xmm5; vpshufd $0x1b, %xmm6, %xmm6; vmovdqu %xmm0, cmll_sub(22, CTX); vmovdqu %xmm1, cmll_sub(20, CTX); vmovdqu %xmm2, cmll_sub(18, CTX); vmovdqu %xmm4, cmll_sub(14, CTX); vmovdqu %xmm5, cmll_sub(12, CTX); vmovdqu %xmm6, cmll_sub(10, CTX); vpshufd $0x1b, cmll_sub(6, CTX), %xmm6; vpshufd $0x1b, cmll_sub(4, CTX), %xmm4; vpshufd $0x1b, cmll_sub(2, CTX), %xmm2; vpshufd $0x1b, cmll_sub(0, CTX), %xmm0; /* subl(33) ^= subr(33) & ~subr(24); */ vpandn %xmm15, %xmm7, %xmm14; vpslldq $4, %xmm14, %xmm14; vpxor %xmm14, %xmm15, %xmm15; /* dw = subl(33) & subl(24), subr(33) ^= CAMELLIA_RL1(dw); */ vpand %xmm15, %xmm7, %xmm14; vpslld $1, %xmm14, %xmm13; vpsrld $31, %xmm14, %xmm14; vpaddd %xmm13, %xmm14, %xmm14; vpsrldq $12, %xmm14, %xmm14; vpslldq $8, %xmm14, %xmm14; vpxor %xmm14, %xmm15, %xmm15; vpxor %xmm15, %xmm6, %xmm6; vpxor %xmm15, %xmm4, %xmm4; vpxor %xmm15, %xmm2, %xmm2; vpxor %xmm15, %xmm0, %xmm0; vpshufd $0x1b, %xmm6, %xmm6; vpshufd $0x1b, %xmm4, %xmm4; vpshufd $0x1b, %xmm2, %xmm2; vpshufd $0x1b, %xmm0, %xmm0; vpsrldq $8, %xmm2, %xmm3; vpsrldq $8, %xmm4, %xmm5; vpsrldq $8, %xmm6, %xmm7; /* * key XOR is end of F-function. */ vpxor %xmm2, %xmm0, %xmm0; vpxor %xmm4, %xmm2, %xmm2; vmovq %xmm0, cmll_sub(0, CTX); vmovq %xmm3, cmll_sub(2, CTX); vpxor %xmm5, %xmm3, %xmm3; vpxor %xmm6, %xmm4, %xmm4; vpxor %xmm7, %xmm5, %xmm5; vmovq %xmm2, cmll_sub(3, CTX); vmovq %xmm3, cmll_sub(4, CTX); vmovq %xmm4, cmll_sub(5, CTX); vmovq %xmm5, cmll_sub(6, CTX); vmovq cmll_sub(7, CTX), %xmm7; vmovq cmll_sub(8, CTX), %xmm8; vmovq cmll_sub(9, CTX), %xmm9; vmovq cmll_sub(10, CTX), %xmm10; /* tl = subl(10) ^ (subr(10) & ~subr(8)); */ vpandn %xmm10, %xmm8, %xmm15; vpsrldq $4, %xmm15, %xmm15; vpxor %xmm15, %xmm10, %xmm0; /* dw = tl & subl(8), tr = subr(10) ^ CAMELLIA_RL1(dw); */ vpand %xmm8, %xmm0, %xmm15; vpslld $1, %xmm15, %xmm14; vpsrld $31, %xmm15, %xmm15; vpaddd %xmm14, %xmm15, %xmm15; vpslldq $12, %xmm15, %xmm15; vpsrldq $8, %xmm15, %xmm15; vpxor %xmm15, %xmm0, %xmm0; vpxor %xmm0, %xmm6, %xmm6; vmovq %xmm6, cmll_sub(7, CTX); vmovq cmll_sub(11, CTX), %xmm11; vmovq cmll_sub(12, CTX), %xmm12; vmovq cmll_sub(13, CTX), %xmm13; vmovq cmll_sub(14, CTX), %xmm14; vmovq cmll_sub(15, CTX), %xmm15; /* tl = subl(7) ^ (subr(7) & ~subr(9)); */ vpandn %xmm7, %xmm9, %xmm1; vpsrldq $4, %xmm1, %xmm1; vpxor %xmm1, %xmm7, %xmm0; /* dw = tl & subl(9), tr = subr(7) ^ CAMELLIA_RL1(dw); */ vpand %xmm9, %xmm0, %xmm1; vpslld $1, %xmm1, %xmm2; vpsrld $31, %xmm1, %xmm1; vpaddd %xmm2, %xmm1, %xmm1; vpslldq $12, %xmm1, %xmm1; vpsrldq $8, %xmm1, %xmm1; vpxor %xmm1, %xmm0, %xmm0; vpxor %xmm11, %xmm0, %xmm0; vpxor %xmm12, %xmm10, %xmm10; vpxor %xmm13, %xmm11, %xmm11; vpxor %xmm14, %xmm12, %xmm12; vpxor %xmm15, %xmm13, %xmm13; vmovq %xmm0, cmll_sub(10, CTX); vmovq %xmm10, cmll_sub(11, CTX); vmovq %xmm11, cmll_sub(12, CTX); vmovq %xmm12, cmll_sub(13, CTX); vmovq %xmm13, cmll_sub(14, CTX); vmovq cmll_sub(16, CTX), %xmm6; vmovq cmll_sub(17, CTX), %xmm7; vmovq cmll_sub(18, CTX), %xmm8; vmovq cmll_sub(19, CTX), %xmm9; vmovq cmll_sub(20, CTX), %xmm10; /* tl = subl(18) ^ (subr(18) & ~subr(16)); */ vpandn %xmm8, %xmm6, %xmm1; vpsrldq $4, %xmm1, %xmm1; vpxor %xmm1, %xmm8, %xmm0; /* dw = tl & subl(16), tr = subr(18) ^ CAMELLIA_RL1(dw); */ vpand %xmm6, %xmm0, %xmm1; vpslld $1, %xmm1, %xmm2; vpsrld $31, %xmm1, %xmm1; vpaddd %xmm2, %xmm1, %xmm1; vpslldq $12, %xmm1, %xmm1; vpsrldq $8, %xmm1, %xmm1; vpxor %xmm1, %xmm0, %xmm0; vpxor %xmm14, %xmm0, %xmm0; vmovq %xmm0, cmll_sub(15, CTX); /* 
tl = subl(15) ^ (subr(15) & ~subr(17)); */ vpandn %xmm15, %xmm7, %xmm1; vpsrldq $4, %xmm1, %xmm1; vpxor %xmm1, %xmm15, %xmm0; /* dw = tl & subl(17), tr = subr(15) ^ CAMELLIA_RL1(dw); */ vpand %xmm7, %xmm0, %xmm1; vpslld $1, %xmm1, %xmm2; vpsrld $31, %xmm1, %xmm1; vpaddd %xmm2, %xmm1, %xmm1; vpslldq $12, %xmm1, %xmm1; vpsrldq $8, %xmm1, %xmm1; vpxor %xmm1, %xmm0, %xmm0; vmovq cmll_sub(21, CTX), %xmm1; vmovq cmll_sub(22, CTX), %xmm2; vmovq cmll_sub(23, CTX), %xmm3; vmovq cmll_sub(24, CTX), %xmm4; vpxor %xmm9, %xmm0, %xmm0; vpxor %xmm10, %xmm8, %xmm8; vpxor %xmm1, %xmm9, %xmm9; vpxor %xmm2, %xmm10, %xmm10; vpxor %xmm3, %xmm1, %xmm1; vmovq %xmm0, cmll_sub(18, CTX); vmovq %xmm8, cmll_sub(19, CTX); vmovq %xmm9, cmll_sub(20, CTX); vmovq %xmm10, cmll_sub(21, CTX); vmovq %xmm1, cmll_sub(22, CTX); vmovq cmll_sub(25, CTX), %xmm5; vmovq cmll_sub(26, CTX), %xmm6; vmovq cmll_sub(27, CTX), %xmm7; vmovq cmll_sub(28, CTX), %xmm8; vmovq cmll_sub(29, CTX), %xmm9; vmovq cmll_sub(30, CTX), %xmm10; vmovq cmll_sub(31, CTX), %xmm11; vmovq cmll_sub(32, CTX), %xmm12; /* tl = subl(26) ^ (subr(26) & ~subr(24)); */ vpandn %xmm6, %xmm4, %xmm15; vpsrldq $4, %xmm15, %xmm15; vpxor %xmm15, %xmm6, %xmm0; /* dw = tl & subl(26), tr = subr(24) ^ CAMELLIA_RL1(dw); */ vpand %xmm4, %xmm0, %xmm15; vpslld $1, %xmm15, %xmm14; vpsrld $31, %xmm15, %xmm15; vpaddd %xmm14, %xmm15, %xmm15; vpslldq $12, %xmm15, %xmm15; vpsrldq $8, %xmm15, %xmm15; vpxor %xmm15, %xmm0, %xmm0; vpxor %xmm0, %xmm2, %xmm2; vmovq %xmm2, cmll_sub(23, CTX); /* tl = subl(23) ^ (subr(23) & ~subr(25)); */ vpandn %xmm3, %xmm5, %xmm15; vpsrldq $4, %xmm15, %xmm15; vpxor %xmm15, %xmm3, %xmm0; /* dw = tl & subl(26), tr = subr(24) ^ CAMELLIA_RL1(dw); */ vpand %xmm5, %xmm0, %xmm15; vpslld $1, %xmm15, %xmm14; vpsrld $31, %xmm15, %xmm15; vpaddd %xmm14, %xmm15, %xmm15; vpslldq $12, %xmm15, %xmm15; vpsrldq $8, %xmm15, %xmm15; vpxor %xmm15, %xmm0, %xmm0; vpxor %xmm7, %xmm0, %xmm0; vpxor %xmm8, %xmm6, %xmm6; vpxor %xmm9, %xmm7, %xmm7; vpxor %xmm10, %xmm8, %xmm8; vpxor %xmm11, %xmm9, %xmm9; vpxor %xmm12, %xmm11, %xmm11; vmovq %xmm0, cmll_sub(26, CTX); vmovq %xmm6, cmll_sub(27, CTX); vmovq %xmm7, cmll_sub(28, CTX); vmovq %xmm8, cmll_sub(29, CTX); vmovq %xmm9, cmll_sub(30, CTX); vmovq %xmm10, cmll_sub(31, CTX); vmovq %xmm11, cmll_sub(32, CTX); /* kw2 and kw4 are unused now. 
*/ movq $0, cmll_sub(1, CTX); movq $0, cmll_sub(33, CTX); vzeroall; ret_spec_stop; CFI_ENDPROC(); ELF(.size __camellia_avx_setup256,.-__camellia_avx_setup256;) .align 16 .globl _gcry_camellia_aesni_avx_keygen ELF(.type _gcry_camellia_aesni_avx_keygen,@function;) _gcry_camellia_aesni_avx_keygen: /* input: * %rdi: ctx, CTX * %rsi: key * %rdx: keylen */ CFI_STARTPROC(); vzeroupper; vmovdqu (%rsi), %xmm0; cmpl $24, %edx; jb __camellia_avx_setup128; je .Lprepare_key192; vmovdqu 16(%rsi), %xmm1; jmp __camellia_avx_setup256; .Lprepare_key192: vpcmpeqd %xmm2, %xmm2, %xmm2; vmovq 16(%rsi), %xmm1; vpxor %xmm1, %xmm2, %xmm2; vpslldq $8, %xmm2, %xmm2; vpor %xmm2, %xmm1, %xmm1; jmp __camellia_avx_setup256; CFI_ENDPROC(); ELF(.size _gcry_camellia_aesni_avx_keygen,.-_gcry_camellia_aesni_avx_keygen;) #endif /*defined(ENABLE_AESNI_SUPPORT) && defined(ENABLE_AVX_SUPPORT)*/ #endif /*__x86_64*/ diff --git a/cipher/camellia-aesni-avx2-amd64.h b/cipher/camellia-aesni-avx2-amd64.h index b97cc2e3..c92a0559 100644 --- a/cipher/camellia-aesni-avx2-amd64.h +++ b/cipher/camellia-aesni-avx2-amd64.h @@ -1,2218 +1,2221 @@ /* camellia-aesni-avx2-amd64.h - AES-NI/VAES/GFNI/AVX2 implementation of Camellia * * Copyright (C) 2013-2015,2020-2022 Jussi Kivilinna * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . */ #ifndef GCRY_CAMELLIA_AESNI_AVX2_AMD64_H #define GCRY_CAMELLIA_AESNI_AVX2_AMD64_H #include "asm-common-amd64.h" #define CAMELLIA_TABLE_BYTE_LEN 272 /* struct CAMELLIA_context: */ #define key_table 0 #define key_bitlength CAMELLIA_TABLE_BYTE_LEN /* register macros */ #define CTX %rdi #define RIO %r8 /********************************************************************** helper macros **********************************************************************/ #ifndef CAMELLIA_GFNI_BUILD #define filter_8bit(x, lo_t, hi_t, mask4bit, tmp0) \ vpand x, mask4bit, tmp0; \ vpandn x, mask4bit, x; \ vpsrld $4, x, x; \ \ vpshufb tmp0, lo_t, tmp0; \ vpshufb x, hi_t, x; \ vpxor tmp0, x, x; #endif #define ymm0_x xmm0 #define ymm1_x xmm1 #define ymm2_x xmm2 #define ymm3_x xmm3 #define ymm4_x xmm4 #define ymm5_x xmm5 #define ymm6_x xmm6 #define ymm7_x xmm7 #define ymm8_x xmm8 #define ymm9_x xmm9 #define ymm10_x xmm10 #define ymm11_x xmm11 #define ymm12_x xmm12 #define ymm13_x xmm13 #define ymm14_x xmm14 #define ymm15_x xmm15 #ifdef CAMELLIA_VAES_BUILD # define IF_AESNI(...) # define IF_VAES(...) __VA_ARGS__ #else # define IF_AESNI(...) __VA_ARGS__ # define IF_VAES(...) 
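IF_AESNI()/IF_VAES() let one macro body carry both instruction variants: exactly one of the two argument lists survives preprocessing, so the AES-NI build keeps the 128-bit vaesenclast plus vextracti128/vinserti128 sequences while the VAES build keeps single 256-bit vaesenclast instructions. A small stand-alone C illustration of the same variadic-macro selection pattern (the strings are illustrative only, not taken from the source):

#include <stdio.h>

/* Mirrors the build-time selection above. */
#ifdef CAMELLIA_VAES_BUILD
# define IF_AESNI(...)
# define IF_VAES(...)  __VA_ARGS__
#else
# define IF_AESNI(...) __VA_ARGS__
# define IF_VAES(...)
#endif

int main(void)
{
    /* Exactly one of these two lines survives preprocessing. */
    IF_AESNI(puts("AES-NI build: 128-bit vaesenclast on each ymm half");)
    IF_VAES(puts("VAES build: one 256-bit vaesenclast per ymm register");)
    return 0;
}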
#endif /********************************************************************** GFNI helper macros and constants **********************************************************************/ #ifdef CAMELLIA_GFNI_BUILD #define BV8(a0,a1,a2,a3,a4,a5,a6,a7) \ ( (((a0) & 1) << 0) | \ (((a1) & 1) << 1) | \ (((a2) & 1) << 2) | \ (((a3) & 1) << 3) | \ (((a4) & 1) << 4) | \ (((a5) & 1) << 5) | \ (((a6) & 1) << 6) | \ (((a7) & 1) << 7) ) #define BM8X8(l0,l1,l2,l3,l4,l5,l6,l7) \ ( ((l7) << (0 * 8)) | \ ((l6) << (1 * 8)) | \ ((l5) << (2 * 8)) | \ ((l4) << (3 * 8)) | \ ((l3) << (4 * 8)) | \ ((l2) << (5 * 8)) | \ ((l1) << (6 * 8)) | \ ((l0) << (7 * 8)) ) /* Pre-filters and post-filters constants for Camellia sboxes s1, s2, s3 and s4. * See http://urn.fi/URN:NBN:fi:oulu-201305311409, pages 43-48. * * Pre-filters are directly from above source, "θ₁"/"θ₄". Post-filters are * combination of function "A" (AES SubBytes affine transformation) and * "ψ₁"/"ψ₂"/"ψ₃". */ /* Constant from "θ₁(x)" and "θ₄(x)" functions. */ #define pre_filter_constant_s1234 BV8(1, 0, 1, 0, 0, 0, 1, 0) /* Constant from "ψ₁(A(x))" function: */ #define post_filter_constant_s14 BV8(0, 1, 1, 1, 0, 1, 1, 0) /* Constant from "ψ₂(A(x))" function: */ #define post_filter_constant_s2 BV8(0, 0, 1, 1, 1, 0, 1, 1) /* Constant from "ψ₃(A(x))" function: */ #define post_filter_constant_s3 BV8(1, 1, 1, 0, 1, 1, 0, 0) #endif /* CAMELLIA_GFNI_BUILD */ /********************************************************************** 32-way camellia **********************************************************************/ #ifdef CAMELLIA_GFNI_BUILD /* roundsm32 (GFNI version) * IN: * x0..x7: byte-sliced AB state * mem_cd: register pointer storing CD state * key: index for key material * OUT: * x0..x7: new byte-sliced CD state */ #define roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2, t3, t4, t5, \ t6, t7, mem_cd, key) \ /* \ * S-function with AES subbytes \ */ \ vpbroadcastq .Lpre_filter_bitmatrix_s123 rRIP, t5; \ vpbroadcastq .Lpre_filter_bitmatrix_s4 rRIP, t2; \ vpbroadcastq .Lpost_filter_bitmatrix_s14 rRIP, t4; \ vpbroadcastq .Lpost_filter_bitmatrix_s2 rRIP, t3; \ vpbroadcastq .Lpost_filter_bitmatrix_s3 rRIP, t6; \ vpxor t7##_x, t7##_x, t7##_x; \ vpbroadcastq key, t0; /* higher 64-bit duplicate ignored */ \ \ /* prefilter sboxes */ \ vgf2p8affineqb $(pre_filter_constant_s1234), t5, x0, x0; \ vgf2p8affineqb $(pre_filter_constant_s1234), t5, x7, x7; \ vgf2p8affineqb $(pre_filter_constant_s1234), t2, x3, x3; \ vgf2p8affineqb $(pre_filter_constant_s1234), t2, x6, x6; \ vgf2p8affineqb $(pre_filter_constant_s1234), t5, x2, x2; \ vgf2p8affineqb $(pre_filter_constant_s1234), t5, x5, x5; \ vgf2p8affineqb $(pre_filter_constant_s1234), t5, x1, x1; \ vgf2p8affineqb $(pre_filter_constant_s1234), t5, x4, x4; \ \ /* sbox GF8 inverse + postfilter sboxes 1 and 4 */ \ vgf2p8affineinvqb $(post_filter_constant_s14), t4, x0, x0; \ vgf2p8affineinvqb $(post_filter_constant_s14), t4, x7, x7; \ vgf2p8affineinvqb $(post_filter_constant_s14), t4, x3, x3; \ vgf2p8affineinvqb $(post_filter_constant_s14), t4, x6, x6; \ \ /* sbox GF8 inverse + postfilter sbox 3 */ \ vgf2p8affineinvqb $(post_filter_constant_s3), t6, x2, x2; \ vgf2p8affineinvqb $(post_filter_constant_s3), t6, x5, x5; \ \ /* sbox GF8 inverse + postfilter sbox 2 */ \ vgf2p8affineinvqb $(post_filter_constant_s2), t3, x1, x1; \ vgf2p8affineinvqb $(post_filter_constant_s2), t3, x4, x4; \ \ vpsrldq $1, t0, t1; \ vpsrldq $2, t0, t2; \ vpshufb t7, t1, t1; \ vpsrldq $3, t0, t3; \ \ /* P-function */ \ vpxor x5, x0, x0; \ vpxor x6, x1, x1; 
\ vpxor x7, x2, x2; \ vpxor x4, x3, x3; \ \ vpshufb t7, t2, t2; \ vpsrldq $4, t0, t4; \ vpshufb t7, t3, t3; \ vpsrldq $5, t0, t5; \ vpshufb t7, t4, t4; \ \ vpxor x2, x4, x4; \ vpxor x3, x5, x5; \ vpxor x0, x6, x6; \ vpxor x1, x7, x7; \ \ vpsrldq $6, t0, t6; \ vpshufb t7, t5, t5; \ vpshufb t7, t6, t6; \ \ vpxor x7, x0, x0; \ vpxor x4, x1, x1; \ vpxor x5, x2, x2; \ vpxor x6, x3, x3; \ \ vpxor x3, x4, x4; \ vpxor x0, x5, x5; \ vpxor x1, x6, x6; \ vpxor x2, x7, x7; /* note: high and low parts swapped */ \ \ /* Add key material and result to CD (x becomes new CD) */ \ \ vpxor t6, x1, x1; \ vpxor 5 * 32(mem_cd), x1, x1; \ \ vpsrldq $7, t0, t6; \ vpshufb t7, t0, t0; \ vpshufb t7, t6, t7; \ \ vpxor t7, x0, x0; \ vpxor 4 * 32(mem_cd), x0, x0; \ \ vpxor t5, x2, x2; \ vpxor 6 * 32(mem_cd), x2, x2; \ \ vpxor t4, x3, x3; \ vpxor 7 * 32(mem_cd), x3, x3; \ \ vpxor t3, x4, x4; \ vpxor 0 * 32(mem_cd), x4, x4; \ \ vpxor t2, x5, x5; \ vpxor 1 * 32(mem_cd), x5, x5; \ \ vpxor t1, x6, x6; \ vpxor 2 * 32(mem_cd), x6, x6; \ \ vpxor t0, x7, x7; \ vpxor 3 * 32(mem_cd), x7, x7; #else /* CAMELLIA_GFNI_BUILD */ /* roundsm32 (AES-NI / VAES version) * IN: * x0..x7: byte-sliced AB state * mem_cd: register pointer storing CD state * key: index for key material * OUT: * x0..x7: new byte-sliced CD state */ #define roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2, t3, t4, t5, \ t6, t7, mem_cd, key) \ /* \ * S-function with AES subbytes \ */ \ vbroadcasti128 .Linv_shift_row rRIP, t4; \ vpbroadcastd .L0f0f0f0f rRIP, t7; \ vbroadcasti128 .Lpre_tf_lo_s1 rRIP, t5; \ vbroadcasti128 .Lpre_tf_hi_s1 rRIP, t6; \ vbroadcasti128 .Lpre_tf_lo_s4 rRIP, t2; \ vbroadcasti128 .Lpre_tf_hi_s4 rRIP, t3; \ \ /* AES inverse shift rows */ \ vpshufb t4, x0, x0; \ vpshufb t4, x7, x7; \ vpshufb t4, x3, x3; \ vpshufb t4, x6, x6; \ vpshufb t4, x2, x2; \ vpshufb t4, x5, x5; \ vpshufb t4, x1, x1; \ vpshufb t4, x4, x4; \ \ /* prefilter sboxes 1, 2 and 3 */ \ /* prefilter sbox 4 */ \ filter_8bit(x0, t5, t6, t7, t4); \ filter_8bit(x7, t5, t6, t7, t4); \ IF_AESNI(vextracti128 $1, x0, t0##_x); \ IF_AESNI(vextracti128 $1, x7, t1##_x); \ filter_8bit(x3, t2, t3, t7, t4); \ filter_8bit(x6, t2, t3, t7, t4); \ IF_AESNI(vextracti128 $1, x3, t3##_x); \ IF_AESNI(vextracti128 $1, x6, t2##_x); \ filter_8bit(x2, t5, t6, t7, t4); \ filter_8bit(x5, t5, t6, t7, t4); \ filter_8bit(x1, t5, t6, t7, t4); \ filter_8bit(x4, t5, t6, t7, t4); \ \ vpxor t4##_x, t4##_x, t4##_x; \ \ /* AES subbytes + AES shift rows */ \ IF_AESNI(vextracti128 $1, x2, t6##_x; \ vextracti128 $1, x5, t5##_x; \ vaesenclast t4##_x, x0##_x, x0##_x; \ vaesenclast t4##_x, t0##_x, t0##_x; \ vaesenclast t4##_x, x7##_x, x7##_x; \ vaesenclast t4##_x, t1##_x, t1##_x; \ vaesenclast t4##_x, x3##_x, x3##_x; \ vaesenclast t4##_x, t3##_x, t3##_x; \ vaesenclast t4##_x, x6##_x, x6##_x; \ vaesenclast t4##_x, t2##_x, t2##_x; \ vinserti128 $1, t0##_x, x0, x0; \ vinserti128 $1, t1##_x, x7, x7; \ vinserti128 $1, t3##_x, x3, x3; \ vinserti128 $1, t2##_x, x6, x6; \ vextracti128 $1, x1, t3##_x; \ vextracti128 $1, x4, t2##_x); \ vbroadcasti128 .Lpost_tf_lo_s1 rRIP, t0; \ vbroadcasti128 .Lpost_tf_hi_s1 rRIP, t1; \ IF_AESNI(vaesenclast t4##_x, x2##_x, x2##_x; \ vaesenclast t4##_x, t6##_x, t6##_x; \ vaesenclast t4##_x, x5##_x, x5##_x; \ vaesenclast t4##_x, t5##_x, t5##_x; \ vaesenclast t4##_x, x1##_x, x1##_x; \ vaesenclast t4##_x, t3##_x, t3##_x; \ vaesenclast t4##_x, x4##_x, x4##_x; \ vaesenclast t4##_x, t2##_x, t2##_x; \ vinserti128 $1, t6##_x, x2, x2; \ vinserti128 $1, t5##_x, x5, x5; \ vinserti128 $1, t3##_x, x1, x1; \ 
vinserti128 $1, t2##_x, x4, x4); \ IF_VAES(vaesenclast t4, x0, x0; \ vaesenclast t4, x7, x7; \ vaesenclast t4, x3, x3; \ vaesenclast t4, x6, x6; \ vaesenclast t4, x2, x2; \ vaesenclast t4, x5, x5; \ vaesenclast t4, x1, x1; \ vaesenclast t4, x4, x4); \ \ /* postfilter sboxes 1 and 4 */ \ vbroadcasti128 .Lpost_tf_lo_s3 rRIP, t2; \ vbroadcasti128 .Lpost_tf_hi_s3 rRIP, t3; \ filter_8bit(x0, t0, t1, t7, t4); \ filter_8bit(x7, t0, t1, t7, t4); \ filter_8bit(x3, t0, t1, t7, t6); \ filter_8bit(x6, t0, t1, t7, t6); \ \ /* postfilter sbox 3 */ \ vbroadcasti128 .Lpost_tf_lo_s2 rRIP, t4; \ vbroadcasti128 .Lpost_tf_hi_s2 rRIP, t5; \ filter_8bit(x2, t2, t3, t7, t6); \ filter_8bit(x5, t2, t3, t7, t6); \ \ vpbroadcastq key, t0; /* higher 64-bit duplicate ignored */ \ \ /* postfilter sbox 2 */ \ filter_8bit(x1, t4, t5, t7, t2); \ filter_8bit(x4, t4, t5, t7, t2); \ vpxor t7##_x, t7##_x, t7##_x; \ \ vpsrldq $1, t0, t1; \ vpsrldq $2, t0, t2; \ vpshufb t7, t1, t1; \ vpsrldq $3, t0, t3; \ \ /* P-function */ \ vpxor x5, x0, x0; \ vpxor x6, x1, x1; \ vpxor x7, x2, x2; \ vpxor x4, x3, x3; \ \ vpshufb t7, t2, t2; \ vpsrldq $4, t0, t4; \ vpshufb t7, t3, t3; \ vpsrldq $5, t0, t5; \ vpshufb t7, t4, t4; \ \ vpxor x2, x4, x4; \ vpxor x3, x5, x5; \ vpxor x0, x6, x6; \ vpxor x1, x7, x7; \ \ vpsrldq $6, t0, t6; \ vpshufb t7, t5, t5; \ vpshufb t7, t6, t6; \ \ vpxor x7, x0, x0; \ vpxor x4, x1, x1; \ vpxor x5, x2, x2; \ vpxor x6, x3, x3; \ \ vpxor x3, x4, x4; \ vpxor x0, x5, x5; \ vpxor x1, x6, x6; \ vpxor x2, x7, x7; /* note: high and low parts swapped */ \ \ /* Add key material and result to CD (x becomes new CD) */ \ \ vpxor t6, x1, x1; \ vpxor 5 * 32(mem_cd), x1, x1; \ \ vpsrldq $7, t0, t6; \ vpshufb t7, t0, t0; \ vpshufb t7, t6, t7; \ \ vpxor t7, x0, x0; \ vpxor 4 * 32(mem_cd), x0, x0; \ \ vpxor t5, x2, x2; \ vpxor 6 * 32(mem_cd), x2, x2; \ \ vpxor t4, x3, x3; \ vpxor 7 * 32(mem_cd), x3, x3; \ \ vpxor t3, x4, x4; \ vpxor 0 * 32(mem_cd), x4, x4; \ \ vpxor t2, x5, x5; \ vpxor 1 * 32(mem_cd), x5, x5; \ \ vpxor t1, x6, x6; \ vpxor 2 * 32(mem_cd), x6, x6; \ \ vpxor t0, x7, x7; \ vpxor 3 * 32(mem_cd), x7, x7; #endif /* CAMELLIA_GFNI_BUILD */ /* * IN/OUT: * x0..x7: byte-sliced AB state preloaded * mem_ab: byte-sliced AB state in memory * mem_cb: byte-sliced CD state in memory */ #define two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, i, dir, store_ab) \ roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_cd, (key_table + (i) * 8)(CTX)); \ \ vmovdqu x0, 4 * 32(mem_cd); \ vmovdqu x1, 5 * 32(mem_cd); \ vmovdqu x2, 6 * 32(mem_cd); \ vmovdqu x3, 7 * 32(mem_cd); \ vmovdqu x4, 0 * 32(mem_cd); \ vmovdqu x5, 1 * 32(mem_cd); \ vmovdqu x6, 2 * 32(mem_cd); \ vmovdqu x7, 3 * 32(mem_cd); \ \ roundsm32(x4, x5, x6, x7, x0, x1, x2, x3, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, (key_table + ((i) + (dir)) * 8)(CTX)); \ \ store_ab(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab); #define dummy_store(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab) /* do nothing */ #define store_ab_state(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab) \ /* Store new AB state */ \ vmovdqu x4, 4 * 32(mem_ab); \ vmovdqu x5, 5 * 32(mem_ab); \ vmovdqu x6, 6 * 32(mem_ab); \ vmovdqu x7, 7 * 32(mem_ab); \ vmovdqu x0, 0 * 32(mem_ab); \ vmovdqu x1, 1 * 32(mem_ab); \ vmovdqu x2, 2 * 32(mem_ab); \ vmovdqu x3, 3 * 32(mem_ab); #define enc_rounds32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, i) \ two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, 
(i) + 2, 1, store_ab_state); \ two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 4, 1, store_ab_state); \ two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 6, 1, dummy_store); #define dec_rounds32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, i) \ two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 7, -1, store_ab_state); \ two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 5, -1, store_ab_state); \ two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 3, -1, dummy_store); /* * IN: * v0..3: byte-sliced 32-bit integers * OUT: * v0..3: (IN <<< 1) */ #define rol32_1_32(v0, v1, v2, v3, t0, t1, t2, zero) \ vpcmpgtb v0, zero, t0; \ vpaddb v0, v0, v0; \ vpabsb t0, t0; \ \ vpcmpgtb v1, zero, t1; \ vpaddb v1, v1, v1; \ vpabsb t1, t1; \ \ vpcmpgtb v2, zero, t2; \ vpaddb v2, v2, v2; \ vpabsb t2, t2; \ \ vpor t0, v1, v1; \ \ vpcmpgtb v3, zero, t0; \ vpaddb v3, v3, v3; \ vpabsb t0, t0; \ \ vpor t1, v2, v2; \ vpor t2, v3, v3; \ vpor t0, v0, v0; /* * IN: * r: byte-sliced AB state in memory * l: byte-sliced CD state in memory * OUT: * x0..x7: new byte-sliced CD state */ #define fls32(l, l0, l1, l2, l3, l4, l5, l6, l7, r, t0, t1, t2, t3, tt0, \ tt1, tt2, tt3, kll, klr, krl, krr) \ /* \ * t0 = kll; \ * t0 &= ll; \ * lr ^= rol32(t0, 1); \ */ \ vpbroadcastd kll, t0; /* only lowest 32-bit used */ \ vpxor tt0, tt0, tt0; \ vpshufb tt0, t0, t3; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t2; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t1; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t0; \ \ vpand l0, t0, t0; \ vpand l1, t1, t1; \ vpand l2, t2, t2; \ vpand l3, t3, t3; \ \ rol32_1_32(t3, t2, t1, t0, tt1, tt2, tt3, tt0); \ \ vpxor l4, t0, l4; \ vpbroadcastd krr, t0; /* only lowest 32-bit used */ \ vmovdqu l4, 4 * 32(l); \ vpxor l5, t1, l5; \ vmovdqu l5, 5 * 32(l); \ vpxor l6, t2, l6; \ vmovdqu l6, 6 * 32(l); \ vpxor l7, t3, l7; \ vmovdqu l7, 7 * 32(l); \ \ /* \ * t2 = krr; \ * t2 |= rr; \ * rl ^= t2; \ */ \ \ vpshufb tt0, t0, t3; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t2; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t1; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t0; \ \ vpor 4 * 32(r), t0, t0; \ vpor 5 * 32(r), t1, t1; \ vpor 6 * 32(r), t2, t2; \ vpor 7 * 32(r), t3, t3; \ \ vpxor 0 * 32(r), t0, t0; \ vpxor 1 * 32(r), t1, t1; \ vpxor 2 * 32(r), t2, t2; \ vpxor 3 * 32(r), t3, t3; \ vmovdqu t0, 0 * 32(r); \ vpbroadcastd krl, t0; /* only lowest 32-bit used */ \ vmovdqu t1, 1 * 32(r); \ vmovdqu t2, 2 * 32(r); \ vmovdqu t3, 3 * 32(r); \ \ /* \ * t2 = krl; \ * t2 &= rl; \ * rr ^= rol32(t2, 1); \ */ \ vpshufb tt0, t0, t3; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t2; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t1; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t0; \ \ vpand 0 * 32(r), t0, t0; \ vpand 1 * 32(r), t1, t1; \ vpand 2 * 32(r), t2, t2; \ vpand 3 * 32(r), t3, t3; \ \ rol32_1_32(t3, t2, t1, t0, tt1, tt2, tt3, tt0); \ \ vpxor 4 * 32(r), t0, t0; \ vpxor 5 * 32(r), t1, t1; \ vpxor 6 * 32(r), t2, t2; \ vpxor 7 * 32(r), t3, t3; \ vmovdqu t0, 4 * 32(r); \ vpbroadcastd klr, t0; /* only lowest 32-bit used */ \ vmovdqu t1, 5 * 32(r); \ vmovdqu t2, 6 * 32(r); \ vmovdqu t3, 7 * 32(r); \ \ /* \ * t0 = klr; \ * t0 |= lr; \ * ll ^= t0; \ */ \ \ vpshufb tt0, t0, t3; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t2; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t1; \ vpsrldq $1, 
t0, t0; \ vpshufb tt0, t0, t0; \ \ vpor l4, t0, t0; \ vpor l5, t1, t1; \ vpor l6, t2, t2; \ vpor l7, t3, t3; \ \ vpxor l0, t0, l0; \ vmovdqu l0, 0 * 32(l); \ vpxor l1, t1, l1; \ vmovdqu l1, 1 * 32(l); \ vpxor l2, t2, l2; \ vmovdqu l2, 2 * 32(l); \ vpxor l3, t3, l3; \ vmovdqu l3, 3 * 32(l); #define transpose_4x4(x0, x1, x2, x3, t1, t2) \ vpunpckhdq x1, x0, t2; \ vpunpckldq x1, x0, x0; \ \ vpunpckldq x3, x2, t1; \ vpunpckhdq x3, x2, x2; \ \ vpunpckhqdq t1, x0, x1; \ vpunpcklqdq t1, x0, x0; \ \ vpunpckhqdq x2, t2, x3; \ vpunpcklqdq x2, t2, x2; #define byteslice_16x16b_fast(a0, b0, c0, d0, a1, b1, c1, d1, a2, b2, c2, d2, \ a3, b3, c3, d3, st0, st1) \ vmovdqu d2, st0; \ vmovdqu d3, st1; \ transpose_4x4(a0, a1, a2, a3, d2, d3); \ transpose_4x4(b0, b1, b2, b3, d2, d3); \ vmovdqu st0, d2; \ vmovdqu st1, d3; \ \ vmovdqu a0, st0; \ vmovdqu a1, st1; \ transpose_4x4(c0, c1, c2, c3, a0, a1); \ transpose_4x4(d0, d1, d2, d3, a0, a1); \ \ vbroadcasti128 .Lshufb_16x16b rRIP, a0; \ vmovdqu st1, a1; \ vpshufb a0, a2, a2; \ vpshufb a0, a3, a3; \ vpshufb a0, b0, b0; \ vpshufb a0, b1, b1; \ vpshufb a0, b2, b2; \ vpshufb a0, b3, b3; \ vpshufb a0, a1, a1; \ vpshufb a0, c0, c0; \ vpshufb a0, c1, c1; \ vpshufb a0, c2, c2; \ vpshufb a0, c3, c3; \ vpshufb a0, d0, d0; \ vpshufb a0, d1, d1; \ vpshufb a0, d2, d2; \ vpshufb a0, d3, d3; \ vmovdqu d3, st1; \ vmovdqu st0, d3; \ vpshufb a0, d3, a0; \ vmovdqu d2, st0; \ \ transpose_4x4(a0, b0, c0, d0, d2, d3); \ transpose_4x4(a1, b1, c1, d1, d2, d3); \ vmovdqu st0, d2; \ vmovdqu st1, d3; \ \ vmovdqu b0, st0; \ vmovdqu b1, st1; \ transpose_4x4(a2, b2, c2, d2, b0, b1); \ transpose_4x4(a3, b3, c3, d3, b0, b1); \ vmovdqu st0, b0; \ vmovdqu st1, b1; \ /* does not adjust output bytes inside vectors */ /* load blocks to registers and apply pre-whitening */ #define inpack32_pre(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, rio, key) \ vpbroadcastq key, x0; \ vpshufb .Lpack_bswap rRIP, x0, x0; \ \ vpxor 0 * 32(rio), x0, y7; \ vpxor 1 * 32(rio), x0, y6; \ vpxor 2 * 32(rio), x0, y5; \ vpxor 3 * 32(rio), x0, y4; \ vpxor 4 * 32(rio), x0, y3; \ vpxor 5 * 32(rio), x0, y2; \ vpxor 6 * 32(rio), x0, y1; \ vpxor 7 * 32(rio), x0, y0; \ vpxor 8 * 32(rio), x0, x7; \ vpxor 9 * 32(rio), x0, x6; \ vpxor 10 * 32(rio), x0, x5; \ vpxor 11 * 32(rio), x0, x4; \ vpxor 12 * 32(rio), x0, x3; \ vpxor 13 * 32(rio), x0, x2; \ vpxor 14 * 32(rio), x0, x1; \ vpxor 15 * 32(rio), x0, x0; /* byteslice pre-whitened blocks and store to temporary memory */ #define inpack32_post(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd) \ byteslice_16x16b_fast(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, \ y4, y5, y6, y7, (mem_ab), (mem_cd)); \ \ vmovdqu x0, 0 * 32(mem_ab); \ vmovdqu x1, 1 * 32(mem_ab); \ vmovdqu x2, 2 * 32(mem_ab); \ vmovdqu x3, 3 * 32(mem_ab); \ vmovdqu x4, 4 * 32(mem_ab); \ vmovdqu x5, 5 * 32(mem_ab); \ vmovdqu x6, 6 * 32(mem_ab); \ vmovdqu x7, 7 * 32(mem_ab); \ vmovdqu y0, 0 * 32(mem_cd); \ vmovdqu y1, 1 * 32(mem_cd); \ vmovdqu y2, 2 * 32(mem_cd); \ vmovdqu y3, 3 * 32(mem_cd); \ vmovdqu y4, 4 * 32(mem_cd); \ vmovdqu y5, 5 * 32(mem_cd); \ vmovdqu y6, 6 * 32(mem_cd); \ vmovdqu y7, 7 * 32(mem_cd); /* de-byteslice, apply post-whitening and store blocks */ #define outunpack32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, \ y5, y6, y7, key, stack_tmp0, stack_tmp1) \ byteslice_16x16b_fast(y0, y4, x0, x4, y1, y5, x1, x5, y2, y6, x2, x6, \ y3, y7, x3, x7, stack_tmp0, stack_tmp1); \ \ vmovdqu x0, stack_tmp0; \ \ vpbroadcastq key, x0; \ vpshufb .Lpack_bswap 
rRIP, x0, x0; \ \ vpxor x0, y7, y7; \ vpxor x0, y6, y6; \ vpxor x0, y5, y5; \ vpxor x0, y4, y4; \ vpxor x0, y3, y3; \ vpxor x0, y2, y2; \ vpxor x0, y1, y1; \ vpxor x0, y0, y0; \ vpxor x0, x7, x7; \ vpxor x0, x6, x6; \ vpxor x0, x5, x5; \ vpxor x0, x4, x4; \ vpxor x0, x3, x3; \ vpxor x0, x2, x2; \ vpxor x0, x1, x1; \ vpxor stack_tmp0, x0, x0; #define write_output(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, rio) \ vmovdqu x0, 0 * 32(rio); \ vmovdqu x1, 1 * 32(rio); \ vmovdqu x2, 2 * 32(rio); \ vmovdqu x3, 3 * 32(rio); \ vmovdqu x4, 4 * 32(rio); \ vmovdqu x5, 5 * 32(rio); \ vmovdqu x6, 6 * 32(rio); \ vmovdqu x7, 7 * 32(rio); \ vmovdqu y0, 8 * 32(rio); \ vmovdqu y1, 9 * 32(rio); \ vmovdqu y2, 10 * 32(rio); \ vmovdqu y3, 11 * 32(rio); \ vmovdqu y4, 12 * 32(rio); \ vmovdqu y5, 13 * 32(rio); \ vmovdqu y6, 14 * 32(rio); \ vmovdqu y7, 15 * 32(rio); -.text +SECTION_RODATA + .align 32 #define SHUFB_BYTES(idx) \ 0 + (idx), 4 + (idx), 8 + (idx), 12 + (idx) FUNC_NAME(_constants): ELF(.type FUNC_NAME(_constants),@object;) .Lpack_bswap: .long 0x00010203, 0x04050607, 0x80808080, 0x80808080 .long 0x00010203, 0x04050607, 0x80808080, 0x80808080 .Lshufb_16x16b: .byte SHUFB_BYTES(0), SHUFB_BYTES(1), SHUFB_BYTES(2), SHUFB_BYTES(3) /* For CTR-mode IV byteswap */ .Lbswap128_mask: .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 #ifdef CAMELLIA_GFNI_BUILD /* Pre-filters and post-filters bit-matrixes for Camellia sboxes s1, s2, s3 * and s4. * See http://urn.fi/URN:NBN:fi:oulu-201305311409, pages 43-48. * * Pre-filters are directly from above source, "θ₁"/"θ₄". Post-filters are * combination of function "A" (AES SubBytes affine transformation) and * "ψ₁"/"ψ₂"/"ψ₃". */ /* Bit-matrix from "θ₁(x)" function: */ .Lpre_filter_bitmatrix_s123: .quad BM8X8(BV8(1, 1, 1, 0, 1, 1, 0, 1), BV8(0, 0, 1, 1, 0, 0, 1, 0), BV8(1, 1, 0, 1, 0, 0, 0, 0), BV8(1, 0, 1, 1, 0, 0, 1, 1), BV8(0, 0, 0, 0, 1, 1, 0, 0), BV8(1, 0, 1, 0, 0, 1, 0, 0), BV8(0, 0, 1, 0, 1, 1, 0, 0), BV8(1, 0, 0, 0, 0, 1, 1, 0)) /* Bit-matrix from "θ₄(x)" function: */ .Lpre_filter_bitmatrix_s4: .quad BM8X8(BV8(1, 1, 0, 1, 1, 0, 1, 1), BV8(0, 1, 1, 0, 0, 1, 0, 0), BV8(1, 0, 1, 0, 0, 0, 0, 1), BV8(0, 1, 1, 0, 0, 1, 1, 1), BV8(0, 0, 0, 1, 1, 0, 0, 0), BV8(0, 1, 0, 0, 1, 0, 0, 1), BV8(0, 1, 0, 1, 1, 0, 0, 0), BV8(0, 0, 0, 0, 1, 1, 0, 1)) /* Bit-matrix from "ψ₁(A(x))" function: */ .Lpost_filter_bitmatrix_s14: .quad BM8X8(BV8(0, 0, 0, 0, 0, 0, 0, 1), BV8(0, 1, 1, 0, 0, 1, 1, 0), BV8(1, 0, 1, 1, 1, 1, 1, 0), BV8(0, 0, 0, 1, 1, 0, 1, 1), BV8(1, 0, 0, 0, 1, 1, 1, 0), BV8(0, 1, 0, 1, 1, 1, 1, 0), BV8(0, 1, 1, 1, 1, 1, 1, 1), BV8(0, 0, 0, 1, 1, 1, 0, 0)) /* Bit-matrix from "ψ₂(A(x))" function: */ .Lpost_filter_bitmatrix_s2: .quad BM8X8(BV8(0, 0, 0, 1, 1, 1, 0, 0), BV8(0, 0, 0, 0, 0, 0, 0, 1), BV8(0, 1, 1, 0, 0, 1, 1, 0), BV8(1, 0, 1, 1, 1, 1, 1, 0), BV8(0, 0, 0, 1, 1, 0, 1, 1), BV8(1, 0, 0, 0, 1, 1, 1, 0), BV8(0, 1, 0, 1, 1, 1, 1, 0), BV8(0, 1, 1, 1, 1, 1, 1, 1)) /* Bit-matrix from "ψ₃(A(x))" function: */ .Lpost_filter_bitmatrix_s3: .quad BM8X8(BV8(0, 1, 1, 0, 0, 1, 1, 0), BV8(1, 0, 1, 1, 1, 1, 1, 0), BV8(0, 0, 0, 1, 1, 0, 1, 1), BV8(1, 0, 0, 0, 1, 1, 1, 0), BV8(0, 1, 0, 1, 1, 1, 1, 0), BV8(0, 1, 1, 1, 1, 1, 1, 1), BV8(0, 0, 0, 1, 1, 1, 0, 0), BV8(0, 0, 0, 0, 0, 0, 0, 1)) #else /* CAMELLIA_GFNI_BUILD */ /* * pre-SubByte transform * * pre-lookup for sbox1, sbox2, sbox3: * swap_bitendianness( * isom_map_camellia_to_aes( * camellia_f( * swap_bitendianess(in) * ) * ) * ) * * (note: '⊕ 0xc5' inside camellia_f()) */ .Lpre_tf_lo_s1: .byte 0x45, 0xe8, 0x40, 
0xed, 0x2e, 0x83, 0x2b, 0x86 .byte 0x4b, 0xe6, 0x4e, 0xe3, 0x20, 0x8d, 0x25, 0x88 .Lpre_tf_hi_s1: .byte 0x00, 0x51, 0xf1, 0xa0, 0x8a, 0xdb, 0x7b, 0x2a .byte 0x09, 0x58, 0xf8, 0xa9, 0x83, 0xd2, 0x72, 0x23 /* * pre-SubByte transform * * pre-lookup for sbox4: * swap_bitendianness( * isom_map_camellia_to_aes( * camellia_f( * swap_bitendianess(in <<< 1) * ) * ) * ) * * (note: '⊕ 0xc5' inside camellia_f()) */ .Lpre_tf_lo_s4: .byte 0x45, 0x40, 0x2e, 0x2b, 0x4b, 0x4e, 0x20, 0x25 .byte 0x14, 0x11, 0x7f, 0x7a, 0x1a, 0x1f, 0x71, 0x74 .Lpre_tf_hi_s4: .byte 0x00, 0xf1, 0x8a, 0x7b, 0x09, 0xf8, 0x83, 0x72 .byte 0xad, 0x5c, 0x27, 0xd6, 0xa4, 0x55, 0x2e, 0xdf /* * post-SubByte transform * * post-lookup for sbox1, sbox4: * swap_bitendianness( * camellia_h( * isom_map_aes_to_camellia( * swap_bitendianness( * aes_inverse_affine_transform(in) * ) * ) * ) * ) * * (note: '⊕ 0x6e' inside camellia_h()) */ .Lpost_tf_lo_s1: .byte 0x3c, 0xcc, 0xcf, 0x3f, 0x32, 0xc2, 0xc1, 0x31 .byte 0xdc, 0x2c, 0x2f, 0xdf, 0xd2, 0x22, 0x21, 0xd1 .Lpost_tf_hi_s1: .byte 0x00, 0xf9, 0x86, 0x7f, 0xd7, 0x2e, 0x51, 0xa8 .byte 0xa4, 0x5d, 0x22, 0xdb, 0x73, 0x8a, 0xf5, 0x0c /* * post-SubByte transform * * post-lookup for sbox2: * swap_bitendianness( * camellia_h( * isom_map_aes_to_camellia( * swap_bitendianness( * aes_inverse_affine_transform(in) * ) * ) * ) * ) <<< 1 * * (note: '⊕ 0x6e' inside camellia_h()) */ .Lpost_tf_lo_s2: .byte 0x78, 0x99, 0x9f, 0x7e, 0x64, 0x85, 0x83, 0x62 .byte 0xb9, 0x58, 0x5e, 0xbf, 0xa5, 0x44, 0x42, 0xa3 .Lpost_tf_hi_s2: .byte 0x00, 0xf3, 0x0d, 0xfe, 0xaf, 0x5c, 0xa2, 0x51 .byte 0x49, 0xba, 0x44, 0xb7, 0xe6, 0x15, 0xeb, 0x18 /* * post-SubByte transform * * post-lookup for sbox3: * swap_bitendianness( * camellia_h( * isom_map_aes_to_camellia( * swap_bitendianness( * aes_inverse_affine_transform(in) * ) * ) * ) * ) >>> 1 * * (note: '⊕ 0x6e' inside camellia_h()) */ .Lpost_tf_lo_s3: .byte 0x1e, 0x66, 0xe7, 0x9f, 0x19, 0x61, 0xe0, 0x98 .byte 0x6e, 0x16, 0x97, 0xef, 0x69, 0x11, 0x90, 0xe8 .Lpost_tf_hi_s3: .byte 0x00, 0xfc, 0x43, 0xbf, 0xeb, 0x17, 0xa8, 0x54 .byte 0x52, 0xae, 0x11, 0xed, 0xb9, 0x45, 0xfa, 0x06 /* For isolating SubBytes from AESENCLAST, inverse shift row */ .Linv_shift_row: .byte 0x00, 0x0d, 0x0a, 0x07, 0x04, 0x01, 0x0e, 0x0b .byte 0x08, 0x05, 0x02, 0x0f, 0x0c, 0x09, 0x06, 0x03 .align 4 /* 4-bit mask */ .L0f0f0f0f: .long 0x0f0f0f0f #endif /* CAMELLIA_GFNI_BUILD */ ELF(.size FUNC_NAME(_constants),.-FUNC_NAME(_constants);) +.text + .align 16 ELF(.type FUNC_NAME(enc_blk32),@function;) FUNC_NAME(enc_blk32): /* input: * %rdi: ctx, CTX * %rax: temporary storage, 512 bytes * %r8d: 24 for 16 byte key, 32 for larger * %ymm0..%ymm15: 32 plaintext blocks * output: * %ymm0..%ymm15: 32 encrypted blocks, order swapped: * 7, 8, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 */ CFI_STARTPROC(); leaq 8 * 32(%rax), %rcx; leaq (-8 * 8)(CTX, %r8, 8), %r8; inpack32_post(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, %rax, %rcx); .align 8 .Lenc_loop: enc_rounds32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, %rax, %rcx, 0); cmpq %r8, CTX; je .Lenc_done; leaq (8 * 8)(CTX), CTX; fls32(%rax, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %rcx, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, ((key_table) + 0)(CTX), ((key_table) + 4)(CTX), ((key_table) + 8)(CTX), ((key_table) + 12)(CTX)); jmp .Lenc_loop; .align 8 .Lenc_done: /* load CD for output */ vmovdqu 0 * 
32(%rcx), %ymm8; vmovdqu 1 * 32(%rcx), %ymm9; vmovdqu 2 * 32(%rcx), %ymm10; vmovdqu 3 * 32(%rcx), %ymm11; vmovdqu 4 * 32(%rcx), %ymm12; vmovdqu 5 * 32(%rcx), %ymm13; vmovdqu 6 * 32(%rcx), %ymm14; vmovdqu 7 * 32(%rcx), %ymm15; outunpack32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, ((key_table) + 8 * 8)(%r8), (%rax), 1 * 32(%rax)); ret_spec_stop; CFI_ENDPROC(); ELF(.size FUNC_NAME(enc_blk32),.-FUNC_NAME(enc_blk32);) .align 16 ELF(.type FUNC_NAME(dec_blk32),@function;) FUNC_NAME(dec_blk32): /* input: * %rdi: ctx, CTX * %rax: temporary storage, 512 bytes * %r8d: 24 for 16 byte key, 32 for larger * %ymm0..%ymm15: 32 encrypted blocks * output: * %ymm0..%ymm15: 32 plaintext blocks, order swapped: * 7, 8, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 */ CFI_STARTPROC(); movq %r8, %rcx; movq CTX, %r8 leaq (-8 * 8)(CTX, %rcx, 8), CTX; leaq 8 * 32(%rax), %rcx; inpack32_post(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, %rax, %rcx); .align 8 .Ldec_loop: dec_rounds32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, %rax, %rcx, 0); cmpq %r8, CTX; je .Ldec_done; fls32(%rax, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %rcx, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, ((key_table) + 8)(CTX), ((key_table) + 12)(CTX), ((key_table) + 0)(CTX), ((key_table) + 4)(CTX)); leaq (-8 * 8)(CTX), CTX; jmp .Ldec_loop; .align 8 .Ldec_done: /* load CD for output */ vmovdqu 0 * 32(%rcx), %ymm8; vmovdqu 1 * 32(%rcx), %ymm9; vmovdqu 2 * 32(%rcx), %ymm10; vmovdqu 3 * 32(%rcx), %ymm11; vmovdqu 4 * 32(%rcx), %ymm12; vmovdqu 5 * 32(%rcx), %ymm13; vmovdqu 6 * 32(%rcx), %ymm14; vmovdqu 7 * 32(%rcx), %ymm15; outunpack32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax)); ret_spec_stop; CFI_ENDPROC(); ELF(.size FUNC_NAME(dec_blk32),.-FUNC_NAME(dec_blk32);) #define inc_le128(x, minus_one, tmp) \ vpcmpeqq minus_one, x, tmp; \ vpsubq minus_one, x, x; \ vpslldq $8, tmp, tmp; \ vpsubq tmp, x, x; .align 16 .globl FUNC_NAME(ctr_enc) ELF(.type FUNC_NAME(ctr_enc),@function;) FUNC_NAME(ctr_enc): /* input: * %rdi: ctx, CTX * %rsi: dst (32 blocks) * %rdx: src (32 blocks) * %rcx: iv (big endian, 128bit) */ CFI_STARTPROC(); pushq %rbp; CFI_PUSH(%rbp); movq %rsp, %rbp; CFI_DEF_CFA_REGISTER(%rbp); movq 8(%rcx), %r11; bswapq %r11; cmpl $128, key_bitlength(CTX); movl $32, %r8d; movl $24, %eax; cmovel %eax, %r8d; /* max */ subq $(16 * 32), %rsp; andq $~63, %rsp; movq %rsp, %rax; vpcmpeqd %ymm15, %ymm15, %ymm15; vpsrldq $8, %ymm15, %ymm15; /* ab: -1:0 ; cd: -1:0 */ /* load IV and byteswap */ vmovdqu (%rcx), %xmm0; vpshufb .Lbswap128_mask rRIP, %xmm0, %xmm0; vmovdqa %xmm0, %xmm1; inc_le128(%xmm0, %xmm15, %xmm14); vbroadcasti128 .Lbswap128_mask rRIP, %ymm14; vinserti128 $1, %xmm0, %ymm1, %ymm0; vpshufb %ymm14, %ymm0, %ymm13; vmovdqu %ymm13, 15 * 32(%rax); /* check need for handling 64-bit overflow and carry */ cmpq $(0xffffffffffffffff - 32), %r11; ja .Lload_ctr_carry; /* construct IVs */ vpaddq %ymm15, %ymm15, %ymm15; /* ab: -2:0 ; cd: -2:0 */ vpsubq %ymm15, %ymm0, %ymm0; vpshufb %ymm14, %ymm0, %ymm13; vmovdqu %ymm13, 14 * 32(%rax); vpsubq %ymm15, %ymm0, %ymm0; vpshufb %ymm14, %ymm0, %ymm13; vmovdqu %ymm13, 13 * 32(%rax); vpsubq %ymm15, %ymm0, %ymm0; vpshufb %ymm14, %ymm0, %ymm12; vpsubq %ymm15, %ymm0, 
%ymm0; vpshufb %ymm14, %ymm0, %ymm11; vpsubq %ymm15, %ymm0, %ymm0; vpshufb %ymm14, %ymm0, %ymm10; vpsubq %ymm15, %ymm0, %ymm0; vpshufb %ymm14, %ymm0, %ymm9; vpsubq %ymm15, %ymm0, %ymm0; vpshufb %ymm14, %ymm0, %ymm8; vpsubq %ymm15, %ymm0, %ymm0; vpshufb %ymm14, %ymm0, %ymm7; vpsubq %ymm15, %ymm0, %ymm0; vpshufb %ymm14, %ymm0, %ymm6; vpsubq %ymm15, %ymm0, %ymm0; vpshufb %ymm14, %ymm0, %ymm5; vpsubq %ymm15, %ymm0, %ymm0; vpshufb %ymm14, %ymm0, %ymm4; vpsubq %ymm15, %ymm0, %ymm0; vpshufb %ymm14, %ymm0, %ymm3; vpsubq %ymm15, %ymm0, %ymm0; vpshufb %ymm14, %ymm0, %ymm2; vpsubq %ymm15, %ymm0, %ymm0; vpshufb %ymm14, %ymm0, %ymm1; vpsubq %ymm15, %ymm0, %ymm0; /* +30 ; +31 */ vpsubq %xmm15, %xmm0, %xmm13; /* +32 */ vpshufb %ymm14, %ymm0, %ymm0; vpshufb %xmm14, %xmm13, %xmm13; vmovdqu %xmm13, (%rcx); jmp .Lload_ctr_done; .align 4 .Lload_ctr_carry: /* construct IVs */ inc_le128(%ymm0, %ymm15, %ymm13); /* ab: le1 ; cd: le2 */ inc_le128(%ymm0, %ymm15, %ymm13); /* ab: le2 ; cd: le3 */ vpshufb %ymm14, %ymm0, %ymm13; vmovdqu %ymm13, 14 * 32(%rax); inc_le128(%ymm0, %ymm15, %ymm13); inc_le128(%ymm0, %ymm15, %ymm13); vpshufb %ymm14, %ymm0, %ymm13; vmovdqu %ymm13, 13 * 32(%rax); inc_le128(%ymm0, %ymm15, %ymm13); inc_le128(%ymm0, %ymm15, %ymm13); vpshufb %ymm14, %ymm0, %ymm12; inc_le128(%ymm0, %ymm15, %ymm13); inc_le128(%ymm0, %ymm15, %ymm13); vpshufb %ymm14, %ymm0, %ymm11; inc_le128(%ymm0, %ymm15, %ymm13); inc_le128(%ymm0, %ymm15, %ymm13); vpshufb %ymm14, %ymm0, %ymm10; inc_le128(%ymm0, %ymm15, %ymm13); inc_le128(%ymm0, %ymm15, %ymm13); vpshufb %ymm14, %ymm0, %ymm9; inc_le128(%ymm0, %ymm15, %ymm13); inc_le128(%ymm0, %ymm15, %ymm13); vpshufb %ymm14, %ymm0, %ymm8; inc_le128(%ymm0, %ymm15, %ymm13); inc_le128(%ymm0, %ymm15, %ymm13); vpshufb %ymm14, %ymm0, %ymm7; inc_le128(%ymm0, %ymm15, %ymm13); inc_le128(%ymm0, %ymm15, %ymm13); vpshufb %ymm14, %ymm0, %ymm6; inc_le128(%ymm0, %ymm15, %ymm13); inc_le128(%ymm0, %ymm15, %ymm13); vpshufb %ymm14, %ymm0, %ymm5; inc_le128(%ymm0, %ymm15, %ymm13); inc_le128(%ymm0, %ymm15, %ymm13); vpshufb %ymm14, %ymm0, %ymm4; inc_le128(%ymm0, %ymm15, %ymm13); inc_le128(%ymm0, %ymm15, %ymm13); vpshufb %ymm14, %ymm0, %ymm3; inc_le128(%ymm0, %ymm15, %ymm13); inc_le128(%ymm0, %ymm15, %ymm13); vpshufb %ymm14, %ymm0, %ymm2; inc_le128(%ymm0, %ymm15, %ymm13); inc_le128(%ymm0, %ymm15, %ymm13); vpshufb %ymm14, %ymm0, %ymm1; inc_le128(%ymm0, %ymm15, %ymm13); inc_le128(%ymm0, %ymm15, %ymm13); vextracti128 $1, %ymm0, %xmm13; vpshufb %ymm14, %ymm0, %ymm0; inc_le128(%xmm13, %xmm15, %xmm14); vpshufb .Lbswap128_mask rRIP, %xmm13, %xmm13; vmovdqu %xmm13, (%rcx); .align 4 .Lload_ctr_done: /* inpack32_pre: */ vpbroadcastq (key_table)(CTX), %ymm15; vpshufb .Lpack_bswap rRIP, %ymm15, %ymm15; vpxor %ymm0, %ymm15, %ymm0; vpxor %ymm1, %ymm15, %ymm1; vpxor %ymm2, %ymm15, %ymm2; vpxor %ymm3, %ymm15, %ymm3; vpxor %ymm4, %ymm15, %ymm4; vpxor %ymm5, %ymm15, %ymm5; vpxor %ymm6, %ymm15, %ymm6; vpxor %ymm7, %ymm15, %ymm7; vpxor %ymm8, %ymm15, %ymm8; vpxor %ymm9, %ymm15, %ymm9; vpxor %ymm10, %ymm15, %ymm10; vpxor %ymm11, %ymm15, %ymm11; vpxor %ymm12, %ymm15, %ymm12; vpxor 13 * 32(%rax), %ymm15, %ymm13; vpxor 14 * 32(%rax), %ymm15, %ymm14; vpxor 15 * 32(%rax), %ymm15, %ymm15; call FUNC_NAME(enc_blk32); vpxor 0 * 32(%rdx), %ymm7, %ymm7; vpxor 1 * 32(%rdx), %ymm6, %ymm6; vpxor 2 * 32(%rdx), %ymm5, %ymm5; vpxor 3 * 32(%rdx), %ymm4, %ymm4; vpxor 4 * 32(%rdx), %ymm3, %ymm3; vpxor 5 * 32(%rdx), %ymm2, %ymm2; vpxor 6 * 32(%rdx), %ymm1, %ymm1; vpxor 7 * 32(%rdx), %ymm0, %ymm0; vpxor 8 * 32(%rdx), %ymm15, %ymm15; vpxor 9 * 32(%rdx), 
%ymm14, %ymm14; vpxor 10 * 32(%rdx), %ymm13, %ymm13; vpxor 11 * 32(%rdx), %ymm12, %ymm12; vpxor 12 * 32(%rdx), %ymm11, %ymm11; vpxor 13 * 32(%rdx), %ymm10, %ymm10; vpxor 14 * 32(%rdx), %ymm9, %ymm9; vpxor 15 * 32(%rdx), %ymm8, %ymm8; write_output(%ymm7, %ymm6, %ymm5, %ymm4, %ymm3, %ymm2, %ymm1, %ymm0, %ymm15, %ymm14, %ymm13, %ymm12, %ymm11, %ymm10, %ymm9, %ymm8, %rsi); vzeroall; leave; CFI_LEAVE(); ret_spec_stop; CFI_ENDPROC(); ELF(.size FUNC_NAME(ctr_enc),.-FUNC_NAME(ctr_enc);) .align 16 .globl FUNC_NAME(cbc_dec) ELF(.type FUNC_NAME(cbc_dec),@function;) FUNC_NAME(cbc_dec): /* input: * %rdi: ctx, CTX * %rsi: dst (32 blocks) * %rdx: src (32 blocks) * %rcx: iv */ CFI_STARTPROC(); pushq %rbp; CFI_PUSH(%rbp); movq %rsp, %rbp; CFI_DEF_CFA_REGISTER(%rbp); movq %rcx, %r9; cmpl $128, key_bitlength(CTX); movl $32, %r8d; movl $24, %eax; cmovel %eax, %r8d; /* max */ subq $(16 * 32), %rsp; andq $~63, %rsp; movq %rsp, %rax; inpack32_pre(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, %rdx, (key_table)(CTX, %r8, 8)); call FUNC_NAME(dec_blk32); /* XOR output with IV */ vmovdqu %ymm8, (%rax); vmovdqu (%r9), %xmm8; vinserti128 $1, (%rdx), %ymm8, %ymm8; vpxor %ymm8, %ymm7, %ymm7; vmovdqu (%rax), %ymm8; vpxor (0 * 32 + 16)(%rdx), %ymm6, %ymm6; vpxor (1 * 32 + 16)(%rdx), %ymm5, %ymm5; vpxor (2 * 32 + 16)(%rdx), %ymm4, %ymm4; vpxor (3 * 32 + 16)(%rdx), %ymm3, %ymm3; vpxor (4 * 32 + 16)(%rdx), %ymm2, %ymm2; vpxor (5 * 32 + 16)(%rdx), %ymm1, %ymm1; vpxor (6 * 32 + 16)(%rdx), %ymm0, %ymm0; vpxor (7 * 32 + 16)(%rdx), %ymm15, %ymm15; vpxor (8 * 32 + 16)(%rdx), %ymm14, %ymm14; vpxor (9 * 32 + 16)(%rdx), %ymm13, %ymm13; vpxor (10 * 32 + 16)(%rdx), %ymm12, %ymm12; vpxor (11 * 32 + 16)(%rdx), %ymm11, %ymm11; vpxor (12 * 32 + 16)(%rdx), %ymm10, %ymm10; vpxor (13 * 32 + 16)(%rdx), %ymm9, %ymm9; vpxor (14 * 32 + 16)(%rdx), %ymm8, %ymm8; movq (15 * 32 + 16 + 0)(%rdx), %rax; movq (15 * 32 + 16 + 8)(%rdx), %rcx; write_output(%ymm7, %ymm6, %ymm5, %ymm4, %ymm3, %ymm2, %ymm1, %ymm0, %ymm15, %ymm14, %ymm13, %ymm12, %ymm11, %ymm10, %ymm9, %ymm8, %rsi); /* store new IV */ movq %rax, (0)(%r9); movq %rcx, (8)(%r9); vzeroall; leave; CFI_LEAVE(); ret_spec_stop; CFI_ENDPROC(); ELF(.size FUNC_NAME(cbc_dec),.-FUNC_NAME(cbc_dec);) .align 16 .globl FUNC_NAME(cfb_dec) ELF(.type FUNC_NAME(cfb_dec),@function;) FUNC_NAME(cfb_dec): /* input: * %rdi: ctx, CTX * %rsi: dst (32 blocks) * %rdx: src (32 blocks) * %rcx: iv */ CFI_STARTPROC(); pushq %rbp; CFI_PUSH(%rbp); movq %rsp, %rbp; CFI_DEF_CFA_REGISTER(%rbp); cmpl $128, key_bitlength(CTX); movl $32, %r8d; movl $24, %eax; cmovel %eax, %r8d; /* max */ subq $(16 * 32), %rsp; andq $~63, %rsp; movq %rsp, %rax; /* inpack32_pre: */ vpbroadcastq (key_table)(CTX), %ymm0; vpshufb .Lpack_bswap rRIP, %ymm0, %ymm0; vmovdqu (%rcx), %xmm15; vinserti128 $1, (%rdx), %ymm15, %ymm15; vpxor %ymm15, %ymm0, %ymm15; vmovdqu (15 * 32 + 16)(%rdx), %xmm1; vmovdqu %xmm1, (%rcx); /* store new IV */ vpxor (0 * 32 + 16)(%rdx), %ymm0, %ymm14; vpxor (1 * 32 + 16)(%rdx), %ymm0, %ymm13; vpxor (2 * 32 + 16)(%rdx), %ymm0, %ymm12; vpxor (3 * 32 + 16)(%rdx), %ymm0, %ymm11; vpxor (4 * 32 + 16)(%rdx), %ymm0, %ymm10; vpxor (5 * 32 + 16)(%rdx), %ymm0, %ymm9; vpxor (6 * 32 + 16)(%rdx), %ymm0, %ymm8; vpxor (7 * 32 + 16)(%rdx), %ymm0, %ymm7; vpxor (8 * 32 + 16)(%rdx), %ymm0, %ymm6; vpxor (9 * 32 + 16)(%rdx), %ymm0, %ymm5; vpxor (10 * 32 + 16)(%rdx), %ymm0, %ymm4; vpxor (11 * 32 + 16)(%rdx), %ymm0, %ymm3; vpxor (12 * 32 + 16)(%rdx), %ymm0, %ymm2; vpxor (13 * 32 + 
16)(%rdx), %ymm0, %ymm1; vpxor (14 * 32 + 16)(%rdx), %ymm0, %ymm0; call FUNC_NAME(enc_blk32); vpxor 0 * 32(%rdx), %ymm7, %ymm7; vpxor 1 * 32(%rdx), %ymm6, %ymm6; vpxor 2 * 32(%rdx), %ymm5, %ymm5; vpxor 3 * 32(%rdx), %ymm4, %ymm4; vpxor 4 * 32(%rdx), %ymm3, %ymm3; vpxor 5 * 32(%rdx), %ymm2, %ymm2; vpxor 6 * 32(%rdx), %ymm1, %ymm1; vpxor 7 * 32(%rdx), %ymm0, %ymm0; vpxor 8 * 32(%rdx), %ymm15, %ymm15; vpxor 9 * 32(%rdx), %ymm14, %ymm14; vpxor 10 * 32(%rdx), %ymm13, %ymm13; vpxor 11 * 32(%rdx), %ymm12, %ymm12; vpxor 12 * 32(%rdx), %ymm11, %ymm11; vpxor 13 * 32(%rdx), %ymm10, %ymm10; vpxor 14 * 32(%rdx), %ymm9, %ymm9; vpxor 15 * 32(%rdx), %ymm8, %ymm8; write_output(%ymm7, %ymm6, %ymm5, %ymm4, %ymm3, %ymm2, %ymm1, %ymm0, %ymm15, %ymm14, %ymm13, %ymm12, %ymm11, %ymm10, %ymm9, %ymm8, %rsi); vzeroall; leave; CFI_LEAVE(); ret_spec_stop; CFI_ENDPROC(); ELF(.size FUNC_NAME(cfb_dec),.-FUNC_NAME(cfb_dec);) .align 16 .globl FUNC_NAME(ocb_enc) ELF(.type FUNC_NAME(ocb_enc),@function;) FUNC_NAME(ocb_enc): /* input: * %rdi: ctx, CTX * %rsi: dst (32 blocks) * %rdx: src (32 blocks) * %rcx: offset * %r8 : checksum * %r9 : L pointers (void *L[32]) */ CFI_STARTPROC(); pushq %rbp; CFI_PUSH(%rbp); movq %rsp, %rbp; CFI_DEF_CFA_REGISTER(%rbp); subq $(16 * 32 + 4 * 8), %rsp; andq $~63, %rsp; movq %rsp, %rax; movq %r10, (16 * 32 + 0 * 8)(%rsp); movq %r11, (16 * 32 + 1 * 8)(%rsp); movq %r12, (16 * 32 + 2 * 8)(%rsp); movq %r13, (16 * 32 + 3 * 8)(%rsp); CFI_REG_ON_STACK(r10, 16 * 32 + 0 * 8); CFI_REG_ON_STACK(r11, 16 * 32 + 1 * 8); CFI_REG_ON_STACK(r12, 16 * 32 + 2 * 8); CFI_REG_ON_STACK(r13, 16 * 32 + 3 * 8); vmovdqu (%rcx), %xmm14; vmovdqu (%r8), %xmm13; /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ /* Checksum_i = Checksum_{i-1} xor P_i */ /* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i) */ #define OCB_INPUT(n, l0reg, l1reg, yreg) \ vmovdqu (n * 32)(%rdx), yreg; \ vpxor (l0reg), %xmm14, %xmm15; \ vpxor (l1reg), %xmm15, %xmm14; \ vinserti128 $1, %xmm14, %ymm15, %ymm15; \ vpxor yreg, %ymm13, %ymm13; \ vpxor yreg, %ymm15, yreg; \ vmovdqu %ymm15, (n * 32)(%rsi); movq (0 * 8)(%r9), %r10; movq (1 * 8)(%r9), %r11; movq (2 * 8)(%r9), %r12; movq (3 * 8)(%r9), %r13; OCB_INPUT(0, %r10, %r11, %ymm0); vmovdqu %ymm0, (15 * 32)(%rax); OCB_INPUT(1, %r12, %r13, %ymm0); vmovdqu %ymm0, (14 * 32)(%rax); movq (4 * 8)(%r9), %r10; movq (5 * 8)(%r9), %r11; movq (6 * 8)(%r9), %r12; movq (7 * 8)(%r9), %r13; OCB_INPUT(2, %r10, %r11, %ymm0); vmovdqu %ymm0, (13 * 32)(%rax); OCB_INPUT(3, %r12, %r13, %ymm12); movq (8 * 8)(%r9), %r10; movq (9 * 8)(%r9), %r11; movq (10 * 8)(%r9), %r12; movq (11 * 8)(%r9), %r13; OCB_INPUT(4, %r10, %r11, %ymm11); OCB_INPUT(5, %r12, %r13, %ymm10); movq (12 * 8)(%r9), %r10; movq (13 * 8)(%r9), %r11; movq (14 * 8)(%r9), %r12; movq (15 * 8)(%r9), %r13; OCB_INPUT(6, %r10, %r11, %ymm9); OCB_INPUT(7, %r12, %r13, %ymm8); movq (16 * 8)(%r9), %r10; movq (17 * 8)(%r9), %r11; movq (18 * 8)(%r9), %r12; movq (19 * 8)(%r9), %r13; OCB_INPUT(8, %r10, %r11, %ymm7); OCB_INPUT(9, %r12, %r13, %ymm6); movq (20 * 8)(%r9), %r10; movq (21 * 8)(%r9), %r11; movq (22 * 8)(%r9), %r12; movq (23 * 8)(%r9), %r13; OCB_INPUT(10, %r10, %r11, %ymm5); OCB_INPUT(11, %r12, %r13, %ymm4); movq (24 * 8)(%r9), %r10; movq (25 * 8)(%r9), %r11; movq (26 * 8)(%r9), %r12; movq (27 * 8)(%r9), %r13; OCB_INPUT(12, %r10, %r11, %ymm3); OCB_INPUT(13, %r12, %r13, %ymm2); movq (28 * 8)(%r9), %r10; movq (29 * 8)(%r9), %r11; movq (30 * 8)(%r9), %r12; movq (31 * 8)(%r9), %r13; OCB_INPUT(14, %r10, %r11, %ymm1); OCB_INPUT(15, %r12, %r13, %ymm0); #undef OCB_INPUT vextracti128 
$1, %ymm13, %xmm15; vmovdqu %xmm14, (%rcx); vpxor %xmm13, %xmm15, %xmm15; vmovdqu %xmm15, (%r8); cmpl $128, key_bitlength(CTX); movl $32, %r8d; movl $24, %r10d; cmovel %r10d, %r8d; /* max */ /* inpack32_pre: */ vpbroadcastq (key_table)(CTX), %ymm15; vpshufb .Lpack_bswap rRIP, %ymm15, %ymm15; vpxor %ymm0, %ymm15, %ymm0; vpxor %ymm1, %ymm15, %ymm1; vpxor %ymm2, %ymm15, %ymm2; vpxor %ymm3, %ymm15, %ymm3; vpxor %ymm4, %ymm15, %ymm4; vpxor %ymm5, %ymm15, %ymm5; vpxor %ymm6, %ymm15, %ymm6; vpxor %ymm7, %ymm15, %ymm7; vpxor %ymm8, %ymm15, %ymm8; vpxor %ymm9, %ymm15, %ymm9; vpxor %ymm10, %ymm15, %ymm10; vpxor %ymm11, %ymm15, %ymm11; vpxor %ymm12, %ymm15, %ymm12; vpxor 13 * 32(%rax), %ymm15, %ymm13; vpxor 14 * 32(%rax), %ymm15, %ymm14; vpxor 15 * 32(%rax), %ymm15, %ymm15; call FUNC_NAME(enc_blk32); vpxor 0 * 32(%rsi), %ymm7, %ymm7; vpxor 1 * 32(%rsi), %ymm6, %ymm6; vpxor 2 * 32(%rsi), %ymm5, %ymm5; vpxor 3 * 32(%rsi), %ymm4, %ymm4; vpxor 4 * 32(%rsi), %ymm3, %ymm3; vpxor 5 * 32(%rsi), %ymm2, %ymm2; vpxor 6 * 32(%rsi), %ymm1, %ymm1; vpxor 7 * 32(%rsi), %ymm0, %ymm0; vpxor 8 * 32(%rsi), %ymm15, %ymm15; vpxor 9 * 32(%rsi), %ymm14, %ymm14; vpxor 10 * 32(%rsi), %ymm13, %ymm13; vpxor 11 * 32(%rsi), %ymm12, %ymm12; vpxor 12 * 32(%rsi), %ymm11, %ymm11; vpxor 13 * 32(%rsi), %ymm10, %ymm10; vpxor 14 * 32(%rsi), %ymm9, %ymm9; vpxor 15 * 32(%rsi), %ymm8, %ymm8; write_output(%ymm7, %ymm6, %ymm5, %ymm4, %ymm3, %ymm2, %ymm1, %ymm0, %ymm15, %ymm14, %ymm13, %ymm12, %ymm11, %ymm10, %ymm9, %ymm8, %rsi); vzeroall; movq (16 * 32 + 0 * 8)(%rsp), %r10; movq (16 * 32 + 1 * 8)(%rsp), %r11; movq (16 * 32 + 2 * 8)(%rsp), %r12; movq (16 * 32 + 3 * 8)(%rsp), %r13; CFI_RESTORE(%r10); CFI_RESTORE(%r11); CFI_RESTORE(%r12); CFI_RESTORE(%r13); leave; CFI_LEAVE(); ret_spec_stop; CFI_ENDPROC(); ELF(.size FUNC_NAME(ocb_enc),.-FUNC_NAME(ocb_enc);) .align 16 .globl FUNC_NAME(ocb_dec) ELF(.type FUNC_NAME(ocb_dec),@function;) FUNC_NAME(ocb_dec): /* input: * %rdi: ctx, CTX * %rsi: dst (32 blocks) * %rdx: src (32 blocks) * %rcx: offset * %r8 : checksum * %r9 : L pointers (void *L[32]) */ CFI_STARTPROC(); pushq %rbp; CFI_PUSH(%rbp); movq %rsp, %rbp; CFI_DEF_CFA_REGISTER(%rbp); subq $(16 * 32 + 4 * 8), %rsp; andq $~63, %rsp; movq %rsp, %rax; movq %r10, (16 * 32 + 0 * 8)(%rsp); movq %r11, (16 * 32 + 1 * 8)(%rsp); movq %r12, (16 * 32 + 2 * 8)(%rsp); movq %r13, (16 * 32 + 3 * 8)(%rsp); CFI_REG_ON_STACK(r10, 16 * 32 + 0 * 8); CFI_REG_ON_STACK(r11, 16 * 32 + 1 * 8); CFI_REG_ON_STACK(r12, 16 * 32 + 2 * 8); CFI_REG_ON_STACK(r13, 16 * 32 + 3 * 8); vmovdqu (%rcx), %xmm14; /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ /* P_i = Offset_i xor DECIPHER(K, C_i xor Offset_i) */ #define OCB_INPUT(n, l0reg, l1reg, yreg) \ vmovdqu (n * 32)(%rdx), yreg; \ vpxor (l0reg), %xmm14, %xmm15; \ vpxor (l1reg), %xmm15, %xmm14; \ vinserti128 $1, %xmm14, %ymm15, %ymm15; \ vpxor yreg, %ymm15, yreg; \ vmovdqu %ymm15, (n * 32)(%rsi); movq (0 * 8)(%r9), %r10; movq (1 * 8)(%r9), %r11; movq (2 * 8)(%r9), %r12; movq (3 * 8)(%r9), %r13; OCB_INPUT(0, %r10, %r11, %ymm0); vmovdqu %ymm0, (15 * 32)(%rax); OCB_INPUT(1, %r12, %r13, %ymm0); vmovdqu %ymm0, (14 * 32)(%rax); movq (4 * 8)(%r9), %r10; movq (5 * 8)(%r9), %r11; movq (6 * 8)(%r9), %r12; movq (7 * 8)(%r9), %r13; OCB_INPUT(2, %r10, %r11, %ymm13); OCB_INPUT(3, %r12, %r13, %ymm12); movq (8 * 8)(%r9), %r10; movq (9 * 8)(%r9), %r11; movq (10 * 8)(%r9), %r12; movq (11 * 8)(%r9), %r13; OCB_INPUT(4, %r10, %r11, %ymm11); OCB_INPUT(5, %r12, %r13, %ymm10); movq (12 * 8)(%r9), %r10; movq (13 * 8)(%r9), %r11; movq (14 * 8)(%r9), %r12; movq (15 
* 8)(%r9), %r13; OCB_INPUT(6, %r10, %r11, %ymm9); OCB_INPUT(7, %r12, %r13, %ymm8); movq (16 * 8)(%r9), %r10; movq (17 * 8)(%r9), %r11; movq (18 * 8)(%r9), %r12; movq (19 * 8)(%r9), %r13; OCB_INPUT(8, %r10, %r11, %ymm7); OCB_INPUT(9, %r12, %r13, %ymm6); movq (20 * 8)(%r9), %r10; movq (21 * 8)(%r9), %r11; movq (22 * 8)(%r9), %r12; movq (23 * 8)(%r9), %r13; OCB_INPUT(10, %r10, %r11, %ymm5); OCB_INPUT(11, %r12, %r13, %ymm4); movq (24 * 8)(%r9), %r10; movq (25 * 8)(%r9), %r11; movq (26 * 8)(%r9), %r12; movq (27 * 8)(%r9), %r13; OCB_INPUT(12, %r10, %r11, %ymm3); OCB_INPUT(13, %r12, %r13, %ymm2); movq (28 * 8)(%r9), %r10; movq (29 * 8)(%r9), %r11; movq (30 * 8)(%r9), %r12; movq (31 * 8)(%r9), %r13; OCB_INPUT(14, %r10, %r11, %ymm1); OCB_INPUT(15, %r12, %r13, %ymm0); #undef OCB_INPUT vmovdqu %xmm14, (%rcx); movq %r8, %r10; cmpl $128, key_bitlength(CTX); movl $32, %r8d; movl $24, %r9d; cmovel %r9d, %r8d; /* max */ /* inpack32_pre: */ vpbroadcastq (key_table)(CTX, %r8, 8), %ymm15; vpshufb .Lpack_bswap rRIP, %ymm15, %ymm15; vpxor %ymm0, %ymm15, %ymm0; vpxor %ymm1, %ymm15, %ymm1; vpxor %ymm2, %ymm15, %ymm2; vpxor %ymm3, %ymm15, %ymm3; vpxor %ymm4, %ymm15, %ymm4; vpxor %ymm5, %ymm15, %ymm5; vpxor %ymm6, %ymm15, %ymm6; vpxor %ymm7, %ymm15, %ymm7; vpxor %ymm8, %ymm15, %ymm8; vpxor %ymm9, %ymm15, %ymm9; vpxor %ymm10, %ymm15, %ymm10; vpxor %ymm11, %ymm15, %ymm11; vpxor %ymm12, %ymm15, %ymm12; vpxor %ymm13, %ymm15, %ymm13; vpxor 14 * 32(%rax), %ymm15, %ymm14; vpxor 15 * 32(%rax), %ymm15, %ymm15; call FUNC_NAME(dec_blk32); vpxor 0 * 32(%rsi), %ymm7, %ymm7; vpxor 1 * 32(%rsi), %ymm6, %ymm6; vpxor 2 * 32(%rsi), %ymm5, %ymm5; vpxor 3 * 32(%rsi), %ymm4, %ymm4; vpxor 4 * 32(%rsi), %ymm3, %ymm3; vpxor 5 * 32(%rsi), %ymm2, %ymm2; vpxor 6 * 32(%rsi), %ymm1, %ymm1; vpxor 7 * 32(%rsi), %ymm0, %ymm0; vmovdqu %ymm7, (7 * 32)(%rax); vmovdqu %ymm6, (6 * 32)(%rax); vpxor 8 * 32(%rsi), %ymm15, %ymm15; vpxor 9 * 32(%rsi), %ymm14, %ymm14; vpxor 10 * 32(%rsi), %ymm13, %ymm13; vpxor 11 * 32(%rsi), %ymm12, %ymm12; vpxor 12 * 32(%rsi), %ymm11, %ymm11; vpxor 13 * 32(%rsi), %ymm10, %ymm10; vpxor 14 * 32(%rsi), %ymm9, %ymm9; vpxor 15 * 32(%rsi), %ymm8, %ymm8; /* Checksum_i = Checksum_{i-1} xor P_i */ vpxor %ymm5, %ymm7, %ymm7; vpxor %ymm4, %ymm6, %ymm6; vpxor %ymm3, %ymm7, %ymm7; vpxor %ymm2, %ymm6, %ymm6; vpxor %ymm1, %ymm7, %ymm7; vpxor %ymm0, %ymm6, %ymm6; vpxor %ymm15, %ymm7, %ymm7; vpxor %ymm14, %ymm6, %ymm6; vpxor %ymm13, %ymm7, %ymm7; vpxor %ymm12, %ymm6, %ymm6; vpxor %ymm11, %ymm7, %ymm7; vpxor %ymm10, %ymm6, %ymm6; vpxor %ymm9, %ymm7, %ymm7; vpxor %ymm8, %ymm6, %ymm6; vpxor %ymm7, %ymm6, %ymm7; vextracti128 $1, %ymm7, %xmm6; vpxor %xmm6, %xmm7, %xmm7; vpxor (%r10), %xmm7, %xmm7; vmovdqu %xmm7, (%r10); vmovdqu 7 * 32(%rax), %ymm7; vmovdqu 6 * 32(%rax), %ymm6; write_output(%ymm7, %ymm6, %ymm5, %ymm4, %ymm3, %ymm2, %ymm1, %ymm0, %ymm15, %ymm14, %ymm13, %ymm12, %ymm11, %ymm10, %ymm9, %ymm8, %rsi); vzeroall; movq (16 * 32 + 0 * 8)(%rsp), %r10; movq (16 * 32 + 1 * 8)(%rsp), %r11; movq (16 * 32 + 2 * 8)(%rsp), %r12; movq (16 * 32 + 3 * 8)(%rsp), %r13; CFI_RESTORE(%r10); CFI_RESTORE(%r11); CFI_RESTORE(%r12); CFI_RESTORE(%r13); leave; CFI_LEAVE(); ret_spec_stop; CFI_ENDPROC(); ELF(.size FUNC_NAME(ocb_dec),.-FUNC_NAME(ocb_dec);) .align 16 .globl FUNC_NAME(ocb_auth) ELF(.type FUNC_NAME(ocb_auth),@function;) FUNC_NAME(ocb_auth): /* input: * %rdi: ctx, CTX * %rsi: abuf (16 blocks) * %rdx: offset * %rcx: checksum * %r8 : L pointers (void *L[16]) */ CFI_STARTPROC(); pushq %rbp; CFI_PUSH(%rbp); movq %rsp, %rbp; CFI_DEF_CFA_REGISTER(%rbp); 
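The OCB_INPUT macro used by ocb_enc and ocb_auth (ocb_dec uses a variant that accumulates the checksum after decryption instead) vectorizes the per-block OCB bookkeeping from the comments: Offset_i = Offset_{i-1} xor L_{ntz(i)} chained across a pair of blocks, Checksum_i = Checksum_{i-1} xor P_i, and the cipher input formed as P_i xor Offset_i. A minimal scalar C sketch of one such pair, using hypothetical block_t/xor_block helpers for illustration (this is not the libgcrypt API):

#include <stdint.h>

typedef struct { uint8_t b[16]; } block_t;

static void xor_block(block_t *r, const block_t *a, const block_t *b)
{
    for (int i = 0; i < 16; i++)
        r->b[i] = a->b[i] ^ b->b[i];
}

/* One OCB_INPUT step: two plaintext blocks and their L_{ntz} values l0/l1.
 * Updates the running offset and checksum, and produces the two whitened
 * cipher inputs (P_i xor Offset_i). */
void ocb_input_pair(block_t *offset, block_t *checksum,
                    const block_t *l0, const block_t *l1,
                    const block_t pt[2], block_t whitened[2])
{
    block_t off0, off1;

    xor_block(&off0, offset, l0);          /* Offset_{2n}   */
    xor_block(&off1, &off0, l1);           /* Offset_{2n+1} */
    *offset = off1;                        /* new running offset */

    xor_block(checksum, checksum, &pt[0]); /* Checksum ^= P_i */
    xor_block(checksum, checksum, &pt[1]);

    xor_block(&whitened[0], &pt[0], &off0);
    xor_block(&whitened[1], &pt[1], &off1);
}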
subq $(16 * 32 + 4 * 8), %rsp; andq $~63, %rsp; movq %rsp, %rax; movq %r10, (16 * 32 + 0 * 8)(%rsp); movq %r11, (16 * 32 + 1 * 8)(%rsp); movq %r12, (16 * 32 + 2 * 8)(%rsp); movq %r13, (16 * 32 + 3 * 8)(%rsp); CFI_REG_ON_STACK(r10, 16 * 32 + 0 * 8); CFI_REG_ON_STACK(r11, 16 * 32 + 1 * 8); CFI_REG_ON_STACK(r12, 16 * 32 + 2 * 8); CFI_REG_ON_STACK(r13, 16 * 32 + 3 * 8); vmovdqu (%rdx), %xmm14; /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ /* Checksum_i = Checksum_{i-1} xor P_i */ /* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i) */ #define OCB_INPUT(n, l0reg, l1reg, yreg) \ vmovdqu (n * 32)(%rsi), yreg; \ vpxor (l0reg), %xmm14, %xmm15; \ vpxor (l1reg), %xmm15, %xmm14; \ vinserti128 $1, %xmm14, %ymm15, %ymm15; \ vpxor yreg, %ymm15, yreg; movq (0 * 8)(%r8), %r10; movq (1 * 8)(%r8), %r11; movq (2 * 8)(%r8), %r12; movq (3 * 8)(%r8), %r13; OCB_INPUT(0, %r10, %r11, %ymm0); vmovdqu %ymm0, (15 * 32)(%rax); OCB_INPUT(1, %r12, %r13, %ymm0); vmovdqu %ymm0, (14 * 32)(%rax); movq (4 * 8)(%r8), %r10; movq (5 * 8)(%r8), %r11; movq (6 * 8)(%r8), %r12; movq (7 * 8)(%r8), %r13; OCB_INPUT(2, %r10, %r11, %ymm13); OCB_INPUT(3, %r12, %r13, %ymm12); movq (8 * 8)(%r8), %r10; movq (9 * 8)(%r8), %r11; movq (10 * 8)(%r8), %r12; movq (11 * 8)(%r8), %r13; OCB_INPUT(4, %r10, %r11, %ymm11); OCB_INPUT(5, %r12, %r13, %ymm10); movq (12 * 8)(%r8), %r10; movq (13 * 8)(%r8), %r11; movq (14 * 8)(%r8), %r12; movq (15 * 8)(%r8), %r13; OCB_INPUT(6, %r10, %r11, %ymm9); OCB_INPUT(7, %r12, %r13, %ymm8); movq (16 * 8)(%r8), %r10; movq (17 * 8)(%r8), %r11; movq (18 * 8)(%r8), %r12; movq (19 * 8)(%r8), %r13; OCB_INPUT(8, %r10, %r11, %ymm7); OCB_INPUT(9, %r12, %r13, %ymm6); movq (20 * 8)(%r8), %r10; movq (21 * 8)(%r8), %r11; movq (22 * 8)(%r8), %r12; movq (23 * 8)(%r8), %r13; OCB_INPUT(10, %r10, %r11, %ymm5); OCB_INPUT(11, %r12, %r13, %ymm4); movq (24 * 8)(%r8), %r10; movq (25 * 8)(%r8), %r11; movq (26 * 8)(%r8), %r12; movq (27 * 8)(%r8), %r13; OCB_INPUT(12, %r10, %r11, %ymm3); OCB_INPUT(13, %r12, %r13, %ymm2); movq (28 * 8)(%r8), %r10; movq (29 * 8)(%r8), %r11; movq (30 * 8)(%r8), %r12; movq (31 * 8)(%r8), %r13; OCB_INPUT(14, %r10, %r11, %ymm1); OCB_INPUT(15, %r12, %r13, %ymm0); #undef OCB_INPUT vmovdqu %xmm14, (%rdx); cmpl $128, key_bitlength(CTX); movl $32, %r8d; movl $24, %r10d; cmovel %r10d, %r8d; /* max */ movq %rcx, %r10; /* inpack32_pre: */ vpbroadcastq (key_table)(CTX), %ymm15; vpshufb .Lpack_bswap rRIP, %ymm15, %ymm15; vpxor %ymm0, %ymm15, %ymm0; vpxor %ymm1, %ymm15, %ymm1; vpxor %ymm2, %ymm15, %ymm2; vpxor %ymm3, %ymm15, %ymm3; vpxor %ymm4, %ymm15, %ymm4; vpxor %ymm5, %ymm15, %ymm5; vpxor %ymm6, %ymm15, %ymm6; vpxor %ymm7, %ymm15, %ymm7; vpxor %ymm8, %ymm15, %ymm8; vpxor %ymm9, %ymm15, %ymm9; vpxor %ymm10, %ymm15, %ymm10; vpxor %ymm11, %ymm15, %ymm11; vpxor %ymm12, %ymm15, %ymm12; vpxor %ymm13, %ymm15, %ymm13; vpxor 14 * 32(%rax), %ymm15, %ymm14; vpxor 15 * 32(%rax), %ymm15, %ymm15; call FUNC_NAME(enc_blk32); vpxor %ymm7, %ymm6, %ymm6; vpxor %ymm5, %ymm4, %ymm4; vpxor %ymm3, %ymm2, %ymm2; vpxor %ymm1, %ymm0, %ymm0; vpxor %ymm15, %ymm14, %ymm14; vpxor %ymm13, %ymm12, %ymm12; vpxor %ymm11, %ymm10, %ymm10; vpxor %ymm9, %ymm8, %ymm8; vpxor %ymm6, %ymm4, %ymm4; vpxor %ymm2, %ymm0, %ymm0; vpxor %ymm14, %ymm12, %ymm12; vpxor %ymm10, %ymm8, %ymm8; vpxor %ymm4, %ymm0, %ymm0; vpxor %ymm12, %ymm8, %ymm8; vpxor %ymm0, %ymm8, %ymm0; vextracti128 $1, %ymm0, %xmm1; vpxor (%r10), %xmm0, %xmm0; vpxor %xmm0, %xmm1, %xmm0; vmovdqu %xmm0, (%r10); vzeroall; movq (16 * 32 + 0 * 8)(%rsp), %r10; movq (16 * 32 + 1 * 8)(%rsp), %r11; movq (16 * 32 + 2 * 
8)(%rsp), %r12; movq (16 * 32 + 3 * 8)(%rsp), %r13; CFI_RESTORE(%r10); CFI_RESTORE(%r11); CFI_RESTORE(%r12); CFI_RESTORE(%r13); leave; CFI_LEAVE(); ret_spec_stop; CFI_ENDPROC(); ELF(.size FUNC_NAME(ocb_auth),.-FUNC_NAME(ocb_auth);) .align 16 .globl FUNC_NAME(enc_blk1_32) ELF(.type FUNC_NAME(enc_blk1_32),@function;) FUNC_NAME(enc_blk1_32): /* input: * %rdi: ctx, CTX * %rsi: dst (32 blocks) * %rdx: src (32 blocks) * %ecx: nblocks (1 to 32) */ CFI_STARTPROC(); pushq %rbp; CFI_PUSH(%rbp); movq %rsp, %rbp; CFI_DEF_CFA_REGISTER(%rbp); movl %ecx, %r9d; cmpl $128, key_bitlength(CTX); movl $32, %r8d; movl $24, %eax; cmovel %eax, %r8d; /* max */ subq $(16 * 32), %rsp; andq $~63, %rsp; movq %rsp, %rax; cmpl $31, %ecx; vpxor %xmm0, %xmm0, %xmm0; ja 1f; jb 2f; vmovdqu 15 * 32(%rdx), %xmm0; jmp 2f; 1: vmovdqu 15 * 32(%rdx), %ymm0; 2: vmovdqu %ymm0, (%rax); vpbroadcastq (key_table)(CTX), %ymm0; vpshufb .Lpack_bswap rRIP, %ymm0, %ymm0; #define LOAD_INPUT(offset, ymm) \ cmpl $(1 + 2 * (offset)), %ecx; \ jb 2f; \ ja 1f; \ vmovdqu (offset) * 32(%rdx), %ymm##_x; \ vpxor %ymm0, %ymm, %ymm; \ jmp 2f; \ 1: \ vpxor (offset) * 32(%rdx), %ymm0, %ymm; LOAD_INPUT(0, ymm15); LOAD_INPUT(1, ymm14); LOAD_INPUT(2, ymm13); LOAD_INPUT(3, ymm12); LOAD_INPUT(4, ymm11); LOAD_INPUT(5, ymm10); LOAD_INPUT(6, ymm9); LOAD_INPUT(7, ymm8); LOAD_INPUT(8, ymm7); LOAD_INPUT(9, ymm6); LOAD_INPUT(10, ymm5); LOAD_INPUT(11, ymm4); LOAD_INPUT(12, ymm3); LOAD_INPUT(13, ymm2); LOAD_INPUT(14, ymm1); vpxor (%rax), %ymm0, %ymm0; 2: call FUNC_NAME(enc_blk32); #define STORE_OUTPUT(ymm, offset) \ cmpl $(1 + 2 * (offset)), %r9d; \ jb 2f; \ ja 1f; \ vmovdqu %ymm##_x, (offset) * 32(%rsi); \ jmp 2f; \ 1: \ vmovdqu %ymm, (offset) * 32(%rsi); STORE_OUTPUT(ymm7, 0); STORE_OUTPUT(ymm6, 1); STORE_OUTPUT(ymm5, 2); STORE_OUTPUT(ymm4, 3); STORE_OUTPUT(ymm3, 4); STORE_OUTPUT(ymm2, 5); STORE_OUTPUT(ymm1, 6); STORE_OUTPUT(ymm0, 7); STORE_OUTPUT(ymm15, 8); STORE_OUTPUT(ymm14, 9); STORE_OUTPUT(ymm13, 10); STORE_OUTPUT(ymm12, 11); STORE_OUTPUT(ymm11, 12); STORE_OUTPUT(ymm10, 13); STORE_OUTPUT(ymm9, 14); STORE_OUTPUT(ymm8, 15); 2: vzeroall; leave; CFI_LEAVE(); ret_spec_stop; CFI_ENDPROC(); ELF(.size FUNC_NAME(enc_blk1_32),.-FUNC_NAME(enc_blk1_32);) .align 16 .globl FUNC_NAME(dec_blk1_32) ELF(.type FUNC_NAME(dec_blk1_32),@function;) FUNC_NAME(dec_blk1_32): /* input: * %rdi: ctx, CTX * %rsi: dst (32 blocks) * %rdx: src (32 blocks) * %ecx: nblocks (1 to 32) */ CFI_STARTPROC(); pushq %rbp; CFI_PUSH(%rbp); movq %rsp, %rbp; CFI_DEF_CFA_REGISTER(%rbp); movl %ecx, %r9d; cmpl $128, key_bitlength(CTX); movl $32, %r8d; movl $24, %eax; cmovel %eax, %r8d; /* max */ subq $(16 * 32), %rsp; andq $~63, %rsp; movq %rsp, %rax; cmpl $31, %ecx; vpxor %xmm0, %xmm0, %xmm0; ja 1f; jb 2f; vmovdqu 15 * 32(%rdx), %xmm0; jmp 2f; 1: vmovdqu 15 * 32(%rdx), %ymm0; 2: vmovdqu %ymm0, (%rax); vpbroadcastq (key_table)(CTX, %r8, 8), %ymm0; vpshufb .Lpack_bswap rRIP, %ymm0, %ymm0; LOAD_INPUT(0, ymm15); LOAD_INPUT(1, ymm14); LOAD_INPUT(2, ymm13); LOAD_INPUT(3, ymm12); LOAD_INPUT(4, ymm11); LOAD_INPUT(5, ymm10); LOAD_INPUT(6, ymm9); LOAD_INPUT(7, ymm8); LOAD_INPUT(8, ymm7); LOAD_INPUT(9, ymm6); LOAD_INPUT(10, ymm5); LOAD_INPUT(11, ymm4); LOAD_INPUT(12, ymm3); LOAD_INPUT(13, ymm2); LOAD_INPUT(14, ymm1); vpxor (%rax), %ymm0, %ymm0; 2: call FUNC_NAME(dec_blk32); STORE_OUTPUT(ymm7, 0); STORE_OUTPUT(ymm6, 1); STORE_OUTPUT(ymm5, 2); STORE_OUTPUT(ymm4, 3); STORE_OUTPUT(ymm3, 4); STORE_OUTPUT(ymm2, 5); STORE_OUTPUT(ymm1, 6); STORE_OUTPUT(ymm0, 7); STORE_OUTPUT(ymm15, 8); STORE_OUTPUT(ymm14, 9); STORE_OUTPUT(ymm13, 
10); STORE_OUTPUT(ymm12, 11); STORE_OUTPUT(ymm11, 12); STORE_OUTPUT(ymm10, 13); STORE_OUTPUT(ymm9, 14); STORE_OUTPUT(ymm8, 15); 2: vzeroall; leave; CFI_LEAVE(); ret_spec_stop; CFI_ENDPROC(); ELF(.size FUNC_NAME(dec_blk1_32),.-FUNC_NAME(dec_blk1_32);) #endif /* GCRY_CAMELLIA_AESNI_AVX2_AMD64_H */ diff --git a/cipher/camellia-gfni-avx512-amd64.S b/cipher/camellia-gfni-avx512-amd64.S index 66949d43..64fef8b6 100644 --- a/cipher/camellia-gfni-avx512-amd64.S +++ b/cipher/camellia-gfni-avx512-amd64.S @@ -1,1572 +1,1574 @@ /* camellia-gfni-avx512-amd64.S - GFNI/AVX512 implementation of Camellia * * Copyright (C) 2022 Jussi Kivilinna * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . */ #include #ifdef __x86_64 #if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && \ defined(ENABLE_GFNI_SUPPORT) && defined(ENABLE_AVX512_SUPPORT) #include "asm-common-amd64.h" #define CAMELLIA_TABLE_BYTE_LEN 272 /* struct CAMELLIA_context: */ #define key_table 0 #define key_bitlength CAMELLIA_TABLE_BYTE_LEN /* register macros */ #define CTX %rdi #define RIO %r8 /********************************************************************** helper macros **********************************************************************/ #define zmm0_x xmm0 #define zmm1_x xmm1 #define zmm2_x xmm2 #define zmm3_x xmm3 #define zmm4_x xmm4 #define zmm5_x xmm5 #define zmm6_x xmm6 #define zmm7_x xmm7 #define zmm8_x xmm8 #define zmm9_x xmm9 #define zmm10_x xmm10 #define zmm11_x xmm11 #define zmm12_x xmm12 #define zmm13_x xmm13 #define zmm14_x xmm14 #define zmm15_x xmm15 #define zmm0_y ymm0 #define zmm1_y ymm1 #define zmm2_y ymm2 #define zmm3_y ymm3 #define zmm4_y ymm4 #define zmm5_y ymm5 #define zmm6_y ymm6 #define zmm7_y ymm7 #define zmm8_y ymm8 #define zmm9_y ymm9 #define zmm10_y ymm10 #define zmm11_y ymm11 #define zmm12_y ymm12 #define zmm13_y ymm13 #define zmm14_y ymm14 #define zmm15_y ymm15 #define mem_ab_0 %zmm16 #define mem_ab_1 %zmm17 #define mem_ab_2 %zmm31 #define mem_ab_3 %zmm18 #define mem_ab_4 %zmm19 #define mem_ab_5 %zmm20 #define mem_ab_6 %zmm21 #define mem_ab_7 %zmm22 #define mem_cd_0 %zmm23 #define mem_cd_1 %zmm24 #define mem_cd_2 %zmm30 #define mem_cd_3 %zmm25 #define mem_cd_4 %zmm26 #define mem_cd_5 %zmm27 #define mem_cd_6 %zmm28 #define mem_cd_7 %zmm29 #define clear_vec4(v0,v1,v2,v3) \ vpxord v0, v0, v0; \ vpxord v1, v1, v1; \ vpxord v2, v2, v2; \ vpxord v3, v3, v3 #define clear_zmm16_zmm31() \ clear_vec4(%ymm16, %ymm20, %ymm24, %ymm28); \ clear_vec4(%ymm17, %ymm21, %ymm25, %ymm29); \ clear_vec4(%ymm18, %ymm22, %ymm26, %ymm30); \ clear_vec4(%ymm19, %ymm23, %ymm27, %ymm31) #define clear_regs() \ kxorq %k1, %k1, %k1; \ vzeroall; \ clear_zmm16_zmm31() /********************************************************************** GFNI helper macros and constants **********************************************************************/ #define BV8(a0,a1,a2,a3,a4,a5,a6,a7) \ ( (((a0) & 1) << 
0) | \ (((a1) & 1) << 1) | \ (((a2) & 1) << 2) | \ (((a3) & 1) << 3) | \ (((a4) & 1) << 4) | \ (((a5) & 1) << 5) | \ (((a6) & 1) << 6) | \ (((a7) & 1) << 7) ) #define BM8X8(l0,l1,l2,l3,l4,l5,l6,l7) \ ( ((l7) << (0 * 8)) | \ ((l6) << (1 * 8)) | \ ((l5) << (2 * 8)) | \ ((l4) << (3 * 8)) | \ ((l3) << (4 * 8)) | \ ((l2) << (5 * 8)) | \ ((l1) << (6 * 8)) | \ ((l0) << (7 * 8)) ) /* Pre-filters and post-filters constants for Camellia sboxes s1, s2, s3 and s4. * See http://urn.fi/URN:NBN:fi:oulu-201305311409, pages 43-48. * * Pre-filters are directly from above source, "θ₁"/"θ₄". Post-filters are * combination of function "A" (AES SubBytes affine transformation) and * "ψ₁"/"ψ₂"/"ψ₃". */ /* Constant from "θ₁(x)" and "θ₄(x)" functions. */ #define pre_filter_constant_s1234 BV8(1, 0, 1, 0, 0, 0, 1, 0) /* Constant from "ψ₁(A(x))" function: */ #define post_filter_constant_s14 BV8(0, 1, 1, 1, 0, 1, 1, 0) /* Constant from "ψ₂(A(x))" function: */ #define post_filter_constant_s2 BV8(0, 0, 1, 1, 1, 0, 1, 1) /* Constant from "ψ₃(A(x))" function: */ #define post_filter_constant_s3 BV8(1, 1, 1, 0, 1, 1, 0, 0) /********************************************************************** 64-way parallel camellia **********************************************************************/ /* roundsm64 (GFNI/AVX512 version) * IN: * x0..x7: byte-sliced AB state * mem_cd: register pointer storing CD state * key: index for key material * OUT: * x0..x7: new byte-sliced CD state */ #define roundsm64(x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2, t3, t4, t5, \ t6, t7, mem_cd, key) \ /* \ * S-function with AES subbytes \ */ \ vpbroadcastq .Lpre_filter_bitmatrix_s123 rRIP, t5; \ vpbroadcastq .Lpre_filter_bitmatrix_s4 rRIP, t2; \ vpbroadcastq .Lpost_filter_bitmatrix_s14 rRIP, t4; \ vpbroadcastq .Lpost_filter_bitmatrix_s2 rRIP, t3; \ vpbroadcastq .Lpost_filter_bitmatrix_s3 rRIP, t6; \ vpxor t7##_x, t7##_x, t7##_x; \ vpbroadcastq key, t0; /* higher 64-bit duplicate ignored */ \ \ /* prefilter sboxes */ \ vgf2p8affineqb $(pre_filter_constant_s1234), t5, x0, x0; \ vgf2p8affineqb $(pre_filter_constant_s1234), t5, x7, x7; \ vgf2p8affineqb $(pre_filter_constant_s1234), t2, x3, x3; \ vgf2p8affineqb $(pre_filter_constant_s1234), t2, x6, x6; \ vgf2p8affineqb $(pre_filter_constant_s1234), t5, x2, x2; \ vgf2p8affineqb $(pre_filter_constant_s1234), t5, x5, x5; \ vgf2p8affineqb $(pre_filter_constant_s1234), t5, x1, x1; \ vgf2p8affineqb $(pre_filter_constant_s1234), t5, x4, x4; \ \ /* sbox GF8 inverse + postfilter sboxes 1 and 4 */ \ vgf2p8affineinvqb $(post_filter_constant_s14), t4, x0, x0; \ vgf2p8affineinvqb $(post_filter_constant_s14), t4, x7, x7; \ vgf2p8affineinvqb $(post_filter_constant_s14), t4, x3, x3; \ vgf2p8affineinvqb $(post_filter_constant_s14), t4, x6, x6; \ \ /* sbox GF8 inverse + postfilter sbox 3 */ \ vgf2p8affineinvqb $(post_filter_constant_s3), t6, x2, x2; \ vgf2p8affineinvqb $(post_filter_constant_s3), t6, x5, x5; \ \ /* sbox GF8 inverse + postfilter sbox 2 */ \ vgf2p8affineinvqb $(post_filter_constant_s2), t3, x1, x1; \ vgf2p8affineinvqb $(post_filter_constant_s2), t3, x4, x4; \ \ vpsrldq $1, t0, t1; \ vpsrldq $2, t0, t2; \ vpshufb t7, t1, t1; \ vpsrldq $3, t0, t3; \ \ /* P-function */ \ vpxorq x5, x0, x0; \ vpxorq x6, x1, x1; \ vpxorq x7, x2, x2; \ vpxorq x4, x3, x3; \ \ vpshufb t7, t2, t2; \ vpsrldq $4, t0, t4; \ vpshufb t7, t3, t3; \ vpsrldq $5, t0, t5; \ vpshufb t7, t4, t4; \ \ vpxorq x2, x4, x4; \ vpxorq x3, x5, x5; \ vpxorq x0, x6, x6; \ vpxorq x1, x7, x7; \ \ vpsrldq $6, t0, t6; \ vpshufb t7, t5, t5; \ vpshufb t7, t6, t6; \ 
\ vpxorq x7, x0, x0; \ vpxorq x4, x1, x1; \ vpxorq x5, x2, x2; \ vpxorq x6, x3, x3; \ \ vpxorq x3, x4, x4; \ vpxorq x0, x5, x5; \ vpxorq x1, x6, x6; \ vpxorq x2, x7, x7; /* note: high and low parts swapped */ \ \ /* Add key material and result to CD (x becomes new CD) */ \ \ vpternlogq $0x96, mem_cd##_5, t6, x1; \ \ vpsrldq $7, t0, t6; \ vpshufb t7, t0, t0; \ vpshufb t7, t6, t7; \ \ vpternlogq $0x96, mem_cd##_4, t7, x0; \ vpternlogq $0x96, mem_cd##_6, t5, x2; \ vpternlogq $0x96, mem_cd##_7, t4, x3; \ vpternlogq $0x96, mem_cd##_0, t3, x4; \ vpternlogq $0x96, mem_cd##_1, t2, x5; \ vpternlogq $0x96, mem_cd##_2, t1, x6; \ vpternlogq $0x96, mem_cd##_3, t0, x7; /* * IN/OUT: * x0..x7: byte-sliced AB state preloaded * mem_ab: byte-sliced AB state in memory * mem_cb: byte-sliced CD state in memory */ #define two_roundsm64(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, i, dir, store_ab) \ roundsm64(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_cd, (key_table + (i) * 8)(CTX)); \ \ vmovdqu64 x0, mem_cd##_4; \ vmovdqu64 x1, mem_cd##_5; \ vmovdqu64 x2, mem_cd##_6; \ vmovdqu64 x3, mem_cd##_7; \ vmovdqu64 x4, mem_cd##_0; \ vmovdqu64 x5, mem_cd##_1; \ vmovdqu64 x6, mem_cd##_2; \ vmovdqu64 x7, mem_cd##_3; \ \ roundsm64(x4, x5, x6, x7, x0, x1, x2, x3, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, (key_table + ((i) + (dir)) * 8)(CTX)); \ \ store_ab(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab); #define dummy_store(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab) /* do nothing */ #define store_ab_state(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab) \ /* Store new AB state */ \ vmovdqu64 x4, mem_ab##_4; \ vmovdqu64 x5, mem_ab##_5; \ vmovdqu64 x6, mem_ab##_6; \ vmovdqu64 x7, mem_ab##_7; \ vmovdqu64 x0, mem_ab##_0; \ vmovdqu64 x1, mem_ab##_1; \ vmovdqu64 x2, mem_ab##_2; \ vmovdqu64 x3, mem_ab##_3; #define enc_rounds64(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, i) \ two_roundsm64(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 2, 1, store_ab_state); \ two_roundsm64(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 4, 1, store_ab_state); \ two_roundsm64(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 6, 1, dummy_store); #define dec_rounds64(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, i) \ two_roundsm64(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 7, -1, store_ab_state); \ two_roundsm64(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 5, -1, store_ab_state); \ two_roundsm64(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 3, -1, dummy_store); /* * IN: * v0..3: byte-sliced 32-bit integers * OUT: * v0..3: (IN << 1) * t0, t1, t2, zero: (IN >> 7) */ #define rol32_1_64(v0, v1, v2, v3, t0, t1, t2, zero, one) \ vpcmpltb zero, v0, %k1; \ vpaddb v0, v0, v0; \ vpaddb one, zero, t0{%k1}{z}; \ \ vpcmpltb zero, v1, %k1; \ vpaddb v1, v1, v1; \ vpaddb one, zero, t1{%k1}{z}; \ \ vpcmpltb zero, v2, %k1; \ vpaddb v2, v2, v2; \ vpaddb one, zero, t2{%k1}{z}; \ \ vpcmpltb zero, v3, %k1; \ vpaddb v3, v3, v3; \ vpaddb one, zero, zero{%k1}{z}; /* * IN: * r: byte-sliced AB state in memory * l: byte-sliced CD state in memory * OUT: * x0..x7: new byte-sliced CD state */ #define fls64(l, l0, l1, l2, l3, l4, l5, l6, l7, r, t0, t1, t2, t3, tt0, \ tt1, tt2, tt3, kll, klr, krl, krr, tmp) \ /* \ * t0 = 
kll; \ * t0 &= ll; \ * lr ^= rol32(t0, 1); \ */ \ vpbroadcastd kll, t0; /* only lowest 32-bit used */ \ vpbroadcastq .Lbyte_ones rRIP, tmp; \ vpxor tt3##_x, tt3##_x, tt3##_x; \ vpshufb tt3, t0, t3; \ vpsrldq $1, t0, t0; \ vpshufb tt3, t0, t2; \ vpsrldq $1, t0, t0; \ vpshufb tt3, t0, t1; \ vpsrldq $1, t0, t0; \ vpshufb tt3, t0, t0; \ \ vpandq l0, t0, t0; \ vpandq l1, t1, t1; \ vpandq l2, t2, t2; \ vpandq l3, t3, t3; \ \ rol32_1_64(t3, t2, t1, t0, tt0, tt1, tt2, tt3, tmp); \ \ vpternlogq $0x96, tt2, t0, l4; \ vpbroadcastd krr, t0; /* only lowest 32-bit used */ \ vmovdqu64 l4, l##_4; \ vpternlogq $0x96, tt1, t1, l5; \ vmovdqu64 l5, l##_5; \ vpternlogq $0x96, tt0, t2, l6; \ vmovdqu64 l6, l##_6; \ vpternlogq $0x96, tt3, t3, l7; \ vmovdqu64 l7, l##_7; \ vpxor tt3##_x, tt3##_x, tt3##_x; \ \ /* \ * t2 = krr; \ * t2 |= rr; \ * rl ^= t2; \ */ \ \ vpshufb tt3, t0, t3; \ vpsrldq $1, t0, t0; \ vpshufb tt3, t0, t2; \ vpsrldq $1, t0, t0; \ vpshufb tt3, t0, t1; \ vpsrldq $1, t0, t0; \ vpshufb tt3, t0, t0; \ \ vpternlogq $0x1e, r##_4, t0, r##_0; \ vpbroadcastd krl, t0; /* only lowest 32-bit used */ \ vpternlogq $0x1e, r##_5, t1, r##_1; \ vpternlogq $0x1e, r##_6, t2, r##_2; \ vpternlogq $0x1e, r##_7, t3, r##_3; \ \ /* \ * t2 = krl; \ * t2 &= rl; \ * rr ^= rol32(t2, 1); \ */ \ vpshufb tt3, t0, t3; \ vpsrldq $1, t0, t0; \ vpshufb tt3, t0, t2; \ vpsrldq $1, t0, t0; \ vpshufb tt3, t0, t1; \ vpsrldq $1, t0, t0; \ vpshufb tt3, t0, t0; \ \ vpandq r##_0, t0, t0; \ vpandq r##_1, t1, t1; \ vpandq r##_2, t2, t2; \ vpandq r##_3, t3, t3; \ \ rol32_1_64(t3, t2, t1, t0, tt0, tt1, tt2, tt3, tmp); \ \ vpternlogq $0x96, tt2, t0, r##_4; \ vpbroadcastd klr, t0; /* only lowest 32-bit used */ \ vpternlogq $0x96, tt1, t1, r##_5; \ vpternlogq $0x96, tt0, t2, r##_6; \ vpternlogq $0x96, tt3, t3, r##_7; \ vpxor tt3##_x, tt3##_x, tt3##_x; \ \ /* \ * t0 = klr; \ * t0 |= lr; \ * ll ^= t0; \ */ \ \ vpshufb tt3, t0, t3; \ vpsrldq $1, t0, t0; \ vpshufb tt3, t0, t2; \ vpsrldq $1, t0, t0; \ vpshufb tt3, t0, t1; \ vpsrldq $1, t0, t0; \ vpshufb tt3, t0, t0; \ \ vpternlogq $0x1e, l4, t0, l0; \ vmovdqu64 l0, l##_0; \ vpternlogq $0x1e, l5, t1, l1; \ vmovdqu64 l1, l##_1; \ vpternlogq $0x1e, l6, t2, l2; \ vmovdqu64 l2, l##_2; \ vpternlogq $0x1e, l7, t3, l3; \ vmovdqu64 l3, l##_3; #define transpose_4x4(x0, x1, x2, x3, t1, t2) \ vpunpckhdq x1, x0, t2; \ vpunpckldq x1, x0, x0; \ \ vpunpckldq x3, x2, t1; \ vpunpckhdq x3, x2, x2; \ \ vpunpckhqdq t1, x0, x1; \ vpunpcklqdq t1, x0, x0; \ \ vpunpckhqdq x2, t2, x3; \ vpunpcklqdq x2, t2, x2; #define byteslice_16x16b_fast(a0, b0, c0, d0, a1, b1, c1, d1, a2, b2, c2, d2, \ a3, b3, c3, d3, st0, st1) \ transpose_4x4(a0, a1, a2, a3, st0, st1); \ transpose_4x4(b0, b1, b2, b3, st0, st1); \ \ transpose_4x4(c0, c1, c2, c3, st0, st1); \ transpose_4x4(d0, d1, d2, d3, st0, st1); \ \ vbroadcasti64x2 .Lshufb_16x16b rRIP, st0; \ vpshufb st0, a0, a0; \ vpshufb st0, a1, a1; \ vpshufb st0, a2, a2; \ vpshufb st0, a3, a3; \ vpshufb st0, b0, b0; \ vpshufb st0, b1, b1; \ vpshufb st0, b2, b2; \ vpshufb st0, b3, b3; \ vpshufb st0, c0, c0; \ vpshufb st0, c1, c1; \ vpshufb st0, c2, c2; \ vpshufb st0, c3, c3; \ vpshufb st0, d0, d0; \ vpshufb st0, d1, d1; \ vpshufb st0, d2, d2; \ vpshufb st0, d3, d3; \ \ transpose_4x4(a0, b0, c0, d0, st0, st1); \ transpose_4x4(a1, b1, c1, d1, st0, st1); \ \ transpose_4x4(a2, b2, c2, d2, st0, st1); \ transpose_4x4(a3, b3, c3, d3, st0, st1); \ /* does not adjust output bytes inside vectors */ /* load blocks to registers and apply pre-whitening */ #define inpack64_pre(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, 
y2, y3, y4, y5, \ y6, y7, rio, key) \ vpbroadcastq key, x0; \ vpshufb .Lpack_bswap rRIP, x0, x0; \ \ vpxorq 0 * 64(rio), x0, y7; \ vpxorq 1 * 64(rio), x0, y6; \ vpxorq 2 * 64(rio), x0, y5; \ vpxorq 3 * 64(rio), x0, y4; \ vpxorq 4 * 64(rio), x0, y3; \ vpxorq 5 * 64(rio), x0, y2; \ vpxorq 6 * 64(rio), x0, y1; \ vpxorq 7 * 64(rio), x0, y0; \ vpxorq 8 * 64(rio), x0, x7; \ vpxorq 9 * 64(rio), x0, x6; \ vpxorq 10 * 64(rio), x0, x5; \ vpxorq 11 * 64(rio), x0, x4; \ vpxorq 12 * 64(rio), x0, x3; \ vpxorq 13 * 64(rio), x0, x2; \ vpxorq 14 * 64(rio), x0, x1; \ vpxorq 15 * 64(rio), x0, x0; /* byteslice pre-whitened blocks and store to temporary memory */ #define inpack64_post(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, tmp0, tmp1) \ byteslice_16x16b_fast(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, \ y4, y5, y6, y7, tmp0, tmp1); \ \ vmovdqu64 x0, mem_ab##_0; \ vmovdqu64 x1, mem_ab##_1; \ vmovdqu64 x2, mem_ab##_2; \ vmovdqu64 x3, mem_ab##_3; \ vmovdqu64 x4, mem_ab##_4; \ vmovdqu64 x5, mem_ab##_5; \ vmovdqu64 x6, mem_ab##_6; \ vmovdqu64 x7, mem_ab##_7; \ vmovdqu64 y0, mem_cd##_0; \ vmovdqu64 y1, mem_cd##_1; \ vmovdqu64 y2, mem_cd##_2; \ vmovdqu64 y3, mem_cd##_3; \ vmovdqu64 y4, mem_cd##_4; \ vmovdqu64 y5, mem_cd##_5; \ vmovdqu64 y6, mem_cd##_6; \ vmovdqu64 y7, mem_cd##_7; /* de-byteslice, apply post-whitening and store blocks */ #define outunpack64(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, \ y5, y6, y7, key, tmp0, tmp1) \ byteslice_16x16b_fast(y0, y4, x0, x4, y1, y5, x1, x5, y2, y6, x2, x6, \ y3, y7, x3, x7, tmp0, tmp1); \ \ vpbroadcastq key, tmp0; \ vpshufb .Lpack_bswap rRIP, tmp0, tmp0; \ \ vpxorq tmp0, y7, y7; \ vpxorq tmp0, y6, y6; \ vpxorq tmp0, y5, y5; \ vpxorq tmp0, y4, y4; \ vpxorq tmp0, y3, y3; \ vpxorq tmp0, y2, y2; \ vpxorq tmp0, y1, y1; \ vpxorq tmp0, y0, y0; \ vpxorq tmp0, x7, x7; \ vpxorq tmp0, x6, x6; \ vpxorq tmp0, x5, x5; \ vpxorq tmp0, x4, x4; \ vpxorq tmp0, x3, x3; \ vpxorq tmp0, x2, x2; \ vpxorq tmp0, x1, x1; \ vpxorq tmp0, x0, x0; #define write_output(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, rio) \ vmovdqu64 x0, 0 * 64(rio); \ vmovdqu64 x1, 1 * 64(rio); \ vmovdqu64 x2, 2 * 64(rio); \ vmovdqu64 x3, 3 * 64(rio); \ vmovdqu64 x4, 4 * 64(rio); \ vmovdqu64 x5, 5 * 64(rio); \ vmovdqu64 x6, 6 * 64(rio); \ vmovdqu64 x7, 7 * 64(rio); \ vmovdqu64 y0, 8 * 64(rio); \ vmovdqu64 y1, 9 * 64(rio); \ vmovdqu64 y2, 10 * 64(rio); \ vmovdqu64 y3, 11 * 64(rio); \ vmovdqu64 y4, 12 * 64(rio); \ vmovdqu64 y5, 13 * 64(rio); \ vmovdqu64 y6, 14 * 64(rio); \ vmovdqu64 y7, 15 * 64(rio); -.text +SECTION_RODATA #define SHUFB_BYTES(idx) \ 0 + (idx), 4 + (idx), 8 + (idx), 12 + (idx) _gcry_camellia_gfni_avx512__constants: ELF(.type _gcry_camellia_gfni_avx512__constants,@object;) .align 64 .Lpack_bswap: .long 0x00010203, 0x04050607, 0x80808080, 0x80808080 .long 0x00010203, 0x04050607, 0x80808080, 0x80808080 .long 0x00010203, 0x04050607, 0x80808080, 0x80808080 .long 0x00010203, 0x04050607, 0x80808080, 0x80808080 .Lcounter0123_lo: .quad 0, 0 .quad 1, 0 .quad 2, 0 .quad 3, 0 .align 16 .Lcounter4444_lo: .quad 4, 0 .Lcounter8888_lo: .quad 8, 0 .Lcounter16161616_lo: .quad 16, 0 .Lcounter1111_hi: .quad 0, 1 .Lshufb_16x16b: .byte SHUFB_BYTES(0), SHUFB_BYTES(1), SHUFB_BYTES(2), SHUFB_BYTES(3) /* For CTR-mode IV byteswap */ .Lbswap128_mask: .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 .Lbyte_ones: .byte 1, 1, 1, 1, 1, 1, 1, 1 /* Pre-filters and post-filters bit-matrixes for Camellia sboxes s1, s2, s3 * and s4. 
* See http://urn.fi/URN:NBN:fi:oulu-201305311409, pages 43-48. * * Pre-filters are directly from above source, "θ₁"/"θ₄". Post-filters are * combination of function "A" (AES SubBytes affine transformation) and * "ψ₁"/"ψ₂"/"ψ₃". */ /* Bit-matrix from "θ₁(x)" function: */ .Lpre_filter_bitmatrix_s123: .quad BM8X8(BV8(1, 1, 1, 0, 1, 1, 0, 1), BV8(0, 0, 1, 1, 0, 0, 1, 0), BV8(1, 1, 0, 1, 0, 0, 0, 0), BV8(1, 0, 1, 1, 0, 0, 1, 1), BV8(0, 0, 0, 0, 1, 1, 0, 0), BV8(1, 0, 1, 0, 0, 1, 0, 0), BV8(0, 0, 1, 0, 1, 1, 0, 0), BV8(1, 0, 0, 0, 0, 1, 1, 0)) /* Bit-matrix from "θ₄(x)" function: */ .Lpre_filter_bitmatrix_s4: .quad BM8X8(BV8(1, 1, 0, 1, 1, 0, 1, 1), BV8(0, 1, 1, 0, 0, 1, 0, 0), BV8(1, 0, 1, 0, 0, 0, 0, 1), BV8(0, 1, 1, 0, 0, 1, 1, 1), BV8(0, 0, 0, 1, 1, 0, 0, 0), BV8(0, 1, 0, 0, 1, 0, 0, 1), BV8(0, 1, 0, 1, 1, 0, 0, 0), BV8(0, 0, 0, 0, 1, 1, 0, 1)) /* Bit-matrix from "ψ₁(A(x))" function: */ .Lpost_filter_bitmatrix_s14: .quad BM8X8(BV8(0, 0, 0, 0, 0, 0, 0, 1), BV8(0, 1, 1, 0, 0, 1, 1, 0), BV8(1, 0, 1, 1, 1, 1, 1, 0), BV8(0, 0, 0, 1, 1, 0, 1, 1), BV8(1, 0, 0, 0, 1, 1, 1, 0), BV8(0, 1, 0, 1, 1, 1, 1, 0), BV8(0, 1, 1, 1, 1, 1, 1, 1), BV8(0, 0, 0, 1, 1, 1, 0, 0)) /* Bit-matrix from "ψ₂(A(x))" function: */ .Lpost_filter_bitmatrix_s2: .quad BM8X8(BV8(0, 0, 0, 1, 1, 1, 0, 0), BV8(0, 0, 0, 0, 0, 0, 0, 1), BV8(0, 1, 1, 0, 0, 1, 1, 0), BV8(1, 0, 1, 1, 1, 1, 1, 0), BV8(0, 0, 0, 1, 1, 0, 1, 1), BV8(1, 0, 0, 0, 1, 1, 1, 0), BV8(0, 1, 0, 1, 1, 1, 1, 0), BV8(0, 1, 1, 1, 1, 1, 1, 1)) /* Bit-matrix from "ψ₃(A(x))" function: */ .Lpost_filter_bitmatrix_s3: .quad BM8X8(BV8(0, 1, 1, 0, 0, 1, 1, 0), BV8(1, 0, 1, 1, 1, 1, 1, 0), BV8(0, 0, 0, 1, 1, 0, 1, 1), BV8(1, 0, 0, 0, 1, 1, 1, 0), BV8(0, 1, 0, 1, 1, 1, 1, 0), BV8(0, 1, 1, 1, 1, 1, 1, 1), BV8(0, 0, 0, 1, 1, 1, 0, 0), BV8(0, 0, 0, 0, 0, 0, 0, 1)) ELF(.size _gcry_camellia_gfni_avx512__constants,.-_gcry_camellia_gfni_avx512__constants;) +.text + .align 16 ELF(.type __camellia_gfni_avx512_enc_blk64,@function;) __camellia_gfni_avx512_enc_blk64: /* input: * %rdi: ctx, CTX * %r8d: 24 for 16 byte key, 32 for larger * %zmm0..%zmm15: 64 plaintext blocks * output: * %zmm0..%zmm15: 64 encrypted blocks, order swapped: * 7, 8, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 */ CFI_STARTPROC(); leaq (-8 * 8)(CTX, %r8, 8), %r8; inpack64_post(%zmm0, %zmm1, %zmm2, %zmm3, %zmm4, %zmm5, %zmm6, %zmm7, %zmm8, %zmm9, %zmm10, %zmm11, %zmm12, %zmm13, %zmm14, %zmm15, mem_ab, mem_cd, %zmm30, %zmm31); .align 8 .Lenc_loop: enc_rounds64(%zmm0, %zmm1, %zmm2, %zmm3, %zmm4, %zmm5, %zmm6, %zmm7, %zmm8, %zmm9, %zmm10, %zmm11, %zmm12, %zmm13, %zmm14, %zmm15, mem_ab, mem_cd, 0); cmpq %r8, CTX; je .Lenc_done; leaq (8 * 8)(CTX), CTX; fls64(mem_ab, %zmm0, %zmm1, %zmm2, %zmm3, %zmm4, %zmm5, %zmm6, %zmm7, mem_cd, %zmm8, %zmm9, %zmm10, %zmm11, %zmm12, %zmm13, %zmm14, %zmm15, ((key_table) + 0)(CTX), ((key_table) + 4)(CTX), ((key_table) + 8)(CTX), ((key_table) + 12)(CTX), %zmm31); jmp .Lenc_loop; .align 8 .Lenc_done: /* load CD for output */ vmovdqu64 mem_cd_0, %zmm8; vmovdqu64 mem_cd_1, %zmm9; vmovdqu64 mem_cd_2, %zmm10; vmovdqu64 mem_cd_3, %zmm11; vmovdqu64 mem_cd_4, %zmm12; vmovdqu64 mem_cd_5, %zmm13; vmovdqu64 mem_cd_6, %zmm14; vmovdqu64 mem_cd_7, %zmm15; outunpack64(%zmm0, %zmm1, %zmm2, %zmm3, %zmm4, %zmm5, %zmm6, %zmm7, %zmm8, %zmm9, %zmm10, %zmm11, %zmm12, %zmm13, %zmm14, %zmm15, ((key_table) + 8 * 8)(%r8), %zmm30, %zmm31); ret_spec_stop; CFI_ENDPROC(); ELF(.size __camellia_gfni_avx512_enc_blk64,.-__camellia_gfni_avx512_enc_blk64;) .align 16 ELF(.type __camellia_gfni_avx512_dec_blk64,@function;) 
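It may help to spell out what the GFNI instructions used with these constants compute. Assuming the BV8/BM8X8 packing defined earlier (BV8 builds a matrix row least-significant-bit first, BM8X8 stores row l0 in the most significant byte of the 64-bit constant), vgf2p8affineqb produces, for every input byte x, a result whose bit i is the GF(2) dot product of row l_i with x, XORed with bit i of the immediate; vgf2p8affineinvqb does the same after first replacing x by its multiplicative inverse in GF(2^8) with the AES reduction polynomial, which is why a single instruction can cover the "GF8 inverse + postfilter" step per s-box. A scalar C model of a single byte lane (illustrative only, not part of the patch):

#include <stdint.h>

/* One byte lane of vgf2p8affineqb, for a matrix packed with BM8X8/BV8:
 * result bit i = parity(row l_i AND x) xor bit i of the immediate. */
static uint8_t gf2p8affine_byte(const uint8_t l[8] /* rows l0..l7 */,
                                uint8_t x, uint8_t imm8)
{
    uint8_t y = 0;
    for (int i = 0; i < 8; i++) {
        uint8_t t = (uint8_t)(l[i] & x);
        t ^= t >> 4; t ^= t >> 2; t ^= t >> 1;      /* parity of row & x */
        y |= (uint8_t)((t & 1u) << i);
    }
    return (uint8_t)(y ^ imm8);
}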
__camellia_gfni_avx512_dec_blk64: /* input: * %rdi: ctx, CTX * %r8d: 24 for 16 byte key, 32 for larger * %zmm0..%zmm15: 64 encrypted blocks * output: * %zmm0..%zmm15: 64 plaintext blocks, order swapped: * 7, 8, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 */ CFI_STARTPROC(); movq %r8, %rcx; movq CTX, %r8 leaq (-8 * 8)(CTX, %rcx, 8), CTX; inpack64_post(%zmm0, %zmm1, %zmm2, %zmm3, %zmm4, %zmm5, %zmm6, %zmm7, %zmm8, %zmm9, %zmm10, %zmm11, %zmm12, %zmm13, %zmm14, %zmm15, mem_ab, mem_cd, %zmm30, %zmm31); .align 8 .Ldec_loop: dec_rounds64(%zmm0, %zmm1, %zmm2, %zmm3, %zmm4, %zmm5, %zmm6, %zmm7, %zmm8, %zmm9, %zmm10, %zmm11, %zmm12, %zmm13, %zmm14, %zmm15, mem_ab, mem_cd, 0); cmpq %r8, CTX; je .Ldec_done; fls64(mem_ab, %zmm0, %zmm1, %zmm2, %zmm3, %zmm4, %zmm5, %zmm6, %zmm7, mem_cd, %zmm8, %zmm9, %zmm10, %zmm11, %zmm12, %zmm13, %zmm14, %zmm15, ((key_table) + 8)(CTX), ((key_table) + 12)(CTX), ((key_table) + 0)(CTX), ((key_table) + 4)(CTX), %zmm31); leaq (-8 * 8)(CTX), CTX; jmp .Ldec_loop; .align 8 .Ldec_done: /* load CD for output */ vmovdqu64 mem_cd_0, %zmm8; vmovdqu64 mem_cd_1, %zmm9; vmovdqu64 mem_cd_2, %zmm10; vmovdqu64 mem_cd_3, %zmm11; vmovdqu64 mem_cd_4, %zmm12; vmovdqu64 mem_cd_5, %zmm13; vmovdqu64 mem_cd_6, %zmm14; vmovdqu64 mem_cd_7, %zmm15; outunpack64(%zmm0, %zmm1, %zmm2, %zmm3, %zmm4, %zmm5, %zmm6, %zmm7, %zmm8, %zmm9, %zmm10, %zmm11, %zmm12, %zmm13, %zmm14, %zmm15, (key_table)(CTX), %zmm30, %zmm31); ret_spec_stop; CFI_ENDPROC(); ELF(.size __camellia_gfni_avx512_dec_blk64,.-__camellia_gfni_avx512_dec_blk64;) #define add_le128(out, in, lo_counter, hi_counter1) \ vpaddq lo_counter, in, out; \ vpcmpuq $1, lo_counter, out, %k1; \ kaddb %k1, %k1, %k1; \ vpaddq hi_counter1, out, out{%k1}; .align 16 .globl _gcry_camellia_gfni_avx512_ctr_enc ELF(.type _gcry_camellia_gfni_avx512_ctr_enc,@function;) _gcry_camellia_gfni_avx512_ctr_enc: /* input: * %rdi: ctx, CTX * %rsi: dst (64 blocks) * %rdx: src (64 blocks) * %rcx: iv (big endian, 128bit) */ CFI_STARTPROC(); spec_stop_avx512; vbroadcasti64x2 .Lbswap128_mask rRIP, %zmm19; vmovdqa64 .Lcounter0123_lo rRIP, %zmm21; vbroadcasti64x2 .Lcounter4444_lo rRIP, %zmm22; vbroadcasti64x2 .Lcounter8888_lo rRIP, %zmm23; vbroadcasti64x2 .Lcounter16161616_lo rRIP, %zmm24; vbroadcasti64x2 .Lcounter1111_hi rRIP, %zmm25; /* load IV and byteswap */ movq 8(%rcx), %r11; movq (%rcx), %r10; bswapq %r11; bswapq %r10; vbroadcasti64x2 (%rcx), %zmm0; vpshufb %zmm19, %zmm0, %zmm0; cmpl $128, key_bitlength(CTX); movl $32, %r8d; movl $24, %eax; cmovel %eax, %r8d; /* max */ /* check need for handling 64-bit overflow and carry */ cmpq $(0xffffffffffffffff - 64), %r11; ja .Lload_ctr_carry; /* construct IVs */ vpaddq %zmm21, %zmm0, %zmm15; /* +0:+1:+2:+3 */ vpaddq %zmm22, %zmm15, %zmm14; /* +4:+5:+6:+7 */ vpaddq %zmm23, %zmm15, %zmm13; /* +8:+9:+10:+11 */ vpaddq %zmm23, %zmm14, %zmm12; /* +12:+13:+14:+15 */ vpaddq %zmm24, %zmm15, %zmm11; /* +16... */ vpaddq %zmm24, %zmm14, %zmm10; /* +20... */ vpaddq %zmm24, %zmm13, %zmm9; /* +24... */ vpaddq %zmm24, %zmm12, %zmm8; /* +28... */ vpaddq %zmm24, %zmm11, %zmm7; /* +32... */ vpaddq %zmm24, %zmm10, %zmm6; /* +36... */ vpaddq %zmm24, %zmm9, %zmm5; /* +40... */ vpaddq %zmm24, %zmm8, %zmm4; /* +44... */ vpaddq %zmm24, %zmm7, %zmm3; /* +48... */ vpaddq %zmm24, %zmm6, %zmm2; /* +52... */ vpaddq %zmm24, %zmm5, %zmm1; /* +56... */ vpaddq %zmm24, %zmm4, %zmm0; /* +60... 
*/ jmp .Lload_ctr_done; .align 4 .Lload_ctr_carry: /* construct IVs */ add_le128(%zmm15, %zmm0, %zmm21, %zmm25); /* +0:+1:+2:+3 */ add_le128(%zmm14, %zmm15, %zmm22, %zmm25); /* +4:+5:+6:+7 */ add_le128(%zmm13, %zmm15, %zmm23, %zmm25); /* +8:+9:+10:+11 */ add_le128(%zmm12, %zmm14, %zmm23, %zmm25); /* +12:+13:+14:+15 */ add_le128(%zmm11, %zmm15, %zmm24, %zmm25); /* +16... */ add_le128(%zmm10, %zmm14, %zmm24, %zmm25); /* +20... */ add_le128(%zmm9, %zmm13, %zmm24, %zmm25); /* +24... */ add_le128(%zmm8, %zmm12, %zmm24, %zmm25); /* +28... */ add_le128(%zmm7, %zmm11, %zmm24, %zmm25); /* +32... */ add_le128(%zmm6, %zmm10, %zmm24, %zmm25); /* +36... */ add_le128(%zmm5, %zmm9, %zmm24, %zmm25); /* +40... */ add_le128(%zmm4, %zmm8, %zmm24, %zmm25); /* +44... */ add_le128(%zmm3, %zmm7, %zmm24, %zmm25); /* +48... */ add_le128(%zmm2, %zmm6, %zmm24, %zmm25); /* +52... */ add_le128(%zmm1, %zmm5, %zmm24, %zmm25); /* +56... */ add_le128(%zmm0, %zmm4, %zmm24, %zmm25); /* +60... */ .align 4 .Lload_ctr_done: vpbroadcastq (key_table)(CTX), %zmm16; vpshufb .Lpack_bswap rRIP, %zmm16, %zmm16; /* Byte-swap IVs and update counter. */ addq $64, %r11; adcq $0, %r10; vpshufb %zmm19, %zmm15, %zmm15; vpshufb %zmm19, %zmm14, %zmm14; vpshufb %zmm19, %zmm13, %zmm13; vpshufb %zmm19, %zmm12, %zmm12; vpshufb %zmm19, %zmm11, %zmm11; vpshufb %zmm19, %zmm10, %zmm10; vpshufb %zmm19, %zmm9, %zmm9; vpshufb %zmm19, %zmm8, %zmm8; bswapq %r11; bswapq %r10; vpshufb %zmm19, %zmm7, %zmm7; vpshufb %zmm19, %zmm6, %zmm6; vpshufb %zmm19, %zmm5, %zmm5; vpshufb %zmm19, %zmm4, %zmm4; vpshufb %zmm19, %zmm3, %zmm3; vpshufb %zmm19, %zmm2, %zmm2; vpshufb %zmm19, %zmm1, %zmm1; vpshufb %zmm19, %zmm0, %zmm0; movq %r11, 8(%rcx); movq %r10, (%rcx); /* inpack64_pre: */ vpxorq %zmm0, %zmm16, %zmm0; vpxorq %zmm1, %zmm16, %zmm1; vpxorq %zmm2, %zmm16, %zmm2; vpxorq %zmm3, %zmm16, %zmm3; vpxorq %zmm4, %zmm16, %zmm4; vpxorq %zmm5, %zmm16, %zmm5; vpxorq %zmm6, %zmm16, %zmm6; vpxorq %zmm7, %zmm16, %zmm7; vpxorq %zmm8, %zmm16, %zmm8; vpxorq %zmm9, %zmm16, %zmm9; vpxorq %zmm10, %zmm16, %zmm10; vpxorq %zmm11, %zmm16, %zmm11; vpxorq %zmm12, %zmm16, %zmm12; vpxorq %zmm13, %zmm16, %zmm13; vpxorq %zmm14, %zmm16, %zmm14; vpxorq %zmm15, %zmm16, %zmm15; call __camellia_gfni_avx512_enc_blk64; vpxorq 0 * 64(%rdx), %zmm7, %zmm7; vpxorq 1 * 64(%rdx), %zmm6, %zmm6; vpxorq 2 * 64(%rdx), %zmm5, %zmm5; vpxorq 3 * 64(%rdx), %zmm4, %zmm4; vpxorq 4 * 64(%rdx), %zmm3, %zmm3; vpxorq 5 * 64(%rdx), %zmm2, %zmm2; vpxorq 6 * 64(%rdx), %zmm1, %zmm1; vpxorq 7 * 64(%rdx), %zmm0, %zmm0; vpxorq 8 * 64(%rdx), %zmm15, %zmm15; vpxorq 9 * 64(%rdx), %zmm14, %zmm14; vpxorq 10 * 64(%rdx), %zmm13, %zmm13; vpxorq 11 * 64(%rdx), %zmm12, %zmm12; vpxorq 12 * 64(%rdx), %zmm11, %zmm11; vpxorq 13 * 64(%rdx), %zmm10, %zmm10; vpxorq 14 * 64(%rdx), %zmm9, %zmm9; vpxorq 15 * 64(%rdx), %zmm8, %zmm8; write_output(%zmm7, %zmm6, %zmm5, %zmm4, %zmm3, %zmm2, %zmm1, %zmm0, %zmm15, %zmm14, %zmm13, %zmm12, %zmm11, %zmm10, %zmm9, %zmm8, %rsi); clear_regs(); ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_camellia_gfni_avx512_ctr_enc,.-_gcry_camellia_gfni_avx512_ctr_enc;) .align 16 .globl _gcry_camellia_gfni_avx512_cbc_dec ELF(.type _gcry_camellia_gfni_avx512_cbc_dec,@function;) _gcry_camellia_gfni_avx512_cbc_dec: /* input: * %rdi: ctx, CTX * %rsi: dst (64 blocks) * %rdx: src (64 blocks) * %rcx: iv */ CFI_STARTPROC(); spec_stop_avx512; movq %rcx, %r9; cmpl $128, key_bitlength(CTX); movl $32, %r8d; movl $24, %eax; cmovel %eax, %r8d; /* max */ inpack64_pre(%zmm0, %zmm1, %zmm2, %zmm3, %zmm4, %zmm5, %zmm6, %zmm7, %zmm8, %zmm9, 
%zmm10, %zmm11, %zmm12, %zmm13, %zmm14, %zmm15, %rdx, (key_table)(CTX, %r8, 8)); call __camellia_gfni_avx512_dec_blk64; /* XOR output with IV */ vmovdqu64 (%r9), %xmm16; vinserti64x2 $1, (0 * 16)(%rdx), %ymm16, %ymm16; vinserti64x4 $1, (1 * 16)(%rdx), %zmm16, %zmm16; vpxorq %zmm16, %zmm7, %zmm7; vpxorq (0 * 64 + 48)(%rdx), %zmm6, %zmm6; vpxorq (1 * 64 + 48)(%rdx), %zmm5, %zmm5; vpxorq (2 * 64 + 48)(%rdx), %zmm4, %zmm4; vpxorq (3 * 64 + 48)(%rdx), %zmm3, %zmm3; vpxorq (4 * 64 + 48)(%rdx), %zmm2, %zmm2; vpxorq (5 * 64 + 48)(%rdx), %zmm1, %zmm1; vpxorq (6 * 64 + 48)(%rdx), %zmm0, %zmm0; vpxorq (7 * 64 + 48)(%rdx), %zmm15, %zmm15; vpxorq (8 * 64 + 48)(%rdx), %zmm14, %zmm14; vpxorq (9 * 64 + 48)(%rdx), %zmm13, %zmm13; vpxorq (10 * 64 + 48)(%rdx), %zmm12, %zmm12; vpxorq (11 * 64 + 48)(%rdx), %zmm11, %zmm11; vpxorq (12 * 64 + 48)(%rdx), %zmm10, %zmm10; vpxorq (13 * 64 + 48)(%rdx), %zmm9, %zmm9; vpxorq (14 * 64 + 48)(%rdx), %zmm8, %zmm8; vmovdqu64 (15 * 64 + 48)(%rdx), %xmm16; write_output(%zmm7, %zmm6, %zmm5, %zmm4, %zmm3, %zmm2, %zmm1, %zmm0, %zmm15, %zmm14, %zmm13, %zmm12, %zmm11, %zmm10, %zmm9, %zmm8, %rsi); /* store new IV */ vmovdqu64 %xmm16, (0)(%r9); clear_regs(); ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_camellia_gfni_avx512_cbc_dec,.-_gcry_camellia_gfni_avx512_cbc_dec;) .align 16 .globl _gcry_camellia_gfni_avx512_cfb_dec ELF(.type _gcry_camellia_gfni_avx512_cfb_dec,@function;) _gcry_camellia_gfni_avx512_cfb_dec: /* input: * %rdi: ctx, CTX * %rsi: dst (32 blocks) * %rdx: src (32 blocks) * %rcx: iv */ CFI_STARTPROC(); spec_stop_avx512; cmpl $128, key_bitlength(CTX); movl $32, %r8d; movl $24, %eax; cmovel %eax, %r8d; /* max */ /* inpack64_pre: */ vpbroadcastq (key_table)(CTX), %zmm0; vpshufb .Lpack_bswap rRIP, %zmm0, %zmm0; vmovdqu64 (%rcx), %xmm15; vinserti64x2 $1, (%rdx), %ymm15, %ymm15; vinserti64x4 $1, 16(%rdx), %zmm15, %zmm15; vpxorq %zmm15, %zmm0, %zmm15; vpxorq (0 * 64 + 48)(%rdx), %zmm0, %zmm14; vpxorq (1 * 64 + 48)(%rdx), %zmm0, %zmm13; vpxorq (2 * 64 + 48)(%rdx), %zmm0, %zmm12; vpxorq (3 * 64 + 48)(%rdx), %zmm0, %zmm11; vpxorq (4 * 64 + 48)(%rdx), %zmm0, %zmm10; vpxorq (5 * 64 + 48)(%rdx), %zmm0, %zmm9; vpxorq (6 * 64 + 48)(%rdx), %zmm0, %zmm8; vpxorq (7 * 64 + 48)(%rdx), %zmm0, %zmm7; vpxorq (8 * 64 + 48)(%rdx), %zmm0, %zmm6; vpxorq (9 * 64 + 48)(%rdx), %zmm0, %zmm5; vpxorq (10 * 64 + 48)(%rdx), %zmm0, %zmm4; vpxorq (11 * 64 + 48)(%rdx), %zmm0, %zmm3; vpxorq (12 * 64 + 48)(%rdx), %zmm0, %zmm2; vpxorq (13 * 64 + 48)(%rdx), %zmm0, %zmm1; vpxorq (14 * 64 + 48)(%rdx), %zmm0, %zmm0; vmovdqu64 (15 * 64 + 48)(%rdx), %xmm16; vmovdqu64 %xmm16, (%rcx); /* store new IV */ call __camellia_gfni_avx512_enc_blk64; vpxorq 0 * 64(%rdx), %zmm7, %zmm7; vpxorq 1 * 64(%rdx), %zmm6, %zmm6; vpxorq 2 * 64(%rdx), %zmm5, %zmm5; vpxorq 3 * 64(%rdx), %zmm4, %zmm4; vpxorq 4 * 64(%rdx), %zmm3, %zmm3; vpxorq 5 * 64(%rdx), %zmm2, %zmm2; vpxorq 6 * 64(%rdx), %zmm1, %zmm1; vpxorq 7 * 64(%rdx), %zmm0, %zmm0; vpxorq 8 * 64(%rdx), %zmm15, %zmm15; vpxorq 9 * 64(%rdx), %zmm14, %zmm14; vpxorq 10 * 64(%rdx), %zmm13, %zmm13; vpxorq 11 * 64(%rdx), %zmm12, %zmm12; vpxorq 12 * 64(%rdx), %zmm11, %zmm11; vpxorq 13 * 64(%rdx), %zmm10, %zmm10; vpxorq 14 * 64(%rdx), %zmm9, %zmm9; vpxorq 15 * 64(%rdx), %zmm8, %zmm8; write_output(%zmm7, %zmm6, %zmm5, %zmm4, %zmm3, %zmm2, %zmm1, %zmm0, %zmm15, %zmm14, %zmm13, %zmm12, %zmm11, %zmm10, %zmm9, %zmm8, %rsi); clear_regs(); ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_camellia_gfni_avx512_cfb_dec,.-_gcry_camellia_gfni_avx512_cfb_dec;) .align 16 .globl 
_gcry_camellia_gfni_avx512_ocb_enc ELF(.type _gcry_camellia_gfni_avx512_ocb_enc,@function;) _gcry_camellia_gfni_avx512_ocb_enc: /* input: * %rdi: ctx, CTX * %rsi: dst (64 blocks) * %rdx: src (64 blocks) * %rcx: offset * %r8 : checksum * %r9 : L pointers (void *L[64]) */ CFI_STARTPROC(); spec_stop_avx512; pushq %r12; CFI_PUSH(%r12); pushq %r13; CFI_PUSH(%r13); pushq %r14; CFI_PUSH(%r14); pushq %r15; CFI_PUSH(%r15); pushq %rbx; CFI_PUSH(%rbx); vmovdqu64 (%rcx), %xmm30; /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ /* Checksum_i = Checksum_{i-1} xor P_i */ /* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i) */ #define OCB_INPUT(n, l0reg, l1reg, l2reg, l3reg, zreg, zplain) \ vmovdqu64 (n * 64)(%rdx), zplain; \ vpxorq (l0reg), %xmm30, %xmm16; \ vpxorq (l1reg), %xmm16, %xmm30; \ vinserti64x2 $1, %xmm30, %ymm16, %ymm16; \ vpxorq (l2reg), %xmm30, %xmm30; \ vinserti64x2 $2, %xmm30, %zmm16, %zmm16; \ vpxorq (l3reg), %xmm30, %xmm30; \ vinserti64x2 $3, %xmm30, %zmm16, %zmm16; \ vpxorq zplain, %zmm16, zreg; \ vmovdqu64 %zmm16, (n * 64)(%rsi); #define OCB_LOAD_PTRS(n) \ movq ((n * 4 * 8) + (0 * 8))(%r9), %r10; \ movq ((n * 4 * 8) + (1 * 8))(%r9), %r11; \ movq ((n * 4 * 8) + (2 * 8))(%r9), %r12; \ movq ((n * 4 * 8) + (3 * 8))(%r9), %r13; \ movq ((n * 4 * 8) + (4 * 8))(%r9), %r14; \ movq ((n * 4 * 8) + (5 * 8))(%r9), %r15; \ movq ((n * 4 * 8) + (6 * 8))(%r9), %rax; \ movq ((n * 4 * 8) + (7 * 8))(%r9), %rbx; OCB_LOAD_PTRS(0); OCB_INPUT(0, %r10, %r11, %r12, %r13, %zmm15, %zmm20); OCB_INPUT(1, %r14, %r15, %rax, %rbx, %zmm14, %zmm21); OCB_LOAD_PTRS(2); OCB_INPUT(2, %r10, %r11, %r12, %r13, %zmm13, %zmm22); vpternlogq $0x96, %zmm20, %zmm21, %zmm22; OCB_INPUT(3, %r14, %r15, %rax, %rbx, %zmm12, %zmm23); OCB_LOAD_PTRS(4); OCB_INPUT(4, %r10, %r11, %r12, %r13, %zmm11, %zmm24); OCB_INPUT(5, %r14, %r15, %rax, %rbx, %zmm10, %zmm25); vpternlogq $0x96, %zmm23, %zmm24, %zmm25; OCB_LOAD_PTRS(6); OCB_INPUT(6, %r10, %r11, %r12, %r13, %zmm9, %zmm20); OCB_INPUT(7, %r14, %r15, %rax, %rbx, %zmm8, %zmm21); OCB_LOAD_PTRS(8); OCB_INPUT(8, %r10, %r11, %r12, %r13, %zmm7, %zmm26); vpternlogq $0x96, %zmm20, %zmm21, %zmm26; OCB_INPUT(9, %r14, %r15, %rax, %rbx, %zmm6, %zmm23); OCB_LOAD_PTRS(10); OCB_INPUT(10, %r10, %r11, %r12, %r13, %zmm5, %zmm24); OCB_INPUT(11, %r14, %r15, %rax, %rbx, %zmm4, %zmm27); vpternlogq $0x96, %zmm23, %zmm24, %zmm27; OCB_LOAD_PTRS(12); OCB_INPUT(12, %r10, %r11, %r12, %r13, %zmm3, %zmm20); OCB_INPUT(13, %r14, %r15, %rax, %rbx, %zmm2, %zmm21); OCB_LOAD_PTRS(14); OCB_INPUT(14, %r10, %r11, %r12, %r13, %zmm1, %zmm23); vpternlogq $0x96, %zmm20, %zmm21, %zmm23; OCB_INPUT(15, %r14, %r15, %rax, %rbx, %zmm0, %zmm24); #undef OCB_LOAD_PTRS #undef OCB_INPUT vpbroadcastq (key_table)(CTX), %zmm16; vpshufb .Lpack_bswap rRIP, %zmm16, %zmm16; vpternlogq $0x96, %zmm24, %zmm22, %zmm25; vpternlogq $0x96, %zmm26, %zmm27, %zmm23; vpxorq %zmm25, %zmm23, %zmm20; vextracti64x4 $1, %zmm20, %ymm21; vpxorq %ymm21, %ymm20, %ymm20; vextracti64x2 $1, %ymm20, %xmm21; vpternlogq $0x96, (%r8), %xmm21, %xmm20; vmovdqu64 %xmm30, (%rcx); vmovdqu64 %xmm20, (%r8); cmpl $128, key_bitlength(CTX); movl $32, %r8d; movl $24, %eax; cmovel %eax, %r8d; /* max */ /* inpack64_pre: */ vpxorq %zmm0, %zmm16, %zmm0; vpxorq %zmm1, %zmm16, %zmm1; vpxorq %zmm2, %zmm16, %zmm2; vpxorq %zmm3, %zmm16, %zmm3; vpxorq %zmm4, %zmm16, %zmm4; vpxorq %zmm5, %zmm16, %zmm5; vpxorq %zmm6, %zmm16, %zmm6; vpxorq %zmm7, %zmm16, %zmm7; vpxorq %zmm8, %zmm16, %zmm8; vpxorq %zmm9, %zmm16, %zmm9; vpxorq %zmm10, %zmm16, %zmm10; vpxorq %zmm11, %zmm16, %zmm11; vpxorq %zmm12, %zmm16, %zmm12; 
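The OCB_INPUT/OCB_LOAD_PTRS macros above build four chained offsets per 512-bit register (vinserti64x2 $1..$3) and store the masked input in the same pass; the plaintext checksum is then reduced with vpternlogq $0x96, whose truth table 0x96 is exactly the three-way XOR A ^ B ^ C, so three 512-bit terms are folded per instruction before the final 512-to-256-to-128-bit extraction. A scalar sketch of the per-group offset chaining (illustrative C, not part of the patch):

#include <stdint.h>

typedef struct { uint8_t b[16]; } block128;

static void xor_into(block128 *d, const block128 *s)
{
    for (int i = 0; i < 16; i++)
        d->b[i] ^= s->b[i];
}

/* Scalar model of one OCB_INPUT step in the AVX-512 path: four consecutive
 * offsets are produced by chaining through four L values and packed into one
 * 512-bit register; "offset" leaves the function holding the last offset,
 * ready for the next group of four blocks. */
static void ocb_offsets4(block128 *offset, const block128 *L[4],
                         block128 out[4])
{
    for (int i = 0; i < 4; i++) {
        xor_into(offset, L[i]);   /* Offset_j = Offset_{j-1} ^ L_{ntz(j)} */
        out[i] = *offset;
    }
}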
vpxorq %zmm13, %zmm16, %zmm13; vpxorq %zmm14, %zmm16, %zmm14; vpxorq %zmm15, %zmm16, %zmm15; call __camellia_gfni_avx512_enc_blk64; vpxorq 0 * 64(%rsi), %zmm7, %zmm7; vpxorq 1 * 64(%rsi), %zmm6, %zmm6; vpxorq 2 * 64(%rsi), %zmm5, %zmm5; vpxorq 3 * 64(%rsi), %zmm4, %zmm4; vpxorq 4 * 64(%rsi), %zmm3, %zmm3; vpxorq 5 * 64(%rsi), %zmm2, %zmm2; vpxorq 6 * 64(%rsi), %zmm1, %zmm1; vpxorq 7 * 64(%rsi), %zmm0, %zmm0; vpxorq 8 * 64(%rsi), %zmm15, %zmm15; vpxorq 9 * 64(%rsi), %zmm14, %zmm14; vpxorq 10 * 64(%rsi), %zmm13, %zmm13; vpxorq 11 * 64(%rsi), %zmm12, %zmm12; vpxorq 12 * 64(%rsi), %zmm11, %zmm11; vpxorq 13 * 64(%rsi), %zmm10, %zmm10; vpxorq 14 * 64(%rsi), %zmm9, %zmm9; vpxorq 15 * 64(%rsi), %zmm8, %zmm8; write_output(%zmm7, %zmm6, %zmm5, %zmm4, %zmm3, %zmm2, %zmm1, %zmm0, %zmm15, %zmm14, %zmm13, %zmm12, %zmm11, %zmm10, %zmm9, %zmm8, %rsi); clear_regs(); popq %rbx; CFI_RESTORE(%rbx); popq %r15; CFI_RESTORE(%r15); popq %r14; CFI_RESTORE(%r14); popq %r13; CFI_RESTORE(%r12); popq %r12; CFI_RESTORE(%r13); ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_camellia_gfni_avx512_ocb_enc,.-_gcry_camellia_gfni_avx512_ocb_enc;) .align 16 .globl _gcry_camellia_gfni_avx512_ocb_dec ELF(.type _gcry_camellia_gfni_avx512_ocb_dec,@function;) _gcry_camellia_gfni_avx512_ocb_dec: /* input: * %rdi: ctx, CTX * %rsi: dst (64 blocks) * %rdx: src (64 blocks) * %rcx: offset * %r8 : checksum * %r9 : L pointers (void *L[64]) */ CFI_STARTPROC(); spec_stop_avx512; pushq %r12; CFI_PUSH(%r12); pushq %r13; CFI_PUSH(%r13); pushq %r14; CFI_PUSH(%r14); pushq %r15; CFI_PUSH(%r15); pushq %rbx; CFI_PUSH(%rbx); pushq %r8; CFI_PUSH(%r8); vmovdqu64 (%rcx), %xmm30; /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ /* C_i = Offset_i xor DECIPHER(K, P_i xor Offset_i) */ #define OCB_INPUT(n, l0reg, l1reg, l2reg, l3reg, zreg) \ vpxorq (l0reg), %xmm30, %xmm16; \ vpxorq (l1reg), %xmm16, %xmm30; \ vinserti64x2 $1, %xmm30, %ymm16, %ymm16; \ vpxorq (l2reg), %xmm30, %xmm30; \ vinserti64x2 $2, %xmm30, %zmm16, %zmm16; \ vpxorq (l3reg), %xmm30, %xmm30; \ vinserti64x2 $3, %xmm30, %zmm16, %zmm16; \ vpxorq (n * 64)(%rdx), %zmm16, zreg; \ vmovdqu64 %zmm16, (n * 64)(%rsi); #define OCB_LOAD_PTRS(n) \ movq ((n * 4 * 8) + (0 * 8))(%r9), %r10; \ movq ((n * 4 * 8) + (1 * 8))(%r9), %r11; \ movq ((n * 4 * 8) + (2 * 8))(%r9), %r12; \ movq ((n * 4 * 8) + (3 * 8))(%r9), %r13; \ movq ((n * 4 * 8) + (4 * 8))(%r9), %r14; \ movq ((n * 4 * 8) + (5 * 8))(%r9), %r15; \ movq ((n * 4 * 8) + (6 * 8))(%r9), %rax; \ movq ((n * 4 * 8) + (7 * 8))(%r9), %rbx; OCB_LOAD_PTRS(0); OCB_INPUT(0, %r10, %r11, %r12, %r13, %zmm15); OCB_INPUT(1, %r14, %r15, %rax, %rbx, %zmm14); OCB_LOAD_PTRS(2); OCB_INPUT(2, %r10, %r11, %r12, %r13, %zmm13); OCB_INPUT(3, %r14, %r15, %rax, %rbx, %zmm12); OCB_LOAD_PTRS(4); OCB_INPUT(4, %r10, %r11, %r12, %r13, %zmm11); OCB_INPUT(5, %r14, %r15, %rax, %rbx, %zmm10); OCB_LOAD_PTRS(6); OCB_INPUT(6, %r10, %r11, %r12, %r13, %zmm9); OCB_INPUT(7, %r14, %r15, %rax, %rbx, %zmm8); OCB_LOAD_PTRS(8); OCB_INPUT(8, %r10, %r11, %r12, %r13, %zmm7); OCB_INPUT(9, %r14, %r15, %rax, %rbx, %zmm6); OCB_LOAD_PTRS(10); OCB_INPUT(10, %r10, %r11, %r12, %r13, %zmm5); OCB_INPUT(11, %r14, %r15, %rax, %rbx, %zmm4); OCB_LOAD_PTRS(12); OCB_INPUT(12, %r10, %r11, %r12, %r13, %zmm3); OCB_INPUT(13, %r14, %r15, %rax, %rbx, %zmm2); OCB_LOAD_PTRS(14); OCB_INPUT(14, %r10, %r11, %r12, %r13, %zmm1); OCB_INPUT(15, %r14, %r15, %rax, %rbx, %zmm0); #undef OCB_LOAD_PTRS #undef OCB_INPUT vmovdqu64 %xmm30, (%rcx); cmpl $128, key_bitlength(CTX); movl $32, %r8d; movl $24, %eax; cmovel %eax, %r8d; /* max */ vpbroadcastq 
(key_table)(CTX, %r8, 8), %zmm16; vpshufb .Lpack_bswap rRIP, %zmm16, %zmm16; /* inpack64_pre: */ vpxorq %zmm0, %zmm16, %zmm0; vpxorq %zmm1, %zmm16, %zmm1; vpxorq %zmm2, %zmm16, %zmm2; vpxorq %zmm3, %zmm16, %zmm3; vpxorq %zmm4, %zmm16, %zmm4; vpxorq %zmm5, %zmm16, %zmm5; vpxorq %zmm6, %zmm16, %zmm6; vpxorq %zmm7, %zmm16, %zmm7; vpxorq %zmm8, %zmm16, %zmm8; vpxorq %zmm9, %zmm16, %zmm9; vpxorq %zmm10, %zmm16, %zmm10; vpxorq %zmm11, %zmm16, %zmm11; vpxorq %zmm12, %zmm16, %zmm12; vpxorq %zmm13, %zmm16, %zmm13; vpxorq %zmm14, %zmm16, %zmm14; vpxorq %zmm15, %zmm16, %zmm15; call __camellia_gfni_avx512_dec_blk64; vpxorq 0 * 64(%rsi), %zmm7, %zmm7; vpxorq 1 * 64(%rsi), %zmm6, %zmm6; vpxorq 2 * 64(%rsi), %zmm5, %zmm5; vpxorq 3 * 64(%rsi), %zmm4, %zmm4; vpxorq 4 * 64(%rsi), %zmm3, %zmm3; vpxorq 5 * 64(%rsi), %zmm2, %zmm2; vpxorq 6 * 64(%rsi), %zmm1, %zmm1; vpxorq 7 * 64(%rsi), %zmm0, %zmm0; vpxorq 8 * 64(%rsi), %zmm15, %zmm15; vpxorq 9 * 64(%rsi), %zmm14, %zmm14; vpxorq 10 * 64(%rsi), %zmm13, %zmm13; vpxorq 11 * 64(%rsi), %zmm12, %zmm12; vpxorq 12 * 64(%rsi), %zmm11, %zmm11; vpxorq 13 * 64(%rsi), %zmm10, %zmm10; vpxorq 14 * 64(%rsi), %zmm9, %zmm9; vpxorq 15 * 64(%rsi), %zmm8, %zmm8; write_output(%zmm7, %zmm6, %zmm5, %zmm4, %zmm3, %zmm2, %zmm1, %zmm0, %zmm15, %zmm14, %zmm13, %zmm12, %zmm11, %zmm10, %zmm9, %zmm8, %rsi); popq %r8; CFI_RESTORE(%r8); /* Checksum_i = Checksum_{i-1} xor C_i */ vpternlogq $0x96, %zmm7, %zmm6, %zmm5; vpternlogq $0x96, %zmm4, %zmm3, %zmm2; vpternlogq $0x96, %zmm1, %zmm0, %zmm15; vpternlogq $0x96, %zmm14, %zmm13, %zmm12; vpternlogq $0x96, %zmm11, %zmm10, %zmm9; vpternlogq $0x96, %zmm5, %zmm2, %zmm15; vpternlogq $0x96, %zmm12, %zmm9, %zmm8; vpxorq %zmm15, %zmm8, %zmm8; vextracti64x4 $1, %zmm8, %ymm0; vpxor %ymm0, %ymm8, %ymm8; vextracti128 $1, %ymm8, %xmm0; vpternlogq $0x96, (%r8), %xmm0, %xmm8; vmovdqu64 %xmm8, (%r8); clear_regs(); popq %rbx; CFI_RESTORE(%rbx); popq %r15; CFI_RESTORE(%r15); popq %r14; CFI_RESTORE(%r14); popq %r13; CFI_RESTORE(%r12); popq %r12; CFI_RESTORE(%r13); ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_camellia_gfni_avx512_ocb_dec,.-_gcry_camellia_gfni_avx512_ocb_dec;) .align 16 .globl _gcry_camellia_gfni_avx512_enc_blk64 ELF(.type _gcry_camellia_gfni_avx512_enc_blk64,@function;) _gcry_camellia_gfni_avx512_enc_blk64: /* input: * %rdi: ctx, CTX * %rsi: dst (64 blocks) * %rdx: src (64 blocks) */ CFI_STARTPROC(); spec_stop_avx512; cmpl $128, key_bitlength(CTX); movl $32, %r8d; movl $24, %eax; cmovel %eax, %r8d; /* max */ xorl %eax, %eax; vpbroadcastq (key_table)(CTX), %zmm0; vpshufb .Lpack_bswap rRIP, %zmm0, %zmm0; vpxorq (0) * 64(%rdx), %zmm0, %zmm15; vpxorq (1) * 64(%rdx), %zmm0, %zmm14; vpxorq (2) * 64(%rdx), %zmm0, %zmm13; vpxorq (3) * 64(%rdx), %zmm0, %zmm12; vpxorq (4) * 64(%rdx), %zmm0, %zmm11; vpxorq (5) * 64(%rdx), %zmm0, %zmm10; vpxorq (6) * 64(%rdx), %zmm0, %zmm9; vpxorq (7) * 64(%rdx), %zmm0, %zmm8; vpxorq (8) * 64(%rdx), %zmm0, %zmm7; vpxorq (9) * 64(%rdx), %zmm0, %zmm6; vpxorq (10) * 64(%rdx), %zmm0, %zmm5; vpxorq (11) * 64(%rdx), %zmm0, %zmm4; vpxorq (12) * 64(%rdx), %zmm0, %zmm3; vpxorq (13) * 64(%rdx), %zmm0, %zmm2; vpxorq (14) * 64(%rdx), %zmm0, %zmm1; vpxorq (15) * 64(%rdx), %zmm0, %zmm0; call __camellia_gfni_avx512_enc_blk64; vmovdqu64 %zmm7, (0) * 64(%rsi); vmovdqu64 %zmm6, (1) * 64(%rsi); vmovdqu64 %zmm5, (2) * 64(%rsi); vmovdqu64 %zmm4, (3) * 64(%rsi); vmovdqu64 %zmm3, (4) * 64(%rsi); vmovdqu64 %zmm2, (5) * 64(%rsi); vmovdqu64 %zmm1, (6) * 64(%rsi); vmovdqu64 %zmm0, (7) * 64(%rsi); vmovdqu64 %zmm15, (8) * 64(%rsi); vmovdqu64 %zmm14, (9) 
* 64(%rsi); vmovdqu64 %zmm13, (10) * 64(%rsi); vmovdqu64 %zmm12, (11) * 64(%rsi); vmovdqu64 %zmm11, (12) * 64(%rsi); vmovdqu64 %zmm10, (13) * 64(%rsi); vmovdqu64 %zmm9, (14) * 64(%rsi); vmovdqu64 %zmm8, (15) * 64(%rsi); clear_regs(); ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_camellia_gfni_avx512_enc_blk64,.-_gcry_camellia_gfni_avx512_enc_blk64;) .align 16 .globl _gcry_camellia_gfni_avx512_dec_blk64 ELF(.type _gcry_camellia_gfni_avx512_dec_blk64,@function;) _gcry_camellia_gfni_avx512_dec_blk64: /* input: * %rdi: ctx, CTX * %rsi: dst (64 blocks) * %rdx: src (64 blocks) */ CFI_STARTPROC(); spec_stop_avx512; cmpl $128, key_bitlength(CTX); movl $32, %r8d; movl $24, %eax; cmovel %eax, %r8d; /* max */ xorl %eax, %eax; vpbroadcastq (key_table)(CTX, %r8, 8), %zmm0; vpshufb .Lpack_bswap rRIP, %zmm0, %zmm0; vpxorq (0) * 64(%rdx), %zmm0, %zmm15; vpxorq (1) * 64(%rdx), %zmm0, %zmm14; vpxorq (2) * 64(%rdx), %zmm0, %zmm13; vpxorq (3) * 64(%rdx), %zmm0, %zmm12; vpxorq (4) * 64(%rdx), %zmm0, %zmm11; vpxorq (5) * 64(%rdx), %zmm0, %zmm10; vpxorq (6) * 64(%rdx), %zmm0, %zmm9; vpxorq (7) * 64(%rdx), %zmm0, %zmm8; vpxorq (8) * 64(%rdx), %zmm0, %zmm7; vpxorq (9) * 64(%rdx), %zmm0, %zmm6; vpxorq (10) * 64(%rdx), %zmm0, %zmm5; vpxorq (11) * 64(%rdx), %zmm0, %zmm4; vpxorq (12) * 64(%rdx), %zmm0, %zmm3; vpxorq (13) * 64(%rdx), %zmm0, %zmm2; vpxorq (14) * 64(%rdx), %zmm0, %zmm1; vpxorq (15) * 64(%rdx), %zmm0, %zmm0; call __camellia_gfni_avx512_dec_blk64; vmovdqu64 %zmm7, (0) * 64(%rsi); vmovdqu64 %zmm6, (1) * 64(%rsi); vmovdqu64 %zmm5, (2) * 64(%rsi); vmovdqu64 %zmm4, (3) * 64(%rsi); vmovdqu64 %zmm3, (4) * 64(%rsi); vmovdqu64 %zmm2, (5) * 64(%rsi); vmovdqu64 %zmm1, (6) * 64(%rsi); vmovdqu64 %zmm0, (7) * 64(%rsi); vmovdqu64 %zmm15, (8) * 64(%rsi); vmovdqu64 %zmm14, (9) * 64(%rsi); vmovdqu64 %zmm13, (10) * 64(%rsi); vmovdqu64 %zmm12, (11) * 64(%rsi); vmovdqu64 %zmm11, (12) * 64(%rsi); vmovdqu64 %zmm10, (13) * 64(%rsi); vmovdqu64 %zmm9, (14) * 64(%rsi); vmovdqu64 %zmm8, (15) * 64(%rsi); clear_regs(); ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_camellia_gfni_avx512_dec_blk64,.-_gcry_camellia_gfni_avx512_dec_blk64;) #endif /* defined(ENABLE_GFNI_SUPPORT) && defined(ENABLE_AVX512_SUPPORT) */ #endif /* __x86_64 */ diff --git a/cipher/chacha20-amd64-avx2.S b/cipher/chacha20-amd64-avx2.S index 407d651f..54e2ffab 100644 --- a/cipher/chacha20-amd64-avx2.S +++ b/cipher/chacha20-amd64-avx2.S @@ -1,601 +1,604 @@ /* chacha20-amd64-avx2.S - AVX2 implementation of ChaCha20 cipher * * Copyright (C) 2017-2019 Jussi Kivilinna * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . */ /* * Based on D. J. Bernstein reference implementation at * http://cr.yp.to/chacha.html: * * chacha-regs.c version 20080118 * D. J. Bernstein * Public domain. 
*/ #ifdef __x86_64 #include #if defined(HAVE_GCC_INLINE_ASM_AVX2) && \ (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) -.text - #include "asm-common-amd64.h" #include "asm-poly1305-amd64.h" /* register macros */ #define INPUT %rdi #define DST %rsi #define SRC %rdx #define NBLKS %rcx #define ROUND %eax /* stack structure */ #define STACK_VEC_X12 (32) #define STACK_VEC_X13 (32 + STACK_VEC_X12) #define STACK_TMP (32 + STACK_VEC_X13) #define STACK_TMP1 (32 + STACK_TMP) #define STACK_MAX (32 + STACK_TMP1) /* vector registers */ #define X0 %ymm0 #define X1 %ymm1 #define X2 %ymm2 #define X3 %ymm3 #define X4 %ymm4 #define X5 %ymm5 #define X6 %ymm6 #define X7 %ymm7 #define X8 %ymm8 #define X9 %ymm9 #define X10 %ymm10 #define X11 %ymm11 #define X12 %ymm12 #define X13 %ymm13 #define X14 %ymm14 #define X15 %ymm15 #define X0h %xmm0 #define X1h %xmm1 #define X2h %xmm2 #define X3h %xmm3 #define X4h %xmm4 #define X5h %xmm5 #define X6h %xmm6 #define X7h %xmm7 #define X8h %xmm8 #define X9h %xmm9 #define X10h %xmm10 #define X11h %xmm11 #define X12h %xmm12 #define X13h %xmm13 #define X14h %xmm14 #define X15h %xmm15 /********************************************************************** helper macros **********************************************************************/ /* 4x4 32-bit integer matrix transpose */ #define transpose_4x4(x0,x1,x2,x3,t1,t2) \ vpunpckhdq x1, x0, t2; \ vpunpckldq x1, x0, x0; \ \ vpunpckldq x3, x2, t1; \ vpunpckhdq x3, x2, x2; \ \ vpunpckhqdq t1, x0, x1; \ vpunpcklqdq t1, x0, x0; \ \ vpunpckhqdq x2, t2, x3; \ vpunpcklqdq x2, t2, x2; /* 2x2 128-bit matrix transpose */ #define transpose_16byte_2x2(x0,x1,t1) \ vmovdqa x0, t1; \ vperm2i128 $0x20, x1, x0, x0; \ vperm2i128 $0x31, x1, t1, x1; /* xor register with unaligned src and save to unaligned dst */ #define xor_src_dst(dst, src, offset, xreg) \ vpxor offset(src), xreg, xreg; \ vmovdqu xreg, offset(dst); /********************************************************************** 8-way chacha20 **********************************************************************/ #define ROTATE2(v1,v2,c,tmp) \ vpsrld $(32 - (c)), v1, tmp; \ vpslld $(c), v1, v1; \ vpaddb tmp, v1, v1; \ vpsrld $(32 - (c)), v2, tmp; \ vpslld $(c), v2, v2; \ vpaddb tmp, v2, v2; #define ROTATE_SHUF_2(v1,v2,shuf) \ vpshufb shuf, v1, v1; \ vpshufb shuf, v2, v2; #define XOR(ds,s) \ vpxor s, ds, ds; #define PLUS(ds,s) \ vpaddd s, ds, ds; #define QUARTERROUND2(a1,b1,c1,d1,a2,b2,c2,d2,ign,tmp1,\ interleave_op1,interleave_op2,\ interleave_op3,interleave_op4) \ vbroadcasti128 .Lshuf_rol16 rRIP, tmp1; \ interleave_op1; \ PLUS(a1,b1); PLUS(a2,b2); XOR(d1,a1); XOR(d2,a2); \ ROTATE_SHUF_2(d1, d2, tmp1); \ interleave_op2; \ PLUS(c1,d1); PLUS(c2,d2); XOR(b1,c1); XOR(b2,c2); \ ROTATE2(b1, b2, 12, tmp1); \ vbroadcasti128 .Lshuf_rol8 rRIP, tmp1; \ interleave_op3; \ PLUS(a1,b1); PLUS(a2,b2); XOR(d1,a1); XOR(d2,a2); \ ROTATE_SHUF_2(d1, d2, tmp1); \ interleave_op4; \ PLUS(c1,d1); PLUS(c2,d2); XOR(b1,c1); XOR(b2,c2); \ ROTATE2(b1, b2, 7, tmp1); +SECTION_RODATA + +ELF(.type _chacha20_avx2_data,@object;) .align 32 -chacha20_data: +_chacha20_avx2_data: .Lshuf_rol16: .byte 2,3,0,1,6,7,4,5,10,11,8,9,14,15,12,13 .Lshuf_rol8: .byte 3,0,1,2,7,4,5,6,11,8,9,10,15,12,13,14 .Linc_counter: .byte 0,1,2,3,4,5,6,7 .Lunsigned_cmp: .long 0x80000000 +.text + .align 16 .globl _gcry_chacha20_amd64_avx2_blocks8 ELF(.type _gcry_chacha20_amd64_avx2_blocks8,@function;) _gcry_chacha20_amd64_avx2_blocks8: /* input: * %rdi: input * %rsi: dst * %rdx: src * %rcx: nblks 
(multiple of 8) */ CFI_STARTPROC(); vzeroupper; pushq %rbp; CFI_PUSH(%rbp); movq %rsp, %rbp; CFI_DEF_CFA_REGISTER(%rbp); subq $STACK_MAX, %rsp; andq $~31, %rsp; .Loop8: mov $20, ROUND; /* Construct counter vectors X12 and X13 */ vpmovzxbd .Linc_counter rRIP, X0; vpbroadcastd .Lunsigned_cmp rRIP, X2; vpbroadcastd (12 * 4)(INPUT), X12; vpbroadcastd (13 * 4)(INPUT), X13; vpaddd X0, X12, X12; vpxor X2, X0, X0; vpxor X2, X12, X1; vpcmpgtd X1, X0, X0; vpsubd X0, X13, X13; vmovdqa X12, (STACK_VEC_X12)(%rsp); vmovdqa X13, (STACK_VEC_X13)(%rsp); /* Load vectors */ vpbroadcastd (0 * 4)(INPUT), X0; vpbroadcastd (1 * 4)(INPUT), X1; vpbroadcastd (2 * 4)(INPUT), X2; vpbroadcastd (3 * 4)(INPUT), X3; vpbroadcastd (4 * 4)(INPUT), X4; vpbroadcastd (5 * 4)(INPUT), X5; vpbroadcastd (6 * 4)(INPUT), X6; vpbroadcastd (7 * 4)(INPUT), X7; vpbroadcastd (8 * 4)(INPUT), X8; vpbroadcastd (9 * 4)(INPUT), X9; vpbroadcastd (10 * 4)(INPUT), X10; vpbroadcastd (11 * 4)(INPUT), X11; vpbroadcastd (14 * 4)(INPUT), X14; vpbroadcastd (15 * 4)(INPUT), X15; vmovdqa X15, (STACK_TMP)(%rsp); .Lround2: QUARTERROUND2(X0, X4, X8, X12, X1, X5, X9, X13, tmp:=,X15,,,,) vmovdqa (STACK_TMP)(%rsp), X15; vmovdqa X8, (STACK_TMP)(%rsp); QUARTERROUND2(X2, X6, X10, X14, X3, X7, X11, X15, tmp:=,X8,,,,) QUARTERROUND2(X0, X5, X10, X15, X1, X6, X11, X12, tmp:=,X8,,,,) vmovdqa (STACK_TMP)(%rsp), X8; vmovdqa X15, (STACK_TMP)(%rsp); QUARTERROUND2(X2, X7, X8, X13, X3, X4, X9, X14, tmp:=,X15,,,,) sub $2, ROUND; jnz .Lround2; vmovdqa X8, (STACK_TMP1)(%rsp); /* tmp := X15 */ vpbroadcastd (0 * 4)(INPUT), X15; PLUS(X0, X15); vpbroadcastd (1 * 4)(INPUT), X15; PLUS(X1, X15); vpbroadcastd (2 * 4)(INPUT), X15; PLUS(X2, X15); vpbroadcastd (3 * 4)(INPUT), X15; PLUS(X3, X15); vpbroadcastd (4 * 4)(INPUT), X15; PLUS(X4, X15); vpbroadcastd (5 * 4)(INPUT), X15; PLUS(X5, X15); vpbroadcastd (6 * 4)(INPUT), X15; PLUS(X6, X15); vpbroadcastd (7 * 4)(INPUT), X15; PLUS(X7, X15); transpose_4x4(X0, X1, X2, X3, X8, X15); transpose_4x4(X4, X5, X6, X7, X8, X15); vmovdqa (STACK_TMP1)(%rsp), X8; transpose_16byte_2x2(X0, X4, X15); transpose_16byte_2x2(X1, X5, X15); transpose_16byte_2x2(X2, X6, X15); transpose_16byte_2x2(X3, X7, X15); vmovdqa (STACK_TMP)(%rsp), X15; xor_src_dst(DST, SRC, (64 * 0 + 16 * 0), X0); xor_src_dst(DST, SRC, (64 * 1 + 16 * 0), X1); vpbroadcastd (8 * 4)(INPUT), X0; PLUS(X8, X0); vpbroadcastd (9 * 4)(INPUT), X0; PLUS(X9, X0); vpbroadcastd (10 * 4)(INPUT), X0; PLUS(X10, X0); vpbroadcastd (11 * 4)(INPUT), X0; PLUS(X11, X0); vmovdqa (STACK_VEC_X12)(%rsp), X0; PLUS(X12, X0); vmovdqa (STACK_VEC_X13)(%rsp), X0; PLUS(X13, X0); vpbroadcastd (14 * 4)(INPUT), X0; PLUS(X14, X0); vpbroadcastd (15 * 4)(INPUT), X0; PLUS(X15, X0); xor_src_dst(DST, SRC, (64 * 2 + 16 * 0), X2); xor_src_dst(DST, SRC, (64 * 3 + 16 * 0), X3); /* Update counter */ addq $8, (12 * 4)(INPUT); transpose_4x4(X8, X9, X10, X11, X0, X1); transpose_4x4(X12, X13, X14, X15, X0, X1); xor_src_dst(DST, SRC, (64 * 4 + 16 * 0), X4); xor_src_dst(DST, SRC, (64 * 5 + 16 * 0), X5); transpose_16byte_2x2(X8, X12, X0); transpose_16byte_2x2(X9, X13, X0); transpose_16byte_2x2(X10, X14, X0); transpose_16byte_2x2(X11, X15, X0); xor_src_dst(DST, SRC, (64 * 6 + 16 * 0), X6); xor_src_dst(DST, SRC, (64 * 7 + 16 * 0), X7); xor_src_dst(DST, SRC, (64 * 0 + 16 * 2), X8); xor_src_dst(DST, SRC, (64 * 1 + 16 * 2), X9); xor_src_dst(DST, SRC, (64 * 2 + 16 * 2), X10); xor_src_dst(DST, SRC, (64 * 3 + 16 * 2), X11); xor_src_dst(DST, SRC, (64 * 4 + 16 * 2), X12); xor_src_dst(DST, SRC, (64 * 5 + 16 * 2), X13); xor_src_dst(DST, SRC, (64 * 6 + 16 
* 2), X14); xor_src_dst(DST, SRC, (64 * 7 + 16 * 2), X15); sub $8, NBLKS; lea (8 * 64)(DST), DST; lea (8 * 64)(SRC), SRC; jnz .Loop8; /* clear the used vector registers and stack */ vpxor X0, X0, X0; vmovdqa X0, (STACK_VEC_X12)(%rsp); vmovdqa X0, (STACK_VEC_X13)(%rsp); vmovdqa X0, (STACK_TMP)(%rsp); vmovdqa X0, (STACK_TMP1)(%rsp); vzeroall; /* eax zeroed by round loop. */ leave; CFI_LEAVE(); ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_chacha20_amd64_avx2_blocks8, .-_gcry_chacha20_amd64_avx2_blocks8;) /********************************************************************** 8-way stitched chacha20-poly1305 **********************************************************************/ #define _ /*_*/ .align 16 .globl _gcry_chacha20_poly1305_amd64_avx2_blocks8 ELF(.type _gcry_chacha20_poly1305_amd64_avx2_blocks8,@function;) _gcry_chacha20_poly1305_amd64_avx2_blocks8: /* input: * %rdi: input * %rsi: dst * %rdx: src * %rcx: nblks (multiple of 8) * %r9: poly1305-state * %r8: poly1305-src */ CFI_STARTPROC(); pushq %rbp; CFI_PUSH(%rbp); movq %rsp, %rbp; CFI_DEF_CFA_REGISTER(%rbp); vzeroupper; subq $(9 * 8) + STACK_MAX + 32, %rsp; andq $~31, %rsp; movq %rbx, (STACK_MAX + 0 * 8)(%rsp); movq %r12, (STACK_MAX + 1 * 8)(%rsp); movq %r13, (STACK_MAX + 2 * 8)(%rsp); movq %r14, (STACK_MAX + 3 * 8)(%rsp); movq %r15, (STACK_MAX + 4 * 8)(%rsp); CFI_REG_ON_STACK(rbx, STACK_MAX + 0 * 8); CFI_REG_ON_STACK(r12, STACK_MAX + 1 * 8); CFI_REG_ON_STACK(r13, STACK_MAX + 2 * 8); CFI_REG_ON_STACK(r14, STACK_MAX + 3 * 8); CFI_REG_ON_STACK(r15, STACK_MAX + 4 * 8); movq %rdx, (STACK_MAX + 5 * 8)(%rsp); # SRC movq %rsi, (STACK_MAX + 6 * 8)(%rsp); # DST movq %rcx, (STACK_MAX + 7 * 8)(%rsp); # NBLKS /* Load state */ POLY1305_LOAD_STATE(); .Loop_poly8: /* Construct counter vectors X12 and X13 */ vpmovzxbd .Linc_counter rRIP, X0; vpbroadcastd .Lunsigned_cmp rRIP, X2; vpbroadcastd (12 * 4)(INPUT), X12; vpbroadcastd (13 * 4)(INPUT), X13; vpaddd X0, X12, X12; vpxor X2, X0, X0; vpxor X2, X12, X1; vpcmpgtd X1, X0, X0; vpsubd X0, X13, X13; vmovdqa X12, (STACK_VEC_X12)(%rsp); vmovdqa X13, (STACK_VEC_X13)(%rsp); /* Load vectors */ vpbroadcastd (0 * 4)(INPUT), X0; vpbroadcastd (1 * 4)(INPUT), X1; vpbroadcastd (2 * 4)(INPUT), X2; vpbroadcastd (3 * 4)(INPUT), X3; vpbroadcastd (4 * 4)(INPUT), X4; vpbroadcastd (5 * 4)(INPUT), X5; vpbroadcastd (6 * 4)(INPUT), X6; vpbroadcastd (7 * 4)(INPUT), X7; vpbroadcastd (8 * 4)(INPUT), X8; vpbroadcastd (9 * 4)(INPUT), X9; vpbroadcastd (10 * 4)(INPUT), X10; vpbroadcastd (11 * 4)(INPUT), X11; vpbroadcastd (14 * 4)(INPUT), X14; vpbroadcastd (15 * 4)(INPUT), X15; vmovdqa X15, (STACK_TMP)(%rsp); /* Process eight ChaCha20 blocks and 32 Poly1305 blocks. 
*/ movl $20, (STACK_MAX + 8 * 8 + 4)(%rsp); .Lround8_with_poly1305_outer: movl $6, (STACK_MAX + 8 * 8)(%rsp); .Lround8_with_poly1305_inner1: /* rounds 0-5 & 10-15 */ POLY1305_BLOCK_PART1(0 * 16) QUARTERROUND2(X0, X4, X8, X12, X1, X5, X9, X13, tmp:=,X15, POLY1305_BLOCK_PART2(), POLY1305_BLOCK_PART3(), POLY1305_BLOCK_PART4(), POLY1305_BLOCK_PART5()) vmovdqa (STACK_TMP)(%rsp), X15; vmovdqa X8, (STACK_TMP)(%rsp); POLY1305_BLOCK_PART1(1 * 16) QUARTERROUND2(X2, X6, X10, X14, X3, X7, X11, X15, tmp:=,X8, POLY1305_BLOCK_PART2(), POLY1305_BLOCK_PART3(), POLY1305_BLOCK_PART4(), POLY1305_BLOCK_PART5()) POLY1305_BLOCK_PART1(2 * 16) QUARTERROUND2(X0, X5, X10, X15, X1, X6, X11, X12, tmp:=,X8, POLY1305_BLOCK_PART2(), POLY1305_BLOCK_PART3(), POLY1305_BLOCK_PART4(), POLY1305_BLOCK_PART5()) vmovdqa (STACK_TMP)(%rsp), X8; vmovdqa X15, (STACK_TMP)(%rsp); POLY1305_BLOCK_PART1(3 * 16) lea (4 * 16)(POLY_RSRC), POLY_RSRC; QUARTERROUND2(X2, X7, X8, X13, X3, X4, X9, X14, tmp:=,X15, POLY1305_BLOCK_PART2(), POLY1305_BLOCK_PART3(), POLY1305_BLOCK_PART4(), POLY1305_BLOCK_PART5()) subl $2, (STACK_MAX + 8 * 8)(%rsp); jnz .Lround8_with_poly1305_inner1; movl $4, (STACK_MAX + 8 * 8)(%rsp); .Lround8_with_poly1305_inner2: /* rounds 6-9 & 16-19 */ POLY1305_BLOCK_PART1(0 * 16) QUARTERROUND2(X0, X4, X8, X12, X1, X5, X9, X13, tmp:=,X15, POLY1305_BLOCK_PART2(), _, POLY1305_BLOCK_PART3(), _) vmovdqa (STACK_TMP)(%rsp), X15; vmovdqa X8, (STACK_TMP)(%rsp); QUARTERROUND2(X2, X6, X10, X14, X3, X7, X11, X15, tmp:=,X8, _, POLY1305_BLOCK_PART4(), _, POLY1305_BLOCK_PART5()) POLY1305_BLOCK_PART1(1 * 16); lea (2 * 16)(POLY_RSRC), POLY_RSRC; QUARTERROUND2(X0, X5, X10, X15, X1, X6, X11, X12, tmp:=,X8, _, POLY1305_BLOCK_PART2(), _, POLY1305_BLOCK_PART3()) vmovdqa (STACK_TMP)(%rsp), X8; vmovdqa X15, (STACK_TMP)(%rsp); QUARTERROUND2(X2, X7, X8, X13, X3, X4, X9, X14, tmp:=,X15, POLY1305_BLOCK_PART4(), _, POLY1305_BLOCK_PART5(), _) subl $2, (STACK_MAX + 8 * 8)(%rsp); jnz .Lround8_with_poly1305_inner2; subl $10, (STACK_MAX + 8 * 8 + 4)(%rsp); jnz .Lround8_with_poly1305_outer; movq (STACK_MAX + 5 * 8)(%rsp), SRC; movq (STACK_MAX + 6 * 8)(%rsp), DST; vmovdqa X8, (STACK_TMP1)(%rsp); /* tmp := X15 */ vpbroadcastd (0 * 4)(INPUT), X15; PLUS(X0, X15); vpbroadcastd (1 * 4)(INPUT), X15; PLUS(X1, X15); vpbroadcastd (2 * 4)(INPUT), X15; PLUS(X2, X15); vpbroadcastd (3 * 4)(INPUT), X15; PLUS(X3, X15); vpbroadcastd (4 * 4)(INPUT), X15; PLUS(X4, X15); vpbroadcastd (5 * 4)(INPUT), X15; PLUS(X5, X15); vpbroadcastd (6 * 4)(INPUT), X15; PLUS(X6, X15); vpbroadcastd (7 * 4)(INPUT), X15; PLUS(X7, X15); transpose_4x4(X0, X1, X2, X3, X8, X15); transpose_4x4(X4, X5, X6, X7, X8, X15); vmovdqa (STACK_TMP1)(%rsp), X8; transpose_16byte_2x2(X0, X4, X15); transpose_16byte_2x2(X1, X5, X15); transpose_16byte_2x2(X2, X6, X15); transpose_16byte_2x2(X3, X7, X15); vmovdqa (STACK_TMP)(%rsp), X15; xor_src_dst(DST, SRC, (64 * 0 + 16 * 0), X0); xor_src_dst(DST, SRC, (64 * 1 + 16 * 0), X1); vpbroadcastd (8 * 4)(INPUT), X0; PLUS(X8, X0); vpbroadcastd (9 * 4)(INPUT), X0; PLUS(X9, X0); vpbroadcastd (10 * 4)(INPUT), X0; PLUS(X10, X0); vpbroadcastd (11 * 4)(INPUT), X0; PLUS(X11, X0); vmovdqa (STACK_VEC_X12)(%rsp), X0; PLUS(X12, X0); vmovdqa (STACK_VEC_X13)(%rsp), X0; PLUS(X13, X0); vpbroadcastd (14 * 4)(INPUT), X0; PLUS(X14, X0); vpbroadcastd (15 * 4)(INPUT), X0; PLUS(X15, X0); xor_src_dst(DST, SRC, (64 * 2 + 16 * 0), X2); xor_src_dst(DST, SRC, (64 * 3 + 16 * 0), X3); /* Update counter */ addq $8, (12 * 4)(INPUT); transpose_4x4(X8, X9, X10, X11, X0, X1); transpose_4x4(X12, X13, X14, X15, X0, X1); 
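Editorial note, not part of the patch: the "Construct counter vectors X12 and X13" sequence used by both AVX2 entry points adds the lane index to state word 12 and propagates a 32-bit wrap into word 13; since vpcmpgtd only compares signed values, both operands are first biased with .Lunsigned_cmp (0x80000000). A minimal C sketch of that logic, with an illustrative function name:

    #include <stdint.h>

    /* Illustrative only: derive the counter words for 'nlanes' consecutive
     * ChaCha20 blocks the way the vector code does.  Word 12 gets the lane
     * index added; a wrap past 2^32 is detected with a biased signed
     * compare and carried into word 13. */
    static void chacha_block_counters(const uint32_t state[16], int nlanes,
                                      uint32_t ctr_lo[], uint32_t ctr_hi[])
    {
      for (int lane = 0; lane < nlanes; lane++)
        {
          uint32_t lo = state[12] + (uint32_t)lane;        /* vpaddd   */
          /* Biasing both sides by 0x80000000 turns the signed vpcmpgtd
           * into the unsigned test "lane > lo", which is true exactly
           * when the addition wrapped; the result is an all-ones mask. */
          uint32_t wrapped = ((uint32_t)lane > lo) ? 0xffffffffu : 0u;
          ctr_lo[lane] = lo;                               /* X12 lane */
          ctr_hi[lane] = state[13] - wrapped;              /* vpsubd   */
        }
    }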
xor_src_dst(DST, SRC, (64 * 4 + 16 * 0), X4); xor_src_dst(DST, SRC, (64 * 5 + 16 * 0), X5); transpose_16byte_2x2(X8, X12, X0); transpose_16byte_2x2(X9, X13, X0); transpose_16byte_2x2(X10, X14, X0); transpose_16byte_2x2(X11, X15, X0); xor_src_dst(DST, SRC, (64 * 6 + 16 * 0), X6); xor_src_dst(DST, SRC, (64 * 7 + 16 * 0), X7); xor_src_dst(DST, SRC, (64 * 0 + 16 * 2), X8); xor_src_dst(DST, SRC, (64 * 1 + 16 * 2), X9); xor_src_dst(DST, SRC, (64 * 2 + 16 * 2), X10); xor_src_dst(DST, SRC, (64 * 3 + 16 * 2), X11); xor_src_dst(DST, SRC, (64 * 4 + 16 * 2), X12); xor_src_dst(DST, SRC, (64 * 5 + 16 * 2), X13); xor_src_dst(DST, SRC, (64 * 6 + 16 * 2), X14); xor_src_dst(DST, SRC, (64 * 7 + 16 * 2), X15); subq $8, (STACK_MAX + 7 * 8)(%rsp); # NBLKS lea (8 * 64)(DST), DST; lea (8 * 64)(SRC), SRC; movq SRC, (STACK_MAX + 5 * 8)(%rsp); movq DST, (STACK_MAX + 6 * 8)(%rsp); jnz .Loop_poly8; /* Store state */ POLY1305_STORE_STATE(); /* clear the used vector registers and stack */ vpxor X0, X0, X0; vmovdqa X0, (STACK_VEC_X12)(%rsp); vmovdqa X0, (STACK_VEC_X13)(%rsp); vmovdqa X0, (STACK_TMP)(%rsp); vmovdqa X0, (STACK_TMP1)(%rsp); vzeroall; movq (STACK_MAX + 0 * 8)(%rsp), %rbx; movq (STACK_MAX + 1 * 8)(%rsp), %r12; movq (STACK_MAX + 2 * 8)(%rsp), %r13; movq (STACK_MAX + 3 * 8)(%rsp), %r14; movq (STACK_MAX + 4 * 8)(%rsp), %r15; CFI_RESTORE(%rbx); CFI_RESTORE(%r12); CFI_RESTORE(%r13); CFI_RESTORE(%r14); CFI_RESTORE(%r15); xorl %eax, %eax; leave; CFI_LEAVE(); ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_chacha20_poly1305_amd64_avx2_blocks8, .-_gcry_chacha20_poly1305_amd64_avx2_blocks8;) #endif /*defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS)*/ #endif /*__x86_64*/ diff --git a/cipher/chacha20-amd64-avx512.S b/cipher/chacha20-amd64-avx512.S index 4b183528..2d140815 100644 --- a/cipher/chacha20-amd64-avx512.S +++ b/cipher/chacha20-amd64-avx512.S @@ -1,734 +1,736 @@ /* chacha20-amd64-avx512.S - AVX512 implementation of ChaCha20 cipher * * Copyright (C) 2022 Jussi Kivilinna * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . */ /* * Based on D. J. Bernstein reference implementation at * http://cr.yp.to/chacha.html: * * chacha-regs.c version 20080118 * D. J. Bernstein * Public domain. 
*/ #ifdef __x86_64 #include #if defined(HAVE_GCC_INLINE_ASM_AVX512) && \ (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) -.text - #include "asm-common-amd64.h" /* register macros */ #define INPUT %rdi #define DST %rsi #define SRC %rdx #define NBLKS %rcx #define ROUND %eax /* vector registers */ #define X0 %zmm0 #define X1 %zmm1 #define X2 %zmm2 #define X3 %zmm3 #define X4 %zmm4 #define X5 %zmm5 #define X6 %zmm6 #define X7 %zmm7 #define X8 %zmm8 #define X9 %zmm9 #define X10 %zmm10 #define X11 %zmm11 #define X12 %zmm12 #define X13 %zmm13 #define X14 %zmm14 #define X15 %zmm15 #define X0y %ymm0 #define X1y %ymm1 #define X2y %ymm2 #define X3y %ymm3 #define X4y %ymm4 #define X5y %ymm5 #define X6y %ymm6 #define X7y %ymm7 #define X8y %ymm8 #define X9y %ymm9 #define X10y %ymm10 #define X11y %ymm11 #define X12y %ymm12 #define X13y %ymm13 #define X14y %ymm14 #define X15y %ymm15 #define X0x %xmm0 #define X1x %xmm1 #define X2x %xmm2 #define X3x %xmm3 #define X4x %xmm4 #define X5x %xmm5 #define X6x %xmm6 #define X7x %xmm7 #define X8x %xmm8 #define X9x %xmm9 #define X10x %xmm10 #define X11x %xmm11 #define X12x %xmm12 #define X13x %xmm13 #define X14x %xmm14 #define X15x %xmm15 #define TMP0 %zmm16 #define TMP1 %zmm17 #define TMP0y %ymm16 #define TMP1y %ymm17 #define TMP0x %xmm16 #define TMP1x %xmm17 #define COUNTER_ADD %zmm18 #define COUNTER_ADDy %ymm18 #define COUNTER_ADDx %xmm18 #define X12_SAVE %zmm19 #define X12_SAVEy %ymm19 #define X12_SAVEx %xmm19 #define X13_SAVE %zmm20 #define X13_SAVEy %ymm20 #define X13_SAVEx %xmm20 #define S0 %zmm21 #define S1 %zmm22 #define S2 %zmm23 #define S3 %zmm24 #define S4 %zmm25 #define S5 %zmm26 #define S6 %zmm27 #define S7 %zmm28 #define S8 %zmm29 #define S14 %zmm30 #define S15 %zmm31 #define S0y %ymm21 #define S1y %ymm22 #define S2y %ymm23 #define S3y %ymm24 #define S4y %ymm25 #define S5y %ymm26 #define S6y %ymm27 #define S7y %ymm28 #define S8y %ymm29 #define S14y %ymm30 #define S15y %ymm31 #define S0x %xmm21 #define S1x %xmm22 #define S2x %xmm23 #define S3x %xmm24 #define S4x %xmm25 #define S5x %xmm26 #define S6x %xmm27 #define S7x %xmm28 #define S8x %xmm29 #define S14x %xmm30 #define S15x %xmm31 /********************************************************************** helper macros **********************************************************************/ /* 4x4 32-bit integer matrix transpose */ #define transpose_4x4(x0,x1,x2,x3,t1,t2) \ vpunpckhdq x1, x0, t2; \ vpunpckldq x1, x0, x0; \ \ vpunpckldq x3, x2, t1; \ vpunpckhdq x3, x2, x2; \ \ vpunpckhqdq t1, x0, x1; \ vpunpcklqdq t1, x0, x0; \ \ vpunpckhqdq x2, t2, x3; \ vpunpcklqdq x2, t2, x2; /* 4x4 128-bit matrix transpose */ #define transpose_16byte_4x4(x0,x1,x2,x3,t1,t2) \ vshufi32x4 $0xee, x1, x0, t2; \ vshufi32x4 $0x44, x1, x0, x0; \ \ vshufi32x4 $0x44, x3, x2, t1; \ vshufi32x4 $0xee, x3, x2, x2; \ \ vshufi32x4 $0xdd, t1, x0, x1; \ vshufi32x4 $0x88, t1, x0, x0; \ \ vshufi32x4 $0xdd, x2, t2, x3; \ vshufi32x4 $0x88, x2, t2, x2; /* 2x2 128-bit matrix transpose */ #define transpose_16byte_2x2(x0,x1,t1) \ vmovdqa32 x0, t1; \ vshufi32x4 $0x0, x1, x0, x0; \ vshufi32x4 $0x3, x1, t1, x1; #define xor_src_dst_4x4(dst, src, offset, add, x0, x4, x8, x12) \ vpxord (offset + 0 * (add))(src), x0, x0; \ vpxord (offset + 1 * (add))(src), x4, x4; \ vpxord (offset + 2 * (add))(src), x8, x8; \ vpxord (offset + 3 * (add))(src), x12, x12; \ vmovdqu32 x0, (offset + 0 * (add))(dst); \ vmovdqu32 x4, (offset + 1 * (add))(dst); \ vmovdqu32 x8, (offset + 2 * (add))(dst); \ vmovdqu32 x12, 
(offset + 3 * (add))(dst); #define xor_src_dst(dst, src, offset, xreg) \ vpxord offset(src), xreg, xreg; \ vmovdqu32 xreg, offset(dst); #define clear_vec4(v0,v1,v2,v3) \ vpxord v0, v0, v0; \ vpxord v1, v1, v1; \ vpxord v2, v2, v2; \ vpxord v3, v3, v3; #define clear_zmm16_zmm31() \ clear_vec4(%ymm16, %ymm20, %ymm24, %ymm28); \ clear_vec4(%ymm17, %ymm21, %ymm25, %ymm29); \ clear_vec4(%ymm18, %ymm22, %ymm26, %ymm30); \ clear_vec4(%ymm19, %ymm23, %ymm27, %ymm31); /********************************************************************** 16-way (zmm), 8-way (ymm), 4-way (xmm) chacha20 **********************************************************************/ #define ROTATE2(v1,v2,c) \ vprold $(c), v1, v1; \ vprold $(c), v2, v2; #define XOR(ds,s) \ vpxord s, ds, ds; #define PLUS(ds,s) \ vpaddd s, ds, ds; #define QUARTERROUND2V(a1,b1,c1,d1,a2,b2,c2,d2) \ PLUS(a1,b1); PLUS(a2,b2); XOR(d1,a1); XOR(d2,a2); \ ROTATE2(d1, d2, 16); \ PLUS(c1,d1); PLUS(c2,d2); XOR(b1,c1); XOR(b2,c2); \ ROTATE2(b1, b2, 12); \ PLUS(a1,b1); PLUS(a2,b2); XOR(d1,a1); XOR(d2,a2); \ ROTATE2(d1, d2, 8); \ PLUS(c1,d1); PLUS(c2,d2); XOR(b1,c1); XOR(b2,c2); \ ROTATE2(b1, b2, 7); /********************************************************************** 1-way/2-way (xmm) chacha20 **********************************************************************/ #define ROTATE(v1,c) \ vprold $(c), v1, v1; \ #define WORD_SHUF(v1,shuf) \ vpshufd $shuf, v1, v1; #define QUARTERROUND1H(x0,x1,x2,x3,shuf_x1,shuf_x2,shuf_x3) \ PLUS(x0, x1); XOR(x3, x0); ROTATE(x3, 16); \ PLUS(x2, x3); XOR(x1, x2); ROTATE(x1, 12); \ PLUS(x0, x1); XOR(x3, x0); ROTATE(x3, 8); \ PLUS(x2, x3); \ WORD_SHUF(x3, shuf_x3); \ XOR(x1, x2); \ WORD_SHUF(x2, shuf_x2); \ ROTATE(x1, 7); \ WORD_SHUF(x1, shuf_x1); #define QUARTERROUND2H(x0,x1,x2,x3,y0,y1,y2,y3,shuf_x1,shuf_x2,shuf_x3) \ PLUS(x0, x1); PLUS(y0, y1); XOR(x3, x0); XOR(y3, y0); \ ROTATE(x3, 16); ROTATE(y3, 16); \ PLUS(x2, x3); PLUS(y2, y3); XOR(x1, x2); XOR(y1, y2); \ ROTATE(x1, 12); ROTATE(y1, 12); \ PLUS(x0, x1); PLUS(y0, y1); XOR(x3, x0); XOR(y3, y0); \ ROTATE(x3, 8); ROTATE(y3, 8); \ PLUS(x2, x3); PLUS(y2, y3); \ WORD_SHUF(x3, shuf_x3); WORD_SHUF(y3, shuf_x3); \ XOR(x1, x2); XOR(y1, y2); \ WORD_SHUF(x2, shuf_x2); WORD_SHUF(y2, shuf_x2); \ ROTATE(x1, 7); ROTATE(y1, 7); \ WORD_SHUF(x1, shuf_x1); WORD_SHUF(y1, shuf_x1); +SECTION_RODATA + .align 64 ELF(.type _gcry_chacha20_amd64_avx512_data,@object;) _gcry_chacha20_amd64_avx512_data: .Lcounter_0_1_2_3: .Lcounter_0_1: .long 0,0,0,0 .Lone: .long 1,0,0,0 .Lcounter_2_3: .Ltwo: .long 2,0,0,0 .Lthree: .long 3,0,0,0 .Linc_counter: .byte 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 ELF(.size _gcry_chacha20_amd64_avx512_data,.-_gcry_chacha20_amd64_avx512_data) +.text + .align 16 .globl _gcry_chacha20_amd64_avx512_blocks ELF(.type _gcry_chacha20_amd64_avx512_blocks,@function;) _gcry_chacha20_amd64_avx512_blocks: /* input: * %rdi: input * %rsi: dst * %rdx: src * %rcx: nblks */ CFI_STARTPROC(); spec_stop_avx512; cmpq $4, NBLKS; jb .Lskip_vertical_handling; /* Load constants */ vpmovzxbd .Linc_counter rRIP, COUNTER_ADD; kxnorq %k1, %k1, %k1; cmpq $16, NBLKS; jae .Lprocess_16v; /* Preload state to YMM registers */ vpbroadcastd (0 * 4)(INPUT), S0y; vpbroadcastd (1 * 4)(INPUT), S1y; vpbroadcastd (2 * 4)(INPUT), S2y; vpbroadcastd (3 * 4)(INPUT), S3y; vpbroadcastd (4 * 4)(INPUT), S4y; vpbroadcastd (5 * 4)(INPUT), S5y; vpbroadcastd (6 * 4)(INPUT), S6y; vpbroadcastd (7 * 4)(INPUT), S7y; vpbroadcastd (8 * 4)(INPUT), S8y; vpbroadcastd (14 * 4)(INPUT), S14y; vpbroadcastd (15 * 4)(INPUT), S15y; jmp .Lskip16v; 
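Editorial note, not part of the patch: QUARTERROUND2V above is two copies of the standard ChaCha20 quarter round run across all SIMD lanes, and QUARTERROUND1H/QUARTERROUND2H additionally fold in the word shuffles that move between column and diagonal rounds. As a plain-C reference (helper names are illustrative):

    #include <stdint.h>

    static inline uint32_t rol32(uint32_t v, int c)
    {
      return (v << c) | (v >> (32 - c));   /* one vprold in the AVX512 code */
    }

    /* One ChaCha20 quarter round on four 32-bit state words. */
    static inline void quarterround(uint32_t *a, uint32_t *b,
                                    uint32_t *c, uint32_t *d)
    {
      *a += *b; *d ^= *a; *d = rol32(*d, 16);
      *c += *d; *b ^= *c; *b = rol32(*b, 12);
      *a += *b; *d ^= *a; *d = rol32(*d,  8);
      *c += *d; *b ^= *c; *b = rol32(*b,  7);
    }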
.align 16 .Lprocess_16v: /* Process 16 ChaCha20 blocks */ /* Preload state to ZMM registers */ vpbroadcastd (0 * 4)(INPUT), S0; vpbroadcastd (1 * 4)(INPUT), S1; vpbroadcastd (2 * 4)(INPUT), S2; vpbroadcastd (3 * 4)(INPUT), S3; vpbroadcastd (4 * 4)(INPUT), S4; vpbroadcastd (5 * 4)(INPUT), S5; vpbroadcastd (6 * 4)(INPUT), S6; vpbroadcastd (7 * 4)(INPUT), S7; vpbroadcastd (8 * 4)(INPUT), S8; vpbroadcastd (14 * 4)(INPUT), S14; vpbroadcastd (15 * 4)(INPUT), S15; movl $20, ROUND; subq $16, NBLKS; /* Construct counter vectors X12 and X13 */ vpmovm2d %k1, X9; vpaddd (12 * 4)(INPUT){1to16}, COUNTER_ADD, X12; vpbroadcastd (13 * 4)(INPUT), X13; vpcmpud $6, X12, COUNTER_ADD, %k2; vpsubd X9, X13, X13{%k2}; vmovdqa32 X12, X12_SAVE; vmovdqa32 X13, X13_SAVE; /* Load vectors */ vmovdqa32 S0, X0; vmovdqa32 S4, X4; vmovdqa32 S8, X8; vmovdqa32 S1, X1; vmovdqa32 S5, X5; vpbroadcastd (9 * 4)(INPUT), X9; QUARTERROUND2V(X0, X4, X8, X12, X1, X5, X9, X13) vmovdqa32 S2, X2; vmovdqa32 S6, X6; vpbroadcastd (10 * 4)(INPUT), X10; vmovdqa32 S14, X14; vmovdqa32 S3, X3; vmovdqa32 S7, X7; vpbroadcastd (11 * 4)(INPUT), X11; vmovdqa32 S15, X15; /* Update counter */ addq $16, (12 * 4)(INPUT); jmp .Lround2_entry_16v; .align 16 .Loop16v: movl $20, ROUND; subq $16, NBLKS; vmovdqa32 S0, X0; vmovdqa32 S4, X4; vmovdqa32 S8, X8; transpose_16byte_4x4(X1, X5, X9, X13, TMP0, TMP1); xor_src_dst_4x4(DST, SRC, (64 * 1), 256, X1, X5, X9, X13); vpmovm2d %k1, X9; vpaddd (12 * 4)(INPUT){1to16}, COUNTER_ADD, X12; vpbroadcastd (13 * 4)(INPUT), X13; vpcmpud $6, X12, COUNTER_ADD, %k2; vpsubd X9, X13, X13{%k2}; vmovdqa32 S1, X1; vmovdqa32 S5, X5; vpbroadcastd (9 * 4)(INPUT), X9; vmovdqa32 X12, X12_SAVE; vmovdqa32 X13, X13_SAVE; QUARTERROUND2V(X0, X4, X8, X12, X1, X5, X9, X13) transpose_16byte_4x4(X2, X6, X10, X14, TMP0, TMP1); xor_src_dst_4x4(DST, SRC, (64 * 2), 256, X2, X6, X10, X14); vmovdqa32 S2, X2; vmovdqa32 S6, X6; vpbroadcastd (10 * 4)(INPUT), X10; vmovdqa32 S14, X14; transpose_16byte_4x4(X3, X7, X11, X15, TMP0, TMP1); xor_src_dst_4x4(DST, SRC, (64 * 3), 256, X3, X7, X11, X15); leaq (16 * 64)(SRC), SRC; leaq (16 * 64)(DST), DST; vmovdqa32 S3, X3; vmovdqa32 S7, X7; vpbroadcastd (11 * 4)(INPUT), X11; vmovdqa32 S15, X15; /* Update counter */ addq $16, (12 * 4)(INPUT); jmp .Lround2_entry_16v; .align 16 .Lround2_16v: QUARTERROUND2V(X2, X7, X8, X13, X3, X4, X9, X14) QUARTERROUND2V(X0, X4, X8, X12, X1, X5, X9, X13) .align 16 .Lround2_entry_16v: QUARTERROUND2V(X2, X6, X10, X14, X3, X7, X11, X15) QUARTERROUND2V(X0, X5, X10, X15, X1, X6, X11, X12) subl $2, ROUND; jnz .Lround2_16v; PLUS(X0, S0); PLUS(X1, S1); QUARTERROUND2V(X2, X7, X8, X13, X3, X4, X9, X14) PLUS(X2, S2); PLUS(X3, S3); transpose_4x4(X0, X1, X2, X3, TMP0, TMP1); PLUS(X4, S4); PLUS(X5, S5); PLUS(X6, S6); PLUS(X7, S7); transpose_4x4(X4, X5, X6, X7, TMP0, TMP1); PLUS(X8, S8); PLUS(X9, (9 * 4)(INPUT){1to16}); PLUS(X10, (10 * 4)(INPUT){1to16}); PLUS(X11, (11 * 4)(INPUT){1to16}); transpose_4x4(X8, X9, X10, X11, TMP0, TMP1); PLUS(X12, X12_SAVE); PLUS(X13, X13_SAVE); PLUS(X14, S14); PLUS(X15, S15); transpose_4x4(X12, X13, X14, X15, TMP0, TMP1); transpose_16byte_4x4(X0, X4, X8, X12, TMP0, TMP1); xor_src_dst_4x4(DST, SRC, (64 * 0), 256, X0, X4, X8, X12); cmpq $16, NBLKS; jae .Loop16v; transpose_16byte_4x4(X1, X5, X9, X13, TMP0, TMP1); xor_src_dst_4x4(DST, SRC, (64 * 1), 256, X1, X5, X9, X13); transpose_16byte_4x4(X2, X6, X10, X14, TMP0, TMP1); xor_src_dst_4x4(DST, SRC, (64 * 2), 256, X2, X6, X10, X14); transpose_16byte_4x4(X3, X7, X11, X15, TMP0, TMP1); xor_src_dst_4x4(DST, SRC, (64 * 3), 256, X3, 
X7, X11, X15); leaq (16 * 64)(SRC), SRC; leaq (16 * 64)(DST), DST; .align 16 .Lskip16v: cmpq $8, NBLKS; jb .Lskip8v; /* Process 8 ChaCha20 blocks */ /* Construct counter vectors X12 and X13 */ vpmovm2d %k1, X9y; vpaddd (12 * 4)(INPUT){1to8}, COUNTER_ADDy, X12y; vpbroadcastd (13 * 4)(INPUT), X13y; vpcmpud $6, X12y, COUNTER_ADDy, %k2; vpsubd X9y, X13y, X13y{%k2}; vmovdqa32 X12y, X12_SAVEy; vmovdqa32 X13y, X13_SAVEy; /* Load vectors */ vmovdqa32 S0y, X0y; vmovdqa32 S4y, X4y; vmovdqa32 S8y, X8y; vmovdqa32 S1y, X1y; vmovdqa32 S5y, X5y; vpbroadcastd (9 * 4)(INPUT), X9y; vmovdqa32 S2y, X2y; vmovdqa32 S6y, X6y; vpbroadcastd (10 * 4)(INPUT), X10y; vmovdqa32 S14y, X14y; vmovdqa32 S3y, X3y; vmovdqa32 S7y, X7y; vpbroadcastd (11 * 4)(INPUT), X11y; vmovdqa32 S15y, X15y; /* Update counter */ addq $8, (12 * 4)(INPUT); movl $20, ROUND; subq $8, NBLKS; .align 16 .Lround2_8v: QUARTERROUND2V(X0y, X4y, X8y, X12y, X1y, X5y, X9y, X13y) QUARTERROUND2V(X2y, X6y, X10y, X14y, X3y, X7y, X11y, X15y) QUARTERROUND2V(X0y, X5y, X10y, X15y, X1y, X6y, X11y, X12y) QUARTERROUND2V(X2y, X7y, X8y, X13y, X3y, X4y, X9y, X14y) subl $2, ROUND; jnz .Lround2_8v; PLUS(X0y, S0y); PLUS(X1y, S1y); PLUS(X2y, S2y); PLUS(X3y, S3y); transpose_4x4(X0y, X1y, X2y, X3y, TMP0y, TMP1y); PLUS(X4y, S4y); PLUS(X5y, S5y); PLUS(X6y, S6y); PLUS(X7y, S7y); transpose_4x4(X4y, X5y, X6y, X7y, TMP0y, TMP1y); PLUS(X8y, S8y); transpose_16byte_2x2(X0y, X4y, TMP0y); PLUS(X9y, (9 * 4)(INPUT){1to8}); transpose_16byte_2x2(X1y, X5y, TMP0y); PLUS(X10y, (10 * 4)(INPUT){1to8}); transpose_16byte_2x2(X2y, X6y, TMP0y); PLUS(X11y, (11 * 4)(INPUT){1to8}); transpose_16byte_2x2(X3y, X7y, TMP0y); xor_src_dst_4x4(DST, SRC, (16 * 0), 64, X0y, X1y, X2y, X3y); transpose_4x4(X8y, X9y, X10y, X11y, TMP0y, TMP1y); PLUS(X12y, X12_SAVEy); PLUS(X13y, X13_SAVEy); PLUS(X14y, S14y); PLUS(X15y, S15y); xor_src_dst_4x4(DST, SRC, (16 * 16), 64, X4y, X5y, X6y, X7y); transpose_4x4(X12y, X13y, X14y, X15y, TMP0y, TMP1y); transpose_16byte_2x2(X8y, X12y, TMP0y); transpose_16byte_2x2(X9y, X13y, TMP0y); transpose_16byte_2x2(X10y, X14y, TMP0y); transpose_16byte_2x2(X11y, X15y, TMP0y); xor_src_dst_4x4(DST, SRC, (16 * 2), 64, X8y, X9y, X10y, X11y); xor_src_dst_4x4(DST, SRC, (16 * 18), 64, X12y, X13y, X14y, X15y); leaq (8 * 64)(SRC), SRC; leaq (8 * 64)(DST), DST; .align 16 .Lskip8v: cmpq $4, NBLKS; jb .Lskip4v; /* Process 4 ChaCha20 blocks */ /* Construct counter vectors X12 and X13 */ vpmovm2d %k1, X9x; vpaddd (12 * 4)(INPUT){1to4}, COUNTER_ADDx, X12x; vpbroadcastd (13 * 4)(INPUT), X13x; vpcmpud $6, X12x, COUNTER_ADDx, %k2; vpsubd X9x, X13x, X13x{%k2}; vmovdqa32 X12x, X12_SAVEx; vmovdqa32 X13x, X13_SAVEx; /* Load vectors */ vmovdqa32 S0x, X0x; vmovdqa32 S4x, X4x; vmovdqa32 S8x, X8x; vmovdqa32 S1x, X1x; vmovdqa32 S5x, X5x; vpbroadcastd (9 * 4)(INPUT), X9x; vmovdqa32 S2x, X2x; vmovdqa32 S6x, X6x; vpbroadcastd (10 * 4)(INPUT), X10x; vmovdqa32 S14x, X14x; vmovdqa32 S3x, X3x; vmovdqa32 S7x, X7x; vpbroadcastd (11 * 4)(INPUT), X11x; vmovdqa32 S15x, X15x; /* Update counter */ addq $4, (12 * 4)(INPUT); movl $20, ROUND; subq $4, NBLKS; .align 16 .Lround2_4v: QUARTERROUND2V(X0x, X4x, X8x, X12x, X1x, X5x, X9x, X13x) QUARTERROUND2V(X2x, X6x, X10x, X14x, X3x, X7x, X11x, X15x) QUARTERROUND2V(X0x, X5x, X10x, X15x, X1x, X6x, X11x, X12x) QUARTERROUND2V(X2x, X7x, X8x, X13x, X3x, X4x, X9x, X14x) subl $2, ROUND; jnz .Lround2_4v; PLUS(X0x, S0x); PLUS(X1x, S1x); PLUS(X2x, S2x); PLUS(X3x, S3x); transpose_4x4(X0x, X1x, X2x, X3x, TMP0x, TMP1x); PLUS(X4x, S4x); PLUS(X5x, S5x); PLUS(X6x, S6x); PLUS(X7x, S7x); xor_src_dst_4x4(DST, 
SRC, (16 * 0), 64, X0x, X1x, X2x, X3x); transpose_4x4(X4x, X5x, X6x, X7x, TMP0x, TMP1x); PLUS(X8x, S8x); PLUS(X9x, (9 * 4)(INPUT){1to4}); PLUS(X10x, (10 * 4)(INPUT){1to4}); PLUS(X11x, (11 * 4)(INPUT){1to4}); xor_src_dst_4x4(DST, SRC, (16 * 1), 64, X4x, X5x, X6x, X7x); transpose_4x4(X8x, X9x, X10x, X11x, TMP0x, TMP1x); PLUS(X12x, X12_SAVEx); PLUS(X13x, X13_SAVEx); PLUS(X14x, S14x); PLUS(X15x, S15x); xor_src_dst_4x4(DST, SRC, (16 * 2), 64, X8x, X9x, X10x, X11x); transpose_4x4(X12x, X13x, X14x, X15x, TMP0x, TMP1x); xor_src_dst_4x4(DST, SRC, (16 * 3), 64, X12x, X13x, X14x, X15x); leaq (4 * 64)(SRC), SRC; leaq (4 * 64)(DST), DST; .align 16 .Lskip4v: /* clear AVX512 registers */ kxorq %k2, %k2, %k2; vzeroupper; clear_zmm16_zmm31(); .align 16 .Lskip_vertical_handling: cmpq $0, NBLKS; je .Ldone; /* Load state */ vmovdqu (0 * 4)(INPUT), X10x; vmovdqu (4 * 4)(INPUT), X11x; vmovdqu (8 * 4)(INPUT), X12x; vmovdqu (12 * 4)(INPUT), X13x; /* Load constant */ vmovdqa .Lone rRIP, X4x; cmpq $1, NBLKS; je .Lhandle1; /* Process two ChaCha20 blocks (XMM) */ movl $20, ROUND; subq $2, NBLKS; vmovdqa X10x, X0x; vmovdqa X11x, X1x; vmovdqa X12x, X2x; vmovdqa X13x, X3x; vmovdqa X10x, X8x; vmovdqa X11x, X9x; vmovdqa X12x, X14x; vpaddq X4x, X13x, X15x; vmovdqa X15x, X7x; .align 16 .Lround2_2: QUARTERROUND2H(X0x, X1x, X2x, X3x, X8x, X9x, X14x, X15x, 0x39, 0x4e, 0x93); QUARTERROUND2H(X0x, X1x, X2x, X3x, X8x, X9x, X14x, X15x, 0x93, 0x4e, 0x39); subl $2, ROUND; jnz .Lround2_2; PLUS(X0x, X10x); PLUS(X1x, X11x); PLUS(X2x, X12x); PLUS(X3x, X13x); vpaddq .Ltwo rRIP, X13x, X13x; /* Update counter */ xor_src_dst_4x4(DST, SRC, 0 * 4, 4 * 4, X0x, X1x, X2x, X3x); PLUS(X8x, X10x); PLUS(X9x, X11x); PLUS(X14x, X12x); PLUS(X15x, X7x); xor_src_dst_4x4(DST, SRC, 16 * 4, 4 * 4, X8x, X9x, X14x, X15x); lea (2 * 64)(DST), DST; lea (2 * 64)(SRC), SRC; cmpq $0, NBLKS; je .Lskip1; .align 16 .Lhandle1: /* Process one ChaCha20 block (XMM) */ movl $20, ROUND; subq $1, NBLKS; vmovdqa X10x, X0x; vmovdqa X11x, X1x; vmovdqa X12x, X2x; vmovdqa X13x, X3x; .align 16 .Lround2_1: QUARTERROUND1H(X0x, X1x, X2x, X3x, 0x39, 0x4e, 0x93); QUARTERROUND1H(X0x, X1x, X2x, X3x, 0x93, 0x4e, 0x39); subl $2, ROUND; jnz .Lround2_1; PLUS(X0x, X10x); PLUS(X1x, X11x); PLUS(X2x, X12x); PLUS(X3x, X13x); vpaddq X4x, X13x, X13x; /* Update counter */ xor_src_dst_4x4(DST, SRC, 0 * 4, 4 * 4, X0x, X1x, X2x, X3x); .align 16 .Lskip1: /* Store counter */ vmovdqu X13x, (12 * 4)(INPUT); .align 16 .Ldone: vzeroall; /* clears ZMM0-ZMM15 */ xorl %eax, %eax; ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_chacha20_amd64_avx512_blocks, .-_gcry_chacha20_amd64_avx512_blocks;) #endif /*defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS)*/ #endif /*__x86_64*/ diff --git a/cipher/chacha20-amd64-ssse3.S b/cipher/chacha20-amd64-ssse3.S index 452d42e5..1ce5a8e6 100644 --- a/cipher/chacha20-amd64-ssse3.S +++ b/cipher/chacha20-amd64-ssse3.S @@ -1,1012 +1,1015 @@ /* chacha20-amd64-ssse3.S - SSSE3 implementation of ChaCha20 cipher * * Copyright (C) 2017-2019 Jussi Kivilinna * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . */ /* * Based on D. J. Bernstein reference implementation at * http://cr.yp.to/chacha.html: * * chacha-regs.c version 20080118 * D. J. Bernstein * Public domain. */ #ifdef __x86_64 #include #if defined(HAVE_GCC_INLINE_ASM_SSSE3) && \ (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) -.text - #include "asm-common-amd64.h" #include "asm-poly1305-amd64.h" /* register macros */ #define INPUT %rdi #define DST %rsi #define SRC %rdx #define NBLKS %rcx #define ROUND %eax /* stack structure */ #define STACK_VEC_X12 (16) #define STACK_VEC_X13 (16 + STACK_VEC_X12) #define STACK_TMP (16 + STACK_VEC_X13) #define STACK_TMP1 (16 + STACK_TMP) #define STACK_TMP2 (16 + STACK_TMP1) #define STACK_MAX (16 + STACK_TMP2) /* vector registers */ #define X0 %xmm0 #define X1 %xmm1 #define X2 %xmm2 #define X3 %xmm3 #define X4 %xmm4 #define X5 %xmm5 #define X6 %xmm6 #define X7 %xmm7 #define X8 %xmm8 #define X9 %xmm9 #define X10 %xmm10 #define X11 %xmm11 #define X12 %xmm12 #define X13 %xmm13 #define X14 %xmm14 #define X15 %xmm15 /********************************************************************** helper macros **********************************************************************/ /* 4x4 32-bit integer matrix transpose */ #define transpose_4x4(x0, x1, x2, x3, t1, t2, t3) \ movdqa x0, t2; \ punpckhdq x1, t2; \ punpckldq x1, x0; \ \ movdqa x2, t1; \ punpckldq x3, t1; \ punpckhdq x3, x2; \ \ movdqa x0, x1; \ punpckhqdq t1, x1; \ punpcklqdq t1, x0; \ \ movdqa t2, x3; \ punpckhqdq x2, x3; \ punpcklqdq x2, t2; \ movdqa t2, x2; /* fill xmm register with 32-bit value from memory */ #define pbroadcastd(mem32, xreg) \ movd mem32, xreg; \ pshufd $0, xreg, xreg; /* xor with unaligned memory operand */ #define pxor_u(umem128, xreg, t) \ movdqu umem128, t; \ pxor t, xreg; /* xor register with unaligned src and save to unaligned dst */ #define xor_src_dst(dst, src, offset, xreg, t) \ pxor_u(offset(src), xreg, t); \ movdqu xreg, offset(dst); #define clear(x) pxor x,x; /********************************************************************** 4-way chacha20 **********************************************************************/ #define ROTATE2(v1,v2,c,tmp1,tmp2) \ movdqa v1, tmp1; \ movdqa v2, tmp2; \ psrld $(32 - (c)), v1; \ pslld $(c), tmp1; \ paddb tmp1, v1; \ psrld $(32 - (c)), v2; \ pslld $(c), tmp2; \ paddb tmp2, v2; #define ROTATE_SHUF_2(v1,v2,shuf) \ pshufb shuf, v1; \ pshufb shuf, v2; #define XOR(ds,s) \ pxor s, ds; #define PLUS(ds,s) \ paddd s, ds; #define QUARTERROUND2(a1,b1,c1,d1,a2,b2,c2,d2,ign,tmp1,tmp2,\ interleave_op1,interleave_op2) \ movdqa .Lshuf_rol16 rRIP, tmp1; \ interleave_op1; \ PLUS(a1,b1); PLUS(a2,b2); XOR(d1,a1); XOR(d2,a2); \ ROTATE_SHUF_2(d1, d2, tmp1); \ PLUS(c1,d1); PLUS(c2,d2); XOR(b1,c1); XOR(b2,c2); \ ROTATE2(b1, b2, 12, tmp1, tmp2); \ movdqa .Lshuf_rol8 rRIP, tmp1; \ interleave_op2; \ PLUS(a1,b1); PLUS(a2,b2); XOR(d1,a1); XOR(d2,a2); \ ROTATE_SHUF_2(d1, d2, tmp1); \ PLUS(c1,d1); PLUS(c2,d2); XOR(b1,c1); XOR(b2,c2); \ ROTATE2(b1, b2, 7, tmp1, tmp2); -chacha20_data: +SECTION_RODATA + +ELF(.type _chacha20_ssse3_data,@object;) +_chacha20_ssse3_data: .align 16 .Lshuf_rol16: .byte 2,3,0,1,6,7,4,5,10,11,8,9,14,15,12,13 .Lshuf_rol8: .byte 3,0,1,2,7,4,5,6,11,8,9,10,15,12,13,14 .Lcounter1: .long 1,0,0,0 .Linc_counter: .long 0,1,2,3 .Lunsigned_cmp: .long 0x80000000,0x80000000,0x80000000,0x80000000 +.text + .align 16 .globl 
_gcry_chacha20_amd64_ssse3_blocks4 ELF(.type _gcry_chacha20_amd64_ssse3_blocks4,@function;) _gcry_chacha20_amd64_ssse3_blocks4: /* input: * %rdi: input * %rsi: dst * %rdx: src * %rcx: nblks (multiple of 4) */ CFI_STARTPROC(); pushq %rbp; CFI_PUSH(%rbp); movq %rsp, %rbp; CFI_DEF_CFA_REGISTER(%rbp); subq $STACK_MAX, %rsp; andq $~15, %rsp; .Loop4: mov $20, ROUND; /* Construct counter vectors X12 and X13 */ movdqa .Linc_counter rRIP, X0; movdqa .Lunsigned_cmp rRIP, X2; pbroadcastd((12 * 4)(INPUT), X12); pbroadcastd((13 * 4)(INPUT), X13); paddd X0, X12; movdqa X12, X1; pxor X2, X0; pxor X2, X1; pcmpgtd X1, X0; psubd X0, X13; movdqa X12, (STACK_VEC_X12)(%rsp); movdqa X13, (STACK_VEC_X13)(%rsp); /* Load vectors */ pbroadcastd((0 * 4)(INPUT), X0); pbroadcastd((1 * 4)(INPUT), X1); pbroadcastd((2 * 4)(INPUT), X2); pbroadcastd((3 * 4)(INPUT), X3); pbroadcastd((4 * 4)(INPUT), X4); pbroadcastd((5 * 4)(INPUT), X5); pbroadcastd((6 * 4)(INPUT), X6); pbroadcastd((7 * 4)(INPUT), X7); pbroadcastd((8 * 4)(INPUT), X8); pbroadcastd((9 * 4)(INPUT), X9); pbroadcastd((10 * 4)(INPUT), X10); pbroadcastd((11 * 4)(INPUT), X11); pbroadcastd((14 * 4)(INPUT), X14); pbroadcastd((15 * 4)(INPUT), X15); movdqa X11, (STACK_TMP)(%rsp); movdqa X15, (STACK_TMP1)(%rsp); .Lround2_4: QUARTERROUND2(X0, X4, X8, X12, X1, X5, X9, X13, tmp:=,X11,X15,,) movdqa (STACK_TMP)(%rsp), X11; movdqa (STACK_TMP1)(%rsp), X15; movdqa X8, (STACK_TMP)(%rsp); movdqa X9, (STACK_TMP1)(%rsp); QUARTERROUND2(X2, X6, X10, X14, X3, X7, X11, X15, tmp:=,X8,X9,,) QUARTERROUND2(X0, X5, X10, X15, X1, X6, X11, X12, tmp:=,X8,X9,,) movdqa (STACK_TMP)(%rsp), X8; movdqa (STACK_TMP1)(%rsp), X9; movdqa X11, (STACK_TMP)(%rsp); movdqa X15, (STACK_TMP1)(%rsp); QUARTERROUND2(X2, X7, X8, X13, X3, X4, X9, X14, tmp:=,X11,X15,,) sub $2, ROUND; jnz .Lround2_4; /* tmp := X15 */ movdqa (STACK_TMP)(%rsp), X11; pbroadcastd((0 * 4)(INPUT), X15); PLUS(X0, X15); pbroadcastd((1 * 4)(INPUT), X15); PLUS(X1, X15); pbroadcastd((2 * 4)(INPUT), X15); PLUS(X2, X15); pbroadcastd((3 * 4)(INPUT), X15); PLUS(X3, X15); pbroadcastd((4 * 4)(INPUT), X15); PLUS(X4, X15); pbroadcastd((5 * 4)(INPUT), X15); PLUS(X5, X15); pbroadcastd((6 * 4)(INPUT), X15); PLUS(X6, X15); pbroadcastd((7 * 4)(INPUT), X15); PLUS(X7, X15); pbroadcastd((8 * 4)(INPUT), X15); PLUS(X8, X15); pbroadcastd((9 * 4)(INPUT), X15); PLUS(X9, X15); pbroadcastd((10 * 4)(INPUT), X15); PLUS(X10, X15); pbroadcastd((11 * 4)(INPUT), X15); PLUS(X11, X15); movdqa (STACK_VEC_X12)(%rsp), X15; PLUS(X12, X15); movdqa (STACK_VEC_X13)(%rsp), X15; PLUS(X13, X15); movdqa X13, (STACK_TMP)(%rsp); pbroadcastd((14 * 4)(INPUT), X15); PLUS(X14, X15); movdqa (STACK_TMP1)(%rsp), X15; movdqa X14, (STACK_TMP1)(%rsp); pbroadcastd((15 * 4)(INPUT), X13); PLUS(X15, X13); movdqa X15, (STACK_TMP2)(%rsp); /* Update counter */ addq $4, (12 * 4)(INPUT); transpose_4x4(X0, X1, X2, X3, X13, X14, X15); xor_src_dst(DST, SRC, (64 * 0 + 16 * 0), X0, X15); xor_src_dst(DST, SRC, (64 * 1 + 16 * 0), X1, X15); xor_src_dst(DST, SRC, (64 * 2 + 16 * 0), X2, X15); xor_src_dst(DST, SRC, (64 * 3 + 16 * 0), X3, X15); transpose_4x4(X4, X5, X6, X7, X0, X1, X2); movdqa (STACK_TMP)(%rsp), X13; movdqa (STACK_TMP1)(%rsp), X14; movdqa (STACK_TMP2)(%rsp), X15; xor_src_dst(DST, SRC, (64 * 0 + 16 * 1), X4, X0); xor_src_dst(DST, SRC, (64 * 1 + 16 * 1), X5, X0); xor_src_dst(DST, SRC, (64 * 2 + 16 * 1), X6, X0); xor_src_dst(DST, SRC, (64 * 3 + 16 * 1), X7, X0); transpose_4x4(X8, X9, X10, X11, X0, X1, X2); xor_src_dst(DST, SRC, (64 * 0 + 16 * 2), X8, X0); xor_src_dst(DST, SRC, (64 * 1 + 16 * 2), X9, X0); 
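Editorial note, not part of the patch: unlike the AVX512 code, SSSE3 has no vector rotate, so the ROTATE/ROTATE2 macros in this file synthesize it from two shifts (pshufb with .Lshuf_rol8/.Lshuf_rol16 handles the byte-aligned 8- and 16-bit cases). A scalar sketch of the shift-based rotate, with an illustrative helper name:

    #include <stdint.h>

    /* Rotate-left built from two shifts, as the SSSE3 ROTATE/ROTATE2 macros
     * do for 0 < c < 32.  The two halves share no set bits, so an add gives
     * the same result as an OR, and the byte-wise paddb used by the macros
     * never generates a carry across byte boundaries either. */
    static inline uint32_t rotl_via_shifts(uint32_t v, int c)
    {
      uint32_t hi = v << c;            /* pslld */
      uint32_t lo = v >> (32 - c);     /* psrld */
      return hi + lo;                  /* paddb, equivalent to por here */
    }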
xor_src_dst(DST, SRC, (64 * 2 + 16 * 2), X10, X0); xor_src_dst(DST, SRC, (64 * 3 + 16 * 2), X11, X0); transpose_4x4(X12, X13, X14, X15, X0, X1, X2); xor_src_dst(DST, SRC, (64 * 0 + 16 * 3), X12, X0); xor_src_dst(DST, SRC, (64 * 1 + 16 * 3), X13, X0); xor_src_dst(DST, SRC, (64 * 2 + 16 * 3), X14, X0); xor_src_dst(DST, SRC, (64 * 3 + 16 * 3), X15, X0); sub $4, NBLKS; lea (4 * 64)(DST), DST; lea (4 * 64)(SRC), SRC; jnz .Loop4; /* clear the used vector registers and stack */ clear(X0); movdqa X0, (STACK_VEC_X12)(%rsp); movdqa X0, (STACK_VEC_X13)(%rsp); movdqa X0, (STACK_TMP)(%rsp); movdqa X0, (STACK_TMP1)(%rsp); movdqa X0, (STACK_TMP2)(%rsp); clear(X1); clear(X2); clear(X3); clear(X4); clear(X5); clear(X6); clear(X7); clear(X8); clear(X9); clear(X10); clear(X11); clear(X12); clear(X13); clear(X14); clear(X15); /* eax zeroed by round loop. */ leave; CFI_LEAVE(); ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_chacha20_amd64_ssse3_blocks4, .-_gcry_chacha20_amd64_ssse3_blocks4;) /********************************************************************** 2-way && 1-way chacha20 **********************************************************************/ #define ROTATE_SHUF(v1,shuf) \ pshufb shuf, v1; #define ROTATE(v1,c,tmp1) \ movdqa v1, tmp1; \ psrld $(32 - (c)), v1; \ pslld $(c), tmp1; \ paddb tmp1, v1; #define WORD_SHUF(v1,shuf) \ pshufd $shuf, v1, v1; #define QUARTERROUND4(x0,x1,x2,x3,shuf_rol8,shuf_rol16,tmp1,shuf_x1,\ shuf_x2,shuf_x3) \ PLUS(x0, x1); XOR(x3, x0); ROTATE_SHUF(x3, shuf_rol16); \ PLUS(x2, x3); XOR(x1, x2); ROTATE(x1, 12, tmp1); \ PLUS(x0, x1); XOR(x3, x0); ROTATE_SHUF(x3, shuf_rol8); \ PLUS(x2, x3); \ WORD_SHUF(x3, shuf_x3); \ XOR(x1, x2); \ WORD_SHUF(x2, shuf_x2); \ ROTATE(x1, 7, tmp1); \ WORD_SHUF(x1, shuf_x1); .align 16 .globl _gcry_chacha20_amd64_ssse3_blocks1 ELF(.type _gcry_chacha20_amd64_ssse3_blocks1,@function;) _gcry_chacha20_amd64_ssse3_blocks1: /* input: * %rdi: input * %rsi: dst * %rdx: src * %rcx: nblks */ CFI_STARTPROC(); /* Load constants */ movdqa .Lcounter1 rRIP, X4; movdqa .Lshuf_rol8 rRIP, X5; movdqa .Lshuf_rol16 rRIP, X6; /* Load state */ movdqu (0 * 4)(INPUT), X10; movdqu (4 * 4)(INPUT), X11; movdqu (8 * 4)(INPUT), X12; movdqu (12 * 4)(INPUT), X13; cmp $2, NBLKS; jb .Loop1; mov $20, ROUND; movdqa X10, X0; movdqa X11, X1; movdqa X12, X2; movdqa X13, X3; movdqa X10, X8; movdqa X11, X9; movdqa X12, X14; movdqa X13, X15; paddq X4, X15; .Lround2_2: QUARTERROUND4(X0, X1, X2, X3, X5, X6, X7, 0x39, 0x4e, 0x93); QUARTERROUND4(X8, X9, X14, X15, X5, X6, X7, 0x39, 0x4e, 0x93); QUARTERROUND4(X0, X1, X2, X3, X5, X6, X7, 0x93, 0x4e, 0x39); QUARTERROUND4(X8, X9, X14, X15, X5, X6, X7, 0x93, 0x4e, 0x39); sub $2, ROUND; jnz .Lround2_2; PLUS(X0, X10); PLUS(X1, X11); PLUS(X2, X12); PLUS(X3, X13); /* Update counter */ paddq X4, X13; PLUS(X8, X10); PLUS(X9, X11); PLUS(X14, X12); PLUS(X15, X13); /* Update counter */ paddq X4, X13; xor_src_dst(DST, SRC, 0 * 4, X0, X7); xor_src_dst(DST, SRC, 4 * 4, X1, X7); xor_src_dst(DST, SRC, 8 * 4, X2, X7); xor_src_dst(DST, SRC, 12 * 4, X3, X7); xor_src_dst(DST, SRC, 16 * 4, X8, X7); xor_src_dst(DST, SRC, 20 * 4, X9, X7); xor_src_dst(DST, SRC, 24 * 4, X14, X7); xor_src_dst(DST, SRC, 28 * 4, X15, X7); lea (2 * 64)(DST), DST; lea (2 * 64)(SRC), SRC; clear(X8); clear(X9); clear(X14); clear(X15); sub $2, NBLKS; jz .Ldone1; .Loop1: mov $20, ROUND; movdqa X10, X0; movdqa X11, X1; movdqa X12, X2; movdqa X13, X3; .Lround2_1: QUARTERROUND4(X0, X1, X2, X3, X5, X6, X7, 0x39, 0x4e, 0x93); QUARTERROUND4(X0, X1, X2, X3, X5, X6, X7, 0x93, 0x4e, 0x39); sub $2, ROUND; jnz 
.Lround2_1; PLUS(X0, X10); PLUS(X1, X11); PLUS(X2, X12); PLUS(X3, X13); /* Update counter */ paddq X4, X13; xor_src_dst(DST, SRC, 0 * 4, X0, X7); xor_src_dst(DST, SRC, 4 * 4, X1, X7); xor_src_dst(DST, SRC, 8 * 4, X2, X7); xor_src_dst(DST, SRC, 12 * 4, X3, X7); lea (64)(DST), DST; lea (64)(SRC), SRC; sub $1, NBLKS; jnz .Loop1; .Ldone1: /* Store counter */ movdqu X13, (12 * 4)(INPUT); /* clear the used vector registers */ clear(X0); clear(X1); clear(X2); clear(X3); clear(X4); clear(X5); clear(X6); clear(X7); clear(X10); clear(X11); clear(X12); clear(X13); /* eax zeroed by round loop. */ ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_chacha20_amd64_ssse3_blocks1, .-_gcry_chacha20_amd64_ssse3_blocks1;) /********************************************************************** 4-way stitched chacha20-poly1305 **********************************************************************/ #define _ /*_*/ .align 16 .globl _gcry_chacha20_poly1305_amd64_ssse3_blocks4 ELF(.type _gcry_chacha20_poly1305_amd64_ssse3_blocks4,@function;) _gcry_chacha20_poly1305_amd64_ssse3_blocks4: /* input: * %rdi: input * %rsi: dst * %rdx: src * %rcx: nblks (multiple of 4) * %r9: poly1305-state * %r8: poly1305-src */ CFI_STARTPROC(); pushq %rbp; CFI_PUSH(%rbp); movq %rsp, %rbp; CFI_DEF_CFA_REGISTER(%rbp); subq $(9 * 8) + STACK_MAX + 16, %rsp; andq $~15, %rsp; movq %rbx, (STACK_MAX + 0 * 8)(%rsp); movq %r12, (STACK_MAX + 1 * 8)(%rsp); movq %r13, (STACK_MAX + 2 * 8)(%rsp); movq %r14, (STACK_MAX + 3 * 8)(%rsp); movq %r15, (STACK_MAX + 4 * 8)(%rsp); CFI_REG_ON_STACK(rbx, STACK_MAX + 0 * 8); CFI_REG_ON_STACK(r12, STACK_MAX + 1 * 8); CFI_REG_ON_STACK(r13, STACK_MAX + 2 * 8); CFI_REG_ON_STACK(r14, STACK_MAX + 3 * 8); CFI_REG_ON_STACK(r15, STACK_MAX + 4 * 8); movq %rdx, (STACK_MAX + 5 * 8)(%rsp); # SRC movq %rsi, (STACK_MAX + 6 * 8)(%rsp); # DST movq %rcx, (STACK_MAX + 7 * 8)(%rsp); # NBLKS /* Load state */ POLY1305_LOAD_STATE(); .Loop_poly4: /* Construct counter vectors X12 and X13 */ movdqa .Linc_counter rRIP, X0; movdqa .Lunsigned_cmp rRIP, X2; pbroadcastd((12 * 4)(INPUT), X12); pbroadcastd((13 * 4)(INPUT), X13); paddd X0, X12; movdqa X12, X1; pxor X2, X0; pxor X2, X1; pcmpgtd X1, X0; psubd X0, X13; movdqa X12, (STACK_VEC_X12)(%rsp); movdqa X13, (STACK_VEC_X13)(%rsp); /* Load vectors */ pbroadcastd((0 * 4)(INPUT), X0); pbroadcastd((1 * 4)(INPUT), X1); pbroadcastd((2 * 4)(INPUT), X2); pbroadcastd((3 * 4)(INPUT), X3); pbroadcastd((4 * 4)(INPUT), X4); pbroadcastd((5 * 4)(INPUT), X5); pbroadcastd((6 * 4)(INPUT), X6); pbroadcastd((7 * 4)(INPUT), X7); pbroadcastd((8 * 4)(INPUT), X8); pbroadcastd((9 * 4)(INPUT), X9); pbroadcastd((10 * 4)(INPUT), X10); pbroadcastd((11 * 4)(INPUT), X11); pbroadcastd((14 * 4)(INPUT), X14); pbroadcastd((15 * 4)(INPUT), X15); movdqa X11, (STACK_TMP)(%rsp); movdqa X15, (STACK_TMP1)(%rsp); /* Process four ChaCha20 blocks and sixteen Poly1305 blocks. 
*/ movl $20, (STACK_MAX + 8 * 8 + 4)(%rsp); .Lround4_with_poly1305_outer: movl $6, (STACK_MAX + 8 * 8)(%rsp); .Lround4_with_poly1305_inner1: /* rounds 0-5 & 10-15 */ POLY1305_BLOCK_PART1(0 * 16) QUARTERROUND2(X0, X4, X8, X12, X1, X5, X9, X13, tmp:=,X11,X15, POLY1305_BLOCK_PART2(), POLY1305_BLOCK_PART3()) movdqa (STACK_TMP)(%rsp), X11; movdqa (STACK_TMP1)(%rsp), X15; movdqa X8, (STACK_TMP)(%rsp); movdqa X9, (STACK_TMP1)(%rsp); QUARTERROUND2(X2, X6, X10, X14, X3, X7, X11, X15, tmp:=,X8,X9, POLY1305_BLOCK_PART4(), POLY1305_BLOCK_PART5()) POLY1305_BLOCK_PART1(1 * 16) lea (2 * 16)(POLY_RSRC), POLY_RSRC; QUARTERROUND2(X0, X5, X10, X15, X1, X6, X11, X12, tmp:=,X8,X9, POLY1305_BLOCK_PART2(), POLY1305_BLOCK_PART3()) movdqa (STACK_TMP)(%rsp), X8; movdqa (STACK_TMP1)(%rsp), X9; movdqa X11, (STACK_TMP)(%rsp); movdqa X15, (STACK_TMP1)(%rsp); QUARTERROUND2(X2, X7, X8, X13, X3, X4, X9, X14, tmp:=,X11,X15, POLY1305_BLOCK_PART4(), POLY1305_BLOCK_PART5()) subl $2, (STACK_MAX + 8 * 8)(%rsp); jnz .Lround4_with_poly1305_inner1; movl $4, (STACK_MAX + 8 * 8)(%rsp); .Lround4_with_poly1305_inner2: /* rounds 6-9 & 16-19 */ POLY1305_BLOCK_PART1(0 * 16) lea (1 * 16)(POLY_RSRC), POLY_RSRC; QUARTERROUND2(X0, X4, X8, X12, X1, X5, X9, X13, tmp:=,X11,X15, POLY1305_BLOCK_PART2(), _) movdqa (STACK_TMP)(%rsp), X11; movdqa (STACK_TMP1)(%rsp), X15; movdqa X8, (STACK_TMP)(%rsp); movdqa X9, (STACK_TMP1)(%rsp); QUARTERROUND2(X2, X6, X10, X14, X3, X7, X11, X15, tmp:=,X8,X9, POLY1305_BLOCK_PART3(), _) QUARTERROUND2(X0, X5, X10, X15, X1, X6, X11, X12, tmp:=,X8,X9, POLY1305_BLOCK_PART4(), _) movdqa (STACK_TMP)(%rsp), X8; movdqa (STACK_TMP1)(%rsp), X9; movdqa X11, (STACK_TMP)(%rsp); movdqa X15, (STACK_TMP1)(%rsp); QUARTERROUND2(X2, X7, X8, X13, X3, X4, X9, X14, tmp:=,X11,X15, POLY1305_BLOCK_PART5(), _) subl $2, (STACK_MAX + 8 * 8)(%rsp); jnz .Lround4_with_poly1305_inner2; subl $10, (STACK_MAX + 8 * 8 + 4)(%rsp); jnz .Lround4_with_poly1305_outer; /* tmp := X15 */ movdqa (STACK_TMP)(%rsp), X11; pbroadcastd((0 * 4)(INPUT), X15); PLUS(X0, X15); pbroadcastd((1 * 4)(INPUT), X15); PLUS(X1, X15); pbroadcastd((2 * 4)(INPUT), X15); PLUS(X2, X15); pbroadcastd((3 * 4)(INPUT), X15); PLUS(X3, X15); pbroadcastd((4 * 4)(INPUT), X15); PLUS(X4, X15); pbroadcastd((5 * 4)(INPUT), X15); PLUS(X5, X15); pbroadcastd((6 * 4)(INPUT), X15); PLUS(X6, X15); pbroadcastd((7 * 4)(INPUT), X15); PLUS(X7, X15); pbroadcastd((8 * 4)(INPUT), X15); PLUS(X8, X15); pbroadcastd((9 * 4)(INPUT), X15); PLUS(X9, X15); pbroadcastd((10 * 4)(INPUT), X15); PLUS(X10, X15); pbroadcastd((11 * 4)(INPUT), X15); PLUS(X11, X15); movdqa (STACK_VEC_X12)(%rsp), X15; PLUS(X12, X15); movdqa (STACK_VEC_X13)(%rsp), X15; PLUS(X13, X15); movdqa X13, (STACK_TMP)(%rsp); pbroadcastd((14 * 4)(INPUT), X15); PLUS(X14, X15); movdqa (STACK_TMP1)(%rsp), X15; movdqa X14, (STACK_TMP1)(%rsp); pbroadcastd((15 * 4)(INPUT), X13); PLUS(X15, X13); movdqa X15, (STACK_TMP2)(%rsp); /* Update counter */ addq $4, (12 * 4)(INPUT); movq (STACK_MAX + 5 * 8)(%rsp), SRC; movq (STACK_MAX + 6 * 8)(%rsp), DST; transpose_4x4(X0, X1, X2, X3, X13, X14, X15); xor_src_dst(DST, SRC, (64 * 0 + 16 * 0), X0, X15); xor_src_dst(DST, SRC, (64 * 1 + 16 * 0), X1, X15); xor_src_dst(DST, SRC, (64 * 2 + 16 * 0), X2, X15); xor_src_dst(DST, SRC, (64 * 3 + 16 * 0), X3, X15); transpose_4x4(X4, X5, X6, X7, X0, X1, X2); movdqa (STACK_TMP)(%rsp), X13; movdqa (STACK_TMP1)(%rsp), X14; movdqa (STACK_TMP2)(%rsp), X15; xor_src_dst(DST, SRC, (64 * 0 + 16 * 1), X4, X0); xor_src_dst(DST, SRC, (64 * 1 + 16 * 1), X5, X0); xor_src_dst(DST, SRC, (64 * 2 + 16 * 1), 
X6, X0); xor_src_dst(DST, SRC, (64 * 3 + 16 * 1), X7, X0); transpose_4x4(X8, X9, X10, X11, X0, X1, X2); xor_src_dst(DST, SRC, (64 * 0 + 16 * 2), X8, X0); xor_src_dst(DST, SRC, (64 * 1 + 16 * 2), X9, X0); xor_src_dst(DST, SRC, (64 * 2 + 16 * 2), X10, X0); xor_src_dst(DST, SRC, (64 * 3 + 16 * 2), X11, X0); transpose_4x4(X12, X13, X14, X15, X0, X1, X2); xor_src_dst(DST, SRC, (64 * 0 + 16 * 3), X12, X0); xor_src_dst(DST, SRC, (64 * 1 + 16 * 3), X13, X0); xor_src_dst(DST, SRC, (64 * 2 + 16 * 3), X14, X0); xor_src_dst(DST, SRC, (64 * 3 + 16 * 3), X15, X0); subq $4, (STACK_MAX + 7 * 8)(%rsp); # NBLKS lea (4 * 64)(DST), DST; lea (4 * 64)(SRC), SRC; movq SRC, (STACK_MAX + 5 * 8)(%rsp); movq DST, (STACK_MAX + 6 * 8)(%rsp); jnz .Loop_poly4; /* Store state */ POLY1305_STORE_STATE(); /* clear the used vector registers and stack */ clear(X0); movdqa X0, (STACK_VEC_X12)(%rsp); movdqa X0, (STACK_VEC_X13)(%rsp); movdqa X0, (STACK_TMP)(%rsp); movdqa X0, (STACK_TMP1)(%rsp); movdqa X0, (STACK_TMP2)(%rsp); clear(X1); clear(X2); clear(X3); clear(X4); clear(X5); clear(X6); clear(X7); clear(X8); clear(X9); clear(X10); clear(X11); clear(X12); clear(X13); clear(X14); clear(X15); movq (STACK_MAX + 0 * 8)(%rsp), %rbx; movq (STACK_MAX + 1 * 8)(%rsp), %r12; movq (STACK_MAX + 2 * 8)(%rsp), %r13; movq (STACK_MAX + 3 * 8)(%rsp), %r14; movq (STACK_MAX + 4 * 8)(%rsp), %r15; CFI_RESTORE(%rbx); CFI_RESTORE(%r12); CFI_RESTORE(%r13); CFI_RESTORE(%r14); CFI_RESTORE(%r15); xorl %eax, %eax; leave; CFI_LEAVE(); ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_chacha20_poly1305_amd64_ssse3_blocks4, .-_gcry_chacha20_poly1305_amd64_ssse3_blocks4;) /********************************************************************** 2-way && 1-way stitched chacha20-poly1305 **********************************************************************/ .align 16 .globl _gcry_chacha20_poly1305_amd64_ssse3_blocks1 ELF(.type _gcry_chacha20_poly1305_amd64_ssse3_blocks1,@function;) _gcry_chacha20_poly1305_amd64_ssse3_blocks1: /* input: * %rdi: chacha20-state * %rsi: dst * %rdx: src * %rcx: nblks * %r9: poly1305-state * %r8: poly1305-src */ CFI_STARTPROC(); pushq %rbp; CFI_PUSH(%rbp); movq %rsp, %rbp; CFI_DEF_CFA_REGISTER(%rbp); subq $(9 * 8), %rsp; movq %rbx, (0 * 8)(%rsp); movq %r12, (1 * 8)(%rsp); movq %r13, (2 * 8)(%rsp); movq %r14, (3 * 8)(%rsp); movq %r15, (4 * 8)(%rsp); CFI_REG_ON_STACK(rbx, 0 * 8); CFI_REG_ON_STACK(r12, 1 * 8); CFI_REG_ON_STACK(r13, 2 * 8); CFI_REG_ON_STACK(r14, 3 * 8); CFI_REG_ON_STACK(r15, 4 * 8); movq %rdx, (5 * 8)(%rsp); # SRC movq %rsi, (6 * 8)(%rsp); # DST movq %rcx, (7 * 8)(%rsp); # NBLKS /* Load constants */ movdqa .Lcounter1 rRIP, X4; movdqa .Lshuf_rol8 rRIP, X5; movdqa .Lshuf_rol16 rRIP, X6; /* Load state */ movdqu (0 * 4)(INPUT), X10; movdqu (4 * 4)(INPUT), X11; movdqu (8 * 4)(INPUT), X12; movdqu (12 * 4)(INPUT), X13; POLY1305_LOAD_STATE(); cmpq $2, (7 * 8)(%rsp); #NBLKS jb .Loop_poly1; movdqa X10, X0; movdqa X11, X1; movdqa X12, X2; movdqa X13, X3; movdqa X10, X8; movdqa X11, X9; movdqa X12, X14; movdqa X13, X15; paddq X4, X15; /* Process two ChaCha20 blocks and eight Poly1305 blocks. 
*/ movl $20, (8 * 8 + 4)(%rsp); .Lround2_with_poly1305_outer: movl $8, (8 * 8)(%rsp); .Lround2_with_poly1305_inner: POLY1305_BLOCK_PART1(0 * 16); QUARTERROUND4(X0, X1, X2, X3, X5, X6, X7, 0x39, 0x4e, 0x93); lea (1 * 16)(POLY_RSRC), POLY_RSRC; POLY1305_BLOCK_PART2(); QUARTERROUND4(X8, X9, X14, X15, X5, X6, X7, 0x39, 0x4e, 0x93); POLY1305_BLOCK_PART3(); QUARTERROUND4(X0, X1, X2, X3, X5, X6, X7, 0x93, 0x4e, 0x39); POLY1305_BLOCK_PART4(); QUARTERROUND4(X8, X9, X14, X15, X5, X6, X7, 0x93, 0x4e, 0x39); POLY1305_BLOCK_PART5(); subl $2, (8 * 8)(%rsp); jnz .Lround2_with_poly1305_inner; QUARTERROUND4(X0, X1, X2, X3, X5, X6, X7, 0x39, 0x4e, 0x93); QUARTERROUND4(X8, X9, X14, X15, X5, X6, X7, 0x39, 0x4e, 0x93); QUARTERROUND4(X0, X1, X2, X3, X5, X6, X7, 0x93, 0x4e, 0x39); QUARTERROUND4(X8, X9, X14, X15, X5, X6, X7, 0x93, 0x4e, 0x39); subl $10, (8 * 8 + 4)(%rsp); jnz .Lround2_with_poly1305_outer; movq (5 * 8)(%rsp), SRC; movq (6 * 8)(%rsp), DST; PLUS(X0, X10); PLUS(X1, X11); PLUS(X2, X12); PLUS(X3, X13); /* Update counter */ paddq X4, X13; PLUS(X8, X10); PLUS(X9, X11); PLUS(X14, X12); PLUS(X15, X13); /* Update counter */ paddq X4, X13; xor_src_dst(DST, SRC, 0 * 4, X0, X7); xor_src_dst(DST, SRC, 4 * 4, X1, X7); xor_src_dst(DST, SRC, 8 * 4, X2, X7); xor_src_dst(DST, SRC, 12 * 4, X3, X7); xor_src_dst(DST, SRC, 16 * 4, X8, X7); xor_src_dst(DST, SRC, 20 * 4, X9, X7); xor_src_dst(DST, SRC, 24 * 4, X14, X7); xor_src_dst(DST, SRC, 28 * 4, X15, X7); clear(X8); clear(X9); clear(X14); clear(X15); subq $2, (7 * 8)(%rsp); # NBLKS lea (2 * 64)(SRC), SRC; lea (2 * 64)(DST), DST; movq SRC, (5 * 8)(%rsp); movq DST, (6 * 8)(%rsp); jz .Ldone_poly1; .Loop_poly1: movdqa X10, X0; movdqa X11, X1; movdqa X12, X2; movdqa X13, X3; /* Process one ChaCha20 block and four Poly1305 blocks. */ movl $20, (8 * 8 + 4)(%rsp); .Lround1_with_poly1305_outer: movl $8, (8 * 8)(%rsp); .Lround1_with_poly1305_inner: POLY1305_BLOCK_PART1(0 * 16); QUARTERROUND4(X0, X1, X2, X3, X5, X6, X7, 0x39, 0x4e, 0x93); POLY1305_BLOCK_PART2(); QUARTERROUND4(X0, X1, X2, X3, X5, X6, X7, 0x93, 0x4e, 0x39); lea (1 * 16)(POLY_RSRC), POLY_RSRC; POLY1305_BLOCK_PART3(); QUARTERROUND4(X0, X1, X2, X3, X5, X6, X7, 0x39, 0x4e, 0x93); POLY1305_BLOCK_PART4(); QUARTERROUND4(X0, X1, X2, X3, X5, X6, X7, 0x93, 0x4e, 0x39); POLY1305_BLOCK_PART5(); subl $4, (8 * 8)(%rsp); jnz .Lround1_with_poly1305_inner; QUARTERROUND4(X0, X1, X2, X3, X5, X6, X7, 0x39, 0x4e, 0x93); QUARTERROUND4(X0, X1, X2, X3, X5, X6, X7, 0x93, 0x4e, 0x39); subl $10, (8 * 8 + 4)(%rsp); jnz .Lround1_with_poly1305_outer; movq (5 * 8)(%rsp), SRC; movq (6 * 8)(%rsp), DST; PLUS(X0, X10); PLUS(X1, X11); PLUS(X2, X12); PLUS(X3, X13); /* Update counter */ paddq X4, X13; xor_src_dst(DST, SRC, 0 * 4, X0, X7); xor_src_dst(DST, SRC, 4 * 4, X1, X7); xor_src_dst(DST, SRC, 8 * 4, X2, X7); xor_src_dst(DST, SRC, 12 * 4, X3, X7); subq $1, (7 * 8)(%rsp); # NBLKS lea (64)(SRC), SRC; lea (64)(DST), DST; movq SRC, (5 * 8)(%rsp); movq DST, (6 * 8)(%rsp); jnz .Loop_poly1; .Ldone_poly1: /* Store state */ POLY1305_STORE_STATE(); movdqu X13, (12 * 4)(INPUT); /* clear the used vector registers */ clear(X0); clear(X1); clear(X2); clear(X3); clear(X4); clear(X5); clear(X6); clear(X7); clear(X10); clear(X11); clear(X12); clear(X13); movq (0 * 8)(%rsp), %rbx; movq (1 * 8)(%rsp), %r12; movq (2 * 8)(%rsp), %r13; movq (3 * 8)(%rsp), %r14; movq (4 * 8)(%rsp), %r15; CFI_RESTORE(%rbx); CFI_RESTORE(%r12); CFI_RESTORE(%r13); CFI_RESTORE(%r14); CFI_RESTORE(%r15); xorl %eax, %eax; leave; CFI_LEAVE(); ret_spec_stop; CFI_ENDPROC(); ELF(.size 
_gcry_chacha20_poly1305_amd64_ssse3_blocks1, .-_gcry_chacha20_poly1305_amd64_ssse3_blocks1;) #endif /*defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS)*/ #endif /*__x86_64*/ diff --git a/cipher/des-amd64.S b/cipher/des-amd64.S index 51e40258..e4236a92 100644 --- a/cipher/des-amd64.S +++ b/cipher/des-amd64.S @@ -1,1111 +1,1116 @@ /* des-amd64.S - AMD64 assembly implementation of 3DES cipher * * Copyright (C) 2014 Jussi Kivilinna * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . */ #ifdef __x86_64 #include #if defined(USE_DES) && (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) #include "asm-common-amd64.h" .text #define s1 0 #define s2 ((s1) + (64*8)) #define s3 ((s2) + (64*8)) #define s4 ((s3) + (64*8)) #define s5 ((s4) + (64*8)) #define s6 ((s5) + (64*8)) #define s7 ((s6) + (64*8)) #define s8 ((s7) + (64*8)) /* register macros */ #define CTX %rdi #define SBOXES %rbp #define RL0 %r8 #define RL1 %r9 #define RL2 %r10 #define RL0d %r8d #define RL1d %r9d #define RL2d %r10d #define RR0 %r11 #define RR1 %r12 #define RR2 %r13 #define RR0d %r11d #define RR1d %r12d #define RR2d %r13d #define RW0 %rax #define RW1 %rbx #define RW2 %rcx #define RW0d %eax #define RW1d %ebx #define RW2d %ecx #define RW0bl %al #define RW1bl %bl #define RW2bl %cl #define RW0bh %ah #define RW1bh %bh #define RW2bh %ch #define RT0 %r15 #define RT1 %rsi #define RT2 %r14 #define RT3 %rdx #define RT0d %r15d #define RT1d %esi #define RT2d %r14d #define RT3d %edx /*********************************************************************** * 1-way 3DES ***********************************************************************/ #define do_permutation(a, b, offset, mask) \ movl a, RT0d; \ shrl $(offset), RT0d; \ xorl b, RT0d; \ andl $(mask), RT0d; \ xorl RT0d, b; \ shll $(offset), RT0d; \ xorl RT0d, a; #define expand_to_64bits(val, mask) \ movl val##d, RT0d; \ rorl $4, RT0d; \ shlq $32, RT0; \ orq RT0, val; \ andq mask, val; #define compress_to_64bits(val) \ movq val, RT0; \ shrq $32, RT0; \ roll $4, RT0d; \ orl RT0d, val##d; #define initial_permutation(left, right) \ do_permutation(left##d, right##d, 4, 0x0f0f0f0f); \ do_permutation(left##d, right##d, 16, 0x0000ffff); \ do_permutation(right##d, left##d, 2, 0x33333333); \ do_permutation(right##d, left##d, 8, 0x00ff00ff); \ movabs $0x3f3f3f3f3f3f3f3f, RT3; \ movl left##d, RW0d; \ roll $1, right##d; \ xorl right##d, RW0d; \ andl $0xaaaaaaaa, RW0d; \ xorl RW0d, left##d; \ xorl RW0d, right##d; \ roll $1, left##d; \ expand_to_64bits(right, RT3); \ expand_to_64bits(left, RT3); #define final_permutation(left, right) \ compress_to_64bits(right); \ compress_to_64bits(left); \ movl right##d, RW0d; \ rorl $1, left##d; \ xorl left##d, RW0d; \ andl $0xaaaaaaaa, RW0d; \ xorl RW0d, right##d; \ xorl RW0d, left##d; \ rorl $1, right##d; \ do_permutation(right##d, left##d, 8, 0x00ff00ff); \ do_permutation(right##d, left##d, 2, 0x33333333); \ do_permutation(left##d, 
right##d, 16, 0x0000ffff); \ do_permutation(left##d, right##d, 4, 0x0f0f0f0f); #define round1(n, from, to, load_next_key) \ xorq from, RW0; \ \ movzbl RW0bl, RT0d; \ movzbl RW0bh, RT1d; \ shrq $16, RW0; \ movzbl RW0bl, RT2d; \ movzbl RW0bh, RT3d; \ shrq $16, RW0; \ movq s8(SBOXES, RT0, 8), RT0; \ xorq s6(SBOXES, RT1, 8), to; \ movzbl RW0bl, RL1d; \ movzbl RW0bh, RT1d; \ shrl $16, RW0d; \ xorq s4(SBOXES, RT2, 8), RT0; \ xorq s2(SBOXES, RT3, 8), to; \ movzbl RW0bl, RT2d; \ movzbl RW0bh, RT3d; \ xorq s7(SBOXES, RL1, 8), RT0; \ xorq s5(SBOXES, RT1, 8), to; \ xorq s3(SBOXES, RT2, 8), RT0; \ load_next_key(n, RW0); \ xorq RT0, to; \ xorq s1(SBOXES, RT3, 8), to; \ #define load_next_key(n, RWx) \ movq (((n) + 1) * 8)(CTX), RWx; #define dummy2(a, b) /*_*/ #define read_block(io, left, right) \ movl (io), left##d; \ movl 4(io), right##d; \ bswapl left##d; \ bswapl right##d; #define write_block(io, left, right) \ bswapl left##d; \ bswapl right##d; \ movl left##d, (io); \ movl right##d, 4(io); .align 16 .globl _gcry_3des_amd64_crypt_block ELF(.type _gcry_3des_amd64_crypt_block,@function;) _gcry_3des_amd64_crypt_block: /* input: * %rdi: round keys, CTX * %rsi: dst * %rdx: src */ CFI_STARTPROC(); ENTER_SYSV_FUNC_PARAMS_0_4 pushq %rbp; CFI_PUSH(%rbp); pushq %rbx; CFI_PUSH(%rbx); pushq %r12; CFI_PUSH(%r12); pushq %r13; CFI_PUSH(%r13); pushq %r14; CFI_PUSH(%r14); pushq %r15; CFI_PUSH(%r15); pushq %rsi; /*dst*/ CFI_PUSH(%rsi); leaq .L_s1 rRIP, SBOXES; read_block(%rdx, RL0, RR0); initial_permutation(RL0, RR0); movq (CTX), RW0; round1(0, RR0, RL0, load_next_key); round1(1, RL0, RR0, load_next_key); round1(2, RR0, RL0, load_next_key); round1(3, RL0, RR0, load_next_key); round1(4, RR0, RL0, load_next_key); round1(5, RL0, RR0, load_next_key); round1(6, RR0, RL0, load_next_key); round1(7, RL0, RR0, load_next_key); round1(8, RR0, RL0, load_next_key); round1(9, RL0, RR0, load_next_key); round1(10, RR0, RL0, load_next_key); round1(11, RL0, RR0, load_next_key); round1(12, RR0, RL0, load_next_key); round1(13, RL0, RR0, load_next_key); round1(14, RR0, RL0, load_next_key); round1(15, RL0, RR0, load_next_key); round1(16+0, RL0, RR0, load_next_key); round1(16+1, RR0, RL0, load_next_key); round1(16+2, RL0, RR0, load_next_key); round1(16+3, RR0, RL0, load_next_key); round1(16+4, RL0, RR0, load_next_key); round1(16+5, RR0, RL0, load_next_key); round1(16+6, RL0, RR0, load_next_key); round1(16+7, RR0, RL0, load_next_key); round1(16+8, RL0, RR0, load_next_key); round1(16+9, RR0, RL0, load_next_key); round1(16+10, RL0, RR0, load_next_key); round1(16+11, RR0, RL0, load_next_key); round1(16+12, RL0, RR0, load_next_key); round1(16+13, RR0, RL0, load_next_key); round1(16+14, RL0, RR0, load_next_key); round1(16+15, RR0, RL0, load_next_key); round1(32+0, RR0, RL0, load_next_key); round1(32+1, RL0, RR0, load_next_key); round1(32+2, RR0, RL0, load_next_key); round1(32+3, RL0, RR0, load_next_key); round1(32+4, RR0, RL0, load_next_key); round1(32+5, RL0, RR0, load_next_key); round1(32+6, RR0, RL0, load_next_key); round1(32+7, RL0, RR0, load_next_key); round1(32+8, RR0, RL0, load_next_key); round1(32+9, RL0, RR0, load_next_key); round1(32+10, RR0, RL0, load_next_key); round1(32+11, RL0, RR0, load_next_key); round1(32+12, RR0, RL0, load_next_key); round1(32+13, RL0, RR0, load_next_key); round1(32+14, RR0, RL0, load_next_key); round1(32+15, RL0, RR0, dummy2); popq RW2; /*dst*/ CFI_POP_TMP_REG(); final_permutation(RR0, RL0); write_block(RW2, RR0, RL0); popq %r15; CFI_POP(%r15); popq %r14; CFI_POP(%r14); popq %r13; CFI_POP(%r13); popq %r12; 
CFI_POP(%r12); popq %rbx; CFI_POP(%rbx); popq %rbp; CFI_POP(%rbp); EXIT_SYSV_FUNC ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_3des_amd64_crypt_block,.-_gcry_3des_amd64_crypt_block;) /*********************************************************************** * 3-way 3DES ***********************************************************************/ #define expand_to_64bits(val, mask) \ movl val##d, RT0d; \ rorl $4, RT0d; \ shlq $32, RT0; \ orq RT0, val; \ andq mask, val; #define compress_to_64bits(val) \ movq val, RT0; \ shrq $32, RT0; \ roll $4, RT0d; \ orl RT0d, val##d; #define initial_permutation3(left, right) \ do_permutation(left##0d, right##0d, 4, 0x0f0f0f0f); \ do_permutation(left##0d, right##0d, 16, 0x0000ffff); \ do_permutation(left##1d, right##1d, 4, 0x0f0f0f0f); \ do_permutation(left##1d, right##1d, 16, 0x0000ffff); \ do_permutation(left##2d, right##2d, 4, 0x0f0f0f0f); \ do_permutation(left##2d, right##2d, 16, 0x0000ffff); \ \ do_permutation(right##0d, left##0d, 2, 0x33333333); \ do_permutation(right##0d, left##0d, 8, 0x00ff00ff); \ do_permutation(right##1d, left##1d, 2, 0x33333333); \ do_permutation(right##1d, left##1d, 8, 0x00ff00ff); \ do_permutation(right##2d, left##2d, 2, 0x33333333); \ do_permutation(right##2d, left##2d, 8, 0x00ff00ff); \ \ movabs $0x3f3f3f3f3f3f3f3f, RT3; \ \ movl left##0d, RW0d; \ roll $1, right##0d; \ xorl right##0d, RW0d; \ andl $0xaaaaaaaa, RW0d; \ xorl RW0d, left##0d; \ xorl RW0d, right##0d; \ roll $1, left##0d; \ expand_to_64bits(right##0, RT3); \ expand_to_64bits(left##0, RT3); \ movl left##1d, RW1d; \ roll $1, right##1d; \ xorl right##1d, RW1d; \ andl $0xaaaaaaaa, RW1d; \ xorl RW1d, left##1d; \ xorl RW1d, right##1d; \ roll $1, left##1d; \ expand_to_64bits(right##1, RT3); \ expand_to_64bits(left##1, RT3); \ movl left##2d, RW2d; \ roll $1, right##2d; \ xorl right##2d, RW2d; \ andl $0xaaaaaaaa, RW2d; \ xorl RW2d, left##2d; \ xorl RW2d, right##2d; \ roll $1, left##2d; \ expand_to_64bits(right##2, RT3); \ expand_to_64bits(left##2, RT3); #define final_permutation3(left, right) \ compress_to_64bits(right##0); \ compress_to_64bits(left##0); \ movl right##0d, RW0d; \ rorl $1, left##0d; \ xorl left##0d, RW0d; \ andl $0xaaaaaaaa, RW0d; \ xorl RW0d, right##0d; \ xorl RW0d, left##0d; \ rorl $1, right##0d; \ compress_to_64bits(right##1); \ compress_to_64bits(left##1); \ movl right##1d, RW1d; \ rorl $1, left##1d; \ xorl left##1d, RW1d; \ andl $0xaaaaaaaa, RW1d; \ xorl RW1d, right##1d; \ xorl RW1d, left##1d; \ rorl $1, right##1d; \ compress_to_64bits(right##2); \ compress_to_64bits(left##2); \ movl right##2d, RW2d; \ rorl $1, left##2d; \ xorl left##2d, RW2d; \ andl $0xaaaaaaaa, RW2d; \ xorl RW2d, right##2d; \ xorl RW2d, left##2d; \ rorl $1, right##2d; \ \ do_permutation(right##0d, left##0d, 8, 0x00ff00ff); \ do_permutation(right##0d, left##0d, 2, 0x33333333); \ do_permutation(right##1d, left##1d, 8, 0x00ff00ff); \ do_permutation(right##1d, left##1d, 2, 0x33333333); \ do_permutation(right##2d, left##2d, 8, 0x00ff00ff); \ do_permutation(right##2d, left##2d, 2, 0x33333333); \ \ do_permutation(left##0d, right##0d, 16, 0x0000ffff); \ do_permutation(left##0d, right##0d, 4, 0x0f0f0f0f); \ do_permutation(left##1d, right##1d, 16, 0x0000ffff); \ do_permutation(left##1d, right##1d, 4, 0x0f0f0f0f); \ do_permutation(left##2d, right##2d, 16, 0x0000ffff); \ do_permutation(left##2d, right##2d, 4, 0x0f0f0f0f); #define round3(n, from, to, load_next_key, do_movq) \ xorq from##0, RW0; \ movzbl RW0bl, RT3d; \ movzbl RW0bh, RT1d; \ shrq $16, RW0; \ xorq s8(SBOXES, RT3, 8), to##0; \ xorq 
s6(SBOXES, RT1, 8), to##0; \ movzbl RW0bl, RT3d; \ movzbl RW0bh, RT1d; \ shrq $16, RW0; \ xorq s4(SBOXES, RT3, 8), to##0; \ xorq s2(SBOXES, RT1, 8), to##0; \ movzbl RW0bl, RT3d; \ movzbl RW0bh, RT1d; \ shrl $16, RW0d; \ xorq s7(SBOXES, RT3, 8), to##0; \ xorq s5(SBOXES, RT1, 8), to##0; \ movzbl RW0bl, RT3d; \ movzbl RW0bh, RT1d; \ load_next_key(n, RW0); \ xorq s3(SBOXES, RT3, 8), to##0; \ xorq s1(SBOXES, RT1, 8), to##0; \ xorq from##1, RW1; \ movzbl RW1bl, RT3d; \ movzbl RW1bh, RT1d; \ shrq $16, RW1; \ xorq s8(SBOXES, RT3, 8), to##1; \ xorq s6(SBOXES, RT1, 8), to##1; \ movzbl RW1bl, RT3d; \ movzbl RW1bh, RT1d; \ shrq $16, RW1; \ xorq s4(SBOXES, RT3, 8), to##1; \ xorq s2(SBOXES, RT1, 8), to##1; \ movzbl RW1bl, RT3d; \ movzbl RW1bh, RT1d; \ shrl $16, RW1d; \ xorq s7(SBOXES, RT3, 8), to##1; \ xorq s5(SBOXES, RT1, 8), to##1; \ movzbl RW1bl, RT3d; \ movzbl RW1bh, RT1d; \ do_movq(RW0, RW1); \ xorq s3(SBOXES, RT3, 8), to##1; \ xorq s1(SBOXES, RT1, 8), to##1; \ xorq from##2, RW2; \ movzbl RW2bl, RT3d; \ movzbl RW2bh, RT1d; \ shrq $16, RW2; \ xorq s8(SBOXES, RT3, 8), to##2; \ xorq s6(SBOXES, RT1, 8), to##2; \ movzbl RW2bl, RT3d; \ movzbl RW2bh, RT1d; \ shrq $16, RW2; \ xorq s4(SBOXES, RT3, 8), to##2; \ xorq s2(SBOXES, RT1, 8), to##2; \ movzbl RW2bl, RT3d; \ movzbl RW2bh, RT1d; \ shrl $16, RW2d; \ xorq s7(SBOXES, RT3, 8), to##2; \ xorq s5(SBOXES, RT1, 8), to##2; \ movzbl RW2bl, RT3d; \ movzbl RW2bh, RT1d; \ do_movq(RW0, RW2); \ xorq s3(SBOXES, RT3, 8), to##2; \ xorq s1(SBOXES, RT1, 8), to##2; #define __movq(src, dst) \ movq src, dst; #define read_block(io, left, right) \ movl (io), left##d; \ movl 4(io), right##d; \ bswapl left##d; \ bswapl right##d; #define write_block(io, left, right) \ bswapl left##d; \ bswapl right##d; \ movl left##d, (io); \ movl right##d, 4(io); .align 16 ELF(.type _gcry_3des_amd64_crypt_blk3,@function;) _gcry_3des_amd64_crypt_blk3: /* input: * %rdi: round keys, CTX * RL0d, RR0d, RL1d, RR1d, RL2d, RR2d: 3 input blocks * RR0d, RL0d, RR1d, RL1d, RR2d, RL2d: 3 output blocks */ CFI_STARTPROC(); leaq .L_s1 rRIP, SBOXES; initial_permutation3(RL, RR); movq 0(CTX), RW0; movq RW0, RW1; movq RW0, RW2; round3(0, RR, RL, load_next_key, __movq); round3(1, RL, RR, load_next_key, __movq); round3(2, RR, RL, load_next_key, __movq); round3(3, RL, RR, load_next_key, __movq); round3(4, RR, RL, load_next_key, __movq); round3(5, RL, RR, load_next_key, __movq); round3(6, RR, RL, load_next_key, __movq); round3(7, RL, RR, load_next_key, __movq); round3(8, RR, RL, load_next_key, __movq); round3(9, RL, RR, load_next_key, __movq); round3(10, RR, RL, load_next_key, __movq); round3(11, RL, RR, load_next_key, __movq); round3(12, RR, RL, load_next_key, __movq); round3(13, RL, RR, load_next_key, __movq); round3(14, RR, RL, load_next_key, __movq); round3(15, RL, RR, load_next_key, __movq); round3(16+0, RL, RR, load_next_key, __movq); round3(16+1, RR, RL, load_next_key, __movq); round3(16+2, RL, RR, load_next_key, __movq); round3(16+3, RR, RL, load_next_key, __movq); round3(16+4, RL, RR, load_next_key, __movq); round3(16+5, RR, RL, load_next_key, __movq); round3(16+6, RL, RR, load_next_key, __movq); round3(16+7, RR, RL, load_next_key, __movq); round3(16+8, RL, RR, load_next_key, __movq); round3(16+9, RR, RL, load_next_key, __movq); round3(16+10, RL, RR, load_next_key, __movq); round3(16+11, RR, RL, load_next_key, __movq); round3(16+12, RL, RR, load_next_key, __movq); round3(16+13, RR, RL, load_next_key, __movq); round3(16+14, RL, RR, load_next_key, __movq); round3(16+15, RR, RL, load_next_key, __movq); 
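Each round3 invocation applies one table-driven DES round to three blocks in parallel, and round1 above is the single-block form of the same computation: the round key is XORed into the expanded right half, successive bytes of the result index eight precomputed 64-entry tables, and the XOR of the eight looked-up values is folded into the other half. A rough C model of one such round follows; it is illustrative only, with names that are not part of the patch, and `sbox` stands for the eight tables emitted at .L_s1 through .L_s8.

    /* Rough C model of one table-driven DES round as performed by
     * round1/round3; illustrative only, not the libgcrypt API.
     * `left`/`right` are the halves in the expanded 64-bit
     * representation produced by the initial permutation. */
    #include <stdint.h>

    uint64_t des_round(uint64_t left, uint64_t right, uint64_t subkey,
                       const uint64_t sbox[8][64])
    {
        uint64_t w = right ^ subkey;
        uint64_t f = 0;

        /* Table order matches the s8,s6,s4,s2,s7,s5,s3,s1 access pattern
         * of the macro.  The assembly indexes the tables with whole bytes
         * of w; the 0x3f mask here just keeps this C model inside the
         * 64-entry tables. */
        f ^= sbox[7][(w >>  0) & 0x3f];   /* s8 */
        f ^= sbox[5][(w >>  8) & 0x3f];   /* s6 */
        f ^= sbox[3][(w >> 16) & 0x3f];   /* s4 */
        f ^= sbox[1][(w >> 24) & 0x3f];   /* s2 */
        f ^= sbox[6][(w >> 32) & 0x3f];   /* s7 */
        f ^= sbox[4][(w >> 40) & 0x3f];   /* s5 */
        f ^= sbox[2][(w >> 48) & 0x3f];   /* s3 */
        f ^= sbox[0][(w >> 56) & 0x3f];   /* s1 */

        return left ^ f;   /* new value of the `to` half */
    }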
round3(32+0, RR, RL, load_next_key, __movq); round3(32+1, RL, RR, load_next_key, __movq); round3(32+2, RR, RL, load_next_key, __movq); round3(32+3, RL, RR, load_next_key, __movq); round3(32+4, RR, RL, load_next_key, __movq); round3(32+5, RL, RR, load_next_key, __movq); round3(32+6, RR, RL, load_next_key, __movq); round3(32+7, RL, RR, load_next_key, __movq); round3(32+8, RR, RL, load_next_key, __movq); round3(32+9, RL, RR, load_next_key, __movq); round3(32+10, RR, RL, load_next_key, __movq); round3(32+11, RL, RR, load_next_key, __movq); round3(32+12, RR, RL, load_next_key, __movq); round3(32+13, RL, RR, load_next_key, __movq); round3(32+14, RR, RL, load_next_key, __movq); round3(32+15, RL, RR, dummy2, dummy2); final_permutation3(RR, RL); ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_3des_amd64_crypt_blk3,.-_gcry_3des_amd64_crypt_blk3;) .align 16 .globl _gcry_3des_amd64_cbc_dec ELF(.type _gcry_3des_amd64_cbc_dec,@function;) _gcry_3des_amd64_cbc_dec: /* input: * %rdi: ctx, CTX * %rsi: dst (3 blocks) * %rdx: src (3 blocks) * %rcx: iv (64bit) */ CFI_STARTPROC(); ENTER_SYSV_FUNC_PARAMS_0_4 pushq %rbp; CFI_PUSH(%rbp); pushq %rbx; CFI_PUSH(%rbx); pushq %r12; CFI_PUSH(%r12); pushq %r13; CFI_PUSH(%r13); pushq %r14; CFI_PUSH(%r14); pushq %r15; CFI_PUSH(%r15); pushq %rsi; /*dst*/ CFI_PUSH(%rsi); pushq %rdx; /*src*/ CFI_PUSH(%rdx); pushq %rcx; /*iv*/ CFI_PUSH(%rcx); /* load input */ movl 0 * 4(%rdx), RL0d; movl 1 * 4(%rdx), RR0d; movl 2 * 4(%rdx), RL1d; movl 3 * 4(%rdx), RR1d; movl 4 * 4(%rdx), RL2d; movl 5 * 4(%rdx), RR2d; bswapl RL0d; bswapl RR0d; bswapl RL1d; bswapl RR1d; bswapl RL2d; bswapl RR2d; call _gcry_3des_amd64_crypt_blk3; popq %rcx; /*iv*/ CFI_POP_TMP_REG(); popq %rdx; /*src*/ CFI_POP_TMP_REG(); popq %rsi; /*dst*/ CFI_POP_TMP_REG(); bswapl RR0d; bswapl RL0d; bswapl RR1d; bswapl RL1d; bswapl RR2d; bswapl RL2d; movq 2 * 8(%rdx), RT0; xorl 0 * 4(%rcx), RR0d; xorl 1 * 4(%rcx), RL0d; xorl 0 * 4(%rdx), RR1d; xorl 1 * 4(%rdx), RL1d; xorl 2 * 4(%rdx), RR2d; xorl 3 * 4(%rdx), RL2d; movq RT0, (%rcx); /* store new IV */ movl RR0d, 0 * 4(%rsi); movl RL0d, 1 * 4(%rsi); movl RR1d, 2 * 4(%rsi); movl RL1d, 3 * 4(%rsi); movl RR2d, 4 * 4(%rsi); movl RL2d, 5 * 4(%rsi); popq %r15; CFI_POP(%r15); popq %r14; CFI_POP(%r14); popq %r13; CFI_POP(%r13); popq %r12; CFI_POP(%r12); popq %rbx; CFI_POP(%rbx); popq %rbp; CFI_POP(%rbp); EXIT_SYSV_FUNC ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_3des_amd64_cbc_dec,.-_gcry_3des_amd64_cbc_dec;) .align 16 .globl _gcry_3des_amd64_ctr_enc ELF(.type _gcry_3des_amd64_ctr_enc,@function;) _gcry_3des_amd64_ctr_enc: /* input: * %rdi: ctx, CTX * %rsi: dst (3 blocks) * %rdx: src (3 blocks) * %rcx: iv (64bit) */ CFI_STARTPROC(); ENTER_SYSV_FUNC_PARAMS_0_4 pushq %rbp; CFI_PUSH(%rbp); pushq %rbx; CFI_PUSH(%rbx); pushq %r12; CFI_PUSH(%r12); pushq %r13; CFI_PUSH(%r13); pushq %r14; CFI_PUSH(%r14); pushq %r15; CFI_PUSH(%r15); pushq %rsi; /*dst*/ CFI_PUSH(%rsi); pushq %rdx; /*src*/ CFI_PUSH(%rdx); movq %rcx, RW2; /* load IV and byteswap */ movq (RW2), RT0; bswapq RT0; movq RT0, RR0; /* construct IVs */ leaq 1(RT0), RR1; leaq 2(RT0), RR2; leaq 3(RT0), RT0; movq RR0, RL0; movq RR1, RL1; movq RR2, RL2; bswapq RT0; shrq $32, RL0; shrq $32, RL1; shrq $32, RL2; /* store new IV */ movq RT0, (RW2); call _gcry_3des_amd64_crypt_blk3; popq %rdx; /*src*/ CFI_POP_TMP_REG(); popq %rsi; /*dst*/ CFI_POP_TMP_REG(); bswapl RR0d; bswapl RL0d; bswapl RR1d; bswapl RL1d; bswapl RR2d; bswapl RL2d; xorl 0 * 4(%rdx), RR0d; xorl 1 * 4(%rdx), RL0d; xorl 2 * 4(%rdx), RR1d; xorl 3 * 4(%rdx), RL1d; xorl 4 * 4(%rdx), RR2d; 
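At this point in _gcry_3des_amd64_ctr_enc the three keystream blocks, obtained by encrypting the incremented big-endian counter values, are being XORed into the source data, and counter plus three has already been written back as the new IV. A rough C model of this 3-block CTR path follows; it is illustrative only, and `des3_encrypt_block` is a hypothetical stand-in for the single-block 3DES encryption implemented above.

    /* Rough C model of the 3-block CTR path; illustrative only, not the
     * libgcrypt API. */
    #include <stdint.h>

    static uint64_t load_be64(const uint8_t *p)
    {
        uint64_t v = 0;
        for (int i = 0; i < 8; i++)
            v = (v << 8) | p[i];
        return v;
    }

    static void store_be64(uint8_t *p, uint64_t v)
    {
        for (int i = 7; i >= 0; i--) {
            p[i] = (uint8_t)v;
            v >>= 8;
        }
    }

    void ctr_enc_3blocks(void *ctx, uint8_t *dst, const uint8_t *src,
                         uint8_t iv[8],
                         void (*des3_encrypt_block)(void *ctx, uint8_t *out,
                                                    const uint8_t *in))
    {
        uint64_t ctr = load_be64(iv);   /* big-endian 64-bit counter */
        uint8_t block[8], keystream[8];

        for (int i = 0; i < 3; i++) {
            store_be64(block, ctr + i);               /* counter block i */
            des3_encrypt_block(ctx, keystream, block);
            for (int j = 0; j < 8; j++)               /* XOR with input  */
                dst[8 * i + j] = src[8 * i + j] ^ keystream[j];
        }
        store_be64(iv, ctr + 3);                      /* updated IV      */
    }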
xorl 5 * 4(%rdx), RL2d; movl RR0d, 0 * 4(%rsi); movl RL0d, 1 * 4(%rsi); movl RR1d, 2 * 4(%rsi); movl RL1d, 3 * 4(%rsi); movl RR2d, 4 * 4(%rsi); movl RL2d, 5 * 4(%rsi); popq %r15; CFI_POP(%r15); popq %r14; CFI_POP(%r14); popq %r13; CFI_POP(%r13); popq %r12; CFI_POP(%r12); popq %rbx; CFI_POP(%rbx); popq %rbp; CFI_POP(%rbp); EXIT_SYSV_FUNC ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_3des_amd64_cbc_dec,.-_gcry_3des_amd64_cbc_dec;) .align 16 .globl _gcry_3des_amd64_cfb_dec ELF(.type _gcry_3des_amd64_cfb_dec,@function;) _gcry_3des_amd64_cfb_dec: /* input: * %rdi: ctx, CTX * %rsi: dst (3 blocks) * %rdx: src (3 blocks) * %rcx: iv (64bit) */ CFI_STARTPROC(); ENTER_SYSV_FUNC_PARAMS_0_4 pushq %rbp; CFI_PUSH(%rbp); pushq %rbx; CFI_PUSH(%rbx); pushq %r12; CFI_PUSH(%r12); pushq %r13; CFI_PUSH(%r13); pushq %r14; CFI_PUSH(%r14); pushq %r15; CFI_PUSH(%r15); pushq %rsi; /*dst*/ CFI_PUSH(%rsi); pushq %rdx; /*src*/ CFI_PUSH(%rdx); movq %rcx, RW2; /* Load input */ movl 0 * 4(RW2), RL0d; movl 1 * 4(RW2), RR0d; movl 0 * 4(%rdx), RL1d; movl 1 * 4(%rdx), RR1d; movl 2 * 4(%rdx), RL2d; movl 3 * 4(%rdx), RR2d; bswapl RL0d; bswapl RR0d; bswapl RL1d; bswapl RR1d; bswapl RL2d; bswapl RR2d; /* Update IV */ movq 4 * 4(%rdx), RW0; movq RW0, (RW2); call _gcry_3des_amd64_crypt_blk3; popq %rdx; /*src*/ CFI_POP_TMP_REG(); popq %rsi; /*dst*/ CFI_POP_TMP_REG(); bswapl RR0d; bswapl RL0d; bswapl RR1d; bswapl RL1d; bswapl RR2d; bswapl RL2d; xorl 0 * 4(%rdx), RR0d; xorl 1 * 4(%rdx), RL0d; xorl 2 * 4(%rdx), RR1d; xorl 3 * 4(%rdx), RL1d; xorl 4 * 4(%rdx), RR2d; xorl 5 * 4(%rdx), RL2d; movl RR0d, 0 * 4(%rsi); movl RL0d, 1 * 4(%rsi); movl RR1d, 2 * 4(%rsi); movl RL1d, 3 * 4(%rsi); movl RR2d, 4 * 4(%rsi); movl RL2d, 5 * 4(%rsi); popq %r15; CFI_POP(%r15); popq %r14; CFI_POP(%r14); popq %r13; CFI_POP(%r13); popq %r12; CFI_POP(%r12); popq %rbx; CFI_POP(%rbx); popq %rbp; CFI_POP(%rbp); EXIT_SYSV_FUNC ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_3des_amd64_cfb_dec,.-_gcry_3des_amd64_cfb_dec;) + +SECTION_RODATA +ELF(.type _des_amd64_data,@object;) + .align 16 +_des_amd64_data: .L_s1: .quad 0x0010100001010400, 0x0000000000000000 .quad 0x0000100000010000, 0x0010100001010404 .quad 0x0010100001010004, 0x0000100000010404 .quad 0x0000000000000004, 0x0000100000010000 .quad 0x0000000000000400, 0x0010100001010400 .quad 0x0010100001010404, 0x0000000000000400 .quad 0x0010000001000404, 0x0010100001010004 .quad 0x0010000001000000, 0x0000000000000004 .quad 0x0000000000000404, 0x0010000001000400 .quad 0x0010000001000400, 0x0000100000010400 .quad 0x0000100000010400, 0x0010100001010000 .quad 0x0010100001010000, 0x0010000001000404 .quad 0x0000100000010004, 0x0010000001000004 .quad 0x0010000001000004, 0x0000100000010004 .quad 0x0000000000000000, 0x0000000000000404 .quad 0x0000100000010404, 0x0010000001000000 .quad 0x0000100000010000, 0x0010100001010404 .quad 0x0000000000000004, 0x0010100001010000 .quad 0x0010100001010400, 0x0010000001000000 .quad 0x0010000001000000, 0x0000000000000400 .quad 0x0010100001010004, 0x0000100000010000 .quad 0x0000100000010400, 0x0010000001000004 .quad 0x0000000000000400, 0x0000000000000004 .quad 0x0010000001000404, 0x0000100000010404 .quad 0x0010100001010404, 0x0000100000010004 .quad 0x0010100001010000, 0x0010000001000404 .quad 0x0010000001000004, 0x0000000000000404 .quad 0x0000100000010404, 0x0010100001010400 .quad 0x0000000000000404, 0x0010000001000400 .quad 0x0010000001000400, 0x0000000000000000 .quad 0x0000100000010004, 0x0000100000010400 .quad 0x0000000000000000, 0x0010100001010004 .L_s2: .quad 0x0801080200100020, 
0x0800080000000000 .quad 0x0000080000000000, 0x0001080200100020 .quad 0x0001000000100000, 0x0000000200000020 .quad 0x0801000200100020, 0x0800080200000020 .quad 0x0800000200000020, 0x0801080200100020 .quad 0x0801080000100000, 0x0800000000000000 .quad 0x0800080000000000, 0x0001000000100000 .quad 0x0000000200000020, 0x0801000200100020 .quad 0x0001080000100000, 0x0001000200100020 .quad 0x0800080200000020, 0x0000000000000000 .quad 0x0800000000000000, 0x0000080000000000 .quad 0x0001080200100020, 0x0801000000100000 .quad 0x0001000200100020, 0x0800000200000020 .quad 0x0000000000000000, 0x0001080000100000 .quad 0x0000080200000020, 0x0801080000100000 .quad 0x0801000000100000, 0x0000080200000020 .quad 0x0000000000000000, 0x0001080200100020 .quad 0x0801000200100020, 0x0001000000100000 .quad 0x0800080200000020, 0x0801000000100000 .quad 0x0801080000100000, 0x0000080000000000 .quad 0x0801000000100000, 0x0800080000000000 .quad 0x0000000200000020, 0x0801080200100020 .quad 0x0001080200100020, 0x0000000200000020 .quad 0x0000080000000000, 0x0800000000000000 .quad 0x0000080200000020, 0x0801080000100000 .quad 0x0001000000100000, 0x0800000200000020 .quad 0x0001000200100020, 0x0800080200000020 .quad 0x0800000200000020, 0x0001000200100020 .quad 0x0001080000100000, 0x0000000000000000 .quad 0x0800080000000000, 0x0000080200000020 .quad 0x0800000000000000, 0x0801000200100020 .quad 0x0801080200100020, 0x0001080000100000 .L_s3: .quad 0x0000002000000208, 0x0000202008020200 .quad 0x0000000000000000, 0x0000200008020008 .quad 0x0000002008000200, 0x0000000000000000 .quad 0x0000202000020208, 0x0000002008000200 .quad 0x0000200000020008, 0x0000000008000008 .quad 0x0000000008000008, 0x0000200000020000 .quad 0x0000202008020208, 0x0000200000020008 .quad 0x0000200008020000, 0x0000002000000208 .quad 0x0000000008000000, 0x0000000000000008 .quad 0x0000202008020200, 0x0000002000000200 .quad 0x0000202000020200, 0x0000200008020000 .quad 0x0000200008020008, 0x0000202000020208 .quad 0x0000002008000208, 0x0000202000020200 .quad 0x0000200000020000, 0x0000002008000208 .quad 0x0000000000000008, 0x0000202008020208 .quad 0x0000002000000200, 0x0000000008000000 .quad 0x0000202008020200, 0x0000000008000000 .quad 0x0000200000020008, 0x0000002000000208 .quad 0x0000200000020000, 0x0000202008020200 .quad 0x0000002008000200, 0x0000000000000000 .quad 0x0000002000000200, 0x0000200000020008 .quad 0x0000202008020208, 0x0000002008000200 .quad 0x0000000008000008, 0x0000002000000200 .quad 0x0000000000000000, 0x0000200008020008 .quad 0x0000002008000208, 0x0000200000020000 .quad 0x0000000008000000, 0x0000202008020208 .quad 0x0000000000000008, 0x0000202000020208 .quad 0x0000202000020200, 0x0000000008000008 .quad 0x0000200008020000, 0x0000002008000208 .quad 0x0000002000000208, 0x0000200008020000 .quad 0x0000202000020208, 0x0000000000000008 .quad 0x0000200008020008, 0x0000202000020200 .L_s4: .quad 0x1008020000002001, 0x1000020800002001 .quad 0x1000020800002001, 0x0000000800000000 .quad 0x0008020800002000, 0x1008000800000001 .quad 0x1008000000000001, 0x1000020000002001 .quad 0x0000000000000000, 0x0008020000002000 .quad 0x0008020000002000, 0x1008020800002001 .quad 0x1000000800000001, 0x0000000000000000 .quad 0x0008000800000000, 0x1008000000000001 .quad 0x1000000000000001, 0x0000020000002000 .quad 0x0008000000000000, 0x1008020000002001 .quad 0x0000000800000000, 0x0008000000000000 .quad 0x1000020000002001, 0x0000020800002000 .quad 0x1008000800000001, 0x1000000000000001 .quad 0x0000020800002000, 0x0008000800000000 .quad 0x0000020000002000, 0x0008020800002000 .quad 
0x1008020800002001, 0x1000000800000001 .quad 0x0008000800000000, 0x1008000000000001 .quad 0x0008020000002000, 0x1008020800002001 .quad 0x1000000800000001, 0x0000000000000000 .quad 0x0000000000000000, 0x0008020000002000 .quad 0x0000020800002000, 0x0008000800000000 .quad 0x1008000800000001, 0x1000000000000001 .quad 0x1008020000002001, 0x1000020800002001 .quad 0x1000020800002001, 0x0000000800000000 .quad 0x1008020800002001, 0x1000000800000001 .quad 0x1000000000000001, 0x0000020000002000 .quad 0x1008000000000001, 0x1000020000002001 .quad 0x0008020800002000, 0x1008000800000001 .quad 0x1000020000002001, 0x0000020800002000 .quad 0x0008000000000000, 0x1008020000002001 .quad 0x0000000800000000, 0x0008000000000000 .quad 0x0000020000002000, 0x0008020800002000 .L_s5: .quad 0x0000001000000100, 0x0020001002080100 .quad 0x0020000002080000, 0x0420001002000100 .quad 0x0000000000080000, 0x0000001000000100 .quad 0x0400000000000000, 0x0020000002080000 .quad 0x0400001000080100, 0x0000000000080000 .quad 0x0020001002000100, 0x0400001000080100 .quad 0x0420001002000100, 0x0420000002080000 .quad 0x0000001000080100, 0x0400000000000000 .quad 0x0020000002000000, 0x0400000000080000 .quad 0x0400000000080000, 0x0000000000000000 .quad 0x0400001000000100, 0x0420001002080100 .quad 0x0420001002080100, 0x0020001002000100 .quad 0x0420000002080000, 0x0400001000000100 .quad 0x0000000000000000, 0x0420000002000000 .quad 0x0020001002080100, 0x0020000002000000 .quad 0x0420000002000000, 0x0000001000080100 .quad 0x0000000000080000, 0x0420001002000100 .quad 0x0000001000000100, 0x0020000002000000 .quad 0x0400000000000000, 0x0020000002080000 .quad 0x0420001002000100, 0x0400001000080100 .quad 0x0020001002000100, 0x0400000000000000 .quad 0x0420000002080000, 0x0020001002080100 .quad 0x0400001000080100, 0x0000001000000100 .quad 0x0020000002000000, 0x0420000002080000 .quad 0x0420001002080100, 0x0000001000080100 .quad 0x0420000002000000, 0x0420001002080100 .quad 0x0020000002080000, 0x0000000000000000 .quad 0x0400000000080000, 0x0420000002000000 .quad 0x0000001000080100, 0x0020001002000100 .quad 0x0400001000000100, 0x0000000000080000 .quad 0x0000000000000000, 0x0400000000080000 .quad 0x0020001002080100, 0x0400001000000100 .L_s6: .quad 0x0200000120000010, 0x0204000020000000 .quad 0x0000040000000000, 0x0204040120000010 .quad 0x0204000020000000, 0x0000000100000010 .quad 0x0204040120000010, 0x0004000000000000 .quad 0x0200040020000000, 0x0004040100000010 .quad 0x0004000000000000, 0x0200000120000010 .quad 0x0004000100000010, 0x0200040020000000 .quad 0x0200000020000000, 0x0000040100000010 .quad 0x0000000000000000, 0x0004000100000010 .quad 0x0200040120000010, 0x0000040000000000 .quad 0x0004040000000000, 0x0200040120000010 .quad 0x0000000100000010, 0x0204000120000010 .quad 0x0204000120000010, 0x0000000000000000 .quad 0x0004040100000010, 0x0204040020000000 .quad 0x0000040100000010, 0x0004040000000000 .quad 0x0204040020000000, 0x0200000020000000 .quad 0x0200040020000000, 0x0000000100000010 .quad 0x0204000120000010, 0x0004040000000000 .quad 0x0204040120000010, 0x0004000000000000 .quad 0x0000040100000010, 0x0200000120000010 .quad 0x0004000000000000, 0x0200040020000000 .quad 0x0200000020000000, 0x0000040100000010 .quad 0x0200000120000010, 0x0204040120000010 .quad 0x0004040000000000, 0x0204000020000000 .quad 0x0004040100000010, 0x0204040020000000 .quad 0x0000000000000000, 0x0204000120000010 .quad 0x0000000100000010, 0x0000040000000000 .quad 0x0204000020000000, 0x0004040100000010 .quad 0x0000040000000000, 0x0004000100000010 .quad 0x0200040120000010, 
0x0000000000000000 .quad 0x0204040020000000, 0x0200000020000000 .quad 0x0004000100000010, 0x0200040120000010 .L_s7: .quad 0x0002000000200000, 0x2002000004200002 .quad 0x2000000004000802, 0x0000000000000000 .quad 0x0000000000000800, 0x2000000004000802 .quad 0x2002000000200802, 0x0002000004200800 .quad 0x2002000004200802, 0x0002000000200000 .quad 0x0000000000000000, 0x2000000004000002 .quad 0x2000000000000002, 0x0000000004000000 .quad 0x2002000004200002, 0x2000000000000802 .quad 0x0000000004000800, 0x2002000000200802 .quad 0x2002000000200002, 0x0000000004000800 .quad 0x2000000004000002, 0x0002000004200000 .quad 0x0002000004200800, 0x2002000000200002 .quad 0x0002000004200000, 0x0000000000000800 .quad 0x2000000000000802, 0x2002000004200802 .quad 0x0002000000200800, 0x2000000000000002 .quad 0x0000000004000000, 0x0002000000200800 .quad 0x0000000004000000, 0x0002000000200800 .quad 0x0002000000200000, 0x2000000004000802 .quad 0x2000000004000802, 0x2002000004200002 .quad 0x2002000004200002, 0x2000000000000002 .quad 0x2002000000200002, 0x0000000004000000 .quad 0x0000000004000800, 0x0002000000200000 .quad 0x0002000004200800, 0x2000000000000802 .quad 0x2002000000200802, 0x0002000004200800 .quad 0x2000000000000802, 0x2000000004000002 .quad 0x2002000004200802, 0x0002000004200000 .quad 0x0002000000200800, 0x0000000000000000 .quad 0x2000000000000002, 0x2002000004200802 .quad 0x0000000000000000, 0x2002000000200802 .quad 0x0002000004200000, 0x0000000000000800 .quad 0x2000000004000002, 0x0000000004000800 .quad 0x0000000000000800, 0x2002000000200002 .L_s8: .quad 0x0100010410001000, 0x0000010000001000 .quad 0x0000000000040000, 0x0100010410041000 .quad 0x0100000010000000, 0x0100010410001000 .quad 0x0000000400000000, 0x0100000010000000 .quad 0x0000000400040000, 0x0100000010040000 .quad 0x0100010410041000, 0x0000010000041000 .quad 0x0100010010041000, 0x0000010400041000 .quad 0x0000010000001000, 0x0000000400000000 .quad 0x0100000010040000, 0x0100000410000000 .quad 0x0100010010001000, 0x0000010400001000 .quad 0x0000010000041000, 0x0000000400040000 .quad 0x0100000410040000, 0x0100010010041000 .quad 0x0000010400001000, 0x0000000000000000 .quad 0x0000000000000000, 0x0100000410040000 .quad 0x0100000410000000, 0x0100010010001000 .quad 0x0000010400041000, 0x0000000000040000 .quad 0x0000010400041000, 0x0000000000040000 .quad 0x0100010010041000, 0x0000010000001000 .quad 0x0000000400000000, 0x0100000410040000 .quad 0x0000010000001000, 0x0000010400041000 .quad 0x0100010010001000, 0x0000000400000000 .quad 0x0100000410000000, 0x0100000010040000 .quad 0x0100000410040000, 0x0100000010000000 .quad 0x0000000000040000, 0x0100010410001000 .quad 0x0000000000000000, 0x0100010410041000 .quad 0x0000000400040000, 0x0100000410000000 .quad 0x0100000010040000, 0x0100010010001000 .quad 0x0100010410001000, 0x0000000000000000 .quad 0x0100010410041000, 0x0000010000041000 .quad 0x0000010000041000, 0x0000010400001000 .quad 0x0000010400001000, 0x0000000400040000 .quad 0x0100000010000000, 0x0100010010041000 #endif #endif diff --git a/cipher/rijndael-ssse3-amd64-asm.S b/cipher/rijndael-ssse3-amd64-asm.S index 0c5c8f46..5153cb28 100644 --- a/cipher/rijndael-ssse3-amd64-asm.S +++ b/cipher/rijndael-ssse3-amd64-asm.S @@ -1,876 +1,879 @@ /* SSSE3 vector permutation AES for Libgcrypt * Copyright (C) 2014-2017 Jussi Kivilinna * * This file is part of Libgcrypt. 
* * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . * * * The code is based on the public domain library libvpaes version 0.5 * available at http://crypto.stanford.edu/vpaes/ and which carries * this notice: * * libvpaes: constant-time SSSE3 AES encryption and decryption. * version 0.5 * * By Mike Hamburg, Stanford University, 2009. Public domain. * I wrote essentially all of this code. I did not write the test * vectors; they are the NIST known answer tests. I hereby release all * the code and documentation here that I wrote into the public domain. * * This is an implementation of AES following my paper, * "Accelerating AES with Vector Permute Instructions * CHES 2009; http://shiftleft.org/papers/vector_aes/ */ #if defined(__x86_64__) #include #if defined(HAVE_GCC_INLINE_ASM_SSSE3) && \ (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) #include "asm-common-amd64.h" .text ## ## _gcry_aes_ssse3_enc_preload ## .align 16 ELF(.type _gcry_aes_ssse3_enc_preload,@function) .globl _gcry_aes_ssse3_enc_preload _gcry_aes_ssse3_enc_preload: CFI_STARTPROC(); ENTER_SYSV_FUNC_PARAMS_0_4 lea .Laes_consts(%rip), %rax movdqa (%rax), %xmm9 # 0F movdqa .Lk_inv (%rax), %xmm10 # inv movdqa .Lk_inv+16(%rax), %xmm11 # inva movdqa .Lk_sb1 (%rax), %xmm13 # sb1u movdqa .Lk_sb1+16(%rax), %xmm12 # sb1t movdqa .Lk_sb2 (%rax), %xmm15 # sb2u movdqa .Lk_sb2+16(%rax), %xmm14 # sb2t EXIT_SYSV_FUNC ret_spec_stop CFI_ENDPROC(); ELF(.size _gcry_aes_ssse3_enc_preload,.-_gcry_aes_ssse3_enc_preload) ## ## _gcry_aes_ssse3_dec_preload ## .align 16 ELF(.type _gcry_aes_ssse3_dec_preload,@function) .globl _gcry_aes_ssse3_dec_preload _gcry_aes_ssse3_dec_preload: CFI_STARTPROC(); ENTER_SYSV_FUNC_PARAMS_0_4 lea .Laes_consts(%rip), %rax movdqa (%rax), %xmm9 # 0F movdqa .Lk_inv (%rax), %xmm10 # inv movdqa .Lk_inv+16(%rax), %xmm11 # inva movdqa .Lk_dsb9 (%rax), %xmm13 # sb9u movdqa .Lk_dsb9+16(%rax), %xmm12 # sb9t movdqa .Lk_dsbd (%rax), %xmm15 # sbdu movdqa .Lk_dsbb (%rax), %xmm14 # sbbu movdqa .Lk_dsbe (%rax), %xmm8 # sbeu EXIT_SYSV_FUNC ret_spec_stop CFI_ENDPROC(); ELF(.size _gcry_aes_ssse3_dec_preload,.-_gcry_aes_ssse3_dec_preload) ## ## Constant-time SSSE3 AES core implementation. ## ## By Mike Hamburg (Stanford University), 2009 ## Public domain. ## ## ## _aes_encrypt_core ## ## AES-encrypt %xmm0. 
## ## Inputs: ## %xmm0 = input ## %xmm9-%xmm15 as in .Laes_preheat ## (%rdi) = scheduled keys ## %rsi = nrounds ## ## Output in %xmm0 ## Clobbers %xmm1-%xmm4, %r9, %r11, %rax, %rcx, %rdx ## Preserves %xmm6 - %xmm7 so you get some local vectors ## ## .align 16 ELF(.type _gcry_aes_ssse3_encrypt_core,@function) .globl _gcry_aes_ssse3_encrypt_core _gcry_aes_ssse3_encrypt_core: _aes_encrypt_core: CFI_STARTPROC(); ENTER_SYSV_FUNC_PARAMS_0_4 mov %rdi, %rdx leaq -1(%rsi), %rax lea .Laes_consts(%rip), %rcx leaq .Lk_mc_backward(%rcx), %rdi mov $16, %rsi movdqa .Lk_ipt (%rcx), %xmm2 # iptlo movdqa %xmm9, %xmm1 pandn %xmm0, %xmm1 psrld $4, %xmm1 pand %xmm9, %xmm0 pshufb %xmm0, %xmm2 movdqa .Lk_ipt+16(%rcx), %xmm0 # ipthi pshufb %xmm1, %xmm0 pxor (%rdx),%xmm2 pxor %xmm2, %xmm0 add $16, %rdx jmp .Laes_entry .align 8 .Laes_loop: # middle of middle round movdqa %xmm13, %xmm4 # 4 : sb1u pshufb %xmm2, %xmm4 # 4 = sb1u pxor (%rdx), %xmm4 # 4 = sb1u + k movdqa %xmm12, %xmm0 # 0 : sb1t pshufb %xmm3, %xmm0 # 0 = sb1t pxor %xmm4, %xmm0 # 0 = A movdqa %xmm15, %xmm4 # 4 : sb2u pshufb %xmm2, %xmm4 # 4 = sb2u movdqa .Lk_mc_forward-.Lk_mc_backward(%rsi,%rdi), %xmm1 movdqa %xmm14, %xmm2 # 2 : sb2t pshufb %xmm3, %xmm2 # 2 = sb2t pxor %xmm4, %xmm2 # 2 = 2A movdqa %xmm0, %xmm3 # 3 = A pshufb %xmm1, %xmm0 # 0 = B pxor %xmm2, %xmm0 # 0 = 2A+B pshufb (%rsi,%rdi), %xmm3 # 3 = D lea 16(%esi),%esi # next mc pxor %xmm0, %xmm3 # 3 = 2A+B+D lea 16(%rdx),%rdx # next key pshufb %xmm1, %xmm0 # 0 = 2B+C pxor %xmm3, %xmm0 # 0 = 2A+3B+C+D and $48, %rsi # ... mod 4 dec %rax # nr-- .Laes_entry: # top of round movdqa %xmm9, %xmm1 # 1 : i pandn %xmm0, %xmm1 # 1 = i<<4 psrld $4, %xmm1 # 1 = i pand %xmm9, %xmm0 # 0 = k movdqa %xmm11, %xmm2 # 2 : a/k pshufb %xmm0, %xmm2 # 2 = a/k pxor %xmm1, %xmm0 # 0 = j movdqa %xmm10, %xmm3 # 3 : 1/i pshufb %xmm1, %xmm3 # 3 = 1/i pxor %xmm2, %xmm3 # 3 = iak = 1/i + a/k movdqa %xmm10, %xmm4 # 4 : 1/j pshufb %xmm0, %xmm4 # 4 = 1/j pxor %xmm2, %xmm4 # 4 = jak = 1/j + a/k movdqa %xmm10, %xmm2 # 2 : 1/iak pshufb %xmm3, %xmm2 # 2 = 1/iak pxor %xmm0, %xmm2 # 2 = io movdqa %xmm10, %xmm3 # 3 : 1/jak pshufb %xmm4, %xmm3 # 3 = 1/jak pxor %xmm1, %xmm3 # 3 = jo jnz .Laes_loop # middle of last round movdqa .Lk_sbo(%rcx), %xmm4 # 3 : sbou pshufb %xmm2, %xmm4 # 4 = sbou pxor (%rdx), %xmm4 # 4 = sb1u + k movdqa .Lk_sbo+16(%rcx), %xmm0 # 0 : sbot pshufb %xmm3, %xmm0 # 0 = sb1t pxor %xmm4, %xmm0 # 0 = A pshufb .Lk_sr(%rsi,%rcx), %xmm0 EXIT_SYSV_FUNC ret_spec_stop CFI_ENDPROC(); ELF(.size _aes_encrypt_core,.-_aes_encrypt_core) ## ## Decryption core ## ## Same API as encryption core. 
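Both the encryption core above and the decryption core that follows lean on the same vector-permute building block: an 8-bit table lookup is split into two 16-entry nibble lookups done with pshufb and recombined with XOR, as in the .Lk_ipt input transform and .Laes_schedule_transform. A minimal SSSE3 intrinsics sketch of that building block follows, with illustrative names that are not part of the patch.

    /* Minimal SSSE3 sketch of the nibble-split table lookup; illustrative
     * only, not part of the patch.  Compile with -mssse3. */
    #include <tmmintrin.h>

    __m128i lookup_16x8(__m128i x, __m128i lo_tab, __m128i hi_tab)
    {
        const __m128i mask0f = _mm_set1_epi8(0x0f);
        /* Clear the low nibbles first, then shift; only cleared bits cross
         * byte lanes, so no extra mask is needed afterwards. */
        __m128i hi = _mm_srli_epi32(_mm_andnot_si128(mask0f, x), 4);
        __m128i lo = _mm_and_si128(x, mask0f);
        /* Each nibble selects one byte from a 16-byte table (pshufb). */
        return _mm_xor_si128(_mm_shuffle_epi8(lo_tab, lo),
                             _mm_shuffle_epi8(hi_tab, hi));
    }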
## .align 16 .globl _gcry_aes_ssse3_decrypt_core ELF(.type _gcry_aes_ssse3_decrypt_core,@function) _gcry_aes_ssse3_decrypt_core: _aes_decrypt_core: CFI_STARTPROC(); ENTER_SYSV_FUNC_PARAMS_0_4 mov %rdi, %rdx lea .Laes_consts(%rip), %rcx subl $1, %esi movl %esi, %eax shll $4, %esi xorl $48, %esi andl $48, %esi movdqa .Lk_dipt (%rcx), %xmm2 # iptlo movdqa %xmm9, %xmm1 pandn %xmm0, %xmm1 psrld $4, %xmm1 pand %xmm9, %xmm0 pshufb %xmm0, %xmm2 movdqa .Lk_dipt+16(%rcx), %xmm0 # ipthi pshufb %xmm1, %xmm0 pxor (%rdx), %xmm2 pxor %xmm2, %xmm0 movdqa .Lk_mc_forward+48(%rcx), %xmm5 lea 16(%rdx), %rdx neg %rax jmp .Laes_dec_entry .align 16 .Laes_dec_loop: ## ## Inverse mix columns ## movdqa %xmm13, %xmm4 # 4 : sb9u pshufb %xmm2, %xmm4 # 4 = sb9u pxor (%rdx), %xmm4 movdqa %xmm12, %xmm0 # 0 : sb9t pshufb %xmm3, %xmm0 # 0 = sb9t movdqa .Lk_dsbd+16(%rcx),%xmm1 # 1 : sbdt pxor %xmm4, %xmm0 # 0 = ch lea 16(%rdx), %rdx # next round key pshufb %xmm5, %xmm0 # MC ch movdqa %xmm15, %xmm4 # 4 : sbdu pshufb %xmm2, %xmm4 # 4 = sbdu pxor %xmm0, %xmm4 # 4 = ch pshufb %xmm3, %xmm1 # 1 = sbdt pxor %xmm4, %xmm1 # 1 = ch pshufb %xmm5, %xmm1 # MC ch movdqa %xmm14, %xmm4 # 4 : sbbu pshufb %xmm2, %xmm4 # 4 = sbbu inc %rax # nr-- pxor %xmm1, %xmm4 # 4 = ch movdqa .Lk_dsbb+16(%rcx),%xmm0 # 0 : sbbt pshufb %xmm3, %xmm0 # 0 = sbbt pxor %xmm4, %xmm0 # 0 = ch pshufb %xmm5, %xmm0 # MC ch movdqa %xmm8, %xmm4 # 4 : sbeu pshufb %xmm2, %xmm4 # 4 = sbeu pshufd $0x93, %xmm5, %xmm5 pxor %xmm0, %xmm4 # 4 = ch movdqa .Lk_dsbe+16(%rcx),%xmm0 # 0 : sbet pshufb %xmm3, %xmm0 # 0 = sbet pxor %xmm4, %xmm0 # 0 = ch .Laes_dec_entry: # top of round movdqa %xmm9, %xmm1 # 1 : i pandn %xmm0, %xmm1 # 1 = i<<4 psrld $4, %xmm1 # 1 = i pand %xmm9, %xmm0 # 0 = k movdqa %xmm11, %xmm2 # 2 : a/k pshufb %xmm0, %xmm2 # 2 = a/k pxor %xmm1, %xmm0 # 0 = j movdqa %xmm10, %xmm3 # 3 : 1/i pshufb %xmm1, %xmm3 # 3 = 1/i pxor %xmm2, %xmm3 # 3 = iak = 1/i + a/k movdqa %xmm10, %xmm4 # 4 : 1/j pshufb %xmm0, %xmm4 # 4 = 1/j pxor %xmm2, %xmm4 # 4 = jak = 1/j + a/k movdqa %xmm10, %xmm2 # 2 : 1/iak pshufb %xmm3, %xmm2 # 2 = 1/iak pxor %xmm0, %xmm2 # 2 = io movdqa %xmm10, %xmm3 # 3 : 1/jak pshufb %xmm4, %xmm3 # 3 = 1/jak pxor %xmm1, %xmm3 # 3 = jo jnz .Laes_dec_loop # middle of last round movdqa .Lk_dsbo(%rcx), %xmm4 # 3 : sbou pshufb %xmm2, %xmm4 # 4 = sbou pxor (%rdx), %xmm4 # 4 = sb1u + k movdqa .Lk_dsbo+16(%rcx), %xmm0 # 0 : sbot pshufb %xmm3, %xmm0 # 0 = sb1t pxor %xmm4, %xmm0 # 0 = A pshufb .Lk_sr(%rsi,%rcx), %xmm0 EXIT_SYSV_FUNC ret_spec_stop CFI_ENDPROC(); ELF(.size _aes_decrypt_core,.-_aes_decrypt_core) ######################################################## ## ## ## AES key schedule ## ## ## ######################################################## .align 16 .globl _gcry_aes_ssse3_schedule_core ELF(.type _gcry_aes_ssse3_schedule_core,@function) _gcry_aes_ssse3_schedule_core: _aes_schedule_core: # rdi = key # rsi = size in bits # rdx = buffer # rcx = direction. 
0=encrypt, 1=decrypt # r8 = rotoffs CFI_STARTPROC(); ENTER_SYSV_FUNC_PARAMS_5 # load the tables lea .Laes_consts(%rip), %r10 movdqa (%r10), %xmm9 # 0F movdqa .Lk_inv (%r10), %xmm10 # inv movdqa .Lk_inv+16(%r10), %xmm11 # inva movdqa .Lk_sb1 (%r10), %xmm13 # sb1u movdqa .Lk_sb1+16(%r10), %xmm12 # sb1t movdqa .Lk_sb2 (%r10), %xmm15 # sb2u movdqa .Lk_sb2+16(%r10), %xmm14 # sb2t movdqa .Lk_rcon(%r10), %xmm8 # load rcon movdqu (%rdi), %xmm0 # load key (unaligned) # input transform movdqu %xmm0, %xmm3 lea .Lk_ipt(%r10), %r11 call .Laes_schedule_transform movdqu %xmm0, %xmm7 test %rcx, %rcx jnz .Laes_schedule_am_decrypting # encrypting, output zeroth round key after transform movdqa %xmm0, (%rdx) jmp .Laes_schedule_go .Laes_schedule_am_decrypting: # decrypting, output zeroth round key after shiftrows pshufb .Lk_sr(%r8,%r10),%xmm3 movdqa %xmm3, (%rdx) xor $48, %r8 .Laes_schedule_go: cmp $192, %rsi je .Laes_schedule_192 cmp $256, %rsi je .Laes_schedule_256 # 128: fall though ## ## .Laes_schedule_128 ## ## 128-bit specific part of key schedule. ## ## This schedule is really simple, because all its parts ## are accomplished by the subroutines. ## .Laes_schedule_128: mov $10, %rsi .Laes_schedule_128_L: call .Laes_schedule_round dec %rsi jz .Laes_schedule_mangle_last call .Laes_schedule_mangle # write output jmp .Laes_schedule_128_L ## ## .Laes_schedule_192 ## ## 192-bit specific part of key schedule. ## ## The main body of this schedule is the same as the 128-bit ## schedule, but with more smearing. The long, high side is ## stored in %xmm7 as before, and the short, low side is in ## the high bits of %xmm6. ## ## This schedule is somewhat nastier, however, because each ## round produces 192 bits of key material, or 1.5 round keys. ## Therefore, on each cycle we do 2 rounds and produce 3 round ## keys. ## .Laes_schedule_192: movdqu 8(%rdi),%xmm0 # load key part 2 (very unaligned) call .Laes_schedule_transform # input transform pshufd $0x0E, %xmm0, %xmm6 pslldq $8, %xmm6 # clobber low side with zeros mov $4, %rsi .Laes_schedule_192_L: call .Laes_schedule_round palignr $8,%xmm6,%xmm0 call .Laes_schedule_mangle # save key n call .Laes_schedule_192_smear call .Laes_schedule_mangle # save key n+1 call .Laes_schedule_round dec %rsi jz .Laes_schedule_mangle_last call .Laes_schedule_mangle # save key n+2 call .Laes_schedule_192_smear jmp .Laes_schedule_192_L ## ## .Laes_schedule_192_smear ## ## Smear the short, low side in the 192-bit key schedule. ## ## Inputs: ## %xmm7: high side, b a x y ## %xmm6: low side, d c 0 0 ## %xmm13: 0 ## ## Outputs: ## %xmm6: b+c+d b+c 0 0 ## %xmm0: b+c+d b+c b a ## .Laes_schedule_192_smear: pshufd $0x80, %xmm6, %xmm0 # d c 0 0 -> c 0 0 0 pxor %xmm0, %xmm6 # -> c+d c 0 0 pshufd $0xFE, %xmm7, %xmm0 # b a _ _ -> b b b a pxor %xmm6, %xmm0 # -> b+c+d b+c b a pshufd $0x0E, %xmm0, %xmm6 pslldq $8, %xmm6 # clobber low side with zeros ret_spec_stop ## ## .Laes_schedule_256 ## ## 256-bit specific part of key schedule. ## ## The structure here is very similar to the 128-bit ## schedule, but with an additional 'low side' in ## %xmm6. The low side's rounds are the same as the ## high side's, except no rcon and no rotation. ## .Laes_schedule_256: movdqu 16(%rdi),%xmm0 # load key part 2 (unaligned) call .Laes_schedule_transform # input transform mov $7, %rsi .Laes_schedule_256_L: call .Laes_schedule_mangle # output low result movdqa %xmm0, %xmm6 # save cur_lo in xmm6 # high round call .Laes_schedule_round dec %rsi jz .Laes_schedule_mangle_last call .Laes_schedule_mangle # low round. 
swap xmm7 and xmm6 pshufd $0xFF, %xmm0, %xmm0 movdqa %xmm7, %xmm5 movdqa %xmm6, %xmm7 call .Laes_schedule_low_round movdqa %xmm5, %xmm7 jmp .Laes_schedule_256_L ## ## .Laes_schedule_round ## ## Runs one main round of the key schedule on %xmm0, %xmm7 ## ## Specifically, runs subbytes on the high dword of %xmm0 ## then rotates it by one byte and xors into the low dword of ## %xmm7. ## ## Adds rcon from low byte of %xmm8, then rotates %xmm8 for ## next rcon. ## ## Smears the dwords of %xmm7 by xoring the low into the ## second low, result into third, result into highest. ## ## Returns results in %xmm7 = %xmm0. ## Clobbers %xmm1-%xmm4, %r11. ## .Laes_schedule_round: # extract rcon from xmm8 pxor %xmm1, %xmm1 palignr $15, %xmm8, %xmm1 palignr $15, %xmm8, %xmm8 pxor %xmm1, %xmm7 # rotate pshufd $0xFF, %xmm0, %xmm0 palignr $1, %xmm0, %xmm0 # fall through... # low round: same as high round, but no rotation and no rcon. .Laes_schedule_low_round: # smear xmm7 movdqa %xmm7, %xmm1 pslldq $4, %xmm7 pxor %xmm1, %xmm7 movdqa %xmm7, %xmm1 pslldq $8, %xmm7 pxor %xmm1, %xmm7 pxor .Lk_s63(%r10), %xmm7 # subbytes movdqa %xmm9, %xmm1 pandn %xmm0, %xmm1 psrld $4, %xmm1 # 1 = i pand %xmm9, %xmm0 # 0 = k movdqa %xmm11, %xmm2 # 2 : a/k pshufb %xmm0, %xmm2 # 2 = a/k pxor %xmm1, %xmm0 # 0 = j movdqa %xmm10, %xmm3 # 3 : 1/i pshufb %xmm1, %xmm3 # 3 = 1/i pxor %xmm2, %xmm3 # 3 = iak = 1/i + a/k movdqa %xmm10, %xmm4 # 4 : 1/j pshufb %xmm0, %xmm4 # 4 = 1/j pxor %xmm2, %xmm4 # 4 = jak = 1/j + a/k movdqa %xmm10, %xmm2 # 2 : 1/iak pshufb %xmm3, %xmm2 # 2 = 1/iak pxor %xmm0, %xmm2 # 2 = io movdqa %xmm10, %xmm3 # 3 : 1/jak pshufb %xmm4, %xmm3 # 3 = 1/jak pxor %xmm1, %xmm3 # 3 = jo movdqa .Lk_sb1(%r10), %xmm4 # 4 : sbou pshufb %xmm2, %xmm4 # 4 = sbou movdqa .Lk_sb1+16(%r10), %xmm0 # 0 : sbot pshufb %xmm3, %xmm0 # 0 = sb1t pxor %xmm4, %xmm0 # 0 = sbox output # add in smeared stuff pxor %xmm7, %xmm0 movdqa %xmm0, %xmm7 ret_spec_stop ## ## .Laes_schedule_transform ## ## Linear-transform %xmm0 according to tables at (%r11) ## ## Requires that %xmm9 = 0x0F0F... as in preheat ## Output in %xmm0 ## Clobbers %xmm1, %xmm2 ## .Laes_schedule_transform: movdqa %xmm9, %xmm1 pandn %xmm0, %xmm1 psrld $4, %xmm1 pand %xmm9, %xmm0 movdqa (%r11), %xmm2 # lo pshufb %xmm0, %xmm2 movdqa 16(%r11), %xmm0 # hi pshufb %xmm1, %xmm0 pxor %xmm2, %xmm0 ret_spec_stop ## ## .Laes_schedule_mangle ## ## Mangle xmm0 from (basis-transformed) standard version ## to our version. 
## ## On encrypt, ## xor with 0x63 ## multiply by circulant 0,1,1,1 ## apply shiftrows transform ## ## On decrypt, ## xor with 0x63 ## multiply by 'inverse mixcolumns' circulant E,B,D,9 ## deskew ## apply shiftrows transform ## ## ## Writes out to (%rdx), and increments or decrements it ## Keeps track of round number mod 4 in %r8 ## Preserves xmm0 ## Clobbers xmm1-xmm5 ## .Laes_schedule_mangle: movdqa %xmm0, %xmm4 # save xmm0 for later movdqa .Lk_mc_forward(%r10),%xmm5 test %rcx, %rcx jnz .Laes_schedule_mangle_dec # encrypting add $16, %rdx pxor .Lk_s63(%r10),%xmm4 pshufb %xmm5, %xmm4 movdqa %xmm4, %xmm3 pshufb %xmm5, %xmm4 pxor %xmm4, %xmm3 pshufb %xmm5, %xmm4 pxor %xmm4, %xmm3 jmp .Laes_schedule_mangle_both .Laes_schedule_mangle_dec: lea .Lk_dks_1(%r10), %r11 # first table: *9 call .Laes_schedule_transform movdqa %xmm0, %xmm3 pshufb %xmm5, %xmm3 add $32, %r11 # next table: *B call .Laes_schedule_transform pxor %xmm0, %xmm3 pshufb %xmm5, %xmm3 add $32, %r11 # next table: *D call .Laes_schedule_transform pxor %xmm0, %xmm3 pshufb %xmm5, %xmm3 add $32, %r11 # next table: *E call .Laes_schedule_transform pxor %xmm0, %xmm3 pshufb %xmm5, %xmm3 movdqa %xmm4, %xmm0 # restore %xmm0 add $-16, %rdx .Laes_schedule_mangle_both: pshufb .Lk_sr(%r8,%r10),%xmm3 add $-16, %r8 and $48, %r8 movdqa %xmm3, (%rdx) ret_spec_stop ## ## .Laes_schedule_mangle_last ## ## Mangler for last round of key schedule ## Mangles %xmm0 ## when encrypting, outputs out(%xmm0) ^ 63 ## when decrypting, outputs unskew(%xmm0) ## ## Always called right before return... jumps to cleanup and exits ## .Laes_schedule_mangle_last: # schedule last round key from xmm0 lea .Lk_deskew(%r10),%r11 # prepare to deskew test %rcx, %rcx jnz .Laes_schedule_mangle_last_dec # encrypting pshufb .Lk_sr(%r8,%r10),%xmm0 # output permute lea .Lk_opt(%r10), %r11 # prepare to output transform add $32, %rdx .Laes_schedule_mangle_last_dec: add $-16, %rdx pxor .Lk_s63(%r10), %xmm0 call .Laes_schedule_transform # output transform movdqa %xmm0, (%rdx) # save last key #_aes_cleanup pxor %xmm0, %xmm0 pxor %xmm1, %xmm1 pxor %xmm2, %xmm2 pxor %xmm3, %xmm3 pxor %xmm4, %xmm4 pxor %xmm5, %xmm5 pxor %xmm6, %xmm6 pxor %xmm7, %xmm7 pxor %xmm8, %xmm8 EXIT_SYSV_FUNC ret_spec_stop CFI_ENDPROC(); ELF(.size _gcry_aes_ssse3_schedule_core,.-_gcry_aes_ssse3_schedule_core) ######################################################## ## ## ## Constants ## ## ## ######################################################## +SECTION_RODATA + .align 16 -ELF(.type _aes_consts,@object) +ELF(.type _aes_ssse3_consts,@object) +_aes_ssse3_consts: .Laes_consts: _aes_consts: # s0F .Lk_s0F = .-.Laes_consts .quad 0x0F0F0F0F0F0F0F0F .quad 0x0F0F0F0F0F0F0F0F # input transform (lo, hi) .Lk_ipt = .-.Laes_consts .quad 0xC2B2E8985A2A7000 .quad 0xCABAE09052227808 .quad 0x4C01307D317C4D00 .quad 0xCD80B1FCB0FDCC81 # inv, inva .Lk_inv = .-.Laes_consts .quad 0x0E05060F0D080180 .quad 0x040703090A0B0C02 .quad 0x01040A060F0B0780 .quad 0x030D0E0C02050809 # sb1u, sb1t .Lk_sb1 = .-.Laes_consts .quad 0xB19BE18FCB503E00 .quad 0xA5DF7A6E142AF544 .quad 0x3618D415FAE22300 .quad 0x3BF7CCC10D2ED9EF # sb2u, sb2t .Lk_sb2 = .-.Laes_consts .quad 0xE27A93C60B712400 .quad 0x5EB7E955BC982FCD .quad 0x69EB88400AE12900 .quad 0xC2A163C8AB82234A # sbou, sbot .Lk_sbo = .-.Laes_consts .quad 0xD0D26D176FBDC700 .quad 0x15AABF7AC502A878 .quad 0xCFE474A55FBB6A00 .quad 0x8E1E90D1412B35FA # mc_forward .Lk_mc_forward = .-.Laes_consts .quad 0x0407060500030201 .quad 0x0C0F0E0D080B0A09 .quad 0x080B0A0904070605 .quad 0x000302010C0F0E0D .quad 
0x0C0F0E0D080B0A09 .quad 0x0407060500030201 .quad 0x000302010C0F0E0D .quad 0x080B0A0904070605 # mc_backward .Lk_mc_backward = .-.Laes_consts .quad 0x0605040702010003 .quad 0x0E0D0C0F0A09080B .quad 0x020100030E0D0C0F .quad 0x0A09080B06050407 .quad 0x0E0D0C0F0A09080B .quad 0x0605040702010003 .quad 0x0A09080B06050407 .quad 0x020100030E0D0C0F # sr .Lk_sr = .-.Laes_consts .quad 0x0706050403020100 .quad 0x0F0E0D0C0B0A0908 .quad 0x030E09040F0A0500 .quad 0x0B06010C07020D08 .quad 0x0F060D040B020900 .quad 0x070E050C030A0108 .quad 0x0B0E0104070A0D00 .quad 0x0306090C0F020508 # rcon .Lk_rcon = .-.Laes_consts .quad 0x1F8391B9AF9DEEB6 .quad 0x702A98084D7C7D81 # s63: all equal to 0x63 transformed .Lk_s63 = .-.Laes_consts .quad 0x5B5B5B5B5B5B5B5B .quad 0x5B5B5B5B5B5B5B5B # output transform .Lk_opt = .-.Laes_consts .quad 0xFF9F4929D6B66000 .quad 0xF7974121DEBE6808 .quad 0x01EDBD5150BCEC00 .quad 0xE10D5DB1B05C0CE0 # deskew tables: inverts the sbox's 'skew' .Lk_deskew = .-.Laes_consts .quad 0x07E4A34047A4E300 .quad 0x1DFEB95A5DBEF91A .quad 0x5F36B5DC83EA6900 .quad 0x2841C2ABF49D1E77 ## ## Decryption stuff ## Key schedule constants ## # decryption key schedule: x -> invskew x*9 .Lk_dks_1 = .-.Laes_consts .quad 0xB6116FC87ED9A700 .quad 0x4AED933482255BFC .quad 0x4576516227143300 .quad 0x8BB89FACE9DAFDCE # decryption key schedule: invskew x*9 -> invskew x*D .Lk_dks_2 = .-.Laes_consts .quad 0x27438FEBCCA86400 .quad 0x4622EE8AADC90561 .quad 0x815C13CE4F92DD00 .quad 0x73AEE13CBD602FF2 # decryption key schedule: invskew x*D -> invskew x*B .Lk_dks_3 = .-.Laes_consts .quad 0x03C4C50201C6C700 .quad 0xF83F3EF9FA3D3CFB .quad 0xEE1921D638CFF700 .quad 0xA5526A9D7384BC4B # decryption key schedule: invskew x*B -> invskew x*E + 0x63 .Lk_dks_4 = .-.Laes_consts .quad 0xE3C390B053732000 .quad 0xA080D3F310306343 .quad 0xA0CA214B036982E8 .quad 0x2F45AEC48CE60D67 ## ## Decryption stuff ## Round function constants ## # decryption input transform .Lk_dipt = .-.Laes_consts .quad 0x0F505B040B545F00 .quad 0x154A411E114E451A .quad 0x86E383E660056500 .quad 0x12771772F491F194 # decryption sbox output *9*u, *9*t .Lk_dsb9 = .-.Laes_consts .quad 0x851C03539A86D600 .quad 0xCAD51F504F994CC9 .quad 0xC03B1789ECD74900 .quad 0x725E2C9EB2FBA565 # decryption sbox output *D*u, *D*t .Lk_dsbd = .-.Laes_consts .quad 0x7D57CCDFE6B1A200 .quad 0xF56E9B13882A4439 .quad 0x3CE2FAF724C6CB00 .quad 0x2931180D15DEEFD3 # decryption sbox output *B*u, *B*t .Lk_dsbb = .-.Laes_consts .quad 0xD022649296B44200 .quad 0x602646F6B0F2D404 .quad 0xC19498A6CD596700 .quad 0xF3FF0C3E3255AA6B # decryption sbox output *E*u, *E*t .Lk_dsbe = .-.Laes_consts .quad 0x46F2929626D4D000 .quad 0x2242600464B4F6B0 .quad 0x0C55A6CDFFAAC100 .quad 0x9467F36B98593E32 # decryption sbox final output .Lk_dsbo = .-.Laes_consts .quad 0x1387EA537EF94000 .quad 0xC7AA6DB9D4943E2D .quad 0x12D7560F93441D00 .quad 0xCA4B8159D8C58E9C ELF(.size _aes_consts,.-_aes_consts) #endif #endif diff --git a/cipher/rijndael-vaes-avx2-amd64.S b/cipher/rijndael-vaes-avx2-amd64.S index 13fe7ab0..aceccb96 100644 --- a/cipher/rijndael-vaes-avx2-amd64.S +++ b/cipher/rijndael-vaes-avx2-amd64.S @@ -1,3408 +1,3410 @@ /* VAES/AVX2 AMD64 accelerated AES for Libgcrypt * Copyright (C) 2021 Jussi Kivilinna * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. 
* * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . */ #if defined(__x86_64__) #include #if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && \ defined(ENABLE_AESNI_SUPPORT) && defined(ENABLE_AVX2_SUPPORT) && \ defined(HAVE_GCC_INLINE_ASM_VAES_VPCLMUL) #include "asm-common-amd64.h" .text /********************************************************************** helper macros **********************************************************************/ #define no(...) /*_*/ #define yes(...) __VA_ARGS__ #define AES_OP8(op, key, b0, b1, b2, b3, b4, b5, b6, b7) \ op key, b0, b0; \ op key, b1, b1; \ op key, b2, b2; \ op key, b3, b3; \ op key, b4, b4; \ op key, b5, b5; \ op key, b6, b6; \ op key, b7, b7; #define VAESENC8(key, b0, b1, b2, b3, b4, b5, b6, b7) \ AES_OP8(vaesenc, key, b0, b1, b2, b3, b4, b5, b6, b7) #define VAESDEC8(key, b0, b1, b2, b3, b4, b5, b6, b7) \ AES_OP8(vaesdec, key, b0, b1, b2, b3, b4, b5, b6, b7) #define XOR8(key, b0, b1, b2, b3, b4, b5, b6, b7) \ AES_OP8(vpxor, key, b0, b1, b2, b3, b4, b5, b6, b7) #define AES_OP4(op, key, b0, b1, b2, b3) \ op key, b0, b0; \ op key, b1, b1; \ op key, b2, b2; \ op key, b3, b3; #define VAESENC4(key, b0, b1, b2, b3) \ AES_OP4(vaesenc, key, b0, b1, b2, b3) #define VAESDEC4(key, b0, b1, b2, b3) \ AES_OP4(vaesdec, key, b0, b1, b2, b3) #define XOR4(key, b0, b1, b2, b3) \ AES_OP4(vpxor, key, b0, b1, b2, b3) #define AES_OP2(op, key, b0, b1) \ op key, b0, b0; \ op key, b1, b1; #define VAESENC2(key, b0, b1) \ AES_OP2(vaesenc, key, b0, b1) #define VAESDEC2(key, b0, b1) \ AES_OP2(vaesdec, key, b0, b1) #define XOR2(key, b0, b1) \ AES_OP2(vpxor, key, b0, b1) /********************************************************************** CBC-mode decryption **********************************************************************/ ELF(.type _gcry_vaes_avx2_cbc_dec_amd64,@function) .globl _gcry_vaes_avx2_cbc_dec_amd64 .align 16 _gcry_vaes_avx2_cbc_dec_amd64: /* input: * %rdi: round keys * %rsi: iv * %rdx: dst * %rcx: src * %r8: nblocks * %r9: nrounds */ CFI_STARTPROC(); /* Load IV. */ vmovdqu (%rsi), %xmm15; /* Process 16 blocks per loop. */ .align 8 .Lcbc_dec_blk16: cmpq $16, %r8; jb .Lcbc_dec_blk8; leaq -16(%r8), %r8; /* Load input and xor first key. Update IV. 
*/ vbroadcasti128 (0 * 16)(%rdi), %ymm8; vmovdqu (0 * 16)(%rcx), %ymm0; vmovdqu (2 * 16)(%rcx), %ymm1; vmovdqu (4 * 16)(%rcx), %ymm2; vmovdqu (6 * 16)(%rcx), %ymm3; vmovdqu (8 * 16)(%rcx), %ymm4; vmovdqu (10 * 16)(%rcx), %ymm5; vmovdqu (12 * 16)(%rcx), %ymm6; vmovdqu (14 * 16)(%rcx), %ymm7; vpxor %ymm8, %ymm0, %ymm0; vpxor %ymm8, %ymm1, %ymm1; vpxor %ymm8, %ymm2, %ymm2; vpxor %ymm8, %ymm3, %ymm3; vpxor %ymm8, %ymm4, %ymm4; vpxor %ymm8, %ymm5, %ymm5; vpxor %ymm8, %ymm6, %ymm6; vpxor %ymm8, %ymm7, %ymm7; vbroadcasti128 (1 * 16)(%rdi), %ymm8; vinserti128 $1, (0 * 16)(%rcx), %ymm15, %ymm9; vmovdqu (1 * 16)(%rcx), %ymm10; vmovdqu (3 * 16)(%rcx), %ymm11; vmovdqu (5 * 16)(%rcx), %ymm12; vmovdqu (7 * 16)(%rcx), %ymm13; vmovdqu (9 * 16)(%rcx), %ymm14; vmovdqu (15 * 16)(%rcx), %xmm15; leaq (16 * 16)(%rcx), %rcx; /* AES rounds */ VAESDEC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (2 * 16)(%rdi), %ymm8; VAESDEC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (3 * 16)(%rdi), %ymm8; VAESDEC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (4 * 16)(%rdi), %ymm8; VAESDEC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (5 * 16)(%rdi), %ymm8; VAESDEC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (6 * 16)(%rdi), %ymm8; VAESDEC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (7 * 16)(%rdi), %ymm8; VAESDEC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (8 * 16)(%rdi), %ymm8; VAESDEC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (9 * 16)(%rdi), %ymm8; VAESDEC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (10 * 16)(%rdi), %ymm8; cmpl $12, %r9d; jb .Lcbc_dec_blk16_last; VAESDEC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (11 * 16)(%rdi), %ymm8; VAESDEC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (12 * 16)(%rdi), %ymm8; jz .Lcbc_dec_blk16_last; VAESDEC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (13 * 16)(%rdi), %ymm8; VAESDEC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (14 * 16)(%rdi), %ymm8; /* Last round and output handling. */ .Lcbc_dec_blk16_last: vpxor %ymm8, %ymm9, %ymm9; vpxor %ymm8, %ymm10, %ymm10; vpxor %ymm8, %ymm11, %ymm11; vpxor %ymm8, %ymm12, %ymm12; vpxor %ymm8, %ymm13, %ymm13; vpxor %ymm8, %ymm14, %ymm14; vaesdeclast %ymm9, %ymm0, %ymm0; vaesdeclast %ymm10, %ymm1, %ymm1; vpxor (-5 * 16)(%rcx), %ymm8, %ymm9; vpxor (-3 * 16)(%rcx), %ymm8, %ymm10; vaesdeclast %ymm11, %ymm2, %ymm2; vaesdeclast %ymm12, %ymm3, %ymm3; vaesdeclast %ymm13, %ymm4, %ymm4; vaesdeclast %ymm14, %ymm5, %ymm5; vaesdeclast %ymm9, %ymm6, %ymm6; vaesdeclast %ymm10, %ymm7, %ymm7; vmovdqu %ymm0, (0 * 16)(%rdx); vmovdqu %ymm1, (2 * 16)(%rdx); vmovdqu %ymm2, (4 * 16)(%rdx); vmovdqu %ymm3, (6 * 16)(%rdx); vmovdqu %ymm4, (8 * 16)(%rdx); vmovdqu %ymm5, (10 * 16)(%rdx); vmovdqu %ymm6, (12 * 16)(%rdx); vmovdqu %ymm7, (14 * 16)(%rdx); leaq (16 * 16)(%rdx), %rdx; jmp .Lcbc_dec_blk16; /* Handle trailing eight blocks. */ .align 8 .Lcbc_dec_blk8: cmpq $8, %r8; jb .Lcbc_dec_blk4; leaq -8(%r8), %r8; /* Load input and xor first key. Update IV. 
*/ vbroadcasti128 (0 * 16)(%rdi), %ymm4; vmovdqu (0 * 16)(%rcx), %ymm0; vmovdqu (2 * 16)(%rcx), %ymm1; vmovdqu (4 * 16)(%rcx), %ymm2; vmovdqu (6 * 16)(%rcx), %ymm3; vpxor %ymm4, %ymm0, %ymm0; vpxor %ymm4, %ymm1, %ymm1; vpxor %ymm4, %ymm2, %ymm2; vpxor %ymm4, %ymm3, %ymm3; vbroadcasti128 (1 * 16)(%rdi), %ymm4; vinserti128 $1, (0 * 16)(%rcx), %ymm15, %ymm10; vmovdqu (1 * 16)(%rcx), %ymm11; vmovdqu (3 * 16)(%rcx), %ymm12; vmovdqu (5 * 16)(%rcx), %ymm13; vmovdqu (7 * 16)(%rcx), %xmm15; leaq (8 * 16)(%rcx), %rcx; /* AES rounds */ VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (2 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (3 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (4 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (5 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (6 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (7 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (8 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (9 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (10 * 16)(%rdi), %ymm4; cmpl $12, %r9d; jb .Lcbc_dec_blk8_last; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (11 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (12 * 16)(%rdi), %ymm4; jz .Lcbc_dec_blk8_last; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (13 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (14 * 16)(%rdi), %ymm4; /* Last round and output handling. */ .Lcbc_dec_blk8_last: vpxor %ymm4, %ymm10, %ymm10; vpxor %ymm4, %ymm11, %ymm11; vpxor %ymm4, %ymm12, %ymm12; vpxor %ymm4, %ymm13, %ymm13; vaesdeclast %ymm10, %ymm0, %ymm0; vaesdeclast %ymm11, %ymm1, %ymm1; vaesdeclast %ymm12, %ymm2, %ymm2; vaesdeclast %ymm13, %ymm3, %ymm3; vmovdqu %ymm0, (0 * 16)(%rdx); vmovdqu %ymm1, (2 * 16)(%rdx); vmovdqu %ymm2, (4 * 16)(%rdx); vmovdqu %ymm3, (6 * 16)(%rdx); leaq (8 * 16)(%rdx), %rdx; /* Handle trailing four blocks. */ .align 8 .Lcbc_dec_blk4: cmpq $4, %r8; jb .Lcbc_dec_blk1; leaq -4(%r8), %r8; /* Load input and xor first key. Update IV. 
*/ vbroadcasti128 (0 * 16)(%rdi), %ymm4; vmovdqu (0 * 16)(%rcx), %ymm0; vmovdqu (2 * 16)(%rcx), %ymm1; vpxor %ymm4, %ymm0, %ymm0; vpxor %ymm4, %ymm1, %ymm1; vbroadcasti128 (1 * 16)(%rdi), %ymm4; vinserti128 $1, (0 * 16)(%rcx), %ymm15, %ymm10; vmovdqu (1 * 16)(%rcx), %ymm11; vmovdqu (3 * 16)(%rcx), %xmm15; leaq (4 * 16)(%rcx), %rcx; /* AES rounds */ VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (2 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (3 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (4 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (5 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (6 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (7 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (8 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (9 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (10 * 16)(%rdi), %ymm4; cmpl $12, %r9d; jb .Lcbc_dec_blk4_last; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (11 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (12 * 16)(%rdi), %ymm4; jz .Lcbc_dec_blk4_last; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (13 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (14 * 16)(%rdi), %ymm4; /* Last round and output handling. */ .Lcbc_dec_blk4_last: vpxor %ymm4, %ymm10, %ymm10; vpxor %ymm4, %ymm11, %ymm11; vaesdeclast %ymm10, %ymm0, %ymm0; vaesdeclast %ymm11, %ymm1, %ymm1; vmovdqu %ymm0, (0 * 16)(%rdx); vmovdqu %ymm1, (2 * 16)(%rdx); leaq (4 * 16)(%rdx), %rdx; /* Process trailing one to three blocks, one per loop. */ .align 8 .Lcbc_dec_blk1: cmpq $1, %r8; jb .Ldone_cbc_dec; leaq -1(%r8), %r8; /* Load input. */ vmovdqu (%rcx), %xmm2; leaq 16(%rcx), %rcx; /* Xor first key. */ vpxor (0 * 16)(%rdi), %xmm2, %xmm0; /* AES rounds. */ vaesdec (1 * 16)(%rdi), %xmm0, %xmm0; vaesdec (2 * 16)(%rdi), %xmm0, %xmm0; vaesdec (3 * 16)(%rdi), %xmm0, %xmm0; vaesdec (4 * 16)(%rdi), %xmm0, %xmm0; vaesdec (5 * 16)(%rdi), %xmm0, %xmm0; vaesdec (6 * 16)(%rdi), %xmm0, %xmm0; vaesdec (7 * 16)(%rdi), %xmm0, %xmm0; vaesdec (8 * 16)(%rdi), %xmm0, %xmm0; vaesdec (9 * 16)(%rdi), %xmm0, %xmm0; vmovdqa (10 * 16)(%rdi), %xmm1; cmpl $12, %r9d; jb .Lcbc_dec_blk1_last; vaesdec %xmm1, %xmm0, %xmm0; vaesdec (11 * 16)(%rdi), %xmm0, %xmm0; vmovdqa (12 * 16)(%rdi), %xmm1; jz .Lcbc_dec_blk1_last; vaesdec %xmm1, %xmm0, %xmm0; vaesdec (13 * 16)(%rdi), %xmm0, %xmm0; vmovdqa (14 * 16)(%rdi), %xmm1; /* Last round and output handling. */ .Lcbc_dec_blk1_last: vpxor %xmm1, %xmm15, %xmm15; vaesdeclast %xmm15, %xmm0, %xmm0; vmovdqa %xmm2, %xmm15; vmovdqu %xmm0, (%rdx); leaq 16(%rdx), %rdx; jmp .Lcbc_dec_blk1; .align 8 .Ldone_cbc_dec: /* Store IV. */ vmovdqu %xmm15, (%rsi); vzeroall; ret_spec_stop CFI_ENDPROC(); ELF(.size _gcry_vaes_avx2_cbc_dec_amd64,.-_gcry_vaes_avx2_cbc_dec_amd64) /********************************************************************** CFB-mode decryption **********************************************************************/ ELF(.type _gcry_vaes_avx2_cfb_dec_amd64,@function) .globl _gcry_vaes_avx2_cfb_dec_amd64 .align 16 _gcry_vaes_avx2_cfb_dec_amd64: /* input: * %rdi: round keys * %rsi: iv * %rdx: dst * %rcx: src * %r8: nblocks * %r9: nrounds */ CFI_STARTPROC(); /* Load IV. */ vmovdqu (%rsi), %xmm15; /* Process 16 blocks per loop. */ .align 8 .Lcfb_dec_blk16: cmpq $16, %r8; jb .Lcfb_dec_blk8; leaq -16(%r8), %r8; /* Load input and xor first key. Update IV. 
*/ vbroadcasti128 (0 * 16)(%rdi), %ymm8; vinserti128 $1, (0 * 16)(%rcx), %ymm15, %ymm0; vmovdqu (1 * 16)(%rcx), %ymm1; vmovdqu (3 * 16)(%rcx), %ymm2; vmovdqu (5 * 16)(%rcx), %ymm3; vmovdqu (7 * 16)(%rcx), %ymm4; vmovdqu (9 * 16)(%rcx), %ymm5; vmovdqu (11 * 16)(%rcx), %ymm6; vmovdqu (13 * 16)(%rcx), %ymm7; vmovdqu (15 * 16)(%rcx), %xmm15; vpxor %ymm8, %ymm0, %ymm0; vpxor %ymm8, %ymm1, %ymm1; vpxor %ymm8, %ymm2, %ymm2; vpxor %ymm8, %ymm3, %ymm3; vpxor %ymm8, %ymm4, %ymm4; vpxor %ymm8, %ymm5, %ymm5; vpxor %ymm8, %ymm6, %ymm6; vpxor %ymm8, %ymm7, %ymm7; vbroadcasti128 (1 * 16)(%rdi), %ymm8; vmovdqu (0 * 16)(%rcx), %ymm9; vmovdqu (2 * 16)(%rcx), %ymm10; vmovdqu (4 * 16)(%rcx), %ymm11; vmovdqu (6 * 16)(%rcx), %ymm12; vmovdqu (8 * 16)(%rcx), %ymm13; vmovdqu (10 * 16)(%rcx), %ymm14; leaq (16 * 16)(%rcx), %rcx; /* AES rounds */ VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (2 * 16)(%rdi), %ymm8; VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (3 * 16)(%rdi), %ymm8; VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (4 * 16)(%rdi), %ymm8; VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (5 * 16)(%rdi), %ymm8; VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (6 * 16)(%rdi), %ymm8; VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (7 * 16)(%rdi), %ymm8; VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (8 * 16)(%rdi), %ymm8; VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (9 * 16)(%rdi), %ymm8; VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (10 * 16)(%rdi), %ymm8; cmpl $12, %r9d; jb .Lcfb_dec_blk16_last; VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (11 * 16)(%rdi), %ymm8; VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (12 * 16)(%rdi), %ymm8; jz .Lcfb_dec_blk16_last; VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (13 * 16)(%rdi), %ymm8; VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (14 * 16)(%rdi), %ymm8; /* Last round and output handling. */ .Lcfb_dec_blk16_last: vpxor %ymm8, %ymm9, %ymm9; vpxor %ymm8, %ymm10, %ymm10; vpxor %ymm8, %ymm11, %ymm11; vpxor %ymm8, %ymm12, %ymm12; vpxor %ymm8, %ymm13, %ymm13; vpxor %ymm8, %ymm14, %ymm14; vaesenclast %ymm9, %ymm0, %ymm0; vaesenclast %ymm10, %ymm1, %ymm1; vpxor (-4 * 16)(%rcx), %ymm8, %ymm9; vpxor (-2 * 16)(%rcx), %ymm8, %ymm10; vaesenclast %ymm11, %ymm2, %ymm2; vaesenclast %ymm12, %ymm3, %ymm3; vaesenclast %ymm13, %ymm4, %ymm4; vaesenclast %ymm14, %ymm5, %ymm5; vaesenclast %ymm9, %ymm6, %ymm6; vaesenclast %ymm10, %ymm7, %ymm7; vmovdqu %ymm0, (0 * 16)(%rdx); vmovdqu %ymm1, (2 * 16)(%rdx); vmovdqu %ymm2, (4 * 16)(%rdx); vmovdqu %ymm3, (6 * 16)(%rdx); vmovdqu %ymm4, (8 * 16)(%rdx); vmovdqu %ymm5, (10 * 16)(%rdx); vmovdqu %ymm6, (12 * 16)(%rdx); vmovdqu %ymm7, (14 * 16)(%rdx); leaq (16 * 16)(%rdx), %rdx; jmp .Lcfb_dec_blk16; /* Handle trailing eight blocks. */ .align 8 .Lcfb_dec_blk8: cmpq $8, %r8; jb .Lcfb_dec_blk4; leaq -8(%r8), %r8; /* Load input and xor first key. Update IV. 
*/ vbroadcasti128 (0 * 16)(%rdi), %ymm4; vinserti128 $1, (0 * 16)(%rcx), %ymm15, %ymm0; vmovdqu (1 * 16)(%rcx), %ymm1; vmovdqu (3 * 16)(%rcx), %ymm2; vmovdqu (5 * 16)(%rcx), %ymm3; vmovdqu (7 * 16)(%rcx), %xmm15; vpxor %ymm4, %ymm0, %ymm0; vpxor %ymm4, %ymm1, %ymm1; vpxor %ymm4, %ymm2, %ymm2; vpxor %ymm4, %ymm3, %ymm3; vbroadcasti128 (1 * 16)(%rdi), %ymm4; vmovdqu (0 * 16)(%rcx), %ymm10; vmovdqu (2 * 16)(%rcx), %ymm11; vmovdqu (4 * 16)(%rcx), %ymm12; vmovdqu (6 * 16)(%rcx), %ymm13; leaq (8 * 16)(%rcx), %rcx; /* AES rounds */ VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (2 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (3 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (4 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (5 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (6 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (7 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (8 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (9 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (10 * 16)(%rdi), %ymm4; cmpl $12, %r9d; jb .Lcfb_dec_blk8_last; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (11 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (12 * 16)(%rdi), %ymm4; jz .Lcfb_dec_blk8_last; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (13 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (14 * 16)(%rdi), %ymm4; /* Last round and output handling. */ .Lcfb_dec_blk8_last: vpxor %ymm4, %ymm10, %ymm10; vpxor %ymm4, %ymm11, %ymm11; vpxor %ymm4, %ymm12, %ymm12; vpxor %ymm4, %ymm13, %ymm13; vaesenclast %ymm10, %ymm0, %ymm0; vaesenclast %ymm11, %ymm1, %ymm1; vaesenclast %ymm12, %ymm2, %ymm2; vaesenclast %ymm13, %ymm3, %ymm3; vmovdqu %ymm0, (0 * 16)(%rdx); vmovdqu %ymm1, (2 * 16)(%rdx); vmovdqu %ymm2, (4 * 16)(%rdx); vmovdqu %ymm3, (6 * 16)(%rdx); leaq (8 * 16)(%rdx), %rdx; /* Handle trailing four blocks. */ .align 8 .Lcfb_dec_blk4: cmpq $4, %r8; jb .Lcfb_dec_blk1; leaq -4(%r8), %r8; /* Load input and xor first key. Update IV. 
*/ vbroadcasti128 (0 * 16)(%rdi), %ymm4; vinserti128 $1, (0 * 16)(%rcx), %ymm15, %ymm0; vmovdqu (1 * 16)(%rcx), %ymm1; vmovdqu (3 * 16)(%rcx), %xmm15; vpxor %ymm4, %ymm0, %ymm0; vpxor %ymm4, %ymm1, %ymm1; vbroadcasti128 (1 * 16)(%rdi), %ymm4; vmovdqu (0 * 16)(%rcx), %ymm10; vmovdqu (2 * 16)(%rcx), %ymm11; leaq (4 * 16)(%rcx), %rcx; /* AES rounds */ VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (2 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (3 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (4 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (5 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (6 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (7 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (8 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (9 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (10 * 16)(%rdi), %ymm4; cmpl $12, %r9d; jb .Lcfb_dec_blk4_last; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (11 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (12 * 16)(%rdi), %ymm4; jz .Lcfb_dec_blk4_last; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (13 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (14 * 16)(%rdi), %ymm4; /* Last round and output handling. */ .Lcfb_dec_blk4_last: vpxor %ymm4, %ymm10, %ymm10; vpxor %ymm4, %ymm11, %ymm11; vaesenclast %ymm10, %ymm0, %ymm0; vaesenclast %ymm11, %ymm1, %ymm1; vmovdqu %ymm0, (0 * 16)(%rdx); vmovdqu %ymm1, (2 * 16)(%rdx); leaq (4 * 16)(%rdx), %rdx; /* Process trailing one to three blocks, one per loop. */ .align 8 .Lcfb_dec_blk1: cmpq $1, %r8; jb .Ldone_cfb_dec; leaq -1(%r8), %r8; /* Xor first key. */ vpxor (0 * 16)(%rdi), %xmm15, %xmm0; /* Load input as next IV. */ vmovdqu (%rcx), %xmm15; leaq 16(%rcx), %rcx; /* AES rounds. */ vaesenc (1 * 16)(%rdi), %xmm0, %xmm0; vaesenc (2 * 16)(%rdi), %xmm0, %xmm0; vaesenc (3 * 16)(%rdi), %xmm0, %xmm0; vaesenc (4 * 16)(%rdi), %xmm0, %xmm0; vaesenc (5 * 16)(%rdi), %xmm0, %xmm0; vaesenc (6 * 16)(%rdi), %xmm0, %xmm0; vaesenc (7 * 16)(%rdi), %xmm0, %xmm0; vaesenc (8 * 16)(%rdi), %xmm0, %xmm0; vaesenc (9 * 16)(%rdi), %xmm0, %xmm0; vmovdqa (10 * 16)(%rdi), %xmm1; cmpl $12, %r9d; jb .Lcfb_dec_blk1_last; vaesenc %xmm1, %xmm0, %xmm0; vaesenc (11 * 16)(%rdi), %xmm0, %xmm0; vmovdqa (12 * 16)(%rdi), %xmm1; jz .Lcfb_dec_blk1_last; vaesenc %xmm1, %xmm0, %xmm0; vaesenc (13 * 16)(%rdi), %xmm0, %xmm0; vmovdqa (14 * 16)(%rdi), %xmm1; /* Last round and output handling. */ .Lcfb_dec_blk1_last: vpxor %xmm15, %xmm1, %xmm1; vaesenclast %xmm1, %xmm0, %xmm0; vmovdqu %xmm0, (%rdx); leaq 16(%rdx), %rdx; jmp .Lcfb_dec_blk1; .align 8 .Ldone_cfb_dec: /* Store IV. 
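 *
 * At this point %xmm15 holds the last ciphertext block processed, i.e. the
 * chaining value the caller needs for the next CFB call; it is written back
 * to the iv buffer and vzeroall then clears all vector registers so no key-
 * or data-derived material is left behind.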
*/ vmovdqu %xmm15, (%rsi); vzeroall; ret_spec_stop CFI_ENDPROC(); ELF(.size _gcry_vaes_avx2_cfb_dec_amd64,.-_gcry_vaes_avx2_cfb_dec_amd64) /********************************************************************** CTR-mode encryption **********************************************************************/ ELF(.type _gcry_vaes_avx2_ctr_enc_amd64,@function) .globl _gcry_vaes_avx2_ctr_enc_amd64 .align 16 _gcry_vaes_avx2_ctr_enc_amd64: /* input: * %rdi: round keys * %rsi: counter * %rdx: dst * %rcx: src * %r8: nblocks * %r9: nrounds */ CFI_STARTPROC(); movq 8(%rsi), %r10; movq 0(%rsi), %r11; bswapq %r10; bswapq %r11; vpcmpeqd %ymm15, %ymm15, %ymm15; vpsrldq $8, %ymm15, %ymm15; // 0:-1 vpaddq %ymm15, %ymm15, %ymm14; // 0:-2 vbroadcasti128 .Lbswap128_mask rRIP, %ymm13; #define inc_le128(x, minus_one, tmp) \ vpcmpeqq minus_one, x, tmp; \ vpsubq minus_one, x, x; \ vpslldq $8, tmp, tmp; \ vpsubq tmp, x, x; #define add2_le128(x, minus_one, minus_two, tmp1, tmp2) \ vpcmpeqq minus_one, x, tmp1; \ vpcmpeqq minus_two, x, tmp2; \ vpor tmp1, tmp2, tmp2; \ vpsubq minus_two, x, x; \ vpslldq $8, tmp2, tmp2; \ vpsubq tmp2, x, x; /* Process 16 blocks per loop. */ .align 8 .Lctr_enc_blk16: cmpq $16, %r8; jb .Lctr_enc_blk8; leaq -16(%r8), %r8; vbroadcasti128 (%rsi), %ymm7; vbroadcasti128 (0 * 16)(%rdi), %ymm8; /* detect if carry handling is needed */ addb $16, 15(%rsi); jc .Lctr_enc_blk16_handle_carry; /* Increment counters. */ vpaddb .Lbige_addb_0 rRIP, %ymm7, %ymm0; vpaddb .Lbige_addb_2 rRIP, %ymm7, %ymm1; vpaddb .Lbige_addb_4 rRIP, %ymm7, %ymm2; vpaddb .Lbige_addb_6 rRIP, %ymm7, %ymm3; vpaddb .Lbige_addb_8 rRIP, %ymm7, %ymm4; vpaddb .Lbige_addb_10 rRIP, %ymm7, %ymm5; vpaddb .Lbige_addb_12 rRIP, %ymm7, %ymm6; vpaddb .Lbige_addb_14 rRIP, %ymm7, %ymm7; leaq 16(%r10), %r10; .Lctr_enc_blk16_rounds: /* AES rounds */ XOR8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (1 * 16)(%rdi), %ymm8; VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (2 * 16)(%rdi), %ymm8; VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (3 * 16)(%rdi), %ymm8; VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (4 * 16)(%rdi), %ymm8; VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (5 * 16)(%rdi), %ymm8; VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (6 * 16)(%rdi), %ymm8; VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (7 * 16)(%rdi), %ymm8; VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (8 * 16)(%rdi), %ymm8; VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (9 * 16)(%rdi), %ymm8; VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (10 * 16)(%rdi), %ymm8; cmpl $12, %r9d; jb .Lctr_enc_blk16_last; VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (11 * 16)(%rdi), %ymm8; VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (12 * 16)(%rdi), %ymm8; jz .Lctr_enc_blk16_last; VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (13 * 16)(%rdi), %ymm8; VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (14 * 16)(%rdi), %ymm8; /* Last round and output handling. 
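 *
 * The source data is XORed into the last round key below so that a single
 * vaesenclast performs the final AddRoundKey and the CTR keystream XOR at
 * once; this relies on the identity (illustrative sketch):
 *
 *   aesenclast(s, k ^ src) == ShiftRows(SubBytes(s)) ^ k ^ src
 *                          == aesenclast(s, k) ^ src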
*/ .Lctr_enc_blk16_last: vpxor (0 * 16)(%rcx), %ymm8, %ymm9; /* Xor src to last round key. */ vpxor (2 * 16)(%rcx), %ymm8, %ymm10; vpxor (4 * 16)(%rcx), %ymm8, %ymm11; vpxor (6 * 16)(%rcx), %ymm8, %ymm12; vaesenclast %ymm9, %ymm0, %ymm0; vaesenclast %ymm10, %ymm1, %ymm1; vaesenclast %ymm11, %ymm2, %ymm2; vaesenclast %ymm12, %ymm3, %ymm3; vpxor (8 * 16)(%rcx), %ymm8, %ymm9; vpxor (10 * 16)(%rcx), %ymm8, %ymm10; vpxor (12 * 16)(%rcx), %ymm8, %ymm11; vpxor (14 * 16)(%rcx), %ymm8, %ymm8; leaq (16 * 16)(%rcx), %rcx; vaesenclast %ymm9, %ymm4, %ymm4; vaesenclast %ymm10, %ymm5, %ymm5; vaesenclast %ymm11, %ymm6, %ymm6; vaesenclast %ymm8, %ymm7, %ymm7; vmovdqu %ymm0, (0 * 16)(%rdx); vmovdqu %ymm1, (2 * 16)(%rdx); vmovdqu %ymm2, (4 * 16)(%rdx); vmovdqu %ymm3, (6 * 16)(%rdx); vmovdqu %ymm4, (8 * 16)(%rdx); vmovdqu %ymm5, (10 * 16)(%rdx); vmovdqu %ymm6, (12 * 16)(%rdx); vmovdqu %ymm7, (14 * 16)(%rdx); leaq (16 * 16)(%rdx), %rdx; jmp .Lctr_enc_blk16; .align 8 .Lctr_enc_blk16_handle_carry: /* Increment counters (handle carry). */ vpshufb %xmm13, %xmm7, %xmm1; /* be => le */ vmovdqa %xmm1, %xmm0; inc_le128(%xmm1, %xmm15, %xmm5); vinserti128 $1, %xmm1, %ymm0, %ymm7; /* ctr: +1:+0 */ vpshufb %ymm13, %ymm7, %ymm0; addq $16, %r10; adcq $0, %r11; bswapq %r10; bswapq %r11; movq %r10, 8(%rsi); movq %r11, 0(%rsi); bswapq %r10; bswapq %r11; add2_le128(%ymm7, %ymm15, %ymm14, %ymm9, %ymm10); /* ctr: +3:+2 */ vpshufb %ymm13, %ymm7, %ymm1; add2_le128(%ymm7, %ymm15, %ymm14, %ymm9, %ymm10); /* ctr: +5:+4 */ vpshufb %ymm13, %ymm7, %ymm2; add2_le128(%ymm7, %ymm15, %ymm14, %ymm9, %ymm10); /* ctr: +7:+6 */ vpshufb %ymm13, %ymm7, %ymm3; add2_le128(%ymm7, %ymm15, %ymm14, %ymm9, %ymm10); /* ctr: +9:+8 */ vpshufb %ymm13, %ymm7, %ymm4; add2_le128(%ymm7, %ymm15, %ymm14, %ymm9, %ymm10); /* ctr: +11:+10 */ vpshufb %ymm13, %ymm7, %ymm5; add2_le128(%ymm7, %ymm15, %ymm14, %ymm9, %ymm10); /* ctr: +13:+12 */ vpshufb %ymm13, %ymm7, %ymm6; add2_le128(%ymm7, %ymm15, %ymm14, %ymm9, %ymm10); /* ctr: +15:+14 */ vpshufb %ymm13, %ymm7, %ymm7; jmp .Lctr_enc_blk16_rounds; /* Handle trailing eight blocks. */ .align 8 .Lctr_enc_blk8: cmpq $8, %r8; jb .Lctr_enc_blk4; leaq -8(%r8), %r8; vbroadcasti128 (%rsi), %ymm3; vbroadcasti128 (0 * 16)(%rdi), %ymm4; /* detect if carry handling is needed */ addb $8, 15(%rsi); jc .Lctr_enc_blk8_handle_carry; /* Increment counters. 
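 *
 * The 'addb $n, 15(%rsi); jc ...' check above adds the group size to the
 * least-significant byte of the big-endian counter in place; if that 8-bit
 * add does not carry, none of the higher counter bytes can change, so the
 * per-block counters can be formed with cheap byte additions (the
 * .Lbige_addb_N constants, defined elsewhere in this file, are expected to
 * add N to that low byte). Rough scalar equivalent of the check
 * (illustrative only):
 *
 *   if ((unsigned)ctr[15] + nblks > 0xff)
 *       goto handle_carry;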
*/ vpaddb .Lbige_addb_0 rRIP, %ymm3, %ymm0; vpaddb .Lbige_addb_2 rRIP, %ymm3, %ymm1; vpaddb .Lbige_addb_4 rRIP, %ymm3, %ymm2; vpaddb .Lbige_addb_6 rRIP, %ymm3, %ymm3; leaq 8(%r10), %r10; .Lctr_enc_blk8_rounds: /* AES rounds */ XOR4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (1 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (2 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (3 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (4 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (5 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (6 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (7 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (8 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (9 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (10 * 16)(%rdi), %ymm4; cmpl $12, %r9d; jb .Lctr_enc_blk8_last; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (11 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (12 * 16)(%rdi), %ymm4; jz .Lctr_enc_blk8_last; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (13 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (14 * 16)(%rdi), %ymm4; /* Last round and output handling. */ .Lctr_enc_blk8_last: vpxor (0 * 16)(%rcx), %ymm4, %ymm5; /* Xor src to last round key. */ vpxor (2 * 16)(%rcx), %ymm4, %ymm6; vpxor (4 * 16)(%rcx), %ymm4, %ymm7; vpxor (6 * 16)(%rcx), %ymm4, %ymm4; leaq (8 * 16)(%rcx), %rcx; vaesenclast %ymm5, %ymm0, %ymm0; vaesenclast %ymm6, %ymm1, %ymm1; vaesenclast %ymm7, %ymm2, %ymm2; vaesenclast %ymm4, %ymm3, %ymm3; vmovdqu %ymm0, (0 * 16)(%rdx); vmovdqu %ymm1, (2 * 16)(%rdx); vmovdqu %ymm2, (4 * 16)(%rdx); vmovdqu %ymm3, (6 * 16)(%rdx); leaq (8 * 16)(%rdx), %rdx; jmp .Lctr_enc_blk4; .align 8 .Lctr_enc_blk8_handle_carry: /* Increment counters (handle carry). */ vpshufb %xmm13, %xmm3, %xmm1; /* be => le */ vmovdqa %xmm1, %xmm0; inc_le128(%xmm1, %xmm15, %xmm5); vinserti128 $1, %xmm1, %ymm0, %ymm3; /* ctr: +1:+0 */ vpshufb %ymm13, %ymm3, %ymm0; addq $8, %r10; adcq $0, %r11; bswapq %r10; bswapq %r11; movq %r10, 8(%rsi); movq %r11, 0(%rsi); bswapq %r10; bswapq %r11; add2_le128(%ymm3, %ymm15, %ymm14, %ymm5, %ymm6); /* ctr: +3:+2 */ vpshufb %ymm13, %ymm3, %ymm1; add2_le128(%ymm3, %ymm15, %ymm14, %ymm5, %ymm6); /* ctr: +5:+4 */ vpshufb %ymm13, %ymm3, %ymm2; add2_le128(%ymm3, %ymm15, %ymm14, %ymm5, %ymm6); /* ctr: +7:+6 */ vpshufb %ymm13, %ymm3, %ymm3; jmp .Lctr_enc_blk8_rounds; /* Handle trailing four blocks. */ .align 8 .Lctr_enc_blk4: cmpq $4, %r8; jb .Lctr_enc_blk1; leaq -4(%r8), %r8; vbroadcasti128 (%rsi), %ymm3; vbroadcasti128 (0 * 16)(%rdi), %ymm4; /* detect if carry handling is needed */ addb $4, 15(%rsi); jc .Lctr_enc_blk4_handle_carry; /* Increment counters. 
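 *
 * When a carry is detected, the *_handle_carry paths fall back to the
 * inc_le128/add2_le128 macros defined above: the counter is byte-swapped to
 * little-endian and adding 1 or 2 becomes a vpsubq of the {0:-1} or {0:-2}
 * constant, with the vpcmpeqq/vpslldq pair propagating the carry into the
 * high quadword. Rough scalar form of add2_le128 (illustrative only):
 *
 *   lo += 2;
 *   hi += (lo < 2);        (carry out of the low 64 bits)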
*/ vpaddb .Lbige_addb_0 rRIP, %ymm3, %ymm0; vpaddb .Lbige_addb_2 rRIP, %ymm3, %ymm1; leaq 4(%r10), %r10; .Lctr_enc_blk4_rounds: /* AES rounds */ XOR2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (1 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (2 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (3 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (4 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (5 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (6 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (7 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (8 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (9 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (10 * 16)(%rdi), %ymm4; cmpl $12, %r9d; jb .Lctr_enc_blk4_last; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (11 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (12 * 16)(%rdi), %ymm4; jz .Lctr_enc_blk4_last; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (13 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (14 * 16)(%rdi), %ymm4; /* Last round and output handling. */ .Lctr_enc_blk4_last: vpxor (0 * 16)(%rcx), %ymm4, %ymm5; /* Xor src to last round key. */ vpxor (2 * 16)(%rcx), %ymm4, %ymm6; leaq (4 * 16)(%rcx), %rcx; vaesenclast %ymm5, %ymm0, %ymm0; vaesenclast %ymm6, %ymm1, %ymm1; vmovdqu %ymm0, (0 * 16)(%rdx); vmovdqu %ymm1, (2 * 16)(%rdx); leaq (4 * 16)(%rdx), %rdx; jmp .Lctr_enc_blk1; .align 8 .Lctr_enc_blk4_handle_carry: /* Increment counters (handle carry). */ vpshufb %xmm13, %xmm3, %xmm1; /* be => le */ vmovdqa %xmm1, %xmm0; inc_le128(%xmm1, %xmm15, %xmm5); vinserti128 $1, %xmm1, %ymm0, %ymm3; /* ctr: +1:+0 */ vpshufb %ymm13, %ymm3, %ymm0; addq $4, %r10; adcq $0, %r11; bswapq %r10; bswapq %r11; movq %r10, 8(%rsi); movq %r11, 0(%rsi); bswapq %r10; bswapq %r11; add2_le128(%ymm3, %ymm15, %ymm14, %ymm5, %ymm6); /* ctr: +3:+2 */ vpshufb %ymm13, %ymm3, %ymm1; jmp .Lctr_enc_blk4_rounds; /* Process trailing one to three blocks, one per loop. */ .align 8 .Lctr_enc_blk1: cmpq $1, %r8; jb .Ldone_ctr_enc; leaq -1(%r8), %r8; /* Load and increament counter. */ vmovdqu (%rsi), %xmm0; addq $1, %r10; adcq $0, %r11; bswapq %r10; bswapq %r11; movq %r10, 8(%rsi); movq %r11, 0(%rsi); bswapq %r10; bswapq %r11; /* AES rounds. */ vpxor (0 * 16)(%rdi), %xmm0, %xmm0; vaesenc (1 * 16)(%rdi), %xmm0, %xmm0; vaesenc (2 * 16)(%rdi), %xmm0, %xmm0; vaesenc (3 * 16)(%rdi), %xmm0, %xmm0; vaesenc (4 * 16)(%rdi), %xmm0, %xmm0; vaesenc (5 * 16)(%rdi), %xmm0, %xmm0; vaesenc (6 * 16)(%rdi), %xmm0, %xmm0; vaesenc (7 * 16)(%rdi), %xmm0, %xmm0; vaesenc (8 * 16)(%rdi), %xmm0, %xmm0; vaesenc (9 * 16)(%rdi), %xmm0, %xmm0; vmovdqa (10 * 16)(%rdi), %xmm1; cmpl $12, %r9d; jb .Lctr_enc_blk1_last; vaesenc %xmm1, %xmm0, %xmm0; vaesenc (11 * 16)(%rdi), %xmm0, %xmm0; vmovdqa (12 * 16)(%rdi), %xmm1; jz .Lctr_enc_blk1_last; vaesenc %xmm1, %xmm0, %xmm0; vaesenc (13 * 16)(%rdi), %xmm0, %xmm0; vmovdqa (14 * 16)(%rdi), %xmm1; /* Last round and output handling. */ .Lctr_enc_blk1_last: vpxor (%rcx), %xmm1, %xmm1; /* Xor src to last round key. */ leaq 16(%rcx), %rcx; vaesenclast %xmm1, %xmm0, %xmm0; /* Last round and xor with xmm1. 
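 *
 * In this one-block tail the counter is maintained as a byte-swapped 64:64
 * pair in %r11:%r10, so a plain addq/adcq propagates the carry; the pair is
 * bswap'd back to big-endian before being stored to (%rsi) on every
 * iteration.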
*/ vmovdqu %xmm0, (%rdx); leaq 16(%rdx), %rdx; jmp .Lctr_enc_blk1; .align 8 .Ldone_ctr_enc: vzeroall; xorl %r10d, %r10d; xorl %r11d, %r11d; ret_spec_stop CFI_ENDPROC(); ELF(.size _gcry_vaes_avx2_ctr_enc_amd64,.-_gcry_vaes_avx2_ctr_enc_amd64) /********************************************************************** Little-endian 32-bit CTR-mode encryption (GCM-SIV) **********************************************************************/ ELF(.type _gcry_vaes_avx2_ctr32le_enc_amd64,@function) .globl _gcry_vaes_avx2_ctr32le_enc_amd64 .align 16 _gcry_vaes_avx2_ctr32le_enc_amd64: /* input: * %rdi: round keys * %rsi: counter * %rdx: dst * %rcx: src * %r8: nblocks * %r9: nrounds */ CFI_STARTPROC(); vbroadcasti128 (%rsi), %ymm15; // CTR /* Process 16 blocks per loop. */ .align 8 .Lctr32le_enc_blk16: cmpq $16, %r8; jb .Lctr32le_enc_blk8; leaq -16(%r8), %r8; vbroadcasti128 (0 * 16)(%rdi), %ymm8; /* Increment counters. */ vpaddd .Lle_addd_0 rRIP, %ymm15, %ymm0; vpaddd .Lle_addd_2 rRIP, %ymm15, %ymm1; vpaddd .Lle_addd_4 rRIP, %ymm15, %ymm2; vpaddd .Lle_addd_6 rRIP, %ymm15, %ymm3; vpaddd .Lle_addd_8 rRIP, %ymm15, %ymm4; vpaddd .Lle_addd_10 rRIP, %ymm15, %ymm5; vpaddd .Lle_addd_12 rRIP, %ymm15, %ymm6; vpaddd .Lle_addd_14 rRIP, %ymm15, %ymm7; vpaddd .Lle_addd_16_2 rRIP, %ymm15, %ymm15; /* AES rounds */ XOR8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (1 * 16)(%rdi), %ymm8; VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (2 * 16)(%rdi), %ymm8; VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (3 * 16)(%rdi), %ymm8; VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (4 * 16)(%rdi), %ymm8; VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (5 * 16)(%rdi), %ymm8; VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (6 * 16)(%rdi), %ymm8; VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (7 * 16)(%rdi), %ymm8; VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (8 * 16)(%rdi), %ymm8; VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (9 * 16)(%rdi), %ymm8; VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (10 * 16)(%rdi), %ymm8; cmpl $12, %r9d; jb .Lctr32le_enc_blk16_last; VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (11 * 16)(%rdi), %ymm8; VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (12 * 16)(%rdi), %ymm8; jz .Lctr32le_enc_blk16_last; VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (13 * 16)(%rdi), %ymm8; VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (14 * 16)(%rdi), %ymm8; /* Last round and output handling. */ .Lctr32le_enc_blk16_last: vpxor (0 * 16)(%rcx), %ymm8, %ymm9; /* Xor src to last round key. 
*/ vpxor (2 * 16)(%rcx), %ymm8, %ymm10; vpxor (4 * 16)(%rcx), %ymm8, %ymm11; vpxor (6 * 16)(%rcx), %ymm8, %ymm12; vaesenclast %ymm9, %ymm0, %ymm0; vaesenclast %ymm10, %ymm1, %ymm1; vaesenclast %ymm11, %ymm2, %ymm2; vaesenclast %ymm12, %ymm3, %ymm3; vpxor (8 * 16)(%rcx), %ymm8, %ymm9; vpxor (10 * 16)(%rcx), %ymm8, %ymm10; vpxor (12 * 16)(%rcx), %ymm8, %ymm11; vpxor (14 * 16)(%rcx), %ymm8, %ymm8; leaq (16 * 16)(%rcx), %rcx; vaesenclast %ymm9, %ymm4, %ymm4; vaesenclast %ymm10, %ymm5, %ymm5; vaesenclast %ymm11, %ymm6, %ymm6; vaesenclast %ymm8, %ymm7, %ymm7; vmovdqu %ymm0, (0 * 16)(%rdx); vmovdqu %ymm1, (2 * 16)(%rdx); vmovdqu %ymm2, (4 * 16)(%rdx); vmovdqu %ymm3, (6 * 16)(%rdx); vmovdqu %ymm4, (8 * 16)(%rdx); vmovdqu %ymm5, (10 * 16)(%rdx); vmovdqu %ymm6, (12 * 16)(%rdx); vmovdqu %ymm7, (14 * 16)(%rdx); leaq (16 * 16)(%rdx), %rdx; jmp .Lctr32le_enc_blk16; /* Handle trailing eight blocks. */ .align 8 .Lctr32le_enc_blk8: cmpq $8, %r8; jb .Lctr32le_enc_blk4; leaq -8(%r8), %r8; vbroadcasti128 (0 * 16)(%rdi), %ymm4; /* Increment counters. */ vpaddd .Lle_addd_0 rRIP, %ymm15, %ymm0; vpaddd .Lle_addd_2 rRIP, %ymm15, %ymm1; vpaddd .Lle_addd_4 rRIP, %ymm15, %ymm2; vpaddd .Lle_addd_6 rRIP, %ymm15, %ymm3; vpaddd .Lle_addd_8_2 rRIP, %ymm15, %ymm15; /* AES rounds */ XOR4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (1 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (2 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (3 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (4 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (5 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (6 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (7 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (8 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (9 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (10 * 16)(%rdi), %ymm4; cmpl $12, %r9d; jb .Lctr32le_enc_blk8_last; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (11 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (12 * 16)(%rdi), %ymm4; jz .Lctr32le_enc_blk8_last; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (13 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (14 * 16)(%rdi), %ymm4; /* Last round and output handling. */ .Lctr32le_enc_blk8_last: vpxor (0 * 16)(%rcx), %ymm4, %ymm5; /* Xor src to last round key. */ vpxor (2 * 16)(%rcx), %ymm4, %ymm6; vpxor (4 * 16)(%rcx), %ymm4, %ymm7; vpxor (6 * 16)(%rcx), %ymm4, %ymm4; leaq (8 * 16)(%rcx), %rcx; vaesenclast %ymm5, %ymm0, %ymm0; vaesenclast %ymm6, %ymm1, %ymm1; vaesenclast %ymm7, %ymm2, %ymm2; vaesenclast %ymm4, %ymm3, %ymm3; vmovdqu %ymm0, (0 * 16)(%rdx); vmovdqu %ymm1, (2 * 16)(%rdx); vmovdqu %ymm2, (4 * 16)(%rdx); vmovdqu %ymm3, (6 * 16)(%rdx); leaq (8 * 16)(%rdx), %rdx; /* Handle trailing four blocks. */ .align 8 .Lctr32le_enc_blk4: cmpq $4, %r8; jb .Lctr32le_enc_blk1; leaq -4(%r8), %r8; vbroadcasti128 (0 * 16)(%rdi), %ymm4; /* Increment counters. 
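 *
 * In this 32-bit little-endian counter mode (as used by GCM-SIV) only the low
 * 32-bit word of the counter block is incremented, wrapping modulo 2^32, so
 * no cross-word carry handling is needed; the .Lle_addd_* constants (defined
 * elsewhere in this file) are expected to supply the per-block increments for
 * the two 128-bit lanes of each ymm register.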
*/ vpaddd .Lle_addd_0 rRIP, %ymm15, %ymm0; vpaddd .Lle_addd_2 rRIP, %ymm15, %ymm1; vpaddd .Lle_addd_4_2 rRIP, %ymm15, %ymm15; /* AES rounds */ XOR2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (1 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (2 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (3 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (4 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (5 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (6 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (7 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (8 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (9 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (10 * 16)(%rdi), %ymm4; cmpl $12, %r9d; jb .Lctr32le_enc_blk4_last; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (11 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (12 * 16)(%rdi), %ymm4; jz .Lctr32le_enc_blk4_last; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (13 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (14 * 16)(%rdi), %ymm4; /* Last round and output handling. */ .Lctr32le_enc_blk4_last: vpxor (0 * 16)(%rcx), %ymm4, %ymm5; /* Xor src to last round key. */ vpxor (2 * 16)(%rcx), %ymm4, %ymm6; leaq (4 * 16)(%rcx), %rcx; vaesenclast %ymm5, %ymm0, %ymm0; vaesenclast %ymm6, %ymm1, %ymm1; vmovdqu %ymm0, (0 * 16)(%rdx); vmovdqu %ymm1, (2 * 16)(%rdx); leaq (4 * 16)(%rdx), %rdx; /* Process trailing one to three blocks, one per loop. */ .align 8 .Lctr32le_enc_blk1: cmpq $1, %r8; jb .Ldone_ctr32le_enc; leaq -1(%r8), %r8; /* Load and increament counter. */ vmovdqu %xmm15, %xmm0; vpaddd .Lle_addd_1 rRIP, %xmm15, %xmm15; /* AES rounds. */ vpxor (0 * 16)(%rdi), %xmm0, %xmm0; vaesenc (1 * 16)(%rdi), %xmm0, %xmm0; vaesenc (2 * 16)(%rdi), %xmm0, %xmm0; vaesenc (3 * 16)(%rdi), %xmm0, %xmm0; vaesenc (4 * 16)(%rdi), %xmm0, %xmm0; vaesenc (5 * 16)(%rdi), %xmm0, %xmm0; vaesenc (6 * 16)(%rdi), %xmm0, %xmm0; vaesenc (7 * 16)(%rdi), %xmm0, %xmm0; vaesenc (8 * 16)(%rdi), %xmm0, %xmm0; vaesenc (9 * 16)(%rdi), %xmm0, %xmm0; vmovdqa (10 * 16)(%rdi), %xmm1; cmpl $12, %r9d; jb .Lctr32le_enc_blk1_last; vaesenc %xmm1, %xmm0, %xmm0; vaesenc (11 * 16)(%rdi), %xmm0, %xmm0; vmovdqa (12 * 16)(%rdi), %xmm1; jz .Lctr32le_enc_blk1_last; vaesenc %xmm1, %xmm0, %xmm0; vaesenc (13 * 16)(%rdi), %xmm0, %xmm0; vmovdqa (14 * 16)(%rdi), %xmm1; /* Last round and output handling. */ .Lctr32le_enc_blk1_last: vpxor (%rcx), %xmm1, %xmm1; /* Xor src to last round key. */ leaq 16(%rcx), %rcx; vaesenclast %xmm1, %xmm0, %xmm0; /* Last round and xor with xmm1. 
*/ vmovdqu %xmm0, (%rdx); leaq 16(%rdx), %rdx; jmp .Lctr32le_enc_blk1; .align 8 .Ldone_ctr32le_enc: vmovdqu %xmm15, (%rsi); vzeroall; ret_spec_stop CFI_ENDPROC(); ELF(.size _gcry_vaes_avx2_ctr32le_enc_amd64,.-_gcry_vaes_avx2_ctr32le_enc_amd64) /********************************************************************** OCB-mode encryption/decryption **********************************************************************/ ELF(.type _gcry_vaes_avx2_ocb_crypt_amd64,@function) .globl _gcry_vaes_avx2_ocb_crypt_amd64 .align 16 _gcry_vaes_avx2_ocb_crypt_amd64: /* input: * %rdi: round keys * %esi: nblk * %rdx: dst * %rcx: src * %r8: nblocks * %r9: nrounds * 16(%rbp): offset * 24(%rbp): checksum * 32(%rbp): L-array * 40(%rbp): encrypt (%r15d) */ CFI_STARTPROC(); #define STACK_REGS_POS (16 * 16 + 4 * 16 + 2 * 16) #define STACK_ALLOC (STACK_REGS_POS + 5 * 8) #define OFFSET_PTR_Q 16(%rbp) #define CHECKSUM_PTR_Q 24(%rbp) #define L_ARRAY_PTR_L 32(%rbp) #define ENCRYPT_FLAG_L 40(%rbp) pushq %rbp; CFI_PUSH(%rbp); movq %rsp, %rbp; CFI_DEF_CFA_REGISTER(%rbp); subq $STACK_ALLOC, %rsp; andq $~63, %rsp; movq %r12, (STACK_REGS_POS + 0 * 8)(%rsp); CFI_REG_ON_STACK(r12, STACK_REGS_POS + 0 * 8); movq %r13, (STACK_REGS_POS + 1 * 8)(%rsp); CFI_REG_ON_STACK(r13, STACK_REGS_POS + 1 * 8); movq %r14, (STACK_REGS_POS + 2 * 8)(%rsp); CFI_REG_ON_STACK(r14, STACK_REGS_POS + 2 * 8); movq %r15, (STACK_REGS_POS + 3 * 8)(%rsp); CFI_REG_ON_STACK(r15, STACK_REGS_POS + 3 * 8); movq %rbx, (STACK_REGS_POS + 4 * 8)(%rsp); CFI_REG_ON_STACK(rbx, STACK_REGS_POS + 4 * 8); movl ENCRYPT_FLAG_L, %r15d; /* encrypt-flag. */ movq OFFSET_PTR_Q, %r14; /* offset ptr. */ movq CHECKSUM_PTR_Q, %rbx; /* checksum ptr. */ leal (, %r9d, 4), %eax; vmovdqu (%r14), %xmm15; /* Load offset. */ movq L_ARRAY_PTR_L, %r14; /* L-array ptr. */ vmovdqa (0 * 16)(%rdi), %xmm0; /* first key */ vpxor %xmm14, %xmm14, %xmm14; vpxor %xmm13, %xmm13, %xmm13; vpxor (%rdi, %rax, 4), %xmm0, %xmm0; /* first key ^ last key */ vpxor (0 * 16)(%rdi), %xmm15, %xmm15; /* offset ^ first key */ vmovdqa %xmm0, (14 * 16)(%rsp); vmovdqa %xmm0, (15 * 16)(%rsp); .align 8 .Lhandle_unaligned_ocb: /* Get number of blocks to align nblk to 16 (and L-array optimization). */ movl %esi, %r10d; negl %r10d; andl $15, %r10d; cmpq %r8, %r10; cmovaq %r8, %r10; cmpq $1, %r10; jb .Lunaligned_ocb_done; /* Number of blocks after alignment. */ movq %r8, %r11; subq %r10, %r11; /* If number after alignment is less than 16, skip aligned handling * completely. */ cmp $16, %r11; cmovbq %r8, %r10; /* Unaligned: Process eight blocks per loop. 
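 *
 * OCB offsets follow Offset_i = Offset_{i-1} ^ L[ntz(i)], where ntz(i) is the
 * number of trailing zero bits of the block index: the tzcnt/shll pairs below
 * index the caller-supplied L-array and a vpxor chain keeps the running
 * offset in %xmm15. Note that %xmm15 already has the first round key folded
 * in (offset ^ first key), so the whitening XOR doubles as the round-0
 * AddRoundKey. Per-block sketch for encryption (illustrative only):
 *
 *   Offset_i = Offset_{i-1} ^ L[ntz(i)]
 *   C_i      = Offset_i ^ AES_encrypt(K, P_i ^ Offset_i)
 *   Checksum = Checksum ^ P_i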
*/ .align 8 .Locb_unaligned_blk8: cmpq $8, %r10; jb .Locb_unaligned_blk4; leaq -8(%r8), %r8; leaq -8(%r10), %r10; leal 1(%esi), %r11d; leal 2(%esi), %r12d; leal 3(%esi), %r13d; leal 4(%esi), %eax; tzcntl %r11d, %r11d; tzcntl %r12d, %r12d; tzcntl %r13d, %r13d; tzcntl %eax, %eax; shll $4, %r11d; shll $4, %r12d; shll $4, %r13d; shll $4, %eax; vpxor (%r14, %r11), %xmm15, %xmm5; vpxor (%r14, %r12), %xmm5, %xmm6; vpxor (%r14, %r13), %xmm6, %xmm7; vpxor (%r14, %rax), %xmm7, %xmm8; leal 5(%esi), %r11d; leal 6(%esi), %r12d; leal 7(%esi), %r13d; leal 8(%esi), %esi; tzcntl %r11d, %r11d; tzcntl %r12d, %r12d; tzcntl %r13d, %r13d; tzcntl %esi, %eax; shll $4, %r11d; shll $4, %r12d; shll $4, %r13d; shll $4, %eax; vpxor (%r14, %r11), %xmm8, %xmm9; vpxor (%r14, %r12), %xmm9, %xmm10; vpxor (%r14, %r13), %xmm10, %xmm11; vpxor (%r14, %rax), %xmm11, %xmm15; vinserti128 $1, %xmm6, %ymm5, %ymm5; vinserti128 $1, %xmm8, %ymm7, %ymm6; vinserti128 $1, %xmm10, %ymm9, %ymm7; vinserti128 $1, %xmm15, %ymm11, %ymm8; testl %r15d, %r15d; jz .Locb_unaligned_blk8_dec; vmovdqu (0 * 16)(%rcx), %ymm0; vmovdqu (2 * 16)(%rcx), %ymm1; vmovdqu (4 * 16)(%rcx), %ymm2; vmovdqu (6 * 16)(%rcx), %ymm3; leaq (8 * 16)(%rcx), %rcx; vpxor %ymm0, %ymm14, %ymm14; vpxor %ymm1, %ymm13, %ymm13; vpxor %ymm2, %ymm14, %ymm14; vpxor %ymm3, %ymm13, %ymm13; vpxor %ymm5, %ymm0, %ymm0; vpxor %ymm6, %ymm1, %ymm1; vpxor %ymm7, %ymm2, %ymm2; vpxor %ymm8, %ymm3, %ymm3; vmovdqa (14 * 16)(%rsp), %ymm9; /* AES rounds */ vbroadcasti128 (1 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (2 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (3 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (4 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (5 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (6 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (7 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (8 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (9 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); cmpl $12, %r9d; jb .Locb_unaligned_blk8_enc_last; vbroadcasti128 (10 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (11 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); jz .Locb_unaligned_blk8_enc_last; vbroadcasti128 (12 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (13 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); /* Last round and output handling. */ .Locb_unaligned_blk8_enc_last: vpxor %ymm5, %ymm9, %ymm5; /* Xor src to last round key. 
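 *
 * More precisely, the value folded in here is the per-block offset (which
 * already carries the first round key): %ymm9 holds the precomputed
 * (first key ^ last key) value from (14 * 16)(%rsp), so
 * (offset ^ first key) ^ (first key ^ last key) = offset ^ last key, and
 * vaesenclast/vaesdeclast applies the last round key and the OCB output
 * whitening in a single step.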
*/ vpxor %ymm6, %ymm9, %ymm6; vpxor %ymm7, %ymm9, %ymm7; vpxor %ymm8, %ymm9, %ymm4; vaesenclast %ymm5, %ymm0, %ymm0; vaesenclast %ymm6, %ymm1, %ymm1; vaesenclast %ymm7, %ymm2, %ymm2; vaesenclast %ymm4, %ymm3, %ymm3; vmovdqu %ymm0, (0 * 16)(%rdx); vmovdqu %ymm1, (2 * 16)(%rdx); vmovdqu %ymm2, (4 * 16)(%rdx); vmovdqu %ymm3, (6 * 16)(%rdx); leaq (8 * 16)(%rdx), %rdx; jmp .Locb_unaligned_blk8; .align 8 .Locb_unaligned_blk8_dec: vpxor (0 * 16)(%rcx), %ymm5, %ymm0; vpxor (2 * 16)(%rcx), %ymm6, %ymm1; vpxor (4 * 16)(%rcx), %ymm7, %ymm2; vpxor (6 * 16)(%rcx), %ymm8, %ymm3; leaq (8 * 16)(%rcx), %rcx; vmovdqa (14 * 16)(%rsp), %ymm9; /* AES rounds */ vbroadcasti128 (1 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (2 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (3 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (4 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (5 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (6 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (7 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (8 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (9 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); cmpl $12, %r9d; jb .Locb_unaligned_blk8_dec_last; vbroadcasti128 (10 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (11 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); jz .Locb_unaligned_blk8_dec_last; vbroadcasti128 (12 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (13 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); /* Last round and output handling. */ .Locb_unaligned_blk8_dec_last: vpxor %ymm5, %ymm9, %ymm5; /* Xor src to last round key. */ vpxor %ymm6, %ymm9, %ymm6; vpxor %ymm7, %ymm9, %ymm7; vpxor %ymm8, %ymm9, %ymm4; vaesdeclast %ymm5, %ymm0, %ymm0; vaesdeclast %ymm6, %ymm1, %ymm1; vaesdeclast %ymm7, %ymm2, %ymm2; vaesdeclast %ymm4, %ymm3, %ymm3; vpxor %ymm0, %ymm14, %ymm14; vpxor %ymm1, %ymm13, %ymm13; vpxor %ymm2, %ymm14, %ymm14; vpxor %ymm3, %ymm13, %ymm13; vmovdqu %ymm0, (0 * 16)(%rdx); vmovdqu %ymm1, (2 * 16)(%rdx); vmovdqu %ymm2, (4 * 16)(%rdx); vmovdqu %ymm3, (6 * 16)(%rdx); leaq (8 * 16)(%rdx), %rdx; jmp .Locb_unaligned_blk8; /* Unaligned: Process four blocks. 
*/ .align 8 .Locb_unaligned_blk4: cmpq $4, %r10; jb .Locb_unaligned_blk1; leaq -4(%r8), %r8; leaq -4(%r10), %r10; leal 1(%esi), %r11d; leal 2(%esi), %r12d; leal 3(%esi), %r13d; leal 4(%esi), %esi; tzcntl %r11d, %r11d; tzcntl %r12d, %r12d; tzcntl %r13d, %r13d; tzcntl %esi, %eax; shll $4, %r11d; shll $4, %r12d; shll $4, %r13d; shll $4, %eax; vpxor (%r14, %r11), %xmm15, %xmm5; vpxor (%r14, %r12), %xmm5, %xmm6; vinserti128 $1, %xmm6, %ymm5, %ymm5; vpxor (%r14, %r13), %xmm6, %xmm7; vpxor (%r14, %rax), %xmm7, %xmm15; vinserti128 $1, %xmm15, %ymm7, %ymm6; testl %r15d, %r15d; jz .Locb_unaligned_blk4_dec; vmovdqu (0 * 16)(%rcx), %ymm0; vmovdqu (2 * 16)(%rcx), %ymm1; leaq (4 * 16)(%rcx), %rcx; vpxor %ymm0, %ymm14, %ymm14; vpxor %ymm1, %ymm13, %ymm13; vpxor %ymm5, %ymm0, %ymm0; vpxor %ymm6, %ymm1, %ymm1; /* AES rounds */ vbroadcasti128 (1 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (2 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (3 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (4 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (5 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (6 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (7 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (8 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (9 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); cmpl $12, %r9d; jb .Locb_unaligned_blk4_enc_last; vbroadcasti128 (10 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (11 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); jz .Locb_unaligned_blk4_enc_last; vbroadcasti128 (12 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (13 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); /* Last round and output handling. */ .Locb_unaligned_blk4_enc_last: vmovdqa (14 * 16)(%rsp), %ymm8; vpxor %ymm5, %ymm8, %ymm5; /* Xor src to last round key. */ vpxor %ymm6, %ymm8, %ymm6; vaesenclast %ymm5, %ymm0, %ymm0; vaesenclast %ymm6, %ymm1, %ymm1; vmovdqu %ymm0, (0 * 16)(%rdx); vmovdqu %ymm1, (2 * 16)(%rdx); leaq (4 * 16)(%rdx), %rdx; jmp .Locb_unaligned_blk1; .align 8 .Locb_unaligned_blk4_dec: vpxor (0 * 16)(%rcx), %ymm5, %ymm0; vpxor (2 * 16)(%rcx), %ymm6, %ymm1; leaq (4 * 16)(%rcx), %rcx; /* AES rounds */ vbroadcasti128 (1 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (2 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (3 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (4 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (5 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (6 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (7 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (8 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (9 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); cmpl $12, %r9d; jb .Locb_unaligned_blk4_dec_last; vbroadcasti128 (10 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (11 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); jz .Locb_unaligned_blk4_dec_last; vbroadcasti128 (12 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (13 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); /* Last round and output handling. */ .Locb_unaligned_blk4_dec_last: vmovdqa (14 * 16)(%rsp), %ymm8; vpxor %ymm5, %ymm8, %ymm5; /* Xor src to last round key. 
*/ vpxor %ymm6, %ymm8, %ymm6; vaesdeclast %ymm5, %ymm0, %ymm0; vaesdeclast %ymm6, %ymm1, %ymm1; vpxor %ymm0, %ymm14, %ymm14; vpxor %ymm1, %ymm13, %ymm13; vmovdqu %ymm0, (0 * 16)(%rdx); vmovdqu %ymm1, (2 * 16)(%rdx); leaq (4 * 16)(%rdx), %rdx; /* Unaligned: Process one block per loop. */ .align 8 .Locb_unaligned_blk1: cmpq $1, %r10; jb .Lunaligned_ocb_done; leaq -1(%r8), %r8; leaq -1(%r10), %r10; leal 1(%esi), %esi; tzcntl %esi, %r11d; shll $4, %r11d; vpxor (%r14, %r11), %xmm15, %xmm15; testl %r15d, %r15d; jz .Locb_unaligned_blk1_dec; vmovdqu (%rcx), %xmm0; vpxor %ymm0, %ymm14, %ymm14; vpxor %xmm15, %xmm0, %xmm0; leaq 16(%rcx), %rcx; /* AES rounds. */ vaesenc (1 * 16)(%rdi), %xmm0, %xmm0; vaesenc (2 * 16)(%rdi), %xmm0, %xmm0; vaesenc (3 * 16)(%rdi), %xmm0, %xmm0; vaesenc (4 * 16)(%rdi), %xmm0, %xmm0; vaesenc (5 * 16)(%rdi), %xmm0, %xmm0; vaesenc (6 * 16)(%rdi), %xmm0, %xmm0; vaesenc (7 * 16)(%rdi), %xmm0, %xmm0; vaesenc (8 * 16)(%rdi), %xmm0, %xmm0; vaesenc (9 * 16)(%rdi), %xmm0, %xmm0; cmpl $12, %r9d; jb .Locb_unaligned_blk1_enc_last; vaesenc (10 * 16)(%rdi), %xmm0, %xmm0; vaesenc (11 * 16)(%rdi), %xmm0, %xmm0; jz .Locb_unaligned_blk1_enc_last; vaesenc (12 * 16)(%rdi), %xmm0, %xmm0; vaesenc (13 * 16)(%rdi), %xmm0, %xmm0; /* Last round and output handling. */ .Locb_unaligned_blk1_enc_last: vpxor (14 * 16)(%rsp), %xmm15, %xmm1; vaesenclast %xmm1, %xmm0, %xmm0; vmovdqu %xmm0, (%rdx); leaq 16(%rdx), %rdx; jmp .Locb_unaligned_blk1; .align 8 .Locb_unaligned_blk1_dec: vpxor (%rcx), %xmm15, %xmm0; leaq 16(%rcx), %rcx; /* AES rounds. */ vaesdec (1 * 16)(%rdi), %xmm0, %xmm0; vaesdec (2 * 16)(%rdi), %xmm0, %xmm0; vaesdec (3 * 16)(%rdi), %xmm0, %xmm0; vaesdec (4 * 16)(%rdi), %xmm0, %xmm0; vaesdec (5 * 16)(%rdi), %xmm0, %xmm0; vaesdec (6 * 16)(%rdi), %xmm0, %xmm0; vaesdec (7 * 16)(%rdi), %xmm0, %xmm0; vaesdec (8 * 16)(%rdi), %xmm0, %xmm0; vaesdec (9 * 16)(%rdi), %xmm0, %xmm0; cmpl $12, %r9d; jb .Locb_unaligned_blk1_dec_last; vaesdec (10 * 16)(%rdi), %xmm0, %xmm0; vaesdec (11 * 16)(%rdi), %xmm0, %xmm0; jz .Locb_unaligned_blk1_dec_last; vaesdec (12 * 16)(%rdi), %xmm0, %xmm0; vaesdec (13 * 16)(%rdi), %xmm0, %xmm0; /* Last round and output handling. */ .Locb_unaligned_blk1_dec_last: vpxor (14 * 16)(%rsp), %xmm15, %xmm1; vaesdeclast %xmm1, %xmm0, %xmm0; vpxor %ymm0, %ymm14, %ymm14; vmovdqu %xmm0, (%rdx); leaq 16(%rdx), %rdx; jmp .Locb_unaligned_blk1; .align 8 .Lunaligned_ocb_done: cmpq $1, %r8; jb .Ldone_ocb; /* Short buffers do not benefit from L-array optimization. */ movq %r8, %r10; cmpq $16, %r8; jb .Locb_unaligned_blk8; vinserti128 $1, %xmm15, %ymm15, %ymm15; /* Prepare L-array optimization. 
* Since nblk is aligned to 16, offsets will have following * construction: * - block1 = ntz{0} = offset ^ L[0] * - block2 = ntz{1} = offset ^ L[0] ^ L[1] * - block3 = ntz{0} = offset ^ L[1] * - block4 = ntz{2} = offset ^ L[1] ^ L[2] * - block5 = ntz{0} = offset ^ L[0] ^ L[1] ^ L[2] * - block6 = ntz{1} = offset ^ L[0] ^ L[2] * - block7 = ntz{0} = offset ^ L[2] * - block8 = ntz{3} = offset ^ L[2] ^ L[3] * - block9 = ntz{0} = offset ^ L[0] ^ L[2] ^ L[3] * - block10 = ntz{1} = offset ^ L[0] ^ L[1] ^ L[2] ^ L[3] * - block11 = ntz{0} = offset ^ L[1] ^ L[2] ^ L[3] * - block12 = ntz{2} = offset ^ L[1] ^ L[3] * - block13 = ntz{0} = offset ^ L[0] ^ L[1] ^ L[3] * - block14 = ntz{1} = offset ^ L[0] ^ L[3] * - block15 = ntz{0} = offset ^ L[3] * - block16 = ntz{x} = offset ^ L[3] ^ L[ntz{x}] */ vmovdqu (0 * 16)(%r14), %xmm0; vmovdqu (1 * 16)(%r14), %xmm1; vmovdqu (2 * 16)(%r14), %xmm2; vmovdqu (3 * 16)(%r14), %xmm3; vpxor %ymm13, %ymm14, %ymm14; vmovdqa %ymm14, (20 * 16)(%rsp); vpxor %xmm0, %xmm1, %xmm4; /* L[0] ^ L[1] */ vpxor %xmm0, %xmm2, %xmm5; /* L[0] ^ L[2] */ vpxor %xmm0, %xmm3, %xmm6; /* L[0] ^ L[3] */ vpxor %xmm1, %xmm2, %xmm7; /* L[1] ^ L[2] */ vpxor %xmm1, %xmm3, %xmm8; /* L[1] ^ L[3] */ vpxor %xmm2, %xmm3, %xmm9; /* L[2] ^ L[3] */ vpxor %xmm4, %xmm2, %xmm10; /* L[0] ^ L[1] ^ L[2] */ vpxor %xmm5, %xmm3, %xmm11; /* L[0] ^ L[2] ^ L[3] */ vpxor %xmm7, %xmm3, %xmm12; /* L[1] ^ L[2] ^ L[3] */ vpxor %xmm0, %xmm8, %xmm13; /* L[0] ^ L[1] ^ L[3] */ vpxor %xmm4, %xmm9, %xmm14; /* L[0] ^ L[1] ^ L[2] ^ L[3] */ vinserti128 $1, %xmm4, %ymm0, %ymm0; vinserti128 $1, %xmm7, %ymm1, %ymm1; vinserti128 $1, %xmm5, %ymm10, %ymm10; vinserti128 $1, %xmm9, %ymm2, %ymm2; vinserti128 $1, %xmm14, %ymm11, %ymm11; vinserti128 $1, %xmm8, %ymm12, %ymm12; vinserti128 $1, %xmm6, %ymm13, %ymm13; vmovdqa %ymm0, (0 * 16)(%rsp); vmovdqa %ymm1, (2 * 16)(%rsp); vmovdqa %ymm10, (4 * 16)(%rsp); vmovdqa %ymm2, (6 * 16)(%rsp); vmovdqa %ymm11, (8 * 16)(%rsp); vmovdqa %ymm12, (10 * 16)(%rsp); vmovdqa %ymm13, (12 * 16)(%rsp); /* Aligned: Process 16 blocks per loop. 
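 *
 * Because nblk is now a multiple of 16, the offset masks for the first
 * fourteen blocks of each group depend only on L[0..3] and are read from the
 * table just written to (0..12 * 16)(%rsp); block 15 adds L[3] directly and
 * only block 16 needs a tzcnt of (nblk + 16). The two checksum accumulators
 * were merged and parked at (20 * 16)(%rsp) so that all sixteen ymm registers
 * are free for the wide loop.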
*/ .align 8 .Locb_aligned_blk16: cmpq $16, %r8; jb .Locb_aligned_blk8; leaq -16(%r8), %r8; leal 16(%esi), %esi; tzcntl %esi, %eax; shll $4, %eax; vpxor (0 * 16)(%rsp), %ymm15, %ymm8; vpxor (2 * 16)(%rsp), %ymm15, %ymm9; vpxor (4 * 16)(%rsp), %ymm15, %ymm10; vpxor (6 * 16)(%rsp), %ymm15, %ymm11; vpxor (8 * 16)(%rsp), %ymm15, %ymm12; vpxor (3 * 16)(%r14), %xmm15, %xmm13; /* offset ^ first key ^ L[3] */ vpxor (%r14, %rax), %xmm13, %xmm14; /* offset ^ first key ^ L[3] ^ L[ntz{nblk+16}] */ vinserti128 $1, %xmm14, %ymm13, %ymm14; testl %r15d, %r15d; jz .Locb_aligned_blk16_dec; vmovdqu (0 * 16)(%rcx), %ymm0; vmovdqu (2 * 16)(%rcx), %ymm1; vmovdqu (4 * 16)(%rcx), %ymm2; vmovdqu (6 * 16)(%rcx), %ymm3; vpxor (8 * 16)(%rcx), %ymm0, %ymm4; vpxor (10 * 16)(%rcx), %ymm1, %ymm5; vpxor (12 * 16)(%rcx), %ymm2, %ymm6; vpxor (14 * 16)(%rcx), %ymm3, %ymm7; vpxor %ymm4, %ymm5, %ymm5; vpxor %ymm6, %ymm7, %ymm7; vpxor %ymm5, %ymm7, %ymm7; vpxor (20 * 16)(%rsp), %ymm7, %ymm7; vmovdqa %ymm7, (20 * 16)(%rsp); vpxor (10 * 16)(%rsp), %ymm15, %ymm13; vpxor (14 * 16)(%rcx), %ymm14, %ymm7; vpxor %ymm8, %ymm0, %ymm0; vpxor %ymm9, %ymm1, %ymm1; vpxor %ymm10, %ymm2, %ymm2; vpxor %ymm11, %ymm3, %ymm3; vpxor (8 * 16)(%rcx), %ymm12, %ymm4; vpxor (10 * 16)(%rcx), %ymm13, %ymm5; vmovdqa %ymm13, (16 * 16)(%rsp); vpxor (12 * 16)(%rsp), %ymm15, %ymm13; vpxor (12 * 16)(%rcx), %ymm13, %ymm6; vmovdqa %ymm13, (18 * 16)(%rsp); leaq (16 * 16)(%rcx), %rcx; vperm2i128 $0x11, %ymm14, %ymm14, %ymm15; /* AES rounds */ vbroadcasti128 (1 * 16)(%rdi), %ymm13; VAESENC8(%ymm13, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (2 * 16)(%rdi), %ymm13; VAESENC8(%ymm13, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (3 * 16)(%rdi), %ymm13; VAESENC8(%ymm13, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (4 * 16)(%rdi), %ymm13; VAESENC8(%ymm13, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (5 * 16)(%rdi), %ymm13; VAESENC8(%ymm13, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (6 * 16)(%rdi), %ymm13; VAESENC8(%ymm13, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (7 * 16)(%rdi), %ymm13; VAESENC8(%ymm13, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (8 * 16)(%rdi), %ymm13; VAESENC8(%ymm13, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (9 * 16)(%rdi), %ymm13; VAESENC8(%ymm13, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); cmpl $12, %r9d; jb .Locb_aligned_blk16_enc_last; vbroadcasti128 (10 * 16)(%rdi), %ymm13; VAESENC8(%ymm13, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (11 * 16)(%rdi), %ymm13; VAESENC8(%ymm13, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); jz .Locb_aligned_blk16_enc_last; vbroadcasti128 (12 * 16)(%rdi), %ymm13; VAESENC8(%ymm13, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (13 * 16)(%rdi), %ymm13; VAESENC8(%ymm13, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); /* Last round and output handling. 
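 *
 * For encryption the OCB checksum is taken over the plaintext, so the sixteen
 * input blocks were XOR-folded together above and added into the running sum
 * at (20 * 16)(%rsp) before the whitening; the decryption path performs the
 * same fold over its outputs after the vaesdeclast step instead.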
*/ .Locb_aligned_blk16_enc_last: vmovdqa (14 * 16)(%rsp), %ymm13; vpxor %ymm8, %ymm13, %ymm8; vpxor %ymm9, %ymm13, %ymm9; vpxor %ymm10, %ymm13, %ymm10; vpxor %ymm11, %ymm13, %ymm11; vaesenclast %ymm8, %ymm0, %ymm0; vaesenclast %ymm9, %ymm1, %ymm1; vaesenclast %ymm10, %ymm2, %ymm2; vaesenclast %ymm11, %ymm3, %ymm3; vpxor %ymm12, %ymm13, %ymm12; vpxor (16 * 16)(%rsp), %ymm13, %ymm8; vpxor (18 * 16)(%rsp), %ymm13, %ymm9; vpxor %ymm14, %ymm13, %ymm13; vaesenclast %ymm12, %ymm4, %ymm4; vaesenclast %ymm8, %ymm5, %ymm5; vaesenclast %ymm9, %ymm6, %ymm6; vaesenclast %ymm13, %ymm7, %ymm7; vmovdqu %ymm0, (0 * 16)(%rdx); vmovdqu %ymm1, (2 * 16)(%rdx); vmovdqu %ymm2, (4 * 16)(%rdx); vmovdqu %ymm3, (6 * 16)(%rdx); vmovdqu %ymm4, (8 * 16)(%rdx); vmovdqu %ymm5, (10 * 16)(%rdx); vmovdqu %ymm6, (12 * 16)(%rdx); vmovdqu %ymm7, (14 * 16)(%rdx); leaq (16 * 16)(%rdx), %rdx; jmp .Locb_aligned_blk16; .align 8 .Locb_aligned_blk16_dec: vpxor (10 * 16)(%rsp), %ymm15, %ymm13; vpxor (14 * 16)(%rcx), %ymm14, %ymm7; vpxor (0 * 16)(%rcx), %ymm8, %ymm0; vpxor (2 * 16)(%rcx), %ymm9, %ymm1; vpxor (4 * 16)(%rcx), %ymm10, %ymm2; vpxor (6 * 16)(%rcx), %ymm11, %ymm3; vpxor (8 * 16)(%rcx), %ymm12, %ymm4; vpxor (10 * 16)(%rcx), %ymm13, %ymm5; vmovdqa %ymm13, (16 * 16)(%rsp); vpxor (12 * 16)(%rsp), %ymm15, %ymm13; vpxor (12 * 16)(%rcx), %ymm13, %ymm6; vmovdqa %ymm13, (18 * 16)(%rsp); leaq (16 * 16)(%rcx), %rcx; vperm2i128 $0x11, %ymm14, %ymm14, %ymm15; /* AES rounds */ vbroadcasti128 (1 * 16)(%rdi), %ymm13; VAESDEC8(%ymm13, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (2 * 16)(%rdi), %ymm13; VAESDEC8(%ymm13, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (3 * 16)(%rdi), %ymm13; VAESDEC8(%ymm13, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (4 * 16)(%rdi), %ymm13; VAESDEC8(%ymm13, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (5 * 16)(%rdi), %ymm13; VAESDEC8(%ymm13, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (6 * 16)(%rdi), %ymm13; VAESDEC8(%ymm13, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (7 * 16)(%rdi), %ymm13; VAESDEC8(%ymm13, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (8 * 16)(%rdi), %ymm13; VAESDEC8(%ymm13, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (9 * 16)(%rdi), %ymm13; VAESDEC8(%ymm13, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); cmpl $12, %r9d; jb .Locb_aligned_blk16_dec_last; vbroadcasti128 (10 * 16)(%rdi), %ymm13; VAESDEC8(%ymm13, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (11 * 16)(%rdi), %ymm13; VAESDEC8(%ymm13, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); jz .Locb_aligned_blk16_dec_last; vbroadcasti128 (12 * 16)(%rdi), %ymm13; VAESDEC8(%ymm13, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (13 * 16)(%rdi), %ymm13; VAESDEC8(%ymm13, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); /* Last round and output handling. 
*/ .Locb_aligned_blk16_dec_last: vmovdqa (14 * 16)(%rsp), %ymm13; vpxor %ymm8, %ymm13, %ymm8; vpxor %ymm9, %ymm13, %ymm9; vpxor %ymm10, %ymm13, %ymm10; vpxor %ymm11, %ymm13, %ymm11; vaesdeclast %ymm8, %ymm0, %ymm0; vaesdeclast %ymm9, %ymm1, %ymm1; vaesdeclast %ymm10, %ymm2, %ymm2; vaesdeclast %ymm11, %ymm3, %ymm3; vpxor %ymm12, %ymm13, %ymm12; vpxor (16 * 16)(%rsp), %ymm13, %ymm8; vpxor (18 * 16)(%rsp), %ymm13, %ymm9; vpxor %ymm14, %ymm13, %ymm13; vaesdeclast %ymm12, %ymm4, %ymm4; vaesdeclast %ymm8, %ymm5, %ymm5; vaesdeclast %ymm9, %ymm6, %ymm6; vaesdeclast %ymm13, %ymm7, %ymm7; vmovdqu %ymm0, (0 * 16)(%rdx); vmovdqu %ymm1, (2 * 16)(%rdx); vmovdqu %ymm2, (4 * 16)(%rdx); vmovdqu %ymm3, (6 * 16)(%rdx); vpxor %ymm1, %ymm0, %ymm0; vpxor %ymm3, %ymm2, %ymm2; vmovdqu %ymm4, (8 * 16)(%rdx); vmovdqu %ymm5, (10 * 16)(%rdx); vmovdqu %ymm6, (12 * 16)(%rdx); vmovdqu %ymm7, (14 * 16)(%rdx); vpxor %ymm5, %ymm4, %ymm4; vpxor %ymm7, %ymm6, %ymm6; leaq (16 * 16)(%rdx), %rdx; vpxor %ymm4, %ymm0, %ymm0; vpxor %ymm6, %ymm2, %ymm2; vpxor %ymm2, %ymm0, %ymm0; vpxor (20 * 16)(%rsp), %ymm0, %ymm0; vmovdqa %ymm0, (20 * 16)(%rsp); jmp .Locb_aligned_blk16; /* Aligned: Process trailing eight blocks. */ .align 8 .Locb_aligned_blk8: cmpq $8, %r8; jb .Locb_aligned_done; leaq -8(%r8), %r8; leal 8(%esi), %esi; tzcntl %esi, %eax; shll $4, %eax; vpxor (0 * 16)(%rsp), %ymm15, %ymm5; vpxor (2 * 16)(%rsp), %ymm15, %ymm6; vpxor (4 * 16)(%rsp), %ymm15, %ymm7; vpxor (2 * 16)(%r14), %xmm15, %xmm13; /* offset ^ first key ^ L[2] */ vpxor (%r14, %rax), %xmm13, %xmm14; /* offset ^ first key ^ L[2] ^ L[ntz{nblk+8}] */ vinserti128 $1, %xmm14, %ymm13, %ymm14; testl %r15d, %r15d; jz .Locb_aligned_blk8_dec; vmovdqu (0 * 16)(%rcx), %ymm0; vmovdqu (2 * 16)(%rcx), %ymm1; vmovdqu (4 * 16)(%rcx), %ymm2; vmovdqu (6 * 16)(%rcx), %ymm3; vpxor %ymm2, %ymm0, %ymm10; vpxor %ymm3, %ymm1, %ymm11; vpxor %ymm11, %ymm10, %ymm10; vpxor (20 * 16)(%rsp), %ymm10, %ymm10; vmovdqa %ymm10, (20 * 16)(%rsp); vpxor %ymm5, %ymm0, %ymm0; vpxor %ymm6, %ymm1, %ymm1; vpxor %ymm7, %ymm2, %ymm2; vpxor %ymm14, %ymm3, %ymm3; leaq (8 * 16)(%rcx), %rcx; vperm2i128 $0x11, %ymm14, %ymm14, %ymm15; vmovdqa (14 * 16)(%rsp), %ymm8; /* AES rounds */ vbroadcasti128 (1 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (2 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (3 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (4 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (5 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (6 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (7 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (8 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (9 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); cmpl $12, %r9d; jb .Locb_aligned_blk8_enc_last; vbroadcasti128 (10 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (11 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); jz .Locb_aligned_blk8_enc_last; vbroadcasti128 (12 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (13 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); /* Last round and output handling. 
*/ .Locb_aligned_blk8_enc_last: vpxor %ymm5, %ymm8, %ymm5; vpxor %ymm6, %ymm8, %ymm6; vpxor %ymm7, %ymm8, %ymm7; vpxor %ymm14, %ymm8, %ymm4; vaesenclast %ymm5, %ymm0, %ymm0; vaesenclast %ymm6, %ymm1, %ymm1; vaesenclast %ymm7, %ymm2, %ymm2; vaesenclast %ymm4, %ymm3, %ymm3; vmovdqu %ymm0, (0 * 16)(%rdx); vmovdqu %ymm1, (2 * 16)(%rdx); vmovdqu %ymm2, (4 * 16)(%rdx); vmovdqu %ymm3, (6 * 16)(%rdx); leaq (8 * 16)(%rdx), %rdx; jmp .Locb_aligned_done; .align 8 .Locb_aligned_blk8_dec: vpxor (0 * 16)(%rcx), %ymm5, %ymm0; vpxor (2 * 16)(%rcx), %ymm6, %ymm1; vpxor (4 * 16)(%rcx), %ymm7, %ymm2; vpxor (6 * 16)(%rcx), %ymm14, %ymm3; leaq (8 * 16)(%rcx), %rcx; vperm2i128 $0x11, %ymm14, %ymm14, %ymm15; vmovdqa (14 * 16)(%rsp), %ymm8; /* AES rounds */ vbroadcasti128 (1 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (2 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (3 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (4 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (5 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (6 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (7 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (8 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (9 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); cmpl $12, %r9d; jb .Locb_aligned_blk8_dec_last; vbroadcasti128 (10 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (11 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); jz .Locb_aligned_blk8_dec_last; vbroadcasti128 (12 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (13 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (14 * 16)(%rdi), %ymm4; /* Last round and output handling. */ .Locb_aligned_blk8_dec_last: vpxor %ymm5, %ymm8, %ymm5; vpxor %ymm6, %ymm8, %ymm6; vpxor %ymm7, %ymm8, %ymm7; vpxor %ymm14, %ymm8, %ymm4; vaesdeclast %ymm5, %ymm0, %ymm0; vaesdeclast %ymm6, %ymm1, %ymm1; vaesdeclast %ymm7, %ymm2, %ymm2; vaesdeclast %ymm4, %ymm3, %ymm3; vmovdqu %ymm0, (0 * 16)(%rdx); vmovdqu %ymm1, (2 * 16)(%rdx); vmovdqu %ymm2, (4 * 16)(%rdx); vmovdqu %ymm3, (6 * 16)(%rdx); leaq (8 * 16)(%rdx), %rdx; vpxor %ymm1, %ymm0, %ymm0; vpxor %ymm3, %ymm2, %ymm2; vpxor %ymm2, %ymm0, %ymm0; vpxor (20 * 16)(%rsp), %ymm0, %ymm0; vmovdqa %ymm0, (20 * 16)(%rsp); .align 8 .Locb_aligned_done: vmovdqa (20 * 16)(%rsp), %ymm14; vpxor %xmm13, %xmm13, %xmm13; /* Burn stack. */ vmovdqa %ymm13, (0 * 16)(%rsp); vmovdqa %ymm13, (2 * 16)(%rsp); vmovdqa %ymm13, (4 * 16)(%rsp); vmovdqa %ymm13, (6 * 16)(%rsp); vmovdqa %ymm13, (8 * 16)(%rsp); vmovdqa %ymm13, (10 * 16)(%rsp); vmovdqa %ymm13, (12 * 16)(%rsp); vmovdqa %ymm13, (16 * 16)(%rsp); vmovdqa %ymm13, (18 * 16)(%rsp); vmovdqa %ymm13, (20 * 16)(%rsp); /* Handle tailing 1…7 blocks in nblk-unaligned loop. */ movq %r8, %r10; cmpq $1, %r8; jnb .Locb_unaligned_blk8; .align 8 .Ldone_ocb: vpxor %ymm13, %ymm14, %ymm14; vextracti128 $1, %ymm14, %xmm13; vpxor (%rbx), %xmm14, %xmm14; vpxor %xmm13, %xmm14, %xmm14; vmovdqu %xmm14, (%rbx); movq OFFSET_PTR_Q, %r14; /* offset ptr. */ vpxor (0 * 16)(%rdi), %xmm15, %xmm15; /* offset ^ first key ^ first key */ vmovdqu %xmm15, (%r14); /* Store offset. */ /* Burn stack. 
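 *
 * The stack scratch area held key-derived material (the combined first/last
 * round key and the offset and checksum masks), so the remaining slot is
 * zeroed here and vzeroall clears the vector registers before the
 * callee-saved registers are restored.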
*/ vpxor %ymm0, %ymm0, %ymm0; vmovdqa %ymm0, (14 * 16)(%rsp); vzeroall; movq (STACK_REGS_POS + 0 * 8)(%rsp), %r12; CFI_RESTORE(%r12); movq (STACK_REGS_POS + 1 * 8)(%rsp), %r13; CFI_RESTORE(%r13); movq (STACK_REGS_POS + 2 * 8)(%rsp), %r14; CFI_RESTORE(%r14); movq (STACK_REGS_POS + 3 * 8)(%rsp), %r15; CFI_RESTORE(%r15); movq (STACK_REGS_POS + 4 * 8)(%rsp), %rbx; CFI_RESTORE(%rbx); leave; CFI_LEAVE(); ret_spec_stop #undef STACK_REGS_POS #undef STACK_ALLOC CFI_ENDPROC(); ELF(.size _gcry_vaes_avx2_ocb_crypt_amd64,.-_gcry_vaes_avx2_ocb_crypt_amd64) /********************************************************************** XTS-mode encryption **********************************************************************/ ELF(.type _gcry_vaes_avx2_xts_crypt_amd64,@function) .globl _gcry_vaes_avx2_xts_crypt_amd64 .align 16 _gcry_vaes_avx2_xts_crypt_amd64: /* input: * %rdi: round keys * %rsi: tweak * %rdx: dst * %rcx: src * %r8: nblocks * %r9: nrounds * 8(%rsp): encrypt */ CFI_STARTPROC(); movl 8(%rsp), %eax; #define tweak_clmul(shift, out, tweak, hi_tweak, tmp1, tmp2) \ vpsrld $(32-(shift)), hi_tweak, tmp2; \ vpsllq $(shift), tweak, out; \ vpclmulqdq $0, .Lxts_gfmul_clmul rRIP, tmp2, tmp1; \ vpunpckhqdq tmp2, tmp1, tmp1; \ vpxor tmp1, out, out; /* Prepare tweak. */ vmovdqu (%rsi), %xmm15; vpshufb .Lxts_high_bit_shuf rRIP, %xmm15, %xmm13; tweak_clmul(1, %xmm11, %xmm15, %xmm13, %xmm0, %xmm1); vinserti128 $1, %xmm11, %ymm15, %ymm15; /* tweak:tweak1 */ vpshufb .Lxts_high_bit_shuf rRIP, %ymm15, %ymm13; cmpq $8, %r8; jb .Lxts_crypt_blk4; /* Process eight blocks per loop. */ leaq -8(%r8), %r8; vmovdqa %ymm15, %ymm5; tweak_clmul(2, %ymm6, %ymm15, %ymm13, %ymm0, %ymm1); tweak_clmul(4, %ymm7, %ymm15, %ymm13, %ymm0, %ymm1); tweak_clmul(6, %ymm8, %ymm15, %ymm13, %ymm0, %ymm1); tweak_clmul(8, %ymm15, %ymm15, %ymm13, %ymm0, %ymm1); vpshufb .Lxts_high_bit_shuf rRIP, %ymm15, %ymm13; vbroadcasti128 (0 * 16)(%rdi), %ymm4; vpxor (0 * 16)(%rcx), %ymm5, %ymm0; vpxor (2 * 16)(%rcx), %ymm6, %ymm1; vpxor (4 * 16)(%rcx), %ymm7, %ymm2; vpxor (6 * 16)(%rcx), %ymm8, %ymm3; leaq (8 * 16)(%rcx), %rcx; .align 8 .Lxts_crypt_blk8_loop: cmpq $8, %r8; jb .Lxts_crypt_blk8_tail; leaq -8(%r8), %r8; testl %eax, %eax; jz .Lxts_dec_blk8; /* AES rounds */ XOR4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (1 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (2 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (3 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (4 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vmovdqa %ymm15, %ymm9; tweak_clmul(2, %ymm10, %ymm15, %ymm13, %ymm12, %ymm14); tweak_clmul(4, %ymm11, %ymm15, %ymm13, %ymm12, %ymm14); vbroadcasti128 (5 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (6 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (7 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (8 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (9 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (10 * 16)(%rdi), %ymm4; cmpl $12, %r9d; jb .Lxts_enc_blk8_last; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (11 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (12 * 16)(%rdi), %ymm4; jz .Lxts_enc_blk8_last; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (13 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, 
%ymm3); vbroadcasti128 (14 * 16)(%rdi), %ymm4; /* Last round and output handling. */ .Lxts_enc_blk8_last: vpxor %ymm4, %ymm5, %ymm5; /* Xor tweak to last round key. */ vpxor %ymm4, %ymm6, %ymm6; vpxor %ymm4, %ymm7, %ymm7; vpxor %ymm4, %ymm8, %ymm4; tweak_clmul(6, %ymm8, %ymm15, %ymm13, %ymm12, %ymm14); tweak_clmul(8, %ymm15, %ymm15, %ymm13, %ymm12, %ymm14); vpshufb .Lxts_high_bit_shuf rRIP, %ymm15, %ymm13; vaesenclast %ymm5, %ymm0, %ymm0; vaesenclast %ymm6, %ymm1, %ymm1; vaesenclast %ymm7, %ymm2, %ymm2; vaesenclast %ymm4, %ymm3, %ymm3; vmovdqu %ymm0, (0 * 16)(%rdx); vmovdqu %ymm1, (2 * 16)(%rdx); vmovdqu %ymm2, (4 * 16)(%rdx); vmovdqu %ymm3, (6 * 16)(%rdx); leaq (8 * 16)(%rdx), %rdx; vbroadcasti128 (0 * 16)(%rdi), %ymm4; vpxor (0 * 16)(%rcx), %ymm9, %ymm0; vpxor (2 * 16)(%rcx), %ymm10, %ymm1; vpxor (4 * 16)(%rcx), %ymm11, %ymm2; vpxor (6 * 16)(%rcx), %ymm8, %ymm3; vmovdqa %ymm9, %ymm5; vmovdqa %ymm10, %ymm6; vmovdqa %ymm11, %ymm7; leaq (8 * 16)(%rcx), %rcx; jmp .Lxts_crypt_blk8_loop; .align 8 .Lxts_dec_blk8: /* AES rounds */ XOR4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (1 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (2 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (3 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (4 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vmovdqa %ymm15, %ymm9; tweak_clmul(2, %ymm10, %ymm15, %ymm13, %ymm12, %ymm14); tweak_clmul(4, %ymm11, %ymm15, %ymm13, %ymm12, %ymm14); vbroadcasti128 (5 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (6 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (7 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (8 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (9 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (10 * 16)(%rdi), %ymm4; cmpl $12, %r9d; jb .Lxts_dec_blk8_last; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (11 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (12 * 16)(%rdi), %ymm4; jz .Lxts_dec_blk8_last; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (13 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (14 * 16)(%rdi), %ymm4; /* Last round and output handling. */ .Lxts_dec_blk8_last: vpxor %ymm4, %ymm5, %ymm5; /* Xor tweak to last round key. 
*/ vpxor %ymm4, %ymm6, %ymm6; vpxor %ymm4, %ymm7, %ymm7; vpxor %ymm4, %ymm8, %ymm4; tweak_clmul(6, %ymm8, %ymm15, %ymm13, %ymm12, %ymm14); tweak_clmul(8, %ymm15, %ymm15, %ymm13, %ymm12, %ymm14); vpshufb .Lxts_high_bit_shuf rRIP, %ymm15, %ymm13; vaesdeclast %ymm5, %ymm0, %ymm0; vaesdeclast %ymm6, %ymm1, %ymm1; vaesdeclast %ymm7, %ymm2, %ymm2; vaesdeclast %ymm4, %ymm3, %ymm3; vmovdqu %ymm0, (0 * 16)(%rdx); vmovdqu %ymm1, (2 * 16)(%rdx); vmovdqu %ymm2, (4 * 16)(%rdx); vmovdqu %ymm3, (6 * 16)(%rdx); leaq (8 * 16)(%rdx), %rdx; vbroadcasti128 (0 * 16)(%rdi), %ymm4; vpxor (0 * 16)(%rcx), %ymm9, %ymm0; vpxor (2 * 16)(%rcx), %ymm10, %ymm1; vpxor (4 * 16)(%rcx), %ymm11, %ymm2; vpxor (6 * 16)(%rcx), %ymm8, %ymm3; vmovdqa %ymm9, %ymm5; vmovdqa %ymm10, %ymm6; vmovdqa %ymm11, %ymm7; leaq (8 * 16)(%rcx), %rcx; jmp .Lxts_crypt_blk8_loop; .align 8 .Lxts_crypt_blk8_tail: testl %eax, %eax; jz .Lxts_dec_tail_blk8; /* AES rounds */ XOR4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (1 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (2 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (3 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (4 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (5 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (6 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (7 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (8 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (9 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (10 * 16)(%rdi), %ymm4; cmpl $12, %r9d; jb .Lxts_enc_blk8_tail_last; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (11 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (12 * 16)(%rdi), %ymm4; jz .Lxts_enc_blk8_tail_last; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (13 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (14 * 16)(%rdi), %ymm4; /* Last round and output handling. */ .Lxts_enc_blk8_tail_last: vpxor %ymm4, %ymm5, %ymm5; /* Xor tweak to last round key. 
*/ vpxor %ymm4, %ymm6, %ymm6; vpxor %ymm4, %ymm7, %ymm7; vpxor %ymm4, %ymm8, %ymm4; vaesenclast %ymm5, %ymm0, %ymm0; vaesenclast %ymm6, %ymm1, %ymm1; vaesenclast %ymm7, %ymm2, %ymm2; vaesenclast %ymm4, %ymm3, %ymm3; vmovdqu %ymm0, (0 * 16)(%rdx); vmovdqu %ymm1, (2 * 16)(%rdx); vmovdqu %ymm2, (4 * 16)(%rdx); vmovdqu %ymm3, (6 * 16)(%rdx); leaq (8 * 16)(%rdx), %rdx; jmp .Lxts_crypt_blk4; .align 8 .Lxts_dec_tail_blk8: /* AES rounds */ XOR4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (1 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (2 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (3 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (4 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (5 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (6 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (7 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (8 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (9 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (10 * 16)(%rdi), %ymm4; cmpl $12, %r9d; jb .Lxts_dec_blk8_tail_last; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (11 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (12 * 16)(%rdi), %ymm4; jz .Lxts_dec_blk8_tail_last; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (13 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (14 * 16)(%rdi), %ymm4; /* Last round and output handling. */ .Lxts_dec_blk8_tail_last: vpxor %ymm4, %ymm5, %ymm5; /* Xor tweak to last round key. */ vpxor %ymm4, %ymm6, %ymm6; vpxor %ymm4, %ymm7, %ymm7; vpxor %ymm4, %ymm8, %ymm4; vaesdeclast %ymm5, %ymm0, %ymm0; vaesdeclast %ymm6, %ymm1, %ymm1; vaesdeclast %ymm7, %ymm2, %ymm2; vaesdeclast %ymm4, %ymm3, %ymm3; vmovdqu %ymm0, (0 * 16)(%rdx); vmovdqu %ymm1, (2 * 16)(%rdx); vmovdqu %ymm2, (4 * 16)(%rdx); vmovdqu %ymm3, (6 * 16)(%rdx); leaq (8 * 16)(%rdx), %rdx; /* Handle trailing four blocks. */ .align 8 .Lxts_crypt_blk4: /* Try exit early as typically input length is large power of 2. 
*/ cmpq $0, %r8; jb .Ldone_xts_crypt; cmpq $4, %r8; jb .Lxts_crypt_blk1; leaq -4(%r8), %r8; vmovdqa %ymm15, %ymm5; tweak_clmul(2, %ymm6, %ymm15, %ymm13, %ymm0, %ymm1); tweak_clmul(4, %ymm15, %ymm15, %ymm13, %ymm0, %ymm1); vpshufb .Lxts_high_bit_shuf rRIP, %ymm15, %ymm13; vbroadcasti128 (0 * 16)(%rdi), %ymm4; vpxor (0 * 16)(%rcx), %ymm5, %ymm0; vpxor (2 * 16)(%rcx), %ymm6, %ymm1; leaq (4 * 16)(%rcx), %rcx; testl %eax, %eax; jz .Lxts_dec_blk4; /* AES rounds */ XOR2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (1 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (2 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (3 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (4 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (5 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (6 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (7 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (8 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (9 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (10 * 16)(%rdi), %ymm4; cmpl $12, %r9d; jb .Lxts_enc_blk4_last; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (11 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (12 * 16)(%rdi), %ymm4; jz .Lxts_enc_blk4_last; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (13 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (14 * 16)(%rdi), %ymm4; /* Last round and output handling. */ .Lxts_enc_blk4_last: vpxor %ymm4, %ymm5, %ymm5; /* Xor tweak to last round key. */ vpxor %ymm4, %ymm6, %ymm6; vaesenclast %ymm5, %ymm0, %ymm0; vaesenclast %ymm6, %ymm1, %ymm1; vmovdqu %ymm0, (0 * 16)(%rdx); vmovdqu %ymm1, (2 * 16)(%rdx); leaq (4 * 16)(%rdx), %rdx; jmp .Lxts_crypt_blk1; .align 8 .Lxts_dec_blk4: /* AES rounds */ XOR2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (1 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (2 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (3 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (4 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (5 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (6 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (7 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (8 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (9 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (10 * 16)(%rdi), %ymm4; cmpl $12, %r9d; jb .Lxts_dec_blk4_last; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (11 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (12 * 16)(%rdi), %ymm4; jz .Lxts_dec_blk4_last; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (13 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (14 * 16)(%rdi), %ymm4; /* Last round and output handling. */ .Lxts_dec_blk4_last: vpxor %ymm4, %ymm5, %ymm5; /* Xor tweak to last round key. */ vpxor %ymm4, %ymm6, %ymm6; vaesdeclast %ymm5, %ymm0, %ymm0; vaesdeclast %ymm6, %ymm1, %ymm1; vmovdqu %ymm0, (0 * 16)(%rdx); vmovdqu %ymm1, (2 * 16)(%rdx); leaq (4 * 16)(%rdx), %rdx; /* Process trailing one to three blocks, one per loop. 
*/ .align 8 .Lxts_crypt_blk1: cmpq $1, %r8; jb .Ldone_xts_crypt; leaq -1(%r8), %r8; vpxor (%rcx), %xmm15, %xmm0; vmovdqa %xmm15, %xmm5; tweak_clmul(1, %xmm15, %xmm15, %xmm13, %xmm2, %xmm3); vpshufb .Lxts_high_bit_shuf rRIP, %xmm15, %xmm13; leaq 16(%rcx), %rcx; testl %eax, %eax; jz .Lxts_dec_blk1; /* AES rounds. */ vpxor (0 * 16)(%rdi), %xmm0, %xmm0; vaesenc (1 * 16)(%rdi), %xmm0, %xmm0; vaesenc (2 * 16)(%rdi), %xmm0, %xmm0; vaesenc (3 * 16)(%rdi), %xmm0, %xmm0; vaesenc (4 * 16)(%rdi), %xmm0, %xmm0; vaesenc (5 * 16)(%rdi), %xmm0, %xmm0; vaesenc (6 * 16)(%rdi), %xmm0, %xmm0; vaesenc (7 * 16)(%rdi), %xmm0, %xmm0; vaesenc (8 * 16)(%rdi), %xmm0, %xmm0; vaesenc (9 * 16)(%rdi), %xmm0, %xmm0; vmovdqa (10 * 16)(%rdi), %xmm1; cmpl $12, %r9d; jb .Lxts_enc_blk1_last; vaesenc %xmm1, %xmm0, %xmm0; vaesenc (11 * 16)(%rdi), %xmm0, %xmm0; vmovdqa (12 * 16)(%rdi), %xmm1; jz .Lxts_enc_blk1_last; vaesenc %xmm1, %xmm0, %xmm0; vaesenc (13 * 16)(%rdi), %xmm0, %xmm0; vmovdqa (14 * 16)(%rdi), %xmm1; /* Last round and output handling. */ .Lxts_enc_blk1_last: vpxor %xmm1, %xmm5, %xmm5; /* Xor tweak to last round key. */ vaesenclast %xmm5, %xmm0, %xmm0; vmovdqu %xmm0, (%rdx); leaq 16(%rdx), %rdx; jmp .Lxts_crypt_blk1; .align 8 .Lxts_dec_blk1: /* AES rounds. */ vpxor (0 * 16)(%rdi), %xmm0, %xmm0; vaesdec (1 * 16)(%rdi), %xmm0, %xmm0; vaesdec (2 * 16)(%rdi), %xmm0, %xmm0; vaesdec (3 * 16)(%rdi), %xmm0, %xmm0; vaesdec (4 * 16)(%rdi), %xmm0, %xmm0; vaesdec (5 * 16)(%rdi), %xmm0, %xmm0; vaesdec (6 * 16)(%rdi), %xmm0, %xmm0; vaesdec (7 * 16)(%rdi), %xmm0, %xmm0; vaesdec (8 * 16)(%rdi), %xmm0, %xmm0; vaesdec (9 * 16)(%rdi), %xmm0, %xmm0; vmovdqa (10 * 16)(%rdi), %xmm1; cmpl $12, %r9d; jb .Lxts_dec_blk1_last; vaesdec %xmm1, %xmm0, %xmm0; vaesdec (11 * 16)(%rdi), %xmm0, %xmm0; vmovdqa (12 * 16)(%rdi), %xmm1; jz .Lxts_dec_blk1_last; vaesdec %xmm1, %xmm0, %xmm0; vaesdec (13 * 16)(%rdi), %xmm0, %xmm0; vmovdqa (14 * 16)(%rdi), %xmm1; /* Last round and output handling. */ .Lxts_dec_blk1_last: vpxor %xmm1, %xmm5, %xmm5; /* Xor tweak to last round key. */ vaesdeclast %xmm5, %xmm0, %xmm0; vmovdqu %xmm0, (%rdx); leaq 16(%rdx), %rdx; jmp .Lxts_crypt_blk1; .align 8 .Ldone_xts_crypt: /* Store IV. */ vmovdqu %xmm15, (%rsi); vzeroall; xorl %eax, %eax ret_spec_stop CFI_ENDPROC(); ELF(.size _gcry_vaes_avx2_xts_crypt_amd64,.-_gcry_vaes_avx2_xts_crypt_amd64) /********************************************************************** ECB-mode encryption **********************************************************************/ ELF(.type _gcry_vaes_avx2_ecb_crypt_amd64,@function) .globl _gcry_vaes_avx2_ecb_crypt_amd64 .align 16 _gcry_vaes_avx2_ecb_crypt_amd64: /* input: * %rdi: round keys * %esi: encrypt * %rdx: dst * %rcx: src * %r8: nblocks * %r9: nrounds */ CFI_STARTPROC(); /* Process 16 blocks per loop. */ .align 8 .Lecb_blk16: cmpq $16, %r8; jb .Lecb_blk8; leaq -16(%r8), %r8; /* Load input and xor first key. 
*/ vbroadcasti128 (0 * 16)(%rdi), %ymm8; vmovdqu (0 * 16)(%rcx), %ymm0; vmovdqu (2 * 16)(%rcx), %ymm1; vmovdqu (4 * 16)(%rcx), %ymm2; vmovdqu (6 * 16)(%rcx), %ymm3; vmovdqu (8 * 16)(%rcx), %ymm4; vmovdqu (10 * 16)(%rcx), %ymm5; vmovdqu (12 * 16)(%rcx), %ymm6; vmovdqu (14 * 16)(%rcx), %ymm7; vpxor %ymm8, %ymm0, %ymm0; vpxor %ymm8, %ymm1, %ymm1; vpxor %ymm8, %ymm2, %ymm2; vpxor %ymm8, %ymm3, %ymm3; vpxor %ymm8, %ymm4, %ymm4; vpxor %ymm8, %ymm5, %ymm5; vpxor %ymm8, %ymm6, %ymm6; vpxor %ymm8, %ymm7, %ymm7; vbroadcasti128 (1 * 16)(%rdi), %ymm8; leaq (16 * 16)(%rcx), %rcx; testl %esi, %esi; jz .Lecb_dec_blk16; /* AES rounds */ VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (2 * 16)(%rdi), %ymm8; VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (3 * 16)(%rdi), %ymm8; VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (4 * 16)(%rdi), %ymm8; VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (5 * 16)(%rdi), %ymm8; VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (6 * 16)(%rdi), %ymm8; VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (7 * 16)(%rdi), %ymm8; VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (8 * 16)(%rdi), %ymm8; VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (9 * 16)(%rdi), %ymm8; VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (10 * 16)(%rdi), %ymm8; cmpl $12, %r9d; jb .Lecb_enc_blk16_last; VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (11 * 16)(%rdi), %ymm8; VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (12 * 16)(%rdi), %ymm8; jz .Lecb_enc_blk16_last; VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (13 * 16)(%rdi), %ymm8; VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (14 * 16)(%rdi), %ymm8; .Lecb_enc_blk16_last: vaesenclast %ymm8, %ymm0, %ymm0; vaesenclast %ymm8, %ymm1, %ymm1; vaesenclast %ymm8, %ymm2, %ymm2; vaesenclast %ymm8, %ymm3, %ymm3; vaesenclast %ymm8, %ymm4, %ymm4; vaesenclast %ymm8, %ymm5, %ymm5; vaesenclast %ymm8, %ymm6, %ymm6; vaesenclast %ymm8, %ymm7, %ymm7; jmp .Lecb_blk16_end; .align 8 .Lecb_dec_blk16: /* AES rounds */ VAESDEC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (2 * 16)(%rdi), %ymm8; VAESDEC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (3 * 16)(%rdi), %ymm8; VAESDEC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (4 * 16)(%rdi), %ymm8; VAESDEC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (5 * 16)(%rdi), %ymm8; VAESDEC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (6 * 16)(%rdi), %ymm8; VAESDEC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (7 * 16)(%rdi), %ymm8; VAESDEC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (8 * 16)(%rdi), %ymm8; VAESDEC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (9 * 16)(%rdi), %ymm8; VAESDEC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (10 * 16)(%rdi), %ymm8; cmpl $12, %r9d; jb .Lecb_dec_blk16_last; VAESDEC8(%ymm8, 
%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (11 * 16)(%rdi), %ymm8; VAESDEC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (12 * 16)(%rdi), %ymm8; jz .Lecb_dec_blk16_last; VAESDEC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (13 * 16)(%rdi), %ymm8; VAESDEC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (14 * 16)(%rdi), %ymm8; .Lecb_dec_blk16_last: vaesdeclast %ymm8, %ymm0, %ymm0; vaesdeclast %ymm8, %ymm1, %ymm1; vaesdeclast %ymm8, %ymm2, %ymm2; vaesdeclast %ymm8, %ymm3, %ymm3; vaesdeclast %ymm8, %ymm4, %ymm4; vaesdeclast %ymm8, %ymm5, %ymm5; vaesdeclast %ymm8, %ymm6, %ymm6; vaesdeclast %ymm8, %ymm7, %ymm7; jmp .Lecb_blk16_end; .align 8 .Lecb_blk16_end: vmovdqu %ymm0, (0 * 16)(%rdx); vmovdqu %ymm1, (2 * 16)(%rdx); vmovdqu %ymm2, (4 * 16)(%rdx); vmovdqu %ymm3, (6 * 16)(%rdx); vmovdqu %ymm4, (8 * 16)(%rdx); vmovdqu %ymm5, (10 * 16)(%rdx); vmovdqu %ymm6, (12 * 16)(%rdx); vmovdqu %ymm7, (14 * 16)(%rdx); leaq (16 * 16)(%rdx), %rdx; jmp .Lecb_blk16; /* Handle trailing eight blocks. */ .align 8 .Lecb_blk8: cmpq $8, %r8; jb .Lecb_blk4; leaq -8(%r8), %r8; /* Load input and xor first key. */ vbroadcasti128 (0 * 16)(%rdi), %ymm4; vmovdqu (0 * 16)(%rcx), %ymm0; vmovdqu (2 * 16)(%rcx), %ymm1; vmovdqu (4 * 16)(%rcx), %ymm2; vmovdqu (6 * 16)(%rcx), %ymm3; vpxor %ymm4, %ymm0, %ymm0; vpxor %ymm4, %ymm1, %ymm1; vpxor %ymm4, %ymm2, %ymm2; vpxor %ymm4, %ymm3, %ymm3; vbroadcasti128 (1 * 16)(%rdi), %ymm4; leaq (8 * 16)(%rcx), %rcx; testl %esi, %esi; jz .Lecb_dec_blk8; /* AES rounds */ VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (2 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (3 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (4 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (5 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (6 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (7 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (8 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (9 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (10 * 16)(%rdi), %ymm4; cmpl $12, %r9d; jb .Lecb_enc_blk8_last; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (11 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (12 * 16)(%rdi), %ymm4; jz .Lecb_enc_blk8_last; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (13 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (14 * 16)(%rdi), %ymm4; .Lecb_enc_blk8_last: vaesenclast %ymm4, %ymm0, %ymm0; vaesenclast %ymm4, %ymm1, %ymm1; vaesenclast %ymm4, %ymm2, %ymm2; vaesenclast %ymm4, %ymm3, %ymm3; vmovdqu %ymm0, (0 * 16)(%rdx); vmovdqu %ymm1, (2 * 16)(%rdx); vmovdqu %ymm2, (4 * 16)(%rdx); vmovdqu %ymm3, (6 * 16)(%rdx); leaq (8 * 16)(%rdx), %rdx; jmp .Lecb_blk4; .align 8 .Lecb_dec_blk8: /* AES rounds */ VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (2 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (3 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (4 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (5 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (6 * 16)(%rdi), %ymm4;
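/*
 * Note on the round-count dispatch used throughout these routines: %r9d
 * carries nrounds (10, 12 or 14); the `cmpl $12' comparison exits early
 * via `jb' for AES-128 and via `jz' for AES-192, so the number of round
 * keys consumed before the AESENCLAST/AESDECLAST step matches the key
 * size.  A minimal scalar sketch of the same per-block schedule, using
 * AES-NI intrinsics (the helper name is illustrative, not part of this
 * file):
 *
 *   #include <wmmintrin.h>   // AES-NI intrinsics; build with -maes
 *
 *   static __m128i aes_enc_block(__m128i b, const __m128i *rk, int nrounds)
 *   {
 *       int r;
 *       b = _mm_xor_si128(b, rk[0]);         // whitening with first key
 *       for (r = 1; r < nrounds; r++)
 *           b = _mm_aesenc_si128(b, rk[r]);  // middle rounds
 *       return _mm_aesenclast_si128(b, rk[nrounds]);  // final round
 *   }
 *
 * The assembly unrolls this loop and applies each round key to several
 * 32-byte registers (two blocks per YMM register) at once.
 */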
VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (7 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (8 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (9 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (10 * 16)(%rdi), %ymm4; cmpl $12, %r9d; jb .Lecb_dec_blk8_last; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (11 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (12 * 16)(%rdi), %ymm4; jz .Lecb_dec_blk8_last; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (13 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (14 * 16)(%rdi), %ymm4; .Lecb_dec_blk8_last: vaesdeclast %ymm4, %ymm0, %ymm0; vaesdeclast %ymm4, %ymm1, %ymm1; vaesdeclast %ymm4, %ymm2, %ymm2; vaesdeclast %ymm4, %ymm3, %ymm3; vmovdqu %ymm0, (0 * 16)(%rdx); vmovdqu %ymm1, (2 * 16)(%rdx); vmovdqu %ymm2, (4 * 16)(%rdx); vmovdqu %ymm3, (6 * 16)(%rdx); leaq (8 * 16)(%rdx), %rdx; /* Handle trailing four blocks. */ .align 8 .Lecb_blk4: cmpq $4, %r8; jb .Lecb_blk1; leaq -4(%r8), %r8; /* Load input and xor first key. */ vbroadcasti128 (0 * 16)(%rdi), %ymm4; vmovdqu (0 * 16)(%rcx), %ymm0; vmovdqu (2 * 16)(%rcx), %ymm1; vpxor %ymm4, %ymm0, %ymm0; vpxor %ymm4, %ymm1, %ymm1; vbroadcasti128 (1 * 16)(%rdi), %ymm4; leaq (4 * 16)(%rcx), %rcx; testl %esi, %esi; jz .Lecb_dec_blk4; /* AES rounds */ VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (2 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (3 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (4 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (5 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (6 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (7 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (8 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (9 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (10 * 16)(%rdi), %ymm4; cmpl $12, %r9d; jb .Lecb_enc_blk4_last; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (11 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (12 * 16)(%rdi), %ymm4; jz .Lecb_enc_blk4_last; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (13 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (14 * 16)(%rdi), %ymm4; .Lecb_enc_blk4_last: vaesenclast %ymm4, %ymm0, %ymm0; vaesenclast %ymm4, %ymm1, %ymm1; vmovdqu %ymm0, (0 * 16)(%rdx); vmovdqu %ymm1, (2 * 16)(%rdx); leaq (4 * 16)(%rdx), %rdx; jmp .Lecb_blk1; .align 8 .Lecb_dec_blk4: /* AES rounds */ VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (2 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (3 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (4 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (5 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (6 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (7 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (8 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (9 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (10 * 16)(%rdi), %ymm4; cmpl $12, %r9d; jb .Lecb_dec_blk4_last; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (11 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (12 * 16)(%rdi), %ymm4; jz .Lecb_dec_blk4_last; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (13 * 16)(%rdi), %ymm4; 
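/*
 * The ECB entry point above works through nblocks as a cascade of batch
 * sizes: 16-block passes in a loop, then at most one 8-block and one
 * 4-block tail, and finally single blocks.  A rough scalar outline of
 * that driver, assuming a hypothetical crypt_n() helper that handles one
 * batch (a sketch only, not the libgcrypt API):
 *
 *   #include <stddef.h>
 *
 *   typedef void (*crypt_n_fn)(void *ctx, unsigned char *dst,
 *                              const unsigned char *src, size_t nblks);
 *
 *   static void ecb_cascade(void *ctx, unsigned char *dst,
 *                           const unsigned char *src, size_t nblocks,
 *                           crypt_n_fn crypt_n)
 *   {
 *       static const size_t batch[] = { 16, 8, 4, 1 };
 *       size_t i;
 *       for (i = 0; i < sizeof(batch) / sizeof(batch[0]); i++)
 *           while (nblocks >= batch[i])
 *             {
 *               crypt_n(ctx, dst, src, batch[i]);
 *               dst += batch[i] * 16;
 *               src += batch[i] * 16;
 *               nblocks -= batch[i];
 *             }
 *   }
 */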
VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (14 * 16)(%rdi), %ymm4; .Lecb_dec_blk4_last: vaesdeclast %ymm4, %ymm0, %ymm0; vaesdeclast %ymm4, %ymm1, %ymm1; vmovdqu %ymm0, (0 * 16)(%rdx); vmovdqu %ymm1, (2 * 16)(%rdx); leaq (4 * 16)(%rdx), %rdx; /* Process trailing one to three blocks, one per loop. */ .align 8 .Lecb_blk1: cmpq $1, %r8; jb .Ldone_ecb; leaq -1(%r8), %r8; /* Load input. */ vmovdqu (%rcx), %xmm2; leaq 16(%rcx), %rcx; /* Xor first key. */ vpxor (0 * 16)(%rdi), %xmm2, %xmm0; testl %esi, %esi; jz .Lecb_dec_blk1; /* AES rounds. */ vaesenc (1 * 16)(%rdi), %xmm0, %xmm0; vaesenc (2 * 16)(%rdi), %xmm0, %xmm0; vaesenc (3 * 16)(%rdi), %xmm0, %xmm0; vaesenc (4 * 16)(%rdi), %xmm0, %xmm0; vaesenc (5 * 16)(%rdi), %xmm0, %xmm0; vaesenc (6 * 16)(%rdi), %xmm0, %xmm0; vaesenc (7 * 16)(%rdi), %xmm0, %xmm0; vaesenc (8 * 16)(%rdi), %xmm0, %xmm0; vaesenc (9 * 16)(%rdi), %xmm0, %xmm0; vmovdqa (10 * 16)(%rdi), %xmm1; cmpl $12, %r9d; jb .Lecb_enc_blk1_last; vaesenc %xmm1, %xmm0, %xmm0; vaesenc (11 * 16)(%rdi), %xmm0, %xmm0; vmovdqa (12 * 16)(%rdi), %xmm1; jz .Lecb_enc_blk1_last; vaesenc %xmm1, %xmm0, %xmm0; vaesenc (13 * 16)(%rdi), %xmm0, %xmm0; vmovdqa (14 * 16)(%rdi), %xmm1; .Lecb_enc_blk1_last: vaesenclast %xmm1, %xmm0, %xmm0; jmp .Lecb_blk1_end; .align 8 .Lecb_dec_blk1: /* AES rounds. */ vaesdec (1 * 16)(%rdi), %xmm0, %xmm0; vaesdec (2 * 16)(%rdi), %xmm0, %xmm0; vaesdec (3 * 16)(%rdi), %xmm0, %xmm0; vaesdec (4 * 16)(%rdi), %xmm0, %xmm0; vaesdec (5 * 16)(%rdi), %xmm0, %xmm0; vaesdec (6 * 16)(%rdi), %xmm0, %xmm0; vaesdec (7 * 16)(%rdi), %xmm0, %xmm0; vaesdec (8 * 16)(%rdi), %xmm0, %xmm0; vaesdec (9 * 16)(%rdi), %xmm0, %xmm0; vmovdqa (10 * 16)(%rdi), %xmm1; cmpl $12, %r9d; jb .Lecb_dec_blk1_last; vaesdec %xmm1, %xmm0, %xmm0; vaesdec (11 * 16)(%rdi), %xmm0, %xmm0; vmovdqa (12 * 16)(%rdi), %xmm1; jz .Lecb_dec_blk1_last; vaesdec %xmm1, %xmm0, %xmm0; vaesdec (13 * 16)(%rdi), %xmm0, %xmm0; vmovdqa (14 * 16)(%rdi), %xmm1; .Lecb_dec_blk1_last: vaesdeclast %xmm1, %xmm0, %xmm0; jmp .Lecb_blk1_end; .align 8 .Lecb_blk1_end: vmovdqu %xmm0, (%rdx); leaq 16(%rdx), %rdx; jmp .Lecb_blk1; .align 8 .Ldone_ecb: vzeroall; ret_spec_stop CFI_ENDPROC(); ELF(.size _gcry_vaes_avx2_ecb_crypt_amd64,.-_gcry_vaes_avx2_ecb_crypt_amd64) /********************************************************************** constants **********************************************************************/ +SECTION_RODATA + ELF(.type _gcry_vaes_consts,@object) _gcry_vaes_consts: .align 32 .Lbige_addb_0: .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 .Lbige_addb_1: .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 .Lbige_addb_2: .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2 .Lbige_addb_3: .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3 .Lbige_addb_4: .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4 .Lbige_addb_5: .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5 .Lbige_addb_6: .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6 .Lbige_addb_7: .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7 .Lbige_addb_8: .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8 .Lbige_addb_9: .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9 .Lbige_addb_10: .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10 .Lbige_addb_11: .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11 .Lbige_addb_12: .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12 .Lbige_addb_13: .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 13 .Lbige_addb_14: .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 14 .Lbige_addb_15: .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 15 .Lle_addd_0: .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 .Lle_addd_1: .byte 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 .Lle_addd_2: .byte 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 .Lle_addd_3: .byte 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 .Lle_addd_4: .byte 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 .Lle_addd_5: .byte 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 .Lle_addd_6: .byte 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 .Lle_addd_7: .byte 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 .Lle_addd_8: .byte 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 .Lle_addd_9: .byte 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 .Lle_addd_10: .byte 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 .Lle_addd_11: .byte 11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 .Lle_addd_12: .byte 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 .Lle_addd_13: .byte 13, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 .Lle_addd_14: .byte 14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 .Lle_addd_15: .byte 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 .Lle_addd_4_2: .byte 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 .byte 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 .Lle_addd_8_2: .byte 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 .byte 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 .Lle_addd_16_2: .byte 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 .byte 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 .Lxts_gfmul_clmul: .long 0x00, 0x87, 0x00, 0x00 .long 0x00, 0x87, 0x00, 0x00 .Lxts_high_bit_shuf: .byte -1, -1, -1, -1, 12, 13, 14, 15 .byte 4, 5, 6, 7, -1, -1, -1, -1 .byte -1, -1, -1, -1, 12, 13, 14, 15 .byte 4, 5, 6, 7, -1, -1, -1, -1 .Lbswap128_mask: .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 ELF(.size _gcry_vaes_consts,.-_gcry_vaes_consts) #endif /* HAVE_GCC_INLINE_ASM_VAES */ #endif /* __x86_64__ */ diff --git a/cipher/serpent-avx2-amd64.S b/cipher/serpent-avx2-amd64.S index 26a21a36..e25e7d3b 100644 --- a/cipher/serpent-avx2-amd64.S +++ b/cipher/serpent-avx2-amd64.S @@ -1,1210 +1,1214 @@ /* serpent-avx2-amd64.S - AVX2 implementation of Serpent cipher * * Copyright (C) 2013-2015 Jussi Kivilinna * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . 
*/ #include #ifdef __x86_64 #if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && defined(USE_SERPENT) && \ defined(ENABLE_AVX2_SUPPORT) #include "asm-common-amd64.h" /* struct serpent_context: */ #define ctx_keys 0 /* register macros */ #define CTX %rdi /* vector registers */ #define RA0 %ymm0 #define RA1 %ymm1 #define RA2 %ymm2 #define RA3 %ymm3 #define RA4 %ymm4 #define RB0 %ymm5 #define RB1 %ymm6 #define RB2 %ymm7 #define RB3 %ymm8 #define RB4 %ymm9 #define RNOT %ymm10 #define RTMP0 %ymm11 #define RTMP1 %ymm12 #define RTMP2 %ymm13 #define RTMP3 %ymm14 #define RTMP4 %ymm15 #define RNOTx %xmm10 #define RTMP0x %xmm11 #define RTMP1x %xmm12 #define RTMP2x %xmm13 #define RTMP3x %xmm14 #define RTMP4x %xmm15 /********************************************************************** helper macros **********************************************************************/ /* vector 32-bit rotation to left */ #define vec_rol(reg, nleft, tmp) \ vpslld $(nleft), reg, tmp; \ vpsrld $(32 - (nleft)), reg, reg; \ vpor tmp, reg, reg; /* vector 32-bit rotation to right */ #define vec_ror(reg, nright, tmp) \ vec_rol(reg, 32 - nright, tmp) /* 4x4 32-bit integer matrix transpose */ #define transpose_4x4(x0, x1, x2, x3, t1, t2, t3) \ vpunpckhdq x1, x0, t2; \ vpunpckldq x1, x0, x0; \ \ vpunpckldq x3, x2, t1; \ vpunpckhdq x3, x2, x2; \ \ vpunpckhqdq t1, x0, x1; \ vpunpcklqdq t1, x0, x0; \ \ vpunpckhqdq x2, t2, x3; \ vpunpcklqdq x2, t2, x2; /********************************************************************** 16-way serpent **********************************************************************/ /* * These are the S-Boxes of Serpent from following research paper. * * D. A. Osvik, “Speeding up Serpent,” in Third AES Candidate Conference, * (New York, New York, USA), p. 317–329, National Institute of Standards and * Technology, 2000. 
* * Paper is also available at: http://www.ii.uib.no/~osvik/pub/aes3.pdf * */ #define SBOX0(r0, r1, r2, r3, r4) \ vpxor r0, r3, r3; vmovdqa r1, r4; \ vpand r3, r1, r1; vpxor r2, r4, r4; \ vpxor r0, r1, r1; vpor r3, r0, r0; \ vpxor r4, r0, r0; vpxor r3, r4, r4; \ vpxor r2, r3, r3; vpor r1, r2, r2; \ vpxor r4, r2, r2; vpxor RNOT, r4, r4; \ vpor r1, r4, r4; vpxor r3, r1, r1; \ vpxor r4, r1, r1; vpor r0, r3, r3; \ vpxor r3, r1, r1; vpxor r3, r4, r4; #define SBOX0_INVERSE(r0, r1, r2, r3, r4) \ vpxor RNOT, r2, r2; vmovdqa r1, r4; \ vpor r0, r1, r1; vpxor RNOT, r4, r4; \ vpxor r2, r1, r1; vpor r4, r2, r2; \ vpxor r3, r1, r1; vpxor r4, r0, r0; \ vpxor r0, r2, r2; vpand r3, r0, r0; \ vpxor r0, r4, r4; vpor r1, r0, r0; \ vpxor r2, r0, r0; vpxor r4, r3, r3; \ vpxor r1, r2, r2; vpxor r0, r3, r3; \ vpxor r1, r3, r3; \ vpand r3, r2, r2; \ vpxor r2, r4, r4; #define SBOX1(r0, r1, r2, r3, r4) \ vpxor RNOT, r0, r0; vpxor RNOT, r2, r2; \ vmovdqa r0, r4; vpand r1, r0, r0; \ vpxor r0, r2, r2; vpor r3, r0, r0; \ vpxor r2, r3, r3; vpxor r0, r1, r1; \ vpxor r4, r0, r0; vpor r1, r4, r4; \ vpxor r3, r1, r1; vpor r0, r2, r2; \ vpand r4, r2, r2; vpxor r1, r0, r0; \ vpand r2, r1, r1; \ vpxor r0, r1, r1; vpand r2, r0, r0; \ vpxor r4, r0, r0; #define SBOX1_INVERSE(r0, r1, r2, r3, r4) \ vmovdqa r1, r4; vpxor r3, r1, r1; \ vpand r1, r3, r3; vpxor r2, r4, r4; \ vpxor r0, r3, r3; vpor r1, r0, r0; \ vpxor r3, r2, r2; vpxor r4, r0, r0; \ vpor r2, r0, r0; vpxor r3, r1, r1; \ vpxor r1, r0, r0; vpor r3, r1, r1; \ vpxor r0, r1, r1; vpxor RNOT, r4, r4; \ vpxor r1, r4, r4; vpor r0, r1, r1; \ vpxor r0, r1, r1; \ vpor r4, r1, r1; \ vpxor r1, r3, r3; #define SBOX2(r0, r1, r2, r3, r4) \ vmovdqa r0, r4; vpand r2, r0, r0; \ vpxor r3, r0, r0; vpxor r1, r2, r2; \ vpxor r0, r2, r2; vpor r4, r3, r3; \ vpxor r1, r3, r3; vpxor r2, r4, r4; \ vmovdqa r3, r1; vpor r4, r3, r3; \ vpxor r0, r3, r3; vpand r1, r0, r0; \ vpxor r0, r4, r4; vpxor r3, r1, r1; \ vpxor r4, r1, r1; vpxor RNOT, r4, r4; #define SBOX2_INVERSE(r0, r1, r2, r3, r4) \ vpxor r3, r2, r2; vpxor r0, r3, r3; \ vmovdqa r3, r4; vpand r2, r3, r3; \ vpxor r1, r3, r3; vpor r2, r1, r1; \ vpxor r4, r1, r1; vpand r3, r4, r4; \ vpxor r3, r2, r2; vpand r0, r4, r4; \ vpxor r2, r4, r4; vpand r1, r2, r2; \ vpor r0, r2, r2; vpxor RNOT, r3, r3; \ vpxor r3, r2, r2; vpxor r3, r0, r0; \ vpand r1, r0, r0; vpxor r4, r3, r3; \ vpxor r0, r3, r3; #define SBOX3(r0, r1, r2, r3, r4) \ vmovdqa r0, r4; vpor r3, r0, r0; \ vpxor r1, r3, r3; vpand r4, r1, r1; \ vpxor r2, r4, r4; vpxor r3, r2, r2; \ vpand r0, r3, r3; vpor r1, r4, r4; \ vpxor r4, r3, r3; vpxor r1, r0, r0; \ vpand r0, r4, r4; vpxor r3, r1, r1; \ vpxor r2, r4, r4; vpor r0, r1, r1; \ vpxor r2, r1, r1; vpxor r3, r0, r0; \ vmovdqa r1, r2; vpor r3, r1, r1; \ vpxor r0, r1, r1; #define SBOX3_INVERSE(r0, r1, r2, r3, r4) \ vmovdqa r2, r4; vpxor r1, r2, r2; \ vpxor r2, r0, r0; vpand r2, r4, r4; \ vpxor r0, r4, r4; vpand r1, r0, r0; \ vpxor r3, r1, r1; vpor r4, r3, r3; \ vpxor r3, r2, r2; vpxor r3, r0, r0; \ vpxor r4, r1, r1; vpand r2, r3, r3; \ vpxor r1, r3, r3; vpxor r0, r1, r1; \ vpor r2, r1, r1; vpxor r3, r0, r0; \ vpxor r4, r1, r1; \ vpxor r1, r0, r0; #define SBOX4(r0, r1, r2, r3, r4) \ vpxor r3, r1, r1; vpxor RNOT, r3, r3; \ vpxor r3, r2, r2; vpxor r0, r3, r3; \ vmovdqa r1, r4; vpand r3, r1, r1; \ vpxor r2, r1, r1; vpxor r3, r4, r4; \ vpxor r4, r0, r0; vpand r4, r2, r2; \ vpxor r0, r2, r2; vpand r1, r0, r0; \ vpxor r0, r3, r3; vpor r1, r4, r4; \ vpxor r0, r4, r4; vpor r3, r0, r0; \ vpxor r2, r0, r0; vpand r3, r2, r2; \ vpxor RNOT, r0, r0; vpxor r2, r4, r4; 
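/*
 * The SBOX macros above are Osvik-style bitsliced S-boxes: each one is a
 * short Boolean program over five registers, evaluated here on 256-bit
 * vectors (RNOT holds all-ones for the NOT steps).  As a cross-checking
 * aid, a direct scalar transliteration of SBOX0 on 32-bit words (the
 * function name is ours, not part of libgcrypt):
 *
 *   #include <stdint.h>
 *
 *   static void sbox0_bitsliced(uint32_t *r0, uint32_t *r1, uint32_t *r2,
 *                               uint32_t *r3, uint32_t *r4)
 *   {
 *       *r3 ^= *r0;  *r4  = *r1;
 *       *r1 &= *r3;  *r4 ^= *r2;
 *       *r1 ^= *r0;  *r0 |= *r3;
 *       *r0 ^= *r4;  *r4 ^= *r3;
 *       *r3 ^= *r2;  *r2 |= *r1;
 *       *r2 ^= *r4;  *r4 = ~*r4;
 *       *r4 |= *r1;  *r1 ^= *r3;
 *       *r1 ^= *r4;  *r3 |= *r0;
 *       *r1 ^= *r3;  *r4 ^= *r3;
 *   }
 *
 * Corresponding bits of r0..r3 form one 4-bit S-box input, so each call
 * evaluates 32 S-box lookups in parallel per 32-bit word (r4 is scratch).
 */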
#define SBOX4_INVERSE(r0, r1, r2, r3, r4) \ vmovdqa r2, r4; vpand r3, r2, r2; \ vpxor r1, r2, r2; vpor r3, r1, r1; \ vpand r0, r1, r1; vpxor r2, r4, r4; \ vpxor r1, r4, r4; vpand r2, r1, r1; \ vpxor RNOT, r0, r0; vpxor r4, r3, r3; \ vpxor r3, r1, r1; vpand r0, r3, r3; \ vpxor r2, r3, r3; vpxor r1, r0, r0; \ vpand r0, r2, r2; vpxor r0, r3, r3; \ vpxor r4, r2, r2; \ vpor r3, r2, r2; vpxor r0, r3, r3; \ vpxor r1, r2, r2; #define SBOX5(r0, r1, r2, r3, r4) \ vpxor r1, r0, r0; vpxor r3, r1, r1; \ vpxor RNOT, r3, r3; vmovdqa r1, r4; \ vpand r0, r1, r1; vpxor r3, r2, r2; \ vpxor r2, r1, r1; vpor r4, r2, r2; \ vpxor r3, r4, r4; vpand r1, r3, r3; \ vpxor r0, r3, r3; vpxor r1, r4, r4; \ vpxor r2, r4, r4; vpxor r0, r2, r2; \ vpand r3, r0, r0; vpxor RNOT, r2, r2; \ vpxor r4, r0, r0; vpor r3, r4, r4; \ vpxor r4, r2, r2; #define SBOX5_INVERSE(r0, r1, r2, r3, r4) \ vpxor RNOT, r1, r1; vmovdqa r3, r4; \ vpxor r1, r2, r2; vpor r0, r3, r3; \ vpxor r2, r3, r3; vpor r1, r2, r2; \ vpand r0, r2, r2; vpxor r3, r4, r4; \ vpxor r4, r2, r2; vpor r0, r4, r4; \ vpxor r1, r4, r4; vpand r2, r1, r1; \ vpxor r3, r1, r1; vpxor r2, r4, r4; \ vpand r4, r3, r3; vpxor r1, r4, r4; \ vpxor r4, r3, r3; vpxor RNOT, r4, r4; \ vpxor r0, r3, r3; #define SBOX6(r0, r1, r2, r3, r4) \ vpxor RNOT, r2, r2; vmovdqa r3, r4; \ vpand r0, r3, r3; vpxor r4, r0, r0; \ vpxor r2, r3, r3; vpor r4, r2, r2; \ vpxor r3, r1, r1; vpxor r0, r2, r2; \ vpor r1, r0, r0; vpxor r1, r2, r2; \ vpxor r0, r4, r4; vpor r3, r0, r0; \ vpxor r2, r0, r0; vpxor r3, r4, r4; \ vpxor r0, r4, r4; vpxor RNOT, r3, r3; \ vpand r4, r2, r2; \ vpxor r3, r2, r2; #define SBOX6_INVERSE(r0, r1, r2, r3, r4) \ vpxor r2, r0, r0; vmovdqa r2, r4; \ vpand r0, r2, r2; vpxor r3, r4, r4; \ vpxor RNOT, r2, r2; vpxor r1, r3, r3; \ vpxor r3, r2, r2; vpor r0, r4, r4; \ vpxor r2, r0, r0; vpxor r4, r3, r3; \ vpxor r1, r4, r4; vpand r3, r1, r1; \ vpxor r0, r1, r1; vpxor r3, r0, r0; \ vpor r2, r0, r0; vpxor r1, r3, r3; \ vpxor r0, r4, r4; #define SBOX7(r0, r1, r2, r3, r4) \ vmovdqa r1, r4; vpor r2, r1, r1; \ vpxor r3, r1, r1; vpxor r2, r4, r4; \ vpxor r1, r2, r2; vpor r4, r3, r3; \ vpand r0, r3, r3; vpxor r2, r4, r4; \ vpxor r1, r3, r3; vpor r4, r1, r1; \ vpxor r0, r1, r1; vpor r4, r0, r0; \ vpxor r2, r0, r0; vpxor r4, r1, r1; \ vpxor r1, r2, r2; vpand r0, r1, r1; \ vpxor r4, r1, r1; vpxor RNOT, r2, r2; \ vpor r0, r2, r2; \ vpxor r2, r4, r4; #define SBOX7_INVERSE(r0, r1, r2, r3, r4) \ vmovdqa r2, r4; vpxor r0, r2, r2; \ vpand r3, r0, r0; vpor r3, r4, r4; \ vpxor RNOT, r2, r2; vpxor r1, r3, r3; \ vpor r0, r1, r1; vpxor r2, r0, r0; \ vpand r4, r2, r2; vpand r4, r3, r3; \ vpxor r2, r1, r1; vpxor r0, r2, r2; \ vpor r2, r0, r0; vpxor r1, r4, r4; \ vpxor r3, r0, r0; vpxor r4, r3, r3; \ vpor r0, r4, r4; vpxor r2, r3, r3; \ vpxor r2, r4, r4; /* Apply SBOX number WHICH to to the block. */ #define SBOX(which, r0, r1, r2, r3, r4) \ SBOX##which (r0, r1, r2, r3, r4) /* Apply inverse SBOX number WHICH to to the block. */ #define SBOX_INVERSE(which, r0, r1, r2, r3, r4) \ SBOX##which##_INVERSE (r0, r1, r2, r3, r4) /* XOR round key into block state in r0,r1,r2,r3. r4 used as temporary. */ #define BLOCK_XOR_KEY(r0, r1, r2, r3, r4, round) \ vpbroadcastd (ctx_keys + (round) * 16 + 0 * 4)(CTX), r4; \ vpxor r4, r0, r0; \ vpbroadcastd (ctx_keys + (round) * 16 + 1 * 4)(CTX), r4; \ vpxor r4, r1, r1; \ vpbroadcastd (ctx_keys + (round) * 16 + 2 * 4)(CTX), r4; \ vpxor r4, r2, r2; \ vpbroadcastd (ctx_keys + (round) * 16 + 3 * 4)(CTX), r4; \ vpxor r4, r3, r3; /* Apply the linear transformation to BLOCK. 
*/ #define LINEAR_TRANSFORMATION(r0, r1, r2, r3, r4) \ vec_rol(r0, 13, r4); \ vec_rol(r2, 3, r4); \ vpxor r0, r1, r1; \ vpxor r2, r1, r1; \ vpslld $3, r0, r4; \ vpxor r2, r3, r3; \ vpxor r4, r3, r3; \ vec_rol(r1, 1, r4); \ vec_rol(r3, 7, r4); \ vpxor r1, r0, r0; \ vpxor r3, r0, r0; \ vpslld $7, r1, r4; \ vpxor r3, r2, r2; \ vpxor r4, r2, r2; \ vec_rol(r0, 5, r4); \ vec_rol(r2, 22, r4); /* Apply the inverse linear transformation to BLOCK. */ #define LINEAR_TRANSFORMATION_INVERSE(r0, r1, r2, r3, r4) \ vec_ror(r2, 22, r4); \ vec_ror(r0, 5, r4); \ vpslld $7, r1, r4; \ vpxor r3, r2, r2; \ vpxor r4, r2, r2; \ vpxor r1, r0, r0; \ vpxor r3, r0, r0; \ vec_ror(r3, 7, r4); \ vec_ror(r1, 1, r4); \ vpslld $3, r0, r4; \ vpxor r2, r3, r3; \ vpxor r4, r3, r3; \ vpxor r0, r1, r1; \ vpxor r2, r1, r1; \ vec_ror(r2, 3, r4); \ vec_ror(r0, 13, r4); /* Apply a Serpent round to sixteen parallel blocks. This macro increments `round'. */ #define ROUND(round, which, a0, a1, a2, a3, a4, na0, na1, na2, na3, na4, \ b0, b1, b2, b3, b4, nb0, nb1, nb2, nb3, nb4) \ BLOCK_XOR_KEY (a0, a1, a2, a3, a4, round); \ SBOX (which, a0, a1, a2, a3, a4); \ BLOCK_XOR_KEY (b0, b1, b2, b3, b4, round); \ SBOX (which, b0, b1, b2, b3, b4); \ LINEAR_TRANSFORMATION (na0, na1, na2, na3, na4); \ LINEAR_TRANSFORMATION (nb0, nb1, nb2, nb3, nb4); /* Apply the last Serpent round to sixteen parallel blocks. This macro increments `round'. */ #define ROUND_LAST(round, which, a0, a1, a2, a3, a4, na0, na1, na2, na3, na4, \ b0, b1, b2, b3, b4, nb0, nb1, nb2, nb3, nb4) \ BLOCK_XOR_KEY (a0, a1, a2, a3, a4, round); \ SBOX (which, a0, a1, a2, a3, a4); \ BLOCK_XOR_KEY (b0, b1, b2, b3, b4, round); \ SBOX (which, b0, b1, b2, b3, b4); \ BLOCK_XOR_KEY (na0, na1, na2, na3, na4, ((round) + 1)); \ BLOCK_XOR_KEY (nb0, nb1, nb2, nb3, nb4, ((round) + 1)); /* Apply an inverse Serpent round to sixteen parallel blocks. This macro increments `round'. */ #define ROUND_INVERSE(round, which, a0, a1, a2, a3, a4, \ na0, na1, na2, na3, na4, \ b0, b1, b2, b3, b4, \ nb0, nb1, nb2, nb3, nb4) \ LINEAR_TRANSFORMATION_INVERSE (a0, a1, a2, a3, a4); \ LINEAR_TRANSFORMATION_INVERSE (b0, b1, b2, b3, b4); \ SBOX_INVERSE (which, a0, a1, a2, a3, a4); \ BLOCK_XOR_KEY (na0, na1, na2, na3, na4, round); \ SBOX_INVERSE (which, b0, b1, b2, b3, b4); \ BLOCK_XOR_KEY (nb0, nb1, nb2, nb3, nb4, round); /* Apply the first inverse Serpent round to sixteen parallel blocks. This macro increments `round'. 
*/ #define ROUND_FIRST_INVERSE(round, which, a0, a1, a2, a3, a4, \ na0, na1, na2, na3, na4, \ b0, b1, b2, b3, b4, \ nb0, nb1, nb2, nb3, nb4) \ BLOCK_XOR_KEY (a0, a1, a2, a3, a4, ((round) + 1)); \ BLOCK_XOR_KEY (b0, b1, b2, b3, b4, ((round) + 1)); \ SBOX_INVERSE (which, a0, a1, a2, a3, a4); \ BLOCK_XOR_KEY (na0, na1, na2, na3, na4, round); \ SBOX_INVERSE (which, b0, b1, b2, b3, b4); \ BLOCK_XOR_KEY (nb0, nb1, nb2, nb3, nb4, round); .text .align 16 ELF(.type __serpent_enc_blk16,@function;) __serpent_enc_blk16: /* input: * %rdi: ctx, CTX * RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: sixteen parallel * plaintext blocks * output: * RA4, RA1, RA2, RA0, RB4, RB1, RB2, RB0: sixteen parallel * ciphertext blocks */ CFI_STARTPROC(); vpcmpeqd RNOT, RNOT, RNOT; transpose_4x4(RA0, RA1, RA2, RA3, RA4, RTMP0, RTMP1); transpose_4x4(RB0, RB1, RB2, RB3, RB4, RTMP0, RTMP1); ROUND (0, 0, RA0, RA1, RA2, RA3, RA4, RA1, RA4, RA2, RA0, RA3, RB0, RB1, RB2, RB3, RB4, RB1, RB4, RB2, RB0, RB3); ROUND (1, 1, RA1, RA4, RA2, RA0, RA3, RA2, RA1, RA0, RA4, RA3, RB1, RB4, RB2, RB0, RB3, RB2, RB1, RB0, RB4, RB3); ROUND (2, 2, RA2, RA1, RA0, RA4, RA3, RA0, RA4, RA1, RA3, RA2, RB2, RB1, RB0, RB4, RB3, RB0, RB4, RB1, RB3, RB2); ROUND (3, 3, RA0, RA4, RA1, RA3, RA2, RA4, RA1, RA3, RA2, RA0, RB0, RB4, RB1, RB3, RB2, RB4, RB1, RB3, RB2, RB0); ROUND (4, 4, RA4, RA1, RA3, RA2, RA0, RA1, RA0, RA4, RA2, RA3, RB4, RB1, RB3, RB2, RB0, RB1, RB0, RB4, RB2, RB3); ROUND (5, 5, RA1, RA0, RA4, RA2, RA3, RA0, RA2, RA1, RA4, RA3, RB1, RB0, RB4, RB2, RB3, RB0, RB2, RB1, RB4, RB3); ROUND (6, 6, RA0, RA2, RA1, RA4, RA3, RA0, RA2, RA3, RA1, RA4, RB0, RB2, RB1, RB4, RB3, RB0, RB2, RB3, RB1, RB4); ROUND (7, 7, RA0, RA2, RA3, RA1, RA4, RA4, RA1, RA2, RA0, RA3, RB0, RB2, RB3, RB1, RB4, RB4, RB1, RB2, RB0, RB3); ROUND (8, 0, RA4, RA1, RA2, RA0, RA3, RA1, RA3, RA2, RA4, RA0, RB4, RB1, RB2, RB0, RB3, RB1, RB3, RB2, RB4, RB0); ROUND (9, 1, RA1, RA3, RA2, RA4, RA0, RA2, RA1, RA4, RA3, RA0, RB1, RB3, RB2, RB4, RB0, RB2, RB1, RB4, RB3, RB0); ROUND (10, 2, RA2, RA1, RA4, RA3, RA0, RA4, RA3, RA1, RA0, RA2, RB2, RB1, RB4, RB3, RB0, RB4, RB3, RB1, RB0, RB2); ROUND (11, 3, RA4, RA3, RA1, RA0, RA2, RA3, RA1, RA0, RA2, RA4, RB4, RB3, RB1, RB0, RB2, RB3, RB1, RB0, RB2, RB4); ROUND (12, 4, RA3, RA1, RA0, RA2, RA4, RA1, RA4, RA3, RA2, RA0, RB3, RB1, RB0, RB2, RB4, RB1, RB4, RB3, RB2, RB0); ROUND (13, 5, RA1, RA4, RA3, RA2, RA0, RA4, RA2, RA1, RA3, RA0, RB1, RB4, RB3, RB2, RB0, RB4, RB2, RB1, RB3, RB0); ROUND (14, 6, RA4, RA2, RA1, RA3, RA0, RA4, RA2, RA0, RA1, RA3, RB4, RB2, RB1, RB3, RB0, RB4, RB2, RB0, RB1, RB3); ROUND (15, 7, RA4, RA2, RA0, RA1, RA3, RA3, RA1, RA2, RA4, RA0, RB4, RB2, RB0, RB1, RB3, RB3, RB1, RB2, RB4, RB0); ROUND (16, 0, RA3, RA1, RA2, RA4, RA0, RA1, RA0, RA2, RA3, RA4, RB3, RB1, RB2, RB4, RB0, RB1, RB0, RB2, RB3, RB4); ROUND (17, 1, RA1, RA0, RA2, RA3, RA4, RA2, RA1, RA3, RA0, RA4, RB1, RB0, RB2, RB3, RB4, RB2, RB1, RB3, RB0, RB4); ROUND (18, 2, RA2, RA1, RA3, RA0, RA4, RA3, RA0, RA1, RA4, RA2, RB2, RB1, RB3, RB0, RB4, RB3, RB0, RB1, RB4, RB2); ROUND (19, 3, RA3, RA0, RA1, RA4, RA2, RA0, RA1, RA4, RA2, RA3, RB3, RB0, RB1, RB4, RB2, RB0, RB1, RB4, RB2, RB3); ROUND (20, 4, RA0, RA1, RA4, RA2, RA3, RA1, RA3, RA0, RA2, RA4, RB0, RB1, RB4, RB2, RB3, RB1, RB3, RB0, RB2, RB4); ROUND (21, 5, RA1, RA3, RA0, RA2, RA4, RA3, RA2, RA1, RA0, RA4, RB1, RB3, RB0, RB2, RB4, RB3, RB2, RB1, RB0, RB4); ROUND (22, 6, RA3, RA2, RA1, RA0, RA4, RA3, RA2, RA4, RA1, RA0, RB3, RB2, RB1, RB0, RB4, RB3, RB2, RB4, RB1, RB0); ROUND (23, 7, RA3, RA2, RA4, RA1, RA0, RA0, RA1, RA2, 
RA3, RA4, RB3, RB2, RB4, RB1, RB0, RB0, RB1, RB2, RB3, RB4); ROUND (24, 0, RA0, RA1, RA2, RA3, RA4, RA1, RA4, RA2, RA0, RA3, RB0, RB1, RB2, RB3, RB4, RB1, RB4, RB2, RB0, RB3); ROUND (25, 1, RA1, RA4, RA2, RA0, RA3, RA2, RA1, RA0, RA4, RA3, RB1, RB4, RB2, RB0, RB3, RB2, RB1, RB0, RB4, RB3); ROUND (26, 2, RA2, RA1, RA0, RA4, RA3, RA0, RA4, RA1, RA3, RA2, RB2, RB1, RB0, RB4, RB3, RB0, RB4, RB1, RB3, RB2); ROUND (27, 3, RA0, RA4, RA1, RA3, RA2, RA4, RA1, RA3, RA2, RA0, RB0, RB4, RB1, RB3, RB2, RB4, RB1, RB3, RB2, RB0); ROUND (28, 4, RA4, RA1, RA3, RA2, RA0, RA1, RA0, RA4, RA2, RA3, RB4, RB1, RB3, RB2, RB0, RB1, RB0, RB4, RB2, RB3); ROUND (29, 5, RA1, RA0, RA4, RA2, RA3, RA0, RA2, RA1, RA4, RA3, RB1, RB0, RB4, RB2, RB3, RB0, RB2, RB1, RB4, RB3); ROUND (30, 6, RA0, RA2, RA1, RA4, RA3, RA0, RA2, RA3, RA1, RA4, RB0, RB2, RB1, RB4, RB3, RB0, RB2, RB3, RB1, RB4); ROUND_LAST (31, 7, RA0, RA2, RA3, RA1, RA4, RA4, RA1, RA2, RA0, RA3, RB0, RB2, RB3, RB1, RB4, RB4, RB1, RB2, RB0, RB3); transpose_4x4(RA4, RA1, RA2, RA0, RA3, RTMP0, RTMP1); transpose_4x4(RB4, RB1, RB2, RB0, RB3, RTMP0, RTMP1); ret_spec_stop; CFI_ENDPROC(); ELF(.size __serpent_enc_blk16,.-__serpent_enc_blk16;) .align 16 ELF(.type __serpent_dec_blk16,@function;) __serpent_dec_blk16: /* input: * %rdi: ctx, CTX * RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: sixteen parallel * ciphertext blocks * output: * RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: sixteen parallel * plaintext blocks */ CFI_STARTPROC(); vpcmpeqd RNOT, RNOT, RNOT; transpose_4x4(RA0, RA1, RA2, RA3, RA4, RTMP0, RTMP1); transpose_4x4(RB0, RB1, RB2, RB3, RB4, RTMP0, RTMP1); ROUND_FIRST_INVERSE (31, 7, RA0, RA1, RA2, RA3, RA4, RA3, RA0, RA1, RA4, RA2, RB0, RB1, RB2, RB3, RB4, RB3, RB0, RB1, RB4, RB2); ROUND_INVERSE (30, 6, RA3, RA0, RA1, RA4, RA2, RA0, RA1, RA2, RA4, RA3, RB3, RB0, RB1, RB4, RB2, RB0, RB1, RB2, RB4, RB3); ROUND_INVERSE (29, 5, RA0, RA1, RA2, RA4, RA3, RA1, RA3, RA4, RA2, RA0, RB0, RB1, RB2, RB4, RB3, RB1, RB3, RB4, RB2, RB0); ROUND_INVERSE (28, 4, RA1, RA3, RA4, RA2, RA0, RA1, RA2, RA4, RA0, RA3, RB1, RB3, RB4, RB2, RB0, RB1, RB2, RB4, RB0, RB3); ROUND_INVERSE (27, 3, RA1, RA2, RA4, RA0, RA3, RA4, RA2, RA0, RA1, RA3, RB1, RB2, RB4, RB0, RB3, RB4, RB2, RB0, RB1, RB3); ROUND_INVERSE (26, 2, RA4, RA2, RA0, RA1, RA3, RA2, RA3, RA0, RA1, RA4, RB4, RB2, RB0, RB1, RB3, RB2, RB3, RB0, RB1, RB4); ROUND_INVERSE (25, 1, RA2, RA3, RA0, RA1, RA4, RA4, RA2, RA1, RA0, RA3, RB2, RB3, RB0, RB1, RB4, RB4, RB2, RB1, RB0, RB3); ROUND_INVERSE (24, 0, RA4, RA2, RA1, RA0, RA3, RA4, RA3, RA2, RA0, RA1, RB4, RB2, RB1, RB0, RB3, RB4, RB3, RB2, RB0, RB1); ROUND_INVERSE (23, 7, RA4, RA3, RA2, RA0, RA1, RA0, RA4, RA3, RA1, RA2, RB4, RB3, RB2, RB0, RB1, RB0, RB4, RB3, RB1, RB2); ROUND_INVERSE (22, 6, RA0, RA4, RA3, RA1, RA2, RA4, RA3, RA2, RA1, RA0, RB0, RB4, RB3, RB1, RB2, RB4, RB3, RB2, RB1, RB0); ROUND_INVERSE (21, 5, RA4, RA3, RA2, RA1, RA0, RA3, RA0, RA1, RA2, RA4, RB4, RB3, RB2, RB1, RB0, RB3, RB0, RB1, RB2, RB4); ROUND_INVERSE (20, 4, RA3, RA0, RA1, RA2, RA4, RA3, RA2, RA1, RA4, RA0, RB3, RB0, RB1, RB2, RB4, RB3, RB2, RB1, RB4, RB0); ROUND_INVERSE (19, 3, RA3, RA2, RA1, RA4, RA0, RA1, RA2, RA4, RA3, RA0, RB3, RB2, RB1, RB4, RB0, RB1, RB2, RB4, RB3, RB0); ROUND_INVERSE (18, 2, RA1, RA2, RA4, RA3, RA0, RA2, RA0, RA4, RA3, RA1, RB1, RB2, RB4, RB3, RB0, RB2, RB0, RB4, RB3, RB1); ROUND_INVERSE (17, 1, RA2, RA0, RA4, RA3, RA1, RA1, RA2, RA3, RA4, RA0, RB2, RB0, RB4, RB3, RB1, RB1, RB2, RB3, RB4, RB0); ROUND_INVERSE (16, 0, RA1, RA2, RA3, RA4, RA0, RA1, RA0, RA2, RA4, RA3, RB1, RB2, RB3, RB4, RB0, RB1, 
RB0, RB2, RB4, RB3); ROUND_INVERSE (15, 7, RA1, RA0, RA2, RA4, RA3, RA4, RA1, RA0, RA3, RA2, RB1, RB0, RB2, RB4, RB3, RB4, RB1, RB0, RB3, RB2); ROUND_INVERSE (14, 6, RA4, RA1, RA0, RA3, RA2, RA1, RA0, RA2, RA3, RA4, RB4, RB1, RB0, RB3, RB2, RB1, RB0, RB2, RB3, RB4); ROUND_INVERSE (13, 5, RA1, RA0, RA2, RA3, RA4, RA0, RA4, RA3, RA2, RA1, RB1, RB0, RB2, RB3, RB4, RB0, RB4, RB3, RB2, RB1); ROUND_INVERSE (12, 4, RA0, RA4, RA3, RA2, RA1, RA0, RA2, RA3, RA1, RA4, RB0, RB4, RB3, RB2, RB1, RB0, RB2, RB3, RB1, RB4); ROUND_INVERSE (11, 3, RA0, RA2, RA3, RA1, RA4, RA3, RA2, RA1, RA0, RA4, RB0, RB2, RB3, RB1, RB4, RB3, RB2, RB1, RB0, RB4); ROUND_INVERSE (10, 2, RA3, RA2, RA1, RA0, RA4, RA2, RA4, RA1, RA0, RA3, RB3, RB2, RB1, RB0, RB4, RB2, RB4, RB1, RB0, RB3); ROUND_INVERSE (9, 1, RA2, RA4, RA1, RA0, RA3, RA3, RA2, RA0, RA1, RA4, RB2, RB4, RB1, RB0, RB3, RB3, RB2, RB0, RB1, RB4); ROUND_INVERSE (8, 0, RA3, RA2, RA0, RA1, RA4, RA3, RA4, RA2, RA1, RA0, RB3, RB2, RB0, RB1, RB4, RB3, RB4, RB2, RB1, RB0); ROUND_INVERSE (7, 7, RA3, RA4, RA2, RA1, RA0, RA1, RA3, RA4, RA0, RA2, RB3, RB4, RB2, RB1, RB0, RB1, RB3, RB4, RB0, RB2); ROUND_INVERSE (6, 6, RA1, RA3, RA4, RA0, RA2, RA3, RA4, RA2, RA0, RA1, RB1, RB3, RB4, RB0, RB2, RB3, RB4, RB2, RB0, RB1); ROUND_INVERSE (5, 5, RA3, RA4, RA2, RA0, RA1, RA4, RA1, RA0, RA2, RA3, RB3, RB4, RB2, RB0, RB1, RB4, RB1, RB0, RB2, RB3); ROUND_INVERSE (4, 4, RA4, RA1, RA0, RA2, RA3, RA4, RA2, RA0, RA3, RA1, RB4, RB1, RB0, RB2, RB3, RB4, RB2, RB0, RB3, RB1); ROUND_INVERSE (3, 3, RA4, RA2, RA0, RA3, RA1, RA0, RA2, RA3, RA4, RA1, RB4, RB2, RB0, RB3, RB1, RB0, RB2, RB3, RB4, RB1); ROUND_INVERSE (2, 2, RA0, RA2, RA3, RA4, RA1, RA2, RA1, RA3, RA4, RA0, RB0, RB2, RB3, RB4, RB1, RB2, RB1, RB3, RB4, RB0); ROUND_INVERSE (1, 1, RA2, RA1, RA3, RA4, RA0, RA0, RA2, RA4, RA3, RA1, RB2, RB1, RB3, RB4, RB0, RB0, RB2, RB4, RB3, RB1); ROUND_INVERSE (0, 0, RA0, RA2, RA4, RA3, RA1, RA0, RA1, RA2, RA3, RA4, RB0, RB2, RB4, RB3, RB1, RB0, RB1, RB2, RB3, RB4); transpose_4x4(RA0, RA1, RA2, RA3, RA4, RTMP0, RTMP1); transpose_4x4(RB0, RB1, RB2, RB3, RB4, RTMP0, RTMP1); ret_spec_stop; CFI_ENDPROC(); ELF(.size __serpent_dec_blk16,.-__serpent_dec_blk16;) .align 16 .globl _gcry_serpent_avx2_blk16 ELF(.type _gcry_serpent_avx2_blk16,@function;) _gcry_serpent_avx2_blk16: /* input: * %rdi: ctx, CTX * %rsi: dst (8 blocks) * %rdx: src (8 blocks) * %ecx: encrypt */ CFI_STARTPROC(); vmovdqu (0 * 32)(%rdx), RA0; vmovdqu (1 * 32)(%rdx), RA1; vmovdqu (2 * 32)(%rdx), RA2; vmovdqu (3 * 32)(%rdx), RA3; vmovdqu (4 * 32)(%rdx), RB0; vmovdqu (5 * 32)(%rdx), RB1; vmovdqu (6 * 32)(%rdx), RB2; vmovdqu (7 * 32)(%rdx), RB3; testl %ecx, %ecx; jz .Lblk16_dec; call __serpent_enc_blk16; vmovdqu RA4, (0 * 32)(%rsi); vmovdqu RA1, (1 * 32)(%rsi); vmovdqu RA2, (2 * 32)(%rsi); vmovdqu RA0, (3 * 32)(%rsi); vmovdqu RB4, (4 * 32)(%rsi); vmovdqu RB1, (5 * 32)(%rsi); vmovdqu RB2, (6 * 32)(%rsi); vmovdqu RB0, (7 * 32)(%rsi); jmp .Lblk16_end; .Lblk16_dec: call __serpent_dec_blk16; vmovdqu RA0, (0 * 32)(%rsi); vmovdqu RA1, (1 * 32)(%rsi); vmovdqu RA2, (2 * 32)(%rsi); vmovdqu RA3, (3 * 32)(%rsi); vmovdqu RB0, (4 * 32)(%rsi); vmovdqu RB1, (5 * 32)(%rsi); vmovdqu RB2, (6 * 32)(%rsi); vmovdqu RB3, (7 * 32)(%rsi); .Lblk16_end: vzeroall; ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_serpent_avx2_blk16,.-_gcry_serpent_avx2_blk16;) #define inc_le128(x, minus_one, tmp) \ vpcmpeqq minus_one, x, tmp; \ vpsubq minus_one, x, x; \ vpslldq $8, tmp, tmp; \ vpsubq tmp, x, x; .align 16 .globl _gcry_serpent_avx2_ctr_enc ELF(.type _gcry_serpent_avx2_ctr_enc,@function;) 
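/*
 * inc_le128 (defined above) increments a 128-bit little-endian counter
 * held in a vector register: `minus_one' has its low 64-bit lane set to
 * all-ones and its high lane zero, so the first vpsubq adds one to the
 * low lane only, while the vpcmpeqq mask (taken before the increment)
 * is shifted into the high lane and subtracted to propagate the carry.
 * A minimal scalar sketch of the same operation:
 *
 *   #include <stdint.h>
 *
 *   static void inc_le128_scalar(uint64_t v[2])  // v[0] = low, v[1] = high
 *   {
 *       uint64_t carry = (v[0] == UINT64_MAX);   // does the low half wrap?
 *       v[0] += 1;
 *       v[1] += carry;
 *   }
 *
 * The CTR code below additionally byte-swaps with .Lbswap128_mask, since
 * the caller's counter block is big-endian.
 */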
_gcry_serpent_avx2_ctr_enc: /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) * %rdx: src (16 blocks) * %rcx: iv (big endian, 128bit) */ CFI_STARTPROC(); movq 8(%rcx), %rax; bswapq %rax; vzeroupper; vbroadcasti128 .Lbswap128_mask rRIP, RTMP3; vpcmpeqd RNOT, RNOT, RNOT; vpsrldq $8, RNOT, RNOT; /* ab: -1:0 ; cd: -1:0 */ vpaddq RNOT, RNOT, RTMP2; /* ab: -2:0 ; cd: -2:0 */ /* load IV and byteswap */ vmovdqu (%rcx), RTMP4x; vpshufb RTMP3x, RTMP4x, RTMP4x; vmovdqa RTMP4x, RTMP0x; inc_le128(RTMP4x, RNOTx, RTMP1x); vinserti128 $1, RTMP4x, RTMP0, RTMP0; vpshufb RTMP3, RTMP0, RA0; /* +1 ; +0 */ /* check need for handling 64-bit overflow and carry */ cmpq $(0xffffffffffffffff - 16), %rax; ja .Lhandle_ctr_carry; /* construct IVs */ vpsubq RTMP2, RTMP0, RTMP0; /* +3 ; +2 */ vpshufb RTMP3, RTMP0, RA1; vpsubq RTMP2, RTMP0, RTMP0; /* +5 ; +4 */ vpshufb RTMP3, RTMP0, RA2; vpsubq RTMP2, RTMP0, RTMP0; /* +7 ; +6 */ vpshufb RTMP3, RTMP0, RA3; vpsubq RTMP2, RTMP0, RTMP0; /* +9 ; +8 */ vpshufb RTMP3, RTMP0, RB0; vpsubq RTMP2, RTMP0, RTMP0; /* +11 ; +10 */ vpshufb RTMP3, RTMP0, RB1; vpsubq RTMP2, RTMP0, RTMP0; /* +13 ; +12 */ vpshufb RTMP3, RTMP0, RB2; vpsubq RTMP2, RTMP0, RTMP0; /* +15 ; +14 */ vpshufb RTMP3, RTMP0, RB3; vpsubq RTMP2, RTMP0, RTMP0; /* +16 */ vpshufb RTMP3x, RTMP0x, RTMP0x; jmp .Lctr_carry_done; .Lhandle_ctr_carry: /* construct IVs */ inc_le128(RTMP0, RNOT, RTMP1); inc_le128(RTMP0, RNOT, RTMP1); vpshufb RTMP3, RTMP0, RA1; /* +3 ; +2 */ inc_le128(RTMP0, RNOT, RTMP1); inc_le128(RTMP0, RNOT, RTMP1); vpshufb RTMP3, RTMP0, RA2; /* +5 ; +4 */ inc_le128(RTMP0, RNOT, RTMP1); inc_le128(RTMP0, RNOT, RTMP1); vpshufb RTMP3, RTMP0, RA3; /* +7 ; +6 */ inc_le128(RTMP0, RNOT, RTMP1); inc_le128(RTMP0, RNOT, RTMP1); vpshufb RTMP3, RTMP0, RB0; /* +9 ; +8 */ inc_le128(RTMP0, RNOT, RTMP1); inc_le128(RTMP0, RNOT, RTMP1); vpshufb RTMP3, RTMP0, RB1; /* +11 ; +10 */ inc_le128(RTMP0, RNOT, RTMP1); inc_le128(RTMP0, RNOT, RTMP1); vpshufb RTMP3, RTMP0, RB2; /* +13 ; +12 */ inc_le128(RTMP0, RNOT, RTMP1); inc_le128(RTMP0, RNOT, RTMP1); vpshufb RTMP3, RTMP0, RB3; /* +15 ; +14 */ inc_le128(RTMP0, RNOT, RTMP1); vextracti128 $1, RTMP0, RTMP0x; vpshufb RTMP3x, RTMP0x, RTMP0x; /* +16 */ .align 4 .Lctr_carry_done: /* store new IV */ vmovdqu RTMP0x, (%rcx); call __serpent_enc_blk16; vpxor (0 * 32)(%rdx), RA4, RA4; vpxor (1 * 32)(%rdx), RA1, RA1; vpxor (2 * 32)(%rdx), RA2, RA2; vpxor (3 * 32)(%rdx), RA0, RA0; vpxor (4 * 32)(%rdx), RB4, RB4; vpxor (5 * 32)(%rdx), RB1, RB1; vpxor (6 * 32)(%rdx), RB2, RB2; vpxor (7 * 32)(%rdx), RB0, RB0; vmovdqu RA4, (0 * 32)(%rsi); vmovdqu RA1, (1 * 32)(%rsi); vmovdqu RA2, (2 * 32)(%rsi); vmovdqu RA0, (3 * 32)(%rsi); vmovdqu RB4, (4 * 32)(%rsi); vmovdqu RB1, (5 * 32)(%rsi); vmovdqu RB2, (6 * 32)(%rsi); vmovdqu RB0, (7 * 32)(%rsi); vzeroall; ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_serpent_avx2_ctr_enc,.-_gcry_serpent_avx2_ctr_enc;) .align 16 .globl _gcry_serpent_avx2_cbc_dec ELF(.type _gcry_serpent_avx2_cbc_dec,@function;) _gcry_serpent_avx2_cbc_dec: /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) * %rdx: src (16 blocks) * %rcx: iv */ CFI_STARTPROC(); vzeroupper; vmovdqu (0 * 32)(%rdx), RA0; vmovdqu (1 * 32)(%rdx), RA1; vmovdqu (2 * 32)(%rdx), RA2; vmovdqu (3 * 32)(%rdx), RA3; vmovdqu (4 * 32)(%rdx), RB0; vmovdqu (5 * 32)(%rdx), RB1; vmovdqu (6 * 32)(%rdx), RB2; vmovdqu (7 * 32)(%rdx), RB3; call __serpent_dec_blk16; vmovdqu (%rcx), RNOTx; vinserti128 $1, (%rdx), RNOT, RNOT; vpxor RNOT, RA0, RA0; vpxor (0 * 32 + 16)(%rdx), RA1, RA1; vpxor (1 * 32 + 16)(%rdx), RA2, RA2; vpxor (2 * 32 + 16)(%rdx), 
RA3, RA3; vpxor (3 * 32 + 16)(%rdx), RB0, RB0; vpxor (4 * 32 + 16)(%rdx), RB1, RB1; vpxor (5 * 32 + 16)(%rdx), RB2, RB2; vpxor (6 * 32 + 16)(%rdx), RB3, RB3; vmovdqu (7 * 32 + 16)(%rdx), RNOTx; vmovdqu RNOTx, (%rcx); /* store new IV */ vmovdqu RA0, (0 * 32)(%rsi); vmovdqu RA1, (1 * 32)(%rsi); vmovdqu RA2, (2 * 32)(%rsi); vmovdqu RA3, (3 * 32)(%rsi); vmovdqu RB0, (4 * 32)(%rsi); vmovdqu RB1, (5 * 32)(%rsi); vmovdqu RB2, (6 * 32)(%rsi); vmovdqu RB3, (7 * 32)(%rsi); vzeroall; ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_serpent_avx2_cbc_dec,.-_gcry_serpent_avx2_cbc_dec;) .align 16 .globl _gcry_serpent_avx2_cfb_dec ELF(.type _gcry_serpent_avx2_cfb_dec,@function;) _gcry_serpent_avx2_cfb_dec: /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) * %rdx: src (16 blocks) * %rcx: iv */ CFI_STARTPROC(); vzeroupper; /* Load input */ vmovdqu (%rcx), RNOTx; vinserti128 $1, (%rdx), RNOT, RA0; vmovdqu (0 * 32 + 16)(%rdx), RA1; vmovdqu (1 * 32 + 16)(%rdx), RA2; vmovdqu (2 * 32 + 16)(%rdx), RA3; vmovdqu (3 * 32 + 16)(%rdx), RB0; vmovdqu (4 * 32 + 16)(%rdx), RB1; vmovdqu (5 * 32 + 16)(%rdx), RB2; vmovdqu (6 * 32 + 16)(%rdx), RB3; /* Update IV */ vmovdqu (7 * 32 + 16)(%rdx), RNOTx; vmovdqu RNOTx, (%rcx); call __serpent_enc_blk16; vpxor (0 * 32)(%rdx), RA4, RA4; vpxor (1 * 32)(%rdx), RA1, RA1; vpxor (2 * 32)(%rdx), RA2, RA2; vpxor (3 * 32)(%rdx), RA0, RA0; vpxor (4 * 32)(%rdx), RB4, RB4; vpxor (5 * 32)(%rdx), RB1, RB1; vpxor (6 * 32)(%rdx), RB2, RB2; vpxor (7 * 32)(%rdx), RB0, RB0; vmovdqu RA4, (0 * 32)(%rsi); vmovdqu RA1, (1 * 32)(%rsi); vmovdqu RA2, (2 * 32)(%rsi); vmovdqu RA0, (3 * 32)(%rsi); vmovdqu RB4, (4 * 32)(%rsi); vmovdqu RB1, (5 * 32)(%rsi); vmovdqu RB2, (6 * 32)(%rsi); vmovdqu RB0, (7 * 32)(%rsi); vzeroall; ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_serpent_avx2_cfb_dec,.-_gcry_serpent_avx2_cfb_dec;) .align 16 .globl _gcry_serpent_avx2_ocb_enc ELF(.type _gcry_serpent_avx2_ocb_enc,@function;) _gcry_serpent_avx2_ocb_enc: /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) * %rdx: src (16 blocks) * %rcx: offset * %r8 : checksum * %r9 : L pointers (void *L[16]) */ CFI_STARTPROC(); vzeroupper; subq $(4 * 8), %rsp; CFI_ADJUST_CFA_OFFSET(4 * 8); movq %r10, (0 * 8)(%rsp); movq %r11, (1 * 8)(%rsp); movq %r12, (2 * 8)(%rsp); movq %r13, (3 * 8)(%rsp); CFI_REL_OFFSET(%r10, 0 * 8); CFI_REL_OFFSET(%r11, 1 * 8); CFI_REL_OFFSET(%r12, 2 * 8); CFI_REL_OFFSET(%r13, 3 * 8); vmovdqu (%rcx), RTMP0x; vmovdqu (%r8), RTMP1x; /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ /* Checksum_i = Checksum_{i-1} xor P_i */ /* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i) */ #define OCB_INPUT(n, l0reg, l1reg, yreg) \ vmovdqu (n * 32)(%rdx), yreg; \ vpxor (l0reg), RTMP0x, RNOTx; \ vpxor (l1reg), RNOTx, RTMP0x; \ vinserti128 $1, RTMP0x, RNOT, RNOT; \ vpxor yreg, RTMP1, RTMP1; \ vpxor yreg, RNOT, yreg; \ vmovdqu RNOT, (n * 32)(%rsi); movq (0 * 8)(%r9), %r10; movq (1 * 8)(%r9), %r11; movq (2 * 8)(%r9), %r12; movq (3 * 8)(%r9), %r13; OCB_INPUT(0, %r10, %r11, RA0); OCB_INPUT(1, %r12, %r13, RA1); movq (4 * 8)(%r9), %r10; movq (5 * 8)(%r9), %r11; movq (6 * 8)(%r9), %r12; movq (7 * 8)(%r9), %r13; OCB_INPUT(2, %r10, %r11, RA2); OCB_INPUT(3, %r12, %r13, RA3); movq (8 * 8)(%r9), %r10; movq (9 * 8)(%r9), %r11; movq (10 * 8)(%r9), %r12; movq (11 * 8)(%r9), %r13; OCB_INPUT(4, %r10, %r11, RB0); OCB_INPUT(5, %r12, %r13, RB1); movq (12 * 8)(%r9), %r10; movq (13 * 8)(%r9), %r11; movq (14 * 8)(%r9), %r12; movq (15 * 8)(%r9), %r13; OCB_INPUT(6, %r10, %r11, RB2); OCB_INPUT(7, %r12, %r13, RB3); #undef OCB_INPUT vextracti128 $1, RTMP1, RNOTx; vmovdqu 
RTMP0x, (%rcx); vpxor RNOTx, RTMP1x, RTMP1x; vmovdqu RTMP1x, (%r8); movq (0 * 8)(%rsp), %r10; movq (1 * 8)(%rsp), %r11; movq (2 * 8)(%rsp), %r12; movq (3 * 8)(%rsp), %r13; CFI_RESTORE(%r10); CFI_RESTORE(%r11); CFI_RESTORE(%r12); CFI_RESTORE(%r13); call __serpent_enc_blk16; addq $(4 * 8), %rsp; CFI_ADJUST_CFA_OFFSET(-4 * 8); vpxor (0 * 32)(%rsi), RA4, RA4; vpxor (1 * 32)(%rsi), RA1, RA1; vpxor (2 * 32)(%rsi), RA2, RA2; vpxor (3 * 32)(%rsi), RA0, RA0; vpxor (4 * 32)(%rsi), RB4, RB4; vpxor (5 * 32)(%rsi), RB1, RB1; vpxor (6 * 32)(%rsi), RB2, RB2; vpxor (7 * 32)(%rsi), RB0, RB0; vmovdqu RA4, (0 * 32)(%rsi); vmovdqu RA1, (1 * 32)(%rsi); vmovdqu RA2, (2 * 32)(%rsi); vmovdqu RA0, (3 * 32)(%rsi); vmovdqu RB4, (4 * 32)(%rsi); vmovdqu RB1, (5 * 32)(%rsi); vmovdqu RB2, (6 * 32)(%rsi); vmovdqu RB0, (7 * 32)(%rsi); vzeroall; ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_serpent_avx2_ocb_enc,.-_gcry_serpent_avx2_ocb_enc;) .align 16 .globl _gcry_serpent_avx2_ocb_dec ELF(.type _gcry_serpent_avx2_ocb_dec,@function;) _gcry_serpent_avx2_ocb_dec: /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) * %rdx: src (16 blocks) * %rcx: offset * %r8 : checksum * %r9 : L pointers (void *L[16]) */ CFI_STARTPROC(); vzeroupper; subq $(4 * 8), %rsp; CFI_ADJUST_CFA_OFFSET(4 * 8); movq %r10, (0 * 8)(%rsp); movq %r11, (1 * 8)(%rsp); movq %r12, (2 * 8)(%rsp); movq %r13, (3 * 8)(%rsp); CFI_REL_OFFSET(%r10, 0 * 8); CFI_REL_OFFSET(%r11, 1 * 8); CFI_REL_OFFSET(%r12, 2 * 8); CFI_REL_OFFSET(%r13, 3 * 8); vmovdqu (%rcx), RTMP0x; /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ /* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i) */ #define OCB_INPUT(n, l0reg, l1reg, yreg) \ vmovdqu (n * 32)(%rdx), yreg; \ vpxor (l0reg), RTMP0x, RNOTx; \ vpxor (l1reg), RNOTx, RTMP0x; \ vinserti128 $1, RTMP0x, RNOT, RNOT; \ vpxor yreg, RNOT, yreg; \ vmovdqu RNOT, (n * 32)(%rsi); movq (0 * 8)(%r9), %r10; movq (1 * 8)(%r9), %r11; movq (2 * 8)(%r9), %r12; movq (3 * 8)(%r9), %r13; OCB_INPUT(0, %r10, %r11, RA0); OCB_INPUT(1, %r12, %r13, RA1); movq (4 * 8)(%r9), %r10; movq (5 * 8)(%r9), %r11; movq (6 * 8)(%r9), %r12; movq (7 * 8)(%r9), %r13; OCB_INPUT(2, %r10, %r11, RA2); OCB_INPUT(3, %r12, %r13, RA3); movq (8 * 8)(%r9), %r10; movq (9 * 8)(%r9), %r11; movq (10 * 8)(%r9), %r12; movq (11 * 8)(%r9), %r13; OCB_INPUT(4, %r10, %r11, RB0); OCB_INPUT(5, %r12, %r13, RB1); movq (12 * 8)(%r9), %r10; movq (13 * 8)(%r9), %r11; movq (14 * 8)(%r9), %r12; movq (15 * 8)(%r9), %r13; OCB_INPUT(6, %r10, %r11, RB2); OCB_INPUT(7, %r12, %r13, RB3); #undef OCB_INPUT vmovdqu RTMP0x, (%rcx); movq (0 * 8)(%rsp), %r10; movq (1 * 8)(%rsp), %r11; movq (2 * 8)(%rsp), %r12; movq (3 * 8)(%rsp), %r13; CFI_RESTORE(%r10); CFI_RESTORE(%r11); CFI_RESTORE(%r12); CFI_RESTORE(%r13); call __serpent_dec_blk16; addq $(4 * 8), %rsp; CFI_ADJUST_CFA_OFFSET(-4 * 8); vmovdqu (%r8), RTMP1x; vpxor (0 * 32)(%rsi), RA0, RA0; vpxor (1 * 32)(%rsi), RA1, RA1; vpxor (2 * 32)(%rsi), RA2, RA2; vpxor (3 * 32)(%rsi), RA3, RA3; vpxor (4 * 32)(%rsi), RB0, RB0; vpxor (5 * 32)(%rsi), RB1, RB1; vpxor (6 * 32)(%rsi), RB2, RB2; vpxor (7 * 32)(%rsi), RB3, RB3; /* Checksum_i = Checksum_{i-1} xor P_i */ vmovdqu RA0, (0 * 32)(%rsi); vpxor RA0, RTMP1, RTMP1; vmovdqu RA1, (1 * 32)(%rsi); vpxor RA1, RTMP1, RTMP1; vmovdqu RA2, (2 * 32)(%rsi); vpxor RA2, RTMP1, RTMP1; vmovdqu RA3, (3 * 32)(%rsi); vpxor RA3, RTMP1, RTMP1; vmovdqu RB0, (4 * 32)(%rsi); vpxor RB0, RTMP1, RTMP1; vmovdqu RB1, (5 * 32)(%rsi); vpxor RB1, RTMP1, RTMP1; vmovdqu RB2, (6 * 32)(%rsi); vpxor RB2, RTMP1, RTMP1; vmovdqu RB3, (7 * 32)(%rsi); vpxor RB3, RTMP1, RTMP1; 
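As an aside on the checksum handling just above: the OCB checksum is accumulated in a 256-bit register, so each 128-bit lane holds a partial sum, and the vextracti128/vpxor pair that follows folds the high lane into the low lane before the result is written back to the checksum buffer in %r8. A rough scalar C sketch of that fold, assuming hypothetical u128/xor128 helpers that are not part of this code:

#include <stdint.h>

typedef struct { uint8_t b[16]; } u128;

/* Byte-wise XOR of two 128-bit blocks (illustrative helper). */
static u128 xor128(u128 a, u128 b)
{
    for (int i = 0; i < 16; i++)
        a.b[i] ^= b.b[i];
    return a;
}

/* Fold a two-lane checksum accumulator into the final 128-bit OCB
 * checksum, mirroring the vextracti128 + vpxor sequence that follows. */
static u128 fold_checksum(u128 lane_lo, u128 lane_hi)
{
    return xor128(lane_lo, lane_hi);
}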
vextracti128 $1, RTMP1, RNOTx; vpxor RNOTx, RTMP1x, RTMP1x; vmovdqu RTMP1x, (%r8); vzeroall; ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_serpent_avx2_ocb_dec,.-_gcry_serpent_avx2_ocb_dec;) .align 16 .globl _gcry_serpent_avx2_ocb_auth ELF(.type _gcry_serpent_avx2_ocb_auth,@function;) _gcry_serpent_avx2_ocb_auth: /* input: * %rdi: ctx, CTX * %rsi: abuf (16 blocks) * %rdx: offset * %rcx: checksum * %r8 : L pointers (void *L[16]) */ CFI_STARTPROC(); vzeroupper; subq $(4 * 8), %rsp; CFI_ADJUST_CFA_OFFSET(4 * 8); movq %r10, (0 * 8)(%rsp); movq %r11, (1 * 8)(%rsp); movq %r12, (2 * 8)(%rsp); movq %r13, (3 * 8)(%rsp); CFI_REL_OFFSET(%r10, 0 * 8); CFI_REL_OFFSET(%r11, 1 * 8); CFI_REL_OFFSET(%r12, 2 * 8); CFI_REL_OFFSET(%r13, 3 * 8); vmovdqu (%rdx), RTMP0x; /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ /* Sum_i = Sum_{i-1} xor ENCIPHER(K, A_i xor Offset_i) */ #define OCB_INPUT(n, l0reg, l1reg, yreg) \ vmovdqu (n * 32)(%rsi), yreg; \ vpxor (l0reg), RTMP0x, RNOTx; \ vpxor (l1reg), RNOTx, RTMP0x; \ vinserti128 $1, RTMP0x, RNOT, RNOT; \ vpxor yreg, RNOT, yreg; movq (0 * 8)(%r8), %r10; movq (1 * 8)(%r8), %r11; movq (2 * 8)(%r8), %r12; movq (3 * 8)(%r8), %r13; OCB_INPUT(0, %r10, %r11, RA0); OCB_INPUT(1, %r12, %r13, RA1); movq (4 * 8)(%r8), %r10; movq (5 * 8)(%r8), %r11; movq (6 * 8)(%r8), %r12; movq (7 * 8)(%r8), %r13; OCB_INPUT(2, %r10, %r11, RA2); OCB_INPUT(3, %r12, %r13, RA3); movq (8 * 8)(%r8), %r10; movq (9 * 8)(%r8), %r11; movq (10 * 8)(%r8), %r12; movq (11 * 8)(%r8), %r13; OCB_INPUT(4, %r10, %r11, RB0); OCB_INPUT(5, %r12, %r13, RB1); movq (12 * 8)(%r8), %r10; movq (13 * 8)(%r8), %r11; movq (14 * 8)(%r8), %r12; movq (15 * 8)(%r8), %r13; OCB_INPUT(6, %r10, %r11, RB2); OCB_INPUT(7, %r12, %r13, RB3); #undef OCB_INPUT vmovdqu RTMP0x, (%rdx); movq (0 * 8)(%rsp), %r10; movq (1 * 8)(%rsp), %r11; movq (2 * 8)(%rsp), %r12; movq (3 * 8)(%rsp), %r13; CFI_RESTORE(%r10); CFI_RESTORE(%r11); CFI_RESTORE(%r12); CFI_RESTORE(%r13); call __serpent_enc_blk16; addq $(4 * 8), %rsp; CFI_ADJUST_CFA_OFFSET(-4 * 8); vpxor RA4, RB4, RA4; vpxor RA1, RB1, RA1; vpxor RA2, RB2, RA2; vpxor RA0, RB0, RA0; vpxor RA4, RA1, RA1; vpxor RA2, RA0, RA0; vpxor RA1, RA0, RTMP1; vextracti128 $1, RTMP1, RNOTx; vpxor (%rcx), RTMP1x, RTMP1x; vpxor RNOTx, RTMP1x, RTMP1x; vmovdqu RTMP1x, (%rcx); vzeroall; ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_serpent_avx2_ocb_auth,.-_gcry_serpent_avx2_ocb_auth;) -.align 16 + +SECTION_RODATA +ELF(.type _serpent_avx2_consts,@object) +_serpent_avx2_consts: /* For CTR-mode IV byteswap */ +.align 16 .Lbswap128_mask: .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 #endif /*defined(USE_SERPENT) && defined(ENABLE_AVX2_SUPPORT)*/ #endif /*__x86_64*/ diff --git a/cipher/sm4-aesni-avx-amd64.S b/cipher/sm4-aesni-avx-amd64.S index 88f6e5c5..c09b205d 100644 --- a/cipher/sm4-aesni-avx-amd64.S +++ b/cipher/sm4-aesni-avx-amd64.S @@ -1,987 +1,992 @@ /* sm4-avx-aesni-amd64.S - AES-NI/AVX implementation of SM4 cipher * * Copyright (C) 2020 Jussi Kivilinna * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . */ /* Based on SM4 AES-NI work by Markku-Juhani O. Saarinen at: * https://github.com/mjosaarinen/sm4ni */ #include #ifdef __x86_64 #if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && \ defined(ENABLE_AESNI_SUPPORT) && defined(ENABLE_AVX_SUPPORT) #include "asm-common-amd64.h" /* vector registers */ #define RX0 %xmm0 #define RX1 %xmm1 #define MASK_4BIT %xmm2 #define RTMP0 %xmm3 #define RTMP1 %xmm4 #define RTMP2 %xmm5 #define RTMP3 %xmm6 #define RTMP4 %xmm7 #define RA0 %xmm8 #define RA1 %xmm9 #define RA2 %xmm10 #define RA3 %xmm11 #define RB0 %xmm12 #define RB1 %xmm13 #define RB2 %xmm14 #define RB3 %xmm15 #define RNOT %xmm0 #define RBSWAP %xmm1 /********************************************************************** helper macros **********************************************************************/ /* Transpose four 32-bit words between 128-bit vectors. */ #define transpose_4x4(x0, x1, x2, x3, t1, t2) \ vpunpckhdq x1, x0, t2; \ vpunpckldq x1, x0, x0; \ \ vpunpckldq x3, x2, t1; \ vpunpckhdq x3, x2, x2; \ \ vpunpckhqdq t1, x0, x1; \ vpunpcklqdq t1, x0, x0; \ \ vpunpckhqdq x2, t2, x3; \ vpunpcklqdq x2, t2, x2; /* post-SubByte transform. */ #define transform_pre(x, lo_t, hi_t, mask4bit, tmp0) \ vpand x, mask4bit, tmp0; \ vpandn x, mask4bit, x; \ vpsrld $4, x, x; \ \ vpshufb tmp0, lo_t, tmp0; \ vpshufb x, hi_t, x; \ vpxor tmp0, x, x; /* post-SubByte transform. Note: x has been XOR'ed with mask4bit by * 'vaeslastenc' instruction. */ #define transform_post(x, lo_t, hi_t, mask4bit, tmp0) \ vpandn mask4bit, x, tmp0; \ vpsrld $4, x, x; \ vpand x, mask4bit, x; \ \ vpshufb tmp0, lo_t, tmp0; \ vpshufb x, hi_t, x; \ vpxor tmp0, x, x; /********************************************************************** 4-way && 8-way SM4 with AES-NI and AVX **********************************************************************/ -.text +SECTION_RODATA .align 16 +ELF(.type _sm4_aesni_avx_consts,@object) +_sm4_aesni_avx_consts: + /* * Following four affine transform look-up tables are from work by * Markku-Juhani O. Saarinen, at https://github.com/mjosaarinen/sm4ni * * These allow exposing SM4 S-Box from AES SubByte. */ /* pre-SubByte affine transform, from SM4 field to AES field. */ .Lpre_tf_lo_s: .quad 0x9197E2E474720701, 0xC7C1B4B222245157 .Lpre_tf_hi_s: .quad 0xE240AB09EB49A200, 0xF052B91BF95BB012 /* post-SubByte affine transform, from AES field to SM4 field. 
*/ .Lpost_tf_lo_s: .quad 0x5B67F2CEA19D0834, 0xEDD14478172BBE82 .Lpost_tf_hi_s: .quad 0xAE7201DD73AFDC00, 0x11CDBE62CC1063BF /* For isolating SubBytes from AESENCLAST, inverse shift row */ .Linv_shift_row: .byte 0x00, 0x0d, 0x0a, 0x07, 0x04, 0x01, 0x0e, 0x0b .byte 0x08, 0x05, 0x02, 0x0f, 0x0c, 0x09, 0x06, 0x03 /* Inverse shift row + Rotate left by 8 bits on 32-bit words with vpshufb */ .Linv_shift_row_rol_8: .byte 0x07, 0x00, 0x0d, 0x0a, 0x0b, 0x04, 0x01, 0x0e .byte 0x0f, 0x08, 0x05, 0x02, 0x03, 0x0c, 0x09, 0x06 /* Inverse shift row + Rotate left by 16 bits on 32-bit words with vpshufb */ .Linv_shift_row_rol_16: .byte 0x0a, 0x07, 0x00, 0x0d, 0x0e, 0x0b, 0x04, 0x01 .byte 0x02, 0x0f, 0x08, 0x05, 0x06, 0x03, 0x0c, 0x09 /* Inverse shift row + Rotate left by 24 bits on 32-bit words with vpshufb */ .Linv_shift_row_rol_24: .byte 0x0d, 0x0a, 0x07, 0x00, 0x01, 0x0e, 0x0b, 0x04 .byte 0x05, 0x02, 0x0f, 0x08, 0x09, 0x06, 0x03, 0x0c /* For CTR-mode IV byteswap */ .Lbswap128_mask: .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 /* For input word byte-swap */ .Lbswap32_mask: .byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 .align 4 /* 4-bit mask */ .L0f0f0f0f: .long 0x0f0f0f0f +.text + .align 16 .globl _gcry_sm4_aesni_avx_expand_key ELF(.type _gcry_sm4_aesni_avx_expand_key,@function;) _gcry_sm4_aesni_avx_expand_key: /* input: * %rdi: 128-bit key * %rsi: rkey_enc * %rdx: rkey_dec * %rcx: fk array * %r8: ck array */ CFI_STARTPROC(); vmovd 0*4(%rdi), RA0; vmovd 1*4(%rdi), RA1; vmovd 2*4(%rdi), RA2; vmovd 3*4(%rdi), RA3; vmovdqa .Lbswap32_mask rRIP, RTMP2; vpshufb RTMP2, RA0, RA0; vpshufb RTMP2, RA1, RA1; vpshufb RTMP2, RA2, RA2; vpshufb RTMP2, RA3, RA3; vmovd 0*4(%rcx), RB0; vmovd 1*4(%rcx), RB1; vmovd 2*4(%rcx), RB2; vmovd 3*4(%rcx), RB3; vpxor RB0, RA0, RA0; vpxor RB1, RA1, RA1; vpxor RB2, RA2, RA2; vpxor RB3, RA3, RA3; vbroadcastss .L0f0f0f0f rRIP, MASK_4BIT; vmovdqa .Lpre_tf_lo_s rRIP, RTMP4; vmovdqa .Lpre_tf_hi_s rRIP, RB0; vmovdqa .Lpost_tf_lo_s rRIP, RB1; vmovdqa .Lpost_tf_hi_s rRIP, RB2; vmovdqa .Linv_shift_row rRIP, RB3; #define ROUND(round, s0, s1, s2, s3) \ vbroadcastss (4*(round))(%r8), RX0; \ vpxor s1, RX0, RX0; \ vpxor s2, RX0, RX0; \ vpxor s3, RX0, RX0; /* s1 ^ s2 ^ s3 ^ rk */ \ \ /* sbox, non-linear part */ \ transform_pre(RX0, RTMP4, RB0, MASK_4BIT, RTMP0); \ vaesenclast MASK_4BIT, RX0, RX0; \ transform_post(RX0, RB1, RB2, MASK_4BIT, RTMP0); \ \ /* linear part */ \ vpshufb RB3, RX0, RX0; \ vpxor RX0, s0, s0; /* s0 ^ x */ \ vpslld $13, RX0, RTMP0; \ vpsrld $19, RX0, RTMP1; \ vpslld $23, RX0, RTMP2; \ vpsrld $9, RX0, RTMP3; \ vpxor RTMP0, RTMP1, RTMP1; \ vpxor RTMP2, RTMP3, RTMP3; \ vpxor RTMP1, s0, s0; /* s0 ^ x ^ rol(x,13) */ \ vpxor RTMP3, s0, s0; /* s0 ^ x ^ rol(x,13) ^ rol(x,23) */ leaq (32*4)(%r8), %rax; leaq (32*4)(%rdx), %rdx; .align 16 .Lroundloop_expand_key: leaq (-4*4)(%rdx), %rdx; ROUND(0, RA0, RA1, RA2, RA3); ROUND(1, RA1, RA2, RA3, RA0); ROUND(2, RA2, RA3, RA0, RA1); ROUND(3, RA3, RA0, RA1, RA2); leaq (4*4)(%r8), %r8; vmovd RA0, (0*4)(%rsi); vmovd RA1, (1*4)(%rsi); vmovd RA2, (2*4)(%rsi); vmovd RA3, (3*4)(%rsi); vmovd RA0, (3*4)(%rdx); vmovd RA1, (2*4)(%rdx); vmovd RA2, (1*4)(%rdx); vmovd RA3, (0*4)(%rdx); leaq (4*4)(%rsi), %rsi; cmpq %rax, %r8; jne .Lroundloop_expand_key; #undef ROUND vzeroall; ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_sm4_aesni_avx_expand_key,.-_gcry_sm4_aesni_avx_expand_key;) .align 16 ELF(.type sm4_aesni_avx_crypt_blk1_4,@function;) sm4_aesni_avx_crypt_blk1_4: /* input: * %rdi: round key array, CTX * %rsi: dst (1..4 blocks) * %rdx: 
src (1..4 blocks) * %rcx: num blocks (1..4) */ CFI_STARTPROC(); vmovdqu 0*16(%rdx), RA0; vmovdqa RA0, RA1; vmovdqa RA0, RA2; vmovdqa RA0, RA3; cmpq $2, %rcx; jb .Lblk4_load_input_done; vmovdqu 1*16(%rdx), RA1; je .Lblk4_load_input_done; vmovdqu 2*16(%rdx), RA2; cmpq $3, %rcx; je .Lblk4_load_input_done; vmovdqu 3*16(%rdx), RA3; .Lblk4_load_input_done: vmovdqa .Lbswap32_mask rRIP, RTMP2; vpshufb RTMP2, RA0, RA0; vpshufb RTMP2, RA1, RA1; vpshufb RTMP2, RA2, RA2; vpshufb RTMP2, RA3, RA3; vbroadcastss .L0f0f0f0f rRIP, MASK_4BIT; vmovdqa .Lpre_tf_lo_s rRIP, RTMP4; vmovdqa .Lpre_tf_hi_s rRIP, RB0; vmovdqa .Lpost_tf_lo_s rRIP, RB1; vmovdqa .Lpost_tf_hi_s rRIP, RB2; vmovdqa .Linv_shift_row rRIP, RB3; vmovdqa .Linv_shift_row_rol_8 rRIP, RTMP2; vmovdqa .Linv_shift_row_rol_16 rRIP, RTMP3; transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1); #define ROUND(round, s0, s1, s2, s3) \ vbroadcastss (4*(round))(%rdi), RX0; \ vpxor s1, RX0, RX0; \ vpxor s2, RX0, RX0; \ vpxor s3, RX0, RX0; /* s1 ^ s2 ^ s3 ^ rk */ \ \ /* sbox, non-linear part */ \ transform_pre(RX0, RTMP4, RB0, MASK_4BIT, RTMP0); \ vaesenclast MASK_4BIT, RX0, RX0; \ transform_post(RX0, RB1, RB2, MASK_4BIT, RTMP0); \ \ /* linear part */ \ vpshufb RB3, RX0, RTMP0; \ vpxor RTMP0, s0, s0; /* s0 ^ x */ \ vpshufb RTMP2, RX0, RTMP1; \ vpxor RTMP1, RTMP0, RTMP0; /* x ^ rol(x,8) */ \ vpshufb RTMP3, RX0, RTMP1; \ vpxor RTMP1, RTMP0, RTMP0; /* x ^ rol(x,8) ^ rol(x,16) */ \ vpshufb .Linv_shift_row_rol_24 rRIP, RX0, RTMP1; \ vpxor RTMP1, s0, s0; /* s0 ^ x ^ rol(x,24) */ \ vpslld $2, RTMP0, RTMP1; \ vpsrld $30, RTMP0, RTMP0; \ vpxor RTMP0, s0, s0; \ vpxor RTMP1, s0, s0; /* s0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ leaq (32*4)(%rdi), %rax; .align 16 .Lroundloop_blk4: ROUND(0, RA0, RA1, RA2, RA3); ROUND(1, RA1, RA2, RA3, RA0); ROUND(2, RA2, RA3, RA0, RA1); ROUND(3, RA3, RA0, RA1, RA2); leaq (4*4)(%rdi), %rdi; cmpq %rax, %rdi; jne .Lroundloop_blk4; #undef ROUND vmovdqa .Lbswap128_mask rRIP, RTMP2; transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1); vpshufb RTMP2, RA0, RA0; vpshufb RTMP2, RA1, RA1; vpshufb RTMP2, RA2, RA2; vpshufb RTMP2, RA3, RA3; vmovdqu RA0, 0*16(%rsi); cmpq $2, %rcx; jb .Lblk4_store_output_done; vmovdqu RA1, 1*16(%rsi); je .Lblk4_store_output_done; vmovdqu RA2, 2*16(%rsi); cmpq $3, %rcx; je .Lblk4_store_output_done; vmovdqu RA3, 3*16(%rsi); .Lblk4_store_output_done: vzeroall; xorl %eax, %eax; ret_spec_stop; CFI_ENDPROC(); ELF(.size sm4_aesni_avx_crypt_blk1_4,.-sm4_aesni_avx_crypt_blk1_4;) .align 16 ELF(.type __sm4_crypt_blk8,@function;) __sm4_crypt_blk8: /* input: * %rdi: round key array, CTX * RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: eight parallel * ciphertext blocks * output: * RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: eight parallel plaintext * blocks */ CFI_STARTPROC(); vmovdqa .Lbswap32_mask rRIP, RTMP2; vpshufb RTMP2, RA0, RA0; vpshufb RTMP2, RA1, RA1; vpshufb RTMP2, RA2, RA2; vpshufb RTMP2, RA3, RA3; vpshufb RTMP2, RB0, RB0; vpshufb RTMP2, RB1, RB1; vpshufb RTMP2, RB2, RB2; vpshufb RTMP2, RB3, RB3; vbroadcastss .L0f0f0f0f rRIP, MASK_4BIT; transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1); transpose_4x4(RB0, RB1, RB2, RB3, RTMP0, RTMP1); #define ROUND(round, s0, s1, s2, s3, r0, r1, r2, r3) \ vbroadcastss (4*(round))(%rdi), RX0; \ vmovdqa .Lpre_tf_lo_s rRIP, RTMP4; \ vmovdqa .Lpre_tf_hi_s rRIP, RTMP1; \ vmovdqa RX0, RX1; \ vpxor s1, RX0, RX0; \ vpxor s2, RX0, RX0; \ vpxor s3, RX0, RX0; /* s1 ^ s2 ^ s3 ^ rk */ \ vmovdqa .Lpost_tf_lo_s rRIP, RTMP2; \ vmovdqa .Lpost_tf_hi_s rRIP, RTMP3; \ vpxor r1, RX1, RX1; \ vpxor r2, RX1, RX1; \ vpxor 
r3, RX1, RX1; /* r1 ^ r2 ^ r3 ^ rk */ \ \ /* sbox, non-linear part */ \ transform_pre(RX0, RTMP4, RTMP1, MASK_4BIT, RTMP0); \ transform_pre(RX1, RTMP4, RTMP1, MASK_4BIT, RTMP0); \ vmovdqa .Linv_shift_row rRIP, RTMP4; \ vaesenclast MASK_4BIT, RX0, RX0; \ vaesenclast MASK_4BIT, RX1, RX1; \ transform_post(RX0, RTMP2, RTMP3, MASK_4BIT, RTMP0); \ transform_post(RX1, RTMP2, RTMP3, MASK_4BIT, RTMP0); \ \ /* linear part */ \ vpshufb RTMP4, RX0, RTMP0; \ vpxor RTMP0, s0, s0; /* s0 ^ x */ \ vpshufb RTMP4, RX1, RTMP2; \ vmovdqa .Linv_shift_row_rol_8 rRIP, RTMP4; \ vpxor RTMP2, r0, r0; /* r0 ^ x */ \ vpshufb RTMP4, RX0, RTMP1; \ vpxor RTMP1, RTMP0, RTMP0; /* x ^ rol(x,8) */ \ vpshufb RTMP4, RX1, RTMP3; \ vmovdqa .Linv_shift_row_rol_16 rRIP, RTMP4; \ vpxor RTMP3, RTMP2, RTMP2; /* x ^ rol(x,8) */ \ vpshufb RTMP4, RX0, RTMP1; \ vpxor RTMP1, RTMP0, RTMP0; /* x ^ rol(x,8) ^ rol(x,16) */ \ vpshufb RTMP4, RX1, RTMP3; \ vmovdqa .Linv_shift_row_rol_24 rRIP, RTMP4; \ vpxor RTMP3, RTMP2, RTMP2; /* x ^ rol(x,8) ^ rol(x,16) */ \ vpshufb RTMP4, RX0, RTMP1; \ vpxor RTMP1, s0, s0; /* s0 ^ x ^ rol(x,24) */ \ vpslld $2, RTMP0, RTMP1; \ vpsrld $30, RTMP0, RTMP0; \ vpxor RTMP0, s0, s0; \ vpxor RTMP1, s0, s0; /* s0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \ vpshufb RTMP4, RX1, RTMP3; \ vpxor RTMP3, r0, r0; /* r0 ^ x ^ rol(x,24) */ \ vpslld $2, RTMP2, RTMP3; \ vpsrld $30, RTMP2, RTMP2; \ vpxor RTMP2, r0, r0; \ vpxor RTMP3, r0, r0; /* r0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ leaq (32*4)(%rdi), %rax; .align 16 .Lroundloop_blk8: ROUND(0, RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3); ROUND(1, RA1, RA2, RA3, RA0, RB1, RB2, RB3, RB0); ROUND(2, RA2, RA3, RA0, RA1, RB2, RB3, RB0, RB1); ROUND(3, RA3, RA0, RA1, RA2, RB3, RB0, RB1, RB2); leaq (4*4)(%rdi), %rdi; cmpq %rax, %rdi; jne .Lroundloop_blk8; #undef ROUND vmovdqa .Lbswap128_mask rRIP, RTMP2; transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1); transpose_4x4(RB0, RB1, RB2, RB3, RTMP0, RTMP1); vpshufb RTMP2, RA0, RA0; vpshufb RTMP2, RA1, RA1; vpshufb RTMP2, RA2, RA2; vpshufb RTMP2, RA3, RA3; vpshufb RTMP2, RB0, RB0; vpshufb RTMP2, RB1, RB1; vpshufb RTMP2, RB2, RB2; vpshufb RTMP2, RB3, RB3; ret_spec_stop; CFI_ENDPROC(); ELF(.size __sm4_crypt_blk8,.-__sm4_crypt_blk8;) .align 16 .globl _gcry_sm4_aesni_avx_crypt_blk1_8 ELF(.type _gcry_sm4_aesni_avx_crypt_blk1_8,@function;) _gcry_sm4_aesni_avx_crypt_blk1_8: /* input: * %rdi: round key array, CTX * %rsi: dst (1..8 blocks) * %rdx: src (1..8 blocks) * %rcx: num blocks (1..8) */ CFI_STARTPROC(); cmpq $5, %rcx; jb sm4_aesni_avx_crypt_blk1_4; vmovdqu (0 * 16)(%rdx), RA0; vmovdqu (1 * 16)(%rdx), RA1; vmovdqu (2 * 16)(%rdx), RA2; vmovdqu (3 * 16)(%rdx), RA3; vmovdqu (4 * 16)(%rdx), RB0; vmovdqa RB0, RB1; vmovdqa RB0, RB2; vmovdqa RB0, RB3; je .Lblk8_load_input_done; vmovdqu (5 * 16)(%rdx), RB1; cmpq $7, %rcx; jb .Lblk8_load_input_done; vmovdqu (6 * 16)(%rdx), RB2; je .Lblk8_load_input_done; vmovdqu (7 * 16)(%rdx), RB3; .Lblk8_load_input_done: call __sm4_crypt_blk8; cmpq $6, %rcx; vmovdqu RA0, (0 * 16)(%rsi); vmovdqu RA1, (1 * 16)(%rsi); vmovdqu RA2, (2 * 16)(%rsi); vmovdqu RA3, (3 * 16)(%rsi); vmovdqu RB0, (4 * 16)(%rsi); jb .Lblk8_store_output_done; vmovdqu RB1, (5 * 16)(%rsi); je .Lblk8_store_output_done; vmovdqu RB2, (6 * 16)(%rsi); cmpq $7, %rcx; je .Lblk8_store_output_done; vmovdqu RB3, (7 * 16)(%rsi); .Lblk8_store_output_done: vzeroall; xorl %eax, %eax; ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_sm4_aesni_avx_crypt_blk1_8,.-_gcry_sm4_aesni_avx_crypt_blk1_8;) .align 16 .globl _gcry_sm4_aesni_avx_ctr_enc ELF(.type 
_gcry_sm4_aesni_avx_ctr_enc,@function;) _gcry_sm4_aesni_avx_ctr_enc: /* input: * %rdi: round key array, CTX * %rsi: dst (8 blocks) * %rdx: src (8 blocks) * %rcx: iv (big endian, 128bit) */ CFI_STARTPROC(); /* load IV and byteswap */ vmovdqu (%rcx), RA0; vmovdqa .Lbswap128_mask rRIP, RBSWAP; vpshufb RBSWAP, RA0, RTMP0; /* be => le */ vpcmpeqd RNOT, RNOT, RNOT; vpsrldq $8, RNOT, RNOT; /* low: -1, high: 0 */ #define inc_le128(x, minus_one, tmp) \ vpcmpeqq minus_one, x, tmp; \ vpsubq minus_one, x, x; \ vpslldq $8, tmp, tmp; \ vpsubq tmp, x, x; /* construct IVs */ inc_le128(RTMP0, RNOT, RTMP2); /* +1 */ vpshufb RBSWAP, RTMP0, RA1; inc_le128(RTMP0, RNOT, RTMP2); /* +2 */ vpshufb RBSWAP, RTMP0, RA2; inc_le128(RTMP0, RNOT, RTMP2); /* +3 */ vpshufb RBSWAP, RTMP0, RA3; inc_le128(RTMP0, RNOT, RTMP2); /* +4 */ vpshufb RBSWAP, RTMP0, RB0; inc_le128(RTMP0, RNOT, RTMP2); /* +5 */ vpshufb RBSWAP, RTMP0, RB1; inc_le128(RTMP0, RNOT, RTMP2); /* +6 */ vpshufb RBSWAP, RTMP0, RB2; inc_le128(RTMP0, RNOT, RTMP2); /* +7 */ vpshufb RBSWAP, RTMP0, RB3; inc_le128(RTMP0, RNOT, RTMP2); /* +8 */ vpshufb RBSWAP, RTMP0, RTMP1; /* store new IV */ vmovdqu RTMP1, (%rcx); call __sm4_crypt_blk8; vpxor (0 * 16)(%rdx), RA0, RA0; vpxor (1 * 16)(%rdx), RA1, RA1; vpxor (2 * 16)(%rdx), RA2, RA2; vpxor (3 * 16)(%rdx), RA3, RA3; vpxor (4 * 16)(%rdx), RB0, RB0; vpxor (5 * 16)(%rdx), RB1, RB1; vpxor (6 * 16)(%rdx), RB2, RB2; vpxor (7 * 16)(%rdx), RB3, RB3; vmovdqu RA0, (0 * 16)(%rsi); vmovdqu RA1, (1 * 16)(%rsi); vmovdqu RA2, (2 * 16)(%rsi); vmovdqu RA3, (3 * 16)(%rsi); vmovdqu RB0, (4 * 16)(%rsi); vmovdqu RB1, (5 * 16)(%rsi); vmovdqu RB2, (6 * 16)(%rsi); vmovdqu RB3, (7 * 16)(%rsi); vzeroall; ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_sm4_aesni_avx_ctr_enc,.-_gcry_sm4_aesni_avx_ctr_enc;) .align 16 .globl _gcry_sm4_aesni_avx_cbc_dec ELF(.type _gcry_sm4_aesni_avx_cbc_dec,@function;) _gcry_sm4_aesni_avx_cbc_dec: /* input: * %rdi: round key array, CTX * %rsi: dst (8 blocks) * %rdx: src (8 blocks) * %rcx: iv */ CFI_STARTPROC(); vmovdqu (0 * 16)(%rdx), RA0; vmovdqu (1 * 16)(%rdx), RA1; vmovdqu (2 * 16)(%rdx), RA2; vmovdqu (3 * 16)(%rdx), RA3; vmovdqu (4 * 16)(%rdx), RB0; vmovdqu (5 * 16)(%rdx), RB1; vmovdqu (6 * 16)(%rdx), RB2; vmovdqu (7 * 16)(%rdx), RB3; call __sm4_crypt_blk8; vmovdqu (7 * 16)(%rdx), RNOT; vpxor (%rcx), RA0, RA0; vpxor (0 * 16)(%rdx), RA1, RA1; vpxor (1 * 16)(%rdx), RA2, RA2; vpxor (2 * 16)(%rdx), RA3, RA3; vpxor (3 * 16)(%rdx), RB0, RB0; vpxor (4 * 16)(%rdx), RB1, RB1; vpxor (5 * 16)(%rdx), RB2, RB2; vpxor (6 * 16)(%rdx), RB3, RB3; vmovdqu RNOT, (%rcx); /* store new IV */ vmovdqu RA0, (0 * 16)(%rsi); vmovdqu RA1, (1 * 16)(%rsi); vmovdqu RA2, (2 * 16)(%rsi); vmovdqu RA3, (3 * 16)(%rsi); vmovdqu RB0, (4 * 16)(%rsi); vmovdqu RB1, (5 * 16)(%rsi); vmovdqu RB2, (6 * 16)(%rsi); vmovdqu RB3, (7 * 16)(%rsi); vzeroall; ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_sm4_aesni_avx_cbc_dec,.-_gcry_sm4_aesni_avx_cbc_dec;) .align 16 .globl _gcry_sm4_aesni_avx_cfb_dec ELF(.type _gcry_sm4_aesni_avx_cfb_dec,@function;) _gcry_sm4_aesni_avx_cfb_dec: /* input: * %rdi: round key array, CTX * %rsi: dst (8 blocks) * %rdx: src (8 blocks) * %rcx: iv */ CFI_STARTPROC(); /* Load input */ vmovdqu (%rcx), RA0; vmovdqu 0 * 16(%rdx), RA1; vmovdqu 1 * 16(%rdx), RA2; vmovdqu 2 * 16(%rdx), RA3; vmovdqu 3 * 16(%rdx), RB0; vmovdqu 4 * 16(%rdx), RB1; vmovdqu 5 * 16(%rdx), RB2; vmovdqu 6 * 16(%rdx), RB3; /* Update IV */ vmovdqu 7 * 16(%rdx), RNOT; vmovdqu RNOT, (%rcx); call __sm4_crypt_blk8; vpxor (0 * 16)(%rdx), RA0, RA0; vpxor (1 * 16)(%rdx), RA1, RA1; 
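For reference, the CFB decryption path here only ever runs the block cipher in the forward direction: each plaintext block is E_K(previous ciphertext block, or the IV for the first block) XORed with the current ciphertext block, which is why the routine loads the IV plus the first seven ciphertext blocks, encrypts them as one batch, and XORs the result against the eight ciphertext blocks. A compact C model of the same relation; block_enc_fn and the function names below are illustrative, not part of libgcrypt's API:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define BLK 16

/* Illustrative stand-in for a single forward SM4 block encryption. */
typedef void (*block_enc_fn)(const uint32_t *rk, uint8_t dst[BLK],
                             const uint8_t src[BLK]);

/* CFB decryption of n whole blocks: P_i = E_K(C_{i-1}) ^ C_i, with C_0 = IV. */
static void cfb_dec(block_enc_fn enc, const uint32_t *rk,
                    uint8_t *dst, const uint8_t *src,
                    size_t nblocks, uint8_t iv[BLK])
{
    uint8_t keystream[BLK];

    for (size_t i = 0; i < nblocks; i++) {
        enc(rk, keystream, iv);              /* E_K(C_{i-1}) */
        memcpy(iv, src + i * BLK, BLK);      /* C_i becomes next chain value */
        for (int j = 0; j < BLK; j++)
            dst[i * BLK + j] = keystream[j] ^ iv[j];   /* P_i */
    }
}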
vpxor (2 * 16)(%rdx), RA2, RA2; vpxor (3 * 16)(%rdx), RA3, RA3; vpxor (4 * 16)(%rdx), RB0, RB0; vpxor (5 * 16)(%rdx), RB1, RB1; vpxor (6 * 16)(%rdx), RB2, RB2; vpxor (7 * 16)(%rdx), RB3, RB3; vmovdqu RA0, (0 * 16)(%rsi); vmovdqu RA1, (1 * 16)(%rsi); vmovdqu RA2, (2 * 16)(%rsi); vmovdqu RA3, (3 * 16)(%rsi); vmovdqu RB0, (4 * 16)(%rsi); vmovdqu RB1, (5 * 16)(%rsi); vmovdqu RB2, (6 * 16)(%rsi); vmovdqu RB3, (7 * 16)(%rsi); vzeroall; ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_sm4_aesni_avx_cfb_dec,.-_gcry_sm4_aesni_avx_cfb_dec;) .align 16 .globl _gcry_sm4_aesni_avx_ocb_enc ELF(.type _gcry_sm4_aesni_avx_ocb_enc,@function;) _gcry_sm4_aesni_avx_ocb_enc: /* input: * %rdi: round key array, CTX * %rsi: dst (8 blocks) * %rdx: src (8 blocks) * %rcx: offset * %r8 : checksum * %r9 : L pointers (void *L[8]) */ CFI_STARTPROC(); subq $(4 * 8), %rsp; CFI_ADJUST_CFA_OFFSET(4 * 8); movq %r10, (0 * 8)(%rsp); movq %r11, (1 * 8)(%rsp); movq %r12, (2 * 8)(%rsp); movq %r13, (3 * 8)(%rsp); CFI_REL_OFFSET(%r10, 0 * 8); CFI_REL_OFFSET(%r11, 1 * 8); CFI_REL_OFFSET(%r12, 2 * 8); CFI_REL_OFFSET(%r13, 3 * 8); vmovdqu (%rcx), RTMP0; vmovdqu (%r8), RTMP1; /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ /* Checksum_i = Checksum_{i-1} xor P_i */ /* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i) */ #define OCB_INPUT(n, lreg, xreg) \ vmovdqu (n * 16)(%rdx), xreg; \ vpxor (lreg), RTMP0, RTMP0; \ vpxor xreg, RTMP1, RTMP1; \ vpxor RTMP0, xreg, xreg; \ vmovdqu RTMP0, (n * 16)(%rsi); movq (0 * 8)(%r9), %r10; movq (1 * 8)(%r9), %r11; movq (2 * 8)(%r9), %r12; movq (3 * 8)(%r9), %r13; OCB_INPUT(0, %r10, RA0); OCB_INPUT(1, %r11, RA1); OCB_INPUT(2, %r12, RA2); OCB_INPUT(3, %r13, RA3); movq (4 * 8)(%r9), %r10; movq (5 * 8)(%r9), %r11; movq (6 * 8)(%r9), %r12; movq (7 * 8)(%r9), %r13; OCB_INPUT(4, %r10, RB0); OCB_INPUT(5, %r11, RB1); OCB_INPUT(6, %r12, RB2); OCB_INPUT(7, %r13, RB3); #undef OCB_INPUT vmovdqu RTMP0, (%rcx); vmovdqu RTMP1, (%r8); movq (0 * 8)(%rsp), %r10; CFI_RESTORE(%r10); movq (1 * 8)(%rsp), %r11; CFI_RESTORE(%r11); movq (2 * 8)(%rsp), %r12; CFI_RESTORE(%r12); movq (3 * 8)(%rsp), %r13; CFI_RESTORE(%r13); call __sm4_crypt_blk8; addq $(4 * 8), %rsp; CFI_ADJUST_CFA_OFFSET(-4 * 8); vpxor (0 * 16)(%rsi), RA0, RA0; vpxor (1 * 16)(%rsi), RA1, RA1; vpxor (2 * 16)(%rsi), RA2, RA2; vpxor (3 * 16)(%rsi), RA3, RA3; vpxor (4 * 16)(%rsi), RB0, RB0; vpxor (5 * 16)(%rsi), RB1, RB1; vpxor (6 * 16)(%rsi), RB2, RB2; vpxor (7 * 16)(%rsi), RB3, RB3; vmovdqu RA0, (0 * 16)(%rsi); vmovdqu RA1, (1 * 16)(%rsi); vmovdqu RA2, (2 * 16)(%rsi); vmovdqu RA3, (3 * 16)(%rsi); vmovdqu RB0, (4 * 16)(%rsi); vmovdqu RB1, (5 * 16)(%rsi); vmovdqu RB2, (6 * 16)(%rsi); vmovdqu RB3, (7 * 16)(%rsi); vzeroall; ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_sm4_aesni_avx_ocb_enc,.-_gcry_sm4_aesni_avx_ocb_enc;) .align 16 .globl _gcry_sm4_aesni_avx_ocb_dec ELF(.type _gcry_sm4_aesni_avx_ocb_dec,@function;) _gcry_sm4_aesni_avx_ocb_dec: /* input: * %rdi: round key array, CTX * %rsi: dst (8 blocks) * %rdx: src (8 blocks) * %rcx: offset * %r8 : checksum * %r9 : L pointers (void *L[8]) */ CFI_STARTPROC(); subq $(4 * 8), %rsp; CFI_ADJUST_CFA_OFFSET(4 * 8); movq %r10, (0 * 8)(%rsp); movq %r11, (1 * 8)(%rsp); movq %r12, (2 * 8)(%rsp); movq %r13, (3 * 8)(%rsp); CFI_REL_OFFSET(%r10, 0 * 8); CFI_REL_OFFSET(%r11, 1 * 8); CFI_REL_OFFSET(%r12, 2 * 8); CFI_REL_OFFSET(%r13, 3 * 8); movdqu (%rcx), RTMP0; /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ /* P_i = Offset_i xor DECIPHER(K, C_i xor Offset_i) */ #define OCB_INPUT(n, lreg, xreg) \ vmovdqu (n * 16)(%rdx), xreg; \ vpxor (lreg), 
RTMP0, RTMP0; \ vpxor RTMP0, xreg, xreg; \ vmovdqu RTMP0, (n * 16)(%rsi); movq (0 * 8)(%r9), %r10; movq (1 * 8)(%r9), %r11; movq (2 * 8)(%r9), %r12; movq (3 * 8)(%r9), %r13; OCB_INPUT(0, %r10, RA0); OCB_INPUT(1, %r11, RA1); OCB_INPUT(2, %r12, RA2); OCB_INPUT(3, %r13, RA3); movq (4 * 8)(%r9), %r10; movq (5 * 8)(%r9), %r11; movq (6 * 8)(%r9), %r12; movq (7 * 8)(%r9), %r13; OCB_INPUT(4, %r10, RB0); OCB_INPUT(5, %r11, RB1); OCB_INPUT(6, %r12, RB2); OCB_INPUT(7, %r13, RB3); #undef OCB_INPUT vmovdqu RTMP0, (%rcx); movq (0 * 8)(%rsp), %r10; CFI_RESTORE(%r10); movq (1 * 8)(%rsp), %r11; CFI_RESTORE(%r11); movq (2 * 8)(%rsp), %r12; CFI_RESTORE(%r12); movq (3 * 8)(%rsp), %r13; CFI_RESTORE(%r13); call __sm4_crypt_blk8; addq $(4 * 8), %rsp; CFI_ADJUST_CFA_OFFSET(-4 * 8); vmovdqu (%r8), RTMP0; vpxor (0 * 16)(%rsi), RA0, RA0; vpxor (1 * 16)(%rsi), RA1, RA1; vpxor (2 * 16)(%rsi), RA2, RA2; vpxor (3 * 16)(%rsi), RA3, RA3; vpxor (4 * 16)(%rsi), RB0, RB0; vpxor (5 * 16)(%rsi), RB1, RB1; vpxor (6 * 16)(%rsi), RB2, RB2; vpxor (7 * 16)(%rsi), RB3, RB3; /* Checksum_i = Checksum_{i-1} xor P_i */ vmovdqu RA0, (0 * 16)(%rsi); vpxor RA0, RTMP0, RTMP0; vmovdqu RA1, (1 * 16)(%rsi); vpxor RA1, RTMP0, RTMP0; vmovdqu RA2, (2 * 16)(%rsi); vpxor RA2, RTMP0, RTMP0; vmovdqu RA3, (3 * 16)(%rsi); vpxor RA3, RTMP0, RTMP0; vmovdqu RB0, (4 * 16)(%rsi); vpxor RB0, RTMP0, RTMP0; vmovdqu RB1, (5 * 16)(%rsi); vpxor RB1, RTMP0, RTMP0; vmovdqu RB2, (6 * 16)(%rsi); vpxor RB2, RTMP0, RTMP0; vmovdqu RB3, (7 * 16)(%rsi); vpxor RB3, RTMP0, RTMP0; vmovdqu RTMP0, (%r8); vzeroall; ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_sm4_aesni_avx_ocb_dec,.-_gcry_sm4_aesni_avx_ocb_dec;) .align 16 .globl _gcry_sm4_aesni_avx_ocb_auth ELF(.type _gcry_sm4_aesni_avx_ocb_auth,@function;) _gcry_sm4_aesni_avx_ocb_auth: /* input: * %rdi: round key array, CTX * %rsi: abuf (8 blocks) * %rdx: offset * %rcx: checksum * %r8 : L pointers (void *L[8]) */ CFI_STARTPROC(); subq $(4 * 8), %rsp; CFI_ADJUST_CFA_OFFSET(4 * 8); movq %r10, (0 * 8)(%rsp); movq %r11, (1 * 8)(%rsp); movq %r12, (2 * 8)(%rsp); movq %r13, (3 * 8)(%rsp); CFI_REL_OFFSET(%r10, 0 * 8); CFI_REL_OFFSET(%r11, 1 * 8); CFI_REL_OFFSET(%r12, 2 * 8); CFI_REL_OFFSET(%r13, 3 * 8); vmovdqu (%rdx), RTMP0; /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ /* Sum_i = Sum_{i-1} xor ENCIPHER(K, A_i xor Offset_i) */ #define OCB_INPUT(n, lreg, xreg) \ vmovdqu (n * 16)(%rsi), xreg; \ vpxor (lreg), RTMP0, RTMP0; \ vpxor RTMP0, xreg, xreg; movq (0 * 8)(%r8), %r10; movq (1 * 8)(%r8), %r11; movq (2 * 8)(%r8), %r12; movq (3 * 8)(%r8), %r13; OCB_INPUT(0, %r10, RA0); OCB_INPUT(1, %r11, RA1); OCB_INPUT(2, %r12, RA2); OCB_INPUT(3, %r13, RA3); movq (4 * 8)(%r8), %r10; movq (5 * 8)(%r8), %r11; movq (6 * 8)(%r8), %r12; movq (7 * 8)(%r8), %r13; OCB_INPUT(4, %r10, RB0); OCB_INPUT(5, %r11, RB1); OCB_INPUT(6, %r12, RB2); OCB_INPUT(7, %r13, RB3); #undef OCB_INPUT vmovdqu RTMP0, (%rdx); movq (0 * 8)(%rsp), %r10; CFI_RESTORE(%r10); movq (1 * 8)(%rsp), %r11; CFI_RESTORE(%r11); movq (2 * 8)(%rsp), %r12; CFI_RESTORE(%r12); movq (3 * 8)(%rsp), %r13; CFI_RESTORE(%r13); call __sm4_crypt_blk8; addq $(4 * 8), %rsp; CFI_ADJUST_CFA_OFFSET(-4 * 8); vmovdqu (%rcx), RTMP0; vpxor RB0, RA0, RA0; vpxor RB1, RA1, RA1; vpxor RB2, RA2, RA2; vpxor RB3, RA3, RA3; vpxor RTMP0, RA3, RA3; vpxor RA2, RA0, RA0; vpxor RA3, RA1, RA1; vpxor RA1, RA0, RA0; vmovdqu RA0, (%rcx); vzeroall; ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_sm4_aesni_avx_ocb_auth,.-_gcry_sm4_aesni_avx_ocb_auth;) #endif /*defined(ENABLE_AESNI_SUPPORT) && defined(ENABLE_AVX_SUPPORT)*/ #endif 
/*__x86_64*/ diff --git a/cipher/sm4-aesni-avx2-amd64.S b/cipher/sm4-aesni-avx2-amd64.S index 514a0b4e..acd37cff 100644 --- a/cipher/sm4-aesni-avx2-amd64.S +++ b/cipher/sm4-aesni-avx2-amd64.S @@ -1,907 +1,912 @@ /* sm4-avx2-amd64.S - AVX2 implementation of SM4 cipher * * Copyright (C) 2020, 2022 Jussi Kivilinna * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . */ /* Based on SM4 AES-NI work by Markku-Juhani O. Saarinen at: * https://github.com/mjosaarinen/sm4ni */ #include #ifdef __x86_64 #if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && \ defined(ENABLE_AESNI_SUPPORT) && defined(ENABLE_AVX2_SUPPORT) #include "asm-common-amd64.h" /* vector registers */ #define RX0 %ymm0 #define RX1 %ymm1 #define MASK_4BIT %ymm2 #define RTMP0 %ymm3 #define RTMP1 %ymm4 #define RTMP2 %ymm5 #define RTMP3 %ymm6 #define RTMP4 %ymm7 #define RA0 %ymm8 #define RA1 %ymm9 #define RA2 %ymm10 #define RA3 %ymm11 #define RA0x %xmm8 #define RA1x %xmm9 #define RA2x %xmm10 #define RA3x %xmm11 #define RB0 %ymm12 #define RB1 %ymm13 #define RB2 %ymm14 #define RB3 %ymm15 #define RB0x %xmm12 #define RB1x %xmm13 #define RB2x %xmm14 #define RB3x %xmm15 #define RNOT %ymm0 #define RBSWAP %ymm1 #define RX0x %xmm0 #define RX1x %xmm1 #define MASK_4BITx %xmm2 #define RNOTx %xmm0 #define RBSWAPx %xmm1 #define RTMP0x %xmm3 #define RTMP1x %xmm4 #define RTMP2x %xmm5 #define RTMP3x %xmm6 #define RTMP4x %xmm7 /********************************************************************** helper macros **********************************************************************/ /* Transpose four 32-bit words between 128-bit vector lanes. */ #define transpose_4x4(x0, x1, x2, x3, t1, t2) \ vpunpckhdq x1, x0, t2; \ vpunpckldq x1, x0, x0; \ \ vpunpckldq x3, x2, t1; \ vpunpckhdq x3, x2, x2; \ \ vpunpckhqdq t1, x0, x1; \ vpunpcklqdq t1, x0, x0; \ \ vpunpckhqdq x2, t2, x3; \ vpunpcklqdq x2, t2, x2; /* post-SubByte transform. */ #define transform_pre(x, lo_t, hi_t, mask4bit, tmp0) \ vpand x, mask4bit, tmp0; \ vpandn x, mask4bit, x; \ vpsrld $4, x, x; \ \ vpshufb tmp0, lo_t, tmp0; \ vpshufb x, hi_t, x; \ vpxor tmp0, x, x; /* post-SubByte transform. Note: x has been XOR'ed with mask4bit by * 'vaeslastenc' instruction. */ #define transform_post(x, lo_t, hi_t, mask4bit, tmp0) \ vpandn mask4bit, x, tmp0; \ vpsrld $4, x, x; \ vpand x, mask4bit, x; \ \ vpshufb tmp0, lo_t, tmp0; \ vpshufb x, hi_t, x; \ vpxor tmp0, x, x; /********************************************************************** 16-way SM4 with AES-NI and AVX **********************************************************************/ -.text +SECTION_RODATA .align 16 +ELF(.type _sm4_aesni_avx2_consts,@object) +_sm4_aesni_avx2_consts: + /* * Following four affine transform look-up tables are from work by * Markku-Juhani O. Saarinen, at https://github.com/mjosaarinen/sm4ni * * These allow exposing SM4 S-Box from AES SubByte. 
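 *
 * Background (informal sketch, constants not re-derived here): the trick
 * relies on the SM4 S-box being affine-equivalent to the AES S-box, i.e.
 * S_sm4(x) = A2(S_aes(A1(x) ^ c1)) ^ c2 for fixed 8x8 bit matrices A1, A2
 * and byte constants c1, c2.  The pre-filter tables apply the input
 * affine map with two 4-bit vpshufb lookups, vaesenclast then evaluates
 * AES SubBytes on every byte lane (its ShiftRows permutation is undone
 * with the .Linv_shift_row shuffles), and the post-filter tables apply
 * the output affine map, yielding sixteen SM4 S-box lookups per lane.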
*/ /* pre-SubByte affine transform, from SM4 field to AES field. */ .Lpre_tf_lo_s: .quad 0x9197E2E474720701, 0xC7C1B4B222245157 .Lpre_tf_hi_s: .quad 0xE240AB09EB49A200, 0xF052B91BF95BB012 /* post-SubByte affine transform, from AES field to SM4 field. */ .Lpost_tf_lo_s: .quad 0x5B67F2CEA19D0834, 0xEDD14478172BBE82 .Lpost_tf_hi_s: .quad 0xAE7201DD73AFDC00, 0x11CDBE62CC1063BF /* For isolating SubBytes from AESENCLAST, inverse shift row */ .Linv_shift_row: .byte 0x00, 0x0d, 0x0a, 0x07, 0x04, 0x01, 0x0e, 0x0b .byte 0x08, 0x05, 0x02, 0x0f, 0x0c, 0x09, 0x06, 0x03 /* Inverse shift row + Rotate left by 8 bits on 32-bit words with vpshufb */ .Linv_shift_row_rol_8: .byte 0x07, 0x00, 0x0d, 0x0a, 0x0b, 0x04, 0x01, 0x0e .byte 0x0f, 0x08, 0x05, 0x02, 0x03, 0x0c, 0x09, 0x06 /* Inverse shift row + Rotate left by 16 bits on 32-bit words with vpshufb */ .Linv_shift_row_rol_16: .byte 0x0a, 0x07, 0x00, 0x0d, 0x0e, 0x0b, 0x04, 0x01 .byte 0x02, 0x0f, 0x08, 0x05, 0x06, 0x03, 0x0c, 0x09 /* Inverse shift row + Rotate left by 24 bits on 32-bit words with vpshufb */ .Linv_shift_row_rol_24: .byte 0x0d, 0x0a, 0x07, 0x00, 0x01, 0x0e, 0x0b, 0x04 .byte 0x05, 0x02, 0x0f, 0x08, 0x09, 0x06, 0x03, 0x0c /* For CTR-mode IV byteswap */ .Lbswap128_mask: .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 /* For input word byte-swap */ .Lbswap32_mask: .byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 .align 4 /* 4-bit mask */ .L0f0f0f0f: .long 0x0f0f0f0f +.text + .align 16 ELF(.type __sm4_crypt_blk16,@function;) __sm4_crypt_blk16: /* input: * %rdi: ctx, CTX * RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: sixteen parallel * plaintext blocks * output: * RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: sixteen parallel * ciphertext blocks */ CFI_STARTPROC(); vbroadcasti128 .Lbswap32_mask rRIP, RTMP2; vpshufb RTMP2, RA0, RA0; vpshufb RTMP2, RA1, RA1; vpshufb RTMP2, RA2, RA2; vpshufb RTMP2, RA3, RA3; vpshufb RTMP2, RB0, RB0; vpshufb RTMP2, RB1, RB1; vpshufb RTMP2, RB2, RB2; vpshufb RTMP2, RB3, RB3; vpbroadcastd .L0f0f0f0f rRIP, MASK_4BIT; transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1); transpose_4x4(RB0, RB1, RB2, RB3, RTMP0, RTMP1); #define ROUND(round, s0, s1, s2, s3, r0, r1, r2, r3) \ vpbroadcastd (4*(round))(%rdi), RX0; \ vbroadcasti128 .Lpre_tf_lo_s rRIP, RTMP4; \ vbroadcasti128 .Lpre_tf_hi_s rRIP, RTMP1; \ vmovdqa RX0, RX1; \ vpxor s1, RX0, RX0; \ vpxor s2, RX0, RX0; \ vpxor s3, RX0, RX0; /* s1 ^ s2 ^ s3 ^ rk */ \ vbroadcasti128 .Lpost_tf_lo_s rRIP, RTMP2; \ vbroadcasti128 .Lpost_tf_hi_s rRIP, RTMP3; \ vpxor r1, RX1, RX1; \ vpxor r2, RX1, RX1; \ vpxor r3, RX1, RX1; /* r1 ^ r2 ^ r3 ^ rk */ \ \ /* sbox, non-linear part */ \ transform_pre(RX0, RTMP4, RTMP1, MASK_4BIT, RTMP0); \ transform_pre(RX1, RTMP4, RTMP1, MASK_4BIT, RTMP0); \ vextracti128 $1, RX0, RTMP4x; \ vextracti128 $1, RX1, RTMP0x; \ vaesenclast MASK_4BITx, RX0x, RX0x; \ vaesenclast MASK_4BITx, RTMP4x, RTMP4x; \ vaesenclast MASK_4BITx, RX1x, RX1x; \ vaesenclast MASK_4BITx, RTMP0x, RTMP0x; \ vinserti128 $1, RTMP4x, RX0, RX0; \ vbroadcasti128 .Linv_shift_row rRIP, RTMP4; \ vinserti128 $1, RTMP0x, RX1, RX1; \ transform_post(RX0, RTMP2, RTMP3, MASK_4BIT, RTMP0); \ transform_post(RX1, RTMP2, RTMP3, MASK_4BIT, RTMP0); \ \ /* linear part */ \ vpshufb RTMP4, RX0, RTMP0; \ vpxor RTMP0, s0, s0; /* s0 ^ x */ \ vpshufb RTMP4, RX1, RTMP2; \ vbroadcasti128 .Linv_shift_row_rol_8 rRIP, RTMP4; \ vpxor RTMP2, r0, r0; /* r0 ^ x */ \ vpshufb RTMP4, RX0, RTMP1; \ vpxor RTMP1, RTMP0, RTMP0; /* x ^ rol(x,8) */ \ vpshufb RTMP4, RX1, RTMP3; \ vbroadcasti128 .Linv_shift_row_rol_16 rRIP, RTMP4; \ 
vpxor RTMP3, RTMP2, RTMP2; /* x ^ rol(x,8) */ \ vpshufb RTMP4, RX0, RTMP1; \ vpxor RTMP1, RTMP0, RTMP0; /* x ^ rol(x,8) ^ rol(x,16) */ \ vpshufb RTMP4, RX1, RTMP3; \ vbroadcasti128 .Linv_shift_row_rol_24 rRIP, RTMP4; \ vpxor RTMP3, RTMP2, RTMP2; /* x ^ rol(x,8) ^ rol(x,16) */ \ vpshufb RTMP4, RX0, RTMP1; \ vpxor RTMP1, s0, s0; /* s0 ^ x ^ rol(x,24) */ \ vpslld $2, RTMP0, RTMP1; \ vpsrld $30, RTMP0, RTMP0; \ vpxor RTMP0, s0, s0; \ vpxor RTMP1, s0, s0; /* s0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \ vpshufb RTMP4, RX1, RTMP3; \ vpxor RTMP3, r0, r0; /* r0 ^ x ^ rol(x,24) */ \ vpslld $2, RTMP2, RTMP3; \ vpsrld $30, RTMP2, RTMP2; \ vpxor RTMP2, r0, r0; \ vpxor RTMP3, r0, r0; /* r0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ leaq (32*4)(%rdi), %rax; .align 16 .Lroundloop_blk16: ROUND(0, RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3); ROUND(1, RA1, RA2, RA3, RA0, RB1, RB2, RB3, RB0); ROUND(2, RA2, RA3, RA0, RA1, RB2, RB3, RB0, RB1); ROUND(3, RA3, RA0, RA1, RA2, RB3, RB0, RB1, RB2); leaq (4*4)(%rdi), %rdi; cmpq %rax, %rdi; jne .Lroundloop_blk16; #undef ROUND vbroadcasti128 .Lbswap128_mask rRIP, RTMP2; transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1); transpose_4x4(RB0, RB1, RB2, RB3, RTMP0, RTMP1); vpshufb RTMP2, RA0, RA0; vpshufb RTMP2, RA1, RA1; vpshufb RTMP2, RA2, RA2; vpshufb RTMP2, RA3, RA3; vpshufb RTMP2, RB0, RB0; vpshufb RTMP2, RB1, RB1; vpshufb RTMP2, RB2, RB2; vpshufb RTMP2, RB3, RB3; ret_spec_stop; CFI_ENDPROC(); ELF(.size __sm4_crypt_blk16,.-__sm4_crypt_blk16;) .align 16 .globl _gcry_sm4_aesni_avx2_crypt_blk1_16 ELF(.type _gcry_sm4_aesni_avx2_crypt_blk1_16,@function;) _gcry_sm4_aesni_avx2_crypt_blk1_16: /* input: * %rdi: round key array, CTX * %rsi: dst (1..16 blocks) * %rdx: src (1..16 blocks) * %rcx: num blocks (1..16) */ CFI_STARTPROC(); #define LOAD_INPUT(offset, yreg) \ cmpq $(1 + 2 * (offset)), %rcx; \ jb .Lblk16_load_input_done; \ ja 1f; \ vmovdqu (offset) * 32(%rdx), yreg##x; \ jmp .Lblk16_load_input_done; \ 1: \ vmovdqu (offset) * 32(%rdx), yreg; LOAD_INPUT(0, RA0); LOAD_INPUT(1, RA1); LOAD_INPUT(2, RA2); LOAD_INPUT(3, RA3); LOAD_INPUT(4, RB0); LOAD_INPUT(5, RB1); LOAD_INPUT(6, RB2); LOAD_INPUT(7, RB3); #undef LOAD_INPUT .Lblk16_load_input_done: call __sm4_crypt_blk16; #define STORE_OUTPUT(yreg, offset) \ cmpq $(1 + 2 * (offset)), %rcx; \ jb .Lblk16_store_output_done; \ ja 1f; \ vmovdqu yreg##x, (offset) * 32(%rsi); \ jmp .Lblk16_store_output_done; \ 1: \ vmovdqu yreg, (offset) * 32(%rsi); STORE_OUTPUT(RA0, 0); STORE_OUTPUT(RA1, 1); STORE_OUTPUT(RA2, 2); STORE_OUTPUT(RA3, 3); STORE_OUTPUT(RB0, 4); STORE_OUTPUT(RB1, 5); STORE_OUTPUT(RB2, 6); STORE_OUTPUT(RB3, 7); #undef STORE_OUTPUT .Lblk16_store_output_done: vzeroall; xorl %eax, %eax; ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_sm4_aesni_avx2_crypt_blk1_16,.-_gcry_sm4_aesni_avx2_crypt_blk1_16;) #define inc_le128(x, minus_one, tmp) \ vpcmpeqq minus_one, x, tmp; \ vpsubq minus_one, x, x; \ vpslldq $8, tmp, tmp; \ vpsubq tmp, x, x; .align 16 .globl _gcry_sm4_aesni_avx2_ctr_enc ELF(.type _gcry_sm4_aesni_avx2_ctr_enc,@function;) _gcry_sm4_aesni_avx2_ctr_enc: /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) * %rdx: src (16 blocks) * %rcx: iv (big endian, 128bit) */ CFI_STARTPROC(); movq 8(%rcx), %rax; bswapq %rax; vbroadcasti128 .Lbswap128_mask rRIP, RTMP3; vpcmpeqd RNOT, RNOT, RNOT; vpsrldq $8, RNOT, RNOT; /* ab: -1:0 ; cd: -1:0 */ vpaddq RNOT, RNOT, RTMP2; /* ab: -2:0 ; cd: -2:0 */ /* load IV and byteswap */ vmovdqu (%rcx), RTMP4x; vpshufb RTMP3x, RTMP4x, RTMP4x; vmovdqa RTMP4x, RTMP0x; inc_le128(RTMP4x, RNOTx, 
RTMP1x); vinserti128 $1, RTMP4x, RTMP0, RTMP0; vpshufb RTMP3, RTMP0, RA0; /* +1 ; +0 */ /* check need for handling 64-bit overflow and carry */ cmpq $(0xffffffffffffffff - 16), %rax; ja .Lhandle_ctr_carry; /* construct IVs */ vpsubq RTMP2, RTMP0, RTMP0; /* +3 ; +2 */ vpshufb RTMP3, RTMP0, RA1; vpsubq RTMP2, RTMP0, RTMP0; /* +5 ; +4 */ vpshufb RTMP3, RTMP0, RA2; vpsubq RTMP2, RTMP0, RTMP0; /* +7 ; +6 */ vpshufb RTMP3, RTMP0, RA3; vpsubq RTMP2, RTMP0, RTMP0; /* +9 ; +8 */ vpshufb RTMP3, RTMP0, RB0; vpsubq RTMP2, RTMP0, RTMP0; /* +11 ; +10 */ vpshufb RTMP3, RTMP0, RB1; vpsubq RTMP2, RTMP0, RTMP0; /* +13 ; +12 */ vpshufb RTMP3, RTMP0, RB2; vpsubq RTMP2, RTMP0, RTMP0; /* +15 ; +14 */ vpshufb RTMP3, RTMP0, RB3; vpsubq RTMP2, RTMP0, RTMP0; /* +16 */ vpshufb RTMP3x, RTMP0x, RTMP0x; jmp .Lctr_carry_done; .Lhandle_ctr_carry: /* construct IVs */ inc_le128(RTMP0, RNOT, RTMP1); inc_le128(RTMP0, RNOT, RTMP1); vpshufb RTMP3, RTMP0, RA1; /* +3 ; +2 */ inc_le128(RTMP0, RNOT, RTMP1); inc_le128(RTMP0, RNOT, RTMP1); vpshufb RTMP3, RTMP0, RA2; /* +5 ; +4 */ inc_le128(RTMP0, RNOT, RTMP1); inc_le128(RTMP0, RNOT, RTMP1); vpshufb RTMP3, RTMP0, RA3; /* +7 ; +6 */ inc_le128(RTMP0, RNOT, RTMP1); inc_le128(RTMP0, RNOT, RTMP1); vpshufb RTMP3, RTMP0, RB0; /* +9 ; +8 */ inc_le128(RTMP0, RNOT, RTMP1); inc_le128(RTMP0, RNOT, RTMP1); vpshufb RTMP3, RTMP0, RB1; /* +11 ; +10 */ inc_le128(RTMP0, RNOT, RTMP1); inc_le128(RTMP0, RNOT, RTMP1); vpshufb RTMP3, RTMP0, RB2; /* +13 ; +12 */ inc_le128(RTMP0, RNOT, RTMP1); inc_le128(RTMP0, RNOT, RTMP1); vpshufb RTMP3, RTMP0, RB3; /* +15 ; +14 */ inc_le128(RTMP0, RNOT, RTMP1); vextracti128 $1, RTMP0, RTMP0x; vpshufb RTMP3x, RTMP0x, RTMP0x; /* +16 */ .align 4 .Lctr_carry_done: /* store new IV */ vmovdqu RTMP0x, (%rcx); call __sm4_crypt_blk16; vpxor (0 * 32)(%rdx), RA0, RA0; vpxor (1 * 32)(%rdx), RA1, RA1; vpxor (2 * 32)(%rdx), RA2, RA2; vpxor (3 * 32)(%rdx), RA3, RA3; vpxor (4 * 32)(%rdx), RB0, RB0; vpxor (5 * 32)(%rdx), RB1, RB1; vpxor (6 * 32)(%rdx), RB2, RB2; vpxor (7 * 32)(%rdx), RB3, RB3; vmovdqu RA0, (0 * 32)(%rsi); vmovdqu RA1, (1 * 32)(%rsi); vmovdqu RA2, (2 * 32)(%rsi); vmovdqu RA3, (3 * 32)(%rsi); vmovdqu RB0, (4 * 32)(%rsi); vmovdqu RB1, (5 * 32)(%rsi); vmovdqu RB2, (6 * 32)(%rsi); vmovdqu RB3, (7 * 32)(%rsi); vzeroall; ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_sm4_aesni_avx2_ctr_enc,.-_gcry_sm4_aesni_avx2_ctr_enc;) .align 16 .globl _gcry_sm4_aesni_avx2_cbc_dec ELF(.type _gcry_sm4_aesni_avx2_cbc_dec,@function;) _gcry_sm4_aesni_avx2_cbc_dec: /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) * %rdx: src (16 blocks) * %rcx: iv */ CFI_STARTPROC(); vmovdqu (0 * 32)(%rdx), RA0; vmovdqu (1 * 32)(%rdx), RA1; vmovdqu (2 * 32)(%rdx), RA2; vmovdqu (3 * 32)(%rdx), RA3; vmovdqu (4 * 32)(%rdx), RB0; vmovdqu (5 * 32)(%rdx), RB1; vmovdqu (6 * 32)(%rdx), RB2; vmovdqu (7 * 32)(%rdx), RB3; call __sm4_crypt_blk16; vmovdqu (%rcx), RNOTx; vinserti128 $1, (%rdx), RNOT, RNOT; vpxor RNOT, RA0, RA0; vpxor (0 * 32 + 16)(%rdx), RA1, RA1; vpxor (1 * 32 + 16)(%rdx), RA2, RA2; vpxor (2 * 32 + 16)(%rdx), RA3, RA3; vpxor (3 * 32 + 16)(%rdx), RB0, RB0; vpxor (4 * 32 + 16)(%rdx), RB1, RB1; vpxor (5 * 32 + 16)(%rdx), RB2, RB2; vpxor (6 * 32 + 16)(%rdx), RB3, RB3; vmovdqu (7 * 32 + 16)(%rdx), RNOTx; vmovdqu RNOTx, (%rcx); /* store new IV */ vmovdqu RA0, (0 * 32)(%rsi); vmovdqu RA1, (1 * 32)(%rsi); vmovdqu RA2, (2 * 32)(%rsi); vmovdqu RA3, (3 * 32)(%rsi); vmovdqu RB0, (4 * 32)(%rsi); vmovdqu RB1, (5 * 32)(%rsi); vmovdqu RB2, (6 * 32)(%rsi); vmovdqu RB3, (7 * 32)(%rsi); vzeroall; ret_spec_stop; CFI_ENDPROC(); 
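One detail worth spelling out from the CTR path above: %rax holds the byte-swapped low 64 bits of the counter, and the cmpq against 0xffffffffffffffff - 16 decides whether adding 16 block counts could carry into the high 64 bits. If not, the sixteen counter values are built cheaply with packed 64-bit subtractions; otherwise the code falls back to full 128-bit increments via inc_le128. A small C model of the counter arithmetic, with illustrative names (struct ctr128, ctr_inc) that are not taken from the source:

#include <stdint.h>

/* Big-endian 128-bit CTR value modelled as two 64-bit halves. */
struct ctr128 { uint64_t hi, lo; };

/* Full 128-bit increment: what inc_le128 implements with SIMD compares. */
static void ctr_inc(struct ctr128 *c)
{
    if (++c->lo == 0)   /* low half wrapped, propagate the carry */
        c->hi++;
}

/* Fast-path test: if adding 16 cannot overflow the low 64 bits, all
 * sixteen counters can be generated without per-block carry handling
 * (the cmpq / ja .Lhandle_ctr_carry check). */
static int needs_carry_path(const struct ctr128 *c)
{
    return c->lo > UINT64_MAX - 16;
}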
ELF(.size _gcry_sm4_aesni_avx2_cbc_dec,.-_gcry_sm4_aesni_avx2_cbc_dec;) .align 16 .globl _gcry_sm4_aesni_avx2_cfb_dec ELF(.type _gcry_sm4_aesni_avx2_cfb_dec,@function;) _gcry_sm4_aesni_avx2_cfb_dec: /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) * %rdx: src (16 blocks) * %rcx: iv */ CFI_STARTPROC(); /* Load input */ vmovdqu (%rcx), RNOTx; vinserti128 $1, (%rdx), RNOT, RA0; vmovdqu (0 * 32 + 16)(%rdx), RA1; vmovdqu (1 * 32 + 16)(%rdx), RA2; vmovdqu (2 * 32 + 16)(%rdx), RA3; vmovdqu (3 * 32 + 16)(%rdx), RB0; vmovdqu (4 * 32 + 16)(%rdx), RB1; vmovdqu (5 * 32 + 16)(%rdx), RB2; vmovdqu (6 * 32 + 16)(%rdx), RB3; /* Update IV */ vmovdqu (7 * 32 + 16)(%rdx), RNOTx; vmovdqu RNOTx, (%rcx); call __sm4_crypt_blk16; vpxor (0 * 32)(%rdx), RA0, RA0; vpxor (1 * 32)(%rdx), RA1, RA1; vpxor (2 * 32)(%rdx), RA2, RA2; vpxor (3 * 32)(%rdx), RA3, RA3; vpxor (4 * 32)(%rdx), RB0, RB0; vpxor (5 * 32)(%rdx), RB1, RB1; vpxor (6 * 32)(%rdx), RB2, RB2; vpxor (7 * 32)(%rdx), RB3, RB3; vmovdqu RA0, (0 * 32)(%rsi); vmovdqu RA1, (1 * 32)(%rsi); vmovdqu RA2, (2 * 32)(%rsi); vmovdqu RA3, (3 * 32)(%rsi); vmovdqu RB0, (4 * 32)(%rsi); vmovdqu RB1, (5 * 32)(%rsi); vmovdqu RB2, (6 * 32)(%rsi); vmovdqu RB3, (7 * 32)(%rsi); vzeroall; ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_sm4_aesni_avx2_cfb_dec,.-_gcry_sm4_aesni_avx2_cfb_dec;) .align 16 .globl _gcry_sm4_aesni_avx2_ocb_enc ELF(.type _gcry_sm4_aesni_avx2_ocb_enc,@function;) _gcry_sm4_aesni_avx2_ocb_enc: /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) * %rdx: src (16 blocks) * %rcx: offset * %r8 : checksum * %r9 : L pointers (void *L[16]) */ CFI_STARTPROC(); subq $(4 * 8), %rsp; CFI_ADJUST_CFA_OFFSET(4 * 8); movq %r10, (0 * 8)(%rsp); movq %r11, (1 * 8)(%rsp); movq %r12, (2 * 8)(%rsp); movq %r13, (3 * 8)(%rsp); CFI_REL_OFFSET(%r10, 0 * 8); CFI_REL_OFFSET(%r11, 1 * 8); CFI_REL_OFFSET(%r12, 2 * 8); CFI_REL_OFFSET(%r13, 3 * 8); vmovdqu (%rcx), RTMP0x; vmovdqu (%r8), RTMP1x; /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ /* Checksum_i = Checksum_{i-1} xor P_i */ /* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i) */ #define OCB_INPUT(n, l0reg, l1reg, yreg) \ vmovdqu (n * 32)(%rdx), yreg; \ vpxor (l0reg), RTMP0x, RNOTx; \ vpxor (l1reg), RNOTx, RTMP0x; \ vinserti128 $1, RTMP0x, RNOT, RNOT; \ vpxor yreg, RTMP1, RTMP1; \ vpxor yreg, RNOT, yreg; \ vmovdqu RNOT, (n * 32)(%rsi); movq (0 * 8)(%r9), %r10; movq (1 * 8)(%r9), %r11; movq (2 * 8)(%r9), %r12; movq (3 * 8)(%r9), %r13; OCB_INPUT(0, %r10, %r11, RA0); OCB_INPUT(1, %r12, %r13, RA1); movq (4 * 8)(%r9), %r10; movq (5 * 8)(%r9), %r11; movq (6 * 8)(%r9), %r12; movq (7 * 8)(%r9), %r13; OCB_INPUT(2, %r10, %r11, RA2); OCB_INPUT(3, %r12, %r13, RA3); movq (8 * 8)(%r9), %r10; movq (9 * 8)(%r9), %r11; movq (10 * 8)(%r9), %r12; movq (11 * 8)(%r9), %r13; OCB_INPUT(4, %r10, %r11, RB0); OCB_INPUT(5, %r12, %r13, RB1); movq (12 * 8)(%r9), %r10; movq (13 * 8)(%r9), %r11; movq (14 * 8)(%r9), %r12; movq (15 * 8)(%r9), %r13; OCB_INPUT(6, %r10, %r11, RB2); OCB_INPUT(7, %r12, %r13, RB3); #undef OCB_INPUT vextracti128 $1, RTMP1, RNOTx; vmovdqu RTMP0x, (%rcx); vpxor RNOTx, RTMP1x, RTMP1x; vmovdqu RTMP1x, (%r8); movq (0 * 8)(%rsp), %r10; movq (1 * 8)(%rsp), %r11; movq (2 * 8)(%rsp), %r12; movq (3 * 8)(%rsp), %r13; CFI_RESTORE(%r10); CFI_RESTORE(%r11); CFI_RESTORE(%r12); CFI_RESTORE(%r13); call __sm4_crypt_blk16; addq $(4 * 8), %rsp; CFI_ADJUST_CFA_OFFSET(-4 * 8); vpxor (0 * 32)(%rsi), RA0, RA0; vpxor (1 * 32)(%rsi), RA1, RA1; vpxor (2 * 32)(%rsi), RA2, RA2; vpxor (3 * 32)(%rsi), RA3, RA3; vpxor (4 * 32)(%rsi), RB0, RB0; vpxor (5 * 32)(%rsi), RB1, RB1; 
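Stepping back from the OCB encryption routine in progress here: each OCB_INPUT step above chains two L values into the running offset because one 256-bit register carries two consecutive blocks, the plaintext pair is folded into the checksum, and the pair of per-block offsets is parked in the destination buffer so it can be XORed back over the cipher output afterwards (the vpxor (n * 32)(%rsi) sequence around this point). A scalar C sketch of one such step, with illustrative types and names (block128, ocb_enc_pair) that are not from the source:

#include <stdint.h>

typedef struct { uint8_t b[16]; } block128;

static void xor_into(block128 *dst, const block128 *src)
{
    for (int i = 0; i < 16; i++)
        dst->b[i] ^= src->b[i];
}

/* One pair-of-blocks step of OCB encryption input processing:
 *   Offset_i     = Offset_{i-1} ^ L[ntz(i)]
 *   Offset_{i+1} = Offset_i     ^ L[ntz(i+1)]
 *   Checksum    ^= P_i ^ P_{i+1}
 * The masked blocks go through the block cipher; the two offsets are
 * kept so they can be XORed with the cipher output to form C_i. */
static void ocb_enc_pair(block128 *offset, block128 *checksum,
                         const block128 *l0, const block128 *l1,
                         const block128 p[2],
                         block128 masked[2], block128 offsets[2])
{
    xor_into(offset, l0);            /* Offset_i */
    offsets[0] = *offset;
    masked[0] = p[0];
    xor_into(&masked[0], offset);    /* P_i ^ Offset_i */
    xor_into(checksum, &p[0]);

    xor_into(offset, l1);            /* Offset_{i+1} */
    offsets[1] = *offset;
    masked[1] = p[1];
    xor_into(&masked[1], offset);    /* P_{i+1} ^ Offset_{i+1} */
    xor_into(checksum, &p[1]);
}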
vpxor (6 * 32)(%rsi), RB2, RB2; vpxor (7 * 32)(%rsi), RB3, RB3; vmovdqu RA0, (0 * 32)(%rsi); vmovdqu RA1, (1 * 32)(%rsi); vmovdqu RA2, (2 * 32)(%rsi); vmovdqu RA3, (3 * 32)(%rsi); vmovdqu RB0, (4 * 32)(%rsi); vmovdqu RB1, (5 * 32)(%rsi); vmovdqu RB2, (6 * 32)(%rsi); vmovdqu RB3, (7 * 32)(%rsi); vzeroall; ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_sm4_aesni_avx2_ocb_enc,.-_gcry_sm4_aesni_avx2_ocb_enc;) .align 16 .globl _gcry_sm4_aesni_avx2_ocb_dec ELF(.type _gcry_sm4_aesni_avx2_ocb_dec,@function;) _gcry_sm4_aesni_avx2_ocb_dec: /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) * %rdx: src (16 blocks) * %rcx: offset * %r8 : checksum * %r9 : L pointers (void *L[16]) */ CFI_STARTPROC(); subq $(4 * 8), %rsp; CFI_ADJUST_CFA_OFFSET(4 * 8); movq %r10, (0 * 8)(%rsp); movq %r11, (1 * 8)(%rsp); movq %r12, (2 * 8)(%rsp); movq %r13, (3 * 8)(%rsp); CFI_REL_OFFSET(%r10, 0 * 8); CFI_REL_OFFSET(%r11, 1 * 8); CFI_REL_OFFSET(%r12, 2 * 8); CFI_REL_OFFSET(%r13, 3 * 8); vmovdqu (%rcx), RTMP0x; /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ /* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i) */ #define OCB_INPUT(n, l0reg, l1reg, yreg) \ vmovdqu (n * 32)(%rdx), yreg; \ vpxor (l0reg), RTMP0x, RNOTx; \ vpxor (l1reg), RNOTx, RTMP0x; \ vinserti128 $1, RTMP0x, RNOT, RNOT; \ vpxor yreg, RNOT, yreg; \ vmovdqu RNOT, (n * 32)(%rsi); movq (0 * 8)(%r9), %r10; movq (1 * 8)(%r9), %r11; movq (2 * 8)(%r9), %r12; movq (3 * 8)(%r9), %r13; OCB_INPUT(0, %r10, %r11, RA0); OCB_INPUT(1, %r12, %r13, RA1); movq (4 * 8)(%r9), %r10; movq (5 * 8)(%r9), %r11; movq (6 * 8)(%r9), %r12; movq (7 * 8)(%r9), %r13; OCB_INPUT(2, %r10, %r11, RA2); OCB_INPUT(3, %r12, %r13, RA3); movq (8 * 8)(%r9), %r10; movq (9 * 8)(%r9), %r11; movq (10 * 8)(%r9), %r12; movq (11 * 8)(%r9), %r13; OCB_INPUT(4, %r10, %r11, RB0); OCB_INPUT(5, %r12, %r13, RB1); movq (12 * 8)(%r9), %r10; movq (13 * 8)(%r9), %r11; movq (14 * 8)(%r9), %r12; movq (15 * 8)(%r9), %r13; OCB_INPUT(6, %r10, %r11, RB2); OCB_INPUT(7, %r12, %r13, RB3); #undef OCB_INPUT vmovdqu RTMP0x, (%rcx); movq (0 * 8)(%rsp), %r10; movq (1 * 8)(%rsp), %r11; movq (2 * 8)(%rsp), %r12; movq (3 * 8)(%rsp), %r13; CFI_RESTORE(%r10); CFI_RESTORE(%r11); CFI_RESTORE(%r12); CFI_RESTORE(%r13); call __sm4_crypt_blk16; addq $(4 * 8), %rsp; CFI_ADJUST_CFA_OFFSET(-4 * 8); vmovdqu (%r8), RTMP1x; vpxor (0 * 32)(%rsi), RA0, RA0; vpxor (1 * 32)(%rsi), RA1, RA1; vpxor (2 * 32)(%rsi), RA2, RA2; vpxor (3 * 32)(%rsi), RA3, RA3; vpxor (4 * 32)(%rsi), RB0, RB0; vpxor (5 * 32)(%rsi), RB1, RB1; vpxor (6 * 32)(%rsi), RB2, RB2; vpxor (7 * 32)(%rsi), RB3, RB3; /* Checksum_i = Checksum_{i-1} xor P_i */ vmovdqu RA0, (0 * 32)(%rsi); vpxor RA0, RTMP1, RTMP1; vmovdqu RA1, (1 * 32)(%rsi); vpxor RA1, RTMP1, RTMP1; vmovdqu RA2, (2 * 32)(%rsi); vpxor RA2, RTMP1, RTMP1; vmovdqu RA3, (3 * 32)(%rsi); vpxor RA3, RTMP1, RTMP1; vmovdqu RB0, (4 * 32)(%rsi); vpxor RB0, RTMP1, RTMP1; vmovdqu RB1, (5 * 32)(%rsi); vpxor RB1, RTMP1, RTMP1; vmovdqu RB2, (6 * 32)(%rsi); vpxor RB2, RTMP1, RTMP1; vmovdqu RB3, (7 * 32)(%rsi); vpxor RB3, RTMP1, RTMP1; vextracti128 $1, RTMP1, RNOTx; vpxor RNOTx, RTMP1x, RTMP1x; vmovdqu RTMP1x, (%r8); vzeroall; ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_sm4_aesni_avx2_ocb_dec,.-_gcry_sm4_aesni_avx2_ocb_dec;) .align 16 .globl _gcry_sm4_aesni_avx2_ocb_auth ELF(.type _gcry_sm4_aesni_avx2_ocb_auth,@function;) _gcry_sm4_aesni_avx2_ocb_auth: /* input: * %rdi: ctx, CTX * %rsi: abuf (16 blocks) * %rdx: offset * %rcx: checksum * %r8 : L pointers (void *L[16]) */ CFI_STARTPROC(); subq $(4 * 8), %rsp; CFI_ADJUST_CFA_OFFSET(4 * 8); movq %r10, (0 
* 8)(%rsp); movq %r11, (1 * 8)(%rsp); movq %r12, (2 * 8)(%rsp); movq %r13, (3 * 8)(%rsp); CFI_REL_OFFSET(%r10, 0 * 8); CFI_REL_OFFSET(%r11, 1 * 8); CFI_REL_OFFSET(%r12, 2 * 8); CFI_REL_OFFSET(%r13, 3 * 8); vmovdqu (%rdx), RTMP0x; /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ /* Sum_i = Sum_{i-1} xor ENCIPHER(K, A_i xor Offset_i) */ #define OCB_INPUT(n, l0reg, l1reg, yreg) \ vmovdqu (n * 32)(%rsi), yreg; \ vpxor (l0reg), RTMP0x, RNOTx; \ vpxor (l1reg), RNOTx, RTMP0x; \ vinserti128 $1, RTMP0x, RNOT, RNOT; \ vpxor yreg, RNOT, yreg; movq (0 * 8)(%r8), %r10; movq (1 * 8)(%r8), %r11; movq (2 * 8)(%r8), %r12; movq (3 * 8)(%r8), %r13; OCB_INPUT(0, %r10, %r11, RA0); OCB_INPUT(1, %r12, %r13, RA1); movq (4 * 8)(%r8), %r10; movq (5 * 8)(%r8), %r11; movq (6 * 8)(%r8), %r12; movq (7 * 8)(%r8), %r13; OCB_INPUT(2, %r10, %r11, RA2); OCB_INPUT(3, %r12, %r13, RA3); movq (8 * 8)(%r8), %r10; movq (9 * 8)(%r8), %r11; movq (10 * 8)(%r8), %r12; movq (11 * 8)(%r8), %r13; OCB_INPUT(4, %r10, %r11, RB0); OCB_INPUT(5, %r12, %r13, RB1); movq (12 * 8)(%r8), %r10; movq (13 * 8)(%r8), %r11; movq (14 * 8)(%r8), %r12; movq (15 * 8)(%r8), %r13; OCB_INPUT(6, %r10, %r11, RB2); OCB_INPUT(7, %r12, %r13, RB3); #undef OCB_INPUT vmovdqu RTMP0x, (%rdx); movq (0 * 8)(%rsp), %r10; movq (1 * 8)(%rsp), %r11; movq (2 * 8)(%rsp), %r12; movq (3 * 8)(%rsp), %r13; CFI_RESTORE(%r10); CFI_RESTORE(%r11); CFI_RESTORE(%r12); CFI_RESTORE(%r13); call __sm4_crypt_blk16; addq $(4 * 8), %rsp; CFI_ADJUST_CFA_OFFSET(-4 * 8); vpxor RA0, RB0, RA0; vpxor RA1, RB1, RA1; vpxor RA2, RB2, RA2; vpxor RA3, RB3, RA3; vpxor RA1, RA0, RA0; vpxor RA3, RA2, RA2; vpxor RA2, RA0, RTMP1; vextracti128 $1, RTMP1, RNOTx; vpxor (%rcx), RTMP1x, RTMP1x; vpxor RNOTx, RTMP1x, RTMP1x; vmovdqu RTMP1x, (%rcx); vzeroall; ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_sm4_aesni_avx2_ocb_auth,.-_gcry_sm4_aesni_avx2_ocb_auth;) #endif /*defined(ENABLE_AESNI_SUPPORT) && defined(ENABLE_AVX_SUPPORT)*/ #endif /*__x86_64*/ diff --git a/cipher/sm4-gfni-avx2-amd64.S b/cipher/sm4-gfni-avx2-amd64.S index e21bd93b..2fbaffd5 100644 --- a/cipher/sm4-gfni-avx2-amd64.S +++ b/cipher/sm4-gfni-avx2-amd64.S @@ -1,1194 +1,1199 @@ /* sm4-gfni-avx2-amd64.S - GFNI/AVX2 implementation of SM4 cipher * * Copyright (C) 2022 Jussi Kivilinna * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . */ #include #ifdef __x86_64 #if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && \ defined(ENABLE_GFNI_SUPPORT) && defined(ENABLE_AVX2_SUPPORT) #include "asm-common-amd64.h" /********************************************************************** helper macros **********************************************************************/ /* Transpose four 32-bit words between 128-bit vectors. 
*/ #define transpose_4x4(x0, x1, x2, x3, t1, t2) \ vpunpckhdq x1, x0, t2; \ vpunpckldq x1, x0, x0; \ \ vpunpckldq x3, x2, t1; \ vpunpckhdq x3, x2, x2; \ \ vpunpckhqdq t1, x0, x1; \ vpunpcklqdq t1, x0, x0; \ \ vpunpckhqdq x2, t2, x3; \ vpunpcklqdq x2, t2, x2; /********************************************************************** 4-way && 8-way SM4 with GFNI and AVX2 **********************************************************************/ /* vector registers */ #define RX0 %ymm0 #define RX1 %ymm1 #define RX0x %xmm0 #define RX1x %xmm1 #define RTMP0 %ymm2 #define RTMP1 %ymm3 #define RTMP2 %ymm4 #define RTMP3 %ymm5 #define RTMP4 %ymm6 #define RTMP0x %xmm2 #define RTMP1x %xmm3 #define RTMP2x %xmm4 #define RTMP3x %xmm5 #define RTMP4x %xmm6 #define RNOT %ymm7 #define RNOTx %xmm7 #define RA0 %ymm8 #define RA1 %ymm9 #define RA2 %ymm10 #define RA3 %ymm11 #define RA0x %xmm8 #define RA1x %xmm9 #define RA2x %xmm10 #define RA3x %xmm11 #define RB0 %ymm12 #define RB1 %ymm13 #define RB2 %ymm14 #define RB3 %ymm15 #define RB0x %xmm12 #define RB1x %xmm13 #define RB2x %xmm14 #define RB3x %xmm15 -.text +SECTION_RODATA .align 32 +ELF(.type _sm4_gfni_avx2_consts,@object) +_sm4_gfni_avx2_consts: + /* Affine transform, SM4 field to AES field */ .Lpre_affine_s: .byte 0x52, 0xbc, 0x2d, 0x02, 0x9e, 0x25, 0xac, 0x34 .byte 0x52, 0xbc, 0x2d, 0x02, 0x9e, 0x25, 0xac, 0x34 .byte 0x52, 0xbc, 0x2d, 0x02, 0x9e, 0x25, 0xac, 0x34 .byte 0x52, 0xbc, 0x2d, 0x02, 0x9e, 0x25, 0xac, 0x34 /* Affine transform, AES field to SM4 field */ .Lpost_affine_s: .byte 0x19, 0x8b, 0x6c, 0x1e, 0x51, 0x8e, 0x2d, 0xd7 .byte 0x19, 0x8b, 0x6c, 0x1e, 0x51, 0x8e, 0x2d, 0xd7 .byte 0x19, 0x8b, 0x6c, 0x1e, 0x51, 0x8e, 0x2d, 0xd7 .byte 0x19, 0x8b, 0x6c, 0x1e, 0x51, 0x8e, 0x2d, 0xd7 /* Rotate left by 8 bits on 32-bit words with vpshufb */ .Lrol_8: .byte 0x03, 0x00, 0x01, 0x02, 0x07, 0x04, 0x05, 0x06 .byte 0x0b, 0x08, 0x09, 0x0a, 0x0f, 0x0c, 0x0d, 0x0e .byte 0x03, 0x00, 0x01, 0x02, 0x07, 0x04, 0x05, 0x06 .byte 0x0b, 0x08, 0x09, 0x0a, 0x0f, 0x0c, 0x0d, 0x0e /* Rotate left by 16 bits on 32-bit words with vpshufb */ .Lrol_16: .byte 0x02, 0x03, 0x00, 0x01, 0x06, 0x07, 0x04, 0x05 .byte 0x0a, 0x0b, 0x08, 0x09, 0x0e, 0x0f, 0x0c, 0x0d .byte 0x02, 0x03, 0x00, 0x01, 0x06, 0x07, 0x04, 0x05 .byte 0x0a, 0x0b, 0x08, 0x09, 0x0e, 0x0f, 0x0c, 0x0d /* Rotate left by 24 bits on 32-bit words with vpshufb */ .Lrol_24: .byte 0x01, 0x02, 0x03, 0x00, 0x05, 0x06, 0x07, 0x04 .byte 0x09, 0x0a, 0x0b, 0x08, 0x0d, 0x0e, 0x0f, 0x0c .byte 0x01, 0x02, 0x03, 0x00, 0x05, 0x06, 0x07, 0x04 .byte 0x09, 0x0a, 0x0b, 0x08, 0x0d, 0x0e, 0x0f, 0x0c /* For CTR-mode IV byteswap */ .Lbswap128_mask: .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 /* For input word byte-swap */ .Lbswap32_mask: .byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 +.text + .align 16 .globl _gcry_sm4_gfni_avx2_expand_key ELF(.type _gcry_sm4_gfni_avx2_expand_key,@function;) _gcry_sm4_gfni_avx2_expand_key: /* input: * %rdi: 128-bit key * %rsi: rkey_enc * %rdx: rkey_dec * %rcx: fk array * %r8: ck array */ CFI_STARTPROC(); vmovd 0*4(%rdi), RA0x; vmovd 1*4(%rdi), RA1x; vmovd 2*4(%rdi), RA2x; vmovd 3*4(%rdi), RA3x; vmovdqa .Lbswap32_mask rRIP, RTMP2x; vpshufb RTMP2x, RA0x, RA0x; vpshufb RTMP2x, RA1x, RA1x; vpshufb RTMP2x, RA2x, RA2x; vpshufb RTMP2x, RA3x, RA3x; vmovd 0*4(%rcx), RB0x; vmovd 1*4(%rcx), RB1x; vmovd 2*4(%rcx), RB2x; vmovd 3*4(%rcx), RB3x; vpxor RB0x, RA0x, RA0x; vpxor RB1x, RA1x, RA1x; vpxor RB2x, RA2x, RA2x; vpxor RB3x, RA3x, RA3x; #define ROUND(round, s0, s1, s2, s3) \ vpbroadcastd (4*(round))(%r8), 
RX0x; \ vpxor s1, RX0x, RX0x; \ vpxor s2, RX0x, RX0x; \ vpxor s3, RX0x, RX0x; /* s1 ^ s2 ^ s3 ^ rk */ \ \ /* sbox, non-linear part */ \ vgf2p8affineqb $0x65, .Lpre_affine_s rRIP, RX0x, RX0x; \ vgf2p8affineinvqb $0xd3, .Lpost_affine_s rRIP, RX0x, RX0x; \ \ /* linear part */ \ vpxor RX0x, s0, s0; /* s0 ^ x */ \ vpslld $13, RX0x, RTMP0x; \ vpsrld $19, RX0x, RTMP1x; \ vpslld $23, RX0x, RTMP2x; \ vpsrld $9, RX0x, RTMP3x; \ vpxor RTMP0x, RTMP1x, RTMP1x; \ vpxor RTMP2x, RTMP3x, RTMP3x; \ vpxor RTMP1x, s0, s0; /* s0 ^ x ^ rol(x,13) */ \ vpxor RTMP3x, s0, s0; /* s0 ^ x ^ rol(x,13) ^ rol(x,23) */ leaq (32*4)(%r8), %rax; leaq (32*4)(%rdx), %rdx; .align 16 .Lroundloop_expand_key: leaq (-4*4)(%rdx), %rdx; ROUND(0, RA0x, RA1x, RA2x, RA3x); ROUND(1, RA1x, RA2x, RA3x, RA0x); ROUND(2, RA2x, RA3x, RA0x, RA1x); ROUND(3, RA3x, RA0x, RA1x, RA2x); leaq (4*4)(%r8), %r8; vmovd RA0x, (0*4)(%rsi); vmovd RA1x, (1*4)(%rsi); vmovd RA2x, (2*4)(%rsi); vmovd RA3x, (3*4)(%rsi); vmovd RA0x, (3*4)(%rdx); vmovd RA1x, (2*4)(%rdx); vmovd RA2x, (1*4)(%rdx); vmovd RA3x, (0*4)(%rdx); leaq (4*4)(%rsi), %rsi; cmpq %rax, %r8; jne .Lroundloop_expand_key; #undef ROUND vzeroall; ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_sm4_gfni_avx2_expand_key,.-_gcry_sm4_gfni_avx2_expand_key;) .align 16 ELF(.type sm4_gfni_avx2_crypt_blk1_4,@function;) sm4_gfni_avx2_crypt_blk1_4: /* input: * %rdi: round key array, CTX * %rsi: dst (1..4 blocks) * %rdx: src (1..4 blocks) * %rcx: num blocks (1..4) */ CFI_STARTPROC(); vmovdqu 0*16(%rdx), RA0x; vmovdqa RA0x, RA1x; vmovdqa RA0x, RA2x; vmovdqa RA0x, RA3x; cmpq $2, %rcx; jb .Lblk4_load_input_done; vmovdqu 1*16(%rdx), RA1x; je .Lblk4_load_input_done; vmovdqu 2*16(%rdx), RA2x; cmpq $3, %rcx; je .Lblk4_load_input_done; vmovdqu 3*16(%rdx), RA3x; .Lblk4_load_input_done: vmovdqa .Lbswap32_mask rRIP, RTMP2x; vpshufb RTMP2x, RA0x, RA0x; vpshufb RTMP2x, RA1x, RA1x; vpshufb RTMP2x, RA2x, RA2x; vpshufb RTMP2x, RA3x, RA3x; vmovdqa .Lrol_8 rRIP, RTMP2x; vmovdqa .Lrol_16 rRIP, RTMP3x; vmovdqa .Lrol_24 rRIP, RB3x; transpose_4x4(RA0x, RA1x, RA2x, RA3x, RTMP0x, RTMP1x); #define ROUND(round, s0, s1, s2, s3) \ vpbroadcastd (4*(round))(%rdi), RX0x; \ vpxor s1, RX0x, RX0x; \ vpxor s2, RX0x, RX0x; \ vpxor s3, RX0x, RX0x; /* s1 ^ s2 ^ s3 ^ rk */ \ \ /* sbox, non-linear part */ \ vgf2p8affineqb $0x65, .Lpre_affine_s rRIP, RX0x, RX0x; \ vgf2p8affineinvqb $0xd3, .Lpost_affine_s rRIP, RX0x, RX0x; \ \ /* linear part */ \ vpxor RX0x, s0, s0; /* s0 ^ x */ \ vpshufb RTMP2x, RX0x, RTMP1x; \ vpxor RTMP1x, RX0x, RTMP0x; /* x ^ rol(x,8) */ \ vpshufb RTMP3x, RX0x, RTMP1x; \ vpxor RTMP1x, RTMP0x, RTMP0x; /* x ^ rol(x,8) ^ rol(x,16) */ \ vpshufb RB3x, RX0x, RTMP1x; \ vpxor RTMP1x, s0, s0; /* s0 ^ x ^ rol(x,24) */ \ vpslld $2, RTMP0x, RTMP1x; \ vpsrld $30, RTMP0x, RTMP0x; \ vpxor RTMP0x, s0, s0; \ vpxor RTMP1x, s0, s0; /* s0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ leaq (32*4)(%rdi), %rax; .align 16 .Lroundloop_blk4: ROUND(0, RA0x, RA1x, RA2x, RA3x); ROUND(1, RA1x, RA2x, RA3x, RA0x); ROUND(2, RA2x, RA3x, RA0x, RA1x); ROUND(3, RA3x, RA0x, RA1x, RA2x); leaq (4*4)(%rdi), %rdi; cmpq %rax, %rdi; jne .Lroundloop_blk4; #undef ROUND vmovdqa .Lbswap128_mask rRIP, RTMP2x; transpose_4x4(RA0x, RA1x, RA2x, RA3x, RTMP0x, RTMP1x); vpshufb RTMP2x, RA0x, RA0x; vpshufb RTMP2x, RA1x, RA1x; vpshufb RTMP2x, RA2x, RA2x; vpshufb RTMP2x, RA3x, RA3x; vmovdqu RA0x, 0*16(%rsi); cmpq $2, %rcx; jb .Lblk4_store_output_done; vmovdqu RA1x, 1*16(%rsi); je .Lblk4_store_output_done; vmovdqu RA2x, 2*16(%rsi); cmpq $3, %rcx; je .Lblk4_store_output_done; vmovdqu RA3x, 
3*16(%rsi); .Lblk4_store_output_done: vzeroall; xorl %eax, %eax; ret_spec_stop; CFI_ENDPROC(); ELF(.size sm4_gfni_avx2_crypt_blk1_4,.-sm4_gfni_avx2_crypt_blk1_4;) .align 16 ELF(.type __sm4_gfni_crypt_blk8,@function;) __sm4_gfni_crypt_blk8: /* input: * %rdi: round key array, CTX * RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: eight parallel * ciphertext blocks * output: * RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: eight parallel plaintext * blocks */ CFI_STARTPROC(); vmovdqa .Lbswap32_mask rRIP, RTMP2x; vpshufb RTMP2x, RA0x, RA0x; vpshufb RTMP2x, RA1x, RA1x; vpshufb RTMP2x, RA2x, RA2x; vpshufb RTMP2x, RA3x, RA3x; vpshufb RTMP2x, RB0x, RB0x; vpshufb RTMP2x, RB1x, RB1x; vpshufb RTMP2x, RB2x, RB2x; vpshufb RTMP2x, RB3x, RB3x; transpose_4x4(RA0x, RA1x, RA2x, RA3x, RTMP0x, RTMP1x); transpose_4x4(RB0x, RB1x, RB2x, RB3x, RTMP0x, RTMP1x); #define ROUND(round, s0, s1, s2, s3, r0, r1, r2, r3) \ vpbroadcastd (4*(round))(%rdi), RX0x; \ vmovdqa .Lpre_affine_s rRIP, RTMP2x; \ vmovdqa .Lpost_affine_s rRIP, RTMP3x; \ vmovdqa RX0x, RX1x; \ vpxor s1, RX0x, RX0x; \ vpxor s2, RX0x, RX0x; \ vpxor s3, RX0x, RX0x; /* s1 ^ s2 ^ s3 ^ rk */ \ vpxor r1, RX1x, RX1x; \ vpxor r2, RX1x, RX1x; \ vpxor r3, RX1x, RX1x; /* r1 ^ r2 ^ r3 ^ rk */ \ \ /* sbox, non-linear part */ \ vmovdqa .Lrol_8 rRIP, RTMP4x; \ vgf2p8affineqb $0x65, RTMP2x, RX0x, RX0x; \ vgf2p8affineinvqb $0xd3, RTMP3x, RX0x, RX0x; \ vgf2p8affineqb $0x65, RTMP2x, RX1x, RX1x; \ vgf2p8affineinvqb $0xd3, RTMP3x, RX1x, RX1x; \ \ /* linear part */ \ vpxor RX0x, s0, s0; /* s0 ^ x */ \ vpshufb RTMP4x, RX0x, RTMP1x; \ vpxor RTMP1x, RX0x, RTMP0x; /* x ^ rol(x,8) */ \ vpxor RX1x, r0, r0; /* r0 ^ x */ \ vpshufb RTMP4x, RX1x, RTMP3x; \ vmovdqa .Lrol_16 rRIP, RTMP4x; \ vpxor RTMP3x, RX1x, RTMP2x; /* x ^ rol(x,8) */ \ vpshufb RTMP4x, RX0x, RTMP1x; \ vpxor RTMP1x, RTMP0x, RTMP0x; /* x ^ rol(x,8) ^ rol(x,16) */ \ vpshufb RTMP4x, RX1x, RTMP3x; \ vmovdqa .Lrol_24 rRIP, RTMP4x; \ vpxor RTMP3x, RTMP2x, RTMP2x; /* x ^ rol(x,8) ^ rol(x,16) */ \ vpshufb RTMP4x, RX0x, RTMP1x; \ vpxor RTMP1x, s0, s0; /* s0 ^ x ^ rol(x,24) */ \ vpslld $2, RTMP0x, RTMP1x; \ vpsrld $30, RTMP0x, RTMP0x; \ vpxor RTMP0x, s0, s0; \ vpxor RTMP1x, s0, s0; /* s0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \ vpshufb RTMP4x, RX1x, RTMP3x; \ vpxor RTMP3x, r0, r0; /* r0 ^ x ^ rol(x,24) */ \ vpslld $2, RTMP2x, RTMP3x; \ vpsrld $30, RTMP2x, RTMP2x; \ vpxor RTMP2x, r0, r0; \ vpxor RTMP3x, r0, r0; /* r0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ leaq (32*4)(%rdi), %rax; .align 16 .Lroundloop_blk8: ROUND(0, RA0x, RA1x, RA2x, RA3x, RB0x, RB1x, RB2x, RB3x); ROUND(1, RA1x, RA2x, RA3x, RA0x, RB1x, RB2x, RB3x, RB0x); ROUND(2, RA2x, RA3x, RA0x, RA1x, RB2x, RB3x, RB0x, RB1x); ROUND(3, RA3x, RA0x, RA1x, RA2x, RB3x, RB0x, RB1x, RB2x); leaq (4*4)(%rdi), %rdi; cmpq %rax, %rdi; jne .Lroundloop_blk8; #undef ROUND vmovdqa .Lbswap128_mask rRIP, RTMP2x; transpose_4x4(RA0x, RA1x, RA2x, RA3x, RTMP0x, RTMP1x); transpose_4x4(RB0x, RB1x, RB2x, RB3x, RTMP0x, RTMP1x); vpshufb RTMP2x, RA0x, RA0x; vpshufb RTMP2x, RA1x, RA1x; vpshufb RTMP2x, RA2x, RA2x; vpshufb RTMP2x, RA3x, RA3x; vpshufb RTMP2x, RB0x, RB0x; vpshufb RTMP2x, RB1x, RB1x; vpshufb RTMP2x, RB2x, RB2x; vpshufb RTMP2x, RB3x, RB3x; ret_spec_stop; CFI_ENDPROC(); ELF(.size __sm4_gfni_crypt_blk8,.-__sm4_gfni_crypt_blk8;) .align 16 ELF(.type _gcry_sm4_gfni_avx2_crypt_blk1_8,@function;) _gcry_sm4_gfni_avx2_crypt_blk1_8: /* input: * %rdi: round key array, CTX * %rsi: dst (1..8 blocks) * %rdx: src (1..8 blocks) * %rcx: num blocks (1..8) */ CFI_STARTPROC(); cmpq $5, %rcx; jb 
sm4_gfni_avx2_crypt_blk1_4; vmovdqu (0 * 16)(%rdx), RA0x; vmovdqu (1 * 16)(%rdx), RA1x; vmovdqu (2 * 16)(%rdx), RA2x; vmovdqu (3 * 16)(%rdx), RA3x; vmovdqu (4 * 16)(%rdx), RB0x; vmovdqa RB0x, RB1x; vmovdqa RB0x, RB2x; vmovdqa RB0x, RB3x; je .Lblk8_load_input_done; vmovdqu (5 * 16)(%rdx), RB1x; cmpq $7, %rcx; jb .Lblk8_load_input_done; vmovdqu (6 * 16)(%rdx), RB2x; je .Lblk8_load_input_done; vmovdqu (7 * 16)(%rdx), RB3x; .Lblk8_load_input_done: call __sm4_gfni_crypt_blk8; cmpq $6, %rcx; vmovdqu RA0x, (0 * 16)(%rsi); vmovdqu RA1x, (1 * 16)(%rsi); vmovdqu RA2x, (2 * 16)(%rsi); vmovdqu RA3x, (3 * 16)(%rsi); vmovdqu RB0x, (4 * 16)(%rsi); jb .Lblk8_store_output_done; vmovdqu RB1x, (5 * 16)(%rsi); je .Lblk8_store_output_done; vmovdqu RB2x, (6 * 16)(%rsi); cmpq $7, %rcx; je .Lblk8_store_output_done; vmovdqu RB3x, (7 * 16)(%rsi); .Lblk8_store_output_done: vzeroall; xorl %eax, %eax; ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_sm4_gfni_avx2_crypt_blk1_8,.-_gcry_sm4_gfni_avx2_crypt_blk1_8;) /********************************************************************** 16-way SM4 with GFNI and AVX2 **********************************************************************/ .align 16 ELF(.type __sm4_gfni_crypt_blk16,@function;) __sm4_gfni_crypt_blk16: /* input: * %rdi: ctx, CTX * RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: sixteen parallel * plaintext blocks * output: * RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: sixteen parallel * ciphertext blocks */ CFI_STARTPROC(); vbroadcasti128 .Lbswap32_mask rRIP, RTMP2; vpshufb RTMP2, RA0, RA0; vpshufb RTMP2, RA1, RA1; vpshufb RTMP2, RA2, RA2; vpshufb RTMP2, RA3, RA3; vpshufb RTMP2, RB0, RB0; vpshufb RTMP2, RB1, RB1; vpshufb RTMP2, RB2, RB2; vpshufb RTMP2, RB3, RB3; transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1); transpose_4x4(RB0, RB1, RB2, RB3, RTMP0, RTMP1); #define ROUND(round, s0, s1, s2, s3, r0, r1, r2, r3) \ vpbroadcastd (4*(round))(%rdi), RX0; \ vbroadcasti128 .Lpre_affine_s rRIP, RTMP2; \ vbroadcasti128 .Lpost_affine_s rRIP, RTMP3; \ vmovdqa RX0, RX1; \ vpxor s1, RX0, RX0; \ vpxor s2, RX0, RX0; \ vpxor s3, RX0, RX0; /* s1 ^ s2 ^ s3 ^ rk */ \ vpxor r1, RX1, RX1; \ vpxor r2, RX1, RX1; \ vpxor r3, RX1, RX1; /* r1 ^ r2 ^ r3 ^ rk */ \ \ /* sbox, non-linear part */ \ vbroadcasti128 .Lrol_8 rRIP, RTMP4; \ vgf2p8affineqb $0x65, RTMP2, RX0, RX0; \ vgf2p8affineinvqb $0xd3, RTMP3, RX0, RX0; \ vgf2p8affineqb $0x65, RTMP2, RX1, RX1; \ vgf2p8affineinvqb $0xd3, RTMP3, RX1, RX1; \ \ /* linear part */ \ vpxor RX0, s0, s0; /* s0 ^ x */ \ vpshufb RTMP4, RX0, RTMP1; \ vpxor RTMP1, RX0, RTMP0; /* x ^ rol(x,8) */ \ vpxor RX1, r0, r0; /* r0 ^ x */ \ vpshufb RTMP4, RX1, RTMP3; \ vbroadcasti128 .Lrol_16 rRIP, RTMP4; \ vpxor RTMP3, RX1, RTMP2; /* x ^ rol(x,8) */ \ vpshufb RTMP4, RX0, RTMP1; \ vpxor RTMP1, RTMP0, RTMP0; /* x ^ rol(x,8) ^ rol(x,16) */ \ vpshufb RTMP4, RX1, RTMP3; \ vbroadcasti128 .Lrol_24 rRIP, RTMP4; \ vpxor RTMP3, RTMP2, RTMP2; /* x ^ rol(x,8) ^ rol(x,16) */ \ vpshufb RTMP4, RX0, RTMP1; \ vpxor RTMP1, s0, s0; /* s0 ^ x ^ rol(x,24) */ \ vpslld $2, RTMP0, RTMP1; \ vpsrld $30, RTMP0, RTMP0; \ vpxor RTMP0, s0, s0; \ vpxor RTMP1, s0, s0; /* s0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \ vpshufb RTMP4, RX1, RTMP3; \ vpxor RTMP3, r0, r0; /* r0 ^ x ^ rol(x,24) */ \ vpslld $2, RTMP2, RTMP3; \ vpsrld $30, RTMP2, RTMP2; \ vpxor RTMP2, r0, r0; \ vpxor RTMP3, r0, r0; /* r0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ leaq (32*4)(%rdi), %rax; .align 16 .Lroundloop_blk16: ROUND(0, RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3); ROUND(1, RA1, RA2, RA3, RA0, RB1, RB2, RB3, RB0); 
ROUND(2, RA2, RA3, RA0, RA1, RB2, RB3, RB0, RB1); ROUND(3, RA3, RA0, RA1, RA2, RB3, RB0, RB1, RB2); leaq (4*4)(%rdi), %rdi; cmpq %rax, %rdi; jne .Lroundloop_blk16; #undef ROUND vbroadcasti128 .Lbswap128_mask rRIP, RTMP2; transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1); transpose_4x4(RB0, RB1, RB2, RB3, RTMP0, RTMP1); vpshufb RTMP2, RA0, RA0; vpshufb RTMP2, RA1, RA1; vpshufb RTMP2, RA2, RA2; vpshufb RTMP2, RA3, RA3; vpshufb RTMP2, RB0, RB0; vpshufb RTMP2, RB1, RB1; vpshufb RTMP2, RB2, RB2; vpshufb RTMP2, RB3, RB3; ret_spec_stop; CFI_ENDPROC(); ELF(.size __sm4_gfni_crypt_blk16,.-__sm4_gfni_crypt_blk16;) .align 16 .globl _gcry_sm4_gfni_avx2_crypt_blk1_16 ELF(.type _gcry_sm4_gfni_avx2_crypt_blk1_16,@function;) _gcry_sm4_gfni_avx2_crypt_blk1_16: /* input: * %rdi: round key array, CTX * %rsi: dst (1..16 blocks) * %rdx: src (1..16 blocks) * %rcx: num blocks (1..16) */ CFI_STARTPROC(); #define LOAD_INPUT(offset, yreg) \ cmpq $(1 + 2 * (offset)), %rcx; \ jb .Lblk16_load_input_done; \ ja 1f; \ vmovdqu (offset) * 32(%rdx), yreg##x; \ jmp .Lblk16_load_input_done; \ 1: \ vmovdqu (offset) * 32(%rdx), yreg; cmpq $8, %rcx; jbe _gcry_sm4_gfni_avx2_crypt_blk1_8; vmovdqu (0 * 32)(%rdx), RA0; vmovdqu (1 * 32)(%rdx), RA1; vmovdqu (2 * 32)(%rdx), RA2; vmovdqu (3 * 32)(%rdx), RA3; LOAD_INPUT(4, RB0); LOAD_INPUT(5, RB1); LOAD_INPUT(6, RB2); LOAD_INPUT(7, RB3); #undef LOAD_INPUT .Lblk16_load_input_done: call __sm4_gfni_crypt_blk16; #define STORE_OUTPUT(yreg, offset) \ cmpq $(1 + 2 * (offset)), %rcx; \ jb .Lblk16_store_output_done; \ ja 1f; \ vmovdqu yreg##x, (offset) * 32(%rsi); \ jmp .Lblk16_store_output_done; \ 1: \ vmovdqu yreg, (offset) * 32(%rsi); vmovdqu RA0, (0 * 32)(%rsi); vmovdqu RA1, (1 * 32)(%rsi); vmovdqu RA2, (2 * 32)(%rsi); vmovdqu RA3, (3 * 32)(%rsi); STORE_OUTPUT(RB0, 4); STORE_OUTPUT(RB1, 5); STORE_OUTPUT(RB2, 6); STORE_OUTPUT(RB3, 7); #undef STORE_OUTPUT .Lblk16_store_output_done: vzeroall; xorl %eax, %eax; ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_sm4_gfni_avx2_crypt_blk1_16,.-_gcry_sm4_gfni_avx2_crypt_blk1_16;) #define inc_le128(x, minus_one, tmp) \ vpcmpeqq minus_one, x, tmp; \ vpsubq minus_one, x, x; \ vpslldq $8, tmp, tmp; \ vpsubq tmp, x, x; .align 16 .globl _gcry_sm4_gfni_avx2_ctr_enc ELF(.type _gcry_sm4_gfni_avx2_ctr_enc,@function;) _gcry_sm4_gfni_avx2_ctr_enc: /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) * %rdx: src (16 blocks) * %rcx: iv (big endian, 128bit) */ CFI_STARTPROC(); movq 8(%rcx), %rax; bswapq %rax; vbroadcasti128 .Lbswap128_mask rRIP, RTMP3; vpcmpeqd RNOT, RNOT, RNOT; vpsrldq $8, RNOT, RNOT; /* ab: -1:0 ; cd: -1:0 */ vpaddq RNOT, RNOT, RTMP2; /* ab: -2:0 ; cd: -2:0 */ /* load IV and byteswap */ vmovdqu (%rcx), RTMP4x; vpshufb RTMP3x, RTMP4x, RTMP4x; vmovdqa RTMP4x, RTMP0x; inc_le128(RTMP4x, RNOTx, RTMP1x); vinserti128 $1, RTMP4x, RTMP0, RTMP0; vpshufb RTMP3, RTMP0, RA0; /* +1 ; +0 */ /* check need for handling 64-bit overflow and carry */ cmpq $(0xffffffffffffffff - 16), %rax; ja .Lhandle_ctr_carry; /* construct IVs */ vpsubq RTMP2, RTMP0, RTMP0; /* +3 ; +2 */ vpshufb RTMP3, RTMP0, RA1; vpsubq RTMP2, RTMP0, RTMP0; /* +5 ; +4 */ vpshufb RTMP3, RTMP0, RA2; vpsubq RTMP2, RTMP0, RTMP0; /* +7 ; +6 */ vpshufb RTMP3, RTMP0, RA3; vpsubq RTMP2, RTMP0, RTMP0; /* +9 ; +8 */ vpshufb RTMP3, RTMP0, RB0; vpsubq RTMP2, RTMP0, RTMP0; /* +11 ; +10 */ vpshufb RTMP3, RTMP0, RB1; vpsubq RTMP2, RTMP0, RTMP0; /* +13 ; +12 */ vpshufb RTMP3, RTMP0, RB2; vpsubq RTMP2, RTMP0, RTMP0; /* +15 ; +14 */ vpshufb RTMP3, RTMP0, RB3; vpsubq RTMP2, RTMP0, RTMP0; /* +16 */ vpshufb RTMP3x, RTMP0x, 
RTMP0x; jmp .Lctr_carry_done; .Lhandle_ctr_carry: /* construct IVs */ inc_le128(RTMP0, RNOT, RTMP1); inc_le128(RTMP0, RNOT, RTMP1); vpshufb RTMP3, RTMP0, RA1; /* +3 ; +2 */ inc_le128(RTMP0, RNOT, RTMP1); inc_le128(RTMP0, RNOT, RTMP1); vpshufb RTMP3, RTMP0, RA2; /* +5 ; +4 */ inc_le128(RTMP0, RNOT, RTMP1); inc_le128(RTMP0, RNOT, RTMP1); vpshufb RTMP3, RTMP0, RA3; /* +7 ; +6 */ inc_le128(RTMP0, RNOT, RTMP1); inc_le128(RTMP0, RNOT, RTMP1); vpshufb RTMP3, RTMP0, RB0; /* +9 ; +8 */ inc_le128(RTMP0, RNOT, RTMP1); inc_le128(RTMP0, RNOT, RTMP1); vpshufb RTMP3, RTMP0, RB1; /* +11 ; +10 */ inc_le128(RTMP0, RNOT, RTMP1); inc_le128(RTMP0, RNOT, RTMP1); vpshufb RTMP3, RTMP0, RB2; /* +13 ; +12 */ inc_le128(RTMP0, RNOT, RTMP1); inc_le128(RTMP0, RNOT, RTMP1); vpshufb RTMP3, RTMP0, RB3; /* +15 ; +14 */ inc_le128(RTMP0, RNOT, RTMP1); vextracti128 $1, RTMP0, RTMP0x; vpshufb RTMP3x, RTMP0x, RTMP0x; /* +16 */ .align 4 .Lctr_carry_done: /* store new IV */ vmovdqu RTMP0x, (%rcx); call __sm4_gfni_crypt_blk16; vpxor (0 * 32)(%rdx), RA0, RA0; vpxor (1 * 32)(%rdx), RA1, RA1; vpxor (2 * 32)(%rdx), RA2, RA2; vpxor (3 * 32)(%rdx), RA3, RA3; vpxor (4 * 32)(%rdx), RB0, RB0; vpxor (5 * 32)(%rdx), RB1, RB1; vpxor (6 * 32)(%rdx), RB2, RB2; vpxor (7 * 32)(%rdx), RB3, RB3; vmovdqu RA0, (0 * 32)(%rsi); vmovdqu RA1, (1 * 32)(%rsi); vmovdqu RA2, (2 * 32)(%rsi); vmovdqu RA3, (3 * 32)(%rsi); vmovdqu RB0, (4 * 32)(%rsi); vmovdqu RB1, (5 * 32)(%rsi); vmovdqu RB2, (6 * 32)(%rsi); vmovdqu RB3, (7 * 32)(%rsi); vzeroall; ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_sm4_gfni_avx2_ctr_enc,.-_gcry_sm4_gfni_avx2_ctr_enc;) .align 16 .globl _gcry_sm4_gfni_avx2_cbc_dec ELF(.type _gcry_sm4_gfni_avx2_cbc_dec,@function;) _gcry_sm4_gfni_avx2_cbc_dec: /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) * %rdx: src (16 blocks) * %rcx: iv */ CFI_STARTPROC(); vmovdqu (0 * 32)(%rdx), RA0; vmovdqu (1 * 32)(%rdx), RA1; vmovdqu (2 * 32)(%rdx), RA2; vmovdqu (3 * 32)(%rdx), RA3; vmovdqu (4 * 32)(%rdx), RB0; vmovdqu (5 * 32)(%rdx), RB1; vmovdqu (6 * 32)(%rdx), RB2; vmovdqu (7 * 32)(%rdx), RB3; call __sm4_gfni_crypt_blk16; vmovdqu (%rcx), RNOTx; vinserti128 $1, (%rdx), RNOT, RNOT; vpxor RNOT, RA0, RA0; vpxor (0 * 32 + 16)(%rdx), RA1, RA1; vpxor (1 * 32 + 16)(%rdx), RA2, RA2; vpxor (2 * 32 + 16)(%rdx), RA3, RA3; vpxor (3 * 32 + 16)(%rdx), RB0, RB0; vpxor (4 * 32 + 16)(%rdx), RB1, RB1; vpxor (5 * 32 + 16)(%rdx), RB2, RB2; vpxor (6 * 32 + 16)(%rdx), RB3, RB3; vmovdqu (7 * 32 + 16)(%rdx), RNOTx; vmovdqu RNOTx, (%rcx); /* store new IV */ vmovdqu RA0, (0 * 32)(%rsi); vmovdqu RA1, (1 * 32)(%rsi); vmovdqu RA2, (2 * 32)(%rsi); vmovdqu RA3, (3 * 32)(%rsi); vmovdqu RB0, (4 * 32)(%rsi); vmovdqu RB1, (5 * 32)(%rsi); vmovdqu RB2, (6 * 32)(%rsi); vmovdqu RB3, (7 * 32)(%rsi); vzeroall; ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_sm4_gfni_avx2_cbc_dec,.-_gcry_sm4_gfni_avx2_cbc_dec;) .align 16 .globl _gcry_sm4_gfni_avx2_cfb_dec ELF(.type _gcry_sm4_gfni_avx2_cfb_dec,@function;) _gcry_sm4_gfni_avx2_cfb_dec: /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) * %rdx: src (16 blocks) * %rcx: iv */ CFI_STARTPROC(); /* Load input */ vmovdqu (%rcx), RNOTx; vinserti128 $1, (%rdx), RNOT, RA0; vmovdqu (0 * 32 + 16)(%rdx), RA1; vmovdqu (1 * 32 + 16)(%rdx), RA2; vmovdqu (2 * 32 + 16)(%rdx), RA3; vmovdqu (3 * 32 + 16)(%rdx), RB0; vmovdqu (4 * 32 + 16)(%rdx), RB1; vmovdqu (5 * 32 + 16)(%rdx), RB2; vmovdqu (6 * 32 + 16)(%rdx), RB3; /* Update IV */ vmovdqu (7 * 32 + 16)(%rdx), RNOTx; vmovdqu RNOTx, (%rcx); call __sm4_gfni_crypt_blk16; vpxor (0 * 32)(%rdx), RA0, RA0; vpxor (1 * 32)(%rdx), 
RA1, RA1; vpxor (2 * 32)(%rdx), RA2, RA2; vpxor (3 * 32)(%rdx), RA3, RA3; vpxor (4 * 32)(%rdx), RB0, RB0; vpxor (5 * 32)(%rdx), RB1, RB1; vpxor (6 * 32)(%rdx), RB2, RB2; vpxor (7 * 32)(%rdx), RB3, RB3; vmovdqu RA0, (0 * 32)(%rsi); vmovdqu RA1, (1 * 32)(%rsi); vmovdqu RA2, (2 * 32)(%rsi); vmovdqu RA3, (3 * 32)(%rsi); vmovdqu RB0, (4 * 32)(%rsi); vmovdqu RB1, (5 * 32)(%rsi); vmovdqu RB2, (6 * 32)(%rsi); vmovdqu RB3, (7 * 32)(%rsi); vzeroall; ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_sm4_gfni_avx2_cfb_dec,.-_gcry_sm4_gfni_avx2_cfb_dec;) .align 16 .globl _gcry_sm4_gfni_avx2_ocb_enc ELF(.type _gcry_sm4_gfni_avx2_ocb_enc,@function;) _gcry_sm4_gfni_avx2_ocb_enc: /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) * %rdx: src (16 blocks) * %rcx: offset * %r8 : checksum * %r9 : L pointers (void *L[16]) */ CFI_STARTPROC(); subq $(4 * 8), %rsp; CFI_ADJUST_CFA_OFFSET(4 * 8); movq %r10, (0 * 8)(%rsp); movq %r11, (1 * 8)(%rsp); movq %r12, (2 * 8)(%rsp); movq %r13, (3 * 8)(%rsp); CFI_REL_OFFSET(%r10, 0 * 8); CFI_REL_OFFSET(%r11, 1 * 8); CFI_REL_OFFSET(%r12, 2 * 8); CFI_REL_OFFSET(%r13, 3 * 8); vmovdqu (%rcx), RTMP0x; vmovdqu (%r8), RTMP1x; /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ /* Checksum_i = Checksum_{i-1} xor P_i */ /* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i) */ #define OCB_INPUT(n, l0reg, l1reg, yreg) \ vmovdqu (n * 32)(%rdx), yreg; \ vpxor (l0reg), RTMP0x, RNOTx; \ vpxor (l1reg), RNOTx, RTMP0x; \ vinserti128 $1, RTMP0x, RNOT, RNOT; \ vpxor yreg, RTMP1, RTMP1; \ vpxor yreg, RNOT, yreg; \ vmovdqu RNOT, (n * 32)(%rsi); movq (0 * 8)(%r9), %r10; movq (1 * 8)(%r9), %r11; movq (2 * 8)(%r9), %r12; movq (3 * 8)(%r9), %r13; OCB_INPUT(0, %r10, %r11, RA0); OCB_INPUT(1, %r12, %r13, RA1); movq (4 * 8)(%r9), %r10; movq (5 * 8)(%r9), %r11; movq (6 * 8)(%r9), %r12; movq (7 * 8)(%r9), %r13; OCB_INPUT(2, %r10, %r11, RA2); OCB_INPUT(3, %r12, %r13, RA3); movq (8 * 8)(%r9), %r10; movq (9 * 8)(%r9), %r11; movq (10 * 8)(%r9), %r12; movq (11 * 8)(%r9), %r13; OCB_INPUT(4, %r10, %r11, RB0); OCB_INPUT(5, %r12, %r13, RB1); movq (12 * 8)(%r9), %r10; movq (13 * 8)(%r9), %r11; movq (14 * 8)(%r9), %r12; movq (15 * 8)(%r9), %r13; OCB_INPUT(6, %r10, %r11, RB2); OCB_INPUT(7, %r12, %r13, RB3); #undef OCB_INPUT vextracti128 $1, RTMP1, RNOTx; vmovdqu RTMP0x, (%rcx); vpxor RNOTx, RTMP1x, RTMP1x; vmovdqu RTMP1x, (%r8); movq (0 * 8)(%rsp), %r10; movq (1 * 8)(%rsp), %r11; movq (2 * 8)(%rsp), %r12; movq (3 * 8)(%rsp), %r13; CFI_RESTORE(%r10); CFI_RESTORE(%r11); CFI_RESTORE(%r12); CFI_RESTORE(%r13); call __sm4_gfni_crypt_blk16; addq $(4 * 8), %rsp; CFI_ADJUST_CFA_OFFSET(-4 * 8); vpxor (0 * 32)(%rsi), RA0, RA0; vpxor (1 * 32)(%rsi), RA1, RA1; vpxor (2 * 32)(%rsi), RA2, RA2; vpxor (3 * 32)(%rsi), RA3, RA3; vpxor (4 * 32)(%rsi), RB0, RB0; vpxor (5 * 32)(%rsi), RB1, RB1; vpxor (6 * 32)(%rsi), RB2, RB2; vpxor (7 * 32)(%rsi), RB3, RB3; vmovdqu RA0, (0 * 32)(%rsi); vmovdqu RA1, (1 * 32)(%rsi); vmovdqu RA2, (2 * 32)(%rsi); vmovdqu RA3, (3 * 32)(%rsi); vmovdqu RB0, (4 * 32)(%rsi); vmovdqu RB1, (5 * 32)(%rsi); vmovdqu RB2, (6 * 32)(%rsi); vmovdqu RB3, (7 * 32)(%rsi); vzeroall; ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_sm4_gfni_avx2_ocb_enc,.-_gcry_sm4_gfni_avx2_ocb_enc;) .align 16 .globl _gcry_sm4_gfni_avx2_ocb_dec ELF(.type _gcry_sm4_gfni_avx2_ocb_dec,@function;) _gcry_sm4_gfni_avx2_ocb_dec: /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) * %rdx: src (16 blocks) * %rcx: offset * %r8 : checksum * %r9 : L pointers (void *L[16]) */ CFI_STARTPROC(); subq $(4 * 8), %rsp; CFI_ADJUST_CFA_OFFSET(4 * 8); movq %r10, (0 * 8)(%rsp); movq 
%r11, (1 * 8)(%rsp); movq %r12, (2 * 8)(%rsp); movq %r13, (3 * 8)(%rsp); CFI_REL_OFFSET(%r10, 0 * 8); CFI_REL_OFFSET(%r11, 1 * 8); CFI_REL_OFFSET(%r12, 2 * 8); CFI_REL_OFFSET(%r13, 3 * 8); vmovdqu (%rcx), RTMP0x; /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ /* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i) */ #define OCB_INPUT(n, l0reg, l1reg, yreg) \ vmovdqu (n * 32)(%rdx), yreg; \ vpxor (l0reg), RTMP0x, RNOTx; \ vpxor (l1reg), RNOTx, RTMP0x; \ vinserti128 $1, RTMP0x, RNOT, RNOT; \ vpxor yreg, RNOT, yreg; \ vmovdqu RNOT, (n * 32)(%rsi); movq (0 * 8)(%r9), %r10; movq (1 * 8)(%r9), %r11; movq (2 * 8)(%r9), %r12; movq (3 * 8)(%r9), %r13; OCB_INPUT(0, %r10, %r11, RA0); OCB_INPUT(1, %r12, %r13, RA1); movq (4 * 8)(%r9), %r10; movq (5 * 8)(%r9), %r11; movq (6 * 8)(%r9), %r12; movq (7 * 8)(%r9), %r13; OCB_INPUT(2, %r10, %r11, RA2); OCB_INPUT(3, %r12, %r13, RA3); movq (8 * 8)(%r9), %r10; movq (9 * 8)(%r9), %r11; movq (10 * 8)(%r9), %r12; movq (11 * 8)(%r9), %r13; OCB_INPUT(4, %r10, %r11, RB0); OCB_INPUT(5, %r12, %r13, RB1); movq (12 * 8)(%r9), %r10; movq (13 * 8)(%r9), %r11; movq (14 * 8)(%r9), %r12; movq (15 * 8)(%r9), %r13; OCB_INPUT(6, %r10, %r11, RB2); OCB_INPUT(7, %r12, %r13, RB3); #undef OCB_INPUT vmovdqu RTMP0x, (%rcx); movq (0 * 8)(%rsp), %r10; movq (1 * 8)(%rsp), %r11; movq (2 * 8)(%rsp), %r12; movq (3 * 8)(%rsp), %r13; CFI_RESTORE(%r10); CFI_RESTORE(%r11); CFI_RESTORE(%r12); CFI_RESTORE(%r13); call __sm4_gfni_crypt_blk16; addq $(4 * 8), %rsp; CFI_ADJUST_CFA_OFFSET(-4 * 8); vmovdqu (%r8), RTMP1x; vpxor (0 * 32)(%rsi), RA0, RA0; vpxor (1 * 32)(%rsi), RA1, RA1; vpxor (2 * 32)(%rsi), RA2, RA2; vpxor (3 * 32)(%rsi), RA3, RA3; vpxor (4 * 32)(%rsi), RB0, RB0; vpxor (5 * 32)(%rsi), RB1, RB1; vpxor (6 * 32)(%rsi), RB2, RB2; vpxor (7 * 32)(%rsi), RB3, RB3; /* Checksum_i = Checksum_{i-1} xor P_i */ vmovdqu RA0, (0 * 32)(%rsi); vpxor RA0, RTMP1, RTMP1; vmovdqu RA1, (1 * 32)(%rsi); vpxor RA1, RTMP1, RTMP1; vmovdqu RA2, (2 * 32)(%rsi); vpxor RA2, RTMP1, RTMP1; vmovdqu RA3, (3 * 32)(%rsi); vpxor RA3, RTMP1, RTMP1; vmovdqu RB0, (4 * 32)(%rsi); vpxor RB0, RTMP1, RTMP1; vmovdqu RB1, (5 * 32)(%rsi); vpxor RB1, RTMP1, RTMP1; vmovdqu RB2, (6 * 32)(%rsi); vpxor RB2, RTMP1, RTMP1; vmovdqu RB3, (7 * 32)(%rsi); vpxor RB3, RTMP1, RTMP1; vextracti128 $1, RTMP1, RNOTx; vpxor RNOTx, RTMP1x, RTMP1x; vmovdqu RTMP1x, (%r8); vzeroall; ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_sm4_gfni_avx2_ocb_dec,.-_gcry_sm4_gfni_avx2_ocb_dec;) .align 16 .globl _gcry_sm4_gfni_avx2_ocb_auth ELF(.type _gcry_sm4_gfni_avx2_ocb_auth,@function;) _gcry_sm4_gfni_avx2_ocb_auth: /* input: * %rdi: ctx, CTX * %rsi: abuf (16 blocks) * %rdx: offset * %rcx: checksum * %r8 : L pointers (void *L[16]) */ CFI_STARTPROC(); subq $(4 * 8), %rsp; CFI_ADJUST_CFA_OFFSET(4 * 8); movq %r10, (0 * 8)(%rsp); movq %r11, (1 * 8)(%rsp); movq %r12, (2 * 8)(%rsp); movq %r13, (3 * 8)(%rsp); CFI_REL_OFFSET(%r10, 0 * 8); CFI_REL_OFFSET(%r11, 1 * 8); CFI_REL_OFFSET(%r12, 2 * 8); CFI_REL_OFFSET(%r13, 3 * 8); vmovdqu (%rdx), RTMP0x; /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ /* Sum_i = Sum_{i-1} xor ENCIPHER(K, A_i xor Offset_i) */ #define OCB_INPUT(n, l0reg, l1reg, yreg) \ vmovdqu (n * 32)(%rsi), yreg; \ vpxor (l0reg), RTMP0x, RNOTx; \ vpxor (l1reg), RNOTx, RTMP0x; \ vinserti128 $1, RTMP0x, RNOT, RNOT; \ vpxor yreg, RNOT, yreg; movq (0 * 8)(%r8), %r10; movq (1 * 8)(%r8), %r11; movq (2 * 8)(%r8), %r12; movq (3 * 8)(%r8), %r13; OCB_INPUT(0, %r10, %r11, RA0); OCB_INPUT(1, %r12, %r13, RA1); movq (4 * 8)(%r8), %r10; movq (5 * 8)(%r8), %r11; movq (6 * 8)(%r8), %r12; movq 
(7 * 8)(%r8), %r13; OCB_INPUT(2, %r10, %r11, RA2); OCB_INPUT(3, %r12, %r13, RA3); movq (8 * 8)(%r8), %r10; movq (9 * 8)(%r8), %r11; movq (10 * 8)(%r8), %r12; movq (11 * 8)(%r8), %r13; OCB_INPUT(4, %r10, %r11, RB0); OCB_INPUT(5, %r12, %r13, RB1); movq (12 * 8)(%r8), %r10; movq (13 * 8)(%r8), %r11; movq (14 * 8)(%r8), %r12; movq (15 * 8)(%r8), %r13; OCB_INPUT(6, %r10, %r11, RB2); OCB_INPUT(7, %r12, %r13, RB3); #undef OCB_INPUT vmovdqu RTMP0x, (%rdx); movq (0 * 8)(%rsp), %r10; movq (1 * 8)(%rsp), %r11; movq (2 * 8)(%rsp), %r12; movq (3 * 8)(%rsp), %r13; CFI_RESTORE(%r10); CFI_RESTORE(%r11); CFI_RESTORE(%r12); CFI_RESTORE(%r13); call __sm4_gfni_crypt_blk16; addq $(4 * 8), %rsp; CFI_ADJUST_CFA_OFFSET(-4 * 8); vpxor RA0, RB0, RA0; vpxor RA1, RB1, RA1; vpxor RA2, RB2, RA2; vpxor RA3, RB3, RA3; vpxor RA1, RA0, RA0; vpxor RA3, RA2, RA2; vpxor RA2, RA0, RTMP1; vextracti128 $1, RTMP1, RNOTx; vpxor (%rcx), RTMP1x, RTMP1x; vpxor RNOTx, RTMP1x, RTMP1x; vmovdqu RTMP1x, (%rcx); vzeroall; ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_sm4_gfni_avx2_ocb_auth,.-_gcry_sm4_gfni_avx2_ocb_auth;) #endif /*defined(ENABLE_GFNI_SUPPORT) && defined(ENABLE_AVX2_SUPPORT)*/ #endif /*__x86_64*/ diff --git a/cipher/sm4-gfni-avx512-amd64.S b/cipher/sm4-gfni-avx512-amd64.S index 0f9899d4..b095f85d 100644 --- a/cipher/sm4-gfni-avx512-amd64.S +++ b/cipher/sm4-gfni-avx512-amd64.S @@ -1,1758 +1,1760 @@ /* sm4-gfni-avx512-amd64.S - GFNI/AVX512 implementation of SM4 cipher * * Copyright (C) 2022 Jussi Kivilinna * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see <http://www.gnu.org/licenses/>. */ #include <config.h> #ifdef __x86_64 #if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && \ defined(ENABLE_GFNI_SUPPORT) && defined(ENABLE_AVX512_SUPPORT) #include "asm-common-amd64.h" /********************************************************************** helper macros **********************************************************************/ /* Transpose four 32-bit words between 128-bit vectors.
*/ #define transpose_4x4(x0, x1, x2, x3, t1, t2) \ vpunpckhdq x1, x0, t2; \ vpunpckldq x1, x0, x0; \ \ vpunpckldq x3, x2, t1; \ vpunpckhdq x3, x2, x2; \ \ vpunpckhqdq t1, x0, x1; \ vpunpcklqdq t1, x0, x0; \ \ vpunpckhqdq x2, t2, x3; \ vpunpcklqdq x2, t2, x2; /********************************************************************** 4-way && 8-way SM4 with GFNI and AVX512 (128-bit vectors) **********************************************************************/ /* vector registers */ #define RX0 %ymm0 #define RX1 %ymm1 #define RX0x %xmm0 #define RX1x %xmm1 #define RX0z %zmm0 #define RX1z %zmm1 #define RTMP0 %ymm2 #define RTMP1 %ymm3 #define RTMP2 %ymm4 #define RTMP3 %ymm5 #define RTMP4 %ymm6 #define RTMP0x %xmm2 #define RTMP1x %xmm3 #define RTMP2x %xmm4 #define RTMP3x %xmm5 #define RTMP4x %xmm6 #define RTMP0z %zmm2 #define RTMP1z %zmm3 #define RTMP2z %zmm4 #define RTMP3z %zmm5 #define RTMP4z %zmm6 #define RNOT %ymm7 #define RNOTx %xmm7 #define RNOTz %zmm7 #define RA0 %ymm8 #define RA1 %ymm9 #define RA2 %ymm10 #define RA3 %ymm11 #define RA0x %xmm8 #define RA1x %xmm9 #define RA2x %xmm10 #define RA3x %xmm11 #define RA0z %zmm8 #define RA1z %zmm9 #define RA2z %zmm10 #define RA3z %zmm11 #define RB0 %ymm12 #define RB1 %ymm13 #define RB2 %ymm14 #define RB3 %ymm15 #define RB0x %xmm12 #define RB1x %xmm13 #define RB2x %xmm14 #define RB3x %xmm15 #define RB0z %zmm12 #define RB1z %zmm13 #define RB2z %zmm14 #define RB3z %zmm15 -.text +SECTION_RODATA .align 32 /* Affine transform, SM4 field to AES field */ .Lpre_affine_s: .byte 0x52, 0xbc, 0x2d, 0x02, 0x9e, 0x25, 0xac, 0x34 .byte 0x52, 0xbc, 0x2d, 0x02, 0x9e, 0x25, 0xac, 0x34 .byte 0x52, 0xbc, 0x2d, 0x02, 0x9e, 0x25, 0xac, 0x34 .byte 0x52, 0xbc, 0x2d, 0x02, 0x9e, 0x25, 0xac, 0x34 /* Affine transform, AES field to SM4 field */ .Lpost_affine_s: .byte 0x19, 0x8b, 0x6c, 0x1e, 0x51, 0x8e, 0x2d, 0xd7 .byte 0x19, 0x8b, 0x6c, 0x1e, 0x51, 0x8e, 0x2d, 0xd7 .byte 0x19, 0x8b, 0x6c, 0x1e, 0x51, 0x8e, 0x2d, 0xd7 .byte 0x19, 0x8b, 0x6c, 0x1e, 0x51, 0x8e, 0x2d, 0xd7 /* For CTR-mode IV byteswap */ .Lbswap128_mask: .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 /* For input word byte-swap */ .Lbswap32_mask: .byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 .Lcounter2222_lo: .quad 2, 0 .Lcounter4444_lo: .quad 4, 0 .Lcounter8888_lo: .quad 8, 0 .Lcounter16161616_lo: .quad 16, 0 .Lcounter1111_hi: .quad 0, 1 .align 64 .Lcounter0123_lo: .quad 0, 0 .quad 1, 0 .quad 2, 0 .quad 3, 0 +.text + .align 16 .globl _gcry_sm4_gfni_avx512_expand_key ELF(.type _gcry_sm4_gfni_avx512_expand_key,@function;) _gcry_sm4_gfni_avx512_expand_key: /* input: * %rdi: 128-bit key * %rsi: rkey_enc * %rdx: rkey_dec * %rcx: fk array * %r8: ck array */ CFI_STARTPROC(); spec_stop_avx512; vmovd 0*4(%rdi), RA0x; vmovd 1*4(%rdi), RA1x; vmovd 2*4(%rdi), RA2x; vmovd 3*4(%rdi), RA3x; vmovdqa .Lbswap32_mask rRIP, RTMP2x; vpshufb RTMP2x, RA0x, RA0x; vpshufb RTMP2x, RA1x, RA1x; vpshufb RTMP2x, RA2x, RA2x; vpshufb RTMP2x, RA3x, RA3x; vmovd 0*4(%rcx), RB0x; vmovd 1*4(%rcx), RB1x; vmovd 2*4(%rcx), RB2x; vmovd 3*4(%rcx), RB3x; vpxor RB0x, RA0x, RA0x; vpxor RB1x, RA1x, RA1x; vpxor RB2x, RA2x, RA2x; vpxor RB3x, RA3x, RA3x; #define ROUND(round, s0, s1, s2, s3) \ vpxord (4*(round))(%r8) {1to4}, s1, RX0x; \ vpternlogd $0x96, s2, s3, RX0x; /* s1 ^ s2 ^ s3 ^ rk */ \ \ /* sbox, non-linear part */ \ vgf2p8affineqb $0x65, .Lpre_affine_s rRIP, RX0x, RX0x; \ vgf2p8affineinvqb $0xd3, .Lpost_affine_s rRIP, RX0x, RX0x; \ \ /* linear part */ \ vpxor RX0x, s0, s0; /* s0 ^ x */ \ vprold $13, RX0x, RTMP1x; \ vprold $23, 
RX0x, RTMP3x; \ vpternlogd $0x96, RTMP1x, RTMP3x, s0; /* s0 ^ x ^ rol(x,13) ^ rol(x,23) */ leaq (32*4)(%r8), %rax; leaq (32*4)(%rdx), %rdx; .align 16 .Lroundloop_expand_key: leaq (-4*4)(%rdx), %rdx; ROUND(0, RA0x, RA1x, RA2x, RA3x); ROUND(1, RA1x, RA2x, RA3x, RA0x); ROUND(2, RA2x, RA3x, RA0x, RA1x); ROUND(3, RA3x, RA0x, RA1x, RA2x); leaq (4*4)(%r8), %r8; vmovd RA0x, (0*4)(%rsi); vmovd RA1x, (1*4)(%rsi); vmovd RA2x, (2*4)(%rsi); vmovd RA3x, (3*4)(%rsi); vmovd RA0x, (3*4)(%rdx); vmovd RA1x, (2*4)(%rdx); vmovd RA2x, (1*4)(%rdx); vmovd RA3x, (0*4)(%rdx); leaq (4*4)(%rsi), %rsi; cmpq %rax, %r8; jne .Lroundloop_expand_key; #undef ROUND vzeroall; ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_sm4_gfni_avx512_expand_key,.-_gcry_sm4_gfni_avx512_expand_key;) .align 16 ELF(.type sm4_gfni_avx512_crypt_blk1_4,@function;) sm4_gfni_avx512_crypt_blk1_4: /* input: * %rdi: round key array, CTX * %rsi: dst (1..4 blocks) * %rdx: src (1..4 blocks) * %rcx: num blocks (1..4) */ CFI_STARTPROC(); vmovdqu 0*16(%rdx), RA0x; vmovdqa RA0x, RA1x; vmovdqa RA0x, RA2x; vmovdqa RA0x, RA3x; cmpq $2, %rcx; jb .Lblk4_load_input_done; vmovdqu 1*16(%rdx), RA1x; je .Lblk4_load_input_done; vmovdqu 2*16(%rdx), RA2x; cmpq $3, %rcx; je .Lblk4_load_input_done; vmovdqu 3*16(%rdx), RA3x; .Lblk4_load_input_done: vmovdqa .Lbswap32_mask rRIP, RTMP2x; vpshufb RTMP2x, RA0x, RA0x; vpshufb RTMP2x, RA1x, RA1x; vpshufb RTMP2x, RA2x, RA2x; vpshufb RTMP2x, RA3x, RA3x; transpose_4x4(RA0x, RA1x, RA2x, RA3x, RTMP0x, RTMP1x); #define ROUND(round, s0, s1, s2, s3) \ vpxord (4*(round))(%rdi) {1to4}, s1, RX0x; \ vpternlogd $0x96, s2, s3, RX0x; /* s1 ^ s2 ^ s3 ^ rk */ \ \ /* sbox, non-linear part */ \ vgf2p8affineqb $0x65, .Lpre_affine_s rRIP, RX0x, RX0x; \ vgf2p8affineinvqb $0xd3, .Lpost_affine_s rRIP, RX0x, RX0x; \ \ /* linear part */ \ vprold $2, RX0x, RTMP0x; \ vprold $10, RX0x, RTMP1x; \ vprold $18, RX0x, RTMP2x; \ vpternlogd $0x96, RTMP0x, RX0x, s0; /* s0 ^ x ^ rol(x,2) */ \ vprold $24, RX0x, RX0x; \ vpternlogd $0x96, RTMP1x, RTMP2x, RX0x; /* rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \ vpxor RX0x, s0, s0; /* s0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ leaq (32*4)(%rdi), %rax; .align 16 .Lroundloop_blk4: ROUND(0, RA0x, RA1x, RA2x, RA3x); ROUND(1, RA1x, RA2x, RA3x, RA0x); ROUND(2, RA2x, RA3x, RA0x, RA1x); ROUND(3, RA3x, RA0x, RA1x, RA2x); leaq (4*4)(%rdi), %rdi; cmpq %rax, %rdi; jne .Lroundloop_blk4; #undef ROUND vmovdqa .Lbswap128_mask rRIP, RTMP2x; transpose_4x4(RA0x, RA1x, RA2x, RA3x, RTMP0x, RTMP1x); vpshufb RTMP2x, RA0x, RA0x; vpshufb RTMP2x, RA1x, RA1x; vpshufb RTMP2x, RA2x, RA2x; vpshufb RTMP2x, RA3x, RA3x; vmovdqu RA0x, 0*16(%rsi); cmpq $2, %rcx; jb .Lblk4_store_output_done; vmovdqu RA1x, 1*16(%rsi); je .Lblk4_store_output_done; vmovdqu RA2x, 2*16(%rsi); cmpq $3, %rcx; je .Lblk4_store_output_done; vmovdqu RA3x, 3*16(%rsi); .Lblk4_store_output_done: vzeroall; xorl %eax, %eax; ret_spec_stop; CFI_ENDPROC(); ELF(.size sm4_gfni_avx512_crypt_blk1_4,.-sm4_gfni_avx512_crypt_blk1_4;) .align 16 ELF(.type __sm4_gfni_crypt_blk8,@function;) __sm4_gfni_crypt_blk8: /* input: * %rdi: round key array, CTX * RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: eight parallel * ciphertext blocks * output: * RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: eight parallel plaintext * blocks */ CFI_STARTPROC(); vmovdqa .Lbswap32_mask rRIP, RTMP2x; vpshufb RTMP2x, RA0x, RA0x; vpshufb RTMP2x, RA1x, RA1x; vpshufb RTMP2x, RA2x, RA2x; vpshufb RTMP2x, RA3x, RA3x; vpshufb RTMP2x, RB0x, RB0x; vpshufb RTMP2x, RB1x, RB1x; vpshufb RTMP2x, RB2x, RB2x; vpshufb RTMP2x, RB3x, RB3x; 
transpose_4x4(RA0x, RA1x, RA2x, RA3x, RTMP0x, RTMP1x); transpose_4x4(RB0x, RB1x, RB2x, RB3x, RTMP0x, RTMP1x); #define ROUND(round, s0, s1, s2, s3, r0, r1, r2, r3) \ vpbroadcastd (4*(round))(%rdi), RX1x; \ vmovdqa .Lpre_affine_s rRIP, RTMP2x; \ vmovdqa .Lpost_affine_s rRIP, RTMP3x; \ vpxor s1, RX1x, RX0x; \ vpternlogd $0x96, s2, s3, RX0x; /* s1 ^ s2 ^ s3 ^ rk */ \ vpxor r1, RX1x, RX1x; \ vpternlogd $0x96, r2, r3, RX1x; /* r1 ^ r2 ^ r3 ^ rk */ \ \ /* sbox, non-linear part */ \ vgf2p8affineqb $0x65, RTMP2x, RX0x, RX0x; \ vgf2p8affineinvqb $0xd3, RTMP3x, RX0x, RX0x; \ vgf2p8affineqb $0x65, RTMP2x, RX1x, RX1x; \ vgf2p8affineinvqb $0xd3, RTMP3x, RX1x, RX1x; \ \ /* linear part */ \ vprold $2, RX0x, RTMP0x; \ vprold $10, RX0x, RTMP1x; \ vprold $18, RX0x, RTMP2x; \ vpternlogd $0x96, RTMP0x, RX0x, s0; /* s0 ^ x ^ rol(x,2) */ \ vprold $24, RX0x, RX0x; \ vprold $2, RX1x, RTMP3x; \ vprold $10, RX1x, RTMP4x; \ vprold $18, RX1x, RTMP0x; \ vpternlogd $0x96, RTMP3x, RX1x, r0; /* r0 ^ x ^ rol(x,2) */ \ vprold $24, RX1x, RX1x; \ vpternlogd $0x96, RTMP1x, RTMP2x, RX0x; /* rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \ vpternlogd $0x96, RTMP4x, RTMP0x, RX1x; /* rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \ vpxor RX0x, s0, s0; /* s0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \ vpxor RX1x, r0, r0; /* r0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ leaq (32*4)(%rdi), %rax; .align 16 .Lroundloop_blk8: ROUND(0, RA0x, RA1x, RA2x, RA3x, RB0x, RB1x, RB2x, RB3x); ROUND(1, RA1x, RA2x, RA3x, RA0x, RB1x, RB2x, RB3x, RB0x); ROUND(2, RA2x, RA3x, RA0x, RA1x, RB2x, RB3x, RB0x, RB1x); ROUND(3, RA3x, RA0x, RA1x, RA2x, RB3x, RB0x, RB1x, RB2x); leaq (4*4)(%rdi), %rdi; cmpq %rax, %rdi; jne .Lroundloop_blk8; #undef ROUND vmovdqa .Lbswap128_mask rRIP, RTMP2x; transpose_4x4(RA0x, RA1x, RA2x, RA3x, RTMP0x, RTMP1x); transpose_4x4(RB0x, RB1x, RB2x, RB3x, RTMP0x, RTMP1x); vpshufb RTMP2x, RA0x, RA0x; vpshufb RTMP2x, RA1x, RA1x; vpshufb RTMP2x, RA2x, RA2x; vpshufb RTMP2x, RA3x, RA3x; vpshufb RTMP2x, RB0x, RB0x; vpshufb RTMP2x, RB1x, RB1x; vpshufb RTMP2x, RB2x, RB2x; vpshufb RTMP2x, RB3x, RB3x; ret_spec_stop; CFI_ENDPROC(); ELF(.size __sm4_gfni_crypt_blk8,.-__sm4_gfni_crypt_blk8;) .align 16 ELF(.type _gcry_sm4_gfni_avx512_crypt_blk1_8,@function;) _gcry_sm4_gfni_avx512_crypt_blk1_8: /* input: * %rdi: round key array, CTX * %rsi: dst (1..8 blocks) * %rdx: src (1..8 blocks) * %rcx: num blocks (1..8) */ CFI_STARTPROC(); cmpq $5, %rcx; jb sm4_gfni_avx512_crypt_blk1_4; vmovdqu (0 * 16)(%rdx), RA0x; vmovdqu (1 * 16)(%rdx), RA1x; vmovdqu (2 * 16)(%rdx), RA2x; vmovdqu (3 * 16)(%rdx), RA3x; vmovdqu (4 * 16)(%rdx), RB0x; vmovdqa RB0x, RB1x; vmovdqa RB0x, RB2x; vmovdqa RB0x, RB3x; je .Lblk8_load_input_done; vmovdqu (5 * 16)(%rdx), RB1x; cmpq $7, %rcx; jb .Lblk8_load_input_done; vmovdqu (6 * 16)(%rdx), RB2x; je .Lblk8_load_input_done; vmovdqu (7 * 16)(%rdx), RB3x; .Lblk8_load_input_done: call __sm4_gfni_crypt_blk8; cmpq $6, %rcx; vmovdqu RA0x, (0 * 16)(%rsi); vmovdqu RA1x, (1 * 16)(%rsi); vmovdqu RA2x, (2 * 16)(%rsi); vmovdqu RA3x, (3 * 16)(%rsi); vmovdqu RB0x, (4 * 16)(%rsi); jb .Lblk8_store_output_done; vmovdqu RB1x, (5 * 16)(%rsi); je .Lblk8_store_output_done; vmovdqu RB2x, (6 * 16)(%rsi); cmpq $7, %rcx; je .Lblk8_store_output_done; vmovdqu RB3x, (7 * 16)(%rsi); .Lblk8_store_output_done: vzeroall; xorl %eax, %eax; ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_sm4_gfni_avx512_crypt_blk1_8,.-_gcry_sm4_gfni_avx512_crypt_blk1_8;) /********************************************************************** 16-way SM4 with GFNI and AVX512 (256-bit vectors) 
**********************************************************************/ .align 16 ELF(.type __sm4_gfni_crypt_blk16,@function;) __sm4_gfni_crypt_blk16: /* input: * %rdi: ctx, CTX * RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: sixteen parallel * plaintext blocks * output: * RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: sixteen parallel * ciphertext blocks */ CFI_STARTPROC(); vbroadcasti128 .Lbswap32_mask rRIP, RTMP2; vpshufb RTMP2, RA0, RA0; vpshufb RTMP2, RA1, RA1; vpshufb RTMP2, RA2, RA2; vpshufb RTMP2, RA3, RA3; vpshufb RTMP2, RB0, RB0; vpshufb RTMP2, RB1, RB1; vpshufb RTMP2, RB2, RB2; vpshufb RTMP2, RB3, RB3; transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1); transpose_4x4(RB0, RB1, RB2, RB3, RTMP0, RTMP1); #define ROUND(round, s0, s1, s2, s3, r0, r1, r2, r3) \ vpbroadcastd (4*(round))(%rdi), RX1; \ vbroadcasti128 .Lpre_affine_s rRIP, RTMP2; \ vbroadcasti128 .Lpost_affine_s rRIP, RTMP3; \ vpxor s1, RX1, RX0; \ vpternlogd $0x96, s2, s3, RX0; /* s1 ^ s2 ^ s3 ^ rk */ \ vpxor r1, RX1, RX1; \ vpternlogd $0x96, r2, r3, RX1; /* r1 ^ r2 ^ r3 ^ rk */ \ \ /* sbox, non-linear part */ \ vgf2p8affineqb $0x65, RTMP2, RX0, RX0; \ vgf2p8affineinvqb $0xd3, RTMP3, RX0, RX0; \ vgf2p8affineqb $0x65, RTMP2, RX1, RX1; \ vgf2p8affineinvqb $0xd3, RTMP3, RX1, RX1; \ \ /* linear part */ \ vprold $2, RX0, RTMP0; \ vprold $10, RX0, RTMP1; \ vprold $18, RX0, RTMP2; \ vpternlogd $0x96, RTMP0, RX0, s0; /* s0 ^ x ^ rol(x,2) */ \ vprold $24, RX0, RX0; \ vprold $2, RX1, RTMP3; \ vprold $10, RX1, RTMP4; \ vprold $18, RX1, RTMP0; \ vpternlogd $0x96, RTMP3, RX1, r0; /* r0 ^ x ^ rol(x,2) */ \ vprold $24, RX1, RX1; \ vpternlogd $0x96, RTMP1, RTMP2, RX0; /* rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \ vpternlogd $0x96, RTMP4, RTMP0, RX1; /* rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \ vpxor RX0, s0, s0; /* s0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \ vpxor RX1, r0, r0; /* r0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ leaq (32*4)(%rdi), %rax; .align 16 .Lroundloop_blk16: ROUND(0, RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3); ROUND(1, RA1, RA2, RA3, RA0, RB1, RB2, RB3, RB0); ROUND(2, RA2, RA3, RA0, RA1, RB2, RB3, RB0, RB1); ROUND(3, RA3, RA0, RA1, RA2, RB3, RB0, RB1, RB2); leaq (4*4)(%rdi), %rdi; cmpq %rax, %rdi; jne .Lroundloop_blk16; #undef ROUND vbroadcasti128 .Lbswap128_mask rRIP, RTMP2; transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1); transpose_4x4(RB0, RB1, RB2, RB3, RTMP0, RTMP1); vpshufb RTMP2, RA0, RA0; vpshufb RTMP2, RA1, RA1; vpshufb RTMP2, RA2, RA2; vpshufb RTMP2, RA3, RA3; vpshufb RTMP2, RB0, RB0; vpshufb RTMP2, RB1, RB1; vpshufb RTMP2, RB2, RB2; vpshufb RTMP2, RB3, RB3; ret_spec_stop; CFI_ENDPROC(); ELF(.size __sm4_gfni_crypt_blk16,.-__sm4_gfni_crypt_blk16;) .align 16 .globl _gcry_sm4_gfni_avx512_crypt_blk1_16 ELF(.type _gcry_sm4_gfni_avx512_crypt_blk1_16,@function;) _gcry_sm4_gfni_avx512_crypt_blk1_16: /* input: * %rdi: round key array, CTX * %rsi: dst (1..16 blocks) * %rdx: src (1..16 blocks) * %rcx: num blocks (1..16) */ CFI_STARTPROC(); spec_stop_avx512; #define LOAD_INPUT(offset, yreg) \ cmpq $(1 + 2 * (offset)), %rcx; \ jb .Lblk16_load_input_done; \ ja 1f; \ vmovdqu (offset) * 32(%rdx), yreg##x; \ jmp .Lblk16_load_input_done; \ 1: \ vmovdqu (offset) * 32(%rdx), yreg; cmpq $8, %rcx; jbe _gcry_sm4_gfni_avx512_crypt_blk1_8; vmovdqu (0 * 32)(%rdx), RA0; vmovdqu (1 * 32)(%rdx), RA1; vmovdqu (2 * 32)(%rdx), RA2; vmovdqu (3 * 32)(%rdx), RA3; LOAD_INPUT(4, RB0); LOAD_INPUT(5, RB1); LOAD_INPUT(6, RB2); LOAD_INPUT(7, RB3); #undef LOAD_INPUT .Lblk16_load_input_done: call __sm4_gfni_crypt_blk16; #define STORE_OUTPUT(yreg, 
offset) \ cmpq $(1 + 2 * (offset)), %rcx; \ jb .Lblk16_store_output_done; \ ja 1f; \ vmovdqu yreg##x, (offset) * 32(%rsi); \ jmp .Lblk16_store_output_done; \ 1: \ vmovdqu yreg, (offset) * 32(%rsi); vmovdqu RA0, (0 * 32)(%rsi); vmovdqu RA1, (1 * 32)(%rsi); vmovdqu RA2, (2 * 32)(%rsi); vmovdqu RA3, (3 * 32)(%rsi); STORE_OUTPUT(RB0, 4); STORE_OUTPUT(RB1, 5); STORE_OUTPUT(RB2, 6); STORE_OUTPUT(RB3, 7); #undef STORE_OUTPUT .Lblk16_store_output_done: vzeroall; xorl %eax, %eax; ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_sm4_gfni_avx512_crypt_blk1_16,.-_gcry_sm4_gfni_avx512_crypt_blk1_16;) #define add_le128(out, in, lo_counter, hi_counter1) \ vpaddq lo_counter, in, out; \ vpcmpuq $1, lo_counter, out, %k1; \ kaddb %k1, %k1, %k1; \ vpaddq hi_counter1, out, out{%k1}; .align 16 .globl _gcry_sm4_gfni_avx512_ctr_enc ELF(.type _gcry_sm4_gfni_avx512_ctr_enc,@function;) _gcry_sm4_gfni_avx512_ctr_enc: /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) * %rdx: src (16 blocks) * %rcx: iv (big endian, 128bit) */ CFI_STARTPROC(); spec_stop_avx512; vbroadcasti128 .Lbswap128_mask rRIP, RTMP0; vmovdqa .Lcounter0123_lo rRIP, RTMP1; vbroadcasti128 .Lcounter2222_lo rRIP, RTMP2; vbroadcasti128 .Lcounter4444_lo rRIP, RTMP3; vbroadcasti128 .Lcounter8888_lo rRIP, RTMP4; /* load IV and byteswap */ movq 8(%rcx), %r11; bswapq %r11; vbroadcasti128 (%rcx), RB3; vpshufb RTMP0, RB3, RB3; /* check need for handling 64-bit overflow and carry */ cmpq $(0xffffffffffffffff - 16), %r11; ja .Lhandle_ctr_carry_blk16; /* construct IVs */ vpaddq RTMP1, RB3, RA0; /* +0:+1 */ vpaddq RTMP2, RA0, RA1; /* +2:+3 */ vpaddq RTMP3, RA0, RA2; /* +4:+5 */ vpaddq RTMP3, RA1, RA3; /* +6:+7 */ vpaddq RTMP4, RA0, RB0; /* +8... */ vpaddq RTMP4, RA1, RB1; /* +10... */ vpaddq RTMP4, RA2, RB2; /* +12... */ vpaddq RTMP4, RA3, RB3; /* +14... */ /* Update counter */ leaq 16(%r11), %r11; bswapq %r11; movq %r11, 8(%rcx); jmp .Lctr_carry_done_blk16; .Lhandle_ctr_carry_blk16: vbroadcasti128 .Lcounter1111_hi rRIP, RNOT; /* construct IVs */ add_le128(RA0, RB3, RTMP1, RNOT); /* +0:+1 */ add_le128(RA1, RA0, RTMP2, RNOT); /* +2:+3 */ add_le128(RA2, RA0, RTMP3, RNOT); /* +4:+5 */ add_le128(RA3, RA1, RTMP3, RNOT); /* +6:+7 */ add_le128(RB0, RA0, RTMP4, RNOT); /* +8... */ add_le128(RB1, RA1, RTMP4, RNOT); /* +10... */ add_le128(RB2, RA2, RTMP4, RNOT); /* +12... */ add_le128(RB3, RA3, RTMP4, RNOT); /* +14... */ /* Update counter */ addq $16, %r11; movq (%rcx), %r10; bswapq %r10; adcq $0, %r10; bswapq %r11; bswapq %r10; movq %r11, 8(%rcx); movq %r10, (%rcx); .align 16 .Lctr_carry_done_blk16: /* Byte-swap IVs. 
*/ vpshufb RTMP0, RA0, RA0; vpshufb RTMP0, RA1, RA1; vpshufb RTMP0, RA2, RA2; vpshufb RTMP0, RA3, RA3; vpshufb RTMP0, RB0, RB0; vpshufb RTMP0, RB1, RB1; vpshufb RTMP0, RB2, RB2; vpshufb RTMP0, RB3, RB3; call __sm4_gfni_crypt_blk16; vpxor (0 * 32)(%rdx), RA0, RA0; vpxor (1 * 32)(%rdx), RA1, RA1; vpxor (2 * 32)(%rdx), RA2, RA2; vpxor (3 * 32)(%rdx), RA3, RA3; vpxor (4 * 32)(%rdx), RB0, RB0; vpxor (5 * 32)(%rdx), RB1, RB1; vpxor (6 * 32)(%rdx), RB2, RB2; vpxor (7 * 32)(%rdx), RB3, RB3; vmovdqu RA0, (0 * 32)(%rsi); vmovdqu RA1, (1 * 32)(%rsi); vmovdqu RA2, (2 * 32)(%rsi); vmovdqu RA3, (3 * 32)(%rsi); vmovdqu RB0, (4 * 32)(%rsi); vmovdqu RB1, (5 * 32)(%rsi); vmovdqu RB2, (6 * 32)(%rsi); vmovdqu RB3, (7 * 32)(%rsi); vzeroall; kxorq %k1, %k1, %k1; ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_sm4_gfni_avx512_ctr_enc,.-_gcry_sm4_gfni_avx512_ctr_enc;) .align 16 .globl _gcry_sm4_gfni_avx512_cbc_dec ELF(.type _gcry_sm4_gfni_avx512_cbc_dec,@function;) _gcry_sm4_gfni_avx512_cbc_dec: /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) * %rdx: src (16 blocks) * %rcx: iv */ CFI_STARTPROC(); spec_stop_avx512; vmovdqu (0 * 32)(%rdx), RA0; vmovdqu (1 * 32)(%rdx), RA1; vmovdqu (2 * 32)(%rdx), RA2; vmovdqu (3 * 32)(%rdx), RA3; vmovdqu (4 * 32)(%rdx), RB0; vmovdqu (5 * 32)(%rdx), RB1; vmovdqu (6 * 32)(%rdx), RB2; vmovdqu (7 * 32)(%rdx), RB3; call __sm4_gfni_crypt_blk16; vmovdqu (%rcx), RNOTx; vinserti128 $1, (%rdx), RNOT, RNOT; vpxor RNOT, RA0, RA0; vpxor (0 * 32 + 16)(%rdx), RA1, RA1; vpxor (1 * 32 + 16)(%rdx), RA2, RA2; vpxor (2 * 32 + 16)(%rdx), RA3, RA3; vpxor (3 * 32 + 16)(%rdx), RB0, RB0; vpxor (4 * 32 + 16)(%rdx), RB1, RB1; vpxor (5 * 32 + 16)(%rdx), RB2, RB2; vpxor (6 * 32 + 16)(%rdx), RB3, RB3; vmovdqu (7 * 32 + 16)(%rdx), RNOTx; vmovdqu RNOTx, (%rcx); /* store new IV */ vmovdqu RA0, (0 * 32)(%rsi); vmovdqu RA1, (1 * 32)(%rsi); vmovdqu RA2, (2 * 32)(%rsi); vmovdqu RA3, (3 * 32)(%rsi); vmovdqu RB0, (4 * 32)(%rsi); vmovdqu RB1, (5 * 32)(%rsi); vmovdqu RB2, (6 * 32)(%rsi); vmovdqu RB3, (7 * 32)(%rsi); vzeroall; ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_sm4_gfni_avx512_cbc_dec,.-_gcry_sm4_gfni_avx512_cbc_dec;) .align 16 .globl _gcry_sm4_gfni_avx512_cfb_dec ELF(.type _gcry_sm4_gfni_avx512_cfb_dec,@function;) _gcry_sm4_gfni_avx512_cfb_dec: /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) * %rdx: src (16 blocks) * %rcx: iv */ CFI_STARTPROC(); spec_stop_avx512; /* Load input */ vmovdqu (%rcx), RNOTx; vinserti128 $1, (%rdx), RNOT, RA0; vmovdqu (0 * 32 + 16)(%rdx), RA1; vmovdqu (1 * 32 + 16)(%rdx), RA2; vmovdqu (2 * 32 + 16)(%rdx), RA3; vmovdqu (3 * 32 + 16)(%rdx), RB0; vmovdqu (4 * 32 + 16)(%rdx), RB1; vmovdqu (5 * 32 + 16)(%rdx), RB2; vmovdqu (6 * 32 + 16)(%rdx), RB3; /* Update IV */ vmovdqu (7 * 32 + 16)(%rdx), RNOTx; vmovdqu RNOTx, (%rcx); call __sm4_gfni_crypt_blk16; vpxor (0 * 32)(%rdx), RA0, RA0; vpxor (1 * 32)(%rdx), RA1, RA1; vpxor (2 * 32)(%rdx), RA2, RA2; vpxor (3 * 32)(%rdx), RA3, RA3; vpxor (4 * 32)(%rdx), RB0, RB0; vpxor (5 * 32)(%rdx), RB1, RB1; vpxor (6 * 32)(%rdx), RB2, RB2; vpxor (7 * 32)(%rdx), RB3, RB3; vmovdqu RA0, (0 * 32)(%rsi); vmovdqu RA1, (1 * 32)(%rsi); vmovdqu RA2, (2 * 32)(%rsi); vmovdqu RA3, (3 * 32)(%rsi); vmovdqu RB0, (4 * 32)(%rsi); vmovdqu RB1, (5 * 32)(%rsi); vmovdqu RB2, (6 * 32)(%rsi); vmovdqu RB3, (7 * 32)(%rsi); vzeroall; ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_sm4_gfni_avx512_cfb_dec,.-_gcry_sm4_gfni_avx512_cfb_dec;) .align 16 .globl _gcry_sm4_gfni_avx512_ocb_enc ELF(.type _gcry_sm4_gfni_avx512_ocb_enc,@function;) _gcry_sm4_gfni_avx512_ocb_enc: /* input: * %rdi: 
ctx, CTX * %rsi: dst (16 blocks) * %rdx: src (16 blocks) * %rcx: offset * %r8 : checksum * %r9 : L pointers (void *L[16]) */ CFI_STARTPROC(); spec_stop_avx512; subq $(4 * 8), %rsp; CFI_ADJUST_CFA_OFFSET(4 * 8); movq %r10, (0 * 8)(%rsp); movq %r11, (1 * 8)(%rsp); movq %r12, (2 * 8)(%rsp); movq %r13, (3 * 8)(%rsp); CFI_REL_OFFSET(%r10, 0 * 8); CFI_REL_OFFSET(%r11, 1 * 8); CFI_REL_OFFSET(%r12, 2 * 8); CFI_REL_OFFSET(%r13, 3 * 8); vmovdqu (%rcx), RTMP0x; vmovdqu (%r8), RTMP1x; /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ /* Checksum_i = Checksum_{i-1} xor P_i */ /* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i) */ #define OCB_INPUT(n, l0reg, l1reg, yreg, inreg) \ vmovdqu (n * 32)(%rdx), inreg; \ vpxor (l0reg), RTMP0x, RNOTx; \ vpxor (l1reg), RNOTx, RTMP0x; \ vinserti128 $1, RTMP0x, RNOT, RNOT; \ vpxor inreg, RNOT, yreg; \ vmovdqu RNOT, (n * 32)(%rsi); movq (0 * 8)(%r9), %r10; movq (1 * 8)(%r9), %r11; movq (2 * 8)(%r9), %r12; movq (3 * 8)(%r9), %r13; OCB_INPUT(0, %r10, %r11, RA0, RTMP2); OCB_INPUT(1, %r12, %r13, RA1, RTMP3); movq (4 * 8)(%r9), %r10; movq (5 * 8)(%r9), %r11; movq (6 * 8)(%r9), %r12; movq (7 * 8)(%r9), %r13; OCB_INPUT(2, %r10, %r11, RA2, RTMP4); vpternlogd $0x96, RTMP2, RTMP3, RTMP4; OCB_INPUT(3, %r12, %r13, RA3, RX0); movq (8 * 8)(%r9), %r10; movq (9 * 8)(%r9), %r11; movq (10 * 8)(%r9), %r12; movq (11 * 8)(%r9), %r13; OCB_INPUT(4, %r10, %r11, RB0, RX1); OCB_INPUT(5, %r12, %r13, RB1, RTMP2); vpternlogd $0x96, RX0, RX1, RTMP2; movq (12 * 8)(%r9), %r10; movq (13 * 8)(%r9), %r11; movq (14 * 8)(%r9), %r12; movq (15 * 8)(%r9), %r13; OCB_INPUT(6, %r10, %r11, RB2, RTMP3); OCB_INPUT(7, %r12, %r13, RB3, RX0); vpternlogd $0x96, RTMP3, RX0, RTMP1; #undef OCB_INPUT vpternlogd $0x96, RTMP4, RTMP2, RTMP1; vextracti128 $1, RTMP1, RNOTx; vmovdqu RTMP0x, (%rcx); vpxor RNOTx, RTMP1x, RTMP1x; vmovdqu RTMP1x, (%r8); movq (0 * 8)(%rsp), %r10; movq (1 * 8)(%rsp), %r11; movq (2 * 8)(%rsp), %r12; movq (3 * 8)(%rsp), %r13; CFI_RESTORE(%r10); CFI_RESTORE(%r11); CFI_RESTORE(%r12); CFI_RESTORE(%r13); call __sm4_gfni_crypt_blk16; addq $(4 * 8), %rsp; CFI_ADJUST_CFA_OFFSET(-4 * 8); vpxor (0 * 32)(%rsi), RA0, RA0; vpxor (1 * 32)(%rsi), RA1, RA1; vpxor (2 * 32)(%rsi), RA2, RA2; vpxor (3 * 32)(%rsi), RA3, RA3; vpxor (4 * 32)(%rsi), RB0, RB0; vpxor (5 * 32)(%rsi), RB1, RB1; vpxor (6 * 32)(%rsi), RB2, RB2; vpxor (7 * 32)(%rsi), RB3, RB3; vmovdqu RA0, (0 * 32)(%rsi); vmovdqu RA1, (1 * 32)(%rsi); vmovdqu RA2, (2 * 32)(%rsi); vmovdqu RA3, (3 * 32)(%rsi); vmovdqu RB0, (4 * 32)(%rsi); vmovdqu RB1, (5 * 32)(%rsi); vmovdqu RB2, (6 * 32)(%rsi); vmovdqu RB3, (7 * 32)(%rsi); vzeroall; ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_sm4_gfni_avx512_ocb_enc,.-_gcry_sm4_gfni_avx512_ocb_enc;) .align 16 .globl _gcry_sm4_gfni_avx512_ocb_dec ELF(.type _gcry_sm4_gfni_avx512_ocb_dec,@function;) _gcry_sm4_gfni_avx512_ocb_dec: /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) * %rdx: src (16 blocks) * %rcx: offset * %r8 : checksum * %r9 : L pointers (void *L[16]) */ CFI_STARTPROC(); spec_stop_avx512; subq $(4 * 8), %rsp; CFI_ADJUST_CFA_OFFSET(4 * 8); movq %r10, (0 * 8)(%rsp); movq %r11, (1 * 8)(%rsp); movq %r12, (2 * 8)(%rsp); movq %r13, (3 * 8)(%rsp); CFI_REL_OFFSET(%r10, 0 * 8); CFI_REL_OFFSET(%r11, 1 * 8); CFI_REL_OFFSET(%r12, 2 * 8); CFI_REL_OFFSET(%r13, 3 * 8); vmovdqu (%rcx), RTMP0x; /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ /* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i) */ #define OCB_INPUT(n, l0reg, l1reg, yreg) \ vmovdqu (n * 32)(%rdx), yreg; \ vpxor (l0reg), RTMP0x, RNOTx; \ vpxor (l1reg), RNOTx, RTMP0x; \ 
vinserti128 $1, RTMP0x, RNOT, RNOT; \ vpxor yreg, RNOT, yreg; \ vmovdqu RNOT, (n * 32)(%rsi); movq (0 * 8)(%r9), %r10; movq (1 * 8)(%r9), %r11; movq (2 * 8)(%r9), %r12; movq (3 * 8)(%r9), %r13; OCB_INPUT(0, %r10, %r11, RA0); OCB_INPUT(1, %r12, %r13, RA1); movq (4 * 8)(%r9), %r10; movq (5 * 8)(%r9), %r11; movq (6 * 8)(%r9), %r12; movq (7 * 8)(%r9), %r13; OCB_INPUT(2, %r10, %r11, RA2); OCB_INPUT(3, %r12, %r13, RA3); movq (8 * 8)(%r9), %r10; movq (9 * 8)(%r9), %r11; movq (10 * 8)(%r9), %r12; movq (11 * 8)(%r9), %r13; OCB_INPUT(4, %r10, %r11, RB0); OCB_INPUT(5, %r12, %r13, RB1); movq (12 * 8)(%r9), %r10; movq (13 * 8)(%r9), %r11; movq (14 * 8)(%r9), %r12; movq (15 * 8)(%r9), %r13; OCB_INPUT(6, %r10, %r11, RB2); OCB_INPUT(7, %r12, %r13, RB3); #undef OCB_INPUT vmovdqu RTMP0x, (%rcx); movq (0 * 8)(%rsp), %r10; movq (1 * 8)(%rsp), %r11; movq (2 * 8)(%rsp), %r12; movq (3 * 8)(%rsp), %r13; CFI_RESTORE(%r10); CFI_RESTORE(%r11); CFI_RESTORE(%r12); CFI_RESTORE(%r13); call __sm4_gfni_crypt_blk16; addq $(4 * 8), %rsp; CFI_ADJUST_CFA_OFFSET(-4 * 8); vpxor (0 * 32)(%rsi), RA0, RA0; vpxor (1 * 32)(%rsi), RA1, RA1; vpxor (2 * 32)(%rsi), RA2, RA2; vpxor (3 * 32)(%rsi), RA3, RA3; vpxor (4 * 32)(%rsi), RB0, RB0; vpxor (5 * 32)(%rsi), RB1, RB1; vpxor (6 * 32)(%rsi), RB2, RB2; vpxor (7 * 32)(%rsi), RB3, RB3; /* Checksum_i = Checksum_{i-1} xor P_i */ vmovdqu RA0, (0 * 32)(%rsi); vmovdqu RA1, (1 * 32)(%rsi); vmovdqu RA2, (2 * 32)(%rsi); vmovdqu RA3, (3 * 32)(%rsi); vmovdqu RB0, (4 * 32)(%rsi); vmovdqu RB1, (5 * 32)(%rsi); vmovdqu RB2, (6 * 32)(%rsi); vmovdqu RB3, (7 * 32)(%rsi); vpternlogd $0x96, RA0, RA1, RA2; vpternlogd $0x96, RA3, RB0, RB1; vpternlogd $0x96, RB2, RB3, RA2; vpxord RA2, RB1, RTMP1; vextracti128 $1, RTMP1, RNOTx; vpternlogd $0x96, (%r8), RNOTx, RTMP1x; vmovdqu RTMP1x, (%r8); vzeroall; ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_sm4_gfni_avx512_ocb_dec,.-_gcry_sm4_gfni_avx512_ocb_dec;) .align 16 .globl _gcry_sm4_gfni_avx512_ocb_auth ELF(.type _gcry_sm4_gfni_avx512_ocb_auth,@function;) _gcry_sm4_gfni_avx512_ocb_auth: /* input: * %rdi: ctx, CTX * %rsi: abuf (16 blocks) * %rdx: offset * %rcx: checksum * %r8 : L pointers (void *L[16]) */ CFI_STARTPROC(); spec_stop_avx512; subq $(4 * 8), %rsp; CFI_ADJUST_CFA_OFFSET(4 * 8); movq %r10, (0 * 8)(%rsp); movq %r11, (1 * 8)(%rsp); movq %r12, (2 * 8)(%rsp); movq %r13, (3 * 8)(%rsp); CFI_REL_OFFSET(%r10, 0 * 8); CFI_REL_OFFSET(%r11, 1 * 8); CFI_REL_OFFSET(%r12, 2 * 8); CFI_REL_OFFSET(%r13, 3 * 8); vmovdqu (%rdx), RTMP0x; /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ /* Sum_i = Sum_{i-1} xor ENCIPHER(K, A_i xor Offset_i) */ #define OCB_INPUT(n, l0reg, l1reg, yreg) \ vmovdqu (n * 32)(%rsi), yreg; \ vpxor (l0reg), RTMP0x, RNOTx; \ vpxor (l1reg), RNOTx, RTMP0x; \ vinserti128 $1, RTMP0x, RNOT, RNOT; \ vpxor yreg, RNOT, yreg; movq (0 * 8)(%r8), %r10; movq (1 * 8)(%r8), %r11; movq (2 * 8)(%r8), %r12; movq (3 * 8)(%r8), %r13; OCB_INPUT(0, %r10, %r11, RA0); OCB_INPUT(1, %r12, %r13, RA1); movq (4 * 8)(%r8), %r10; movq (5 * 8)(%r8), %r11; movq (6 * 8)(%r8), %r12; movq (7 * 8)(%r8), %r13; OCB_INPUT(2, %r10, %r11, RA2); OCB_INPUT(3, %r12, %r13, RA3); movq (8 * 8)(%r8), %r10; movq (9 * 8)(%r8), %r11; movq (10 * 8)(%r8), %r12; movq (11 * 8)(%r8), %r13; OCB_INPUT(4, %r10, %r11, RB0); OCB_INPUT(5, %r12, %r13, RB1); movq (12 * 8)(%r8), %r10; movq (13 * 8)(%r8), %r11; movq (14 * 8)(%r8), %r12; movq (15 * 8)(%r8), %r13; OCB_INPUT(6, %r10, %r11, RB2); OCB_INPUT(7, %r12, %r13, RB3); #undef OCB_INPUT vmovdqu RTMP0x, (%rdx); movq (0 * 8)(%rsp), %r10; movq (1 * 8)(%rsp), %r11; movq (2 * 
8)(%rsp), %r12; movq (3 * 8)(%rsp), %r13; CFI_RESTORE(%r10); CFI_RESTORE(%r11); CFI_RESTORE(%r12); CFI_RESTORE(%r13); call __sm4_gfni_crypt_blk16; addq $(4 * 8), %rsp; CFI_ADJUST_CFA_OFFSET(-4 * 8); vpternlogd $0x96, RA0, RA1, RA2; vpternlogd $0x96, RA3, RB0, RB1; vpternlogd $0x96, RB2, RB3, RA2; vpxor RA2, RB1, RTMP1; vextracti128 $1, RTMP1, RNOTx; vpternlogd $0x96, (%rcx), RNOTx, RTMP1x; vmovdqu RTMP1x, (%rcx); vzeroall; ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_sm4_gfni_avx512_ocb_auth,.-_gcry_sm4_gfni_avx512_ocb_auth;) /********************************************************************** 32-way SM4 with GFNI and AVX512 (512-bit vectors) **********************************************************************/ .align 16 ELF(.type __sm4_gfni_crypt_blk32,@function;) __sm4_gfni_crypt_blk32: /* input: * %rdi: ctx, CTX * RA0z, RA1z, RA2z, RA3z, RB0z, RB1z, RB2z, RB3z: 32 parallel plaintext blocks * output: * RA0z, RA1z, RA2z, RA3z, RB0z, RB1z, RB2z, RB3z: 32 parallel ciphertext blocks */ CFI_STARTPROC(); vbroadcasti32x4 .Lbswap32_mask rRIP, RTMP2z; vpshufb RTMP2z, RA0z, RA0z; vpshufb RTMP2z, RA1z, RA1z; vpshufb RTMP2z, RA2z, RA2z; vpshufb RTMP2z, RA3z, RA3z; vpshufb RTMP2z, RB0z, RB0z; vpshufb RTMP2z, RB1z, RB1z; vpshufb RTMP2z, RB2z, RB2z; vpshufb RTMP2z, RB3z, RB3z; vbroadcasti32x4 .Lpre_affine_s rRIP, %zmm16; vbroadcasti32x4 .Lpost_affine_s rRIP, %zmm17; transpose_4x4(RA0z, RA1z, RA2z, RA3z, RTMP0z, RTMP1z); transpose_4x4(RB0z, RB1z, RB2z, RB3z, RTMP0z, RTMP1z); #define ROUND(round, s0, s1, s2, s3, r0, r1, r2, r3) \ vpbroadcastd (4*(round))(%rdi), RX1z; \ vpxord s1, RX1z, RX0z; \ vpternlogd $0x96, s2, s3, RX0z; /* s1 ^ s2 ^ s3 ^ rk */ \ vpxord r1, RX1z, RX1z; \ vpternlogd $0x96, r2, r3, RX1z; /* r1 ^ r2 ^ r3 ^ rk */ \ \ /* sbox, non-linear part */ \ vgf2p8affineqb $0x65, %zmm16, RX0z, RX0z; \ vgf2p8affineinvqb $0xd3, %zmm17, RX0z, RX0z; \ vgf2p8affineqb $0x65, %zmm16, RX1z, RX1z; \ vgf2p8affineinvqb $0xd3, %zmm17, RX1z, RX1z; \ \ /* linear part */ \ vprold $2, RX0z, RTMP0z; \ vprold $10, RX0z, RTMP1z; \ vprold $18, RX0z, RTMP2z; \ vpternlogd $0x96, RTMP0z, RX0z, s0; /* s0 ^ x ^ rol(x,2) */ \ vprold $24, RX0z, RX0z; \ vprold $2, RX1z, RTMP3z; \ vprold $10, RX1z, RTMP4z; \ vprold $18, RX1z, RTMP0z; \ vpternlogd $0x96, RTMP3z, RX1z, r0; /* r0 ^ x ^ rol(x,2) */ \ vprold $24, RX1z, RX1z; \ vpternlogd $0x96, RTMP1z, RTMP2z, RX0z; /* rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \ vpternlogd $0x96, RTMP4z, RTMP0z, RX1z; /* rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \ vpxord RX0z, s0, s0; /* s0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \ vpxord RX1z, r0, r0; /* r0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ leaq (32*4)(%rdi), %rax; .align 16 .Lroundloop_blk32: ROUND(0, RA0z, RA1z, RA2z, RA3z, RB0z, RB1z, RB2z, RB3z); ROUND(1, RA1z, RA2z, RA3z, RA0z, RB1z, RB2z, RB3z, RB0z); ROUND(2, RA2z, RA3z, RA0z, RA1z, RB2z, RB3z, RB0z, RB1z); ROUND(3, RA3z, RA0z, RA1z, RA2z, RB3z, RB0z, RB1z, RB2z); leaq (4*4)(%rdi), %rdi; cmpq %rax, %rdi; jne .Lroundloop_blk32; #undef ROUND vbroadcasti32x4 .Lbswap128_mask rRIP, RTMP2z; transpose_4x4(RA0z, RA1z, RA2z, RA3z, RTMP0z, RTMP1z); transpose_4x4(RB0z, RB1z, RB2z, RB3z, RTMP0z, RTMP1z); vpshufb RTMP2z, RA0z, RA0z; vpshufb RTMP2z, RA1z, RA1z; vpshufb RTMP2z, RA2z, RA2z; vpshufb RTMP2z, RA3z, RA3z; vpshufb RTMP2z, RB0z, RB0z; vpshufb RTMP2z, RB1z, RB1z; vpshufb RTMP2z, RB2z, RB2z; vpshufb RTMP2z, RB3z, RB3z; vpxord %zmm16, %zmm16, %zmm16; vpxord %zmm17, %zmm17, %zmm17; ret_spec_stop; CFI_ENDPROC(); ELF(.size __sm4_gfni_crypt_blk32,.-__sm4_gfni_crypt_blk32;) 
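Note (editorial, not part of the patch): the ROUND macros above all implement the standard SM4 step F(s0,s1,s2,s3) = s0 ^ L(tau(s1 ^ s2 ^ s3 ^ rk)), where tau is the byte-wise S-box realized here with the vgf2p8affineqb ($0x65, pre-affine) / vgf2p8affineinvqb ($0xd3, post-affine) pair, and L is the rotation network done with vprold/vpternlogd on the AVX512 paths (vpshufb/vpslld/vpsrld on the AVX2 path). The following minimal C sketch is only for orientation; the names rol32, sm4_L, sm4_key_L, sm4_round and the tau callback are illustrative and not libgcrypt API.

#include <stdint.h>

static inline uint32_t rol32(uint32_t x, unsigned int r)
{
  return (x << r) | (x >> (32 - r));
}

/* Linear layer of the encryption rounds; matches the
   "s0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24)" comments above. */
static uint32_t sm4_L(uint32_t x)
{
  return x ^ rol32(x, 2) ^ rol32(x, 10) ^ rol32(x, 18) ^ rol32(x, 24);
}

/* Linear layer of the key schedule; matches the
   "s0 ^ x ^ rol(x,13) ^ rol(x,23)" comment in the expand_key routines. */
static uint32_t sm4_key_L(uint32_t x)
{
  return x ^ rol32(x, 13) ^ rol32(x, 23);
}

/* One SM4 round on one block's word-sliced state.  The assembly keeps 8,
   16 or 32 such states in parallel, one 32-bit lane per block; 'tau' is
   the byte-wise S-box layer done above with the GFNI affine transforms. */
static uint32_t sm4_round(uint32_t s0, uint32_t s1, uint32_t s2, uint32_t s3,
                          uint32_t rk, uint32_t (*tau)(uint32_t))
{
  return s0 ^ sm4_L(tau(s1 ^ s2 ^ s3 ^ rk));
}

The key-expansion loops use the same skeleton with sm4_key_L in place of sm4_L, which is why expand_key and the bulk functions share one ROUND macro shape.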
.align 16 .globl _gcry_sm4_gfni_avx512_crypt_blk32 ELF(.type _gcry_sm4_gfni_avx512_crypt_blk32,@function;) _gcry_sm4_gfni_avx512_crypt_blk32: /* input: * %rdi: ctx, CTX * %rsi: dst (32 blocks) * %rdx: src (32 blocks) */ CFI_STARTPROC(); spec_stop_avx512; /* Load input */ vmovdqu32 (0 * 64)(%rdx), RA0z; vmovdqu32 (1 * 64)(%rdx), RA1z; vmovdqu32 (2 * 64)(%rdx), RA2z; vmovdqu32 (3 * 64)(%rdx), RA3z; vmovdqu32 (4 * 64)(%rdx), RB0z; vmovdqu32 (5 * 64)(%rdx), RB1z; vmovdqu32 (6 * 64)(%rdx), RB2z; vmovdqu32 (7 * 64)(%rdx), RB3z; call __sm4_gfni_crypt_blk32; vmovdqu32 RA0z, (0 * 64)(%rsi); vmovdqu32 RA1z, (1 * 64)(%rsi); vmovdqu32 RA2z, (2 * 64)(%rsi); vmovdqu32 RA3z, (3 * 64)(%rsi); vmovdqu32 RB0z, (4 * 64)(%rsi); vmovdqu32 RB1z, (5 * 64)(%rsi); vmovdqu32 RB2z, (6 * 64)(%rsi); vmovdqu32 RB3z, (7 * 64)(%rsi); xorl %eax, %eax; vzeroall; ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_sm4_gfni_avx512_crypt_blk32,.-_gcry_sm4_gfni_avx512_crypt_blk32;) .align 16 .globl _gcry_sm4_gfni_avx512_ctr_enc_blk32 ELF(.type _gcry_sm4_gfni_avx512_ctr_enc_blk32,@function;) _gcry_sm4_gfni_avx512_ctr_enc_blk32: /* input: * %rdi: ctx, CTX * %rsi: dst (32 blocks) * %rdx: src (32 blocks) * %rcx: iv (big endian, 128bit) */ CFI_STARTPROC(); spec_stop_avx512; vbroadcasti64x2 .Lbswap128_mask rRIP, RTMP0z; vmovdqa32 .Lcounter0123_lo rRIP, RTMP1z; vbroadcasti64x2 .Lcounter4444_lo rRIP, RTMP2z; vbroadcasti64x2 .Lcounter8888_lo rRIP, RTMP3z; vbroadcasti64x2 .Lcounter16161616_lo rRIP, RTMP4z; /* load IV and byteswap */ movq 8(%rcx), %r11; bswapq %r11; vbroadcasti64x2 (%rcx), RB3z; vpshufb RTMP0z, RB3z, RB3z; /* check need for handling 64-bit overflow and carry */ cmpq $(0xffffffffffffffff - 32), %r11; ja .Lhandle_ctr_carry_blk32; /* construct IVs */ vpaddq RTMP1z, RB3z, RA0z; /* +0:+1:+2:+3 */ vpaddq RTMP2z, RA0z, RA1z; /* +4:+5:+6:+7 */ vpaddq RTMP3z, RA0z, RA2z; /* +8:+9:+10:+11 */ vpaddq RTMP3z, RA1z, RA3z; /* +12:+13:+14:+15 */ vpaddq RTMP4z, RA0z, RB0z; /* +16... */ vpaddq RTMP4z, RA1z, RB1z; /* +20... */ vpaddq RTMP4z, RA2z, RB2z; /* +24... */ vpaddq RTMP4z, RA3z, RB3z; /* +28... */ /* Update counter */ leaq 32(%r11), %r11; bswapq %r11; movq %r11, 8(%rcx); jmp .Lctr_carry_done_blk32; .Lhandle_ctr_carry_blk32: vbroadcasti64x2 .Lcounter1111_hi rRIP, RNOTz; /* construct IVs */ add_le128(RA0z, RB3z, RTMP1z, RNOTz); /* +0:+1:+2:+3 */ add_le128(RA1z, RA0z, RTMP2z, RNOTz); /* +4:+5:+6:+7 */ add_le128(RA2z, RA0z, RTMP3z, RNOTz); /* +8:+9:+10:+11 */ add_le128(RA3z, RA1z, RTMP3z, RNOTz); /* +12:+13:+14:+15 */ add_le128(RB0z, RA0z, RTMP4z, RNOTz); /* +16... */ add_le128(RB1z, RA1z, RTMP4z, RNOTz); /* +20... */ add_le128(RB2z, RA2z, RTMP4z, RNOTz); /* +24... */ add_le128(RB3z, RA3z, RTMP4z, RNOTz); /* +28... */ /* Update counter */ addq $32, %r11; movq (%rcx), %r10; bswapq %r10; adcq $0, %r10; bswapq %r11; bswapq %r10; movq %r11, 8(%rcx); movq %r10, (%rcx); .align 16 .Lctr_carry_done_blk32: /* Byte-swap IVs. 
*/ vpshufb RTMP0z, RA0z, RA0z; vpshufb RTMP0z, RA1z, RA1z; vpshufb RTMP0z, RA2z, RA2z; vpshufb RTMP0z, RA3z, RA3z; vpshufb RTMP0z, RB0z, RB0z; vpshufb RTMP0z, RB1z, RB1z; vpshufb RTMP0z, RB2z, RB2z; vpshufb RTMP0z, RB3z, RB3z; call __sm4_gfni_crypt_blk32; vpxord (0 * 64)(%rdx), RA0z, RA0z; vpxord (1 * 64)(%rdx), RA1z, RA1z; vpxord (2 * 64)(%rdx), RA2z, RA2z; vpxord (3 * 64)(%rdx), RA3z, RA3z; vpxord (4 * 64)(%rdx), RB0z, RB0z; vpxord (5 * 64)(%rdx), RB1z, RB1z; vpxord (6 * 64)(%rdx), RB2z, RB2z; vpxord (7 * 64)(%rdx), RB3z, RB3z; vmovdqu32 RA0z, (0 * 64)(%rsi); vmovdqu32 RA1z, (1 * 64)(%rsi); vmovdqu32 RA2z, (2 * 64)(%rsi); vmovdqu32 RA3z, (3 * 64)(%rsi); vmovdqu32 RB0z, (4 * 64)(%rsi); vmovdqu32 RB1z, (5 * 64)(%rsi); vmovdqu32 RB2z, (6 * 64)(%rsi); vmovdqu32 RB3z, (7 * 64)(%rsi); vzeroall; kxorq %k1, %k1, %k1; ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_sm4_gfni_avx512_ctr_enc_blk32,.-_gcry_sm4_gfni_avx512_ctr_enc_blk32;) .align 16 .globl _gcry_sm4_gfni_avx512_cbc_dec_blk32 ELF(.type _gcry_sm4_gfni_avx512_cbc_dec_blk32,@function;) _gcry_sm4_gfni_avx512_cbc_dec_blk32: /* input: * %rdi: ctx, CTX * %rsi: dst (32 blocks) * %rdx: src (32 blocks) * %rcx: iv */ CFI_STARTPROC(); spec_stop_avx512; vmovdqu32 (0 * 64)(%rdx), RA0z; vmovdqu32 (1 * 64)(%rdx), RA1z; vmovdqu32 (2 * 64)(%rdx), RA2z; vmovdqu32 (3 * 64)(%rdx), RA3z; vmovdqu32 (4 * 64)(%rdx), RB0z; vmovdqu32 (5 * 64)(%rdx), RB1z; vmovdqu32 (6 * 64)(%rdx), RB2z; vmovdqu32 (7 * 64)(%rdx), RB3z; call __sm4_gfni_crypt_blk32; vmovdqu (%rcx), RNOTx; vinserti64x2 $1, (0 * 16)(%rdx), RNOT, RNOT; vinserti64x4 $1, (1 * 16)(%rdx), RNOTz, RNOTz; vpxord RNOTz, RA0z, RA0z; vpxord (0 * 64 + 48)(%rdx), RA1z, RA1z; vpxord (1 * 64 + 48)(%rdx), RA2z, RA2z; vpxord (2 * 64 + 48)(%rdx), RA3z, RA3z; vpxord (3 * 64 + 48)(%rdx), RB0z, RB0z; vpxord (4 * 64 + 48)(%rdx), RB1z, RB1z; vpxord (5 * 64 + 48)(%rdx), RB2z, RB2z; vpxord (6 * 64 + 48)(%rdx), RB3z, RB3z; vmovdqu (7 * 64 + 48)(%rdx), RNOTx; vmovdqu RNOTx, (%rcx); /* store new IV */ vmovdqu32 RA0z, (0 * 64)(%rsi); vmovdqu32 RA1z, (1 * 64)(%rsi); vmovdqu32 RA2z, (2 * 64)(%rsi); vmovdqu32 RA3z, (3 * 64)(%rsi); vmovdqu32 RB0z, (4 * 64)(%rsi); vmovdqu32 RB1z, (5 * 64)(%rsi); vmovdqu32 RB2z, (6 * 64)(%rsi); vmovdqu32 RB3z, (7 * 64)(%rsi); vzeroall; ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_sm4_gfni_avx512_cbc_dec_blk32,.-_gcry_sm4_gfni_avx512_cbc_dec_blk32;) .align 16 .globl _gcry_sm4_gfni_avx512_cfb_dec_blk32 ELF(.type _gcry_sm4_gfni_avx512_cfb_dec_blk32,@function;) _gcry_sm4_gfni_avx512_cfb_dec_blk32: /* input: * %rdi: ctx, CTX * %rsi: dst (32 blocks) * %rdx: src (32 blocks) * %rcx: iv */ CFI_STARTPROC(); spec_stop_avx512; /* Load input */ vmovdqu (%rcx), RA0x; vinserti64x2 $1, (%rdx), RA0, RA0; vinserti64x4 $1, 16(%rdx), RA0z, RA0z; vmovdqu32 (0 * 64 + 48)(%rdx), RA1z; vmovdqu32 (1 * 64 + 48)(%rdx), RA2z; vmovdqu32 (2 * 64 + 48)(%rdx), RA3z; vmovdqu32 (3 * 64 + 48)(%rdx), RB0z; vmovdqu32 (4 * 64 + 48)(%rdx), RB1z; vmovdqu32 (5 * 64 + 48)(%rdx), RB2z; vmovdqu32 (6 * 64 + 48)(%rdx), RB3z; /* Update IV */ vmovdqu (7 * 64 + 48)(%rdx), RNOTx; vmovdqu RNOTx, (%rcx); call __sm4_gfni_crypt_blk32; vpxord (0 * 64)(%rdx), RA0z, RA0z; vpxord (1 * 64)(%rdx), RA1z, RA1z; vpxord (2 * 64)(%rdx), RA2z, RA2z; vpxord (3 * 64)(%rdx), RA3z, RA3z; vpxord (4 * 64)(%rdx), RB0z, RB0z; vpxord (5 * 64)(%rdx), RB1z, RB1z; vpxord (6 * 64)(%rdx), RB2z, RB2z; vpxord (7 * 64)(%rdx), RB3z, RB3z; vmovdqu32 RA0z, (0 * 64)(%rsi); vmovdqu32 RA1z, (1 * 64)(%rsi); vmovdqu32 RA2z, (2 * 64)(%rsi); vmovdqu32 RA3z, (3 * 64)(%rsi); vmovdqu32 RB0z, (4 * 
64)(%rsi); vmovdqu32 RB1z, (5 * 64)(%rsi); vmovdqu32 RB2z, (6 * 64)(%rsi); vmovdqu32 RB3z, (7 * 64)(%rsi); vzeroall; ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_sm4_gfni_avx512_cfb_dec_blk32,.-_gcry_sm4_gfni_avx512_cfb_dec_blk32;) .align 16 .globl _gcry_sm4_gfni_avx512_ocb_enc_blk32 ELF(.type _gcry_sm4_gfni_avx512_ocb_enc_blk32,@function;) _gcry_sm4_gfni_avx512_ocb_enc_blk32: /* input: * %rdi: ctx, CTX * %rsi: dst (32 blocks) * %rdx: src (32 blocks) * %rcx: offset * %r8 : checksum * %r9 : L pointers (void *L[32]) */ CFI_STARTPROC(); spec_stop_avx512; subq $(5 * 8), %rsp; CFI_ADJUST_CFA_OFFSET(5 * 8); movq %r12, (0 * 8)(%rsp); movq %r13, (1 * 8)(%rsp); movq %r14, (2 * 8)(%rsp); movq %r15, (3 * 8)(%rsp); movq %rbx, (4 * 8)(%rsp); CFI_REL_OFFSET(%r12, 0 * 8); CFI_REL_OFFSET(%r13, 1 * 8); CFI_REL_OFFSET(%r14, 2 * 8); CFI_REL_OFFSET(%r15, 3 * 8); CFI_REL_OFFSET(%rbx, 4 * 8); vmovdqu (%rcx), RTMP0x; /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ /* Checksum_i = Checksum_{i-1} xor P_i */ /* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i) */ #define OCB_INPUT(n, l0reg, l1reg, l2reg, l3reg, zreg, zplain) \ vmovdqu32 (n * 64)(%rdx), zplain; \ vpxor (l0reg), RTMP0x, RNOTx; \ vpxor (l1reg), RNOTx, RTMP0x; \ vinserti64x2 $1, RTMP0x, RNOT, RNOT; \ vpxor (l2reg), RTMP0x, RTMP0x; \ vinserti64x2 $2, RTMP0x, RNOTz, RNOTz; \ vpxor (l3reg), RTMP0x, RTMP0x; \ vinserti64x2 $3, RTMP0x, RNOTz, RNOTz; \ vpxord zplain, RNOTz, zreg; \ vmovdqu32 RNOTz, (n * 64)(%rsi); #define OCB_LOAD_PTRS(n) \ movq ((n * 4 * 8) + (0 * 8))(%r9), %r10; \ movq ((n * 4 * 8) + (1 * 8))(%r9), %r11; \ movq ((n * 4 * 8) + (2 * 8))(%r9), %r12; \ movq ((n * 4 * 8) + (3 * 8))(%r9), %r13; \ movq ((n * 4 * 8) + (4 * 8))(%r9), %r14; \ movq ((n * 4 * 8) + (5 * 8))(%r9), %r15; \ movq ((n * 4 * 8) + (6 * 8))(%r9), %rax; \ movq ((n * 4 * 8) + (7 * 8))(%r9), %rbx; OCB_LOAD_PTRS(0); OCB_INPUT(0, %r10, %r11, %r12, %r13, RA0z, RTMP1z); OCB_INPUT(1, %r14, %r15, %rax, %rbx, RA1z, RTMP2z); OCB_LOAD_PTRS(2); OCB_INPUT(2, %r10, %r11, %r12, %r13, RA2z, RTMP3z); vpternlogd $0x96, RTMP1z, RTMP2z, RTMP3z; OCB_INPUT(3, %r14, %r15, %rax, %rbx, RA3z, RTMP4z); OCB_LOAD_PTRS(4); OCB_INPUT(4, %r10, %r11, %r12, %r13, RB0z, RX0z); OCB_INPUT(5, %r14, %r15, %rax, %rbx, RB1z, RX1z); vpternlogd $0x96, RTMP4z, RX0z, RX1z; OCB_LOAD_PTRS(6); OCB_INPUT(6, %r10, %r11, %r12, %r13, RB2z, RTMP4z); OCB_INPUT(7, %r14, %r15, %rax, %rbx, RB3z, RX0z); #undef OCB_LOAD_PTRS #undef OCB_INPUT vpternlogd $0x96, RTMP3z, RTMP4z, RX0z; vpxord RX1z, RX0z, RNOTz; vextracti64x4 $1, RNOTz, RTMP1; vpxor RTMP1, RNOT, RNOT; vextracti128 $1, RNOT, RTMP1x; vpternlogd $0x96, (%r8), RTMP1x, RNOTx; movq (0 * 8)(%rsp), %r12; movq (1 * 8)(%rsp), %r13; movq (2 * 8)(%rsp), %r14; movq (3 * 8)(%rsp), %r15; movq (4 * 8)(%rsp), %rbx; CFI_RESTORE(%r12); CFI_RESTORE(%r13); CFI_RESTORE(%r14); CFI_RESTORE(%r15); CFI_RESTORE(%rbx); vmovdqu RTMP0x, (%rcx); vmovdqu RNOTx, (%r8); call __sm4_gfni_crypt_blk32; addq $(5 * 8), %rsp; CFI_ADJUST_CFA_OFFSET(-5 * 8); vpxord (0 * 64)(%rsi), RA0z, RA0z; vpxord (1 * 64)(%rsi), RA1z, RA1z; vpxord (2 * 64)(%rsi), RA2z, RA2z; vpxord (3 * 64)(%rsi), RA3z, RA3z; vpxord (4 * 64)(%rsi), RB0z, RB0z; vpxord (5 * 64)(%rsi), RB1z, RB1z; vpxord (6 * 64)(%rsi), RB2z, RB2z; vpxord (7 * 64)(%rsi), RB3z, RB3z; vmovdqu32 RA0z, (0 * 64)(%rsi); vmovdqu32 RA1z, (1 * 64)(%rsi); vmovdqu32 RA2z, (2 * 64)(%rsi); vmovdqu32 RA3z, (3 * 64)(%rsi); vmovdqu32 RB0z, (4 * 64)(%rsi); vmovdqu32 RB1z, (5 * 64)(%rsi); vmovdqu32 RB2z, (6 * 64)(%rsi); vmovdqu32 RB3z, (7 * 64)(%rsi); vzeroall; ret_spec_stop; 
CFI_ENDPROC(); ELF(.size _gcry_sm4_gfni_avx512_ocb_enc_blk32,.-_gcry_sm4_gfni_avx512_ocb_enc_blk32;) .align 16 .globl _gcry_sm4_gfni_avx512_ocb_dec_blk32 ELF(.type _gcry_sm4_gfni_avx512_ocb_dec_blk32,@function;) _gcry_sm4_gfni_avx512_ocb_dec_blk32: /* input: * %rdi: ctx, CTX * %rsi: dst (32 blocks) * %rdx: src (32 blocks) * %rcx: offset * %r8 : checksum * %r9 : L pointers (void *L[32]) */ CFI_STARTPROC(); spec_stop_avx512; subq $(5 * 8), %rsp; CFI_ADJUST_CFA_OFFSET(5 * 8); movq %r12, (0 * 8)(%rsp); movq %r13, (1 * 8)(%rsp); movq %r14, (2 * 8)(%rsp); movq %r15, (3 * 8)(%rsp); movq %rbx, (4 * 8)(%rsp); CFI_REL_OFFSET(%r12, 0 * 8); CFI_REL_OFFSET(%r13, 1 * 8); CFI_REL_OFFSET(%r14, 2 * 8); CFI_REL_OFFSET(%r15, 3 * 8); CFI_REL_OFFSET(%rbx, 4 * 8); vmovdqu (%rcx), RTMP0x; /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ /* C_i = Offset_i xor DECIPHER(K, P_i xor Offset_i) */ #define OCB_INPUT(n, l0reg, l1reg, l2reg, l3reg, zreg) \ vmovdqu32 (n * 64)(%rdx), RTMP1z; \ vpxor (l0reg), RTMP0x, RNOTx; \ vpxor (l1reg), RNOTx, RTMP0x; \ vinserti64x2 $1, RTMP0x, RNOT, RNOT; \ vpxor (l2reg), RTMP0x, RTMP0x; \ vinserti64x2 $2, RTMP0x, RNOTz, RNOTz; \ vpxor (l3reg), RTMP0x, RTMP0x; \ vinserti64x2 $3, RTMP0x, RNOTz, RNOTz; \ vpxord RTMP1z, RNOTz, zreg; \ vmovdqu32 RNOTz, (n * 64)(%rsi); #define OCB_LOAD_PTRS(n) \ movq ((n * 4 * 8) + (0 * 8))(%r9), %r10; \ movq ((n * 4 * 8) + (1 * 8))(%r9), %r11; \ movq ((n * 4 * 8) + (2 * 8))(%r9), %r12; \ movq ((n * 4 * 8) + (3 * 8))(%r9), %r13; \ movq ((n * 4 * 8) + (4 * 8))(%r9), %r14; \ movq ((n * 4 * 8) + (5 * 8))(%r9), %r15; \ movq ((n * 4 * 8) + (6 * 8))(%r9), %rax; \ movq ((n * 4 * 8) + (7 * 8))(%r9), %rbx; OCB_LOAD_PTRS(0); OCB_INPUT(0, %r10, %r11, %r12, %r13, RA0z); OCB_INPUT(1, %r14, %r15, %rax, %rbx, RA1z); OCB_LOAD_PTRS(2); OCB_INPUT(2, %r10, %r11, %r12, %r13, RA2z); OCB_INPUT(3, %r14, %r15, %rax, %rbx, RA3z); OCB_LOAD_PTRS(4); OCB_INPUT(4, %r10, %r11, %r12, %r13, RB0z); OCB_INPUT(5, %r14, %r15, %rax, %rbx, RB1z); OCB_LOAD_PTRS(6); OCB_INPUT(6, %r10, %r11, %r12, %r13, RB2z); OCB_INPUT(7, %r14, %r15, %rax, %rbx, RB3z); #undef OCB_LOAD_PTRS #undef OCB_INPUT movq (0 * 8)(%rsp), %r12; movq (1 * 8)(%rsp), %r13; movq (2 * 8)(%rsp), %r14; movq (3 * 8)(%rsp), %r15; movq (4 * 8)(%rsp), %rbx; CFI_RESTORE(%r12); CFI_RESTORE(%r13); CFI_RESTORE(%r14); CFI_RESTORE(%r15); CFI_RESTORE(%rbx); vmovdqu RTMP0x, (%rcx); call __sm4_gfni_crypt_blk32; addq $(5 * 8), %rsp; CFI_ADJUST_CFA_OFFSET(-5 * 8); vpxord (0 * 64)(%rsi), RA0z, RA0z; vpxord (1 * 64)(%rsi), RA1z, RA1z; vpxord (2 * 64)(%rsi), RA2z, RA2z; vpxord (3 * 64)(%rsi), RA3z, RA3z; vpxord (4 * 64)(%rsi), RB0z, RB0z; vpxord (5 * 64)(%rsi), RB1z, RB1z; vpxord (6 * 64)(%rsi), RB2z, RB2z; vpxord (7 * 64)(%rsi), RB3z, RB3z; vmovdqu32 RA0z, (0 * 64)(%rsi); vmovdqu32 RA1z, (1 * 64)(%rsi); vmovdqu32 RA2z, (2 * 64)(%rsi); vmovdqu32 RA3z, (3 * 64)(%rsi); vmovdqu32 RB0z, (4 * 64)(%rsi); vmovdqu32 RB1z, (5 * 64)(%rsi); vmovdqu32 RB2z, (6 * 64)(%rsi); vmovdqu32 RB3z, (7 * 64)(%rsi); /* Checksum_i = Checksum_{i-1} xor C_i */ vpternlogd $0x96, RA0z, RA1z, RA2z; vpternlogd $0x96, RA3z, RB0z, RB1z; vpternlogd $0x96, RB2z, RB3z, RA2z; vpxord RA2z, RB1z, RTMP1z; vextracti64x4 $1, RTMP1z, RNOT; vpxor RNOT, RTMP1, RTMP1; vextracti128 $1, RTMP1, RNOTx; vpternlogd $0x96, (%r8), RNOTx, RTMP1x; vmovdqu RTMP1x, (%r8); vzeroall; ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_sm4_gfni_avx512_ocb_dec_blk32,.-_gcry_sm4_gfni_avx512_ocb_dec_blk32;) #endif /*defined(ENABLE_GFNI_SUPPORT) && defined(ENABLE_AVX512_SUPPORT)*/ #endif /*__x86_64*/ diff --git 
a/cipher/twofish-avx2-amd64.S b/cipher/twofish-avx2-amd64.S index 6c6729c0..8a6aae19 100644 --- a/cipher/twofish-avx2-amd64.S +++ b/cipher/twofish-avx2-amd64.S @@ -1,1094 +1,1097 @@ /* twofish-avx2-amd64.S - AMD64/AVX2 assembly implementation of Twofish cipher * * Copyright (C) 2013-2017 Jussi Kivilinna * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . */ #ifdef __x86_64 #include #if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && defined(USE_TWOFISH) && \ defined(ENABLE_AVX2_SUPPORT) #include "asm-common-amd64.h" .text /* structure of TWOFISH_context: */ #define s0 0 #define s1 ((s0) + 4 * 256) #define s2 ((s1) + 4 * 256) #define s3 ((s2) + 4 * 256) #define w ((s3) + 4 * 256) #define k ((w) + 4 * 8) /* register macros */ #define CTX %rdi #define RROUND %rbp #define RROUNDd %ebp #define RS0 CTX #define RS1 %r8 #define RS2 %r9 #define RS3 %r10 #define RK %r11 #define RW %rax #define RA0 %ymm8 #define RB0 %ymm9 #define RC0 %ymm10 #define RD0 %ymm11 #define RA1 %ymm12 #define RB1 %ymm13 #define RC1 %ymm14 #define RD1 %ymm15 /* temp regs */ #define RX0 %ymm0 #define RY0 %ymm1 #define RX1 %ymm2 #define RY1 %ymm3 #define RT0 %ymm4 #define RIDX %ymm5 #define RX0x %xmm0 #define RY0x %xmm1 #define RX1x %xmm2 #define RY1x %xmm3 #define RT0x %xmm4 #define RIDXx %xmm5 #define RTMP0 RX0 #define RTMP0x RX0x #define RTMP1 RX1 #define RTMP1x RX1x #define RTMP2 RY0 #define RTMP2x RY0x #define RTMP3 RY1 #define RTMP3x RY1x #define RTMP4 RIDX #define RTMP4x RIDXx /* vpgatherdd mask and '-1' */ #define RNOT %ymm6 #define RNOTx %xmm6 /* byte mask, (-1 >> 24) */ #define RBYTE %ymm7 /********************************************************************** 16-way AVX2 twofish **********************************************************************/ #define init_round_constants() \ vpcmpeqd RNOT, RNOT, RNOT; \ leaq k(CTX), RK; \ leaq w(CTX), RW; \ vpsrld $24, RNOT, RBYTE; \ leaq s1(CTX), RS1; \ leaq s2(CTX), RS2; \ leaq s3(CTX), RS3; \ #define g16(ab, rs0, rs1, rs2, rs3, xy) \ vpand RBYTE, ab ## 0, RIDX; \ vpgatherdd RNOT, (rs0, RIDX, 4), xy ## 0; \ vpcmpeqd RNOT, RNOT, RNOT; \ \ vpand RBYTE, ab ## 1, RIDX; \ vpgatherdd RNOT, (rs0, RIDX, 4), xy ## 1; \ vpcmpeqd RNOT, RNOT, RNOT; \ \ vpsrld $8, ab ## 0, RIDX; \ vpand RBYTE, RIDX, RIDX; \ vpgatherdd RNOT, (rs1, RIDX, 4), RT0; \ vpcmpeqd RNOT, RNOT, RNOT; \ vpxor RT0, xy ## 0, xy ## 0; \ \ vpsrld $8, ab ## 1, RIDX; \ vpand RBYTE, RIDX, RIDX; \ vpgatherdd RNOT, (rs1, RIDX, 4), RT0; \ vpcmpeqd RNOT, RNOT, RNOT; \ vpxor RT0, xy ## 1, xy ## 1; \ \ vpsrld $16, ab ## 0, RIDX; \ vpand RBYTE, RIDX, RIDX; \ vpgatherdd RNOT, (rs2, RIDX, 4), RT0; \ vpcmpeqd RNOT, RNOT, RNOT; \ vpxor RT0, xy ## 0, xy ## 0; \ \ vpsrld $16, ab ## 1, RIDX; \ vpand RBYTE, RIDX, RIDX; \ vpgatherdd RNOT, (rs2, RIDX, 4), RT0; \ vpcmpeqd RNOT, RNOT, RNOT; \ vpxor RT0, xy ## 1, xy ## 1; \ \ vpsrld $24, ab ## 0, RIDX; \ vpgatherdd RNOT, (rs3, RIDX, 4), RT0; \ 
vpcmpeqd RNOT, RNOT, RNOT; \ vpxor RT0, xy ## 0, xy ## 0; \ \ vpsrld $24, ab ## 1, RIDX; \ vpgatherdd RNOT, (rs3, RIDX, 4), RT0; \ vpcmpeqd RNOT, RNOT, RNOT; \ vpxor RT0, xy ## 1, xy ## 1; #define g1_16(a, x) \ g16(a, RS0, RS1, RS2, RS3, x); #define g2_16(b, y) \ g16(b, RS1, RS2, RS3, RS0, y); #define encrypt_round_end16(a, b, c, d, nk, r) \ vpaddd RY0, RX0, RX0; \ vpaddd RX0, RY0, RY0; \ vpbroadcastd ((nk)+((r)*8))(RK), RT0; \ vpaddd RT0, RX0, RX0; \ vpbroadcastd 4+((nk)+((r)*8))(RK), RT0; \ vpaddd RT0, RY0, RY0; \ \ vpxor RY0, d ## 0, d ## 0; \ \ vpxor RX0, c ## 0, c ## 0; \ vpsrld $1, c ## 0, RT0; \ vpslld $31, c ## 0, c ## 0; \ vpor RT0, c ## 0, c ## 0; \ \ vpaddd RY1, RX1, RX1; \ vpaddd RX1, RY1, RY1; \ vpbroadcastd ((nk)+((r)*8))(RK), RT0; \ vpaddd RT0, RX1, RX1; \ vpbroadcastd 4+((nk)+((r)*8))(RK), RT0; \ vpaddd RT0, RY1, RY1; \ \ vpxor RY1, d ## 1, d ## 1; \ \ vpxor RX1, c ## 1, c ## 1; \ vpsrld $1, c ## 1, RT0; \ vpslld $31, c ## 1, c ## 1; \ vpor RT0, c ## 1, c ## 1; \ #define encrypt_round16(a, b, c, d, nk, r) \ g2_16(b, RY); \ \ vpslld $1, b ## 0, RT0; \ vpsrld $31, b ## 0, b ## 0; \ vpor RT0, b ## 0, b ## 0; \ \ vpslld $1, b ## 1, RT0; \ vpsrld $31, b ## 1, b ## 1; \ vpor RT0, b ## 1, b ## 1; \ \ g1_16(a, RX); \ \ encrypt_round_end16(a, b, c, d, nk, r); #define encrypt_round_first16(a, b, c, d, nk, r) \ vpslld $1, d ## 0, RT0; \ vpsrld $31, d ## 0, d ## 0; \ vpor RT0, d ## 0, d ## 0; \ \ vpslld $1, d ## 1, RT0; \ vpsrld $31, d ## 1, d ## 1; \ vpor RT0, d ## 1, d ## 1; \ \ encrypt_round16(a, b, c, d, nk, r); #define encrypt_round_last16(a, b, c, d, nk, r) \ g2_16(b, RY); \ \ g1_16(a, RX); \ \ encrypt_round_end16(a, b, c, d, nk, r); #define decrypt_round_end16(a, b, c, d, nk, r) \ vpaddd RY0, RX0, RX0; \ vpaddd RX0, RY0, RY0; \ vpbroadcastd ((nk)+((r)*8))(RK), RT0; \ vpaddd RT0, RX0, RX0; \ vpbroadcastd 4+((nk)+((r)*8))(RK), RT0; \ vpaddd RT0, RY0, RY0; \ \ vpxor RX0, c ## 0, c ## 0; \ \ vpxor RY0, d ## 0, d ## 0; \ vpsrld $1, d ## 0, RT0; \ vpslld $31, d ## 0, d ## 0; \ vpor RT0, d ## 0, d ## 0; \ \ vpaddd RY1, RX1, RX1; \ vpaddd RX1, RY1, RY1; \ vpbroadcastd ((nk)+((r)*8))(RK), RT0; \ vpaddd RT0, RX1, RX1; \ vpbroadcastd 4+((nk)+((r)*8))(RK), RT0; \ vpaddd RT0, RY1, RY1; \ \ vpxor RX1, c ## 1, c ## 1; \ \ vpxor RY1, d ## 1, d ## 1; \ vpsrld $1, d ## 1, RT0; \ vpslld $31, d ## 1, d ## 1; \ vpor RT0, d ## 1, d ## 1; #define decrypt_round16(a, b, c, d, nk, r) \ g1_16(a, RX); \ \ vpslld $1, a ## 0, RT0; \ vpsrld $31, a ## 0, a ## 0; \ vpor RT0, a ## 0, a ## 0; \ \ vpslld $1, a ## 1, RT0; \ vpsrld $31, a ## 1, a ## 1; \ vpor RT0, a ## 1, a ## 1; \ \ g2_16(b, RY); \ \ decrypt_round_end16(a, b, c, d, nk, r); #define decrypt_round_first16(a, b, c, d, nk, r) \ vpslld $1, c ## 0, RT0; \ vpsrld $31, c ## 0, c ## 0; \ vpor RT0, c ## 0, c ## 0; \ \ vpslld $1, c ## 1, RT0; \ vpsrld $31, c ## 1, c ## 1; \ vpor RT0, c ## 1, c ## 1; \ \ decrypt_round16(a, b, c, d, nk, r) #define decrypt_round_last16(a, b, c, d, nk, r) \ g1_16(a, RX); \ \ g2_16(b, RY); \ \ decrypt_round_end16(a, b, c, d, nk, r); #define encrypt_cycle16(r) \ encrypt_round16(RA, RB, RC, RD, 0, r); \ encrypt_round16(RC, RD, RA, RB, 8, r); #define encrypt_cycle_first16(r) \ encrypt_round_first16(RA, RB, RC, RD, 0, r); \ encrypt_round16(RC, RD, RA, RB, 8, r); #define encrypt_cycle_last16(r) \ encrypt_round16(RA, RB, RC, RD, 0, r); \ encrypt_round_last16(RC, RD, RA, RB, 8, r); #define decrypt_cycle16(r) \ decrypt_round16(RC, RD, RA, RB, 8, r); \ decrypt_round16(RA, RB, RC, RD, 0, r); #define decrypt_cycle_first16(r) \ 
decrypt_round_first16(RC, RD, RA, RB, 8, r); \ decrypt_round16(RA, RB, RC, RD, 0, r); #define decrypt_cycle_last16(r) \ decrypt_round16(RC, RD, RA, RB, 8, r); \ decrypt_round_last16(RA, RB, RC, RD, 0, r); #define transpose_4x4(x0,x1,x2,x3,t1,t2) \ vpunpckhdq x1, x0, t2; \ vpunpckldq x1, x0, x0; \ \ vpunpckldq x3, x2, t1; \ vpunpckhdq x3, x2, x2; \ \ vpunpckhqdq t1, x0, x1; \ vpunpcklqdq t1, x0, x0; \ \ vpunpckhqdq x2, t2, x3; \ vpunpcklqdq x2, t2, x2; #define read_blocks8(offs,a,b,c,d) \ vmovdqu 16*offs(RIO), a; \ vmovdqu 16*offs+32(RIO), b; \ vmovdqu 16*offs+64(RIO), c; \ vmovdqu 16*offs+96(RIO), d; \ \ transpose_4x4(a, b, c, d, RX0, RY0); #define write_blocks8(offs,a,b,c,d) \ transpose_4x4(a, b, c, d, RX0, RY0); \ \ vmovdqu a, 16*offs(RIO); \ vmovdqu b, 16*offs+32(RIO); \ vmovdqu c, 16*offs+64(RIO); \ vmovdqu d, 16*offs+96(RIO); #define inpack_enc8(a,b,c,d) \ vpbroadcastd 4*0(RW), RT0; \ vpxor RT0, a, a; \ \ vpbroadcastd 4*1(RW), RT0; \ vpxor RT0, b, b; \ \ vpbroadcastd 4*2(RW), RT0; \ vpxor RT0, c, c; \ \ vpbroadcastd 4*3(RW), RT0; \ vpxor RT0, d, d; #define outunpack_enc8(a,b,c,d) \ vpbroadcastd 4*4(RW), RX0; \ vpbroadcastd 4*5(RW), RY0; \ vpxor RX0, c, RX0; \ vpxor RY0, d, RY0; \ \ vpbroadcastd 4*6(RW), RT0; \ vpxor RT0, a, c; \ vpbroadcastd 4*7(RW), RT0; \ vpxor RT0, b, d; \ \ vmovdqa RX0, a; \ vmovdqa RY0, b; #define inpack_dec8(a,b,c,d) \ vpbroadcastd 4*4(RW), RX0; \ vpbroadcastd 4*5(RW), RY0; \ vpxor RX0, a, RX0; \ vpxor RY0, b, RY0; \ \ vpbroadcastd 4*6(RW), RT0; \ vpxor RT0, c, a; \ vpbroadcastd 4*7(RW), RT0; \ vpxor RT0, d, b; \ \ vmovdqa RX0, c; \ vmovdqa RY0, d; #define outunpack_dec8(a,b,c,d) \ vpbroadcastd 4*0(RW), RT0; \ vpxor RT0, a, a; \ \ vpbroadcastd 4*1(RW), RT0; \ vpxor RT0, b, b; \ \ vpbroadcastd 4*2(RW), RT0; \ vpxor RT0, c, c; \ \ vpbroadcastd 4*3(RW), RT0; \ vpxor RT0, d, d; #define transpose4x4_16(a,b,c,d) \ transpose_4x4(a ## 0, b ## 0, c ## 0, d ## 0, RX0, RY0); \ transpose_4x4(a ## 1, b ## 1, c ## 1, d ## 1, RX0, RY0); #define inpack_enc16(a,b,c,d) \ inpack_enc8(a ## 0, b ## 0, c ## 0, d ## 0); \ inpack_enc8(a ## 1, b ## 1, c ## 1, d ## 1); #define outunpack_enc16(a,b,c,d) \ outunpack_enc8(a ## 0, b ## 0, c ## 0, d ## 0); \ outunpack_enc8(a ## 1, b ## 1, c ## 1, d ## 1); #define inpack_dec16(a,b,c,d) \ inpack_dec8(a ## 0, b ## 0, c ## 0, d ## 0); \ inpack_dec8(a ## 1, b ## 1, c ## 1, d ## 1); #define outunpack_dec16(a,b,c,d) \ outunpack_dec8(a ## 0, b ## 0, c ## 0, d ## 0); \ outunpack_dec8(a ## 1, b ## 1, c ## 1, d ## 1); .align 16 ELF(.type __twofish_enc_blk16,@function;) __twofish_enc_blk16: /* input: * %rdi: ctx, CTX * RA0, RB0, RC0, RD0, RA1, RB1, RC1, RD1: sixteen parallel * plaintext blocks * output: * RA0, RB0, RC0, RD0, RA1, RB1, RC1, RD1: sixteen parallel * ciphertext blocks */ CFI_STARTPROC(); init_round_constants(); transpose4x4_16(RA, RB, RC, RD); inpack_enc16(RA, RB, RC, RD); encrypt_cycle_first16(0); encrypt_cycle16(2); encrypt_cycle16(4); encrypt_cycle16(6); encrypt_cycle16(8); encrypt_cycle16(10); encrypt_cycle16(12); encrypt_cycle_last16(14); outunpack_enc16(RA, RB, RC, RD); transpose4x4_16(RA, RB, RC, RD); ret_spec_stop; CFI_ENDPROC(); ELF(.size __twofish_enc_blk16,.-__twofish_enc_blk16;) .align 16 ELF(.type __twofish_dec_blk16,@function;) __twofish_dec_blk16: /* input: * %rdi: ctx, CTX * RA0, RB0, RC0, RD0, RA1, RB1, RC1, RD1: sixteen parallel * plaintext blocks * output: * RA0, RB0, RC0, RD0, RA1, RB1, RC1, RD1: sixteen parallel * ciphertext blocks */ CFI_STARTPROC(); init_round_constants(); transpose4x4_16(RA, RB, RC, RD); 
inpack_dec16(RA, RB, RC, RD); decrypt_cycle_first16(14); decrypt_cycle16(12); decrypt_cycle16(10); decrypt_cycle16(8); decrypt_cycle16(6); decrypt_cycle16(4); decrypt_cycle16(2); decrypt_cycle_last16(0); outunpack_dec16(RA, RB, RC, RD); transpose4x4_16(RA, RB, RC, RD); ret_spec_stop; CFI_ENDPROC(); ELF(.size __twofish_dec_blk16,.-__twofish_dec_blk16;) .align 16 .globl _gcry_twofish_avx2_blk16 ELF(.type _gcry_twofish_avx2_blk16,@function;) _gcry_twofish_avx2_blk16: /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) * %rdx: src (16 blocks) * %ecx: encrypt */ CFI_STARTPROC(); vzeroupper; vmovdqu (0 * 32)(%rdx), RA0; vmovdqu (1 * 32)(%rdx), RB0; vmovdqu (2 * 32)(%rdx), RC0; vmovdqu (3 * 32)(%rdx), RD0; vmovdqu (4 * 32)(%rdx), RA1; vmovdqu (5 * 32)(%rdx), RB1; vmovdqu (6 * 32)(%rdx), RC1; vmovdqu (7 * 32)(%rdx), RD1; testl %ecx, %ecx; jz .Lblk16_dec; call __twofish_enc_blk16; jmp .Lblk16_end; .Lblk16_dec: call __twofish_dec_blk16; .Lblk16_end: vmovdqu RA0, (0 * 32)(%rsi); vmovdqu RB0, (1 * 32)(%rsi); vmovdqu RC0, (2 * 32)(%rsi); vmovdqu RD0, (3 * 32)(%rsi); vmovdqu RA1, (4 * 32)(%rsi); vmovdqu RB1, (5 * 32)(%rsi); vmovdqu RC1, (6 * 32)(%rsi); vmovdqu RD1, (7 * 32)(%rsi); vzeroall; ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_twofish_avx2_blk16,.-_gcry_twofish_avx2_blk16;) #define inc_le128(x, minus_one, tmp) \ vpcmpeqq minus_one, x, tmp; \ vpsubq minus_one, x, x; \ vpslldq $8, tmp, tmp; \ vpsubq tmp, x, x; .align 16 .globl _gcry_twofish_avx2_ctr_enc ELF(.type _gcry_twofish_avx2_ctr_enc,@function;) _gcry_twofish_avx2_ctr_enc: /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) * %rdx: src (16 blocks) * %rcx: iv (big endian, 128bit) */ CFI_STARTPROC(); movq 8(%rcx), %rax; bswapq %rax; vzeroupper; vbroadcasti128 .Lbswap128_mask rRIP, RTMP3; vpcmpeqd RNOT, RNOT, RNOT; vpsrldq $8, RNOT, RNOT; /* ab: -1:0 ; cd: -1:0 */ vpaddq RNOT, RNOT, RTMP2; /* ab: -2:0 ; cd: -2:0 */ /* load IV and byteswap */ vmovdqu (%rcx), RTMP4x; vpshufb RTMP3x, RTMP4x, RTMP4x; vmovdqa RTMP4x, RTMP0x; inc_le128(RTMP4x, RNOTx, RTMP1x); vinserti128 $1, RTMP4x, RTMP0, RTMP0; vpshufb RTMP3, RTMP0, RA0; /* +1 ; +0 */ /* check need for handling 64-bit overflow and carry */ cmpq $(0xffffffffffffffff - 16), %rax; ja .Lhandle_ctr_carry; /* construct IVs */ vpsubq RTMP2, RTMP0, RTMP0; /* +3 ; +2 */ vpshufb RTMP3, RTMP0, RB0; vpsubq RTMP2, RTMP0, RTMP0; /* +5 ; +4 */ vpshufb RTMP3, RTMP0, RC0; vpsubq RTMP2, RTMP0, RTMP0; /* +7 ; +6 */ vpshufb RTMP3, RTMP0, RD0; vpsubq RTMP2, RTMP0, RTMP0; /* +9 ; +8 */ vpshufb RTMP3, RTMP0, RA1; vpsubq RTMP2, RTMP0, RTMP0; /* +11 ; +10 */ vpshufb RTMP3, RTMP0, RB1; vpsubq RTMP2, RTMP0, RTMP0; /* +13 ; +12 */ vpshufb RTMP3, RTMP0, RC1; vpsubq RTMP2, RTMP0, RTMP0; /* +15 ; +14 */ vpshufb RTMP3, RTMP0, RD1; vpsubq RTMP2, RTMP0, RTMP0; /* +16 */ vpshufb RTMP3x, RTMP0x, RTMP0x; jmp .Lctr_carry_done; .Lhandle_ctr_carry: /* construct IVs */ inc_le128(RTMP0, RNOT, RTMP1); inc_le128(RTMP0, RNOT, RTMP1); vpshufb RTMP3, RTMP0, RB0; /* +3 ; +2 */ inc_le128(RTMP0, RNOT, RTMP1); inc_le128(RTMP0, RNOT, RTMP1); vpshufb RTMP3, RTMP0, RC0; /* +5 ; +4 */ inc_le128(RTMP0, RNOT, RTMP1); inc_le128(RTMP0, RNOT, RTMP1); vpshufb RTMP3, RTMP0, RD0; /* +7 ; +6 */ inc_le128(RTMP0, RNOT, RTMP1); inc_le128(RTMP0, RNOT, RTMP1); vpshufb RTMP3, RTMP0, RA1; /* +9 ; +8 */ inc_le128(RTMP0, RNOT, RTMP1); inc_le128(RTMP0, RNOT, RTMP1); vpshufb RTMP3, RTMP0, RB1; /* +11 ; +10 */ inc_le128(RTMP0, RNOT, RTMP1); inc_le128(RTMP0, RNOT, RTMP1); vpshufb RTMP3, RTMP0, RC1; /* +13 ; +12 */ inc_le128(RTMP0, RNOT, RTMP1); inc_le128(RTMP0, RNOT, RTMP1); 
vpshufb RTMP3, RTMP0, RD1; /* +15 ; +14 */ inc_le128(RTMP0, RNOT, RTMP1); vextracti128 $1, RTMP0, RTMP0x; vpshufb RTMP3x, RTMP0x, RTMP0x; /* +16 */ .align 4 .Lctr_carry_done: /* store new IV */ vmovdqu RTMP0x, (%rcx); call __twofish_enc_blk16; vpxor (0 * 32)(%rdx), RA0, RA0; vpxor (1 * 32)(%rdx), RB0, RB0; vpxor (2 * 32)(%rdx), RC0, RC0; vpxor (3 * 32)(%rdx), RD0, RD0; vpxor (4 * 32)(%rdx), RA1, RA1; vpxor (5 * 32)(%rdx), RB1, RB1; vpxor (6 * 32)(%rdx), RC1, RC1; vpxor (7 * 32)(%rdx), RD1, RD1; vmovdqu RA0, (0 * 32)(%rsi); vmovdqu RB0, (1 * 32)(%rsi); vmovdqu RC0, (2 * 32)(%rsi); vmovdqu RD0, (3 * 32)(%rsi); vmovdqu RA1, (4 * 32)(%rsi); vmovdqu RB1, (5 * 32)(%rsi); vmovdqu RC1, (6 * 32)(%rsi); vmovdqu RD1, (7 * 32)(%rsi); vzeroall; ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_twofish_avx2_ctr_enc,.-_gcry_twofish_avx2_ctr_enc;) .align 16 .globl _gcry_twofish_avx2_cbc_dec ELF(.type _gcry_twofish_avx2_cbc_dec,@function;) _gcry_twofish_avx2_cbc_dec: /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) * %rdx: src (16 blocks) * %rcx: iv */ CFI_STARTPROC(); vzeroupper; vmovdqu (0 * 32)(%rdx), RA0; vmovdqu (1 * 32)(%rdx), RB0; vmovdqu (2 * 32)(%rdx), RC0; vmovdqu (3 * 32)(%rdx), RD0; vmovdqu (4 * 32)(%rdx), RA1; vmovdqu (5 * 32)(%rdx), RB1; vmovdqu (6 * 32)(%rdx), RC1; vmovdqu (7 * 32)(%rdx), RD1; call __twofish_dec_blk16; vmovdqu (%rcx), RNOTx; vinserti128 $1, (%rdx), RNOT, RNOT; vpxor RNOT, RA0, RA0; vpxor (0 * 32 + 16)(%rdx), RB0, RB0; vpxor (1 * 32 + 16)(%rdx), RC0, RC0; vpxor (2 * 32 + 16)(%rdx), RD0, RD0; vpxor (3 * 32 + 16)(%rdx), RA1, RA1; vpxor (4 * 32 + 16)(%rdx), RB1, RB1; vpxor (5 * 32 + 16)(%rdx), RC1, RC1; vpxor (6 * 32 + 16)(%rdx), RD1, RD1; vmovdqu (7 * 32 + 16)(%rdx), RNOTx; vmovdqu RNOTx, (%rcx); /* store new IV */ vmovdqu RA0, (0 * 32)(%rsi); vmovdqu RB0, (1 * 32)(%rsi); vmovdqu RC0, (2 * 32)(%rsi); vmovdqu RD0, (3 * 32)(%rsi); vmovdqu RA1, (4 * 32)(%rsi); vmovdqu RB1, (5 * 32)(%rsi); vmovdqu RC1, (6 * 32)(%rsi); vmovdqu RD1, (7 * 32)(%rsi); vzeroall; ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_twofish_avx2_cbc_dec,.-_gcry_twofish_avx2_cbc_dec;) .align 16 .globl _gcry_twofish_avx2_cfb_dec ELF(.type _gcry_twofish_avx2_cfb_dec,@function;) _gcry_twofish_avx2_cfb_dec: /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) * %rdx: src (16 blocks) * %rcx: iv */ CFI_STARTPROC(); vzeroupper; /* Load input */ vmovdqu (%rcx), RNOTx; vinserti128 $1, (%rdx), RNOT, RA0; vmovdqu (0 * 32 + 16)(%rdx), RB0; vmovdqu (1 * 32 + 16)(%rdx), RC0; vmovdqu (2 * 32 + 16)(%rdx), RD0; vmovdqu (3 * 32 + 16)(%rdx), RA1; vmovdqu (4 * 32 + 16)(%rdx), RB1; vmovdqu (5 * 32 + 16)(%rdx), RC1; vmovdqu (6 * 32 + 16)(%rdx), RD1; /* Update IV */ vmovdqu (7 * 32 + 16)(%rdx), RNOTx; vmovdqu RNOTx, (%rcx); call __twofish_enc_blk16; vpxor (0 * 32)(%rdx), RA0, RA0; vpxor (1 * 32)(%rdx), RB0, RB0; vpxor (2 * 32)(%rdx), RC0, RC0; vpxor (3 * 32)(%rdx), RD0, RD0; vpxor (4 * 32)(%rdx), RA1, RA1; vpxor (5 * 32)(%rdx), RB1, RB1; vpxor (6 * 32)(%rdx), RC1, RC1; vpxor (7 * 32)(%rdx), RD1, RD1; vmovdqu RA0, (0 * 32)(%rsi); vmovdqu RB0, (1 * 32)(%rsi); vmovdqu RC0, (2 * 32)(%rsi); vmovdqu RD0, (3 * 32)(%rsi); vmovdqu RA1, (4 * 32)(%rsi); vmovdqu RB1, (5 * 32)(%rsi); vmovdqu RC1, (6 * 32)(%rsi); vmovdqu RD1, (7 * 32)(%rsi); vzeroall; ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_twofish_avx2_cfb_dec,.-_gcry_twofish_avx2_cfb_dec;) .align 16 .globl _gcry_twofish_avx2_ocb_enc ELF(.type _gcry_twofish_avx2_ocb_enc,@function;) _gcry_twofish_avx2_ocb_enc: /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) * %rdx: src (16 blocks) * %rcx: offset 
* %r8 : checksum * %r9 : L pointers (void *L[16]) */ CFI_STARTPROC(); vzeroupper; subq $(4 * 8), %rsp; CFI_ADJUST_CFA_OFFSET(4 * 8); movq %r10, (0 * 8)(%rsp); movq %r11, (1 * 8)(%rsp); movq %r12, (2 * 8)(%rsp); movq %r13, (3 * 8)(%rsp); CFI_REL_OFFSET(%r10, 0 * 8); CFI_REL_OFFSET(%r11, 1 * 8); CFI_REL_OFFSET(%r12, 2 * 8); CFI_REL_OFFSET(%r13, 3 * 8); vmovdqu (%rcx), RTMP0x; vmovdqu (%r8), RTMP1x; /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ /* Checksum_i = Checksum_{i-1} xor P_i */ /* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i) */ #define OCB_INPUT(n, l0reg, l1reg, yreg) \ vmovdqu (n * 32)(%rdx), yreg; \ vpxor (l0reg), RTMP0x, RNOTx; \ vpxor (l1reg), RNOTx, RTMP0x; \ vinserti128 $1, RTMP0x, RNOT, RNOT; \ vpxor yreg, RTMP1, RTMP1; \ vpxor yreg, RNOT, yreg; \ vmovdqu RNOT, (n * 32)(%rsi); movq (0 * 8)(%r9), %r10; movq (1 * 8)(%r9), %r11; movq (2 * 8)(%r9), %r12; movq (3 * 8)(%r9), %r13; OCB_INPUT(0, %r10, %r11, RA0); OCB_INPUT(1, %r12, %r13, RB0); movq (4 * 8)(%r9), %r10; movq (5 * 8)(%r9), %r11; movq (6 * 8)(%r9), %r12; movq (7 * 8)(%r9), %r13; OCB_INPUT(2, %r10, %r11, RC0); OCB_INPUT(3, %r12, %r13, RD0); movq (8 * 8)(%r9), %r10; movq (9 * 8)(%r9), %r11; movq (10 * 8)(%r9), %r12; movq (11 * 8)(%r9), %r13; OCB_INPUT(4, %r10, %r11, RA1); OCB_INPUT(5, %r12, %r13, RB1); movq (12 * 8)(%r9), %r10; movq (13 * 8)(%r9), %r11; movq (14 * 8)(%r9), %r12; movq (15 * 8)(%r9), %r13; OCB_INPUT(6, %r10, %r11, RC1); OCB_INPUT(7, %r12, %r13, RD1); #undef OCB_INPUT vextracti128 $1, RTMP1, RNOTx; vmovdqu RTMP0x, (%rcx); vpxor RNOTx, RTMP1x, RTMP1x; vmovdqu RTMP1x, (%r8); movq (0 * 8)(%rsp), %r10; movq (1 * 8)(%rsp), %r11; movq (2 * 8)(%rsp), %r12; movq (3 * 8)(%rsp), %r13; CFI_RESTORE(%r10); CFI_RESTORE(%r11); CFI_RESTORE(%r12); CFI_RESTORE(%r13); call __twofish_enc_blk16; addq $(4 * 8), %rsp; CFI_ADJUST_CFA_OFFSET(-4 * 8); vpxor (0 * 32)(%rsi), RA0, RA0; vpxor (1 * 32)(%rsi), RB0, RB0; vpxor (2 * 32)(%rsi), RC0, RC0; vpxor (3 * 32)(%rsi), RD0, RD0; vpxor (4 * 32)(%rsi), RA1, RA1; vpxor (5 * 32)(%rsi), RB1, RB1; vpxor (6 * 32)(%rsi), RC1, RC1; vpxor (7 * 32)(%rsi), RD1, RD1; vmovdqu RA0, (0 * 32)(%rsi); vmovdqu RB0, (1 * 32)(%rsi); vmovdqu RC0, (2 * 32)(%rsi); vmovdqu RD0, (3 * 32)(%rsi); vmovdqu RA1, (4 * 32)(%rsi); vmovdqu RB1, (5 * 32)(%rsi); vmovdqu RC1, (6 * 32)(%rsi); vmovdqu RD1, (7 * 32)(%rsi); vzeroall; ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_twofish_avx2_ocb_enc,.-_gcry_twofish_avx2_ocb_enc;) .align 16 .globl _gcry_twofish_avx2_ocb_dec ELF(.type _gcry_twofish_avx2_ocb_dec,@function;) _gcry_twofish_avx2_ocb_dec: /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) * %rdx: src (16 blocks) * %rcx: offset * %r8 : checksum * %r9 : L pointers (void *L[16]) */ CFI_STARTPROC(); vzeroupper; subq $(4 * 8), %rsp; CFI_ADJUST_CFA_OFFSET(4 * 8); movq %r10, (0 * 8)(%rsp); movq %r11, (1 * 8)(%rsp); movq %r12, (2 * 8)(%rsp); movq %r13, (3 * 8)(%rsp); CFI_REL_OFFSET(%r10, 0 * 8); CFI_REL_OFFSET(%r11, 1 * 8); CFI_REL_OFFSET(%r12, 2 * 8); CFI_REL_OFFSET(%r13, 3 * 8); vmovdqu (%rcx), RTMP0x; /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ /* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i) */ #define OCB_INPUT(n, l0reg, l1reg, yreg) \ vmovdqu (n * 32)(%rdx), yreg; \ vpxor (l0reg), RTMP0x, RNOTx; \ vpxor (l1reg), RNOTx, RTMP0x; \ vinserti128 $1, RTMP0x, RNOT, RNOT; \ vpxor yreg, RNOT, yreg; \ vmovdqu RNOT, (n * 32)(%rsi); movq (0 * 8)(%r9), %r10; movq (1 * 8)(%r9), %r11; movq (2 * 8)(%r9), %r12; movq (3 * 8)(%r9), %r13; OCB_INPUT(0, %r10, %r11, RA0); OCB_INPUT(1, %r12, %r13, RB0); movq (4 * 8)(%r9), %r10; movq (5 * 
8)(%r9), %r11; movq (6 * 8)(%r9), %r12; movq (7 * 8)(%r9), %r13; OCB_INPUT(2, %r10, %r11, RC0); OCB_INPUT(3, %r12, %r13, RD0); movq (8 * 8)(%r9), %r10; movq (9 * 8)(%r9), %r11; movq (10 * 8)(%r9), %r12; movq (11 * 8)(%r9), %r13; OCB_INPUT(4, %r10, %r11, RA1); OCB_INPUT(5, %r12, %r13, RB1); movq (12 * 8)(%r9), %r10; movq (13 * 8)(%r9), %r11; movq (14 * 8)(%r9), %r12; movq (15 * 8)(%r9), %r13; OCB_INPUT(6, %r10, %r11, RC1); OCB_INPUT(7, %r12, %r13, RD1); #undef OCB_INPUT vmovdqu RTMP0x, (%rcx); mov %r8, %rcx movq (0 * 8)(%rsp), %r10; movq (1 * 8)(%rsp), %r11; movq (2 * 8)(%rsp), %r12; movq (3 * 8)(%rsp), %r13; CFI_RESTORE(%r10); CFI_RESTORE(%r11); CFI_RESTORE(%r12); CFI_RESTORE(%r13); call __twofish_dec_blk16; vmovdqu (%rcx), RTMP1x; vpxor (0 * 32)(%rsi), RA0, RA0; vpxor (1 * 32)(%rsi), RB0, RB0; vpxor (2 * 32)(%rsi), RC0, RC0; vpxor (3 * 32)(%rsi), RD0, RD0; vpxor (4 * 32)(%rsi), RA1, RA1; vpxor (5 * 32)(%rsi), RB1, RB1; vpxor (6 * 32)(%rsi), RC1, RC1; vpxor (7 * 32)(%rsi), RD1, RD1; addq $(4 * 8), %rsp; CFI_ADJUST_CFA_OFFSET(-4 * 8); /* Checksum_i = Checksum_{i-1} xor P_i */ vmovdqu RA0, (0 * 32)(%rsi); vpxor RA0, RTMP1, RTMP1; vmovdqu RB0, (1 * 32)(%rsi); vpxor RB0, RTMP1, RTMP1; vmovdqu RC0, (2 * 32)(%rsi); vpxor RC0, RTMP1, RTMP1; vmovdqu RD0, (3 * 32)(%rsi); vpxor RD0, RTMP1, RTMP1; vmovdqu RA1, (4 * 32)(%rsi); vpxor RA1, RTMP1, RTMP1; vmovdqu RB1, (5 * 32)(%rsi); vpxor RB1, RTMP1, RTMP1; vmovdqu RC1, (6 * 32)(%rsi); vpxor RC1, RTMP1, RTMP1; vmovdqu RD1, (7 * 32)(%rsi); vpxor RD1, RTMP1, RTMP1; vextracti128 $1, RTMP1, RNOTx; vpxor RNOTx, RTMP1x, RTMP1x; vmovdqu RTMP1x, (%rcx); vzeroall; ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_twofish_avx2_ocb_dec,.-_gcry_twofish_avx2_ocb_dec;) .align 16 .globl _gcry_twofish_avx2_ocb_auth ELF(.type _gcry_twofish_avx2_ocb_auth,@function;) _gcry_twofish_avx2_ocb_auth: /* input: * %rdi: ctx, CTX * %rsi: abuf (16 blocks) * %rdx: offset * %rcx: checksum * %r8 : L pointers (void *L[16]) */ CFI_STARTPROC(); vzeroupper; subq $(4 * 8), %rsp; CFI_ADJUST_CFA_OFFSET(4 * 8); movq %r10, (0 * 8)(%rsp); movq %r11, (1 * 8)(%rsp); movq %r12, (2 * 8)(%rsp); movq %r13, (3 * 8)(%rsp); CFI_REL_OFFSET(%r10, 0 * 8); CFI_REL_OFFSET(%r11, 1 * 8); CFI_REL_OFFSET(%r12, 2 * 8); CFI_REL_OFFSET(%r13, 3 * 8); vmovdqu (%rdx), RTMP0x; /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ /* Sum_i = Sum_{i-1} xor ENCIPHER(K, A_i xor Offset_i) */ #define OCB_INPUT(n, l0reg, l1reg, yreg) \ vmovdqu (n * 32)(%rsi), yreg; \ vpxor (l0reg), RTMP0x, RNOTx; \ vpxor (l1reg), RNOTx, RTMP0x; \ vinserti128 $1, RTMP0x, RNOT, RNOT; \ vpxor yreg, RNOT, yreg; movq (0 * 8)(%r8), %r10; movq (1 * 8)(%r8), %r11; movq (2 * 8)(%r8), %r12; movq (3 * 8)(%r8), %r13; OCB_INPUT(0, %r10, %r11, RA0); OCB_INPUT(1, %r12, %r13, RB0); movq (4 * 8)(%r8), %r10; movq (5 * 8)(%r8), %r11; movq (6 * 8)(%r8), %r12; movq (7 * 8)(%r8), %r13; OCB_INPUT(2, %r10, %r11, RC0); OCB_INPUT(3, %r12, %r13, RD0); movq (8 * 8)(%r8), %r10; movq (9 * 8)(%r8), %r11; movq (10 * 8)(%r8), %r12; movq (11 * 8)(%r8), %r13; OCB_INPUT(4, %r10, %r11, RA1); OCB_INPUT(5, %r12, %r13, RB1); movq (12 * 8)(%r8), %r10; movq (13 * 8)(%r8), %r11; movq (14 * 8)(%r8), %r12; movq (15 * 8)(%r8), %r13; OCB_INPUT(6, %r10, %r11, RC1); OCB_INPUT(7, %r12, %r13, RD1); #undef OCB_INPUT vmovdqu RTMP0x, (%rdx); movq (0 * 8)(%rsp), %r10; movq (1 * 8)(%rsp), %r11; movq (2 * 8)(%rsp), %r12; movq (3 * 8)(%rsp), %r13; CFI_RESTORE(%r10); CFI_RESTORE(%r11); CFI_RESTORE(%r12); CFI_RESTORE(%r13); call __twofish_enc_blk16; vpxor RA0, RB0, RA0; vpxor RC0, RD0, RC0; vpxor RA1, RB1, RA1; 
	vpxor RC1, RD1, RC1;

	vpxor RA0, RC0, RA0;
	vpxor RA1, RC1, RA1;

	addq $(4 * 8), %rsp;
	CFI_ADJUST_CFA_OFFSET(-4 * 8);

	vpxor RA1, RA0, RTMP1;

	vextracti128 $1, RTMP1, RNOTx;
	vpxor (%rcx), RTMP1x, RTMP1x;
	vpxor RNOTx, RTMP1x, RTMP1x;
	vmovdqu RTMP1x, (%rcx);

	vzeroall;

	ret_spec_stop;
	CFI_ENDPROC();
ELF(.size _gcry_twofish_avx2_ocb_auth,.-_gcry_twofish_avx2_ocb_auth;)

+SECTION_RODATA
+
.align 16

/* For CTR-mode IV byteswap */
-_gcry_twofish_bswap128_mask:
+ELF(.type _gcry_twofish_bswap128_mask,@object)
+_gcry_twofish_bswap128_mask:
.Lbswap128_mask:
	.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
ELF(.size _gcry_twofish_bswap128_mask,.-_gcry_twofish_bswap128_mask;)

#endif /*defined(USE_TWOFISH) && defined(ENABLE_AVX2_SUPPORT)*/
#endif /*__x86_64*/
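/* Editor's note (illustration only, not part of the patch): .Lbswap128_mask
 * exists because the CTR IV is kept big-endian in memory while the vector
 * units add to it as little-endian words.  The _ctr_enc routines in both
 * files take a fast path when the low 64 bits of the counter cannot wrap
 * while adding the block count (the "cmpq $(0xffffffffffffffff - N)" / "ja"
 * check), and otherwise fall into the carry-handling path that propagates
 * into the high 64 bits (addq/adcq after bswapq).  A scalar C sketch of
 * that counter update, with every identifier below invented for
 * illustration:
 *
 *   #include <stdint.h>
 *
 *   static uint64_t load_be64(const uint8_t *p)
 *   {
 *     uint64_t v = 0;
 *     for (int i = 0; i < 8; i++)
 *       v = (v << 8) | p[i];
 *     return v;
 *   }
 *
 *   static void store_be64(uint8_t *p, uint64_t v)
 *   {
 *     for (int i = 7; i >= 0; i--)
 *       {
 *         p[i] = (uint8_t)v;
 *         v >>= 8;
 *       }
 *   }
 *
 *   // Add nblocks to the 128-bit big-endian counter stored in the IV.
 *   static void ctr_add_be128(uint8_t iv[16], uint64_t nblocks)
 *   {
 *     uint64_t hi = load_be64(iv);
 *     uint64_t lo = load_be64(iv + 8);
 *
 *     lo += nblocks;
 *     if (lo < nblocks)   // low half wrapped: propagate carry (adcq $0)
 *       hi += 1;
 *
 *     store_be64(iv, hi);
 *     store_be64(iv + 8, lo);
 *   }
 */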