diff --git a/cipher/camellia-aesni-avx-amd64.S b/cipher/camellia-aesni-avx-amd64.S index 93c96791..5ec33b9b 100644 --- a/cipher/camellia-aesni-avx-amd64.S +++ b/cipher/camellia-aesni-avx-amd64.S @@ -1,2719 +1,2797 @@ /* camellia-avx-aesni-amd64.S - AES-NI/AVX implementation of Camellia cipher * * Copyright (C) 2013-2015,2020 Jussi Kivilinna * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . */ #include #ifdef __x86_64 #if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && \ defined(ENABLE_AESNI_SUPPORT) && defined(ENABLE_AVX_SUPPORT) #include "asm-common-amd64.h" #define CAMELLIA_TABLE_BYTE_LEN 272 /* struct CAMELLIA_context: */ #define key_table 0 #define key_bitlength CAMELLIA_TABLE_BYTE_LEN /* register macros */ #define CTX %rdi /********************************************************************** helper macros **********************************************************************/ #define filter_8bit(x, lo_t, hi_t, mask4bit, tmp0) \ vpand x, mask4bit, tmp0; \ vpandn x, mask4bit, x; \ vpsrld $4, x, x; \ \ vpshufb tmp0, lo_t, tmp0; \ vpshufb x, hi_t, x; \ vpxor tmp0, x, x; /********************************************************************** 16-way camellia **********************************************************************/ /* * IN: * x0..x7: byte-sliced AB state * mem_cd: register pointer storing CD state * key: index for key material * OUT: * x0..x7: new byte-sliced CD state */ #define roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2, t3, t4, t5, t6, \ t7, mem_cd, key) \ /* \ * S-function with AES subbytes \ */ \ vmovdqa .Linv_shift_row rRIP, t4; \ vbroadcastss .L0f0f0f0f rRIP, t7; \ vmovdqa .Lpre_tf_lo_s1 rRIP, t0; \ vmovdqa .Lpre_tf_hi_s1 rRIP, t1; \ \ /* AES inverse shift rows */ \ vpshufb t4, x0, x0; \ vpshufb t4, x7, x7; \ vpshufb t4, x1, x1; \ vpshufb t4, x4, x4; \ vpshufb t4, x2, x2; \ vpshufb t4, x5, x5; \ vpshufb t4, x3, x3; \ vpshufb t4, x6, x6; \ \ /* prefilter sboxes 1, 2 and 3 */ \ vmovdqa .Lpre_tf_lo_s4 rRIP, t2; \ vmovdqa .Lpre_tf_hi_s4 rRIP, t3; \ filter_8bit(x0, t0, t1, t7, t6); \ filter_8bit(x7, t0, t1, t7, t6); \ filter_8bit(x1, t0, t1, t7, t6); \ filter_8bit(x4, t0, t1, t7, t6); \ filter_8bit(x2, t0, t1, t7, t6); \ filter_8bit(x5, t0, t1, t7, t6); \ \ /* prefilter sbox 4 */ \ vpxor t4, t4, t4; \ filter_8bit(x3, t2, t3, t7, t6); \ filter_8bit(x6, t2, t3, t7, t6); \ \ /* AES subbytes + AES shift rows */ \ vmovdqa .Lpost_tf_lo_s1 rRIP, t0; \ vmovdqa .Lpost_tf_hi_s1 rRIP, t1; \ vaesenclast t4, x0, x0; \ vaesenclast t4, x7, x7; \ vaesenclast t4, x1, x1; \ vaesenclast t4, x4, x4; \ vaesenclast t4, x2, x2; \ vaesenclast t4, x5, x5; \ vaesenclast t4, x3, x3; \ vaesenclast t4, x6, x6; \ \ /* postfilter sboxes 1 and 4 */ \ vmovdqa .Lpost_tf_lo_s3 rRIP, t2; \ vmovdqa .Lpost_tf_hi_s3 rRIP, t3; \ filter_8bit(x0, t0, t1, t7, t6); \ filter_8bit(x7, t0, t1, t7, t6); \ filter_8bit(x3, t0, t1, t7, t6); \ filter_8bit(x6, t0, 
t1, t7, t6); \ \ /* postfilter sbox 3 */ \ vmovdqa .Lpost_tf_lo_s2 rRIP, t4; \ vmovdqa .Lpost_tf_hi_s2 rRIP, t5; \ filter_8bit(x2, t2, t3, t7, t6); \ filter_8bit(x5, t2, t3, t7, t6); \ \ vpxor t6, t6, t6; \ vmovq key, t0; \ \ /* postfilter sbox 2 */ \ filter_8bit(x1, t4, t5, t7, t2); \ filter_8bit(x4, t4, t5, t7, t2); \ \ vpsrldq $5, t0, t5; \ vpsrldq $1, t0, t1; \ vpsrldq $2, t0, t2; \ vpsrldq $3, t0, t3; \ vpsrldq $4, t0, t4; \ vpshufb t6, t0, t0; \ vpshufb t6, t1, t1; \ vpshufb t6, t2, t2; \ vpshufb t6, t3, t3; \ vpshufb t6, t4, t4; \ vpsrldq $2, t5, t7; \ vpshufb t6, t7, t7; \ \ /* P-function */ \ vpxor x5, x0, x0; \ vpxor x6, x1, x1; \ vpxor x7, x2, x2; \ vpxor x4, x3, x3; \ \ vpxor x2, x4, x4; \ vpxor x3, x5, x5; \ vpxor x0, x6, x6; \ vpxor x1, x7, x7; \ \ vpxor x7, x0, x0; \ vpxor x4, x1, x1; \ vpxor x5, x2, x2; \ vpxor x6, x3, x3; \ \ vpxor x3, x4, x4; \ vpxor x0, x5, x5; \ vpxor x1, x6, x6; \ vpxor x2, x7, x7; /* note: high and low parts swapped */ \ \ /* Add key material and result to CD (x becomes new CD) */ \ \ vpxor t3, x4, x4; \ vpxor 0 * 16(mem_cd), x4, x4; \ \ vpxor t2, x5, x5; \ vpxor 1 * 16(mem_cd), x5, x5; \ \ vpsrldq $1, t5, t3; \ vpshufb t6, t5, t5; \ vpshufb t6, t3, t6; \ \ vpxor t1, x6, x6; \ vpxor 2 * 16(mem_cd), x6, x6; \ \ vpxor t0, x7, x7; \ vpxor 3 * 16(mem_cd), x7, x7; \ \ vpxor t7, x0, x0; \ vpxor 4 * 16(mem_cd), x0, x0; \ \ vpxor t6, x1, x1; \ vpxor 5 * 16(mem_cd), x1, x1; \ \ vpxor t5, x2, x2; \ vpxor 6 * 16(mem_cd), x2, x2; \ \ vpxor t4, x3, x3; \ vpxor 7 * 16(mem_cd), x3, x3; /* * IN/OUT: * x0..x7: byte-sliced AB state preloaded * mem_ab: byte-sliced AB state in memory * mem_cb: byte-sliced CD state in memory */ #define two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, i, dir, store_ab) \ roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_cd, (key_table + (i) * 8)(CTX)); \ \ vmovdqu x4, 0 * 16(mem_cd); \ vmovdqu x5, 1 * 16(mem_cd); \ vmovdqu x6, 2 * 16(mem_cd); \ vmovdqu x7, 3 * 16(mem_cd); \ vmovdqu x0, 4 * 16(mem_cd); \ vmovdqu x1, 5 * 16(mem_cd); \ vmovdqu x2, 6 * 16(mem_cd); \ vmovdqu x3, 7 * 16(mem_cd); \ \ roundsm16(x4, x5, x6, x7, x0, x1, x2, x3, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, (key_table + ((i) + (dir)) * 8)(CTX)); \ \ store_ab(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab); #define dummy_store(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab) /* do nothing */ #define store_ab_state(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab) \ /* Store new AB state */ \ vmovdqu x0, 0 * 16(mem_ab); \ vmovdqu x1, 1 * 16(mem_ab); \ vmovdqu x2, 2 * 16(mem_ab); \ vmovdqu x3, 3 * 16(mem_ab); \ vmovdqu x4, 4 * 16(mem_ab); \ vmovdqu x5, 5 * 16(mem_ab); \ vmovdqu x6, 6 * 16(mem_ab); \ vmovdqu x7, 7 * 16(mem_ab); #define enc_rounds16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, i) \ two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 2, 1, store_ab_state); \ two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 4, 1, store_ab_state); \ two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 6, 1, dummy_store); #define dec_rounds16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, i) \ two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 7, -1, store_ab_state); \ two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, 
mem_cd, (i) + 5, -1, store_ab_state); \ two_roundsm16(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 3, -1, dummy_store); /* * IN: * v0..3: byte-sliced 32-bit integers * OUT: * v0..3: (IN <<< 1) */ #define rol32_1_16(v0, v1, v2, v3, t0, t1, t2, zero) \ vpcmpgtb v0, zero, t0; \ vpaddb v0, v0, v0; \ vpabsb t0, t0; \ \ vpcmpgtb v1, zero, t1; \ vpaddb v1, v1, v1; \ vpabsb t1, t1; \ \ vpcmpgtb v2, zero, t2; \ vpaddb v2, v2, v2; \ vpabsb t2, t2; \ \ vpor t0, v1, v1; \ \ vpcmpgtb v3, zero, t0; \ vpaddb v3, v3, v3; \ vpabsb t0, t0; \ \ vpor t1, v2, v2; \ vpor t2, v3, v3; \ vpor t0, v0, v0; /* * IN: * r: byte-sliced AB state in memory * l: byte-sliced CD state in memory * OUT: * x0..x7: new byte-sliced CD state */ #define fls16(l, l0, l1, l2, l3, l4, l5, l6, l7, r, t0, t1, t2, t3, tt0, \ tt1, tt2, tt3, kll, klr, krl, krr) \ /* \ * t0 = kll; \ * t0 &= ll; \ * lr ^= rol32(t0, 1); \ */ \ vpxor tt0, tt0, tt0; \ vmovd kll, t0; \ vpshufb tt0, t0, t3; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t2; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t1; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t0; \ \ vpand l0, t0, t0; \ vpand l1, t1, t1; \ vpand l2, t2, t2; \ vpand l3, t3, t3; \ \ rol32_1_16(t3, t2, t1, t0, tt1, tt2, tt3, tt0); \ \ vpxor l4, t0, l4; \ vmovdqu l4, 4 * 16(l); \ vpxor l5, t1, l5; \ vmovdqu l5, 5 * 16(l); \ vpxor l6, t2, l6; \ vmovdqu l6, 6 * 16(l); \ vpxor l7, t3, l7; \ vmovdqu l7, 7 * 16(l); \ \ /* \ * t2 = krr; \ * t2 |= rr; \ * rl ^= t2; \ */ \ \ vmovd krr, t0; \ vpshufb tt0, t0, t3; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t2; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t1; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t0; \ \ vpor 4 * 16(r), t0, t0; \ vpor 5 * 16(r), t1, t1; \ vpor 6 * 16(r), t2, t2; \ vpor 7 * 16(r), t3, t3; \ \ vpxor 0 * 16(r), t0, t0; \ vpxor 1 * 16(r), t1, t1; \ vpxor 2 * 16(r), t2, t2; \ vpxor 3 * 16(r), t3, t3; \ vmovdqu t0, 0 * 16(r); \ vmovdqu t1, 1 * 16(r); \ vmovdqu t2, 2 * 16(r); \ vmovdqu t3, 3 * 16(r); \ \ /* \ * t2 = krl; \ * t2 &= rl; \ * rr ^= rol32(t2, 1); \ */ \ vmovd krl, t0; \ vpshufb tt0, t0, t3; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t2; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t1; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t0; \ \ vpand 0 * 16(r), t0, t0; \ vpand 1 * 16(r), t1, t1; \ vpand 2 * 16(r), t2, t2; \ vpand 3 * 16(r), t3, t3; \ \ rol32_1_16(t3, t2, t1, t0, tt1, tt2, tt3, tt0); \ \ vpxor 4 * 16(r), t0, t0; \ vpxor 5 * 16(r), t1, t1; \ vpxor 6 * 16(r), t2, t2; \ vpxor 7 * 16(r), t3, t3; \ vmovdqu t0, 4 * 16(r); \ vmovdqu t1, 5 * 16(r); \ vmovdqu t2, 6 * 16(r); \ vmovdqu t3, 7 * 16(r); \ \ /* \ * t0 = klr; \ * t0 |= lr; \ * ll ^= t0; \ */ \ \ vmovd klr, t0; \ vpshufb tt0, t0, t3; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t2; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t1; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t0; \ \ vpor l4, t0, t0; \ vpor l5, t1, t1; \ vpor l6, t2, t2; \ vpor l7, t3, t3; \ \ vpxor l0, t0, l0; \ vmovdqu l0, 0 * 16(l); \ vpxor l1, t1, l1; \ vmovdqu l1, 1 * 16(l); \ vpxor l2, t2, l2; \ vmovdqu l2, 2 * 16(l); \ vpxor l3, t3, l3; \ vmovdqu l3, 3 * 16(l); #define transpose_4x4(x0, x1, x2, x3, t1, t2) \ vpunpckhdq x1, x0, t2; \ vpunpckldq x1, x0, x0; \ \ vpunpckldq x3, x2, t1; \ vpunpckhdq x3, x2, x2; \ \ vpunpckhqdq t1, x0, x1; \ vpunpcklqdq t1, x0, x0; \ \ vpunpckhqdq x2, t2, x3; \ vpunpcklqdq x2, t2, x2; #define byteslice_16x16b_fast(a0, b0, c0, d0, a1, b1, c1, d1, a2, b2, c2, d2, \ a3, b3, c3, d3, st0, st1) \ vmovdqu d2, st0; \ vmovdqu d3, st1; \ transpose_4x4(a0, a1, a2, a3, d2, d3); \ transpose_4x4(b0, 
b1, b2, b3, d2, d3); \ vmovdqu st0, d2; \ vmovdqu st1, d3; \ \ vmovdqu a0, st0; \ vmovdqu a1, st1; \ transpose_4x4(c0, c1, c2, c3, a0, a1); \ transpose_4x4(d0, d1, d2, d3, a0, a1); \ \ vmovdqu .Lshufb_16x16b rRIP, a0; \ vmovdqu st1, a1; \ vpshufb a0, a2, a2; \ vpshufb a0, a3, a3; \ vpshufb a0, b0, b0; \ vpshufb a0, b1, b1; \ vpshufb a0, b2, b2; \ vpshufb a0, b3, b3; \ vpshufb a0, a1, a1; \ vpshufb a0, c0, c0; \ vpshufb a0, c1, c1; \ vpshufb a0, c2, c2; \ vpshufb a0, c3, c3; \ vpshufb a0, d0, d0; \ vpshufb a0, d1, d1; \ vpshufb a0, d2, d2; \ vpshufb a0, d3, d3; \ vmovdqu d3, st1; \ vmovdqu st0, d3; \ vpshufb a0, d3, a0; \ vmovdqu d2, st0; \ \ transpose_4x4(a0, b0, c0, d0, d2, d3); \ transpose_4x4(a1, b1, c1, d1, d2, d3); \ vmovdqu st0, d2; \ vmovdqu st1, d3; \ \ vmovdqu b0, st0; \ vmovdqu b1, st1; \ transpose_4x4(a2, b2, c2, d2, b0, b1); \ transpose_4x4(a3, b3, c3, d3, b0, b1); \ vmovdqu st0, b0; \ vmovdqu st1, b1; \ /* does not adjust output bytes inside vectors */ #define transpose_8x8b(a, b, c, d, e, f, g, h, t0, t1, t2, t3, t4) \ vpunpcklbw a, b, t0; \ vpunpckhbw a, b, b; \ \ vpunpcklbw c, d, t1; \ vpunpckhbw c, d, d; \ \ vpunpcklbw e, f, t2; \ vpunpckhbw e, f, f; \ \ vpunpcklbw g, h, t3; \ vpunpckhbw g, h, h; \ \ vpunpcklwd t0, t1, g; \ vpunpckhwd t0, t1, t0; \ \ vpunpcklwd b, d, t1; \ vpunpckhwd b, d, e; \ \ vpunpcklwd t2, t3, c; \ vpunpckhwd t2, t3, t2; \ \ vpunpcklwd f, h, t3; \ vpunpckhwd f, h, b; \ \ vpunpcklwd e, b, t4; \ vpunpckhwd e, b, b; \ \ vpunpcklwd t1, t3, e; \ vpunpckhwd t1, t3, f; \ \ vmovdqa .Ltranspose_8x8_shuf rRIP, t3; \ \ vpunpcklwd g, c, d; \ vpunpckhwd g, c, c; \ \ vpunpcklwd t0, t2, t1; \ vpunpckhwd t0, t2, h; \ \ vpunpckhqdq b, h, a; \ vpshufb t3, a, a; \ vpunpcklqdq b, h, b; \ vpshufb t3, b, b; \ \ vpunpckhqdq e, d, g; \ vpshufb t3, g, g; \ vpunpcklqdq e, d, h; \ vpshufb t3, h, h; \ \ vpunpckhqdq f, c, e; \ vpshufb t3, e, e; \ vpunpcklqdq f, c, f; \ vpshufb t3, f, f; \ \ vpunpckhqdq t4, t1, c; \ vpshufb t3, c, c; \ vpunpcklqdq t4, t1, d; \ vpshufb t3, d, d; /* load blocks to registers and apply pre-whitening */ #define inpack16_pre(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, rio, key) \ vmovq key, x0; \ vpshufb .Lpack_bswap rRIP, x0, x0; \ \ vpxor 0 * 16(rio), x0, y7; \ vpxor 1 * 16(rio), x0, y6; \ vpxor 2 * 16(rio), x0, y5; \ vpxor 3 * 16(rio), x0, y4; \ vpxor 4 * 16(rio), x0, y3; \ vpxor 5 * 16(rio), x0, y2; \ vpxor 6 * 16(rio), x0, y1; \ vpxor 7 * 16(rio), x0, y0; \ vpxor 8 * 16(rio), x0, x7; \ vpxor 9 * 16(rio), x0, x6; \ vpxor 10 * 16(rio), x0, x5; \ vpxor 11 * 16(rio), x0, x4; \ vpxor 12 * 16(rio), x0, x3; \ vpxor 13 * 16(rio), x0, x2; \ vpxor 14 * 16(rio), x0, x1; \ vpxor 15 * 16(rio), x0, x0; /* byteslice pre-whitened blocks and store to temporary memory */ #define inpack16_post(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd) \ byteslice_16x16b_fast(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, \ y4, y5, y6, y7, (mem_ab), (mem_cd)); \ \ vmovdqu x0, 0 * 16(mem_ab); \ vmovdqu x1, 1 * 16(mem_ab); \ vmovdqu x2, 2 * 16(mem_ab); \ vmovdqu x3, 3 * 16(mem_ab); \ vmovdqu x4, 4 * 16(mem_ab); \ vmovdqu x5, 5 * 16(mem_ab); \ vmovdqu x6, 6 * 16(mem_ab); \ vmovdqu x7, 7 * 16(mem_ab); \ vmovdqu y0, 0 * 16(mem_cd); \ vmovdqu y1, 1 * 16(mem_cd); \ vmovdqu y2, 2 * 16(mem_cd); \ vmovdqu y3, 3 * 16(mem_cd); \ vmovdqu y4, 4 * 16(mem_cd); \ vmovdqu y5, 5 * 16(mem_cd); \ vmovdqu y6, 6 * 16(mem_cd); \ vmovdqu y7, 7 * 16(mem_cd); /* de-byteslice, apply post-whitening and store blocks */ #define outunpack16(x0, x1, x2, 
x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, \ y5, y6, y7, key, stack_tmp0, stack_tmp1) \ byteslice_16x16b_fast(y0, y4, x0, x4, y1, y5, x1, x5, y2, y6, x2, x6, \ y3, y7, x3, x7, stack_tmp0, stack_tmp1); \ \ vmovdqu x0, stack_tmp0; \ \ vmovq key, x0; \ vpshufb .Lpack_bswap rRIP, x0, x0; \ \ vpxor x0, y7, y7; \ vpxor x0, y6, y6; \ vpxor x0, y5, y5; \ vpxor x0, y4, y4; \ vpxor x0, y3, y3; \ vpxor x0, y2, y2; \ vpxor x0, y1, y1; \ vpxor x0, y0, y0; \ vpxor x0, x7, x7; \ vpxor x0, x6, x6; \ vpxor x0, x5, x5; \ vpxor x0, x4, x4; \ vpxor x0, x3, x3; \ vpxor x0, x2, x2; \ vpxor x0, x1, x1; \ vpxor stack_tmp0, x0, x0; #define write_output(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, rio) \ vmovdqu x0, 0 * 16(rio); \ vmovdqu x1, 1 * 16(rio); \ vmovdqu x2, 2 * 16(rio); \ vmovdqu x3, 3 * 16(rio); \ vmovdqu x4, 4 * 16(rio); \ vmovdqu x5, 5 * 16(rio); \ vmovdqu x6, 6 * 16(rio); \ vmovdqu x7, 7 * 16(rio); \ vmovdqu y0, 8 * 16(rio); \ vmovdqu y1, 9 * 16(rio); \ vmovdqu y2, 10 * 16(rio); \ vmovdqu y3, 11 * 16(rio); \ vmovdqu y4, 12 * 16(rio); \ vmovdqu y5, 13 * 16(rio); \ vmovdqu y6, 14 * 16(rio); \ vmovdqu y7, 15 * 16(rio); SECTION_RODATA ELF(.type _camellia_aesni_avx_data,@object;) _camellia_aesni_avx_data: .align 16 #define SHUFB_BYTES(idx) \ 0 + (idx), 4 + (idx), 8 + (idx), 12 + (idx) .Lshufb_16x16b: .byte SHUFB_BYTES(0), SHUFB_BYTES(1), SHUFB_BYTES(2), SHUFB_BYTES(3); .Lpack_bswap: .long 0x00010203 .long 0x04050607 .long 0x80808080 .long 0x80808080 /* For CTR-mode IV byteswap */ .Lbswap128_mask: .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 /* * pre-SubByte transform * * pre-lookup for sbox1, sbox2, sbox3: * swap_bitendianness( * isom_map_camellia_to_aes( * camellia_f( * swap_bitendianess(in) * ) * ) * ) * * (note: '⊕ 0xc5' inside camellia_f()) */ .Lpre_tf_lo_s1: .byte 0x45, 0xe8, 0x40, 0xed, 0x2e, 0x83, 0x2b, 0x86 .byte 0x4b, 0xe6, 0x4e, 0xe3, 0x20, 0x8d, 0x25, 0x88 .Lpre_tf_hi_s1: .byte 0x00, 0x51, 0xf1, 0xa0, 0x8a, 0xdb, 0x7b, 0x2a .byte 0x09, 0x58, 0xf8, 0xa9, 0x83, 0xd2, 0x72, 0x23 /* * pre-SubByte transform * * pre-lookup for sbox4: * swap_bitendianness( * isom_map_camellia_to_aes( * camellia_f( * swap_bitendianess(in <<< 1) * ) * ) * ) * * (note: '⊕ 0xc5' inside camellia_f()) */ .Lpre_tf_lo_s4: .byte 0x45, 0x40, 0x2e, 0x2b, 0x4b, 0x4e, 0x20, 0x25 .byte 0x14, 0x11, 0x7f, 0x7a, 0x1a, 0x1f, 0x71, 0x74 .Lpre_tf_hi_s4: .byte 0x00, 0xf1, 0x8a, 0x7b, 0x09, 0xf8, 0x83, 0x72 .byte 0xad, 0x5c, 0x27, 0xd6, 0xa4, 0x55, 0x2e, 0xdf /* * post-SubByte transform * * post-lookup for sbox1, sbox4: * swap_bitendianness( * camellia_h( * isom_map_aes_to_camellia( * swap_bitendianness( * aes_inverse_affine_transform(in) * ) * ) * ) * ) * * (note: '⊕ 0x6e' inside camellia_h()) */ .Lpost_tf_lo_s1: .byte 0x3c, 0xcc, 0xcf, 0x3f, 0x32, 0xc2, 0xc1, 0x31 .byte 0xdc, 0x2c, 0x2f, 0xdf, 0xd2, 0x22, 0x21, 0xd1 .Lpost_tf_hi_s1: .byte 0x00, 0xf9, 0x86, 0x7f, 0xd7, 0x2e, 0x51, 0xa8 .byte 0xa4, 0x5d, 0x22, 0xdb, 0x73, 0x8a, 0xf5, 0x0c /* * post-SubByte transform * * post-lookup for sbox2: * swap_bitendianness( * camellia_h( * isom_map_aes_to_camellia( * swap_bitendianness( * aes_inverse_affine_transform(in) * ) * ) * ) * ) <<< 1 * * (note: '⊕ 0x6e' inside camellia_h()) */ .Lpost_tf_lo_s2: .byte 0x78, 0x99, 0x9f, 0x7e, 0x64, 0x85, 0x83, 0x62 .byte 0xb9, 0x58, 0x5e, 0xbf, 0xa5, 0x44, 0x42, 0xa3 .Lpost_tf_hi_s2: .byte 0x00, 0xf3, 0x0d, 0xfe, 0xaf, 0x5c, 0xa2, 0x51 .byte 0x49, 0xba, 0x44, 0xb7, 0xe6, 0x15, 0xeb, 0x18 /* * post-SubByte transform * * post-lookup for sbox3: * swap_bitendianness( * camellia_h( 
* isom_map_aes_to_camellia( * swap_bitendianness( * aes_inverse_affine_transform(in) * ) * ) * ) * ) >>> 1 * * (note: '⊕ 0x6e' inside camellia_h()) */ .Lpost_tf_lo_s3: .byte 0x1e, 0x66, 0xe7, 0x9f, 0x19, 0x61, 0xe0, 0x98 .byte 0x6e, 0x16, 0x97, 0xef, 0x69, 0x11, 0x90, 0xe8 .Lpost_tf_hi_s3: .byte 0x00, 0xfc, 0x43, 0xbf, 0xeb, 0x17, 0xa8, 0x54 .byte 0x52, 0xae, 0x11, 0xed, 0xb9, 0x45, 0xfa, 0x06 /* For isolating SubBytes from AESENCLAST, inverse shift row */ .Linv_shift_row: .byte 0x00, 0x0d, 0x0a, 0x07, 0x04, 0x01, 0x0e, 0x0b .byte 0x08, 0x05, 0x02, 0x0f, 0x0c, 0x09, 0x06, 0x03 /* shuffle mask for 8x8 byte transpose */ .Ltranspose_8x8_shuf: .byte 0, 1, 4, 5, 2, 3, 6, 7, 8+0, 8+1, 8+4, 8+5, 8+2, 8+3, 8+6, 8+7 +/* CTR byte addition constants */ +.Lbige_addb_1: + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 +.Lbige_addb_2: + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2 +.Lbige_addb_3: + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3 +.Lbige_addb_4: + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4 +.Lbige_addb_5: + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5 +.Lbige_addb_6: + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6 +.Lbige_addb_7: + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7 +.Lbige_addb_8: + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8 +.Lbige_addb_9: + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9 +.Lbige_addb_10: + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10 +.Lbige_addb_11: + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11 +.Lbige_addb_12: + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12 +.Lbige_addb_13: + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 13 +.Lbige_addb_14: + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 14 +.Lbige_addb_15: + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15 + .align 4 /* 4-bit mask */ .L0f0f0f0f: .long 0x0f0f0f0f .text .align 16 ELF(.type __camellia_enc_blk16,@function;) __camellia_enc_blk16: /* input: * %rdi: ctx, CTX * %rax: temporary storage, 256 bytes * %r8d: 24 for 16 byte key, 32 for larger * %xmm0..%xmm15: 16 plaintext blocks * output: * %xmm0..%xmm15: 16 encrypted blocks, order swapped: * 7, 8, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 */ CFI_STARTPROC(); leaq 8 * 16(%rax), %rcx; leaq (-8 * 8)(CTX, %r8, 8), %r8; inpack16_post(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rax, %rcx); .align 8 .Lenc_loop: enc_rounds16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rax, %rcx, 0); cmpq %r8, CTX; je .Lenc_done; leaq (8 * 8)(CTX), CTX; fls16(%rax, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %rcx, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, ((key_table) + 0)(CTX), ((key_table) + 4)(CTX), ((key_table) + 8)(CTX), ((key_table) + 12)(CTX)); jmp .Lenc_loop; .align 8 .Lenc_done: /* load CD for output */ vmovdqu 0 * 16(%rcx), %xmm8; vmovdqu 1 * 16(%rcx), %xmm9; vmovdqu 2 * 16(%rcx), %xmm10; vmovdqu 3 * 16(%rcx), %xmm11; vmovdqu 4 * 16(%rcx), %xmm12; vmovdqu 5 * 16(%rcx), %xmm13; vmovdqu 6 * 16(%rcx), %xmm14; vmovdqu 7 * 16(%rcx), %xmm15; outunpack16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, ((key_table) + 8 * 8)(%r8), (%rax), 1 * 16(%rax)); ret_spec_stop; CFI_ENDPROC(); ELF(.size __camellia_enc_blk16,.-__camellia_enc_blk16;) .align 16 ELF(.type __camellia_dec_blk16,@function;) __camellia_dec_blk16: /* 
input: * %rdi: ctx, CTX * %rax: temporary storage, 256 bytes * %r8d: 24 for 16 byte key, 32 for larger * %xmm0..%xmm15: 16 encrypted blocks * output: * %xmm0..%xmm15: 16 plaintext blocks, order swapped: * 7, 8, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 */ CFI_STARTPROC(); movq %r8, %rcx; movq CTX, %r8 leaq (-8 * 8)(CTX, %rcx, 8), CTX; leaq 8 * 16(%rax), %rcx; inpack16_post(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rax, %rcx); .align 8 .Ldec_loop: dec_rounds16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rax, %rcx, 0); cmpq %r8, CTX; je .Ldec_done; fls16(%rax, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %rcx, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, ((key_table) + 8)(CTX), ((key_table) + 12)(CTX), ((key_table) + 0)(CTX), ((key_table) + 4)(CTX)); leaq (-8 * 8)(CTX), CTX; jmp .Ldec_loop; .align 8 .Ldec_done: /* load CD for output */ vmovdqu 0 * 16(%rcx), %xmm8; vmovdqu 1 * 16(%rcx), %xmm9; vmovdqu 2 * 16(%rcx), %xmm10; vmovdqu 3 * 16(%rcx), %xmm11; vmovdqu 4 * 16(%rcx), %xmm12; vmovdqu 5 * 16(%rcx), %xmm13; vmovdqu 6 * 16(%rcx), %xmm14; vmovdqu 7 * 16(%rcx), %xmm15; outunpack16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, (key_table)(CTX), (%rax), 1 * 16(%rax)); ret_spec_stop; CFI_ENDPROC(); ELF(.size __camellia_dec_blk16,.-__camellia_dec_blk16;) #define inc_le128(x, minus_one, tmp) \ vpcmpeqq minus_one, x, tmp; \ vpsubq minus_one, x, x; \ vpslldq $8, tmp, tmp; \ vpsubq tmp, x, x; .align 16 .globl _gcry_camellia_aesni_avx_ctr_enc ELF(.type _gcry_camellia_aesni_avx_ctr_enc,@function;) _gcry_camellia_aesni_avx_ctr_enc: /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) * %rdx: src (16 blocks) * %rcx: iv (big endian, 128bit) */ CFI_STARTPROC(); pushq %rbp; CFI_PUSH(%rbp); movq %rsp, %rbp; CFI_DEF_CFA_REGISTER(%rbp); vzeroupper; cmpl $128, key_bitlength(CTX); movl $32, %r8d; movl $24, %eax; cmovel %eax, %r8d; /* max */ subq $(16 * 16), %rsp; andq $~31, %rsp; movq %rsp, %rax; + cmpb $(0x100 - 16), 15(%rcx); + jbe .Lctr_byteadd; + vmovdqa .Lbswap128_mask rRIP, %xmm14; /* load IV and byteswap */ vmovdqu (%rcx), %xmm15; vmovdqu %xmm15, 15 * 16(%rax); vpshufb %xmm14, %xmm15, %xmm0; /* be => le */ vpcmpeqd %xmm15, %xmm15, %xmm15; vpsrldq $8, %xmm15, %xmm15; /* low: -1, high: 0 */ /* construct IVs */ inc_le128(%xmm0, %xmm15, %xmm13); vpshufb %xmm14, %xmm0, %xmm13; vmovdqu %xmm13, 14 * 16(%rax); inc_le128(%xmm0, %xmm15, %xmm13); vpshufb %xmm14, %xmm0, %xmm13; vmovdqu %xmm13, 13 * 16(%rax); inc_le128(%xmm0, %xmm15, %xmm13); vpshufb %xmm14, %xmm0, %xmm12; inc_le128(%xmm0, %xmm15, %xmm13); vpshufb %xmm14, %xmm0, %xmm11; inc_le128(%xmm0, %xmm15, %xmm13); vpshufb %xmm14, %xmm0, %xmm10; inc_le128(%xmm0, %xmm15, %xmm13); vpshufb %xmm14, %xmm0, %xmm9; inc_le128(%xmm0, %xmm15, %xmm13); vpshufb %xmm14, %xmm0, %xmm8; inc_le128(%xmm0, %xmm15, %xmm13); vpshufb %xmm14, %xmm0, %xmm7; inc_le128(%xmm0, %xmm15, %xmm13); vpshufb %xmm14, %xmm0, %xmm6; inc_le128(%xmm0, %xmm15, %xmm13); vpshufb %xmm14, %xmm0, %xmm5; inc_le128(%xmm0, %xmm15, %xmm13); vpshufb %xmm14, %xmm0, %xmm4; inc_le128(%xmm0, %xmm15, %xmm13); vpshufb %xmm14, %xmm0, %xmm3; inc_le128(%xmm0, %xmm15, %xmm13); vpshufb %xmm14, %xmm0, %xmm2; inc_le128(%xmm0, %xmm15, %xmm13); vpshufb %xmm14, %xmm0, %xmm1; inc_le128(%xmm0, %xmm15, %xmm13); vmovdqa %xmm0, %xmm13; vpshufb %xmm14, %xmm0, %xmm0; 
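For reference, a rough C model of the CTR counter handling used here: the counter is kept as a 128-bit big-endian value, the generic path byteswaps it and increments it per block with the inc_le128() carry trick, and the new .Lctr_byteadd path only bumps the last byte with vpaddb when none of the 16 per-block additions can carry (last byte <= 0xf0). This is an illustrative sketch, not part of the patch; the helper names are made up.

#include <stdint.h>
#include <string.h>

/* Illustrative scalar sketch only; not part of libgcrypt. */

static void be128_add(uint8_t ctr[16], unsigned int n)
{
	/* Full big-endian 128-bit addition with carry, as done by
	 * .Lctr_byteadd_full_ctr_carry using bswapq/addq/adcq. */
	unsigned int carry = n;
	int i;

	for (i = 15; i >= 0 && carry; i--) {
		carry += ctr[i];
		ctr[i] = (uint8_t)carry;
		carry >>= 8;
	}
}

static void ctr_make_block_ivs(uint8_t ctr[16], uint8_t ivs[16][16])
{
	int i;

	if (ctr[15] <= 0xf0) {
		/* Byte-addition fast path: adding 0..15 to the last
		 * byte cannot carry, so each IV is the counter with
		 * only its last byte bumped (.Lbige_addb_N + vpaddb). */
		for (i = 0; i < 16; i++) {
			memcpy(ivs[i], ctr, 16);
			ivs[i][15] = (uint8_t)(ctr[15] + i);
		}
	} else {
		/* Generic path: full 128-bit increment per block
		 * (inc_le128 on a byteswapped copy in the assembly). */
		for (i = 0; i < 16; i++) {
			memcpy(ivs[i], ctr, 16);
			be128_add(ctr, 1);
		}
		return;
	}
	be128_add(ctr, 16);	/* advance stored counter for next call */
}

The apparent point of the fast path is that the 16 vpaddb additions are independent byte operations on the unmodified big-endian counter, replacing the per-block byteswap plus 128-bit add/carry chain of the generic path.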
inc_le128(%xmm13, %xmm15, %xmm14); vpshufb .Lbswap128_mask rRIP, %xmm13, %xmm13; /* le => be */ vmovdqu %xmm13, (%rcx); +.align 8 +.Lload_ctr_done: /* inpack16_pre: */ vmovq (key_table)(CTX), %xmm15; vpshufb .Lpack_bswap rRIP, %xmm15, %xmm15; vpxor %xmm0, %xmm15, %xmm0; vpxor %xmm1, %xmm15, %xmm1; vpxor %xmm2, %xmm15, %xmm2; vpxor %xmm3, %xmm15, %xmm3; vpxor %xmm4, %xmm15, %xmm4; vpxor %xmm5, %xmm15, %xmm5; vpxor %xmm6, %xmm15, %xmm6; vpxor %xmm7, %xmm15, %xmm7; vpxor %xmm8, %xmm15, %xmm8; vpxor %xmm9, %xmm15, %xmm9; vpxor %xmm10, %xmm15, %xmm10; vpxor %xmm11, %xmm15, %xmm11; vpxor %xmm12, %xmm15, %xmm12; vpxor 13 * 16(%rax), %xmm15, %xmm13; vpxor 14 * 16(%rax), %xmm15, %xmm14; vpxor 15 * 16(%rax), %xmm15, %xmm15; call __camellia_enc_blk16; vpxor 0 * 16(%rdx), %xmm7, %xmm7; vpxor 1 * 16(%rdx), %xmm6, %xmm6; vpxor 2 * 16(%rdx), %xmm5, %xmm5; vpxor 3 * 16(%rdx), %xmm4, %xmm4; vpxor 4 * 16(%rdx), %xmm3, %xmm3; vpxor 5 * 16(%rdx), %xmm2, %xmm2; vpxor 6 * 16(%rdx), %xmm1, %xmm1; vpxor 7 * 16(%rdx), %xmm0, %xmm0; vpxor 8 * 16(%rdx), %xmm15, %xmm15; vpxor 9 * 16(%rdx), %xmm14, %xmm14; vpxor 10 * 16(%rdx), %xmm13, %xmm13; vpxor 11 * 16(%rdx), %xmm12, %xmm12; vpxor 12 * 16(%rdx), %xmm11, %xmm11; vpxor 13 * 16(%rdx), %xmm10, %xmm10; vpxor 14 * 16(%rdx), %xmm9, %xmm9; vpxor 15 * 16(%rdx), %xmm8, %xmm8; write_output(%xmm7, %xmm6, %xmm5, %xmm4, %xmm3, %xmm2, %xmm1, %xmm0, %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9, %xmm8, %rsi); vzeroall; leave; CFI_LEAVE(); ret_spec_stop; + +.align 8 +.Lctr_byteadd_full_ctr_carry: + movq 8(%rcx), %r11; + movq (%rcx), %r10; + bswapq %r11; + bswapq %r10; + addq $16, %r11; + adcq $0, %r10; + bswapq %r11; + bswapq %r10; + movq %r11, 8(%rcx); + movq %r10, (%rcx); + jmp .Lctr_byteadd_xmm; +.align 8 +.Lctr_byteadd: + vmovdqu (%rcx), %xmm15; + je .Lctr_byteadd_full_ctr_carry; + addb $16, 15(%rcx); +.Lctr_byteadd_xmm: + vmovdqa %xmm15, %xmm0; + vpaddb .Lbige_addb_1 rRIP, %xmm15, %xmm14; + vmovdqu %xmm15, 15 * 16(%rax); + vpaddb .Lbige_addb_2 rRIP, %xmm15, %xmm13; + vmovdqu %xmm14, 14 * 16(%rax); + vpaddb .Lbige_addb_3 rRIP, %xmm15, %xmm12; + vmovdqu %xmm13, 13 * 16(%rax); + vpaddb .Lbige_addb_4 rRIP, %xmm15, %xmm11; + vpaddb .Lbige_addb_5 rRIP, %xmm15, %xmm10; + vpaddb .Lbige_addb_6 rRIP, %xmm15, %xmm9; + vpaddb .Lbige_addb_7 rRIP, %xmm15, %xmm8; + vpaddb .Lbige_addb_8 rRIP, %xmm0, %xmm7; + vpaddb .Lbige_addb_9 rRIP, %xmm0, %xmm6; + vpaddb .Lbige_addb_10 rRIP, %xmm0, %xmm5; + vpaddb .Lbige_addb_11 rRIP, %xmm0, %xmm4; + vpaddb .Lbige_addb_12 rRIP, %xmm0, %xmm3; + vpaddb .Lbige_addb_13 rRIP, %xmm0, %xmm2; + vpaddb .Lbige_addb_14 rRIP, %xmm0, %xmm1; + vpaddb .Lbige_addb_15 rRIP, %xmm0, %xmm0; + + jmp .Lload_ctr_done; CFI_ENDPROC(); ELF(.size _gcry_camellia_aesni_avx_ctr_enc,.-_gcry_camellia_aesni_avx_ctr_enc;) .align 16 .globl _gcry_camellia_aesni_avx_ecb_enc ELF(.type _gcry_camellia_aesni_avx_ecb_enc,@function;) _gcry_camellia_aesni_avx_ecb_enc: /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) * %rdx: src (16 blocks) */ CFI_STARTPROC(); pushq %rbp; CFI_PUSH(%rbp); movq %rsp, %rbp; CFI_DEF_CFA_REGISTER(%rbp); vzeroupper; cmpl $128, key_bitlength(CTX); movl $32, %r8d; movl $24, %eax; cmovel %eax, %r8d; /* max */ inpack16_pre(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rdx, (key_table)(CTX)); subq $(16 * 16), %rsp; andq $~31, %rsp; movq %rsp, %rax; call __camellia_enc_blk16; write_output(%xmm7, %xmm6, %xmm5, %xmm4, %xmm3, %xmm2, %xmm1, %xmm0, %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, 
%xmm9, %xmm8, %rsi); vzeroall; leave; CFI_LEAVE(); ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_camellia_aesni_avx_ecb_enc,.-_gcry_camellia_aesni_avx_ecb_enc;) .align 16 .globl _gcry_camellia_aesni_avx_ecb_dec ELF(.type _gcry_camellia_aesni_avx_ecb_dec,@function;) _gcry_camellia_aesni_avx_ecb_dec: /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) * %rdx: src (16 blocks) */ CFI_STARTPROC(); pushq %rbp; CFI_PUSH(%rbp); movq %rsp, %rbp; CFI_DEF_CFA_REGISTER(%rbp); vzeroupper; cmpl $128, key_bitlength(CTX); movl $32, %r8d; movl $24, %eax; cmovel %eax, %r8d; /* max */ inpack16_pre(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rdx, (key_table)(CTX, %r8, 8)); subq $(16 * 16), %rsp; andq $~31, %rsp; movq %rsp, %rax; call __camellia_dec_blk16; write_output(%xmm7, %xmm6, %xmm5, %xmm4, %xmm3, %xmm2, %xmm1, %xmm0, %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9, %xmm8, %rsi); vzeroall; leave; CFI_LEAVE(); ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_camellia_aesni_avx_ecb_dec,.-_gcry_camellia_aesni_avx_ecb_dec;) .align 16 .globl _gcry_camellia_aesni_avx_cbc_dec ELF(.type _gcry_camellia_aesni_avx_cbc_dec,@function;) _gcry_camellia_aesni_avx_cbc_dec: /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) * %rdx: src (16 blocks) * %rcx: iv */ CFI_STARTPROC(); pushq %rbp; CFI_PUSH(%rbp); movq %rsp, %rbp; CFI_DEF_CFA_REGISTER(%rbp); vzeroupper; movq %rcx, %r9; cmpl $128, key_bitlength(CTX); movl $32, %r8d; movl $24, %eax; cmovel %eax, %r8d; /* max */ inpack16_pre(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %rdx, (key_table)(CTX, %r8, 8)); subq $(16 * 16), %rsp; andq $~31, %rsp; movq %rsp, %rax; call __camellia_dec_blk16; /* XOR output with IV */ vpxor (%r9), %xmm7, %xmm7; vpxor (0 * 16)(%rdx), %xmm6, %xmm6; vpxor (1 * 16)(%rdx), %xmm5, %xmm5; vpxor (2 * 16)(%rdx), %xmm4, %xmm4; vpxor (3 * 16)(%rdx), %xmm3, %xmm3; vpxor (4 * 16)(%rdx), %xmm2, %xmm2; vpxor (5 * 16)(%rdx), %xmm1, %xmm1; vpxor (6 * 16)(%rdx), %xmm0, %xmm0; vpxor (7 * 16)(%rdx), %xmm15, %xmm15; vpxor (8 * 16)(%rdx), %xmm14, %xmm14; vpxor (9 * 16)(%rdx), %xmm13, %xmm13; vpxor (10 * 16)(%rdx), %xmm12, %xmm12; vpxor (11 * 16)(%rdx), %xmm11, %xmm11; vpxor (12 * 16)(%rdx), %xmm10, %xmm10; vpxor (13 * 16)(%rdx), %xmm9, %xmm9; vpxor (14 * 16)(%rdx), %xmm8, %xmm8; movq (15 * 16 + 0)(%rdx), %r10; movq (15 * 16 + 8)(%rdx), %r11; write_output(%xmm7, %xmm6, %xmm5, %xmm4, %xmm3, %xmm2, %xmm1, %xmm0, %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9, %xmm8, %rsi); /* store new IV */ movq %r10, (0)(%r9); movq %r11, (8)(%r9); vzeroall; leave; CFI_LEAVE(); ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_camellia_aesni_avx_cbc_dec,.-_gcry_camellia_aesni_avx_cbc_dec;) .align 16 .globl _gcry_camellia_aesni_avx_cfb_dec ELF(.type _gcry_camellia_aesni_avx_cfb_dec,@function;) _gcry_camellia_aesni_avx_cfb_dec: /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) * %rdx: src (16 blocks) * %rcx: iv */ CFI_STARTPROC(); pushq %rbp; CFI_PUSH(%rbp); movq %rsp, %rbp; CFI_DEF_CFA_REGISTER(%rbp); vzeroupper; cmpl $128, key_bitlength(CTX); movl $32, %r8d; movl $24, %eax; cmovel %eax, %r8d; /* max */ subq $(16 * 16), %rsp; andq $~31, %rsp; movq %rsp, %rax; /* inpack16_pre: */ vmovq (key_table)(CTX), %xmm0; vpshufb .Lpack_bswap rRIP, %xmm0, %xmm0; vpxor (%rcx), %xmm0, %xmm15; vmovdqu 15 * 16(%rdx), %xmm1; vmovdqu %xmm1, (%rcx); /* store new IV */ vpxor 0 * 16(%rdx), %xmm0, %xmm14; vpxor 1 * 16(%rdx), %xmm0, %xmm13; vpxor 2 * 
16(%rdx), %xmm0, %xmm12; vpxor 3 * 16(%rdx), %xmm0, %xmm11; vpxor 4 * 16(%rdx), %xmm0, %xmm10; vpxor 5 * 16(%rdx), %xmm0, %xmm9; vpxor 6 * 16(%rdx), %xmm0, %xmm8; vpxor 7 * 16(%rdx), %xmm0, %xmm7; vpxor 8 * 16(%rdx), %xmm0, %xmm6; vpxor 9 * 16(%rdx), %xmm0, %xmm5; vpxor 10 * 16(%rdx), %xmm0, %xmm4; vpxor 11 * 16(%rdx), %xmm0, %xmm3; vpxor 12 * 16(%rdx), %xmm0, %xmm2; vpxor 13 * 16(%rdx), %xmm0, %xmm1; vpxor 14 * 16(%rdx), %xmm0, %xmm0; call __camellia_enc_blk16; vpxor 0 * 16(%rdx), %xmm7, %xmm7; vpxor 1 * 16(%rdx), %xmm6, %xmm6; vpxor 2 * 16(%rdx), %xmm5, %xmm5; vpxor 3 * 16(%rdx), %xmm4, %xmm4; vpxor 4 * 16(%rdx), %xmm3, %xmm3; vpxor 5 * 16(%rdx), %xmm2, %xmm2; vpxor 6 * 16(%rdx), %xmm1, %xmm1; vpxor 7 * 16(%rdx), %xmm0, %xmm0; vpxor 8 * 16(%rdx), %xmm15, %xmm15; vpxor 9 * 16(%rdx), %xmm14, %xmm14; vpxor 10 * 16(%rdx), %xmm13, %xmm13; vpxor 11 * 16(%rdx), %xmm12, %xmm12; vpxor 12 * 16(%rdx), %xmm11, %xmm11; vpxor 13 * 16(%rdx), %xmm10, %xmm10; vpxor 14 * 16(%rdx), %xmm9, %xmm9; vpxor 15 * 16(%rdx), %xmm8, %xmm8; write_output(%xmm7, %xmm6, %xmm5, %xmm4, %xmm3, %xmm2, %xmm1, %xmm0, %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9, %xmm8, %rsi); vzeroall; leave; CFI_LEAVE(); ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_camellia_aesni_avx_cfb_dec,.-_gcry_camellia_aesni_avx_cfb_dec;) .align 16 .globl _gcry_camellia_aesni_avx_ocb_enc ELF(.type _gcry_camellia_aesni_avx_ocb_enc,@function;) _gcry_camellia_aesni_avx_ocb_enc: /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) * %rdx: src (16 blocks) * %rcx: offset * %r8 : checksum * %r9 : L pointers (void *L[16]) */ CFI_STARTPROC(); pushq %rbp; CFI_PUSH(%rbp); movq %rsp, %rbp; CFI_DEF_CFA_REGISTER(%rbp); vzeroupper; subq $(16 * 16 + 4 * 8), %rsp; andq $~31, %rsp; movq %rsp, %rax; movq %r10, (16 * 16 + 0 * 8)(%rsp); movq %r11, (16 * 16 + 1 * 8)(%rsp); movq %r12, (16 * 16 + 2 * 8)(%rsp); movq %r13, (16 * 16 + 3 * 8)(%rsp); CFI_REG_ON_STACK(r10, 16 * 16 + 0 * 8); CFI_REG_ON_STACK(r11, 16 * 16 + 1 * 8); CFI_REG_ON_STACK(r12, 16 * 16 + 2 * 8); CFI_REG_ON_STACK(r13, 16 * 16 + 3 * 8); vmovdqu (%rcx), %xmm14; vmovdqu (%r8), %xmm15; /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ /* Checksum_i = Checksum_{i-1} xor P_i */ /* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i) */ #define OCB_INPUT(n, lreg, xreg) \ vmovdqu (n * 16)(%rdx), xreg; \ vpxor (lreg), %xmm14, %xmm14; \ vpxor xreg, %xmm15, %xmm15; \ vpxor xreg, %xmm14, xreg; \ vmovdqu %xmm14, (n * 16)(%rsi); movq (0 * 8)(%r9), %r10; movq (1 * 8)(%r9), %r11; movq (2 * 8)(%r9), %r12; movq (3 * 8)(%r9), %r13; OCB_INPUT(0, %r10, %xmm0); vmovdqu %xmm0, (15 * 16)(%rax); OCB_INPUT(1, %r11, %xmm0); vmovdqu %xmm0, (14 * 16)(%rax); OCB_INPUT(2, %r12, %xmm13); OCB_INPUT(3, %r13, %xmm12); movq (4 * 8)(%r9), %r10; movq (5 * 8)(%r9), %r11; movq (6 * 8)(%r9), %r12; movq (7 * 8)(%r9), %r13; OCB_INPUT(4, %r10, %xmm11); OCB_INPUT(5, %r11, %xmm10); OCB_INPUT(6, %r12, %xmm9); OCB_INPUT(7, %r13, %xmm8); movq (8 * 8)(%r9), %r10; movq (9 * 8)(%r9), %r11; movq (10 * 8)(%r9), %r12; movq (11 * 8)(%r9), %r13; OCB_INPUT(8, %r10, %xmm7); OCB_INPUT(9, %r11, %xmm6); OCB_INPUT(10, %r12, %xmm5); OCB_INPUT(11, %r13, %xmm4); movq (12 * 8)(%r9), %r10; movq (13 * 8)(%r9), %r11; movq (14 * 8)(%r9), %r12; movq (15 * 8)(%r9), %r13; OCB_INPUT(12, %r10, %xmm3); OCB_INPUT(13, %r11, %xmm2); OCB_INPUT(14, %r12, %xmm1); OCB_INPUT(15, %r13, %xmm0); #undef OCB_INPUT vmovdqu %xmm14, (%rcx); vmovdqu %xmm15, (%r8); cmpl $128, key_bitlength(CTX); movl $32, %r8d; movl $24, %r10d; cmovel %r10d, %r8d; /* max */ /* inpack16_pre: */ vmovq (key_table)(CTX), 
%xmm15; vpshufb .Lpack_bswap rRIP, %xmm15, %xmm15; vpxor %xmm0, %xmm15, %xmm0; vpxor %xmm1, %xmm15, %xmm1; vpxor %xmm2, %xmm15, %xmm2; vpxor %xmm3, %xmm15, %xmm3; vpxor %xmm4, %xmm15, %xmm4; vpxor %xmm5, %xmm15, %xmm5; vpxor %xmm6, %xmm15, %xmm6; vpxor %xmm7, %xmm15, %xmm7; vpxor %xmm8, %xmm15, %xmm8; vpxor %xmm9, %xmm15, %xmm9; vpxor %xmm10, %xmm15, %xmm10; vpxor %xmm11, %xmm15, %xmm11; vpxor %xmm12, %xmm15, %xmm12; vpxor %xmm13, %xmm15, %xmm13; vpxor 14 * 16(%rax), %xmm15, %xmm14; vpxor 15 * 16(%rax), %xmm15, %xmm15; call __camellia_enc_blk16; vpxor 0 * 16(%rsi), %xmm7, %xmm7; vpxor 1 * 16(%rsi), %xmm6, %xmm6; vpxor 2 * 16(%rsi), %xmm5, %xmm5; vpxor 3 * 16(%rsi), %xmm4, %xmm4; vpxor 4 * 16(%rsi), %xmm3, %xmm3; vpxor 5 * 16(%rsi), %xmm2, %xmm2; vpxor 6 * 16(%rsi), %xmm1, %xmm1; vpxor 7 * 16(%rsi), %xmm0, %xmm0; vpxor 8 * 16(%rsi), %xmm15, %xmm15; vpxor 9 * 16(%rsi), %xmm14, %xmm14; vpxor 10 * 16(%rsi), %xmm13, %xmm13; vpxor 11 * 16(%rsi), %xmm12, %xmm12; vpxor 12 * 16(%rsi), %xmm11, %xmm11; vpxor 13 * 16(%rsi), %xmm10, %xmm10; vpxor 14 * 16(%rsi), %xmm9, %xmm9; vpxor 15 * 16(%rsi), %xmm8, %xmm8; write_output(%xmm7, %xmm6, %xmm5, %xmm4, %xmm3, %xmm2, %xmm1, %xmm0, %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9, %xmm8, %rsi); vzeroall; movq (16 * 16 + 0 * 8)(%rsp), %r10; movq (16 * 16 + 1 * 8)(%rsp), %r11; movq (16 * 16 + 2 * 8)(%rsp), %r12; movq (16 * 16 + 3 * 8)(%rsp), %r13; CFI_RESTORE(%r10); CFI_RESTORE(%r11); CFI_RESTORE(%r12); CFI_RESTORE(%r13); leave; CFI_LEAVE(); ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_camellia_aesni_avx_ocb_enc,.-_gcry_camellia_aesni_avx_ocb_enc;) .align 16 .globl _gcry_camellia_aesni_avx_ocb_dec ELF(.type _gcry_camellia_aesni_avx_ocb_dec,@function;) _gcry_camellia_aesni_avx_ocb_dec: /* input: * %rdi: ctx, CTX * %rsi: dst (16 blocks) * %rdx: src (16 blocks) * %rcx: offset * %r8 : checksum * %r9 : L pointers (void *L[16]) */ CFI_STARTPROC(); pushq %rbp; CFI_PUSH(%rbp); movq %rsp, %rbp; CFI_DEF_CFA_REGISTER(%rbp); vzeroupper; subq $(16 * 16 + 4 * 8), %rsp; andq $~31, %rsp; movq %rsp, %rax; movq %r10, (16 * 16 + 0 * 8)(%rsp); movq %r11, (16 * 16 + 1 * 8)(%rsp); movq %r12, (16 * 16 + 2 * 8)(%rsp); movq %r13, (16 * 16 + 3 * 8)(%rsp); CFI_REG_ON_STACK(r10, 16 * 16 + 0 * 8); CFI_REG_ON_STACK(r11, 16 * 16 + 1 * 8); CFI_REG_ON_STACK(r12, 16 * 16 + 2 * 8); CFI_REG_ON_STACK(r13, 16 * 16 + 3 * 8); vmovdqu (%rcx), %xmm15; /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ /* P_i = Offset_i xor DECIPHER(K, C_i xor Offset_i) */ #define OCB_INPUT(n, lreg, xreg) \ vmovdqu (n * 16)(%rdx), xreg; \ vpxor (lreg), %xmm15, %xmm15; \ vpxor xreg, %xmm15, xreg; \ vmovdqu %xmm15, (n * 16)(%rsi); movq (0 * 8)(%r9), %r10; movq (1 * 8)(%r9), %r11; movq (2 * 8)(%r9), %r12; movq (3 * 8)(%r9), %r13; OCB_INPUT(0, %r10, %xmm0); vmovdqu %xmm0, (15 * 16)(%rax); OCB_INPUT(1, %r11, %xmm14); OCB_INPUT(2, %r12, %xmm13); OCB_INPUT(3, %r13, %xmm12); movq (4 * 8)(%r9), %r10; movq (5 * 8)(%r9), %r11; movq (6 * 8)(%r9), %r12; movq (7 * 8)(%r9), %r13; OCB_INPUT(4, %r10, %xmm11); OCB_INPUT(5, %r11, %xmm10); OCB_INPUT(6, %r12, %xmm9); OCB_INPUT(7, %r13, %xmm8); movq (8 * 8)(%r9), %r10; movq (9 * 8)(%r9), %r11; movq (10 * 8)(%r9), %r12; movq (11 * 8)(%r9), %r13; OCB_INPUT(8, %r10, %xmm7); OCB_INPUT(9, %r11, %xmm6); OCB_INPUT(10, %r12, %xmm5); OCB_INPUT(11, %r13, %xmm4); movq (12 * 8)(%r9), %r10; movq (13 * 8)(%r9), %r11; movq (14 * 8)(%r9), %r12; movq (15 * 8)(%r9), %r13; OCB_INPUT(12, %r10, %xmm3); OCB_INPUT(13, %r11, %xmm2); OCB_INPUT(14, %r12, %xmm1); OCB_INPUT(15, %r13, %xmm0); #undef OCB_INPUT vmovdqu 
%xmm15, (%rcx); movq %r8, %r10; cmpl $128, key_bitlength(CTX); movl $32, %r8d; movl $24, %r9d; cmovel %r9d, %r8d; /* max */ /* inpack16_pre: */ vmovq (key_table)(CTX, %r8, 8), %xmm15; vpshufb .Lpack_bswap rRIP, %xmm15, %xmm15; vpxor %xmm0, %xmm15, %xmm0; vpxor %xmm1, %xmm15, %xmm1; vpxor %xmm2, %xmm15, %xmm2; vpxor %xmm3, %xmm15, %xmm3; vpxor %xmm4, %xmm15, %xmm4; vpxor %xmm5, %xmm15, %xmm5; vpxor %xmm6, %xmm15, %xmm6; vpxor %xmm7, %xmm15, %xmm7; vpxor %xmm8, %xmm15, %xmm8; vpxor %xmm9, %xmm15, %xmm9; vpxor %xmm10, %xmm15, %xmm10; vpxor %xmm11, %xmm15, %xmm11; vpxor %xmm12, %xmm15, %xmm12; vpxor %xmm13, %xmm15, %xmm13; vpxor %xmm14, %xmm15, %xmm14; vpxor 15 * 16(%rax), %xmm15, %xmm15; call __camellia_dec_blk16; vpxor 0 * 16(%rsi), %xmm7, %xmm7; vpxor 1 * 16(%rsi), %xmm6, %xmm6; vpxor 2 * 16(%rsi), %xmm5, %xmm5; vpxor 3 * 16(%rsi), %xmm4, %xmm4; vpxor 4 * 16(%rsi), %xmm3, %xmm3; vpxor 5 * 16(%rsi), %xmm2, %xmm2; vpxor 6 * 16(%rsi), %xmm1, %xmm1; vpxor 7 * 16(%rsi), %xmm0, %xmm0; vmovdqu %xmm7, (7 * 16)(%rax); vpxor 8 * 16(%rsi), %xmm15, %xmm15; vpxor 9 * 16(%rsi), %xmm14, %xmm14; vpxor 10 * 16(%rsi), %xmm13, %xmm13; vpxor 11 * 16(%rsi), %xmm12, %xmm12; vpxor 12 * 16(%rsi), %xmm11, %xmm11; vpxor 13 * 16(%rsi), %xmm10, %xmm10; vpxor 14 * 16(%rsi), %xmm9, %xmm9; vpxor 15 * 16(%rsi), %xmm8, %xmm8; /* Checksum_i = Checksum_{i-1} xor P_i */ vpxor (%r10), %xmm7, %xmm7; vpxor %xmm6, %xmm7, %xmm7; vpxor %xmm5, %xmm7, %xmm7; vpxor %xmm4, %xmm7, %xmm7; vpxor %xmm3, %xmm7, %xmm7; vpxor %xmm2, %xmm7, %xmm7; vpxor %xmm1, %xmm7, %xmm7; vpxor %xmm0, %xmm7, %xmm7; vpxor %xmm15, %xmm7, %xmm7; vpxor %xmm14, %xmm7, %xmm7; vpxor %xmm13, %xmm7, %xmm7; vpxor %xmm12, %xmm7, %xmm7; vpxor %xmm11, %xmm7, %xmm7; vpxor %xmm10, %xmm7, %xmm7; vpxor %xmm9, %xmm7, %xmm7; vpxor %xmm8, %xmm7, %xmm7; vmovdqu %xmm7, (%r10); vmovdqu (7 * 16)(%rax), %xmm7; write_output(%xmm7, %xmm6, %xmm5, %xmm4, %xmm3, %xmm2, %xmm1, %xmm0, %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9, %xmm8, %rsi); vzeroall; movq (16 * 16 + 0 * 8)(%rsp), %r10; movq (16 * 16 + 1 * 8)(%rsp), %r11; movq (16 * 16 + 2 * 8)(%rsp), %r12; movq (16 * 16 + 3 * 8)(%rsp), %r13; CFI_RESTORE(%r10); CFI_RESTORE(%r11); CFI_RESTORE(%r12); CFI_RESTORE(%r13); leave; CFI_LEAVE(); ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_camellia_aesni_avx_ocb_dec,.-_gcry_camellia_aesni_avx_ocb_dec;) .align 16 .globl _gcry_camellia_aesni_avx_ocb_auth ELF(.type _gcry_camellia_aesni_avx_ocb_auth,@function;) _gcry_camellia_aesni_avx_ocb_auth: /* input: * %rdi: ctx, CTX * %rsi: abuf (16 blocks) * %rdx: offset * %rcx: checksum * %r8 : L pointers (void *L[16]) */ CFI_STARTPROC(); pushq %rbp; CFI_PUSH(%rbp); movq %rsp, %rbp; CFI_DEF_CFA_REGISTER(%rbp); vzeroupper; subq $(16 * 16 + 4 * 8), %rsp; andq $~31, %rsp; movq %rsp, %rax; movq %r10, (16 * 16 + 0 * 8)(%rsp); movq %r11, (16 * 16 + 1 * 8)(%rsp); movq %r12, (16 * 16 + 2 * 8)(%rsp); movq %r13, (16 * 16 + 3 * 8)(%rsp); CFI_REG_ON_STACK(r10, 16 * 16 + 0 * 8); CFI_REG_ON_STACK(r11, 16 * 16 + 1 * 8); CFI_REG_ON_STACK(r12, 16 * 16 + 2 * 8); CFI_REG_ON_STACK(r13, 16 * 16 + 3 * 8); vmovdqu (%rdx), %xmm15; /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ /* Sum_i = Sum_{i-1} xor ENCIPHER(K, A_i xor Offset_i) */ #define OCB_INPUT(n, lreg, xreg) \ vmovdqu (n * 16)(%rsi), xreg; \ vpxor (lreg), %xmm15, %xmm15; \ vpxor xreg, %xmm15, xreg; movq (0 * 8)(%r8), %r10; movq (1 * 8)(%r8), %r11; movq (2 * 8)(%r8), %r12; movq (3 * 8)(%r8), %r13; OCB_INPUT(0, %r10, %xmm0); vmovdqu %xmm0, (15 * 16)(%rax); OCB_INPUT(1, %r11, %xmm14); OCB_INPUT(2, %r12, %xmm13); 
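For reference, a minimal C sketch of what each OCB_INPUT invocation above does per block, assuming the caller has already resolved L_{ntz(i)} for every block into the L[16] pointer array passed in; helper names are illustrative, not from the patch.

#include <stdint.h>

typedef struct { uint8_t b[16]; } block16;

static void xor16(block16 *dst, const block16 *src)
{
	int i;

	for (i = 0; i < 16; i++)
		dst->b[i] ^= src->b[i];
}

/* One block of the OCB input pass:
 *   Offset_i = Offset_{i-1} ^ L_{ntz(i)}   (vpxor (lreg), %xmm15, %xmm15)
 *   in_i     = A_i ^ Offset_i              (vpxor xreg, %xmm15, xreg)
 * The 16 encrypted in_i values are then XOR-summed into the checksum
 * after __camellia_enc_blk16 returns. */
static void ocb_input_block(block16 *offset, block16 *blk,
			    const block16 *L_ntz_i)
{
	xor16(offset, L_ntz_i);
	xor16(blk, offset);
}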
OCB_INPUT(3, %r13, %xmm12); movq (4 * 8)(%r8), %r10; movq (5 * 8)(%r8), %r11; movq (6 * 8)(%r8), %r12; movq (7 * 8)(%r8), %r13; OCB_INPUT(4, %r10, %xmm11); OCB_INPUT(5, %r11, %xmm10); OCB_INPUT(6, %r12, %xmm9); OCB_INPUT(7, %r13, %xmm8); movq (8 * 8)(%r8), %r10; movq (9 * 8)(%r8), %r11; movq (10 * 8)(%r8), %r12; movq (11 * 8)(%r8), %r13; OCB_INPUT(8, %r10, %xmm7); OCB_INPUT(9, %r11, %xmm6); OCB_INPUT(10, %r12, %xmm5); OCB_INPUT(11, %r13, %xmm4); movq (12 * 8)(%r8), %r10; movq (13 * 8)(%r8), %r11; movq (14 * 8)(%r8), %r12; movq (15 * 8)(%r8), %r13; OCB_INPUT(12, %r10, %xmm3); OCB_INPUT(13, %r11, %xmm2); OCB_INPUT(14, %r12, %xmm1); OCB_INPUT(15, %r13, %xmm0); #undef OCB_INPUT cmpl $128, key_bitlength(CTX); movl $32, %r8d; movl $24, %r10d; cmovel %r10d, %r8d; /* max */ vmovdqu %xmm15, (%rdx); movq %rcx, %r10; /* inpack16_pre: */ vmovq (key_table)(CTX), %xmm15; vpshufb .Lpack_bswap rRIP, %xmm15, %xmm15; vpxor %xmm0, %xmm15, %xmm0; vpxor %xmm1, %xmm15, %xmm1; vpxor %xmm2, %xmm15, %xmm2; vpxor %xmm3, %xmm15, %xmm3; vpxor %xmm4, %xmm15, %xmm4; vpxor %xmm5, %xmm15, %xmm5; vpxor %xmm6, %xmm15, %xmm6; vpxor %xmm7, %xmm15, %xmm7; vpxor %xmm8, %xmm15, %xmm8; vpxor %xmm9, %xmm15, %xmm9; vpxor %xmm10, %xmm15, %xmm10; vpxor %xmm11, %xmm15, %xmm11; vpxor %xmm12, %xmm15, %xmm12; vpxor %xmm13, %xmm15, %xmm13; vpxor %xmm14, %xmm15, %xmm14; vpxor 15 * 16(%rax), %xmm15, %xmm15; call __camellia_enc_blk16; vpxor %xmm7, %xmm6, %xmm6; vpxor %xmm5, %xmm4, %xmm4; vpxor %xmm3, %xmm2, %xmm2; vpxor %xmm1, %xmm0, %xmm0; vpxor %xmm15, %xmm14, %xmm14; vpxor %xmm13, %xmm12, %xmm12; vpxor %xmm11, %xmm10, %xmm10; vpxor %xmm9, %xmm8, %xmm8; vpxor %xmm6, %xmm4, %xmm4; vpxor %xmm2, %xmm0, %xmm0; vpxor %xmm14, %xmm12, %xmm12; vpxor %xmm10, %xmm8, %xmm8; vpxor %xmm4, %xmm0, %xmm0; vpxor %xmm12, %xmm8, %xmm8; vpxor %xmm0, %xmm8, %xmm0; vpxor (%r10), %xmm0, %xmm0; vmovdqu %xmm0, (%r10); vzeroall; movq (16 * 16 + 0 * 8)(%rsp), %r10; movq (16 * 16 + 1 * 8)(%rsp), %r11; movq (16 * 16 + 2 * 8)(%rsp), %r12; movq (16 * 16 + 3 * 8)(%rsp), %r13; CFI_RESTORE(%r10); CFI_RESTORE(%r11); CFI_RESTORE(%r12); CFI_RESTORE(%r13); leave; CFI_LEAVE(); ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_camellia_aesni_avx_ocb_auth,.-_gcry_camellia_aesni_avx_ocb_auth;) /* * IN: * ab: 64-bit AB state * cd: 64-bit CD state */ #define camellia_f(ab, x, t0, t1, t2, t3, t4, inv_shift_row, sbox4mask, \ _0f0f0f0fmask, pre_s1lo_mask, pre_s1hi_mask, key) \ vmovq key, t0; \ vpxor x, x, t3; \ \ vpxor ab, t0, x; \ \ /* \ * S-function with AES subbytes \ */ \ \ /* input rotation for sbox4 (<<< 1) */ \ vpand x, sbox4mask, t0; \ vpandn x, sbox4mask, x; \ vpaddw t0, t0, t1; \ vpsrlw $7, t0, t0; \ vpor t0, t1, t0; \ vpand sbox4mask, t0, t0; \ vpor t0, x, x; \ \ vmovdqa .Lpost_tf_lo_s1 rRIP, t0; \ vmovdqa .Lpost_tf_hi_s1 rRIP, t1; \ \ /* prefilter sboxes */ \ filter_8bit(x, pre_s1lo_mask, pre_s1hi_mask, _0f0f0f0fmask, t2); \ \ /* AES subbytes + AES shift rows + AES inv shift rows */ \ vaesenclast t3, x, x; \ \ /* postfilter sboxes */ \ filter_8bit(x, t0, t1, _0f0f0f0fmask, t2); \ \ /* output rotation for sbox2 (<<< 1) */ \ /* output rotation for sbox3 (>>> 1) */ \ vpshufb inv_shift_row, x, t1; \ vpshufb .Lsp0044440444044404mask rRIP, x, t4; \ vpshufb .Lsp1110111010011110mask rRIP, x, x; \ vpaddb t1, t1, t2; \ vpsrlw $7, t1, t0; \ vpsllw $7, t1, t3; \ vpor t0, t2, t0; \ vpsrlw $1, t1, t1; \ vpshufb .Lsp0222022222000222mask rRIP, t0, t0; \ vpor t1, t3, t1; \ \ vpxor x, t4, t4; \ vpshufb .Lsp3033303303303033mask rRIP, t1, t1; \ vpxor t4, t0, t0; \ vpxor t1, t0, t0; \ vpsrldq $8, 
t0, x; \ vpxor t0, x, x; #define vec_rol128(in, out, nrol, t0) \ vpshufd $0x4e, in, out; \ vpsllq $(nrol), in, t0; \ vpsrlq $(64-(nrol)), out, out; \ vpaddd t0, out, out; #define vec_ror128(in, out, nror, t0) \ vpshufd $0x4e, in, out; \ vpsrlq $(nror), in, t0; \ vpsllq $(64-(nror)), out, out; \ vpaddd t0, out, out; SECTION_RODATA ELF(.type _camellia_aesni_avx_keysetup_data,@object;) _camellia_aesni_avx_keysetup_data: .align 16 .Linv_shift_row_and_unpcklbw: .byte 0x00, 0xff, 0x0d, 0xff, 0x0a, 0xff, 0x07, 0xff .byte 0x04, 0xff, 0x01, 0xff, 0x0e, 0xff, 0x0b, 0xff .Lsp0044440444044404mask: .long 0xffff0404, 0x0404ff04; .long 0x0d0dff0d, 0x0d0dff0d; .Lsp1110111010011110mask: .long 0x000000ff, 0x000000ff; .long 0x0bffff0b, 0x0b0b0bff; .Lsp0222022222000222mask: .long 0xff060606, 0xff060606; .long 0x0c0cffff, 0xff0c0c0c; .Lsp3033303303303033mask: .long 0x04ff0404, 0x04ff0404; .long 0xff0a0aff, 0x0aff0a0a; .Lsbox4_input_mask: .byte 0x00, 0xff, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00; .Lsigma1: .long 0x3BCC908B, 0xA09E667F; .Lsigma2: .long 0x4CAA73B2, 0xB67AE858; .Lsigma3: .long 0xE94F82BE, 0xC6EF372F; .Lsigma4: .long 0xF1D36F1C, 0x54FF53A5; .Lsigma5: .long 0xDE682D1D, 0x10E527FA; .Lsigma6: .long 0xB3E6C1FD, 0xB05688C2; .text .align 16 ELF(.type __camellia_avx_setup128,@function;) __camellia_avx_setup128: /* input: * %rdi: ctx, CTX; subkey storage at key_table(CTX) * %xmm0: key */ CFI_STARTPROC(); #define cmll_sub(n, ctx) (key_table+((n)*8))(ctx) #define KL128 %xmm0 #define KA128 %xmm2 vpshufb .Lbswap128_mask rRIP, KL128, KL128; vmovdqa .Linv_shift_row_and_unpcklbw rRIP, %xmm11; vmovq .Lsbox4_input_mask rRIP, %xmm12; vbroadcastss .L0f0f0f0f rRIP, %xmm13; vmovdqa .Lpre_tf_lo_s1 rRIP, %xmm14; vmovdqa .Lpre_tf_hi_s1 rRIP, %xmm15; /* * Generate KA */ vpsrldq $8, KL128, %xmm2; vmovdqa KL128, %xmm3; vpslldq $8, %xmm3, %xmm3; vpsrldq $8, %xmm3, %xmm3; camellia_f(%xmm2, %xmm4, %xmm1, %xmm5, %xmm6, %xmm7, %xmm8, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, .Lsigma1 rRIP); vpxor %xmm4, %xmm3, %xmm3; camellia_f(%xmm3, %xmm2, %xmm1, %xmm5, %xmm6, %xmm7, %xmm8, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, .Lsigma2 rRIP); camellia_f(%xmm2, %xmm3, %xmm1, %xmm5, %xmm6, %xmm7, %xmm8, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, .Lsigma3 rRIP); vpxor %xmm4, %xmm3, %xmm3; camellia_f(%xmm3, %xmm4, %xmm1, %xmm5, %xmm6, %xmm7, %xmm8, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, .Lsigma4 rRIP); vpslldq $8, %xmm3, %xmm3; vpxor %xmm4, %xmm2, %xmm2; vpsrldq $8, %xmm3, %xmm3; vpslldq $8, %xmm2, KA128; vpor %xmm3, KA128, KA128; /* * Generate subkeys */ vmovdqu KA128, cmll_sub(24, CTX); vec_rol128(KL128, %xmm3, 15, %xmm15); vec_rol128(KA128, %xmm4, 15, %xmm15); vec_rol128(KA128, %xmm5, 30, %xmm15); vec_rol128(KL128, %xmm6, 45, %xmm15); vec_rol128(KA128, %xmm7, 45, %xmm15); vec_rol128(KL128, %xmm8, 60, %xmm15); vec_rol128(KA128, %xmm9, 60, %xmm15); vec_ror128(KL128, %xmm10, 128-77, %xmm15); /* absorb kw2 to other subkeys */ vpslldq $8, KL128, %xmm15; vpsrldq $8, %xmm15, %xmm15; vpxor %xmm15, KA128, KA128; vpxor %xmm15, %xmm3, %xmm3; vpxor %xmm15, %xmm4, %xmm4; /* subl(1) ^= subr(1) & ~subr(9); */ vpandn %xmm15, %xmm5, %xmm13; vpslldq $12, %xmm13, %xmm13; vpsrldq $8, %xmm13, %xmm13; vpxor %xmm13, %xmm15, %xmm15; /* dw = subl(1) & subl(9), subr(1) ^= CAMELLIA_RL1(dw); */ vpand %xmm15, %xmm5, %xmm14; vpslld $1, %xmm14, %xmm11; vpsrld $31, %xmm14, %xmm14; vpaddd %xmm11, %xmm14, %xmm14; vpslldq $8, %xmm14, %xmm14; vpsrldq $12, %xmm14, %xmm14; vpxor %xmm14, %xmm15, %xmm15; vpxor %xmm15, %xmm6, %xmm6; vpxor %xmm15, %xmm8, %xmm8; vpxor %xmm15, %xmm9, %xmm9; /* 
subl(1) ^= subr(1) & ~subr(17); */ vpandn %xmm15, %xmm10, %xmm13; vpslldq $12, %xmm13, %xmm13; vpsrldq $8, %xmm13, %xmm13; vpxor %xmm13, %xmm15, %xmm15; /* dw = subl(1) & subl(17), subr(1) ^= CAMELLIA_RL1(dw); */ vpand %xmm15, %xmm10, %xmm14; vpslld $1, %xmm14, %xmm11; vpsrld $31, %xmm14, %xmm14; vpaddd %xmm11, %xmm14, %xmm14; vpslldq $8, %xmm14, %xmm14; vpsrldq $12, %xmm14, %xmm14; vpxor %xmm14, %xmm15, %xmm15; vpshufd $0x1b, KL128, KL128; vpshufd $0x1b, KA128, KA128; vpshufd $0x1b, %xmm3, %xmm3; vpshufd $0x1b, %xmm4, %xmm4; vpshufd $0x1b, %xmm5, %xmm5; vpshufd $0x1b, %xmm6, %xmm6; vpshufd $0x1b, %xmm7, %xmm7; vpshufd $0x1b, %xmm8, %xmm8; vpshufd $0x1b, %xmm9, %xmm9; vpshufd $0x1b, %xmm10, %xmm10; vmovdqu KL128, cmll_sub(0, CTX); vpshufd $0x1b, KL128, KL128; vmovdqu KA128, cmll_sub(2, CTX); vmovdqu %xmm3, cmll_sub(4, CTX); vmovdqu %xmm4, cmll_sub(6, CTX); vmovdqu %xmm5, cmll_sub(8, CTX); vmovdqu %xmm6, cmll_sub(10, CTX); vpsrldq $8, %xmm8, %xmm8; vmovq %xmm7, cmll_sub(12, CTX); vmovq %xmm8, cmll_sub(13, CTX); vmovdqu %xmm9, cmll_sub(14, CTX); vmovdqu %xmm10, cmll_sub(16, CTX); vmovdqu cmll_sub(24, CTX), KA128; vec_ror128(KL128, %xmm3, 128 - 94, %xmm7); vec_ror128(KA128, %xmm4, 128 - 94, %xmm7); vec_ror128(KL128, %xmm5, 128 - 111, %xmm7); vec_ror128(KA128, %xmm6, 128 - 111, %xmm7); vpxor %xmm15, %xmm3, %xmm3; vpxor %xmm15, %xmm4, %xmm4; vpxor %xmm15, %xmm5, %xmm5; vpslldq $8, %xmm15, %xmm15; vpxor %xmm15, %xmm6, %xmm6; /* absorb kw4 to other subkeys */ vpslldq $8, %xmm6, %xmm15; vpxor %xmm15, %xmm5, %xmm5; vpxor %xmm15, %xmm4, %xmm4; vpxor %xmm15, %xmm3, %xmm3; /* subl(25) ^= subr(25) & ~subr(16); */ vpshufd $0x1b, cmll_sub(16, CTX), %xmm10; vpandn %xmm15, %xmm10, %xmm13; vpslldq $4, %xmm13, %xmm13; vpxor %xmm13, %xmm15, %xmm15; /* dw = subl(25) & subl(16), subr(25) ^= CAMELLIA_RL1(dw); */ vpand %xmm15, %xmm10, %xmm14; vpslld $1, %xmm14, %xmm11; vpsrld $31, %xmm14, %xmm14; vpaddd %xmm11, %xmm14, %xmm14; vpsrldq $12, %xmm14, %xmm14; vpslldq $8, %xmm14, %xmm14; vpxor %xmm14, %xmm15, %xmm15; vpshufd $0x1b, %xmm3, %xmm3; vpshufd $0x1b, %xmm4, %xmm4; vpshufd $0x1b, %xmm5, %xmm5; vpshufd $0x1b, %xmm6, %xmm6; vmovdqu %xmm3, cmll_sub(18, CTX); vmovdqu %xmm4, cmll_sub(20, CTX); vmovdqu %xmm5, cmll_sub(22, CTX); vmovdqu %xmm6, cmll_sub(24, CTX); vpshufd $0x1b, cmll_sub(14, CTX), %xmm3; vpshufd $0x1b, cmll_sub(12, CTX), %xmm4; vpshufd $0x1b, cmll_sub(10, CTX), %xmm5; vpshufd $0x1b, cmll_sub(8, CTX), %xmm6; vpxor %xmm15, %xmm3, %xmm3; vpxor %xmm15, %xmm4, %xmm4; vpxor %xmm15, %xmm5, %xmm5; /* subl(25) ^= subr(25) & ~subr(8); */ vpandn %xmm15, %xmm6, %xmm13; vpslldq $4, %xmm13, %xmm13; vpxor %xmm13, %xmm15, %xmm15; /* dw = subl(25) & subl(8), subr(25) ^= CAMELLIA_RL1(dw); */ vpand %xmm15, %xmm6, %xmm14; vpslld $1, %xmm14, %xmm11; vpsrld $31, %xmm14, %xmm14; vpaddd %xmm11, %xmm14, %xmm14; vpsrldq $12, %xmm14, %xmm14; vpslldq $8, %xmm14, %xmm14; vpxor %xmm14, %xmm15, %xmm15; vpshufd $0x1b, %xmm3, %xmm3; vpshufd $0x1b, %xmm4, %xmm4; vpshufd $0x1b, %xmm5, %xmm5; vmovdqu %xmm3, cmll_sub(14, CTX); vmovdqu %xmm4, cmll_sub(12, CTX); vmovdqu %xmm5, cmll_sub(10, CTX); vpshufd $0x1b, cmll_sub(6, CTX), %xmm6; vpshufd $0x1b, cmll_sub(4, CTX), %xmm4; vpshufd $0x1b, cmll_sub(2, CTX), %xmm2; vpshufd $0x1b, cmll_sub(0, CTX), %xmm0; vpxor %xmm15, %xmm6, %xmm6; vpxor %xmm15, %xmm4, %xmm4; vpxor %xmm15, %xmm2, %xmm2; vpxor %xmm15, %xmm0, %xmm0; vpshufd $0x1b, %xmm6, %xmm6; vpshufd $0x1b, %xmm4, %xmm4; vpshufd $0x1b, %xmm2, %xmm2; vpshufd $0x1b, %xmm0, %xmm0; vpsrldq $8, %xmm2, %xmm3; vpsrldq $8, %xmm4, %xmm5; vpsrldq $8, 
%xmm6, %xmm7; /* * key XOR is end of F-function. */ vpxor %xmm2, %xmm0, %xmm0; vpxor %xmm4, %xmm2, %xmm2; vmovq %xmm0, cmll_sub(0, CTX); vmovq %xmm3, cmll_sub(2, CTX); vpxor %xmm5, %xmm3, %xmm3; vpxor %xmm6, %xmm4, %xmm4; vpxor %xmm7, %xmm5, %xmm5; vmovq %xmm2, cmll_sub(3, CTX); vmovq %xmm3, cmll_sub(4, CTX); vmovq %xmm4, cmll_sub(5, CTX); vmovq %xmm5, cmll_sub(6, CTX); vmovq cmll_sub(7, CTX), %xmm7; vmovq cmll_sub(8, CTX), %xmm8; vmovq cmll_sub(9, CTX), %xmm9; vmovq cmll_sub(10, CTX), %xmm10; /* tl = subl(10) ^ (subr(10) & ~subr(8)); */ vpandn %xmm10, %xmm8, %xmm15; vpsrldq $4, %xmm15, %xmm15; vpxor %xmm15, %xmm10, %xmm0; /* dw = tl & subl(8), tr = subr(10) ^ CAMELLIA_RL1(dw); */ vpand %xmm8, %xmm0, %xmm15; vpslld $1, %xmm15, %xmm14; vpsrld $31, %xmm15, %xmm15; vpaddd %xmm14, %xmm15, %xmm15; vpslldq $12, %xmm15, %xmm15; vpsrldq $8, %xmm15, %xmm15; vpxor %xmm15, %xmm0, %xmm0; vpxor %xmm0, %xmm6, %xmm6; vmovq %xmm6, cmll_sub(7, CTX); vmovq cmll_sub(11, CTX), %xmm11; vmovq cmll_sub(12, CTX), %xmm12; vmovq cmll_sub(13, CTX), %xmm13; vmovq cmll_sub(14, CTX), %xmm14; vmovq cmll_sub(15, CTX), %xmm15; /* tl = subl(7) ^ (subr(7) & ~subr(9)); */ vpandn %xmm7, %xmm9, %xmm1; vpsrldq $4, %xmm1, %xmm1; vpxor %xmm1, %xmm7, %xmm0; /* dw = tl & subl(9), tr = subr(7) ^ CAMELLIA_RL1(dw); */ vpand %xmm9, %xmm0, %xmm1; vpslld $1, %xmm1, %xmm2; vpsrld $31, %xmm1, %xmm1; vpaddd %xmm2, %xmm1, %xmm1; vpslldq $12, %xmm1, %xmm1; vpsrldq $8, %xmm1, %xmm1; vpxor %xmm1, %xmm0, %xmm0; vpxor %xmm11, %xmm0, %xmm0; vpxor %xmm12, %xmm10, %xmm10; vpxor %xmm13, %xmm11, %xmm11; vpxor %xmm14, %xmm12, %xmm12; vpxor %xmm15, %xmm13, %xmm13; vmovq %xmm0, cmll_sub(10, CTX); vmovq %xmm10, cmll_sub(11, CTX); vmovq %xmm11, cmll_sub(12, CTX); vmovq %xmm12, cmll_sub(13, CTX); vmovq %xmm13, cmll_sub(14, CTX); vmovq cmll_sub(16, CTX), %xmm6; vmovq cmll_sub(17, CTX), %xmm7; vmovq cmll_sub(18, CTX), %xmm8; vmovq cmll_sub(19, CTX), %xmm9; vmovq cmll_sub(20, CTX), %xmm10; /* tl = subl(18) ^ (subr(18) & ~subr(16)); */ vpandn %xmm8, %xmm6, %xmm1; vpsrldq $4, %xmm1, %xmm1; vpxor %xmm1, %xmm8, %xmm0; /* dw = tl & subl(16), tr = subr(18) ^ CAMELLIA_RL1(dw); */ vpand %xmm6, %xmm0, %xmm1; vpslld $1, %xmm1, %xmm2; vpsrld $31, %xmm1, %xmm1; vpaddd %xmm2, %xmm1, %xmm1; vpslldq $12, %xmm1, %xmm1; vpsrldq $8, %xmm1, %xmm1; vpxor %xmm1, %xmm0, %xmm0; vpxor %xmm14, %xmm0, %xmm0; vmovq %xmm0, cmll_sub(15, CTX); /* tl = subl(15) ^ (subr(15) & ~subr(17)); */ vpandn %xmm15, %xmm7, %xmm1; vpsrldq $4, %xmm1, %xmm1; vpxor %xmm1, %xmm15, %xmm0; /* dw = tl & subl(17), tr = subr(15) ^ CAMELLIA_RL1(dw); */ vpand %xmm7, %xmm0, %xmm1; vpslld $1, %xmm1, %xmm2; vpsrld $31, %xmm1, %xmm1; vpaddd %xmm2, %xmm1, %xmm1; vpslldq $12, %xmm1, %xmm1; vpsrldq $8, %xmm1, %xmm1; vpxor %xmm1, %xmm0, %xmm0; vmovq cmll_sub(21, CTX), %xmm1; vmovq cmll_sub(22, CTX), %xmm2; vmovq cmll_sub(23, CTX), %xmm3; vmovq cmll_sub(24, CTX), %xmm4; vpxor %xmm9, %xmm0, %xmm0; vpxor %xmm10, %xmm8, %xmm8; vpxor %xmm1, %xmm9, %xmm9; vpxor %xmm2, %xmm10, %xmm10; vpxor %xmm3, %xmm1, %xmm1; vpxor %xmm4, %xmm3, %xmm3; vmovq %xmm0, cmll_sub(18, CTX); vmovq %xmm8, cmll_sub(19, CTX); vmovq %xmm9, cmll_sub(20, CTX); vmovq %xmm10, cmll_sub(21, CTX); vmovq %xmm1, cmll_sub(22, CTX); vmovq %xmm2, cmll_sub(23, CTX); vmovq %xmm3, cmll_sub(24, CTX); /* kw2 and kw4 are unused now. 
*/ movq $0, cmll_sub(1, CTX); movq $0, cmll_sub(25, CTX); vzeroall; ret_spec_stop; CFI_ENDPROC(); ELF(.size __camellia_avx_setup128,.-__camellia_avx_setup128;) .align 16 ELF(.type __camellia_avx_setup256,@function;) __camellia_avx_setup256: /* input: * %rdi: ctx, CTX; subkey storage at key_table(CTX) * %xmm0 & %xmm1: key */ CFI_STARTPROC(); #define KL128 %xmm0 #define KR128 %xmm1 #define KA128 %xmm2 #define KB128 %xmm3 vpshufb .Lbswap128_mask rRIP, KL128, KL128; vpshufb .Lbswap128_mask rRIP, KR128, KR128; vmovdqa .Linv_shift_row_and_unpcklbw rRIP, %xmm11; vmovq .Lsbox4_input_mask rRIP, %xmm12; vbroadcastss .L0f0f0f0f rRIP, %xmm13; vmovdqa .Lpre_tf_lo_s1 rRIP, %xmm14; vmovdqa .Lpre_tf_hi_s1 rRIP, %xmm15; /* * Generate KA */ vpxor KL128, KR128, %xmm3; vpsrldq $8, KR128, %xmm6; vpsrldq $8, %xmm3, %xmm2; vpslldq $8, %xmm3, %xmm3; vpsrldq $8, %xmm3, %xmm3; camellia_f(%xmm2, %xmm4, %xmm5, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, .Lsigma1 rRIP); vpxor %xmm4, %xmm3, %xmm3; camellia_f(%xmm3, %xmm2, %xmm5, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, .Lsigma2 rRIP); vpxor %xmm6, %xmm2, %xmm2; camellia_f(%xmm2, %xmm3, %xmm5, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, .Lsigma3 rRIP); vpxor %xmm4, %xmm3, %xmm3; vpxor KR128, %xmm3, %xmm3; camellia_f(%xmm3, %xmm4, %xmm5, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, .Lsigma4 rRIP); vpslldq $8, %xmm3, %xmm3; vpxor %xmm4, %xmm2, %xmm2; vpsrldq $8, %xmm3, %xmm3; vpslldq $8, %xmm2, KA128; vpor %xmm3, KA128, KA128; /* * Generate KB */ vpxor KA128, KR128, %xmm3; vpsrldq $8, %xmm3, %xmm4; vpslldq $8, %xmm3, %xmm3; vpsrldq $8, %xmm3, %xmm3; camellia_f(%xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, .Lsigma5 rRIP); vpxor %xmm5, %xmm3, %xmm3; camellia_f(%xmm3, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, .Lsigma6 rRIP); vpslldq $8, %xmm3, %xmm3; vpxor %xmm5, %xmm4, %xmm4; vpsrldq $8, %xmm3, %xmm3; vpslldq $8, %xmm4, %xmm4; vpor %xmm3, %xmm4, KB128; /* * Generate subkeys */ vmovdqu KB128, cmll_sub(32, CTX); vec_rol128(KR128, %xmm4, 15, %xmm15); vec_rol128(KA128, %xmm5, 15, %xmm15); vec_rol128(KR128, %xmm6, 30, %xmm15); vec_rol128(KB128, %xmm7, 30, %xmm15); vec_rol128(KL128, %xmm8, 45, %xmm15); vec_rol128(KA128, %xmm9, 45, %xmm15); vec_rol128(KL128, %xmm10, 60, %xmm15); vec_rol128(KR128, %xmm11, 60, %xmm15); vec_rol128(KB128, %xmm12, 60, %xmm15); /* absorb kw2 to other subkeys */ vpslldq $8, KL128, %xmm15; vpsrldq $8, %xmm15, %xmm15; vpxor %xmm15, KB128, KB128; vpxor %xmm15, %xmm4, %xmm4; vpxor %xmm15, %xmm5, %xmm5; /* subl(1) ^= subr(1) & ~subr(9); */ vpandn %xmm15, %xmm6, %xmm13; vpslldq $12, %xmm13, %xmm13; vpsrldq $8, %xmm13, %xmm13; vpxor %xmm13, %xmm15, %xmm15; /* dw = subl(1) & subl(9), subr(1) ^= CAMELLIA_RL1(dw); */ vpand %xmm15, %xmm6, %xmm14; vpslld $1, %xmm14, %xmm13; vpsrld $31, %xmm14, %xmm14; vpaddd %xmm13, %xmm14, %xmm14; vpslldq $8, %xmm14, %xmm14; vpsrldq $12, %xmm14, %xmm14; vpxor %xmm14, %xmm15, %xmm15; vpxor %xmm15, %xmm7, %xmm7; vpxor %xmm15, %xmm8, %xmm8; vpxor %xmm15, %xmm9, %xmm9; vpshufd $0x1b, KL128, KL128; vpshufd $0x1b, KB128, KB128; vpshufd $0x1b, %xmm4, %xmm4; vpshufd $0x1b, %xmm5, %xmm5; vpshufd $0x1b, %xmm6, %xmm6; vpshufd $0x1b, %xmm7, %xmm7; vpshufd $0x1b, %xmm8, %xmm8; vpshufd $0x1b, %xmm9, %xmm9; vmovdqu KL128, cmll_sub(0, CTX); vpshufd $0x1b, KL128, KL128; vmovdqu KB128, cmll_sub(2, CTX); vmovdqu %xmm4, cmll_sub(4, CTX); vmovdqu %xmm5, cmll_sub(6, 
CTX); vmovdqu %xmm6, cmll_sub(8, CTX); vmovdqu %xmm7, cmll_sub(10, CTX); vmovdqu %xmm8, cmll_sub(12, CTX); vmovdqu %xmm9, cmll_sub(14, CTX); vmovdqu cmll_sub(32, CTX), KB128; /* subl(1) ^= subr(1) & ~subr(17); */ vpandn %xmm15, %xmm10, %xmm13; vpslldq $12, %xmm13, %xmm13; vpsrldq $8, %xmm13, %xmm13; vpxor %xmm13, %xmm15, %xmm15; /* dw = subl(1) & subl(17), subr(1) ^= CAMELLIA_RL1(dw); */ vpand %xmm15, %xmm10, %xmm14; vpslld $1, %xmm14, %xmm13; vpsrld $31, %xmm14, %xmm14; vpaddd %xmm13, %xmm14, %xmm14; vpslldq $8, %xmm14, %xmm14; vpsrldq $12, %xmm14, %xmm14; vpxor %xmm14, %xmm15, %xmm15; vpxor %xmm15, %xmm11, %xmm11; vpxor %xmm15, %xmm12, %xmm12; vec_ror128(KL128, %xmm4, 128-77, %xmm14); vec_ror128(KA128, %xmm5, 128-77, %xmm14); vec_ror128(KR128, %xmm6, 128-94, %xmm14); vec_ror128(KA128, %xmm7, 128-94, %xmm14); vec_ror128(KL128, %xmm8, 128-111, %xmm14); vec_ror128(KB128, %xmm9, 128-111, %xmm14); vpxor %xmm15, %xmm4, %xmm4; vpshufd $0x1b, %xmm10, %xmm10; vpshufd $0x1b, %xmm11, %xmm11; vpshufd $0x1b, %xmm12, %xmm12; vpshufd $0x1b, %xmm4, %xmm4; vmovdqu %xmm10, cmll_sub(16, CTX); vmovdqu %xmm11, cmll_sub(18, CTX); vmovdqu %xmm12, cmll_sub(20, CTX); vmovdqu %xmm4, cmll_sub(22, CTX); /* subl(1) ^= subr(1) & ~subr(25); */ vpandn %xmm15, %xmm5, %xmm13; vpslldq $12, %xmm13, %xmm13; vpsrldq $8, %xmm13, %xmm13; vpxor %xmm13, %xmm15, %xmm15; /* dw = subl(1) & subl(25), subr(1) ^= CAMELLIA_RL1(dw); */ vpand %xmm15, %xmm5, %xmm14; vpslld $1, %xmm14, %xmm13; vpsrld $31, %xmm14, %xmm14; vpaddd %xmm13, %xmm14, %xmm14; vpslldq $8, %xmm14, %xmm14; vpsrldq $12, %xmm14, %xmm14; vpxor %xmm14, %xmm15, %xmm15; vpxor %xmm15, %xmm6, %xmm6; vpxor %xmm15, %xmm7, %xmm7; vpxor %xmm15, %xmm8, %xmm8; vpslldq $8, %xmm15, %xmm15; vpxor %xmm15, %xmm9, %xmm9; /* absorb kw4 to other subkeys */ vpslldq $8, %xmm9, %xmm15; vpxor %xmm15, %xmm8, %xmm8; vpxor %xmm15, %xmm7, %xmm7; vpxor %xmm15, %xmm6, %xmm6; /* subl(33) ^= subr(33) & ~subr(24); */ vpandn %xmm15, %xmm5, %xmm14; vpslldq $4, %xmm14, %xmm14; vpxor %xmm14, %xmm15, %xmm15; /* dw = subl(33) & subl(24), subr(33) ^= CAMELLIA_RL1(dw); */ vpand %xmm15, %xmm5, %xmm14; vpslld $1, %xmm14, %xmm13; vpsrld $31, %xmm14, %xmm14; vpaddd %xmm13, %xmm14, %xmm14; vpsrldq $12, %xmm14, %xmm14; vpslldq $8, %xmm14, %xmm14; vpxor %xmm14, %xmm15, %xmm15; vpshufd $0x1b, %xmm5, %xmm5; vpshufd $0x1b, %xmm6, %xmm6; vpshufd $0x1b, %xmm7, %xmm7; vpshufd $0x1b, %xmm8, %xmm8; vpshufd $0x1b, %xmm9, %xmm9; vmovdqu %xmm5, cmll_sub(24, CTX); vmovdqu %xmm6, cmll_sub(26, CTX); vmovdqu %xmm7, cmll_sub(28, CTX); vmovdqu %xmm8, cmll_sub(30, CTX); vmovdqu %xmm9, cmll_sub(32, CTX); vpshufd $0x1b, cmll_sub(22, CTX), %xmm0; vpshufd $0x1b, cmll_sub(20, CTX), %xmm1; vpshufd $0x1b, cmll_sub(18, CTX), %xmm2; vpshufd $0x1b, cmll_sub(16, CTX), %xmm3; vpshufd $0x1b, cmll_sub(14, CTX), %xmm4; vpshufd $0x1b, cmll_sub(12, CTX), %xmm5; vpshufd $0x1b, cmll_sub(10, CTX), %xmm6; vpshufd $0x1b, cmll_sub(8, CTX), %xmm7; vpxor %xmm15, %xmm0, %xmm0; vpxor %xmm15, %xmm1, %xmm1; vpxor %xmm15, %xmm2, %xmm2; /* subl(33) ^= subr(33) & ~subr(24); */ vpandn %xmm15, %xmm3, %xmm14; vpslldq $4, %xmm14, %xmm14; vpxor %xmm14, %xmm15, %xmm15; /* dw = subl(33) & subl(24), subr(33) ^= CAMELLIA_RL1(dw); */ vpand %xmm15, %xmm3, %xmm14; vpslld $1, %xmm14, %xmm13; vpsrld $31, %xmm14, %xmm14; vpaddd %xmm13, %xmm14, %xmm14; vpsrldq $12, %xmm14, %xmm14; vpslldq $8, %xmm14, %xmm14; vpxor %xmm14, %xmm15, %xmm15; vpxor %xmm15, %xmm4, %xmm4; vpxor %xmm15, %xmm5, %xmm5; vpxor %xmm15, %xmm6, %xmm6; vpshufd $0x1b, %xmm0, %xmm0; vpshufd $0x1b, %xmm1, %xmm1; 
vpshufd $0x1b, %xmm2, %xmm2; vpshufd $0x1b, %xmm4, %xmm4; vpshufd $0x1b, %xmm5, %xmm5; vpshufd $0x1b, %xmm6, %xmm6; vmovdqu %xmm0, cmll_sub(22, CTX); vmovdqu %xmm1, cmll_sub(20, CTX); vmovdqu %xmm2, cmll_sub(18, CTX); vmovdqu %xmm4, cmll_sub(14, CTX); vmovdqu %xmm5, cmll_sub(12, CTX); vmovdqu %xmm6, cmll_sub(10, CTX); vpshufd $0x1b, cmll_sub(6, CTX), %xmm6; vpshufd $0x1b, cmll_sub(4, CTX), %xmm4; vpshufd $0x1b, cmll_sub(2, CTX), %xmm2; vpshufd $0x1b, cmll_sub(0, CTX), %xmm0; /* subl(33) ^= subr(33) & ~subr(24); */ vpandn %xmm15, %xmm7, %xmm14; vpslldq $4, %xmm14, %xmm14; vpxor %xmm14, %xmm15, %xmm15; /* dw = subl(33) & subl(24), subr(33) ^= CAMELLIA_RL1(dw); */ vpand %xmm15, %xmm7, %xmm14; vpslld $1, %xmm14, %xmm13; vpsrld $31, %xmm14, %xmm14; vpaddd %xmm13, %xmm14, %xmm14; vpsrldq $12, %xmm14, %xmm14; vpslldq $8, %xmm14, %xmm14; vpxor %xmm14, %xmm15, %xmm15; vpxor %xmm15, %xmm6, %xmm6; vpxor %xmm15, %xmm4, %xmm4; vpxor %xmm15, %xmm2, %xmm2; vpxor %xmm15, %xmm0, %xmm0; vpshufd $0x1b, %xmm6, %xmm6; vpshufd $0x1b, %xmm4, %xmm4; vpshufd $0x1b, %xmm2, %xmm2; vpshufd $0x1b, %xmm0, %xmm0; vpsrldq $8, %xmm2, %xmm3; vpsrldq $8, %xmm4, %xmm5; vpsrldq $8, %xmm6, %xmm7; /* * key XOR is end of F-function. */ vpxor %xmm2, %xmm0, %xmm0; vpxor %xmm4, %xmm2, %xmm2; vmovq %xmm0, cmll_sub(0, CTX); vmovq %xmm3, cmll_sub(2, CTX); vpxor %xmm5, %xmm3, %xmm3; vpxor %xmm6, %xmm4, %xmm4; vpxor %xmm7, %xmm5, %xmm5; vmovq %xmm2, cmll_sub(3, CTX); vmovq %xmm3, cmll_sub(4, CTX); vmovq %xmm4, cmll_sub(5, CTX); vmovq %xmm5, cmll_sub(6, CTX); vmovq cmll_sub(7, CTX), %xmm7; vmovq cmll_sub(8, CTX), %xmm8; vmovq cmll_sub(9, CTX), %xmm9; vmovq cmll_sub(10, CTX), %xmm10; /* tl = subl(10) ^ (subr(10) & ~subr(8)); */ vpandn %xmm10, %xmm8, %xmm15; vpsrldq $4, %xmm15, %xmm15; vpxor %xmm15, %xmm10, %xmm0; /* dw = tl & subl(8), tr = subr(10) ^ CAMELLIA_RL1(dw); */ vpand %xmm8, %xmm0, %xmm15; vpslld $1, %xmm15, %xmm14; vpsrld $31, %xmm15, %xmm15; vpaddd %xmm14, %xmm15, %xmm15; vpslldq $12, %xmm15, %xmm15; vpsrldq $8, %xmm15, %xmm15; vpxor %xmm15, %xmm0, %xmm0; vpxor %xmm0, %xmm6, %xmm6; vmovq %xmm6, cmll_sub(7, CTX); vmovq cmll_sub(11, CTX), %xmm11; vmovq cmll_sub(12, CTX), %xmm12; vmovq cmll_sub(13, CTX), %xmm13; vmovq cmll_sub(14, CTX), %xmm14; vmovq cmll_sub(15, CTX), %xmm15; /* tl = subl(7) ^ (subr(7) & ~subr(9)); */ vpandn %xmm7, %xmm9, %xmm1; vpsrldq $4, %xmm1, %xmm1; vpxor %xmm1, %xmm7, %xmm0; /* dw = tl & subl(9), tr = subr(7) ^ CAMELLIA_RL1(dw); */ vpand %xmm9, %xmm0, %xmm1; vpslld $1, %xmm1, %xmm2; vpsrld $31, %xmm1, %xmm1; vpaddd %xmm2, %xmm1, %xmm1; vpslldq $12, %xmm1, %xmm1; vpsrldq $8, %xmm1, %xmm1; vpxor %xmm1, %xmm0, %xmm0; vpxor %xmm11, %xmm0, %xmm0; vpxor %xmm12, %xmm10, %xmm10; vpxor %xmm13, %xmm11, %xmm11; vpxor %xmm14, %xmm12, %xmm12; vpxor %xmm15, %xmm13, %xmm13; vmovq %xmm0, cmll_sub(10, CTX); vmovq %xmm10, cmll_sub(11, CTX); vmovq %xmm11, cmll_sub(12, CTX); vmovq %xmm12, cmll_sub(13, CTX); vmovq %xmm13, cmll_sub(14, CTX); vmovq cmll_sub(16, CTX), %xmm6; vmovq cmll_sub(17, CTX), %xmm7; vmovq cmll_sub(18, CTX), %xmm8; vmovq cmll_sub(19, CTX), %xmm9; vmovq cmll_sub(20, CTX), %xmm10; /* tl = subl(18) ^ (subr(18) & ~subr(16)); */ vpandn %xmm8, %xmm6, %xmm1; vpsrldq $4, %xmm1, %xmm1; vpxor %xmm1, %xmm8, %xmm0; /* dw = tl & subl(16), tr = subr(18) ^ CAMELLIA_RL1(dw); */ vpand %xmm6, %xmm0, %xmm1; vpslld $1, %xmm1, %xmm2; vpsrld $31, %xmm1, %xmm1; vpaddd %xmm2, %xmm1, %xmm1; vpslldq $12, %xmm1, %xmm1; vpsrldq $8, %xmm1, %xmm1; vpxor %xmm1, %xmm0, %xmm0; vpxor %xmm14, %xmm0, %xmm0; vmovq %xmm0, cmll_sub(15, CTX); /* 
tl = subl(15) ^ (subr(15) & ~subr(17)); */ vpandn %xmm15, %xmm7, %xmm1; vpsrldq $4, %xmm1, %xmm1; vpxor %xmm1, %xmm15, %xmm0; /* dw = tl & subl(17), tr = subr(15) ^ CAMELLIA_RL1(dw); */ vpand %xmm7, %xmm0, %xmm1; vpslld $1, %xmm1, %xmm2; vpsrld $31, %xmm1, %xmm1; vpaddd %xmm2, %xmm1, %xmm1; vpslldq $12, %xmm1, %xmm1; vpsrldq $8, %xmm1, %xmm1; vpxor %xmm1, %xmm0, %xmm0; vmovq cmll_sub(21, CTX), %xmm1; vmovq cmll_sub(22, CTX), %xmm2; vmovq cmll_sub(23, CTX), %xmm3; vmovq cmll_sub(24, CTX), %xmm4; vpxor %xmm9, %xmm0, %xmm0; vpxor %xmm10, %xmm8, %xmm8; vpxor %xmm1, %xmm9, %xmm9; vpxor %xmm2, %xmm10, %xmm10; vpxor %xmm3, %xmm1, %xmm1; vmovq %xmm0, cmll_sub(18, CTX); vmovq %xmm8, cmll_sub(19, CTX); vmovq %xmm9, cmll_sub(20, CTX); vmovq %xmm10, cmll_sub(21, CTX); vmovq %xmm1, cmll_sub(22, CTX); vmovq cmll_sub(25, CTX), %xmm5; vmovq cmll_sub(26, CTX), %xmm6; vmovq cmll_sub(27, CTX), %xmm7; vmovq cmll_sub(28, CTX), %xmm8; vmovq cmll_sub(29, CTX), %xmm9; vmovq cmll_sub(30, CTX), %xmm10; vmovq cmll_sub(31, CTX), %xmm11; vmovq cmll_sub(32, CTX), %xmm12; /* tl = subl(26) ^ (subr(26) & ~subr(24)); */ vpandn %xmm6, %xmm4, %xmm15; vpsrldq $4, %xmm15, %xmm15; vpxor %xmm15, %xmm6, %xmm0; /* dw = tl & subl(26), tr = subr(24) ^ CAMELLIA_RL1(dw); */ vpand %xmm4, %xmm0, %xmm15; vpslld $1, %xmm15, %xmm14; vpsrld $31, %xmm15, %xmm15; vpaddd %xmm14, %xmm15, %xmm15; vpslldq $12, %xmm15, %xmm15; vpsrldq $8, %xmm15, %xmm15; vpxor %xmm15, %xmm0, %xmm0; vpxor %xmm0, %xmm2, %xmm2; vmovq %xmm2, cmll_sub(23, CTX); /* tl = subl(23) ^ (subr(23) & ~subr(25)); */ vpandn %xmm3, %xmm5, %xmm15; vpsrldq $4, %xmm15, %xmm15; vpxor %xmm15, %xmm3, %xmm0; /* dw = tl & subl(26), tr = subr(24) ^ CAMELLIA_RL1(dw); */ vpand %xmm5, %xmm0, %xmm15; vpslld $1, %xmm15, %xmm14; vpsrld $31, %xmm15, %xmm15; vpaddd %xmm14, %xmm15, %xmm15; vpslldq $12, %xmm15, %xmm15; vpsrldq $8, %xmm15, %xmm15; vpxor %xmm15, %xmm0, %xmm0; vpxor %xmm7, %xmm0, %xmm0; vpxor %xmm8, %xmm6, %xmm6; vpxor %xmm9, %xmm7, %xmm7; vpxor %xmm10, %xmm8, %xmm8; vpxor %xmm11, %xmm9, %xmm9; vpxor %xmm12, %xmm11, %xmm11; vmovq %xmm0, cmll_sub(26, CTX); vmovq %xmm6, cmll_sub(27, CTX); vmovq %xmm7, cmll_sub(28, CTX); vmovq %xmm8, cmll_sub(29, CTX); vmovq %xmm9, cmll_sub(30, CTX); vmovq %xmm10, cmll_sub(31, CTX); vmovq %xmm11, cmll_sub(32, CTX); /* kw2 and kw4 are unused now. 
*/ movq $0, cmll_sub(1, CTX); movq $0, cmll_sub(33, CTX); vzeroall; ret_spec_stop; CFI_ENDPROC(); ELF(.size __camellia_avx_setup256,.-__camellia_avx_setup256;) .align 16 .globl _gcry_camellia_aesni_avx_keygen ELF(.type _gcry_camellia_aesni_avx_keygen,@function;) _gcry_camellia_aesni_avx_keygen: /* input: * %rdi: ctx, CTX * %rsi: key * %rdx: keylen */ CFI_STARTPROC(); vzeroupper; vmovdqu (%rsi), %xmm0; cmpl $24, %edx; jb __camellia_avx_setup128; je .Lprepare_key192; vmovdqu 16(%rsi), %xmm1; jmp __camellia_avx_setup256; .Lprepare_key192: vpcmpeqd %xmm2, %xmm2, %xmm2; vmovq 16(%rsi), %xmm1; vpxor %xmm1, %xmm2, %xmm2; vpslldq $8, %xmm2, %xmm2; vpor %xmm2, %xmm1, %xmm1; jmp __camellia_avx_setup256; CFI_ENDPROC(); ELF(.size _gcry_camellia_aesni_avx_keygen,.-_gcry_camellia_aesni_avx_keygen;) #endif /*defined(ENABLE_AESNI_SUPPORT) && defined(ENABLE_AVX_SUPPORT)*/ #endif /*__x86_64*/ diff --git a/cipher/camellia-aesni-avx2-amd64.h b/cipher/camellia-aesni-avx2-amd64.h index c92a0559..7d451c09 100644 --- a/cipher/camellia-aesni-avx2-amd64.h +++ b/cipher/camellia-aesni-avx2-amd64.h @@ -1,2221 +1,2296 @@ /* camellia-aesni-avx2-amd64.h - AES-NI/VAES/GFNI/AVX2 implementation of Camellia * * Copyright (C) 2013-2015,2020-2022 Jussi Kivilinna * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . */ #ifndef GCRY_CAMELLIA_AESNI_AVX2_AMD64_H #define GCRY_CAMELLIA_AESNI_AVX2_AMD64_H #include "asm-common-amd64.h" #define CAMELLIA_TABLE_BYTE_LEN 272 /* struct CAMELLIA_context: */ #define key_table 0 #define key_bitlength CAMELLIA_TABLE_BYTE_LEN /* register macros */ #define CTX %rdi #define RIO %r8 /********************************************************************** helper macros **********************************************************************/ #ifndef CAMELLIA_GFNI_BUILD #define filter_8bit(x, lo_t, hi_t, mask4bit, tmp0) \ vpand x, mask4bit, tmp0; \ vpandn x, mask4bit, x; \ vpsrld $4, x, x; \ \ vpshufb tmp0, lo_t, tmp0; \ vpshufb x, hi_t, x; \ vpxor tmp0, x, x; #endif #define ymm0_x xmm0 #define ymm1_x xmm1 #define ymm2_x xmm2 #define ymm3_x xmm3 #define ymm4_x xmm4 #define ymm5_x xmm5 #define ymm6_x xmm6 #define ymm7_x xmm7 #define ymm8_x xmm8 #define ymm9_x xmm9 #define ymm10_x xmm10 #define ymm11_x xmm11 #define ymm12_x xmm12 #define ymm13_x xmm13 #define ymm14_x xmm14 #define ymm15_x xmm15 #ifdef CAMELLIA_VAES_BUILD # define IF_AESNI(...) # define IF_VAES(...) __VA_ARGS__ #else # define IF_AESNI(...) __VA_ARGS__ # define IF_VAES(...) 
#endif /********************************************************************** GFNI helper macros and constants **********************************************************************/ #ifdef CAMELLIA_GFNI_BUILD #define BV8(a0,a1,a2,a3,a4,a5,a6,a7) \ ( (((a0) & 1) << 0) | \ (((a1) & 1) << 1) | \ (((a2) & 1) << 2) | \ (((a3) & 1) << 3) | \ (((a4) & 1) << 4) | \ (((a5) & 1) << 5) | \ (((a6) & 1) << 6) | \ (((a7) & 1) << 7) ) #define BM8X8(l0,l1,l2,l3,l4,l5,l6,l7) \ ( ((l7) << (0 * 8)) | \ ((l6) << (1 * 8)) | \ ((l5) << (2 * 8)) | \ ((l4) << (3 * 8)) | \ ((l3) << (4 * 8)) | \ ((l2) << (5 * 8)) | \ ((l1) << (6 * 8)) | \ ((l0) << (7 * 8)) ) /* Pre-filters and post-filters constants for Camellia sboxes s1, s2, s3 and s4. * See http://urn.fi/URN:NBN:fi:oulu-201305311409, pages 43-48. * * Pre-filters are directly from above source, "θ₁"/"θ₄". Post-filters are * combination of function "A" (AES SubBytes affine transformation) and * "ψ₁"/"ψ₂"/"ψ₃". */ /* Constant from "θ₁(x)" and "θ₄(x)" functions. */ #define pre_filter_constant_s1234 BV8(1, 0, 1, 0, 0, 0, 1, 0) /* Constant from "ψ₁(A(x))" function: */ #define post_filter_constant_s14 BV8(0, 1, 1, 1, 0, 1, 1, 0) /* Constant from "ψ₂(A(x))" function: */ #define post_filter_constant_s2 BV8(0, 0, 1, 1, 1, 0, 1, 1) /* Constant from "ψ₃(A(x))" function: */ #define post_filter_constant_s3 BV8(1, 1, 1, 0, 1, 1, 0, 0) #endif /* CAMELLIA_GFNI_BUILD */ /********************************************************************** 32-way camellia **********************************************************************/ #ifdef CAMELLIA_GFNI_BUILD /* roundsm32 (GFNI version) * IN: * x0..x7: byte-sliced AB state * mem_cd: register pointer storing CD state * key: index for key material * OUT: * x0..x7: new byte-sliced CD state */ #define roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2, t3, t4, t5, \ t6, t7, mem_cd, key) \ /* \ * S-function with AES subbytes \ */ \ vpbroadcastq .Lpre_filter_bitmatrix_s123 rRIP, t5; \ vpbroadcastq .Lpre_filter_bitmatrix_s4 rRIP, t2; \ vpbroadcastq .Lpost_filter_bitmatrix_s14 rRIP, t4; \ vpbroadcastq .Lpost_filter_bitmatrix_s2 rRIP, t3; \ vpbroadcastq .Lpost_filter_bitmatrix_s3 rRIP, t6; \ vpxor t7##_x, t7##_x, t7##_x; \ vpbroadcastq key, t0; /* higher 64-bit duplicate ignored */ \ \ /* prefilter sboxes */ \ vgf2p8affineqb $(pre_filter_constant_s1234), t5, x0, x0; \ vgf2p8affineqb $(pre_filter_constant_s1234), t5, x7, x7; \ vgf2p8affineqb $(pre_filter_constant_s1234), t2, x3, x3; \ vgf2p8affineqb $(pre_filter_constant_s1234), t2, x6, x6; \ vgf2p8affineqb $(pre_filter_constant_s1234), t5, x2, x2; \ vgf2p8affineqb $(pre_filter_constant_s1234), t5, x5, x5; \ vgf2p8affineqb $(pre_filter_constant_s1234), t5, x1, x1; \ vgf2p8affineqb $(pre_filter_constant_s1234), t5, x4, x4; \ \ /* sbox GF8 inverse + postfilter sboxes 1 and 4 */ \ vgf2p8affineinvqb $(post_filter_constant_s14), t4, x0, x0; \ vgf2p8affineinvqb $(post_filter_constant_s14), t4, x7, x7; \ vgf2p8affineinvqb $(post_filter_constant_s14), t4, x3, x3; \ vgf2p8affineinvqb $(post_filter_constant_s14), t4, x6, x6; \ \ /* sbox GF8 inverse + postfilter sbox 3 */ \ vgf2p8affineinvqb $(post_filter_constant_s3), t6, x2, x2; \ vgf2p8affineinvqb $(post_filter_constant_s3), t6, x5, x5; \ \ /* sbox GF8 inverse + postfilter sbox 2 */ \ vgf2p8affineinvqb $(post_filter_constant_s2), t3, x1, x1; \ vgf2p8affineinvqb $(post_filter_constant_s2), t3, x4, x4; \ \ vpsrldq $1, t0, t1; \ vpsrldq $2, t0, t2; \ vpshufb t7, t1, t1; \ vpsrldq $3, t0, t3; \ \ /* P-function */ \ vpxor x5, x0, x0; \ vpxor x6, x1, x1; 
\ vpxor x7, x2, x2; \ vpxor x4, x3, x3; \ \ vpshufb t7, t2, t2; \ vpsrldq $4, t0, t4; \ vpshufb t7, t3, t3; \ vpsrldq $5, t0, t5; \ vpshufb t7, t4, t4; \ \ vpxor x2, x4, x4; \ vpxor x3, x5, x5; \ vpxor x0, x6, x6; \ vpxor x1, x7, x7; \ \ vpsrldq $6, t0, t6; \ vpshufb t7, t5, t5; \ vpshufb t7, t6, t6; \ \ vpxor x7, x0, x0; \ vpxor x4, x1, x1; \ vpxor x5, x2, x2; \ vpxor x6, x3, x3; \ \ vpxor x3, x4, x4; \ vpxor x0, x5, x5; \ vpxor x1, x6, x6; \ vpxor x2, x7, x7; /* note: high and low parts swapped */ \ \ /* Add key material and result to CD (x becomes new CD) */ \ \ vpxor t6, x1, x1; \ vpxor 5 * 32(mem_cd), x1, x1; \ \ vpsrldq $7, t0, t6; \ vpshufb t7, t0, t0; \ vpshufb t7, t6, t7; \ \ vpxor t7, x0, x0; \ vpxor 4 * 32(mem_cd), x0, x0; \ \ vpxor t5, x2, x2; \ vpxor 6 * 32(mem_cd), x2, x2; \ \ vpxor t4, x3, x3; \ vpxor 7 * 32(mem_cd), x3, x3; \ \ vpxor t3, x4, x4; \ vpxor 0 * 32(mem_cd), x4, x4; \ \ vpxor t2, x5, x5; \ vpxor 1 * 32(mem_cd), x5, x5; \ \ vpxor t1, x6, x6; \ vpxor 2 * 32(mem_cd), x6, x6; \ \ vpxor t0, x7, x7; \ vpxor 3 * 32(mem_cd), x7, x7; #else /* CAMELLIA_GFNI_BUILD */ /* roundsm32 (AES-NI / VAES version) * IN: * x0..x7: byte-sliced AB state * mem_cd: register pointer storing CD state * key: index for key material * OUT: * x0..x7: new byte-sliced CD state */ #define roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2, t3, t4, t5, \ t6, t7, mem_cd, key) \ /* \ * S-function with AES subbytes \ */ \ vbroadcasti128 .Linv_shift_row rRIP, t4; \ vpbroadcastd .L0f0f0f0f rRIP, t7; \ vbroadcasti128 .Lpre_tf_lo_s1 rRIP, t5; \ vbroadcasti128 .Lpre_tf_hi_s1 rRIP, t6; \ vbroadcasti128 .Lpre_tf_lo_s4 rRIP, t2; \ vbroadcasti128 .Lpre_tf_hi_s4 rRIP, t3; \ \ /* AES inverse shift rows */ \ vpshufb t4, x0, x0; \ vpshufb t4, x7, x7; \ vpshufb t4, x3, x3; \ vpshufb t4, x6, x6; \ vpshufb t4, x2, x2; \ vpshufb t4, x5, x5; \ vpshufb t4, x1, x1; \ vpshufb t4, x4, x4; \ \ /* prefilter sboxes 1, 2 and 3 */ \ /* prefilter sbox 4 */ \ filter_8bit(x0, t5, t6, t7, t4); \ filter_8bit(x7, t5, t6, t7, t4); \ IF_AESNI(vextracti128 $1, x0, t0##_x); \ IF_AESNI(vextracti128 $1, x7, t1##_x); \ filter_8bit(x3, t2, t3, t7, t4); \ filter_8bit(x6, t2, t3, t7, t4); \ IF_AESNI(vextracti128 $1, x3, t3##_x); \ IF_AESNI(vextracti128 $1, x6, t2##_x); \ filter_8bit(x2, t5, t6, t7, t4); \ filter_8bit(x5, t5, t6, t7, t4); \ filter_8bit(x1, t5, t6, t7, t4); \ filter_8bit(x4, t5, t6, t7, t4); \ \ vpxor t4##_x, t4##_x, t4##_x; \ \ /* AES subbytes + AES shift rows */ \ IF_AESNI(vextracti128 $1, x2, t6##_x; \ vextracti128 $1, x5, t5##_x; \ vaesenclast t4##_x, x0##_x, x0##_x; \ vaesenclast t4##_x, t0##_x, t0##_x; \ vaesenclast t4##_x, x7##_x, x7##_x; \ vaesenclast t4##_x, t1##_x, t1##_x; \ vaesenclast t4##_x, x3##_x, x3##_x; \ vaesenclast t4##_x, t3##_x, t3##_x; \ vaesenclast t4##_x, x6##_x, x6##_x; \ vaesenclast t4##_x, t2##_x, t2##_x; \ vinserti128 $1, t0##_x, x0, x0; \ vinserti128 $1, t1##_x, x7, x7; \ vinserti128 $1, t3##_x, x3, x3; \ vinserti128 $1, t2##_x, x6, x6; \ vextracti128 $1, x1, t3##_x; \ vextracti128 $1, x4, t2##_x); \ vbroadcasti128 .Lpost_tf_lo_s1 rRIP, t0; \ vbroadcasti128 .Lpost_tf_hi_s1 rRIP, t1; \ IF_AESNI(vaesenclast t4##_x, x2##_x, x2##_x; \ vaesenclast t4##_x, t6##_x, t6##_x; \ vaesenclast t4##_x, x5##_x, x5##_x; \ vaesenclast t4##_x, t5##_x, t5##_x; \ vaesenclast t4##_x, x1##_x, x1##_x; \ vaesenclast t4##_x, t3##_x, t3##_x; \ vaesenclast t4##_x, x4##_x, x4##_x; \ vaesenclast t4##_x, t2##_x, t2##_x; \ vinserti128 $1, t6##_x, x2, x2; \ vinserti128 $1, t5##_x, x5, x5; \ vinserti128 $1, t3##_x, x1, x1; \ 
vinserti128 $1, t2##_x, x4, x4); \ IF_VAES(vaesenclast t4, x0, x0; \ vaesenclast t4, x7, x7; \ vaesenclast t4, x3, x3; \ vaesenclast t4, x6, x6; \ vaesenclast t4, x2, x2; \ vaesenclast t4, x5, x5; \ vaesenclast t4, x1, x1; \ vaesenclast t4, x4, x4); \ \ /* postfilter sboxes 1 and 4 */ \ vbroadcasti128 .Lpost_tf_lo_s3 rRIP, t2; \ vbroadcasti128 .Lpost_tf_hi_s3 rRIP, t3; \ filter_8bit(x0, t0, t1, t7, t4); \ filter_8bit(x7, t0, t1, t7, t4); \ filter_8bit(x3, t0, t1, t7, t6); \ filter_8bit(x6, t0, t1, t7, t6); \ \ /* postfilter sbox 3 */ \ vbroadcasti128 .Lpost_tf_lo_s2 rRIP, t4; \ vbroadcasti128 .Lpost_tf_hi_s2 rRIP, t5; \ filter_8bit(x2, t2, t3, t7, t6); \ filter_8bit(x5, t2, t3, t7, t6); \ \ vpbroadcastq key, t0; /* higher 64-bit duplicate ignored */ \ \ /* postfilter sbox 2 */ \ filter_8bit(x1, t4, t5, t7, t2); \ filter_8bit(x4, t4, t5, t7, t2); \ vpxor t7##_x, t7##_x, t7##_x; \ \ vpsrldq $1, t0, t1; \ vpsrldq $2, t0, t2; \ vpshufb t7, t1, t1; \ vpsrldq $3, t0, t3; \ \ /* P-function */ \ vpxor x5, x0, x0; \ vpxor x6, x1, x1; \ vpxor x7, x2, x2; \ vpxor x4, x3, x3; \ \ vpshufb t7, t2, t2; \ vpsrldq $4, t0, t4; \ vpshufb t7, t3, t3; \ vpsrldq $5, t0, t5; \ vpshufb t7, t4, t4; \ \ vpxor x2, x4, x4; \ vpxor x3, x5, x5; \ vpxor x0, x6, x6; \ vpxor x1, x7, x7; \ \ vpsrldq $6, t0, t6; \ vpshufb t7, t5, t5; \ vpshufb t7, t6, t6; \ \ vpxor x7, x0, x0; \ vpxor x4, x1, x1; \ vpxor x5, x2, x2; \ vpxor x6, x3, x3; \ \ vpxor x3, x4, x4; \ vpxor x0, x5, x5; \ vpxor x1, x6, x6; \ vpxor x2, x7, x7; /* note: high and low parts swapped */ \ \ /* Add key material and result to CD (x becomes new CD) */ \ \ vpxor t6, x1, x1; \ vpxor 5 * 32(mem_cd), x1, x1; \ \ vpsrldq $7, t0, t6; \ vpshufb t7, t0, t0; \ vpshufb t7, t6, t7; \ \ vpxor t7, x0, x0; \ vpxor 4 * 32(mem_cd), x0, x0; \ \ vpxor t5, x2, x2; \ vpxor 6 * 32(mem_cd), x2, x2; \ \ vpxor t4, x3, x3; \ vpxor 7 * 32(mem_cd), x3, x3; \ \ vpxor t3, x4, x4; \ vpxor 0 * 32(mem_cd), x4, x4; \ \ vpxor t2, x5, x5; \ vpxor 1 * 32(mem_cd), x5, x5; \ \ vpxor t1, x6, x6; \ vpxor 2 * 32(mem_cd), x6, x6; \ \ vpxor t0, x7, x7; \ vpxor 3 * 32(mem_cd), x7, x7; #endif /* CAMELLIA_GFNI_BUILD */ /* * IN/OUT: * x0..x7: byte-sliced AB state preloaded * mem_ab: byte-sliced AB state in memory * mem_cb: byte-sliced CD state in memory */ #define two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, i, dir, store_ab) \ roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_cd, (key_table + (i) * 8)(CTX)); \ \ vmovdqu x0, 4 * 32(mem_cd); \ vmovdqu x1, 5 * 32(mem_cd); \ vmovdqu x2, 6 * 32(mem_cd); \ vmovdqu x3, 7 * 32(mem_cd); \ vmovdqu x4, 0 * 32(mem_cd); \ vmovdqu x5, 1 * 32(mem_cd); \ vmovdqu x6, 2 * 32(mem_cd); \ vmovdqu x7, 3 * 32(mem_cd); \ \ roundsm32(x4, x5, x6, x7, x0, x1, x2, x3, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, (key_table + ((i) + (dir)) * 8)(CTX)); \ \ store_ab(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab); #define dummy_store(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab) /* do nothing */ #define store_ab_state(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab) \ /* Store new AB state */ \ vmovdqu x4, 4 * 32(mem_ab); \ vmovdqu x5, 5 * 32(mem_ab); \ vmovdqu x6, 6 * 32(mem_ab); \ vmovdqu x7, 7 * 32(mem_ab); \ vmovdqu x0, 0 * 32(mem_ab); \ vmovdqu x1, 1 * 32(mem_ab); \ vmovdqu x2, 2 * 32(mem_ab); \ vmovdqu x3, 3 * 32(mem_ab); #define enc_rounds32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, i) \ two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, 
(i) + 2, 1, store_ab_state); \ two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 4, 1, store_ab_state); \ two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 6, 1, dummy_store); #define dec_rounds32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, i) \ two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 7, -1, store_ab_state); \ two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 5, -1, store_ab_state); \ two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 3, -1, dummy_store); /* * IN: * v0..3: byte-sliced 32-bit integers * OUT: * v0..3: (IN <<< 1) */ #define rol32_1_32(v0, v1, v2, v3, t0, t1, t2, zero) \ vpcmpgtb v0, zero, t0; \ vpaddb v0, v0, v0; \ vpabsb t0, t0; \ \ vpcmpgtb v1, zero, t1; \ vpaddb v1, v1, v1; \ vpabsb t1, t1; \ \ vpcmpgtb v2, zero, t2; \ vpaddb v2, v2, v2; \ vpabsb t2, t2; \ \ vpor t0, v1, v1; \ \ vpcmpgtb v3, zero, t0; \ vpaddb v3, v3, v3; \ vpabsb t0, t0; \ \ vpor t1, v2, v2; \ vpor t2, v3, v3; \ vpor t0, v0, v0; /* * IN: * r: byte-sliced AB state in memory * l: byte-sliced CD state in memory * OUT: * x0..x7: new byte-sliced CD state */ #define fls32(l, l0, l1, l2, l3, l4, l5, l6, l7, r, t0, t1, t2, t3, tt0, \ tt1, tt2, tt3, kll, klr, krl, krr) \ /* \ * t0 = kll; \ * t0 &= ll; \ * lr ^= rol32(t0, 1); \ */ \ vpbroadcastd kll, t0; /* only lowest 32-bit used */ \ vpxor tt0, tt0, tt0; \ vpshufb tt0, t0, t3; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t2; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t1; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t0; \ \ vpand l0, t0, t0; \ vpand l1, t1, t1; \ vpand l2, t2, t2; \ vpand l3, t3, t3; \ \ rol32_1_32(t3, t2, t1, t0, tt1, tt2, tt3, tt0); \ \ vpxor l4, t0, l4; \ vpbroadcastd krr, t0; /* only lowest 32-bit used */ \ vmovdqu l4, 4 * 32(l); \ vpxor l5, t1, l5; \ vmovdqu l5, 5 * 32(l); \ vpxor l6, t2, l6; \ vmovdqu l6, 6 * 32(l); \ vpxor l7, t3, l7; \ vmovdqu l7, 7 * 32(l); \ \ /* \ * t2 = krr; \ * t2 |= rr; \ * rl ^= t2; \ */ \ \ vpshufb tt0, t0, t3; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t2; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t1; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t0; \ \ vpor 4 * 32(r), t0, t0; \ vpor 5 * 32(r), t1, t1; \ vpor 6 * 32(r), t2, t2; \ vpor 7 * 32(r), t3, t3; \ \ vpxor 0 * 32(r), t0, t0; \ vpxor 1 * 32(r), t1, t1; \ vpxor 2 * 32(r), t2, t2; \ vpxor 3 * 32(r), t3, t3; \ vmovdqu t0, 0 * 32(r); \ vpbroadcastd krl, t0; /* only lowest 32-bit used */ \ vmovdqu t1, 1 * 32(r); \ vmovdqu t2, 2 * 32(r); \ vmovdqu t3, 3 * 32(r); \ \ /* \ * t2 = krl; \ * t2 &= rl; \ * rr ^= rol32(t2, 1); \ */ \ vpshufb tt0, t0, t3; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t2; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t1; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t0; \ \ vpand 0 * 32(r), t0, t0; \ vpand 1 * 32(r), t1, t1; \ vpand 2 * 32(r), t2, t2; \ vpand 3 * 32(r), t3, t3; \ \ rol32_1_32(t3, t2, t1, t0, tt1, tt2, tt3, tt0); \ \ vpxor 4 * 32(r), t0, t0; \ vpxor 5 * 32(r), t1, t1; \ vpxor 6 * 32(r), t2, t2; \ vpxor 7 * 32(r), t3, t3; \ vmovdqu t0, 4 * 32(r); \ vpbroadcastd klr, t0; /* only lowest 32-bit used */ \ vmovdqu t1, 5 * 32(r); \ vmovdqu t2, 6 * 32(r); \ vmovdqu t3, 7 * 32(r); \ \ /* \ * t0 = klr; \ * t0 |= lr; \ * ll ^= t0; \ */ \ \ vpshufb tt0, t0, t3; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t2; \ vpsrldq $1, t0, t0; \ vpshufb tt0, t0, t1; \ vpsrldq $1, 
t0, t0; \ vpshufb tt0, t0, t0; \ \ vpor l4, t0, t0; \ vpor l5, t1, t1; \ vpor l6, t2, t2; \ vpor l7, t3, t3; \ \ vpxor l0, t0, l0; \ vmovdqu l0, 0 * 32(l); \ vpxor l1, t1, l1; \ vmovdqu l1, 1 * 32(l); \ vpxor l2, t2, l2; \ vmovdqu l2, 2 * 32(l); \ vpxor l3, t3, l3; \ vmovdqu l3, 3 * 32(l); #define transpose_4x4(x0, x1, x2, x3, t1, t2) \ vpunpckhdq x1, x0, t2; \ vpunpckldq x1, x0, x0; \ \ vpunpckldq x3, x2, t1; \ vpunpckhdq x3, x2, x2; \ \ vpunpckhqdq t1, x0, x1; \ vpunpcklqdq t1, x0, x0; \ \ vpunpckhqdq x2, t2, x3; \ vpunpcklqdq x2, t2, x2; #define byteslice_16x16b_fast(a0, b0, c0, d0, a1, b1, c1, d1, a2, b2, c2, d2, \ a3, b3, c3, d3, st0, st1) \ vmovdqu d2, st0; \ vmovdqu d3, st1; \ transpose_4x4(a0, a1, a2, a3, d2, d3); \ transpose_4x4(b0, b1, b2, b3, d2, d3); \ vmovdqu st0, d2; \ vmovdqu st1, d3; \ \ vmovdqu a0, st0; \ vmovdqu a1, st1; \ transpose_4x4(c0, c1, c2, c3, a0, a1); \ transpose_4x4(d0, d1, d2, d3, a0, a1); \ \ vbroadcasti128 .Lshufb_16x16b rRIP, a0; \ vmovdqu st1, a1; \ vpshufb a0, a2, a2; \ vpshufb a0, a3, a3; \ vpshufb a0, b0, b0; \ vpshufb a0, b1, b1; \ vpshufb a0, b2, b2; \ vpshufb a0, b3, b3; \ vpshufb a0, a1, a1; \ vpshufb a0, c0, c0; \ vpshufb a0, c1, c1; \ vpshufb a0, c2, c2; \ vpshufb a0, c3, c3; \ vpshufb a0, d0, d0; \ vpshufb a0, d1, d1; \ vpshufb a0, d2, d2; \ vpshufb a0, d3, d3; \ vmovdqu d3, st1; \ vmovdqu st0, d3; \ vpshufb a0, d3, a0; \ vmovdqu d2, st0; \ \ transpose_4x4(a0, b0, c0, d0, d2, d3); \ transpose_4x4(a1, b1, c1, d1, d2, d3); \ vmovdqu st0, d2; \ vmovdqu st1, d3; \ \ vmovdqu b0, st0; \ vmovdqu b1, st1; \ transpose_4x4(a2, b2, c2, d2, b0, b1); \ transpose_4x4(a3, b3, c3, d3, b0, b1); \ vmovdqu st0, b0; \ vmovdqu st1, b1; \ /* does not adjust output bytes inside vectors */ /* load blocks to registers and apply pre-whitening */ #define inpack32_pre(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, rio, key) \ vpbroadcastq key, x0; \ vpshufb .Lpack_bswap rRIP, x0, x0; \ \ vpxor 0 * 32(rio), x0, y7; \ vpxor 1 * 32(rio), x0, y6; \ vpxor 2 * 32(rio), x0, y5; \ vpxor 3 * 32(rio), x0, y4; \ vpxor 4 * 32(rio), x0, y3; \ vpxor 5 * 32(rio), x0, y2; \ vpxor 6 * 32(rio), x0, y1; \ vpxor 7 * 32(rio), x0, y0; \ vpxor 8 * 32(rio), x0, x7; \ vpxor 9 * 32(rio), x0, x6; \ vpxor 10 * 32(rio), x0, x5; \ vpxor 11 * 32(rio), x0, x4; \ vpxor 12 * 32(rio), x0, x3; \ vpxor 13 * 32(rio), x0, x2; \ vpxor 14 * 32(rio), x0, x1; \ vpxor 15 * 32(rio), x0, x0; /* byteslice pre-whitened blocks and store to temporary memory */ #define inpack32_post(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd) \ byteslice_16x16b_fast(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, \ y4, y5, y6, y7, (mem_ab), (mem_cd)); \ \ vmovdqu x0, 0 * 32(mem_ab); \ vmovdqu x1, 1 * 32(mem_ab); \ vmovdqu x2, 2 * 32(mem_ab); \ vmovdqu x3, 3 * 32(mem_ab); \ vmovdqu x4, 4 * 32(mem_ab); \ vmovdqu x5, 5 * 32(mem_ab); \ vmovdqu x6, 6 * 32(mem_ab); \ vmovdqu x7, 7 * 32(mem_ab); \ vmovdqu y0, 0 * 32(mem_cd); \ vmovdqu y1, 1 * 32(mem_cd); \ vmovdqu y2, 2 * 32(mem_cd); \ vmovdqu y3, 3 * 32(mem_cd); \ vmovdqu y4, 4 * 32(mem_cd); \ vmovdqu y5, 5 * 32(mem_cd); \ vmovdqu y6, 6 * 32(mem_cd); \ vmovdqu y7, 7 * 32(mem_cd); /* de-byteslice, apply post-whitening and store blocks */ #define outunpack32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, \ y5, y6, y7, key, stack_tmp0, stack_tmp1) \ byteslice_16x16b_fast(y0, y4, x0, x4, y1, y5, x1, x5, y2, y6, x2, x6, \ y3, y7, x3, x7, stack_tmp0, stack_tmp1); \ \ vmovdqu x0, stack_tmp0; \ \ vpbroadcastq key, x0; \ vpshufb .Lpack_bswap 
rRIP, x0, x0; \ \ vpxor x0, y7, y7; \ vpxor x0, y6, y6; \ vpxor x0, y5, y5; \ vpxor x0, y4, y4; \ vpxor x0, y3, y3; \ vpxor x0, y2, y2; \ vpxor x0, y1, y1; \ vpxor x0, y0, y0; \ vpxor x0, x7, x7; \ vpxor x0, x6, x6; \ vpxor x0, x5, x5; \ vpxor x0, x4, x4; \ vpxor x0, x3, x3; \ vpxor x0, x2, x2; \ vpxor x0, x1, x1; \ vpxor stack_tmp0, x0, x0; #define write_output(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, rio) \ vmovdqu x0, 0 * 32(rio); \ vmovdqu x1, 1 * 32(rio); \ vmovdqu x2, 2 * 32(rio); \ vmovdqu x3, 3 * 32(rio); \ vmovdqu x4, 4 * 32(rio); \ vmovdqu x5, 5 * 32(rio); \ vmovdqu x6, 6 * 32(rio); \ vmovdqu x7, 7 * 32(rio); \ vmovdqu y0, 8 * 32(rio); \ vmovdqu y1, 9 * 32(rio); \ vmovdqu y2, 10 * 32(rio); \ vmovdqu y3, 11 * 32(rio); \ vmovdqu y4, 12 * 32(rio); \ vmovdqu y5, 13 * 32(rio); \ vmovdqu y6, 14 * 32(rio); \ vmovdqu y7, 15 * 32(rio); SECTION_RODATA .align 32 #define SHUFB_BYTES(idx) \ 0 + (idx), 4 + (idx), 8 + (idx), 12 + (idx) FUNC_NAME(_constants): ELF(.type FUNC_NAME(_constants),@object;) .Lpack_bswap: .long 0x00010203, 0x04050607, 0x80808080, 0x80808080 .long 0x00010203, 0x04050607, 0x80808080, 0x80808080 .Lshufb_16x16b: .byte SHUFB_BYTES(0), SHUFB_BYTES(1), SHUFB_BYTES(2), SHUFB_BYTES(3) /* For CTR-mode IV byteswap */ .Lbswap128_mask: .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 +/* CTR byte addition constants */ +.align 32 +.Lbige_addb_0_1: + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 +.Lbige_addb_2_3: + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2 + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3 +.Lbige_addb_4_5: + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4 + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5 +.Lbige_addb_6_7: + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6 + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7 +.Lbige_addb_8_9: + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8 + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9 +.Lbige_addb_10_11: + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10 + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11 +.Lbige_addb_12_13: + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12 + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 13 +.Lbige_addb_14_15: + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 14 + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15 +.Lbige_addb_16_16: + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16 + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16 + #ifdef CAMELLIA_GFNI_BUILD /* Pre-filters and post-filters bit-matrixes for Camellia sboxes s1, s2, s3 * and s4. * See http://urn.fi/URN:NBN:fi:oulu-201305311409, pages 43-48. * * Pre-filters are directly from above source, "θ₁"/"θ₄". Post-filters are * combination of function "A" (AES SubBytes affine transformation) and * "ψ₁"/"ψ₂"/"ψ₃". 
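
The .Lbige_addb_* tables added above drive a new fast path in FUNC_NAME(ctr_enc) (the .Lctr_byteadd label further below): when the last byte of the big-endian counter is at most 0x100 - 32, the 32 per-block counter values differ from the base IV only in byte 15, so they can be produced with plain vpaddb byte additions instead of full 128-bit big-endian increments with carry propagation. What follows is a minimal C sketch of that dispatch logic only, under the assumption of GCC/Clang byte-swap builtins; the helper names are illustrative, not part of the patch, and the real code computes all 32 counter blocks at once in YMM registers.

    #include <stdint.h>
    #include <string.h>

    #define NBLOCKS 32  /* blocks processed per call in the AVX2 path */

    /* Full big-endian 128-bit increment with carry (slow path). */
    static void be128_add (uint8_t ctr[16], uint64_t n)
    {
      uint64_t hi, lo;
      memcpy (&hi, ctr, 8);
      memcpy (&lo, ctr + 8, 8);
      hi = __builtin_bswap64 (hi);
      lo = __builtin_bswap64 (lo);
      lo += n;
      hi += (lo < n);              /* carry out of the low quadword */
      hi = __builtin_bswap64 (hi);
      lo = __builtin_bswap64 (lo);
      memcpy (ctr, &hi, 8);
      memcpy (ctr + 8, &lo, 8);
    }

    /* Generate NBLOCKS counter values into ivs[][16] and advance ctr. */
    static void ctr_gen_blocks (uint8_t ctr[16], uint8_t ivs[NBLOCKS][16])
    {
      int i;

      if (ctr[15] <= 0x100 - NBLOCKS)
        {
          /* Fast path: adding 0..NBLOCKS-1 to byte 15 cannot carry, so
           * each IV differs from the base only in its last byte.  This
           * is what the vpaddb .Lbige_addb_* additions compute. */
          for (i = 0; i < NBLOCKS; i++)
            {
              memcpy (ivs[i], ctr, 16);
              ivs[i][15] += i;
            }
          if (ctr[15] == 0x100 - NBLOCKS)
            be128_add (ctr, NBLOCKS); /* +NBLOCKS carries out of byte 15 */
          else
            ctr[15] += NBLOCKS;
        }
      else
        {
          /* Slow path: per-block big-endian increment with carry. */
          for (i = 0; i < NBLOCKS; i++)
            {
              memcpy (ivs[i], ctr, 16);
              be128_add (ctr, 1);
            }
        }
    }

This mirrors the new "cmpb $(0x100 - 32), 15(%rcx); jbe .Lctr_byteadd" check: the equal case takes .Lctr_byteadd_full_ctr_carry to update the stored counter with a 64-bit add/adc pair, while the strictly-below case only needs "addb $32, 15(%rcx)".
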
*/ /* Bit-matrix from "θ₁(x)" function: */ .Lpre_filter_bitmatrix_s123: .quad BM8X8(BV8(1, 1, 1, 0, 1, 1, 0, 1), BV8(0, 0, 1, 1, 0, 0, 1, 0), BV8(1, 1, 0, 1, 0, 0, 0, 0), BV8(1, 0, 1, 1, 0, 0, 1, 1), BV8(0, 0, 0, 0, 1, 1, 0, 0), BV8(1, 0, 1, 0, 0, 1, 0, 0), BV8(0, 0, 1, 0, 1, 1, 0, 0), BV8(1, 0, 0, 0, 0, 1, 1, 0)) /* Bit-matrix from "θ₄(x)" function: */ .Lpre_filter_bitmatrix_s4: .quad BM8X8(BV8(1, 1, 0, 1, 1, 0, 1, 1), BV8(0, 1, 1, 0, 0, 1, 0, 0), BV8(1, 0, 1, 0, 0, 0, 0, 1), BV8(0, 1, 1, 0, 0, 1, 1, 1), BV8(0, 0, 0, 1, 1, 0, 0, 0), BV8(0, 1, 0, 0, 1, 0, 0, 1), BV8(0, 1, 0, 1, 1, 0, 0, 0), BV8(0, 0, 0, 0, 1, 1, 0, 1)) /* Bit-matrix from "ψ₁(A(x))" function: */ .Lpost_filter_bitmatrix_s14: .quad BM8X8(BV8(0, 0, 0, 0, 0, 0, 0, 1), BV8(0, 1, 1, 0, 0, 1, 1, 0), BV8(1, 0, 1, 1, 1, 1, 1, 0), BV8(0, 0, 0, 1, 1, 0, 1, 1), BV8(1, 0, 0, 0, 1, 1, 1, 0), BV8(0, 1, 0, 1, 1, 1, 1, 0), BV8(0, 1, 1, 1, 1, 1, 1, 1), BV8(0, 0, 0, 1, 1, 1, 0, 0)) /* Bit-matrix from "ψ₂(A(x))" function: */ .Lpost_filter_bitmatrix_s2: .quad BM8X8(BV8(0, 0, 0, 1, 1, 1, 0, 0), BV8(0, 0, 0, 0, 0, 0, 0, 1), BV8(0, 1, 1, 0, 0, 1, 1, 0), BV8(1, 0, 1, 1, 1, 1, 1, 0), BV8(0, 0, 0, 1, 1, 0, 1, 1), BV8(1, 0, 0, 0, 1, 1, 1, 0), BV8(0, 1, 0, 1, 1, 1, 1, 0), BV8(0, 1, 1, 1, 1, 1, 1, 1)) /* Bit-matrix from "ψ₃(A(x))" function: */ .Lpost_filter_bitmatrix_s3: .quad BM8X8(BV8(0, 1, 1, 0, 0, 1, 1, 0), BV8(1, 0, 1, 1, 1, 1, 1, 0), BV8(0, 0, 0, 1, 1, 0, 1, 1), BV8(1, 0, 0, 0, 1, 1, 1, 0), BV8(0, 1, 0, 1, 1, 1, 1, 0), BV8(0, 1, 1, 1, 1, 1, 1, 1), BV8(0, 0, 0, 1, 1, 1, 0, 0), BV8(0, 0, 0, 0, 0, 0, 0, 1)) #else /* CAMELLIA_GFNI_BUILD */ /* * pre-SubByte transform * * pre-lookup for sbox1, sbox2, sbox3: * swap_bitendianness( * isom_map_camellia_to_aes( * camellia_f( * swap_bitendianess(in) * ) * ) * ) * * (note: '⊕ 0xc5' inside camellia_f()) */ .Lpre_tf_lo_s1: .byte 0x45, 0xe8, 0x40, 0xed, 0x2e, 0x83, 0x2b, 0x86 .byte 0x4b, 0xe6, 0x4e, 0xe3, 0x20, 0x8d, 0x25, 0x88 .Lpre_tf_hi_s1: .byte 0x00, 0x51, 0xf1, 0xa0, 0x8a, 0xdb, 0x7b, 0x2a .byte 0x09, 0x58, 0xf8, 0xa9, 0x83, 0xd2, 0x72, 0x23 /* * pre-SubByte transform * * pre-lookup for sbox4: * swap_bitendianness( * isom_map_camellia_to_aes( * camellia_f( * swap_bitendianess(in <<< 1) * ) * ) * ) * * (note: '⊕ 0xc5' inside camellia_f()) */ .Lpre_tf_lo_s4: .byte 0x45, 0x40, 0x2e, 0x2b, 0x4b, 0x4e, 0x20, 0x25 .byte 0x14, 0x11, 0x7f, 0x7a, 0x1a, 0x1f, 0x71, 0x74 .Lpre_tf_hi_s4: .byte 0x00, 0xf1, 0x8a, 0x7b, 0x09, 0xf8, 0x83, 0x72 .byte 0xad, 0x5c, 0x27, 0xd6, 0xa4, 0x55, 0x2e, 0xdf /* * post-SubByte transform * * post-lookup for sbox1, sbox4: * swap_bitendianness( * camellia_h( * isom_map_aes_to_camellia( * swap_bitendianness( * aes_inverse_affine_transform(in) * ) * ) * ) * ) * * (note: '⊕ 0x6e' inside camellia_h()) */ .Lpost_tf_lo_s1: .byte 0x3c, 0xcc, 0xcf, 0x3f, 0x32, 0xc2, 0xc1, 0x31 .byte 0xdc, 0x2c, 0x2f, 0xdf, 0xd2, 0x22, 0x21, 0xd1 .Lpost_tf_hi_s1: .byte 0x00, 0xf9, 0x86, 0x7f, 0xd7, 0x2e, 0x51, 0xa8 .byte 0xa4, 0x5d, 0x22, 0xdb, 0x73, 0x8a, 0xf5, 0x0c /* * post-SubByte transform * * post-lookup for sbox2: * swap_bitendianness( * camellia_h( * isom_map_aes_to_camellia( * swap_bitendianness( * aes_inverse_affine_transform(in) * ) * ) * ) * ) <<< 1 * * (note: '⊕ 0x6e' inside camellia_h()) */ .Lpost_tf_lo_s2: .byte 0x78, 0x99, 0x9f, 0x7e, 0x64, 0x85, 0x83, 0x62 .byte 0xb9, 0x58, 0x5e, 0xbf, 0xa5, 0x44, 0x42, 0xa3 .Lpost_tf_hi_s2: .byte 0x00, 0xf3, 0x0d, 0xfe, 0xaf, 0x5c, 0xa2, 0x51 .byte 0x49, 0xba, 0x44, 0xb7, 0xe6, 0x15, 0xeb, 0x18 /* * post-SubByte transform * * post-lookup for sbox3: * 
swap_bitendianness( * camellia_h( * isom_map_aes_to_camellia( * swap_bitendianness( * aes_inverse_affine_transform(in) * ) * ) * ) * ) >>> 1 * * (note: '⊕ 0x6e' inside camellia_h()) */ .Lpost_tf_lo_s3: .byte 0x1e, 0x66, 0xe7, 0x9f, 0x19, 0x61, 0xe0, 0x98 .byte 0x6e, 0x16, 0x97, 0xef, 0x69, 0x11, 0x90, 0xe8 .Lpost_tf_hi_s3: .byte 0x00, 0xfc, 0x43, 0xbf, 0xeb, 0x17, 0xa8, 0x54 .byte 0x52, 0xae, 0x11, 0xed, 0xb9, 0x45, 0xfa, 0x06 /* For isolating SubBytes from AESENCLAST, inverse shift row */ .Linv_shift_row: .byte 0x00, 0x0d, 0x0a, 0x07, 0x04, 0x01, 0x0e, 0x0b .byte 0x08, 0x05, 0x02, 0x0f, 0x0c, 0x09, 0x06, 0x03 .align 4 /* 4-bit mask */ .L0f0f0f0f: .long 0x0f0f0f0f #endif /* CAMELLIA_GFNI_BUILD */ ELF(.size FUNC_NAME(_constants),.-FUNC_NAME(_constants);) .text .align 16 ELF(.type FUNC_NAME(enc_blk32),@function;) FUNC_NAME(enc_blk32): /* input: * %rdi: ctx, CTX * %rax: temporary storage, 512 bytes * %r8d: 24 for 16 byte key, 32 for larger * %ymm0..%ymm15: 32 plaintext blocks * output: * %ymm0..%ymm15: 32 encrypted blocks, order swapped: * 7, 8, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 */ CFI_STARTPROC(); leaq 8 * 32(%rax), %rcx; leaq (-8 * 8)(CTX, %r8, 8), %r8; inpack32_post(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, %rax, %rcx); .align 8 .Lenc_loop: enc_rounds32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, %rax, %rcx, 0); cmpq %r8, CTX; je .Lenc_done; leaq (8 * 8)(CTX), CTX; fls32(%rax, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %rcx, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, ((key_table) + 0)(CTX), ((key_table) + 4)(CTX), ((key_table) + 8)(CTX), ((key_table) + 12)(CTX)); jmp .Lenc_loop; .align 8 .Lenc_done: /* load CD for output */ vmovdqu 0 * 32(%rcx), %ymm8; vmovdqu 1 * 32(%rcx), %ymm9; vmovdqu 2 * 32(%rcx), %ymm10; vmovdqu 3 * 32(%rcx), %ymm11; vmovdqu 4 * 32(%rcx), %ymm12; vmovdqu 5 * 32(%rcx), %ymm13; vmovdqu 6 * 32(%rcx), %ymm14; vmovdqu 7 * 32(%rcx), %ymm15; outunpack32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, ((key_table) + 8 * 8)(%r8), (%rax), 1 * 32(%rax)); ret_spec_stop; CFI_ENDPROC(); ELF(.size FUNC_NAME(enc_blk32),.-FUNC_NAME(enc_blk32);) .align 16 ELF(.type FUNC_NAME(dec_blk32),@function;) FUNC_NAME(dec_blk32): /* input: * %rdi: ctx, CTX * %rax: temporary storage, 512 bytes * %r8d: 24 for 16 byte key, 32 for larger * %ymm0..%ymm15: 32 encrypted blocks * output: * %ymm0..%ymm15: 32 plaintext blocks, order swapped: * 7, 8, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 */ CFI_STARTPROC(); movq %r8, %rcx; movq CTX, %r8 leaq (-8 * 8)(CTX, %rcx, 8), CTX; leaq 8 * 32(%rax), %rcx; inpack32_post(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, %rax, %rcx); .align 8 .Ldec_loop: dec_rounds32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, %rax, %rcx, 0); cmpq %r8, CTX; je .Ldec_done; fls32(%rax, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %rcx, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, ((key_table) + 8)(CTX), ((key_table) + 12)(CTX), ((key_table) + 0)(CTX), ((key_table) + 4)(CTX)); leaq (-8 * 8)(CTX), CTX; jmp .Ldec_loop; .align 8 .Ldec_done: /* load CD for output */ vmovdqu 0 * 32(%rcx), %ymm8; vmovdqu 1 * 32(%rcx), %ymm9; vmovdqu 2 * 32(%rcx), %ymm10; 
vmovdqu 3 * 32(%rcx), %ymm11; vmovdqu 4 * 32(%rcx), %ymm12; vmovdqu 5 * 32(%rcx), %ymm13; vmovdqu 6 * 32(%rcx), %ymm14; vmovdqu 7 * 32(%rcx), %ymm15; outunpack32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax)); ret_spec_stop; CFI_ENDPROC(); ELF(.size FUNC_NAME(dec_blk32),.-FUNC_NAME(dec_blk32);) #define inc_le128(x, minus_one, tmp) \ vpcmpeqq minus_one, x, tmp; \ vpsubq minus_one, x, x; \ vpslldq $8, tmp, tmp; \ vpsubq tmp, x, x; .align 16 .globl FUNC_NAME(ctr_enc) ELF(.type FUNC_NAME(ctr_enc),@function;) FUNC_NAME(ctr_enc): /* input: * %rdi: ctx, CTX * %rsi: dst (32 blocks) * %rdx: src (32 blocks) * %rcx: iv (big endian, 128bit) */ CFI_STARTPROC(); pushq %rbp; CFI_PUSH(%rbp); movq %rsp, %rbp; CFI_DEF_CFA_REGISTER(%rbp); - movq 8(%rcx), %r11; - bswapq %r11; - cmpl $128, key_bitlength(CTX); movl $32, %r8d; movl $24, %eax; cmovel %eax, %r8d; /* max */ subq $(16 * 32), %rsp; andq $~63, %rsp; movq %rsp, %rax; + cmpb $(0x100 - 32), 15(%rcx); + jbe .Lctr_byteadd; + + movq 8(%rcx), %r11; + bswapq %r11; + vpcmpeqd %ymm15, %ymm15, %ymm15; vpsrldq $8, %ymm15, %ymm15; /* ab: -1:0 ; cd: -1:0 */ /* load IV and byteswap */ vmovdqu (%rcx), %xmm0; vpshufb .Lbswap128_mask rRIP, %xmm0, %xmm0; vmovdqa %xmm0, %xmm1; inc_le128(%xmm0, %xmm15, %xmm14); vbroadcasti128 .Lbswap128_mask rRIP, %ymm14; vinserti128 $1, %xmm0, %ymm1, %ymm0; vpshufb %ymm14, %ymm0, %ymm13; vmovdqu %ymm13, 15 * 32(%rax); /* check need for handling 64-bit overflow and carry */ cmpq $(0xffffffffffffffff - 32), %r11; ja .Lload_ctr_carry; /* construct IVs */ vpaddq %ymm15, %ymm15, %ymm15; /* ab: -2:0 ; cd: -2:0 */ vpsubq %ymm15, %ymm0, %ymm0; vpshufb %ymm14, %ymm0, %ymm13; vmovdqu %ymm13, 14 * 32(%rax); vpsubq %ymm15, %ymm0, %ymm0; vpshufb %ymm14, %ymm0, %ymm13; vmovdqu %ymm13, 13 * 32(%rax); vpsubq %ymm15, %ymm0, %ymm0; vpshufb %ymm14, %ymm0, %ymm12; vpsubq %ymm15, %ymm0, %ymm0; vpshufb %ymm14, %ymm0, %ymm11; vpsubq %ymm15, %ymm0, %ymm0; vpshufb %ymm14, %ymm0, %ymm10; vpsubq %ymm15, %ymm0, %ymm0; vpshufb %ymm14, %ymm0, %ymm9; vpsubq %ymm15, %ymm0, %ymm0; vpshufb %ymm14, %ymm0, %ymm8; vpsubq %ymm15, %ymm0, %ymm0; vpshufb %ymm14, %ymm0, %ymm7; vpsubq %ymm15, %ymm0, %ymm0; vpshufb %ymm14, %ymm0, %ymm6; vpsubq %ymm15, %ymm0, %ymm0; vpshufb %ymm14, %ymm0, %ymm5; vpsubq %ymm15, %ymm0, %ymm0; vpshufb %ymm14, %ymm0, %ymm4; vpsubq %ymm15, %ymm0, %ymm0; vpshufb %ymm14, %ymm0, %ymm3; vpsubq %ymm15, %ymm0, %ymm0; vpshufb %ymm14, %ymm0, %ymm2; vpsubq %ymm15, %ymm0, %ymm0; vpshufb %ymm14, %ymm0, %ymm1; vpsubq %ymm15, %ymm0, %ymm0; /* +30 ; +31 */ vpsubq %xmm15, %xmm0, %xmm13; /* +32 */ vpshufb %ymm14, %ymm0, %ymm0; vpshufb %xmm14, %xmm13, %xmm13; vmovdqu %xmm13, (%rcx); jmp .Lload_ctr_done; .align 4 .Lload_ctr_carry: /* construct IVs */ inc_le128(%ymm0, %ymm15, %ymm13); /* ab: le1 ; cd: le2 */ inc_le128(%ymm0, %ymm15, %ymm13); /* ab: le2 ; cd: le3 */ vpshufb %ymm14, %ymm0, %ymm13; vmovdqu %ymm13, 14 * 32(%rax); inc_le128(%ymm0, %ymm15, %ymm13); inc_le128(%ymm0, %ymm15, %ymm13); vpshufb %ymm14, %ymm0, %ymm13; vmovdqu %ymm13, 13 * 32(%rax); inc_le128(%ymm0, %ymm15, %ymm13); inc_le128(%ymm0, %ymm15, %ymm13); vpshufb %ymm14, %ymm0, %ymm12; inc_le128(%ymm0, %ymm15, %ymm13); inc_le128(%ymm0, %ymm15, %ymm13); vpshufb %ymm14, %ymm0, %ymm11; inc_le128(%ymm0, %ymm15, %ymm13); inc_le128(%ymm0, %ymm15, %ymm13); vpshufb %ymm14, %ymm0, %ymm10; inc_le128(%ymm0, %ymm15, %ymm13); inc_le128(%ymm0, %ymm15, %ymm13); vpshufb %ymm14, %ymm0, %ymm9; inc_le128(%ymm0, 
%ymm15, %ymm13); inc_le128(%ymm0, %ymm15, %ymm13); vpshufb %ymm14, %ymm0, %ymm8; inc_le128(%ymm0, %ymm15, %ymm13); inc_le128(%ymm0, %ymm15, %ymm13); vpshufb %ymm14, %ymm0, %ymm7; inc_le128(%ymm0, %ymm15, %ymm13); inc_le128(%ymm0, %ymm15, %ymm13); vpshufb %ymm14, %ymm0, %ymm6; inc_le128(%ymm0, %ymm15, %ymm13); inc_le128(%ymm0, %ymm15, %ymm13); vpshufb %ymm14, %ymm0, %ymm5; inc_le128(%ymm0, %ymm15, %ymm13); inc_le128(%ymm0, %ymm15, %ymm13); vpshufb %ymm14, %ymm0, %ymm4; inc_le128(%ymm0, %ymm15, %ymm13); inc_le128(%ymm0, %ymm15, %ymm13); vpshufb %ymm14, %ymm0, %ymm3; inc_le128(%ymm0, %ymm15, %ymm13); inc_le128(%ymm0, %ymm15, %ymm13); vpshufb %ymm14, %ymm0, %ymm2; inc_le128(%ymm0, %ymm15, %ymm13); inc_le128(%ymm0, %ymm15, %ymm13); vpshufb %ymm14, %ymm0, %ymm1; inc_le128(%ymm0, %ymm15, %ymm13); inc_le128(%ymm0, %ymm15, %ymm13); vextracti128 $1, %ymm0, %xmm13; vpshufb %ymm14, %ymm0, %ymm0; inc_le128(%xmm13, %xmm15, %xmm14); vpshufb .Lbswap128_mask rRIP, %xmm13, %xmm13; vmovdqu %xmm13, (%rcx); -.align 4 +.align 8 .Lload_ctr_done: /* inpack32_pre: */ vpbroadcastq (key_table)(CTX), %ymm15; vpshufb .Lpack_bswap rRIP, %ymm15, %ymm15; vpxor %ymm0, %ymm15, %ymm0; vpxor %ymm1, %ymm15, %ymm1; vpxor %ymm2, %ymm15, %ymm2; vpxor %ymm3, %ymm15, %ymm3; vpxor %ymm4, %ymm15, %ymm4; vpxor %ymm5, %ymm15, %ymm5; vpxor %ymm6, %ymm15, %ymm6; vpxor %ymm7, %ymm15, %ymm7; vpxor %ymm8, %ymm15, %ymm8; vpxor %ymm9, %ymm15, %ymm9; vpxor %ymm10, %ymm15, %ymm10; vpxor %ymm11, %ymm15, %ymm11; vpxor %ymm12, %ymm15, %ymm12; vpxor 13 * 32(%rax), %ymm15, %ymm13; vpxor 14 * 32(%rax), %ymm15, %ymm14; vpxor 15 * 32(%rax), %ymm15, %ymm15; call FUNC_NAME(enc_blk32); vpxor 0 * 32(%rdx), %ymm7, %ymm7; vpxor 1 * 32(%rdx), %ymm6, %ymm6; vpxor 2 * 32(%rdx), %ymm5, %ymm5; vpxor 3 * 32(%rdx), %ymm4, %ymm4; vpxor 4 * 32(%rdx), %ymm3, %ymm3; vpxor 5 * 32(%rdx), %ymm2, %ymm2; vpxor 6 * 32(%rdx), %ymm1, %ymm1; vpxor 7 * 32(%rdx), %ymm0, %ymm0; vpxor 8 * 32(%rdx), %ymm15, %ymm15; vpxor 9 * 32(%rdx), %ymm14, %ymm14; vpxor 10 * 32(%rdx), %ymm13, %ymm13; vpxor 11 * 32(%rdx), %ymm12, %ymm12; vpxor 12 * 32(%rdx), %ymm11, %ymm11; vpxor 13 * 32(%rdx), %ymm10, %ymm10; vpxor 14 * 32(%rdx), %ymm9, %ymm9; vpxor 15 * 32(%rdx), %ymm8, %ymm8; write_output(%ymm7, %ymm6, %ymm5, %ymm4, %ymm3, %ymm2, %ymm1, %ymm0, %ymm15, %ymm14, %ymm13, %ymm12, %ymm11, %ymm10, %ymm9, %ymm8, %rsi); vzeroall; leave; CFI_LEAVE(); ret_spec_stop; + +.align 8 +.Lctr_byteadd_full_ctr_carry: + movq 8(%rcx), %r11; + movq (%rcx), %r10; + bswapq %r11; + bswapq %r10; + addq $32, %r11; + adcq $0, %r10; + bswapq %r11; + bswapq %r10; + movq %r11, 8(%rcx); + movq %r10, (%rcx); + jmp .Lctr_byteadd_ymm; +.align 8 +.Lctr_byteadd: + vbroadcasti128 (%rcx), %ymm8; + je .Lctr_byteadd_full_ctr_carry; + addb $32, 15(%rcx); +.Lctr_byteadd_ymm: + vpaddb .Lbige_addb_16_16 rRIP, %ymm8, %ymm0; + vpaddb .Lbige_addb_0_1 rRIP, %ymm8, %ymm15; + vpaddb .Lbige_addb_2_3 rRIP, %ymm8, %ymm14; + vmovdqu %ymm15, 15 * 32(%rax); + vpaddb .Lbige_addb_4_5 rRIP, %ymm8, %ymm13; + vmovdqu %ymm14, 14 * 32(%rax); + vpaddb .Lbige_addb_6_7 rRIP, %ymm8, %ymm12; + vmovdqu %ymm13, 13 * 32(%rax); + vpaddb .Lbige_addb_8_9 rRIP, %ymm8, %ymm11; + vpaddb .Lbige_addb_10_11 rRIP, %ymm8, %ymm10; + vpaddb .Lbige_addb_12_13 rRIP, %ymm8, %ymm9; + vpaddb .Lbige_addb_14_15 rRIP, %ymm8, %ymm8; + vpaddb .Lbige_addb_0_1 rRIP, %ymm0, %ymm7; + vpaddb .Lbige_addb_2_3 rRIP, %ymm0, %ymm6; + vpaddb .Lbige_addb_4_5 rRIP, %ymm0, %ymm5; + vpaddb .Lbige_addb_6_7 rRIP, %ymm0, %ymm4; + vpaddb .Lbige_addb_8_9 rRIP, %ymm0, %ymm3; + vpaddb .Lbige_addb_10_11 rRIP, 
%ymm0, %ymm2; + vpaddb .Lbige_addb_12_13 rRIP, %ymm0, %ymm1; + vpaddb .Lbige_addb_14_15 rRIP, %ymm0, %ymm0; + + jmp .Lload_ctr_done; CFI_ENDPROC(); ELF(.size FUNC_NAME(ctr_enc),.-FUNC_NAME(ctr_enc);) .align 16 .globl FUNC_NAME(cbc_dec) ELF(.type FUNC_NAME(cbc_dec),@function;) FUNC_NAME(cbc_dec): /* input: * %rdi: ctx, CTX * %rsi: dst (32 blocks) * %rdx: src (32 blocks) * %rcx: iv */ CFI_STARTPROC(); pushq %rbp; CFI_PUSH(%rbp); movq %rsp, %rbp; CFI_DEF_CFA_REGISTER(%rbp); movq %rcx, %r9; cmpl $128, key_bitlength(CTX); movl $32, %r8d; movl $24, %eax; cmovel %eax, %r8d; /* max */ subq $(16 * 32), %rsp; andq $~63, %rsp; movq %rsp, %rax; inpack32_pre(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, %rdx, (key_table)(CTX, %r8, 8)); call FUNC_NAME(dec_blk32); /* XOR output with IV */ vmovdqu %ymm8, (%rax); vmovdqu (%r9), %xmm8; vinserti128 $1, (%rdx), %ymm8, %ymm8; vpxor %ymm8, %ymm7, %ymm7; vmovdqu (%rax), %ymm8; vpxor (0 * 32 + 16)(%rdx), %ymm6, %ymm6; vpxor (1 * 32 + 16)(%rdx), %ymm5, %ymm5; vpxor (2 * 32 + 16)(%rdx), %ymm4, %ymm4; vpxor (3 * 32 + 16)(%rdx), %ymm3, %ymm3; vpxor (4 * 32 + 16)(%rdx), %ymm2, %ymm2; vpxor (5 * 32 + 16)(%rdx), %ymm1, %ymm1; vpxor (6 * 32 + 16)(%rdx), %ymm0, %ymm0; vpxor (7 * 32 + 16)(%rdx), %ymm15, %ymm15; vpxor (8 * 32 + 16)(%rdx), %ymm14, %ymm14; vpxor (9 * 32 + 16)(%rdx), %ymm13, %ymm13; vpxor (10 * 32 + 16)(%rdx), %ymm12, %ymm12; vpxor (11 * 32 + 16)(%rdx), %ymm11, %ymm11; vpxor (12 * 32 + 16)(%rdx), %ymm10, %ymm10; vpxor (13 * 32 + 16)(%rdx), %ymm9, %ymm9; vpxor (14 * 32 + 16)(%rdx), %ymm8, %ymm8; movq (15 * 32 + 16 + 0)(%rdx), %rax; movq (15 * 32 + 16 + 8)(%rdx), %rcx; write_output(%ymm7, %ymm6, %ymm5, %ymm4, %ymm3, %ymm2, %ymm1, %ymm0, %ymm15, %ymm14, %ymm13, %ymm12, %ymm11, %ymm10, %ymm9, %ymm8, %rsi); /* store new IV */ movq %rax, (0)(%r9); movq %rcx, (8)(%r9); vzeroall; leave; CFI_LEAVE(); ret_spec_stop; CFI_ENDPROC(); ELF(.size FUNC_NAME(cbc_dec),.-FUNC_NAME(cbc_dec);) .align 16 .globl FUNC_NAME(cfb_dec) ELF(.type FUNC_NAME(cfb_dec),@function;) FUNC_NAME(cfb_dec): /* input: * %rdi: ctx, CTX * %rsi: dst (32 blocks) * %rdx: src (32 blocks) * %rcx: iv */ CFI_STARTPROC(); pushq %rbp; CFI_PUSH(%rbp); movq %rsp, %rbp; CFI_DEF_CFA_REGISTER(%rbp); cmpl $128, key_bitlength(CTX); movl $32, %r8d; movl $24, %eax; cmovel %eax, %r8d; /* max */ subq $(16 * 32), %rsp; andq $~63, %rsp; movq %rsp, %rax; /* inpack32_pre: */ vpbroadcastq (key_table)(CTX), %ymm0; vpshufb .Lpack_bswap rRIP, %ymm0, %ymm0; vmovdqu (%rcx), %xmm15; vinserti128 $1, (%rdx), %ymm15, %ymm15; vpxor %ymm15, %ymm0, %ymm15; vmovdqu (15 * 32 + 16)(%rdx), %xmm1; vmovdqu %xmm1, (%rcx); /* store new IV */ vpxor (0 * 32 + 16)(%rdx), %ymm0, %ymm14; vpxor (1 * 32 + 16)(%rdx), %ymm0, %ymm13; vpxor (2 * 32 + 16)(%rdx), %ymm0, %ymm12; vpxor (3 * 32 + 16)(%rdx), %ymm0, %ymm11; vpxor (4 * 32 + 16)(%rdx), %ymm0, %ymm10; vpxor (5 * 32 + 16)(%rdx), %ymm0, %ymm9; vpxor (6 * 32 + 16)(%rdx), %ymm0, %ymm8; vpxor (7 * 32 + 16)(%rdx), %ymm0, %ymm7; vpxor (8 * 32 + 16)(%rdx), %ymm0, %ymm6; vpxor (9 * 32 + 16)(%rdx), %ymm0, %ymm5; vpxor (10 * 32 + 16)(%rdx), %ymm0, %ymm4; vpxor (11 * 32 + 16)(%rdx), %ymm0, %ymm3; vpxor (12 * 32 + 16)(%rdx), %ymm0, %ymm2; vpxor (13 * 32 + 16)(%rdx), %ymm0, %ymm1; vpxor (14 * 32 + 16)(%rdx), %ymm0, %ymm0; call FUNC_NAME(enc_blk32); vpxor 0 * 32(%rdx), %ymm7, %ymm7; vpxor 1 * 32(%rdx), %ymm6, %ymm6; vpxor 2 * 32(%rdx), %ymm5, %ymm5; vpxor 3 * 32(%rdx), %ymm4, %ymm4; vpxor 4 * 32(%rdx), %ymm3, %ymm3; vpxor 5 * 32(%rdx), %ymm2, 
%ymm2; vpxor 6 * 32(%rdx), %ymm1, %ymm1; vpxor 7 * 32(%rdx), %ymm0, %ymm0; vpxor 8 * 32(%rdx), %ymm15, %ymm15; vpxor 9 * 32(%rdx), %ymm14, %ymm14; vpxor 10 * 32(%rdx), %ymm13, %ymm13; vpxor 11 * 32(%rdx), %ymm12, %ymm12; vpxor 12 * 32(%rdx), %ymm11, %ymm11; vpxor 13 * 32(%rdx), %ymm10, %ymm10; vpxor 14 * 32(%rdx), %ymm9, %ymm9; vpxor 15 * 32(%rdx), %ymm8, %ymm8; write_output(%ymm7, %ymm6, %ymm5, %ymm4, %ymm3, %ymm2, %ymm1, %ymm0, %ymm15, %ymm14, %ymm13, %ymm12, %ymm11, %ymm10, %ymm9, %ymm8, %rsi); vzeroall; leave; CFI_LEAVE(); ret_spec_stop; CFI_ENDPROC(); ELF(.size FUNC_NAME(cfb_dec),.-FUNC_NAME(cfb_dec);) .align 16 .globl FUNC_NAME(ocb_enc) ELF(.type FUNC_NAME(ocb_enc),@function;) FUNC_NAME(ocb_enc): /* input: * %rdi: ctx, CTX * %rsi: dst (32 blocks) * %rdx: src (32 blocks) * %rcx: offset * %r8 : checksum * %r9 : L pointers (void *L[32]) */ CFI_STARTPROC(); pushq %rbp; CFI_PUSH(%rbp); movq %rsp, %rbp; CFI_DEF_CFA_REGISTER(%rbp); subq $(16 * 32 + 4 * 8), %rsp; andq $~63, %rsp; movq %rsp, %rax; movq %r10, (16 * 32 + 0 * 8)(%rsp); movq %r11, (16 * 32 + 1 * 8)(%rsp); movq %r12, (16 * 32 + 2 * 8)(%rsp); movq %r13, (16 * 32 + 3 * 8)(%rsp); CFI_REG_ON_STACK(r10, 16 * 32 + 0 * 8); CFI_REG_ON_STACK(r11, 16 * 32 + 1 * 8); CFI_REG_ON_STACK(r12, 16 * 32 + 2 * 8); CFI_REG_ON_STACK(r13, 16 * 32 + 3 * 8); vmovdqu (%rcx), %xmm14; vmovdqu (%r8), %xmm13; /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ /* Checksum_i = Checksum_{i-1} xor P_i */ /* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i) */ #define OCB_INPUT(n, l0reg, l1reg, yreg) \ vmovdqu (n * 32)(%rdx), yreg; \ vpxor (l0reg), %xmm14, %xmm15; \ vpxor (l1reg), %xmm15, %xmm14; \ vinserti128 $1, %xmm14, %ymm15, %ymm15; \ vpxor yreg, %ymm13, %ymm13; \ vpxor yreg, %ymm15, yreg; \ vmovdqu %ymm15, (n * 32)(%rsi); movq (0 * 8)(%r9), %r10; movq (1 * 8)(%r9), %r11; movq (2 * 8)(%r9), %r12; movq (3 * 8)(%r9), %r13; OCB_INPUT(0, %r10, %r11, %ymm0); vmovdqu %ymm0, (15 * 32)(%rax); OCB_INPUT(1, %r12, %r13, %ymm0); vmovdqu %ymm0, (14 * 32)(%rax); movq (4 * 8)(%r9), %r10; movq (5 * 8)(%r9), %r11; movq (6 * 8)(%r9), %r12; movq (7 * 8)(%r9), %r13; OCB_INPUT(2, %r10, %r11, %ymm0); vmovdqu %ymm0, (13 * 32)(%rax); OCB_INPUT(3, %r12, %r13, %ymm12); movq (8 * 8)(%r9), %r10; movq (9 * 8)(%r9), %r11; movq (10 * 8)(%r9), %r12; movq (11 * 8)(%r9), %r13; OCB_INPUT(4, %r10, %r11, %ymm11); OCB_INPUT(5, %r12, %r13, %ymm10); movq (12 * 8)(%r9), %r10; movq (13 * 8)(%r9), %r11; movq (14 * 8)(%r9), %r12; movq (15 * 8)(%r9), %r13; OCB_INPUT(6, %r10, %r11, %ymm9); OCB_INPUT(7, %r12, %r13, %ymm8); movq (16 * 8)(%r9), %r10; movq (17 * 8)(%r9), %r11; movq (18 * 8)(%r9), %r12; movq (19 * 8)(%r9), %r13; OCB_INPUT(8, %r10, %r11, %ymm7); OCB_INPUT(9, %r12, %r13, %ymm6); movq (20 * 8)(%r9), %r10; movq (21 * 8)(%r9), %r11; movq (22 * 8)(%r9), %r12; movq (23 * 8)(%r9), %r13; OCB_INPUT(10, %r10, %r11, %ymm5); OCB_INPUT(11, %r12, %r13, %ymm4); movq (24 * 8)(%r9), %r10; movq (25 * 8)(%r9), %r11; movq (26 * 8)(%r9), %r12; movq (27 * 8)(%r9), %r13; OCB_INPUT(12, %r10, %r11, %ymm3); OCB_INPUT(13, %r12, %r13, %ymm2); movq (28 * 8)(%r9), %r10; movq (29 * 8)(%r9), %r11; movq (30 * 8)(%r9), %r12; movq (31 * 8)(%r9), %r13; OCB_INPUT(14, %r10, %r11, %ymm1); OCB_INPUT(15, %r12, %r13, %ymm0); #undef OCB_INPUT vextracti128 $1, %ymm13, %xmm15; vmovdqu %xmm14, (%rcx); vpxor %xmm13, %xmm15, %xmm15; vmovdqu %xmm15, (%r8); cmpl $128, key_bitlength(CTX); movl $32, %r8d; movl $24, %r10d; cmovel %r10d, %r8d; /* max */ /* inpack32_pre: */ vpbroadcastq (key_table)(CTX), %ymm15; vpshufb .Lpack_bswap rRIP, %ymm15, %ymm15; 
vpxor %ymm0, %ymm15, %ymm0; vpxor %ymm1, %ymm15, %ymm1; vpxor %ymm2, %ymm15, %ymm2; vpxor %ymm3, %ymm15, %ymm3; vpxor %ymm4, %ymm15, %ymm4; vpxor %ymm5, %ymm15, %ymm5; vpxor %ymm6, %ymm15, %ymm6; vpxor %ymm7, %ymm15, %ymm7; vpxor %ymm8, %ymm15, %ymm8; vpxor %ymm9, %ymm15, %ymm9; vpxor %ymm10, %ymm15, %ymm10; vpxor %ymm11, %ymm15, %ymm11; vpxor %ymm12, %ymm15, %ymm12; vpxor 13 * 32(%rax), %ymm15, %ymm13; vpxor 14 * 32(%rax), %ymm15, %ymm14; vpxor 15 * 32(%rax), %ymm15, %ymm15; call FUNC_NAME(enc_blk32); vpxor 0 * 32(%rsi), %ymm7, %ymm7; vpxor 1 * 32(%rsi), %ymm6, %ymm6; vpxor 2 * 32(%rsi), %ymm5, %ymm5; vpxor 3 * 32(%rsi), %ymm4, %ymm4; vpxor 4 * 32(%rsi), %ymm3, %ymm3; vpxor 5 * 32(%rsi), %ymm2, %ymm2; vpxor 6 * 32(%rsi), %ymm1, %ymm1; vpxor 7 * 32(%rsi), %ymm0, %ymm0; vpxor 8 * 32(%rsi), %ymm15, %ymm15; vpxor 9 * 32(%rsi), %ymm14, %ymm14; vpxor 10 * 32(%rsi), %ymm13, %ymm13; vpxor 11 * 32(%rsi), %ymm12, %ymm12; vpxor 12 * 32(%rsi), %ymm11, %ymm11; vpxor 13 * 32(%rsi), %ymm10, %ymm10; vpxor 14 * 32(%rsi), %ymm9, %ymm9; vpxor 15 * 32(%rsi), %ymm8, %ymm8; write_output(%ymm7, %ymm6, %ymm5, %ymm4, %ymm3, %ymm2, %ymm1, %ymm0, %ymm15, %ymm14, %ymm13, %ymm12, %ymm11, %ymm10, %ymm9, %ymm8, %rsi); vzeroall; movq (16 * 32 + 0 * 8)(%rsp), %r10; movq (16 * 32 + 1 * 8)(%rsp), %r11; movq (16 * 32 + 2 * 8)(%rsp), %r12; movq (16 * 32 + 3 * 8)(%rsp), %r13; CFI_RESTORE(%r10); CFI_RESTORE(%r11); CFI_RESTORE(%r12); CFI_RESTORE(%r13); leave; CFI_LEAVE(); ret_spec_stop; CFI_ENDPROC(); ELF(.size FUNC_NAME(ocb_enc),.-FUNC_NAME(ocb_enc);) .align 16 .globl FUNC_NAME(ocb_dec) ELF(.type FUNC_NAME(ocb_dec),@function;) FUNC_NAME(ocb_dec): /* input: * %rdi: ctx, CTX * %rsi: dst (32 blocks) * %rdx: src (32 blocks) * %rcx: offset * %r8 : checksum * %r9 : L pointers (void *L[32]) */ CFI_STARTPROC(); pushq %rbp; CFI_PUSH(%rbp); movq %rsp, %rbp; CFI_DEF_CFA_REGISTER(%rbp); subq $(16 * 32 + 4 * 8), %rsp; andq $~63, %rsp; movq %rsp, %rax; movq %r10, (16 * 32 + 0 * 8)(%rsp); movq %r11, (16 * 32 + 1 * 8)(%rsp); movq %r12, (16 * 32 + 2 * 8)(%rsp); movq %r13, (16 * 32 + 3 * 8)(%rsp); CFI_REG_ON_STACK(r10, 16 * 32 + 0 * 8); CFI_REG_ON_STACK(r11, 16 * 32 + 1 * 8); CFI_REG_ON_STACK(r12, 16 * 32 + 2 * 8); CFI_REG_ON_STACK(r13, 16 * 32 + 3 * 8); vmovdqu (%rcx), %xmm14; /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ /* P_i = Offset_i xor DECIPHER(K, C_i xor Offset_i) */ #define OCB_INPUT(n, l0reg, l1reg, yreg) \ vmovdqu (n * 32)(%rdx), yreg; \ vpxor (l0reg), %xmm14, %xmm15; \ vpxor (l1reg), %xmm15, %xmm14; \ vinserti128 $1, %xmm14, %ymm15, %ymm15; \ vpxor yreg, %ymm15, yreg; \ vmovdqu %ymm15, (n * 32)(%rsi); movq (0 * 8)(%r9), %r10; movq (1 * 8)(%r9), %r11; movq (2 * 8)(%r9), %r12; movq (3 * 8)(%r9), %r13; OCB_INPUT(0, %r10, %r11, %ymm0); vmovdqu %ymm0, (15 * 32)(%rax); OCB_INPUT(1, %r12, %r13, %ymm0); vmovdqu %ymm0, (14 * 32)(%rax); movq (4 * 8)(%r9), %r10; movq (5 * 8)(%r9), %r11; movq (6 * 8)(%r9), %r12; movq (7 * 8)(%r9), %r13; OCB_INPUT(2, %r10, %r11, %ymm13); OCB_INPUT(3, %r12, %r13, %ymm12); movq (8 * 8)(%r9), %r10; movq (9 * 8)(%r9), %r11; movq (10 * 8)(%r9), %r12; movq (11 * 8)(%r9), %r13; OCB_INPUT(4, %r10, %r11, %ymm11); OCB_INPUT(5, %r12, %r13, %ymm10); movq (12 * 8)(%r9), %r10; movq (13 * 8)(%r9), %r11; movq (14 * 8)(%r9), %r12; movq (15 * 8)(%r9), %r13; OCB_INPUT(6, %r10, %r11, %ymm9); OCB_INPUT(7, %r12, %r13, %ymm8); movq (16 * 8)(%r9), %r10; movq (17 * 8)(%r9), %r11; movq (18 * 8)(%r9), %r12; movq (19 * 8)(%r9), %r13; OCB_INPUT(8, %r10, %r11, %ymm7); OCB_INPUT(9, %r12, %r13, %ymm6); movq (20 * 8)(%r9), %r10; movq (21 * 
8)(%r9), %r11; movq (22 * 8)(%r9), %r12; movq (23 * 8)(%r9), %r13; OCB_INPUT(10, %r10, %r11, %ymm5); OCB_INPUT(11, %r12, %r13, %ymm4); movq (24 * 8)(%r9), %r10; movq (25 * 8)(%r9), %r11; movq (26 * 8)(%r9), %r12; movq (27 * 8)(%r9), %r13; OCB_INPUT(12, %r10, %r11, %ymm3); OCB_INPUT(13, %r12, %r13, %ymm2); movq (28 * 8)(%r9), %r10; movq (29 * 8)(%r9), %r11; movq (30 * 8)(%r9), %r12; movq (31 * 8)(%r9), %r13; OCB_INPUT(14, %r10, %r11, %ymm1); OCB_INPUT(15, %r12, %r13, %ymm0); #undef OCB_INPUT vmovdqu %xmm14, (%rcx); movq %r8, %r10; cmpl $128, key_bitlength(CTX); movl $32, %r8d; movl $24, %r9d; cmovel %r9d, %r8d; /* max */ /* inpack32_pre: */ vpbroadcastq (key_table)(CTX, %r8, 8), %ymm15; vpshufb .Lpack_bswap rRIP, %ymm15, %ymm15; vpxor %ymm0, %ymm15, %ymm0; vpxor %ymm1, %ymm15, %ymm1; vpxor %ymm2, %ymm15, %ymm2; vpxor %ymm3, %ymm15, %ymm3; vpxor %ymm4, %ymm15, %ymm4; vpxor %ymm5, %ymm15, %ymm5; vpxor %ymm6, %ymm15, %ymm6; vpxor %ymm7, %ymm15, %ymm7; vpxor %ymm8, %ymm15, %ymm8; vpxor %ymm9, %ymm15, %ymm9; vpxor %ymm10, %ymm15, %ymm10; vpxor %ymm11, %ymm15, %ymm11; vpxor %ymm12, %ymm15, %ymm12; vpxor %ymm13, %ymm15, %ymm13; vpxor 14 * 32(%rax), %ymm15, %ymm14; vpxor 15 * 32(%rax), %ymm15, %ymm15; call FUNC_NAME(dec_blk32); vpxor 0 * 32(%rsi), %ymm7, %ymm7; vpxor 1 * 32(%rsi), %ymm6, %ymm6; vpxor 2 * 32(%rsi), %ymm5, %ymm5; vpxor 3 * 32(%rsi), %ymm4, %ymm4; vpxor 4 * 32(%rsi), %ymm3, %ymm3; vpxor 5 * 32(%rsi), %ymm2, %ymm2; vpxor 6 * 32(%rsi), %ymm1, %ymm1; vpxor 7 * 32(%rsi), %ymm0, %ymm0; vmovdqu %ymm7, (7 * 32)(%rax); vmovdqu %ymm6, (6 * 32)(%rax); vpxor 8 * 32(%rsi), %ymm15, %ymm15; vpxor 9 * 32(%rsi), %ymm14, %ymm14; vpxor 10 * 32(%rsi), %ymm13, %ymm13; vpxor 11 * 32(%rsi), %ymm12, %ymm12; vpxor 12 * 32(%rsi), %ymm11, %ymm11; vpxor 13 * 32(%rsi), %ymm10, %ymm10; vpxor 14 * 32(%rsi), %ymm9, %ymm9; vpxor 15 * 32(%rsi), %ymm8, %ymm8; /* Checksum_i = Checksum_{i-1} xor P_i */ vpxor %ymm5, %ymm7, %ymm7; vpxor %ymm4, %ymm6, %ymm6; vpxor %ymm3, %ymm7, %ymm7; vpxor %ymm2, %ymm6, %ymm6; vpxor %ymm1, %ymm7, %ymm7; vpxor %ymm0, %ymm6, %ymm6; vpxor %ymm15, %ymm7, %ymm7; vpxor %ymm14, %ymm6, %ymm6; vpxor %ymm13, %ymm7, %ymm7; vpxor %ymm12, %ymm6, %ymm6; vpxor %ymm11, %ymm7, %ymm7; vpxor %ymm10, %ymm6, %ymm6; vpxor %ymm9, %ymm7, %ymm7; vpxor %ymm8, %ymm6, %ymm6; vpxor %ymm7, %ymm6, %ymm7; vextracti128 $1, %ymm7, %xmm6; vpxor %xmm6, %xmm7, %xmm7; vpxor (%r10), %xmm7, %xmm7; vmovdqu %xmm7, (%r10); vmovdqu 7 * 32(%rax), %ymm7; vmovdqu 6 * 32(%rax), %ymm6; write_output(%ymm7, %ymm6, %ymm5, %ymm4, %ymm3, %ymm2, %ymm1, %ymm0, %ymm15, %ymm14, %ymm13, %ymm12, %ymm11, %ymm10, %ymm9, %ymm8, %rsi); vzeroall; movq (16 * 32 + 0 * 8)(%rsp), %r10; movq (16 * 32 + 1 * 8)(%rsp), %r11; movq (16 * 32 + 2 * 8)(%rsp), %r12; movq (16 * 32 + 3 * 8)(%rsp), %r13; CFI_RESTORE(%r10); CFI_RESTORE(%r11); CFI_RESTORE(%r12); CFI_RESTORE(%r13); leave; CFI_LEAVE(); ret_spec_stop; CFI_ENDPROC(); ELF(.size FUNC_NAME(ocb_dec),.-FUNC_NAME(ocb_dec);) .align 16 .globl FUNC_NAME(ocb_auth) ELF(.type FUNC_NAME(ocb_auth),@function;) FUNC_NAME(ocb_auth): /* input: * %rdi: ctx, CTX * %rsi: abuf (16 blocks) * %rdx: offset * %rcx: checksum * %r8 : L pointers (void *L[16]) */ CFI_STARTPROC(); pushq %rbp; CFI_PUSH(%rbp); movq %rsp, %rbp; CFI_DEF_CFA_REGISTER(%rbp); subq $(16 * 32 + 4 * 8), %rsp; andq $~63, %rsp; movq %rsp, %rax; movq %r10, (16 * 32 + 0 * 8)(%rsp); movq %r11, (16 * 32 + 1 * 8)(%rsp); movq %r12, (16 * 32 + 2 * 8)(%rsp); movq %r13, (16 * 32 + 3 * 8)(%rsp); CFI_REG_ON_STACK(r10, 16 * 32 + 0 * 8); CFI_REG_ON_STACK(r11, 16 * 32 + 1 * 8); 
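/* Frame layout for ocb_auth: a 16 * 32-byte block scratch area at the
 * 64-byte aligned stack bottom (%rax), followed by the four saved GPRs;
 * %r10..%r13 are reused below for walking the L-pointer table in %r8. */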
CFI_REG_ON_STACK(r12, 16 * 32 + 2 * 8); CFI_REG_ON_STACK(r13, 16 * 32 + 3 * 8); vmovdqu (%rdx), %xmm14; /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ /* Checksum_i = Checksum_{i-1} xor P_i */ /* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i) */ #define OCB_INPUT(n, l0reg, l1reg, yreg) \ vmovdqu (n * 32)(%rsi), yreg; \ vpxor (l0reg), %xmm14, %xmm15; \ vpxor (l1reg), %xmm15, %xmm14; \ vinserti128 $1, %xmm14, %ymm15, %ymm15; \ vpxor yreg, %ymm15, yreg; movq (0 * 8)(%r8), %r10; movq (1 * 8)(%r8), %r11; movq (2 * 8)(%r8), %r12; movq (3 * 8)(%r8), %r13; OCB_INPUT(0, %r10, %r11, %ymm0); vmovdqu %ymm0, (15 * 32)(%rax); OCB_INPUT(1, %r12, %r13, %ymm0); vmovdqu %ymm0, (14 * 32)(%rax); movq (4 * 8)(%r8), %r10; movq (5 * 8)(%r8), %r11; movq (6 * 8)(%r8), %r12; movq (7 * 8)(%r8), %r13; OCB_INPUT(2, %r10, %r11, %ymm13); OCB_INPUT(3, %r12, %r13, %ymm12); movq (8 * 8)(%r8), %r10; movq (9 * 8)(%r8), %r11; movq (10 * 8)(%r8), %r12; movq (11 * 8)(%r8), %r13; OCB_INPUT(4, %r10, %r11, %ymm11); OCB_INPUT(5, %r12, %r13, %ymm10); movq (12 * 8)(%r8), %r10; movq (13 * 8)(%r8), %r11; movq (14 * 8)(%r8), %r12; movq (15 * 8)(%r8), %r13; OCB_INPUT(6, %r10, %r11, %ymm9); OCB_INPUT(7, %r12, %r13, %ymm8); movq (16 * 8)(%r8), %r10; movq (17 * 8)(%r8), %r11; movq (18 * 8)(%r8), %r12; movq (19 * 8)(%r8), %r13; OCB_INPUT(8, %r10, %r11, %ymm7); OCB_INPUT(9, %r12, %r13, %ymm6); movq (20 * 8)(%r8), %r10; movq (21 * 8)(%r8), %r11; movq (22 * 8)(%r8), %r12; movq (23 * 8)(%r8), %r13; OCB_INPUT(10, %r10, %r11, %ymm5); OCB_INPUT(11, %r12, %r13, %ymm4); movq (24 * 8)(%r8), %r10; movq (25 * 8)(%r8), %r11; movq (26 * 8)(%r8), %r12; movq (27 * 8)(%r8), %r13; OCB_INPUT(12, %r10, %r11, %ymm3); OCB_INPUT(13, %r12, %r13, %ymm2); movq (28 * 8)(%r8), %r10; movq (29 * 8)(%r8), %r11; movq (30 * 8)(%r8), %r12; movq (31 * 8)(%r8), %r13; OCB_INPUT(14, %r10, %r11, %ymm1); OCB_INPUT(15, %r12, %r13, %ymm0); #undef OCB_INPUT vmovdqu %xmm14, (%rdx); cmpl $128, key_bitlength(CTX); movl $32, %r8d; movl $24, %r10d; cmovel %r10d, %r8d; /* max */ movq %rcx, %r10; /* inpack32_pre: */ vpbroadcastq (key_table)(CTX), %ymm15; vpshufb .Lpack_bswap rRIP, %ymm15, %ymm15; vpxor %ymm0, %ymm15, %ymm0; vpxor %ymm1, %ymm15, %ymm1; vpxor %ymm2, %ymm15, %ymm2; vpxor %ymm3, %ymm15, %ymm3; vpxor %ymm4, %ymm15, %ymm4; vpxor %ymm5, %ymm15, %ymm5; vpxor %ymm6, %ymm15, %ymm6; vpxor %ymm7, %ymm15, %ymm7; vpxor %ymm8, %ymm15, %ymm8; vpxor %ymm9, %ymm15, %ymm9; vpxor %ymm10, %ymm15, %ymm10; vpxor %ymm11, %ymm15, %ymm11; vpxor %ymm12, %ymm15, %ymm12; vpxor %ymm13, %ymm15, %ymm13; vpxor 14 * 32(%rax), %ymm15, %ymm14; vpxor 15 * 32(%rax), %ymm15, %ymm15; call FUNC_NAME(enc_blk32); vpxor %ymm7, %ymm6, %ymm6; vpxor %ymm5, %ymm4, %ymm4; vpxor %ymm3, %ymm2, %ymm2; vpxor %ymm1, %ymm0, %ymm0; vpxor %ymm15, %ymm14, %ymm14; vpxor %ymm13, %ymm12, %ymm12; vpxor %ymm11, %ymm10, %ymm10; vpxor %ymm9, %ymm8, %ymm8; vpxor %ymm6, %ymm4, %ymm4; vpxor %ymm2, %ymm0, %ymm0; vpxor %ymm14, %ymm12, %ymm12; vpxor %ymm10, %ymm8, %ymm8; vpxor %ymm4, %ymm0, %ymm0; vpxor %ymm12, %ymm8, %ymm8; vpxor %ymm0, %ymm8, %ymm0; vextracti128 $1, %ymm0, %xmm1; vpxor (%r10), %xmm0, %xmm0; vpxor %xmm0, %xmm1, %xmm0; vmovdqu %xmm0, (%r10); vzeroall; movq (16 * 32 + 0 * 8)(%rsp), %r10; movq (16 * 32 + 1 * 8)(%rsp), %r11; movq (16 * 32 + 2 * 8)(%rsp), %r12; movq (16 * 32 + 3 * 8)(%rsp), %r13; CFI_RESTORE(%r10); CFI_RESTORE(%r11); CFI_RESTORE(%r12); CFI_RESTORE(%r13); leave; CFI_LEAVE(); ret_spec_stop; CFI_ENDPROC(); ELF(.size FUNC_NAME(ocb_auth),.-FUNC_NAME(ocb_auth);) .align 16 .globl FUNC_NAME(enc_blk1_32) ELF(.type 
FUNC_NAME(enc_blk1_32),@function;) FUNC_NAME(enc_blk1_32): /* input: * %rdi: ctx, CTX * %rsi: dst (32 blocks) * %rdx: src (32 blocks) * %ecx: nblocks (1 to 32) */ CFI_STARTPROC(); pushq %rbp; CFI_PUSH(%rbp); movq %rsp, %rbp; CFI_DEF_CFA_REGISTER(%rbp); movl %ecx, %r9d; cmpl $128, key_bitlength(CTX); movl $32, %r8d; movl $24, %eax; cmovel %eax, %r8d; /* max */ subq $(16 * 32), %rsp; andq $~63, %rsp; movq %rsp, %rax; cmpl $31, %ecx; vpxor %xmm0, %xmm0, %xmm0; ja 1f; jb 2f; vmovdqu 15 * 32(%rdx), %xmm0; jmp 2f; 1: vmovdqu 15 * 32(%rdx), %ymm0; 2: vmovdqu %ymm0, (%rax); vpbroadcastq (key_table)(CTX), %ymm0; vpshufb .Lpack_bswap rRIP, %ymm0, %ymm0; #define LOAD_INPUT(offset, ymm) \ cmpl $(1 + 2 * (offset)), %ecx; \ jb 2f; \ ja 1f; \ vmovdqu (offset) * 32(%rdx), %ymm##_x; \ vpxor %ymm0, %ymm, %ymm; \ jmp 2f; \ 1: \ vpxor (offset) * 32(%rdx), %ymm0, %ymm; LOAD_INPUT(0, ymm15); LOAD_INPUT(1, ymm14); LOAD_INPUT(2, ymm13); LOAD_INPUT(3, ymm12); LOAD_INPUT(4, ymm11); LOAD_INPUT(5, ymm10); LOAD_INPUT(6, ymm9); LOAD_INPUT(7, ymm8); LOAD_INPUT(8, ymm7); LOAD_INPUT(9, ymm6); LOAD_INPUT(10, ymm5); LOAD_INPUT(11, ymm4); LOAD_INPUT(12, ymm3); LOAD_INPUT(13, ymm2); LOAD_INPUT(14, ymm1); vpxor (%rax), %ymm0, %ymm0; 2: call FUNC_NAME(enc_blk32); #define STORE_OUTPUT(ymm, offset) \ cmpl $(1 + 2 * (offset)), %r9d; \ jb 2f; \ ja 1f; \ vmovdqu %ymm##_x, (offset) * 32(%rsi); \ jmp 2f; \ 1: \ vmovdqu %ymm, (offset) * 32(%rsi); STORE_OUTPUT(ymm7, 0); STORE_OUTPUT(ymm6, 1); STORE_OUTPUT(ymm5, 2); STORE_OUTPUT(ymm4, 3); STORE_OUTPUT(ymm3, 4); STORE_OUTPUT(ymm2, 5); STORE_OUTPUT(ymm1, 6); STORE_OUTPUT(ymm0, 7); STORE_OUTPUT(ymm15, 8); STORE_OUTPUT(ymm14, 9); STORE_OUTPUT(ymm13, 10); STORE_OUTPUT(ymm12, 11); STORE_OUTPUT(ymm11, 12); STORE_OUTPUT(ymm10, 13); STORE_OUTPUT(ymm9, 14); STORE_OUTPUT(ymm8, 15); 2: vzeroall; leave; CFI_LEAVE(); ret_spec_stop; CFI_ENDPROC(); ELF(.size FUNC_NAME(enc_blk1_32),.-FUNC_NAME(enc_blk1_32);) .align 16 .globl FUNC_NAME(dec_blk1_32) ELF(.type FUNC_NAME(dec_blk1_32),@function;) FUNC_NAME(dec_blk1_32): /* input: * %rdi: ctx, CTX * %rsi: dst (32 blocks) * %rdx: src (32 blocks) * %ecx: nblocks (1 to 32) */ CFI_STARTPROC(); pushq %rbp; CFI_PUSH(%rbp); movq %rsp, %rbp; CFI_DEF_CFA_REGISTER(%rbp); movl %ecx, %r9d; cmpl $128, key_bitlength(CTX); movl $32, %r8d; movl $24, %eax; cmovel %eax, %r8d; /* max */ subq $(16 * 32), %rsp; andq $~63, %rsp; movq %rsp, %rax; cmpl $31, %ecx; vpxor %xmm0, %xmm0, %xmm0; ja 1f; jb 2f; vmovdqu 15 * 32(%rdx), %xmm0; jmp 2f; 1: vmovdqu 15 * 32(%rdx), %ymm0; 2: vmovdqu %ymm0, (%rax); vpbroadcastq (key_table)(CTX, %r8, 8), %ymm0; vpshufb .Lpack_bswap rRIP, %ymm0, %ymm0; LOAD_INPUT(0, ymm15); LOAD_INPUT(1, ymm14); LOAD_INPUT(2, ymm13); LOAD_INPUT(3, ymm12); LOAD_INPUT(4, ymm11); LOAD_INPUT(5, ymm10); LOAD_INPUT(6, ymm9); LOAD_INPUT(7, ymm8); LOAD_INPUT(8, ymm7); LOAD_INPUT(9, ymm6); LOAD_INPUT(10, ymm5); LOAD_INPUT(11, ymm4); LOAD_INPUT(12, ymm3); LOAD_INPUT(13, ymm2); LOAD_INPUT(14, ymm1); vpxor (%rax), %ymm0, %ymm0; 2: call FUNC_NAME(dec_blk32); STORE_OUTPUT(ymm7, 0); STORE_OUTPUT(ymm6, 1); STORE_OUTPUT(ymm5, 2); STORE_OUTPUT(ymm4, 3); STORE_OUTPUT(ymm3, 4); STORE_OUTPUT(ymm2, 5); STORE_OUTPUT(ymm1, 6); STORE_OUTPUT(ymm0, 7); STORE_OUTPUT(ymm15, 8); STORE_OUTPUT(ymm14, 9); STORE_OUTPUT(ymm13, 10); STORE_OUTPUT(ymm12, 11); STORE_OUTPUT(ymm11, 12); STORE_OUTPUT(ymm10, 13); STORE_OUTPUT(ymm9, 14); STORE_OUTPUT(ymm8, 15); 2: vzeroall; leave; CFI_LEAVE(); ret_spec_stop; CFI_ENDPROC(); ELF(.size FUNC_NAME(dec_blk1_32),.-FUNC_NAME(dec_blk1_32);) #endif /* 
GCRY_CAMELLIA_AESNI_AVX2_AMD64_H */ diff --git a/cipher/camellia-gfni-avx512-amd64.S b/cipher/camellia-gfni-avx512-amd64.S index 64fef8b6..c62b7848 100644 --- a/cipher/camellia-gfni-avx512-amd64.S +++ b/cipher/camellia-gfni-avx512-amd64.S @@ -1,1574 +1,1659 @@ /* camellia-gfni-avx512-amd64.S - GFNI/AVX512 implementation of Camellia * * Copyright (C) 2022 Jussi Kivilinna * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . */ #include #ifdef __x86_64 #if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && \ defined(ENABLE_GFNI_SUPPORT) && defined(ENABLE_AVX512_SUPPORT) #include "asm-common-amd64.h" #define CAMELLIA_TABLE_BYTE_LEN 272 /* struct CAMELLIA_context: */ #define key_table 0 #define key_bitlength CAMELLIA_TABLE_BYTE_LEN /* register macros */ #define CTX %rdi #define RIO %r8 /********************************************************************** helper macros **********************************************************************/ #define zmm0_x xmm0 #define zmm1_x xmm1 #define zmm2_x xmm2 #define zmm3_x xmm3 #define zmm4_x xmm4 #define zmm5_x xmm5 #define zmm6_x xmm6 #define zmm7_x xmm7 #define zmm8_x xmm8 #define zmm9_x xmm9 #define zmm10_x xmm10 #define zmm11_x xmm11 #define zmm12_x xmm12 #define zmm13_x xmm13 #define zmm14_x xmm14 #define zmm15_x xmm15 #define zmm0_y ymm0 #define zmm1_y ymm1 #define zmm2_y ymm2 #define zmm3_y ymm3 #define zmm4_y ymm4 #define zmm5_y ymm5 #define zmm6_y ymm6 #define zmm7_y ymm7 #define zmm8_y ymm8 #define zmm9_y ymm9 #define zmm10_y ymm10 #define zmm11_y ymm11 #define zmm12_y ymm12 #define zmm13_y ymm13 #define zmm14_y ymm14 #define zmm15_y ymm15 #define mem_ab_0 %zmm16 #define mem_ab_1 %zmm17 #define mem_ab_2 %zmm31 #define mem_ab_3 %zmm18 #define mem_ab_4 %zmm19 #define mem_ab_5 %zmm20 #define mem_ab_6 %zmm21 #define mem_ab_7 %zmm22 #define mem_cd_0 %zmm23 #define mem_cd_1 %zmm24 #define mem_cd_2 %zmm30 #define mem_cd_3 %zmm25 #define mem_cd_4 %zmm26 #define mem_cd_5 %zmm27 #define mem_cd_6 %zmm28 #define mem_cd_7 %zmm29 #define clear_vec4(v0,v1,v2,v3) \ vpxord v0, v0, v0; \ vpxord v1, v1, v1; \ vpxord v2, v2, v2; \ vpxord v3, v3, v3 #define clear_zmm16_zmm31() \ clear_vec4(%ymm16, %ymm20, %ymm24, %ymm28); \ clear_vec4(%ymm17, %ymm21, %ymm25, %ymm29); \ clear_vec4(%ymm18, %ymm22, %ymm26, %ymm30); \ clear_vec4(%ymm19, %ymm23, %ymm27, %ymm31) #define clear_regs() \ kxorq %k1, %k1, %k1; \ vzeroall; \ clear_zmm16_zmm31() /********************************************************************** GFNI helper macros and constants **********************************************************************/ #define BV8(a0,a1,a2,a3,a4,a5,a6,a7) \ ( (((a0) & 1) << 0) | \ (((a1) & 1) << 1) | \ (((a2) & 1) << 2) | \ (((a3) & 1) << 3) | \ (((a4) & 1) << 4) | \ (((a5) & 1) << 5) | \ (((a6) & 1) << 6) | \ (((a7) & 1) << 7) ) #define BM8X8(l0,l1,l2,l3,l4,l5,l6,l7) \ ( ((l7) << (0 * 8)) | \ ((l6) << (1 * 8)) | \ ((l5) << (2 * 
8)) | \ ((l4) << (3 * 8)) | \ ((l3) << (4 * 8)) | \ ((l2) << (5 * 8)) | \ ((l1) << (6 * 8)) | \ ((l0) << (7 * 8)) ) /* Pre-filters and post-filters constants for Camellia sboxes s1, s2, s3 and s4. * See http://urn.fi/URN:NBN:fi:oulu-201305311409, pages 43-48. * * Pre-filters are directly from above source, "θ₁"/"θ₄". Post-filters are * combination of function "A" (AES SubBytes affine transformation) and * "ψ₁"/"ψ₂"/"ψ₃". */ /* Constant from "θ₁(x)" and "θ₄(x)" functions. */ #define pre_filter_constant_s1234 BV8(1, 0, 1, 0, 0, 0, 1, 0) /* Constant from "ψ₁(A(x))" function: */ #define post_filter_constant_s14 BV8(0, 1, 1, 1, 0, 1, 1, 0) /* Constant from "ψ₂(A(x))" function: */ #define post_filter_constant_s2 BV8(0, 0, 1, 1, 1, 0, 1, 1) /* Constant from "ψ₃(A(x))" function: */ #define post_filter_constant_s3 BV8(1, 1, 1, 0, 1, 1, 0, 0) /********************************************************************** 64-way parallel camellia **********************************************************************/ /* roundsm64 (GFNI/AVX512 version) * IN: * x0..x7: byte-sliced AB state * mem_cd: register pointer storing CD state * key: index for key material * OUT: * x0..x7: new byte-sliced CD state */ #define roundsm64(x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2, t3, t4, t5, \ t6, t7, mem_cd, key) \ /* \ * S-function with AES subbytes \ */ \ vpbroadcastq .Lpre_filter_bitmatrix_s123 rRIP, t5; \ vpbroadcastq .Lpre_filter_bitmatrix_s4 rRIP, t2; \ vpbroadcastq .Lpost_filter_bitmatrix_s14 rRIP, t4; \ vpbroadcastq .Lpost_filter_bitmatrix_s2 rRIP, t3; \ vpbroadcastq .Lpost_filter_bitmatrix_s3 rRIP, t6; \ vpxor t7##_x, t7##_x, t7##_x; \ vpbroadcastq key, t0; /* higher 64-bit duplicate ignored */ \ \ /* prefilter sboxes */ \ vgf2p8affineqb $(pre_filter_constant_s1234), t5, x0, x0; \ vgf2p8affineqb $(pre_filter_constant_s1234), t5, x7, x7; \ vgf2p8affineqb $(pre_filter_constant_s1234), t2, x3, x3; \ vgf2p8affineqb $(pre_filter_constant_s1234), t2, x6, x6; \ vgf2p8affineqb $(pre_filter_constant_s1234), t5, x2, x2; \ vgf2p8affineqb $(pre_filter_constant_s1234), t5, x5, x5; \ vgf2p8affineqb $(pre_filter_constant_s1234), t5, x1, x1; \ vgf2p8affineqb $(pre_filter_constant_s1234), t5, x4, x4; \ \ /* sbox GF8 inverse + postfilter sboxes 1 and 4 */ \ vgf2p8affineinvqb $(post_filter_constant_s14), t4, x0, x0; \ vgf2p8affineinvqb $(post_filter_constant_s14), t4, x7, x7; \ vgf2p8affineinvqb $(post_filter_constant_s14), t4, x3, x3; \ vgf2p8affineinvqb $(post_filter_constant_s14), t4, x6, x6; \ \ /* sbox GF8 inverse + postfilter sbox 3 */ \ vgf2p8affineinvqb $(post_filter_constant_s3), t6, x2, x2; \ vgf2p8affineinvqb $(post_filter_constant_s3), t6, x5, x5; \ \ /* sbox GF8 inverse + postfilter sbox 2 */ \ vgf2p8affineinvqb $(post_filter_constant_s2), t3, x1, x1; \ vgf2p8affineinvqb $(post_filter_constant_s2), t3, x4, x4; \ \ vpsrldq $1, t0, t1; \ vpsrldq $2, t0, t2; \ vpshufb t7, t1, t1; \ vpsrldq $3, t0, t3; \ \ /* P-function */ \ vpxorq x5, x0, x0; \ vpxorq x6, x1, x1; \ vpxorq x7, x2, x2; \ vpxorq x4, x3, x3; \ \ vpshufb t7, t2, t2; \ vpsrldq $4, t0, t4; \ vpshufb t7, t3, t3; \ vpsrldq $5, t0, t5; \ vpshufb t7, t4, t4; \ \ vpxorq x2, x4, x4; \ vpxorq x3, x5, x5; \ vpxorq x0, x6, x6; \ vpxorq x1, x7, x7; \ \ vpsrldq $6, t0, t6; \ vpshufb t7, t5, t5; \ vpshufb t7, t6, t6; \ \ vpxorq x7, x0, x0; \ vpxorq x4, x1, x1; \ vpxorq x5, x2, x2; \ vpxorq x6, x3, x3; \ \ vpxorq x3, x4, x4; \ vpxorq x0, x5, x5; \ vpxorq x1, x6, x6; \ vpxorq x2, x7, x7; /* note: high and low parts swapped */ \ \ /* Add key material and result to CD (x 
becomes new CD) */ \ \ vpternlogq $0x96, mem_cd##_5, t6, x1; \ \ vpsrldq $7, t0, t6; \ vpshufb t7, t0, t0; \ vpshufb t7, t6, t7; \ \ vpternlogq $0x96, mem_cd##_4, t7, x0; \ vpternlogq $0x96, mem_cd##_6, t5, x2; \ vpternlogq $0x96, mem_cd##_7, t4, x3; \ vpternlogq $0x96, mem_cd##_0, t3, x4; \ vpternlogq $0x96, mem_cd##_1, t2, x5; \ vpternlogq $0x96, mem_cd##_2, t1, x6; \ vpternlogq $0x96, mem_cd##_3, t0, x7; /* * IN/OUT: * x0..x7: byte-sliced AB state preloaded * mem_ab: byte-sliced AB state in memory * mem_cb: byte-sliced CD state in memory */ #define two_roundsm64(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, i, dir, store_ab) \ roundsm64(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_cd, (key_table + (i) * 8)(CTX)); \ \ vmovdqu64 x0, mem_cd##_4; \ vmovdqu64 x1, mem_cd##_5; \ vmovdqu64 x2, mem_cd##_6; \ vmovdqu64 x3, mem_cd##_7; \ vmovdqu64 x4, mem_cd##_0; \ vmovdqu64 x5, mem_cd##_1; \ vmovdqu64 x6, mem_cd##_2; \ vmovdqu64 x7, mem_cd##_3; \ \ roundsm64(x4, x5, x6, x7, x0, x1, x2, x3, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, (key_table + ((i) + (dir)) * 8)(CTX)); \ \ store_ab(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab); #define dummy_store(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab) /* do nothing */ #define store_ab_state(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab) \ /* Store new AB state */ \ vmovdqu64 x4, mem_ab##_4; \ vmovdqu64 x5, mem_ab##_5; \ vmovdqu64 x6, mem_ab##_6; \ vmovdqu64 x7, mem_ab##_7; \ vmovdqu64 x0, mem_ab##_0; \ vmovdqu64 x1, mem_ab##_1; \ vmovdqu64 x2, mem_ab##_2; \ vmovdqu64 x3, mem_ab##_3; #define enc_rounds64(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, i) \ two_roundsm64(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 2, 1, store_ab_state); \ two_roundsm64(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 4, 1, store_ab_state); \ two_roundsm64(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 6, 1, dummy_store); #define dec_rounds64(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, i) \ two_roundsm64(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 7, -1, store_ab_state); \ two_roundsm64(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 5, -1, store_ab_state); \ two_roundsm64(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, (i) + 3, -1, dummy_store); /* * IN: * v0..3: byte-sliced 32-bit integers * OUT: * v0..3: (IN << 1) * t0, t1, t2, zero: (IN >> 7) */ #define rol32_1_64(v0, v1, v2, v3, t0, t1, t2, zero, one) \ vpcmpltb zero, v0, %k1; \ vpaddb v0, v0, v0; \ vpaddb one, zero, t0{%k1}{z}; \ \ vpcmpltb zero, v1, %k1; \ vpaddb v1, v1, v1; \ vpaddb one, zero, t1{%k1}{z}; \ \ vpcmpltb zero, v2, %k1; \ vpaddb v2, v2, v2; \ vpaddb one, zero, t2{%k1}{z}; \ \ vpcmpltb zero, v3, %k1; \ vpaddb v3, v3, v3; \ vpaddb one, zero, zero{%k1}{z}; /* * IN: * r: byte-sliced AB state in memory * l: byte-sliced CD state in memory * OUT: * x0..x7: new byte-sliced CD state */ #define fls64(l, l0, l1, l2, l3, l4, l5, l6, l7, r, t0, t1, t2, t3, tt0, \ tt1, tt2, tt3, kll, klr, krl, krr, tmp) \ /* \ * t0 = kll; \ * t0 &= ll; \ * lr ^= rol32(t0, 1); \ */ \ vpbroadcastd kll, t0; /* only lowest 32-bit used */ \ vpbroadcastq .Lbyte_ones rRIP, tmp; \ vpxor tt3##_x, tt3##_x, tt3##_x; \ vpshufb tt3, t0, t3; \ vpsrldq $1, t0, t0; \ vpshufb tt3, t0, t2; \ vpsrldq $1, 
t0, t0; \ vpshufb tt3, t0, t1; \ vpsrldq $1, t0, t0; \ vpshufb tt3, t0, t0; \ \ vpandq l0, t0, t0; \ vpandq l1, t1, t1; \ vpandq l2, t2, t2; \ vpandq l3, t3, t3; \ \ rol32_1_64(t3, t2, t1, t0, tt0, tt1, tt2, tt3, tmp); \ \ vpternlogq $0x96, tt2, t0, l4; \ vpbroadcastd krr, t0; /* only lowest 32-bit used */ \ vmovdqu64 l4, l##_4; \ vpternlogq $0x96, tt1, t1, l5; \ vmovdqu64 l5, l##_5; \ vpternlogq $0x96, tt0, t2, l6; \ vmovdqu64 l6, l##_6; \ vpternlogq $0x96, tt3, t3, l7; \ vmovdqu64 l7, l##_7; \ vpxor tt3##_x, tt3##_x, tt3##_x; \ \ /* \ * t2 = krr; \ * t2 |= rr; \ * rl ^= t2; \ */ \ \ vpshufb tt3, t0, t3; \ vpsrldq $1, t0, t0; \ vpshufb tt3, t0, t2; \ vpsrldq $1, t0, t0; \ vpshufb tt3, t0, t1; \ vpsrldq $1, t0, t0; \ vpshufb tt3, t0, t0; \ \ vpternlogq $0x1e, r##_4, t0, r##_0; \ vpbroadcastd krl, t0; /* only lowest 32-bit used */ \ vpternlogq $0x1e, r##_5, t1, r##_1; \ vpternlogq $0x1e, r##_6, t2, r##_2; \ vpternlogq $0x1e, r##_7, t3, r##_3; \ \ /* \ * t2 = krl; \ * t2 &= rl; \ * rr ^= rol32(t2, 1); \ */ \ vpshufb tt3, t0, t3; \ vpsrldq $1, t0, t0; \ vpshufb tt3, t0, t2; \ vpsrldq $1, t0, t0; \ vpshufb tt3, t0, t1; \ vpsrldq $1, t0, t0; \ vpshufb tt3, t0, t0; \ \ vpandq r##_0, t0, t0; \ vpandq r##_1, t1, t1; \ vpandq r##_2, t2, t2; \ vpandq r##_3, t3, t3; \ \ rol32_1_64(t3, t2, t1, t0, tt0, tt1, tt2, tt3, tmp); \ \ vpternlogq $0x96, tt2, t0, r##_4; \ vpbroadcastd klr, t0; /* only lowest 32-bit used */ \ vpternlogq $0x96, tt1, t1, r##_5; \ vpternlogq $0x96, tt0, t2, r##_6; \ vpternlogq $0x96, tt3, t3, r##_7; \ vpxor tt3##_x, tt3##_x, tt3##_x; \ \ /* \ * t0 = klr; \ * t0 |= lr; \ * ll ^= t0; \ */ \ \ vpshufb tt3, t0, t3; \ vpsrldq $1, t0, t0; \ vpshufb tt3, t0, t2; \ vpsrldq $1, t0, t0; \ vpshufb tt3, t0, t1; \ vpsrldq $1, t0, t0; \ vpshufb tt3, t0, t0; \ \ vpternlogq $0x1e, l4, t0, l0; \ vmovdqu64 l0, l##_0; \ vpternlogq $0x1e, l5, t1, l1; \ vmovdqu64 l1, l##_1; \ vpternlogq $0x1e, l6, t2, l2; \ vmovdqu64 l2, l##_2; \ vpternlogq $0x1e, l7, t3, l3; \ vmovdqu64 l3, l##_3; #define transpose_4x4(x0, x1, x2, x3, t1, t2) \ vpunpckhdq x1, x0, t2; \ vpunpckldq x1, x0, x0; \ \ vpunpckldq x3, x2, t1; \ vpunpckhdq x3, x2, x2; \ \ vpunpckhqdq t1, x0, x1; \ vpunpcklqdq t1, x0, x0; \ \ vpunpckhqdq x2, t2, x3; \ vpunpcklqdq x2, t2, x2; #define byteslice_16x16b_fast(a0, b0, c0, d0, a1, b1, c1, d1, a2, b2, c2, d2, \ a3, b3, c3, d3, st0, st1) \ transpose_4x4(a0, a1, a2, a3, st0, st1); \ transpose_4x4(b0, b1, b2, b3, st0, st1); \ \ transpose_4x4(c0, c1, c2, c3, st0, st1); \ transpose_4x4(d0, d1, d2, d3, st0, st1); \ \ vbroadcasti64x2 .Lshufb_16x16b rRIP, st0; \ vpshufb st0, a0, a0; \ vpshufb st0, a1, a1; \ vpshufb st0, a2, a2; \ vpshufb st0, a3, a3; \ vpshufb st0, b0, b0; \ vpshufb st0, b1, b1; \ vpshufb st0, b2, b2; \ vpshufb st0, b3, b3; \ vpshufb st0, c0, c0; \ vpshufb st0, c1, c1; \ vpshufb st0, c2, c2; \ vpshufb st0, c3, c3; \ vpshufb st0, d0, d0; \ vpshufb st0, d1, d1; \ vpshufb st0, d2, d2; \ vpshufb st0, d3, d3; \ \ transpose_4x4(a0, b0, c0, d0, st0, st1); \ transpose_4x4(a1, b1, c1, d1, st0, st1); \ \ transpose_4x4(a2, b2, c2, d2, st0, st1); \ transpose_4x4(a3, b3, c3, d3, st0, st1); \ /* does not adjust output bytes inside vectors */ /* load blocks to registers and apply pre-whitening */ #define inpack64_pre(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, rio, key) \ vpbroadcastq key, x0; \ vpshufb .Lpack_bswap rRIP, x0, x0; \ \ vpxorq 0 * 64(rio), x0, y7; \ vpxorq 1 * 64(rio), x0, y6; \ vpxorq 2 * 64(rio), x0, y5; \ vpxorq 3 * 64(rio), x0, y4; \ vpxorq 4 * 64(rio), x0, y3; \ 
vpxorq 5 * 64(rio), x0, y2; \ vpxorq 6 * 64(rio), x0, y1; \ vpxorq 7 * 64(rio), x0, y0; \ vpxorq 8 * 64(rio), x0, x7; \ vpxorq 9 * 64(rio), x0, x6; \ vpxorq 10 * 64(rio), x0, x5; \ vpxorq 11 * 64(rio), x0, x4; \ vpxorq 12 * 64(rio), x0, x3; \ vpxorq 13 * 64(rio), x0, x2; \ vpxorq 14 * 64(rio), x0, x1; \ vpxorq 15 * 64(rio), x0, x0; /* byteslice pre-whitened blocks and store to temporary memory */ #define inpack64_post(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, mem_ab, mem_cd, tmp0, tmp1) \ byteslice_16x16b_fast(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, \ y4, y5, y6, y7, tmp0, tmp1); \ \ vmovdqu64 x0, mem_ab##_0; \ vmovdqu64 x1, mem_ab##_1; \ vmovdqu64 x2, mem_ab##_2; \ vmovdqu64 x3, mem_ab##_3; \ vmovdqu64 x4, mem_ab##_4; \ vmovdqu64 x5, mem_ab##_5; \ vmovdqu64 x6, mem_ab##_6; \ vmovdqu64 x7, mem_ab##_7; \ vmovdqu64 y0, mem_cd##_0; \ vmovdqu64 y1, mem_cd##_1; \ vmovdqu64 y2, mem_cd##_2; \ vmovdqu64 y3, mem_cd##_3; \ vmovdqu64 y4, mem_cd##_4; \ vmovdqu64 y5, mem_cd##_5; \ vmovdqu64 y6, mem_cd##_6; \ vmovdqu64 y7, mem_cd##_7; /* de-byteslice, apply post-whitening and store blocks */ #define outunpack64(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, \ y5, y6, y7, key, tmp0, tmp1) \ byteslice_16x16b_fast(y0, y4, x0, x4, y1, y5, x1, x5, y2, y6, x2, x6, \ y3, y7, x3, x7, tmp0, tmp1); \ \ vpbroadcastq key, tmp0; \ vpshufb .Lpack_bswap rRIP, tmp0, tmp0; \ \ vpxorq tmp0, y7, y7; \ vpxorq tmp0, y6, y6; \ vpxorq tmp0, y5, y5; \ vpxorq tmp0, y4, y4; \ vpxorq tmp0, y3, y3; \ vpxorq tmp0, y2, y2; \ vpxorq tmp0, y1, y1; \ vpxorq tmp0, y0, y0; \ vpxorq tmp0, x7, x7; \ vpxorq tmp0, x6, x6; \ vpxorq tmp0, x5, x5; \ vpxorq tmp0, x4, x4; \ vpxorq tmp0, x3, x3; \ vpxorq tmp0, x2, x2; \ vpxorq tmp0, x1, x1; \ vpxorq tmp0, x0, x0; #define write_output(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \ y6, y7, rio) \ vmovdqu64 x0, 0 * 64(rio); \ vmovdqu64 x1, 1 * 64(rio); \ vmovdqu64 x2, 2 * 64(rio); \ vmovdqu64 x3, 3 * 64(rio); \ vmovdqu64 x4, 4 * 64(rio); \ vmovdqu64 x5, 5 * 64(rio); \ vmovdqu64 x6, 6 * 64(rio); \ vmovdqu64 x7, 7 * 64(rio); \ vmovdqu64 y0, 8 * 64(rio); \ vmovdqu64 y1, 9 * 64(rio); \ vmovdqu64 y2, 10 * 64(rio); \ vmovdqu64 y3, 11 * 64(rio); \ vmovdqu64 y4, 12 * 64(rio); \ vmovdqu64 y5, 13 * 64(rio); \ vmovdqu64 y6, 14 * 64(rio); \ vmovdqu64 y7, 15 * 64(rio); SECTION_RODATA #define SHUFB_BYTES(idx) \ 0 + (idx), 4 + (idx), 8 + (idx), 12 + (idx) _gcry_camellia_gfni_avx512__constants: ELF(.type _gcry_camellia_gfni_avx512__constants,@object;) .align 64 .Lpack_bswap: .long 0x00010203, 0x04050607, 0x80808080, 0x80808080 .long 0x00010203, 0x04050607, 0x80808080, 0x80808080 .long 0x00010203, 0x04050607, 0x80808080, 0x80808080 .long 0x00010203, 0x04050607, 0x80808080, 0x80808080 .Lcounter0123_lo: .quad 0, 0 .quad 1, 0 .quad 2, 0 .quad 3, 0 .align 16 .Lcounter4444_lo: .quad 4, 0 .Lcounter8888_lo: .quad 8, 0 .Lcounter16161616_lo: .quad 16, 0 .Lcounter1111_hi: .quad 0, 1 .Lshufb_16x16b: .byte SHUFB_BYTES(0), SHUFB_BYTES(1), SHUFB_BYTES(2), SHUFB_BYTES(3) /* For CTR-mode IV byteswap */ .Lbswap128_mask: .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 .Lbyte_ones: .byte 1, 1, 1, 1, 1, 1, 1, 1 /* Pre-filters and post-filters bit-matrixes for Camellia sboxes s1, s2, s3 * and s4. * See http://urn.fi/URN:NBN:fi:oulu-201305311409, pages 43-48. * * Pre-filters are directly from above source, "θ₁"/"θ₄". Post-filters are * combination of function "A" (AES SubBytes affine transformation) and * "ψ₁"/"ψ₂"/"ψ₃". 
*/ /* Bit-matrix from "θ₁(x)" function: */ .Lpre_filter_bitmatrix_s123: .quad BM8X8(BV8(1, 1, 1, 0, 1, 1, 0, 1), BV8(0, 0, 1, 1, 0, 0, 1, 0), BV8(1, 1, 0, 1, 0, 0, 0, 0), BV8(1, 0, 1, 1, 0, 0, 1, 1), BV8(0, 0, 0, 0, 1, 1, 0, 0), BV8(1, 0, 1, 0, 0, 1, 0, 0), BV8(0, 0, 1, 0, 1, 1, 0, 0), BV8(1, 0, 0, 0, 0, 1, 1, 0)) /* Bit-matrix from "θ₄(x)" function: */ .Lpre_filter_bitmatrix_s4: .quad BM8X8(BV8(1, 1, 0, 1, 1, 0, 1, 1), BV8(0, 1, 1, 0, 0, 1, 0, 0), BV8(1, 0, 1, 0, 0, 0, 0, 1), BV8(0, 1, 1, 0, 0, 1, 1, 1), BV8(0, 0, 0, 1, 1, 0, 0, 0), BV8(0, 1, 0, 0, 1, 0, 0, 1), BV8(0, 1, 0, 1, 1, 0, 0, 0), BV8(0, 0, 0, 0, 1, 1, 0, 1)) /* Bit-matrix from "ψ₁(A(x))" function: */ .Lpost_filter_bitmatrix_s14: .quad BM8X8(BV8(0, 0, 0, 0, 0, 0, 0, 1), BV8(0, 1, 1, 0, 0, 1, 1, 0), BV8(1, 0, 1, 1, 1, 1, 1, 0), BV8(0, 0, 0, 1, 1, 0, 1, 1), BV8(1, 0, 0, 0, 1, 1, 1, 0), BV8(0, 1, 0, 1, 1, 1, 1, 0), BV8(0, 1, 1, 1, 1, 1, 1, 1), BV8(0, 0, 0, 1, 1, 1, 0, 0)) /* Bit-matrix from "ψ₂(A(x))" function: */ .Lpost_filter_bitmatrix_s2: .quad BM8X8(BV8(0, 0, 0, 1, 1, 1, 0, 0), BV8(0, 0, 0, 0, 0, 0, 0, 1), BV8(0, 1, 1, 0, 0, 1, 1, 0), BV8(1, 0, 1, 1, 1, 1, 1, 0), BV8(0, 0, 0, 1, 1, 0, 1, 1), BV8(1, 0, 0, 0, 1, 1, 1, 0), BV8(0, 1, 0, 1, 1, 1, 1, 0), BV8(0, 1, 1, 1, 1, 1, 1, 1)) /* Bit-matrix from "ψ₃(A(x))" function: */ .Lpost_filter_bitmatrix_s3: .quad BM8X8(BV8(0, 1, 1, 0, 0, 1, 1, 0), BV8(1, 0, 1, 1, 1, 1, 1, 0), BV8(0, 0, 0, 1, 1, 0, 1, 1), BV8(1, 0, 0, 0, 1, 1, 1, 0), BV8(0, 1, 0, 1, 1, 1, 1, 0), BV8(0, 1, 1, 1, 1, 1, 1, 1), BV8(0, 0, 0, 1, 1, 1, 0, 0), BV8(0, 0, 0, 0, 0, 0, 0, 1)) +/* CTR byte addition constants */ +.align 64 +.Lbige_addb_0_1: + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 +.Lbige_addb_2_3: + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2 + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3 +.Lbige_addb_4_5: + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4 + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5 +.Lbige_addb_6_7: + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6 + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7 +.Lbige_addb_8_9: + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8 + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9 +.Lbige_addb_10_11: + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10 + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11 +.Lbige_addb_12_13: + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12 + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 13 +.Lbige_addb_14_15: + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 14 + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15 +.Lbige_addb_16: + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16 + ELF(.size _gcry_camellia_gfni_avx512__constants,.-_gcry_camellia_gfni_avx512__constants;) .text .align 16 ELF(.type __camellia_gfni_avx512_enc_blk64,@function;) __camellia_gfni_avx512_enc_blk64: /* input: * %rdi: ctx, CTX * %r8d: 24 for 16 byte key, 32 for larger * %zmm0..%zmm15: 64 plaintext blocks * output: * %zmm0..%zmm15: 64 encrypted blocks, order swapped: * 7, 8, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 */ CFI_STARTPROC(); leaq (-8 * 8)(CTX, %r8, 8), %r8; inpack64_post(%zmm0, %zmm1, %zmm2, %zmm3, %zmm4, %zmm5, %zmm6, %zmm7, %zmm8, %zmm9, %zmm10, %zmm11, %zmm12, %zmm13, %zmm14, %zmm15, mem_ab, mem_cd, %zmm30, %zmm31); .align 8 .Lenc_loop: enc_rounds64(%zmm0, %zmm1, %zmm2, %zmm3, %zmm4, %zmm5, %zmm6, %zmm7, %zmm8, %zmm9, %zmm10, %zmm11, %zmm12, %zmm13, %zmm14, %zmm15, mem_ab, mem_cd, 0); cmpq %r8, 
CTX; je .Lenc_done; leaq (8 * 8)(CTX), CTX; fls64(mem_ab, %zmm0, %zmm1, %zmm2, %zmm3, %zmm4, %zmm5, %zmm6, %zmm7, mem_cd, %zmm8, %zmm9, %zmm10, %zmm11, %zmm12, %zmm13, %zmm14, %zmm15, ((key_table) + 0)(CTX), ((key_table) + 4)(CTX), ((key_table) + 8)(CTX), ((key_table) + 12)(CTX), %zmm31); jmp .Lenc_loop; .align 8 .Lenc_done: /* load CD for output */ vmovdqu64 mem_cd_0, %zmm8; vmovdqu64 mem_cd_1, %zmm9; vmovdqu64 mem_cd_2, %zmm10; vmovdqu64 mem_cd_3, %zmm11; vmovdqu64 mem_cd_4, %zmm12; vmovdqu64 mem_cd_5, %zmm13; vmovdqu64 mem_cd_6, %zmm14; vmovdqu64 mem_cd_7, %zmm15; outunpack64(%zmm0, %zmm1, %zmm2, %zmm3, %zmm4, %zmm5, %zmm6, %zmm7, %zmm8, %zmm9, %zmm10, %zmm11, %zmm12, %zmm13, %zmm14, %zmm15, ((key_table) + 8 * 8)(%r8), %zmm30, %zmm31); ret_spec_stop; CFI_ENDPROC(); ELF(.size __camellia_gfni_avx512_enc_blk64,.-__camellia_gfni_avx512_enc_blk64;) .align 16 ELF(.type __camellia_gfni_avx512_dec_blk64,@function;) __camellia_gfni_avx512_dec_blk64: /* input: * %rdi: ctx, CTX * %r8d: 24 for 16 byte key, 32 for larger * %zmm0..%zmm15: 64 encrypted blocks * output: * %zmm0..%zmm15: 64 plaintext blocks, order swapped: * 7, 8, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 */ CFI_STARTPROC(); movq %r8, %rcx; movq CTX, %r8 leaq (-8 * 8)(CTX, %rcx, 8), CTX; inpack64_post(%zmm0, %zmm1, %zmm2, %zmm3, %zmm4, %zmm5, %zmm6, %zmm7, %zmm8, %zmm9, %zmm10, %zmm11, %zmm12, %zmm13, %zmm14, %zmm15, mem_ab, mem_cd, %zmm30, %zmm31); .align 8 .Ldec_loop: dec_rounds64(%zmm0, %zmm1, %zmm2, %zmm3, %zmm4, %zmm5, %zmm6, %zmm7, %zmm8, %zmm9, %zmm10, %zmm11, %zmm12, %zmm13, %zmm14, %zmm15, mem_ab, mem_cd, 0); cmpq %r8, CTX; je .Ldec_done; fls64(mem_ab, %zmm0, %zmm1, %zmm2, %zmm3, %zmm4, %zmm5, %zmm6, %zmm7, mem_cd, %zmm8, %zmm9, %zmm10, %zmm11, %zmm12, %zmm13, %zmm14, %zmm15, ((key_table) + 8)(CTX), ((key_table) + 12)(CTX), ((key_table) + 0)(CTX), ((key_table) + 4)(CTX), %zmm31); leaq (-8 * 8)(CTX), CTX; jmp .Ldec_loop; .align 8 .Ldec_done: /* load CD for output */ vmovdqu64 mem_cd_0, %zmm8; vmovdqu64 mem_cd_1, %zmm9; vmovdqu64 mem_cd_2, %zmm10; vmovdqu64 mem_cd_3, %zmm11; vmovdqu64 mem_cd_4, %zmm12; vmovdqu64 mem_cd_5, %zmm13; vmovdqu64 mem_cd_6, %zmm14; vmovdqu64 mem_cd_7, %zmm15; outunpack64(%zmm0, %zmm1, %zmm2, %zmm3, %zmm4, %zmm5, %zmm6, %zmm7, %zmm8, %zmm9, %zmm10, %zmm11, %zmm12, %zmm13, %zmm14, %zmm15, (key_table)(CTX), %zmm30, %zmm31); ret_spec_stop; CFI_ENDPROC(); ELF(.size __camellia_gfni_avx512_dec_blk64,.-__camellia_gfni_avx512_dec_blk64;) #define add_le128(out, in, lo_counter, hi_counter1) \ vpaddq lo_counter, in, out; \ vpcmpuq $1, lo_counter, out, %k1; \ kaddb %k1, %k1, %k1; \ vpaddq hi_counter1, out, out{%k1}; .align 16 .globl _gcry_camellia_gfni_avx512_ctr_enc ELF(.type _gcry_camellia_gfni_avx512_ctr_enc,@function;) _gcry_camellia_gfni_avx512_ctr_enc: /* input: * %rdi: ctx, CTX * %rsi: dst (64 blocks) * %rdx: src (64 blocks) * %rcx: iv (big endian, 128bit) */ CFI_STARTPROC(); spec_stop_avx512; + cmpl $128, key_bitlength(CTX); + movl $32, %r8d; + movl $24, %eax; + cmovel %eax, %r8d; /* max */ + + cmpb $(0x100 - 64), 15(%rcx); + jbe .Lctr_byteadd; + vbroadcasti64x2 .Lbswap128_mask rRIP, %zmm19; vmovdqa64 .Lcounter0123_lo rRIP, %zmm21; vbroadcasti64x2 .Lcounter4444_lo rRIP, %zmm22; vbroadcasti64x2 .Lcounter8888_lo rRIP, %zmm23; vbroadcasti64x2 .Lcounter16161616_lo rRIP, %zmm24; vbroadcasti64x2 .Lcounter1111_hi rRIP, %zmm25; /* load IV and byteswap */ movq 8(%rcx), %r11; movq (%rcx), %r10; bswapq %r11; bswapq %r10; vbroadcasti64x2 (%rcx), %zmm0; vpshufb %zmm19, %zmm0, %zmm0; - cmpl $128, 
key_bitlength(CTX); - movl $32, %r8d; - movl $24, %eax; - cmovel %eax, %r8d; /* max */ - /* check need for handling 64-bit overflow and carry */ cmpq $(0xffffffffffffffff - 64), %r11; ja .Lload_ctr_carry; /* construct IVs */ vpaddq %zmm21, %zmm0, %zmm15; /* +0:+1:+2:+3 */ vpaddq %zmm22, %zmm15, %zmm14; /* +4:+5:+6:+7 */ vpaddq %zmm23, %zmm15, %zmm13; /* +8:+9:+10:+11 */ vpaddq %zmm23, %zmm14, %zmm12; /* +12:+13:+14:+15 */ vpaddq %zmm24, %zmm15, %zmm11; /* +16... */ vpaddq %zmm24, %zmm14, %zmm10; /* +20... */ vpaddq %zmm24, %zmm13, %zmm9; /* +24... */ vpaddq %zmm24, %zmm12, %zmm8; /* +28... */ vpaddq %zmm24, %zmm11, %zmm7; /* +32... */ vpaddq %zmm24, %zmm10, %zmm6; /* +36... */ vpaddq %zmm24, %zmm9, %zmm5; /* +40... */ vpaddq %zmm24, %zmm8, %zmm4; /* +44... */ vpaddq %zmm24, %zmm7, %zmm3; /* +48... */ vpaddq %zmm24, %zmm6, %zmm2; /* +52... */ vpaddq %zmm24, %zmm5, %zmm1; /* +56... */ vpaddq %zmm24, %zmm4, %zmm0; /* +60... */ jmp .Lload_ctr_done; .align 4 .Lload_ctr_carry: /* construct IVs */ add_le128(%zmm15, %zmm0, %zmm21, %zmm25); /* +0:+1:+2:+3 */ add_le128(%zmm14, %zmm15, %zmm22, %zmm25); /* +4:+5:+6:+7 */ add_le128(%zmm13, %zmm15, %zmm23, %zmm25); /* +8:+9:+10:+11 */ add_le128(%zmm12, %zmm14, %zmm23, %zmm25); /* +12:+13:+14:+15 */ add_le128(%zmm11, %zmm15, %zmm24, %zmm25); /* +16... */ add_le128(%zmm10, %zmm14, %zmm24, %zmm25); /* +20... */ add_le128(%zmm9, %zmm13, %zmm24, %zmm25); /* +24... */ add_le128(%zmm8, %zmm12, %zmm24, %zmm25); /* +28... */ add_le128(%zmm7, %zmm11, %zmm24, %zmm25); /* +32... */ add_le128(%zmm6, %zmm10, %zmm24, %zmm25); /* +36... */ add_le128(%zmm5, %zmm9, %zmm24, %zmm25); /* +40... */ add_le128(%zmm4, %zmm8, %zmm24, %zmm25); /* +44... */ add_le128(%zmm3, %zmm7, %zmm24, %zmm25); /* +48... */ add_le128(%zmm2, %zmm6, %zmm24, %zmm25); /* +52... */ add_le128(%zmm1, %zmm5, %zmm24, %zmm25); /* +56... */ add_le128(%zmm0, %zmm4, %zmm24, %zmm25); /* +60... */ .align 4 .Lload_ctr_done: + vbroadcasti64x2 .Lpack_bswap rRIP, %zmm17; vpbroadcastq (key_table)(CTX), %zmm16; - vpshufb .Lpack_bswap rRIP, %zmm16, %zmm16; + vpshufb %zmm17, %zmm16, %zmm16; /* Byte-swap IVs and update counter. 
*/ addq $64, %r11; adcq $0, %r10; vpshufb %zmm19, %zmm15, %zmm15; vpshufb %zmm19, %zmm14, %zmm14; vpshufb %zmm19, %zmm13, %zmm13; vpshufb %zmm19, %zmm12, %zmm12; vpshufb %zmm19, %zmm11, %zmm11; vpshufb %zmm19, %zmm10, %zmm10; vpshufb %zmm19, %zmm9, %zmm9; vpshufb %zmm19, %zmm8, %zmm8; bswapq %r11; bswapq %r10; vpshufb %zmm19, %zmm7, %zmm7; vpshufb %zmm19, %zmm6, %zmm6; vpshufb %zmm19, %zmm5, %zmm5; vpshufb %zmm19, %zmm4, %zmm4; vpshufb %zmm19, %zmm3, %zmm3; vpshufb %zmm19, %zmm2, %zmm2; vpshufb %zmm19, %zmm1, %zmm1; vpshufb %zmm19, %zmm0, %zmm0; movq %r11, 8(%rcx); movq %r10, (%rcx); +.align 16 +.Lctr_inpack64_pre: /* inpack64_pre: */ vpxorq %zmm0, %zmm16, %zmm0; vpxorq %zmm1, %zmm16, %zmm1; vpxorq %zmm2, %zmm16, %zmm2; vpxorq %zmm3, %zmm16, %zmm3; vpxorq %zmm4, %zmm16, %zmm4; vpxorq %zmm5, %zmm16, %zmm5; vpxorq %zmm6, %zmm16, %zmm6; vpxorq %zmm7, %zmm16, %zmm7; vpxorq %zmm8, %zmm16, %zmm8; vpxorq %zmm9, %zmm16, %zmm9; vpxorq %zmm10, %zmm16, %zmm10; vpxorq %zmm11, %zmm16, %zmm11; vpxorq %zmm12, %zmm16, %zmm12; vpxorq %zmm13, %zmm16, %zmm13; vpxorq %zmm14, %zmm16, %zmm14; vpxorq %zmm15, %zmm16, %zmm15; call __camellia_gfni_avx512_enc_blk64; vpxorq 0 * 64(%rdx), %zmm7, %zmm7; vpxorq 1 * 64(%rdx), %zmm6, %zmm6; vpxorq 2 * 64(%rdx), %zmm5, %zmm5; vpxorq 3 * 64(%rdx), %zmm4, %zmm4; vpxorq 4 * 64(%rdx), %zmm3, %zmm3; vpxorq 5 * 64(%rdx), %zmm2, %zmm2; vpxorq 6 * 64(%rdx), %zmm1, %zmm1; vpxorq 7 * 64(%rdx), %zmm0, %zmm0; vpxorq 8 * 64(%rdx), %zmm15, %zmm15; vpxorq 9 * 64(%rdx), %zmm14, %zmm14; vpxorq 10 * 64(%rdx), %zmm13, %zmm13; vpxorq 11 * 64(%rdx), %zmm12, %zmm12; vpxorq 12 * 64(%rdx), %zmm11, %zmm11; vpxorq 13 * 64(%rdx), %zmm10, %zmm10; vpxorq 14 * 64(%rdx), %zmm9, %zmm9; vpxorq 15 * 64(%rdx), %zmm8, %zmm8; write_output(%zmm7, %zmm6, %zmm5, %zmm4, %zmm3, %zmm2, %zmm1, %zmm0, %zmm15, %zmm14, %zmm13, %zmm12, %zmm11, %zmm10, %zmm9, %zmm8, %rsi); clear_regs(); ret_spec_stop; + +.align 16 +.Lctr_byteadd_full_ctr_carry: + movq 8(%rcx), %r11; + movq (%rcx), %r10; + bswapq %r11; + bswapq %r10; + addq $64, %r11; + adcq $0, %r10; + bswapq %r11; + bswapq %r10; + movq %r11, 8(%rcx); + movq %r10, (%rcx); + jmp .Lctr_byteadd_zmm; +.align 16 +.Lctr_byteadd: + vbroadcasti64x2 (%rcx), %zmm12; + je .Lctr_byteadd_full_ctr_carry; + addb $64, 15(%rcx); +.Lctr_byteadd_zmm: + vbroadcasti64x2 .Lbige_addb_16 rRIP, %zmm16; + vmovdqa64 .Lbige_addb_0_1 rRIP, %zmm17; + vmovdqa64 .Lbige_addb_4_5 rRIP, %zmm18; + vmovdqa64 .Lbige_addb_8_9 rRIP, %zmm19; + vmovdqa64 .Lbige_addb_12_13 rRIP, %zmm20; + vpaddb %zmm16, %zmm12, %zmm8; + vpaddb %zmm17, %zmm12, %zmm15; + vpaddb %zmm18, %zmm12, %zmm14; + vpaddb %zmm19, %zmm12, %zmm13; + vpaddb %zmm20, %zmm12, %zmm12; + vpaddb %zmm16, %zmm8, %zmm4; + vpaddb %zmm17, %zmm8, %zmm11; + vpaddb %zmm18, %zmm8, %zmm10; + vpaddb %zmm19, %zmm8, %zmm9; + vpaddb %zmm20, %zmm8, %zmm8; + vpaddb %zmm16, %zmm4, %zmm0; + vpaddb %zmm17, %zmm4, %zmm7; + vpaddb %zmm18, %zmm4, %zmm6; + vpaddb %zmm19, %zmm4, %zmm5; + vpaddb %zmm20, %zmm4, %zmm4; + vpaddb %zmm17, %zmm0, %zmm3; + vpaddb %zmm18, %zmm0, %zmm2; + vpaddb %zmm19, %zmm0, %zmm1; + vpaddb %zmm20, %zmm0, %zmm0; + + vbroadcasti64x2 .Lpack_bswap rRIP, %zmm17 + vpbroadcastq (key_table)(CTX), %zmm16; + vpshufb %zmm17, %zmm16, %zmm16; + + jmp .Lctr_inpack64_pre; CFI_ENDPROC(); ELF(.size _gcry_camellia_gfni_avx512_ctr_enc,.-_gcry_camellia_gfni_avx512_ctr_enc;) .align 16 .globl _gcry_camellia_gfni_avx512_cbc_dec ELF(.type _gcry_camellia_gfni_avx512_cbc_dec,@function;) _gcry_camellia_gfni_avx512_cbc_dec: /* input: * %rdi: ctx, CTX * %rsi: dst (64 blocks) * 
%rdx: src (64 blocks) * %rcx: iv */ CFI_STARTPROC(); spec_stop_avx512; movq %rcx, %r9; cmpl $128, key_bitlength(CTX); movl $32, %r8d; movl $24, %eax; cmovel %eax, %r8d; /* max */ inpack64_pre(%zmm0, %zmm1, %zmm2, %zmm3, %zmm4, %zmm5, %zmm6, %zmm7, %zmm8, %zmm9, %zmm10, %zmm11, %zmm12, %zmm13, %zmm14, %zmm15, %rdx, (key_table)(CTX, %r8, 8)); call __camellia_gfni_avx512_dec_blk64; /* XOR output with IV */ vmovdqu64 (%r9), %xmm16; vinserti64x2 $1, (0 * 16)(%rdx), %ymm16, %ymm16; vinserti64x4 $1, (1 * 16)(%rdx), %zmm16, %zmm16; vpxorq %zmm16, %zmm7, %zmm7; vpxorq (0 * 64 + 48)(%rdx), %zmm6, %zmm6; vpxorq (1 * 64 + 48)(%rdx), %zmm5, %zmm5; vpxorq (2 * 64 + 48)(%rdx), %zmm4, %zmm4; vpxorq (3 * 64 + 48)(%rdx), %zmm3, %zmm3; vpxorq (4 * 64 + 48)(%rdx), %zmm2, %zmm2; vpxorq (5 * 64 + 48)(%rdx), %zmm1, %zmm1; vpxorq (6 * 64 + 48)(%rdx), %zmm0, %zmm0; vpxorq (7 * 64 + 48)(%rdx), %zmm15, %zmm15; vpxorq (8 * 64 + 48)(%rdx), %zmm14, %zmm14; vpxorq (9 * 64 + 48)(%rdx), %zmm13, %zmm13; vpxorq (10 * 64 + 48)(%rdx), %zmm12, %zmm12; vpxorq (11 * 64 + 48)(%rdx), %zmm11, %zmm11; vpxorq (12 * 64 + 48)(%rdx), %zmm10, %zmm10; vpxorq (13 * 64 + 48)(%rdx), %zmm9, %zmm9; vpxorq (14 * 64 + 48)(%rdx), %zmm8, %zmm8; vmovdqu64 (15 * 64 + 48)(%rdx), %xmm16; write_output(%zmm7, %zmm6, %zmm5, %zmm4, %zmm3, %zmm2, %zmm1, %zmm0, %zmm15, %zmm14, %zmm13, %zmm12, %zmm11, %zmm10, %zmm9, %zmm8, %rsi); /* store new IV */ vmovdqu64 %xmm16, (0)(%r9); clear_regs(); ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_camellia_gfni_avx512_cbc_dec,.-_gcry_camellia_gfni_avx512_cbc_dec;) .align 16 .globl _gcry_camellia_gfni_avx512_cfb_dec ELF(.type _gcry_camellia_gfni_avx512_cfb_dec,@function;) _gcry_camellia_gfni_avx512_cfb_dec: /* input: * %rdi: ctx, CTX * %rsi: dst (32 blocks) * %rdx: src (32 blocks) * %rcx: iv */ CFI_STARTPROC(); spec_stop_avx512; cmpl $128, key_bitlength(CTX); movl $32, %r8d; movl $24, %eax; cmovel %eax, %r8d; /* max */ /* inpack64_pre: */ vpbroadcastq (key_table)(CTX), %zmm0; vpshufb .Lpack_bswap rRIP, %zmm0, %zmm0; vmovdqu64 (%rcx), %xmm15; vinserti64x2 $1, (%rdx), %ymm15, %ymm15; vinserti64x4 $1, 16(%rdx), %zmm15, %zmm15; vpxorq %zmm15, %zmm0, %zmm15; vpxorq (0 * 64 + 48)(%rdx), %zmm0, %zmm14; vpxorq (1 * 64 + 48)(%rdx), %zmm0, %zmm13; vpxorq (2 * 64 + 48)(%rdx), %zmm0, %zmm12; vpxorq (3 * 64 + 48)(%rdx), %zmm0, %zmm11; vpxorq (4 * 64 + 48)(%rdx), %zmm0, %zmm10; vpxorq (5 * 64 + 48)(%rdx), %zmm0, %zmm9; vpxorq (6 * 64 + 48)(%rdx), %zmm0, %zmm8; vpxorq (7 * 64 + 48)(%rdx), %zmm0, %zmm7; vpxorq (8 * 64 + 48)(%rdx), %zmm0, %zmm6; vpxorq (9 * 64 + 48)(%rdx), %zmm0, %zmm5; vpxorq (10 * 64 + 48)(%rdx), %zmm0, %zmm4; vpxorq (11 * 64 + 48)(%rdx), %zmm0, %zmm3; vpxorq (12 * 64 + 48)(%rdx), %zmm0, %zmm2; vpxorq (13 * 64 + 48)(%rdx), %zmm0, %zmm1; vpxorq (14 * 64 + 48)(%rdx), %zmm0, %zmm0; vmovdqu64 (15 * 64 + 48)(%rdx), %xmm16; vmovdqu64 %xmm16, (%rcx); /* store new IV */ call __camellia_gfni_avx512_enc_blk64; vpxorq 0 * 64(%rdx), %zmm7, %zmm7; vpxorq 1 * 64(%rdx), %zmm6, %zmm6; vpxorq 2 * 64(%rdx), %zmm5, %zmm5; vpxorq 3 * 64(%rdx), %zmm4, %zmm4; vpxorq 4 * 64(%rdx), %zmm3, %zmm3; vpxorq 5 * 64(%rdx), %zmm2, %zmm2; vpxorq 6 * 64(%rdx), %zmm1, %zmm1; vpxorq 7 * 64(%rdx), %zmm0, %zmm0; vpxorq 8 * 64(%rdx), %zmm15, %zmm15; vpxorq 9 * 64(%rdx), %zmm14, %zmm14; vpxorq 10 * 64(%rdx), %zmm13, %zmm13; vpxorq 11 * 64(%rdx), %zmm12, %zmm12; vpxorq 12 * 64(%rdx), %zmm11, %zmm11; vpxorq 13 * 64(%rdx), %zmm10, %zmm10; vpxorq 14 * 64(%rdx), %zmm9, %zmm9; vpxorq 15 * 64(%rdx), %zmm8, %zmm8; write_output(%zmm7, %zmm6, %zmm5, %zmm4, %zmm3, 
%zmm2, %zmm1, %zmm0, %zmm15, %zmm14, %zmm13, %zmm12, %zmm11, %zmm10, %zmm9, %zmm8, %rsi); clear_regs(); ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_camellia_gfni_avx512_cfb_dec,.-_gcry_camellia_gfni_avx512_cfb_dec;) .align 16 .globl _gcry_camellia_gfni_avx512_ocb_enc ELF(.type _gcry_camellia_gfni_avx512_ocb_enc,@function;) _gcry_camellia_gfni_avx512_ocb_enc: /* input: * %rdi: ctx, CTX * %rsi: dst (64 blocks) * %rdx: src (64 blocks) * %rcx: offset * %r8 : checksum * %r9 : L pointers (void *L[64]) */ CFI_STARTPROC(); spec_stop_avx512; pushq %r12; CFI_PUSH(%r12); pushq %r13; CFI_PUSH(%r13); pushq %r14; CFI_PUSH(%r14); pushq %r15; CFI_PUSH(%r15); pushq %rbx; CFI_PUSH(%rbx); vmovdqu64 (%rcx), %xmm30; /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ /* Checksum_i = Checksum_{i-1} xor P_i */ /* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i) */ #define OCB_INPUT(n, l0reg, l1reg, l2reg, l3reg, zreg, zplain) \ vmovdqu64 (n * 64)(%rdx), zplain; \ vpxorq (l0reg), %xmm30, %xmm16; \ vpxorq (l1reg), %xmm16, %xmm30; \ vinserti64x2 $1, %xmm30, %ymm16, %ymm16; \ vpxorq (l2reg), %xmm30, %xmm30; \ vinserti64x2 $2, %xmm30, %zmm16, %zmm16; \ vpxorq (l3reg), %xmm30, %xmm30; \ vinserti64x2 $3, %xmm30, %zmm16, %zmm16; \ vpxorq zplain, %zmm16, zreg; \ vmovdqu64 %zmm16, (n * 64)(%rsi); #define OCB_LOAD_PTRS(n) \ movq ((n * 4 * 8) + (0 * 8))(%r9), %r10; \ movq ((n * 4 * 8) + (1 * 8))(%r9), %r11; \ movq ((n * 4 * 8) + (2 * 8))(%r9), %r12; \ movq ((n * 4 * 8) + (3 * 8))(%r9), %r13; \ movq ((n * 4 * 8) + (4 * 8))(%r9), %r14; \ movq ((n * 4 * 8) + (5 * 8))(%r9), %r15; \ movq ((n * 4 * 8) + (6 * 8))(%r9), %rax; \ movq ((n * 4 * 8) + (7 * 8))(%r9), %rbx; OCB_LOAD_PTRS(0); OCB_INPUT(0, %r10, %r11, %r12, %r13, %zmm15, %zmm20); OCB_INPUT(1, %r14, %r15, %rax, %rbx, %zmm14, %zmm21); OCB_LOAD_PTRS(2); OCB_INPUT(2, %r10, %r11, %r12, %r13, %zmm13, %zmm22); vpternlogq $0x96, %zmm20, %zmm21, %zmm22; OCB_INPUT(3, %r14, %r15, %rax, %rbx, %zmm12, %zmm23); OCB_LOAD_PTRS(4); OCB_INPUT(4, %r10, %r11, %r12, %r13, %zmm11, %zmm24); OCB_INPUT(5, %r14, %r15, %rax, %rbx, %zmm10, %zmm25); vpternlogq $0x96, %zmm23, %zmm24, %zmm25; OCB_LOAD_PTRS(6); OCB_INPUT(6, %r10, %r11, %r12, %r13, %zmm9, %zmm20); OCB_INPUT(7, %r14, %r15, %rax, %rbx, %zmm8, %zmm21); OCB_LOAD_PTRS(8); OCB_INPUT(8, %r10, %r11, %r12, %r13, %zmm7, %zmm26); vpternlogq $0x96, %zmm20, %zmm21, %zmm26; OCB_INPUT(9, %r14, %r15, %rax, %rbx, %zmm6, %zmm23); OCB_LOAD_PTRS(10); OCB_INPUT(10, %r10, %r11, %r12, %r13, %zmm5, %zmm24); OCB_INPUT(11, %r14, %r15, %rax, %rbx, %zmm4, %zmm27); vpternlogq $0x96, %zmm23, %zmm24, %zmm27; OCB_LOAD_PTRS(12); OCB_INPUT(12, %r10, %r11, %r12, %r13, %zmm3, %zmm20); OCB_INPUT(13, %r14, %r15, %rax, %rbx, %zmm2, %zmm21); OCB_LOAD_PTRS(14); OCB_INPUT(14, %r10, %r11, %r12, %r13, %zmm1, %zmm23); vpternlogq $0x96, %zmm20, %zmm21, %zmm23; OCB_INPUT(15, %r14, %r15, %rax, %rbx, %zmm0, %zmm24); #undef OCB_LOAD_PTRS #undef OCB_INPUT vpbroadcastq (key_table)(CTX), %zmm16; vpshufb .Lpack_bswap rRIP, %zmm16, %zmm16; vpternlogq $0x96, %zmm24, %zmm22, %zmm25; vpternlogq $0x96, %zmm26, %zmm27, %zmm23; vpxorq %zmm25, %zmm23, %zmm20; vextracti64x4 $1, %zmm20, %ymm21; vpxorq %ymm21, %ymm20, %ymm20; vextracti64x2 $1, %ymm20, %xmm21; vpternlogq $0x96, (%r8), %xmm21, %xmm20; vmovdqu64 %xmm30, (%rcx); vmovdqu64 %xmm20, (%r8); cmpl $128, key_bitlength(CTX); movl $32, %r8d; movl $24, %eax; cmovel %eax, %r8d; /* max */ /* inpack64_pre: */ vpxorq %zmm0, %zmm16, %zmm0; vpxorq %zmm1, %zmm16, %zmm1; vpxorq %zmm2, %zmm16, %zmm2; vpxorq %zmm3, %zmm16, %zmm3; vpxorq %zmm4, %zmm16, %zmm4; 
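/* (continuing inpack64_pre) %zmm16 is the pre-whitening key, broadcast from
 * key_table and byte-shuffled by .Lpack_bswap above; these XORs whiten all
 * 64 OCB-masked blocks in %zmm0..%zmm15 before the call to
 * __camellia_gfni_avx512_enc_blk64. */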
vpxorq %zmm5, %zmm16, %zmm5; vpxorq %zmm6, %zmm16, %zmm6; vpxorq %zmm7, %zmm16, %zmm7; vpxorq %zmm8, %zmm16, %zmm8; vpxorq %zmm9, %zmm16, %zmm9; vpxorq %zmm10, %zmm16, %zmm10; vpxorq %zmm11, %zmm16, %zmm11; vpxorq %zmm12, %zmm16, %zmm12; vpxorq %zmm13, %zmm16, %zmm13; vpxorq %zmm14, %zmm16, %zmm14; vpxorq %zmm15, %zmm16, %zmm15; call __camellia_gfni_avx512_enc_blk64; vpxorq 0 * 64(%rsi), %zmm7, %zmm7; vpxorq 1 * 64(%rsi), %zmm6, %zmm6; vpxorq 2 * 64(%rsi), %zmm5, %zmm5; vpxorq 3 * 64(%rsi), %zmm4, %zmm4; vpxorq 4 * 64(%rsi), %zmm3, %zmm3; vpxorq 5 * 64(%rsi), %zmm2, %zmm2; vpxorq 6 * 64(%rsi), %zmm1, %zmm1; vpxorq 7 * 64(%rsi), %zmm0, %zmm0; vpxorq 8 * 64(%rsi), %zmm15, %zmm15; vpxorq 9 * 64(%rsi), %zmm14, %zmm14; vpxorq 10 * 64(%rsi), %zmm13, %zmm13; vpxorq 11 * 64(%rsi), %zmm12, %zmm12; vpxorq 12 * 64(%rsi), %zmm11, %zmm11; vpxorq 13 * 64(%rsi), %zmm10, %zmm10; vpxorq 14 * 64(%rsi), %zmm9, %zmm9; vpxorq 15 * 64(%rsi), %zmm8, %zmm8; write_output(%zmm7, %zmm6, %zmm5, %zmm4, %zmm3, %zmm2, %zmm1, %zmm0, %zmm15, %zmm14, %zmm13, %zmm12, %zmm11, %zmm10, %zmm9, %zmm8, %rsi); clear_regs(); popq %rbx; CFI_RESTORE(%rbx); popq %r15; CFI_RESTORE(%r15); popq %r14; CFI_RESTORE(%r14); popq %r13; CFI_RESTORE(%r12); popq %r12; CFI_RESTORE(%r13); ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_camellia_gfni_avx512_ocb_enc,.-_gcry_camellia_gfni_avx512_ocb_enc;) .align 16 .globl _gcry_camellia_gfni_avx512_ocb_dec ELF(.type _gcry_camellia_gfni_avx512_ocb_dec,@function;) _gcry_camellia_gfni_avx512_ocb_dec: /* input: * %rdi: ctx, CTX * %rsi: dst (64 blocks) * %rdx: src (64 blocks) * %rcx: offset * %r8 : checksum * %r9 : L pointers (void *L[64]) */ CFI_STARTPROC(); spec_stop_avx512; pushq %r12; CFI_PUSH(%r12); pushq %r13; CFI_PUSH(%r13); pushq %r14; CFI_PUSH(%r14); pushq %r15; CFI_PUSH(%r15); pushq %rbx; CFI_PUSH(%rbx); pushq %r8; CFI_PUSH(%r8); vmovdqu64 (%rcx), %xmm30; /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ /* C_i = Offset_i xor DECIPHER(K, P_i xor Offset_i) */ #define OCB_INPUT(n, l0reg, l1reg, l2reg, l3reg, zreg) \ vpxorq (l0reg), %xmm30, %xmm16; \ vpxorq (l1reg), %xmm16, %xmm30; \ vinserti64x2 $1, %xmm30, %ymm16, %ymm16; \ vpxorq (l2reg), %xmm30, %xmm30; \ vinserti64x2 $2, %xmm30, %zmm16, %zmm16; \ vpxorq (l3reg), %xmm30, %xmm30; \ vinserti64x2 $3, %xmm30, %zmm16, %zmm16; \ vpxorq (n * 64)(%rdx), %zmm16, zreg; \ vmovdqu64 %zmm16, (n * 64)(%rsi); #define OCB_LOAD_PTRS(n) \ movq ((n * 4 * 8) + (0 * 8))(%r9), %r10; \ movq ((n * 4 * 8) + (1 * 8))(%r9), %r11; \ movq ((n * 4 * 8) + (2 * 8))(%r9), %r12; \ movq ((n * 4 * 8) + (3 * 8))(%r9), %r13; \ movq ((n * 4 * 8) + (4 * 8))(%r9), %r14; \ movq ((n * 4 * 8) + (5 * 8))(%r9), %r15; \ movq ((n * 4 * 8) + (6 * 8))(%r9), %rax; \ movq ((n * 4 * 8) + (7 * 8))(%r9), %rbx; OCB_LOAD_PTRS(0); OCB_INPUT(0, %r10, %r11, %r12, %r13, %zmm15); OCB_INPUT(1, %r14, %r15, %rax, %rbx, %zmm14); OCB_LOAD_PTRS(2); OCB_INPUT(2, %r10, %r11, %r12, %r13, %zmm13); OCB_INPUT(3, %r14, %r15, %rax, %rbx, %zmm12); OCB_LOAD_PTRS(4); OCB_INPUT(4, %r10, %r11, %r12, %r13, %zmm11); OCB_INPUT(5, %r14, %r15, %rax, %rbx, %zmm10); OCB_LOAD_PTRS(6); OCB_INPUT(6, %r10, %r11, %r12, %r13, %zmm9); OCB_INPUT(7, %r14, %r15, %rax, %rbx, %zmm8); OCB_LOAD_PTRS(8); OCB_INPUT(8, %r10, %r11, %r12, %r13, %zmm7); OCB_INPUT(9, %r14, %r15, %rax, %rbx, %zmm6); OCB_LOAD_PTRS(10); OCB_INPUT(10, %r10, %r11, %r12, %r13, %zmm5); OCB_INPUT(11, %r14, %r15, %rax, %rbx, %zmm4); OCB_LOAD_PTRS(12); OCB_INPUT(12, %r10, %r11, %r12, %r13, %zmm3); OCB_INPUT(13, %r14, %r15, %rax, %rbx, %zmm2); OCB_LOAD_PTRS(14); OCB_INPUT(14, %r10, 
%r11, %r12, %r13, %zmm1); OCB_INPUT(15, %r14, %r15, %rax, %rbx, %zmm0); #undef OCB_LOAD_PTRS #undef OCB_INPUT vmovdqu64 %xmm30, (%rcx); cmpl $128, key_bitlength(CTX); movl $32, %r8d; movl $24, %eax; cmovel %eax, %r8d; /* max */ vpbroadcastq (key_table)(CTX, %r8, 8), %zmm16; vpshufb .Lpack_bswap rRIP, %zmm16, %zmm16; /* inpack64_pre: */ vpxorq %zmm0, %zmm16, %zmm0; vpxorq %zmm1, %zmm16, %zmm1; vpxorq %zmm2, %zmm16, %zmm2; vpxorq %zmm3, %zmm16, %zmm3; vpxorq %zmm4, %zmm16, %zmm4; vpxorq %zmm5, %zmm16, %zmm5; vpxorq %zmm6, %zmm16, %zmm6; vpxorq %zmm7, %zmm16, %zmm7; vpxorq %zmm8, %zmm16, %zmm8; vpxorq %zmm9, %zmm16, %zmm9; vpxorq %zmm10, %zmm16, %zmm10; vpxorq %zmm11, %zmm16, %zmm11; vpxorq %zmm12, %zmm16, %zmm12; vpxorq %zmm13, %zmm16, %zmm13; vpxorq %zmm14, %zmm16, %zmm14; vpxorq %zmm15, %zmm16, %zmm15; call __camellia_gfni_avx512_dec_blk64; vpxorq 0 * 64(%rsi), %zmm7, %zmm7; vpxorq 1 * 64(%rsi), %zmm6, %zmm6; vpxorq 2 * 64(%rsi), %zmm5, %zmm5; vpxorq 3 * 64(%rsi), %zmm4, %zmm4; vpxorq 4 * 64(%rsi), %zmm3, %zmm3; vpxorq 5 * 64(%rsi), %zmm2, %zmm2; vpxorq 6 * 64(%rsi), %zmm1, %zmm1; vpxorq 7 * 64(%rsi), %zmm0, %zmm0; vpxorq 8 * 64(%rsi), %zmm15, %zmm15; vpxorq 9 * 64(%rsi), %zmm14, %zmm14; vpxorq 10 * 64(%rsi), %zmm13, %zmm13; vpxorq 11 * 64(%rsi), %zmm12, %zmm12; vpxorq 12 * 64(%rsi), %zmm11, %zmm11; vpxorq 13 * 64(%rsi), %zmm10, %zmm10; vpxorq 14 * 64(%rsi), %zmm9, %zmm9; vpxorq 15 * 64(%rsi), %zmm8, %zmm8; write_output(%zmm7, %zmm6, %zmm5, %zmm4, %zmm3, %zmm2, %zmm1, %zmm0, %zmm15, %zmm14, %zmm13, %zmm12, %zmm11, %zmm10, %zmm9, %zmm8, %rsi); popq %r8; CFI_RESTORE(%r8); /* Checksum_i = Checksum_{i-1} xor C_i */ vpternlogq $0x96, %zmm7, %zmm6, %zmm5; vpternlogq $0x96, %zmm4, %zmm3, %zmm2; vpternlogq $0x96, %zmm1, %zmm0, %zmm15; vpternlogq $0x96, %zmm14, %zmm13, %zmm12; vpternlogq $0x96, %zmm11, %zmm10, %zmm9; vpternlogq $0x96, %zmm5, %zmm2, %zmm15; vpternlogq $0x96, %zmm12, %zmm9, %zmm8; vpxorq %zmm15, %zmm8, %zmm8; vextracti64x4 $1, %zmm8, %ymm0; vpxor %ymm0, %ymm8, %ymm8; vextracti128 $1, %ymm8, %xmm0; vpternlogq $0x96, (%r8), %xmm0, %xmm8; vmovdqu64 %xmm8, (%r8); clear_regs(); popq %rbx; CFI_RESTORE(%rbx); popq %r15; CFI_RESTORE(%r15); popq %r14; CFI_RESTORE(%r14); popq %r13; CFI_RESTORE(%r12); popq %r12; CFI_RESTORE(%r13); ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_camellia_gfni_avx512_ocb_dec,.-_gcry_camellia_gfni_avx512_ocb_dec;) .align 16 .globl _gcry_camellia_gfni_avx512_enc_blk64 ELF(.type _gcry_camellia_gfni_avx512_enc_blk64,@function;) _gcry_camellia_gfni_avx512_enc_blk64: /* input: * %rdi: ctx, CTX * %rsi: dst (64 blocks) * %rdx: src (64 blocks) */ CFI_STARTPROC(); spec_stop_avx512; cmpl $128, key_bitlength(CTX); movl $32, %r8d; movl $24, %eax; cmovel %eax, %r8d; /* max */ xorl %eax, %eax; vpbroadcastq (key_table)(CTX), %zmm0; vpshufb .Lpack_bswap rRIP, %zmm0, %zmm0; vpxorq (0) * 64(%rdx), %zmm0, %zmm15; vpxorq (1) * 64(%rdx), %zmm0, %zmm14; vpxorq (2) * 64(%rdx), %zmm0, %zmm13; vpxorq (3) * 64(%rdx), %zmm0, %zmm12; vpxorq (4) * 64(%rdx), %zmm0, %zmm11; vpxorq (5) * 64(%rdx), %zmm0, %zmm10; vpxorq (6) * 64(%rdx), %zmm0, %zmm9; vpxorq (7) * 64(%rdx), %zmm0, %zmm8; vpxorq (8) * 64(%rdx), %zmm0, %zmm7; vpxorq (9) * 64(%rdx), %zmm0, %zmm6; vpxorq (10) * 64(%rdx), %zmm0, %zmm5; vpxorq (11) * 64(%rdx), %zmm0, %zmm4; vpxorq (12) * 64(%rdx), %zmm0, %zmm3; vpxorq (13) * 64(%rdx), %zmm0, %zmm2; vpxorq (14) * 64(%rdx), %zmm0, %zmm1; vpxorq (15) * 64(%rdx), %zmm0, %zmm0; call __camellia_gfni_avx512_enc_blk64; vmovdqu64 %zmm7, (0) * 64(%rsi); vmovdqu64 %zmm6, (1) * 64(%rsi); vmovdqu64 
%zmm5, (2) * 64(%rsi); vmovdqu64 %zmm4, (3) * 64(%rsi); vmovdqu64 %zmm3, (4) * 64(%rsi); vmovdqu64 %zmm2, (5) * 64(%rsi); vmovdqu64 %zmm1, (6) * 64(%rsi); vmovdqu64 %zmm0, (7) * 64(%rsi); vmovdqu64 %zmm15, (8) * 64(%rsi); vmovdqu64 %zmm14, (9) * 64(%rsi); vmovdqu64 %zmm13, (10) * 64(%rsi); vmovdqu64 %zmm12, (11) * 64(%rsi); vmovdqu64 %zmm11, (12) * 64(%rsi); vmovdqu64 %zmm10, (13) * 64(%rsi); vmovdqu64 %zmm9, (14) * 64(%rsi); vmovdqu64 %zmm8, (15) * 64(%rsi); clear_regs(); ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_camellia_gfni_avx512_enc_blk64,.-_gcry_camellia_gfni_avx512_enc_blk64;) .align 16 .globl _gcry_camellia_gfni_avx512_dec_blk64 ELF(.type _gcry_camellia_gfni_avx512_dec_blk64,@function;) _gcry_camellia_gfni_avx512_dec_blk64: /* input: * %rdi: ctx, CTX * %rsi: dst (64 blocks) * %rdx: src (64 blocks) */ CFI_STARTPROC(); spec_stop_avx512; cmpl $128, key_bitlength(CTX); movl $32, %r8d; movl $24, %eax; cmovel %eax, %r8d; /* max */ xorl %eax, %eax; vpbroadcastq (key_table)(CTX, %r8, 8), %zmm0; vpshufb .Lpack_bswap rRIP, %zmm0, %zmm0; vpxorq (0) * 64(%rdx), %zmm0, %zmm15; vpxorq (1) * 64(%rdx), %zmm0, %zmm14; vpxorq (2) * 64(%rdx), %zmm0, %zmm13; vpxorq (3) * 64(%rdx), %zmm0, %zmm12; vpxorq (4) * 64(%rdx), %zmm0, %zmm11; vpxorq (5) * 64(%rdx), %zmm0, %zmm10; vpxorq (6) * 64(%rdx), %zmm0, %zmm9; vpxorq (7) * 64(%rdx), %zmm0, %zmm8; vpxorq (8) * 64(%rdx), %zmm0, %zmm7; vpxorq (9) * 64(%rdx), %zmm0, %zmm6; vpxorq (10) * 64(%rdx), %zmm0, %zmm5; vpxorq (11) * 64(%rdx), %zmm0, %zmm4; vpxorq (12) * 64(%rdx), %zmm0, %zmm3; vpxorq (13) * 64(%rdx), %zmm0, %zmm2; vpxorq (14) * 64(%rdx), %zmm0, %zmm1; vpxorq (15) * 64(%rdx), %zmm0, %zmm0; call __camellia_gfni_avx512_dec_blk64; vmovdqu64 %zmm7, (0) * 64(%rsi); vmovdqu64 %zmm6, (1) * 64(%rsi); vmovdqu64 %zmm5, (2) * 64(%rsi); vmovdqu64 %zmm4, (3) * 64(%rsi); vmovdqu64 %zmm3, (4) * 64(%rsi); vmovdqu64 %zmm2, (5) * 64(%rsi); vmovdqu64 %zmm1, (6) * 64(%rsi); vmovdqu64 %zmm0, (7) * 64(%rsi); vmovdqu64 %zmm15, (8) * 64(%rsi); vmovdqu64 %zmm14, (9) * 64(%rsi); vmovdqu64 %zmm13, (10) * 64(%rsi); vmovdqu64 %zmm12, (11) * 64(%rsi); vmovdqu64 %zmm11, (12) * 64(%rsi); vmovdqu64 %zmm10, (13) * 64(%rsi); vmovdqu64 %zmm9, (14) * 64(%rsi); vmovdqu64 %zmm8, (15) * 64(%rsi); clear_regs(); ret_spec_stop; CFI_ENDPROC(); ELF(.size _gcry_camellia_gfni_avx512_dec_blk64,.-_gcry_camellia_gfni_avx512_dec_blk64;) #endif /* defined(ENABLE_GFNI_SUPPORT) && defined(ENABLE_AVX512_SUPPORT) */ #endif /* __x86_64 */ diff --git a/cipher/camellia-glue.c b/cipher/camellia-glue.c index 8b4b4b3c..76a09eb1 100644 --- a/cipher/camellia-glue.c +++ b/cipher/camellia-glue.c @@ -1,1672 +1,1676 @@ /* camellia-glue.c - Glue for the Camellia cipher * Copyright (C) 2007 Free Software Foundation, Inc. * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. 
*/ /* I put all the libgcrypt-specific stuff in this file to keep the camellia.c/camellia.h files exactly as provided by NTT. If they update their code, this should make it easier to bring the changes in. - dshaw There is one small change which needs to be done: Include the following code at the top of camellia.h: */ #if 0 /* To use Camellia with libraries it is often useful to keep the name * space of the library clean. The following macro is thus useful: * * #define CAMELLIA_EXT_SYM_PREFIX foo_ * * This prefixes all external symbols with "foo_". */ #ifdef HAVE_CONFIG_H #include #endif #ifdef CAMELLIA_EXT_SYM_PREFIX #define CAMELLIA_PREFIX1(x,y) x ## y #define CAMELLIA_PREFIX2(x,y) CAMELLIA_PREFIX1(x,y) #define CAMELLIA_PREFIX(x) CAMELLIA_PREFIX2(CAMELLIA_EXT_SYM_PREFIX,x) #define Camellia_Ekeygen CAMELLIA_PREFIX(Camellia_Ekeygen) #define Camellia_EncryptBlock CAMELLIA_PREFIX(Camellia_EncryptBlock) #define Camellia_DecryptBlock CAMELLIA_PREFIX(Camellia_DecryptBlock) #define camellia_decrypt128 CAMELLIA_PREFIX(camellia_decrypt128) #define camellia_decrypt256 CAMELLIA_PREFIX(camellia_decrypt256) #define camellia_encrypt128 CAMELLIA_PREFIX(camellia_encrypt128) #define camellia_encrypt256 CAMELLIA_PREFIX(camellia_encrypt256) #define camellia_setup128 CAMELLIA_PREFIX(camellia_setup128) #define camellia_setup192 CAMELLIA_PREFIX(camellia_setup192) #define camellia_setup256 CAMELLIA_PREFIX(camellia_setup256) #endif /*CAMELLIA_EXT_SYM_PREFIX*/ #endif /* Code sample. */ #include #include "types.h" #include "g10lib.h" #include "cipher.h" #include "camellia.h" #include "bufhelp.h" #include "cipher-internal.h" #include "bulkhelp.h" /* Helper macro to force alignment to 16 bytes. */ #ifdef HAVE_GCC_ATTRIBUTE_ALIGNED # define ATTR_ALIGNED_16 __attribute__ ((aligned (16))) #else # define ATTR_ALIGNED_16 #endif /* USE_AESNI inidicates whether to compile with Intel AES-NI/AVX code. */ #undef USE_AESNI_AVX #if defined(ENABLE_AESNI_SUPPORT) && defined(ENABLE_AVX_SUPPORT) # if defined(__x86_64__) && (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) # define USE_AESNI_AVX 1 # endif #endif /* USE_AESNI_AVX2 inidicates whether to compile with Intel AES-NI/AVX2 code. */ #undef USE_AESNI_AVX2 #if defined(ENABLE_AESNI_SUPPORT) && defined(ENABLE_AVX2_SUPPORT) # if defined(__x86_64__) && (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) # define USE_AESNI_AVX2 1 # endif #endif /* USE_VAES_AVX2 inidicates whether to compile with Intel VAES/AVX2 code. */ #undef USE_VAES_AVX2 #if defined(USE_AESNI_AVX2) && defined(HAVE_GCC_INLINE_ASM_VAES_VPCLMUL) # define USE_VAES_AVX2 1 #endif /* USE_GFNI_AVX2 inidicates whether to compile with Intel GFNI/AVX2 code. */ #undef USE_GFNI_AVX2 #if defined(USE_AESNI_AVX2) && defined(ENABLE_GFNI_SUPPORT) # define USE_GFNI_AVX2 1 #endif /* USE_GFNI_AVX512 inidicates whether to compile with Intel GFNI/AVX512 code. */ #undef USE_GFNI_AVX512 #if defined(USE_GFNI_AVX2) && defined(ENABLE_AVX512_SUPPORT) # define USE_GFNI_AVX512 1 #endif typedef struct { KEY_TABLE_TYPE keytable; int keybitlength; #ifdef USE_AESNI_AVX unsigned int use_aesni_avx:1; /* AES-NI/AVX implementation shall be used. */ #endif /*USE_AESNI_AVX*/ #ifdef USE_AESNI_AVX2 + unsigned int use_avx2:1; /* If any of AVX2 implementation is enabled. */ unsigned int use_aesni_avx2:1;/* AES-NI/AVX2 implementation shall be used. */ unsigned int use_vaes_avx2:1; /* VAES/AVX2 implementation shall be used. 
*/ unsigned int use_gfni_avx2:1; /* GFNI/AVX2 implementation shall be used. */ unsigned int use_gfni_avx512:1; /* GFNI/AVX512 implementation shall be used. */ #endif /*USE_AESNI_AVX2*/ } CAMELLIA_context; /* Assembly implementations use SystemV ABI, ABI conversion and additional * stack to store XMM6-XMM15 needed on Win64. */ #undef ASM_FUNC_ABI #undef ASM_EXTRA_STACK #if defined(USE_AESNI_AVX) || defined(USE_AESNI_AVX2) # ifdef HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS # define ASM_FUNC_ABI __attribute__((sysv_abi)) # define ASM_EXTRA_STACK (10 * 16) # else # define ASM_FUNC_ABI # define ASM_EXTRA_STACK 0 # endif #endif #ifdef USE_AESNI_AVX /* Assembler implementations of Camellia using AES-NI and AVX. Process data in 16 blocks same time. */ extern void _gcry_camellia_aesni_avx_ctr_enc(CAMELLIA_context *ctx, unsigned char *out, const unsigned char *in, unsigned char *ctr) ASM_FUNC_ABI; extern void _gcry_camellia_aesni_avx_cbc_dec(CAMELLIA_context *ctx, unsigned char *out, const unsigned char *in, unsigned char *iv) ASM_FUNC_ABI; extern void _gcry_camellia_aesni_avx_cfb_dec(CAMELLIA_context *ctx, unsigned char *out, const unsigned char *in, unsigned char *iv) ASM_FUNC_ABI; extern void _gcry_camellia_aesni_avx_ocb_enc(CAMELLIA_context *ctx, unsigned char *out, const unsigned char *in, unsigned char *offset, unsigned char *checksum, const u64 Ls[16]) ASM_FUNC_ABI; extern void _gcry_camellia_aesni_avx_ocb_dec(CAMELLIA_context *ctx, unsigned char *out, const unsigned char *in, unsigned char *offset, unsigned char *checksum, const u64 Ls[16]) ASM_FUNC_ABI; extern void _gcry_camellia_aesni_avx_ocb_auth(CAMELLIA_context *ctx, const unsigned char *abuf, unsigned char *offset, unsigned char *checksum, const u64 Ls[16]) ASM_FUNC_ABI; extern void _gcry_camellia_aesni_avx_keygen(CAMELLIA_context *ctx, const unsigned char *key, unsigned int keylen) ASM_FUNC_ABI; extern void _gcry_camellia_aesni_avx_ecb_enc(const CAMELLIA_context *ctx, unsigned char *out, const unsigned char *in) ASM_FUNC_ABI; extern void _gcry_camellia_aesni_avx_ecb_dec(const CAMELLIA_context *ctx, unsigned char *out, const unsigned char *in) ASM_FUNC_ABI; static const int avx_burn_stack_depth = 16 * CAMELLIA_BLOCK_SIZE + 16 + 2 * sizeof(void *) + ASM_EXTRA_STACK; #endif #ifdef USE_AESNI_AVX2 /* Assembler implementations of Camellia using AES-NI and AVX2. Process data in 32 blocks same time. 
*/ extern void _gcry_camellia_aesni_avx2_ctr_enc(CAMELLIA_context *ctx, unsigned char *out, const unsigned char *in, unsigned char *ctr) ASM_FUNC_ABI; extern void _gcry_camellia_aesni_avx2_cbc_dec(CAMELLIA_context *ctx, unsigned char *out, const unsigned char *in, unsigned char *iv) ASM_FUNC_ABI; extern void _gcry_camellia_aesni_avx2_cfb_dec(CAMELLIA_context *ctx, unsigned char *out, const unsigned char *in, unsigned char *iv) ASM_FUNC_ABI; extern void _gcry_camellia_aesni_avx2_ocb_enc(CAMELLIA_context *ctx, unsigned char *out, const unsigned char *in, unsigned char *offset, unsigned char *checksum, const u64 Ls[32]) ASM_FUNC_ABI; extern void _gcry_camellia_aesni_avx2_ocb_dec(CAMELLIA_context *ctx, unsigned char *out, const unsigned char *in, unsigned char *offset, unsigned char *checksum, const u64 Ls[32]) ASM_FUNC_ABI; extern void _gcry_camellia_aesni_avx2_ocb_auth(CAMELLIA_context *ctx, const unsigned char *abuf, unsigned char *offset, unsigned char *checksum, const u64 Ls[32]) ASM_FUNC_ABI; extern void _gcry_camellia_aesni_avx2_enc_blk1_32(const CAMELLIA_context *ctx, unsigned char *out, const unsigned char *in, unsigned int nblocks) ASM_FUNC_ABI; extern void _gcry_camellia_aesni_avx2_dec_blk1_32(const CAMELLIA_context *ctx, unsigned char *out, const unsigned char *in, unsigned int nblocks) ASM_FUNC_ABI; static const int avx2_burn_stack_depth = 32 * CAMELLIA_BLOCK_SIZE + 16 + 2 * sizeof(void *) + ASM_EXTRA_STACK; #endif #ifdef USE_VAES_AVX2 /* Assembler implementations of Camellia using VAES and AVX2. Process data in 32 blocks same time. */ extern void _gcry_camellia_vaes_avx2_ctr_enc(CAMELLIA_context *ctx, unsigned char *out, const unsigned char *in, unsigned char *ctr) ASM_FUNC_ABI; extern void _gcry_camellia_vaes_avx2_cbc_dec(CAMELLIA_context *ctx, unsigned char *out, const unsigned char *in, unsigned char *iv) ASM_FUNC_ABI; extern void _gcry_camellia_vaes_avx2_cfb_dec(CAMELLIA_context *ctx, unsigned char *out, const unsigned char *in, unsigned char *iv) ASM_FUNC_ABI; extern void _gcry_camellia_vaes_avx2_ocb_enc(CAMELLIA_context *ctx, unsigned char *out, const unsigned char *in, unsigned char *offset, unsigned char *checksum, const u64 Ls[32]) ASM_FUNC_ABI; extern void _gcry_camellia_vaes_avx2_ocb_dec(CAMELLIA_context *ctx, unsigned char *out, const unsigned char *in, unsigned char *offset, unsigned char *checksum, const u64 Ls[32]) ASM_FUNC_ABI; extern void _gcry_camellia_vaes_avx2_ocb_auth(CAMELLIA_context *ctx, const unsigned char *abuf, unsigned char *offset, unsigned char *checksum, const u64 Ls[32]) ASM_FUNC_ABI; extern void _gcry_camellia_vaes_avx2_enc_blk1_32(const CAMELLIA_context *ctx, unsigned char *out, const unsigned char *in, unsigned int nblocks) ASM_FUNC_ABI; extern void _gcry_camellia_vaes_avx2_dec_blk1_32(const CAMELLIA_context *ctx, unsigned char *out, const unsigned char *in, unsigned int nblocks) ASM_FUNC_ABI; #endif #ifdef USE_GFNI_AVX2 /* Assembler implementations of Camellia using GFNI and AVX2. Process data in 32 blocks same time. 
*/ extern void _gcry_camellia_gfni_avx2_ctr_enc(CAMELLIA_context *ctx, unsigned char *out, const unsigned char *in, unsigned char *ctr) ASM_FUNC_ABI; extern void _gcry_camellia_gfni_avx2_cbc_dec(CAMELLIA_context *ctx, unsigned char *out, const unsigned char *in, unsigned char *iv) ASM_FUNC_ABI; extern void _gcry_camellia_gfni_avx2_cfb_dec(CAMELLIA_context *ctx, unsigned char *out, const unsigned char *in, unsigned char *iv) ASM_FUNC_ABI; extern void _gcry_camellia_gfni_avx2_ocb_enc(CAMELLIA_context *ctx, unsigned char *out, const unsigned char *in, unsigned char *offset, unsigned char *checksum, const u64 Ls[32]) ASM_FUNC_ABI; extern void _gcry_camellia_gfni_avx2_ocb_dec(CAMELLIA_context *ctx, unsigned char *out, const unsigned char *in, unsigned char *offset, unsigned char *checksum, const u64 Ls[32]) ASM_FUNC_ABI; extern void _gcry_camellia_gfni_avx2_ocb_auth(CAMELLIA_context *ctx, const unsigned char *abuf, unsigned char *offset, unsigned char *checksum, const u64 Ls[32]) ASM_FUNC_ABI; extern void _gcry_camellia_gfni_avx2_enc_blk1_32(const CAMELLIA_context *ctx, unsigned char *out, const unsigned char *in, unsigned int nblocks) ASM_FUNC_ABI; extern void _gcry_camellia_gfni_avx2_dec_blk1_32(const CAMELLIA_context *ctx, unsigned char *out, const unsigned char *in, unsigned int nblocks) ASM_FUNC_ABI; #endif #ifdef USE_GFNI_AVX512 /* Assembler implementations of Camellia using GFNI and AVX512. Process data in 64 blocks same time. */ extern void _gcry_camellia_gfni_avx512_ctr_enc(CAMELLIA_context *ctx, unsigned char *out, const unsigned char *in, unsigned char *ctr) ASM_FUNC_ABI; extern void _gcry_camellia_gfni_avx512_cbc_dec(CAMELLIA_context *ctx, unsigned char *out, const unsigned char *in, unsigned char *iv) ASM_FUNC_ABI; extern void _gcry_camellia_gfni_avx512_cfb_dec(CAMELLIA_context *ctx, unsigned char *out, const unsigned char *in, unsigned char *iv) ASM_FUNC_ABI; extern void _gcry_camellia_gfni_avx512_ocb_enc(CAMELLIA_context *ctx, unsigned char *out, const unsigned char *in, unsigned char *offset, unsigned char *checksum, const u64 Ls[32]) ASM_FUNC_ABI; extern void _gcry_camellia_gfni_avx512_ocb_dec(CAMELLIA_context *ctx, unsigned char *out, const unsigned char *in, unsigned char *offset, unsigned char *checksum, const u64 Ls[32]) ASM_FUNC_ABI; extern void _gcry_camellia_gfni_avx512_enc_blk64(const CAMELLIA_context *ctx, unsigned char *out, const unsigned char *in) ASM_FUNC_ABI; extern void _gcry_camellia_gfni_avx512_dec_blk64(const CAMELLIA_context *ctx, unsigned char *out, const unsigned char *in) ASM_FUNC_ABI; /* Stack not used by AVX512 implementation. 
*/ static const int avx512_burn_stack_depth = 0; #endif static const char *selftest(void); static void _gcry_camellia_ctr_enc (void *context, unsigned char *ctr, void *outbuf_arg, const void *inbuf_arg, size_t nblocks); static void _gcry_camellia_cbc_dec (void *context, unsigned char *iv, void *outbuf_arg, const void *inbuf_arg, size_t nblocks); static void _gcry_camellia_cfb_dec (void *context, unsigned char *iv, void *outbuf_arg, const void *inbuf_arg, size_t nblocks); static void _gcry_camellia_xts_crypt (void *context, unsigned char *tweak, void *outbuf_arg, const void *inbuf_arg, size_t nblocks, int encrypt); static void _gcry_camellia_ecb_crypt (void *context, void *outbuf_arg, const void *inbuf_arg, size_t nblocks, int encrypt); static void _gcry_camellia_ctr32le_enc (void *context, unsigned char *ctr, void *outbuf_arg, const void *inbuf_arg, size_t nblocks); static size_t _gcry_camellia_ocb_crypt (gcry_cipher_hd_t c, void *outbuf_arg, const void *inbuf_arg, size_t nblocks, int encrypt); static size_t _gcry_camellia_ocb_auth (gcry_cipher_hd_t c, const void *abuf_arg, size_t nblocks); static gcry_err_code_t camellia_setkey(void *c, const byte *key, unsigned keylen, cipher_bulk_ops_t *bulk_ops) { CAMELLIA_context *ctx=c; static int initialized=0; static const char *selftest_failed=NULL; #if defined(USE_AESNI_AVX) || defined(USE_AESNI_AVX2) \ || defined(USE_VAES_AVX2) || defined(USE_GFNI_AVX2) unsigned int hwf = _gcry_get_hw_features (); #endif if(keylen!=16 && keylen!=24 && keylen!=32) return GPG_ERR_INV_KEYLEN; if(!initialized) { initialized=1; selftest_failed=selftest(); if(selftest_failed) log_error("%s\n",selftest_failed); } if(selftest_failed) return GPG_ERR_SELFTEST_FAILED; #ifdef USE_AESNI_AVX ctx->use_aesni_avx = (hwf & HWF_INTEL_AESNI) && (hwf & HWF_INTEL_AVX); #endif #ifdef USE_AESNI_AVX2 ctx->use_aesni_avx2 = (hwf & HWF_INTEL_AESNI) && (hwf & HWF_INTEL_AVX2); ctx->use_vaes_avx2 = 0; ctx->use_gfni_avx2 = 0; ctx->use_gfni_avx512 = 0; + ctx->use_avx2 = ctx->use_aesni_avx2; #endif #ifdef USE_VAES_AVX2 ctx->use_vaes_avx2 = (hwf & HWF_INTEL_VAES_VPCLMUL) && (hwf & HWF_INTEL_AVX2); + ctx->use_avx2 |= ctx->use_vaes_avx2; #endif #ifdef USE_GFNI_AVX2 ctx->use_gfni_avx2 = (hwf & HWF_INTEL_GFNI) && (hwf & HWF_INTEL_AVX2); + ctx->use_avx2 |= ctx->use_gfni_avx2; #endif #ifdef USE_GFNI_AVX512 ctx->use_gfni_avx512 = (hwf & HWF_INTEL_GFNI) && (hwf & HWF_INTEL_AVX512); #endif ctx->keybitlength=keylen*8; /* Setup bulk encryption routines. */ memset (bulk_ops, 0, sizeof(*bulk_ops)); bulk_ops->cbc_dec = _gcry_camellia_cbc_dec; bulk_ops->cfb_dec = _gcry_camellia_cfb_dec; bulk_ops->ctr_enc = _gcry_camellia_ctr_enc; bulk_ops->ocb_crypt = _gcry_camellia_ocb_crypt; bulk_ops->ocb_auth = _gcry_camellia_ocb_auth; bulk_ops->xts_crypt = _gcry_camellia_xts_crypt; bulk_ops->ecb_crypt = _gcry_camellia_ecb_crypt; bulk_ops->ctr32le_enc = _gcry_camellia_ctr32le_enc; if (0) { } #ifdef USE_AESNI_AVX else if (ctx->use_aesni_avx) _gcry_camellia_aesni_avx_keygen(ctx, key, keylen); else #endif { Camellia_Ekeygen(ctx->keybitlength,key,ctx->keytable); _gcry_burn_stack ((19+34+34)*sizeof(u32)+2*sizeof(void*) /* camellia_setup256 */ +(4+32)*sizeof(u32)+2*sizeof(void*) /* camellia_setup192 */ +0+sizeof(int)+2*sizeof(void*) /* Camellia_Ekeygen */ +3*2*sizeof(void*) /* Function calls. */ ); } #ifdef USE_GFNI_AVX2 if (ctx->use_gfni_avx2) { /* Disable AESNI & VAES implementations when GFNI implementation is * enabled. 
*/ #ifdef USE_AESNI_AVX ctx->use_aesni_avx = 0; #endif #ifdef USE_AESNI_AVX2 ctx->use_aesni_avx2 = 0; #endif #ifdef USE_VAES_AVX2 ctx->use_vaes_avx2 = 0; #endif } #endif return 0; } #ifdef USE_ARM_ASM /* Assembly implementations of Camellia. */ extern void _gcry_camellia_arm_encrypt_block(const KEY_TABLE_TYPE keyTable, byte *outbuf, const byte *inbuf, const int keybits); extern void _gcry_camellia_arm_decrypt_block(const KEY_TABLE_TYPE keyTable, byte *outbuf, const byte *inbuf, const int keybits); static void Camellia_EncryptBlock(const int keyBitLength, const unsigned char *plaintext, const KEY_TABLE_TYPE keyTable, unsigned char *cipherText) { _gcry_camellia_arm_encrypt_block(keyTable, cipherText, plaintext, keyBitLength); } static void Camellia_DecryptBlock(const int keyBitLength, const unsigned char *cipherText, const KEY_TABLE_TYPE keyTable, unsigned char *plaintext) { _gcry_camellia_arm_decrypt_block(keyTable, plaintext, cipherText, keyBitLength); } #ifdef __aarch64__ # define CAMELLIA_encrypt_stack_burn_size (0) # define CAMELLIA_decrypt_stack_burn_size (0) #else # define CAMELLIA_encrypt_stack_burn_size (15*4) # define CAMELLIA_decrypt_stack_burn_size (15*4) #endif static unsigned int camellia_encrypt(void *c, byte *outbuf, const byte *inbuf) { CAMELLIA_context *ctx = c; Camellia_EncryptBlock(ctx->keybitlength,inbuf,ctx->keytable,outbuf); return /*burn_stack*/ (CAMELLIA_encrypt_stack_burn_size); } static unsigned int camellia_decrypt(void *c, byte *outbuf, const byte *inbuf) { CAMELLIA_context *ctx=c; Camellia_DecryptBlock(ctx->keybitlength,inbuf,ctx->keytable,outbuf); return /*burn_stack*/ (CAMELLIA_decrypt_stack_burn_size); } #else /*USE_ARM_ASM*/ static unsigned int camellia_encrypt(void *c, byte *outbuf, const byte *inbuf) { CAMELLIA_context *ctx=c; Camellia_EncryptBlock(ctx->keybitlength,inbuf,ctx->keytable,outbuf); #define CAMELLIA_encrypt_stack_burn_size \ (sizeof(int)+2*sizeof(unsigned char *)+sizeof(void*/*KEY_TABLE_TYPE*/) \ +4*sizeof(u32)+4*sizeof(u32) \ +2*sizeof(u32*)+4*sizeof(u32) \ +2*2*sizeof(void*) /* Function calls. */ \ ) return /*burn_stack*/ (CAMELLIA_encrypt_stack_burn_size); } static unsigned int camellia_decrypt(void *c, byte *outbuf, const byte *inbuf) { CAMELLIA_context *ctx=c; Camellia_DecryptBlock(ctx->keybitlength,inbuf,ctx->keytable,outbuf); #define CAMELLIA_decrypt_stack_burn_size \ (sizeof(int)+2*sizeof(unsigned char *)+sizeof(void*/*KEY_TABLE_TYPE*/) \ +4*sizeof(u32)+4*sizeof(u32) \ +2*sizeof(u32*)+4*sizeof(u32) \ +2*2*sizeof(void*) /* Function calls. */ \ ) return /*burn_stack*/ (CAMELLIA_decrypt_stack_burn_size); } #endif /*!USE_ARM_ASM*/ static unsigned int camellia_encrypt_blk1_32 (void *priv, byte *outbuf, const byte *inbuf, size_t num_blks) { const CAMELLIA_context *ctx = priv; unsigned int stack_burn_size = 0; gcry_assert (num_blks <= 32); #ifdef USE_GFNI_AVX2 if (ctx->use_gfni_avx2 && num_blks >= 3) { /* 3 or more parallel block GFNI processing is faster than * generic C implementation. */ _gcry_camellia_gfni_avx2_enc_blk1_32 (ctx, outbuf, inbuf, num_blks); return avx2_burn_stack_depth; } #endif #ifdef USE_VAES_AVX2 if (ctx->use_vaes_avx2 && num_blks >= 6) { /* 6 or more parallel block VAES processing is faster than * generic C implementation. */ _gcry_camellia_vaes_avx2_enc_blk1_32 (ctx, outbuf, inbuf, num_blks); return avx2_burn_stack_depth; } #endif #ifdef USE_AESNI_AVX2 if (ctx->use_aesni_avx2 && num_blks >= 6) { /* 6 or more parallel block AESNI processing is faster than * generic C implementation. 
*/ _gcry_camellia_aesni_avx2_enc_blk1_32 (ctx, outbuf, inbuf, num_blks); return avx2_burn_stack_depth; } #endif #ifdef USE_AESNI_AVX while (ctx->use_aesni_avx && num_blks >= 16) { _gcry_camellia_aesni_avx_ecb_enc (ctx, outbuf, inbuf); stack_burn_size = avx_burn_stack_depth; outbuf += CAMELLIA_BLOCK_SIZE * 16; inbuf += CAMELLIA_BLOCK_SIZE * 16; num_blks -= 16; } #endif while (num_blks) { unsigned int nburn = camellia_encrypt((void *)ctx, outbuf, inbuf); stack_burn_size = nburn > stack_burn_size ? nburn : stack_burn_size; outbuf += CAMELLIA_BLOCK_SIZE; inbuf += CAMELLIA_BLOCK_SIZE; num_blks--; } return stack_burn_size; } static unsigned int camellia_encrypt_blk1_64 (void *priv, byte *outbuf, const byte *inbuf, size_t num_blks) { CAMELLIA_context *ctx = priv; unsigned int stack_burn_size = 0; unsigned int nburn; gcry_assert (num_blks <= 64); #ifdef USE_GFNI_AVX512 if (num_blks == 64 && ctx->use_gfni_avx512) { _gcry_camellia_gfni_avx512_enc_blk64 (ctx, outbuf, inbuf); return avx512_burn_stack_depth; } #endif do { unsigned int curr_blks = num_blks > 32 ? 32 : num_blks; nburn = camellia_encrypt_blk1_32 (ctx, outbuf, inbuf, curr_blks); stack_burn_size = nburn > stack_burn_size ? nburn : stack_burn_size; outbuf += curr_blks * 16; inbuf += curr_blks * 16; num_blks -= curr_blks; } while (num_blks > 0); return stack_burn_size; } static unsigned int camellia_decrypt_blk1_32 (void *priv, byte *outbuf, const byte *inbuf, size_t num_blks) { const CAMELLIA_context *ctx = priv; unsigned int stack_burn_size = 0; gcry_assert (num_blks <= 32); #ifdef USE_GFNI_AVX2 if (ctx->use_gfni_avx2 && num_blks >= 3) { /* 3 or more parallel block GFNI processing is faster than * generic C implementation. */ _gcry_camellia_gfni_avx2_dec_blk1_32 (ctx, outbuf, inbuf, num_blks); return avx2_burn_stack_depth; } #endif #ifdef USE_VAES_AVX2 if (ctx->use_vaes_avx2 && num_blks >= 6) { /* 6 or more parallel block VAES processing is faster than * generic C implementation. */ _gcry_camellia_vaes_avx2_dec_blk1_32 (ctx, outbuf, inbuf, num_blks); return avx2_burn_stack_depth; } #endif #ifdef USE_AESNI_AVX2 if (ctx->use_aesni_avx2 && num_blks >= 6) { /* 6 or more parallel block AESNI processing is faster than * generic C implementation. */ _gcry_camellia_aesni_avx2_dec_blk1_32 (ctx, outbuf, inbuf, num_blks); return avx2_burn_stack_depth; } #endif #ifdef USE_AESNI_AVX while (ctx->use_aesni_avx && num_blks >= 16) { _gcry_camellia_aesni_avx_ecb_dec (ctx, outbuf, inbuf); stack_burn_size = avx_burn_stack_depth; outbuf += CAMELLIA_BLOCK_SIZE * 16; inbuf += CAMELLIA_BLOCK_SIZE * 16; num_blks -= 16; } #endif while (num_blks) { unsigned int nburn = camellia_decrypt((void *)ctx, outbuf, inbuf); stack_burn_size = nburn > stack_burn_size ? nburn : stack_burn_size; outbuf += CAMELLIA_BLOCK_SIZE; inbuf += CAMELLIA_BLOCK_SIZE; num_blks--; } return stack_burn_size; } static unsigned int camellia_decrypt_blk1_64 (void *priv, byte *outbuf, const byte *inbuf, size_t num_blks) { CAMELLIA_context *ctx = priv; unsigned int stack_burn_size = 0; unsigned int nburn; gcry_assert (num_blks <= 64); #ifdef USE_GFNI_AVX512 if (num_blks == 64 && ctx->use_gfni_avx512) { _gcry_camellia_gfni_avx512_dec_blk64 (ctx, outbuf, inbuf); return avx512_burn_stack_depth; } #endif do { unsigned int curr_blks = num_blks > 32 ? 32 : num_blks; nburn = camellia_decrypt_blk1_32 (ctx, outbuf, inbuf, curr_blks); stack_burn_size = nburn > stack_burn_size ? 
nburn : stack_burn_size; outbuf += curr_blks * 16; inbuf += curr_blks * 16; num_blks -= curr_blks; } while (num_blks > 0); return stack_burn_size; } /* Bulk encryption of complete blocks in CTR mode. This function is only intended for the bulk encryption feature of cipher.c. CTR is expected to be of size CAMELLIA_BLOCK_SIZE. */ static void _gcry_camellia_ctr_enc(void *context, unsigned char *ctr, void *outbuf_arg, const void *inbuf_arg, size_t nblocks) { CAMELLIA_context *ctx = context; unsigned char *outbuf = outbuf_arg; const unsigned char *inbuf = inbuf_arg; int burn_stack_depth = 0; #ifdef USE_GFNI_AVX512 if (ctx->use_gfni_avx512) { int did_use_gfni_avx512 = 0; /* Process data in 64 block chunks. */ while (nblocks >= 64) { _gcry_camellia_gfni_avx512_ctr_enc (ctx, outbuf, inbuf, ctr); nblocks -= 64; outbuf += 64 * CAMELLIA_BLOCK_SIZE; inbuf += 64 * CAMELLIA_BLOCK_SIZE; did_use_gfni_avx512 = 1; } if (did_use_gfni_avx512) { if (burn_stack_depth < avx512_burn_stack_depth) burn_stack_depth = avx512_burn_stack_depth; } /* Use generic code to handle smaller chunks... */ } #endif #ifdef USE_AESNI_AVX2 - if (ctx->use_aesni_avx2) + if (ctx->use_avx2) { int did_use_aesni_avx2 = 0; typeof (&_gcry_camellia_aesni_avx2_ctr_enc) bulk_ctr_fn = _gcry_camellia_aesni_avx2_ctr_enc; #ifdef USE_VAES_AVX2 if (ctx->use_vaes_avx2) bulk_ctr_fn =_gcry_camellia_vaes_avx2_ctr_enc; #endif #ifdef USE_GFNI_AVX2 if (ctx->use_gfni_avx2) bulk_ctr_fn =_gcry_camellia_gfni_avx2_ctr_enc; #endif /* Process data in 32 block chunks. */ while (nblocks >= 32) { bulk_ctr_fn (ctx, outbuf, inbuf, ctr); nblocks -= 32; outbuf += 32 * CAMELLIA_BLOCK_SIZE; inbuf += 32 * CAMELLIA_BLOCK_SIZE; did_use_aesni_avx2 = 1; } if (did_use_aesni_avx2) { if (burn_stack_depth < avx2_burn_stack_depth) burn_stack_depth = avx2_burn_stack_depth; } /* Use generic code to handle smaller chunks... */ } #endif #ifdef USE_AESNI_AVX if (ctx->use_aesni_avx) { int did_use_aesni_avx = 0; /* Process data in 16 block chunks. */ while (nblocks >= 16) { _gcry_camellia_aesni_avx_ctr_enc(ctx, outbuf, inbuf, ctr); nblocks -= 16; outbuf += 16 * CAMELLIA_BLOCK_SIZE; inbuf += 16 * CAMELLIA_BLOCK_SIZE; did_use_aesni_avx = 1; } if (did_use_aesni_avx) { if (burn_stack_depth < avx_burn_stack_depth) burn_stack_depth = avx_burn_stack_depth; } /* Use generic code to handle smaller chunks... */ } #endif /* Process remaining blocks. */ if (nblocks) { byte tmpbuf[CAMELLIA_BLOCK_SIZE * 32]; unsigned int tmp_used = CAMELLIA_BLOCK_SIZE; size_t nburn; nburn = bulk_ctr_enc_128(ctx, camellia_encrypt_blk1_32, outbuf, inbuf, nblocks, ctr, tmpbuf, sizeof(tmpbuf) / CAMELLIA_BLOCK_SIZE, &tmp_used); burn_stack_depth = nburn > burn_stack_depth ? nburn : burn_stack_depth; wipememory(tmpbuf, tmp_used); } if (burn_stack_depth) _gcry_burn_stack(burn_stack_depth); } /* Bulk decryption of complete blocks in CBC mode. This function is only intended for the bulk encryption feature of cipher.c. */ static void _gcry_camellia_cbc_dec(void *context, unsigned char *iv, void *outbuf_arg, const void *inbuf_arg, size_t nblocks) { CAMELLIA_context *ctx = context; unsigned char *outbuf = outbuf_arg; const unsigned char *inbuf = inbuf_arg; int burn_stack_depth = 0; #ifdef USE_GFNI_AVX512 if (ctx->use_gfni_avx512) { int did_use_gfni_avx512 = 0; /* Process data in 64 block chunks. 
*/ while (nblocks >= 64) { _gcry_camellia_gfni_avx512_cbc_dec (ctx, outbuf, inbuf, iv); nblocks -= 64; outbuf += 64 * CAMELLIA_BLOCK_SIZE; inbuf += 64 * CAMELLIA_BLOCK_SIZE; did_use_gfni_avx512 = 1; } if (did_use_gfni_avx512) { if (burn_stack_depth < avx512_burn_stack_depth) burn_stack_depth = avx512_burn_stack_depth; } /* Use generic code to handle smaller chunks... */ } #endif #ifdef USE_AESNI_AVX2 - if (ctx->use_aesni_avx2) + if (ctx->use_avx2) { int did_use_aesni_avx2 = 0; typeof (&_gcry_camellia_aesni_avx2_cbc_dec) bulk_cbc_fn = _gcry_camellia_aesni_avx2_cbc_dec; #ifdef USE_VAES_AVX2 if (ctx->use_vaes_avx2) bulk_cbc_fn =_gcry_camellia_vaes_avx2_cbc_dec; #endif #ifdef USE_GFNI_AVX2 if (ctx->use_gfni_avx2) bulk_cbc_fn =_gcry_camellia_gfni_avx2_cbc_dec; #endif /* Process data in 32 block chunks. */ while (nblocks >= 32) { bulk_cbc_fn (ctx, outbuf, inbuf, iv); nblocks -= 32; outbuf += 32 * CAMELLIA_BLOCK_SIZE; inbuf += 32 * CAMELLIA_BLOCK_SIZE; did_use_aesni_avx2 = 1; } if (did_use_aesni_avx2) { if (burn_stack_depth < avx2_burn_stack_depth) burn_stack_depth = avx2_burn_stack_depth; } /* Use generic code to handle smaller chunks... */ } #endif #ifdef USE_AESNI_AVX if (ctx->use_aesni_avx) { int did_use_aesni_avx = 0; /* Process data in 16 block chunks. */ while (nblocks >= 16) { _gcry_camellia_aesni_avx_cbc_dec(ctx, outbuf, inbuf, iv); nblocks -= 16; outbuf += 16 * CAMELLIA_BLOCK_SIZE; inbuf += 16 * CAMELLIA_BLOCK_SIZE; did_use_aesni_avx = 1; } if (did_use_aesni_avx) { if (burn_stack_depth < avx_burn_stack_depth) burn_stack_depth = avx_burn_stack_depth; } /* Use generic code to handle smaller chunks... */ } #endif /* Process remaining blocks. */ if (nblocks) { byte tmpbuf[CAMELLIA_BLOCK_SIZE * 32]; unsigned int tmp_used = CAMELLIA_BLOCK_SIZE; size_t nburn; nburn = bulk_cbc_dec_128(ctx, camellia_decrypt_blk1_32, outbuf, inbuf, nblocks, iv, tmpbuf, sizeof(tmpbuf) / CAMELLIA_BLOCK_SIZE, &tmp_used); burn_stack_depth = nburn > burn_stack_depth ? nburn : burn_stack_depth; wipememory(tmpbuf, tmp_used); } if (burn_stack_depth) _gcry_burn_stack(burn_stack_depth); } /* Bulk decryption of complete blocks in CFB mode. This function is only intended for the bulk encryption feature of cipher.c. */ static void _gcry_camellia_cfb_dec(void *context, unsigned char *iv, void *outbuf_arg, const void *inbuf_arg, size_t nblocks) { CAMELLIA_context *ctx = context; unsigned char *outbuf = outbuf_arg; const unsigned char *inbuf = inbuf_arg; int burn_stack_depth = 0; #ifdef USE_GFNI_AVX512 if (ctx->use_gfni_avx512) { int did_use_gfni_avx512 = 0; /* Process data in 64 block chunks. */ while (nblocks >= 64) { _gcry_camellia_gfni_avx512_cfb_dec (ctx, outbuf, inbuf, iv); nblocks -= 64; outbuf += 64 * CAMELLIA_BLOCK_SIZE; inbuf += 64 * CAMELLIA_BLOCK_SIZE; did_use_gfni_avx512 = 1; } if (did_use_gfni_avx512) { if (burn_stack_depth < avx512_burn_stack_depth) burn_stack_depth = avx512_burn_stack_depth; } /* Use generic code to handle smaller chunks... */ } #endif #ifdef USE_AESNI_AVX2 - if (ctx->use_aesni_avx2) + if (ctx->use_avx2) { int did_use_aesni_avx2 = 0; typeof (&_gcry_camellia_aesni_avx2_cfb_dec) bulk_cfb_fn = _gcry_camellia_aesni_avx2_cfb_dec; #ifdef USE_VAES_AVX2 if (ctx->use_vaes_avx2) bulk_cfb_fn =_gcry_camellia_vaes_avx2_cfb_dec; #endif #ifdef USE_GFNI_AVX2 if (ctx->use_gfni_avx2) bulk_cfb_fn =_gcry_camellia_gfni_avx2_cfb_dec; #endif /* Process data in 32 block chunks. 
*/ while (nblocks >= 32) { bulk_cfb_fn (ctx, outbuf, inbuf, iv); nblocks -= 32; outbuf += 32 * CAMELLIA_BLOCK_SIZE; inbuf += 32 * CAMELLIA_BLOCK_SIZE; did_use_aesni_avx2 = 1; } if (did_use_aesni_avx2) { if (burn_stack_depth < avx2_burn_stack_depth) burn_stack_depth = avx2_burn_stack_depth; } /* Use generic code to handle smaller chunks... */ } #endif #ifdef USE_AESNI_AVX if (ctx->use_aesni_avx) { int did_use_aesni_avx = 0; /* Process data in 16 block chunks. */ while (nblocks >= 16) { _gcry_camellia_aesni_avx_cfb_dec(ctx, outbuf, inbuf, iv); nblocks -= 16; outbuf += 16 * CAMELLIA_BLOCK_SIZE; inbuf += 16 * CAMELLIA_BLOCK_SIZE; did_use_aesni_avx = 1; } if (did_use_aesni_avx) { if (burn_stack_depth < avx_burn_stack_depth) burn_stack_depth = avx_burn_stack_depth; } /* Use generic code to handle smaller chunks... */ } #endif /* Process remaining blocks. */ if (nblocks) { byte tmpbuf[CAMELLIA_BLOCK_SIZE * 32]; unsigned int tmp_used = CAMELLIA_BLOCK_SIZE; size_t nburn; nburn = bulk_cfb_dec_128(ctx, camellia_encrypt_blk1_32, outbuf, inbuf, nblocks, iv, tmpbuf, sizeof(tmpbuf) / CAMELLIA_BLOCK_SIZE, &tmp_used); burn_stack_depth = nburn > burn_stack_depth ? nburn : burn_stack_depth; wipememory(tmpbuf, tmp_used); } if (burn_stack_depth) _gcry_burn_stack(burn_stack_depth); } /* Bulk encryption/decryption in ECB mode. */ static void _gcry_camellia_ecb_crypt (void *context, void *outbuf_arg, const void *inbuf_arg, size_t nblocks, int encrypt) { CAMELLIA_context *ctx = context; unsigned char *outbuf = outbuf_arg; const unsigned char *inbuf = inbuf_arg; int burn_stack_depth = 0; /* Process remaining blocks. */ if (nblocks) { size_t nburn; nburn = bulk_ecb_crypt_128(ctx, encrypt ? camellia_encrypt_blk1_64 : camellia_decrypt_blk1_64, outbuf, inbuf, nblocks, 64); burn_stack_depth = nburn > burn_stack_depth ? nburn : burn_stack_depth; } if (burn_stack_depth) _gcry_burn_stack(burn_stack_depth); } /* Bulk encryption/decryption of complete blocks in XTS mode. */ static void _gcry_camellia_xts_crypt (void *context, unsigned char *tweak, void *outbuf_arg, const void *inbuf_arg, size_t nblocks, int encrypt) { CAMELLIA_context *ctx = context; unsigned char *outbuf = outbuf_arg; const unsigned char *inbuf = inbuf_arg; int burn_stack_depth = 0; /* Process remaining blocks. */ if (nblocks) { byte tmpbuf[CAMELLIA_BLOCK_SIZE * 64]; unsigned int tmp_used = CAMELLIA_BLOCK_SIZE; size_t nburn; nburn = bulk_xts_crypt_128(ctx, encrypt ? camellia_encrypt_blk1_64 : camellia_decrypt_blk1_64, outbuf, inbuf, nblocks, tweak, tmpbuf, sizeof(tmpbuf) / CAMELLIA_BLOCK_SIZE, &tmp_used); burn_stack_depth = nburn > burn_stack_depth ? nburn : burn_stack_depth; wipememory(tmpbuf, tmp_used); } if (burn_stack_depth) _gcry_burn_stack(burn_stack_depth); } /* Bulk encryption of complete blocks in CTR32LE mode (for GCM-SIV). */ static void _gcry_camellia_ctr32le_enc(void *context, unsigned char *ctr, void *outbuf_arg, const void *inbuf_arg, size_t nblocks) { CAMELLIA_context *ctx = context; byte *outbuf = outbuf_arg; const byte *inbuf = inbuf_arg; int burn_stack_depth = 0; /* Process remaining blocks. */ if (nblocks) { byte tmpbuf[64 * CAMELLIA_BLOCK_SIZE]; unsigned int tmp_used = CAMELLIA_BLOCK_SIZE; size_t nburn; nburn = bulk_ctr32le_enc_128 (ctx, camellia_encrypt_blk1_64, outbuf, inbuf, nblocks, ctr, tmpbuf, sizeof(tmpbuf) / CAMELLIA_BLOCK_SIZE, &tmp_used); burn_stack_depth = nburn > burn_stack_depth ? 
nburn : burn_stack_depth; wipememory (tmpbuf, tmp_used); } if (burn_stack_depth) _gcry_burn_stack (burn_stack_depth); } /* Bulk encryption/decryption of complete blocks in OCB mode. */ static size_t _gcry_camellia_ocb_crypt (gcry_cipher_hd_t c, void *outbuf_arg, const void *inbuf_arg, size_t nblocks, int encrypt) { #if defined(USE_AESNI_AVX) || defined(USE_AESNI_AVX2) CAMELLIA_context *ctx = (void *)&c->context.c; unsigned char *outbuf = outbuf_arg; const unsigned char *inbuf = inbuf_arg; int burn_stack_depth = 0; u64 blkn = c->u_mode.ocb.data_nblocks; #else (void)c; (void)outbuf_arg; (void)inbuf_arg; (void)encrypt; #endif #ifdef USE_GFNI_AVX512 if (ctx->use_gfni_avx512) { int did_use_gfni_avx512 = 0; u64 Ls[64]; u64 *l; if (nblocks >= 64) { typeof (&_gcry_camellia_gfni_avx512_ocb_dec) bulk_ocb_fn = encrypt ? _gcry_camellia_gfni_avx512_ocb_enc : _gcry_camellia_gfni_avx512_ocb_dec; l = bulk_ocb_prepare_L_pointers_array_blk64 (c, Ls, blkn); /* Process data in 64 block chunks. */ while (nblocks >= 64) { blkn += 64; *l = (uintptr_t)(void *)ocb_get_l(c, blkn - blkn % 64); bulk_ocb_fn (ctx, outbuf, inbuf, c->u_iv.iv, c->u_ctr.ctr, Ls); nblocks -= 64; outbuf += 64 * CAMELLIA_BLOCK_SIZE; inbuf += 64 * CAMELLIA_BLOCK_SIZE; did_use_gfni_avx512 = 1; } } if (did_use_gfni_avx512) { if (burn_stack_depth < avx2_burn_stack_depth) burn_stack_depth = avx2_burn_stack_depth; } /* Use generic code to handle smaller chunks... */ } #endif #ifdef USE_AESNI_AVX2 - if (ctx->use_aesni_avx2) + if (ctx->use_avx2) { int did_use_aesni_avx2 = 0; u64 Ls[32]; u64 *l; if (nblocks >= 32) { typeof (&_gcry_camellia_aesni_avx2_ocb_dec) bulk_ocb_fn = encrypt ? _gcry_camellia_aesni_avx2_ocb_enc : _gcry_camellia_aesni_avx2_ocb_dec; #ifdef USE_VAES_AVX2 if (ctx->use_vaes_avx2) bulk_ocb_fn = encrypt ? _gcry_camellia_vaes_avx2_ocb_enc : _gcry_camellia_vaes_avx2_ocb_dec; #endif #ifdef USE_GFNI_AVX2 if (ctx->use_gfni_avx2) bulk_ocb_fn = encrypt ? _gcry_camellia_gfni_avx2_ocb_enc : _gcry_camellia_gfni_avx2_ocb_dec; #endif l = bulk_ocb_prepare_L_pointers_array_blk32 (c, Ls, blkn); /* Process data in 32 block chunks. */ while (nblocks >= 32) { blkn += 32; *l = (uintptr_t)(void *)ocb_get_l(c, blkn - blkn % 32); bulk_ocb_fn (ctx, outbuf, inbuf, c->u_iv.iv, c->u_ctr.ctr, Ls); nblocks -= 32; outbuf += 32 * CAMELLIA_BLOCK_SIZE; inbuf += 32 * CAMELLIA_BLOCK_SIZE; did_use_aesni_avx2 = 1; } } if (did_use_aesni_avx2) { if (burn_stack_depth < avx2_burn_stack_depth) burn_stack_depth = avx2_burn_stack_depth; } /* Use generic code to handle smaller chunks... */ } #endif #ifdef USE_AESNI_AVX if (ctx->use_aesni_avx) { int did_use_aesni_avx = 0; u64 Ls[16]; u64 *l; if (nblocks >= 16) { l = bulk_ocb_prepare_L_pointers_array_blk16 (c, Ls, blkn); /* Process data in 16 block chunks. */ while (nblocks >= 16) { blkn += 16; *l = (uintptr_t)(void *)ocb_get_l(c, blkn - blkn % 16); if (encrypt) _gcry_camellia_aesni_avx_ocb_enc(ctx, outbuf, inbuf, c->u_iv.iv, c->u_ctr.ctr, Ls); else _gcry_camellia_aesni_avx_ocb_dec(ctx, outbuf, inbuf, c->u_iv.iv, c->u_ctr.ctr, Ls); nblocks -= 16; outbuf += 16 * CAMELLIA_BLOCK_SIZE; inbuf += 16 * CAMELLIA_BLOCK_SIZE; did_use_aesni_avx = 1; } } if (did_use_aesni_avx) { if (burn_stack_depth < avx_burn_stack_depth) burn_stack_depth = avx_burn_stack_depth; } /* Use generic code to handle smaller chunks... */ } #endif #if defined(USE_AESNI_AVX) || defined(USE_AESNI_AVX2) /* Process remaining blocks. 
*/ if (nblocks) { byte tmpbuf[CAMELLIA_BLOCK_SIZE * 32]; unsigned int tmp_used = CAMELLIA_BLOCK_SIZE; size_t nburn; nburn = bulk_ocb_crypt_128 (c, ctx, encrypt ? camellia_encrypt_blk1_32 : camellia_decrypt_blk1_32, outbuf, inbuf, nblocks, &blkn, encrypt, tmpbuf, sizeof(tmpbuf) / CAMELLIA_BLOCK_SIZE, &tmp_used); burn_stack_depth = nburn > burn_stack_depth ? nburn : burn_stack_depth; wipememory(tmpbuf, tmp_used); nblocks = 0; } c->u_mode.ocb.data_nblocks = blkn; if (burn_stack_depth) _gcry_burn_stack (burn_stack_depth + 4 * sizeof(void *)); #endif return nblocks; } /* Bulk authentication of complete blocks in OCB mode. */ static size_t _gcry_camellia_ocb_auth (gcry_cipher_hd_t c, const void *abuf_arg, size_t nblocks) { #if defined(USE_AESNI_AVX) || defined(USE_AESNI_AVX2) CAMELLIA_context *ctx = (void *)&c->context.c; const unsigned char *abuf = abuf_arg; int burn_stack_depth = 0; u64 blkn = c->u_mode.ocb.aad_nblocks; #else (void)c; (void)abuf_arg; #endif #ifdef USE_AESNI_AVX2 - if (ctx->use_aesni_avx2) + if (ctx->use_avx2) { int did_use_aesni_avx2 = 0; u64 Ls[32]; u64 *l; if (nblocks >= 32) { typeof (&_gcry_camellia_aesni_avx2_ocb_auth) bulk_auth_fn = _gcry_camellia_aesni_avx2_ocb_auth; #ifdef USE_VAES_AVX2 if (ctx->use_vaes_avx2) bulk_auth_fn = _gcry_camellia_vaes_avx2_ocb_auth; #endif #ifdef USE_GFNI_AVX2 if (ctx->use_gfni_avx2) bulk_auth_fn = _gcry_camellia_gfni_avx2_ocb_auth; #endif l = bulk_ocb_prepare_L_pointers_array_blk32 (c, Ls, blkn); /* Process data in 32 block chunks. */ while (nblocks >= 32) { blkn += 32; *l = (uintptr_t)(void *)ocb_get_l(c, blkn - blkn % 32); bulk_auth_fn (ctx, abuf, c->u_mode.ocb.aad_offset, c->u_mode.ocb.aad_sum, Ls); nblocks -= 32; abuf += 32 * CAMELLIA_BLOCK_SIZE; did_use_aesni_avx2 = 1; } } if (did_use_aesni_avx2) { if (burn_stack_depth < avx2_burn_stack_depth) burn_stack_depth = avx2_burn_stack_depth; } /* Use generic code to handle smaller chunks... */ } #endif #ifdef USE_AESNI_AVX if (ctx->use_aesni_avx) { int did_use_aesni_avx = 0; u64 Ls[16]; u64 *l; if (nblocks >= 16) { l = bulk_ocb_prepare_L_pointers_array_blk16 (c, Ls, blkn); /* Process data in 16 block chunks. */ while (nblocks >= 16) { blkn += 16; *l = (uintptr_t)(void *)ocb_get_l(c, blkn - blkn % 16); _gcry_camellia_aesni_avx_ocb_auth(ctx, abuf, c->u_mode.ocb.aad_offset, c->u_mode.ocb.aad_sum, Ls); nblocks -= 16; abuf += 16 * CAMELLIA_BLOCK_SIZE; did_use_aesni_avx = 1; } } if (did_use_aesni_avx) { if (burn_stack_depth < avx_burn_stack_depth) burn_stack_depth = avx_burn_stack_depth; } /* Use generic code to handle smaller chunks... */ } #endif #if defined(USE_AESNI_AVX) || defined(USE_AESNI_AVX2) /* Process remaining blocks. */ if (nblocks) { byte tmpbuf[CAMELLIA_BLOCK_SIZE * 32]; unsigned int tmp_used = CAMELLIA_BLOCK_SIZE; size_t nburn; nburn = bulk_ocb_auth_128 (c, ctx, camellia_encrypt_blk1_32, abuf, nblocks, &blkn, tmpbuf, sizeof(tmpbuf) / CAMELLIA_BLOCK_SIZE, &tmp_used); burn_stack_depth = nburn > burn_stack_depth ? 
nburn : burn_stack_depth; wipememory(tmpbuf, tmp_used); nblocks = 0; } c->u_mode.ocb.aad_nblocks = blkn; if (burn_stack_depth) _gcry_burn_stack (burn_stack_depth + 4 * sizeof(void *)); #endif return nblocks; } static const char * selftest(void) { CAMELLIA_context ctx; byte scratch[16]; cipher_bulk_ops_t bulk_ops; /* These test vectors are from RFC-3713 */ static const byte plaintext[]= { 0x01,0x23,0x45,0x67,0x89,0xab,0xcd,0xef, 0xfe,0xdc,0xba,0x98,0x76,0x54,0x32,0x10 }; static const byte key_128[]= { 0x01,0x23,0x45,0x67,0x89,0xab,0xcd,0xef, 0xfe,0xdc,0xba,0x98,0x76,0x54,0x32,0x10 }; static const byte ciphertext_128[]= { 0x67,0x67,0x31,0x38,0x54,0x96,0x69,0x73, 0x08,0x57,0x06,0x56,0x48,0xea,0xbe,0x43 }; static const byte key_192[]= { 0x01,0x23,0x45,0x67,0x89,0xab,0xcd,0xef,0xfe,0xdc,0xba,0x98, 0x76,0x54,0x32,0x10,0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77 }; static const byte ciphertext_192[]= { 0xb4,0x99,0x34,0x01,0xb3,0xe9,0x96,0xf8, 0x4e,0xe5,0xce,0xe7,0xd7,0x9b,0x09,0xb9 }; static const byte key_256[]= { 0x01,0x23,0x45,0x67,0x89,0xab,0xcd,0xef,0xfe,0xdc,0xba, 0x98,0x76,0x54,0x32,0x10,0x00,0x11,0x22,0x33,0x44,0x55, 0x66,0x77,0x88,0x99,0xaa,0xbb,0xcc,0xdd,0xee,0xff }; static const byte ciphertext_256[]= { 0x9a,0xcc,0x23,0x7d,0xff,0x16,0xd7,0x6c, 0x20,0xef,0x7c,0x91,0x9e,0x3a,0x75,0x09 }; camellia_setkey(&ctx,key_128,sizeof(key_128),&bulk_ops); camellia_encrypt(&ctx,scratch,plaintext); if(memcmp(scratch,ciphertext_128,sizeof(ciphertext_128))!=0) return "CAMELLIA-128 test encryption failed."; camellia_decrypt(&ctx,scratch,scratch); if(memcmp(scratch,plaintext,sizeof(plaintext))!=0) return "CAMELLIA-128 test decryption failed."; camellia_setkey(&ctx,key_192,sizeof(key_192),&bulk_ops); camellia_encrypt(&ctx,scratch,plaintext); if(memcmp(scratch,ciphertext_192,sizeof(ciphertext_192))!=0) return "CAMELLIA-192 test encryption failed."; camellia_decrypt(&ctx,scratch,scratch); if(memcmp(scratch,plaintext,sizeof(plaintext))!=0) return "CAMELLIA-192 test decryption failed."; camellia_setkey(&ctx,key_256,sizeof(key_256),&bulk_ops); camellia_encrypt(&ctx,scratch,plaintext); if(memcmp(scratch,ciphertext_256,sizeof(ciphertext_256))!=0) return "CAMELLIA-256 test encryption failed."; camellia_decrypt(&ctx,scratch,scratch); if(memcmp(scratch,plaintext,sizeof(plaintext))!=0) return "CAMELLIA-256 test decryption failed."; return NULL; } /* These oids are from , retrieved May 1, 2007. 
*/ static const gcry_cipher_oid_spec_t camellia128_oids[] = { {"1.2.392.200011.61.1.1.1.2", GCRY_CIPHER_MODE_CBC}, {"0.3.4401.5.3.1.9.1", GCRY_CIPHER_MODE_ECB}, {"0.3.4401.5.3.1.9.3", GCRY_CIPHER_MODE_OFB}, {"0.3.4401.5.3.1.9.4", GCRY_CIPHER_MODE_CFB}, { NULL } }; static const gcry_cipher_oid_spec_t camellia192_oids[] = { {"1.2.392.200011.61.1.1.1.3", GCRY_CIPHER_MODE_CBC}, {"0.3.4401.5.3.1.9.21", GCRY_CIPHER_MODE_ECB}, {"0.3.4401.5.3.1.9.23", GCRY_CIPHER_MODE_OFB}, {"0.3.4401.5.3.1.9.24", GCRY_CIPHER_MODE_CFB}, { NULL } }; static const gcry_cipher_oid_spec_t camellia256_oids[] = { {"1.2.392.200011.61.1.1.1.4", GCRY_CIPHER_MODE_CBC}, {"0.3.4401.5.3.1.9.41", GCRY_CIPHER_MODE_ECB}, {"0.3.4401.5.3.1.9.43", GCRY_CIPHER_MODE_OFB}, {"0.3.4401.5.3.1.9.44", GCRY_CIPHER_MODE_CFB}, { NULL } }; gcry_cipher_spec_t _gcry_cipher_spec_camellia128 = { GCRY_CIPHER_CAMELLIA128, {0, 0}, "CAMELLIA128",NULL,camellia128_oids,CAMELLIA_BLOCK_SIZE,128, sizeof(CAMELLIA_context),camellia_setkey,camellia_encrypt,camellia_decrypt }; gcry_cipher_spec_t _gcry_cipher_spec_camellia192 = { GCRY_CIPHER_CAMELLIA192, {0, 0}, "CAMELLIA192",NULL,camellia192_oids,CAMELLIA_BLOCK_SIZE,192, sizeof(CAMELLIA_context),camellia_setkey,camellia_encrypt,camellia_decrypt }; gcry_cipher_spec_t _gcry_cipher_spec_camellia256 = { GCRY_CIPHER_CAMELLIA256, {0, 0}, "CAMELLIA256",NULL,camellia256_oids,CAMELLIA_BLOCK_SIZE,256, sizeof(CAMELLIA_context),camellia_setkey,camellia_encrypt,camellia_decrypt };
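/* Usage sketch (not part of the library): a minimal example of driving the
 * Camellia bulk code above through the public libgcrypt API.  Encrypting a
 * 64-block buffer in CTR mode should exercise _gcry_camellia_ctr_enc() and
 * whichever AVX/AVX2/AVX512 path camellia_setkey() selected for the CPU.
 * Key, counter and buffer contents are arbitrary placeholders.  Guarded
 * with #if 0, following the "Code sample" convention used earlier in this
 * file. */
#if 0
#include <stdio.h>
#include <string.h>
#include <gcrypt.h>

static void
camellia_ctr_demo (void)
{
  gcry_cipher_hd_t hd;
  unsigned char key[16] = { 0 };   /* placeholder 128-bit key     */
  unsigned char ctr[16] = { 0 };   /* placeholder counter block   */
  unsigned char buf[64 * 16];      /* 64 blocks hit the bulk path */
  gcry_error_t err;

  gcry_check_version (NULL);       /* initialize libgcrypt */
  memset (buf, 0xa5, sizeof buf);

  err = gcry_cipher_open (&hd, GCRY_CIPHER_CAMELLIA128,
                          GCRY_CIPHER_MODE_CTR, 0);
  if (err)
    {
      fprintf (stderr, "gcry_cipher_open: %s\n", gcry_strerror (err));
      return;
    }

  if (!err)
    err = gcry_cipher_setkey (hd, key, sizeof key);
  if (!err)
    err = gcry_cipher_setctr (hd, ctr, sizeof ctr);
  if (!err)
    /* In-place encryption: the output buffer doubles as the input. */
    err = gcry_cipher_encrypt (hd, buf, sizeof buf, NULL, 0);
  if (err)
    fprintf (stderr, "camellia CTR demo: %s\n", gcry_strerror (err));

  gcry_cipher_close (hd);
}
#endif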