
diff --git a/cipher/sm4-aesni-avx-amd64.S b/cipher/sm4-aesni-avx-amd64.S
index c09b205d..ca9be44a 100644
--- a/cipher/sm4-aesni-avx-amd64.S
+++ b/cipher/sm4-aesni-avx-amd64.S
@@ -1,992 +1,1058 @@
/* sm4-avx-aesni-amd64.S - AES-NI/AVX implementation of SM4 cipher
*
- * Copyright (C) 2020 Jussi Kivilinna <jussi.kivilinna@iki.fi>
+ * Copyright (C) 2020,2023 Jussi Kivilinna <jussi.kivilinna@iki.fi>
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/* Based on SM4 AES-NI work by Markku-Juhani O. Saarinen at:
* https://github.com/mjosaarinen/sm4ni
*/
#include <config.h>
#ifdef __x86_64
#if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \
defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && \
defined(ENABLE_AESNI_SUPPORT) && defined(ENABLE_AVX_SUPPORT)
#include "asm-common-amd64.h"
/* vector registers */
#define RX0 %xmm0
#define RX1 %xmm1
#define MASK_4BIT %xmm2
#define RTMP0 %xmm3
#define RTMP1 %xmm4
#define RTMP2 %xmm5
#define RTMP3 %xmm6
#define RTMP4 %xmm7
#define RA0 %xmm8
#define RA1 %xmm9
#define RA2 %xmm10
#define RA3 %xmm11
#define RB0 %xmm12
#define RB1 %xmm13
#define RB2 %xmm14
#define RB3 %xmm15
#define RNOT %xmm0
#define RBSWAP %xmm1
/**********************************************************************
helper macros
**********************************************************************/
/* Transpose four 32-bit words between 128-bit vectors. */
#define transpose_4x4(x0, x1, x2, x3, t1, t2) \
vpunpckhdq x1, x0, t2; \
vpunpckldq x1, x0, x0; \
\
vpunpckldq x3, x2, t1; \
vpunpckhdq x3, x2, x2; \
\
vpunpckhqdq t1, x0, x1; \
vpunpcklqdq t1, x0, x0; \
\
vpunpckhqdq x2, t2, x3; \
vpunpcklqdq x2, t2, x2;
/* pre-SubByte transform. */
#define transform_pre(x, lo_t, hi_t, mask4bit, tmp0) \
vpand x, mask4bit, tmp0; \
vpandn x, mask4bit, x; \
vpsrld $4, x, x; \
\
vpshufb tmp0, lo_t, tmp0; \
vpshufb x, hi_t, x; \
vpxor tmp0, x, x;
/* post-SubByte transform. Note: x has been XOR'ed with mask4bit by
* 'vaesenclast' instruction. */
#define transform_post(x, lo_t, hi_t, mask4bit, tmp0) \
vpandn mask4bit, x, tmp0; \
vpsrld $4, x, x; \
vpand x, mask4bit, x; \
\
vpshufb tmp0, lo_t, tmp0; \
vpshufb x, hi_t, x; \
vpxor tmp0, x, x;
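The two transform macros implement a byte-wise affine map with vpshufb, which can only index 16-entry tables: each byte is split into nibbles, each nibble selects an entry from its own table, and the two results are XORed. A minimal C sketch of that idea, with the table contents left as parameters (in the assembly they come from the .Lpre_tf_*/.Lpost_tf_* constants below); the helper name is illustrative, not libgcrypt API:

#include <stdint.h>

/* result = lo_t[low nibble] ^ hi_t[high nibble], the scalar equivalent
 * of the vpand/vpandn/vpsrld/vpshufb/vpxor sequence above. */
static uint8_t nibble_affine(uint8_t x, const uint8_t lo_t[16],
                             const uint8_t hi_t[16])
{
  return lo_t[x & 0x0f] ^ hi_t[x >> 4];
}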
/**********************************************************************
4-way && 8-way SM4 with AES-NI and AVX
**********************************************************************/
SECTION_RODATA
.align 16
ELF(.type _sm4_aesni_avx_consts,@object)
_sm4_aesni_avx_consts:
/*
* Following four affine transform look-up tables are from work by
* Markku-Juhani O. Saarinen, at https://github.com/mjosaarinen/sm4ni
*
* These allow exposing SM4 S-Box from AES SubByte.
*/
/* pre-SubByte affine transform, from SM4 field to AES field. */
.Lpre_tf_lo_s:
.quad 0x9197E2E474720701, 0xC7C1B4B222245157
.Lpre_tf_hi_s:
.quad 0xE240AB09EB49A200, 0xF052B91BF95BB012
/* post-SubByte affine transform, from AES field to SM4 field. */
.Lpost_tf_lo_s:
.quad 0x5B67F2CEA19D0834, 0xEDD14478172BBE82
.Lpost_tf_hi_s:
.quad 0xAE7201DD73AFDC00, 0x11CDBE62CC1063BF
/* For isolating SubBytes from AESENCLAST, inverse shift row */
.Linv_shift_row:
.byte 0x00, 0x0d, 0x0a, 0x07, 0x04, 0x01, 0x0e, 0x0b
.byte 0x08, 0x05, 0x02, 0x0f, 0x0c, 0x09, 0x06, 0x03
/* Inverse shift row + Rotate left by 8 bits on 32-bit words with vpshufb */
.Linv_shift_row_rol_8:
.byte 0x07, 0x00, 0x0d, 0x0a, 0x0b, 0x04, 0x01, 0x0e
.byte 0x0f, 0x08, 0x05, 0x02, 0x03, 0x0c, 0x09, 0x06
/* Inverse shift row + Rotate left by 16 bits on 32-bit words with vpshufb */
.Linv_shift_row_rol_16:
.byte 0x0a, 0x07, 0x00, 0x0d, 0x0e, 0x0b, 0x04, 0x01
.byte 0x02, 0x0f, 0x08, 0x05, 0x06, 0x03, 0x0c, 0x09
/* Inverse shift row + Rotate left by 24 bits on 32-bit words with vpshufb */
.Linv_shift_row_rol_24:
.byte 0x0d, 0x0a, 0x07, 0x00, 0x01, 0x0e, 0x0b, 0x04
.byte 0x05, 0x02, 0x0f, 0x08, 0x09, 0x06, 0x03, 0x0c
/* For CTR-mode IV byteswap */
.Lbswap128_mask:
.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
/* For input word byte-swap */
.Lbswap32_mask:
.byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
+/* CTR byte addition constants */
+.Lbige_addb_1:
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1
+.Lbige_addb_2:
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2
+.Lbige_addb_3:
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3
+.Lbige_addb_4:
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4
+.Lbige_addb_5:
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5
+.Lbige_addb_6:
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6
+.Lbige_addb_7:
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7
+.Lbige_addb_8:
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8
+.Lbige_addb_9:
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9
+.Lbige_addb_10:
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10
+.Lbige_addb_11:
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11
+.Lbige_addb_12:
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12
+.Lbige_addb_13:
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 13
+.Lbige_addb_14:
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 14
+.Lbige_addb_15:
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15
+
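The new .Lbige_addb_N constants support a cheaper CTR path: when the last byte of the big-endian counter cannot wrap while producing the current batch, counter+N differs from the counter only in that byte, so each block's counter can be formed with a single vpaddb instead of a byteswap, 64-bit add and byteswap back. A hedged C sketch of the per-block effect (function name is illustrative):

#include <stdint.h>
#include <string.h>

/* Valid only when ctr[15] + n does not exceed 0xff, which is what the
 * cmpb check in the CTR entry points below guarantees. */
static void ctr_block_byteadd(uint8_t out[16], const uint8_t ctr[16],
                              unsigned int n)
{
  memcpy(out, ctr, 16);
  out[15] = (uint8_t)(out[15] + n);
}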
.align 4
/* 4-bit mask */
.L0f0f0f0f:
.long 0x0f0f0f0f
.text
.align 16
.globl _gcry_sm4_aesni_avx_expand_key
ELF(.type _gcry_sm4_aesni_avx_expand_key,@function;)
_gcry_sm4_aesni_avx_expand_key:
/* input:
* %rdi: 128-bit key
* %rsi: rkey_enc
* %rdx: rkey_dec
* %rcx: fk array
* %r8: ck array
*/
CFI_STARTPROC();
vmovd 0*4(%rdi), RA0;
vmovd 1*4(%rdi), RA1;
vmovd 2*4(%rdi), RA2;
vmovd 3*4(%rdi), RA3;
vmovdqa .Lbswap32_mask rRIP, RTMP2;
vpshufb RTMP2, RA0, RA0;
vpshufb RTMP2, RA1, RA1;
vpshufb RTMP2, RA2, RA2;
vpshufb RTMP2, RA3, RA3;
vmovd 0*4(%rcx), RB0;
vmovd 1*4(%rcx), RB1;
vmovd 2*4(%rcx), RB2;
vmovd 3*4(%rcx), RB3;
vpxor RB0, RA0, RA0;
vpxor RB1, RA1, RA1;
vpxor RB2, RA2, RA2;
vpxor RB3, RA3, RA3;
vbroadcastss .L0f0f0f0f rRIP, MASK_4BIT;
vmovdqa .Lpre_tf_lo_s rRIP, RTMP4;
vmovdqa .Lpre_tf_hi_s rRIP, RB0;
vmovdqa .Lpost_tf_lo_s rRIP, RB1;
vmovdqa .Lpost_tf_hi_s rRIP, RB2;
vmovdqa .Linv_shift_row rRIP, RB3;
#define ROUND(round, s0, s1, s2, s3) \
vbroadcastss (4*(round))(%r8), RX0; \
vpxor s1, RX0, RX0; \
vpxor s2, RX0, RX0; \
vpxor s3, RX0, RX0; /* s1 ^ s2 ^ s3 ^ rk */ \
\
/* sbox, non-linear part */ \
transform_pre(RX0, RTMP4, RB0, MASK_4BIT, RTMP0); \
vaesenclast MASK_4BIT, RX0, RX0; \
transform_post(RX0, RB1, RB2, MASK_4BIT, RTMP0); \
\
/* linear part */ \
vpshufb RB3, RX0, RX0; \
vpxor RX0, s0, s0; /* s0 ^ x */ \
vpslld $13, RX0, RTMP0; \
vpsrld $19, RX0, RTMP1; \
vpslld $23, RX0, RTMP2; \
vpsrld $9, RX0, RTMP3; \
vpxor RTMP0, RTMP1, RTMP1; \
vpxor RTMP2, RTMP3, RTMP3; \
vpxor RTMP1, s0, s0; /* s0 ^ x ^ rol(x,13) */ \
vpxor RTMP3, s0, s0; /* s0 ^ x ^ rol(x,13) ^ rol(x,23) */
leaq (32*4)(%r8), %rax;
leaq (32*4)(%rdx), %rdx;
.align 16
.Lroundloop_expand_key:
leaq (-4*4)(%rdx), %rdx;
ROUND(0, RA0, RA1, RA2, RA3);
ROUND(1, RA1, RA2, RA3, RA0);
ROUND(2, RA2, RA3, RA0, RA1);
ROUND(3, RA3, RA0, RA1, RA2);
leaq (4*4)(%r8), %r8;
vmovd RA0, (0*4)(%rsi);
vmovd RA1, (1*4)(%rsi);
vmovd RA2, (2*4)(%rsi);
vmovd RA3, (3*4)(%rsi);
vmovd RA0, (3*4)(%rdx);
vmovd RA1, (2*4)(%rdx);
vmovd RA2, (1*4)(%rdx);
vmovd RA3, (0*4)(%rdx);
leaq (4*4)(%rsi), %rsi;
cmpq %rax, %r8;
jne .Lroundloop_expand_key;
#undef ROUND
vzeroall;
ret_spec_stop;
CFI_ENDPROC();
ELF(.size _gcry_sm4_aesni_avx_expand_key,.-_gcry_sm4_aesni_avx_expand_key;)
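The expand-key ROUND macro mirrors the SM4 key schedule: with the FK constants folded in by the vpxor block above, each step computes rk[i] = k0 ^ L'(S(k1 ^ k2 ^ k3 ^ ck[i])), where L'(x) = x ^ rol(x,13) ^ rol(x,23) is exactly the shift/XOR tail of the macro. A small reference sketch of that linear part (names are illustrative, not libgcrypt API):

#include <stdint.h>

static uint32_t rol32(uint32_t x, unsigned int n)
{
  return (x << n) | (x >> (32 - n));
}

/* Key-schedule diffusion: x ^ rol(x,13) ^ rol(x,23), matching the
 * vpslld/vpsrld pairs ($13/$19 and $23/$9) in the ROUND macro. */
static uint32_t sm4_key_sched_l(uint32_t x)
{
  return x ^ rol32(x, 13) ^ rol32(x, 23);
}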
.align 16
ELF(.type sm4_aesni_avx_crypt_blk1_4,@function;)
sm4_aesni_avx_crypt_blk1_4:
/* input:
* %rdi: round key array, CTX
* %rsi: dst (1..4 blocks)
* %rdx: src (1..4 blocks)
* %rcx: num blocks (1..4)
*/
CFI_STARTPROC();
vmovdqu 0*16(%rdx), RA0;
vmovdqa RA0, RA1;
vmovdqa RA0, RA2;
vmovdqa RA0, RA3;
cmpq $2, %rcx;
jb .Lblk4_load_input_done;
vmovdqu 1*16(%rdx), RA1;
je .Lblk4_load_input_done;
vmovdqu 2*16(%rdx), RA2;
cmpq $3, %rcx;
je .Lblk4_load_input_done;
vmovdqu 3*16(%rdx), RA3;
.Lblk4_load_input_done:
vmovdqa .Lbswap32_mask rRIP, RTMP2;
vpshufb RTMP2, RA0, RA0;
vpshufb RTMP2, RA1, RA1;
vpshufb RTMP2, RA2, RA2;
vpshufb RTMP2, RA3, RA3;
vbroadcastss .L0f0f0f0f rRIP, MASK_4BIT;
vmovdqa .Lpre_tf_lo_s rRIP, RTMP4;
vmovdqa .Lpre_tf_hi_s rRIP, RB0;
vmovdqa .Lpost_tf_lo_s rRIP, RB1;
vmovdqa .Lpost_tf_hi_s rRIP, RB2;
vmovdqa .Linv_shift_row rRIP, RB3;
vmovdqa .Linv_shift_row_rol_8 rRIP, RTMP2;
vmovdqa .Linv_shift_row_rol_16 rRIP, RTMP3;
transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1);
#define ROUND(round, s0, s1, s2, s3) \
vbroadcastss (4*(round))(%rdi), RX0; \
vpxor s1, RX0, RX0; \
vpxor s2, RX0, RX0; \
vpxor s3, RX0, RX0; /* s1 ^ s2 ^ s3 ^ rk */ \
\
/* sbox, non-linear part */ \
transform_pre(RX0, RTMP4, RB0, MASK_4BIT, RTMP0); \
vaesenclast MASK_4BIT, RX0, RX0; \
transform_post(RX0, RB1, RB2, MASK_4BIT, RTMP0); \
\
/* linear part */ \
vpshufb RB3, RX0, RTMP0; \
vpxor RTMP0, s0, s0; /* s0 ^ x */ \
vpshufb RTMP2, RX0, RTMP1; \
vpxor RTMP1, RTMP0, RTMP0; /* x ^ rol(x,8) */ \
vpshufb RTMP3, RX0, RTMP1; \
vpxor RTMP1, RTMP0, RTMP0; /* x ^ rol(x,8) ^ rol(x,16) */ \
vpshufb .Linv_shift_row_rol_24 rRIP, RX0, RTMP1; \
vpxor RTMP1, s0, s0; /* s0 ^ x ^ rol(x,24) */ \
vpslld $2, RTMP0, RTMP1; \
vpsrld $30, RTMP0, RTMP0; \
vpxor RTMP0, s0, s0; \
vpxor RTMP1, s0, s0; /* s0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */
leaq (32*4)(%rdi), %rax;
.align 16
.Lroundloop_blk4:
ROUND(0, RA0, RA1, RA2, RA3);
ROUND(1, RA1, RA2, RA3, RA0);
ROUND(2, RA2, RA3, RA0, RA1);
ROUND(3, RA3, RA0, RA1, RA2);
leaq (4*4)(%rdi), %rdi;
cmpq %rax, %rdi;
jne .Lroundloop_blk4;
#undef ROUND
vmovdqa .Lbswap128_mask rRIP, RTMP2;
transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1);
vpshufb RTMP2, RA0, RA0;
vpshufb RTMP2, RA1, RA1;
vpshufb RTMP2, RA2, RA2;
vpshufb RTMP2, RA3, RA3;
vmovdqu RA0, 0*16(%rsi);
cmpq $2, %rcx;
jb .Lblk4_store_output_done;
vmovdqu RA1, 1*16(%rsi);
je .Lblk4_store_output_done;
vmovdqu RA2, 2*16(%rsi);
cmpq $3, %rcx;
je .Lblk4_store_output_done;
vmovdqu RA3, 3*16(%rsi);
.Lblk4_store_output_done:
vzeroall;
xorl %eax, %eax;
ret_spec_stop;
CFI_ENDPROC();
ELF(.size sm4_aesni_avx_crypt_blk1_4,.-sm4_aesni_avx_crypt_blk1_4;)
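The encryption ROUND macro follows the same pattern but with the cipher's diffusion L(x) = x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24); the assembly gets the 2/10/18 rotations cheaply by first forming t = x ^ rol(x,8) ^ rol(x,16) and then rotating t left by 2 (the vpslld $2 / vpsrld $30 pair). Reference form, as a sketch:

#include <stdint.h>

static uint32_t rol32(uint32_t x, unsigned int n)
{
  return (x << n) | (x >> (32 - n));
}

/* Round diffusion matching the ROUND macro's inline comments. */
static uint32_t sm4_round_l(uint32_t x)
{
  return x ^ rol32(x, 2) ^ rol32(x, 10) ^ rol32(x, 18) ^ rol32(x, 24);
}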
.align 16
ELF(.type __sm4_crypt_blk8,@function;)
__sm4_crypt_blk8:
/* input:
* %rdi: round key array, CTX
* RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: eight parallel
* ciphertext blocks
* output:
* RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: eight parallel plaintext
* blocks
*/
CFI_STARTPROC();
vmovdqa .Lbswap32_mask rRIP, RTMP2;
vpshufb RTMP2, RA0, RA0;
vpshufb RTMP2, RA1, RA1;
vpshufb RTMP2, RA2, RA2;
vpshufb RTMP2, RA3, RA3;
vpshufb RTMP2, RB0, RB0;
vpshufb RTMP2, RB1, RB1;
vpshufb RTMP2, RB2, RB2;
vpshufb RTMP2, RB3, RB3;
vbroadcastss .L0f0f0f0f rRIP, MASK_4BIT;
transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1);
transpose_4x4(RB0, RB1, RB2, RB3, RTMP0, RTMP1);
#define ROUND(round, s0, s1, s2, s3, r0, r1, r2, r3) \
vbroadcastss (4*(round))(%rdi), RX0; \
vmovdqa .Lpre_tf_lo_s rRIP, RTMP4; \
vmovdqa .Lpre_tf_hi_s rRIP, RTMP1; \
vmovdqa RX0, RX1; \
vpxor s1, RX0, RX0; \
vpxor s2, RX0, RX0; \
vpxor s3, RX0, RX0; /* s1 ^ s2 ^ s3 ^ rk */ \
vmovdqa .Lpost_tf_lo_s rRIP, RTMP2; \
vmovdqa .Lpost_tf_hi_s rRIP, RTMP3; \
vpxor r1, RX1, RX1; \
vpxor r2, RX1, RX1; \
vpxor r3, RX1, RX1; /* r1 ^ r2 ^ r3 ^ rk */ \
\
/* sbox, non-linear part */ \
transform_pre(RX0, RTMP4, RTMP1, MASK_4BIT, RTMP0); \
transform_pre(RX1, RTMP4, RTMP1, MASK_4BIT, RTMP0); \
vmovdqa .Linv_shift_row rRIP, RTMP4; \
vaesenclast MASK_4BIT, RX0, RX0; \
vaesenclast MASK_4BIT, RX1, RX1; \
transform_post(RX0, RTMP2, RTMP3, MASK_4BIT, RTMP0); \
transform_post(RX1, RTMP2, RTMP3, MASK_4BIT, RTMP0); \
\
/* linear part */ \
vpshufb RTMP4, RX0, RTMP0; \
vpxor RTMP0, s0, s0; /* s0 ^ x */ \
vpshufb RTMP4, RX1, RTMP2; \
vmovdqa .Linv_shift_row_rol_8 rRIP, RTMP4; \
vpxor RTMP2, r0, r0; /* r0 ^ x */ \
vpshufb RTMP4, RX0, RTMP1; \
vpxor RTMP1, RTMP0, RTMP0; /* x ^ rol(x,8) */ \
vpshufb RTMP4, RX1, RTMP3; \
vmovdqa .Linv_shift_row_rol_16 rRIP, RTMP4; \
vpxor RTMP3, RTMP2, RTMP2; /* x ^ rol(x,8) */ \
vpshufb RTMP4, RX0, RTMP1; \
vpxor RTMP1, RTMP0, RTMP0; /* x ^ rol(x,8) ^ rol(x,16) */ \
vpshufb RTMP4, RX1, RTMP3; \
vmovdqa .Linv_shift_row_rol_24 rRIP, RTMP4; \
vpxor RTMP3, RTMP2, RTMP2; /* x ^ rol(x,8) ^ rol(x,16) */ \
vpshufb RTMP4, RX0, RTMP1; \
vpxor RTMP1, s0, s0; /* s0 ^ x ^ rol(x,24) */ \
vpslld $2, RTMP0, RTMP1; \
vpsrld $30, RTMP0, RTMP0; \
vpxor RTMP0, s0, s0; \
vpxor RTMP1, s0, s0; /* s0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \
vpshufb RTMP4, RX1, RTMP3; \
vpxor RTMP3, r0, r0; /* r0 ^ x ^ rol(x,24) */ \
vpslld $2, RTMP2, RTMP3; \
vpsrld $30, RTMP2, RTMP2; \
vpxor RTMP2, r0, r0; \
vpxor RTMP3, r0, r0; /* r0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */
leaq (32*4)(%rdi), %rax;
.align 16
.Lroundloop_blk8:
ROUND(0, RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3);
ROUND(1, RA1, RA2, RA3, RA0, RB1, RB2, RB3, RB0);
ROUND(2, RA2, RA3, RA0, RA1, RB2, RB3, RB0, RB1);
ROUND(3, RA3, RA0, RA1, RA2, RB3, RB0, RB1, RB2);
leaq (4*4)(%rdi), %rdi;
cmpq %rax, %rdi;
jne .Lroundloop_blk8;
#undef ROUND
vmovdqa .Lbswap128_mask rRIP, RTMP2;
transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1);
transpose_4x4(RB0, RB1, RB2, RB3, RTMP0, RTMP1);
vpshufb RTMP2, RA0, RA0;
vpshufb RTMP2, RA1, RA1;
vpshufb RTMP2, RA2, RA2;
vpshufb RTMP2, RA3, RA3;
vpshufb RTMP2, RB0, RB0;
vpshufb RTMP2, RB1, RB1;
vpshufb RTMP2, RB2, RB2;
vpshufb RTMP2, RB3, RB3;
ret_spec_stop;
CFI_ENDPROC();
ELF(.size __sm4_crypt_blk8,.-__sm4_crypt_blk8;)
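The round loop above runs eight times with four ROUND expansions each, i.e. 32 rounds, and the rotation of the s0..s3 / r0..r3 arguments plays the role of the shifting state in the reference algorithm. A compact sketch of that structure for one block, with the combined S-box-plus-L step passed in as a callback since it is provided by the macros above:

#include <stdint.h>

static void sm4_crypt_block_ref(const uint32_t rk[32], uint32_t s[4],
                                uint32_t (*sm4_t)(uint32_t))
{
  for (int i = 0; i < 32; i++)
    {
      uint32_t x = s[0] ^ sm4_t(s[1] ^ s[2] ^ s[3] ^ rk[i]);
      s[0] = s[1]; s[1] = s[2]; s[2] = s[3]; s[3] = x;
    }
  /* The final output is the reversed state (s[3], s[2], s[1], s[0]);
   * in the assembly that reversal is folded into the final
   * .Lbswap128_mask byte shuffle. */
}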
.align 16
.globl _gcry_sm4_aesni_avx_crypt_blk1_8
ELF(.type _gcry_sm4_aesni_avx_crypt_blk1_8,@function;)
_gcry_sm4_aesni_avx_crypt_blk1_8:
/* input:
* %rdi: round key array, CTX
* %rsi: dst (1..8 blocks)
* %rdx: src (1..8 blocks)
* %rcx: num blocks (1..8)
*/
CFI_STARTPROC();
cmpq $5, %rcx;
jb sm4_aesni_avx_crypt_blk1_4;
vmovdqu (0 * 16)(%rdx), RA0;
vmovdqu (1 * 16)(%rdx), RA1;
vmovdqu (2 * 16)(%rdx), RA2;
vmovdqu (3 * 16)(%rdx), RA3;
vmovdqu (4 * 16)(%rdx), RB0;
vmovdqa RB0, RB1;
vmovdqa RB0, RB2;
vmovdqa RB0, RB3;
je .Lblk8_load_input_done;
vmovdqu (5 * 16)(%rdx), RB1;
cmpq $7, %rcx;
jb .Lblk8_load_input_done;
vmovdqu (6 * 16)(%rdx), RB2;
je .Lblk8_load_input_done;
vmovdqu (7 * 16)(%rdx), RB3;
.Lblk8_load_input_done:
call __sm4_crypt_blk8;
cmpq $6, %rcx;
vmovdqu RA0, (0 * 16)(%rsi);
vmovdqu RA1, (1 * 16)(%rsi);
vmovdqu RA2, (2 * 16)(%rsi);
vmovdqu RA3, (3 * 16)(%rsi);
vmovdqu RB0, (4 * 16)(%rsi);
jb .Lblk8_store_output_done;
vmovdqu RB1, (5 * 16)(%rsi);
je .Lblk8_store_output_done;
vmovdqu RB2, (6 * 16)(%rsi);
cmpq $7, %rcx;
je .Lblk8_store_output_done;
vmovdqu RB3, (7 * 16)(%rsi);
.Lblk8_store_output_done:
vzeroall;
xorl %eax, %eax;
ret_spec_stop;
CFI_ENDPROC();
ELF(.size _gcry_sm4_aesni_avx_crypt_blk1_8,.-_gcry_sm4_aesni_avx_crypt_blk1_8;)
.align 16
.globl _gcry_sm4_aesni_avx_ctr_enc
ELF(.type _gcry_sm4_aesni_avx_ctr_enc,@function;)
_gcry_sm4_aesni_avx_ctr_enc:
/* input:
* %rdi: round key array, CTX
* %rsi: dst (8 blocks)
* %rdx: src (8 blocks)
* %rcx: iv (big endian, 128bit)
*/
CFI_STARTPROC();
+ cmpb $(0x100 - 8), 15(%rcx);
+ jbe .Lctr_byteadd;
+
/* load IV and byteswap */
vmovdqu (%rcx), RA0;
vmovdqa .Lbswap128_mask rRIP, RBSWAP;
vpshufb RBSWAP, RA0, RTMP0; /* be => le */
vpcmpeqd RNOT, RNOT, RNOT;
vpsrldq $8, RNOT, RNOT; /* low: -1, high: 0 */
#define inc_le128(x, minus_one, tmp) \
vpcmpeqq minus_one, x, tmp; \
vpsubq minus_one, x, x; \
vpslldq $8, tmp, tmp; \
vpsubq tmp, x, x;
/* construct IVs */
inc_le128(RTMP0, RNOT, RTMP2); /* +1 */
vpshufb RBSWAP, RTMP0, RA1;
inc_le128(RTMP0, RNOT, RTMP2); /* +2 */
vpshufb RBSWAP, RTMP0, RA2;
inc_le128(RTMP0, RNOT, RTMP2); /* +3 */
vpshufb RBSWAP, RTMP0, RA3;
inc_le128(RTMP0, RNOT, RTMP2); /* +4 */
vpshufb RBSWAP, RTMP0, RB0;
inc_le128(RTMP0, RNOT, RTMP2); /* +5 */
vpshufb RBSWAP, RTMP0, RB1;
inc_le128(RTMP0, RNOT, RTMP2); /* +6 */
vpshufb RBSWAP, RTMP0, RB2;
inc_le128(RTMP0, RNOT, RTMP2); /* +7 */
vpshufb RBSWAP, RTMP0, RB3;
inc_le128(RTMP0, RNOT, RTMP2); /* +8 */
vpshufb RBSWAP, RTMP0, RTMP1;
/* store new IV */
vmovdqu RTMP1, (%rcx);
+.align 8
+.Lload_ctr_done:
call __sm4_crypt_blk8;
vpxor (0 * 16)(%rdx), RA0, RA0;
vpxor (1 * 16)(%rdx), RA1, RA1;
vpxor (2 * 16)(%rdx), RA2, RA2;
vpxor (3 * 16)(%rdx), RA3, RA3;
vpxor (4 * 16)(%rdx), RB0, RB0;
vpxor (5 * 16)(%rdx), RB1, RB1;
vpxor (6 * 16)(%rdx), RB2, RB2;
vpxor (7 * 16)(%rdx), RB3, RB3;
vmovdqu RA0, (0 * 16)(%rsi);
vmovdqu RA1, (1 * 16)(%rsi);
vmovdqu RA2, (2 * 16)(%rsi);
vmovdqu RA3, (3 * 16)(%rsi);
vmovdqu RB0, (4 * 16)(%rsi);
vmovdqu RB1, (5 * 16)(%rsi);
vmovdqu RB2, (6 * 16)(%rsi);
vmovdqu RB3, (7 * 16)(%rsi);
vzeroall;
ret_spec_stop;
+ .align 8
+
+.Lctr_byteadd_full_ctr_carry:
+ movq 8(%rcx), %r11;
+ movq (%rcx), %r10;
+ bswapq %r11;
+ bswapq %r10;
+ addq $8, %r11;
+ adcq $0, %r10;
+ bswapq %r11;
+ bswapq %r10;
+ movq %r11, 8(%rcx);
+ movq %r10, (%rcx);
+ jmp .Lctr_byteadd_xmm;
+.align 8
+.Lctr_byteadd:
+ vmovdqu (%rcx), RA0;
+ je .Lctr_byteadd_full_ctr_carry;
+ addb $8, 15(%rcx);
+.Lctr_byteadd_xmm:
+ vpaddb .Lbige_addb_1 rRIP, RA0, RA1;
+ vpaddb .Lbige_addb_2 rRIP, RA0, RA2;
+ vpaddb .Lbige_addb_3 rRIP, RA0, RA3;
+ vpaddb .Lbige_addb_4 rRIP, RA0, RB0;
+ vpaddb .Lbige_addb_5 rRIP, RA0, RB1;
+ vpaddb .Lbige_addb_6 rRIP, RA0, RB2;
+ vpaddb .Lbige_addb_7 rRIP, RA0, RB3;
+
+ jmp .Lload_ctr_done;
CFI_ENDPROC();
ELF(.size _gcry_sm4_aesni_avx_ctr_enc,.-_gcry_sm4_aesni_avx_ctr_enc;)
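The byte-addition fast path added above is gated by a single compare on the last IV byte: if it is at most 0x100 - 8 the eight block counters can be formed with vpaddb, and only when it sits exactly on that boundary does the stored IV update need a full 128-bit carry (the bswap/addq/adcq block). A hedged C sketch of the host-side logic, with illustrative names:

#include <stdint.h>

/* Mirrors the 'cmpb $(0x100 - nblocks), 15(%rcx); jbe' check
 * (nblocks is 8 here, 16 in the AVX2 and GFNI variants). */
static int ctr_can_use_byteadd(const uint8_t iv[16], unsigned int nblocks)
{
  return iv[15] <= (uint8_t)(0x100 - nblocks);
}

/* Big-endian 128-bit 'iv += n' for small n, the generic equivalent of
 * the .Lctr_byteadd_full_ctr_carry bswap/addq/adcq sequence. */
static void ctr_add_be128(uint8_t iv[16], unsigned int n)
{
  unsigned int carry = n;
  for (int i = 15; i >= 0 && carry; i--)
    {
      carry += iv[i];
      iv[i] = (uint8_t)carry;
      carry >>= 8;
    }
}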
.align 16
.globl _gcry_sm4_aesni_avx_cbc_dec
ELF(.type _gcry_sm4_aesni_avx_cbc_dec,@function;)
_gcry_sm4_aesni_avx_cbc_dec:
/* input:
* %rdi: round key array, CTX
* %rsi: dst (8 blocks)
* %rdx: src (8 blocks)
* %rcx: iv
*/
CFI_STARTPROC();
vmovdqu (0 * 16)(%rdx), RA0;
vmovdqu (1 * 16)(%rdx), RA1;
vmovdqu (2 * 16)(%rdx), RA2;
vmovdqu (3 * 16)(%rdx), RA3;
vmovdqu (4 * 16)(%rdx), RB0;
vmovdqu (5 * 16)(%rdx), RB1;
vmovdqu (6 * 16)(%rdx), RB2;
vmovdqu (7 * 16)(%rdx), RB3;
call __sm4_crypt_blk8;
vmovdqu (7 * 16)(%rdx), RNOT;
vpxor (%rcx), RA0, RA0;
vpxor (0 * 16)(%rdx), RA1, RA1;
vpxor (1 * 16)(%rdx), RA2, RA2;
vpxor (2 * 16)(%rdx), RA3, RA3;
vpxor (3 * 16)(%rdx), RB0, RB0;
vpxor (4 * 16)(%rdx), RB1, RB1;
vpxor (5 * 16)(%rdx), RB2, RB2;
vpxor (6 * 16)(%rdx), RB3, RB3;
vmovdqu RNOT, (%rcx); /* store new IV */
vmovdqu RA0, (0 * 16)(%rsi);
vmovdqu RA1, (1 * 16)(%rsi);
vmovdqu RA2, (2 * 16)(%rsi);
vmovdqu RA3, (3 * 16)(%rsi);
vmovdqu RB0, (4 * 16)(%rsi);
vmovdqu RB1, (5 * 16)(%rsi);
vmovdqu RB2, (6 * 16)(%rsi);
vmovdqu RB3, (7 * 16)(%rsi);
vzeroall;
ret_spec_stop;
CFI_ENDPROC();
ELF(.size _gcry_sm4_aesni_avx_cbc_dec,.-_gcry_sm4_aesni_avx_cbc_dec;)
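After __sm4_crypt_blk8 the CBC path applies the chaining in place: P_i = D(C_i) ^ C_{i-1}, with the IV standing in for C_0's predecessor, and the last ciphertext block (reloaded into RNOT) becoming the new IV. A reference sketch of that post-processing, assuming the decrypted blocks are already in the output buffer:

#include <stdint.h>
#include <string.h>

static void cbc_dec_chain(uint8_t *out /* holds D(C_i) blocks */,
                          const uint8_t *ciphertext,
                          uint8_t iv[16], size_t nblocks)
{
  uint8_t next_iv[16];
  memcpy(next_iv, ciphertext + (nblocks - 1) * 16, 16);
  for (size_t i = nblocks; i-- > 0; )
    {
      const uint8_t *prev = i ? ciphertext + (i - 1) * 16 : iv;
      for (int j = 0; j < 16; j++)
        out[i * 16 + j] ^= prev[j];
    }
  memcpy(iv, next_iv, 16);
}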
.align 16
.globl _gcry_sm4_aesni_avx_cfb_dec
ELF(.type _gcry_sm4_aesni_avx_cfb_dec,@function;)
_gcry_sm4_aesni_avx_cfb_dec:
/* input:
* %rdi: round key array, CTX
* %rsi: dst (8 blocks)
* %rdx: src (8 blocks)
* %rcx: iv
*/
CFI_STARTPROC();
/* Load input */
vmovdqu (%rcx), RA0;
vmovdqu 0 * 16(%rdx), RA1;
vmovdqu 1 * 16(%rdx), RA2;
vmovdqu 2 * 16(%rdx), RA3;
vmovdqu 3 * 16(%rdx), RB0;
vmovdqu 4 * 16(%rdx), RB1;
vmovdqu 5 * 16(%rdx), RB2;
vmovdqu 6 * 16(%rdx), RB3;
/* Update IV */
vmovdqu 7 * 16(%rdx), RNOT;
vmovdqu RNOT, (%rcx);
call __sm4_crypt_blk8;
vpxor (0 * 16)(%rdx), RA0, RA0;
vpxor (1 * 16)(%rdx), RA1, RA1;
vpxor (2 * 16)(%rdx), RA2, RA2;
vpxor (3 * 16)(%rdx), RA3, RA3;
vpxor (4 * 16)(%rdx), RB0, RB0;
vpxor (5 * 16)(%rdx), RB1, RB1;
vpxor (6 * 16)(%rdx), RB2, RB2;
vpxor (7 * 16)(%rdx), RB3, RB3;
vmovdqu RA0, (0 * 16)(%rsi);
vmovdqu RA1, (1 * 16)(%rsi);
vmovdqu RA2, (2 * 16)(%rsi);
vmovdqu RA3, (3 * 16)(%rsi);
vmovdqu RB0, (4 * 16)(%rsi);
vmovdqu RB1, (5 * 16)(%rsi);
vmovdqu RB2, (6 * 16)(%rsi);
vmovdqu RB3, (7 * 16)(%rsi);
vzeroall;
ret_spec_stop;
CFI_ENDPROC();
ELF(.size _gcry_sm4_aesni_avx_cfb_dec,.-_gcry_sm4_aesni_avx_cfb_dec;)
.align 16
.globl _gcry_sm4_aesni_avx_ocb_enc
ELF(.type _gcry_sm4_aesni_avx_ocb_enc,@function;)
_gcry_sm4_aesni_avx_ocb_enc:
/* input:
* %rdi: round key array, CTX
* %rsi: dst (8 blocks)
* %rdx: src (8 blocks)
* %rcx: offset
* %r8 : checksum
* %r9 : L pointers (void *L[8])
*/
CFI_STARTPROC();
subq $(4 * 8), %rsp;
CFI_ADJUST_CFA_OFFSET(4 * 8);
movq %r10, (0 * 8)(%rsp);
movq %r11, (1 * 8)(%rsp);
movq %r12, (2 * 8)(%rsp);
movq %r13, (3 * 8)(%rsp);
CFI_REL_OFFSET(%r10, 0 * 8);
CFI_REL_OFFSET(%r11, 1 * 8);
CFI_REL_OFFSET(%r12, 2 * 8);
CFI_REL_OFFSET(%r13, 3 * 8);
vmovdqu (%rcx), RTMP0;
vmovdqu (%r8), RTMP1;
/* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
/* Checksum_i = Checksum_{i-1} xor P_i */
/* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i) */
#define OCB_INPUT(n, lreg, xreg) \
vmovdqu (n * 16)(%rdx), xreg; \
vpxor (lreg), RTMP0, RTMP0; \
vpxor xreg, RTMP1, RTMP1; \
vpxor RTMP0, xreg, xreg; \
vmovdqu RTMP0, (n * 16)(%rsi);
movq (0 * 8)(%r9), %r10;
movq (1 * 8)(%r9), %r11;
movq (2 * 8)(%r9), %r12;
movq (3 * 8)(%r9), %r13;
OCB_INPUT(0, %r10, RA0);
OCB_INPUT(1, %r11, RA1);
OCB_INPUT(2, %r12, RA2);
OCB_INPUT(3, %r13, RA3);
movq (4 * 8)(%r9), %r10;
movq (5 * 8)(%r9), %r11;
movq (6 * 8)(%r9), %r12;
movq (7 * 8)(%r9), %r13;
OCB_INPUT(4, %r10, RB0);
OCB_INPUT(5, %r11, RB1);
OCB_INPUT(6, %r12, RB2);
OCB_INPUT(7, %r13, RB3);
#undef OCB_INPUT
vmovdqu RTMP0, (%rcx);
vmovdqu RTMP1, (%r8);
movq (0 * 8)(%rsp), %r10;
CFI_RESTORE(%r10);
movq (1 * 8)(%rsp), %r11;
CFI_RESTORE(%r11);
movq (2 * 8)(%rsp), %r12;
CFI_RESTORE(%r12);
movq (3 * 8)(%rsp), %r13;
CFI_RESTORE(%r13);
call __sm4_crypt_blk8;
addq $(4 * 8), %rsp;
CFI_ADJUST_CFA_OFFSET(-4 * 8);
vpxor (0 * 16)(%rsi), RA0, RA0;
vpxor (1 * 16)(%rsi), RA1, RA1;
vpxor (2 * 16)(%rsi), RA2, RA2;
vpxor (3 * 16)(%rsi), RA3, RA3;
vpxor (4 * 16)(%rsi), RB0, RB0;
vpxor (5 * 16)(%rsi), RB1, RB1;
vpxor (6 * 16)(%rsi), RB2, RB2;
vpxor (7 * 16)(%rsi), RB3, RB3;
vmovdqu RA0, (0 * 16)(%rsi);
vmovdqu RA1, (1 * 16)(%rsi);
vmovdqu RA2, (2 * 16)(%rsi);
vmovdqu RA3, (3 * 16)(%rsi);
vmovdqu RB0, (4 * 16)(%rsi);
vmovdqu RB1, (5 * 16)(%rsi);
vmovdqu RB2, (6 * 16)(%rsi);
vmovdqu RB3, (7 * 16)(%rsi);
vzeroall;
ret_spec_stop;
CFI_ENDPROC();
ELF(.size _gcry_sm4_aesni_avx_ocb_enc,.-_gcry_sm4_aesni_avx_ocb_enc;)
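The OCB_INPUT macro performs the per-block bookkeeping from the comments: it advances the offset by L_{ntz(i)}, folds the plaintext into the checksum, whitens the plaintext with the offset for encryption, and parks Offset_i in the destination buffer so the ciphertext can be formed as Offset_i ^ E(...) after the call. A sketch of the same loop in C, with illustrative names and the L values supplied by the caller as in the assembly:

#include <stdint.h>

static void xor16(uint8_t *d, const uint8_t *a, const uint8_t *b)
{
  for (int i = 0; i < 16; i++)
    d[i] = a[i] ^ b[i];
}

static void ocb_enc_prepare(uint8_t offset[16], uint8_t checksum[16],
                            const uint8_t *plain, uint8_t *to_cipher,
                            const uint8_t *const L[], size_t nblocks)
{
  for (size_t i = 0; i < nblocks; i++)
    {
      xor16(offset, offset, L[i]);                       /* Offset_i */
      xor16(checksum, checksum, plain + i * 16);         /* Checksum_i */
      xor16(to_cipher + i * 16, plain + i * 16, offset); /* P_i ^ Offset_i */
    }
}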
.align 16
.globl _gcry_sm4_aesni_avx_ocb_dec
ELF(.type _gcry_sm4_aesni_avx_ocb_dec,@function;)
_gcry_sm4_aesni_avx_ocb_dec:
/* input:
* %rdi: round key array, CTX
* %rsi: dst (8 blocks)
* %rdx: src (8 blocks)
* %rcx: offset
* %r8 : checksum
* %r9 : L pointers (void *L[8])
*/
CFI_STARTPROC();
subq $(4 * 8), %rsp;
CFI_ADJUST_CFA_OFFSET(4 * 8);
movq %r10, (0 * 8)(%rsp);
movq %r11, (1 * 8)(%rsp);
movq %r12, (2 * 8)(%rsp);
movq %r13, (3 * 8)(%rsp);
CFI_REL_OFFSET(%r10, 0 * 8);
CFI_REL_OFFSET(%r11, 1 * 8);
CFI_REL_OFFSET(%r12, 2 * 8);
CFI_REL_OFFSET(%r13, 3 * 8);
vmovdqu (%rcx), RTMP0;
/* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
/* P_i = Offset_i xor DECIPHER(K, C_i xor Offset_i) */
#define OCB_INPUT(n, lreg, xreg) \
vmovdqu (n * 16)(%rdx), xreg; \
vpxor (lreg), RTMP0, RTMP0; \
vpxor RTMP0, xreg, xreg; \
vmovdqu RTMP0, (n * 16)(%rsi);
movq (0 * 8)(%r9), %r10;
movq (1 * 8)(%r9), %r11;
movq (2 * 8)(%r9), %r12;
movq (3 * 8)(%r9), %r13;
OCB_INPUT(0, %r10, RA0);
OCB_INPUT(1, %r11, RA1);
OCB_INPUT(2, %r12, RA2);
OCB_INPUT(3, %r13, RA3);
movq (4 * 8)(%r9), %r10;
movq (5 * 8)(%r9), %r11;
movq (6 * 8)(%r9), %r12;
movq (7 * 8)(%r9), %r13;
OCB_INPUT(4, %r10, RB0);
OCB_INPUT(5, %r11, RB1);
OCB_INPUT(6, %r12, RB2);
OCB_INPUT(7, %r13, RB3);
#undef OCB_INPUT
vmovdqu RTMP0, (%rcx);
movq (0 * 8)(%rsp), %r10;
CFI_RESTORE(%r10);
movq (1 * 8)(%rsp), %r11;
CFI_RESTORE(%r11);
movq (2 * 8)(%rsp), %r12;
CFI_RESTORE(%r12);
movq (3 * 8)(%rsp), %r13;
CFI_RESTORE(%r13);
call __sm4_crypt_blk8;
addq $(4 * 8), %rsp;
CFI_ADJUST_CFA_OFFSET(-4 * 8);
vmovdqu (%r8), RTMP0;
vpxor (0 * 16)(%rsi), RA0, RA0;
vpxor (1 * 16)(%rsi), RA1, RA1;
vpxor (2 * 16)(%rsi), RA2, RA2;
vpxor (3 * 16)(%rsi), RA3, RA3;
vpxor (4 * 16)(%rsi), RB0, RB0;
vpxor (5 * 16)(%rsi), RB1, RB1;
vpxor (6 * 16)(%rsi), RB2, RB2;
vpxor (7 * 16)(%rsi), RB3, RB3;
/* Checksum_i = Checksum_{i-1} xor P_i */
vmovdqu RA0, (0 * 16)(%rsi);
vpxor RA0, RTMP0, RTMP0;
vmovdqu RA1, (1 * 16)(%rsi);
vpxor RA1, RTMP0, RTMP0;
vmovdqu RA2, (2 * 16)(%rsi);
vpxor RA2, RTMP0, RTMP0;
vmovdqu RA3, (3 * 16)(%rsi);
vpxor RA3, RTMP0, RTMP0;
vmovdqu RB0, (4 * 16)(%rsi);
vpxor RB0, RTMP0, RTMP0;
vmovdqu RB1, (5 * 16)(%rsi);
vpxor RB1, RTMP0, RTMP0;
vmovdqu RB2, (6 * 16)(%rsi);
vpxor RB2, RTMP0, RTMP0;
vmovdqu RB3, (7 * 16)(%rsi);
vpxor RB3, RTMP0, RTMP0;
vmovdqu RTMP0, (%r8);
vzeroall;
ret_spec_stop;
CFI_ENDPROC();
ELF(.size _gcry_sm4_aesni_avx_ocb_dec,.-_gcry_sm4_aesni_avx_ocb_dec;)
.align 16
.globl _gcry_sm4_aesni_avx_ocb_auth
ELF(.type _gcry_sm4_aesni_avx_ocb_auth,@function;)
_gcry_sm4_aesni_avx_ocb_auth:
/* input:
* %rdi: round key array, CTX
* %rsi: abuf (8 blocks)
* %rdx: offset
* %rcx: checksum
* %r8 : L pointers (void *L[8])
*/
CFI_STARTPROC();
subq $(4 * 8), %rsp;
CFI_ADJUST_CFA_OFFSET(4 * 8);
movq %r10, (0 * 8)(%rsp);
movq %r11, (1 * 8)(%rsp);
movq %r12, (2 * 8)(%rsp);
movq %r13, (3 * 8)(%rsp);
CFI_REL_OFFSET(%r10, 0 * 8);
CFI_REL_OFFSET(%r11, 1 * 8);
CFI_REL_OFFSET(%r12, 2 * 8);
CFI_REL_OFFSET(%r13, 3 * 8);
vmovdqu (%rdx), RTMP0;
/* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
/* Sum_i = Sum_{i-1} xor ENCIPHER(K, A_i xor Offset_i) */
#define OCB_INPUT(n, lreg, xreg) \
vmovdqu (n * 16)(%rsi), xreg; \
vpxor (lreg), RTMP0, RTMP0; \
vpxor RTMP0, xreg, xreg;
movq (0 * 8)(%r8), %r10;
movq (1 * 8)(%r8), %r11;
movq (2 * 8)(%r8), %r12;
movq (3 * 8)(%r8), %r13;
OCB_INPUT(0, %r10, RA0);
OCB_INPUT(1, %r11, RA1);
OCB_INPUT(2, %r12, RA2);
OCB_INPUT(3, %r13, RA3);
movq (4 * 8)(%r8), %r10;
movq (5 * 8)(%r8), %r11;
movq (6 * 8)(%r8), %r12;
movq (7 * 8)(%r8), %r13;
OCB_INPUT(4, %r10, RB0);
OCB_INPUT(5, %r11, RB1);
OCB_INPUT(6, %r12, RB2);
OCB_INPUT(7, %r13, RB3);
#undef OCB_INPUT
vmovdqu RTMP0, (%rdx);
movq (0 * 8)(%rsp), %r10;
CFI_RESTORE(%r10);
movq (1 * 8)(%rsp), %r11;
CFI_RESTORE(%r11);
movq (2 * 8)(%rsp), %r12;
CFI_RESTORE(%r12);
movq (3 * 8)(%rsp), %r13;
CFI_RESTORE(%r13);
call __sm4_crypt_blk8;
addq $(4 * 8), %rsp;
CFI_ADJUST_CFA_OFFSET(-4 * 8);
vmovdqu (%rcx), RTMP0;
vpxor RB0, RA0, RA0;
vpxor RB1, RA1, RA1;
vpxor RB2, RA2, RA2;
vpxor RB3, RA3, RA3;
vpxor RTMP0, RA3, RA3;
vpxor RA2, RA0, RA0;
vpxor RA3, RA1, RA1;
vpxor RA1, RA0, RA0;
vmovdqu RA0, (%rcx);
vzeroall;
ret_spec_stop;
CFI_ENDPROC();
ELF(.size _gcry_sm4_aesni_avx_ocb_auth,.-_gcry_sm4_aesni_avx_ocb_auth;)
#endif /*defined(ENABLE_AESNI_SUPPORT) && defined(ENABLE_AVX_SUPPORT)*/
#endif /*__x86_64*/
diff --git a/cipher/sm4-aesni-avx2-amd64.S b/cipher/sm4-aesni-avx2-amd64.S
index acd37cff..03f979fa 100644
--- a/cipher/sm4-aesni-avx2-amd64.S
+++ b/cipher/sm4-aesni-avx2-amd64.S
@@ -1,912 +1,973 @@
/* sm4-avx2-amd64.S - AVX2 implementation of SM4 cipher
*
- * Copyright (C) 2020, 2022 Jussi Kivilinna <jussi.kivilinna@iki.fi>
+ * Copyright (C) 2020, 2022-2023 Jussi Kivilinna <jussi.kivilinna@iki.fi>
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/* Based on SM4 AES-NI work by Markku-Juhani O. Saarinen at:
* https://github.com/mjosaarinen/sm4ni
*/
#include <config.h>
#ifdef __x86_64
#if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \
defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && \
defined(ENABLE_AESNI_SUPPORT) && defined(ENABLE_AVX2_SUPPORT)
#include "asm-common-amd64.h"
/* vector registers */
#define RX0 %ymm0
#define RX1 %ymm1
#define MASK_4BIT %ymm2
#define RTMP0 %ymm3
#define RTMP1 %ymm4
#define RTMP2 %ymm5
#define RTMP3 %ymm6
#define RTMP4 %ymm7
#define RA0 %ymm8
#define RA1 %ymm9
#define RA2 %ymm10
#define RA3 %ymm11
#define RA0x %xmm8
#define RA1x %xmm9
#define RA2x %xmm10
#define RA3x %xmm11
#define RB0 %ymm12
#define RB1 %ymm13
#define RB2 %ymm14
#define RB3 %ymm15
#define RB0x %xmm12
#define RB1x %xmm13
#define RB2x %xmm14
#define RB3x %xmm15
#define RNOT %ymm0
#define RBSWAP %ymm1
#define RX0x %xmm0
#define RX1x %xmm1
#define MASK_4BITx %xmm2
#define RNOTx %xmm0
#define RBSWAPx %xmm1
#define RTMP0x %xmm3
#define RTMP1x %xmm4
#define RTMP2x %xmm5
#define RTMP3x %xmm6
#define RTMP4x %xmm7
/**********************************************************************
helper macros
**********************************************************************/
/* Transpose four 32-bit words between 128-bit vector lanes. */
#define transpose_4x4(x0, x1, x2, x3, t1, t2) \
vpunpckhdq x1, x0, t2; \
vpunpckldq x1, x0, x0; \
\
vpunpckldq x3, x2, t1; \
vpunpckhdq x3, x2, x2; \
\
vpunpckhqdq t1, x0, x1; \
vpunpcklqdq t1, x0, x0; \
\
vpunpckhqdq x2, t2, x3; \
vpunpcklqdq x2, t2, x2;
/* pre-SubByte transform. */
#define transform_pre(x, lo_t, hi_t, mask4bit, tmp0) \
vpand x, mask4bit, tmp0; \
vpandn x, mask4bit, x; \
vpsrld $4, x, x; \
\
vpshufb tmp0, lo_t, tmp0; \
vpshufb x, hi_t, x; \
vpxor tmp0, x, x;
/* post-SubByte transform. Note: x has been XOR'ed with mask4bit by
* 'vaesenclast' instruction. */
#define transform_post(x, lo_t, hi_t, mask4bit, tmp0) \
vpandn mask4bit, x, tmp0; \
vpsrld $4, x, x; \
vpand x, mask4bit, x; \
\
vpshufb tmp0, lo_t, tmp0; \
vpshufb x, hi_t, x; \
vpxor tmp0, x, x;
/**********************************************************************
16-way SM4 with AES-NI and AVX2
**********************************************************************/
SECTION_RODATA
.align 16
ELF(.type _sm4_aesni_avx2_consts,@object)
_sm4_aesni_avx2_consts:
/*
* Following four affine transform look-up tables are from work by
* Markku-Juhani O. Saarinen, at https://github.com/mjosaarinen/sm4ni
*
* These allow exposing SM4 S-Box from AES SubByte.
*/
/* pre-SubByte affine transform, from SM4 field to AES field. */
.Lpre_tf_lo_s:
.quad 0x9197E2E474720701, 0xC7C1B4B222245157
.Lpre_tf_hi_s:
.quad 0xE240AB09EB49A200, 0xF052B91BF95BB012
/* post-SubByte affine transform, from AES field to SM4 field. */
.Lpost_tf_lo_s:
.quad 0x5B67F2CEA19D0834, 0xEDD14478172BBE82
.Lpost_tf_hi_s:
.quad 0xAE7201DD73AFDC00, 0x11CDBE62CC1063BF
/* For isolating SubBytes from AESENCLAST, inverse shift row */
.Linv_shift_row:
.byte 0x00, 0x0d, 0x0a, 0x07, 0x04, 0x01, 0x0e, 0x0b
.byte 0x08, 0x05, 0x02, 0x0f, 0x0c, 0x09, 0x06, 0x03
/* Inverse shift row + Rotate left by 8 bits on 32-bit words with vpshufb */
.Linv_shift_row_rol_8:
.byte 0x07, 0x00, 0x0d, 0x0a, 0x0b, 0x04, 0x01, 0x0e
.byte 0x0f, 0x08, 0x05, 0x02, 0x03, 0x0c, 0x09, 0x06
/* Inverse shift row + Rotate left by 16 bits on 32-bit words with vpshufb */
.Linv_shift_row_rol_16:
.byte 0x0a, 0x07, 0x00, 0x0d, 0x0e, 0x0b, 0x04, 0x01
.byte 0x02, 0x0f, 0x08, 0x05, 0x06, 0x03, 0x0c, 0x09
/* Inverse shift row + Rotate left by 24 bits on 32-bit words with vpshufb */
.Linv_shift_row_rol_24:
.byte 0x0d, 0x0a, 0x07, 0x00, 0x01, 0x0e, 0x0b, 0x04
.byte 0x05, 0x02, 0x0f, 0x08, 0x09, 0x06, 0x03, 0x0c
/* For CTR-mode IV byteswap */
.Lbswap128_mask:
.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
/* For input word byte-swap */
.Lbswap32_mask:
.byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
+/* CTR byte addition constants */
+.align 32
+.Lbige_addb_0_1:
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1
+.Lbige_addb_2_3:
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3
+.Lbige_addb_4_5:
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5
+.Lbige_addb_6_7:
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7
+.Lbige_addb_8_9:
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9
+.Lbige_addb_10_11:
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11
+.Lbige_addb_12_13:
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 13
+.Lbige_addb_14_15:
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 14
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15
+
.align 4
/* 4-bit mask */
.L0f0f0f0f:
.long 0x0f0f0f0f
.text
.align 16
ELF(.type __sm4_crypt_blk16,@function;)
__sm4_crypt_blk16:
/* input:
* %rdi: ctx, CTX
* RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: sixteen parallel
* plaintext blocks
* output:
* RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: sixteen parallel
* ciphertext blocks
*/
CFI_STARTPROC();
vbroadcasti128 .Lbswap32_mask rRIP, RTMP2;
vpshufb RTMP2, RA0, RA0;
vpshufb RTMP2, RA1, RA1;
vpshufb RTMP2, RA2, RA2;
vpshufb RTMP2, RA3, RA3;
vpshufb RTMP2, RB0, RB0;
vpshufb RTMP2, RB1, RB1;
vpshufb RTMP2, RB2, RB2;
vpshufb RTMP2, RB3, RB3;
vpbroadcastd .L0f0f0f0f rRIP, MASK_4BIT;
transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1);
transpose_4x4(RB0, RB1, RB2, RB3, RTMP0, RTMP1);
#define ROUND(round, s0, s1, s2, s3, r0, r1, r2, r3) \
vpbroadcastd (4*(round))(%rdi), RX0; \
vbroadcasti128 .Lpre_tf_lo_s rRIP, RTMP4; \
vbroadcasti128 .Lpre_tf_hi_s rRIP, RTMP1; \
vmovdqa RX0, RX1; \
vpxor s1, RX0, RX0; \
vpxor s2, RX0, RX0; \
vpxor s3, RX0, RX0; /* s1 ^ s2 ^ s3 ^ rk */ \
vbroadcasti128 .Lpost_tf_lo_s rRIP, RTMP2; \
vbroadcasti128 .Lpost_tf_hi_s rRIP, RTMP3; \
vpxor r1, RX1, RX1; \
vpxor r2, RX1, RX1; \
vpxor r3, RX1, RX1; /* r1 ^ r2 ^ r3 ^ rk */ \
\
/* sbox, non-linear part */ \
transform_pre(RX0, RTMP4, RTMP1, MASK_4BIT, RTMP0); \
transform_pre(RX1, RTMP4, RTMP1, MASK_4BIT, RTMP0); \
vextracti128 $1, RX0, RTMP4x; \
vextracti128 $1, RX1, RTMP0x; \
vaesenclast MASK_4BITx, RX0x, RX0x; \
vaesenclast MASK_4BITx, RTMP4x, RTMP4x; \
vaesenclast MASK_4BITx, RX1x, RX1x; \
vaesenclast MASK_4BITx, RTMP0x, RTMP0x; \
vinserti128 $1, RTMP4x, RX0, RX0; \
vbroadcasti128 .Linv_shift_row rRIP, RTMP4; \
vinserti128 $1, RTMP0x, RX1, RX1; \
transform_post(RX0, RTMP2, RTMP3, MASK_4BIT, RTMP0); \
transform_post(RX1, RTMP2, RTMP3, MASK_4BIT, RTMP0); \
\
/* linear part */ \
vpshufb RTMP4, RX0, RTMP0; \
vpxor RTMP0, s0, s0; /* s0 ^ x */ \
vpshufb RTMP4, RX1, RTMP2; \
vbroadcasti128 .Linv_shift_row_rol_8 rRIP, RTMP4; \
vpxor RTMP2, r0, r0; /* r0 ^ x */ \
vpshufb RTMP4, RX0, RTMP1; \
vpxor RTMP1, RTMP0, RTMP0; /* x ^ rol(x,8) */ \
vpshufb RTMP4, RX1, RTMP3; \
vbroadcasti128 .Linv_shift_row_rol_16 rRIP, RTMP4; \
vpxor RTMP3, RTMP2, RTMP2; /* x ^ rol(x,8) */ \
vpshufb RTMP4, RX0, RTMP1; \
vpxor RTMP1, RTMP0, RTMP0; /* x ^ rol(x,8) ^ rol(x,16) */ \
vpshufb RTMP4, RX1, RTMP3; \
vbroadcasti128 .Linv_shift_row_rol_24 rRIP, RTMP4; \
vpxor RTMP3, RTMP2, RTMP2; /* x ^ rol(x,8) ^ rol(x,16) */ \
vpshufb RTMP4, RX0, RTMP1; \
vpxor RTMP1, s0, s0; /* s0 ^ x ^ rol(x,24) */ \
vpslld $2, RTMP0, RTMP1; \
vpsrld $30, RTMP0, RTMP0; \
vpxor RTMP0, s0, s0; \
vpxor RTMP1, s0, s0; /* s0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \
vpshufb RTMP4, RX1, RTMP3; \
vpxor RTMP3, r0, r0; /* r0 ^ x ^ rol(x,24) */ \
vpslld $2, RTMP2, RTMP3; \
vpsrld $30, RTMP2, RTMP2; \
vpxor RTMP2, r0, r0; \
vpxor RTMP3, r0, r0; /* r0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */
leaq (32*4)(%rdi), %rax;
.align 16
.Lroundloop_blk16:
ROUND(0, RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3);
ROUND(1, RA1, RA2, RA3, RA0, RB1, RB2, RB3, RB0);
ROUND(2, RA2, RA3, RA0, RA1, RB2, RB3, RB0, RB1);
ROUND(3, RA3, RA0, RA1, RA2, RB3, RB0, RB1, RB2);
leaq (4*4)(%rdi), %rdi;
cmpq %rax, %rdi;
jne .Lroundloop_blk16;
#undef ROUND
vbroadcasti128 .Lbswap128_mask rRIP, RTMP2;
transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1);
transpose_4x4(RB0, RB1, RB2, RB3, RTMP0, RTMP1);
vpshufb RTMP2, RA0, RA0;
vpshufb RTMP2, RA1, RA1;
vpshufb RTMP2, RA2, RA2;
vpshufb RTMP2, RA3, RA3;
vpshufb RTMP2, RB0, RB0;
vpshufb RTMP2, RB1, RB1;
vpshufb RTMP2, RB2, RB2;
vpshufb RTMP2, RB3, RB3;
ret_spec_stop;
CFI_ENDPROC();
ELF(.size __sm4_crypt_blk16,.-__sm4_crypt_blk16;)
.align 16
.globl _gcry_sm4_aesni_avx2_crypt_blk1_16
ELF(.type _gcry_sm4_aesni_avx2_crypt_blk1_16,@function;)
_gcry_sm4_aesni_avx2_crypt_blk1_16:
/* input:
* %rdi: round key array, CTX
* %rsi: dst (1..16 blocks)
* %rdx: src (1..16 blocks)
* %rcx: num blocks (1..16)
*/
CFI_STARTPROC();
#define LOAD_INPUT(offset, yreg) \
cmpq $(1 + 2 * (offset)), %rcx; \
jb .Lblk16_load_input_done; \
ja 1f; \
vmovdqu (offset) * 32(%rdx), yreg##x; \
jmp .Lblk16_load_input_done; \
1: \
vmovdqu (offset) * 32(%rdx), yreg;
LOAD_INPUT(0, RA0);
LOAD_INPUT(1, RA1);
LOAD_INPUT(2, RA2);
LOAD_INPUT(3, RA3);
LOAD_INPUT(4, RB0);
LOAD_INPUT(5, RB1);
LOAD_INPUT(6, RB2);
LOAD_INPUT(7, RB3);
#undef LOAD_INPUT
.Lblk16_load_input_done:
call __sm4_crypt_blk16;
#define STORE_OUTPUT(yreg, offset) \
cmpq $(1 + 2 * (offset)), %rcx; \
jb .Lblk16_store_output_done; \
ja 1f; \
vmovdqu yreg##x, (offset) * 32(%rsi); \
jmp .Lblk16_store_output_done; \
1: \
vmovdqu yreg, (offset) * 32(%rsi);
STORE_OUTPUT(RA0, 0);
STORE_OUTPUT(RA1, 1);
STORE_OUTPUT(RA2, 2);
STORE_OUTPUT(RA3, 3);
STORE_OUTPUT(RB0, 4);
STORE_OUTPUT(RB1, 5);
STORE_OUTPUT(RB2, 6);
STORE_OUTPUT(RB3, 7);
#undef STORE_OUTPUT
.Lblk16_store_output_done:
vzeroall;
xorl %eax, %eax;
ret_spec_stop;
CFI_ENDPROC();
ELF(.size _gcry_sm4_aesni_avx2_crypt_blk1_16,.-_gcry_sm4_aesni_avx2_crypt_blk1_16;)
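LOAD_INPUT and STORE_OUTPUT handle an odd trailing block by touching only the xmm half of the last ymm register, since each ymm carries two 16-byte blocks. The dispatch per register, in sketch form (illustrative helper, not libgcrypt API):

#include <stddef.h>

/* How many blocks register 'reg' (0..7) holds for a request of
 * 'nblocks' (1..16): 0, 1 (xmm half only) or 2 (full ymm). */
static size_t blocks_in_reg(size_t nblocks, size_t reg)
{
  if (nblocks <= 2 * reg)
    return 0;
  return (nblocks >= 2 * reg + 2) ? 2 : 1;
}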
#define inc_le128(x, minus_one, tmp) \
vpcmpeqq minus_one, x, tmp; \
vpsubq minus_one, x, x; \
vpslldq $8, tmp, tmp; \
vpsubq tmp, x, x;
.align 16
.globl _gcry_sm4_aesni_avx2_ctr_enc
ELF(.type _gcry_sm4_aesni_avx2_ctr_enc,@function;)
_gcry_sm4_aesni_avx2_ctr_enc:
/* input:
* %rdi: ctx, CTX
* %rsi: dst (16 blocks)
* %rdx: src (16 blocks)
* %rcx: iv (big endian, 128bit)
*/
CFI_STARTPROC();
+ cmpb $(0x100 - 16), 15(%rcx);
+ jbe .Lctr_byteadd;
+
movq 8(%rcx), %rax;
bswapq %rax;
vbroadcasti128 .Lbswap128_mask rRIP, RTMP3;
vpcmpeqd RNOT, RNOT, RNOT;
vpsrldq $8, RNOT, RNOT; /* ab: -1:0 ; cd: -1:0 */
vpaddq RNOT, RNOT, RTMP2; /* ab: -2:0 ; cd: -2:0 */
/* load IV and byteswap */
vmovdqu (%rcx), RTMP4x;
vpshufb RTMP3x, RTMP4x, RTMP4x;
vmovdqa RTMP4x, RTMP0x;
inc_le128(RTMP4x, RNOTx, RTMP1x);
vinserti128 $1, RTMP4x, RTMP0, RTMP0;
vpshufb RTMP3, RTMP0, RA0; /* +1 ; +0 */
/* check need for handling 64-bit overflow and carry */
cmpq $(0xffffffffffffffff - 16), %rax;
ja .Lhandle_ctr_carry;
/* construct IVs */
vpsubq RTMP2, RTMP0, RTMP0; /* +3 ; +2 */
vpshufb RTMP3, RTMP0, RA1;
vpsubq RTMP2, RTMP0, RTMP0; /* +5 ; +4 */
vpshufb RTMP3, RTMP0, RA2;
vpsubq RTMP2, RTMP0, RTMP0; /* +7 ; +6 */
vpshufb RTMP3, RTMP0, RA3;
vpsubq RTMP2, RTMP0, RTMP0; /* +9 ; +8 */
vpshufb RTMP3, RTMP0, RB0;
vpsubq RTMP2, RTMP0, RTMP0; /* +11 ; +10 */
vpshufb RTMP3, RTMP0, RB1;
vpsubq RTMP2, RTMP0, RTMP0; /* +13 ; +12 */
vpshufb RTMP3, RTMP0, RB2;
vpsubq RTMP2, RTMP0, RTMP0; /* +15 ; +14 */
vpshufb RTMP3, RTMP0, RB3;
vpsubq RTMP2, RTMP0, RTMP0; /* +16 */
vpshufb RTMP3x, RTMP0x, RTMP0x;
jmp .Lctr_carry_done;
.Lhandle_ctr_carry:
/* construct IVs */
inc_le128(RTMP0, RNOT, RTMP1);
inc_le128(RTMP0, RNOT, RTMP1);
vpshufb RTMP3, RTMP0, RA1; /* +3 ; +2 */
inc_le128(RTMP0, RNOT, RTMP1);
inc_le128(RTMP0, RNOT, RTMP1);
vpshufb RTMP3, RTMP0, RA2; /* +5 ; +4 */
inc_le128(RTMP0, RNOT, RTMP1);
inc_le128(RTMP0, RNOT, RTMP1);
vpshufb RTMP3, RTMP0, RA3; /* +7 ; +6 */
inc_le128(RTMP0, RNOT, RTMP1);
inc_le128(RTMP0, RNOT, RTMP1);
vpshufb RTMP3, RTMP0, RB0; /* +9 ; +8 */
inc_le128(RTMP0, RNOT, RTMP1);
inc_le128(RTMP0, RNOT, RTMP1);
vpshufb RTMP3, RTMP0, RB1; /* +11 ; +10 */
inc_le128(RTMP0, RNOT, RTMP1);
inc_le128(RTMP0, RNOT, RTMP1);
vpshufb RTMP3, RTMP0, RB2; /* +13 ; +12 */
inc_le128(RTMP0, RNOT, RTMP1);
inc_le128(RTMP0, RNOT, RTMP1);
vpshufb RTMP3, RTMP0, RB3; /* +15 ; +14 */
inc_le128(RTMP0, RNOT, RTMP1);
vextracti128 $1, RTMP0, RTMP0x;
vpshufb RTMP3x, RTMP0x, RTMP0x; /* +16 */
-.align 4
.Lctr_carry_done:
/* store new IV */
vmovdqu RTMP0x, (%rcx);
+.align 8
+.Lload_ctr_done:
call __sm4_crypt_blk16;
vpxor (0 * 32)(%rdx), RA0, RA0;
vpxor (1 * 32)(%rdx), RA1, RA1;
vpxor (2 * 32)(%rdx), RA2, RA2;
vpxor (3 * 32)(%rdx), RA3, RA3;
vpxor (4 * 32)(%rdx), RB0, RB0;
vpxor (5 * 32)(%rdx), RB1, RB1;
vpxor (6 * 32)(%rdx), RB2, RB2;
vpxor (7 * 32)(%rdx), RB3, RB3;
vmovdqu RA0, (0 * 32)(%rsi);
vmovdqu RA1, (1 * 32)(%rsi);
vmovdqu RA2, (2 * 32)(%rsi);
vmovdqu RA3, (3 * 32)(%rsi);
vmovdqu RB0, (4 * 32)(%rsi);
vmovdqu RB1, (5 * 32)(%rsi);
vmovdqu RB2, (6 * 32)(%rsi);
vmovdqu RB3, (7 * 32)(%rsi);
vzeroall;
ret_spec_stop;
+
+.align 8
+.Lctr_byteadd_full_ctr_carry:
+ movq 8(%rcx), %r11;
+ movq (%rcx), %r10;
+ bswapq %r11;
+ bswapq %r10;
+ addq $16, %r11;
+ adcq $0, %r10;
+ bswapq %r11;
+ bswapq %r10;
+ movq %r11, 8(%rcx);
+ movq %r10, (%rcx);
+ jmp .Lctr_byteadd_ymm;
+.align 8
+.Lctr_byteadd:
+ vbroadcasti128 (%rcx), RB3;
+ je .Lctr_byteadd_full_ctr_carry;
+ addb $16, 15(%rcx);
+.Lctr_byteadd_ymm:
+ vpaddb .Lbige_addb_0_1 rRIP, RB3, RA0;
+ vpaddb .Lbige_addb_2_3 rRIP, RB3, RA1;
+ vpaddb .Lbige_addb_4_5 rRIP, RB3, RA2;
+ vpaddb .Lbige_addb_6_7 rRIP, RB3, RA3;
+ vpaddb .Lbige_addb_8_9 rRIP, RB3, RB0;
+ vpaddb .Lbige_addb_10_11 rRIP, RB3, RB1;
+ vpaddb .Lbige_addb_12_13 rRIP, RB3, RB2;
+ vpaddb .Lbige_addb_14_15 rRIP, RB3, RB3;
+
+ jmp .Lload_ctr_done;
CFI_ENDPROC();
ELF(.size _gcry_sm4_aesni_avx2_ctr_enc,.-_gcry_sm4_aesni_avx2_ctr_enc;)
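Besides the new byte-add entry, the AVX2 CTR path keeps its existing split: counters are normally built two at a time with 64-bit lane arithmetic, which is only safe while the low quadword of the big-endian counter does not overflow within the batch; otherwise .Lhandle_ctr_carry falls back to per-counter 128-bit increments. A sketch of that overflow test (the assembly does it with bswapq and an unsigned compare):

#include <stdint.h>

static int ctr_low64_overflows(const uint8_t iv[16], unsigned int nblocks)
{
  uint64_t lo = 0;
  for (int i = 8; i < 16; i++)
    lo = (lo << 8) | iv[i];          /* low quadword, big endian */
  return lo > UINT64_MAX - nblocks;  /* nblocks is 16 here */
}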
.align 16
.globl _gcry_sm4_aesni_avx2_cbc_dec
ELF(.type _gcry_sm4_aesni_avx2_cbc_dec,@function;)
_gcry_sm4_aesni_avx2_cbc_dec:
/* input:
* %rdi: ctx, CTX
* %rsi: dst (16 blocks)
* %rdx: src (16 blocks)
* %rcx: iv
*/
CFI_STARTPROC();
vmovdqu (0 * 32)(%rdx), RA0;
vmovdqu (1 * 32)(%rdx), RA1;
vmovdqu (2 * 32)(%rdx), RA2;
vmovdqu (3 * 32)(%rdx), RA3;
vmovdqu (4 * 32)(%rdx), RB0;
vmovdqu (5 * 32)(%rdx), RB1;
vmovdqu (6 * 32)(%rdx), RB2;
vmovdqu (7 * 32)(%rdx), RB3;
call __sm4_crypt_blk16;
vmovdqu (%rcx), RNOTx;
vinserti128 $1, (%rdx), RNOT, RNOT;
vpxor RNOT, RA0, RA0;
vpxor (0 * 32 + 16)(%rdx), RA1, RA1;
vpxor (1 * 32 + 16)(%rdx), RA2, RA2;
vpxor (2 * 32 + 16)(%rdx), RA3, RA3;
vpxor (3 * 32 + 16)(%rdx), RB0, RB0;
vpxor (4 * 32 + 16)(%rdx), RB1, RB1;
vpxor (5 * 32 + 16)(%rdx), RB2, RB2;
vpxor (6 * 32 + 16)(%rdx), RB3, RB3;
vmovdqu (7 * 32 + 16)(%rdx), RNOTx;
vmovdqu RNOTx, (%rcx); /* store new IV */
vmovdqu RA0, (0 * 32)(%rsi);
vmovdqu RA1, (1 * 32)(%rsi);
vmovdqu RA2, (2 * 32)(%rsi);
vmovdqu RA3, (3 * 32)(%rsi);
vmovdqu RB0, (4 * 32)(%rsi);
vmovdqu RB1, (5 * 32)(%rsi);
vmovdqu RB2, (6 * 32)(%rsi);
vmovdqu RB3, (7 * 32)(%rsi);
vzeroall;
ret_spec_stop;
CFI_ENDPROC();
ELF(.size _gcry_sm4_aesni_avx2_cbc_dec,.-_gcry_sm4_aesni_avx2_cbc_dec;)
.align 16
.globl _gcry_sm4_aesni_avx2_cfb_dec
ELF(.type _gcry_sm4_aesni_avx2_cfb_dec,@function;)
_gcry_sm4_aesni_avx2_cfb_dec:
/* input:
* %rdi: ctx, CTX
* %rsi: dst (16 blocks)
* %rdx: src (16 blocks)
* %rcx: iv
*/
CFI_STARTPROC();
/* Load input */
vmovdqu (%rcx), RNOTx;
vinserti128 $1, (%rdx), RNOT, RA0;
vmovdqu (0 * 32 + 16)(%rdx), RA1;
vmovdqu (1 * 32 + 16)(%rdx), RA2;
vmovdqu (2 * 32 + 16)(%rdx), RA3;
vmovdqu (3 * 32 + 16)(%rdx), RB0;
vmovdqu (4 * 32 + 16)(%rdx), RB1;
vmovdqu (5 * 32 + 16)(%rdx), RB2;
vmovdqu (6 * 32 + 16)(%rdx), RB3;
/* Update IV */
vmovdqu (7 * 32 + 16)(%rdx), RNOTx;
vmovdqu RNOTx, (%rcx);
call __sm4_crypt_blk16;
vpxor (0 * 32)(%rdx), RA0, RA0;
vpxor (1 * 32)(%rdx), RA1, RA1;
vpxor (2 * 32)(%rdx), RA2, RA2;
vpxor (3 * 32)(%rdx), RA3, RA3;
vpxor (4 * 32)(%rdx), RB0, RB0;
vpxor (5 * 32)(%rdx), RB1, RB1;
vpxor (6 * 32)(%rdx), RB2, RB2;
vpxor (7 * 32)(%rdx), RB3, RB3;
vmovdqu RA0, (0 * 32)(%rsi);
vmovdqu RA1, (1 * 32)(%rsi);
vmovdqu RA2, (2 * 32)(%rsi);
vmovdqu RA3, (3 * 32)(%rsi);
vmovdqu RB0, (4 * 32)(%rsi);
vmovdqu RB1, (5 * 32)(%rsi);
vmovdqu RB2, (6 * 32)(%rsi);
vmovdqu RB3, (7 * 32)(%rsi);
vzeroall;
ret_spec_stop;
CFI_ENDPROC();
ELF(.size _gcry_sm4_aesni_avx2_cfb_dec,.-_gcry_sm4_aesni_avx2_cfb_dec;)
.align 16
.globl _gcry_sm4_aesni_avx2_ocb_enc
ELF(.type _gcry_sm4_aesni_avx2_ocb_enc,@function;)
_gcry_sm4_aesni_avx2_ocb_enc:
/* input:
* %rdi: ctx, CTX
* %rsi: dst (16 blocks)
* %rdx: src (16 blocks)
* %rcx: offset
* %r8 : checksum
* %r9 : L pointers (void *L[16])
*/
CFI_STARTPROC();
subq $(4 * 8), %rsp;
CFI_ADJUST_CFA_OFFSET(4 * 8);
movq %r10, (0 * 8)(%rsp);
movq %r11, (1 * 8)(%rsp);
movq %r12, (2 * 8)(%rsp);
movq %r13, (3 * 8)(%rsp);
CFI_REL_OFFSET(%r10, 0 * 8);
CFI_REL_OFFSET(%r11, 1 * 8);
CFI_REL_OFFSET(%r12, 2 * 8);
CFI_REL_OFFSET(%r13, 3 * 8);
vmovdqu (%rcx), RTMP0x;
vmovdqu (%r8), RTMP1x;
/* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
/* Checksum_i = Checksum_{i-1} xor P_i */
/* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i) */
#define OCB_INPUT(n, l0reg, l1reg, yreg) \
vmovdqu (n * 32)(%rdx), yreg; \
vpxor (l0reg), RTMP0x, RNOTx; \
vpxor (l1reg), RNOTx, RTMP0x; \
vinserti128 $1, RTMP0x, RNOT, RNOT; \
vpxor yreg, RTMP1, RTMP1; \
vpxor yreg, RNOT, yreg; \
vmovdqu RNOT, (n * 32)(%rsi);
movq (0 * 8)(%r9), %r10;
movq (1 * 8)(%r9), %r11;
movq (2 * 8)(%r9), %r12;
movq (3 * 8)(%r9), %r13;
OCB_INPUT(0, %r10, %r11, RA0);
OCB_INPUT(1, %r12, %r13, RA1);
movq (4 * 8)(%r9), %r10;
movq (5 * 8)(%r9), %r11;
movq (6 * 8)(%r9), %r12;
movq (7 * 8)(%r9), %r13;
OCB_INPUT(2, %r10, %r11, RA2);
OCB_INPUT(3, %r12, %r13, RA3);
movq (8 * 8)(%r9), %r10;
movq (9 * 8)(%r9), %r11;
movq (10 * 8)(%r9), %r12;
movq (11 * 8)(%r9), %r13;
OCB_INPUT(4, %r10, %r11, RB0);
OCB_INPUT(5, %r12, %r13, RB1);
movq (12 * 8)(%r9), %r10;
movq (13 * 8)(%r9), %r11;
movq (14 * 8)(%r9), %r12;
movq (15 * 8)(%r9), %r13;
OCB_INPUT(6, %r10, %r11, RB2);
OCB_INPUT(7, %r12, %r13, RB3);
#undef OCB_INPUT
vextracti128 $1, RTMP1, RNOTx;
vmovdqu RTMP0x, (%rcx);
vpxor RNOTx, RTMP1x, RTMP1x;
vmovdqu RTMP1x, (%r8);
movq (0 * 8)(%rsp), %r10;
movq (1 * 8)(%rsp), %r11;
movq (2 * 8)(%rsp), %r12;
movq (3 * 8)(%rsp), %r13;
CFI_RESTORE(%r10);
CFI_RESTORE(%r11);
CFI_RESTORE(%r12);
CFI_RESTORE(%r13);
call __sm4_crypt_blk16;
addq $(4 * 8), %rsp;
CFI_ADJUST_CFA_OFFSET(-4 * 8);
vpxor (0 * 32)(%rsi), RA0, RA0;
vpxor (1 * 32)(%rsi), RA1, RA1;
vpxor (2 * 32)(%rsi), RA2, RA2;
vpxor (3 * 32)(%rsi), RA3, RA3;
vpxor (4 * 32)(%rsi), RB0, RB0;
vpxor (5 * 32)(%rsi), RB1, RB1;
vpxor (6 * 32)(%rsi), RB2, RB2;
vpxor (7 * 32)(%rsi), RB3, RB3;
vmovdqu RA0, (0 * 32)(%rsi);
vmovdqu RA1, (1 * 32)(%rsi);
vmovdqu RA2, (2 * 32)(%rsi);
vmovdqu RA3, (3 * 32)(%rsi);
vmovdqu RB0, (4 * 32)(%rsi);
vmovdqu RB1, (5 * 32)(%rsi);
vmovdqu RB2, (6 * 32)(%rsi);
vmovdqu RB3, (7 * 32)(%rsi);
vzeroall;
ret_spec_stop;
CFI_ENDPROC();
ELF(.size _gcry_sm4_aesni_avx2_ocb_enc,.-_gcry_sm4_aesni_avx2_ocb_enc;)
.align 16
.globl _gcry_sm4_aesni_avx2_ocb_dec
ELF(.type _gcry_sm4_aesni_avx2_ocb_dec,@function;)
_gcry_sm4_aesni_avx2_ocb_dec:
/* input:
* %rdi: ctx, CTX
* %rsi: dst (16 blocks)
* %rdx: src (16 blocks)
* %rcx: offset
* %r8 : checksum
* %r9 : L pointers (void *L[16])
*/
CFI_STARTPROC();
subq $(4 * 8), %rsp;
CFI_ADJUST_CFA_OFFSET(4 * 8);
movq %r10, (0 * 8)(%rsp);
movq %r11, (1 * 8)(%rsp);
movq %r12, (2 * 8)(%rsp);
movq %r13, (3 * 8)(%rsp);
CFI_REL_OFFSET(%r10, 0 * 8);
CFI_REL_OFFSET(%r11, 1 * 8);
CFI_REL_OFFSET(%r12, 2 * 8);
CFI_REL_OFFSET(%r13, 3 * 8);
vmovdqu (%rcx), RTMP0x;
/* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
/* P_i = Offset_i xor DECIPHER(K, C_i xor Offset_i) */
#define OCB_INPUT(n, l0reg, l1reg, yreg) \
vmovdqu (n * 32)(%rdx), yreg; \
vpxor (l0reg), RTMP0x, RNOTx; \
vpxor (l1reg), RNOTx, RTMP0x; \
vinserti128 $1, RTMP0x, RNOT, RNOT; \
vpxor yreg, RNOT, yreg; \
vmovdqu RNOT, (n * 32)(%rsi);
movq (0 * 8)(%r9), %r10;
movq (1 * 8)(%r9), %r11;
movq (2 * 8)(%r9), %r12;
movq (3 * 8)(%r9), %r13;
OCB_INPUT(0, %r10, %r11, RA0);
OCB_INPUT(1, %r12, %r13, RA1);
movq (4 * 8)(%r9), %r10;
movq (5 * 8)(%r9), %r11;
movq (6 * 8)(%r9), %r12;
movq (7 * 8)(%r9), %r13;
OCB_INPUT(2, %r10, %r11, RA2);
OCB_INPUT(3, %r12, %r13, RA3);
movq (8 * 8)(%r9), %r10;
movq (9 * 8)(%r9), %r11;
movq (10 * 8)(%r9), %r12;
movq (11 * 8)(%r9), %r13;
OCB_INPUT(4, %r10, %r11, RB0);
OCB_INPUT(5, %r12, %r13, RB1);
movq (12 * 8)(%r9), %r10;
movq (13 * 8)(%r9), %r11;
movq (14 * 8)(%r9), %r12;
movq (15 * 8)(%r9), %r13;
OCB_INPUT(6, %r10, %r11, RB2);
OCB_INPUT(7, %r12, %r13, RB3);
#undef OCB_INPUT
vmovdqu RTMP0x, (%rcx);
movq (0 * 8)(%rsp), %r10;
movq (1 * 8)(%rsp), %r11;
movq (2 * 8)(%rsp), %r12;
movq (3 * 8)(%rsp), %r13;
CFI_RESTORE(%r10);
CFI_RESTORE(%r11);
CFI_RESTORE(%r12);
CFI_RESTORE(%r13);
call __sm4_crypt_blk16;
addq $(4 * 8), %rsp;
CFI_ADJUST_CFA_OFFSET(-4 * 8);
vmovdqu (%r8), RTMP1x;
vpxor (0 * 32)(%rsi), RA0, RA0;
vpxor (1 * 32)(%rsi), RA1, RA1;
vpxor (2 * 32)(%rsi), RA2, RA2;
vpxor (3 * 32)(%rsi), RA3, RA3;
vpxor (4 * 32)(%rsi), RB0, RB0;
vpxor (5 * 32)(%rsi), RB1, RB1;
vpxor (6 * 32)(%rsi), RB2, RB2;
vpxor (7 * 32)(%rsi), RB3, RB3;
/* Checksum_i = Checksum_{i-1} xor P_i */
vmovdqu RA0, (0 * 32)(%rsi);
vpxor RA0, RTMP1, RTMP1;
vmovdqu RA1, (1 * 32)(%rsi);
vpxor RA1, RTMP1, RTMP1;
vmovdqu RA2, (2 * 32)(%rsi);
vpxor RA2, RTMP1, RTMP1;
vmovdqu RA3, (3 * 32)(%rsi);
vpxor RA3, RTMP1, RTMP1;
vmovdqu RB0, (4 * 32)(%rsi);
vpxor RB0, RTMP1, RTMP1;
vmovdqu RB1, (5 * 32)(%rsi);
vpxor RB1, RTMP1, RTMP1;
vmovdqu RB2, (6 * 32)(%rsi);
vpxor RB2, RTMP1, RTMP1;
vmovdqu RB3, (7 * 32)(%rsi);
vpxor RB3, RTMP1, RTMP1;
vextracti128 $1, RTMP1, RNOTx;
vpxor RNOTx, RTMP1x, RTMP1x;
vmovdqu RTMP1x, (%r8);
vzeroall;
ret_spec_stop;
CFI_ENDPROC();
ELF(.size _gcry_sm4_aesni_avx2_ocb_dec,.-_gcry_sm4_aesni_avx2_ocb_dec;)
.align 16
.globl _gcry_sm4_aesni_avx2_ocb_auth
ELF(.type _gcry_sm4_aesni_avx2_ocb_auth,@function;)
_gcry_sm4_aesni_avx2_ocb_auth:
/* input:
* %rdi: ctx, CTX
* %rsi: abuf (16 blocks)
* %rdx: offset
* %rcx: checksum
* %r8 : L pointers (void *L[16])
*/
CFI_STARTPROC();
subq $(4 * 8), %rsp;
CFI_ADJUST_CFA_OFFSET(4 * 8);
movq %r10, (0 * 8)(%rsp);
movq %r11, (1 * 8)(%rsp);
movq %r12, (2 * 8)(%rsp);
movq %r13, (3 * 8)(%rsp);
CFI_REL_OFFSET(%r10, 0 * 8);
CFI_REL_OFFSET(%r11, 1 * 8);
CFI_REL_OFFSET(%r12, 2 * 8);
CFI_REL_OFFSET(%r13, 3 * 8);
vmovdqu (%rdx), RTMP0x;
/* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
/* Sum_i = Sum_{i-1} xor ENCIPHER(K, A_i xor Offset_i) */
#define OCB_INPUT(n, l0reg, l1reg, yreg) \
vmovdqu (n * 32)(%rsi), yreg; \
vpxor (l0reg), RTMP0x, RNOTx; \
vpxor (l1reg), RNOTx, RTMP0x; \
vinserti128 $1, RTMP0x, RNOT, RNOT; \
vpxor yreg, RNOT, yreg;
movq (0 * 8)(%r8), %r10;
movq (1 * 8)(%r8), %r11;
movq (2 * 8)(%r8), %r12;
movq (3 * 8)(%r8), %r13;
OCB_INPUT(0, %r10, %r11, RA0);
OCB_INPUT(1, %r12, %r13, RA1);
movq (4 * 8)(%r8), %r10;
movq (5 * 8)(%r8), %r11;
movq (6 * 8)(%r8), %r12;
movq (7 * 8)(%r8), %r13;
OCB_INPUT(2, %r10, %r11, RA2);
OCB_INPUT(3, %r12, %r13, RA3);
movq (8 * 8)(%r8), %r10;
movq (9 * 8)(%r8), %r11;
movq (10 * 8)(%r8), %r12;
movq (11 * 8)(%r8), %r13;
OCB_INPUT(4, %r10, %r11, RB0);
OCB_INPUT(5, %r12, %r13, RB1);
movq (12 * 8)(%r8), %r10;
movq (13 * 8)(%r8), %r11;
movq (14 * 8)(%r8), %r12;
movq (15 * 8)(%r8), %r13;
OCB_INPUT(6, %r10, %r11, RB2);
OCB_INPUT(7, %r12, %r13, RB3);
#undef OCB_INPUT
vmovdqu RTMP0x, (%rdx);
movq (0 * 8)(%rsp), %r10;
movq (1 * 8)(%rsp), %r11;
movq (2 * 8)(%rsp), %r12;
movq (3 * 8)(%rsp), %r13;
CFI_RESTORE(%r10);
CFI_RESTORE(%r11);
CFI_RESTORE(%r12);
CFI_RESTORE(%r13);
call __sm4_crypt_blk16;
addq $(4 * 8), %rsp;
CFI_ADJUST_CFA_OFFSET(-4 * 8);
vpxor RA0, RB0, RA0;
vpxor RA1, RB1, RA1;
vpxor RA2, RB2, RA2;
vpxor RA3, RB3, RA3;
vpxor RA1, RA0, RA0;
vpxor RA3, RA2, RA2;
vpxor RA2, RA0, RTMP1;
vextracti128 $1, RTMP1, RNOTx;
vpxor (%rcx), RTMP1x, RTMP1x;
vpxor RNOTx, RTMP1x, RTMP1x;
vmovdqu RTMP1x, (%rcx);
vzeroall;
ret_spec_stop;
CFI_ENDPROC();
ELF(.size _gcry_sm4_aesni_avx2_ocb_auth,.-_gcry_sm4_aesni_avx2_ocb_auth;)
#endif /*defined(ENABLE_AESNI_SUPPORT) && defined(ENABLE_AVX2_SUPPORT)*/
#endif /*__x86_64*/
diff --git a/cipher/sm4-gfni-avx2-amd64.S b/cipher/sm4-gfni-avx2-amd64.S
index 2fbaffd5..464da399 100644
--- a/cipher/sm4-gfni-avx2-amd64.S
+++ b/cipher/sm4-gfni-avx2-amd64.S
@@ -1,1199 +1,1260 @@
/* sm4-gfni-avx2-amd64.S - GFNI/AVX2 implementation of SM4 cipher
*
- * Copyright (C) 2022 Jussi Kivilinna <jussi.kivilinna@iki.fi>
+ * Copyright (C) 2022-2023 Jussi Kivilinna <jussi.kivilinna@iki.fi>
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <config.h>
#ifdef __x86_64
#if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \
defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && \
defined(ENABLE_GFNI_SUPPORT) && defined(ENABLE_AVX2_SUPPORT)
#include "asm-common-amd64.h"
/**********************************************************************
helper macros
**********************************************************************/
/* Transpose four 32-bit words between 128-bit vectors. */
#define transpose_4x4(x0, x1, x2, x3, t1, t2) \
vpunpckhdq x1, x0, t2; \
vpunpckldq x1, x0, x0; \
\
vpunpckldq x3, x2, t1; \
vpunpckhdq x3, x2, x2; \
\
vpunpckhqdq t1, x0, x1; \
vpunpcklqdq t1, x0, x0; \
\
vpunpckhqdq x2, t2, x3; \
vpunpcklqdq x2, t2, x2;
/**********************************************************************
4-way && 8-way SM4 with GFNI and AVX2
**********************************************************************/
/* vector registers */
#define RX0 %ymm0
#define RX1 %ymm1
#define RX0x %xmm0
#define RX1x %xmm1
#define RTMP0 %ymm2
#define RTMP1 %ymm3
#define RTMP2 %ymm4
#define RTMP3 %ymm5
#define RTMP4 %ymm6
#define RTMP0x %xmm2
#define RTMP1x %xmm3
#define RTMP2x %xmm4
#define RTMP3x %xmm5
#define RTMP4x %xmm6
#define RNOT %ymm7
#define RNOTx %xmm7
#define RA0 %ymm8
#define RA1 %ymm9
#define RA2 %ymm10
#define RA3 %ymm11
#define RA0x %xmm8
#define RA1x %xmm9
#define RA2x %xmm10
#define RA3x %xmm11
#define RB0 %ymm12
#define RB1 %ymm13
#define RB2 %ymm14
#define RB3 %ymm15
#define RB0x %xmm12
#define RB1x %xmm13
#define RB2x %xmm14
#define RB3x %xmm15
SECTION_RODATA
.align 32
ELF(.type _sm4_gfni_avx2_consts,@object)
_sm4_gfni_avx2_consts:
/* Affine transform, SM4 field to AES field */
.Lpre_affine_s:
.byte 0x52, 0xbc, 0x2d, 0x02, 0x9e, 0x25, 0xac, 0x34
.byte 0x52, 0xbc, 0x2d, 0x02, 0x9e, 0x25, 0xac, 0x34
.byte 0x52, 0xbc, 0x2d, 0x02, 0x9e, 0x25, 0xac, 0x34
.byte 0x52, 0xbc, 0x2d, 0x02, 0x9e, 0x25, 0xac, 0x34
/* Affine transform, AES field to SM4 field */
.Lpost_affine_s:
.byte 0x19, 0x8b, 0x6c, 0x1e, 0x51, 0x8e, 0x2d, 0xd7
.byte 0x19, 0x8b, 0x6c, 0x1e, 0x51, 0x8e, 0x2d, 0xd7
.byte 0x19, 0x8b, 0x6c, 0x1e, 0x51, 0x8e, 0x2d, 0xd7
.byte 0x19, 0x8b, 0x6c, 0x1e, 0x51, 0x8e, 0x2d, 0xd7
/* Rotate left by 8 bits on 32-bit words with vpshufb */
.Lrol_8:
.byte 0x03, 0x00, 0x01, 0x02, 0x07, 0x04, 0x05, 0x06
.byte 0x0b, 0x08, 0x09, 0x0a, 0x0f, 0x0c, 0x0d, 0x0e
.byte 0x03, 0x00, 0x01, 0x02, 0x07, 0x04, 0x05, 0x06
.byte 0x0b, 0x08, 0x09, 0x0a, 0x0f, 0x0c, 0x0d, 0x0e
/* Rotate left by 16 bits on 32-bit words with vpshufb */
.Lrol_16:
.byte 0x02, 0x03, 0x00, 0x01, 0x06, 0x07, 0x04, 0x05
.byte 0x0a, 0x0b, 0x08, 0x09, 0x0e, 0x0f, 0x0c, 0x0d
.byte 0x02, 0x03, 0x00, 0x01, 0x06, 0x07, 0x04, 0x05
.byte 0x0a, 0x0b, 0x08, 0x09, 0x0e, 0x0f, 0x0c, 0x0d
/* Rotate left by 24 bits on 32-bit words with vpshufb */
.Lrol_24:
.byte 0x01, 0x02, 0x03, 0x00, 0x05, 0x06, 0x07, 0x04
.byte 0x09, 0x0a, 0x0b, 0x08, 0x0d, 0x0e, 0x0f, 0x0c
.byte 0x01, 0x02, 0x03, 0x00, 0x05, 0x06, 0x07, 0x04
.byte 0x09, 0x0a, 0x0b, 0x08, 0x0d, 0x0e, 0x0f, 0x0c
/* For CTR-mode IV byteswap */
.Lbswap128_mask:
.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
/* For input word byte-swap */
.Lbswap32_mask:
.byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
+/* CTR byte addition constants */
+.align 32
+.Lbige_addb_0_1:
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1
+.Lbige_addb_2_3:
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3
+.Lbige_addb_4_5:
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5
+.Lbige_addb_6_7:
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7
+.Lbige_addb_8_9:
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9
+.Lbige_addb_10_11:
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11
+.Lbige_addb_12_13:
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 13
+.Lbige_addb_14_15:
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 14
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15
+
.text
.align 16
.globl _gcry_sm4_gfni_avx2_expand_key
ELF(.type _gcry_sm4_gfni_avx2_expand_key,@function;)
_gcry_sm4_gfni_avx2_expand_key:
/* input:
* %rdi: 128-bit key
* %rsi: rkey_enc
* %rdx: rkey_dec
* %rcx: fk array
* %r8: ck array
*/
CFI_STARTPROC();
vmovd 0*4(%rdi), RA0x;
vmovd 1*4(%rdi), RA1x;
vmovd 2*4(%rdi), RA2x;
vmovd 3*4(%rdi), RA3x;
vmovdqa .Lbswap32_mask rRIP, RTMP2x;
vpshufb RTMP2x, RA0x, RA0x;
vpshufb RTMP2x, RA1x, RA1x;
vpshufb RTMP2x, RA2x, RA2x;
vpshufb RTMP2x, RA3x, RA3x;
vmovd 0*4(%rcx), RB0x;
vmovd 1*4(%rcx), RB1x;
vmovd 2*4(%rcx), RB2x;
vmovd 3*4(%rcx), RB3x;
vpxor RB0x, RA0x, RA0x;
vpxor RB1x, RA1x, RA1x;
vpxor RB2x, RA2x, RA2x;
vpxor RB3x, RA3x, RA3x;
#define ROUND(round, s0, s1, s2, s3) \
vpbroadcastd (4*(round))(%r8), RX0x; \
vpxor s1, RX0x, RX0x; \
vpxor s2, RX0x, RX0x; \
vpxor s3, RX0x, RX0x; /* s1 ^ s2 ^ s3 ^ rk */ \
\
/* sbox, non-linear part */ \
vgf2p8affineqb $0x65, .Lpre_affine_s rRIP, RX0x, RX0x; \
vgf2p8affineinvqb $0xd3, .Lpost_affine_s rRIP, RX0x, RX0x; \
\
/* linear part */ \
vpxor RX0x, s0, s0; /* s0 ^ x */ \
vpslld $13, RX0x, RTMP0x; \
vpsrld $19, RX0x, RTMP1x; \
vpslld $23, RX0x, RTMP2x; \
vpsrld $9, RX0x, RTMP3x; \
vpxor RTMP0x, RTMP1x, RTMP1x; \
vpxor RTMP2x, RTMP3x, RTMP3x; \
vpxor RTMP1x, s0, s0; /* s0 ^ x ^ rol(x,13) */ \
vpxor RTMP3x, s0, s0; /* s0 ^ x ^ rol(x,13) ^ rol(x,23) */
leaq (32*4)(%r8), %rax;
leaq (32*4)(%rdx), %rdx;
.align 16
.Lroundloop_expand_key:
leaq (-4*4)(%rdx), %rdx;
ROUND(0, RA0x, RA1x, RA2x, RA3x);
ROUND(1, RA1x, RA2x, RA3x, RA0x);
ROUND(2, RA2x, RA3x, RA0x, RA1x);
ROUND(3, RA3x, RA0x, RA1x, RA2x);
leaq (4*4)(%r8), %r8;
vmovd RA0x, (0*4)(%rsi);
vmovd RA1x, (1*4)(%rsi);
vmovd RA2x, (2*4)(%rsi);
vmovd RA3x, (3*4)(%rsi);
vmovd RA0x, (3*4)(%rdx);
vmovd RA1x, (2*4)(%rdx);
vmovd RA2x, (1*4)(%rdx);
vmovd RA3x, (0*4)(%rdx);
leaq (4*4)(%rsi), %rsi;
cmpq %rax, %r8;
jne .Lroundloop_expand_key;
#undef ROUND
vzeroall;
ret_spec_stop;
CFI_ENDPROC();
ELF(.size _gcry_sm4_gfni_avx2_expand_key,.-_gcry_sm4_gfni_avx2_expand_key;)
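/* Illustrative C sketch of the scalar SM4 key schedule that the routine above
 * vectorizes (names are hypothetical, not the library's API).  rk_enc gets the
 * 32 round keys in order and rk_dec the same keys reversed, matching the
 * forward/backward stores to %rsi/%rdx.  The S-box table is assumed to be
 * supplied by the caller and is not repeated here; the assembly performs the
 * same byte-wise substitution with vgf2p8affineqb/vgf2p8affineinvqb, and the
 * big-endian word handling (.Lbswap32_mask) is left out of the sketch.
 *
 *   #include <stdint.h>
 *
 *   static uint32_t rol32(uint32_t x, unsigned n)
 *   {
 *       return (x << n) | (x >> (32 - n));
 *   }
 *
 *   // apply the SM4 S-box to each byte of a 32-bit word
 *   static uint32_t sm4_tau(uint32_t x, const uint8_t sbox[256])
 *   {
 *       return ((uint32_t)sbox[(x >> 24) & 0xff] << 24)
 *            | ((uint32_t)sbox[(x >> 16) & 0xff] << 16)
 *            | ((uint32_t)sbox[(x >>  8) & 0xff] <<  8)
 *            |  (uint32_t)sbox[x & 0xff];
 *   }
 *
 *   static void sm4_expand_key(const uint32_t key[4], const uint32_t fk[4],
 *                              const uint32_t ck[32], const uint8_t sbox[256],
 *                              uint32_t rk_enc[32], uint32_t rk_dec[32])
 *   {
 *       uint32_t k[4];
 *       for (int i = 0; i < 4; i++)
 *           k[i] = key[i] ^ fk[i];
 *       for (int i = 0; i < 32; i++) {
 *           uint32_t x = sm4_tau(k[(i + 1) % 4] ^ k[(i + 2) % 4]
 *                                ^ k[(i + 3) % 4] ^ ck[i], sbox);
 *           // L'(x) = x ^ rol(x,13) ^ rol(x,23), as in the ROUND macro above
 *           k[i % 4] ^= x ^ rol32(x, 13) ^ rol32(x, 23);
 *           rk_enc[i] = k[i % 4];
 *           rk_dec[31 - i] = k[i % 4];
 *       }
 *   }
 */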
.align 16
ELF(.type sm4_gfni_avx2_crypt_blk1_4,@function;)
sm4_gfni_avx2_crypt_blk1_4:
/* input:
* %rdi: round key array, CTX
* %rsi: dst (1..4 blocks)
* %rdx: src (1..4 blocks)
* %rcx: num blocks (1..4)
*/
CFI_STARTPROC();
vmovdqu 0*16(%rdx), RA0x;
vmovdqa RA0x, RA1x;
vmovdqa RA0x, RA2x;
vmovdqa RA0x, RA3x;
cmpq $2, %rcx;
jb .Lblk4_load_input_done;
vmovdqu 1*16(%rdx), RA1x;
je .Lblk4_load_input_done;
vmovdqu 2*16(%rdx), RA2x;
cmpq $3, %rcx;
je .Lblk4_load_input_done;
vmovdqu 3*16(%rdx), RA3x;
.Lblk4_load_input_done:
vmovdqa .Lbswap32_mask rRIP, RTMP2x;
vpshufb RTMP2x, RA0x, RA0x;
vpshufb RTMP2x, RA1x, RA1x;
vpshufb RTMP2x, RA2x, RA2x;
vpshufb RTMP2x, RA3x, RA3x;
vmovdqa .Lrol_8 rRIP, RTMP2x;
vmovdqa .Lrol_16 rRIP, RTMP3x;
vmovdqa .Lrol_24 rRIP, RB3x;
transpose_4x4(RA0x, RA1x, RA2x, RA3x, RTMP0x, RTMP1x);
#define ROUND(round, s0, s1, s2, s3) \
vpbroadcastd (4*(round))(%rdi), RX0x; \
vpxor s1, RX0x, RX0x; \
vpxor s2, RX0x, RX0x; \
vpxor s3, RX0x, RX0x; /* s1 ^ s2 ^ s3 ^ rk */ \
\
/* sbox, non-linear part */ \
vgf2p8affineqb $0x65, .Lpre_affine_s rRIP, RX0x, RX0x; \
vgf2p8affineinvqb $0xd3, .Lpost_affine_s rRIP, RX0x, RX0x; \
\
/* linear part */ \
vpxor RX0x, s0, s0; /* s0 ^ x */ \
vpshufb RTMP2x, RX0x, RTMP1x; \
vpxor RTMP1x, RX0x, RTMP0x; /* x ^ rol(x,8) */ \
vpshufb RTMP3x, RX0x, RTMP1x; \
vpxor RTMP1x, RTMP0x, RTMP0x; /* x ^ rol(x,8) ^ rol(x,16) */ \
vpshufb RB3x, RX0x, RTMP1x; \
vpxor RTMP1x, s0, s0; /* s0 ^ x ^ rol(x,24) */ \
vpslld $2, RTMP0x, RTMP1x; \
vpsrld $30, RTMP0x, RTMP0x; \
vpxor RTMP0x, s0, s0; \
vpxor RTMP1x, s0, s0; /* s0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */
leaq (32*4)(%rdi), %rax;
.align 16
.Lroundloop_blk4:
ROUND(0, RA0x, RA1x, RA2x, RA3x);
ROUND(1, RA1x, RA2x, RA3x, RA0x);
ROUND(2, RA2x, RA3x, RA0x, RA1x);
ROUND(3, RA3x, RA0x, RA1x, RA2x);
leaq (4*4)(%rdi), %rdi;
cmpq %rax, %rdi;
jne .Lroundloop_blk4;
#undef ROUND
vmovdqa .Lbswap128_mask rRIP, RTMP2x;
transpose_4x4(RA0x, RA1x, RA2x, RA3x, RTMP0x, RTMP1x);
vpshufb RTMP2x, RA0x, RA0x;
vpshufb RTMP2x, RA1x, RA1x;
vpshufb RTMP2x, RA2x, RA2x;
vpshufb RTMP2x, RA3x, RA3x;
vmovdqu RA0x, 0*16(%rsi);
cmpq $2, %rcx;
jb .Lblk4_store_output_done;
vmovdqu RA1x, 1*16(%rsi);
je .Lblk4_store_output_done;
vmovdqu RA2x, 2*16(%rsi);
cmpq $3, %rcx;
je .Lblk4_store_output_done;
vmovdqu RA3x, 3*16(%rsi);
.Lblk4_store_output_done:
vzeroall;
xorl %eax, %eax;
ret_spec_stop;
CFI_ENDPROC();
ELF(.size sm4_gfni_avx2_crypt_blk1_4,.-sm4_gfni_avx2_crypt_blk1_4;)
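/* Illustrative scalar model of one SM4 round as computed by the ROUND macro
 * above (a sketch, not the library's code).  The vector code obtains the
 * byte-wise S-box through the GFNI affine pair, the byte-granular rotations
 * rol(x,8/16/24) through vpshufb with the .Lrol_* tables, and then derives
 * rol(x,2) ^ rol(x,10) ^ rol(x,18) by rotating x ^ rol(x,8) ^ rol(x,16) left
 * by two bits.
 *
 *   #include <stdint.h>
 *
 *   static uint32_t rol32(uint32_t x, unsigned n)
 *   {
 *       return (x << n) | (x >> (32 - n));
 *   }
 *
 *   // byte-wise S-box; the assembly realizes this with
 *   // vgf2p8affineqb/vgf2p8affineinvqb instead of a table
 *   static uint32_t sm4_tau(uint32_t x, const uint8_t sbox[256])
 *   {
 *       return ((uint32_t)sbox[(x >> 24) & 0xff] << 24)
 *            | ((uint32_t)sbox[(x >> 16) & 0xff] << 16)
 *            | ((uint32_t)sbox[(x >>  8) & 0xff] <<  8)
 *            |  (uint32_t)sbox[x & 0xff];
 *   }
 *
 *   static uint32_t sm4_round(uint32_t s0, uint32_t s1, uint32_t s2,
 *                             uint32_t s3, uint32_t rk,
 *                             const uint8_t sbox[256])
 *   {
 *       uint32_t x = sm4_tau(s1 ^ s2 ^ s3 ^ rk, sbox);
 *       // L(x) = x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24)
 *       return s0 ^ x ^ rol32(x, 2) ^ rol32(x, 10)
 *                 ^ rol32(x, 18) ^ rol32(x, 24);
 *   }
 */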
.align 16
ELF(.type __sm4_gfni_crypt_blk8,@function;)
__sm4_gfni_crypt_blk8:
/* input:
* %rdi: round key array, CTX
* RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: eight parallel
* input blocks
* output:
* RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: eight parallel output
* blocks (encrypted or decrypted, depending on the round key array)
*/
CFI_STARTPROC();
vmovdqa .Lbswap32_mask rRIP, RTMP2x;
vpshufb RTMP2x, RA0x, RA0x;
vpshufb RTMP2x, RA1x, RA1x;
vpshufb RTMP2x, RA2x, RA2x;
vpshufb RTMP2x, RA3x, RA3x;
vpshufb RTMP2x, RB0x, RB0x;
vpshufb RTMP2x, RB1x, RB1x;
vpshufb RTMP2x, RB2x, RB2x;
vpshufb RTMP2x, RB3x, RB3x;
transpose_4x4(RA0x, RA1x, RA2x, RA3x, RTMP0x, RTMP1x);
transpose_4x4(RB0x, RB1x, RB2x, RB3x, RTMP0x, RTMP1x);
#define ROUND(round, s0, s1, s2, s3, r0, r1, r2, r3) \
vpbroadcastd (4*(round))(%rdi), RX0x; \
vmovdqa .Lpre_affine_s rRIP, RTMP2x; \
vmovdqa .Lpost_affine_s rRIP, RTMP3x; \
vmovdqa RX0x, RX1x; \
vpxor s1, RX0x, RX0x; \
vpxor s2, RX0x, RX0x; \
vpxor s3, RX0x, RX0x; /* s1 ^ s2 ^ s3 ^ rk */ \
vpxor r1, RX1x, RX1x; \
vpxor r2, RX1x, RX1x; \
vpxor r3, RX1x, RX1x; /* r1 ^ r2 ^ r3 ^ rk */ \
\
/* sbox, non-linear part */ \
vmovdqa .Lrol_8 rRIP, RTMP4x; \
vgf2p8affineqb $0x65, RTMP2x, RX0x, RX0x; \
vgf2p8affineinvqb $0xd3, RTMP3x, RX0x, RX0x; \
vgf2p8affineqb $0x65, RTMP2x, RX1x, RX1x; \
vgf2p8affineinvqb $0xd3, RTMP3x, RX1x, RX1x; \
\
/* linear part */ \
vpxor RX0x, s0, s0; /* s0 ^ x */ \
vpshufb RTMP4x, RX0x, RTMP1x; \
vpxor RTMP1x, RX0x, RTMP0x; /* x ^ rol(x,8) */ \
vpxor RX1x, r0, r0; /* r0 ^ x */ \
vpshufb RTMP4x, RX1x, RTMP3x; \
vmovdqa .Lrol_16 rRIP, RTMP4x; \
vpxor RTMP3x, RX1x, RTMP2x; /* x ^ rol(x,8) */ \
vpshufb RTMP4x, RX0x, RTMP1x; \
vpxor RTMP1x, RTMP0x, RTMP0x; /* x ^ rol(x,8) ^ rol(x,16) */ \
vpshufb RTMP4x, RX1x, RTMP3x; \
vmovdqa .Lrol_24 rRIP, RTMP4x; \
vpxor RTMP3x, RTMP2x, RTMP2x; /* x ^ rol(x,8) ^ rol(x,16) */ \
vpshufb RTMP4x, RX0x, RTMP1x; \
vpxor RTMP1x, s0, s0; /* s0 ^ x ^ rol(x,24) */ \
vpslld $2, RTMP0x, RTMP1x; \
vpsrld $30, RTMP0x, RTMP0x; \
vpxor RTMP0x, s0, s0; \
vpxor RTMP1x, s0, s0; /* s0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \
vpshufb RTMP4x, RX1x, RTMP3x; \
vpxor RTMP3x, r0, r0; /* r0 ^ x ^ rol(x,24) */ \
vpslld $2, RTMP2x, RTMP3x; \
vpsrld $30, RTMP2x, RTMP2x; \
vpxor RTMP2x, r0, r0; \
vpxor RTMP3x, r0, r0; /* r0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */
leaq (32*4)(%rdi), %rax;
.align 16
.Lroundloop_blk8:
ROUND(0, RA0x, RA1x, RA2x, RA3x, RB0x, RB1x, RB2x, RB3x);
ROUND(1, RA1x, RA2x, RA3x, RA0x, RB1x, RB2x, RB3x, RB0x);
ROUND(2, RA2x, RA3x, RA0x, RA1x, RB2x, RB3x, RB0x, RB1x);
ROUND(3, RA3x, RA0x, RA1x, RA2x, RB3x, RB0x, RB1x, RB2x);
leaq (4*4)(%rdi), %rdi;
cmpq %rax, %rdi;
jne .Lroundloop_blk8;
#undef ROUND
vmovdqa .Lbswap128_mask rRIP, RTMP2x;
transpose_4x4(RA0x, RA1x, RA2x, RA3x, RTMP0x, RTMP1x);
transpose_4x4(RB0x, RB1x, RB2x, RB3x, RTMP0x, RTMP1x);
vpshufb RTMP2x, RA0x, RA0x;
vpshufb RTMP2x, RA1x, RA1x;
vpshufb RTMP2x, RA2x, RA2x;
vpshufb RTMP2x, RA3x, RA3x;
vpshufb RTMP2x, RB0x, RB0x;
vpshufb RTMP2x, RB1x, RB1x;
vpshufb RTMP2x, RB2x, RB2x;
vpshufb RTMP2x, RB3x, RB3x;
ret_spec_stop;
CFI_ENDPROC();
ELF(.size __sm4_gfni_crypt_blk8,.-__sm4_gfni_crypt_blk8;)
.align 16
ELF(.type _gcry_sm4_gfni_avx2_crypt_blk1_8,@function;)
_gcry_sm4_gfni_avx2_crypt_blk1_8:
/* input:
* %rdi: round key array, CTX
* %rsi: dst (1..8 blocks)
* %rdx: src (1..8 blocks)
* %rcx: num blocks (1..8)
*/
CFI_STARTPROC();
cmpq $5, %rcx;
jb sm4_gfni_avx2_crypt_blk1_4;
vmovdqu (0 * 16)(%rdx), RA0x;
vmovdqu (1 * 16)(%rdx), RA1x;
vmovdqu (2 * 16)(%rdx), RA2x;
vmovdqu (3 * 16)(%rdx), RA3x;
vmovdqu (4 * 16)(%rdx), RB0x;
vmovdqa RB0x, RB1x;
vmovdqa RB0x, RB2x;
vmovdqa RB0x, RB3x;
je .Lblk8_load_input_done;
vmovdqu (5 * 16)(%rdx), RB1x;
cmpq $7, %rcx;
jb .Lblk8_load_input_done;
vmovdqu (6 * 16)(%rdx), RB2x;
je .Lblk8_load_input_done;
vmovdqu (7 * 16)(%rdx), RB3x;
.Lblk8_load_input_done:
call __sm4_gfni_crypt_blk8;
cmpq $6, %rcx;
vmovdqu RA0x, (0 * 16)(%rsi);
vmovdqu RA1x, (1 * 16)(%rsi);
vmovdqu RA2x, (2 * 16)(%rsi);
vmovdqu RA3x, (3 * 16)(%rsi);
vmovdqu RB0x, (4 * 16)(%rsi);
jb .Lblk8_store_output_done;
vmovdqu RB1x, (5 * 16)(%rsi);
je .Lblk8_store_output_done;
vmovdqu RB2x, (6 * 16)(%rsi);
cmpq $7, %rcx;
je .Lblk8_store_output_done;
vmovdqu RB3x, (7 * 16)(%rsi);
.Lblk8_store_output_done:
vzeroall;
xorl %eax, %eax;
ret_spec_stop;
CFI_ENDPROC();
ELF(.size _gcry_sm4_gfni_avx2_crypt_blk1_8,.-_gcry_sm4_gfni_avx2_crypt_blk1_8;)
/**********************************************************************
16-way SM4 with GFNI and AVX2
**********************************************************************/
.align 16
ELF(.type __sm4_gfni_crypt_blk16,@function;)
__sm4_gfni_crypt_blk16:
/* input:
* %rdi: ctx, CTX
* RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: sixteen parallel
* plaintext blocks
* output:
* RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: sixteen parallel
* ciphertext blocks
*/
CFI_STARTPROC();
vbroadcasti128 .Lbswap32_mask rRIP, RTMP2;
vpshufb RTMP2, RA0, RA0;
vpshufb RTMP2, RA1, RA1;
vpshufb RTMP2, RA2, RA2;
vpshufb RTMP2, RA3, RA3;
vpshufb RTMP2, RB0, RB0;
vpshufb RTMP2, RB1, RB1;
vpshufb RTMP2, RB2, RB2;
vpshufb RTMP2, RB3, RB3;
transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1);
transpose_4x4(RB0, RB1, RB2, RB3, RTMP0, RTMP1);
#define ROUND(round, s0, s1, s2, s3, r0, r1, r2, r3) \
vpbroadcastd (4*(round))(%rdi), RX0; \
vbroadcasti128 .Lpre_affine_s rRIP, RTMP2; \
vbroadcasti128 .Lpost_affine_s rRIP, RTMP3; \
vmovdqa RX0, RX1; \
vpxor s1, RX0, RX0; \
vpxor s2, RX0, RX0; \
vpxor s3, RX0, RX0; /* s1 ^ s2 ^ s3 ^ rk */ \
vpxor r1, RX1, RX1; \
vpxor r2, RX1, RX1; \
vpxor r3, RX1, RX1; /* r1 ^ r2 ^ r3 ^ rk */ \
\
/* sbox, non-linear part */ \
vbroadcasti128 .Lrol_8 rRIP, RTMP4; \
vgf2p8affineqb $0x65, RTMP2, RX0, RX0; \
vgf2p8affineinvqb $0xd3, RTMP3, RX0, RX0; \
vgf2p8affineqb $0x65, RTMP2, RX1, RX1; \
vgf2p8affineinvqb $0xd3, RTMP3, RX1, RX1; \
\
/* linear part */ \
vpxor RX0, s0, s0; /* s0 ^ x */ \
vpshufb RTMP4, RX0, RTMP1; \
vpxor RTMP1, RX0, RTMP0; /* x ^ rol(x,8) */ \
vpxor RX1, r0, r0; /* r0 ^ x */ \
vpshufb RTMP4, RX1, RTMP3; \
vbroadcasti128 .Lrol_16 rRIP, RTMP4; \
vpxor RTMP3, RX1, RTMP2; /* x ^ rol(x,8) */ \
vpshufb RTMP4, RX0, RTMP1; \
vpxor RTMP1, RTMP0, RTMP0; /* x ^ rol(x,8) ^ rol(x,16) */ \
vpshufb RTMP4, RX1, RTMP3; \
vbroadcasti128 .Lrol_24 rRIP, RTMP4; \
vpxor RTMP3, RTMP2, RTMP2; /* x ^ rol(x,8) ^ rol(x,16) */ \
vpshufb RTMP4, RX0, RTMP1; \
vpxor RTMP1, s0, s0; /* s0 ^ x ^ rol(x,24) */ \
vpslld $2, RTMP0, RTMP1; \
vpsrld $30, RTMP0, RTMP0; \
vpxor RTMP0, s0, s0; \
vpxor RTMP1, s0, s0; /* s0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \
vpshufb RTMP4, RX1, RTMP3; \
vpxor RTMP3, r0, r0; /* r0 ^ x ^ rol(x,24) */ \
vpslld $2, RTMP2, RTMP3; \
vpsrld $30, RTMP2, RTMP2; \
vpxor RTMP2, r0, r0; \
vpxor RTMP3, r0, r0; /* r0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */
leaq (32*4)(%rdi), %rax;
.align 16
.Lroundloop_blk16:
ROUND(0, RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3);
ROUND(1, RA1, RA2, RA3, RA0, RB1, RB2, RB3, RB0);
ROUND(2, RA2, RA3, RA0, RA1, RB2, RB3, RB0, RB1);
ROUND(3, RA3, RA0, RA1, RA2, RB3, RB0, RB1, RB2);
leaq (4*4)(%rdi), %rdi;
cmpq %rax, %rdi;
jne .Lroundloop_blk16;
#undef ROUND
vbroadcasti128 .Lbswap128_mask rRIP, RTMP2;
transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1);
transpose_4x4(RB0, RB1, RB2, RB3, RTMP0, RTMP1);
vpshufb RTMP2, RA0, RA0;
vpshufb RTMP2, RA1, RA1;
vpshufb RTMP2, RA2, RA2;
vpshufb RTMP2, RA3, RA3;
vpshufb RTMP2, RB0, RB0;
vpshufb RTMP2, RB1, RB1;
vpshufb RTMP2, RB2, RB2;
vpshufb RTMP2, RB3, RB3;
ret_spec_stop;
CFI_ENDPROC();
ELF(.size __sm4_gfni_crypt_blk16,.-__sm4_gfni_crypt_blk16;)
.align 16
.globl _gcry_sm4_gfni_avx2_crypt_blk1_16
ELF(.type _gcry_sm4_gfni_avx2_crypt_blk1_16,@function;)
_gcry_sm4_gfni_avx2_crypt_blk1_16:
/* input:
* %rdi: round key array, CTX
* %rsi: dst (1..16 blocks)
* %rdx: src (1..16 blocks)
* %rcx: num blocks (1..16)
*/
CFI_STARTPROC();
#define LOAD_INPUT(offset, yreg) \
cmpq $(1 + 2 * (offset)), %rcx; \
jb .Lblk16_load_input_done; \
ja 1f; \
vmovdqu (offset) * 32(%rdx), yreg##x; \
jmp .Lblk16_load_input_done; \
1: \
vmovdqu (offset) * 32(%rdx), yreg;
cmpq $8, %rcx;
jbe _gcry_sm4_gfni_avx2_crypt_blk1_8;
vmovdqu (0 * 32)(%rdx), RA0;
vmovdqu (1 * 32)(%rdx), RA1;
vmovdqu (2 * 32)(%rdx), RA2;
vmovdqu (3 * 32)(%rdx), RA3;
LOAD_INPUT(4, RB0);
LOAD_INPUT(5, RB1);
LOAD_INPUT(6, RB2);
LOAD_INPUT(7, RB3);
#undef LOAD_INPUT
.Lblk16_load_input_done:
call __sm4_gfni_crypt_blk16;
#define STORE_OUTPUT(yreg, offset) \
cmpq $(1 + 2 * (offset)), %rcx; \
jb .Lblk16_store_output_done; \
ja 1f; \
vmovdqu yreg##x, (offset) * 32(%rsi); \
jmp .Lblk16_store_output_done; \
1: \
vmovdqu yreg, (offset) * 32(%rsi);
vmovdqu RA0, (0 * 32)(%rsi);
vmovdqu RA1, (1 * 32)(%rsi);
vmovdqu RA2, (2 * 32)(%rsi);
vmovdqu RA3, (3 * 32)(%rsi);
STORE_OUTPUT(RB0, 4);
STORE_OUTPUT(RB1, 5);
STORE_OUTPUT(RB2, 6);
STORE_OUTPUT(RB3, 7);
#undef STORE_OUTPUT
.Lblk16_store_output_done:
vzeroall;
xorl %eax, %eax;
ret_spec_stop;
CFI_ENDPROC();
ELF(.size _gcry_sm4_gfni_avx2_crypt_blk1_16,.-_gcry_sm4_gfni_avx2_crypt_blk1_16;)
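/* Illustrative C model of the LOAD_INPUT/STORE_OUTPUT tail handling above
 * (a sketch with hypothetical names): each ymm register carries two 16-byte
 * blocks, so a trailing odd block only touches the low 128-bit half, which is
 * exactly what the cmpq/jb/ja ladder selects.
 *
 *   #include <stddef.h>
 *   #include <stdint.h>
 *   #include <string.h>
 *
 *   // vec[slot] models one 256-bit register holding blocks 2*slot, 2*slot+1
 *   static void load_blocks(uint8_t vec[8][32], const uint8_t *src,
 *                           size_t nblks)
 *   {
 *       for (size_t slot = 0; slot < 8; slot++) {
 *           size_t blk = 2 * slot;
 *           if (blk >= nblks)
 *               break;
 *           size_t len = (nblks - blk >= 2) ? 32 : 16; // full ymm or low xmm
 *           memcpy(vec[slot], src + blk * 16, len);
 *       }
 *   }
 */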
#define inc_le128(x, minus_one, tmp) \
vpcmpeqq minus_one, x, tmp; \
vpsubq minus_one, x, x; \
vpslldq $8, tmp, tmp; \
vpsubq tmp, x, x;
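/* Per 128-bit lane, inc_le128 is a little-endian 128-bit increment; the
 * vpcmpeqq/vpslldq pair injects the carry into the high qword when the low
 * qword was all-ones.  Scalar model (an illustrative sketch):
 *
 *   #include <stdint.h>
 *
 *   static void inc_le128(uint64_t v[2])  // v[0] = low qword, v[1] = high
 *   {
 *       if (++v[0] == 0)
 *           v[1]++;                       // carry out of the low qword
 *   }
 */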
.align 16
.globl _gcry_sm4_gfni_avx2_ctr_enc
ELF(.type _gcry_sm4_gfni_avx2_ctr_enc,@function;)
_gcry_sm4_gfni_avx2_ctr_enc:
/* input:
* %rdi: ctx, CTX
* %rsi: dst (16 blocks)
* %rdx: src (16 blocks)
* %rcx: iv (big endian, 128bit)
*/
CFI_STARTPROC();
+ cmpb $(0x100 - 16), 15(%rcx);
+ jbe .Lctr_byteadd;
+
movq 8(%rcx), %rax;
bswapq %rax;
vbroadcasti128 .Lbswap128_mask rRIP, RTMP3;
vpcmpeqd RNOT, RNOT, RNOT;
vpsrldq $8, RNOT, RNOT; /* ab: -1:0 ; cd: -1:0 */
vpaddq RNOT, RNOT, RTMP2; /* ab: -2:0 ; cd: -2:0 */
/* load IV and byteswap */
vmovdqu (%rcx), RTMP4x;
vpshufb RTMP3x, RTMP4x, RTMP4x;
vmovdqa RTMP4x, RTMP0x;
inc_le128(RTMP4x, RNOTx, RTMP1x);
vinserti128 $1, RTMP4x, RTMP0, RTMP0;
vpshufb RTMP3, RTMP0, RA0; /* +1 ; +0 */
/* check need for handling 64-bit overflow and carry */
cmpq $(0xffffffffffffffff - 16), %rax;
ja .Lhandle_ctr_carry;
/* construct IVs */
vpsubq RTMP2, RTMP0, RTMP0; /* +3 ; +2 */
vpshufb RTMP3, RTMP0, RA1;
vpsubq RTMP2, RTMP0, RTMP0; /* +5 ; +4 */
vpshufb RTMP3, RTMP0, RA2;
vpsubq RTMP2, RTMP0, RTMP0; /* +7 ; +6 */
vpshufb RTMP3, RTMP0, RA3;
vpsubq RTMP2, RTMP0, RTMP0; /* +9 ; +8 */
vpshufb RTMP3, RTMP0, RB0;
vpsubq RTMP2, RTMP0, RTMP0; /* +11 ; +10 */
vpshufb RTMP3, RTMP0, RB1;
vpsubq RTMP2, RTMP0, RTMP0; /* +13 ; +12 */
vpshufb RTMP3, RTMP0, RB2;
vpsubq RTMP2, RTMP0, RTMP0; /* +15 ; +14 */
vpshufb RTMP3, RTMP0, RB3;
vpsubq RTMP2, RTMP0, RTMP0; /* +16 */
vpshufb RTMP3x, RTMP0x, RTMP0x;
jmp .Lctr_carry_done;
.Lhandle_ctr_carry:
/* construct IVs */
inc_le128(RTMP0, RNOT, RTMP1);
inc_le128(RTMP0, RNOT, RTMP1);
vpshufb RTMP3, RTMP0, RA1; /* +3 ; +2 */
inc_le128(RTMP0, RNOT, RTMP1);
inc_le128(RTMP0, RNOT, RTMP1);
vpshufb RTMP3, RTMP0, RA2; /* +5 ; +4 */
inc_le128(RTMP0, RNOT, RTMP1);
inc_le128(RTMP0, RNOT, RTMP1);
vpshufb RTMP3, RTMP0, RA3; /* +7 ; +6 */
inc_le128(RTMP0, RNOT, RTMP1);
inc_le128(RTMP0, RNOT, RTMP1);
vpshufb RTMP3, RTMP0, RB0; /* +9 ; +8 */
inc_le128(RTMP0, RNOT, RTMP1);
inc_le128(RTMP0, RNOT, RTMP1);
vpshufb RTMP3, RTMP0, RB1; /* +11 ; +10 */
inc_le128(RTMP0, RNOT, RTMP1);
inc_le128(RTMP0, RNOT, RTMP1);
vpshufb RTMP3, RTMP0, RB2; /* +13 ; +12 */
inc_le128(RTMP0, RNOT, RTMP1);
inc_le128(RTMP0, RNOT, RTMP1);
vpshufb RTMP3, RTMP0, RB3; /* +15 ; +14 */
inc_le128(RTMP0, RNOT, RTMP1);
vextracti128 $1, RTMP0, RTMP0x;
vpshufb RTMP3x, RTMP0x, RTMP0x; /* +16 */
-.align 4
.Lctr_carry_done:
/* store new IV */
vmovdqu RTMP0x, (%rcx);
+.align 8
+.Lload_ctr_done:
call __sm4_gfni_crypt_blk16;
vpxor (0 * 32)(%rdx), RA0, RA0;
vpxor (1 * 32)(%rdx), RA1, RA1;
vpxor (2 * 32)(%rdx), RA2, RA2;
vpxor (3 * 32)(%rdx), RA3, RA3;
vpxor (4 * 32)(%rdx), RB0, RB0;
vpxor (5 * 32)(%rdx), RB1, RB1;
vpxor (6 * 32)(%rdx), RB2, RB2;
vpxor (7 * 32)(%rdx), RB3, RB3;
vmovdqu RA0, (0 * 32)(%rsi);
vmovdqu RA1, (1 * 32)(%rsi);
vmovdqu RA2, (2 * 32)(%rsi);
vmovdqu RA3, (3 * 32)(%rsi);
vmovdqu RB0, (4 * 32)(%rsi);
vmovdqu RB1, (5 * 32)(%rsi);
vmovdqu RB2, (6 * 32)(%rsi);
vmovdqu RB3, (7 * 32)(%rsi);
vzeroall;
ret_spec_stop;
+
+.align 8
+.Lctr_byteadd_full_ctr_carry:
+ movq 8(%rcx), %r11;
+ movq (%rcx), %r10;
+ bswapq %r11;
+ bswapq %r10;
+ addq $16, %r11;
+ adcq $0, %r10;
+ bswapq %r11;
+ bswapq %r10;
+ movq %r11, 8(%rcx);
+ movq %r10, (%rcx);
+ jmp .Lctr_byteadd_ymm;
+.align 8
+.Lctr_byteadd:
+ vbroadcasti128 (%rcx), RB3;
+ je .Lctr_byteadd_full_ctr_carry;
+ addb $16, 15(%rcx);
+.Lctr_byteadd_ymm:
+ vpaddb .Lbige_addb_0_1 rRIP, RB3, RA0;
+ vpaddb .Lbige_addb_2_3 rRIP, RB3, RA1;
+ vpaddb .Lbige_addb_4_5 rRIP, RB3, RA2;
+ vpaddb .Lbige_addb_6_7 rRIP, RB3, RA3;
+ vpaddb .Lbige_addb_8_9 rRIP, RB3, RB0;
+ vpaddb .Lbige_addb_10_11 rRIP, RB3, RB1;
+ vpaddb .Lbige_addb_12_13 rRIP, RB3, RB2;
+ vpaddb .Lbige_addb_14_15 rRIP, RB3, RB3;
+
+ jmp .Lload_ctr_done;
CFI_ENDPROC();
ELF(.size _gcry_sm4_gfni_avx2_ctr_enc,.-_gcry_sm4_gfni_avx2_ctr_enc;)
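/* Illustrative C model of the CTR fast path added above (a sketch, not the
 * library's code): when the last byte of the big-endian counter is at most
 * 0xf0, none of the 16 per-block counters can carry out of that byte, so each
 * block counter is formed with a single byte-add (vpaddb with the
 * .Lbige_addb_* constants); only the counter written back to the IV may still
 * need the full 128-bit add with carry.
 *
 *   #include <stdint.h>
 *   #include <string.h>
 *
 *   static void ctr_blocks_byteadd(const uint8_t ctr[16],
 *                                  uint8_t blocks[16][16])
 *   {
 *       // caller guarantees ctr[15] <= 0xf0, so ctr[15] + 15 cannot carry
 *       for (int i = 0; i < 16; i++) {
 *           memcpy(blocks[i], ctr, 16);
 *           blocks[i][15] = (uint8_t)(ctr[15] + i);
 *       }
 *   }
 */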
.align 16
.globl _gcry_sm4_gfni_avx2_cbc_dec
ELF(.type _gcry_sm4_gfni_avx2_cbc_dec,@function;)
_gcry_sm4_gfni_avx2_cbc_dec:
/* input:
* %rdi: ctx, CTX
* %rsi: dst (16 blocks)
* %rdx: src (16 blocks)
* %rcx: iv
*/
CFI_STARTPROC();
vmovdqu (0 * 32)(%rdx), RA0;
vmovdqu (1 * 32)(%rdx), RA1;
vmovdqu (2 * 32)(%rdx), RA2;
vmovdqu (3 * 32)(%rdx), RA3;
vmovdqu (4 * 32)(%rdx), RB0;
vmovdqu (5 * 32)(%rdx), RB1;
vmovdqu (6 * 32)(%rdx), RB2;
vmovdqu (7 * 32)(%rdx), RB3;
call __sm4_gfni_crypt_blk16;
vmovdqu (%rcx), RNOTx;
vinserti128 $1, (%rdx), RNOT, RNOT;
vpxor RNOT, RA0, RA0;
vpxor (0 * 32 + 16)(%rdx), RA1, RA1;
vpxor (1 * 32 + 16)(%rdx), RA2, RA2;
vpxor (2 * 32 + 16)(%rdx), RA3, RA3;
vpxor (3 * 32 + 16)(%rdx), RB0, RB0;
vpxor (4 * 32 + 16)(%rdx), RB1, RB1;
vpxor (5 * 32 + 16)(%rdx), RB2, RB2;
vpxor (6 * 32 + 16)(%rdx), RB3, RB3;
vmovdqu (7 * 32 + 16)(%rdx), RNOTx;
vmovdqu RNOTx, (%rcx); /* store new IV */
vmovdqu RA0, (0 * 32)(%rsi);
vmovdqu RA1, (1 * 32)(%rsi);
vmovdqu RA2, (2 * 32)(%rsi);
vmovdqu RA3, (3 * 32)(%rsi);
vmovdqu RB0, (4 * 32)(%rsi);
vmovdqu RB1, (5 * 32)(%rsi);
vmovdqu RB2, (6 * 32)(%rsi);
vmovdqu RB3, (7 * 32)(%rsi);
vzeroall;
ret_spec_stop;
CFI_ENDPROC();
ELF(.size _gcry_sm4_gfni_avx2_cbc_dec,.-_gcry_sm4_gfni_avx2_cbc_dec;)
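/* Reference model of the CBC decryption above (a sketch with hypothetical
 * names): P_i = D_K(C_i) xor C_{i-1}, with the incoming IV in place of C_0
 * and the last ciphertext block becoming the new IV.  sm4_decrypt_block()
 * stands in for the 16-way core run with the decryption round keys.
 *
 *   #include <stddef.h>
 *   #include <stdint.h>
 *   #include <string.h>
 *
 *   typedef void (*blk_fn)(void *ctx, uint8_t out[16], const uint8_t in[16]);
 *
 *   static void cbc_dec(void *ctx, blk_fn sm4_decrypt_block, uint8_t *dst,
 *                       const uint8_t *src, size_t nblks, uint8_t iv[16])
 *   {
 *       uint8_t prev[16], cblk[16], tmp[16];
 *
 *       memcpy(prev, iv, 16);
 *       for (size_t i = 0; i < nblks; i++) {
 *           memcpy(cblk, src + i * 16, 16);   // keep C_i (in-place safe)
 *           sm4_decrypt_block(ctx, tmp, cblk);
 *           for (int j = 0; j < 16; j++)
 *               dst[i * 16 + j] = tmp[j] ^ prev[j];
 *           memcpy(prev, cblk, 16);
 *       }
 *       memcpy(iv, prev, 16);                 // new IV = last ciphertext
 *   }
 */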
.align 16
.globl _gcry_sm4_gfni_avx2_cfb_dec
ELF(.type _gcry_sm4_gfni_avx2_cfb_dec,@function;)
_gcry_sm4_gfni_avx2_cfb_dec:
/* input:
* %rdi: ctx, CTX
* %rsi: dst (16 blocks)
* %rdx: src (16 blocks)
* %rcx: iv
*/
CFI_STARTPROC();
/* Load input */
vmovdqu (%rcx), RNOTx;
vinserti128 $1, (%rdx), RNOT, RA0;
vmovdqu (0 * 32 + 16)(%rdx), RA1;
vmovdqu (1 * 32 + 16)(%rdx), RA2;
vmovdqu (2 * 32 + 16)(%rdx), RA3;
vmovdqu (3 * 32 + 16)(%rdx), RB0;
vmovdqu (4 * 32 + 16)(%rdx), RB1;
vmovdqu (5 * 32 + 16)(%rdx), RB2;
vmovdqu (6 * 32 + 16)(%rdx), RB3;
/* Update IV */
vmovdqu (7 * 32 + 16)(%rdx), RNOTx;
vmovdqu RNOTx, (%rcx);
call __sm4_gfni_crypt_blk16;
vpxor (0 * 32)(%rdx), RA0, RA0;
vpxor (1 * 32)(%rdx), RA1, RA1;
vpxor (2 * 32)(%rdx), RA2, RA2;
vpxor (3 * 32)(%rdx), RA3, RA3;
vpxor (4 * 32)(%rdx), RB0, RB0;
vpxor (5 * 32)(%rdx), RB1, RB1;
vpxor (6 * 32)(%rdx), RB2, RB2;
vpxor (7 * 32)(%rdx), RB3, RB3;
vmovdqu RA0, (0 * 32)(%rsi);
vmovdqu RA1, (1 * 32)(%rsi);
vmovdqu RA2, (2 * 32)(%rsi);
vmovdqu RA3, (3 * 32)(%rsi);
vmovdqu RB0, (4 * 32)(%rsi);
vmovdqu RB1, (5 * 32)(%rsi);
vmovdqu RB2, (6 * 32)(%rsi);
vmovdqu RB3, (7 * 32)(%rsi);
vzeroall;
ret_spec_stop;
CFI_ENDPROC();
ELF(.size _gcry_sm4_gfni_avx2_cfb_dec,.-_gcry_sm4_gfni_avx2_cfb_dec;)
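/* Reference model of the CFB decryption above (a sketch with hypothetical
 * names): P_i = E_K(C_{i-1}) xor C_i with C_{-1} = IV, so the block cipher
 * runs only in the forward direction and the keystream inputs are the IV
 * followed by the first 15 ciphertext blocks.  sm4_encrypt_block() stands in
 * for the 16-way core above.
 *
 *   #include <stddef.h>
 *   #include <stdint.h>
 *   #include <string.h>
 *
 *   typedef void (*blk_fn)(void *ctx, uint8_t out[16], const uint8_t in[16]);
 *
 *   static void cfb_dec(void *ctx, blk_fn sm4_encrypt_block, uint8_t *dst,
 *                       const uint8_t *src, size_t nblks, uint8_t iv[16])
 *   {
 *       uint8_t keyblk[16], cblk[16];
 *
 *       for (size_t i = 0; i < nblks; i++) {
 *           memcpy(cblk, src + i * 16, 16);     // keep C_i (in-place safe)
 *           sm4_encrypt_block(ctx, keyblk, iv); // E_K of IV / previous C
 *           for (int j = 0; j < 16; j++)
 *               dst[i * 16 + j] = keyblk[j] ^ cblk[j];
 *           memcpy(iv, cblk, 16);               // next chaining value
 *       }
 *   }
 */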
.align 16
.globl _gcry_sm4_gfni_avx2_ocb_enc
ELF(.type _gcry_sm4_gfni_avx2_ocb_enc,@function;)
_gcry_sm4_gfni_avx2_ocb_enc:
/* input:
* %rdi: ctx, CTX
* %rsi: dst (16 blocks)
* %rdx: src (16 blocks)
* %rcx: offset
* %r8 : checksum
* %r9 : L pointers (void *L[16])
*/
CFI_STARTPROC();
subq $(4 * 8), %rsp;
CFI_ADJUST_CFA_OFFSET(4 * 8);
movq %r10, (0 * 8)(%rsp);
movq %r11, (1 * 8)(%rsp);
movq %r12, (2 * 8)(%rsp);
movq %r13, (3 * 8)(%rsp);
CFI_REL_OFFSET(%r10, 0 * 8);
CFI_REL_OFFSET(%r11, 1 * 8);
CFI_REL_OFFSET(%r12, 2 * 8);
CFI_REL_OFFSET(%r13, 3 * 8);
vmovdqu (%rcx), RTMP0x;
vmovdqu (%r8), RTMP1x;
/* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
/* Checksum_i = Checksum_{i-1} xor P_i */
/* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i) */
#define OCB_INPUT(n, l0reg, l1reg, yreg) \
vmovdqu (n * 32)(%rdx), yreg; \
vpxor (l0reg), RTMP0x, RNOTx; \
vpxor (l1reg), RNOTx, RTMP0x; \
vinserti128 $1, RTMP0x, RNOT, RNOT; \
vpxor yreg, RTMP1, RTMP1; \
vpxor yreg, RNOT, yreg; \
vmovdqu RNOT, (n * 32)(%rsi);
movq (0 * 8)(%r9), %r10;
movq (1 * 8)(%r9), %r11;
movq (2 * 8)(%r9), %r12;
movq (3 * 8)(%r9), %r13;
OCB_INPUT(0, %r10, %r11, RA0);
OCB_INPUT(1, %r12, %r13, RA1);
movq (4 * 8)(%r9), %r10;
movq (5 * 8)(%r9), %r11;
movq (6 * 8)(%r9), %r12;
movq (7 * 8)(%r9), %r13;
OCB_INPUT(2, %r10, %r11, RA2);
OCB_INPUT(3, %r12, %r13, RA3);
movq (8 * 8)(%r9), %r10;
movq (9 * 8)(%r9), %r11;
movq (10 * 8)(%r9), %r12;
movq (11 * 8)(%r9), %r13;
OCB_INPUT(4, %r10, %r11, RB0);
OCB_INPUT(5, %r12, %r13, RB1);
movq (12 * 8)(%r9), %r10;
movq (13 * 8)(%r9), %r11;
movq (14 * 8)(%r9), %r12;
movq (15 * 8)(%r9), %r13;
OCB_INPUT(6, %r10, %r11, RB2);
OCB_INPUT(7, %r12, %r13, RB3);
#undef OCB_INPUT
vextracti128 $1, RTMP1, RNOTx;
vmovdqu RTMP0x, (%rcx);
vpxor RNOTx, RTMP1x, RTMP1x;
vmovdqu RTMP1x, (%r8);
movq (0 * 8)(%rsp), %r10;
movq (1 * 8)(%rsp), %r11;
movq (2 * 8)(%rsp), %r12;
movq (3 * 8)(%rsp), %r13;
CFI_RESTORE(%r10);
CFI_RESTORE(%r11);
CFI_RESTORE(%r12);
CFI_RESTORE(%r13);
call __sm4_gfni_crypt_blk16;
addq $(4 * 8), %rsp;
CFI_ADJUST_CFA_OFFSET(-4 * 8);
vpxor (0 * 32)(%rsi), RA0, RA0;
vpxor (1 * 32)(%rsi), RA1, RA1;
vpxor (2 * 32)(%rsi), RA2, RA2;
vpxor (3 * 32)(%rsi), RA3, RA3;
vpxor (4 * 32)(%rsi), RB0, RB0;
vpxor (5 * 32)(%rsi), RB1, RB1;
vpxor (6 * 32)(%rsi), RB2, RB2;
vpxor (7 * 32)(%rsi), RB3, RB3;
vmovdqu RA0, (0 * 32)(%rsi);
vmovdqu RA1, (1 * 32)(%rsi);
vmovdqu RA2, (2 * 32)(%rsi);
vmovdqu RA3, (3 * 32)(%rsi);
vmovdqu RB0, (4 * 32)(%rsi);
vmovdqu RB1, (5 * 32)(%rsi);
vmovdqu RB2, (6 * 32)(%rsi);
vmovdqu RB3, (7 * 32)(%rsi);
vzeroall;
ret_spec_stop;
CFI_ENDPROC();
ELF(.size _gcry_sm4_gfni_avx2_ocb_enc,.-_gcry_sm4_gfni_avx2_ocb_enc;)
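/* Illustrative C model of the per-block OCB bookkeeping done by OCB_INPUT
 * above (a sketch, not the library's code):
 *   Offset_i   = Offset_{i-1} xor L_{ntz(i)}
 *   Checksum_i = Checksum_{i-1} xor P_i
 *   C_i        = Offset_i xor E_K(P_i xor Offset_i)
 * The C caller prepares the L pointer table, so l[i] below plays the role of
 * L_{ntz(i)}.  The pre-whitened blocks are parked in the output buffer until
 * the 16-way encryption finishes, exactly as the assembly reuses dst as
 * scratch; 'work' models that buffer here.
 *
 *   #include <stdint.h>
 *
 *   static void xor16(uint8_t *d, const uint8_t *a, const uint8_t *b)
 *   {
 *       for (int i = 0; i < 16; i++)
 *           d[i] = a[i] ^ b[i];
 *   }
 *
 *   static void ocb_enc_prewhiten(uint8_t offset[16], uint8_t checksum[16],
 *                                 uint8_t work[16][16],
 *                                 const uint8_t plain[16][16],
 *                                 const uint8_t *const l[16])
 *   {
 *       for (int i = 0; i < 16; i++) {
 *           xor16(offset, offset, l[i]);          // Offset_i
 *           xor16(checksum, checksum, plain[i]);  // Checksum_i
 *           xor16(work[i], plain[i], offset);     // input to E_K
 *       }
 *   }
 */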
.align 16
.globl _gcry_sm4_gfni_avx2_ocb_dec
ELF(.type _gcry_sm4_gfni_avx2_ocb_dec,@function;)
_gcry_sm4_gfni_avx2_ocb_dec:
/* input:
* %rdi: ctx, CTX
* %rsi: dst (16 blocks)
* %rdx: src (16 blocks)
* %rcx: offset
* %r8 : checksum
* %r9 : L pointers (void *L[16])
*/
CFI_STARTPROC();
subq $(4 * 8), %rsp;
CFI_ADJUST_CFA_OFFSET(4 * 8);
movq %r10, (0 * 8)(%rsp);
movq %r11, (1 * 8)(%rsp);
movq %r12, (2 * 8)(%rsp);
movq %r13, (3 * 8)(%rsp);
CFI_REL_OFFSET(%r10, 0 * 8);
CFI_REL_OFFSET(%r11, 1 * 8);
CFI_REL_OFFSET(%r12, 2 * 8);
CFI_REL_OFFSET(%r13, 3 * 8);
vmovdqu (%rcx), RTMP0x;
/* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
/* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i) */
#define OCB_INPUT(n, l0reg, l1reg, yreg) \
vmovdqu (n * 32)(%rdx), yreg; \
vpxor (l0reg), RTMP0x, RNOTx; \
vpxor (l1reg), RNOTx, RTMP0x; \
vinserti128 $1, RTMP0x, RNOT, RNOT; \
vpxor yreg, RNOT, yreg; \
vmovdqu RNOT, (n * 32)(%rsi);
movq (0 * 8)(%r9), %r10;
movq (1 * 8)(%r9), %r11;
movq (2 * 8)(%r9), %r12;
movq (3 * 8)(%r9), %r13;
OCB_INPUT(0, %r10, %r11, RA0);
OCB_INPUT(1, %r12, %r13, RA1);
movq (4 * 8)(%r9), %r10;
movq (5 * 8)(%r9), %r11;
movq (6 * 8)(%r9), %r12;
movq (7 * 8)(%r9), %r13;
OCB_INPUT(2, %r10, %r11, RA2);
OCB_INPUT(3, %r12, %r13, RA3);
movq (8 * 8)(%r9), %r10;
movq (9 * 8)(%r9), %r11;
movq (10 * 8)(%r9), %r12;
movq (11 * 8)(%r9), %r13;
OCB_INPUT(4, %r10, %r11, RB0);
OCB_INPUT(5, %r12, %r13, RB1);
movq (12 * 8)(%r9), %r10;
movq (13 * 8)(%r9), %r11;
movq (14 * 8)(%r9), %r12;
movq (15 * 8)(%r9), %r13;
OCB_INPUT(6, %r10, %r11, RB2);
OCB_INPUT(7, %r12, %r13, RB3);
#undef OCB_INPUT
vmovdqu RTMP0x, (%rcx);
movq (0 * 8)(%rsp), %r10;
movq (1 * 8)(%rsp), %r11;
movq (2 * 8)(%rsp), %r12;
movq (3 * 8)(%rsp), %r13;
CFI_RESTORE(%r10);
CFI_RESTORE(%r11);
CFI_RESTORE(%r12);
CFI_RESTORE(%r13);
call __sm4_gfni_crypt_blk16;
addq $(4 * 8), %rsp;
CFI_ADJUST_CFA_OFFSET(-4 * 8);
vmovdqu (%r8), RTMP1x;
vpxor (0 * 32)(%rsi), RA0, RA0;
vpxor (1 * 32)(%rsi), RA1, RA1;
vpxor (2 * 32)(%rsi), RA2, RA2;
vpxor (3 * 32)(%rsi), RA3, RA3;
vpxor (4 * 32)(%rsi), RB0, RB0;
vpxor (5 * 32)(%rsi), RB1, RB1;
vpxor (6 * 32)(%rsi), RB2, RB2;
vpxor (7 * 32)(%rsi), RB3, RB3;
/* Checksum_i = Checksum_{i-1} xor P_i */
vmovdqu RA0, (0 * 32)(%rsi);
vpxor RA0, RTMP1, RTMP1;
vmovdqu RA1, (1 * 32)(%rsi);
vpxor RA1, RTMP1, RTMP1;
vmovdqu RA2, (2 * 32)(%rsi);
vpxor RA2, RTMP1, RTMP1;
vmovdqu RA3, (3 * 32)(%rsi);
vpxor RA3, RTMP1, RTMP1;
vmovdqu RB0, (4 * 32)(%rsi);
vpxor RB0, RTMP1, RTMP1;
vmovdqu RB1, (5 * 32)(%rsi);
vpxor RB1, RTMP1, RTMP1;
vmovdqu RB2, (6 * 32)(%rsi);
vpxor RB2, RTMP1, RTMP1;
vmovdqu RB3, (7 * 32)(%rsi);
vpxor RB3, RTMP1, RTMP1;
vextracti128 $1, RTMP1, RNOTx;
vpxor RNOTx, RTMP1x, RTMP1x;
vmovdqu RTMP1x, (%r8);
vzeroall;
ret_spec_stop;
CFI_ENDPROC();
ELF(.size _gcry_sm4_gfni_avx2_ocb_dec,.-_gcry_sm4_gfni_avx2_ocb_dec;)
.align 16
.globl _gcry_sm4_gfni_avx2_ocb_auth
ELF(.type _gcry_sm4_gfni_avx2_ocb_auth,@function;)
_gcry_sm4_gfni_avx2_ocb_auth:
/* input:
* %rdi: ctx, CTX
* %rsi: abuf (16 blocks)
* %rdx: offset
* %rcx: checksum
* %r8 : L pointers (void *L[16])
*/
CFI_STARTPROC();
subq $(4 * 8), %rsp;
CFI_ADJUST_CFA_OFFSET(4 * 8);
movq %r10, (0 * 8)(%rsp);
movq %r11, (1 * 8)(%rsp);
movq %r12, (2 * 8)(%rsp);
movq %r13, (3 * 8)(%rsp);
CFI_REL_OFFSET(%r10, 0 * 8);
CFI_REL_OFFSET(%r11, 1 * 8);
CFI_REL_OFFSET(%r12, 2 * 8);
CFI_REL_OFFSET(%r13, 3 * 8);
vmovdqu (%rdx), RTMP0x;
/* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
/* Sum_i = Sum_{i-1} xor ENCIPHER(K, A_i xor Offset_i) */
#define OCB_INPUT(n, l0reg, l1reg, yreg) \
vmovdqu (n * 32)(%rsi), yreg; \
vpxor (l0reg), RTMP0x, RNOTx; \
vpxor (l1reg), RNOTx, RTMP0x; \
vinserti128 $1, RTMP0x, RNOT, RNOT; \
vpxor yreg, RNOT, yreg;
movq (0 * 8)(%r8), %r10;
movq (1 * 8)(%r8), %r11;
movq (2 * 8)(%r8), %r12;
movq (3 * 8)(%r8), %r13;
OCB_INPUT(0, %r10, %r11, RA0);
OCB_INPUT(1, %r12, %r13, RA1);
movq (4 * 8)(%r8), %r10;
movq (5 * 8)(%r8), %r11;
movq (6 * 8)(%r8), %r12;
movq (7 * 8)(%r8), %r13;
OCB_INPUT(2, %r10, %r11, RA2);
OCB_INPUT(3, %r12, %r13, RA3);
movq (8 * 8)(%r8), %r10;
movq (9 * 8)(%r8), %r11;
movq (10 * 8)(%r8), %r12;
movq (11 * 8)(%r8), %r13;
OCB_INPUT(4, %r10, %r11, RB0);
OCB_INPUT(5, %r12, %r13, RB1);
movq (12 * 8)(%r8), %r10;
movq (13 * 8)(%r8), %r11;
movq (14 * 8)(%r8), %r12;
movq (15 * 8)(%r8), %r13;
OCB_INPUT(6, %r10, %r11, RB2);
OCB_INPUT(7, %r12, %r13, RB3);
#undef OCB_INPUT
vmovdqu RTMP0x, (%rdx);
movq (0 * 8)(%rsp), %r10;
movq (1 * 8)(%rsp), %r11;
movq (2 * 8)(%rsp), %r12;
movq (3 * 8)(%rsp), %r13;
CFI_RESTORE(%r10);
CFI_RESTORE(%r11);
CFI_RESTORE(%r12);
CFI_RESTORE(%r13);
call __sm4_gfni_crypt_blk16;
addq $(4 * 8), %rsp;
CFI_ADJUST_CFA_OFFSET(-4 * 8);
vpxor RA0, RB0, RA0;
vpxor RA1, RB1, RA1;
vpxor RA2, RB2, RA2;
vpxor RA3, RB3, RA3;
vpxor RA1, RA0, RA0;
vpxor RA3, RA2, RA2;
vpxor RA2, RA0, RTMP1;
vextracti128 $1, RTMP1, RNOTx;
vpxor (%rcx), RTMP1x, RTMP1x;
vpxor RNOTx, RTMP1x, RTMP1x;
vmovdqu RTMP1x, (%rcx);
vzeroall;
ret_spec_stop;
CFI_ENDPROC();
ELF(.size _gcry_sm4_gfni_avx2_ocb_auth,.-_gcry_sm4_gfni_avx2_ocb_auth;)
#endif /*defined(ENABLE_GFNI_SUPPORT) && defined(ENABLE_AVX2_SUPPORT)*/
#endif /*__x86_64*/
diff --git a/cipher/sm4-gfni-avx512-amd64.S b/cipher/sm4-gfni-avx512-amd64.S
index b095f85d..91f6e80b 100644
--- a/cipher/sm4-gfni-avx512-amd64.S
+++ b/cipher/sm4-gfni-avx512-amd64.S
@@ -1,1760 +1,1861 @@
/* sm4-gfni-avx512-amd64.S - GFNI/AVX512 implementation of SM4 cipher
*
- * Copyright (C) 2022 Jussi Kivilinna <jussi.kivilinna@iki.fi>
+ * Copyright (C) 2022-2023 Jussi Kivilinna <jussi.kivilinna@iki.fi>
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <config.h>
#ifdef __x86_64
#if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \
defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && \
defined(ENABLE_GFNI_SUPPORT) && defined(ENABLE_AVX512_SUPPORT)
#include "asm-common-amd64.h"
/**********************************************************************
helper macros
**********************************************************************/
/* Transpose four 32-bit words between 128-bit vectors. */
#define transpose_4x4(x0, x1, x2, x3, t1, t2) \
vpunpckhdq x1, x0, t2; \
vpunpckldq x1, x0, x0; \
\
vpunpckldq x3, x2, t1; \
vpunpckhdq x3, x2, x2; \
\
vpunpckhqdq t1, x0, x1; \
vpunpcklqdq t1, x0, x0; \
\
vpunpckhqdq x2, t2, x3; \
vpunpcklqdq x2, t2, x2;
/**********************************************************************
4-way && 8-way SM4 with GFNI and AVX512 (128-bit vectors)
**********************************************************************/
/* vector registers */
#define RX0 %ymm0
#define RX1 %ymm1
#define RX0x %xmm0
#define RX1x %xmm1
#define RX0z %zmm0
#define RX1z %zmm1
#define RTMP0 %ymm2
#define RTMP1 %ymm3
#define RTMP2 %ymm4
#define RTMP3 %ymm5
#define RTMP4 %ymm6
#define RTMP0x %xmm2
#define RTMP1x %xmm3
#define RTMP2x %xmm4
#define RTMP3x %xmm5
#define RTMP4x %xmm6
#define RTMP0z %zmm2
#define RTMP1z %zmm3
#define RTMP2z %zmm4
#define RTMP3z %zmm5
#define RTMP4z %zmm6
#define RNOT %ymm7
#define RNOTx %xmm7
#define RNOTz %zmm7
#define RA0 %ymm8
#define RA1 %ymm9
#define RA2 %ymm10
#define RA3 %ymm11
#define RA0x %xmm8
#define RA1x %xmm9
#define RA2x %xmm10
#define RA3x %xmm11
#define RA0z %zmm8
#define RA1z %zmm9
#define RA2z %zmm10
#define RA3z %zmm11
#define RB0 %ymm12
#define RB1 %ymm13
#define RB2 %ymm14
#define RB3 %ymm15
#define RB0x %xmm12
#define RB1x %xmm13
#define RB2x %xmm14
#define RB3x %xmm15
#define RB0z %zmm12
#define RB1z %zmm13
#define RB2z %zmm14
#define RB3z %zmm15
SECTION_RODATA
.align 32
/* Affine transform, SM4 field to AES field */
.Lpre_affine_s:
.byte 0x52, 0xbc, 0x2d, 0x02, 0x9e, 0x25, 0xac, 0x34
.byte 0x52, 0xbc, 0x2d, 0x02, 0x9e, 0x25, 0xac, 0x34
.byte 0x52, 0xbc, 0x2d, 0x02, 0x9e, 0x25, 0xac, 0x34
.byte 0x52, 0xbc, 0x2d, 0x02, 0x9e, 0x25, 0xac, 0x34
/* Affine transform, AES field to SM4 field */
.Lpost_affine_s:
.byte 0x19, 0x8b, 0x6c, 0x1e, 0x51, 0x8e, 0x2d, 0xd7
.byte 0x19, 0x8b, 0x6c, 0x1e, 0x51, 0x8e, 0x2d, 0xd7
.byte 0x19, 0x8b, 0x6c, 0x1e, 0x51, 0x8e, 0x2d, 0xd7
.byte 0x19, 0x8b, 0x6c, 0x1e, 0x51, 0x8e, 0x2d, 0xd7
/* For CTR-mode IV byteswap */
.Lbswap128_mask:
.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
/* For input word byte-swap */
.Lbswap32_mask:
.byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
.Lcounter2222_lo:
.quad 2, 0
.Lcounter4444_lo:
.quad 4, 0
.Lcounter8888_lo:
.quad 8, 0
.Lcounter16161616_lo:
.quad 16, 0
.Lcounter1111_hi:
.quad 0, 1
.align 64
.Lcounter0123_lo:
.quad 0, 0
.quad 1, 0
.quad 2, 0
.quad 3, 0
+/* CTR byte addition constants */
+.align 64
+.Lbige_addb_0_1:
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1
+.Lbige_addb_2_3:
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3
+.Lbige_addb_4_5:
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5
+.Lbige_addb_6_7:
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7
+.Lbige_addb_8_9:
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9
+.Lbige_addb_10_11:
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11
+.Lbige_addb_12_13:
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 13
+.Lbige_addb_14_15:
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 14
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15
+.Lbige_addb_16:
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16
+
.text
.align 16
.globl _gcry_sm4_gfni_avx512_expand_key
ELF(.type _gcry_sm4_gfni_avx512_expand_key,@function;)
_gcry_sm4_gfni_avx512_expand_key:
/* input:
* %rdi: 128-bit key
* %rsi: rkey_enc
* %rdx: rkey_dec
* %rcx: fk array
* %r8: ck array
*/
CFI_STARTPROC();
spec_stop_avx512;
vmovd 0*4(%rdi), RA0x;
vmovd 1*4(%rdi), RA1x;
vmovd 2*4(%rdi), RA2x;
vmovd 3*4(%rdi), RA3x;
vmovdqa .Lbswap32_mask rRIP, RTMP2x;
vpshufb RTMP2x, RA0x, RA0x;
vpshufb RTMP2x, RA1x, RA1x;
vpshufb RTMP2x, RA2x, RA2x;
vpshufb RTMP2x, RA3x, RA3x;
vmovd 0*4(%rcx), RB0x;
vmovd 1*4(%rcx), RB1x;
vmovd 2*4(%rcx), RB2x;
vmovd 3*4(%rcx), RB3x;
vpxor RB0x, RA0x, RA0x;
vpxor RB1x, RA1x, RA1x;
vpxor RB2x, RA2x, RA2x;
vpxor RB3x, RA3x, RA3x;
#define ROUND(round, s0, s1, s2, s3) \
vpxord (4*(round))(%r8) {1to4}, s1, RX0x; \
vpternlogd $0x96, s2, s3, RX0x; /* s1 ^ s2 ^ s3 ^ rk */ \
\
/* sbox, non-linear part */ \
vgf2p8affineqb $0x65, .Lpre_affine_s rRIP, RX0x, RX0x; \
vgf2p8affineinvqb $0xd3, .Lpost_affine_s rRIP, RX0x, RX0x; \
\
/* linear part */ \
vpxor RX0x, s0, s0; /* s0 ^ x */ \
vprold $13, RX0x, RTMP1x; \
vprold $23, RX0x, RTMP3x; \
vpternlogd $0x96, RTMP1x, RTMP3x, s0; /* s0 ^ x ^ rol(x,13) ^ rol(x,23) */
leaq (32*4)(%r8), %rax;
leaq (32*4)(%rdx), %rdx;
.align 16
.Lroundloop_expand_key:
leaq (-4*4)(%rdx), %rdx;
ROUND(0, RA0x, RA1x, RA2x, RA3x);
ROUND(1, RA1x, RA2x, RA3x, RA0x);
ROUND(2, RA2x, RA3x, RA0x, RA1x);
ROUND(3, RA3x, RA0x, RA1x, RA2x);
leaq (4*4)(%r8), %r8;
vmovd RA0x, (0*4)(%rsi);
vmovd RA1x, (1*4)(%rsi);
vmovd RA2x, (2*4)(%rsi);
vmovd RA3x, (3*4)(%rsi);
vmovd RA0x, (3*4)(%rdx);
vmovd RA1x, (2*4)(%rdx);
vmovd RA2x, (1*4)(%rdx);
vmovd RA3x, (0*4)(%rdx);
leaq (4*4)(%rsi), %rsi;
cmpq %rax, %r8;
jne .Lroundloop_expand_key;
#undef ROUND
vzeroall;
ret_spec_stop;
CFI_ENDPROC();
ELF(.size _gcry_sm4_gfni_avx512_expand_key,.-_gcry_sm4_gfni_avx512_expand_key;)
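/* The AVX512 variant shortens the round to vpxord/vpternlogd/vprold:
 * vpternlogd with immediate 0x96 is the three-input XOR (the truth table of
 * a ^ b ^ c is the byte 0x96), and vprold replaces the shift/shift/xor rotate
 * construction.  Scalar model of the ternary-logic lookup (an illustrative
 * sketch; operand order does not matter for the symmetric XOR function):
 *
 *   #include <assert.h>
 *   #include <stdint.h>
 *
 *   static uint32_t ternlog32(uint8_t imm, uint32_t a, uint32_t b, uint32_t c)
 *   {
 *       uint32_t r = 0;
 *       for (int bit = 0; bit < 32; bit++) {
 *           unsigned idx = (((a >> bit) & 1) << 2)
 *                        | (((b >> bit) & 1) << 1)
 *                        |  ((c >> bit) & 1);
 *           r |= (uint32_t)((imm >> idx) & 1) << bit;  // per-bit table lookup
 *       }
 *       return r;
 *   }
 *
 *   int main(void)
 *   {
 *       uint32_t a = 0x12345678, b = 0x9abcdef0, c = 0x0f0f0f0f;
 *       assert(ternlog32(0x96, a, b, c) == (a ^ b ^ c));
 *       return 0;
 *   }
 */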
.align 16
ELF(.type sm4_gfni_avx512_crypt_blk1_4,@function;)
sm4_gfni_avx512_crypt_blk1_4:
/* input:
* %rdi: round key array, CTX
* %rsi: dst (1..4 blocks)
* %rdx: src (1..4 blocks)
* %rcx: num blocks (1..4)
*/
CFI_STARTPROC();
vmovdqu 0*16(%rdx), RA0x;
vmovdqa RA0x, RA1x;
vmovdqa RA0x, RA2x;
vmovdqa RA0x, RA3x;
cmpq $2, %rcx;
jb .Lblk4_load_input_done;
vmovdqu 1*16(%rdx), RA1x;
je .Lblk4_load_input_done;
vmovdqu 2*16(%rdx), RA2x;
cmpq $3, %rcx;
je .Lblk4_load_input_done;
vmovdqu 3*16(%rdx), RA3x;
.Lblk4_load_input_done:
vmovdqa .Lbswap32_mask rRIP, RTMP2x;
vpshufb RTMP2x, RA0x, RA0x;
vpshufb RTMP2x, RA1x, RA1x;
vpshufb RTMP2x, RA2x, RA2x;
vpshufb RTMP2x, RA3x, RA3x;
transpose_4x4(RA0x, RA1x, RA2x, RA3x, RTMP0x, RTMP1x);
#define ROUND(round, s0, s1, s2, s3) \
vpxord (4*(round))(%rdi) {1to4}, s1, RX0x; \
vpternlogd $0x96, s2, s3, RX0x; /* s1 ^ s2 ^ s3 ^ rk */ \
\
/* sbox, non-linear part */ \
vgf2p8affineqb $0x65, .Lpre_affine_s rRIP, RX0x, RX0x; \
vgf2p8affineinvqb $0xd3, .Lpost_affine_s rRIP, RX0x, RX0x; \
\
/* linear part */ \
vprold $2, RX0x, RTMP0x; \
vprold $10, RX0x, RTMP1x; \
vprold $18, RX0x, RTMP2x; \
vpternlogd $0x96, RTMP0x, RX0x, s0; /* s0 ^ x ^ rol(x,2) */ \
vprold $24, RX0x, RX0x; \
vpternlogd $0x96, RTMP1x, RTMP2x, RX0x; /* rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \
vpxor RX0x, s0, s0; /* s0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */
leaq (32*4)(%rdi), %rax;
.align 16
.Lroundloop_blk4:
ROUND(0, RA0x, RA1x, RA2x, RA3x);
ROUND(1, RA1x, RA2x, RA3x, RA0x);
ROUND(2, RA2x, RA3x, RA0x, RA1x);
ROUND(3, RA3x, RA0x, RA1x, RA2x);
leaq (4*4)(%rdi), %rdi;
cmpq %rax, %rdi;
jne .Lroundloop_blk4;
#undef ROUND
vmovdqa .Lbswap128_mask rRIP, RTMP2x;
transpose_4x4(RA0x, RA1x, RA2x, RA3x, RTMP0x, RTMP1x);
vpshufb RTMP2x, RA0x, RA0x;
vpshufb RTMP2x, RA1x, RA1x;
vpshufb RTMP2x, RA2x, RA2x;
vpshufb RTMP2x, RA3x, RA3x;
vmovdqu RA0x, 0*16(%rsi);
cmpq $2, %rcx;
jb .Lblk4_store_output_done;
vmovdqu RA1x, 1*16(%rsi);
je .Lblk4_store_output_done;
vmovdqu RA2x, 2*16(%rsi);
cmpq $3, %rcx;
je .Lblk4_store_output_done;
vmovdqu RA3x, 3*16(%rsi);
.Lblk4_store_output_done:
vzeroall;
xorl %eax, %eax;
ret_spec_stop;
CFI_ENDPROC();
ELF(.size sm4_gfni_avx512_crypt_blk1_4,.-sm4_gfni_avx512_crypt_blk1_4;)
.align 16
ELF(.type __sm4_gfni_crypt_blk8,@function;)
__sm4_gfni_crypt_blk8:
/* input:
* %rdi: round key array, CTX
* RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: eight parallel
* input blocks
* output:
* RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: eight parallel output
* blocks (encrypted or decrypted, depending on the round key array)
*/
CFI_STARTPROC();
vmovdqa .Lbswap32_mask rRIP, RTMP2x;
vpshufb RTMP2x, RA0x, RA0x;
vpshufb RTMP2x, RA1x, RA1x;
vpshufb RTMP2x, RA2x, RA2x;
vpshufb RTMP2x, RA3x, RA3x;
vpshufb RTMP2x, RB0x, RB0x;
vpshufb RTMP2x, RB1x, RB1x;
vpshufb RTMP2x, RB2x, RB2x;
vpshufb RTMP2x, RB3x, RB3x;
transpose_4x4(RA0x, RA1x, RA2x, RA3x, RTMP0x, RTMP1x);
transpose_4x4(RB0x, RB1x, RB2x, RB3x, RTMP0x, RTMP1x);
#define ROUND(round, s0, s1, s2, s3, r0, r1, r2, r3) \
vpbroadcastd (4*(round))(%rdi), RX1x; \
vmovdqa .Lpre_affine_s rRIP, RTMP2x; \
vmovdqa .Lpost_affine_s rRIP, RTMP3x; \
vpxor s1, RX1x, RX0x; \
vpternlogd $0x96, s2, s3, RX0x; /* s1 ^ s2 ^ s3 ^ rk */ \
vpxor r1, RX1x, RX1x; \
vpternlogd $0x96, r2, r3, RX1x; /* r1 ^ r2 ^ r3 ^ rk */ \
\
/* sbox, non-linear part */ \
vgf2p8affineqb $0x65, RTMP2x, RX0x, RX0x; \
vgf2p8affineinvqb $0xd3, RTMP3x, RX0x, RX0x; \
vgf2p8affineqb $0x65, RTMP2x, RX1x, RX1x; \
vgf2p8affineinvqb $0xd3, RTMP3x, RX1x, RX1x; \
\
/* linear part */ \
vprold $2, RX0x, RTMP0x; \
vprold $10, RX0x, RTMP1x; \
vprold $18, RX0x, RTMP2x; \
vpternlogd $0x96, RTMP0x, RX0x, s0; /* s0 ^ x ^ rol(x,2) */ \
vprold $24, RX0x, RX0x; \
vprold $2, RX1x, RTMP3x; \
vprold $10, RX1x, RTMP4x; \
vprold $18, RX1x, RTMP0x; \
vpternlogd $0x96, RTMP3x, RX1x, r0; /* r0 ^ x ^ rol(x,2) */ \
vprold $24, RX1x, RX1x; \
vpternlogd $0x96, RTMP1x, RTMP2x, RX0x; /* rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \
vpternlogd $0x96, RTMP4x, RTMP0x, RX1x; /* rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \
vpxor RX0x, s0, s0; /* s0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \
vpxor RX1x, r0, r0; /* r0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */
leaq (32*4)(%rdi), %rax;
.align 16
.Lroundloop_blk8:
ROUND(0, RA0x, RA1x, RA2x, RA3x, RB0x, RB1x, RB2x, RB3x);
ROUND(1, RA1x, RA2x, RA3x, RA0x, RB1x, RB2x, RB3x, RB0x);
ROUND(2, RA2x, RA3x, RA0x, RA1x, RB2x, RB3x, RB0x, RB1x);
ROUND(3, RA3x, RA0x, RA1x, RA2x, RB3x, RB0x, RB1x, RB2x);
leaq (4*4)(%rdi), %rdi;
cmpq %rax, %rdi;
jne .Lroundloop_blk8;
#undef ROUND
vmovdqa .Lbswap128_mask rRIP, RTMP2x;
transpose_4x4(RA0x, RA1x, RA2x, RA3x, RTMP0x, RTMP1x);
transpose_4x4(RB0x, RB1x, RB2x, RB3x, RTMP0x, RTMP1x);
vpshufb RTMP2x, RA0x, RA0x;
vpshufb RTMP2x, RA1x, RA1x;
vpshufb RTMP2x, RA2x, RA2x;
vpshufb RTMP2x, RA3x, RA3x;
vpshufb RTMP2x, RB0x, RB0x;
vpshufb RTMP2x, RB1x, RB1x;
vpshufb RTMP2x, RB2x, RB2x;
vpshufb RTMP2x, RB3x, RB3x;
ret_spec_stop;
CFI_ENDPROC();
ELF(.size __sm4_gfni_crypt_blk8,.-__sm4_gfni_crypt_blk8;)
.align 16
ELF(.type _gcry_sm4_gfni_avx512_crypt_blk1_8,@function;)
_gcry_sm4_gfni_avx512_crypt_blk1_8:
/* input:
* %rdi: round key array, CTX
* %rsi: dst (1..8 blocks)
* %rdx: src (1..8 blocks)
* %rcx: num blocks (1..8)
*/
CFI_STARTPROC();
cmpq $5, %rcx;
jb sm4_gfni_avx512_crypt_blk1_4;
vmovdqu (0 * 16)(%rdx), RA0x;
vmovdqu (1 * 16)(%rdx), RA1x;
vmovdqu (2 * 16)(%rdx), RA2x;
vmovdqu (3 * 16)(%rdx), RA3x;
vmovdqu (4 * 16)(%rdx), RB0x;
vmovdqa RB0x, RB1x;
vmovdqa RB0x, RB2x;
vmovdqa RB0x, RB3x;
je .Lblk8_load_input_done;
vmovdqu (5 * 16)(%rdx), RB1x;
cmpq $7, %rcx;
jb .Lblk8_load_input_done;
vmovdqu (6 * 16)(%rdx), RB2x;
je .Lblk8_load_input_done;
vmovdqu (7 * 16)(%rdx), RB3x;
.Lblk8_load_input_done:
call __sm4_gfni_crypt_blk8;
cmpq $6, %rcx;
vmovdqu RA0x, (0 * 16)(%rsi);
vmovdqu RA1x, (1 * 16)(%rsi);
vmovdqu RA2x, (2 * 16)(%rsi);
vmovdqu RA3x, (3 * 16)(%rsi);
vmovdqu RB0x, (4 * 16)(%rsi);
jb .Lblk8_store_output_done;
vmovdqu RB1x, (5 * 16)(%rsi);
je .Lblk8_store_output_done;
vmovdqu RB2x, (6 * 16)(%rsi);
cmpq $7, %rcx;
je .Lblk8_store_output_done;
vmovdqu RB3x, (7 * 16)(%rsi);
.Lblk8_store_output_done:
vzeroall;
xorl %eax, %eax;
ret_spec_stop;
CFI_ENDPROC();
ELF(.size _gcry_sm4_gfni_avx512_crypt_blk1_8,.-_gcry_sm4_gfni_avx512_crypt_blk1_8;)
/**********************************************************************
16-way SM4 with GFNI and AVX512 (256-bit vectors)
**********************************************************************/
.align 16
ELF(.type __sm4_gfni_crypt_blk16,@function;)
__sm4_gfni_crypt_blk16:
/* input:
* %rdi: ctx, CTX
* RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: sixteen parallel
* plaintext blocks
* output:
* RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: sixteen parallel
* ciphertext blocks
*/
CFI_STARTPROC();
vbroadcasti128 .Lbswap32_mask rRIP, RTMP2;
vpshufb RTMP2, RA0, RA0;
vpshufb RTMP2, RA1, RA1;
vpshufb RTMP2, RA2, RA2;
vpshufb RTMP2, RA3, RA3;
vpshufb RTMP2, RB0, RB0;
vpshufb RTMP2, RB1, RB1;
vpshufb RTMP2, RB2, RB2;
vpshufb RTMP2, RB3, RB3;
transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1);
transpose_4x4(RB0, RB1, RB2, RB3, RTMP0, RTMP1);
#define ROUND(round, s0, s1, s2, s3, r0, r1, r2, r3) \
vpbroadcastd (4*(round))(%rdi), RX1; \
vbroadcasti128 .Lpre_affine_s rRIP, RTMP2; \
vbroadcasti128 .Lpost_affine_s rRIP, RTMP3; \
vpxor s1, RX1, RX0; \
vpternlogd $0x96, s2, s3, RX0; /* s1 ^ s2 ^ s3 ^ rk */ \
vpxor r1, RX1, RX1; \
vpternlogd $0x96, r2, r3, RX1; /* r1 ^ r2 ^ r3 ^ rk */ \
\
/* sbox, non-linear part */ \
vgf2p8affineqb $0x65, RTMP2, RX0, RX0; \
vgf2p8affineinvqb $0xd3, RTMP3, RX0, RX0; \
vgf2p8affineqb $0x65, RTMP2, RX1, RX1; \
vgf2p8affineinvqb $0xd3, RTMP3, RX1, RX1; \
\
/* linear part */ \
vprold $2, RX0, RTMP0; \
vprold $10, RX0, RTMP1; \
vprold $18, RX0, RTMP2; \
vpternlogd $0x96, RTMP0, RX0, s0; /* s0 ^ x ^ rol(x,2) */ \
vprold $24, RX0, RX0; \
vprold $2, RX1, RTMP3; \
vprold $10, RX1, RTMP4; \
vprold $18, RX1, RTMP0; \
vpternlogd $0x96, RTMP3, RX1, r0; /* r0 ^ x ^ rol(x,2) */ \
vprold $24, RX1, RX1; \
vpternlogd $0x96, RTMP1, RTMP2, RX0; /* rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \
vpternlogd $0x96, RTMP4, RTMP0, RX1; /* rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \
vpxor RX0, s0, s0; /* s0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \
vpxor RX1, r0, r0; /* r0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */
leaq (32*4)(%rdi), %rax;
.align 16
.Lroundloop_blk16:
ROUND(0, RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3);
ROUND(1, RA1, RA2, RA3, RA0, RB1, RB2, RB3, RB0);
ROUND(2, RA2, RA3, RA0, RA1, RB2, RB3, RB0, RB1);
ROUND(3, RA3, RA0, RA1, RA2, RB3, RB0, RB1, RB2);
leaq (4*4)(%rdi), %rdi;
cmpq %rax, %rdi;
jne .Lroundloop_blk16;
#undef ROUND
vbroadcasti128 .Lbswap128_mask rRIP, RTMP2;
transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1);
transpose_4x4(RB0, RB1, RB2, RB3, RTMP0, RTMP1);
vpshufb RTMP2, RA0, RA0;
vpshufb RTMP2, RA1, RA1;
vpshufb RTMP2, RA2, RA2;
vpshufb RTMP2, RA3, RA3;
vpshufb RTMP2, RB0, RB0;
vpshufb RTMP2, RB1, RB1;
vpshufb RTMP2, RB2, RB2;
vpshufb RTMP2, RB3, RB3;
ret_spec_stop;
CFI_ENDPROC();
ELF(.size __sm4_gfni_crypt_blk16,.-__sm4_gfni_crypt_blk16;)
.align 16
.globl _gcry_sm4_gfni_avx512_crypt_blk1_16
ELF(.type _gcry_sm4_gfni_avx512_crypt_blk1_16,@function;)
_gcry_sm4_gfni_avx512_crypt_blk1_16:
/* input:
* %rdi: round key array, CTX
* %rsi: dst (1..16 blocks)
* %rdx: src (1..16 blocks)
* %rcx: num blocks (1..16)
*/
CFI_STARTPROC();
spec_stop_avx512;
#define LOAD_INPUT(offset, yreg) \
cmpq $(1 + 2 * (offset)), %rcx; \
jb .Lblk16_load_input_done; \
ja 1f; \
vmovdqu (offset) * 32(%rdx), yreg##x; \
jmp .Lblk16_load_input_done; \
1: \
vmovdqu (offset) * 32(%rdx), yreg;
cmpq $8, %rcx;
jbe _gcry_sm4_gfni_avx512_crypt_blk1_8;
vmovdqu (0 * 32)(%rdx), RA0;
vmovdqu (1 * 32)(%rdx), RA1;
vmovdqu (2 * 32)(%rdx), RA2;
vmovdqu (3 * 32)(%rdx), RA3;
LOAD_INPUT(4, RB0);
LOAD_INPUT(5, RB1);
LOAD_INPUT(6, RB2);
LOAD_INPUT(7, RB3);
#undef LOAD_INPUT
.Lblk16_load_input_done:
call __sm4_gfni_crypt_blk16;
#define STORE_OUTPUT(yreg, offset) \
cmpq $(1 + 2 * (offset)), %rcx; \
jb .Lblk16_store_output_done; \
ja 1f; \
vmovdqu yreg##x, (offset) * 32(%rsi); \
jmp .Lblk16_store_output_done; \
1: \
vmovdqu yreg, (offset) * 32(%rsi);
vmovdqu RA0, (0 * 32)(%rsi);
vmovdqu RA1, (1 * 32)(%rsi);
vmovdqu RA2, (2 * 32)(%rsi);
vmovdqu RA3, (3 * 32)(%rsi);
STORE_OUTPUT(RB0, 4);
STORE_OUTPUT(RB1, 5);
STORE_OUTPUT(RB2, 6);
STORE_OUTPUT(RB3, 7);
#undef STORE_OUTPUT
.Lblk16_store_output_done:
vzeroall;
xorl %eax, %eax;
ret_spec_stop;
CFI_ENDPROC();
ELF(.size _gcry_sm4_gfni_avx512_crypt_blk1_16,.-_gcry_sm4_gfni_avx512_crypt_blk1_16;)
#define add_le128(out, in, lo_counter, hi_counter1) \
vpaddq lo_counter, in, out; \
vpcmpuq $1, lo_counter, out, %k1; \
kaddb %k1, %k1, %k1; \
vpaddq hi_counter1, out, out{%k1};
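/* Per 128-bit lane, add_le128 adds a small little-endian increment (non-zero
 * only in the low qword) and propagates the carry into the high qword through
 * the mask built by vpcmpuq/kaddb.  Scalar model (an illustrative sketch):
 *
 *   #include <stdint.h>
 *
 *   static void add_le128(uint64_t out[2], const uint64_t in[2],
 *                         uint64_t add_lo)
 *   {
 *       out[0] = in[0] + add_lo;
 *       out[1] = in[1] + (out[0] < add_lo);   // carry from the low qword
 *   }
 */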
.align 16
.globl _gcry_sm4_gfni_avx512_ctr_enc
ELF(.type _gcry_sm4_gfni_avx512_ctr_enc,@function;)
_gcry_sm4_gfni_avx512_ctr_enc:
/* input:
* %rdi: ctx, CTX
* %rsi: dst (16 blocks)
* %rdx: src (16 blocks)
* %rcx: iv (big endian, 128bit)
*/
CFI_STARTPROC();
spec_stop_avx512;
+ cmpb $(0x100 - 16), 15(%rcx);
+ jbe .Lctr_byteadd16;
+
vbroadcasti128 .Lbswap128_mask rRIP, RTMP0;
vmovdqa .Lcounter0123_lo rRIP, RTMP1;
vbroadcasti128 .Lcounter2222_lo rRIP, RTMP2;
vbroadcasti128 .Lcounter4444_lo rRIP, RTMP3;
vbroadcasti128 .Lcounter8888_lo rRIP, RTMP4;
/* load IV and byteswap */
movq 8(%rcx), %r11;
bswapq %r11;
vbroadcasti128 (%rcx), RB3;
vpshufb RTMP0, RB3, RB3;
/* check need for handling 64-bit overflow and carry */
cmpq $(0xffffffffffffffff - 16), %r11;
ja .Lhandle_ctr_carry_blk16;
/* construct IVs */
vpaddq RTMP1, RB3, RA0; /* +0:+1 */
vpaddq RTMP2, RA0, RA1; /* +2:+3 */
vpaddq RTMP3, RA0, RA2; /* +4:+5 */
vpaddq RTMP3, RA1, RA3; /* +6:+7 */
vpaddq RTMP4, RA0, RB0; /* +8... */
vpaddq RTMP4, RA1, RB1; /* +10... */
vpaddq RTMP4, RA2, RB2; /* +12... */
vpaddq RTMP4, RA3, RB3; /* +14... */
/* Update counter */
leaq 16(%r11), %r11;
bswapq %r11;
movq %r11, 8(%rcx);
jmp .Lctr_carry_done_blk16;
.Lhandle_ctr_carry_blk16:
vbroadcasti128 .Lcounter1111_hi rRIP, RNOT;
/* construct IVs */
add_le128(RA0, RB3, RTMP1, RNOT); /* +0:+1 */
add_le128(RA1, RA0, RTMP2, RNOT); /* +2:+3 */
add_le128(RA2, RA0, RTMP3, RNOT); /* +4:+5 */
add_le128(RA3, RA1, RTMP3, RNOT); /* +6:+7 */
add_le128(RB0, RA0, RTMP4, RNOT); /* +8... */
add_le128(RB1, RA1, RTMP4, RNOT); /* +10... */
add_le128(RB2, RA2, RTMP4, RNOT); /* +12... */
add_le128(RB3, RA3, RTMP4, RNOT); /* +14... */
/* Update counter */
addq $16, %r11;
movq (%rcx), %r10;
bswapq %r10;
adcq $0, %r10;
bswapq %r11;
bswapq %r10;
movq %r11, 8(%rcx);
movq %r10, (%rcx);
.align 16
.Lctr_carry_done_blk16:
/* Byte-swap IVs. */
vpshufb RTMP0, RA0, RA0;
vpshufb RTMP0, RA1, RA1;
vpshufb RTMP0, RA2, RA2;
vpshufb RTMP0, RA3, RA3;
vpshufb RTMP0, RB0, RB0;
vpshufb RTMP0, RB1, RB1;
vpshufb RTMP0, RB2, RB2;
vpshufb RTMP0, RB3, RB3;
+.align 16
+.Lload_ctr_done16:
call __sm4_gfni_crypt_blk16;
vpxor (0 * 32)(%rdx), RA0, RA0;
vpxor (1 * 32)(%rdx), RA1, RA1;
vpxor (2 * 32)(%rdx), RA2, RA2;
vpxor (3 * 32)(%rdx), RA3, RA3;
vpxor (4 * 32)(%rdx), RB0, RB0;
vpxor (5 * 32)(%rdx), RB1, RB1;
vpxor (6 * 32)(%rdx), RB2, RB2;
vpxor (7 * 32)(%rdx), RB3, RB3;
vmovdqu RA0, (0 * 32)(%rsi);
vmovdqu RA1, (1 * 32)(%rsi);
vmovdqu RA2, (2 * 32)(%rsi);
vmovdqu RA3, (3 * 32)(%rsi);
vmovdqu RB0, (4 * 32)(%rsi);
vmovdqu RB1, (5 * 32)(%rsi);
vmovdqu RB2, (6 * 32)(%rsi);
vmovdqu RB3, (7 * 32)(%rsi);
vzeroall;
kxorq %k1, %k1, %k1;
ret_spec_stop;
+
+.align 16
+.Lctr_byteadd_full_ctr_carry16:
+ movq 8(%rcx), %r11;
+ movq (%rcx), %r10;
+ bswapq %r11;
+ bswapq %r10;
+ addq $16, %r11;
+ adcq $0, %r10;
+ bswapq %r11;
+ bswapq %r10;
+ movq %r11, 8(%rcx);
+ movq %r10, (%rcx);
+ jmp .Lctr_byteadd_ymm16;
+.align 16
+.Lctr_byteadd16:
+ vbroadcasti128 (%rcx), RB3;
+ je .Lctr_byteadd_full_ctr_carry16;
+ addb $16, 15(%rcx);
+.Lctr_byteadd_ymm16:
+ vpaddb .Lbige_addb_0_1 rRIP, RB3, RA0;
+ vpaddb .Lbige_addb_2_3 rRIP, RB3, RA1;
+ vpaddb .Lbige_addb_4_5 rRIP, RB3, RA2;
+ vpaddb .Lbige_addb_6_7 rRIP, RB3, RA3;
+ vpaddb .Lbige_addb_8_9 rRIP, RB3, RB0;
+ vpaddb .Lbige_addb_10_11 rRIP, RB3, RB1;
+ vpaddb .Lbige_addb_12_13 rRIP, RB3, RB2;
+ vpaddb .Lbige_addb_14_15 rRIP, RB3, RB3;
+
+ jmp .Lload_ctr_done16;
CFI_ENDPROC();
ELF(.size _gcry_sm4_gfni_avx512_ctr_enc,.-_gcry_sm4_gfni_avx512_ctr_enc;)
.align 16
.globl _gcry_sm4_gfni_avx512_cbc_dec
ELF(.type _gcry_sm4_gfni_avx512_cbc_dec,@function;)
_gcry_sm4_gfni_avx512_cbc_dec:
/* input:
* %rdi: ctx, CTX
* %rsi: dst (16 blocks)
* %rdx: src (16 blocks)
* %rcx: iv
*/
CFI_STARTPROC();
spec_stop_avx512;
vmovdqu (0 * 32)(%rdx), RA0;
vmovdqu (1 * 32)(%rdx), RA1;
vmovdqu (2 * 32)(%rdx), RA2;
vmovdqu (3 * 32)(%rdx), RA3;
vmovdqu (4 * 32)(%rdx), RB0;
vmovdqu (5 * 32)(%rdx), RB1;
vmovdqu (6 * 32)(%rdx), RB2;
vmovdqu (7 * 32)(%rdx), RB3;
call __sm4_gfni_crypt_blk16;
vmovdqu (%rcx), RNOTx;
vinserti128 $1, (%rdx), RNOT, RNOT;
vpxor RNOT, RA0, RA0;
vpxor (0 * 32 + 16)(%rdx), RA1, RA1;
vpxor (1 * 32 + 16)(%rdx), RA2, RA2;
vpxor (2 * 32 + 16)(%rdx), RA3, RA3;
vpxor (3 * 32 + 16)(%rdx), RB0, RB0;
vpxor (4 * 32 + 16)(%rdx), RB1, RB1;
vpxor (5 * 32 + 16)(%rdx), RB2, RB2;
vpxor (6 * 32 + 16)(%rdx), RB3, RB3;
vmovdqu (7 * 32 + 16)(%rdx), RNOTx;
vmovdqu RNOTx, (%rcx); /* store new IV */
vmovdqu RA0, (0 * 32)(%rsi);
vmovdqu RA1, (1 * 32)(%rsi);
vmovdqu RA2, (2 * 32)(%rsi);
vmovdqu RA3, (3 * 32)(%rsi);
vmovdqu RB0, (4 * 32)(%rsi);
vmovdqu RB1, (5 * 32)(%rsi);
vmovdqu RB2, (6 * 32)(%rsi);
vmovdqu RB3, (7 * 32)(%rsi);
vzeroall;
ret_spec_stop;
CFI_ENDPROC();
ELF(.size _gcry_sm4_gfni_avx512_cbc_dec,.-_gcry_sm4_gfni_avx512_cbc_dec;)
.align 16
.globl _gcry_sm4_gfni_avx512_cfb_dec
ELF(.type _gcry_sm4_gfni_avx512_cfb_dec,@function;)
_gcry_sm4_gfni_avx512_cfb_dec:
/* input:
* %rdi: ctx, CTX
* %rsi: dst (16 blocks)
* %rdx: src (16 blocks)
* %rcx: iv
*/
CFI_STARTPROC();
spec_stop_avx512;
/* Load input */
vmovdqu (%rcx), RNOTx;
vinserti128 $1, (%rdx), RNOT, RA0;
vmovdqu (0 * 32 + 16)(%rdx), RA1;
vmovdqu (1 * 32 + 16)(%rdx), RA2;
vmovdqu (2 * 32 + 16)(%rdx), RA3;
vmovdqu (3 * 32 + 16)(%rdx), RB0;
vmovdqu (4 * 32 + 16)(%rdx), RB1;
vmovdqu (5 * 32 + 16)(%rdx), RB2;
vmovdqu (6 * 32 + 16)(%rdx), RB3;
/* Update IV */
vmovdqu (7 * 32 + 16)(%rdx), RNOTx;
vmovdqu RNOTx, (%rcx);
call __sm4_gfni_crypt_blk16;
vpxor (0 * 32)(%rdx), RA0, RA0;
vpxor (1 * 32)(%rdx), RA1, RA1;
vpxor (2 * 32)(%rdx), RA2, RA2;
vpxor (3 * 32)(%rdx), RA3, RA3;
vpxor (4 * 32)(%rdx), RB0, RB0;
vpxor (5 * 32)(%rdx), RB1, RB1;
vpxor (6 * 32)(%rdx), RB2, RB2;
vpxor (7 * 32)(%rdx), RB3, RB3;
vmovdqu RA0, (0 * 32)(%rsi);
vmovdqu RA1, (1 * 32)(%rsi);
vmovdqu RA2, (2 * 32)(%rsi);
vmovdqu RA3, (3 * 32)(%rsi);
vmovdqu RB0, (4 * 32)(%rsi);
vmovdqu RB1, (5 * 32)(%rsi);
vmovdqu RB2, (6 * 32)(%rsi);
vmovdqu RB3, (7 * 32)(%rsi);
vzeroall;
ret_spec_stop;
CFI_ENDPROC();
ELF(.size _gcry_sm4_gfni_avx512_cfb_dec,.-_gcry_sm4_gfni_avx512_cfb_dec;)
.align 16
.globl _gcry_sm4_gfni_avx512_ocb_enc
ELF(.type _gcry_sm4_gfni_avx512_ocb_enc,@function;)
_gcry_sm4_gfni_avx512_ocb_enc:
/* input:
* %rdi: ctx, CTX
* %rsi: dst (16 blocks)
* %rdx: src (16 blocks)
* %rcx: offset
* %r8 : checksum
* %r9 : L pointers (void *L[16])
*/
CFI_STARTPROC();
spec_stop_avx512;
subq $(4 * 8), %rsp;
CFI_ADJUST_CFA_OFFSET(4 * 8);
movq %r10, (0 * 8)(%rsp);
movq %r11, (1 * 8)(%rsp);
movq %r12, (2 * 8)(%rsp);
movq %r13, (3 * 8)(%rsp);
CFI_REL_OFFSET(%r10, 0 * 8);
CFI_REL_OFFSET(%r11, 1 * 8);
CFI_REL_OFFSET(%r12, 2 * 8);
CFI_REL_OFFSET(%r13, 3 * 8);
vmovdqu (%rcx), RTMP0x;
vmovdqu (%r8), RTMP1x;
/* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
/* Checksum_i = Checksum_{i-1} xor P_i */
/* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i) */
#define OCB_INPUT(n, l0reg, l1reg, yreg, inreg) \
vmovdqu (n * 32)(%rdx), inreg; \
vpxor (l0reg), RTMP0x, RNOTx; \
vpxor (l1reg), RNOTx, RTMP0x; \
vinserti128 $1, RTMP0x, RNOT, RNOT; \
vpxor inreg, RNOT, yreg; \
vmovdqu RNOT, (n * 32)(%rsi);
movq (0 * 8)(%r9), %r10;
movq (1 * 8)(%r9), %r11;
movq (2 * 8)(%r9), %r12;
movq (3 * 8)(%r9), %r13;
OCB_INPUT(0, %r10, %r11, RA0, RTMP2);
OCB_INPUT(1, %r12, %r13, RA1, RTMP3);
movq (4 * 8)(%r9), %r10;
movq (5 * 8)(%r9), %r11;
movq (6 * 8)(%r9), %r12;
movq (7 * 8)(%r9), %r13;
OCB_INPUT(2, %r10, %r11, RA2, RTMP4);
vpternlogd $0x96, RTMP2, RTMP3, RTMP4;
OCB_INPUT(3, %r12, %r13, RA3, RX0);
movq (8 * 8)(%r9), %r10;
movq (9 * 8)(%r9), %r11;
movq (10 * 8)(%r9), %r12;
movq (11 * 8)(%r9), %r13;
OCB_INPUT(4, %r10, %r11, RB0, RX1);
OCB_INPUT(5, %r12, %r13, RB1, RTMP2);
vpternlogd $0x96, RX0, RX1, RTMP2;
movq (12 * 8)(%r9), %r10;
movq (13 * 8)(%r9), %r11;
movq (14 * 8)(%r9), %r12;
movq (15 * 8)(%r9), %r13;
OCB_INPUT(6, %r10, %r11, RB2, RTMP3);
OCB_INPUT(7, %r12, %r13, RB3, RX0);
vpternlogd $0x96, RTMP3, RX0, RTMP1;
#undef OCB_INPUT
vpternlogd $0x96, RTMP4, RTMP2, RTMP1;
vextracti128 $1, RTMP1, RNOTx;
vmovdqu RTMP0x, (%rcx);
vpxor RNOTx, RTMP1x, RTMP1x;
vmovdqu RTMP1x, (%r8);
movq (0 * 8)(%rsp), %r10;
movq (1 * 8)(%rsp), %r11;
movq (2 * 8)(%rsp), %r12;
movq (3 * 8)(%rsp), %r13;
CFI_RESTORE(%r10);
CFI_RESTORE(%r11);
CFI_RESTORE(%r12);
CFI_RESTORE(%r13);
call __sm4_gfni_crypt_blk16;
addq $(4 * 8), %rsp;
CFI_ADJUST_CFA_OFFSET(-4 * 8);
vpxor (0 * 32)(%rsi), RA0, RA0;
vpxor (1 * 32)(%rsi), RA1, RA1;
vpxor (2 * 32)(%rsi), RA2, RA2;
vpxor (3 * 32)(%rsi), RA3, RA3;
vpxor (4 * 32)(%rsi), RB0, RB0;
vpxor (5 * 32)(%rsi), RB1, RB1;
vpxor (6 * 32)(%rsi), RB2, RB2;
vpxor (7 * 32)(%rsi), RB3, RB3;
vmovdqu RA0, (0 * 32)(%rsi);
vmovdqu RA1, (1 * 32)(%rsi);
vmovdqu RA2, (2 * 32)(%rsi);
vmovdqu RA3, (3 * 32)(%rsi);
vmovdqu RB0, (4 * 32)(%rsi);
vmovdqu RB1, (5 * 32)(%rsi);
vmovdqu RB2, (6 * 32)(%rsi);
vmovdqu RB3, (7 * 32)(%rsi);
vzeroall;
ret_spec_stop;
CFI_ENDPROC();
ELF(.size _gcry_sm4_gfni_avx512_ocb_enc,.-_gcry_sm4_gfni_avx512_ocb_enc;)
.align 16
.globl _gcry_sm4_gfni_avx512_ocb_dec
ELF(.type _gcry_sm4_gfni_avx512_ocb_dec,@function;)
_gcry_sm4_gfni_avx512_ocb_dec:
/* input:
* %rdi: ctx, CTX
* %rsi: dst (16 blocks)
* %rdx: src (16 blocks)
* %rcx: offset
* %r8 : checksum
* %r9 : L pointers (void *L[16])
*/
CFI_STARTPROC();
spec_stop_avx512;
subq $(4 * 8), %rsp;
CFI_ADJUST_CFA_OFFSET(4 * 8);
movq %r10, (0 * 8)(%rsp);
movq %r11, (1 * 8)(%rsp);
movq %r12, (2 * 8)(%rsp);
movq %r13, (3 * 8)(%rsp);
CFI_REL_OFFSET(%r10, 0 * 8);
CFI_REL_OFFSET(%r11, 1 * 8);
CFI_REL_OFFSET(%r12, 2 * 8);
CFI_REL_OFFSET(%r13, 3 * 8);
vmovdqu (%rcx), RTMP0x;
/* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
/* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i) */
#define OCB_INPUT(n, l0reg, l1reg, yreg) \
vmovdqu (n * 32)(%rdx), yreg; \
vpxor (l0reg), RTMP0x, RNOTx; \
vpxor (l1reg), RNOTx, RTMP0x; \
vinserti128 $1, RTMP0x, RNOT, RNOT; \
vpxor yreg, RNOT, yreg; \
vmovdqu RNOT, (n * 32)(%rsi);
movq (0 * 8)(%r9), %r10;
movq (1 * 8)(%r9), %r11;
movq (2 * 8)(%r9), %r12;
movq (3 * 8)(%r9), %r13;
OCB_INPUT(0, %r10, %r11, RA0);
OCB_INPUT(1, %r12, %r13, RA1);
movq (4 * 8)(%r9), %r10;
movq (5 * 8)(%r9), %r11;
movq (6 * 8)(%r9), %r12;
movq (7 * 8)(%r9), %r13;
OCB_INPUT(2, %r10, %r11, RA2);
OCB_INPUT(3, %r12, %r13, RA3);
movq (8 * 8)(%r9), %r10;
movq (9 * 8)(%r9), %r11;
movq (10 * 8)(%r9), %r12;
movq (11 * 8)(%r9), %r13;
OCB_INPUT(4, %r10, %r11, RB0);
OCB_INPUT(5, %r12, %r13, RB1);
movq (12 * 8)(%r9), %r10;
movq (13 * 8)(%r9), %r11;
movq (14 * 8)(%r9), %r12;
movq (15 * 8)(%r9), %r13;
OCB_INPUT(6, %r10, %r11, RB2);
OCB_INPUT(7, %r12, %r13, RB3);
#undef OCB_INPUT
vmovdqu RTMP0x, (%rcx);
movq (0 * 8)(%rsp), %r10;
movq (1 * 8)(%rsp), %r11;
movq (2 * 8)(%rsp), %r12;
movq (3 * 8)(%rsp), %r13;
CFI_RESTORE(%r10);
CFI_RESTORE(%r11);
CFI_RESTORE(%r12);
CFI_RESTORE(%r13);
call __sm4_gfni_crypt_blk16;
addq $(4 * 8), %rsp;
CFI_ADJUST_CFA_OFFSET(-4 * 8);
vpxor (0 * 32)(%rsi), RA0, RA0;
vpxor (1 * 32)(%rsi), RA1, RA1;
vpxor (2 * 32)(%rsi), RA2, RA2;
vpxor (3 * 32)(%rsi), RA3, RA3;
vpxor (4 * 32)(%rsi), RB0, RB0;
vpxor (5 * 32)(%rsi), RB1, RB1;
vpxor (6 * 32)(%rsi), RB2, RB2;
vpxor (7 * 32)(%rsi), RB3, RB3;
/* Checksum_i = Checksum_{i-1} xor P_i */
vmovdqu RA0, (0 * 32)(%rsi);
vmovdqu RA1, (1 * 32)(%rsi);
vmovdqu RA2, (2 * 32)(%rsi);
vmovdqu RA3, (3 * 32)(%rsi);
vmovdqu RB0, (4 * 32)(%rsi);
vmovdqu RB1, (5 * 32)(%rsi);
vmovdqu RB2, (6 * 32)(%rsi);
vmovdqu RB3, (7 * 32)(%rsi);
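/* Fold the 16 recovered plaintext blocks into the checksum: vpternlogd
 * 0x96 gives a three-way XOR per step, the two 128-bit lanes of the
 * 256-bit accumulator are then XORed together, and the result is merged
 * with the checksum already stored at (%r8). */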
vpternlogd $0x96, RA0, RA1, RA2;
vpternlogd $0x96, RA3, RB0, RB1;
vpternlogd $0x96, RB2, RB3, RA2;
vpxord RA2, RB1, RTMP1;
vextracti128 $1, RTMP1, RNOTx;
vpternlogd $0x96, (%r8), RNOTx, RTMP1x;
vmovdqu RTMP1x, (%r8);
vzeroall;
ret_spec_stop;
CFI_ENDPROC();
ELF(.size _gcry_sm4_gfni_avx512_ocb_dec,.-_gcry_sm4_gfni_avx512_ocb_dec;)
.align 16
.globl _gcry_sm4_gfni_avx512_ocb_auth
ELF(.type _gcry_sm4_gfni_avx512_ocb_auth,@function;)
_gcry_sm4_gfni_avx512_ocb_auth:
/* input:
* %rdi: ctx, CTX
* %rsi: abuf (16 blocks)
* %rdx: offset
* %rcx: checksum
* %r8 : L pointers (void *L[16])
*/
CFI_STARTPROC();
spec_stop_avx512;
subq $(4 * 8), %rsp;
CFI_ADJUST_CFA_OFFSET(4 * 8);
movq %r10, (0 * 8)(%rsp);
movq %r11, (1 * 8)(%rsp);
movq %r12, (2 * 8)(%rsp);
movq %r13, (3 * 8)(%rsp);
CFI_REL_OFFSET(%r10, 0 * 8);
CFI_REL_OFFSET(%r11, 1 * 8);
CFI_REL_OFFSET(%r12, 2 * 8);
CFI_REL_OFFSET(%r13, 3 * 8);
vmovdqu (%rdx), RTMP0x;
/* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
/* Sum_i = Sum_{i-1} xor ENCIPHER(K, A_i xor Offset_i) */
#define OCB_INPUT(n, l0reg, l1reg, yreg) \
vmovdqu (n * 32)(%rsi), yreg; \
vpxor (l0reg), RTMP0x, RNOTx; \
vpxor (l1reg), RNOTx, RTMP0x; \
vinserti128 $1, RTMP0x, RNOT, RNOT; \
vpxor yreg, RNOT, yreg;
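/* Same offset chaining as in the enc/dec paths, but the masked blocks are
 * only encrypted (not written out); their XOR is folded into the Sum
 * accumulator after the cipher call. */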
movq (0 * 8)(%r8), %r10;
movq (1 * 8)(%r8), %r11;
movq (2 * 8)(%r8), %r12;
movq (3 * 8)(%r8), %r13;
OCB_INPUT(0, %r10, %r11, RA0);
OCB_INPUT(1, %r12, %r13, RA1);
movq (4 * 8)(%r8), %r10;
movq (5 * 8)(%r8), %r11;
movq (6 * 8)(%r8), %r12;
movq (7 * 8)(%r8), %r13;
OCB_INPUT(2, %r10, %r11, RA2);
OCB_INPUT(3, %r12, %r13, RA3);
movq (8 * 8)(%r8), %r10;
movq (9 * 8)(%r8), %r11;
movq (10 * 8)(%r8), %r12;
movq (11 * 8)(%r8), %r13;
OCB_INPUT(4, %r10, %r11, RB0);
OCB_INPUT(5, %r12, %r13, RB1);
movq (12 * 8)(%r8), %r10;
movq (13 * 8)(%r8), %r11;
movq (14 * 8)(%r8), %r12;
movq (15 * 8)(%r8), %r13;
OCB_INPUT(6, %r10, %r11, RB2);
OCB_INPUT(7, %r12, %r13, RB3);
#undef OCB_INPUT
vmovdqu RTMP0x, (%rdx);
movq (0 * 8)(%rsp), %r10;
movq (1 * 8)(%rsp), %r11;
movq (2 * 8)(%rsp), %r12;
movq (3 * 8)(%rsp), %r13;
CFI_RESTORE(%r10);
CFI_RESTORE(%r11);
CFI_RESTORE(%r12);
CFI_RESTORE(%r13);
call __sm4_gfni_crypt_blk16;
addq $(4 * 8), %rsp;
CFI_ADJUST_CFA_OFFSET(-4 * 8);
vpternlogd $0x96, RA0, RA1, RA2;
vpternlogd $0x96, RA3, RB0, RB1;
vpternlogd $0x96, RB2, RB3, RA2;
vpxor RA2, RB1, RTMP1;
vextracti128 $1, RTMP1, RNOTx;
vpternlogd $0x96, (%rcx), RNOTx, RTMP1x;
vmovdqu RTMP1x, (%rcx);
vzeroall;
ret_spec_stop;
CFI_ENDPROC();
ELF(.size _gcry_sm4_gfni_avx512_ocb_auth,.-_gcry_sm4_gfni_avx512_ocb_auth;)
/**********************************************************************
32-way SM4 with GFNI and AVX512 (512-bit vectors)
**********************************************************************/
.align 16
ELF(.type __sm4_gfni_crypt_blk32,@function;)
__sm4_gfni_crypt_blk32:
/* input:
* %rdi: ctx, CTX
* RA0z, RA1z, RA2z, RA3z, RB0z, RB1z, RB2z, RB3z: 32 parallel plaintext blocks
* output:
* RA0z, RA1z, RA2z, RA3z, RB0z, RB1z, RB2z, RB3z: 32 parallel ciphertext blocks
*/
CFI_STARTPROC();
vbroadcasti32x4 .Lbswap32_mask rRIP, RTMP2z;
vpshufb RTMP2z, RA0z, RA0z;
vpshufb RTMP2z, RA1z, RA1z;
vpshufb RTMP2z, RA2z, RA2z;
vpshufb RTMP2z, RA3z, RA3z;
vpshufb RTMP2z, RB0z, RB0z;
vpshufb RTMP2z, RB1z, RB1z;
vpshufb RTMP2z, RB2z, RB2z;
vpshufb RTMP2z, RB3z, RB3z;
vbroadcasti32x4 .Lpre_affine_s rRIP, %zmm16;
vbroadcasti32x4 .Lpost_affine_s rRIP, %zmm17;
transpose_4x4(RA0z, RA1z, RA2z, RA3z, RTMP0z, RTMP1z);
transpose_4x4(RB0z, RB1z, RB2z, RB3z, RTMP0z, RTMP1z);
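/* SM4 operates on big-endian 32-bit words, hence the byte swaps above.
 * The per-lane 4x4 transposes regroup the state so that each of
 * RA0z..RA3z (and RB0z..RB3z) holds the same 32-bit word position of 16
 * blocks, letting a single round update all 32 blocks in parallel. */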
#define ROUND(round, s0, s1, s2, s3, r0, r1, r2, r3) \
vpbroadcastd (4*(round))(%rdi), RX1z; \
vpxord s1, RX1z, RX0z; \
vpternlogd $0x96, s2, s3, RX0z; /* s1 ^ s2 ^ s3 ^ rk */ \
vpxord r1, RX1z, RX1z; \
vpternlogd $0x96, r2, r3, RX1z; /* r1 ^ r2 ^ r3 ^ rk */ \
\
/* sbox, non-linear part */ \
vgf2p8affineqb $0x65, %zmm16, RX0z, RX0z; \
vgf2p8affineinvqb $0xd3, %zmm17, RX0z, RX0z; \
vgf2p8affineqb $0x65, %zmm16, RX1z, RX1z; \
vgf2p8affineinvqb $0xd3, %zmm17, RX1z, RX1z; \
\
/* linear part */ \
vprold $2, RX0z, RTMP0z; \
vprold $10, RX0z, RTMP1z; \
vprold $18, RX0z, RTMP2z; \
vpternlogd $0x96, RTMP0z, RX0z, s0; /* s0 ^ x ^ rol(x,2) */ \
vprold $24, RX0z, RX0z; \
vprold $2, RX1z, RTMP3z; \
vprold $10, RX1z, RTMP4z; \
vprold $18, RX1z, RTMP0z; \
vpternlogd $0x96, RTMP3z, RX1z, r0; /* r0 ^ x ^ rol(x,2) */ \
vprold $24, RX1z, RX1z; \
vpternlogd $0x96, RTMP1z, RTMP2z, RX0z; /* rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \
vpternlogd $0x96, RTMP4z, RTMP0z, RX1z; /* rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \
vpxord RX0z, s0, s0; /* s0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \
vpxord RX1z, r0, r0; /* r0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */
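/* Scalar view of one SM4 round per word lane, for reference:
 *   t  = s1 ^ s2 ^ s3 ^ rk
 *   t  = Sbox(t)        (GFNI: affine, GF(2^8) inverse, affine)
 *   s0 ^= t ^ rol(t,2) ^ rol(t,10) ^ rol(t,18) ^ rol(t,24)
 * vpternlogd with immediate 0x96 is a three-input XOR, which keeps both
 * the key mixing and the rotate combination short. */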
leaq (32*4)(%rdi), %rax;
.align 16
.Lroundloop_blk32:
ROUND(0, RA0z, RA1z, RA2z, RA3z, RB0z, RB1z, RB2z, RB3z);
ROUND(1, RA1z, RA2z, RA3z, RA0z, RB1z, RB2z, RB3z, RB0z);
ROUND(2, RA2z, RA3z, RA0z, RA1z, RB2z, RB3z, RB0z, RB1z);
ROUND(3, RA3z, RA0z, RA1z, RA2z, RB3z, RB0z, RB1z, RB2z);
leaq (4*4)(%rdi), %rdi;
cmpq %rax, %rdi;
jne .Lroundloop_blk32;
#undef ROUND
vbroadcasti32x4 .Lbswap128_mask rRIP, RTMP2z;
transpose_4x4(RA0z, RA1z, RA2z, RA3z, RTMP0z, RTMP1z);
transpose_4x4(RB0z, RB1z, RB2z, RB3z, RTMP0z, RTMP1z);
vpshufb RTMP2z, RA0z, RA0z;
vpshufb RTMP2z, RA1z, RA1z;
vpshufb RTMP2z, RA2z, RA2z;
vpshufb RTMP2z, RA3z, RA3z;
vpshufb RTMP2z, RB0z, RB0z;
vpshufb RTMP2z, RB1z, RB1z;
vpshufb RTMP2z, RB2z, RB2z;
vpshufb RTMP2z, RB3z, RB3z;
vpxord %zmm16, %zmm16, %zmm16;
vpxord %zmm17, %zmm17, %zmm17;
ret_spec_stop;
CFI_ENDPROC();
ELF(.size __sm4_gfni_crypt_blk32,.-__sm4_gfni_crypt_blk32;)
.align 16
.globl _gcry_sm4_gfni_avx512_crypt_blk32
ELF(.type _gcry_sm4_gfni_avx512_crypt_blk32,@function;)
_gcry_sm4_gfni_avx512_crypt_blk32:
/* input:
* %rdi: ctx, CTX
* %rsi: dst (32 blocks)
* %rdx: src (32 blocks)
*/
CFI_STARTPROC();
spec_stop_avx512;
/* Load input */
vmovdqu32 (0 * 64)(%rdx), RA0z;
vmovdqu32 (1 * 64)(%rdx), RA1z;
vmovdqu32 (2 * 64)(%rdx), RA2z;
vmovdqu32 (3 * 64)(%rdx), RA3z;
vmovdqu32 (4 * 64)(%rdx), RB0z;
vmovdqu32 (5 * 64)(%rdx), RB1z;
vmovdqu32 (6 * 64)(%rdx), RB2z;
vmovdqu32 (7 * 64)(%rdx), RB3z;
call __sm4_gfni_crypt_blk32;
vmovdqu32 RA0z, (0 * 64)(%rsi);
vmovdqu32 RA1z, (1 * 64)(%rsi);
vmovdqu32 RA2z, (2 * 64)(%rsi);
vmovdqu32 RA3z, (3 * 64)(%rsi);
vmovdqu32 RB0z, (4 * 64)(%rsi);
vmovdqu32 RB1z, (5 * 64)(%rsi);
vmovdqu32 RB2z, (6 * 64)(%rsi);
vmovdqu32 RB3z, (7 * 64)(%rsi);
xorl %eax, %eax;
vzeroall;
ret_spec_stop;
CFI_ENDPROC();
ELF(.size _gcry_sm4_gfni_avx512_crypt_blk32,.-_gcry_sm4_gfni_avx512_crypt_blk32;)
.align 16
.globl _gcry_sm4_gfni_avx512_ctr_enc_blk32
ELF(.type _gcry_sm4_gfni_avx512_ctr_enc_blk32,@function;)
_gcry_sm4_gfni_avx512_ctr_enc_blk32:
/* input:
* %rdi: ctx, CTX
* %rsi: dst (32 blocks)
* %rdx: src (32 blocks)
* %rcx: iv (big endian, 128bit)
*/
CFI_STARTPROC();
spec_stop_avx512;
+ cmpb $(0x100 - 32), 15(%rcx);
+ jbe .Lctr_byteadd32;
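+ /* The check above takes the byte-add fast path when the low byte of the
+  * big-endian counter stays within range for counter+0 .. counter+31, so
+  * the 32 block counters can be built with plain byte additions; only the
+  * stored counter update may still need a full carry (the 'je' case at
+  * .Lctr_byteadd32). */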
+
vbroadcasti64x2 .Lbswap128_mask rRIP, RTMP0z;
vmovdqa32 .Lcounter0123_lo rRIP, RTMP1z;
vbroadcasti64x2 .Lcounter4444_lo rRIP, RTMP2z;
vbroadcasti64x2 .Lcounter8888_lo rRIP, RTMP3z;
vbroadcasti64x2 .Lcounter16161616_lo rRIP, RTMP4z;
/* load IV and byteswap */
movq 8(%rcx), %r11;
bswapq %r11;
vbroadcasti64x2 (%rcx), RB3z;
vpshufb RTMP0z, RB3z, RB3z;
/* check need for handling 64-bit overflow and carry */
cmpq $(0xffffffffffffffff - 32), %r11;
ja .Lhandle_ctr_carry_blk32;
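/* Fall-through: the low 64 bits cannot overflow within the next 32
 * counters, so plain 64-bit vpaddq on the low quadwords suffices.  The
 * carry path below uses add_le128 to propagate into the high 64 bits
 * instead. */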
/* construct IVs */
vpaddq RTMP1z, RB3z, RA0z; /* +0:+1:+2:+3 */
vpaddq RTMP2z, RA0z, RA1z; /* +4:+5:+6:+7 */
vpaddq RTMP3z, RA0z, RA2z; /* +8:+9:+10:+11 */
vpaddq RTMP3z, RA1z, RA3z; /* +12:+13:+14:+15 */
vpaddq RTMP4z, RA0z, RB0z; /* +16... */
vpaddq RTMP4z, RA1z, RB1z; /* +20... */
vpaddq RTMP4z, RA2z, RB2z; /* +24... */
vpaddq RTMP4z, RA3z, RB3z; /* +28... */
/* Update counter */
leaq 32(%r11), %r11;
bswapq %r11;
movq %r11, 8(%rcx);
jmp .Lctr_carry_done_blk32;
.Lhandle_ctr_carry_blk32:
vbroadcasti64x2 .Lcounter1111_hi rRIP, RNOTz;
/* construct IVs */
add_le128(RA0z, RB3z, RTMP1z, RNOTz); /* +0:+1:+2:+3 */
add_le128(RA1z, RA0z, RTMP2z, RNOTz); /* +4:+5:+6:+7 */
add_le128(RA2z, RA0z, RTMP3z, RNOTz); /* +8:+9:+10:+11 */
add_le128(RA3z, RA1z, RTMP3z, RNOTz); /* +12:+13:+14:+15 */
add_le128(RB0z, RA0z, RTMP4z, RNOTz); /* +16... */
add_le128(RB1z, RA1z, RTMP4z, RNOTz); /* +20... */
add_le128(RB2z, RA2z, RTMP4z, RNOTz); /* +24... */
add_le128(RB3z, RA3z, RTMP4z, RNOTz); /* +28... */
/* Update counter */
addq $32, %r11;
movq (%rcx), %r10;
bswapq %r10;
adcq $0, %r10;
bswapq %r11;
bswapq %r10;
movq %r11, 8(%rcx);
movq %r10, (%rcx);
.align 16
.Lctr_carry_done_blk32:
/* Byte-swap IVs. */
vpshufb RTMP0z, RA0z, RA0z;
vpshufb RTMP0z, RA1z, RA1z;
vpshufb RTMP0z, RA2z, RA2z;
vpshufb RTMP0z, RA3z, RA3z;
vpshufb RTMP0z, RB0z, RB0z;
vpshufb RTMP0z, RB1z, RB1z;
vpshufb RTMP0z, RB2z, RB2z;
vpshufb RTMP0z, RB3z, RB3z;
+.align 16
+.Lload_ctr_done32:
call __sm4_gfni_crypt_blk32;
vpxord (0 * 64)(%rdx), RA0z, RA0z;
vpxord (1 * 64)(%rdx), RA1z, RA1z;
vpxord (2 * 64)(%rdx), RA2z, RA2z;
vpxord (3 * 64)(%rdx), RA3z, RA3z;
vpxord (4 * 64)(%rdx), RB0z, RB0z;
vpxord (5 * 64)(%rdx), RB1z, RB1z;
vpxord (6 * 64)(%rdx), RB2z, RB2z;
vpxord (7 * 64)(%rdx), RB3z, RB3z;
vmovdqu32 RA0z, (0 * 64)(%rsi);
vmovdqu32 RA1z, (1 * 64)(%rsi);
vmovdqu32 RA2z, (2 * 64)(%rsi);
vmovdqu32 RA3z, (3 * 64)(%rsi);
vmovdqu32 RB0z, (4 * 64)(%rsi);
vmovdqu32 RB1z, (5 * 64)(%rsi);
vmovdqu32 RB2z, (6 * 64)(%rsi);
vmovdqu32 RB3z, (7 * 64)(%rsi);
vzeroall;
kxorq %k1, %k1, %k1;
ret_spec_stop;
+
+.align 16
+.Lctr_byteadd_full_ctr_carry32:
+ movq 8(%rcx), %r11;
+ movq (%rcx), %r10;
+ bswapq %r11;
+ bswapq %r10;
+ addq $32, %r11;
+ adcq $0, %r10;
+ bswapq %r11;
+ bswapq %r10;
+ movq %r11, 8(%rcx);
+ movq %r10, (%rcx);
+ jmp .Lctr_byteadd_zmm32;
+.align 16
+.Lctr_byteadd32:
+ vbroadcasti64x2 (%rcx), RA3z;
+ je .Lctr_byteadd_full_ctr_carry32;
+ addb $32, 15(%rcx);
+.Lctr_byteadd_zmm32:
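+ /* Build counters +0..+31 with byte adds on the low byte of each 128-bit
+  * lane: RA3z holds four copies of the base counter, RB3z the base plus
+  * 16, and the .Lbige_addb_* tables appear to supply the per-lane
+  * increments (no carries are possible on this path). */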
+ vbroadcasti64x2 .Lbige_addb_16 rRIP, RB3z;
+ vpaddb RB3z, RA3z, RB3z;
+ vpaddb .Lbige_addb_0_1 rRIP, RA3z, RA0z;
+ vpaddb .Lbige_addb_4_5 rRIP, RA3z, RA1z;
+ vpaddb .Lbige_addb_8_9 rRIP, RA3z, RA2z;
+ vpaddb .Lbige_addb_12_13 rRIP, RA3z, RA3z;
+ vpaddb .Lbige_addb_0_1 rRIP, RB3z, RB0z;
+ vpaddb .Lbige_addb_4_5 rRIP, RB3z, RB1z;
+ vpaddb .Lbige_addb_8_9 rRIP, RB3z, RB2z;
+ vpaddb .Lbige_addb_12_13 rRIP, RB3z, RB3z;
+
+ jmp .Lload_ctr_done32;
CFI_ENDPROC();
ELF(.size _gcry_sm4_gfni_avx512_ctr_enc_blk32,.-_gcry_sm4_gfni_avx512_ctr_enc_blk32;)
.align 16
.globl _gcry_sm4_gfni_avx512_cbc_dec_blk32
ELF(.type _gcry_sm4_gfni_avx512_cbc_dec_blk32,@function;)
_gcry_sm4_gfni_avx512_cbc_dec_blk32:
/* input:
* %rdi: ctx, CTX
* %rsi: dst (32 blocks)
* %rdx: src (32 blocks)
* %rcx: iv
*/
CFI_STARTPROC();
spec_stop_avx512;
vmovdqu32 (0 * 64)(%rdx), RA0z;
vmovdqu32 (1 * 64)(%rdx), RA1z;
vmovdqu32 (2 * 64)(%rdx), RA2z;
vmovdqu32 (3 * 64)(%rdx), RA3z;
vmovdqu32 (4 * 64)(%rdx), RB0z;
vmovdqu32 (5 * 64)(%rdx), RB1z;
vmovdqu32 (6 * 64)(%rdx), RB2z;
vmovdqu32 (7 * 64)(%rdx), RB3z;
call __sm4_gfni_crypt_blk32;
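/* CBC decryption: each decrypted block is XORed with the previous
 * ciphertext block.  RNOTz is assembled as [IV, C0, C1, C2] for the first
 * four blocks; the remaining XORs read the ciphertext stream shifted back
 * by one block (offset 48 into the previous 64-byte group), and the last
 * ciphertext block is saved as the next IV. */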
vmovdqu (%rcx), RNOTx;
vinserti64x2 $1, (0 * 16)(%rdx), RNOT, RNOT;
vinserti64x4 $1, (1 * 16)(%rdx), RNOTz, RNOTz;
vpxord RNOTz, RA0z, RA0z;
vpxord (0 * 64 + 48)(%rdx), RA1z, RA1z;
vpxord (1 * 64 + 48)(%rdx), RA2z, RA2z;
vpxord (2 * 64 + 48)(%rdx), RA3z, RA3z;
vpxord (3 * 64 + 48)(%rdx), RB0z, RB0z;
vpxord (4 * 64 + 48)(%rdx), RB1z, RB1z;
vpxord (5 * 64 + 48)(%rdx), RB2z, RB2z;
vpxord (6 * 64 + 48)(%rdx), RB3z, RB3z;
vmovdqu (7 * 64 + 48)(%rdx), RNOTx;
vmovdqu RNOTx, (%rcx); /* store new IV */
vmovdqu32 RA0z, (0 * 64)(%rsi);
vmovdqu32 RA1z, (1 * 64)(%rsi);
vmovdqu32 RA2z, (2 * 64)(%rsi);
vmovdqu32 RA3z, (3 * 64)(%rsi);
vmovdqu32 RB0z, (4 * 64)(%rsi);
vmovdqu32 RB1z, (5 * 64)(%rsi);
vmovdqu32 RB2z, (6 * 64)(%rsi);
vmovdqu32 RB3z, (7 * 64)(%rsi);
vzeroall;
ret_spec_stop;
CFI_ENDPROC();
ELF(.size _gcry_sm4_gfni_avx512_cbc_dec_blk32,.-_gcry_sm4_gfni_avx512_cbc_dec_blk32;)
.align 16
.globl _gcry_sm4_gfni_avx512_cfb_dec_blk32
ELF(.type _gcry_sm4_gfni_avx512_cfb_dec_blk32,@function;)
_gcry_sm4_gfni_avx512_cfb_dec_blk32:
/* input:
* %rdi: ctx, CTX
* %rsi: dst (32 blocks)
* %rdx: src (32 blocks)
* %rcx: iv
*/
CFI_STARTPROC();
spec_stop_avx512;
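/* CFB decryption: P_i = C_i xor ENCIPHER(K, C_{i-1}), with the IV as
 * C_{-1}.  The cipher input below is therefore the ciphertext stream
 * shifted back by one block, [IV, C0 .. C30], and the output is XORed
 * with C0 .. C31. */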
/* Load input */
vmovdqu (%rcx), RA0x;
vinserti64x2 $1, (%rdx), RA0, RA0;
vinserti64x4 $1, 16(%rdx), RA0z, RA0z;
vmovdqu32 (0 * 64 + 48)(%rdx), RA1z;
vmovdqu32 (1 * 64 + 48)(%rdx), RA2z;
vmovdqu32 (2 * 64 + 48)(%rdx), RA3z;
vmovdqu32 (3 * 64 + 48)(%rdx), RB0z;
vmovdqu32 (4 * 64 + 48)(%rdx), RB1z;
vmovdqu32 (5 * 64 + 48)(%rdx), RB2z;
vmovdqu32 (6 * 64 + 48)(%rdx), RB3z;
/* Update IV */
vmovdqu (7 * 64 + 48)(%rdx), RNOTx;
vmovdqu RNOTx, (%rcx);
call __sm4_gfni_crypt_blk32;
vpxord (0 * 64)(%rdx), RA0z, RA0z;
vpxord (1 * 64)(%rdx), RA1z, RA1z;
vpxord (2 * 64)(%rdx), RA2z, RA2z;
vpxord (3 * 64)(%rdx), RA3z, RA3z;
vpxord (4 * 64)(%rdx), RB0z, RB0z;
vpxord (5 * 64)(%rdx), RB1z, RB1z;
vpxord (6 * 64)(%rdx), RB2z, RB2z;
vpxord (7 * 64)(%rdx), RB3z, RB3z;
vmovdqu32 RA0z, (0 * 64)(%rsi);
vmovdqu32 RA1z, (1 * 64)(%rsi);
vmovdqu32 RA2z, (2 * 64)(%rsi);
vmovdqu32 RA3z, (3 * 64)(%rsi);
vmovdqu32 RB0z, (4 * 64)(%rsi);
vmovdqu32 RB1z, (5 * 64)(%rsi);
vmovdqu32 RB2z, (6 * 64)(%rsi);
vmovdqu32 RB3z, (7 * 64)(%rsi);
vzeroall;
ret_spec_stop;
CFI_ENDPROC();
ELF(.size _gcry_sm4_gfni_avx512_cfb_dec_blk32,.-_gcry_sm4_gfni_avx512_cfb_dec_blk32;)
.align 16
.globl _gcry_sm4_gfni_avx512_ocb_enc_blk32
ELF(.type _gcry_sm4_gfni_avx512_ocb_enc_blk32,@function;)
_gcry_sm4_gfni_avx512_ocb_enc_blk32:
/* input:
* %rdi: ctx, CTX
* %rsi: dst (32 blocks)
* %rdx: src (32 blocks)
* %rcx: offset
* %r8 : checksum
* %r9 : L pointers (void *L[32])
*/
CFI_STARTPROC();
spec_stop_avx512;
subq $(5 * 8), %rsp;
CFI_ADJUST_CFA_OFFSET(5 * 8);
movq %r12, (0 * 8)(%rsp);
movq %r13, (1 * 8)(%rsp);
movq %r14, (2 * 8)(%rsp);
movq %r15, (3 * 8)(%rsp);
movq %rbx, (4 * 8)(%rsp);
CFI_REL_OFFSET(%r12, 0 * 8);
CFI_REL_OFFSET(%r13, 1 * 8);
CFI_REL_OFFSET(%r14, 2 * 8);
CFI_REL_OFFSET(%r15, 3 * 8);
CFI_REL_OFFSET(%rbx, 4 * 8);
vmovdqu (%rcx), RTMP0x;
/* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
/* Checksum_i = Checksum_{i-1} xor P_i */
/* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i) */
#define OCB_INPUT(n, l0reg, l1reg, l2reg, l3reg, zreg, zplain) \
vmovdqu32 (n * 64)(%rdx), zplain; \
vpxor (l0reg), RTMP0x, RNOTx; \
vpxor (l1reg), RNOTx, RTMP0x; \
vinserti64x2 $1, RTMP0x, RNOT, RNOT; \
vpxor (l2reg), RTMP0x, RTMP0x; \
vinserti64x2 $2, RTMP0x, RNOTz, RNOTz; \
vpxor (l3reg), RTMP0x, RTMP0x; \
vinserti64x2 $3, RTMP0x, RNOTz, RNOTz; \
vpxord zplain, RNOTz, zreg; \
vmovdqu32 RNOTz, (n * 64)(%rsi);
#define OCB_LOAD_PTRS(n) \
movq ((n * 4 * 8) + (0 * 8))(%r9), %r10; \
movq ((n * 4 * 8) + (1 * 8))(%r9), %r11; \
movq ((n * 4 * 8) + (2 * 8))(%r9), %r12; \
movq ((n * 4 * 8) + (3 * 8))(%r9), %r13; \
movq ((n * 4 * 8) + (4 * 8))(%r9), %r14; \
movq ((n * 4 * 8) + (5 * 8))(%r9), %r15; \
movq ((n * 4 * 8) + (6 * 8))(%r9), %rax; \
movq ((n * 4 * 8) + (7 * 8))(%r9), %rbx;
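/* Here OCB_INPUT covers four blocks at a time: the offset is chained
 * through four L pointers and the four per-block offsets are packed into
 * the 128-bit lanes of RNOTz.  The offsets are stored to dst for the
 * post-whitening XOR after the cipher call, while zplain keeps the
 * plaintext for the checksum. */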
OCB_LOAD_PTRS(0);
OCB_INPUT(0, %r10, %r11, %r12, %r13, RA0z, RTMP1z);
OCB_INPUT(1, %r14, %r15, %rax, %rbx, RA1z, RTMP2z);
OCB_LOAD_PTRS(2);
OCB_INPUT(2, %r10, %r11, %r12, %r13, RA2z, RTMP3z);
vpternlogd $0x96, RTMP1z, RTMP2z, RTMP3z;
OCB_INPUT(3, %r14, %r15, %rax, %rbx, RA3z, RTMP4z);
OCB_LOAD_PTRS(4);
OCB_INPUT(4, %r10, %r11, %r12, %r13, RB0z, RX0z);
OCB_INPUT(5, %r14, %r15, %rax, %rbx, RB1z, RX1z);
vpternlogd $0x96, RTMP4z, RX0z, RX1z;
OCB_LOAD_PTRS(6);
OCB_INPUT(6, %r10, %r11, %r12, %r13, RB2z, RTMP4z);
OCB_INPUT(7, %r14, %r15, %rax, %rbx, RB3z, RX0z);
#undef OCB_LOAD_PTRS
#undef OCB_INPUT
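/* Finish the plaintext checksum: fold the accumulators with three-way
 * XORs, reduce 512 -> 256 -> 128 bits lane by lane, and merge with the
 * checksum stored at (%r8) before writing back offset and checksum. */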
vpternlogd $0x96, RTMP3z, RTMP4z, RX0z;
vpxord RX1z, RX0z, RNOTz;
vextracti64x4 $1, RNOTz, RTMP1;
vpxor RTMP1, RNOT, RNOT;
vextracti128 $1, RNOT, RTMP1x;
vpternlogd $0x96, (%r8), RTMP1x, RNOTx;
movq (0 * 8)(%rsp), %r12;
movq (1 * 8)(%rsp), %r13;
movq (2 * 8)(%rsp), %r14;
movq (3 * 8)(%rsp), %r15;
movq (4 * 8)(%rsp), %rbx;
CFI_RESTORE(%r12);
CFI_RESTORE(%r13);
CFI_RESTORE(%r14);
CFI_RESTORE(%r15);
CFI_RESTORE(%rbx);
vmovdqu RTMP0x, (%rcx);
vmovdqu RNOTx, (%r8);
call __sm4_gfni_crypt_blk32;
addq $(5 * 8), %rsp;
CFI_ADJUST_CFA_OFFSET(-5 * 8);
vpxord (0 * 64)(%rsi), RA0z, RA0z;
vpxord (1 * 64)(%rsi), RA1z, RA1z;
vpxord (2 * 64)(%rsi), RA2z, RA2z;
vpxord (3 * 64)(%rsi), RA3z, RA3z;
vpxord (4 * 64)(%rsi), RB0z, RB0z;
vpxord (5 * 64)(%rsi), RB1z, RB1z;
vpxord (6 * 64)(%rsi), RB2z, RB2z;
vpxord (7 * 64)(%rsi), RB3z, RB3z;
vmovdqu32 RA0z, (0 * 64)(%rsi);
vmovdqu32 RA1z, (1 * 64)(%rsi);
vmovdqu32 RA2z, (2 * 64)(%rsi);
vmovdqu32 RA3z, (3 * 64)(%rsi);
vmovdqu32 RB0z, (4 * 64)(%rsi);
vmovdqu32 RB1z, (5 * 64)(%rsi);
vmovdqu32 RB2z, (6 * 64)(%rsi);
vmovdqu32 RB3z, (7 * 64)(%rsi);
vzeroall;
ret_spec_stop;
CFI_ENDPROC();
ELF(.size _gcry_sm4_gfni_avx512_ocb_enc_blk32,.-_gcry_sm4_gfni_avx512_ocb_enc_blk32;)
.align 16
.globl _gcry_sm4_gfni_avx512_ocb_dec_blk32
ELF(.type _gcry_sm4_gfni_avx512_ocb_dec_blk32,@function;)
_gcry_sm4_gfni_avx512_ocb_dec_blk32:
/* input:
* %rdi: ctx, CTX
* %rsi: dst (32 blocks)
* %rdx: src (32 blocks)
* %rcx: offset
* %r8 : checksum
* %r9 : L pointers (void *L[32])
*/
CFI_STARTPROC();
spec_stop_avx512;
subq $(5 * 8), %rsp;
CFI_ADJUST_CFA_OFFSET(5 * 8);
movq %r12, (0 * 8)(%rsp);
movq %r13, (1 * 8)(%rsp);
movq %r14, (2 * 8)(%rsp);
movq %r15, (3 * 8)(%rsp);
movq %rbx, (4 * 8)(%rsp);
CFI_REL_OFFSET(%r12, 0 * 8);
CFI_REL_OFFSET(%r13, 1 * 8);
CFI_REL_OFFSET(%r14, 2 * 8);
CFI_REL_OFFSET(%r15, 3 * 8);
CFI_REL_OFFSET(%rbx, 4 * 8);
vmovdqu (%rcx), RTMP0x;
/* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
/* P_i = Offset_i xor DECIPHER(K, C_i xor Offset_i) */
#define OCB_INPUT(n, l0reg, l1reg, l2reg, l3reg, zreg) \
vmovdqu32 (n * 64)(%rdx), RTMP1z; \
vpxor (l0reg), RTMP0x, RNOTx; \
vpxor (l1reg), RNOTx, RTMP0x; \
vinserti64x2 $1, RTMP0x, RNOT, RNOT; \
vpxor (l2reg), RTMP0x, RTMP0x; \
vinserti64x2 $2, RTMP0x, RNOTz, RNOTz; \
vpxor (l3reg), RTMP0x, RTMP0x; \
vinserti64x2 $3, RTMP0x, RNOTz, RNOTz; \
vpxord RTMP1z, RNOTz, zreg; \
vmovdqu32 RNOTz, (n * 64)(%rsi);
#define OCB_LOAD_PTRS(n) \
movq ((n * 4 * 8) + (0 * 8))(%r9), %r10; \
movq ((n * 4 * 8) + (1 * 8))(%r9), %r11; \
movq ((n * 4 * 8) + (2 * 8))(%r9), %r12; \
movq ((n * 4 * 8) + (3 * 8))(%r9), %r13; \
movq ((n * 4 * 8) + (4 * 8))(%r9), %r14; \
movq ((n * 4 * 8) + (5 * 8))(%r9), %r15; \
movq ((n * 4 * 8) + (6 * 8))(%r9), %rax; \
movq ((n * 4 * 8) + (7 * 8))(%r9), %rbx;
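/* Same four-lane offset chaining as in ocb_enc_blk32 above; no plaintext
 * copy is kept here because the checksum is computed from the decrypted
 * output after the cipher call. */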
OCB_LOAD_PTRS(0);
OCB_INPUT(0, %r10, %r11, %r12, %r13, RA0z);
OCB_INPUT(1, %r14, %r15, %rax, %rbx, RA1z);
OCB_LOAD_PTRS(2);
OCB_INPUT(2, %r10, %r11, %r12, %r13, RA2z);
OCB_INPUT(3, %r14, %r15, %rax, %rbx, RA3z);
OCB_LOAD_PTRS(4);
OCB_INPUT(4, %r10, %r11, %r12, %r13, RB0z);
OCB_INPUT(5, %r14, %r15, %rax, %rbx, RB1z);
OCB_LOAD_PTRS(6);
OCB_INPUT(6, %r10, %r11, %r12, %r13, RB2z);
OCB_INPUT(7, %r14, %r15, %rax, %rbx, RB3z);
#undef OCB_LOAD_PTRS
#undef OCB_INPUT
movq (0 * 8)(%rsp), %r12;
movq (1 * 8)(%rsp), %r13;
movq (2 * 8)(%rsp), %r14;
movq (3 * 8)(%rsp), %r15;
movq (4 * 8)(%rsp), %rbx;
CFI_RESTORE(%r12);
CFI_RESTORE(%r13);
CFI_RESTORE(%r14);
CFI_RESTORE(%r15);
CFI_RESTORE(%rbx);
vmovdqu RTMP0x, (%rcx);
call __sm4_gfni_crypt_blk32;
addq $(5 * 8), %rsp;
CFI_ADJUST_CFA_OFFSET(-5 * 8);
vpxord (0 * 64)(%rsi), RA0z, RA0z;
vpxord (1 * 64)(%rsi), RA1z, RA1z;
vpxord (2 * 64)(%rsi), RA2z, RA2z;
vpxord (3 * 64)(%rsi), RA3z, RA3z;
vpxord (4 * 64)(%rsi), RB0z, RB0z;
vpxord (5 * 64)(%rsi), RB1z, RB1z;
vpxord (6 * 64)(%rsi), RB2z, RB2z;
vpxord (7 * 64)(%rsi), RB3z, RB3z;
vmovdqu32 RA0z, (0 * 64)(%rsi);
vmovdqu32 RA1z, (1 * 64)(%rsi);
vmovdqu32 RA2z, (2 * 64)(%rsi);
vmovdqu32 RA3z, (3 * 64)(%rsi);
vmovdqu32 RB0z, (4 * 64)(%rsi);
vmovdqu32 RB1z, (5 * 64)(%rsi);
vmovdqu32 RB2z, (6 * 64)(%rsi);
vmovdqu32 RB3z, (7 * 64)(%rsi);
/* Checksum_i = Checksum_{i-1} xor P_i */
vpternlogd $0x96, RA0z, RA1z, RA2z;
vpternlogd $0x96, RA3z, RB0z, RB1z;
vpternlogd $0x96, RB2z, RB3z, RA2z;
vpxord RA2z, RB1z, RTMP1z;
vextracti64x4 $1, RTMP1z, RNOT;
vpxor RNOT, RTMP1, RTMP1;
vextracti128 $1, RTMP1, RNOTx;
vpternlogd $0x96, (%r8), RNOTx, RTMP1x;
vmovdqu RTMP1x, (%r8);
vzeroall;
ret_spec_stop;
CFI_ENDPROC();
ELF(.size _gcry_sm4_gfni_avx512_ocb_dec_blk32,.-_gcry_sm4_gfni_avx512_ocb_dec_blk32;)
#endif /*defined(ENABLE_GFNI_SUPPORT) && defined(ENABLE_AVX512_SUPPORT)*/
#endif /*__x86_64*/
