diff --git a/cipher/Makefile.am b/cipher/Makefile.am
index 7ebcd179..a13e52e9 100644
--- a/cipher/Makefile.am
+++ b/cipher/Makefile.am
@@ -1,285 +1,285 @@
 # Makefile for cipher modules
 # Copyright (C) 1998, 1999, 2000, 2001, 2002,
 #               2003, 2009 Free Software Foundation, Inc.
 #
 # This file is part of Libgcrypt.
 #
 # Libgcrypt is free software; you can redistribute it and/or modify
 # it under the terms of the GNU Lesser General Public License as
 # published by the Free Software Foundation; either version 2.1 of
 # the License, or (at your option) any later version.
 #
 # Libgcrypt is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU Lesser General Public License for more details.
 #
 # You should have received a copy of the GNU Lesser General Public
 # License along with this program; if not, see <http://www.gnu.org/licenses/>.

 # Process this file with automake to produce Makefile.in

 # Need to include ../src in addition to top_srcdir because gcrypt.h is
 # a built header.
 AM_CPPFLAGS = -I../src -I$(top_srcdir)/src -I../mpi -I$(top_srcdir)/mpi
 AM_CFLAGS = $(GPG_ERROR_CFLAGS)

 AM_CCASFLAGS = $(NOEXECSTACK_FLAGS)

 EXTRA_DIST = gost-s-box.c

 CLEANFILES = gost-s-box$(EXEEXT_FOR_BUILD)
 DISTCLEANFILES = gost-sb.h

 noinst_LTLIBRARIES = libcipher.la

 GCRYPT_MODULES = @GCRYPT_CIPHERS@ @GCRYPT_PUBKEY_CIPHERS@ \
                  @GCRYPT_DIGESTS@ @GCRYPT_KDFS@

 libcipher_la_DEPENDENCIES = $(GCRYPT_MODULES)
 libcipher_la_LIBADD = $(GCRYPT_MODULES)

 libcipher_la_SOURCES = \
 	cipher.c cipher-internal.h \
 	cipher-cbc.c \
 	cipher-cfb.c \
 	cipher-ofb.c \
 	cipher-ctr.c \
 	cipher-aeswrap.c \
 	cipher-ccm.c \
 	cipher-cmac.c \
 	cipher-gcm.c \
 	cipher-poly1305.c \
 	cipher-ocb.c \
 	cipher-xts.c \
 	cipher-eax.c \
 	cipher-siv.c \
 	cipher-gcm-siv.c \
 	pubkey.c pubkey-internal.h pubkey-util.c \
 	md.c \
 	mac.c mac-internal.h \
 	mac-hmac.c mac-cmac.c mac-gmac.c mac-poly1305.c \
 	poly1305.c poly1305-internal.h \
 	kdf.c kdf-internal.h \
 	bithelp.h \
 	bufhelp.h \
 	bulkhelp.h \
 	primegen.c \
 	hash-common.c hash-common.h \
 	dsa-common.c rsa-common.c \
 	sha1.h

 EXTRA_libcipher_la_SOURCES = \
 	asm-common-aarch64.h \
 	asm-common-amd64.h \
 	asm-common-s390x.h \
 	asm-inline-s390x.h \
 	asm-poly1305-aarch64.h \
 	asm-poly1305-amd64.h \
 	asm-poly1305-s390x.h \
-	aria.c \
+	aria.c aria-aesni-avx-amd64.S aria-aesni-avx2-amd64.S \
 	arcfour.c arcfour-amd64.S \
 	blowfish.c blowfish-amd64.S blowfish-arm.S \
 	cast5.c cast5-amd64.S cast5-arm.S \
 	chacha20.c chacha20-amd64-ssse3.S chacha20-amd64-avx2.S \
 	chacha20-amd64-avx512.S chacha20-armv7-neon.S chacha20-aarch64.S \
 	chacha20-ppc.c chacha20-s390x.S \
 	chacha20-p10le-8x.s \
 	cipher-gcm-ppc.c cipher-gcm-intel-pclmul.c cipher-gcm-armv7-neon.S \
 	cipher-gcm-armv8-aarch32-ce.S cipher-gcm-armv8-aarch64-ce.S \
 	crc.c crc-intel-pclmul.c crc-armv8-ce.c \
 	crc-armv8-aarch64-ce.S \
 	crc-ppc.c \
 	des.c des-amd64.S \
 	dsa.c \
 	elgamal.c \
 	ecc.c ecc-curves.c ecc-misc.c ecc-common.h \
 	ecc-ecdh.c ecc-ecdsa.c ecc-eddsa.c ecc-gost.c ecc-sm2.c \
 	idea.c \
 	gost28147.c gost.h \
 	gostr3411-94.c \
 	md4.c \
 	md5.c \
 	poly1305-s390x.S poly1305-amd64-avx512.S \
 	poly1305-p10le.s \
 	rijndael.c rijndael-internal.h rijndael-tables.h \
 	rijndael-aesni.c rijndael-padlock.c \
 	rijndael-amd64.S rijndael-arm.S \
 	rijndael-ssse3-amd64.c rijndael-ssse3-amd64-asm.S \
 	rijndael-vaes.c rijndael-vaes-avx2-amd64.S \
 	rijndael-armv8-ce.c rijndael-armv8-aarch32-ce.S \
 	rijndael-armv8-aarch64-ce.S rijndael-aarch64.S \
 	rijndael-ppc.c rijndael-ppc9le.c \
 	rijndael-p10le.c rijndael-gcm-p10le.s \
 	rijndael-ppc-common.h rijndael-ppc-functions.h \
 	rijndael-s390x.c \
 	rmd160.c \
 	rsa.c \
 	salsa20.c salsa20-amd64.S salsa20-armv7-neon.S \
 	scrypt.c \
 	seed.c \
 	serpent.c serpent-sse2-amd64.S \
 	sm4.c sm4-aesni-avx-amd64.S sm4-aesni-avx2-amd64.S \
 	sm4-gfni-avx2-amd64.S sm4-gfni-avx512-amd64.S \
 	sm4-aarch64.S sm4-armv8-aarch64-ce.S sm4-armv9-aarch64-sve-ce.S \
 	serpent-avx2-amd64.S serpent-armv7-neon.S \
 	sha1.c sha1-ssse3-amd64.S sha1-avx-amd64.S sha1-avx-bmi2-amd64.S \
 	sha1-avx2-bmi2-amd64.S sha1-armv7-neon.S sha1-armv8-aarch32-ce.S \
 	sha1-armv8-aarch64-ce.S sha1-intel-shaext.c \
 	sha256.c sha256-ssse3-amd64.S sha256-avx-amd64.S \
 	sha256-avx2-bmi2-amd64.S \
 	sha256-armv8-aarch32-ce.S sha256-armv8-aarch64-ce.S \
 	sha256-intel-shaext.c sha256-ppc.c \
 	sha512.c sha512-ssse3-amd64.S sha512-avx-amd64.S \
 	sha512-avx2-bmi2-amd64.S sha512-avx512-amd64.S \
 	sha512-armv7-neon.S sha512-armv8-aarch64-ce.S sha512-arm.S \
 	sha512-ppc.c sha512-ssse3-i386.c \
 	sm3.c sm3-avx-bmi2-amd64.S sm3-aarch64.S sm3-armv8-aarch64-ce.S \
 	keccak.c keccak_permute_32.h keccak_permute_64.h \
 	keccak-armv7-neon.S keccak-amd64-avx512.S \
 	stribog.c \
 	tiger.c \
 	whirlpool.c whirlpool-sse2-amd64.S \
 	twofish.c twofish-amd64.S twofish-arm.S twofish-aarch64.S \
 	twofish-avx2-amd64.S \
 	rfc2268.c \
 	camellia.c camellia.h camellia-glue.c camellia-aesni-avx-amd64.S \
 	camellia-aesni-avx2-amd64.h \
 	camellia-gfni-avx2-amd64.S camellia-gfni-avx512-amd64.S \
 	camellia-vaes-avx2-amd64.S camellia-aesni-avx2-amd64.S \
 	camellia-arm.S camellia-aarch64.S \
 	blake2.c \
 	blake2b-amd64-avx2.S blake2b-amd64-avx512.S \
 	blake2s-amd64-avx.S blake2s-amd64-avx512.S

 gost28147.lo: gost-sb.h
 gost-sb.h: gost-s-box$(EXEEXT_FOR_BUILD)
 	./gost-s-box$(EXEEXT_FOR_BUILD) $@

 gost-s-box$(EXEEXT_FOR_BUILD): gost-s-box.c
 	$(CC_FOR_BUILD) $(CFLAGS_FOR_BUILD) $(LDFLAGS_FOR_BUILD) \
 	    $(CPPFLAGS_FOR_BUILD) -o $@ $(srcdir)/gost-s-box.c

 if ENABLE_O_FLAG_MUNGING
 o_flag_munging = sed -e 's/-O\([2-9sg][2-9sg]*\)/-O1/' -e 's/-Ofast/-O1/g'
 else
 o_flag_munging = cat
 endif

 # We need to lower the optimization for this module.
 tiger.o: $(srcdir)/tiger.c Makefile
 	`echo $(COMPILE) -c $< | $(o_flag_munging) `
 tiger.lo: $(srcdir)/tiger.c Makefile
 	`echo $(LTCOMPILE) -c $< | $(o_flag_munging) `

 # We need to disable instrumentation for these modules as they use cc as
 # thin assembly front-end and do not tolerate in-between function calls
 # inserted by compiler as those functions may clobber the XMM registers.
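The hazard described in the comment above can be made concrete with a short C sketch (illustrative only, not part of this patch; the function and names are hypothetical). The affected modules keep live values in XMM registers *across* separate inline-asm statements, without declaring them as operands, so any helper call that `-fsanitize`/`-fprofile`/`-fcoverage` instrumentation inserts between the statements is free to clobber that state:

```c
#include <stdint.h>

/* Hypothetical sketch of the "cc as thin assembly front-end" pattern:
 * %xmm0 stays live between the two asm statements, invisibly to the
 * compiler.  Computes dst ^= src, 16 bytes at a time. */
static void
xor_block_16 (uint8_t *dst, const uint8_t *src)
{
  asm volatile ("movdqu %[src], %%xmm0"
                : /* no outputs */
                : [src] "m" (*(const uint8_t (*)[16])src)
                : "xmm0");
  /* An instrumentation call injected at this point by the compiler
   * would be free to overwrite %xmm0 and corrupt the result. */
  asm volatile ("movdqu %[dst], %%xmm1\n\t"
                "pxor %%xmm1, %%xmm0\n\t"
                "movdqu %%xmm0, %[dst]"
                : [dst] "+m" (*(uint8_t (*)[16])dst)
                :
                : "xmm0", "xmm1");
}
```

Stripping the instrumentation flags from the compile command, as the sed expressions below do, sidesteps this without disturbing the rest of the build flags.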
 if ENABLE_INSTRUMENTATION_MUNGING
 instrumentation_munging = sed \
 	-e 's/-fsanitize[=,\-][=,a-z,A-Z,0-9,\,,\-]*//g' \
 	-e 's/-fprofile[=,\-][=,a-z,A-Z,0-9,\,,\-]*//g' \
 	-e 's/-fcoverage[=,\-][=,a-z,A-Z,0-9,\,,\-]*//g'
 else
 instrumentation_munging = cat
 endif

 rijndael-aesni.o: $(srcdir)/rijndael-aesni.c Makefile
 	`echo $(COMPILE) -c $< | $(instrumentation_munging) `

 rijndael-aesni.lo: $(srcdir)/rijndael-aesni.c Makefile
 	`echo $(LTCOMPILE) -c $< | $(instrumentation_munging) `

 rijndael-ssse3-amd64.o: $(srcdir)/rijndael-ssse3-amd64.c Makefile
 	`echo $(COMPILE) -c $< | $(instrumentation_munging) `

 rijndael-ssse3-amd64.lo: $(srcdir)/rijndael-ssse3-amd64.c Makefile
 	`echo $(LTCOMPILE) -c $< | $(instrumentation_munging) `

 cipher-gcm-intel-pclmul.o: $(srcdir)/cipher-gcm-intel-pclmul.c Makefile
 	`echo $(COMPILE) -c $< | $(instrumentation_munging) `

 cipher-gcm-intel-pclmul.lo: $(srcdir)/cipher-gcm-intel-pclmul.c Makefile
 	`echo $(LTCOMPILE) -c $< | $(instrumentation_munging) `

 sha1-intel-shaext.o: $(srcdir)/sha1-intel-shaext.c Makefile
 	`echo $(COMPILE) -c $< | $(instrumentation_munging) `

 sha1-intel-shaext.lo: $(srcdir)/sha1-intel-shaext.c Makefile
 	`echo $(LTCOMPILE) -c $< | $(instrumentation_munging) `

 sha256-intel-shaext.o: $(srcdir)/sha256-intel-shaext.c Makefile
 	`echo $(COMPILE) -c $< | $(instrumentation_munging) `

 sha256-intel-shaext.lo: $(srcdir)/sha256-intel-shaext.c Makefile
 	`echo $(LTCOMPILE) -c $< | $(instrumentation_munging) `

 sha512-ssse3-i386.o: $(srcdir)/sha512-ssse3-i386.c Makefile
 	`echo $(COMPILE) -c $< | $(instrumentation_munging) `

 sha512-ssse3-i386.lo: $(srcdir)/sha512-ssse3-i386.c Makefile
 	`echo $(LTCOMPILE) -c $< | $(instrumentation_munging) `

 crc-intel-pclmul.o: $(srcdir)/crc-intel-pclmul.c Makefile
 	`echo $(COMPILE) -c $< | $(instrumentation_munging) `

 crc-intel-pclmul.lo: $(srcdir)/crc-intel-pclmul.c Makefile
 	`echo $(LTCOMPILE) -c $< | $(instrumentation_munging) `

 if ENABLE_PPC_VCRYPTO_EXTRA_CFLAGS
 ppc_vcrypto_cflags = -O2 -maltivec -mvsx -mcrypto
 else
 ppc_vcrypto_cflags =
 endif

 rijndael-ppc.o: $(srcdir)/rijndael-ppc.c Makefile
 	`echo $(COMPILE) $(ppc_vcrypto_cflags) -c $< | $(instrumentation_munging) `

 rijndael-ppc.lo: $(srcdir)/rijndael-ppc.c Makefile
 	`echo $(LTCOMPILE) $(ppc_vcrypto_cflags) -c $< | $(instrumentation_munging) `

 rijndael-ppc9le.o: $(srcdir)/rijndael-ppc9le.c Makefile
 	`echo $(COMPILE) $(ppc_vcrypto_cflags) -c $< | $(instrumentation_munging) `

 rijndael-ppc9le.lo: $(srcdir)/rijndael-ppc9le.c Makefile
 	`echo $(LTCOMPILE) $(ppc_vcrypto_cflags) -c $< | $(instrumentation_munging) `

 rijndael-p10le.o: $(srcdir)/rijndael-p10le.c Makefile
 	`echo $(COMPILE) $(ppc_vcrypto_cflags) -c $< | $(instrumentation_munging) `

 rijndael-p10le.lo: $(srcdir)/rijndael-p10le.c Makefile
 	`echo $(LTCOMPILE) $(ppc_vcrypto_cflags) -c $< | $(instrumentation_munging) `

 sha256-ppc.o: $(srcdir)/sha256-ppc.c Makefile
 	`echo $(COMPILE) $(ppc_vcrypto_cflags) -c $< | $(instrumentation_munging) `

 sha256-ppc.lo: $(srcdir)/sha256-ppc.c Makefile
 	`echo $(LTCOMPILE) $(ppc_vcrypto_cflags) -c $< | $(instrumentation_munging) `

 sha512-ppc.o: $(srcdir)/sha512-ppc.c Makefile
 	`echo $(COMPILE) $(ppc_vcrypto_cflags) -c $< | $(instrumentation_munging) `

 sha512-ppc.lo: $(srcdir)/sha512-ppc.c Makefile
 	`echo $(LTCOMPILE) $(ppc_vcrypto_cflags) -c $< | $(instrumentation_munging) `

 chacha20-ppc.o: $(srcdir)/chacha20-ppc.c Makefile
 	`echo $(COMPILE) $(ppc_vcrypto_cflags) -c $< | $(instrumentation_munging) `

 chacha20-ppc.lo: $(srcdir)/chacha20-ppc.c Makefile
 	`echo $(LTCOMPILE) $(ppc_vcrypto_cflags) -c $< | $(instrumentation_munging) `
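A note on the `$(ppc_vcrypto_cflags)` used by the rules above (and by the remaining PPC rules, which continue below): `-maltivec -mvsx -mcrypto` are what let GCC accept the POWER8 vector-crypto built-ins these C modules are written against. A minimal sketch, assuming GCC's documented `__builtin_crypto_vcipher` built-in (illustrative, not code from these modules):

```c
#include <altivec.h>

/* One AES encryption round on a 16-byte state via the POWER8
 * `vcipher` instruction.  Compiling this requires the extra
 * per-file flags set above: -maltivec -mvsx -mcrypto. */
typedef vector unsigned long long block128;

static inline block128
aes_enc_round (block128 state, block128 round_key)
{
  return __builtin_crypto_vcipher (state, round_key);
}
```

Applying these flags per-file rather than globally keeps the rest of libcipher buildable for PPC baselines that lack the vector-crypto ISA extension.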
 crc-ppc.o: $(srcdir)/crc-ppc.c Makefile
 	`echo $(COMPILE) $(ppc_vcrypto_cflags) -c $< | $(instrumentation_munging) `

 crc-ppc.lo: $(srcdir)/crc-ppc.c Makefile
 	`echo $(LTCOMPILE) $(ppc_vcrypto_cflags) -c $< | $(instrumentation_munging) `

 cipher-gcm-ppc.o: $(srcdir)/cipher-gcm-ppc.c Makefile
 	`echo $(COMPILE) $(ppc_vcrypto_cflags) -c $< | $(instrumentation_munging) `

 cipher-gcm-ppc.lo: $(srcdir)/cipher-gcm-ppc.c Makefile
 	`echo $(LTCOMPILE) $(ppc_vcrypto_cflags) -c $< | $(instrumentation_munging) `
diff --git a/cipher/aria-aesni-avx-amd64.S b/cipher/aria-aesni-avx-amd64.S
new file mode 100644
index 00000000..45b0b4a4
--- /dev/null
+++ b/cipher/aria-aesni-avx-amd64.S
@@ -0,0 +1,1439 @@
+/* aria-aesni-avx-amd64.S - AESNI/GFNI/AVX implementation of ARIA cipher
+ *
+ * Copyright (C) 2022-2023 Taehee Yoo <ap420073@gmail.com>
+ * Copyright (C) 2023 Jussi Kivilinna <jussi.kivilinna@iki.fi>
+ *
+ * This file is part of Libgcrypt.
+ *
+ * Libgcrypt is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * Libgcrypt is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <config.h>
+
+#ifdef __x86_64
+#if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \
+     defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && \
+    defined(ENABLE_AVX_SUPPORT) && defined(ENABLE_AESNI_SUPPORT)
+
+#include "asm-common-amd64.h"
+
+#ifdef ENABLE_GFNI_SUPPORT
+# define CONFIG_AS_GFNI 1
+#endif
+
+/* struct ARIA_context: */
+#define ARIA_BLOCK_SIZE 16
+#define ARIA_MAX_RD_KEYS 17
+#define ARIA_CTX_enc_key 0
+#define ARIA_CTX_dec_key (ARIA_CTX_enc_key + (ARIA_BLOCK_SIZE * ARIA_MAX_RD_KEYS))
+#define ARIA_CTX_rounds (ARIA_CTX_dec_key + (ARIA_BLOCK_SIZE * ARIA_MAX_RD_KEYS))
+
+/* register macros */
+#define CTX %rdi
+
+/* helper macros */
+#define STACK_DEPTH (2 * 8 + 16 * 16 + 15)
+
+#define BV8(a0, a1, a2, a3, a4, a5, a6, a7) \
+	( (((a0) & 1) << 0) | \
+	  (((a1) & 1) << 1) | \
+	  (((a2) & 1) << 2) | \
+	  (((a3) & 1) << 3) | \
+	  (((a4) & 1) << 4) | \
+	  (((a5) & 1) << 5) | \
+	  (((a6) & 1) << 6) | \
+	  (((a7) & 1) << 7) )
+
+#define BM8X8(l0, l1, l2, l3, l4, l5, l6, l7) \
+	( ((l7) << (0 * 8)) | \
+	  ((l6) << (1 * 8)) | \
+	  ((l5) << (2 * 8)) | \
+	  ((l4) << (3 * 8)) | \
+	  ((l3) << (4 * 8)) | \
+	  ((l2) << (5 * 8)) | \
+	  ((l1) << (6 * 8)) | \
+	  ((l0) << (7 * 8)) )
+
+/* asm macros */
+#define inc_le128(x, minus_one, tmp) \
+	vpcmpeqq minus_one, x, tmp; \
+	vpsubq minus_one, x, x; \
+	vpslldq $8, tmp, tmp; \
+	vpsubq tmp, x, x;
+
+#define filter_8bit(x, lo_t, hi_t, mask4bit, tmp0) \
+	vpand x, mask4bit, tmp0; \
+	vpandn x, mask4bit, x; \
+	vpsrld $4, x, x; \
+	\
+	vpshufb tmp0, lo_t, tmp0; \
+	vpshufb x, hi_t, x; \
+	vpxor tmp0, x, x;
+
+#define transpose_4x4(x0, x1, x2, x3, t1, t2) \
+	vpunpckhdq x1, x0, t2; \
+	vpunpckldq x1, x0, x0; \
+	\
+	vpunpckldq x3, x2, t1; \
+	vpunpckhdq x3, x2, x2; \
+	\
+	vpunpckhqdq t1, x0, x1; \
+	vpunpcklqdq t1, x0, x0; \
+	\
+	vpunpckhqdq x2, t2, x3; \
+	vpunpcklqdq x2, t2, x2;
+
+#define byteslice_16x16b(a0, b0, c0, d0, \
+			 a1, b1, c1, d1, \
+			 a2, b2, c2, d2, \
+			 a3, b3, c3, d3, \
+			 st0, st1) \
+	vmovdqu d2, st0; \
+	vmovdqu d3, st1; \
transpose_4x4(a0, a1, a2, a3, d2, d3); \ + transpose_4x4(b0, b1, b2, b3, d2, d3); \ + vmovdqu st0, d2; \ + vmovdqu st1, d3; \ + \ + vmovdqu a0, st0; \ + vmovdqu a1, st1; \ + transpose_4x4(c0, c1, c2, c3, a0, a1); \ + transpose_4x4(d0, d1, d2, d3, a0, a1); \ + \ + vmovdqu .Lshufb_16x16b rRIP, a0; \ + vmovdqu st1, a1; \ + vpshufb a0, a2, a2; \ + vpshufb a0, a3, a3; \ + vpshufb a0, b0, b0; \ + vpshufb a0, b1, b1; \ + vpshufb a0, b2, b2; \ + vpshufb a0, b3, b3; \ + vpshufb a0, a1, a1; \ + vpshufb a0, c0, c0; \ + vpshufb a0, c1, c1; \ + vpshufb a0, c2, c2; \ + vpshufb a0, c3, c3; \ + vpshufb a0, d0, d0; \ + vpshufb a0, d1, d1; \ + vpshufb a0, d2, d2; \ + vpshufb a0, d3, d3; \ + vmovdqu d3, st1; \ + vmovdqu st0, d3; \ + vpshufb a0, d3, a0; \ + vmovdqu d2, st0; \ + \ + transpose_4x4(a0, b0, c0, d0, d2, d3); \ + transpose_4x4(a1, b1, c1, d1, d2, d3); \ + vmovdqu st0, d2; \ + vmovdqu st1, d3; \ + \ + vmovdqu b0, st0; \ + vmovdqu b1, st1; \ + transpose_4x4(a2, b2, c2, d2, b0, b1); \ + transpose_4x4(a3, b3, c3, d3, b0, b1); \ + vmovdqu st0, b0; \ + vmovdqu st1, b1; \ + /* does not adjust output bytes inside vectors */ + +#define debyteslice_16x16b(a0, b0, c0, d0, \ + a1, b1, c1, d1, \ + a2, b2, c2, d2, \ + a3, b3, c3, d3, \ + st0, st1) \ + vmovdqu d2, st0; \ + vmovdqu d3, st1; \ + transpose_4x4(a0, a1, a2, a3, d2, d3); \ + transpose_4x4(b0, b1, b2, b3, d2, d3); \ + vmovdqu st0, d2; \ + vmovdqu st1, d3; \ + \ + vmovdqu a0, st0; \ + vmovdqu a1, st1; \ + transpose_4x4(c0, c1, c2, c3, a0, a1); \ + transpose_4x4(d0, d1, d2, d3, a0, a1); \ + \ + vmovdqu .Lshufb_16x16b rRIP, a0; \ + vmovdqu st1, a1; \ + vpshufb a0, a2, a2; \ + vpshufb a0, a3, a3; \ + vpshufb a0, b0, b0; \ + vpshufb a0, b1, b1; \ + vpshufb a0, b2, b2; \ + vpshufb a0, b3, b3; \ + vpshufb a0, a1, a1; \ + vpshufb a0, c0, c0; \ + vpshufb a0, c1, c1; \ + vpshufb a0, c2, c2; \ + vpshufb a0, c3, c3; \ + vpshufb a0, d0, d0; \ + vpshufb a0, d1, d1; \ + vpshufb a0, d2, d2; \ + vpshufb a0, d3, d3; \ + vmovdqu d3, st1; \ + vmovdqu st0, d3; \ + vpshufb a0, d3, a0; \ + vmovdqu d2, st0; \ + \ + transpose_4x4(c0, d0, a0, b0, d2, d3); \ + transpose_4x4(c1, d1, a1, b1, d2, d3); \ + vmovdqu st0, d2; \ + vmovdqu st1, d3; \ + \ + vmovdqu b0, st0; \ + vmovdqu b1, st1; \ + transpose_4x4(c2, d2, a2, b2, b0, b1); \ + transpose_4x4(c3, d3, a3, b3, b0, b1); \ + vmovdqu st0, b0; \ + vmovdqu st1, b1; \ + /* does not adjust output bytes inside vectors */ + +/* load blocks to registers */ +#define inpack16_pre(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + y0, y1, y2, y3, \ + y4, y5, y6, y7, \ + rio) \ + vmovdqu (0 * 16)(rio), x0; \ + vmovdqu (1 * 16)(rio), x1; \ + vmovdqu (2 * 16)(rio), x2; \ + vmovdqu (3 * 16)(rio), x3; \ + vmovdqu (4 * 16)(rio), x4; \ + vmovdqu (5 * 16)(rio), x5; \ + vmovdqu (6 * 16)(rio), x6; \ + vmovdqu (7 * 16)(rio), x7; \ + vmovdqu (8 * 16)(rio), y0; \ + vmovdqu (9 * 16)(rio), y1; \ + vmovdqu (10 * 16)(rio), y2; \ + vmovdqu (11 * 16)(rio), y3; \ + vmovdqu (12 * 16)(rio), y4; \ + vmovdqu (13 * 16)(rio), y5; \ + vmovdqu (14 * 16)(rio), y6; \ + vmovdqu (15 * 16)(rio), y7; + +/* byteslice blocks and store to temporary memory */ +#define inpack16_post(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + y0, y1, y2, y3, \ + y4, y5, y6, y7, \ + mem_ab, mem_cd) \ + byteslice_16x16b(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + y0, y1, y2, y3, \ + y4, y5, y6, y7, \ + (mem_ab), (mem_cd)); \ + \ + vmovdqu x0, 0 * 16(mem_ab); \ + vmovdqu x1, 1 * 16(mem_ab); \ + vmovdqu x2, 2 * 16(mem_ab); \ + vmovdqu x3, 3 * 16(mem_ab); \ + vmovdqu x4, 4 * 16(mem_ab); \ + vmovdqu x5, 5 * 16(mem_ab); \ 
+ vmovdqu x6, 6 * 16(mem_ab); \ + vmovdqu x7, 7 * 16(mem_ab); \ + vmovdqu y0, 0 * 16(mem_cd); \ + vmovdqu y1, 1 * 16(mem_cd); \ + vmovdqu y2, 2 * 16(mem_cd); \ + vmovdqu y3, 3 * 16(mem_cd); \ + vmovdqu y4, 4 * 16(mem_cd); \ + vmovdqu y5, 5 * 16(mem_cd); \ + vmovdqu y6, 6 * 16(mem_cd); \ + vmovdqu y7, 7 * 16(mem_cd); + +#define write_output(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + y0, y1, y2, y3, \ + y4, y5, y6, y7, \ + mem) \ + vmovdqu x0, 0 * 16(mem); \ + vmovdqu x1, 1 * 16(mem); \ + vmovdqu x2, 2 * 16(mem); \ + vmovdqu x3, 3 * 16(mem); \ + vmovdqu x4, 4 * 16(mem); \ + vmovdqu x5, 5 * 16(mem); \ + vmovdqu x6, 6 * 16(mem); \ + vmovdqu x7, 7 * 16(mem); \ + vmovdqu y0, 8 * 16(mem); \ + vmovdqu y1, 9 * 16(mem); \ + vmovdqu y2, 10 * 16(mem); \ + vmovdqu y3, 11 * 16(mem); \ + vmovdqu y4, 12 * 16(mem); \ + vmovdqu y5, 13 * 16(mem); \ + vmovdqu y6, 14 * 16(mem); \ + vmovdqu y7, 15 * 16(mem); + +#define vload_if_enough_nblks(blk_offs, rnblks, rio, v) \ + vpxor v, v, v; \ + cmp $(blk_offs), rnblks; \ + jbe 1f; \ + vmovdqu (blk_offs * 16)(rio), v; \ + 1:; + +#define vstore_if_enough_nblks(blk_offs, rnblks, mem, v)\ + cmp $(blk_offs), rnblks; \ + jbe 1f; \ + vmovdqu v, (blk_offs * 16)(mem); \ + 1:; + +#define inpack_1_15_pre(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + y0, y1, y2, y3, \ + y4, y5, y6, y7, \ + rio, rnblks) \ + vmovdqu (0 * 16)(rio), x0; \ + vload_if_enough_nblks(1, rnblks, rio, x1); \ + vload_if_enough_nblks(2, rnblks, rio, x2); \ + vload_if_enough_nblks(3, rnblks, rio, x3); \ + vload_if_enough_nblks(4, rnblks, rio, x4); \ + vload_if_enough_nblks(5, rnblks, rio, x5); \ + vload_if_enough_nblks(6, rnblks, rio, x6); \ + vload_if_enough_nblks(7, rnblks, rio, x7); \ + vload_if_enough_nblks(8, rnblks, rio, y0); \ + vload_if_enough_nblks(9, rnblks, rio, y1); \ + vload_if_enough_nblks(10, rnblks, rio, y2); \ + vload_if_enough_nblks(11, rnblks, rio, y3); \ + vload_if_enough_nblks(12, rnblks, rio, y4); \ + vload_if_enough_nblks(13, rnblks, rio, y5); \ + vload_if_enough_nblks(14, rnblks, rio, y6); \ + vpxor y7, y7, y7; + +#define write_output_1_15(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + y0, y1, y2, y3, \ + y4, y5, y6, y7, \ + mem, rnblks) \ + vmovdqu x0, (0 * 16)(mem); \ + vstore_if_enough_nblks(1, rnblks, mem, x1); \ + vstore_if_enough_nblks(2, rnblks, mem, x2); \ + vstore_if_enough_nblks(3, rnblks, mem, x3); \ + vstore_if_enough_nblks(4, rnblks, mem, x4); \ + vstore_if_enough_nblks(5, rnblks, mem, x5); \ + vstore_if_enough_nblks(6, rnblks, mem, x6); \ + vstore_if_enough_nblks(7, rnblks, mem, x7); \ + vstore_if_enough_nblks(8, rnblks, mem, y0); \ + vstore_if_enough_nblks(9, rnblks, mem, y1); \ + vstore_if_enough_nblks(10, rnblks, mem, y2); \ + vstore_if_enough_nblks(11, rnblks, mem, y3); \ + vstore_if_enough_nblks(12, rnblks, mem, y4); \ + vstore_if_enough_nblks(13, rnblks, mem, y5); \ + vstore_if_enough_nblks(14, rnblks, mem, y6); + +#define aria_store_state_8way(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + mem_tmp, idx) \ + vmovdqu x0, ((idx + 0) * 16)(mem_tmp); \ + vmovdqu x1, ((idx + 1) * 16)(mem_tmp); \ + vmovdqu x2, ((idx + 2) * 16)(mem_tmp); \ + vmovdqu x3, ((idx + 3) * 16)(mem_tmp); \ + vmovdqu x4, ((idx + 4) * 16)(mem_tmp); \ + vmovdqu x5, ((idx + 5) * 16)(mem_tmp); \ + vmovdqu x6, ((idx + 6) * 16)(mem_tmp); \ + vmovdqu x7, ((idx + 7) * 16)(mem_tmp); + +#define aria_load_state_8way(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + mem_tmp, idx) \ + vmovdqu ((idx + 0) * 16)(mem_tmp), x0; \ + vmovdqu ((idx + 1) * 16)(mem_tmp), x1; \ + vmovdqu ((idx + 2) * 16)(mem_tmp), x2; \ + vmovdqu ((idx + 3) * 
16)(mem_tmp), x3; \ + vmovdqu ((idx + 4) * 16)(mem_tmp), x4; \ + vmovdqu ((idx + 5) * 16)(mem_tmp), x5; \ + vmovdqu ((idx + 6) * 16)(mem_tmp), x6; \ + vmovdqu ((idx + 7) * 16)(mem_tmp), x7; + +#define aria_ark_8way(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + t0, t1, t2, rk, \ + idx, round) \ + /* AddRoundKey */ \ + vbroadcastss ((round * 16) + idx + 0)(rk), t0; \ + vpsrld $24, t0, t2; \ + vpshufb t1, t2, t2; \ + vpxor t2, x0, x0; \ + vpsrld $16, t0, t2; \ + vpshufb t1, t2, t2; \ + vpxor t2, x1, x1; \ + vpsrld $8, t0, t2; \ + vpshufb t1, t2, t2; \ + vpxor t2, x2, x2; \ + vpshufb t1, t0, t2; \ + vpxor t2, x3, x3; \ + vbroadcastss ((round * 16) + idx + 4)(rk), t0; \ + vpsrld $24, t0, t2; \ + vpshufb t1, t2, t2; \ + vpxor t2, x4, x4; \ + vpsrld $16, t0, t2; \ + vpshufb t1, t2, t2; \ + vpxor t2, x5, x5; \ + vpsrld $8, t0, t2; \ + vpshufb t1, t2, t2; \ + vpxor t2, x6, x6; \ + vpshufb t1, t0, t2; \ + vpxor t2, x7, x7; + +#ifdef CONFIG_AS_GFNI +#define aria_sbox_8way_gfni(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + t0, t1, t2, t3, \ + t4, t5, t6, t7) \ + vmovddup .Ltf_s2_bitmatrix rRIP, t0; \ + vmovddup .Ltf_inv_bitmatrix rRIP, t1; \ + vmovddup .Ltf_id_bitmatrix rRIP, t2; \ + vmovddup .Ltf_aff_bitmatrix rRIP, t3; \ + vmovddup .Ltf_x2_bitmatrix rRIP, t4; \ + vgf2p8affineinvqb $(tf_s2_const), t0, x1, x1; \ + vgf2p8affineinvqb $(tf_s2_const), t0, x5, x5; \ + vgf2p8affineqb $(tf_inv_const), t1, x2, x2; \ + vgf2p8affineqb $(tf_inv_const), t1, x6, x6; \ + vgf2p8affineinvqb $0, t2, x2, x2; \ + vgf2p8affineinvqb $0, t2, x6, x6; \ + vgf2p8affineinvqb $(tf_aff_const), t3, x0, x0; \ + vgf2p8affineinvqb $(tf_aff_const), t3, x4, x4; \ + vgf2p8affineqb $(tf_x2_const), t4, x3, x3; \ + vgf2p8affineqb $(tf_x2_const), t4, x7, x7; \ + vgf2p8affineinvqb $0, t2, x3, x3; \ + vgf2p8affineinvqb $0, t2, x7, x7 +#endif /* CONFIG_AS_GFNI */ + +#define aria_sbox_8way(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + t0, t1, t2, t3, \ + t4, t5, t6, t7) \ + vmovdqa .Linv_shift_row rRIP, t0; \ + vmovdqa .Lshift_row rRIP, t1; \ + vbroadcastss .L0f0f0f0f rRIP, t6; \ + vmovdqa .Ltf_lo__inv_aff__and__s2 rRIP, t2; \ + vmovdqa .Ltf_hi__inv_aff__and__s2 rRIP, t3; \ + vmovdqa .Ltf_lo__x2__and__fwd_aff rRIP, t4; \ + vmovdqa .Ltf_hi__x2__and__fwd_aff rRIP, t5; \ + \ + vaesenclast t7, x0, x0; \ + vaesenclast t7, x4, x4; \ + vaesenclast t7, x1, x1; \ + vaesenclast t7, x5, x5; \ + vaesdeclast t7, x2, x2; \ + vaesdeclast t7, x6, x6; \ + \ + /* AES inverse shift rows */ \ + vpshufb t0, x0, x0; \ + vpshufb t0, x4, x4; \ + vpshufb t0, x1, x1; \ + vpshufb t0, x5, x5; \ + vpshufb t1, x3, x3; \ + vpshufb t1, x7, x7; \ + vpshufb t1, x2, x2; \ + vpshufb t1, x6, x6; \ + \ + /* affine transformation for S2 */ \ + filter_8bit(x1, t2, t3, t6, t0); \ + /* affine transformation for S2 */ \ + filter_8bit(x5, t2, t3, t6, t0); \ + \ + /* affine transformation for X2 */ \ + filter_8bit(x3, t4, t5, t6, t0); \ + /* affine transformation for X2 */ \ + filter_8bit(x7, t4, t5, t6, t0); \ + vaesdeclast t7, x3, x3; \ + vaesdeclast t7, x7, x7; + +#define aria_diff_m(x0, x1, x2, x3, \ + t0, t1, t2, t3) \ + /* T = rotr32(X, 8); */ \ + /* X ^= T */ \ + vpxor x0, x3, t0; \ + vpxor x1, x0, t1; \ + vpxor x2, x1, t2; \ + vpxor x3, x2, t3; \ + /* X = T ^ rotr(X, 16); */ \ + vpxor t2, x0, x0; \ + vpxor x1, t3, t3; \ + vpxor t0, x2, x2; \ + vpxor t1, x3, x1; \ + vmovdqu t3, x3; + +#define aria_diff_word(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + y0, y1, y2, y3, \ + y4, y5, y6, y7) \ + /* t1 ^= t2; */ \ + vpxor y0, x4, x4; \ + vpxor y1, x5, x5; \ + vpxor y2, x6, x6; \ + vpxor y3, x7, x7; \ + \ + /* 
t2 ^= t3; */ \ + vpxor y4, y0, y0; \ + vpxor y5, y1, y1; \ + vpxor y6, y2, y2; \ + vpxor y7, y3, y3; \ + \ + /* t0 ^= t1; */ \ + vpxor x4, x0, x0; \ + vpxor x5, x1, x1; \ + vpxor x6, x2, x2; \ + vpxor x7, x3, x3; \ + \ + /* t3 ^= t1; */ \ + vpxor x4, y4, y4; \ + vpxor x5, y5, y5; \ + vpxor x6, y6, y6; \ + vpxor x7, y7, y7; \ + \ + /* t2 ^= t0; */ \ + vpxor x0, y0, y0; \ + vpxor x1, y1, y1; \ + vpxor x2, y2, y2; \ + vpxor x3, y3, y3; \ + \ + /* t1 ^= t2; */ \ + vpxor y0, x4, x4; \ + vpxor y1, x5, x5; \ + vpxor y2, x6, x6; \ + vpxor y3, x7, x7; + +#define aria_fe(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + y0, y1, y2, y3, \ + y4, y5, y6, y7, \ + mem_tmp, rk, round) \ + vpxor y7, y7, y7; \ + aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ + y0, y7, y2, rk, 8, round); \ + \ + aria_sbox_8way(x2, x3, x0, x1, x6, x7, x4, x5, \ + y0, y1, y2, y3, y4, y5, y6, y7); \ + \ + aria_diff_m(x0, x1, x2, x3, y0, y1, y2, y3); \ + aria_diff_m(x4, x5, x6, x7, y0, y1, y2, y3); \ + aria_store_state_8way(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + mem_tmp, 8); \ + \ + aria_load_state_8way(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + mem_tmp, 0); \ + aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ + y0, y7, y2, rk, 0, round); \ + \ + aria_sbox_8way(x2, x3, x0, x1, x6, x7, x4, x5, \ + y0, y1, y2, y3, y4, y5, y6, y7); \ + \ + aria_diff_m(x0, x1, x2, x3, y0, y1, y2, y3); \ + aria_diff_m(x4, x5, x6, x7, y0, y1, y2, y3); \ + aria_store_state_8way(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + mem_tmp, 0); \ + aria_load_state_8way(y0, y1, y2, y3, \ + y4, y5, y6, y7, \ + mem_tmp, 8); \ + aria_diff_word(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + y0, y1, y2, y3, \ + y4, y5, y6, y7); \ + /* aria_diff_byte() \ + * T3 = ABCD -> BADC \ + * T3 = y4, y5, y6, y7 -> y5, y4, y7, y6 \ + * T0 = ABCD -> CDAB \ + * T0 = x0, x1, x2, x3 -> x2, x3, x0, x1 \ + * T1 = ABCD -> DCBA \ + * T1 = x4, x5, x6, x7 -> x7, x6, x5, x4 \ + */ \ + aria_diff_word(x2, x3, x0, x1, \ + x7, x6, x5, x4, \ + y0, y1, y2, y3, \ + y5, y4, y7, y6); \ + aria_store_state_8way(x3, x2, x1, x0, \ + x6, x7, x4, x5, \ + mem_tmp, 0); + +#define aria_fo(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + y0, y1, y2, y3, \ + y4, y5, y6, y7, \ + mem_tmp, rk, round) \ + vpxor y7, y7, y7; \ + aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ + y0, y7, y2, rk, 8, round); \ + \ + aria_sbox_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ + y0, y1, y2, y3, y4, y5, y6, y7); \ + \ + aria_diff_m(x0, x1, x2, x3, y0, y1, y2, y3); \ + aria_diff_m(x4, x5, x6, x7, y0, y1, y2, y3); \ + aria_store_state_8way(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + mem_tmp, 8); \ + \ + aria_load_state_8way(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + mem_tmp, 0); \ + aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ + y0, y7, y2, rk, 0, round); \ + \ + aria_sbox_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ + y0, y1, y2, y3, y4, y5, y6, y7); \ + \ + aria_diff_m(x0, x1, x2, x3, y0, y1, y2, y3); \ + aria_diff_m(x4, x5, x6, x7, y0, y1, y2, y3); \ + aria_store_state_8way(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + mem_tmp, 0); \ + aria_load_state_8way(y0, y1, y2, y3, \ + y4, y5, y6, y7, \ + mem_tmp, 8); \ + aria_diff_word(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + y0, y1, y2, y3, \ + y4, y5, y6, y7); \ + /* aria_diff_byte() \ + * T1 = ABCD -> BADC \ + * T1 = x4, x5, x6, x7 -> x5, x4, x7, x6 \ + * T2 = ABCD -> CDAB \ + * T2 = y0, y1, y2, y3, -> y2, y3, y0, y1 \ + * T3 = ABCD -> DCBA \ + * T3 = y4, y5, y6, y7 -> y7, y6, y5, y4 \ + */ \ + aria_diff_word(x0, x1, x2, x3, \ + x5, x4, x7, x6, \ + y2, y3, y0, y1, \ + y7, y6, y5, y4); \ + aria_store_state_8way(x3, x2, x1, x0, \ + x6, x7, 
x4, x5, \ + mem_tmp, 0); + +#define aria_ff(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + y0, y1, y2, y3, \ + y4, y5, y6, y7, \ + mem_tmp, rk, round, last_round) \ + vpxor y7, y7, y7; \ + aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ + y0, y7, y2, rk, 8, round); \ + \ + aria_sbox_8way(x2, x3, x0, x1, x6, x7, x4, x5, \ + y0, y1, y2, y3, y4, y5, y6, y7); \ + \ + aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ + y0, y7, y2, rk, 8, last_round); \ + \ + aria_store_state_8way(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + mem_tmp, 8); \ + \ + aria_load_state_8way(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + mem_tmp, 0); \ + aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ + y0, y7, y2, rk, 0, round); \ + \ + aria_sbox_8way(x2, x3, x0, x1, x6, x7, x4, x5, \ + y0, y1, y2, y3, y4, y5, y6, y7); \ + \ + aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ + y0, y7, y2, rk, 0, last_round); \ + \ + aria_load_state_8way(y0, y1, y2, y3, \ + y4, y5, y6, y7, \ + mem_tmp, 8); + +#ifdef CONFIG_AS_GFNI +#define aria_fe_gfni(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + y0, y1, y2, y3, \ + y4, y5, y6, y7, \ + mem_tmp, rk, round) \ + vpxor y7, y7, y7; \ + aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ + y0, y7, y2, rk, 8, round); \ + \ + aria_sbox_8way_gfni(x2, x3, x0, x1, \ + x6, x7, x4, x5, \ + y0, y1, y2, y3, \ + y4, y5, y6, y7); \ + \ + aria_diff_m(x0, x1, x2, x3, y0, y1, y2, y3); \ + aria_diff_m(x4, x5, x6, x7, y0, y1, y2, y3); \ + aria_store_state_8way(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + mem_tmp, 8); \ + \ + aria_load_state_8way(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + mem_tmp, 0); \ + aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ + y0, y7, y2, rk, 0, round); \ + \ + aria_sbox_8way_gfni(x2, x3, x0, x1, \ + x6, x7, x4, x5, \ + y0, y1, y2, y3, \ + y4, y5, y6, y7); \ + \ + aria_diff_m(x0, x1, x2, x3, y0, y1, y2, y3); \ + aria_diff_m(x4, x5, x6, x7, y0, y1, y2, y3); \ + aria_store_state_8way(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + mem_tmp, 0); \ + aria_load_state_8way(y0, y1, y2, y3, \ + y4, y5, y6, y7, \ + mem_tmp, 8); \ + aria_diff_word(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + y0, y1, y2, y3, \ + y4, y5, y6, y7); \ + /* aria_diff_byte() \ + * T3 = ABCD -> BADC \ + * T3 = y4, y5, y6, y7 -> y5, y4, y7, y6 \ + * T0 = ABCD -> CDAB \ + * T0 = x0, x1, x2, x3 -> x2, x3, x0, x1 \ + * T1 = ABCD -> DCBA \ + * T1 = x4, x5, x6, x7 -> x7, x6, x5, x4 \ + */ \ + aria_diff_word(x2, x3, x0, x1, \ + x7, x6, x5, x4, \ + y0, y1, y2, y3, \ + y5, y4, y7, y6); \ + aria_store_state_8way(x3, x2, x1, x0, \ + x6, x7, x4, x5, \ + mem_tmp, 0); + +#define aria_fo_gfni(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + y0, y1, y2, y3, \ + y4, y5, y6, y7, \ + mem_tmp, rk, round) \ + vpxor y7, y7, y7; \ + aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ + y0, y7, y2, rk, 8, round); \ + \ + aria_sbox_8way_gfni(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + y0, y1, y2, y3, \ + y4, y5, y6, y7); \ + \ + aria_diff_m(x0, x1, x2, x3, y0, y1, y2, y3); \ + aria_diff_m(x4, x5, x6, x7, y0, y1, y2, y3); \ + aria_store_state_8way(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + mem_tmp, 8); \ + \ + aria_load_state_8way(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + mem_tmp, 0); \ + aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ + y0, y7, y2, rk, 0, round); \ + \ + aria_sbox_8way_gfni(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + y0, y1, y2, y3, \ + y4, y5, y6, y7); \ + \ + aria_diff_m(x0, x1, x2, x3, y0, y1, y2, y3); \ + aria_diff_m(x4, x5, x6, x7, y0, y1, y2, y3); \ + aria_store_state_8way(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + mem_tmp, 0); \ + aria_load_state_8way(y0, y1, y2, y3, \ + y4, y5, y6, y7, \ + 
mem_tmp, 8); \ + aria_diff_word(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + y0, y1, y2, y3, \ + y4, y5, y6, y7); \ + /* aria_diff_byte() \ + * T1 = ABCD -> BADC \ + * T1 = x4, x5, x6, x7 -> x5, x4, x7, x6 \ + * T2 = ABCD -> CDAB \ + * T2 = y0, y1, y2, y3, -> y2, y3, y0, y1 \ + * T3 = ABCD -> DCBA \ + * T3 = y4, y5, y6, y7 -> y7, y6, y5, y4 \ + */ \ + aria_diff_word(x0, x1, x2, x3, \ + x5, x4, x7, x6, \ + y2, y3, y0, y1, \ + y7, y6, y5, y4); \ + aria_store_state_8way(x3, x2, x1, x0, \ + x6, x7, x4, x5, \ + mem_tmp, 0); + +#define aria_ff_gfni(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + y0, y1, y2, y3, \ + y4, y5, y6, y7, \ + mem_tmp, rk, round, last_round) \ + vpxor y7, y7, y7; \ + aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ + y0, y7, y2, rk, 8, round); \ + \ + aria_sbox_8way_gfni(x2, x3, x0, x1, \ + x6, x7, x4, x5, \ + y0, y1, y2, y3, \ + y4, y5, y6, y7); \ + \ + aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ + y0, y7, y2, rk, 8, last_round); \ + \ + aria_store_state_8way(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + mem_tmp, 8); \ + \ + aria_load_state_8way(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + mem_tmp, 0); \ + aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ + y0, y7, y2, rk, 0, round); \ + \ + aria_sbox_8way_gfni(x2, x3, x0, x1, \ + x6, x7, x4, x5, \ + y0, y1, y2, y3, \ + y4, y5, y6, y7); \ + \ + aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ + y0, y7, y2, rk, 0, last_round); \ + \ + aria_load_state_8way(y0, y1, y2, y3, \ + y4, y5, y6, y7, \ + mem_tmp, 8); + +#endif /* CONFIG_AS_GFNI */ + + +SECTION_RODATA +.align 16 + +#define SHUFB_BYTES(idx) \ + 0 + (idx), 4 + (idx), 8 + (idx), 12 + (idx) + +.Lshufb_16x16b: + .byte SHUFB_BYTES(0), SHUFB_BYTES(1), SHUFB_BYTES(2), SHUFB_BYTES(3); +/* For isolating SubBytes from AESENCLAST, inverse shift row */ +.Linv_shift_row: + .byte 0x00, 0x0d, 0x0a, 0x07, 0x04, 0x01, 0x0e, 0x0b + .byte 0x08, 0x05, 0x02, 0x0f, 0x0c, 0x09, 0x06, 0x03 +.Lshift_row: + .byte 0x00, 0x05, 0x0a, 0x0f, 0x04, 0x09, 0x0e, 0x03 + .byte 0x08, 0x0d, 0x02, 0x07, 0x0c, 0x01, 0x06, 0x0b +/* For CTR-mode IV byteswap */ +.Lbswap128_mask: + .byte 0x0f, 0x0e, 0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x08 + .byte 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00 + +/* AES inverse affine and S2 combined: + * 1 1 0 0 0 0 0 1 x0 0 + * 0 1 0 0 1 0 0 0 x1 0 + * 1 1 0 0 1 1 1 1 x2 0 + * 0 1 1 0 1 0 0 1 x3 1 + * 0 1 0 0 1 1 0 0 * x4 + 0 + * 0 1 0 1 1 0 0 0 x5 0 + * 0 0 0 0 0 1 0 1 x6 0 + * 1 1 1 0 0 1 1 1 x7 1 + */ +.Ltf_lo__inv_aff__and__s2: + .octa 0x92172DA81A9FA520B2370D883ABF8500 +.Ltf_hi__inv_aff__and__s2: + .octa 0x2B15FFC1AF917B45E6D8320C625CB688 + +/* X2 and AES forward affine combined: + * 1 0 1 1 0 0 0 1 x0 0 + * 0 1 1 1 1 0 1 1 x1 0 + * 0 0 0 1 1 0 1 0 x2 1 + * 0 1 0 0 0 1 0 0 x3 0 + * 0 0 1 1 1 0 1 1 * x4 + 0 + * 0 1 0 0 1 0 0 0 x5 0 + * 1 1 0 1 0 0 1 1 x6 0 + * 0 1 0 0 1 0 1 0 x7 0 + */ +.Ltf_lo__x2__and__fwd_aff: + .octa 0xEFAE0544FCBD1657B8F95213ABEA4100 +.Ltf_hi__x2__and__fwd_aff: + .octa 0x3F893781E95FE1576CDA64D2BA0CB204 + +.Lbige_addb_1: + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 +.Lbige_addb_2: + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2 +.Lbige_addb_3: + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3 +.Lbige_addb_4: + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4 +.Lbige_addb_5: + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5 +.Lbige_addb_6: + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6 +.Lbige_addb_7: + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7 +.Lbige_addb_8: + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8 +.Lbige_addb_9: 
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9 +.Lbige_addb_10: + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10 +.Lbige_addb_11: + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11 +.Lbige_addb_12: + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12 +.Lbige_addb_13: + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 13 +.Lbige_addb_14: + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 14 +.Lbige_addb_15: + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15 + +#ifdef CONFIG_AS_GFNI +.align 8 +/* AES affine: */ +#define tf_aff_const BV8(1, 1, 0, 0, 0, 1, 1, 0) +.Ltf_aff_bitmatrix: + .quad BM8X8(BV8(1, 0, 0, 0, 1, 1, 1, 1), + BV8(1, 1, 0, 0, 0, 1, 1, 1), + BV8(1, 1, 1, 0, 0, 0, 1, 1), + BV8(1, 1, 1, 1, 0, 0, 0, 1), + BV8(1, 1, 1, 1, 1, 0, 0, 0), + BV8(0, 1, 1, 1, 1, 1, 0, 0), + BV8(0, 0, 1, 1, 1, 1, 1, 0), + BV8(0, 0, 0, 1, 1, 1, 1, 1)) + +/* AES inverse affine: */ +#define tf_inv_const BV8(1, 0, 1, 0, 0, 0, 0, 0) +.Ltf_inv_bitmatrix: + .quad BM8X8(BV8(0, 0, 1, 0, 0, 1, 0, 1), + BV8(1, 0, 0, 1, 0, 0, 1, 0), + BV8(0, 1, 0, 0, 1, 0, 0, 1), + BV8(1, 0, 1, 0, 0, 1, 0, 0), + BV8(0, 1, 0, 1, 0, 0, 1, 0), + BV8(0, 0, 1, 0, 1, 0, 0, 1), + BV8(1, 0, 0, 1, 0, 1, 0, 0), + BV8(0, 1, 0, 0, 1, 0, 1, 0)) + +/* S2: */ +#define tf_s2_const BV8(0, 1, 0, 0, 0, 1, 1, 1) +.Ltf_s2_bitmatrix: + .quad BM8X8(BV8(0, 1, 0, 1, 0, 1, 1, 1), + BV8(0, 0, 1, 1, 1, 1, 1, 1), + BV8(1, 1, 1, 0, 1, 1, 0, 1), + BV8(1, 1, 0, 0, 0, 0, 1, 1), + BV8(0, 1, 0, 0, 0, 0, 1, 1), + BV8(1, 1, 0, 0, 1, 1, 1, 0), + BV8(0, 1, 1, 0, 0, 0, 1, 1), + BV8(1, 1, 1, 1, 0, 1, 1, 0)) + +/* X2: */ +#define tf_x2_const BV8(0, 0, 1, 1, 0, 1, 0, 0) +.Ltf_x2_bitmatrix: + .quad BM8X8(BV8(0, 0, 0, 1, 1, 0, 0, 0), + BV8(0, 0, 1, 0, 0, 1, 1, 0), + BV8(0, 0, 0, 0, 1, 0, 1, 0), + BV8(1, 1, 1, 0, 0, 0, 1, 1), + BV8(1, 1, 1, 0, 1, 1, 0, 0), + BV8(0, 1, 1, 0, 1, 0, 1, 1), + BV8(1, 0, 1, 1, 1, 1, 0, 1), + BV8(1, 0, 0, 1, 0, 0, 1, 1)) + +/* Identity matrix: */ +.Ltf_id_bitmatrix: + .quad BM8X8(BV8(1, 0, 0, 0, 0, 0, 0, 0), + BV8(0, 1, 0, 0, 0, 0, 0, 0), + BV8(0, 0, 1, 0, 0, 0, 0, 0), + BV8(0, 0, 0, 1, 0, 0, 0, 0), + BV8(0, 0, 0, 0, 1, 0, 0, 0), + BV8(0, 0, 0, 0, 0, 1, 0, 0), + BV8(0, 0, 0, 0, 0, 0, 1, 0), + BV8(0, 0, 0, 0, 0, 0, 0, 1)) +#endif /* CONFIG_AS_GFNI */ + +/* 4-bit mask */ +.align 4 +.L0f0f0f0f: + .long 0x0f0f0f0f + +.text + +.align 16 +ELF(.type __aria_aesni_avx_crypt_16way,@function;) +__aria_aesni_avx_crypt_16way: + /* input: + * %r9: rk + * %rsi: dst + * %rdx: src + * %xmm0..%xmm15: 16 byte-sliced blocks + */ + CFI_STARTPROC(); + + movq %rsi, %rax; + leaq 8 * 16(%rax), %r8; + + movl ARIA_CTX_rounds(CTX), %r10d; + subl $2, %r10d; + + inpack16_post(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, + %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, + %xmm15, %rax, %r8); + aria_fo(%xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, + %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, + %rax, %r9, 0); + leaq 1*16(%r9), %r9; + +.align 16 +.Loop_aesni: + aria_fe(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7, + %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, + %xmm15, %rax, %r9, 0); + aria_fo(%xmm9, %xmm8, %xmm11, %xmm10, %xmm12, %xmm13, %xmm14, %xmm15, + %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, + %rax, %r9, 1); + leaq 2*16(%r9), %r9; + subl $2, %r10d; + jnz .Loop_aesni; + + aria_ff(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7, + %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, + %xmm15, %rax, %r9, 0, 1); + + debyteslice_16x16b(%xmm8, %xmm12, %xmm1, %xmm4, + 
%xmm9, %xmm13, %xmm0, %xmm5, + %xmm10, %xmm14, %xmm3, %xmm6, + %xmm11, %xmm15, %xmm2, %xmm7, + (%rax), (%r8)); + + ret_spec_stop; + CFI_ENDPROC(); +ELF(.size __aria_aesni_avx_crypt_16way,.-__aria_aesni_avx_crypt_16way;) + +.align 16 +.globl _gcry_aria_aesni_avx_ecb_crypt_blk1_16 +ELF(.type _gcry_aria_aesni_avx_ecb_crypt_blk1_16,@function;) +_gcry_aria_aesni_avx_ecb_crypt_blk1_16: + /* input: + * %rdi: ctx, CTX + * %rsi: dst + * %rdx: src + * %rcx: round keys + * %r8: num blocks + */ + CFI_STARTPROC(); + + pushq %rbp; + CFI_PUSH(%rbp); + movq %rsp, %rbp; + CFI_DEF_CFA_REGISTER(%rbp); + + subq $(16 * 16), %rsp; + andq $~15, %rsp; + + movq %rcx, %r9; + movq %rsi, %r11; + movq %rsp, %rsi; /* use stack for temporary store */ + + cmpq $16, %r8; + jb .Lecb_less_than_16; + + inpack16_pre(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, + %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, + %xmm15, %rdx); + + call __aria_aesni_avx_crypt_16way; + + write_output(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7, + %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, + %xmm15, %r11); + +.Lecb_end: + movl $STACK_DEPTH, %eax; + leave; + CFI_LEAVE(); + vzeroall; + ret_spec_stop; + +.Lecb_less_than_16: + pushq %r8; + inpack_1_15_pre(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, + %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, + %xmm15, %rdx, %r8d); + + call __aria_aesni_avx_crypt_16way; + + popq %rax; + write_output_1_15(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, + %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, + %xmm14, %xmm15, %r11, %eax); + + jmp .Lecb_end; + CFI_ENDPROC(); +ELF(.size _gcry_aria_aesni_avx_ecb_crypt_blk1_16, + .-_gcry_aria_aesni_avx_ecb_crypt_blk1_16;) + +.align 16 +ELF(.type __aria_aesni_avx_ctr_gen_keystream_16way,@function;) +__aria_aesni_avx_ctr_gen_keystream_16way: + /* input: + * %rdi: ctx + * %rsi: dst + * %rdx: src + * %rcx: keystream + * %r8: iv (big endian, 128bit) + */ + CFI_STARTPROC(); + + /* load IV */ + vmovdqu (%r8), %xmm8; + cmpb $(0x100 - 16), 15(%r8); + jbe .Lctr_byteadd; + + /* byteswap */ + vmovdqa .Lbswap128_mask rRIP, %xmm1; + vpshufb %xmm1, %xmm8, %xmm3; /* be => le */ + + vpcmpeqd %xmm0, %xmm0, %xmm0; + vpsrldq $8, %xmm0, %xmm0; /* low: -1, high: 0 */ + + /* construct IVs */ + inc_le128(%xmm3, %xmm0, %xmm5); /* +1 */ + vpshufb %xmm1, %xmm3, %xmm9; + inc_le128(%xmm3, %xmm0, %xmm5); /* +1 */ + vpshufb %xmm1, %xmm3, %xmm10; + inc_le128(%xmm3, %xmm0, %xmm5); /* +1 */ + vpshufb %xmm1, %xmm3, %xmm11; + inc_le128(%xmm3, %xmm0, %xmm5); /* +1 */ + vpshufb %xmm1, %xmm3, %xmm12; + inc_le128(%xmm3, %xmm0, %xmm5); /* +1 */ + vpshufb %xmm1, %xmm3, %xmm13; + inc_le128(%xmm3, %xmm0, %xmm5); /* +1 */ + vpshufb %xmm1, %xmm3, %xmm14; + inc_le128(%xmm3, %xmm0, %xmm5); /* +1 */ + vpshufb %xmm1, %xmm3, %xmm15; + vmovdqu %xmm8, (0 * 16)(%rcx); + vmovdqu %xmm9, (1 * 16)(%rcx); + vmovdqu %xmm10, (2 * 16)(%rcx); + vmovdqu %xmm11, (3 * 16)(%rcx); + vmovdqu %xmm12, (4 * 16)(%rcx); + vmovdqu %xmm13, (5 * 16)(%rcx); + vmovdqu %xmm14, (6 * 16)(%rcx); + vmovdqu %xmm15, (7 * 16)(%rcx); + + inc_le128(%xmm3, %xmm0, %xmm5); /* +1 */ + vpshufb %xmm1, %xmm3, %xmm8; + inc_le128(%xmm3, %xmm0, %xmm5); /* +1 */ + vpshufb %xmm1, %xmm3, %xmm9; + inc_le128(%xmm3, %xmm0, %xmm5); /* +1 */ + vpshufb %xmm1, %xmm3, %xmm10; + inc_le128(%xmm3, %xmm0, %xmm5); /* +1 */ + vpshufb %xmm1, %xmm3, %xmm11; + inc_le128(%xmm3, %xmm0, %xmm5); /* +1 */ + vpshufb %xmm1, %xmm3, %xmm12; + inc_le128(%xmm3, %xmm0, %xmm5); /* +1 */ + vpshufb %xmm1, %xmm3, %xmm13; + inc_le128(%xmm3, 
%xmm0, %xmm5); /* +1 */ + vpshufb %xmm1, %xmm3, %xmm14; + inc_le128(%xmm3, %xmm0, %xmm5); /* +1 */ + vpshufb %xmm1, %xmm3, %xmm15; + inc_le128(%xmm3, %xmm0, %xmm5); /* +1 */ + vpshufb %xmm1, %xmm3, %xmm4; + vmovdqu %xmm4, (%r8); + + vmovdqu (0 * 16)(%rcx), %xmm0; + vmovdqu (1 * 16)(%rcx), %xmm1; + vmovdqu (2 * 16)(%rcx), %xmm2; + vmovdqu (3 * 16)(%rcx), %xmm3; + vmovdqu (4 * 16)(%rcx), %xmm4; + vmovdqu (5 * 16)(%rcx), %xmm5; + vmovdqu (6 * 16)(%rcx), %xmm6; + vmovdqu (7 * 16)(%rcx), %xmm7; + + ret_spec_stop; + +.align 8 +.Lctr_byteadd_full_ctr_carry: + addb $16, 15(%r8); + pushq %rcx; + movl $14, %ecx; + 1: + adcb $0, (%r8, %rcx); + jnc 2f; + loop 1b; + 2: + popq %rcx; + jmp .Lctr_byteadd_xmm; +.align 8 +.Lctr_byteadd: + je .Lctr_byteadd_full_ctr_carry; + addb $16, 15(%r8); +.Lctr_byteadd_xmm: + vmovdqa %xmm8, %xmm0; + vpaddb .Lbige_addb_1 rRIP, %xmm8, %xmm1; + vpaddb .Lbige_addb_2 rRIP, %xmm8, %xmm2; + vpaddb .Lbige_addb_3 rRIP, %xmm8, %xmm3; + vpaddb .Lbige_addb_4 rRIP, %xmm8, %xmm4; + vpaddb .Lbige_addb_5 rRIP, %xmm8, %xmm5; + vpaddb .Lbige_addb_6 rRIP, %xmm8, %xmm6; + vpaddb .Lbige_addb_7 rRIP, %xmm8, %xmm7; + vpaddb .Lbige_addb_8 rRIP, %xmm0, %xmm8; + vpaddb .Lbige_addb_9 rRIP, %xmm0, %xmm9; + vpaddb .Lbige_addb_10 rRIP, %xmm0, %xmm10; + vpaddb .Lbige_addb_11 rRIP, %xmm0, %xmm11; + vpaddb .Lbige_addb_12 rRIP, %xmm0, %xmm12; + vpaddb .Lbige_addb_13 rRIP, %xmm0, %xmm13; + vpaddb .Lbige_addb_14 rRIP, %xmm0, %xmm14; + vpaddb .Lbige_addb_15 rRIP, %xmm0, %xmm15; + + ret_spec_stop; + CFI_ENDPROC(); +ELF(.size __aria_aesni_avx_ctr_gen_keystream_16way,.-__aria_aesni_avx_ctr_gen_keystream_16way;) + +.align 16 +.globl _gcry_aria_aesni_avx_ctr_crypt_blk16 +ELF(.type _gcry_aria_aesni_avx_ctr_crypt_blk16,@function;) +_gcry_aria_aesni_avx_ctr_crypt_blk16: + /* input: + * %rdi: ctx + * %rsi: dst + * %rdx: src + * %rcx: iv (big endian, 128bit) + */ + CFI_STARTPROC(); + + pushq %rbp; + CFI_PUSH(%rbp); + movq %rsp, %rbp; + CFI_DEF_CFA_REGISTER(%rbp); + + subq $(16 * 16), %rsp; + andq $~15, %rsp; + + movq %rcx, %r8; /* %r8: iv */ + movq %rsp, %rcx; /* %rcx: keystream */ + call __aria_aesni_avx_ctr_gen_keystream_16way; + + pushq %rsi; + movq %rdx, %r11; + movq %rcx, %rsi; /* use stack for temporary store */ + movq %rcx, %rdx; + leaq ARIA_CTX_enc_key(CTX), %r9; + + call __aria_aesni_avx_crypt_16way; + + popq %rsi; + vpxor (0 * 16)(%r11), %xmm1, %xmm1; + vpxor (1 * 16)(%r11), %xmm0, %xmm0; + vpxor (2 * 16)(%r11), %xmm3, %xmm3; + vpxor (3 * 16)(%r11), %xmm2, %xmm2; + vpxor (4 * 16)(%r11), %xmm4, %xmm4; + vpxor (5 * 16)(%r11), %xmm5, %xmm5; + vpxor (6 * 16)(%r11), %xmm6, %xmm6; + vpxor (7 * 16)(%r11), %xmm7, %xmm7; + vpxor (8 * 16)(%r11), %xmm8, %xmm8; + vpxor (9 * 16)(%r11), %xmm9, %xmm9; + vpxor (10 * 16)(%r11), %xmm10, %xmm10; + vpxor (11 * 16)(%r11), %xmm11, %xmm11; + vpxor (12 * 16)(%r11), %xmm12, %xmm12; + vpxor (13 * 16)(%r11), %xmm13, %xmm13; + vpxor (14 * 16)(%r11), %xmm14, %xmm14; + vpxor (15 * 16)(%r11), %xmm15, %xmm15; + write_output(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7, + %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, + %xmm15, %rsi); + + movl $STACK_DEPTH, %eax; + leave; + CFI_LEAVE(); + vzeroall; + ret_spec_stop; + CFI_ENDPROC(); +ELF(.size _gcry_aria_aesni_avx_ctr_crypt_blk16,.-_gcry_aria_aesni_avx_ctr_crypt_blk16;) + +#ifdef CONFIG_AS_GFNI +.align 16 +ELF(.type __aria_gfni_avx_crypt_16way,@function;) +__aria_gfni_avx_crypt_16way: + /* input: + * %r9: rk + * %rsi: dst + * %rdx: src + * %xmm0..%xmm15: 16 byte-sliced blocks + */ + CFI_STARTPROC(); + + movq %rsi, %rax; + 
leaq 8 * 16(%rax), %r8; + + movl ARIA_CTX_rounds(CTX), %r10d; + subl $2, %r10d; + + inpack16_post(%xmm0, %xmm1, %xmm2, %xmm3, + %xmm4, %xmm5, %xmm6, %xmm7, + %xmm8, %xmm9, %xmm10, %xmm11, + %xmm12, %xmm13, %xmm14, + %xmm15, %rax, %r8); + aria_fo_gfni(%xmm8, %xmm9, %xmm10, %xmm11, + %xmm12, %xmm13, %xmm14, %xmm15, + %xmm0, %xmm1, %xmm2, %xmm3, + %xmm4, %xmm5, %xmm6, %xmm7, + %rax, %r9, 0); + leaq 1*16(%r9), %r9; + +.align 16 +.Loop_gfni: + aria_fe_gfni(%xmm1, %xmm0, %xmm3, %xmm2, + %xmm4, %xmm5, %xmm6, %xmm7, + %xmm8, %xmm9, %xmm10, %xmm11, + %xmm12, %xmm13, %xmm14, + %xmm15, %rax, %r9, 0); + aria_fo_gfni(%xmm9, %xmm8, %xmm11, %xmm10, + %xmm12, %xmm13, %xmm14, %xmm15, + %xmm0, %xmm1, %xmm2, %xmm3, + %xmm4, %xmm5, %xmm6, %xmm7, + %rax, %r9, 1); + leaq 2*16(%r9), %r9; + subl $2, %r10d; + jnz .Loop_gfni; + + aria_ff_gfni(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7, + %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, + %xmm15, %rax, %r9, 0, 1); + + debyteslice_16x16b(%xmm8, %xmm12, %xmm1, %xmm4, + %xmm9, %xmm13, %xmm0, %xmm5, + %xmm10, %xmm14, %xmm3, %xmm6, + %xmm11, %xmm15, %xmm2, %xmm7, + (%rax), (%r8)); + + ret_spec_stop; + CFI_ENDPROC(); +ELF(.size __aria_gfni_avx_crypt_16way,.-__aria_gfni_avx_crypt_16way;) + +.align 16 +.globl _gcry_aria_gfni_avx_ecb_crypt_blk1_16 +ELF(.type _gcry_aria_gfni_avx_ecb_crypt_blk1_16,@function;) +_gcry_aria_gfni_avx_ecb_crypt_blk1_16: + /* input: + * %rdi: ctx, CTX + * %rsi: dst + * %rdx: src + * %rcx: round keys + * %r8: num blocks + */ + CFI_STARTPROC(); + + pushq %rbp; + CFI_PUSH(%rbp); + movq %rsp, %rbp; + CFI_DEF_CFA_REGISTER(%rbp); + + subq $(16 * 16), %rsp; + andq $~15, %rsp; + + movq %rcx, %r9; + movq %rsi, %r11; + movq %rsp, %rsi; /* use stack for temporary store */ + + cmpq $16, %r8; + jb .Lecb_less_than_16_gfni; + + inpack16_pre(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, + %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, + %xmm15, %rdx); + + call __aria_gfni_avx_crypt_16way; + + write_output(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7, + %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, + %xmm15, %r11); + +.Lecb_end_gfni: + movl $STACK_DEPTH, %eax; + leave; + CFI_LEAVE(); + vzeroall; + ret_spec_stop; + +.Lecb_less_than_16_gfni: + pushq %r8; + inpack_1_15_pre(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, + %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, + %xmm15, %rdx, %r8d); + + call __aria_gfni_avx_crypt_16way; + + popq %rax; + write_output_1_15(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, + %xmm7, %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, + %xmm14, %xmm15, %r11, %eax); + + jmp .Lecb_end_gfni; + CFI_ENDPROC(); +ELF(.size _gcry_aria_gfni_avx_ecb_crypt_blk1_16, + .-_gcry_aria_gfni_avx_ecb_crypt_blk1_16;) + +.align 16 +.globl _gcry_aria_gfni_avx_ctr_crypt_blk16 +ELF(.type _gcry_aria_gfni_avx_ctr_crypt_blk16,@function;) +_gcry_aria_gfni_avx_ctr_crypt_blk16: + /* input: + * %rdi: ctx + * %rsi: dst + * %rdx: src + * %rcx: iv (big endian, 128bit) + */ + CFI_STARTPROC(); + + pushq %rbp; + CFI_PUSH(%rbp); + movq %rsp, %rbp; + CFI_DEF_CFA_REGISTER(%rbp); + + subq $(16 * 16), %rsp; + andq $~15, %rsp; + + movq %rcx, %r8; /* %r8: iv */ + movq %rsp, %rcx; /* %rcx: keystream */ + call __aria_aesni_avx_ctr_gen_keystream_16way + + pushq %rsi; + movq %rdx, %r11; + movq %rcx, %rsi; /* use stack for temporary store */ + movq %rcx, %rdx; + leaq ARIA_CTX_enc_key(CTX), %r9; + + call __aria_gfni_avx_crypt_16way; + + popq %rsi; + vpxor (0 * 16)(%r11), %xmm1, %xmm1; + vpxor (1 * 16)(%r11), %xmm0, %xmm0; 
+	vpxor (2 * 16)(%r11), %xmm3, %xmm3;
+	vpxor (3 * 16)(%r11), %xmm2, %xmm2;
+	vpxor (4 * 16)(%r11), %xmm4, %xmm4;
+	vpxor (5 * 16)(%r11), %xmm5, %xmm5;
+	vpxor (6 * 16)(%r11), %xmm6, %xmm6;
+	vpxor (7 * 16)(%r11), %xmm7, %xmm7;
+	vpxor (8 * 16)(%r11), %xmm8, %xmm8;
+	vpxor (9 * 16)(%r11), %xmm9, %xmm9;
+	vpxor (10 * 16)(%r11), %xmm10, %xmm10;
+	vpxor (11 * 16)(%r11), %xmm11, %xmm11;
+	vpxor (12 * 16)(%r11), %xmm12, %xmm12;
+	vpxor (13 * 16)(%r11), %xmm13, %xmm13;
+	vpxor (14 * 16)(%r11), %xmm14, %xmm14;
+	vpxor (15 * 16)(%r11), %xmm15, %xmm15;
+	write_output(%xmm1, %xmm0, %xmm3, %xmm2, %xmm4, %xmm5, %xmm6, %xmm7,
+		     %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
+		     %xmm15, %rsi);
+
+	movl $STACK_DEPTH, %eax;
+	leave;
+	CFI_LEAVE();
+	vzeroall;
+	ret_spec_stop;
+	CFI_ENDPROC();
+ELF(.size _gcry_aria_gfni_avx_ctr_crypt_blk16,.-_gcry_aria_gfni_avx_ctr_crypt_blk16;)
+#endif /* CONFIG_AS_GFNI */
+
+#endif /* ENABLE_AVX_SUPPORT && ENABLE_AESNI_SUPPORT */
+#endif /* __x86_64 */
diff --git a/cipher/aria-aesni-avx2-amd64.S b/cipher/aria-aesni-avx2-amd64.S
new file mode 100644
index 00000000..0a89b0bf
--- /dev/null
+++ b/cipher/aria-aesni-avx2-amd64.S
@@ -0,0 +1,1464 @@
+/* aria-aesni-avx2-amd64.S - AESNI/GFNI/AVX2 implementation of ARIA cipher
+ *
+ * Copyright (C) 2022-2023 Taehee Yoo <ap420073@gmail.com>
+ * Copyright (C) 2023 Jussi Kivilinna <jussi.kivilinna@iki.fi>
+ *
+ * This file is part of Libgcrypt.
+ *
+ * Libgcrypt is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * Libgcrypt is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <config.h>
+
+#ifdef __x86_64
+#if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \
+     defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && \
+    defined(ENABLE_AVX2_SUPPORT) && defined(ENABLE_AESNI_SUPPORT)
+
+#include "asm-common-amd64.h"
+
+#ifdef ENABLE_GFNI_SUPPORT
+# define CONFIG_AS_GFNI 1
+#endif
+
+/* struct ARIA_context: */
+#define ARIA_BLOCK_SIZE 16
+#define ARIA_MAX_RD_KEYS 17
+#define ARIA_CTX_enc_key 0
+#define ARIA_CTX_dec_key (ARIA_CTX_enc_key + (ARIA_BLOCK_SIZE * ARIA_MAX_RD_KEYS))
+#define ARIA_CTX_rounds (ARIA_CTX_dec_key + (ARIA_BLOCK_SIZE * ARIA_MAX_RD_KEYS))
+
+/* register macros */
+#define CTX %rdi
+
+#define ymm0_x xmm0
+#define ymm1_x xmm1
+#define ymm2_x xmm2
+#define ymm3_x xmm3
+#define ymm4_x xmm4
+#define ymm5_x xmm5
+#define ymm6_x xmm6
+#define ymm7_x xmm7
+#define ymm8_x xmm8
+#define ymm9_x xmm9
+#define ymm10_x xmm10
+#define ymm11_x xmm11
+#define ymm12_x xmm12
+#define ymm13_x xmm13
+#define ymm14_x xmm14
+#define ymm15_x xmm15
+
+/* helper macros */
+#define STACK_DEPTH (2 * 8 + 16 * 32 + 31)
+
+#define BV8(a0, a1, a2, a3, a4, a5, a6, a7) \
+	( (((a0) & 1) << 0) | \
+	  (((a1) & 1) << 1) | \
+	  (((a2) & 1) << 2) | \
+	  (((a3) & 1) << 3) | \
+	  (((a4) & 1) << 4) | \
+	  (((a5) & 1) << 5) | \
+	  (((a6) & 1) << 6) | \
+	  (((a7) & 1) << 7) )
+
+#define BM8X8(l0, l1, l2, l3, l4, l5, l6, l7) \
+	( ((l7) << (0 * 8)) | \
+	  ((l6) << (1 * 8)) | \
+	  ((l5) << (2 * 8)) | \
+	  ((l4) << (3 * 8)) | \
+	  ((l3) << (4 * 8)) | \
+	  ((l2) << (5 * 8)) | \
+	  ((l1) << (6 * 8)) | \
+	  ((l0) << (7 * 8)) )
+
+/* asm macros */
+#define inc_le128(x, minus_one, tmp) \
+	vpcmpeqq minus_one, x, tmp; \
+	vpsubq minus_one, x, x; \
+	vpslldq $8, tmp, tmp; \
+	vpsubq tmp, x, x;
+
+#define filter_8bit(x, lo_t, hi_t, mask4bit, tmp0) \
+	vpand x, mask4bit, tmp0; \
+	vpandn x, mask4bit, x; \
+	vpsrld $4, x, x; \
+	\
+	vpshufb tmp0, lo_t, tmp0; \
+	vpshufb x, hi_t, x; \
+	vpxor tmp0, x, x;
+
+#define transpose_4x4(x0, x1, x2, x3, t1, t2) \
+	vpunpckhdq x1, x0, t2; \
+	vpunpckldq x1, x0, x0; \
+	\
+	vpunpckldq x3, x2, t1; \
+	vpunpckhdq x3, x2, x2; \
+	\
+	vpunpckhqdq t1, x0, x1; \
+	vpunpcklqdq t1, x0, x0; \
+	\
+	vpunpckhqdq x2, t2, x3; \
+	vpunpcklqdq x2, t2, x2;
+
+#define byteslice_16x16b(a0, b0, c0, d0, \
+			 a1, b1, c1, d1, \
+			 a2, b2, c2, d2, \
+			 a3, b3, c3, d3, \
+			 st0, st1) \
+	vmovdqu d2, st0; \
+	vmovdqu d3, st1; \
+	transpose_4x4(a0, a1, a2, a3, d2, d3); \
+	transpose_4x4(b0, b1, b2, b3, d2, d3); \
+	vmovdqu st0, d2; \
+	vmovdqu st1, d3; \
+	\
+	vmovdqu a0, st0; \
+	vmovdqu a1, st1; \
+	transpose_4x4(c0, c1, c2, c3, a0, a1); \
+	transpose_4x4(d0, d1, d2, d3, a0, a1); \
+	\
+	vbroadcasti128 .Lshufb_16x16b rRIP, a0; \
+	vmovdqu st1, a1; \
+	vpshufb a0, a2, a2; \
+	vpshufb a0, a3, a3; \
+	vpshufb a0, b0, b0; \
+	vpshufb a0, b1, b1; \
+	vpshufb a0, b2, b2; \
+	vpshufb a0, b3, b3; \
+	vpshufb a0, a1, a1; \
+	vpshufb a0, c0, c0; \
+	vpshufb a0, c1, c1; \
+	vpshufb a0, c2, c2; \
+	vpshufb a0, c3, c3; \
+	vpshufb a0, d0, d0; \
+	vpshufb a0, d1, d1; \
+	vpshufb a0, d2, d2; \
+	vpshufb a0, d3, d3; \
+	vmovdqu d3, st1; \
+	vmovdqu st0, d3; \
+	vpshufb a0, d3, a0; \
+	vmovdqu d2, st0; \
+	\
+	transpose_4x4(a0, b0, c0, d0, d2, d3); \
+	transpose_4x4(a1, b1, c1, d1, d2, d3); \
+	vmovdqu st0, d2; \
+	vmovdqu st1, d3; \
+	\
+	vmovdqu b0, st0; \
+	vmovdqu b1, st1; \
+	transpose_4x4(a2, b2, c2, d2, b0, b1); \
+	transpose_4x4(a3, b3, c3, d3, b0, b1); \
+	vmovdqu st0, b0; \
+	vmovdqu st1, b1; \
+	/* does not adjust output bytes inside vectors */
+
+#define debyteslice_16x16b(a0, b0, c0, d0, \
+			   a1, b1, c1,
d1, \ + a2, b2, c2, d2, \ + a3, b3, c3, d3, \ + st0, st1) \ + vmovdqu d2, st0; \ + vmovdqu d3, st1; \ + transpose_4x4(a0, a1, a2, a3, d2, d3); \ + transpose_4x4(b0, b1, b2, b3, d2, d3); \ + vmovdqu st0, d2; \ + vmovdqu st1, d3; \ + \ + vmovdqu a0, st0; \ + vmovdqu a1, st1; \ + transpose_4x4(c0, c1, c2, c3, a0, a1); \ + transpose_4x4(d0, d1, d2, d3, a0, a1); \ + \ + vbroadcasti128 .Lshufb_16x16b rRIP, a0; \ + vmovdqu st1, a1; \ + vpshufb a0, a2, a2; \ + vpshufb a0, a3, a3; \ + vpshufb a0, b0, b0; \ + vpshufb a0, b1, b1; \ + vpshufb a0, b2, b2; \ + vpshufb a0, b3, b3; \ + vpshufb a0, a1, a1; \ + vpshufb a0, c0, c0; \ + vpshufb a0, c1, c1; \ + vpshufb a0, c2, c2; \ + vpshufb a0, c3, c3; \ + vpshufb a0, d0, d0; \ + vpshufb a0, d1, d1; \ + vpshufb a0, d2, d2; \ + vpshufb a0, d3, d3; \ + vmovdqu d3, st1; \ + vmovdqu st0, d3; \ + vpshufb a0, d3, a0; \ + vmovdqu d2, st0; \ + \ + transpose_4x4(c0, d0, a0, b0, d2, d3); \ + transpose_4x4(c1, d1, a1, b1, d2, d3); \ + vmovdqu st0, d2; \ + vmovdqu st1, d3; \ + \ + vmovdqu b0, st0; \ + vmovdqu b1, st1; \ + transpose_4x4(c2, d2, a2, b2, b0, b1); \ + transpose_4x4(c3, d3, a3, b3, b0, b1); \ + vmovdqu st0, b0; \ + vmovdqu st1, b1; \ + /* does not adjust output bytes inside vectors */ + +/* load blocks to registers and apply pre-whitening */ +#define inpack16_pre(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + y0, y1, y2, y3, \ + y4, y5, y6, y7, \ + rio) \ + vmovdqu (0 * 32)(rio), x0; \ + vmovdqu (1 * 32)(rio), x1; \ + vmovdqu (2 * 32)(rio), x2; \ + vmovdqu (3 * 32)(rio), x3; \ + vmovdqu (4 * 32)(rio), x4; \ + vmovdqu (5 * 32)(rio), x5; \ + vmovdqu (6 * 32)(rio), x6; \ + vmovdqu (7 * 32)(rio), x7; \ + vmovdqu (8 * 32)(rio), y0; \ + vmovdqu (9 * 32)(rio), y1; \ + vmovdqu (10 * 32)(rio), y2; \ + vmovdqu (11 * 32)(rio), y3; \ + vmovdqu (12 * 32)(rio), y4; \ + vmovdqu (13 * 32)(rio), y5; \ + vmovdqu (14 * 32)(rio), y6; \ + vmovdqu (15 * 32)(rio), y7; + +/* byteslice pre-whitened blocks and store to temporary memory */ +#define inpack16_post(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + y0, y1, y2, y3, \ + y4, y5, y6, y7, \ + mem_ab, mem_cd) \ + byteslice_16x16b(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + y0, y1, y2, y3, \ + y4, y5, y6, y7, \ + (mem_ab), (mem_cd)); \ + \ + vmovdqu x0, 0 * 32(mem_ab); \ + vmovdqu x1, 1 * 32(mem_ab); \ + vmovdqu x2, 2 * 32(mem_ab); \ + vmovdqu x3, 3 * 32(mem_ab); \ + vmovdqu x4, 4 * 32(mem_ab); \ + vmovdqu x5, 5 * 32(mem_ab); \ + vmovdqu x6, 6 * 32(mem_ab); \ + vmovdqu x7, 7 * 32(mem_ab); \ + vmovdqu y0, 0 * 32(mem_cd); \ + vmovdqu y1, 1 * 32(mem_cd); \ + vmovdqu y2, 2 * 32(mem_cd); \ + vmovdqu y3, 3 * 32(mem_cd); \ + vmovdqu y4, 4 * 32(mem_cd); \ + vmovdqu y5, 5 * 32(mem_cd); \ + vmovdqu y6, 6 * 32(mem_cd); \ + vmovdqu y7, 7 * 32(mem_cd); + +#define write_output(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + y0, y1, y2, y3, \ + y4, y5, y6, y7, \ + mem) \ + vmovdqu x0, 0 * 32(mem); \ + vmovdqu x1, 1 * 32(mem); \ + vmovdqu x2, 2 * 32(mem); \ + vmovdqu x3, 3 * 32(mem); \ + vmovdqu x4, 4 * 32(mem); \ + vmovdqu x5, 5 * 32(mem); \ + vmovdqu x6, 6 * 32(mem); \ + vmovdqu x7, 7 * 32(mem); \ + vmovdqu y0, 8 * 32(mem); \ + vmovdqu y1, 9 * 32(mem); \ + vmovdqu y2, 10 * 32(mem); \ + vmovdqu y3, 11 * 32(mem); \ + vmovdqu y4, 12 * 32(mem); \ + vmovdqu y5, 13 * 32(mem); \ + vmovdqu y6, 14 * 32(mem); \ + vmovdqu y7, 15 * 32(mem); \ + +#define aria_store_state_8way(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + mem_tmp, idx) \ + vmovdqu x0, ((idx + 0) * 32)(mem_tmp); \ + vmovdqu x1, ((idx + 1) * 32)(mem_tmp); \ + vmovdqu x2, ((idx + 2) * 32)(mem_tmp); \ + vmovdqu x3, ((idx + 3) * 
32)(mem_tmp); \ + vmovdqu x4, ((idx + 4) * 32)(mem_tmp); \ + vmovdqu x5, ((idx + 5) * 32)(mem_tmp); \ + vmovdqu x6, ((idx + 6) * 32)(mem_tmp); \ + vmovdqu x7, ((idx + 7) * 32)(mem_tmp); + +#define aria_load_state_8way(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + mem_tmp, idx) \ + vmovdqu ((idx + 0) * 32)(mem_tmp), x0; \ + vmovdqu ((idx + 1) * 32)(mem_tmp), x1; \ + vmovdqu ((idx + 2) * 32)(mem_tmp), x2; \ + vmovdqu ((idx + 3) * 32)(mem_tmp), x3; \ + vmovdqu ((idx + 4) * 32)(mem_tmp), x4; \ + vmovdqu ((idx + 5) * 32)(mem_tmp), x5; \ + vmovdqu ((idx + 6) * 32)(mem_tmp), x6; \ + vmovdqu ((idx + 7) * 32)(mem_tmp), x7; + +#define aria_ark_8way(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + t0, rk, idx, round) \ + /* AddRoundKey */ \ + vpbroadcastb ((round * 16) + idx + 3)(rk), t0; \ + vpxor t0, x0, x0; \ + vpbroadcastb ((round * 16) + idx + 2)(rk), t0; \ + vpxor t0, x1, x1; \ + vpbroadcastb ((round * 16) + idx + 1)(rk), t0; \ + vpxor t0, x2, x2; \ + vpbroadcastb ((round * 16) + idx + 0)(rk), t0; \ + vpxor t0, x3, x3; \ + vpbroadcastb ((round * 16) + idx + 7)(rk), t0; \ + vpxor t0, x4, x4; \ + vpbroadcastb ((round * 16) + idx + 6)(rk), t0; \ + vpxor t0, x5, x5; \ + vpbroadcastb ((round * 16) + idx + 5)(rk), t0; \ + vpxor t0, x6, x6; \ + vpbroadcastb ((round * 16) + idx + 4)(rk), t0; \ + vpxor t0, x7, x7; + +#ifdef CONFIG_AS_GFNI +#define aria_sbox_8way_gfni(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + t0, t1, t2, t3, \ + t4, t5, t6, t7) \ + vpbroadcastq .Ltf_s2_bitmatrix rRIP, t0; \ + vpbroadcastq .Ltf_inv_bitmatrix rRIP, t1; \ + vpbroadcastq .Ltf_id_bitmatrix rRIP, t2; \ + vpbroadcastq .Ltf_aff_bitmatrix rRIP, t3; \ + vpbroadcastq .Ltf_x2_bitmatrix rRIP, t4; \ + vgf2p8affineinvqb $(tf_s2_const), t0, x1, x1; \ + vgf2p8affineinvqb $(tf_s2_const), t0, x5, x5; \ + vgf2p8affineqb $(tf_inv_const), t1, x2, x2; \ + vgf2p8affineqb $(tf_inv_const), t1, x6, x6; \ + vgf2p8affineinvqb $0, t2, x2, x2; \ + vgf2p8affineinvqb $0, t2, x6, x6; \ + vgf2p8affineinvqb $(tf_aff_const), t3, x0, x0; \ + vgf2p8affineinvqb $(tf_aff_const), t3, x4, x4; \ + vgf2p8affineqb $(tf_x2_const), t4, x3, x3; \ + vgf2p8affineqb $(tf_x2_const), t4, x7, x7; \ + vgf2p8affineinvqb $0, t2, x3, x3; \ + vgf2p8affineinvqb $0, t2, x7, x7 +#endif /* CONFIG_AS_GFNI */ + +#define aria_sbox_8way(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + t0, t1, t2, t3, \ + t4, t5, t6, t7) \ + vpxor t7, t7, t7; \ + vpxor t6, t6, t6; \ + vbroadcasti128 .Linv_shift_row rRIP, t0; \ + vbroadcasti128 .Lshift_row rRIP, t1; \ + vbroadcasti128 .Ltf_lo__inv_aff__and__s2 rRIP, t2;\ + vbroadcasti128 .Ltf_hi__inv_aff__and__s2 rRIP, t3;\ + vbroadcasti128 .Ltf_lo__x2__and__fwd_aff rRIP, t4;\ + vbroadcasti128 .Ltf_hi__x2__and__fwd_aff rRIP, t5;\ + \ + vextracti128 $1, x0, t6##_x; \ + vaesenclast t7##_x, x0##_x, x0##_x; \ + vaesenclast t7##_x, t6##_x, t6##_x; \ + vinserti128 $1, t6##_x, x0, x0; \ + \ + vextracti128 $1, x4, t6##_x; \ + vaesenclast t7##_x, x4##_x, x4##_x; \ + vaesenclast t7##_x, t6##_x, t6##_x; \ + vinserti128 $1, t6##_x, x4, x4; \ + \ + vextracti128 $1, x1, t6##_x; \ + vaesenclast t7##_x, x1##_x, x1##_x; \ + vaesenclast t7##_x, t6##_x, t6##_x; \ + vinserti128 $1, t6##_x, x1, x1; \ + \ + vextracti128 $1, x5, t6##_x; \ + vaesenclast t7##_x, x5##_x, x5##_x; \ + vaesenclast t7##_x, t6##_x, t6##_x; \ + vinserti128 $1, t6##_x, x5, x5; \ + \ + vextracti128 $1, x2, t6##_x; \ + vaesdeclast t7##_x, x2##_x, x2##_x; \ + vaesdeclast t7##_x, t6##_x, t6##_x; \ + vinserti128 $1, t6##_x, x2, x2; \ + \ + vextracti128 $1, x6, t6##_x; \ + vaesdeclast t7##_x, x6##_x, x6##_x; \ + vaesdeclast t7##_x, 
t6##_x, t6##_x; \ + vinserti128 $1, t6##_x, x6, x6; \ + \ + vpbroadcastd .L0f0f0f0f rRIP, t6; \ + \ + /* AES inverse shift rows */ \ + vpshufb t0, x0, x0; \ + vpshufb t0, x4, x4; \ + vpshufb t0, x1, x1; \ + vpshufb t0, x5, x5; \ + vpshufb t1, x3, x3; \ + vpshufb t1, x7, x7; \ + vpshufb t1, x2, x2; \ + vpshufb t1, x6, x6; \ + \ + /* affine transformation for S2 */ \ + filter_8bit(x1, t2, t3, t6, t0); \ + /* affine transformation for S2 */ \ + filter_8bit(x5, t2, t3, t6, t0); \ + \ + /* affine transformation for X2 */ \ + filter_8bit(x3, t4, t5, t6, t0); \ + /* affine transformation for X2 */ \ + filter_8bit(x7, t4, t5, t6, t0); \ + \ + vpxor t6, t6, t6; \ + vextracti128 $1, x3, t6##_x; \ + vaesdeclast t7##_x, x3##_x, x3##_x; \ + vaesdeclast t7##_x, t6##_x, t6##_x; \ + vinserti128 $1, t6##_x, x3, x3; \ + \ + vextracti128 $1, x7, t6##_x; \ + vaesdeclast t7##_x, x7##_x, x7##_x; \ + vaesdeclast t7##_x, t6##_x, t6##_x; \ + vinserti128 $1, t6##_x, x7, x7; \ + +#define aria_diff_m(x0, x1, x2, x3, \ + t0, t1, t2, t3) \ + /* T = rotr32(X, 8); */ \ + /* X ^= T */ \ + vpxor x0, x3, t0; \ + vpxor x1, x0, t1; \ + vpxor x2, x1, t2; \ + vpxor x3, x2, t3; \ + /* X = T ^ rotr(X, 16); */ \ + vpxor t2, x0, x0; \ + vpxor x1, t3, t3; \ + vpxor t0, x2, x2; \ + vpxor t1, x3, x1; \ + vmovdqu t3, x3; + +#define aria_diff_word(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + y0, y1, y2, y3, \ + y4, y5, y6, y7) \ + /* t1 ^= t2; */ \ + vpxor y0, x4, x4; \ + vpxor y1, x5, x5; \ + vpxor y2, x6, x6; \ + vpxor y3, x7, x7; \ + \ + /* t2 ^= t3; */ \ + vpxor y4, y0, y0; \ + vpxor y5, y1, y1; \ + vpxor y6, y2, y2; \ + vpxor y7, y3, y3; \ + \ + /* t0 ^= t1; */ \ + vpxor x4, x0, x0; \ + vpxor x5, x1, x1; \ + vpxor x6, x2, x2; \ + vpxor x7, x3, x3; \ + \ + /* t3 ^= t1; */ \ + vpxor x4, y4, y4; \ + vpxor x5, y5, y5; \ + vpxor x6, y6, y6; \ + vpxor x7, y7, y7; \ + \ + /* t2 ^= t0; */ \ + vpxor x0, y0, y0; \ + vpxor x1, y1, y1; \ + vpxor x2, y2, y2; \ + vpxor x3, y3, y3; \ + \ + /* t1 ^= t2; */ \ + vpxor y0, x4, x4; \ + vpxor y1, x5, x5; \ + vpxor y2, x6, x6; \ + vpxor y3, x7, x7; + +#define aria_fe(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + y0, y1, y2, y3, \ + y4, y5, y6, y7, \ + mem_tmp, rk, round) \ + aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ + y0, rk, 8, round); \ + \ + aria_sbox_8way(x2, x3, x0, x1, x6, x7, x4, x5, \ + y0, y1, y2, y3, y4, y5, y6, y7); \ + \ + aria_diff_m(x0, x1, x2, x3, y0, y1, y2, y3); \ + aria_diff_m(x4, x5, x6, x7, y0, y1, y2, y3); \ + aria_store_state_8way(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + mem_tmp, 8); \ + \ + aria_load_state_8way(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + mem_tmp, 0); \ + aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ + y0, rk, 0, round); \ + \ + aria_sbox_8way(x2, x3, x0, x1, x6, x7, x4, x5, \ + y0, y1, y2, y3, y4, y5, y6, y7); \ + \ + aria_diff_m(x0, x1, x2, x3, y0, y1, y2, y3); \ + aria_diff_m(x4, x5, x6, x7, y0, y1, y2, y3); \ + aria_store_state_8way(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + mem_tmp, 0); \ + aria_load_state_8way(y0, y1, y2, y3, \ + y4, y5, y6, y7, \ + mem_tmp, 8); \ + aria_diff_word(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + y0, y1, y2, y3, \ + y4, y5, y6, y7); \ + /* aria_diff_byte() \ + * T3 = ABCD -> BADC \ + * T3 = y4, y5, y6, y7 -> y5, y4, y7, y6 \ + * T0 = ABCD -> CDAB \ + * T0 = x0, x1, x2, x3 -> x2, x3, x0, x1 \ + * T1 = ABCD -> DCBA \ + * T1 = x4, x5, x6, x7 -> x7, x6, x5, x4 \ + */ \ + aria_diff_word(x2, x3, x0, x1, \ + x7, x6, x5, x4, \ + y0, y1, y2, y3, \ + y5, y4, y7, y6); \ + aria_store_state_8way(x3, x2, x1, x0, \ + x6, x7, x4, x5, \ + mem_tmp, 0); + +#define 
aria_fo(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + y0, y1, y2, y3, \ + y4, y5, y6, y7, \ + mem_tmp, rk, round) \ + aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ + y0, rk, 8, round); \ + \ + aria_sbox_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ + y0, y1, y2, y3, y4, y5, y6, y7); \ + \ + aria_diff_m(x0, x1, x2, x3, y0, y1, y2, y3); \ + aria_diff_m(x4, x5, x6, x7, y0, y1, y2, y3); \ + aria_store_state_8way(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + mem_tmp, 8); \ + \ + aria_load_state_8way(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + mem_tmp, 0); \ + aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ + y0, rk, 0, round); \ + \ + aria_sbox_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ + y0, y1, y2, y3, y4, y5, y6, y7); \ + \ + aria_diff_m(x0, x1, x2, x3, y0, y1, y2, y3); \ + aria_diff_m(x4, x5, x6, x7, y0, y1, y2, y3); \ + aria_store_state_8way(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + mem_tmp, 0); \ + aria_load_state_8way(y0, y1, y2, y3, \ + y4, y5, y6, y7, \ + mem_tmp, 8); \ + aria_diff_word(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + y0, y1, y2, y3, \ + y4, y5, y6, y7); \ + /* aria_diff_byte() \ + * T1 = ABCD -> BADC \ + * T1 = x4, x5, x6, x7 -> x5, x4, x7, x6 \ + * T2 = ABCD -> CDAB \ + * T2 = y0, y1, y2, y3, -> y2, y3, y0, y1 \ + * T3 = ABCD -> DCBA \ + * T3 = y4, y5, y6, y7 -> y7, y6, y5, y4 \ + */ \ + aria_diff_word(x0, x1, x2, x3, \ + x5, x4, x7, x6, \ + y2, y3, y0, y1, \ + y7, y6, y5, y4); \ + aria_store_state_8way(x3, x2, x1, x0, \ + x6, x7, x4, x5, \ + mem_tmp, 0); + +#define aria_ff(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + y0, y1, y2, y3, \ + y4, y5, y6, y7, \ + mem_tmp, rk, round, last_round) \ + aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ + y0, rk, 8, round); \ + \ + aria_sbox_8way(x2, x3, x0, x1, x6, x7, x4, x5, \ + y0, y1, y2, y3, y4, y5, y6, y7); \ + \ + aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ + y0, rk, 8, last_round); \ + \ + aria_store_state_8way(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + mem_tmp, 8); \ + \ + aria_load_state_8way(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + mem_tmp, 0); \ + aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ + y0, rk, 0, round); \ + \ + aria_sbox_8way(x2, x3, x0, x1, x6, x7, x4, x5, \ + y0, y1, y2, y3, y4, y5, y6, y7); \ + \ + aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ + y0, rk, 0, last_round); \ + \ + aria_load_state_8way(y0, y1, y2, y3, \ + y4, y5, y6, y7, \ + mem_tmp, 8); +#ifdef CONFIG_AS_GFNI +#define aria_fe_gfni(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + y0, y1, y2, y3, \ + y4, y5, y6, y7, \ + mem_tmp, rk, round) \ + aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ + y0, rk, 8, round); \ + \ + aria_sbox_8way_gfni(x2, x3, x0, x1, \ + x6, x7, x4, x5, \ + y0, y1, y2, y3, \ + y4, y5, y6, y7); \ + \ + aria_diff_m(x0, x1, x2, x3, y0, y1, y2, y3); \ + aria_diff_m(x4, x5, x6, x7, y0, y1, y2, y3); \ + aria_store_state_8way(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + mem_tmp, 8); \ + \ + aria_load_state_8way(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + mem_tmp, 0); \ + aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ + y0, rk, 0, round); \ + \ + aria_sbox_8way_gfni(x2, x3, x0, x1, \ + x6, x7, x4, x5, \ + y0, y1, y2, y3, \ + y4, y5, y6, y7); \ + \ + aria_diff_m(x0, x1, x2, x3, y0, y1, y2, y3); \ + aria_diff_m(x4, x5, x6, x7, y0, y1, y2, y3); \ + aria_store_state_8way(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + mem_tmp, 0); \ + aria_load_state_8way(y0, y1, y2, y3, \ + y4, y5, y6, y7, \ + mem_tmp, 8); \ + aria_diff_word(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + y0, y1, y2, y3, \ + y4, y5, y6, y7); \ + /* aria_diff_byte() \ + * T3 = ABCD -> BADC \ + * T3 = y4, y5, y6, y7 -> y5, y4, y7, y6 \ + 
* T0 = ABCD -> CDAB \ + * T0 = x0, x1, x2, x3 -> x2, x3, x0, x1 \ + * T1 = ABCD -> DCBA \ + * T1 = x4, x5, x6, x7 -> x7, x6, x5, x4 \ + */ \ + aria_diff_word(x2, x3, x0, x1, \ + x7, x6, x5, x4, \ + y0, y1, y2, y3, \ + y5, y4, y7, y6); \ + aria_store_state_8way(x3, x2, x1, x0, \ + x6, x7, x4, x5, \ + mem_tmp, 0); + +#define aria_fo_gfni(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + y0, y1, y2, y3, \ + y4, y5, y6, y7, \ + mem_tmp, rk, round) \ + aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ + y0, rk, 8, round); \ + \ + aria_sbox_8way_gfni(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + y0, y1, y2, y3, \ + y4, y5, y6, y7); \ + \ + aria_diff_m(x0, x1, x2, x3, y0, y1, y2, y3); \ + aria_diff_m(x4, x5, x6, x7, y0, y1, y2, y3); \ + aria_store_state_8way(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + mem_tmp, 8); \ + \ + aria_load_state_8way(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + mem_tmp, 0); \ + aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ + y0, rk, 0, round); \ + \ + aria_sbox_8way_gfni(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + y0, y1, y2, y3, \ + y4, y5, y6, y7); \ + \ + aria_diff_m(x0, x1, x2, x3, y0, y1, y2, y3); \ + aria_diff_m(x4, x5, x6, x7, y0, y1, y2, y3); \ + aria_store_state_8way(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + mem_tmp, 0); \ + aria_load_state_8way(y0, y1, y2, y3, \ + y4, y5, y6, y7, \ + mem_tmp, 8); \ + aria_diff_word(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + y0, y1, y2, y3, \ + y4, y5, y6, y7); \ + /* aria_diff_byte() \ + * T1 = ABCD -> BADC \ + * T1 = x4, x5, x6, x7 -> x5, x4, x7, x6 \ + * T2 = ABCD -> CDAB \ + * T2 = y0, y1, y2, y3, -> y2, y3, y0, y1 \ + * T3 = ABCD -> DCBA \ + * T3 = y4, y5, y6, y7 -> y7, y6, y5, y4 \ + */ \ + aria_diff_word(x0, x1, x2, x3, \ + x5, x4, x7, x6, \ + y2, y3, y0, y1, \ + y7, y6, y5, y4); \ + aria_store_state_8way(x3, x2, x1, x0, \ + x6, x7, x4, x5, \ + mem_tmp, 0); + +#define aria_ff_gfni(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + y0, y1, y2, y3, \ + y4, y5, y6, y7, \ + mem_tmp, rk, round, last_round) \ + aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ + y0, rk, 8, round); \ + \ + aria_sbox_8way_gfni(x2, x3, x0, x1, \ + x6, x7, x4, x5, \ + y0, y1, y2, y3, \ + y4, y5, y6, y7); \ + \ + aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ + y0, rk, 8, last_round); \ + \ + aria_store_state_8way(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + mem_tmp, 8); \ + \ + aria_load_state_8way(x0, x1, x2, x3, \ + x4, x5, x6, x7, \ + mem_tmp, 0); \ + aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ + y0, rk, 0, round); \ + \ + aria_sbox_8way_gfni(x2, x3, x0, x1, \ + x6, x7, x4, x5, \ + y0, y1, y2, y3, \ + y4, y5, y6, y7); \ + \ + aria_ark_8way(x0, x1, x2, x3, x4, x5, x6, x7, \ + y0, rk, 0, last_round); \ + \ + aria_load_state_8way(y0, y1, y2, y3, \ + y4, y5, y6, y7, \ + mem_tmp, 8); +#endif /* CONFIG_AS_GFNI */ + + +SECTION_RODATA +.align 32 +#define SHUFB_BYTES(idx) \ + 0 + (idx), 4 + (idx), 8 + (idx), 12 + (idx) +.Lshufb_16x16b: + .byte SHUFB_BYTES(0), SHUFB_BYTES(1), SHUFB_BYTES(2), SHUFB_BYTES(3) + .byte SHUFB_BYTES(0), SHUFB_BYTES(1), SHUFB_BYTES(2), SHUFB_BYTES(3) + +.align 32 +.Lbige_addb_0_1: + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 +.Lbige_addb_2_3: + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2 + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3 +.Lbige_addb_4_5: + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4 + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5 +.Lbige_addb_6_7: + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6 + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7 
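+/* Each .Lbige_addb_N_M pair holds per-byte add constants: vpaddb with
+ * such a constant adds N to the last byte of the low 128-bit lane and
+ * M to the last byte of the high lane of a broadcast big-endian
+ * counter, e.g. vpaddb with .Lbige_addb_2_3 turns counter value n into
+ * the block pair (n+2, n+3).  These constants serve the .Lctr_byteadd
+ * fast path, which is only taken when adding 32 to the last counter
+ * byte cannot carry into the higher bytes (the boundary case is routed
+ * to .Lctr_byteadd_full_ctr_carry instead). */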
+.Lbige_addb_8_9: + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8 + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9 +.Lbige_addb_10_11: + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10 + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11 +.Lbige_addb_12_13: + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12 + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 13 +.Lbige_addb_14_15: + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 14 + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15 +.Lbige_addb_16_16: + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16 + .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16 + +.align 16 +/* For isolating SubBytes from AESENCLAST, inverse shift row */ +.Linv_shift_row: + .byte 0x00, 0x0d, 0x0a, 0x07, 0x04, 0x01, 0x0e, 0x0b + .byte 0x08, 0x05, 0x02, 0x0f, 0x0c, 0x09, 0x06, 0x03 +.Lshift_row: + .byte 0x00, 0x05, 0x0a, 0x0f, 0x04, 0x09, 0x0e, 0x03 + .byte 0x08, 0x0d, 0x02, 0x07, 0x0c, 0x01, 0x06, 0x0b +/* For CTR-mode IV byteswap */ +.Lbswap128_mask: + .byte 0x0f, 0x0e, 0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x08 + .byte 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00 + +/* AES inverse affine and S2 combined: + * 1 1 0 0 0 0 0 1 x0 0 + * 0 1 0 0 1 0 0 0 x1 0 + * 1 1 0 0 1 1 1 1 x2 0 + * 0 1 1 0 1 0 0 1 x3 1 + * 0 1 0 0 1 1 0 0 * x4 + 0 + * 0 1 0 1 1 0 0 0 x5 0 + * 0 0 0 0 0 1 0 1 x6 0 + * 1 1 1 0 0 1 1 1 x7 1 + */ +.Ltf_lo__inv_aff__and__s2: + .octa 0x92172DA81A9FA520B2370D883ABF8500 +.Ltf_hi__inv_aff__and__s2: + .octa 0x2B15FFC1AF917B45E6D8320C625CB688 + +/* X2 and AES forward affine combined: + * 1 0 1 1 0 0 0 1 x0 0 + * 0 1 1 1 1 0 1 1 x1 0 + * 0 0 0 1 1 0 1 0 x2 1 + * 0 1 0 0 0 1 0 0 x3 0 + * 0 0 1 1 1 0 1 1 * x4 + 0 + * 0 1 0 0 1 0 0 0 x5 0 + * 1 1 0 1 0 0 1 1 x6 0 + * 0 1 0 0 1 0 1 0 x7 0 + */ +.Ltf_lo__x2__and__fwd_aff: + .octa 0xEFAE0544FCBD1657B8F95213ABEA4100 +.Ltf_hi__x2__and__fwd_aff: + .octa 0x3F893781E95FE1576CDA64D2BA0CB204 + +#ifdef CONFIG_AS_GFNI +.align 8 +/* AES affine: */ +#define tf_aff_const BV8(1, 1, 0, 0, 0, 1, 1, 0) +.Ltf_aff_bitmatrix: + .quad BM8X8(BV8(1, 0, 0, 0, 1, 1, 1, 1), + BV8(1, 1, 0, 0, 0, 1, 1, 1), + BV8(1, 1, 1, 0, 0, 0, 1, 1), + BV8(1, 1, 1, 1, 0, 0, 0, 1), + BV8(1, 1, 1, 1, 1, 0, 0, 0), + BV8(0, 1, 1, 1, 1, 1, 0, 0), + BV8(0, 0, 1, 1, 1, 1, 1, 0), + BV8(0, 0, 0, 1, 1, 1, 1, 1)) + +/* AES inverse affine: */ +#define tf_inv_const BV8(1, 0, 1, 0, 0, 0, 0, 0) +.Ltf_inv_bitmatrix: + .quad BM8X8(BV8(0, 0, 1, 0, 0, 1, 0, 1), + BV8(1, 0, 0, 1, 0, 0, 1, 0), + BV8(0, 1, 0, 0, 1, 0, 0, 1), + BV8(1, 0, 1, 0, 0, 1, 0, 0), + BV8(0, 1, 0, 1, 0, 0, 1, 0), + BV8(0, 0, 1, 0, 1, 0, 0, 1), + BV8(1, 0, 0, 1, 0, 1, 0, 0), + BV8(0, 1, 0, 0, 1, 0, 1, 0)) + +/* S2: */ +#define tf_s2_const BV8(0, 1, 0, 0, 0, 1, 1, 1) +.Ltf_s2_bitmatrix: + .quad BM8X8(BV8(0, 1, 0, 1, 0, 1, 1, 1), + BV8(0, 0, 1, 1, 1, 1, 1, 1), + BV8(1, 1, 1, 0, 1, 1, 0, 1), + BV8(1, 1, 0, 0, 0, 0, 1, 1), + BV8(0, 1, 0, 0, 0, 0, 1, 1), + BV8(1, 1, 0, 0, 1, 1, 1, 0), + BV8(0, 1, 1, 0, 0, 0, 1, 1), + BV8(1, 1, 1, 1, 0, 1, 1, 0)) + +/* X2: */ +#define tf_x2_const BV8(0, 0, 1, 1, 0, 1, 0, 0) +.Ltf_x2_bitmatrix: + .quad BM8X8(BV8(0, 0, 0, 1, 1, 0, 0, 0), + BV8(0, 0, 1, 0, 0, 1, 1, 0), + BV8(0, 0, 0, 0, 1, 0, 1, 0), + BV8(1, 1, 1, 0, 0, 0, 1, 1), + BV8(1, 1, 1, 0, 1, 1, 0, 0), + BV8(0, 1, 1, 0, 1, 0, 1, 1), + BV8(1, 0, 1, 1, 1, 1, 0, 1), + BV8(1, 0, 0, 1, 0, 0, 1, 1)) + +/* Identity matrix: */ +.Ltf_id_bitmatrix: + .quad BM8X8(BV8(1, 0, 0, 0, 0, 0, 0, 0), + BV8(0, 1, 0, 0, 0, 0, 0, 0), + BV8(0, 0, 1, 0, 0, 0, 0, 0), + BV8(0, 0, 0, 1, 0, 0, 0, 0), + BV8(0, 0, 
0, 0, 1, 0, 0, 0), + BV8(0, 0, 0, 0, 0, 1, 0, 0), + BV8(0, 0, 0, 0, 0, 0, 1, 0), + BV8(0, 0, 0, 0, 0, 0, 0, 1)) + +#endif /* CONFIG_AS_GFNI */ + +/* 4-bit mask */ +.align 4 +.L0f0f0f0f: + .long 0x0f0f0f0f + +.text + +.align 16 +ELF(.type __aria_aesni_avx2_crypt_32way,@function;) +__aria_aesni_avx2_crypt_32way: + /* input: + * %r9: rk + * %rsi: dst + * %rdx: src + * %ymm0..%ymm15: byte-sliced blocks + */ + CFI_STARTPROC(); + + movq %rsi, %rax; + leaq 8 * 32(%rax), %r8; + + movl ARIA_CTX_rounds(CTX), %r10d; + subl $2, %r10d; + + inpack16_post(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, + %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, + %ymm15, %rax, %r8); + aria_fo(%ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15, + %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, + %rax, %r9, 0); + leaq 1*16(%r9), %r9; + +.align 16 +.Loop_aesni: + aria_fe(%ymm1, %ymm0, %ymm3, %ymm2, %ymm4, %ymm5, %ymm6, %ymm7, + %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, + %ymm15, %rax, %r9, 0); + aria_fo(%ymm9, %ymm8, %ymm11, %ymm10, %ymm12, %ymm13, %ymm14, %ymm15, + %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, + %rax, %r9, 1); + leaq 2*16(%r9), %r9; + subl $2, %r10d; + jnz .Loop_aesni; + + aria_ff(%ymm1, %ymm0, %ymm3, %ymm2, %ymm4, %ymm5, %ymm6, %ymm7, + %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, + %ymm15, %rax, %r9, 0, 1); + + debyteslice_16x16b(%ymm8, %ymm12, %ymm1, %ymm4, + %ymm9, %ymm13, %ymm0, %ymm5, + %ymm10, %ymm14, %ymm3, %ymm6, + %ymm11, %ymm15, %ymm2, %ymm7, + (%rax), (%r8)); + + ret_spec_stop; + CFI_ENDPROC(); +ELF(.size __aria_aesni_avx2_crypt_32way,.-__aria_aesni_avx2_crypt_32way;) + +.align 16 +.globl _gcry_aria_aesni_avx2_ecb_crypt_blk32 +ELF(.type _gcry_aria_aesni_avx2_ecb_crypt_blk32,@function;) +_gcry_aria_aesni_avx2_ecb_crypt_blk32: + /* input: + * %rdi: ctx, CTX + * %rsi: dst + * %rdx: src + * %rcx: round keys + */ + CFI_STARTPROC(); + + pushq %rbp; + CFI_PUSH(%rbp); + movq %rsp, %rbp; + CFI_DEF_CFA_REGISTER(%rbp); + + subq $(16 * 32), %rsp; + andq $~31, %rsp; + + movq %rcx, %r9; + movq %rsi, %r11; + movq %rsp, %rsi; /* use stack for temporary store */ + + inpack16_pre(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, + %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, + %ymm15, %rdx); + + call __aria_aesni_avx2_crypt_32way; + + write_output(%ymm1, %ymm0, %ymm3, %ymm2, %ymm4, %ymm5, %ymm6, %ymm7, + %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, + %ymm15, %r11); + + movl $STACK_DEPTH, %eax; + leave; + CFI_LEAVE(); + vzeroall; + ret_spec_stop; + CFI_ENDPROC(); +ELF(.size _gcry_aria_aesni_avx2_ecb_crypt_blk32, + .-_gcry_aria_aesni_avx2_ecb_crypt_blk32;) + +.align 16 +ELF(.type __aria_aesni_avx2_ctr_gen_keystream_32way,@function;) +__aria_aesni_avx2_ctr_gen_keystream_32way: + /* input: + * %rdi: ctx + * %rsi: dst + * %rdx: src + * %rcx: keystream + * %r8: iv (big endian, 128bit) + */ + CFI_STARTPROC(); + + cmpb $(0x100 - 32), 15(%r8); + jbe .Lctr_byteadd; + + movq 8(%r8), %r11; + bswapq %r11; + + vbroadcasti128 .Lbswap128_mask rRIP, %ymm6; + vpcmpeqd %ymm0, %ymm0, %ymm0; + vpsrldq $8, %ymm0, %ymm0; /* ab: -1:0 ; cd: -1:0 */ + vpaddq %ymm0, %ymm0, %ymm5; /* ab: -2:0 ; cd: -2:0 */ + + /* load IV and byteswap */ + vmovdqu (%r8), %xmm7; + vpshufb %xmm6, %xmm7, %xmm7; + vmovdqa %xmm7, %xmm3; + inc_le128(%xmm7, %xmm0, %xmm4); + vinserti128 $1, %xmm7, %ymm3, %ymm3; + vpshufb %ymm6, %ymm3, %ymm8; /* +1 ; +0 */ + + /* check need for handling 64-bit overflow and carry */ + cmpq $(0xffffffffffffffff - 32), %r11; + ja 
.Lhandle_ctr_carry; + + /* construct IVs */ + vpsubq %ymm5, %ymm3, %ymm3; /* +3 ; +2 */ + vpshufb %ymm6, %ymm3, %ymm9; + vpsubq %ymm5, %ymm3, %ymm3; /* +5 ; +4 */ + vpshufb %ymm6, %ymm3, %ymm10; + vpsubq %ymm5, %ymm3, %ymm3; /* +7 ; +6 */ + vpshufb %ymm6, %ymm3, %ymm11; + vpsubq %ymm5, %ymm3, %ymm3; /* +9 ; +8 */ + vpshufb %ymm6, %ymm3, %ymm12; + vpsubq %ymm5, %ymm3, %ymm3; /* +11 ; +10 */ + vpshufb %ymm6, %ymm3, %ymm13; + vpsubq %ymm5, %ymm3, %ymm3; /* +13 ; +12 */ + vpshufb %ymm6, %ymm3, %ymm14; + vpsubq %ymm5, %ymm3, %ymm3; /* +15 ; +14 */ + vpshufb %ymm6, %ymm3, %ymm15; + vmovdqu %ymm8, (0 * 32)(%rcx); + vmovdqu %ymm9, (1 * 32)(%rcx); + vmovdqu %ymm10, (2 * 32)(%rcx); + vmovdqu %ymm11, (3 * 32)(%rcx); + vmovdqu %ymm12, (4 * 32)(%rcx); + vmovdqu %ymm13, (5 * 32)(%rcx); + vmovdqu %ymm14, (6 * 32)(%rcx); + vmovdqu %ymm15, (7 * 32)(%rcx); + + vpsubq %ymm5, %ymm3, %ymm3; /* +17 ; +16 */ + vpshufb %ymm6, %ymm3, %ymm8; + vpsubq %ymm5, %ymm3, %ymm3; /* +19 ; +18 */ + vpshufb %ymm6, %ymm3, %ymm9; + vpsubq %ymm5, %ymm3, %ymm3; /* +21 ; +20 */ + vpshufb %ymm6, %ymm3, %ymm10; + vpsubq %ymm5, %ymm3, %ymm3; /* +23 ; +22 */ + vpshufb %ymm6, %ymm3, %ymm11; + vpsubq %ymm5, %ymm3, %ymm3; /* +25 ; +24 */ + vpshufb %ymm6, %ymm3, %ymm12; + vpsubq %ymm5, %ymm3, %ymm3; /* +27 ; +26 */ + vpshufb %ymm6, %ymm3, %ymm13; + vpsubq %ymm5, %ymm3, %ymm3; /* +29 ; +28 */ + vpshufb %ymm6, %ymm3, %ymm14; + vpsubq %ymm5, %ymm3, %ymm3; /* +31 ; +30 */ + vpshufb %ymm6, %ymm3, %ymm15; + vpsubq %ymm5, %ymm3, %ymm3; /* +32 */ + vpshufb %xmm6, %xmm3, %xmm3; + vmovdqu %xmm3, (%r8); + vmovdqu (0 * 32)(%rcx), %ymm0; + vmovdqu (1 * 32)(%rcx), %ymm1; + vmovdqu (2 * 32)(%rcx), %ymm2; + vmovdqu (3 * 32)(%rcx), %ymm3; + vmovdqu (4 * 32)(%rcx), %ymm4; + vmovdqu (5 * 32)(%rcx), %ymm5; + vmovdqu (6 * 32)(%rcx), %ymm6; + vmovdqu (7 * 32)(%rcx), %ymm7; + jmp .Lctr_carry_done; + + .Lhandle_ctr_carry: + /* construct IVs */ + inc_le128(%ymm3, %ymm0, %ymm4); + inc_le128(%ymm3, %ymm0, %ymm4); + vpshufb %ymm6, %ymm3, %ymm9; /* +3 ; +2 */ + inc_le128(%ymm3, %ymm0, %ymm4); + inc_le128(%ymm3, %ymm0, %ymm4); + vpshufb %ymm6, %ymm3, %ymm10; /* +5 ; +4 */ + inc_le128(%ymm3, %ymm0, %ymm4); + inc_le128(%ymm3, %ymm0, %ymm4); + vpshufb %ymm6, %ymm3, %ymm11; /* +7 ; +6 */ + inc_le128(%ymm3, %ymm0, %ymm4); + inc_le128(%ymm3, %ymm0, %ymm4); + vpshufb %ymm6, %ymm3, %ymm12; /* +9 ; +8 */ + inc_le128(%ymm3, %ymm0, %ymm4); + inc_le128(%ymm3, %ymm0, %ymm4); + vpshufb %ymm6, %ymm3, %ymm13; /* +11 ; +10 */ + inc_le128(%ymm3, %ymm0, %ymm4); + inc_le128(%ymm3, %ymm0, %ymm4); + vpshufb %ymm6, %ymm3, %ymm14; /* +13 ; +12 */ + inc_le128(%ymm3, %ymm0, %ymm4); + inc_le128(%ymm3, %ymm0, %ymm4); + vpshufb %ymm6, %ymm3, %ymm15; /* +15 ; +14 */ + vmovdqu %ymm8, (0 * 32)(%rcx); + vmovdqu %ymm9, (1 * 32)(%rcx); + vmovdqu %ymm10, (2 * 32)(%rcx); + vmovdqu %ymm11, (3 * 32)(%rcx); + vmovdqu %ymm12, (4 * 32)(%rcx); + vmovdqu %ymm13, (5 * 32)(%rcx); + vmovdqu %ymm14, (6 * 32)(%rcx); + vmovdqu %ymm15, (7 * 32)(%rcx); + + inc_le128(%ymm3, %ymm0, %ymm4); + inc_le128(%ymm3, %ymm0, %ymm4); + vpshufb %ymm6, %ymm3, %ymm8; /* +17 ; +16 */ + inc_le128(%ymm3, %ymm0, %ymm4); + inc_le128(%ymm3, %ymm0, %ymm4); + vpshufb %ymm6, %ymm3, %ymm9; /* +19 ; +18 */ + inc_le128(%ymm3, %ymm0, %ymm4); + inc_le128(%ymm3, %ymm0, %ymm4); + vpshufb %ymm6, %ymm3, %ymm10; /* +21 ; +20 */ + inc_le128(%ymm3, %ymm0, %ymm4); + inc_le128(%ymm3, %ymm0, %ymm4); + vpshufb %ymm6, %ymm3, %ymm11; /* +23 ; +22 */ + inc_le128(%ymm3, %ymm0, %ymm4); + inc_le128(%ymm3, %ymm0, %ymm4); + vpshufb %ymm6, %ymm3, %ymm12; /* +25 ; +24 
*/ + inc_le128(%ymm3, %ymm0, %ymm4); + inc_le128(%ymm3, %ymm0, %ymm4); + vpshufb %ymm6, %ymm3, %ymm13; /* +27 ; +26 */ + inc_le128(%ymm3, %ymm0, %ymm4); + inc_le128(%ymm3, %ymm0, %ymm4); + vpshufb %ymm6, %ymm3, %ymm14; /* +29 ; +28 */ + inc_le128(%ymm3, %ymm0, %ymm4); + inc_le128(%ymm3, %ymm0, %ymm4); + vpshufb %ymm6, %ymm3, %ymm15; /* +31 ; +30 */ + inc_le128(%ymm3, %ymm0, %ymm4); + vextracti128 $1, %ymm3, %xmm3; + vpshufb %xmm6, %xmm3, %xmm3; /* +32 */ + vmovdqu %xmm3, (%r8); + vmovdqu (0 * 32)(%rcx), %ymm0; + vmovdqu (1 * 32)(%rcx), %ymm1; + vmovdqu (2 * 32)(%rcx), %ymm2; + vmovdqu (3 * 32)(%rcx), %ymm3; + vmovdqu (4 * 32)(%rcx), %ymm4; + vmovdqu (5 * 32)(%rcx), %ymm5; + vmovdqu (6 * 32)(%rcx), %ymm6; + vmovdqu (7 * 32)(%rcx), %ymm7; + +.Lctr_carry_done: + ret_spec_stop; + +.align 8 +.Lctr_byteadd_full_ctr_carry: + addb $32, 15(%r8); + pushq %rcx; + movl $14, %ecx; + 1: + adcb $0, (%r8, %rcx); + jnc 2f; + loop 1b; + 2: + popq %rcx; + jmp .Lctr_byteadd_ymm; +.align 8 +.Lctr_byteadd: + vbroadcasti128 (%r8), %ymm8; + je .Lctr_byteadd_full_ctr_carry; + addb $32, 15(%r8); +.Lctr_byteadd_ymm: + vpaddb .Lbige_addb_16_16 rRIP, %ymm8, %ymm15; + vpaddb .Lbige_addb_0_1 rRIP, %ymm8, %ymm0; + vpaddb .Lbige_addb_2_3 rRIP, %ymm8, %ymm1; + vpaddb .Lbige_addb_4_5 rRIP, %ymm8, %ymm2; + vpaddb .Lbige_addb_6_7 rRIP, %ymm8, %ymm3; + vpaddb .Lbige_addb_8_9 rRIP, %ymm8, %ymm4; + vpaddb .Lbige_addb_10_11 rRIP, %ymm8, %ymm5; + vpaddb .Lbige_addb_12_13 rRIP, %ymm8, %ymm6; + vpaddb .Lbige_addb_14_15 rRIP, %ymm8, %ymm7; + vpaddb .Lbige_addb_0_1 rRIP, %ymm15, %ymm8; + vpaddb .Lbige_addb_2_3 rRIP, %ymm15, %ymm9; + vpaddb .Lbige_addb_4_5 rRIP, %ymm15, %ymm10; + vpaddb .Lbige_addb_6_7 rRIP, %ymm15, %ymm11; + vpaddb .Lbige_addb_8_9 rRIP, %ymm15, %ymm12; + vpaddb .Lbige_addb_10_11 rRIP, %ymm15, %ymm13; + vpaddb .Lbige_addb_12_13 rRIP, %ymm15, %ymm14; + vpaddb .Lbige_addb_14_15 rRIP, %ymm15, %ymm15; + + ret_spec_stop; + CFI_ENDPROC(); +ELF(.size __aria_aesni_avx2_ctr_gen_keystream_32way, + .-__aria_aesni_avx2_ctr_gen_keystream_32way;) + +.align 16 +.globl _gcry_aria_aesni_avx2_ctr_crypt_blk32 +ELF(.type _gcry_aria_aesni_avx2_ctr_crypt_blk32,@function;) +_gcry_aria_aesni_avx2_ctr_crypt_blk32: + /* input: + * %rdi: ctx + * %rsi: dst + * %rdx: src + * %rcx: iv (big endian, 128bit) + */ + CFI_STARTPROC(); + + pushq %rbp; + CFI_PUSH(%rbp); + movq %rsp, %rbp; + CFI_DEF_CFA_REGISTER(%rbp); + + subq $(16 * 32), %rsp; + andq $~31, %rsp; + + movq %rcx, %r8; /* %r8: iv */ + movq %rsp, %rcx; /* %rcx: keystream */ + call __aria_aesni_avx2_ctr_gen_keystream_32way; + + pushq %rsi; + movq %rdx, %r11; + movq %rcx, %rsi; /* use stack for temporary store */ + movq %rcx, %rdx; + leaq ARIA_CTX_enc_key(CTX), %r9; + + call __aria_aesni_avx2_crypt_32way; + + popq %rsi; + vpxor (0 * 32)(%r11), %ymm1, %ymm1; + vpxor (1 * 32)(%r11), %ymm0, %ymm0; + vpxor (2 * 32)(%r11), %ymm3, %ymm3; + vpxor (3 * 32)(%r11), %ymm2, %ymm2; + vpxor (4 * 32)(%r11), %ymm4, %ymm4; + vpxor (5 * 32)(%r11), %ymm5, %ymm5; + vpxor (6 * 32)(%r11), %ymm6, %ymm6; + vpxor (7 * 32)(%r11), %ymm7, %ymm7; + vpxor (8 * 32)(%r11), %ymm8, %ymm8; + vpxor (9 * 32)(%r11), %ymm9, %ymm9; + vpxor (10 * 32)(%r11), %ymm10, %ymm10; + vpxor (11 * 32)(%r11), %ymm11, %ymm11; + vpxor (12 * 32)(%r11), %ymm12, %ymm12; + vpxor (13 * 32)(%r11), %ymm13, %ymm13; + vpxor (14 * 32)(%r11), %ymm14, %ymm14; + vpxor (15 * 32)(%r11), %ymm15, %ymm15; + write_output(%ymm1, %ymm0, %ymm3, %ymm2, %ymm4, %ymm5, %ymm6, %ymm7, + %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, + %ymm15, %rsi); + + movl 
$STACK_DEPTH, %eax; + leave; + CFI_LEAVE(); + vzeroall; + ret_spec_stop; + CFI_ENDPROC(); +ELF(.size _gcry_aria_aesni_avx2_ctr_crypt_blk32, + .-_gcry_aria_aesni_avx2_ctr_crypt_blk32;) + +#ifdef CONFIG_AS_GFNI +.align 16 +ELF(.type __aria_gfni_avx2_crypt_32way,@function;) +__aria_gfni_avx2_crypt_32way: + /* input: + * %r9: rk + * %rsi: dst + * %rdx: src + * %ymm0..%ymm15: byte-sliced blocks + */ + CFI_STARTPROC(); + + movq %rsi, %rax; + leaq 8 * 32(%rax), %r8; + + movl ARIA_CTX_rounds(CTX), %r10d; + subl $2, %r10d; + + inpack16_post(%ymm0, %ymm1, %ymm2, %ymm3, + %ymm4, %ymm5, %ymm6, %ymm7, + %ymm8, %ymm9, %ymm10, %ymm11, + %ymm12, %ymm13, %ymm14, + %ymm15, %rax, %r8); + aria_fo_gfni(%ymm8, %ymm9, %ymm10, %ymm11, + %ymm12, %ymm13, %ymm14, %ymm15, + %ymm0, %ymm1, %ymm2, %ymm3, + %ymm4, %ymm5, %ymm6, %ymm7, + %rax, %r9, 0); + leaq 1*16(%r9), %r9; + +.align 16 +.Loop_gfni: + aria_fe_gfni(%ymm1, %ymm0, %ymm3, %ymm2, + %ymm4, %ymm5, %ymm6, %ymm7, + %ymm8, %ymm9, %ymm10, %ymm11, + %ymm12, %ymm13, %ymm14, + %ymm15, %rax, %r9, 0); + aria_fo_gfni(%ymm9, %ymm8, %ymm11, %ymm10, + %ymm12, %ymm13, %ymm14, %ymm15, + %ymm0, %ymm1, %ymm2, %ymm3, + %ymm4, %ymm5, %ymm6, %ymm7, + %rax, %r9, 1); + leaq 2*16(%r9), %r9; + subl $2, %r10d; + jnz .Loop_gfni; + + aria_ff_gfni(%ymm1, %ymm0, %ymm3, %ymm2, %ymm4, %ymm5, %ymm6, %ymm7, + %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, + %ymm15, %rax, %r9, 0, 1); + + debyteslice_16x16b(%ymm8, %ymm12, %ymm1, %ymm4, + %ymm9, %ymm13, %ymm0, %ymm5, + %ymm10, %ymm14, %ymm3, %ymm6, + %ymm11, %ymm15, %ymm2, %ymm7, + (%rax), (%r8)); + + ret_spec_stop; + CFI_ENDPROC(); +ELF(.size __aria_gfni_avx2_crypt_32way,.-__aria_gfni_avx2_crypt_32way;) + +.align 16 +.globl _gcry_aria_gfni_avx2_ecb_crypt_blk32 +ELF(.type _gcry_aria_gfni_avx2_ecb_crypt_blk32,@function;) +_gcry_aria_gfni_avx2_ecb_crypt_blk32: + /* input: + * %rdi: ctx, CTX + * %rsi: dst + * %rdx: src + * %rcx: round keys + */ + CFI_STARTPROC(); + + pushq %rbp; + CFI_PUSH(%rbp); + movq %rsp, %rbp; + CFI_DEF_CFA_REGISTER(%rbp); + + subq $(16 * 32), %rsp; + andq $~31, %rsp; + + movq %rcx, %r9; + movq %rsi, %r11; + movq %rsp, %rsi; /* use stack for temporary store */ + + inpack16_pre(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7, + %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, + %ymm15, %rdx); + + call __aria_gfni_avx2_crypt_32way; + + write_output(%ymm1, %ymm0, %ymm3, %ymm2, %ymm4, %ymm5, %ymm6, %ymm7, + %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, + %ymm15, %r11); + + movl $STACK_DEPTH, %eax; + leave; + CFI_LEAVE(); + vzeroall; + ret_spec_stop; + CFI_ENDPROC(); +ELF(.size _gcry_aria_gfni_avx2_ecb_crypt_blk32, + .-_gcry_aria_gfni_avx2_ecb_crypt_blk32;) + +.align 16 +.globl _gcry_aria_gfni_avx2_ctr_crypt_blk32 +ELF(.type _gcry_aria_gfni_avx2_ctr_crypt_blk32,@function;) +_gcry_aria_gfni_avx2_ctr_crypt_blk32: + /* input: + * %rdi: ctx + * %rsi: dst + * %rdx: src + * %rcx: iv (big endian, 128bit) + */ + CFI_STARTPROC(); + + pushq %rbp; + CFI_PUSH(%rbp); + movq %rsp, %rbp; + CFI_DEF_CFA_REGISTER(%rbp); + + subq $(16 * 32), %rsp; + andq $~31, %rsp; + + movq %rcx, %r8; /* %r8: iv */ + movq %rsp, %rcx; /* %rcx: keystream */ + call __aria_aesni_avx2_ctr_gen_keystream_32way; + + pushq %rsi; + movq %rdx, %r11; + movq %rcx, %rsi; /* use stack for temporary store */ + movq %rcx, %rdx; + leaq ARIA_CTX_enc_key(CTX), %r9; + + call __aria_gfni_avx2_crypt_32way; + + popq %rsi; + vpxor (0 * 32)(%r11), %ymm1, %ymm1; + vpxor (1 * 32)(%r11), %ymm0, %ymm0; + vpxor (2 * 32)(%r11), %ymm3, %ymm3; + vpxor (3 * 32)(%r11), %ymm2, 
%ymm2; + vpxor (4 * 32)(%r11), %ymm4, %ymm4; + vpxor (5 * 32)(%r11), %ymm5, %ymm5; + vpxor (6 * 32)(%r11), %ymm6, %ymm6; + vpxor (7 * 32)(%r11), %ymm7, %ymm7; + vpxor (8 * 32)(%r11), %ymm8, %ymm8; + vpxor (9 * 32)(%r11), %ymm9, %ymm9; + vpxor (10 * 32)(%r11), %ymm10, %ymm10; + vpxor (11 * 32)(%r11), %ymm11, %ymm11; + vpxor (12 * 32)(%r11), %ymm12, %ymm12; + vpxor (13 * 32)(%r11), %ymm13, %ymm13; + vpxor (14 * 32)(%r11), %ymm14, %ymm14; + vpxor (15 * 32)(%r11), %ymm15, %ymm15; + write_output(%ymm1, %ymm0, %ymm3, %ymm2, %ymm4, %ymm5, %ymm6, %ymm7, + %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, + %ymm15, %rsi); + + movl $STACK_DEPTH, %eax; + leave; + CFI_LEAVE(); + vzeroall; + ret_spec_stop; + CFI_ENDPROC(); +ELF(.size _gcry_aria_gfni_avx2_ctr_crypt_blk32, + .-_gcry_aria_gfni_avx2_ctr_crypt_blk32;) +#endif /* CONFIG_AS_GFNI */ + +#endif /* ENABLE_AVX2_SUPPORT && ENABLE_AESNI_SUPPORT */ +#endif /* __x86_64 */ diff --git a/cipher/aria.c b/cipher/aria.c index 700ea409..18952d04 100644 --- a/cipher/aria.c +++ b/cipher/aria.c @@ -1,1403 +1,1652 @@ /* aria.c - ARIA Cipher Algorithm * * Copyright (C) 2022-2023 Taehee Yoo * Copyright (C) 2023 Jussi Kivilinna * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see <https://www.gnu.org/licenses/>. */ #include <config.h> #include "types.h" #include "g10lib.h" #include "cipher.h" #include "bufhelp.h" #include "cipher-internal.h" #include "bulkhelp.h" /* Attribute macro to force alignment to 64 bytes. */ #ifdef HAVE_GCC_ATTRIBUTE_ALIGNED # define ATTR_ALIGNED_64 __attribute__ ((aligned (64))) #else # define ATTR_ALIGNED_64 #endif /* Attribute macro to force inlining of function. */ #if __GNUC__ >= 4 # define ALWAYS_INLINE inline __attribute__ ((always_inline)) #else # define ALWAYS_INLINE inline #endif /* Attribute macro to prevent inlining of function. */ #if __GNUC__ >= 4 # define NO_INLINE __attribute__ ((noinline)) #else # define NO_INLINE #endif +/* USE_AESNI_AVX indicates whether to compile with Intel AES-NI/AVX code. */ +#undef USE_AESNI_AVX +#if defined(ENABLE_AESNI_SUPPORT) && defined(ENABLE_AVX_SUPPORT) +# if defined(__x86_64__) && (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ + defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) +# define USE_AESNI_AVX 1 +# endif +#endif + +/* USE_GFNI_AVX indicates whether to compile with Intel GFNI/AVX code. */ +#undef USE_GFNI_AVX +#if defined(USE_AESNI_AVX) && defined(ENABLE_GFNI_SUPPORT) +# define USE_GFNI_AVX 1 +#endif + +/* USE_AESNI_AVX2 indicates whether to compile with Intel AES-NI/AVX2 code. */ +#undef USE_AESNI_AVX2 +#if defined(ENABLE_AESNI_SUPPORT) && defined(ENABLE_AVX2_SUPPORT) +# if defined(__x86_64__) && (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ + defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) +# define USE_AESNI_AVX2 1 +# endif +#endif + +/* USE_GFNI_AVX2 indicates whether to compile with Intel GFNI/AVX2 code.
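+   (The GFNI variants are assembled into the same AVX2 module as the
+   AES-NI code, which is why USE_GFNI_AVX2 additionally requires
+   USE_AESNI_AVX2.)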
*/ +#undef USE_GFNI_AVX2 +#if defined(USE_AESNI_AVX2) && defined(ENABLE_GFNI_SUPPORT) +# define USE_GFNI_AVX2 1 +#endif + +/* How many parallel blocks to handle in bulk processing functions. */ +#if defined(USE_AESNI_AVX2) +# define MAX_PARALLEL_BLKS 32 +#elif defined(USE_AESNI_AVX) +# define MAX_PARALLEL_BLKS 16 +#else +# define MAX_PARALLEL_BLKS 8 +#endif + +/* Assembly implementations use SystemV ABI, ABI conversion and additional + * stack to store XMM6-XMM15 needed on Win64. */ +#undef ASM_FUNC_ABI +#undef ASM_EXTRA_STACK +#if defined(USE_AESNI_AVX) || defined(USE_AESNI_AVX2) +# ifdef HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS +# define ASM_FUNC_ABI __attribute__((sysv_abi)) +# define ASM_EXTRA_STACK (10 * 16) +# else +# define ASM_FUNC_ABI +# define ASM_EXTRA_STACK 0 +# endif +#endif + + static const char *aria_selftest (void); #define ARIA_MIN_KEY_SIZE 16 #define ARIA_MAX_KEY_SIZE 32 #define ARIA_BLOCK_SIZE 16 #define ARIA_MAX_RD_KEYS 17 #define ARIA_RD_KEY_WORDS (ARIA_BLOCK_SIZE / sizeof(u32)) typedef struct { u32 enc_key[ARIA_MAX_RD_KEYS][ARIA_RD_KEY_WORDS]; u32 dec_key[ARIA_MAX_RD_KEYS][ARIA_RD_KEY_WORDS]; int rounds; unsigned int decryption_prepared:1; /* The decryption key is set up. */ unsigned int bulk_prefetch_ready:1; /* Look-up table prefetch ready for * current bulk operation. */ + +#ifdef USE_AESNI_AVX + unsigned int use_aesni_avx:1; + unsigned int use_gfni_avx:1; +#endif +#ifdef USE_AESNI_AVX2 + unsigned int use_aesni_avx2:1; + unsigned int use_gfni_avx2:1; +#endif } ARIA_context; static const u32 key_rc[20] = { 0x517cc1b7, 0x27220a94, 0xfe13abe8, 0xfa9a6ee0, 0x6db14acc, 0x9e21c820, 0xff28b1d5, 0xef5de2b0, 0xdb92371d, 0x2126e970, 0x03249775, 0x04e8c90e, 0x517cc1b7, 0x27220a94, 0xfe13abe8, 0xfa9a6ee0, 0x6db14acc, 0x9e21c820, 0xff28b1d5, 0xef5de2b0 }; static struct { volatile u32 counter_head; u32 cacheline_align[64 / 4 - 1]; u32 s1[256]; u32 s2[256]; u32 x1[256]; u32 x2[256]; volatile u32 counter_tail; } sboxes ATTR_ALIGNED_64 = { 0, { 0, }, { /* s1 */ 0x00636363, 0x007c7c7c, 0x00777777, 0x007b7b7b, 0x00f2f2f2, 0x006b6b6b, 0x006f6f6f, 0x00c5c5c5, 0x00303030, 0x00010101, 0x00676767, 0x002b2b2b, 0x00fefefe, 0x00d7d7d7, 0x00ababab, 0x00767676, 0x00cacaca, 0x00828282, 0x00c9c9c9, 0x007d7d7d, 0x00fafafa, 0x00595959, 0x00474747, 0x00f0f0f0, 0x00adadad, 0x00d4d4d4, 0x00a2a2a2, 0x00afafaf, 0x009c9c9c, 0x00a4a4a4, 0x00727272, 0x00c0c0c0, 0x00b7b7b7, 0x00fdfdfd, 0x00939393, 0x00262626, 0x00363636, 0x003f3f3f, 0x00f7f7f7, 0x00cccccc, 0x00343434, 0x00a5a5a5, 0x00e5e5e5, 0x00f1f1f1, 0x00717171, 0x00d8d8d8, 0x00313131, 0x00151515, 0x00040404, 0x00c7c7c7, 0x00232323, 0x00c3c3c3, 0x00181818, 0x00969696, 0x00050505, 0x009a9a9a, 0x00070707, 0x00121212, 0x00808080, 0x00e2e2e2, 0x00ebebeb, 0x00272727, 0x00b2b2b2, 0x00757575, 0x00090909, 0x00838383, 0x002c2c2c, 0x001a1a1a, 0x001b1b1b, 0x006e6e6e, 0x005a5a5a, 0x00a0a0a0, 0x00525252, 0x003b3b3b, 0x00d6d6d6, 0x00b3b3b3, 0x00292929, 0x00e3e3e3, 0x002f2f2f, 0x00848484, 0x00535353, 0x00d1d1d1, 0x00000000, 0x00ededed, 0x00202020, 0x00fcfcfc, 0x00b1b1b1, 0x005b5b5b, 0x006a6a6a, 0x00cbcbcb, 0x00bebebe, 0x00393939, 0x004a4a4a, 0x004c4c4c, 0x00585858, 0x00cfcfcf, 0x00d0d0d0, 0x00efefef, 0x00aaaaaa, 0x00fbfbfb, 0x00434343, 0x004d4d4d, 0x00333333, 0x00858585, 0x00454545, 0x00f9f9f9, 0x00020202, 0x007f7f7f, 0x00505050, 0x003c3c3c, 0x009f9f9f, 0x00a8a8a8, 0x00515151, 0x00a3a3a3, 0x00404040, 0x008f8f8f, 0x00929292, 0x009d9d9d, 0x00383838, 0x00f5f5f5, 0x00bcbcbc, 0x00b6b6b6, 0x00dadada, 0x00212121, 0x00101010, 0x00ffffff, 0x00f3f3f3, 0x00d2d2d2, 0x00cdcdcd, 
0x000c0c0c, 0x00131313, 0x00ececec, 0x005f5f5f, 0x00979797, 0x00444444, 0x00171717, 0x00c4c4c4, 0x00a7a7a7, 0x007e7e7e, 0x003d3d3d, 0x00646464, 0x005d5d5d, 0x00191919, 0x00737373, 0x00606060, 0x00818181, 0x004f4f4f, 0x00dcdcdc, 0x00222222, 0x002a2a2a, 0x00909090, 0x00888888, 0x00464646, 0x00eeeeee, 0x00b8b8b8, 0x00141414, 0x00dedede, 0x005e5e5e, 0x000b0b0b, 0x00dbdbdb, 0x00e0e0e0, 0x00323232, 0x003a3a3a, 0x000a0a0a, 0x00494949, 0x00060606, 0x00242424, 0x005c5c5c, 0x00c2c2c2, 0x00d3d3d3, 0x00acacac, 0x00626262, 0x00919191, 0x00959595, 0x00e4e4e4, 0x00797979, 0x00e7e7e7, 0x00c8c8c8, 0x00373737, 0x006d6d6d, 0x008d8d8d, 0x00d5d5d5, 0x004e4e4e, 0x00a9a9a9, 0x006c6c6c, 0x00565656, 0x00f4f4f4, 0x00eaeaea, 0x00656565, 0x007a7a7a, 0x00aeaeae, 0x00080808, 0x00bababa, 0x00787878, 0x00252525, 0x002e2e2e, 0x001c1c1c, 0x00a6a6a6, 0x00b4b4b4, 0x00c6c6c6, 0x00e8e8e8, 0x00dddddd, 0x00747474, 0x001f1f1f, 0x004b4b4b, 0x00bdbdbd, 0x008b8b8b, 0x008a8a8a, 0x00707070, 0x003e3e3e, 0x00b5b5b5, 0x00666666, 0x00484848, 0x00030303, 0x00f6f6f6, 0x000e0e0e, 0x00616161, 0x00353535, 0x00575757, 0x00b9b9b9, 0x00868686, 0x00c1c1c1, 0x001d1d1d, 0x009e9e9e, 0x00e1e1e1, 0x00f8f8f8, 0x00989898, 0x00111111, 0x00696969, 0x00d9d9d9, 0x008e8e8e, 0x00949494, 0x009b9b9b, 0x001e1e1e, 0x00878787, 0x00e9e9e9, 0x00cecece, 0x00555555, 0x00282828, 0x00dfdfdf, 0x008c8c8c, 0x00a1a1a1, 0x00898989, 0x000d0d0d, 0x00bfbfbf, 0x00e6e6e6, 0x00424242, 0x00686868, 0x00414141, 0x00999999, 0x002d2d2d, 0x000f0f0f, 0x00b0b0b0, 0x00545454, 0x00bbbbbb, 0x00161616 }, { /* s2 */ 0xe200e2e2, 0x4e004e4e, 0x54005454, 0xfc00fcfc, 0x94009494, 0xc200c2c2, 0x4a004a4a, 0xcc00cccc, 0x62006262, 0x0d000d0d, 0x6a006a6a, 0x46004646, 0x3c003c3c, 0x4d004d4d, 0x8b008b8b, 0xd100d1d1, 0x5e005e5e, 0xfa00fafa, 0x64006464, 0xcb00cbcb, 0xb400b4b4, 0x97009797, 0xbe00bebe, 0x2b002b2b, 0xbc00bcbc, 0x77007777, 0x2e002e2e, 0x03000303, 0xd300d3d3, 0x19001919, 0x59005959, 0xc100c1c1, 0x1d001d1d, 0x06000606, 0x41004141, 0x6b006b6b, 0x55005555, 0xf000f0f0, 0x99009999, 0x69006969, 0xea00eaea, 0x9c009c9c, 0x18001818, 0xae00aeae, 0x63006363, 0xdf00dfdf, 0xe700e7e7, 0xbb00bbbb, 0x00000000, 0x73007373, 0x66006666, 0xfb00fbfb, 0x96009696, 0x4c004c4c, 0x85008585, 0xe400e4e4, 0x3a003a3a, 0x09000909, 0x45004545, 0xaa00aaaa, 0x0f000f0f, 0xee00eeee, 0x10001010, 0xeb00ebeb, 0x2d002d2d, 0x7f007f7f, 0xf400f4f4, 0x29002929, 0xac00acac, 0xcf00cfcf, 0xad00adad, 0x91009191, 0x8d008d8d, 0x78007878, 0xc800c8c8, 0x95009595, 0xf900f9f9, 0x2f002f2f, 0xce00cece, 0xcd00cdcd, 0x08000808, 0x7a007a7a, 0x88008888, 0x38003838, 0x5c005c5c, 0x83008383, 0x2a002a2a, 0x28002828, 0x47004747, 0xdb00dbdb, 0xb800b8b8, 0xc700c7c7, 0x93009393, 0xa400a4a4, 0x12001212, 0x53005353, 0xff00ffff, 0x87008787, 0x0e000e0e, 0x31003131, 0x36003636, 0x21002121, 0x58005858, 0x48004848, 0x01000101, 0x8e008e8e, 0x37003737, 0x74007474, 0x32003232, 0xca00caca, 0xe900e9e9, 0xb100b1b1, 0xb700b7b7, 0xab00abab, 0x0c000c0c, 0xd700d7d7, 0xc400c4c4, 0x56005656, 0x42004242, 0x26002626, 0x07000707, 0x98009898, 0x60006060, 0xd900d9d9, 0xb600b6b6, 0xb900b9b9, 0x11001111, 0x40004040, 0xec00ecec, 0x20002020, 0x8c008c8c, 0xbd00bdbd, 0xa000a0a0, 0xc900c9c9, 0x84008484, 0x04000404, 0x49004949, 0x23002323, 0xf100f1f1, 0x4f004f4f, 0x50005050, 0x1f001f1f, 0x13001313, 0xdc00dcdc, 0xd800d8d8, 0xc000c0c0, 0x9e009e9e, 0x57005757, 0xe300e3e3, 0xc300c3c3, 0x7b007b7b, 0x65006565, 0x3b003b3b, 0x02000202, 0x8f008f8f, 0x3e003e3e, 0xe800e8e8, 0x25002525, 0x92009292, 0xe500e5e5, 0x15001515, 0xdd00dddd, 0xfd00fdfd, 0x17001717, 0xa900a9a9, 0xbf00bfbf, 0xd400d4d4, 0x9a009a9a, 
0x7e007e7e, 0xc500c5c5, 0x39003939, 0x67006767, 0xfe00fefe, 0x76007676, 0x9d009d9d, 0x43004343, 0xa700a7a7, 0xe100e1e1, 0xd000d0d0, 0xf500f5f5, 0x68006868, 0xf200f2f2, 0x1b001b1b, 0x34003434, 0x70007070, 0x05000505, 0xa300a3a3, 0x8a008a8a, 0xd500d5d5, 0x79007979, 0x86008686, 0xa800a8a8, 0x30003030, 0xc600c6c6, 0x51005151, 0x4b004b4b, 0x1e001e1e, 0xa600a6a6, 0x27002727, 0xf600f6f6, 0x35003535, 0xd200d2d2, 0x6e006e6e, 0x24002424, 0x16001616, 0x82008282, 0x5f005f5f, 0xda00dada, 0xe600e6e6, 0x75007575, 0xa200a2a2, 0xef00efef, 0x2c002c2c, 0xb200b2b2, 0x1c001c1c, 0x9f009f9f, 0x5d005d5d, 0x6f006f6f, 0x80008080, 0x0a000a0a, 0x72007272, 0x44004444, 0x9b009b9b, 0x6c006c6c, 0x90009090, 0x0b000b0b, 0x5b005b5b, 0x33003333, 0x7d007d7d, 0x5a005a5a, 0x52005252, 0xf300f3f3, 0x61006161, 0xa100a1a1, 0xf700f7f7, 0xb000b0b0, 0xd600d6d6, 0x3f003f3f, 0x7c007c7c, 0x6d006d6d, 0xed00eded, 0x14001414, 0xe000e0e0, 0xa500a5a5, 0x3d003d3d, 0x22002222, 0xb300b3b3, 0xf800f8f8, 0x89008989, 0xde00dede, 0x71007171, 0x1a001a1a, 0xaf00afaf, 0xba00baba, 0xb500b5b5, 0x81008181 }, { /* x1 */ 0x52520052, 0x09090009, 0x6a6a006a, 0xd5d500d5, 0x30300030, 0x36360036, 0xa5a500a5, 0x38380038, 0xbfbf00bf, 0x40400040, 0xa3a300a3, 0x9e9e009e, 0x81810081, 0xf3f300f3, 0xd7d700d7, 0xfbfb00fb, 0x7c7c007c, 0xe3e300e3, 0x39390039, 0x82820082, 0x9b9b009b, 0x2f2f002f, 0xffff00ff, 0x87870087, 0x34340034, 0x8e8e008e, 0x43430043, 0x44440044, 0xc4c400c4, 0xdede00de, 0xe9e900e9, 0xcbcb00cb, 0x54540054, 0x7b7b007b, 0x94940094, 0x32320032, 0xa6a600a6, 0xc2c200c2, 0x23230023, 0x3d3d003d, 0xeeee00ee, 0x4c4c004c, 0x95950095, 0x0b0b000b, 0x42420042, 0xfafa00fa, 0xc3c300c3, 0x4e4e004e, 0x08080008, 0x2e2e002e, 0xa1a100a1, 0x66660066, 0x28280028, 0xd9d900d9, 0x24240024, 0xb2b200b2, 0x76760076, 0x5b5b005b, 0xa2a200a2, 0x49490049, 0x6d6d006d, 0x8b8b008b, 0xd1d100d1, 0x25250025, 0x72720072, 0xf8f800f8, 0xf6f600f6, 0x64640064, 0x86860086, 0x68680068, 0x98980098, 0x16160016, 0xd4d400d4, 0xa4a400a4, 0x5c5c005c, 0xcccc00cc, 0x5d5d005d, 0x65650065, 0xb6b600b6, 0x92920092, 0x6c6c006c, 0x70700070, 0x48480048, 0x50500050, 0xfdfd00fd, 0xeded00ed, 0xb9b900b9, 0xdada00da, 0x5e5e005e, 0x15150015, 0x46460046, 0x57570057, 0xa7a700a7, 0x8d8d008d, 0x9d9d009d, 0x84840084, 0x90900090, 0xd8d800d8, 0xabab00ab, 0x00000000, 0x8c8c008c, 0xbcbc00bc, 0xd3d300d3, 0x0a0a000a, 0xf7f700f7, 0xe4e400e4, 0x58580058, 0x05050005, 0xb8b800b8, 0xb3b300b3, 0x45450045, 0x06060006, 0xd0d000d0, 0x2c2c002c, 0x1e1e001e, 0x8f8f008f, 0xcaca00ca, 0x3f3f003f, 0x0f0f000f, 0x02020002, 0xc1c100c1, 0xafaf00af, 0xbdbd00bd, 0x03030003, 0x01010001, 0x13130013, 0x8a8a008a, 0x6b6b006b, 0x3a3a003a, 0x91910091, 0x11110011, 0x41410041, 0x4f4f004f, 0x67670067, 0xdcdc00dc, 0xeaea00ea, 0x97970097, 0xf2f200f2, 0xcfcf00cf, 0xcece00ce, 0xf0f000f0, 0xb4b400b4, 0xe6e600e6, 0x73730073, 0x96960096, 0xacac00ac, 0x74740074, 0x22220022, 0xe7e700e7, 0xadad00ad, 0x35350035, 0x85850085, 0xe2e200e2, 0xf9f900f9, 0x37370037, 0xe8e800e8, 0x1c1c001c, 0x75750075, 0xdfdf00df, 0x6e6e006e, 0x47470047, 0xf1f100f1, 0x1a1a001a, 0x71710071, 0x1d1d001d, 0x29290029, 0xc5c500c5, 0x89890089, 0x6f6f006f, 0xb7b700b7, 0x62620062, 0x0e0e000e, 0xaaaa00aa, 0x18180018, 0xbebe00be, 0x1b1b001b, 0xfcfc00fc, 0x56560056, 0x3e3e003e, 0x4b4b004b, 0xc6c600c6, 0xd2d200d2, 0x79790079, 0x20200020, 0x9a9a009a, 0xdbdb00db, 0xc0c000c0, 0xfefe00fe, 0x78780078, 0xcdcd00cd, 0x5a5a005a, 0xf4f400f4, 0x1f1f001f, 0xdddd00dd, 0xa8a800a8, 0x33330033, 0x88880088, 0x07070007, 0xc7c700c7, 0x31310031, 0xb1b100b1, 0x12120012, 0x10100010, 0x59590059, 0x27270027, 0x80800080, 0xecec00ec, 
0x5f5f005f, 0x60600060, 0x51510051, 0x7f7f007f, 0xa9a900a9, 0x19190019, 0xb5b500b5, 0x4a4a004a, 0x0d0d000d, 0x2d2d002d, 0xe5e500e5, 0x7a7a007a, 0x9f9f009f, 0x93930093, 0xc9c900c9, 0x9c9c009c, 0xefef00ef, 0xa0a000a0, 0xe0e000e0, 0x3b3b003b, 0x4d4d004d, 0xaeae00ae, 0x2a2a002a, 0xf5f500f5, 0xb0b000b0, 0xc8c800c8, 0xebeb00eb, 0xbbbb00bb, 0x3c3c003c, 0x83830083, 0x53530053, 0x99990099, 0x61610061, 0x17170017, 0x2b2b002b, 0x04040004, 0x7e7e007e, 0xbaba00ba, 0x77770077, 0xd6d600d6, 0x26260026, 0xe1e100e1, 0x69690069, 0x14140014, 0x63630063, 0x55550055, 0x21210021, 0x0c0c000c, 0x7d7d007d }, { /* x2 */ 0x30303000, 0x68686800, 0x99999900, 0x1b1b1b00, 0x87878700, 0xb9b9b900, 0x21212100, 0x78787800, 0x50505000, 0x39393900, 0xdbdbdb00, 0xe1e1e100, 0x72727200, 0x09090900, 0x62626200, 0x3c3c3c00, 0x3e3e3e00, 0x7e7e7e00, 0x5e5e5e00, 0x8e8e8e00, 0xf1f1f100, 0xa0a0a000, 0xcccccc00, 0xa3a3a300, 0x2a2a2a00, 0x1d1d1d00, 0xfbfbfb00, 0xb6b6b600, 0xd6d6d600, 0x20202000, 0xc4c4c400, 0x8d8d8d00, 0x81818100, 0x65656500, 0xf5f5f500, 0x89898900, 0xcbcbcb00, 0x9d9d9d00, 0x77777700, 0xc6c6c600, 0x57575700, 0x43434300, 0x56565600, 0x17171700, 0xd4d4d400, 0x40404000, 0x1a1a1a00, 0x4d4d4d00, 0xc0c0c000, 0x63636300, 0x6c6c6c00, 0xe3e3e300, 0xb7b7b700, 0xc8c8c800, 0x64646400, 0x6a6a6a00, 0x53535300, 0xaaaaaa00, 0x38383800, 0x98989800, 0x0c0c0c00, 0xf4f4f400, 0x9b9b9b00, 0xededed00, 0x7f7f7f00, 0x22222200, 0x76767600, 0xafafaf00, 0xdddddd00, 0x3a3a3a00, 0x0b0b0b00, 0x58585800, 0x67676700, 0x88888800, 0x06060600, 0xc3c3c300, 0x35353500, 0x0d0d0d00, 0x01010100, 0x8b8b8b00, 0x8c8c8c00, 0xc2c2c200, 0xe6e6e600, 0x5f5f5f00, 0x02020200, 0x24242400, 0x75757500, 0x93939300, 0x66666600, 0x1e1e1e00, 0xe5e5e500, 0xe2e2e200, 0x54545400, 0xd8d8d800, 0x10101000, 0xcecece00, 0x7a7a7a00, 0xe8e8e800, 0x08080800, 0x2c2c2c00, 0x12121200, 0x97979700, 0x32323200, 0xababab00, 0xb4b4b400, 0x27272700, 0x0a0a0a00, 0x23232300, 0xdfdfdf00, 0xefefef00, 0xcacaca00, 0xd9d9d900, 0xb8b8b800, 0xfafafa00, 0xdcdcdc00, 0x31313100, 0x6b6b6b00, 0xd1d1d100, 0xadadad00, 0x19191900, 0x49494900, 0xbdbdbd00, 0x51515100, 0x96969600, 0xeeeeee00, 0xe4e4e400, 0xa8a8a800, 0x41414100, 0xdadada00, 0xffffff00, 0xcdcdcd00, 0x55555500, 0x86868600, 0x36363600, 0xbebebe00, 0x61616100, 0x52525200, 0xf8f8f800, 0xbbbbbb00, 0x0e0e0e00, 0x82828200, 0x48484800, 0x69696900, 0x9a9a9a00, 0xe0e0e000, 0x47474700, 0x9e9e9e00, 0x5c5c5c00, 0x04040400, 0x4b4b4b00, 0x34343400, 0x15151500, 0x79797900, 0x26262600, 0xa7a7a700, 0xdedede00, 0x29292900, 0xaeaeae00, 0x92929200, 0xd7d7d700, 0x84848400, 0xe9e9e900, 0xd2d2d200, 0xbababa00, 0x5d5d5d00, 0xf3f3f300, 0xc5c5c500, 0xb0b0b000, 0xbfbfbf00, 0xa4a4a400, 0x3b3b3b00, 0x71717100, 0x44444400, 0x46464600, 0x2b2b2b00, 0xfcfcfc00, 0xebebeb00, 0x6f6f6f00, 0xd5d5d500, 0xf6f6f600, 0x14141400, 0xfefefe00, 0x7c7c7c00, 0x70707000, 0x5a5a5a00, 0x7d7d7d00, 0xfdfdfd00, 0x2f2f2f00, 0x18181800, 0x83838300, 0x16161600, 0xa5a5a500, 0x91919100, 0x1f1f1f00, 0x05050500, 0x95959500, 0x74747400, 0xa9a9a900, 0xc1c1c100, 0x5b5b5b00, 0x4a4a4a00, 0x85858500, 0x6d6d6d00, 0x13131300, 0x07070700, 0x4f4f4f00, 0x4e4e4e00, 0x45454500, 0xb2b2b200, 0x0f0f0f00, 0xc9c9c900, 0x1c1c1c00, 0xa6a6a600, 0xbcbcbc00, 0xececec00, 0x73737300, 0x90909000, 0x7b7b7b00, 0xcfcfcf00, 0x59595900, 0x8f8f8f00, 0xa1a1a100, 0xf9f9f900, 0x2d2d2d00, 0xf2f2f200, 0xb1b1b100, 0x00000000, 0x94949400, 0x37373700, 0x9f9f9f00, 0xd0d0d000, 0x2e2e2e00, 0x9c9c9c00, 0x6e6e6e00, 0x28282800, 0x3f3f3f00, 0x80808000, 0xf0f0f000, 0x3d3d3d00, 0xd3d3d300, 0x25252500, 0x8a8a8a00, 0xb5b5b500, 0xe7e7e700, 0x42424200, 0xb3b3b300, 
0xc7c7c700, 0xeaeaea00, 0xf7f7f700, 0x4c4c4c00, 0x11111100, 0x33333300, 0x03030300, 0xa2a2a200, 0xacacac00, 0x60606000 }, 0 }; +#ifdef USE_AESNI_AVX +extern unsigned int +_gcry_aria_aesni_avx_ecb_crypt_blk1_16(const void *ctx, byte *out, + const byte *in, const void *key, + u64 nblks) ASM_FUNC_ABI; +extern unsigned int +_gcry_aria_aesni_avx_ctr_crypt_blk16(const void *ctx, byte *out, + const byte *in, byte *iv) ASM_FUNC_ABI; + +#ifdef USE_GFNI_AVX +extern unsigned int +_gcry_aria_gfni_avx_ecb_crypt_blk1_16(const void *ctx, byte *out, + const byte *in, const void *key, + u64 nblks) ASM_FUNC_ABI; +extern unsigned int +_gcry_aria_gfni_avx_ctr_crypt_blk16(const void *ctx, byte *out, + const byte *in, byte *iv) ASM_FUNC_ABI; +#endif /* USE_GFNI_AVX */ + +static inline unsigned int +aria_avx_ecb_crypt_blk1_16(const ARIA_context *ctx, byte *out, const byte *in, + const u32 key[][ARIA_RD_KEY_WORDS], size_t nblks) +{ +#ifdef USE_GFNI_AVX + if (ctx->use_gfni_avx) + return _gcry_aria_gfni_avx_ecb_crypt_blk1_16(ctx, out, in, key, nblks) + + ASM_EXTRA_STACK; + else +#endif /* USE_GFNI_AVX */ + return _gcry_aria_aesni_avx_ecb_crypt_blk1_16(ctx, out, in, key, nblks) + + ASM_EXTRA_STACK; +} + +static inline unsigned int +aria_avx_ctr_crypt_blk16(const ARIA_context *ctx, byte *out, const byte *in, + byte *iv) +{ +#ifdef USE_GFNI_AVX + if (ctx->use_gfni_avx) + return _gcry_aria_gfni_avx_ctr_crypt_blk16(ctx, out, in, iv) + + ASM_EXTRA_STACK; + else +#endif /* USE_GFNI_AVX */ + return _gcry_aria_aesni_avx_ctr_crypt_blk16(ctx, out, in, iv) + + ASM_EXTRA_STACK; +} +#endif /* USE_AESNI_AVX */ + +#ifdef USE_AESNI_AVX2 +extern unsigned int +_gcry_aria_aesni_avx2_ecb_crypt_blk32(const void *ctx, byte *out, + const byte *in, + const void *key) ASM_FUNC_ABI; +extern unsigned int +_gcry_aria_aesni_avx2_ctr_crypt_blk32(const void *ctx, byte *out, + const byte *in, byte *iv) ASM_FUNC_ABI; + +#ifdef USE_GFNI_AVX2 +extern unsigned int +_gcry_aria_gfni_avx2_ecb_crypt_blk32(const void *ctx, byte *out, + const byte *in, + const void *key) ASM_FUNC_ABI; +extern unsigned int +_gcry_aria_gfni_avx2_ctr_crypt_blk32(const void *ctx, byte *out, + const byte *in, byte *iv) ASM_FUNC_ABI; +#endif /* USE_GFNI_AVX2 */ + +static inline unsigned int +aria_avx2_ecb_crypt_blk32(const ARIA_context *ctx, byte *out, const byte *in, + const u32 key[][ARIA_RD_KEY_WORDS]) +{ +#ifdef USE_GFNI_AVX2 + if (ctx->use_gfni_avx2) + return _gcry_aria_gfni_avx2_ecb_crypt_blk32(ctx, out, in, key) + + ASM_EXTRA_STACK; + else +#endif /* USE_GFNI_AVX2 */ + return _gcry_aria_aesni_avx2_ecb_crypt_blk32(ctx, out, in, key) + + ASM_EXTRA_STACK; +} + +static inline unsigned int +aria_avx2_ctr_crypt_blk32(const ARIA_context *ctx, byte *out, const byte *in, + byte *iv) +{ +#ifdef USE_GFNI_AVX2 + if (ctx->use_gfni_avx2) + return _gcry_aria_gfni_avx2_ctr_crypt_blk32(ctx, out, in, iv) + + ASM_EXTRA_STACK; + else +#endif /* USE_GFNI_AVX2 */ + return _gcry_aria_aesni_avx2_ctr_crypt_blk32(ctx, out, in, iv) + + ASM_EXTRA_STACK; +} +#endif /* USE_AESNI_AVX2 */ + /* Prefetching for sbox tables. 
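   Pulling the whole table region into the data cache up front means the
   data-dependent look-ups that follow start from warm cache lines, which
   reduces the cache-timing signal of the S-box accesses.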
*/ static inline void prefetch_table(const volatile byte *tab, size_t len) { size_t i; for (i = 0; len - i >= 8 * 32; i += 8 * 32) { (void)tab[i + 0 * 32]; (void)tab[i + 1 * 32]; (void)tab[i + 2 * 32]; (void)tab[i + 3 * 32]; (void)tab[i + 4 * 32]; (void)tab[i + 5 * 32]; (void)tab[i + 6 * 32]; (void)tab[i + 7 * 32]; } for (; i < len; i += 32) { (void)tab[i]; } (void)tab[len - 1]; } static inline void prefetch_sboxes(void) { /* Modify counters to trigger copy-on-write and unsharing if physical pages * of look-up table are shared between processes. Modifying counters also * causes checksums for pages to change and hint same-page merging algorithm * that these pages are frequently changing. */ sboxes.counter_head++; sboxes.counter_tail++; /* Prefetch look-up tables to cache. */ prefetch_table((const void *)&sboxes, sizeof(sboxes)); } static ALWAYS_INLINE u32 rotr32(u32 v, u32 r) { return ror(v, r); } static ALWAYS_INLINE u32 bswap32(u32 v) { return _gcry_bswap32(v); } static ALWAYS_INLINE u32 get_u8(u32 x, u32 y) { return (x >> ((3 - y) * 8)) & 0xFF; } static ALWAYS_INLINE u32 make_u32(byte v0, byte v1, byte v2, byte v3) { return ((u32)v0 << 24) | ((u32)v1 << 16) | ((u32)v2 << 8) | ((u32)v3); } static ALWAYS_INLINE u32 aria_m(u32 t0) { return rotr32(t0, 8) ^ rotr32(t0 ^ rotr32(t0, 8), 16); } /* S-Box Layer 1 + M */ static ALWAYS_INLINE void aria_sbox_layer1_with_pre_diff(u32 *t0, u32 *t1, u32 *t2, u32 *t3) { *t0 = sboxes.s1[get_u8(*t0, 0)] ^ sboxes.s2[get_u8(*t0, 1)] ^ sboxes.x1[get_u8(*t0, 2)] ^ sboxes.x2[get_u8(*t0, 3)]; *t1 = sboxes.s1[get_u8(*t1, 0)] ^ sboxes.s2[get_u8(*t1, 1)] ^ sboxes.x1[get_u8(*t1, 2)] ^ sboxes.x2[get_u8(*t1, 3)]; *t2 = sboxes.s1[get_u8(*t2, 0)] ^ sboxes.s2[get_u8(*t2, 1)] ^ sboxes.x1[get_u8(*t2, 2)] ^ sboxes.x2[get_u8(*t2, 3)]; *t3 = sboxes.s1[get_u8(*t3, 0)] ^ sboxes.s2[get_u8(*t3, 1)] ^ sboxes.x1[get_u8(*t3, 2)] ^ sboxes.x2[get_u8(*t3, 3)]; } /* S-Box Layer 2 + M */ static ALWAYS_INLINE void aria_sbox_layer2_with_pre_diff(u32 *t0, u32 *t1, u32 *t2, u32 *t3) { *t0 = sboxes.x1[get_u8(*t0, 0)] ^ sboxes.x2[get_u8(*t0, 1)] ^ sboxes.s1[get_u8(*t0, 2)] ^ sboxes.s2[get_u8(*t0, 3)]; *t1 = sboxes.x1[get_u8(*t1, 0)] ^ sboxes.x2[get_u8(*t1, 1)] ^ sboxes.s1[get_u8(*t1, 2)] ^ sboxes.s2[get_u8(*t1, 3)]; *t2 = sboxes.x1[get_u8(*t2, 0)] ^ sboxes.x2[get_u8(*t2, 1)] ^ sboxes.s1[get_u8(*t2, 2)] ^ sboxes.s2[get_u8(*t2, 3)]; *t3 = sboxes.x1[get_u8(*t3, 0)] ^ sboxes.x2[get_u8(*t3, 1)] ^ sboxes.s1[get_u8(*t3, 2)] ^ sboxes.s2[get_u8(*t3, 3)]; } /* Word-level diffusion */ static ALWAYS_INLINE void aria_diff_word(u32 *t0, u32 *t1, u32 *t2, u32 *t3) { *t1 ^= *t2; *t2 ^= *t3; *t0 ^= *t1; *t3 ^= *t1; *t2 ^= *t0; *t1 ^= *t2; } /* Byte-level diffusion */ static inline void aria_diff_byte(u32 *t1, u32 *t2, u32 *t3) { *t1 = ((*t1 << 8) & 0xff00ff00) ^ ((*t1 >> 8) & 0x00ff00ff); *t2 = rotr32(*t2, 16); *t3 = bswap32(*t3); } /* Key XOR Layer */ static ALWAYS_INLINE void aria_add_round_key(u32 *rk, u32 *t0, u32 *t1, u32 *t2, u32 *t3) { *t0 ^= rk[0]; *t1 ^= rk[1]; *t2 ^= rk[2]; *t3 ^= rk[3]; } /* Odd round Substitution & Diffusion */ static ALWAYS_INLINE void aria_subst_diff_odd(u32 *t0, u32 *t1, u32 *t2, u32 *t3) { aria_sbox_layer1_with_pre_diff(t0, t1, t2, t3); aria_diff_word(t0, t1, t2, t3); aria_diff_byte(t1, t2, t3); aria_diff_word(t0, t1, t2, t3); } /* Even round Substitution & Diffusion */ static ALWAYS_INLINE void aria_subst_diff_even(u32 *t0, u32 *t1, u32 *t2, u32 *t3) { aria_sbox_layer2_with_pre_diff(t0, t1, t2, t3); aria_diff_word(t0, t1, t2, t3); aria_diff_byte(t3, t0, t1); aria_diff_word(t0, 
t1, t2, t3); } /* Last round */ static ALWAYS_INLINE void aria_last_round(u32 *t0, u32 *t1, u32 *t2, u32 *t3) { *t0 = make_u32((byte)(sboxes.x1[get_u8(*t0, 0)]), (byte)(sboxes.x2[get_u8(*t0, 1)] >> 24), (byte)(sboxes.s1[get_u8(*t0, 2)]), (byte)(sboxes.s2[get_u8(*t0, 3)])); *t1 = make_u32((byte)(sboxes.x1[get_u8(*t1, 0)]), (byte)(sboxes.x2[get_u8(*t1, 1)] >> 24), (byte)(sboxes.s1[get_u8(*t1, 2)]), (byte)(sboxes.s2[get_u8(*t1, 3)])); *t2 = make_u32((byte)(sboxes.x1[get_u8(*t2, 0)]), (byte)(sboxes.x2[get_u8(*t2, 1)] >> 24), (byte)(sboxes.s1[get_u8(*t2, 2)]), (byte)(sboxes.s2[get_u8(*t2, 3)])); *t3 = make_u32((byte)(sboxes.x1[get_u8(*t3, 0)]), (byte)(sboxes.x2[get_u8(*t3, 1)] >> 24), (byte)(sboxes.s1[get_u8(*t3, 2)]), (byte)(sboxes.s2[get_u8(*t3, 3)])); } /* Q, R Macro expanded ARIA GSRK */ static ALWAYS_INLINE void aria_gsrk(u32 *rk, u32 *x, u32 *y, u32 n) { int q = 4 - (n / 32); int r = n % 32; rk[0] = (x[0]) ^ ((y[q % 4]) >> r) ^ ((y[(q + 3) % 4]) << (32 - r)); rk[1] = (x[1]) ^ ((y[(q + 1) % 4]) >> r) ^ ((y[q % 4]) << (32 - r)); rk[2] = (x[2]) ^ ((y[(q + 2) % 4]) >> r) ^ ((y[(q + 1) % 4]) << (32 - r)); rk[3] = (x[3]) ^ ((y[(q + 3) % 4]) >> r) ^ ((y[(q + 2) % 4]) << (32 - r)); } static NO_INLINE void aria_set_encrypt_key(ARIA_context *ctx, const byte *in_key, u32 key_len) { u32 w0[4], w1[4], w2[4], w3[4]; u32 reg0, reg1, reg2, reg3; const u32 *ck; int rkidx = 0; ctx->rounds = (key_len + 32) / 4; prefetch_sboxes(); ck = &key_rc[(key_len - 16) / 2]; w0[0] = buf_get_be32(in_key + 0); w0[1] = buf_get_be32(in_key + 4); w0[2] = buf_get_be32(in_key + 8); w0[3] = buf_get_be32(in_key + 12); reg0 = w0[0] ^ ck[0]; reg1 = w0[1] ^ ck[1]; reg2 = w0[2] ^ ck[2]; reg3 = w0[3] ^ ck[3]; aria_subst_diff_odd(®0, ®1, ®2, ®3); if (key_len > 16) { w1[0] = buf_get_be32(in_key + 16); w1[1] = buf_get_be32(in_key + 20); if (key_len > 24) { w1[2] = buf_get_be32(in_key + 24); w1[3] = buf_get_be32(in_key + 28); } else { w1[2] = 0; w1[3] = 0; } } else { w1[0] = 0; w1[1] = 0; w1[2] = 0; w1[3] = 0; } w1[0] ^= reg0; w1[1] ^= reg1; w1[2] ^= reg2; w1[3] ^= reg3; reg0 = w1[0]; reg1 = w1[1]; reg2 = w1[2]; reg3 = w1[3]; reg0 ^= ck[4]; reg1 ^= ck[5]; reg2 ^= ck[6]; reg3 ^= ck[7]; aria_subst_diff_even(®0, ®1, ®2, ®3); reg0 ^= w0[0]; reg1 ^= w0[1]; reg2 ^= w0[2]; reg3 ^= w0[3]; w2[0] = reg0; w2[1] = reg1; w2[2] = reg2; w2[3] = reg3; reg0 ^= ck[8]; reg1 ^= ck[9]; reg2 ^= ck[10]; reg3 ^= ck[11]; aria_subst_diff_odd(®0, ®1, ®2, ®3); w3[0] = reg0 ^ w1[0]; w3[1] = reg1 ^ w1[1]; w3[2] = reg2 ^ w1[2]; w3[3] = reg3 ^ w1[3]; aria_gsrk(ctx->enc_key[rkidx], w0, w1, 19); rkidx++; aria_gsrk(ctx->enc_key[rkidx], w1, w2, 19); rkidx++; aria_gsrk(ctx->enc_key[rkidx], w2, w3, 19); rkidx++; aria_gsrk(ctx->enc_key[rkidx], w3, w0, 19); rkidx++; aria_gsrk(ctx->enc_key[rkidx], w0, w1, 31); rkidx++; aria_gsrk(ctx->enc_key[rkidx], w1, w2, 31); rkidx++; aria_gsrk(ctx->enc_key[rkidx], w2, w3, 31); rkidx++; aria_gsrk(ctx->enc_key[rkidx], w3, w0, 31); rkidx++; aria_gsrk(ctx->enc_key[rkidx], w0, w1, 67); rkidx++; aria_gsrk(ctx->enc_key[rkidx], w1, w2, 67); rkidx++; aria_gsrk(ctx->enc_key[rkidx], w2, w3, 67); rkidx++; aria_gsrk(ctx->enc_key[rkidx], w3, w0, 67); rkidx++; aria_gsrk(ctx->enc_key[rkidx], w0, w1, 97); if (key_len > 16) { rkidx++; aria_gsrk(ctx->enc_key[rkidx], w1, w2, 97); rkidx++; aria_gsrk(ctx->enc_key[rkidx], w2, w3, 97); if (key_len > 24) { rkidx++; aria_gsrk(ctx->enc_key[rkidx], w3, w0, 97); rkidx++; aria_gsrk(ctx->enc_key[rkidx], w0, w1, 109); } } wipememory(w0, sizeof(w0)); wipememory(w1, sizeof(w1)); wipememory(w2, sizeof(w2)); wipememory(w3, 
static void aria_set_decrypt_key(ARIA_context *ctx) { int i; for (i = 0; i < 4; i++) { ctx->dec_key[0][i] = ctx->enc_key[ctx->rounds][i]; ctx->dec_key[ctx->rounds][i] = ctx->enc_key[0][i]; } for (i = 1; i < ctx->rounds; i++) { ctx->dec_key[i][0] = aria_m(ctx->enc_key[ctx->rounds - i][0]); ctx->dec_key[i][1] = aria_m(ctx->enc_key[ctx->rounds - i][1]); ctx->dec_key[i][2] = aria_m(ctx->enc_key[ctx->rounds - i][2]); ctx->dec_key[i][3] = aria_m(ctx->enc_key[ctx->rounds - i][3]); aria_diff_word(&ctx->dec_key[i][0], &ctx->dec_key[i][1], &ctx->dec_key[i][2], &ctx->dec_key[i][3]); aria_diff_byte(&ctx->dec_key[i][1], &ctx->dec_key[i][2], &ctx->dec_key[i][3]); aria_diff_word(&ctx->dec_key[i][0], &ctx->dec_key[i][1], &ctx->dec_key[i][2], &ctx->dec_key[i][3]); } } static NO_INLINE unsigned int aria_crypt(ARIA_context *ctx, byte *out, const byte *in, u32 key[][ARIA_RD_KEY_WORDS]) { u32 reg0, reg1, reg2, reg3; int rounds = ctx->rounds; int rkidx = 0; reg0 = buf_get_be32(in + 0); reg1 = buf_get_be32(in + 4); reg2 = buf_get_be32(in + 8); reg3 = buf_get_be32(in + 12); aria_add_round_key(key[rkidx], &reg0, &reg1, &reg2, &reg3); rkidx++; while (1) { aria_subst_diff_odd(&reg0, &reg1, &reg2, &reg3); aria_add_round_key(key[rkidx], &reg0, &reg1, &reg2, &reg3); rkidx++; if (rkidx >= rounds) break; aria_subst_diff_even(&reg0, &reg1, &reg2, &reg3); aria_add_round_key(key[rkidx], &reg0, &reg1, &reg2, &reg3); rkidx++; } aria_last_round(&reg0, &reg1, &reg2, &reg3); aria_add_round_key(key[rkidx], &reg0, &reg1, &reg2, &reg3); buf_put_be32(out + 0, reg0); buf_put_be32(out + 4, reg1); buf_put_be32(out + 8, reg2); buf_put_be32(out + 12, reg3); return 4 * sizeof(void *) + 4 * sizeof(u32); /* stack burn depth */ } unsigned int aria_encrypt(void *c, byte *outbuf, const byte *inbuf) { ARIA_context *ctx = (ARIA_context *)c; prefetch_sboxes (); return aria_crypt (ctx, outbuf, inbuf, ctx->enc_key); } unsigned int aria_decrypt(void *c, byte *outbuf, const byte *inbuf) { ARIA_context *ctx = (ARIA_context *)c; if (!ctx->decryption_prepared) { aria_set_decrypt_key (ctx); ctx->decryption_prepared = 1; } prefetch_sboxes (); return aria_crypt (ctx, outbuf, inbuf, ctx->dec_key); } static unsigned int aria_crypt_2blks(ARIA_context *ctx, byte *out, const byte *in, u32 key[][ARIA_RD_KEY_WORDS]) { u32 ra0, ra1, ra2, ra3; u32 rb0, rb1, rb2, rb3; int rounds = ctx->rounds; int rkidx = 0; ra0 = buf_get_be32(in + 0); ra1 = buf_get_be32(in + 4); ra2 = buf_get_be32(in + 8); ra3 = buf_get_be32(in + 12); rb0 = buf_get_be32(in + 16); rb1 = buf_get_be32(in + 20); rb2 = buf_get_be32(in + 24); rb3 = buf_get_be32(in + 28); while (1) { aria_add_round_key(key[rkidx], &ra0, &ra1, &ra2, &ra3); aria_add_round_key(key[rkidx], &rb0, &rb1, &rb2, &rb3); rkidx++; aria_subst_diff_odd(&ra0, &ra1, &ra2, &ra3); aria_subst_diff_odd(&rb0, &rb1, &rb2, &rb3); aria_add_round_key(key[rkidx], &ra0, &ra1, &ra2, &ra3); aria_add_round_key(key[rkidx], &rb0, &rb1, &rb2, &rb3); rkidx++; if (rkidx >= rounds) break; aria_subst_diff_even(&ra0, &ra1, &ra2, &ra3); aria_subst_diff_even(&rb0, &rb1, &rb2, &rb3); } aria_last_round(&ra0, &ra1, &ra2, &ra3); aria_last_round(&rb0, &rb1, &rb2, &rb3); aria_add_round_key(key[rkidx], &ra0, &ra1, &ra2, &ra3); aria_add_round_key(key[rkidx], &rb0, &rb1, &rb2, &rb3); buf_put_be32(out + 0, ra0); buf_put_be32(out + 4, ra1); buf_put_be32(out + 8, ra2); buf_put_be32(out + 12, ra3); buf_put_be32(out + 16, rb0); buf_put_be32(out + 20, rb1); buf_put_be32(out + 24, rb2); buf_put_be32(out + 28, rb3); return 4 * sizeof(void *) + 8 * sizeof(u32); /* stack burn depth */ }
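The `/* stack burn depth */` return value is the usual Libgcrypt convention: the cipher reports an upper bound on the stack bytes that may hold key-dependent data, and the caller wipes that much stack afterwards. A minimal sketch of a call site (the wrapper name is hypothetical; aria_encrypt and _gcry_burn_stack are the real functions used elsewhere in this file):

/* Hypothetical wrapper showing the stack-burn convention. */
static void
my_encrypt_one_block (ARIA_context *ctx, byte *out, const byte *in)
{
  unsigned int burn = aria_encrypt (ctx, out, in);

  if (burn)
    _gcry_burn_stack (burn); /* overwrite stack that held sensitive data */
}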
static unsigned int aria_crypt_blocks (ARIA_context *ctx, byte *out, const byte *in, size_t num_blks, u32 key[][ARIA_RD_KEY_WORDS]) { unsigned int burn_depth = 0; - unsigned int nburn; + +#ifdef USE_AESNI_AVX2 + if (ctx->use_aesni_avx2 || ctx->use_gfni_avx2) + { + unsigned int nburn = 0; + + while (num_blks >= 32) + { + nburn = aria_avx2_ecb_crypt_blk32 (ctx, out, in, key); + in += 32 * ARIA_BLOCK_SIZE; + out += 32 * ARIA_BLOCK_SIZE; + num_blks -= 32; + } + + burn_depth = nburn > burn_depth ? nburn : burn_depth; + + if (num_blks == 0) + return burn_depth; + } +#endif /* USE_AESNI_AVX2 */ + +#ifdef USE_AESNI_AVX + if (ctx->use_aesni_avx || ctx->use_gfni_avx) + { + unsigned int nburn = 0; + + while (num_blks >= 3) + { + size_t curr_blks = num_blks < 16 ? num_blks : 16; + nburn = aria_avx_ecb_crypt_blk1_16 (ctx, out, in, key, curr_blks); + in += curr_blks * ARIA_BLOCK_SIZE; + out += curr_blks * ARIA_BLOCK_SIZE; + num_blks -= curr_blks; + } + + burn_depth = nburn > burn_depth ? nburn : burn_depth; + + if (num_blks == 0) + return burn_depth; + } +#endif /* USE_AESNI_AVX */ if (!ctx->bulk_prefetch_ready) { prefetch_sboxes(); ctx->bulk_prefetch_ready = 1; } while (num_blks >= 2) { - nburn = aria_crypt_2blks (ctx, out, in, key); + unsigned int nburn = aria_crypt_2blks (ctx, out, in, key); burn_depth = nburn > burn_depth ? nburn : burn_depth; - out += 2 * 16; - in += 2 * 16; + out += 2 * ARIA_BLOCK_SIZE; + in += 2 * ARIA_BLOCK_SIZE; num_blks -= 2; } while (num_blks) { - nburn = aria_crypt (ctx, out, in, key); + unsigned int nburn = aria_crypt (ctx, out, in, key); burn_depth = nburn > burn_depth ? nburn : burn_depth; - out += 16; - in += 16; + out += ARIA_BLOCK_SIZE; + in += ARIA_BLOCK_SIZE; num_blks--; } if (burn_depth) burn_depth += sizeof(void *) * 5; return burn_depth; } static unsigned int aria_enc_blocks (void *c, byte *out, const byte *in, size_t num_blks) { ARIA_context *ctx = (ARIA_context *)c; return aria_crypt_blocks (ctx, out, in, num_blks, ctx->enc_key); } static unsigned int aria_dec_blocks (void *c, byte *out, const byte *in, size_t num_blks) { ARIA_context *ctx = (ARIA_context *)c; return aria_crypt_blocks (ctx, out, in, num_blks, ctx->dec_key); } /* Bulk encryption of complete blocks in CTR mode. This function is only intended for the bulk encryption feature of cipher.c. CTR is expected to be of size 16. */ static void _gcry_aria_ctr_enc(void *context, unsigned char *ctr, void *outbuf_arg, const void *inbuf_arg, size_t nblocks) { ARIA_context *ctx = context; byte *outbuf = outbuf_arg; const byte *inbuf = inbuf_arg; int burn_stack_depth = 0; +#ifdef USE_AESNI_AVX2 + if (ctx->use_aesni_avx2 || ctx->use_gfni_avx2) + { + size_t nburn = 0; + + while (nblocks >= 32) + { + nburn = aria_avx2_ctr_crypt_blk32 (ctx, outbuf, inbuf, ctr); + inbuf += 32 * ARIA_BLOCK_SIZE; + outbuf += 32 * ARIA_BLOCK_SIZE; + nblocks -= 32; + } + + burn_stack_depth = nburn > burn_stack_depth ? nburn : burn_stack_depth; + } +#endif /* USE_AESNI_AVX2 */ + +#ifdef USE_AESNI_AVX + if (ctx->use_aesni_avx || ctx->use_gfni_avx) + { + size_t nburn = 0; + + while (nblocks >= 16) + { + nburn = aria_avx_ctr_crypt_blk16 (ctx, outbuf, inbuf, ctr); + inbuf += 16 * ARIA_BLOCK_SIZE; + outbuf += 16 * ARIA_BLOCK_SIZE; + nblocks -= 16; + } + + burn_stack_depth = nburn > burn_stack_depth ? nburn : burn_stack_depth; + } +#endif /* USE_AESNI_AVX */ + /* Process remaining blocks. */ if (nblocks) { - byte tmpbuf[16 * ARIA_BLOCK_SIZE]; + byte tmpbuf[MAX_PARALLEL_BLKS * ARIA_BLOCK_SIZE]; unsigned int tmp_used = ARIA_BLOCK_SIZE; - size_t nburn; + size_t nburn = 0; ctx->bulk_prefetch_ready = 0; nburn = bulk_ctr_enc_128(ctx, aria_enc_blocks, outbuf, inbuf, nblocks, ctr, tmpbuf, sizeof(tmpbuf) / ARIA_BLOCK_SIZE, &tmp_used); burn_stack_depth = nburn > burn_stack_depth ? nburn : burn_stack_depth; wipememory (tmpbuf, tmp_used); } if (burn_stack_depth) _gcry_burn_stack (burn_stack_depth); }
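Both aria_crypt_blocks and _gcry_aria_ctr_enc above follow a widest-engine-first pattern: drain 32-block batches with the AVX2/GFNI code, then medium batches with the AVX code, and leave the short tail for the generic C path. A compressed outline of that dispatch, assuming two boolean feature flags (a sketch, not the exact Libgcrypt code):

#include <stddef.h>

/* Outline of the widest-first dispatch used above; returns the tail
   block count left over for the generic C implementation. */
static size_t
dispatch_outline (size_t nblocks, int have_avx2, int have_avx)
{
  while (have_avx2 && nblocks >= 32)
    nblocks -= 32;                        /* aria_avx2_..._blk32 batch   */

  while (have_avx && nblocks >= 3)
    {
      size_t cur = nblocks < 16 ? nblocks : 16;
      nblocks -= cur;                     /* aria_avx_..._blk1_16 batch  */
    }

  return nblocks;                         /* short tail: generic C code  */
}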
/* Bulk encryption of complete blocks in CBC mode. */ static void _gcry_aria_cbc_enc (void *context, unsigned char *iv, void *outbuf_arg, const void *inbuf_arg, size_t nblocks, int cbc_mac) { ARIA_context *ctx = context; unsigned char *outbuf = outbuf_arg; const unsigned char *inbuf = inbuf_arg; unsigned char *last_iv; unsigned int burn_depth = 0; prefetch_sboxes(); last_iv = iv; for (; nblocks; nblocks--) { cipher_block_xor (outbuf, inbuf, last_iv, ARIA_BLOCK_SIZE); burn_depth = aria_crypt (ctx, outbuf, outbuf, ctx->enc_key); last_iv = outbuf; inbuf += ARIA_BLOCK_SIZE; if (!cbc_mac) outbuf += ARIA_BLOCK_SIZE; } if (last_iv != iv) cipher_block_cpy (iv, last_iv, ARIA_BLOCK_SIZE); if (burn_depth) _gcry_burn_stack (burn_depth + 4 * sizeof(void *)); } /* Bulk decryption of complete blocks in CBC mode. This function is only intended for the bulk encryption feature of cipher.c. */ static void _gcry_aria_cbc_dec(void *context, unsigned char *iv, void *outbuf_arg, const void *inbuf_arg, size_t nblocks) { ARIA_context *ctx = context; unsigned char *outbuf = outbuf_arg; const unsigned char *inbuf = inbuf_arg; int burn_stack_depth = 0; if (!ctx->decryption_prepared) { aria_set_decrypt_key (ctx); ctx->decryption_prepared = 1; } /* Process remaining blocks. */ if (nblocks) { - unsigned char tmpbuf[16 * ARIA_BLOCK_SIZE]; + unsigned char tmpbuf[MAX_PARALLEL_BLKS * ARIA_BLOCK_SIZE]; unsigned int tmp_used = ARIA_BLOCK_SIZE; size_t nburn; ctx->bulk_prefetch_ready = 0; nburn = bulk_cbc_dec_128(ctx, aria_dec_blocks, outbuf, inbuf, nblocks, iv, tmpbuf, sizeof(tmpbuf) / ARIA_BLOCK_SIZE, &tmp_used); burn_stack_depth = nburn > burn_stack_depth ? nburn : burn_stack_depth; wipememory (tmpbuf, tmp_used); } if (burn_stack_depth) _gcry_burn_stack (burn_stack_depth); } /* Bulk encryption of complete blocks in CFB mode. */ static void _gcry_aria_cfb_enc (void *context, unsigned char *iv, void *outbuf_arg, const void *inbuf_arg, size_t nblocks) { ARIA_context *ctx = context; unsigned char *outbuf = outbuf_arg; const unsigned char *inbuf = inbuf_arg; unsigned int burn_depth = 0; prefetch_sboxes(); for (; nblocks; nblocks--) { /* Encrypt the IV. */ burn_depth = aria_crypt (ctx, iv, iv, ctx->enc_key); /* XOR the input with the IV and store input into IV. */ cipher_block_xor_2dst(outbuf, iv, inbuf, ARIA_BLOCK_SIZE); outbuf += ARIA_BLOCK_SIZE; inbuf += ARIA_BLOCK_SIZE; } if (burn_depth) _gcry_burn_stack (burn_depth + 4 * sizeof(void *)); } /* Bulk decryption of complete blocks in CFB mode. This function is only intended for the bulk encryption feature of cipher.c. */ static void _gcry_aria_cfb_dec(void *context, unsigned char *iv, void *outbuf_arg, const void *inbuf_arg, size_t nblocks) { ARIA_context *ctx = context; unsigned char *outbuf = outbuf_arg; const unsigned char *inbuf = inbuf_arg; int burn_stack_depth = 0; /* Process remaining blocks.
*/ if (nblocks) { - unsigned char tmpbuf[16 * ARIA_BLOCK_SIZE]; + unsigned char tmpbuf[MAX_PARALLEL_BLKS * ARIA_BLOCK_SIZE]; unsigned int tmp_used = ARIA_BLOCK_SIZE; size_t nburn; ctx->bulk_prefetch_ready = 0; nburn = bulk_cfb_dec_128(ctx, aria_enc_blocks, outbuf, inbuf, nblocks, iv, tmpbuf, sizeof(tmpbuf) / ARIA_BLOCK_SIZE, &tmp_used); burn_stack_depth = nburn > burn_stack_depth ? nburn : burn_stack_depth; wipememory (tmpbuf, tmp_used); } if (burn_stack_depth) _gcry_burn_stack (burn_stack_depth); } /* Bulk encryption/decryption in ECB mode. */ static void _gcry_aria_ecb_crypt (void *context, void *outbuf_arg, const void *inbuf_arg, size_t nblocks, int encrypt) { ARIA_context *ctx = context; unsigned char *outbuf = outbuf_arg; const unsigned char *inbuf = inbuf_arg; int burn_stack_depth = 0; if (!encrypt && !ctx->decryption_prepared) { aria_set_decrypt_key (ctx); ctx->decryption_prepared = 1; } /* Process remaining blocks. */ if (nblocks) { - bulk_crypt_fn_t crypt_blk1_16; + bulk_crypt_fn_t crypt_blk1_n; size_t nburn; ctx->bulk_prefetch_ready = 0; - crypt_blk1_16 = encrypt ? aria_enc_blocks : aria_dec_blocks; + crypt_blk1_n = encrypt ? aria_enc_blocks : aria_dec_blocks; - nburn = bulk_ecb_crypt_128(ctx, crypt_blk1_16, - outbuf, inbuf, nblocks, 16); + nburn = bulk_ecb_crypt_128(ctx, crypt_blk1_n, + outbuf, inbuf, nblocks, MAX_PARALLEL_BLKS); burn_stack_depth = nburn > burn_stack_depth ? nburn : burn_stack_depth; } if (burn_stack_depth) _gcry_burn_stack (burn_stack_depth); } /* Bulk encryption/decryption of complete blocks in XTS mode. */ static void _gcry_aria_xts_crypt (void *context, unsigned char *tweak, void *outbuf_arg, const void *inbuf_arg, size_t nblocks, int encrypt) { ARIA_context *ctx = context; unsigned char *outbuf = outbuf_arg; const unsigned char *inbuf = inbuf_arg; int burn_stack_depth = 0; if (!encrypt && !ctx->decryption_prepared) { aria_set_decrypt_key (ctx); ctx->decryption_prepared = 1; } /* Process remaining blocks. */ if (nblocks) { - unsigned char tmpbuf[16 * ARIA_BLOCK_SIZE]; + unsigned char tmpbuf[MAX_PARALLEL_BLKS * ARIA_BLOCK_SIZE]; unsigned int tmp_used = ARIA_BLOCK_SIZE; - bulk_crypt_fn_t crypt_blk1_16; + bulk_crypt_fn_t crypt_blk1_n; size_t nburn; ctx->bulk_prefetch_ready = 0; - crypt_blk1_16 = encrypt ? aria_enc_blocks : aria_dec_blocks; + crypt_blk1_n = encrypt ? aria_enc_blocks : aria_dec_blocks; - nburn = bulk_xts_crypt_128(ctx, crypt_blk1_16, + nburn = bulk_xts_crypt_128(ctx, crypt_blk1_n, outbuf, inbuf, nblocks, tweak, tmpbuf, sizeof(tmpbuf) / ARIA_BLOCK_SIZE, &tmp_used); burn_stack_depth = nburn > burn_stack_depth ? nburn : burn_stack_depth; wipememory (tmpbuf, tmp_used); } if (burn_stack_depth) _gcry_burn_stack (burn_stack_depth); } /* Bulk encryption of complete blocks in CTR32LE mode (for GCM-SIV). */ static void _gcry_aria_ctr32le_enc(void *context, unsigned char *ctr, void *outbuf_arg, const void *inbuf_arg, size_t nblocks) { ARIA_context *ctx = context; byte *outbuf = outbuf_arg; const byte *inbuf = inbuf_arg; int burn_stack_depth = 0; /* Process remaining blocks. */ if (nblocks) { - unsigned char tmpbuf[16 * ARIA_BLOCK_SIZE]; + unsigned char tmpbuf[MAX_PARALLEL_BLKS * ARIA_BLOCK_SIZE]; unsigned int tmp_used = ARIA_BLOCK_SIZE; size_t nburn; ctx->bulk_prefetch_ready = 0; nburn = bulk_ctr32le_enc_128 (ctx, aria_enc_blocks, outbuf, inbuf, nblocks, ctr, tmpbuf, sizeof(tmpbuf) / ARIA_BLOCK_SIZE, &tmp_used); burn_stack_depth = nburn > burn_stack_depth ? 
nburn : burn_stack_depth; wipememory (tmpbuf, tmp_used); } if (burn_stack_depth) _gcry_burn_stack (burn_stack_depth); } /* Bulk encryption/decryption of complete blocks in OCB mode. */ static size_t _gcry_aria_ocb_crypt (gcry_cipher_hd_t c, void *outbuf_arg, const void *inbuf_arg, size_t nblocks, int encrypt) { ARIA_context *ctx = (void *)&c->context.c; unsigned char *outbuf = outbuf_arg; const unsigned char *inbuf = inbuf_arg; u64 blkn = c->u_mode.ocb.data_nblocks; int burn_stack_depth = 0; if (!encrypt && !ctx->decryption_prepared) { aria_set_decrypt_key (ctx); ctx->decryption_prepared = 1; } /* Process remaining blocks. */ if (nblocks) { - unsigned char tmpbuf[16 * ARIA_BLOCK_SIZE]; + unsigned char tmpbuf[MAX_PARALLEL_BLKS * ARIA_BLOCK_SIZE]; unsigned int tmp_used = ARIA_BLOCK_SIZE; - bulk_crypt_fn_t crypt_blk1_16; + bulk_crypt_fn_t crypt_blk1_n; size_t nburn; ctx->bulk_prefetch_ready = 0; - crypt_blk1_16 = encrypt ? aria_enc_blocks : aria_dec_blocks; + crypt_blk1_n = encrypt ? aria_enc_blocks : aria_dec_blocks; - nburn = bulk_ocb_crypt_128 (c, ctx, crypt_blk1_16, outbuf, inbuf, nblocks, + nburn = bulk_ocb_crypt_128 (c, ctx, crypt_blk1_n, outbuf, inbuf, nblocks, &blkn, encrypt, tmpbuf, sizeof(tmpbuf) / ARIA_BLOCK_SIZE, &tmp_used); burn_stack_depth = nburn > burn_stack_depth ? nburn : burn_stack_depth; wipememory (tmpbuf, tmp_used); } c->u_mode.ocb.data_nblocks = blkn; if (burn_stack_depth) _gcry_burn_stack (burn_stack_depth); return 0; } /* Bulk authentication of complete blocks in OCB mode. */ static size_t _gcry_aria_ocb_auth (gcry_cipher_hd_t c, const void *abuf_arg, size_t nblocks) { ARIA_context *ctx = (void *)&c->context.c; const unsigned char *abuf = abuf_arg; u64 blkn = c->u_mode.ocb.aad_nblocks; int burn_stack_depth = 0; /* Process remaining blocks. */ if (nblocks) { - unsigned char tmpbuf[16 * ARIA_BLOCK_SIZE]; + unsigned char tmpbuf[MAX_PARALLEL_BLKS * ARIA_BLOCK_SIZE]; unsigned int tmp_used = ARIA_BLOCK_SIZE; size_t nburn; ctx->bulk_prefetch_ready = 0; nburn = bulk_ocb_auth_128 (c, ctx, aria_enc_blocks, abuf, nblocks, &blkn, tmpbuf, sizeof(tmpbuf) / ARIA_BLOCK_SIZE, &tmp_used); burn_stack_depth = nburn > burn_stack_depth ? nburn : burn_stack_depth; wipememory (tmpbuf, tmp_used); } c->u_mode.ocb.aad_nblocks = blkn; if (burn_stack_depth) _gcry_burn_stack (burn_stack_depth); return 0; } static gcry_err_code_t aria_setkey(void *c, const byte *key, unsigned keylen, cipher_bulk_ops_t *bulk_ops) { ARIA_context *ctx = c; static int initialized = 0; static const char *selftest_failed = NULL; + unsigned int hwf = _gcry_get_hw_features (); + + (void)hwf; if (keylen != 16 && keylen != 24 && keylen != 32) return GPG_ERR_INV_KEYLEN; if (!initialized) { initialized = 1; selftest_failed = aria_selftest (); if (selftest_failed) log_error("%s\n", selftest_failed); } if (selftest_failed) return GPG_ERR_SELFTEST_FAILED; +#ifdef USE_AESNI_AVX2 + ctx->use_aesni_avx2 = (hwf & HWF_INTEL_AESNI) && (hwf & HWF_INTEL_AVX2); +#endif +#ifdef USE_GFNI_AVX2 + ctx->use_gfni_avx2 = (hwf & HWF_INTEL_GFNI) && (hwf & HWF_INTEL_AVX2); +#endif +#ifdef USE_AESNI_AVX + ctx->use_aesni_avx = (hwf & HWF_INTEL_AESNI) && (hwf & HWF_INTEL_AVX); +#endif +#ifdef USE_GFNI_AVX + ctx->use_gfni_avx = (hwf & HWF_INTEL_GFNI) && (hwf & HWF_INTEL_AVX); +#endif + /* Setup bulk encryption routines. 
*/ memset (bulk_ops, 0, sizeof(*bulk_ops)); bulk_ops->cbc_enc = _gcry_aria_cbc_enc; bulk_ops->cbc_dec = _gcry_aria_cbc_dec; bulk_ops->cfb_enc = _gcry_aria_cfb_enc; bulk_ops->cfb_dec = _gcry_aria_cfb_dec; bulk_ops->ctr_enc = _gcry_aria_ctr_enc; bulk_ops->ctr32le_enc = _gcry_aria_ctr32le_enc; bulk_ops->ecb_crypt = _gcry_aria_ecb_crypt; bulk_ops->xts_crypt = _gcry_aria_xts_crypt; bulk_ops->ocb_crypt = _gcry_aria_ocb_crypt; bulk_ops->ocb_auth = _gcry_aria_ocb_auth; /* Setup context and encryption key. */ ctx->decryption_prepared = 0; aria_set_encrypt_key (ctx, key, keylen); _gcry_burn_stack (3 * sizeof(void *) + 5 * 4 * sizeof(u32)); return 0; } static const char * aria_selftest (void) { ARIA_context ctx; byte scratch[16]; static const byte key[16] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f }; static const byte plaintext[16] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff, }; static const byte ciphertext[16] = { 0xd7, 0x18, 0xfb, 0xd6, 0xab, 0x64, 0x4c, 0x73, 0x9d, 0xa9, 0x5f, 0x3b, 0xe6, 0x45, 0x17, 0x78 }; memset (&ctx, 0, sizeof(ctx)); aria_set_encrypt_key (&ctx, key, 16); aria_encrypt (&ctx, scratch, plaintext); if (memcmp (scratch, ciphertext, sizeof (ciphertext))) return "ARIA test encryption failed."; aria_decrypt (&ctx, scratch, scratch); if (memcmp (scratch, plaintext, sizeof (plaintext))) return "ARIA test decryption failed."; return NULL; } static const gcry_cipher_oid_spec_t aria128_oids[] = { { "1.2.410.200046.1.1.1", GCRY_CIPHER_MODE_ECB }, { "1.2.410.200046.1.1.2", GCRY_CIPHER_MODE_CBC }, { "1.2.410.200046.1.1.3", GCRY_CIPHER_MODE_CFB }, { "1.2.410.200046.1.1.4", GCRY_CIPHER_MODE_OFB }, { "1.2.410.200046.1.1.5", GCRY_CIPHER_MODE_CTR }, { "1.2.410.200046.1.1.34", GCRY_CIPHER_MODE_GCM }, { "1.2.410.200046.1.1.37", GCRY_CIPHER_MODE_CCM }, { NULL } }; static const gcry_cipher_oid_spec_t aria192_oids[] = { { "1.2.410.200046.1.1.6", GCRY_CIPHER_MODE_ECB }, { "1.2.410.200046.1.1.7", GCRY_CIPHER_MODE_CBC }, { "1.2.410.200046.1.1.8", GCRY_CIPHER_MODE_CFB }, { "1.2.410.200046.1.1.9", GCRY_CIPHER_MODE_OFB }, { "1.2.410.200046.1.1.10", GCRY_CIPHER_MODE_CTR }, { "1.2.410.200046.1.1.35", GCRY_CIPHER_MODE_GCM }, { "1.2.410.200046.1.1.38", GCRY_CIPHER_MODE_CCM }, { NULL } }; static const gcry_cipher_oid_spec_t aria256_oids[] = { { "1.2.410.200046.1.1.11", GCRY_CIPHER_MODE_ECB }, { "1.2.410.200046.1.1.12", GCRY_CIPHER_MODE_CBC }, { "1.2.410.200046.1.1.13", GCRY_CIPHER_MODE_CFB }, { "1.2.410.200046.1.1.14", GCRY_CIPHER_MODE_OFB }, { "1.2.410.200046.1.1.15", GCRY_CIPHER_MODE_CTR }, { "1.2.410.200046.1.1.36", GCRY_CIPHER_MODE_GCM }, { "1.2.410.200046.1.1.39", GCRY_CIPHER_MODE_CCM }, { NULL } }; gcry_cipher_spec_t _gcry_cipher_spec_aria128 = { GCRY_CIPHER_ARIA128, { 0, 0 }, "ARIA128", NULL, aria128_oids, ARIA_BLOCK_SIZE, 128, sizeof(ARIA_context), aria_setkey, aria_encrypt, aria_decrypt }; gcry_cipher_spec_t _gcry_cipher_spec_aria192 = { GCRY_CIPHER_ARIA192, { 0, 0 }, "ARIA192", NULL, aria192_oids, ARIA_BLOCK_SIZE, 192, sizeof(ARIA_context), aria_setkey, aria_encrypt, aria_decrypt }; gcry_cipher_spec_t _gcry_cipher_spec_aria256 = { GCRY_CIPHER_ARIA256, { 0, 0 }, "ARIA256", NULL, aria256_oids, ARIA_BLOCK_SIZE, 256, sizeof(ARIA_context), aria_setkey, aria_encrypt, aria_decrypt };
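Once these specs are registered, ARIA is reachable through the ordinary libgcrypt API. A minimal consumer, reusing the self-test vector from aria_selftest above (a sketch with error handling reduced to the bare minimum):

#include <string.h>
#include <gcrypt.h>

int main (void)
{
  /* Key, plaintext and ciphertext taken from aria_selftest above. */
  static const unsigned char key[16] =
    { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
      0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f };
  static const unsigned char pt[16] =
    { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
      0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff };
  static const unsigned char expect[16] =
    { 0xd7, 0x18, 0xfb, 0xd6, 0xab, 0x64, 0x4c, 0x73,
      0x9d, 0xa9, 0x5f, 0x3b, 0xe6, 0x45, 0x17, 0x78 };
  unsigned char ct[16];
  gcry_cipher_hd_t hd;

  if (!gcry_check_version (NULL))
    return 1;

  if (gcry_cipher_open (&hd, GCRY_CIPHER_ARIA128, GCRY_CIPHER_MODE_ECB, 0)
      || gcry_cipher_setkey (hd, key, sizeof key)
      || gcry_cipher_encrypt (hd, ct, sizeof ct, pt, sizeof pt))
    return 1;

  gcry_cipher_close (hd);
  return memcmp (ct, expect, 16) != 0;  /* 0 on success */
}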
diff --git a/configure.ac b/configure.ac index 9163b2ed..4f983a58 100644 --- a/configure.ac +++ b/configure.ac @@ -1,3602 +1,3610 @@ # Configure.ac script for Libgcrypt # Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2006, # 2007, 2008, 2009, 2011 Free Software Foundation, Inc. # Copyright (C) 2012-2021 g10 Code GmbH # # This file is part of Libgcrypt. # # Libgcrypt is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as # published by the Free Software Foundation; either version 2.1 of # the License, or (at your option) any later version. # # Libgcrypt is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this program; if not, see . # (Process this file with autoconf to produce a configure script.) AC_REVISION($Revision$) AC_PREREQ([2.69]) min_automake_version="1.14" # To build a release you need to create a tag with the version number # (git tag -s libgcrypt-n.m.k) and run "./autogen.sh --force". Please # bump the version number immediately after the release and do another # commit and push so that the git magic is able to work. See below # for the LT versions. m4_define([mym4_package],[libgcrypt]) m4_define([mym4_major], [1]) m4_define([mym4_minor], [11]) m4_define([mym4_micro], [0]) # Below is m4 magic to extract and compute the git revision number, # the decimalized short revision number, a beta version string and a # flag indicating a development version (mym4_isbeta). Note that the # m4 processing is done by autoconf and not during the configure run. m4_define([mym4_verslist], m4_split(m4_esyscmd([./autogen.sh --find-version] \ mym4_package mym4_major mym4_minor mym4_micro),[:])) m4_define([mym4_isbeta], m4_argn(2, mym4_verslist)) m4_define([mym4_version], m4_argn(4, mym4_verslist)) m4_define([mym4_revision], m4_argn(7, mym4_verslist)) m4_define([mym4_revision_dec], m4_argn(8, mym4_verslist)) m4_esyscmd([echo ]mym4_version[>VERSION]) AC_INIT([mym4_package],[mym4_version],[https://bugs.gnupg.org]) # LT Version numbers, remember to change them just *before* a release. # NOTE NOTE - Already updated for a 1.11 series - NOTE NOTE # (Code changed: REVISION++) # (Interfaces added/removed/changed: CURRENT++, REVISION=0) # (Interfaces added: AGE++) # (Interfaces removed: AGE=0) # # (Interfaces removed: CURRENT++, AGE=0, REVISION=0) # (Interfaces added: CURRENT++, AGE++, REVISION=0) # (No interfaces changed: REVISION++) LIBGCRYPT_LT_CURRENT=25 LIBGCRYPT_LT_AGE=5 LIBGCRYPT_LT_REVISION=0 ################################################ AC_SUBST(LIBGCRYPT_LT_CURRENT) AC_SUBST(LIBGCRYPT_LT_AGE) AC_SUBST(LIBGCRYPT_LT_REVISION) # If the API is changed in an incompatible way: increment the next counter. # # 1.6: ABI and API change but the change is to most users irrelevant # and thus the API version number has not been incremented. LIBGCRYPT_CONFIG_API_VERSION=1 # If you change the required gpg-error version, please remove # unnecessary error code defines in src/gcrypt-int.h. NEED_GPG_ERROR_VERSION=1.27 AC_CONFIG_AUX_DIR([build-aux]) AC_CONFIG_SRCDIR([src/libgcrypt.vers]) AM_INIT_AUTOMAKE([serial-tests dist-bzip2]) AC_CONFIG_HEADERS([config.h]) AC_CONFIG_MACRO_DIR([m4]) AC_CONFIG_LIBOBJ_DIR([compat]) AC_CANONICAL_HOST AM_MAINTAINER_MODE AM_SILENT_RULES AC_USE_SYSTEM_EXTENSIONS AC_ARG_VAR(SYSROOT,[locate config scripts also below that directory]) AH_TOP([ #ifndef _GCRYPT_CONFIG_H_INCLUDED #define _GCRYPT_CONFIG_H_INCLUDED /* Enable gpg-error's strerror macro for W32CE.
*/ #define GPG_ERR_ENABLE_ERRNO_MACROS 1 ]) AH_BOTTOM([ #define _GCRYPT_IN_LIBGCRYPT 1 /* Add .note.gnu.property section for Intel CET in assembler sources when CET is enabled. */ #if defined(__ASSEMBLER__) && defined(__CET__) # include #endif /* If the configure check for endianness has been disabled, get it from OS macros. This is intended for making fat binary builds on OS X. */ #ifdef DISABLED_ENDIAN_CHECK # if defined(__BIG_ENDIAN__) # define WORDS_BIGENDIAN 1 # elif defined(__LITTLE_ENDIAN__) # undef WORDS_BIGENDIAN # else # error "No endianness found" # endif #endif /*DISABLED_ENDIAN_CHECK*/ /* We basically use the original Camellia source. Make sure the symbols are properly prefixed. */ #define CAMELLIA_EXT_SYM_PREFIX _gcry_ #endif /*_GCRYPT_CONFIG_H_INCLUDED*/ ]) AH_VERBATIM([_REENTRANT], [/* To allow the use of Libgcrypt in multithreaded programs we have to use special features from the library. */ #ifndef _REENTRANT # define _REENTRANT 1 #endif ]) ###################### ## Basic checks. ### (we need some results later on (e.g. $GCC) ###################### AC_PROG_MAKE_SET missing_dir=`cd $ac_aux_dir && pwd` AM_MISSING_PROG(ACLOCAL, aclocal, $missing_dir) AM_MISSING_PROG(AUTOCONF, autoconf, $missing_dir) AM_MISSING_PROG(AUTOMAKE, automake, $missing_dir) AM_MISSING_PROG(AUTOHEADER, autoheader, $missing_dir) # AM_MISSING_PROG(MAKEINFO, makeinfo, $missing_dir) AC_PROG_CC AC_PROG_CPP AM_PROG_CC_C_O AM_PROG_AS AC_SEARCH_LIBS([strerror],[cposix]) AC_PROG_INSTALL AC_PROG_AWK # Taken from mpfr-4.0.1, then modified for LDADD_FOR_TESTS_KLUDGE dnl Under Linux, make sure that the old dtags are used if LD_LIBRARY_PATH dnl is defined. The issue is that with the new dtags, LD_LIBRARY_PATH has dnl the precedence over the run path, so that if a compatible MPFR library dnl is installed in some directory from $LD_LIBRARY_PATH, then the tested dnl MPFR library will be this library instead of the MPFR library from the dnl build tree. Other OS with the same issue might be added later. dnl dnl References: dnl https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=859732 dnl http://lists.gnu.org/archive/html/libtool/2017-05/msg00000.html dnl dnl We need to check whether --disable-new-dtags is supported as alternate dnl linkers may be used (e.g., with tcc: CC=tcc LD=tcc). dnl case $host in *-*-linux*) if test -n "$LD_LIBRARY_PATH"; then saved_LDFLAGS="$LDFLAGS" LDADD_FOR_TESTS_KLUDGE="-Wl,--disable-new-dtags" LDFLAGS="$LDFLAGS $LDADD_FOR_TESTS_KLUDGE" AC_MSG_CHECKING(whether --disable-new-dtags is supported by the linker) AC_LINK_IFELSE([AC_LANG_SOURCE([[ int main (void) { return 0; } ]])], [AC_MSG_RESULT(yes (use it since LD_LIBRARY_PATH is set))], [AC_MSG_RESULT(no) LDADD_FOR_TESTS_KLUDGE="" ]) LDFLAGS="$saved_LDFLAGS" fi ;; esac AC_SUBST([LDADD_FOR_TESTS_KLUDGE]) VERSION_NUMBER=m4_esyscmd(printf "0x%02x%02x%02x" mym4_major \ mym4_minor mym4_micro) AC_SUBST(VERSION_NUMBER) # We need to compile and run a program on the build machine. AX_CC_FOR_BUILD LT_PREREQ([2.2.6]) LT_INIT([win32-dll disable-static]) LT_LANG([Windows Resource]) ########################## ## General definitions. ## ########################## # Used by libgcrypt-config LIBGCRYPT_CONFIG_LIBS="-lgcrypt" LIBGCRYPT_CONFIG_CFLAGS="" LIBGCRYPT_CONFIG_HOST="$host" # Definitions for symmetric ciphers.
available_ciphers="arcfour blowfish cast5 des aes twofish serpent rfc2268 seed" available_ciphers="$available_ciphers camellia idea salsa20 gost28147 chacha20" available_ciphers="$available_ciphers sm4 aria" enabled_ciphers="" # Definitions for public-key ciphers. available_pubkey_ciphers="dsa elgamal rsa ecc" enabled_pubkey_ciphers="" # Definitions for message digests. available_digests="crc gostr3411-94 md2 md4 md5 rmd160 sha1 sha256 sha512" available_digests="$available_digests sha3 tiger whirlpool stribog blake2" available_digests="$available_digests sm3" enabled_digests="" # Definitions for kdfs (optional ones) available_kdfs="s2k pkdf2 scrypt" enabled_kdfs="" # Definitions for random modules. available_random_modules="getentropy linux egd unix" auto_random_modules="$available_random_modules" # Supported thread backends. LIBGCRYPT_THREAD_MODULES="" # Other definitions. have_w32_system=no have_w32ce_system=no have_pthread=no # Setup some stuff depending on host. case "${host}" in *-*-mingw32*) ac_cv_have_dev_random=no have_w32_system=yes case "${host}" in *-mingw32ce*) have_w32ce_system=yes available_random_modules="w32ce" ;; *) available_random_modules="w32" ;; esac AC_DEFINE(USE_ONLY_8DOT3,1, [set this to limit filenames to the 8.3 format]) AC_DEFINE(HAVE_DRIVE_LETTERS,1, [defined if we must run on a stupid file system]) AC_DEFINE(HAVE_DOSISH_SYSTEM,1, [defined if we run on some of the PCDOS like systems (DOS, Windoze. OS/2) with special properties like no file modes]) ;; i?86-emx-os2 | i?86-*-os2*emx) # OS/2 with the EMX environment ac_cv_have_dev_random=no AC_DEFINE(HAVE_DRIVE_LETTERS) AC_DEFINE(HAVE_DOSISH_SYSTEM) ;; i?86-*-msdosdjgpp*) # DOS with the DJGPP environment ac_cv_have_dev_random=no AC_DEFINE(HAVE_DRIVE_LETTERS) AC_DEFINE(HAVE_DOSISH_SYSTEM) ;; *-*-hpux*) if test -z "$GCC" ; then CFLAGS="$CFLAGS -Ae -D_HPUX_SOURCE" fi ;; *-dec-osf4*) if test -z "$GCC" ; then # Suppress all warnings # to get rid of the unsigned/signed char mismatch warnings. CFLAGS="$CFLAGS -w" fi ;; m68k-atari-mint) ;; *-apple-darwin*) AC_DEFINE(_DARWIN_C_SOURCE, 1, Expose all libc features (__DARWIN_C_FULL).) AC_DEFINE(USE_POSIX_SPAWN_FOR_TESTS, 1, [defined if we use posix_spawn in test program]) AC_CHECK_HEADERS(spawn.h) ;; *) ;; esac if test "$have_w32_system" = yes; then AC_DEFINE(HAVE_W32_SYSTEM,1, [Defined if we run on a W32 API based system]) if test "$have_w32ce_system" = yes; then AC_DEFINE(HAVE_W32CE_SYSTEM,1,[Defined if we run on WindowsCE]) fi fi AM_CONDITIONAL(HAVE_W32_SYSTEM, test "$have_w32_system" = yes) AM_CONDITIONAL(HAVE_W32CE_SYSTEM, test "$have_w32ce_system" = yes) # A printable OS Name is sometimes useful. 
case "${host}" in *-*-mingw32ce*) PRINTABLE_OS_NAME="W32CE" ;; *-*-mingw32*) PRINTABLE_OS_NAME="W32" ;; i?86-emx-os2 | i?86-*-os2*emx ) PRINTABLE_OS_NAME="OS/2" ;; i?86-*-msdosdjgpp*) PRINTABLE_OS_NAME="MSDOS/DJGPP" ;; *-linux*) PRINTABLE_OS_NAME="GNU/Linux" ;; *) PRINTABLE_OS_NAME=`uname -s || echo "Unknown"` ;; esac NAME_OF_DEV_RANDOM="/dev/random" NAME_OF_DEV_URANDOM="/dev/urandom" AC_ARG_ENABLE(endian-check, AS_HELP_STRING([--disable-endian-check], [disable the endian check and trust the OS provided macros]), endiancheck=$enableval,endiancheck=yes) if test x"$endiancheck" = xyes ; then AC_C_BIGENDIAN else AC_DEFINE(DISABLED_ENDIAN_CHECK,1,[configure did not test for endianness]) fi AC_CHECK_SIZEOF(unsigned short, 2) AC_CHECK_SIZEOF(unsigned int, 4) AC_CHECK_SIZEOF(unsigned long, 4) AC_CHECK_SIZEOF(unsigned long long, 0) AC_CHECK_SIZEOF(unsigned __int128, 0) AC_CHECK_SIZEOF(void *, 0) AC_TYPE_UINTPTR_T if test "$ac_cv_sizeof_unsigned_short" = "0" \ || test "$ac_cv_sizeof_unsigned_int" = "0" \ || test "$ac_cv_sizeof_unsigned_long" = "0"; then AC_MSG_WARN([Hmmm, something is wrong with the sizes - using defaults]); fi # Ensure that we have UINT64_C before we bother to check for uint64_t AC_CACHE_CHECK([for UINT64_C],[gnupg_cv_uint64_c_works], AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include ]], [[uint64_t foo=UINT64_C(42);]])], gnupg_cv_uint64_c_works=yes,gnupg_cv_uint64_c_works=no)) if test "$gnupg_cv_uint64_c_works" = "yes" ; then AC_CHECK_SIZEOF(uint64_t) fi # Do we have any 64-bit data types? if test "$ac_cv_sizeof_unsigned_int" != "8" \ && test "$ac_cv_sizeof_unsigned_long" != "8" \ && test "$ac_cv_sizeof_unsigned_long_long" != "8" \ && test "$ac_cv_sizeof_uint64_t" != "8"; then AC_MSG_ERROR([[ *** *** No 64-bit integer type available. *** It is not possible to build Libgcrypt on this platform. ***]]) fi # If not specified otherwise, all available algorithms will be # included. default_ciphers="$available_ciphers" default_pubkey_ciphers="$available_pubkey_ciphers" default_digests="$available_digests" default_kdfs="$available_kdfs" # Blacklist MD2 by default default_digests=`echo $default_digests | sed -e 's/md2//g'` # Substitutions to set generated files in a Emacs buffer to read-only. AC_SUBST(emacs_local_vars_begin, ['Local Variables:']) AC_SUBST(emacs_local_vars_read_only, ['buffer-read-only: t']) AC_SUBST(emacs_local_vars_end, ['End:']) ############################ ## Command line switches. ## ############################ # Implementation of the --enable-ciphers switch. AC_ARG_ENABLE(ciphers, AS_HELP_STRING([--enable-ciphers=ciphers], [select the symmetric ciphers to include]), [enabled_ciphers=`echo $enableval | tr ',:' ' ' | tr '[A-Z]' '[a-z]'`], [enabled_ciphers=""]) if test "x$enabled_ciphers" = "x" \ -o "$enabled_ciphers" = "yes" \ -o "$enabled_ciphers" = "no"; then enabled_ciphers=$default_ciphers fi AC_MSG_CHECKING([which symmetric ciphers to include]) for cipher in $enabled_ciphers; do LIST_MEMBER($cipher, $available_ciphers) if test "$found" = "0"; then AC_MSG_ERROR([unsupported cipher "$cipher" specified]) fi done AC_MSG_RESULT([$enabled_ciphers]) # Implementation of the --enable-pubkey-ciphers switch. 
AC_ARG_ENABLE(pubkey-ciphers, AS_HELP_STRING([--enable-pubkey-ciphers=ciphers], [select the public-key ciphers to include]), [enabled_pubkey_ciphers=`echo $enableval | tr ',:' ' ' | tr '[A-Z]' '[a-z]'`], [enabled_pubkey_ciphers=""]) if test "x$enabled_pubkey_ciphers" = "x" \ -o "$enabled_pubkey_ciphers" = "yes" \ -o "$enabled_pubkey_ciphers" = "no"; then enabled_pubkey_ciphers=$default_pubkey_ciphers fi AC_MSG_CHECKING([which public-key ciphers to include]) for cipher in $enabled_pubkey_ciphers; do LIST_MEMBER($cipher, $available_pubkey_ciphers) if test "$found" = "0"; then AC_MSG_ERROR([unsupported public-key cipher specified]) fi done AC_MSG_RESULT([$enabled_pubkey_ciphers]) # Implementation of the --enable-digests switch. AC_ARG_ENABLE(digests, AS_HELP_STRING([--enable-digests=digests], [select the message digests to include]), [enabled_digests=`echo $enableval | tr ',:' ' ' | tr '[A-Z]' '[a-z]'`], [enabled_digests=""]) if test "x$enabled_digests" = "x" \ -o "$enabled_digests" = "yes" \ -o "$enabled_digests" = "no"; then enabled_digests=$default_digests fi AC_MSG_CHECKING([which message digests to include]) for digest in $enabled_digests; do LIST_MEMBER($digest, $available_digests) if test "$found" = "0"; then AC_MSG_ERROR([unsupported message digest specified]) fi done AC_MSG_RESULT([$enabled_digests]) # Implementation of the --enable-kdfs switch. AC_ARG_ENABLE(kdfs, AS_HELP_STRING([--enable-kdfs=kdfs], [select the KDFs to include]), [enabled_kdfs=`echo $enableval | tr ',:' ' ' | tr '[A-Z]' '[a-z]'`], [enabled_kdfs=""]) if test "x$enabled_kdfs" = "x" \ -o "$enabled_kdfs" = "yes" \ -o "$enabled_kdfs" = "no"; then enabled_kdfs=$default_kdfs fi AC_MSG_CHECKING([which key derivation functions to include]) for kdf in $enabled_kdfs; do LIST_MEMBER($kdf, $available_kdfs) if test "$found" = "0"; then AC_MSG_ERROR([unsupported key derivation function specified]) fi done AC_MSG_RESULT([$enabled_kdfs]) # Implementation of the --enable-random switch. AC_ARG_ENABLE(random, AS_HELP_STRING([--enable-random=name], [select which random number generator to use]), [random=`echo $enableval | tr '[A-Z]' '[a-z]'`], []) if test "x$random" = "x" -o "$random" = "yes" -o "$random" = "no"; then random=default fi AC_MSG_CHECKING([which random module to use]) if test "$random" != "default" -a "$random" != "auto"; then LIST_MEMBER($random, $available_random_modules) if test "$found" = "0"; then AC_MSG_ERROR([unsupported random module specified]) fi fi AC_MSG_RESULT($random) # Implementation of the --disable-dev-random switch. AC_MSG_CHECKING([whether use of /dev/random is requested]) AC_ARG_ENABLE(dev-random, [ --disable-dev-random disable the use of dev random], try_dev_random=$enableval, try_dev_random=yes) AC_MSG_RESULT($try_dev_random) # Implementation of the --with-egd-socket switch. AC_ARG_WITH(egd-socket, [ --with-egd-socket=NAME Use NAME for the EGD socket], egd_socket_name="$withval", egd_socket_name="" ) AC_DEFINE_UNQUOTED(EGD_SOCKET_NAME, "$egd_socket_name", [Define if you don't want the default EGD socket name. For details see cipher/rndegd.c])
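Whether an algorithm selected by the switches above actually made it into the build can be probed at run time: gcry_cipher_algo_info with GCRYCTL_TEST_ALGO returns 0 only for compiled-in, usable algorithms. A small check program (illustrative; any subset of the cipher list above works):

#include <stdio.h>
#include <gcrypt.h>

int main (void)
{
  static const int algos[] =
    { GCRY_CIPHER_AES, GCRY_CIPHER_CAMELLIA128, GCRY_CIPHER_SM4,
      GCRY_CIPHER_ARIA128, GCRY_CIPHER_CHACHA20 };
  size_t i;

  if (!gcry_check_version (NULL))
    return 1;

  for (i = 0; i < sizeof algos / sizeof algos[0]; i++)
    printf ("%-12s %s\n", gcry_cipher_algo_name (algos[i]),
            gcry_cipher_algo_info (algos[i], GCRYCTL_TEST_ALGO, NULL, NULL)
            ? "not available" : "available");
  return 0;
}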
# Implementation of --disable-asm. AC_MSG_CHECKING([whether MPI and cipher assembler modules are requested]) AC_ARG_ENABLE([asm], AS_HELP_STRING([--disable-asm], [Disable MPI and cipher assembler modules]), [try_asm_modules=$enableval], [try_asm_modules=yes]) AC_MSG_RESULT($try_asm_modules) if test "$try_asm_modules" != yes ; then AC_DEFINE(ASM_DISABLED,1,[Defined if --disable-asm was used to configure]) fi # Implementation of the --enable-large-data-tests switch. AC_MSG_CHECKING([whether to run large data tests]) AC_ARG_ENABLE(large-data-tests, AS_HELP_STRING([--enable-large-data-tests], [Enable the really long running large data tests]), large_data_tests=$enableval,large_data_tests=no) AC_MSG_RESULT($large_data_tests) AC_SUBST(RUN_LARGE_DATA_TESTS, $large_data_tests) # Implementation of --enable-force-soft-hwfeatures AC_MSG_CHECKING([whether 'soft' HW feature bits are forced on]) AC_ARG_ENABLE([force-soft-hwfeatures], AS_HELP_STRING([--enable-force-soft-hwfeatures], [Enable forcing 'soft' HW feature bits on]), [force_soft_hwfeatures=$enableval], [force_soft_hwfeatures=no]) AC_MSG_RESULT($force_soft_hwfeatures) # Implementation of the --with-capabilities switch. # Check whether we want to use Linux capabilities AC_MSG_CHECKING([whether use of capabilities is requested]) AC_ARG_WITH(capabilities, AS_HELP_STRING([--with-capabilities], [Use linux capabilities [default=no]]), [use_capabilities="$withval"],[use_capabilities=no]) AC_MSG_RESULT($use_capabilities) # Implementation of the --enable-hmac-binary-check. AC_MSG_CHECKING([whether a HMAC binary check is requested]) AC_ARG_ENABLE(hmac-binary-check, AS_HELP_STRING([--enable-hmac-binary-check], [Enable library integrity check]), [use_hmac_binary_check="$enableval"], [use_hmac_binary_check=no]) AC_MSG_RESULT($use_hmac_binary_check) if test "$use_hmac_binary_check" = no ; then DEF_HMAC_BINARY_CHECK='' else AC_DEFINE(ENABLE_HMAC_BINARY_CHECK,1, [Define to support an HMAC based integrity check]) AC_CHECK_TOOL(OBJCOPY, [objcopy]) AC_CHECK_TOOL(READELF, [readelf]) if test "$use_hmac_binary_check" != yes ; then DEF_HMAC_BINARY_CHECK=-DKEY_FOR_BINARY_CHECK="'\"$use_hmac_binary_check\"'" fi fi AM_CONDITIONAL(USE_HMAC_BINARY_CHECK, test "x$use_hmac_binary_check" != xno) AC_SUBST(DEF_HMAC_BINARY_CHECK) # Implementation of the --with-fips-module-version. AC_ARG_WITH(fips-module-version, AS_HELP_STRING([--with-fips-module-version=VERSION], [Specify the FIPS module version for the build]), fips_module_version="$withval", fips_module_version="" ) AC_DEFINE_UNQUOTED(FIPS_MODULE_VERSION, "$fips_module_version", [Define FIPS module version for certification]) # Implementation of the --disable-jent-support switch. AC_MSG_CHECKING([whether jitter entropy support is requested]) AC_ARG_ENABLE(jent-support, AS_HELP_STRING([--disable-jent-support], [Disable support for the Jitter entropy collector]), jentsupport=$enableval,jentsupport=yes) AC_MSG_RESULT($jentsupport) # Implementation of the --disable-padlock-support switch. AC_MSG_CHECKING([whether padlock support is requested]) AC_ARG_ENABLE(padlock-support, AS_HELP_STRING([--disable-padlock-support], [Disable support for the PadLock Engine of VIA processors]), padlocksupport=$enableval,padlocksupport=yes) AC_MSG_RESULT($padlocksupport) # Implementation of the --disable-aesni-support switch.
AC_MSG_CHECKING([whether AESNI support is requested]) AC_ARG_ENABLE(aesni-support, AS_HELP_STRING([--disable-aesni-support], [Disable support for the Intel AES-NI instructions]), aesnisupport=$enableval,aesnisupport=yes) AC_MSG_RESULT($aesnisupport) # Implementation of the --disable-shaext-support switch. AC_MSG_CHECKING([whether SHAEXT support is requested]) AC_ARG_ENABLE(shaext-support, AS_HELP_STRING([--disable-shaext-support], [Disable support for the Intel SHAEXT instructions]), shaextsupport=$enableval,shaextsupport=yes) AC_MSG_RESULT($shaextsupport) # Implementation of the --disable-pclmul-support switch. AC_MSG_CHECKING([whether PCLMUL support is requested]) AC_ARG_ENABLE(pclmul-support, AS_HELP_STRING([--disable-pclmul-support], [Disable support for the Intel PCLMUL instructions]), pclmulsupport=$enableval,pclmulsupport=yes) AC_MSG_RESULT($pclmulsupport) # Implementation of the --disable-sse41-support switch. AC_MSG_CHECKING([whether SSE4.1 support is requested]) AC_ARG_ENABLE(sse41-support, AS_HELP_STRING([--disable-sse41-support], [Disable support for the Intel SSE4.1 instructions]), sse41support=$enableval,sse41support=yes) AC_MSG_RESULT($sse41support) # Implementation of the --disable-drng-support switch. AC_MSG_CHECKING([whether DRNG support is requested]) AC_ARG_ENABLE(drng-support, AS_HELP_STRING([--disable-drng-support], [Disable support for the Intel DRNG (RDRAND instruction)]), drngsupport=$enableval,drngsupport=yes) AC_MSG_RESULT($drngsupport) # Implementation of the --disable-avx-support switch. AC_MSG_CHECKING([whether AVX support is requested]) AC_ARG_ENABLE(avx-support, AS_HELP_STRING([--disable-avx-support], [Disable support for the Intel AVX instructions]), avxsupport=$enableval,avxsupport=yes) AC_MSG_RESULT($avxsupport) # Implementation of the --disable-avx2-support switch. AC_MSG_CHECKING([whether AVX2 support is requested]) AC_ARG_ENABLE(avx2-support, AS_HELP_STRING([--disable-avx2-support], [Disable support for the Intel AVX2 instructions]), avx2support=$enableval,avx2support=yes) AC_MSG_RESULT($avx2support) # Implementation of the --disable-avx512-support switch. AC_MSG_CHECKING([whether AVX512 support is requested]) AC_ARG_ENABLE(avx512-support, AS_HELP_STRING([--disable-avx512-support], [Disable support for the Intel AVX512 instructions]), avx512support=$enableval,avx512support=yes) AC_MSG_RESULT($avx512support) # Implementation of the --disable-gfni-support switch. AC_MSG_CHECKING([whether GFNI support is requested]) AC_ARG_ENABLE(gfni-support, AS_HELP_STRING([--disable-gfni-support], [Disable support for the Intel GFNI instructions]), gfnisupport=$enableval,gfnisupport=yes) AC_MSG_RESULT($gfnisupport) # Implementation of the --disable-neon-support switch. AC_MSG_CHECKING([whether NEON support is requested]) AC_ARG_ENABLE(neon-support, AS_HELP_STRING([--disable-neon-support], [Disable support for the ARM NEON instructions]), neonsupport=$enableval,neonsupport=yes) AC_MSG_RESULT($neonsupport) # Implementation of the --disable-arm-crypto-support switch. AC_MSG_CHECKING([whether ARMv8 Crypto Extension support is requested]) AC_ARG_ENABLE(arm-crypto-support, AS_HELP_STRING([--disable-arm-crypto-support], [Disable support for the ARMv8 Crypto Extension instructions]), armcryptosupport=$enableval,armcryptosupport=yes) AC_MSG_RESULT($armcryptosupport) # Implementation of the --disable-sve-support switch. 
AC_MSG_CHECKING([whether SVE support is requested]) AC_ARG_ENABLE(sve-support, AS_HELP_STRING([--disable-sve-support], [Disable support for the ARMv8 SVE instructions]), svesupport=$enableval,svesupport=yes) AC_MSG_RESULT($svesupport) # Implementation of the --disable-sve2-support switch. AC_MSG_CHECKING([whether SVE2 support is requested]) AC_ARG_ENABLE(sve2-support, AS_HELP_STRING([--disable-sve2-support], [Disable support for the ARMv9 SVE2 instructions]), sve2support=$enableval,sve2support=yes) AC_MSG_RESULT($sve2support) # Implementation of the --disable-ppc-crypto-support switch. AC_MSG_CHECKING([whether PPC crypto support is requested]) AC_ARG_ENABLE(ppc-crypto-support, AS_HELP_STRING([--disable-ppc-crypto-support], [Disable support for the PPC crypto instructions introduced in POWER 8 (PowerISA 2.07)]), ppccryptosupport=$enableval,ppccryptosupport=yes) AC_MSG_RESULT($ppccryptosupport) # Implementation of the --disable-O-flag-munging switch. AC_MSG_CHECKING([whether a -O flag munging is requested]) AC_ARG_ENABLE([O-flag-munging], AS_HELP_STRING([--disable-O-flag-munging], [Disable modification of the cc -O flag]), [enable_o_flag_munging=$enableval], [enable_o_flag_munging=yes]) AC_MSG_RESULT($enable_o_flag_munging) AM_CONDITIONAL(ENABLE_O_FLAG_MUNGING, test "$enable_o_flag_munging" = "yes") # Implementation of the --disable-instrumentation-munging switch. AC_MSG_CHECKING([whether an instrumentation (-fprofile, -fsanitize) munging is requested]) AC_ARG_ENABLE([instrumentation-munging], AS_HELP_STRING([--disable-instrumentation-munging], [Disable modification of the cc instrumentation options]), [enable_instrumentation_munging=$enableval], [enable_instrumentation_munging=yes]) AC_MSG_RESULT($enable_instrumentation_munging) AM_CONDITIONAL(ENABLE_INSTRUMENTATION_MUNGING, test "$enable_instrumentation_munging" = "yes") # Implementation of the --disable-amd64-as-feature-detection switch. AC_MSG_CHECKING([whether to enable AMD64 as(1) feature detection]) AC_ARG_ENABLE(amd64-as-feature-detection, AS_HELP_STRING([--disable-amd64-as-feature-detection], [Disable the auto-detection of AMD64 as(1) features]), amd64_as_feature_detection=$enableval, amd64_as_feature_detection=yes) AC_MSG_RESULT($amd64_as_feature_detection) AC_DEFINE_UNQUOTED(PRINTABLE_OS_NAME, "$PRINTABLE_OS_NAME", [A human readable text with the name of the OS]) # For some systems we know that we have ld_version scripts. # Use it then as default. have_ld_version_script=no case "${host}" in *-*-linux*) have_ld_version_script=yes ;; *-*-gnu*) have_ld_version_script=yes ;; esac AC_ARG_ENABLE([ld-version-script], AS_HELP_STRING([--enable-ld-version-script], [enable/disable use of linker version script. (default is system dependent)]), [have_ld_version_script=$enableval], [ : ] ) AM_CONDITIONAL(HAVE_LD_VERSION_SCRIPT, test "$have_ld_version_script" = "yes") AC_DEFINE_UNQUOTED(NAME_OF_DEV_RANDOM, "$NAME_OF_DEV_RANDOM", [defined to the name of the strong random device]) AC_DEFINE_UNQUOTED(NAME_OF_DEV_URANDOM, "$NAME_OF_DEV_URANDOM", [defined to the name of the weaker random device]) ############################### #### Checks for libraries. #### ############################### # # gpg-error is required. # AM_PATH_GPG_ERROR("$NEED_GPG_ERROR_VERSION") if test "x$GPG_ERROR_LIBS" = "x"; then AC_MSG_ERROR([libgpg-error is needed.
See ftp://ftp.gnupg.org/gcrypt/libgpg-error/ .]) fi AC_DEFINE(GPG_ERR_SOURCE_DEFAULT, GPG_ERR_SOURCE_GCRYPT, [The default error source for libgcrypt.]) AM_CONDITIONAL(USE_GPGRT_CONFIG, [test -n "$GPGRT_CONFIG" \ -a "$ac_cv_path_GPG_ERROR_CONFIG" = no]) # # Check whether pthreads is available # if test "$have_w32_system" != yes; then AC_CHECK_LIB(pthread,pthread_create,have_pthread=yes) if test "$have_pthread" = yes; then AC_DEFINE(HAVE_PTHREAD, 1 ,[Define if we have pthread.]) fi fi # Solaris needs -lsocket and -lnsl. Unisys system includes # gethostbyname in libsocket but needs libnsl for socket. AC_SEARCH_LIBS(setsockopt, [socket], , [AC_SEARCH_LIBS(setsockopt, [socket], , , [-lnsl])]) AC_SEARCH_LIBS(setsockopt, [nsl]) ################################## #### Checks for header files. #### ################################## AC_CHECK_HEADERS(unistd.h sys/auxv.h sys/random.h sys/sysctl.h) ########################################## #### Checks for typedefs, structures, #### #### and compiler characteristics. #### ########################################## AC_C_CONST AC_C_INLINE AC_TYPE_SIZE_T AC_TYPE_PID_T AC_CHECK_TYPES([byte, ushort, u16, u32, u64]) # # Check for __builtin_bswap32 intrinsic. # AC_CACHE_CHECK(for __builtin_bswap32, [gcry_cv_have_builtin_bswap32], [gcry_cv_have_builtin_bswap32=no AC_LINK_IFELSE([AC_LANG_PROGRAM([], [int x = 0; int y = __builtin_bswap32(x); return y;])], [gcry_cv_have_builtin_bswap32=yes])]) if test "$gcry_cv_have_builtin_bswap32" = "yes" ; then AC_DEFINE(HAVE_BUILTIN_BSWAP32,1, [Defined if compiler has '__builtin_bswap32' intrinsic]) fi # # Check for __builtin_bswap64 intrinsic. # AC_CACHE_CHECK(for __builtin_bswap64, [gcry_cv_have_builtin_bswap64], [gcry_cv_have_builtin_bswap64=no AC_LINK_IFELSE([AC_LANG_PROGRAM([], [long long x = 0; long long y = __builtin_bswap64(x); return y;])], [gcry_cv_have_builtin_bswap64=yes])]) if test "$gcry_cv_have_builtin_bswap64" = "yes" ; then AC_DEFINE(HAVE_BUILTIN_BSWAP64,1, [Defined if compiler has '__builtin_bswap64' intrinsic]) fi # # Check for __builtin_ctz intrinsic. # AC_CACHE_CHECK(for __builtin_ctz, [gcry_cv_have_builtin_ctz], [gcry_cv_have_builtin_ctz=no AC_LINK_IFELSE([AC_LANG_PROGRAM([], [unsigned int x = 0; int y = __builtin_ctz(x); return y;])], [gcry_cv_have_builtin_ctz=yes])]) if test "$gcry_cv_have_builtin_ctz" = "yes" ; then AC_DEFINE(HAVE_BUILTIN_CTZ, 1, [Defined if compiler has '__builtin_ctz' intrinsic]) fi # # Check for __builtin_ctzl intrinsic. # AC_CACHE_CHECK(for __builtin_ctzl, [gcry_cv_have_builtin_ctzl], [gcry_cv_have_builtin_ctzl=no AC_LINK_IFELSE([AC_LANG_PROGRAM([], [unsigned long x = 0; long y = __builtin_ctzl(x); return y;])], [gcry_cv_have_builtin_ctzl=yes])]) if test "$gcry_cv_have_builtin_ctzl" = "yes" ; then AC_DEFINE(HAVE_BUILTIN_CTZL, 1, [Defined if compiler has '__builtin_ctzl' intrinsic]) fi # # Check for __builtin_clz intrinsic. # AC_CACHE_CHECK(for __builtin_clz, [gcry_cv_have_builtin_clz], [gcry_cv_have_builtin_clz=no AC_LINK_IFELSE([AC_LANG_PROGRAM([], [unsigned int x = 0; int y = __builtin_clz(x); return y;])], [gcry_cv_have_builtin_clz=yes])]) if test "$gcry_cv_have_builtin_clz" = "yes" ; then AC_DEFINE(HAVE_BUILTIN_CLZ, 1, [Defined if compiler has '__builtin_clz' intrinsic]) fi # # Check for __builtin_clzl intrinsic. # AC_CACHE_CHECK(for __builtin_clzl, [gcry_cv_have_builtin_clzl], [gcry_cv_have_builtin_clzl=no AC_LINK_IFELSE([AC_LANG_PROGRAM([], [unsigned long x = 0; long y = __builtin_clzl(x); return y;])], [gcry_cv_have_builtin_clzl=yes])]) if test "$gcry_cv_have_builtin_clzl" = "yes" ; then AC_DEFINE(HAVE_BUILTIN_CLZL, 1, [Defined if compiler has '__builtin_clzl' intrinsic]) fi # # Check for __sync_synchronize intrinsic. # AC_CACHE_CHECK(for __sync_synchronize, [gcry_cv_have_sync_synchronize], [gcry_cv_have_sync_synchronize=no AC_LINK_IFELSE([AC_LANG_PROGRAM([], [__sync_synchronize(); return 0;])], [gcry_cv_have_sync_synchronize=yes])]) if test "$gcry_cv_have_sync_synchronize" = "yes" ; then AC_DEFINE(HAVE_SYNC_SYNCHRONIZE, 1, [Defined if compiler has '__sync_synchronize' intrinsic]) fi
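Each successful probe above ends up as a HAVE_* macro in config.h, and consumer code wraps the intrinsic with a portable fallback. Roughly like this (an illustrative pattern in the spirit of Libgcrypt's bithelp.h, not a verbatim copy):

#include <config.h> /* assumed to define HAVE_BUILTIN_BSWAP32 when available */

static unsigned int
demo_bswap32 (unsigned int x)
{
#ifdef HAVE_BUILTIN_BSWAP32
  return __builtin_bswap32 (x);          /* single bswap instruction     */
#else
  return ((x << 24) & 0xff000000U)       /* portable shift/mask fallback */
       | ((x <<  8) & 0x00ff0000U)
       | ((x >>  8) & 0x0000ff00U)
       | ((x >> 24) & 0x000000ffU);
#endif
}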
# # Check for VLA support (variable length arrays). # AC_CACHE_CHECK(whether variable length arrays are supported, [gcry_cv_have_vla], [gcry_cv_have_vla=no AC_COMPILE_IFELSE([AC_LANG_SOURCE( [[void f1(char *, int); char foo(int i) { char b[(i < 0 ? 0 : i) + 1]; f1(b, sizeof b); return b[0];}]])], [gcry_cv_have_vla=yes])]) if test "$gcry_cv_have_vla" = "yes" ; then AC_DEFINE(HAVE_VLA,1, [Defined if variable length arrays are supported]) fi # # Check for ELF visibility support. # AC_CACHE_CHECK(whether the visibility attribute is supported, gcry_cv_visibility_attribute, [gcry_cv_visibility_attribute=no AC_LANG_CONFTEST([AC_LANG_SOURCE( [[int foo __attribute__ ((visibility ("hidden"))) = 1; int bar __attribute__ ((visibility ("protected"))) = 1; ]])]) if ${CC-cc} -Werror -S conftest.c -o conftest.s \ 1>&AS_MESSAGE_LOG_FD 2>&AS_MESSAGE_LOG_FD ; then if grep '\.hidden.*foo' conftest.s >/dev/null 2>&1 ; then if grep '\.protected.*bar' conftest.s >/dev/null 2>&1; then gcry_cv_visibility_attribute=yes fi fi fi ]) if test "$gcry_cv_visibility_attribute" = "yes"; then AC_CACHE_CHECK(for broken visibility attribute, gcry_cv_broken_visibility_attribute, [gcry_cv_broken_visibility_attribute=yes AC_LANG_CONFTEST([AC_LANG_SOURCE( [[int foo (int x); int bar (int x) __asm__ ("foo") __attribute__ ((visibility ("hidden"))); int bar (int x) { return x; } ]])]) if ${CC-cc} -Werror -S conftest.c -o conftest.s \ 1>&AS_MESSAGE_LOG_FD 2>&AS_MESSAGE_LOG_FD ; then if grep '\.hidden@<:@ _@:>@foo' conftest.s >/dev/null 2>&1; then gcry_cv_broken_visibility_attribute=no fi fi ]) fi if test "$gcry_cv_visibility_attribute" = "yes"; then AC_CACHE_CHECK(for broken alias attribute, gcry_cv_broken_alias_attribute, [gcry_cv_broken_alias_attribute=yes AC_LANG_CONFTEST([AC_LANG_SOURCE( [[extern int foo (int x) __asm ("xyzzy"); int bar (int x) { return x; } extern __typeof (bar) foo __attribute ((weak, alias ("bar"))); extern int dfoo; extern __typeof (dfoo) dfoo __asm ("abccb"); int dfoo = 1; ]])]) if ${CC-cc} -Werror -S conftest.c -o conftest.s \ 1>&AS_MESSAGE_LOG_FD 2>&AS_MESSAGE_LOG_FD ; then if grep 'xyzzy' conftest.s >/dev/null 2>&1 && \ grep 'abccb' conftest.s >/dev/null 2>&1; then gcry_cv_broken_alias_attribute=no fi fi ]) fi if test "$gcry_cv_visibility_attribute" = "yes"; then AC_CACHE_CHECK(if gcc supports -fvisibility=hidden, gcry_cv_gcc_has_f_visibility, [gcry_cv_gcc_has_f_visibility=no _gcc_cflags_save=$CFLAGS CFLAGS="-fvisibility=hidden" AC_COMPILE_IFELSE([AC_LANG_PROGRAM([],[])], gcry_cv_gcc_has_f_visibility=yes) CFLAGS=$_gcc_cflags_save; ]) fi if test "$gcry_cv_visibility_attribute" = "yes" \ && test "$gcry_cv_broken_visibility_attribute" != "yes" \ && test "$gcry_cv_broken_alias_attribute" != "yes" \ && test
"$gcry_cv_gcc_has_f_visibility" = "yes" then AC_DEFINE(GCRY_USE_VISIBILITY, 1, [Define to use the GNU C visibility attribute.]) CFLAGS="$CFLAGS -fvisibility=hidden" fi # Following attribute tests depend on warnings to cause compile to fail, # so set -Werror temporarily. _gcc_cflags_save=$CFLAGS CFLAGS="$CFLAGS -Werror" # # Check whether the compiler supports the GCC style aligned attribute # AC_CACHE_CHECK([whether the GCC style aligned attribute is supported], [gcry_cv_gcc_attribute_aligned], [gcry_cv_gcc_attribute_aligned=no AC_COMPILE_IFELSE([AC_LANG_SOURCE( [[struct { int a; } foo __attribute__ ((aligned (16)));]])], [gcry_cv_gcc_attribute_aligned=yes])]) if test "$gcry_cv_gcc_attribute_aligned" = "yes" ; then AC_DEFINE(HAVE_GCC_ATTRIBUTE_ALIGNED,1, [Defined if a GCC style "__attribute__ ((aligned (n))" is supported]) fi # # Check whether the compiler supports the GCC style packed attribute # AC_CACHE_CHECK([whether the GCC style packed attribute is supported], [gcry_cv_gcc_attribute_packed], [gcry_cv_gcc_attribute_packed=no AC_COMPILE_IFELSE([AC_LANG_SOURCE( [[struct foolong_s { long b; } __attribute__ ((packed)); struct foo_s { char a; struct foolong_s b; } __attribute__ ((packed)); enum bar { FOO = 1 / (sizeof(struct foo_s) == (sizeof(char) + sizeof(long))), };]])], [gcry_cv_gcc_attribute_packed=yes])]) if test "$gcry_cv_gcc_attribute_packed" = "yes" ; then AC_DEFINE(HAVE_GCC_ATTRIBUTE_PACKED,1, [Defined if a GCC style "__attribute__ ((packed))" is supported]) fi # # Check whether the compiler supports the GCC style may_alias attribute # AC_CACHE_CHECK([whether the GCC style may_alias attribute is supported], [gcry_cv_gcc_attribute_may_alias], [gcry_cv_gcc_attribute_may_alias=no AC_COMPILE_IFELSE([AC_LANG_SOURCE( [[typedef struct foo_s { int a; } __attribute__ ((may_alias)) foo_t;]])], [gcry_cv_gcc_attribute_may_alias=yes])]) if test "$gcry_cv_gcc_attribute_may_alias" = "yes" ; then AC_DEFINE(HAVE_GCC_ATTRIBUTE_MAY_ALIAS,1, [Defined if a GCC style "__attribute__ ((may_alias))" is supported]) fi # Restore flags. CFLAGS=$_gcc_cflags_save; # # Check whether the compiler supports 'asm' or '__asm__' keyword for # assembler blocks. # AC_CACHE_CHECK([whether 'asm' assembler keyword is supported], [gcry_cv_have_asm], [gcry_cv_have_asm=no AC_COMPILE_IFELSE([AC_LANG_SOURCE( [[void a(void) { asm("":::"memory"); }]])], [gcry_cv_have_asm=yes])]) AC_CACHE_CHECK([whether '__asm__' assembler keyword is supported], [gcry_cv_have___asm__], [gcry_cv_have___asm__=no AC_COMPILE_IFELSE([AC_LANG_SOURCE( [[void a(void) { __asm__("":::"memory"); }]])], [gcry_cv_have___asm__=yes])]) if test "$gcry_cv_have_asm" = "no" ; then if test "$gcry_cv_have___asm__" = "yes" ; then AC_DEFINE(asm,__asm__, [Define to supported assembler block keyword, if plain 'asm' was not supported]) fi fi # # Check whether the compiler supports inline assembly memory barrier. 
# if test "$gcry_cv_have_asm" = "no" ; then if test "$gcry_cv_have___asm__" = "yes" ; then AC_CACHE_CHECK([whether inline assembly memory barrier is supported], [gcry_cv_have_asm_volatile_memory], [gcry_cv_have_asm_volatile_memory=no AC_COMPILE_IFELSE([AC_LANG_SOURCE( [[void a(int x) { __asm__ volatile("":::"memory"); __asm__ volatile("":"+r"(x)::"memory"); }]])], [gcry_cv_have_asm_volatile_memory=yes])]) fi else AC_CACHE_CHECK([whether inline assembly memory barrier is supported], [gcry_cv_have_asm_volatile_memory], [gcry_cv_have_asm_volatile_memory=no AC_COMPILE_IFELSE([AC_LANG_SOURCE( [[void a(int x) { asm volatile("":::"memory"); asm volatile("":"+r"(x)::"memory"); }]])], [gcry_cv_have_asm_volatile_memory=yes])]) fi if test "$gcry_cv_have_asm_volatile_memory" = "yes" ; then AC_DEFINE(HAVE_GCC_ASM_VOLATILE_MEMORY,1, [Define if inline asm memory barrier is supported]) fi # # Check whether GCC assembler supports features needed for our ARM # implementations. This needs to be done before setting up the # assembler stuff. # AC_CACHE_CHECK([whether GCC assembler is compatible for ARM assembly implementations], [gcry_cv_gcc_arm_platform_as_ok], [if test "$try_asm_modules" != "yes" ; then gcry_cv_gcc_arm_platform_as_ok="n/a" else gcry_cv_gcc_arm_platform_as_ok=no AC_LINK_IFELSE([AC_LANG_PROGRAM( [[__asm__( /* Test if assembler supports UAL syntax. */ ".syntax unified\n\t" ".arm\n\t" /* our assembly code is in ARM mode */ ".text\n\t" /* Following causes error if assembler ignored '.syntax unified'. */ "asmfunc:\n\t" "add r0, r0, r4, ror #12;\n\t" /* Test if '.type' and '.size' are supported. */ ".size asmfunc,.-asmfunc;\n\t" ".type asmfunc,%function;\n\t" ); void asmfunc(void);]], [ asmfunc(); ] )], [gcry_cv_gcc_arm_platform_as_ok=yes]) fi]) if test "$gcry_cv_gcc_arm_platform_as_ok" = "yes" ; then AC_DEFINE(HAVE_COMPATIBLE_GCC_ARM_PLATFORM_AS,1, [Defined if underlying assembler is compatible with ARM assembly implementations]) fi # # Check whether GCC assembler supports features needed for our ARMv8/Aarch64 # implementations. This needs to be done before setting up the # assembler stuff. # AC_CACHE_CHECK([whether GCC assembler is compatible for ARMv8/Aarch64 assembly implementations], [gcry_cv_gcc_aarch64_platform_as_ok], [if test "$try_asm_modules" != "yes" ; then gcry_cv_gcc_aarch64_platform_as_ok="n/a" else gcry_cv_gcc_aarch64_platform_as_ok=no AC_LINK_IFELSE([AC_LANG_PROGRAM( [[__asm__( ".text\n\t" "asmfunc:\n\t" "eor x0, x0, x30, ror #12;\n\t" "add x0, x0, x30, asr #12;\n\t" "eor v0.16b, v0.16b, v31.16b;\n\t" ); void asmfunc(void);]], [ asmfunc(); ] )], [gcry_cv_gcc_aarch64_platform_as_ok=yes]) fi]) if test "$gcry_cv_gcc_aarch64_platform_as_ok" = "yes" ; then AC_DEFINE(HAVE_COMPATIBLE_GCC_AARCH64_PLATFORM_AS,1, [Defined if underlying assembler is compatible with ARMv8/Aarch64 assembly implementations]) fi # # Check whether GCC assembler supports for CFI directives. 
# AC_CACHE_CHECK([whether GCC assembler supports CFI directives], [gcry_cv_gcc_asm_cfi_directives], [gcry_cv_gcc_asm_cfi_directives=no AC_LINK_IFELSE([AC_LANG_PROGRAM( [[__asm__( ".text\n\t" "ac_test:\n\t" ".cfi_startproc\n\t" ".cfi_remember_state\n\t" ".cfi_adjust_cfa_offset 8\n\t" ".cfi_rel_offset 0, 8\n\t" ".cfi_def_cfa_register 1\n\t" ".cfi_register 2, 3\n\t" ".cfi_restore 2\n\t" ".cfi_escape 0x0f, 0x02, 0x11, 0x00\n\t" ".cfi_restore_state\n\t" ".long 0\n\t" ".cfi_endproc\n\t" ); void asmfunc(void);]])], [gcry_cv_gcc_asm_cfi_directives=yes])]) if test "$gcry_cv_gcc_asm_cfi_directives" = "yes" ; then AC_DEFINE(HAVE_GCC_ASM_CFI_DIRECTIVES,1, [Defined if underlying assembler supports CFI directives]) fi # # Check whether GCC assembler supports ELF directives. # AC_CACHE_CHECK([whether GCC assembler supports ELF directives], [gcry_cv_gcc_asm_elf_directives], [gcry_cv_gcc_asm_elf_directives=no AC_LINK_IFELSE([AC_LANG_PROGRAM( [[__asm__( /* Test if ELF directives '.type' and '.size' are supported. */ ".text\n\t" "asmfunc:\n\t" ".size asmfunc,.-asmfunc;\n\t" ".type asmfunc,STT_FUNC;\n\t" );]])], [gcry_cv_gcc_asm_elf_directives=yes])]) if test "$gcry_cv_gcc_asm_elf_directives" = "yes" ; then AC_DEFINE(HAVE_GCC_ASM_ELF_DIRECTIVES,1, [Defined if underlying assembler supports ELF directives]) fi # # Check whether underscores in symbols are required. This needs to be # done before setting up the assembler stuff. # GNUPG_SYS_SYMBOL_UNDERSCORE() ################################# #### #### #### Setup assembler stuff. #### #### Define mpi_cpu_arch. #### #### #### ################################# AC_ARG_ENABLE(mpi-path, AS_HELP_STRING([--enable-mpi-path=EXTRA_PATH], [prepend EXTRA_PATH to list of CPU specific optimizations]), mpi_extra_path="$enableval",mpi_extra_path="") AC_MSG_CHECKING(architecture and mpi assembler functions) if test -f $srcdir/mpi/config.links ; then . $srcdir/mpi/config.links AC_CONFIG_LINKS("$mpi_ln_list") ac_cv_mpi_sflags="$mpi_sflags" AC_MSG_RESULT($mpi_cpu_arch) else AC_MSG_RESULT(failed) AC_MSG_ERROR([mpi/config.links missing!]) fi MPI_SFLAGS="$ac_cv_mpi_sflags" AC_SUBST(MPI_SFLAGS) AM_CONDITIONAL(MPI_MOD_ASM_MPIH_ADD1, test "$mpi_mod_asm_mpih_add1" = yes) AM_CONDITIONAL(MPI_MOD_ASM_MPIH_SUB1, test "$mpi_mod_asm_mpih_sub1" = yes) AM_CONDITIONAL(MPI_MOD_ASM_MPIH_MUL1, test "$mpi_mod_asm_mpih_mul1" = yes) AM_CONDITIONAL(MPI_MOD_ASM_MPIH_MUL2, test "$mpi_mod_asm_mpih_mul2" = yes) AM_CONDITIONAL(MPI_MOD_ASM_MPIH_MUL3, test "$mpi_mod_asm_mpih_mul3" = yes) AM_CONDITIONAL(MPI_MOD_ASM_MPIH_LSHIFT, test "$mpi_mod_asm_mpih_lshift" = yes) AM_CONDITIONAL(MPI_MOD_ASM_MPIH_RSHIFT, test "$mpi_mod_asm_mpih_rshift" = yes) AM_CONDITIONAL(MPI_MOD_ASM_UDIV, test "$mpi_mod_asm_udiv" = yes) AM_CONDITIONAL(MPI_MOD_ASM_UDIV_QRNND, test "$mpi_mod_asm_udiv_qrnnd" = yes) AM_CONDITIONAL(MPI_MOD_C_MPIH_ADD1, test "$mpi_mod_c_mpih_add1" = yes) AM_CONDITIONAL(MPI_MOD_C_MPIH_SUB1, test "$mpi_mod_c_mpih_sub1" = yes) AM_CONDITIONAL(MPI_MOD_C_MPIH_MUL1, test "$mpi_mod_c_mpih_mul1" = yes) AM_CONDITIONAL(MPI_MOD_C_MPIH_MUL2, test "$mpi_mod_c_mpih_mul2" = yes) AM_CONDITIONAL(MPI_MOD_C_MPIH_MUL3, test "$mpi_mod_c_mpih_mul3" = yes) AM_CONDITIONAL(MPI_MOD_C_MPIH_LSHIFT, test "$mpi_mod_c_mpih_lshift" = yes) AM_CONDITIONAL(MPI_MOD_C_MPIH_RSHIFT, test "$mpi_mod_c_mpih_rshift" = yes) AM_CONDITIONAL(MPI_MOD_C_UDIV, test "$mpi_mod_c_udiv" = yes) AM_CONDITIONAL(MPI_MOD_C_UDIV_QRNND, test "$mpi_mod_c_udiv_qrnnd" = yes) # Reset non-applicable feature flags.
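# (Note, added for orientation: the resets below use "n/a" as a sentinel
# distinct from "no" -- "n/a" marks a feature that cannot exist on this
# CPU architecture at all, whereas "no (...)" further down marks one that
# was requested but found unusable.  The idiom is simply:
#
#   if test "$mpi_cpu_arch" != "x86" ; then
#     aesnisupport="n/a"     # AES-NI is meaningless off x86
#   fi
# )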
if test "$mpi_cpu_arch" != "x86" ; then aesnisupport="n/a" shaextsupport="n/a" pclmulsupport="n/a" sse41support="n/a" avxsupport="n/a" avx2support="n/a" avx512support="n/a" gfnisupport="n/a" padlocksupport="n/a" drngsupport="n/a" fi if test "$mpi_cpu_arch" != "arm" ; then if test "$mpi_cpu_arch" != "aarch64" ; then neonsupport="n/a" armcryptosupport="n/a" svesupport="n/a" sve2support="n/a" fi fi if test "$mpi_cpu_arch" != "ppc"; then ppccryptosupport="n/a" fi ############################################# #### #### #### Platform specific compiler checks. #### #### #### ############################################# # Following tests depend on warnings to cause compile to fail, so set -Werror # temporarily. _gcc_cflags_save=$CFLAGS CFLAGS="$CFLAGS -Werror" # # Check whether compiler supports 'ms_abi' function attribute. # AC_CACHE_CHECK([whether compiler supports 'ms_abi' function attribute], [gcry_cv_gcc_attribute_ms_abi], [gcry_cv_gcc_attribute_ms_abi=no AC_COMPILE_IFELSE([AC_LANG_SOURCE( [[int __attribute__ ((ms_abi)) proto(int);]])], [gcry_cv_gcc_attribute_ms_abi=yes])]) if test "$gcry_cv_gcc_attribute_ms_abi" = "yes" ; then AC_DEFINE(HAVE_GCC_ATTRIBUTE_MS_ABI,1, [Defined if compiler supports "__attribute__ ((ms_abi))" function attribute]) fi # # Check whether compiler supports 'sysv_abi' function attribute. # AC_CACHE_CHECK([whether compiler supports 'sysv_abi' function attribute], [gcry_cv_gcc_attribute_sysv_abi], [gcry_cv_gcc_attribute_sysv_abi=no AC_COMPILE_IFELSE([AC_LANG_SOURCE( [[int __attribute__ ((sysv_abi)) proto(int);]])], [gcry_cv_gcc_attribute_sysv_abi=yes])]) if test "$gcry_cv_gcc_attribute_sysv_abi" = "yes" ; then AC_DEFINE(HAVE_GCC_ATTRIBUTE_SYSV_ABI,1, [Defined if compiler supports "__attribute__ ((sysv_abi))" function attribute]) fi # # Check whether default calling convention is 'ms_abi'. # if test "$gcry_cv_gcc_attribute_ms_abi" = "yes" ; then AC_CACHE_CHECK([whether default calling convention is 'ms_abi'], [gcry_cv_gcc_default_abi_is_ms_abi], [gcry_cv_gcc_default_abi_is_ms_abi=no AC_COMPILE_IFELSE([AC_LANG_SOURCE( [[void *test(void) { void *(*def_func)(void) = test; void *__attribute__((ms_abi))(*msabi_func)(void); /* warning on SysV abi targets, passes on Windows based targets */ msabi_func = def_func; return msabi_func; }]])], [gcry_cv_gcc_default_abi_is_ms_abi=yes])]) if test "$gcry_cv_gcc_default_abi_is_ms_abi" = "yes" ; then AC_DEFINE(HAVE_GCC_DEFAULT_ABI_IS_MS_ABI,1, [Defined if default calling convention is 'ms_abi']) fi fi # # Check whether default calling convention is 'sysv_abi'. # if test "$gcry_cv_gcc_attribute_sysv_abi" = "yes" ; then AC_CACHE_CHECK([whether default calling convention is 'sysv_abi'], [gcry_cv_gcc_default_abi_is_sysv_abi], [gcry_cv_gcc_default_abi_is_sysv_abi=no AC_COMPILE_IFELSE([AC_LANG_SOURCE( [[void *test(void) { void *(*def_func)(void) = test; void *__attribute__((sysv_abi))(*sysvabi_func)(void); /* warning on MS ABI targets, passes on SysV ABI targets */ sysvabi_func = def_func; return sysvabi_func; }]])], [gcry_cv_gcc_default_abi_is_sysv_abi=yes])]) if test "$gcry_cv_gcc_default_abi_is_sysv_abi" = "yes" ; then AC_DEFINE(HAVE_GCC_DEFAULT_ABI_IS_SYSV_ABI,1, [Defined if default calling convention is 'sysv_abi']) fi fi # Restore flags. CFLAGS=$_gcc_cflags_save; # # Check whether GCC inline assembler supports SSSE3 instructions # This is required for the AES-NI instructions. 
# AC_CACHE_CHECK([whether GCC inline assembler supports SSSE3 instructions], [gcry_cv_gcc_inline_asm_ssse3], [if test "$mpi_cpu_arch" != "x86" || test "$try_asm_modules" != "yes" ; then gcry_cv_gcc_inline_asm_ssse3="n/a" else gcry_cv_gcc_inline_asm_ssse3=no AC_LINK_IFELSE([AC_LANG_PROGRAM( [[static unsigned char be_mask[16] __attribute__ ((aligned (16))) = { 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 }; void a(void) { __asm__("pshufb %[mask], %%xmm2\n\t"::[mask]"m"(*be_mask):); }]], [ a(); ] )], [gcry_cv_gcc_inline_asm_ssse3=yes]) fi]) if test "$gcry_cv_gcc_inline_asm_ssse3" = "yes" ; then AC_DEFINE(HAVE_GCC_INLINE_ASM_SSSE3,1, [Defined if inline assembler supports SSSE3 instructions]) fi # # Check whether GCC inline assembler supports PCLMUL instructions. # AC_CACHE_CHECK([whether GCC inline assembler supports PCLMUL instructions], [gcry_cv_gcc_inline_asm_pclmul], [if test "$mpi_cpu_arch" != "x86" || test "$try_asm_modules" != "yes" ; then gcry_cv_gcc_inline_asm_pclmul="n/a" else gcry_cv_gcc_inline_asm_pclmul=no AC_LINK_IFELSE([AC_LANG_PROGRAM( [[void a(void) { __asm__("pclmulqdq \$0, %%xmm1, %%xmm3\n\t":::"cc"); }]], [ a(); ] )], [gcry_cv_gcc_inline_asm_pclmul=yes]) fi]) if test "$gcry_cv_gcc_inline_asm_pclmul" = "yes" ; then AC_DEFINE(HAVE_GCC_INLINE_ASM_PCLMUL,1, [Defined if inline assembler supports PCLMUL instructions]) fi # # Check whether GCC inline assembler supports SHA Extensions instructions. # AC_CACHE_CHECK([whether GCC inline assembler supports SHA Extensions instructions], [gcry_cv_gcc_inline_asm_shaext], [if test "$mpi_cpu_arch" != "x86" || test "$try_asm_modules" != "yes" ; then gcry_cv_gcc_inline_asm_shaext="n/a" else gcry_cv_gcc_inline_asm_shaext=no AC_LINK_IFELSE([AC_LANG_PROGRAM( [[void a(void) { __asm__("sha1rnds4 \$0, %%xmm1, %%xmm3\n\t":::"cc"); __asm__("sha1nexte %%xmm1, %%xmm3\n\t":::"cc"); __asm__("sha1msg1 %%xmm1, %%xmm3\n\t":::"cc"); __asm__("sha1msg2 %%xmm1, %%xmm3\n\t":::"cc"); __asm__("sha256rnds2 %%xmm0, %%xmm1, %%xmm3\n\t":::"cc"); __asm__("sha256msg1 %%xmm1, %%xmm3\n\t":::"cc"); __asm__("sha256msg2 %%xmm1, %%xmm3\n\t":::"cc"); }]], [ a(); ] )], [gcry_cv_gcc_inline_asm_shaext=yes]) fi]) if test "$gcry_cv_gcc_inline_asm_shaext" = "yes" ; then AC_DEFINE(HAVE_GCC_INLINE_ASM_SHAEXT,1, [Defined if inline assembler supports SHA Extensions instructions]) fi # # Check whether GCC inline assembler supports SSE4.1 instructions. 
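# (Aside applying to all HAVE_GCC_INLINE_ASM_* results in this block: they
# only establish that the toolchain can *emit* the opcodes.  Whether the
# CPU actually implements them is a separate runtime decision based on the
# detected hardware features.  A hedged C sketch of the usual guard, with
# may_use_pclmul as a hypothetical helper and HWF_INTEL_PCLMUL an assumed
# feature-flag name:
#
#   static int
#   may_use_pclmul (unsigned int hwf)
#   {
#   #ifdef HAVE_GCC_INLINE_ASM_PCLMUL        /* toolchain can emit it */
#     return (hwf & HWF_INTEL_PCLMUL) != 0;  /* and the CPU has it    */
#   #else
#     (void)hwf;
#     return 0;
#   #endif
#   }
# )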
# AC_CACHE_CHECK([whether GCC inline assembler supports SSE4.1 instructions], [gcry_cv_gcc_inline_asm_sse41], [if test "$mpi_cpu_arch" != "x86" || test "$try_asm_modules" != "yes" ; then gcry_cv_gcc_inline_asm_sse41="n/a" else gcry_cv_gcc_inline_asm_sse41=no AC_LINK_IFELSE([AC_LANG_PROGRAM( [[void a(void) { int i; __asm__("pextrd \$2, %%xmm0, %[out]\n\t" : [out] "=m" (i)); }]], [ a(); ] )], [gcry_cv_gcc_inline_asm_sse41=yes]) fi]) if test "$gcry_cv_gcc_inline_asm_sse41" = "yes" ; then AC_DEFINE(HAVE_GCC_INLINE_ASM_SSE41,1, [Defined if inline assembler supports SSE4.1 instructions]) fi # # Check whether GCC inline assembler supports AVX instructions # AC_CACHE_CHECK([whether GCC inline assembler supports AVX instructions], [gcry_cv_gcc_inline_asm_avx], [if test "$mpi_cpu_arch" != "x86" || test "$try_asm_modules" != "yes" ; then gcry_cv_gcc_inline_asm_avx="n/a" else gcry_cv_gcc_inline_asm_avx=no AC_LINK_IFELSE([AC_LANG_PROGRAM( [[void a(void) { __asm__("xgetbv; vaesdeclast (%[mem]),%%xmm0,%%xmm7\n\t"::[mem]"r"(0):); }]], [ a(); ] )], [gcry_cv_gcc_inline_asm_avx=yes]) fi]) if test "$gcry_cv_gcc_inline_asm_avx" = "yes" ; then AC_DEFINE(HAVE_GCC_INLINE_ASM_AVX,1, [Defined if inline assembler supports AVX instructions]) fi # # Check whether GCC inline assembler supports AVX2 instructions # AC_CACHE_CHECK([whether GCC inline assembler supports AVX2 instructions], [gcry_cv_gcc_inline_asm_avx2], [if test "$mpi_cpu_arch" != "x86" || test "$try_asm_modules" != "yes" ; then gcry_cv_gcc_inline_asm_avx2="n/a" else gcry_cv_gcc_inline_asm_avx2=no AC_LINK_IFELSE([AC_LANG_PROGRAM( [[void a(void) { __asm__("xgetbv; vpbroadcastb %%xmm7,%%ymm1\n\t":::"cc"); }]], [ a(); ] )], [gcry_cv_gcc_inline_asm_avx2=yes]) fi]) if test "$gcry_cv_gcc_inline_asm_avx2" = "yes" ; then AC_DEFINE(HAVE_GCC_INLINE_ASM_AVX2,1, [Defined if inline assembler supports AVX2 instructions]) fi # # Check whether GCC inline assembler supports AVX512 instructions # AC_CACHE_CHECK([whether GCC inline assembler supports AVX512 instructions], [gcry_cv_gcc_inline_asm_avx512], [if test "$mpi_cpu_arch" != "x86" || test "$try_asm_modules" != "yes" ; then gcry_cv_gcc_inline_asm_avx512="n/a" else gcry_cv_gcc_inline_asm_avx512=no AC_LINK_IFELSE([AC_LANG_PROGRAM( [[void a(void) { __asm__("xgetbv; vpopcntq %%zmm7, %%zmm1%{%%k1%}%{z%};\n\t":::"cc"); __asm__("vpexpandb %%zmm3, %%zmm1;\n\t":::"cc"); __asm__("vpxorq %%xmm7, %%xmm7, %%xmm7;\n\t":::"cc"); __asm__("vpxorq %%ymm7, %%ymm7, %%ymm7;\n\t":::"cc"); __asm__("vpxorq (%%eax)%{1to8%}, %%zmm7, %%zmm7;\n\t":::"cc"); }]], [ a(); ] )], [gcry_cv_gcc_inline_asm_avx512=yes]) fi]) if test "$gcry_cv_gcc_inline_asm_avx512" = "yes" ; then AC_DEFINE(HAVE_GCC_INLINE_ASM_AVX512,1, [Defined if inline assembler supports AVX512 instructions]) fi # # Check whether GCC inline assembler supports VAES and VPCLMUL instructions # AC_CACHE_CHECK([whether GCC inline assembler supports VAES and VPCLMUL instructions], [gcry_cv_gcc_inline_asm_vaes_vpclmul], [if test "$mpi_cpu_arch" != "x86" || test "$try_asm_modules" != "yes" ; then gcry_cv_gcc_inline_asm_vaes_vpclmul="n/a" else gcry_cv_gcc_inline_asm_vaes_vpclmul=no AC_LINK_IFELSE([AC_LANG_PROGRAM( [[void a(void) { __asm__("vaesenclast %%ymm7,%%ymm7,%%ymm1\n\t":::"cc");/*256-bit*/ __asm__("vaesenclast %%zmm7,%%zmm7,%%zmm1\n\t":::"cc");/*512-bit*/ __asm__("vpclmulqdq \$0,%%ymm7,%%ymm7,%%ymm1\n\t":::"cc");/*256-bit*/ __asm__("vpclmulqdq \$0,%%zmm7,%%zmm7,%%zmm1\n\t":::"cc");/*512-bit*/ }]], [ a(); ] )], [gcry_cv_gcc_inline_asm_vaes_vpclmul=yes]) fi]) if test 
"$gcry_cv_gcc_inline_asm_vaes_vpclmul" = "yes" ; then AC_DEFINE(HAVE_GCC_INLINE_ASM_VAES_VPCLMUL,1, [Defined if inline assembler supports VAES and VPCLMUL instructions]) fi # # Check whether GCC inline assembler supports GFNI instructions # AC_CACHE_CHECK([whether GCC inline assembler supports GFNI instructions], [gcry_cv_gcc_inline_asm_gfni], [if test "$mpi_cpu_arch" != "x86" || test "$try_asm_modules" != "yes" ; then gcry_cv_gcc_inline_asm_gfni="n/a" else gcry_cv_gcc_inline_asm_gfni=no AC_LINK_IFELSE([AC_LANG_PROGRAM( [[void a(void) { __asm__("gf2p8affineqb \$123, %%xmm0, %%xmm0;\n\t":::"cc"); /* SSE */ __asm__("vgf2p8affineinvqb \$234, %%ymm1, %%ymm1, %%ymm1;\n\t":::"cc"); /* AVX */ __asm__("vgf2p8mulb (%%eax), %%zmm2, %%zmm2;\n\t":::"cc"); /* AVX512 */ }]], [ a(); ] )], [gcry_cv_gcc_inline_asm_gfni=yes]) fi]) if test "$gcry_cv_gcc_inline_asm_gfni" = "yes" ; then AC_DEFINE(HAVE_GCC_INLINE_ASM_GFNI,1, [Defined if inline assembler supports GFNI instructions]) fi # # Check whether GCC inline assembler supports BMI2 instructions # AC_CACHE_CHECK([whether GCC inline assembler supports BMI2 instructions], [gcry_cv_gcc_inline_asm_bmi2], [if test "$mpi_cpu_arch" != "x86" || test "$try_asm_modules" != "yes" ; then gcry_cv_gcc_inline_asm_bmi2="n/a" else gcry_cv_gcc_inline_asm_bmi2=no AC_LINK_IFELSE([AC_LANG_PROGRAM( [[unsigned int a(unsigned int x, unsigned int y) { unsigned int tmp1, tmp2; asm ("rorxl %2, %1, %0" : "=r" (tmp1) : "rm0" (x), "J" (32 - ((23) & 31))); asm ("andnl %2, %1, %0" : "=r" (tmp2) : "r0" (x), "rm" (y)); return tmp1 + tmp2; }]], [ a(1, 2); ] )], [gcry_cv_gcc_inline_asm_bmi2=yes]) fi]) if test "$gcry_cv_gcc_inline_asm_bmi2" = "yes" ; then AC_DEFINE(HAVE_GCC_INLINE_ASM_BMI2,1, [Defined if inline assembler supports BMI2 instructions]) fi # # Check whether GCC assembler needs "-Wa,--divide" to correctly handle # constant division # if test $amd64_as_feature_detection = yes; then AC_CACHE_CHECK([whether GCC assembler handles division correctly], [gcry_cv_gcc_as_const_division_ok], [gcry_cv_gcc_as_const_division_ok=no AC_LINK_IFELSE([AC_LANG_PROGRAM( [[__asm__(".text\n\tfn:\n\t xorl \$(123456789/12345678), %ebp;\n\t"); void fn(void);]], [fn();])], [gcry_cv_gcc_as_const_division_ok=yes])]) if test "$gcry_cv_gcc_as_const_division_ok" = "no" ; then # # Add '-Wa,--divide' to CPPFLAGS and try check again. # _gcc_cppflags_save="$CPPFLAGS" CPPFLAGS="$CPPFLAGS -Wa,--divide" AC_CACHE_CHECK([whether GCC assembler handles division correctly with "-Wa,--divide"], [gcry_cv_gcc_as_const_division_with_wadivide_ok], [gcry_cv_gcc_as_const_division_with_wadivide_ok=no AC_LINK_IFELSE([AC_LANG_PROGRAM( [[__asm__(".text\n\tfn:\n\t xorl \$(123456789/12345678), %ebp;\n\t"); void fn(void);]], [fn();])], [gcry_cv_gcc_as_const_division_with_wadivide_ok=yes])]) if test "$gcry_cv_gcc_as_const_division_with_wadivide_ok" = "no" ; then # '-Wa,--divide' did not work, restore old flags. CPPFLAGS="$_gcc_cppflags_save" fi fi fi # # Check whether GCC assembler supports features needed for our amd64 # implementations # if test $amd64_as_feature_detection = yes; then AC_CACHE_CHECK([whether GCC assembler is compatible for amd64 assembly implementations], [gcry_cv_gcc_amd64_platform_as_ok], [if test "$mpi_cpu_arch" != "x86" || test "$try_asm_modules" != "yes" ; then gcry_cv_gcc_amd64_platform_as_ok="n/a" else gcry_cv_gcc_amd64_platform_as_ok=no AC_LINK_IFELSE([AC_LANG_PROGRAM( [[__asm__( /* Test if '.type' and '.size' are supported. */ /* These work only on ELF targets. 
*/ ".text\n\t" "asmfunc:\n\t" ".size asmfunc,.-asmfunc;\n\t" ".type asmfunc,@function;\n\t" /* Test if assembler allows use of '/' for constant division * (Solaris/x86 issue). If previous constant division check * and "-Wa,--divide" workaround failed, this causes assembly * to be disable on this machine. */ "xorl \$(123456789/12345678), %ebp;\n\t" ); void asmfunc(void);]], [ asmfunc(); ])], [gcry_cv_gcc_amd64_platform_as_ok=yes]) fi]) if test "$gcry_cv_gcc_amd64_platform_as_ok" = "yes" ; then AC_DEFINE(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS,1, [Defined if underlying assembler is compatible with amd64 assembly implementations]) fi if test "$gcry_cv_gcc_amd64_platform_as_ok" = "no" && test "$gcry_cv_gcc_attribute_sysv_abi" = "yes" && test "$gcry_cv_gcc_default_abi_is_ms_abi" = "yes"; then AC_CACHE_CHECK([whether GCC assembler is compatible for WIN64 assembly implementations], [gcry_cv_gcc_win64_platform_as_ok], [gcry_cv_gcc_win64_platform_as_ok=no AC_LINK_IFELSE([AC_LANG_PROGRAM( [[__asm__( ".text\n\t" ".globl asmfunc\n\t" "asmfunc:\n\t" "xorq \$(1234), %rbp;\n\t" ); void asmfunc(void);]], [ asmfunc(); ])], [gcry_cv_gcc_win64_platform_as_ok=yes])]) if test "$gcry_cv_gcc_win64_platform_as_ok" = "yes" ; then AC_DEFINE(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS,1, [Defined if underlying assembler is compatible with WIN64 assembly implementations]) fi fi fi # # Check whether GCC assembler supports features needed for assembly # implementations that use Intel syntax # AC_CACHE_CHECK([whether GCC assembler is compatible for Intel syntax assembly implementations], [gcry_cv_gcc_platform_as_ok_for_intel_syntax], [if test "$mpi_cpu_arch" != "x86" || test "$try_asm_modules" != "yes" ; then gcry_cv_gcc_platform_as_ok_for_intel_syntax="n/a" else gcry_cv_gcc_platform_as_ok_for_intel_syntax=no AC_LINK_IFELSE([AC_LANG_PROGRAM( [[__asm__( ".intel_syntax noprefix\n\t" ".text\n\t" "actest:\n\t" "pxor xmm1, xmm7;\n\t" "vperm2i128 ymm2, ymm3, ymm0, 1;\n\t" "add eax, ebp;\n\t" "rorx eax, ebp, 1;\n\t" "sub eax, [esp + 4];\n\t" "add dword ptr [esp + eax], 0b10101;\n\t" ".att_syntax prefix\n\t" ); void actest(void);]], [ actest(); ])], [gcry_cv_gcc_platform_as_ok_for_intel_syntax=yes]) fi]) if test "$gcry_cv_gcc_platform_as_ok_for_intel_syntax" = "yes" ; then AC_DEFINE(HAVE_INTEL_SYNTAX_PLATFORM_AS,1, [Defined if underlying assembler is compatible with Intel syntax assembly implementations]) fi # # Check whether compiler is configured for ARMv6 or newer architecture # AC_CACHE_CHECK([whether compiler is configured for ARMv6 or newer architecture], [gcry_cv_cc_arm_arch_is_v6], [if test "$mpi_cpu_arch" != "arm" || test "$try_asm_modules" != "yes" ; then gcry_cv_cc_arm_arch_is_v6="n/a" else gcry_cv_cc_arm_arch_is_v6=no AC_COMPILE_IFELSE([AC_LANG_SOURCE( [[ #if defined(__arm__) && \ ((defined(__ARM_ARCH) && __ARM_ARCH >= 6) \ || defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \ || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) \ || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6T2__) \ || defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \ || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) \ || defined(__ARM_ARCH_7EM__)) /* empty */ #else /* fail compile if not ARMv6. 
*/ not_armv6 not_armv6 = (not_armv6)not_armv6; #endif ]])], [gcry_cv_cc_arm_arch_is_v6=yes]) fi]) if test "$gcry_cv_cc_arm_arch_is_v6" = "yes" ; then AC_DEFINE(HAVE_ARM_ARCH_V6,1, [Defined if ARM architecture is v6 or newer]) fi # # Check whether GCC inline assembler supports NEON instructions # AC_CACHE_CHECK([whether GCC inline assembler supports NEON instructions], [gcry_cv_gcc_inline_asm_neon], [if test "$mpi_cpu_arch" != "arm" || test "$try_asm_modules" != "yes" ; then gcry_cv_gcc_inline_asm_neon="n/a" else gcry_cv_gcc_inline_asm_neon=no AC_LINK_IFELSE([AC_LANG_PROGRAM( [[__asm__( ".syntax unified\n\t" ".arm\n\t" ".fpu neon\n\t" ".text\n\t" "testfn:\n\t" "vld1.64 {q0-q1}, [r0]!;\n\t" "vrev64.8 q0, q3;\n\t" "vadd.u64 q0, q1;\n\t" "vadd.s64 d3, d2, d3;\n\t" ); void testfn(void); ]], [ testfn(); ])], [gcry_cv_gcc_inline_asm_neon=yes]) fi]) if test "$gcry_cv_gcc_inline_asm_neon" = "yes" ; then AC_DEFINE(HAVE_GCC_INLINE_ASM_NEON,1, [Defined if inline assembler supports NEON instructions]) fi # # Check whether GCC inline assembler supports AArch32 Crypto Extension instructions # AC_CACHE_CHECK([whether GCC inline assembler supports AArch32 Crypto Extension instructions], [gcry_cv_gcc_inline_asm_aarch32_crypto], [if test "$mpi_cpu_arch" != "arm" || test "$try_asm_modules" != "yes" ; then gcry_cv_gcc_inline_asm_aarch32_crypto="n/a" else gcry_cv_gcc_inline_asm_aarch32_crypto=no AC_LINK_IFELSE([AC_LANG_PROGRAM( [[__asm__( ".syntax unified\n\t" ".arch armv8-a\n\t" ".arm\n\t" ".fpu crypto-neon-fp-armv8\n\t" ".text\n\t" "testfn:\n\t" "sha1h.32 q0, q0;\n\t" "sha1c.32 q0, q0, q0;\n\t" "sha1p.32 q0, q0, q0;\n\t" "sha1su0.32 q0, q0, q0;\n\t" "sha1su1.32 q0, q0;\n\t" "sha256h.32 q0, q0, q0;\n\t" "sha256h2.32 q0, q0, q0;\n\t" "sha1p.32 q0, q0, q0;\n\t" "sha256su0.32 q0, q0;\n\t" "sha256su1.32 q0, q0, q15;\n\t" "aese.8 q0, q0;\n\t" "aesd.8 q0, q0;\n\t" "aesmc.8 q0, q0;\n\t" "aesimc.8 q0, q0;\n\t" "vmull.p64 q0, d0, d0;\n\t" ); void testfn(void); ]], [ testfn(); ])], [gcry_cv_gcc_inline_asm_aarch32_crypto=yes]) fi]) if test "$gcry_cv_gcc_inline_asm_aarch32_crypto" = "yes" ; then AC_DEFINE(HAVE_GCC_INLINE_ASM_AARCH32_CRYPTO,1, [Defined if inline assembler supports AArch32 Crypto Extension instructions]) fi # # Check whether GCC inline assembler supports AArch64 NEON instructions # AC_CACHE_CHECK([whether GCC inline assembler supports AArch64 NEON instructions], [gcry_cv_gcc_inline_asm_aarch64_neon], [if test "$mpi_cpu_arch" != "aarch64" || test "$try_asm_modules" != "yes" ; then gcry_cv_gcc_inline_asm_aarch64_neon="n/a" else gcry_cv_gcc_inline_asm_aarch64_neon=no AC_LINK_IFELSE([AC_LANG_PROGRAM( [[__asm__( ".cpu generic+simd\n\t" ".text\n\t" "testfn:\n\t" "mov w0, \#42;\n\t" "dup v0.8b, w0;\n\t" "ld4 {v0.8b,v1.8b,v2.8b,v3.8b},[x0],\#32;\n\t" ); void testfn(void); ]], [ testfn(); ])], [gcry_cv_gcc_inline_asm_aarch64_neon=yes]) fi]) if test "$gcry_cv_gcc_inline_asm_aarch64_neon" = "yes" ; then AC_DEFINE(HAVE_GCC_INLINE_ASM_AARCH64_NEON,1, [Defined if inline assembler supports AArch64 NEON instructions]) fi # # Check whether GCC inline assembler supports AArch64 Crypto Extension instructions # AC_CACHE_CHECK([whether GCC inline assembler supports AArch64 Crypto Extension instructions], [gcry_cv_gcc_inline_asm_aarch64_crypto], [if test "$mpi_cpu_arch" != "aarch64" || test "$try_asm_modules" != "yes" ; then gcry_cv_gcc_inline_asm_aarch64_crypto="n/a" else gcry_cv_gcc_inline_asm_aarch64_crypto=no AC_LINK_IFELSE([AC_LANG_PROGRAM( [[__asm__( ".cpu generic+simd+crypto\n\t" ".text\n\t" "testfn:\n\t" "mov w0, 
\#42;\n\t" "dup v0.8b, w0;\n\t" "ld4 {v0.8b,v1.8b,v2.8b,v3.8b},[x0],\#32;\n\t" "sha1h s0, s0;\n\t" "sha1c q0, s0, v0.4s;\n\t" "sha1p q0, s0, v0.4s;\n\t" "sha1su0 v0.4s, v0.4s, v0.4s;\n\t" "sha1su1 v0.4s, v0.4s;\n\t" "sha256h q0, q0, v0.4s;\n\t" "sha256h2 q0, q0, v0.4s;\n\t" "sha1p q0, s0, v0.4s;\n\t" "sha256su0 v0.4s, v0.4s;\n\t" "sha256su1 v0.4s, v0.4s, v31.4s;\n\t" "aese v0.16b, v0.16b;\n\t" "aesd v0.16b, v0.16b;\n\t" "aesmc v0.16b, v0.16b;\n\t" "aesimc v0.16b, v0.16b;\n\t" "pmull v0.1q, v0.1d, v31.1d;\n\t" "pmull2 v0.1q, v0.2d, v31.2d;\n\t" ); void testfn(void); ]], [ testfn(); ])], [gcry_cv_gcc_inline_asm_aarch64_crypto=yes]) fi]) if test "$gcry_cv_gcc_inline_asm_aarch64_crypto" = "yes" ; then AC_DEFINE(HAVE_GCC_INLINE_ASM_AARCH64_CRYPTO,1, [Defined if inline assembler supports AArch64 Crypto Extension instructions]) fi # # Check whether GCC inline assembler supports AArch64 SVE instructions # AC_CACHE_CHECK([whether GCC inline assembler supports AArch64 SVE instructions], [gcry_cv_gcc_inline_asm_aarch64_sve], [if test "$mpi_cpu_arch" != "aarch64" || test "$try_asm_modules" != "yes" ; then gcry_cv_gcc_inline_asm_aarch64_sve="n/a" else gcry_cv_gcc_inline_asm_aarch64_sve=no AC_LINK_IFELSE([AC_LANG_PROGRAM( [[__asm__( ".cpu generic+simd+sve\n\t" ".text\n\t" "testfn:\n\t" "mov x0, \#60;\n\t" "whilelo p0.s, xzr, x0;\n\t" "mov z0.s, p0/z, \#55;\n\t" "ld1b {z0.b}, p0/z, [x1];\n\t" ); void testfn(void); ]], [ testfn(); ])], [gcry_cv_gcc_inline_asm_aarch64_sve=yes]) fi]) if test "$gcry_cv_gcc_inline_asm_aarch64_sve" = "yes" ; then AC_DEFINE(HAVE_GCC_INLINE_ASM_AARCH64_SVE,1, [Defined if inline assembler supports AArch64 SVE instructions]) fi # # Check whether GCC inline assembler supports AArch64 SVE2 instructions # AC_CACHE_CHECK([whether GCC inline assembler supports AArch64 SVE2 instructions], [gcry_cv_gcc_inline_asm_aarch64_sve2], [if test "$mpi_cpu_arch" != "aarch64" || test "$try_asm_modules" != "yes" ; then gcry_cv_gcc_inline_asm_aarch64_sve2="n/a" else gcry_cv_gcc_inline_asm_aarch64_sve2=no AC_LINK_IFELSE([AC_LANG_PROGRAM( [[__asm__( ".cpu generic+simd+sve2\n\t" ".text\n\t" "testfn:\n\t" ";\n\t" "eor3 z0.d, z0.d, z1.d, z2.d;\n\t" "ext z8.b, {z20.b, z21.b}, \#3;\n\t" "adclt z0.d, z1.d, z2.d;\n\t" "tbl z0.b, {z8.b, z9.b}, z1.b;\n\t" "addhnb z16.s, z17.d, z18.d;\n\t" "mov z0.s, p0/z, \#55;\n\t" "ld1b {z0.b}, p0/z, [x1];\n\t" ); void testfn(void); ]], [ testfn(); ])], [gcry_cv_gcc_inline_asm_aarch64_sve2=yes]) fi]) if test "$gcry_cv_gcc_inline_asm_aarch64_sve2" = "yes" ; then AC_DEFINE(HAVE_GCC_INLINE_ASM_AARCH64_SVE2,1, [Defined if inline assembler supports AArch64 SVE2 instructions]) fi # # Check whether GCC inline assembler supports AArch64 SHA3/SHA512/SM3/SM4 instructions # AC_CACHE_CHECK([whether GCC inline assembler supports AArch64 SHA3/SHA512/SM3/SM4 instructions], [gcry_cv_gcc_inline_asm_aarch64_sha3_sha512_sm3_sm4], [if test "$mpi_cpu_arch" != "aarch64" || test "$try_asm_modules" != "yes" ; then gcry_cv_gcc_inline_asm_aarch64_sha3_sha512_sm3_sm4="n/a" else gcry_cv_gcc_inline_asm_aarch64_sha3_sha512_sm3_sm4=no AC_LINK_IFELSE([AC_LANG_PROGRAM( [[__asm__( ".arch armv8.2-a+sha3+sm4\n\t" ".text\n\t" "testfn:\n\t" /* Test for SHA512 instructions */ "sha512h q0, q0, v0.2d;\n\t" "sha512h2 q0, q0, v0.2d;\n\t" "sha512su0 v0.2d, v0.2d;\n\t" "sha512su1 v0.2d, v0.2d, v31.2d;\n\t" /* Test for SHA3 instructions */ "bcax v0.16b, v1.16b, v2.16b, v3.16b;\n\t" "eor3 v0.16b, v1.16b, v2.16b, v3.16b;\n\t" "rax1 v0.2d, v1.2d, v2.2d;\n\t" "xar v0.2d, v1.2d, v2.2d, \#1;\n\t" /* Test for SM3 instructions 
*/ "sm3partw1 v0.4s, v1.4s, v2.4s;\n\t" "sm3partw2 v0.4s, v1.4s, v2.4s;\n\t" "sm3ss1 v0.4s, v1.4s, v2.4s, v3.4s;\n\t" "sm3tt1a v0.4s, v1.4s, v2.s[0];\n\t" "sm3tt1b v0.4s, v1.4s, v2.s[0];\n\t" "sm3tt2a v0.4s, v1.4s, v2.s[0];\n\t" "sm3tt2b v0.4s, v1.4s, v2.s[0];\n\t" /* Test for SM4 instructions */ "sm4e v0.4s, v1.4s;\n\t" "sm4ekey v0.4s, v1.4s, v2.4s;\n\t" ); void testfn(void); ]], [ testfn(); ])], [gcry_cv_gcc_inline_asm_aarch64_sha3_sha512_sm3_sm4=yes]) fi]) if test "$gcry_cv_gcc_inline_asm_aarch64_sha3_sha512_sm3_sm4" = "yes" ; then AC_DEFINE(HAVE_GCC_INLINE_ASM_AARCH64_SHA3_SHA512_SM3_SM4,1, [Defined if inline assembler supports AArch64 SHA3/SHA512/SM3/SM4 instructions]) fi # # Check whether PowerPC AltiVec/VSX intrinsics # AC_CACHE_CHECK([whether compiler supports PowerPC AltiVec/VSX/crypto intrinsics], [gcry_cv_cc_ppc_altivec], [if test "$mpi_cpu_arch" != "ppc" || test "$try_asm_modules" != "yes" ; then gcry_cv_cc_ppc_altivec="n/a" else gcry_cv_cc_ppc_altivec=no AC_COMPILE_IFELSE([AC_LANG_SOURCE( [[#include typedef vector unsigned char block; typedef vector unsigned int vecu32; static inline __attribute__((always_inline)) vecu32 vec_sld_u32(vecu32 a, vecu32 b, unsigned int idx) { return vec_sld (a, b, (4 * idx) & 15); } block fn(block in) { block t = vec_perm (in, in, vec_vsx_ld (0, (unsigned char*)0)); vecu32 y = vec_vsx_ld (0, (unsigned int*)0); y = vec_sld_u32 (y, y, 3); return vec_cipher_be (t, in) ^ (block)y; } ]])], [gcry_cv_cc_ppc_altivec=yes]) fi]) if test "$gcry_cv_cc_ppc_altivec" = "yes" ; then AC_DEFINE(HAVE_COMPATIBLE_CC_PPC_ALTIVEC,1, [Defined if underlying compiler supports PowerPC AltiVec/VSX/crypto intrinsics]) fi _gcc_cflags_save=$CFLAGS CFLAGS="$CFLAGS -O2 -maltivec -mvsx -mcrypto" if test "$gcry_cv_cc_ppc_altivec" = "no" && test "$mpi_cpu_arch" = "ppc" && test "$try_asm_modules" == "yes" ; then AC_CACHE_CHECK([whether compiler supports PowerPC AltiVec/VSX/crypto intrinsics with extra GCC flags], [gcry_cv_cc_ppc_altivec_cflags], [gcry_cv_cc_ppc_altivec_cflags=no AC_COMPILE_IFELSE([AC_LANG_SOURCE( [[#include typedef vector unsigned char block; typedef vector unsigned int vecu32; static inline __attribute__((always_inline)) vecu32 vec_sld_u32(vecu32 a, vecu32 b, unsigned int idx) { return vec_sld (a, b, (4 * idx) & 15); } block fn(block in) { block t = vec_perm (in, in, vec_vsx_ld (0, (unsigned char*)0)); vecu32 y = vec_vsx_ld (0, (unsigned int*)0); y = vec_sld_u32 (y, y, 3); return vec_cipher_be (t, in) ^ (block)y; }]])], [gcry_cv_cc_ppc_altivec_cflags=yes])]) if test "$gcry_cv_cc_ppc_altivec_cflags" = "yes" ; then AC_DEFINE(HAVE_COMPATIBLE_CC_PPC_ALTIVEC,1, [Defined if underlying compiler supports PowerPC AltiVec/VSX/crypto intrinsics]) AC_DEFINE(HAVE_COMPATIBLE_CC_PPC_ALTIVEC_WITH_CFLAGS,1, [Defined if underlying compiler supports PowerPC AltiVec/VSX/crypto intrinsics with extra GCC flags]) fi fi AM_CONDITIONAL(ENABLE_PPC_VCRYPTO_EXTRA_CFLAGS, test "$gcry_cv_cc_ppc_altivec_cflags" = "yes") # Restore flags. 
CFLAGS=$_gcc_cflags_save; # # Check whether GCC inline assembler supports PowerPC AltiVec/VSX/crypto instructions # AC_CACHE_CHECK([whether GCC inline assembler supports PowerPC AltiVec/VSX/crypto instructions], [gcry_cv_gcc_inline_asm_ppc_altivec], [if test "$mpi_cpu_arch" != "ppc" || test "$try_asm_modules" != "yes" ; then gcry_cv_gcc_inline_asm_ppc_altivec="n/a" else gcry_cv_gcc_inline_asm_ppc_altivec=no AC_LINK_IFELSE([AC_LANG_PROGRAM( [[__asm__(".globl testfn;\n" ".text\n\t" "testfn:\n" "stvx %v31,%r12,%r0;\n" "lvx %v20,%r12,%r0;\n" "vcipher %v0, %v1, %v22;\n" "lxvw4x %vs32, %r0, %r1;\n" "vadduwm %v0, %v1, %v22;\n" "vshasigmaw %v0, %v1, 0, 15;\n" "vshasigmad %v0, %v1, 0, 15;\n" "vpmsumd %v11, %v11, %v11;\n" ); void testfn(void); ]], [ testfn(); ] )], [gcry_cv_gcc_inline_asm_ppc_altivec=yes]) fi]) if test "$gcry_cv_gcc_inline_asm_ppc_altivec" = "yes" ; then AC_DEFINE(HAVE_GCC_INLINE_ASM_PPC_ALTIVEC,1, [Defined if inline assembler supports PowerPC AltiVec/VSX/crypto instructions]) fi # # Check whether GCC inline assembler supports PowerISA 3.00 instructions # AC_CACHE_CHECK([whether GCC inline assembler supports PowerISA 3.00 instructions], [gcry_cv_gcc_inline_asm_ppc_arch_3_00], [if test "$mpi_cpu_arch" != "ppc" || test "$try_asm_modules" != "yes" ; then gcry_cv_gcc_inline_asm_ppc_arch_3_00="n/a" else gcry_cv_gcc_inline_asm_ppc_arch_3_00=no AC_LINK_IFELSE([AC_LANG_PROGRAM( [[__asm__(".text\n\t" ".globl testfn;\n" "testfn:\n" "stxvb16x %r1,%v12,%v30;\n" ); void testfn(void); ]], [ testfn(); ])], [gcry_cv_gcc_inline_asm_ppc_arch_3_00=yes]) fi]) if test "$gcry_cv_gcc_inline_asm_ppc_arch_3_00" = "yes" ; then AC_DEFINE(HAVE_GCC_INLINE_ASM_PPC_ARCH_3_00,1, [Defined if inline assembler supports PowerISA 3.00 instructions]) fi # # Check whether GCC inline assembler supports zSeries instructions # AC_CACHE_CHECK([whether GCC inline assembler supports zSeries instructions], [gcry_cv_gcc_inline_asm_s390x], [if test "$mpi_cpu_arch" != "s390x" || test "$try_asm_modules" != "yes" ; then gcry_cv_gcc_inline_asm_s390x="n/a" else gcry_cv_gcc_inline_asm_s390x=no AC_LINK_IFELSE([AC_LANG_PROGRAM( [[typedef unsigned int u128_t __attribute__ ((mode (TI))); unsigned int testfunc(unsigned int x, void *y, unsigned int z) { unsigned long fac[8]; register unsigned long reg0 asm("0") = 0; register unsigned long reg1 asm("1") = x; u128_t r1 = ((u128_t)(unsigned long)y << 64) | (unsigned long)z; u128_t r2 = 0; u128_t r3 = 0; asm volatile (".insn rre,0xb92e << 16, %[r1], %[r2]\n\t" : [r1] "+a" (r1), [r2] "+a" (r2) : "r" (reg0), "r" (reg1) : "cc", "memory"); asm volatile (".insn rrf,0xb929 << 16, %[r1], %[r2], %[r3], 0\n\t" : [r1] "+a" (r1), [r2] "+a" (r2), [r3] "+a" (r3) : "r" (reg0), "r" (reg1) : "cc", "memory"); reg0 = 8 - 1; asm ("stfle %1\n\t" : "+d" (reg0), "=Q" (fac[0]) : : "cc", "memory"); asm volatile ("mvc 0(16, %0), 0(%1)\n\t" : : "a" (y), "a" (fac) : "memory"); asm volatile ("xc 0(16, %0), 0(%0)\n\t" : : "a" (fac) : "memory"); asm volatile ("risbgn %%r11, %%r11, 0, 129, 0\n\t" : : : "memory", "r11"); asm volatile ("algrk %%r14, %%r14, %%r14\n\t" : : : "memory", "r14"); return (unsigned int)r1 ^ reg0; } ]] , [ testfunc(0, 0, 0); ])], [gcry_cv_gcc_inline_asm_s390x=yes]) fi]) if test "$gcry_cv_gcc_inline_asm_s390x" = "yes" ; then AC_DEFINE(HAVE_GCC_INLINE_ASM_S390X,1, [Defined if inline assembler supports zSeries instructions]) fi # # Check whether GCC inline assembler supports zSeries vector instructions # AC_CACHE_CHECK([whether GCC inline assembler supports zSeries vector instructions], 
[gcry_cv_gcc_inline_asm_s390x_vx], [if test "$mpi_cpu_arch" != "s390x" || test "$try_asm_modules" != "yes" ; then gcry_cv_gcc_inline_asm_s390x_vx="n/a" else gcry_cv_gcc_inline_asm_s390x_vx=no if test "$gcry_cv_gcc_inline_asm_s390x" = "yes" ; then AC_LINK_IFELSE([AC_LANG_PROGRAM( [[void testfunc(void) { asm volatile (".machine \"z13+vx\"\n\t" "vx %%v0, %%v1, %%v31\n\t" "verllf %%v11, %%v11, (16)(0)\n\t" : : : "memory"); } ]], [ testfunc(); ])], [gcry_cv_gcc_inline_asm_s390x_vx=yes]) fi fi]) if test "$gcry_cv_gcc_inline_asm_s390x_vx" = "yes" ; then AC_DEFINE(HAVE_GCC_INLINE_ASM_S390X_VX,1, [Defined if inline assembler supports zSeries vector instructions]) fi ####################################### #### Checks for library functions. #### ####################################### AC_FUNC_VPRINTF # We have replacements for these in src/missing-string.c AC_CHECK_FUNCS(stpcpy strcasecmp) # We have replacements for these in src/g10lib.h AC_CHECK_FUNCS(strtoul memmove stricmp atexit raise) # Other checks AC_CHECK_FUNCS(strerror rand mmap getpagesize sysconf waitpid wait4) AC_CHECK_FUNCS(gettimeofday getrusage gethrtime clock_gettime syslog) AC_CHECK_FUNCS(syscall fcntl ftruncate flockfile getauxval elf_aux_info) AC_CHECK_FUNCS(explicit_bzero explicit_memset getentropy sysctlbyname) GNUPG_CHECK_MLOCK # # Replacement functions. # AC_REPLACE_FUNCS([getpid clock]) # # Check whether it is necessary to link against libdl. # DL_LIBS="" if test "$use_hmac_binary_check" != no ; then _gcry_save_libs="$LIBS" LIBS="" AC_SEARCH_LIBS(dlopen, c dl,,,) DL_LIBS=$LIBS LIBS="$_gcry_save_libs" fi AC_SUBST(DL_LIBS) # # Check whether we can use Linux capabilities as requested. # if test "$use_capabilities" = "yes" ; then use_capabilities=no AC_CHECK_HEADERS(sys/capability.h) if test "$ac_cv_header_sys_capability_h" = "yes" ; then AC_CHECK_LIB(cap, cap_init, ac_need_libcap=1) if test "$ac_cv_lib_cap_cap_init" = "yes"; then AC_DEFINE(USE_CAPABILITIES,1, [define if capabilities should be used]) LIBS="$LIBS -lcap" use_capabilities=yes fi fi if test "$use_capabilities" = "no" ; then AC_MSG_WARN([[ *** *** The use of capabilities on this system is not possible. *** You need a recent Linux kernel and some patches: *** fcaps-2.2.9-990610.patch (kernel patch for 2.2.9) *** fcap-module-990613.tar.gz (kernel module) *** libcap-1.92.tar.gz (user mode library and utilities) *** And you have to configure the kernel with CONFIG_VFS_CAP_PLUGIN *** set (filesystems menu). Be warned: This code is *really* ALPHA. ***]]) fi fi # Check whether a random device is available. if test "$try_dev_random" = yes ; then AC_CACHE_CHECK(for random device, ac_cv_have_dev_random, [if test -r "$NAME_OF_DEV_RANDOM" && test -r "$NAME_OF_DEV_URANDOM" ; then ac_cv_have_dev_random=yes; else ac_cv_have_dev_random=no; fi]) if test "$ac_cv_have_dev_random" = yes; then AC_DEFINE(HAVE_DEV_RANDOM,1, [defined if the system supports a random device] ) fi else AC_MSG_CHECKING(for random device) ac_cv_have_dev_random=no AC_MSG_RESULT(has been disabled) fi # Figure out the random modules for this configuration. if test "$random" = "default"; then # Select default value. if test "$ac_cv_func_getentropy" = yes; then random_modules="getentropy" elif test "$ac_cv_have_dev_random" = yes; then # Try Linuxish random device. random_modules="linux" else case "${host}" in *-*-mingw32ce*) # WindowsCE random device. random_modules="w32ce" ;; *-*-mingw32*|*-*-cygwin*) # Windows random device. random_modules="w32" ;; *) # Build everything, allow to select at runtime. 
random_modules="$auto_random_modules" ;; esac fi else if test "$random" = "auto"; then # Build everything, allow to select at runtime. random_modules="$auto_random_modules" else random_modules="$random" fi fi # # Other defines # if test mym4_isgit = "yes"; then AC_DEFINE(IS_DEVELOPMENT_VERSION,1, [Defined if this is not a regular release]) fi AM_CONDITIONAL(CROSS_COMPILING, test x$cross_compiling = xyes) # This is handy for debugging so the compiler doesn't rearrange # things and eliminate variables. AC_ARG_ENABLE(optimization, AS_HELP_STRING([--disable-optimization], [disable compiler optimization]), [if test $enableval = no ; then CFLAGS=`echo $CFLAGS | sed 's/-O[[0-9]]//'` fi]) AC_MSG_NOTICE([checking for cc features]) # CFLAGS mangling when using gcc. if test "$GCC" = yes; then AC_MSG_CHECKING([if gcc supports -fno-delete-null-pointer-checks]) _gcc_cflags_save=$CFLAGS CFLAGS="-fno-delete-null-pointer-checks" AC_COMPILE_IFELSE([AC_LANG_PROGRAM([],[])],_gcc_wopt=yes,_gcc_wopt=no) AC_MSG_RESULT($_gcc_wopt) CFLAGS=$_gcc_cflags_save; if test x"$_gcc_wopt" = xyes ; then CFLAGS="$CFLAGS -fno-delete-null-pointer-checks" fi CFLAGS="$CFLAGS -Wall" if test "$USE_MAINTAINER_MODE" = "yes"; then CFLAGS="$CFLAGS -Wcast-align -Wshadow -Wstrict-prototypes" CFLAGS="$CFLAGS -Wformat -Wno-format-y2k -Wformat-security" # If -Wno-missing-field-initializers is supported we can enable a # a bunch of really useful warnings. AC_MSG_CHECKING([if gcc supports -Wno-missing-field-initializers]) _gcc_cflags_save=$CFLAGS CFLAGS="-Wno-missing-field-initializers" AC_COMPILE_IFELSE([AC_LANG_PROGRAM([],[])],_gcc_wopt=yes,_gcc_wopt=no) AC_MSG_RESULT($_gcc_wopt) CFLAGS=$_gcc_cflags_save; if test x"$_gcc_wopt" = xyes ; then CFLAGS="$CFLAGS -W -Wextra -Wbad-function-cast" CFLAGS="$CFLAGS -Wwrite-strings" CFLAGS="$CFLAGS -Wdeclaration-after-statement" CFLAGS="$CFLAGS -Wno-missing-field-initializers" CFLAGS="$CFLAGS -Wno-sign-compare" fi AC_MSG_CHECKING([if gcc supports -Wpointer-arith]) _gcc_cflags_save=$CFLAGS CFLAGS="-Wpointer-arith" AC_COMPILE_IFELSE([AC_LANG_PROGRAM([],[])],_gcc_wopt=yes,_gcc_wopt=no) AC_MSG_RESULT($_gcc_wopt) CFLAGS=$_gcc_cflags_save; if test x"$_gcc_wopt" = xyes ; then CFLAGS="$CFLAGS -Wpointer-arith" fi fi fi # Check whether as(1) supports a noeexecstack feature. This test # includes an override option. CL_AS_NOEXECSTACK AC_SUBST(LIBGCRYPT_CONFIG_API_VERSION) AC_SUBST(LIBGCRYPT_CONFIG_LIBS) AC_SUBST(LIBGCRYPT_CONFIG_CFLAGS) AC_SUBST(LIBGCRYPT_CONFIG_HOST) AC_SUBST(LIBGCRYPT_THREAD_MODULES) AC_CONFIG_COMMANDS([gcrypt-conf],[[ chmod +x src/libgcrypt-config ]],[[ prefix=$prefix exec_prefix=$exec_prefix libdir=$libdir datadir=$datadir DATADIRNAME=$DATADIRNAME ]]) ##################### #### Conclusion. #### ##################### # Check that requested feature can actually be used and define # ENABLE_foo_SUPPORT macros. 
if test x"$aesnisupport" = xyes ; then if test "$gcry_cv_gcc_inline_asm_ssse3" != "yes" ; then aesnisupport="no (unsupported by compiler)" fi fi if test x"$shaextsupport" = xyes ; then if test "$gcry_cv_gcc_inline_asm_shaext" != "yes" ; then shaextsupport="no (unsupported by compiler)" fi fi if test x"$pclmulsupport" = xyes ; then if test "$gcry_cv_gcc_inline_asm_pclmul" != "yes" ; then pclmulsupport="no (unsupported by compiler)" fi fi if test x"$sse41support" = xyes ; then if test "$gcry_cv_gcc_inline_asm_sse41" != "yes" ; then sse41support="no (unsupported by compiler)" fi fi if test x"$avxsupport" = xyes ; then if test "$gcry_cv_gcc_inline_asm_avx" != "yes" ; then avxsupport="no (unsupported by compiler)" fi fi if test x"$avx2support" = xyes ; then if test "$gcry_cv_gcc_inline_asm_avx2" != "yes" ; then avx2support="no (unsupported by compiler)" fi fi if test x"$avx512support" = xyes ; then if test "$gcry_cv_gcc_inline_asm_avx512" != "yes" ; then avx512support="no (unsupported by compiler)" fi fi if test x"$gfnisupport" = xyes ; then if test "$gcry_cv_gcc_inline_asm_gfni" != "yes" ; then gfnisupport="no (unsupported by compiler)" fi fi if test x"$neonsupport" = xyes ; then if test "$gcry_cv_gcc_inline_asm_neon" != "yes" ; then if test "$gcry_cv_gcc_inline_asm_aarch64_neon" != "yes" ; then neonsupport="no (unsupported by compiler)" fi fi fi if test x"$armcryptosupport" = xyes ; then if test "$gcry_cv_gcc_inline_asm_aarch32_crypto" != "yes" ; then if test "$gcry_cv_gcc_inline_asm_aarch64_crypto" != "yes" ; then armcryptosupport="no (unsupported by compiler)" fi fi fi if test x"$svesupport" = xyes ; then if test "$gcry_cv_gcc_inline_asm_sve" != "yes" ; then if test "$gcry_cv_gcc_inline_asm_aarch64_sve" != "yes" ; then svesupport="no (unsupported by compiler)" fi fi fi if test x"$sve2support" = xyes ; then if test "$gcry_cv_gcc_inline_asm_sve2" != "yes" ; then if test "$gcry_cv_gcc_inline_asm_aarch64_sve2" != "yes" ; then sve2support="no (unsupported by compiler)" fi fi fi if test x"$aesnisupport" = xyes ; then AC_DEFINE(ENABLE_AESNI_SUPPORT, 1, [Enable support for Intel AES-NI instructions.]) fi if test x"$shaextsupport" = xyes ; then AC_DEFINE(ENABLE_SHAEXT_SUPPORT, 1, [Enable support for Intel SHAEXT instructions.]) fi if test x"$pclmulsupport" = xyes ; then AC_DEFINE(ENABLE_PCLMUL_SUPPORT, 1, [Enable support for Intel PCLMUL instructions.]) fi if test x"$sse41support" = xyes ; then AC_DEFINE(ENABLE_SSE41_SUPPORT, 1, [Enable support for Intel SSE4.1 instructions.]) fi if test x"$avxsupport" = xyes ; then AC_DEFINE(ENABLE_AVX_SUPPORT,1, [Enable support for Intel AVX instructions.]) fi if test x"$avx2support" = xyes ; then AC_DEFINE(ENABLE_AVX2_SUPPORT,1, [Enable support for Intel AVX2 instructions.]) fi if test x"$avx512support" = xyes ; then AC_DEFINE(ENABLE_AVX512_SUPPORT,1, [Enable support for Intel AVX512 instructions.]) fi if test x"$gfnisupport" = xyes ; then AC_DEFINE(ENABLE_GFNI_SUPPORT,1, [Enable support for Intel GFNI instructions.]) fi if test x"$neonsupport" = xyes ; then AC_DEFINE(ENABLE_NEON_SUPPORT,1, [Enable support for ARM NEON instructions.]) fi if test x"$armcryptosupport" = xyes ; then AC_DEFINE(ENABLE_ARM_CRYPTO_SUPPORT,1, [Enable support for ARMv8 Crypto Extension instructions.]) fi if test x"$svesupport" = xyes ; then AC_DEFINE(ENABLE_SVE_SUPPORT,1, [Enable support for ARMv8 SVE instructions.]) fi if test x"$sve2support" = xyes ; then AC_DEFINE(ENABLE_SVE2_SUPPORT,1, [Enable support for ARMv9 SVE2 instructions.]) fi if test x"$ppccryptosupport" = xyes ; then 
AC_DEFINE(ENABLE_PPC_CRYPTO_SUPPORT,1, [Enable support for POWER 8 (PowerISA 2.07) crypto extension.]) fi if test x"$jentsupport" = xyes ; then AC_DEFINE(ENABLE_JENT_SUPPORT, 1, [Enable support for the jitter entropy collector.]) fi if test x"$padlocksupport" = xyes ; then AC_DEFINE(ENABLE_PADLOCK_SUPPORT, 1, [Enable support for the PadLock engine.]) fi if test x"$drngsupport" = xyes ; then AC_DEFINE(ENABLE_DRNG_SUPPORT, 1, [Enable support for Intel DRNG (RDRAND instruction).]) fi if test x"$force_soft_hwfeatures" = xyes ; then AC_DEFINE(ENABLE_FORCE_SOFT_HWFEATURES, 1, [Enable forcing 'soft' HW feature bits on (for testing).]) fi # Define conditional sources and config.h symbols depending on the # selected ciphers, pubkey-ciphers, digests, kdfs, and random modules. LIST_MEMBER(arcfour, $enabled_ciphers) if test "$found" = "1"; then GCRYPT_CIPHERS="$GCRYPT_CIPHERS arcfour.lo" AC_DEFINE(USE_ARCFOUR, 1, [Defined if this module should be included]) case "${host}" in x86_64-*-*) # Build with the assembly implementation GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS arcfour-amd64.lo" ;; esac fi LIST_MEMBER(blowfish, $enabled_ciphers) if test "$found" = "1" ; then GCRYPT_CIPHERS="$GCRYPT_CIPHERS blowfish.lo" AC_DEFINE(USE_BLOWFISH, 1, [Defined if this module should be included]) case "${host}" in x86_64-*-*) # Build with the assembly implementation GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS blowfish-amd64.lo" ;; arm*-*-*) # Build with the assembly implementation GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS blowfish-arm.lo" ;; esac fi LIST_MEMBER(cast5, $enabled_ciphers) if test "$found" = "1" ; then GCRYPT_CIPHERS="$GCRYPT_CIPHERS cast5.lo" AC_DEFINE(USE_CAST5, 1, [Defined if this module should be included]) case "${host}" in x86_64-*-*) # Build with the assembly implementation GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS cast5-amd64.lo" ;; arm*-*-*) # Build with the assembly implementation GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS cast5-arm.lo" ;; esac fi LIST_MEMBER(des, $enabled_ciphers) if test "$found" = "1" ; then GCRYPT_CIPHERS="$GCRYPT_CIPHERS des.lo" AC_DEFINE(USE_DES, 1, [Defined if this module should be included]) case "${host}" in x86_64-*-*) # Build with the assembly implementation GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS des-amd64.lo" ;; esac fi LIST_MEMBER(aes, $enabled_ciphers) if test "$found" = "1" ; then GCRYPT_CIPHERS="$GCRYPT_CIPHERS rijndael.lo" AC_DEFINE(USE_AES, 1, [Defined if this module should be included]) case "${host}" in x86_64-*-*) # Build with the assembly implementation GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS rijndael-amd64.lo" # Build with the SSSE3 implementation GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS rijndael-ssse3-amd64.lo" GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS rijndael-ssse3-amd64-asm.lo" # Build with the VAES/AVX2 implementation GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS rijndael-vaes.lo" GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS rijndael-vaes-avx2-amd64.lo" ;; arm*-*-*) # Build with the assembly implementation GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS rijndael-arm.lo" # Build with the ARMv8/AArch32 CE implementation GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS rijndael-armv8-ce.lo" GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS rijndael-armv8-aarch32-ce.lo" ;; aarch64-*-*) # Build with the assembly implementation GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS rijndael-aarch64.lo" # Build with the ARMv8/AArch64 CE implementation GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS rijndael-armv8-ce.lo" GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS rijndael-armv8-aarch64-ce.lo" ;; powerpc64le-*-*) # Build with the crypto 
extension implementation GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS rijndael-ppc.lo" GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS rijndael-ppc9le.lo" if test "$gcry_cv_gcc_inline_asm_ppc_altivec" = "yes" && test "$gcry_cv_gcc_inline_asm_ppc_arch_3_00" = "yes" ; then # Build with AES-GCM bulk implementation for P10 GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS rijndael-gcm-p10le.lo" GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS rijndael-p10le.lo" fi ;; powerpc64-*-*) # Big-Endian. # Build with the crypto extension implementation GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS rijndael-ppc.lo" ;; powerpc-*-*) # Big-Endian. # Build with the crypto extension implementation GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS rijndael-ppc.lo" ;; s390x-*-*) # Big-Endian. # Build with the crypto extension implementation GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS rijndael-s390x.lo" ;; esac case "$mpi_cpu_arch" in x86) # Build with the AES-NI implementation GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS rijndael-aesni.lo" # Build with the Padlock implementation GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS rijndael-padlock.lo" ;; esac fi LIST_MEMBER(twofish, $enabled_ciphers) if test "$found" = "1" ; then GCRYPT_CIPHERS="$GCRYPT_CIPHERS twofish.lo" AC_DEFINE(USE_TWOFISH, 1, [Defined if this module should be included]) case "${host}" in x86_64-*-*) # Build with the assembly implementation GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS twofish-amd64.lo" if test x"$avx2support" = xyes ; then # Build with the AVX2 implementation GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS twofish-avx2-amd64.lo" fi ;; arm*-*-*) # Build with the assembly implementation GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS twofish-arm.lo" ;; aarch64-*-*) # Build with the assembly implementation GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS twofish-aarch64.lo" ;; esac fi LIST_MEMBER(serpent, $enabled_ciphers) if test "$found" = "1" ; then GCRYPT_CIPHERS="$GCRYPT_CIPHERS serpent.lo" AC_DEFINE(USE_SERPENT, 1, [Defined if this module should be included]) case "${host}" in x86_64-*-*) # Build with the SSE2 implementation GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS serpent-sse2-amd64.lo" ;; esac if test x"$avx2support" = xyes ; then # Build with the AVX2 implementation GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS serpent-avx2-amd64.lo" fi if test x"$neonsupport" = xyes ; then # Build with the NEON implementation GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS serpent-armv7-neon.lo" fi fi LIST_MEMBER(rfc2268, $enabled_ciphers) if test "$found" = "1" ; then GCRYPT_CIPHERS="$GCRYPT_CIPHERS rfc2268.lo" AC_DEFINE(USE_RFC2268, 1, [Defined if this module should be included]) fi LIST_MEMBER(seed, $enabled_ciphers) if test "$found" = "1" ; then GCRYPT_CIPHERS="$GCRYPT_CIPHERS seed.lo" AC_DEFINE(USE_SEED, 1, [Defined if this module should be included]) fi LIST_MEMBER(camellia, $enabled_ciphers) if test "$found" = "1" ; then GCRYPT_CIPHERS="$GCRYPT_CIPHERS camellia.lo camellia-glue.lo" AC_DEFINE(USE_CAMELLIA, 1, [Defined if this module should be included]) case "${host}" in arm*-*-*) # Build with the assembly implementation GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS camellia-arm.lo" ;; aarch64-*-*) # Build with the assembly implementation GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS camellia-aarch64.lo" ;; esac if test x"$avxsupport" = xyes ; then if test x"$aesnisupport" = xyes ; then # Build with the AES-NI/AVX implementation GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS camellia-aesni-avx-amd64.lo" fi fi if test x"$avx2support" = xyes ; then if test x"$aesnisupport" = xyes ; then # Build with the AES-NI/AVX2 implementation 
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS camellia-aesni-avx2-amd64.lo" # Build with the VAES/AVX2 implementation GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS camellia-vaes-avx2-amd64.lo" # Build with the GFNI/AVX2 implementation GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS camellia-gfni-avx2-amd64.lo" # Build with the GFNI/AVX512 implementation GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS camellia-gfni-avx512-amd64.lo" fi fi fi LIST_MEMBER(idea, $enabled_ciphers) if test "$found" = "1" ; then GCRYPT_CIPHERS="$GCRYPT_CIPHERS idea.lo" AC_DEFINE(USE_IDEA, 1, [Defined if this module should be included]) fi LIST_MEMBER(salsa20, $enabled_ciphers) if test "$found" = "1" ; then GCRYPT_CIPHERS="$GCRYPT_CIPHERS salsa20.lo" AC_DEFINE(USE_SALSA20, 1, [Defined if this module should be included]) case "${host}" in x86_64-*-*) # Build with the assembly implementation GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS salsa20-amd64.lo" ;; esac if test x"$neonsupport" = xyes ; then # Build with the NEON implementation GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS salsa20-armv7-neon.lo" fi fi LIST_MEMBER(gost28147, $enabled_ciphers) if test "$found" = "1" ; then GCRYPT_CIPHERS="$GCRYPT_CIPHERS gost28147.lo" AC_DEFINE(USE_GOST28147, 1, [Defined if this module should be included]) fi LIST_MEMBER(chacha20, $enabled_ciphers) if test "$found" = "1" ; then GCRYPT_CIPHERS="$GCRYPT_CIPHERS chacha20.lo" AC_DEFINE(USE_CHACHA20, 1, [Defined if this module should be included]) case "${host}" in x86_64-*-*) # Build with the assembly implementation GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS chacha20-amd64-ssse3.lo" GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS chacha20-amd64-avx2.lo" GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS chacha20-amd64-avx512.lo" ;; aarch64-*-*) # Build with the assembly implementation GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS chacha20-aarch64.lo" ;; powerpc64le-*-*) # Build with the ppc8 vector implementation GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS chacha20-ppc.lo" # Build with the assembly implementation if test "$gcry_cv_gcc_inline_asm_ppc_altivec" = "yes" && test "$gcry_cv_gcc_inline_asm_ppc_arch_3_00" = "yes" ; then GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS chacha20-p10le-8x.lo" fi ;; powerpc64-*-*) # Build with the ppc8 vector implementation GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS chacha20-ppc.lo" ;; powerpc-*-*) # Build with the ppc8 vector implementation GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS chacha20-ppc.lo" ;; s390x-*-*) # Build with the s390x/zSeries vector implementation GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS chacha20-s390x.lo" ;; esac if test x"$neonsupport" = xyes ; then # Build with the NEON implementation GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS chacha20-armv7-neon.lo" fi fi LIST_MEMBER(sm4, $enabled_ciphers) if test "$found" = "1" ; then GCRYPT_CIPHERS="$GCRYPT_CIPHERS sm4.lo" AC_DEFINE(USE_SM4, 1, [Defined if this module should be included]) case "${host}" in x86_64-*-*) # Build with the assembly implementation GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS sm4-aesni-avx-amd64.lo" GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS sm4-aesni-avx2-amd64.lo" GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS sm4-gfni-avx2-amd64.lo" GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS sm4-gfni-avx512-amd64.lo" ;; aarch64-*-*) # Build with the assembly implementation GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS sm4-aarch64.lo" GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS sm4-armv8-aarch64-ce.lo" GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS sm4-armv9-aarch64-sve-ce.lo" esac fi LIST_MEMBER(aria, $enabled_ciphers) if test "$found" = "1" ; then GCRYPT_CIPHERS="$GCRYPT_CIPHERS aria.lo" 
AC_DEFINE(USE_ARIA, 1, [Defined if this module should be included]) + + case "${host}" in + x86_64-*-*) + # Build with the assembly implementation + GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS aria-aesni-avx-amd64.lo" + GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS aria-aesni-avx2-amd64.lo" + ;; + esac fi LIST_MEMBER(dsa, $enabled_pubkey_ciphers) AM_CONDITIONAL(USE_DSA, [test "$found" = "1"]) if test "$found" = "1" ; then GCRYPT_PUBKEY_CIPHERS="$GCRYPT_PUBKEY_CIPHERS dsa.lo" AC_DEFINE(USE_DSA, 1, [Defined if this module should be included]) fi LIST_MEMBER(rsa, $enabled_pubkey_ciphers) AM_CONDITIONAL(USE_RSA, [test "$found" = "1"]) if test "$found" = "1" ; then GCRYPT_PUBKEY_CIPHERS="$GCRYPT_PUBKEY_CIPHERS rsa.lo" AC_DEFINE(USE_RSA, 1, [Defined if this module should be included]) fi LIST_MEMBER(elgamal, $enabled_pubkey_ciphers) AM_CONDITIONAL(USE_ELGAMAL, [test "$found" = "1"]) if test "$found" = "1" ; then GCRYPT_PUBKEY_CIPHERS="$GCRYPT_PUBKEY_CIPHERS elgamal.lo" AC_DEFINE(USE_ELGAMAL, 1, [Defined if this module should be included]) fi LIST_MEMBER(ecc, $enabled_pubkey_ciphers) AM_CONDITIONAL(USE_ECC, [test "$found" = "1"]) if test "$found" = "1" ; then GCRYPT_PUBKEY_CIPHERS="$GCRYPT_PUBKEY_CIPHERS \ ecc.lo ecc-curves.lo ecc-misc.lo \ ecc-ecdh.lo ecc-ecdsa.lo ecc-eddsa.lo ecc-gost.lo \ ecc-sm2.lo" AC_DEFINE(USE_ECC, 1, [Defined if this module should be included]) fi LIST_MEMBER(crc, $enabled_digests) if test "$found" = "1" ; then GCRYPT_DIGESTS="$GCRYPT_DIGESTS crc.lo" AC_DEFINE(USE_CRC, 1, [Defined if this module should be included]) case "${host}" in i?86-*-* | x86_64-*-*) # Build with the assembly implementation GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS crc-intel-pclmul.lo" ;; aarch64-*-*) # Build with the assembly implementation GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS crc-armv8-ce.lo" GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS crc-armv8-aarch64-ce.lo" ;; powerpc64le-*-*) GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS crc-ppc.lo" ;; powerpc64-*-*) GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS crc-ppc.lo" ;; powerpc-*-*) GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS crc-ppc.lo" ;; esac fi LIST_MEMBER(gostr3411-94, $enabled_digests) if test "$found" = "1" ; then # GOST R 34.11-94 internally uses GOST 28147-89 LIST_MEMBER(gost28147, $enabled_ciphers) if test "$found" = "1" ; then GCRYPT_DIGESTS="$GCRYPT_DIGESTS gostr3411-94.lo" AC_DEFINE(USE_GOST_R_3411_94, 1, [Defined if this module should be included]) fi fi LIST_MEMBER(stribog, $enabled_digests) if test "$found" = "1" ; then GCRYPT_DIGESTS="$GCRYPT_DIGESTS stribog.lo" AC_DEFINE(USE_GOST_R_3411_12, 1, [Defined if this module should be included]) fi LIST_MEMBER(md2, $enabled_digests) if test "$found" = "1" ; then GCRYPT_DIGESTS="$GCRYPT_DIGESTS md2.lo" AC_DEFINE(USE_MD2, 1, [Defined if this module should be included]) fi LIST_MEMBER(md4, $enabled_digests) if test "$found" = "1" ; then GCRYPT_DIGESTS="$GCRYPT_DIGESTS md4.lo" AC_DEFINE(USE_MD4, 1, [Defined if this module should be included]) fi LIST_MEMBER(md5, $enabled_digests) if test "$found" = "1" ; then GCRYPT_DIGESTS="$GCRYPT_DIGESTS md5.lo" AC_DEFINE(USE_MD5, 1, [Defined if this module should be included]) fi LIST_MEMBER(rmd160, $enabled_digests) if test "$found" = "1" ; then GCRYPT_DIGESTS="$GCRYPT_DIGESTS rmd160.lo" AC_DEFINE(USE_RMD160, 1, [Defined if this module should be included]) fi LIST_MEMBER(sha256, $enabled_digests) if test "$found" = "1" ; then GCRYPT_DIGESTS="$GCRYPT_DIGESTS sha256.lo" AC_DEFINE(USE_SHA256, 1, [Defined if this module should be included]) case "${host}" in x86_64-*-*) # Build with the 
LIST_MEMBER(sha256, $enabled_digests)
if test "$found" = "1" ; then
   GCRYPT_DIGESTS="$GCRYPT_DIGESTS sha256.lo"
   AC_DEFINE(USE_SHA256, 1, [Defined if this module should be included])

   case "${host}" in
      x86_64-*-*)
         # Build with the assembly implementation
         GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS sha256-ssse3-amd64.lo"
         GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS sha256-avx-amd64.lo"
         GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS sha256-avx2-bmi2-amd64.lo"
      ;;
      arm*-*-*)
         # Build with the assembly implementation
         GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS sha256-armv8-aarch32-ce.lo"
      ;;
      aarch64-*-*)
         # Build with the assembly implementation
         GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS sha256-armv8-aarch64-ce.lo"
      ;;
      powerpc64le-*-*)
         # Build with the crypto extension implementation
         GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS sha256-ppc.lo"
      ;;
      powerpc64-*-*)
         # Big-Endian.
         # Build with the crypto extension implementation
         GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS sha256-ppc.lo"
      ;;
      powerpc-*-*)
         # Big-Endian.
         # Build with the crypto extension implementation
         GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS sha256-ppc.lo"
   esac

   case "$mpi_cpu_arch" in
      x86)
         # Build with the SHAEXT implementation
         GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS sha256-intel-shaext.lo"
      ;;
   esac
fi
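# (Editorial note, hedged:) The sha256 block above dispatches on two
# different variables: "${host}" selects host-triplet-specific assembly,
# while "$mpi_cpu_arch" catches the SHAEXT module -- presumably because
# sha256-intel-shaext.c is compiler-intrinsics C code usable on both
# i?86 and x86_64 hosts, and so is keyed off the generic x86 arch.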
LIST_MEMBER(sha512, $enabled_digests)
if test "$found" = "1" ; then
   GCRYPT_DIGESTS="$GCRYPT_DIGESTS sha512.lo"
   AC_DEFINE(USE_SHA512, 1, [Defined if this module should be included])

   case "${host}" in
      x86_64-*-*)
         # Build with the assembly implementation
         GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS sha512-ssse3-amd64.lo"
         GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS sha512-avx-amd64.lo"
         GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS sha512-avx2-bmi2-amd64.lo"
         GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS sha512-avx512-amd64.lo"
      ;;
      i?86-*-*)
         # Build with the assembly implementation
         GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS sha512-ssse3-i386.lo"
      ;;
      arm*-*-*)
         # Build with the assembly implementation
         GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS sha512-arm.lo"
      ;;
      aarch64-*-*)
         # Build with the assembly implementation
         GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS sha512-armv8-aarch64-ce.lo"
      ;;
      powerpc64le-*-*)
         # Build with the crypto extension implementation
         GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS sha512-ppc.lo"
      ;;
      powerpc64-*-*)
         # Big-Endian.
         # Build with the crypto extension implementation
         GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS sha512-ppc.lo"
      ;;
      powerpc-*-*)
         # Big-Endian.
         # Build with the crypto extension implementation
         GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS sha512-ppc.lo"
   esac

   if test x"$neonsupport" = xyes ; then
     # Build with the NEON implementation
     GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS sha512-armv7-neon.lo"
   fi
fi

LIST_MEMBER(sha3, $enabled_digests)
if test "$found" = "1" ; then
   GCRYPT_DIGESTS="$GCRYPT_DIGESTS keccak.lo"
   AC_DEFINE(USE_SHA3, 1, [Defined if this module should be included])

   case "${host}" in
      x86_64-*-*)
         # Build with the assembly implementation
         GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS keccak-amd64-avx512.lo"
      ;;
   esac

   if test x"$neonsupport" = xyes ; then
     # Build with the NEON implementation
     GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS keccak-armv7-neon.lo"
   fi
fi

LIST_MEMBER(tiger, $enabled_digests)
if test "$found" = "1" ; then
   GCRYPT_DIGESTS="$GCRYPT_DIGESTS tiger.lo"
   AC_DEFINE(USE_TIGER, 1, [Defined if this module should be included])
fi

LIST_MEMBER(whirlpool, $enabled_digests)
if test "$found" = "1" ; then
   GCRYPT_DIGESTS="$GCRYPT_DIGESTS whirlpool.lo"
   AC_DEFINE(USE_WHIRLPOOL, 1, [Defined if this module should be included])

   case "${host}" in
      x86_64-*-*)
         # Build with the assembly implementation
         GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS whirlpool-sse2-amd64.lo"
      ;;
   esac
fi

LIST_MEMBER(blake2, $enabled_digests)
if test "$found" = "1" ; then
   GCRYPT_DIGESTS="$GCRYPT_DIGESTS blake2.lo"
   AC_DEFINE(USE_BLAKE2, 1, [Defined if this module should be included])

   case "${host}" in
      x86_64-*-*)
         # Build with the assembly implementation
         GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS blake2b-amd64-avx2.lo"
         GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS blake2b-amd64-avx512.lo"
         GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS blake2s-amd64-avx.lo"
         GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS blake2s-amd64-avx512.lo"
      ;;
   esac
fi

LIST_MEMBER(sm3, $enabled_digests)
if test "$found" = "1" ; then
   GCRYPT_DIGESTS="$GCRYPT_DIGESTS sm3.lo"
   AC_DEFINE(USE_SM3, 1, [Defined if this module should be included])

   case "${host}" in
      x86_64-*-*)
         # Build with the assembly implementation
         GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS sm3-avx-bmi2-amd64.lo"
      ;;
      aarch64-*-*)
         # Build with the assembly implementation
         GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS sm3-aarch64.lo"
         GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS sm3-armv8-aarch64-ce.lo"
      ;;
   esac
fi
# SHA-1 needs to be included always for example because it is used by
# random-csprng.c.
GCRYPT_DIGESTS="$GCRYPT_DIGESTS sha1.lo"
AC_DEFINE(USE_SHA1, 1, [Defined if this module should be included])

case "${host}" in
   x86_64-*-*)
      # Build with the assembly implementation
      GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS sha1-ssse3-amd64.lo"
      GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS sha1-avx-amd64.lo"
      GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS sha1-avx-bmi2-amd64.lo"
      GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS sha1-avx2-bmi2-amd64.lo"
   ;;
   arm*-*-*)
      # Build with the assembly implementation
      GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS sha1-armv7-neon.lo"
      GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS sha1-armv8-aarch32-ce.lo"
   ;;
   aarch64-*-*)
      # Build with the assembly implementation
      GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS sha1-armv8-aarch64-ce.lo"
   ;;
esac

case "$mpi_cpu_arch" in
   x86)
      # Build with the SHAEXT implementation
      GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS sha1-intel-shaext.lo"
   ;;
esac

# Arch specific GCM implementations
case "${host}" in
   i?86-*-* | x86_64-*-*)
      GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS cipher-gcm-intel-pclmul.lo"
   ;;
   arm*-*-*)
      GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS cipher-gcm-armv7-neon.lo"
      GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS cipher-gcm-armv8-aarch32-ce.lo"
   ;;
   aarch64-*-*)
      GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS cipher-gcm-armv8-aarch64-ce.lo"
   ;;
   powerpc64le-*-* | powerpc64-*-* | powerpc-*-*)
      GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS cipher-gcm-ppc.lo"
   ;;
esac

# Arch specific MAC implementations
case "${host}" in
   s390x-*-*)
      GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS poly1305-s390x.lo"
   ;;
   x86_64-*-*)
      GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS poly1305-amd64-avx512.lo"
   ;;
   powerpc64le-*-*)
      # Build with the assembly implementation
      if test "$gcry_cv_gcc_inline_asm_ppc_altivec" = "yes" &&
         test "$gcry_cv_gcc_inline_asm_ppc_arch_3_00" = "yes" ; then
         GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS poly1305-p10le.lo"
      fi
   ;;
esac

LIST_MEMBER(scrypt, $enabled_kdfs)
if test "$found" = "1" ; then
   GCRYPT_KDFS="$GCRYPT_KDFS scrypt.lo"
   AC_DEFINE(USE_SCRYPT, 1, [Defined if this module should be included])
fi

LIST_MEMBER(getentropy, $random_modules)
if test "$found" = "1" ; then
   GCRYPT_RANDOM="$GCRYPT_RANDOM rndgetentropy.lo"
   AC_DEFINE(USE_RNDGETENTROPY, 1,
             [Defined if the getentropy RNG should be used.])
fi

LIST_MEMBER(linux, $random_modules)
if test "$found" = "1" ; then
   GCRYPT_RANDOM="$GCRYPT_RANDOM rndoldlinux.lo"
   AC_DEFINE(USE_RNDOLDLINUX, 1,
             [Defined if the /dev/random RNG should be used.])
fi

LIST_MEMBER(unix, $random_modules)
if test "$found" = "1" ; then
   GCRYPT_RANDOM="$GCRYPT_RANDOM rndunix.lo"
   AC_DEFINE(USE_RNDUNIX, 1,
             [Defined if the default Unix RNG should be used.])
fi

LIST_MEMBER(egd, $random_modules)
if test "$found" = "1" ; then
   GCRYPT_RANDOM="$GCRYPT_RANDOM rndegd.lo"
   AC_DEFINE(USE_RNDEGD, 1,
             [Defined if the EGD based RNG should be used.])
fi

LIST_MEMBER(w32, $random_modules)
if test "$found" = "1" ; then
   GCRYPT_RANDOM="$GCRYPT_RANDOM rndw32.lo"
   AC_DEFINE(USE_RNDW32, 1,
             [Defined if the Windows specific RNG should be used.])
fi

LIST_MEMBER(w32ce, $random_modules)
if test "$found" = "1" ; then
   GCRYPT_RANDOM="$GCRYPT_RANDOM rndw32ce.lo"
   AC_DEFINE(USE_RNDW32CE, 1,
             [Defined if the WindowsCE specific RNG should be used.])
fi

if test "$try_asm_modules" = yes ; then
  # Build with assembly implementations
  GCRYPT_CIPHERS="$GCRYPT_CIPHERS $GCRYPT_ASM_CIPHERS"
  GCRYPT_DIGESTS="$GCRYPT_DIGESTS $GCRYPT_ASM_DIGESTS"
fi
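# (Editorial note, hedged illustration:) The AC_SUBST calls that follow
# feed the @GCRYPT_CIPHERS@ / @GCRYPT_DIGESTS@ / @GCRYPT_KDFS@ /
# @GCRYPT_RANDOM@ substitutions consumed by the per-directory Makefiles,
# so an asm module reaches the link step only if it survived the
# $try_asm_modules merge just above.  With ARIA enabled on x86_64, the
# merged list might, for example, end up containing:
#
#   GCRYPT_CIPHERS="... aria.lo ... aria-aesni-avx-amd64.lo aria-aesni-avx2-amd64.lo"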
AC_SUBST([GCRYPT_CIPHERS])
AC_SUBST([GCRYPT_PUBKEY_CIPHERS])
AC_SUBST([GCRYPT_DIGESTS])
AC_SUBST([GCRYPT_KDFS])
AC_SUBST([GCRYPT_RANDOM])

AC_SUBST(LIBGCRYPT_CIPHERS, $enabled_ciphers)
AC_SUBST(LIBGCRYPT_PUBKEY_CIPHERS, $enabled_pubkey_ciphers)
AC_SUBST(LIBGCRYPT_DIGESTS, $enabled_digests)

# For printing the configuration we need a colon separated list of
# algorithm names.
tmp=`echo "$enabled_ciphers" | tr ' ' : `
AC_DEFINE_UNQUOTED(LIBGCRYPT_CIPHERS, "$tmp",
                   [List of available cipher algorithms])

tmp=`echo "$enabled_pubkey_ciphers" | tr ' ' : `
AC_DEFINE_UNQUOTED(LIBGCRYPT_PUBKEY_CIPHERS, "$tmp",
                   [List of available public key cipher algorithms])

tmp=`echo "$enabled_digests" | tr ' ' : `
AC_DEFINE_UNQUOTED(LIBGCRYPT_DIGESTS, "$tmp",
                   [List of available digest algorithms])

tmp=`echo "$enabled_kdfs" | tr ' ' : `
AC_DEFINE_UNQUOTED(LIBGCRYPT_KDFS, "$tmp",
                   [List of available KDF algorithms])

#
# Define conditional sources depending on the used hardware platform.
# Note that all possible modules must also be listed in
# src/Makefile.am (EXTRA_libgcrypt_la_SOURCES).
#
GCRYPT_HWF_MODULES=
case "$mpi_cpu_arch" in
     x86)
        AC_DEFINE(HAVE_CPU_ARCH_X86, 1, [Defined for the x86 platforms])
        GCRYPT_HWF_MODULES="libgcrypt_la-hwf-x86.lo"
        ;;
     alpha)
        AC_DEFINE(HAVE_CPU_ARCH_ALPHA, 1, [Defined for Alpha platforms])
        ;;
     sparc)
        AC_DEFINE(HAVE_CPU_ARCH_SPARC, 1, [Defined for SPARC platforms])
        ;;
     mips)
        AC_DEFINE(HAVE_CPU_ARCH_MIPS, 1, [Defined for MIPS platforms])
        ;;
     m68k)
        AC_DEFINE(HAVE_CPU_ARCH_M68K, 1, [Defined for M68k platforms])
        ;;
     ppc)
        AC_DEFINE(HAVE_CPU_ARCH_PPC, 1, [Defined for PPC platforms])
        GCRYPT_HWF_MODULES="libgcrypt_la-hwf-ppc.lo"
        ;;
     arm)
        AC_DEFINE(HAVE_CPU_ARCH_ARM, 1, [Defined for ARM platforms])
        GCRYPT_HWF_MODULES="libgcrypt_la-hwf-arm.lo"
        ;;
     aarch64)
        AC_DEFINE(HAVE_CPU_ARCH_ARM, 1, [Defined for ARM AArch64 platforms])
        GCRYPT_HWF_MODULES="libgcrypt_la-hwf-arm.lo"
        ;;
     s390x)
        AC_DEFINE(HAVE_CPU_ARCH_S390X, 1, [Defined for s390x/zSeries platforms])
        GCRYPT_HWF_MODULES="libgcrypt_la-hwf-s390x.lo"
        ;;
esac
AC_SUBST([GCRYPT_HWF_MODULES])

#
# Option to disable building of doc file
#
build_doc=yes
AC_ARG_ENABLE([doc], AS_HELP_STRING([--disable-doc],
                                    [do not build the documentation]),
                     build_doc=$enableval, build_doc=yes)
AM_CONDITIONAL([BUILD_DOC], [test "x$build_doc" != xno])

#
# Provide information about the build.
#
BUILD_REVISION="mym4_revision"
AC_SUBST(BUILD_REVISION)
AC_DEFINE_UNQUOTED(BUILD_REVISION, "$BUILD_REVISION",
                   [GIT commit id revision used to build this package])

changequote(,)dnl
BUILD_VERSION=`echo "$PACKAGE_VERSION" | sed 's/\([0-9.]*\).*/\1./'`
changequote([,])dnl
BUILD_VERSION="${BUILD_VERSION}mym4_revision_dec"
BUILD_FILEVERSION=`echo "${BUILD_VERSION}" | tr . ,`
AC_SUBST(BUILD_VERSION)
AC_SUBST(BUILD_FILEVERSION)

AC_ARG_ENABLE([build-timestamp],
  AS_HELP_STRING([--enable-build-timestamp],
                 [set an explicit build timestamp for reproducibility.
                  (default is the current time in ISO-8601 format)]),
     [if test "$enableval" = "yes"; then
        BUILD_TIMESTAMP=`date -u +%Y-%m-%dT%H:%M+0000 2>/dev/null || date`
      else
        BUILD_TIMESTAMP="$enableval"
      fi],
     [BUILD_TIMESTAMP=""])
AC_SUBST(BUILD_TIMESTAMP)
AC_DEFINE_UNQUOTED(BUILD_TIMESTAMP, "$BUILD_TIMESTAMP",
                   [The time this package was configured for a build])
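# (Editorial note, hedged example:) With a hypothetical PACKAGE_VERSION of
# 1.11.0 and revision 4660, the BUILD_VERSION/BUILD_FILEVERSION pipeline
# above would yield "1.11.0.4660" and "1,11,0,4660" respectively; the
# comma-separated form matches the FILEVERSION field syntax of the Windows
# resource file src/versioninfo.rc configured below.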
# And create the files.
AC_CONFIG_FILES([
Makefile
m4/Makefile
compat/Makefile
mpi/Makefile
cipher/Makefile
random/Makefile
doc/Makefile
src/Makefile
src/gcrypt.h
src/libgcrypt-config
src/libgcrypt.pc
src/versioninfo.rc
tests/Makefile
])
AC_CONFIG_FILES([tests/hashtest-6g], [chmod +x tests/hashtest-6g])
AC_CONFIG_FILES([tests/hashtest-256g], [chmod +x tests/hashtest-256g])
AC_CONFIG_FILES([tests/basic-disable-all-hwf], [chmod +x tests/basic-disable-all-hwf])
AC_OUTPUT

detection_module="${GCRYPT_HWF_MODULES%.lo}"
test -n "$detection_module" || detection_module="none"

# Give some feedback
GCRY_MSG_SHOW([],[])
GCRY_MSG_SHOW([Libgcrypt],[v${VERSION} has been configured as follows:])
GCRY_MSG_SHOW([],[])
GCRY_MSG_SHOW([Platform:                 ],[$PRINTABLE_OS_NAME ($host)])
GCRY_MSG_SHOW([Hardware detection module:],[$detection_module])
GCRY_MSG_WRAP([Enabled cipher algorithms:],[$enabled_ciphers])
GCRY_MSG_WRAP([Enabled digest algorithms:],[$enabled_digests])
GCRY_MSG_WRAP([Enabled kdf algorithms:   ],[$enabled_kdfs])
GCRY_MSG_WRAP([Enabled pubkey algorithms:],[$enabled_pubkey_ciphers])
GCRY_MSG_SHOW([Random number generator:  ],[$random])
GCRY_MSG_SHOW([Try using jitter entropy: ],[$jentsupport])
GCRY_MSG_SHOW([Using linux capabilities: ],[$use_capabilities])
GCRY_MSG_SHOW([FIPS module version:      ],[$fips_module_version])
GCRY_MSG_SHOW([Try using Padlock crypto: ],[$padlocksupport])
GCRY_MSG_SHOW([Try using AES-NI crypto:  ],[$aesnisupport])
GCRY_MSG_SHOW([Try using Intel SHAEXT:   ],[$shaextsupport])
GCRY_MSG_SHOW([Try using Intel PCLMUL:   ],[$pclmulsupport])
GCRY_MSG_SHOW([Try using Intel SSE4.1:   ],[$sse41support])
GCRY_MSG_SHOW([Try using DRNG (RDRAND):  ],[$drngsupport])
GCRY_MSG_SHOW([Try using Intel AVX:      ],[$avxsupport])
GCRY_MSG_SHOW([Try using Intel AVX2:     ],[$avx2support])
GCRY_MSG_SHOW([Try using Intel AVX512:   ],[$avx512support])
GCRY_MSG_SHOW([Try using Intel GFNI:     ],[$gfnisupport])
GCRY_MSG_SHOW([Try using ARM NEON:       ],[$neonsupport])
GCRY_MSG_SHOW([Try using ARMv8 crypto:   ],[$armcryptosupport])
GCRY_MSG_SHOW([Try using ARMv8 SVE:      ],[$svesupport])
GCRY_MSG_SHOW([Try using ARMv9 SVE2:     ],[$sve2support])
GCRY_MSG_SHOW([Try using PPC crypto:     ],[$ppccryptosupport])
GCRY_MSG_SHOW([],[])

if test "x${gpg_config_script_warn}" != x; then
cat <