diff --git a/cipher/Makefile.am b/cipher/Makefile.am
index e67b1ee2..8c7ec095 100644
--- a/cipher/Makefile.am
+++ b/cipher/Makefile.am
@@ -1,318 +1,331 @@
# Makefile for cipher modules
# Copyright (C) 1998, 1999, 2000, 2001, 2002,
# 2003, 2009 Free Software Foundation, Inc.
#
# This file is part of Libgcrypt.
#
# Libgcrypt is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of
# the License, or (at your option) any later version.
#
# Libgcrypt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, see <http://www.gnu.org/licenses/>.
# Process this file with automake to produce Makefile.in
# Need to include ../src in addition to top_srcdir because gcrypt.h is
# a built header.
AM_CPPFLAGS = -I../src -I$(top_srcdir)/src -I../mpi -I$(top_srcdir)/mpi
AM_CFLAGS = $(GPG_ERROR_CFLAGS)
AM_CCASFLAGS = $(NOEXECSTACK_FLAGS)
EXTRA_DIST = gost-s-box.c
CLEANFILES = gost-s-box$(EXEEXT_FOR_BUILD)
DISTCLEANFILES = gost-sb.h
noinst_LTLIBRARIES = libcipher.la
GCRYPT_MODULES = @GCRYPT_CIPHERS@ @GCRYPT_PUBKEY_CIPHERS@ \
@GCRYPT_DIGESTS@ @GCRYPT_KDFS@
libcipher_la_DEPENDENCIES = $(GCRYPT_MODULES)
libcipher_la_LIBADD = $(GCRYPT_MODULES)
libcipher_la_SOURCES = \
cipher.c cipher-internal.h \
cipher-cbc.c \
cipher-cfb.c \
cipher-ofb.c \
cipher-ctr.c \
cipher-aeswrap.c \
cipher-ccm.c \
cipher-cmac.c \
cipher-gcm.c \
cipher-poly1305.c \
cipher-ocb.c \
cipher-xts.c \
cipher-eax.c \
cipher-siv.c \
cipher-gcm-siv.c \
pubkey.c pubkey-internal.h pubkey-util.c \
md.c \
mac.c mac-internal.h \
mac-hmac.c mac-cmac.c mac-gmac.c mac-poly1305.c \
poly1305.c poly1305-internal.h \
kdf.c kdf-internal.h \
bithelp.h \
bufhelp.h \
bulkhelp.h \
primegen.c \
hash-common.c hash-common.h \
dsa-common.c rsa-common.c \
sha1.h
EXTRA_libcipher_la_SOURCES = \
asm-common-aarch64.h \
asm-common-amd64.h \
asm-common-s390x.h \
asm-inline-s390x.h \
asm-poly1305-aarch64.h \
asm-poly1305-amd64.h \
asm-poly1305-s390x.h \
aria.c aria-aesni-avx-amd64.S aria-aesni-avx2-amd64.S \
aria-gfni-avx512-amd64.S \
arcfour.c arcfour-amd64.S \
blowfish.c blowfish-amd64.S blowfish-arm.S \
cast5.c cast5-amd64.S cast5-arm.S \
chacha20.c chacha20-amd64-ssse3.S chacha20-amd64-avx2.S \
chacha20-amd64-avx512.S chacha20-armv7-neon.S chacha20-aarch64.S \
chacha20-ppc.c chacha20-s390x.S \
chacha20-p10le-8x.s \
cipher-gcm-ppc.c cipher-gcm-intel-pclmul.c cipher-gcm-armv7-neon.S \
cipher-gcm-armv8-aarch32-ce.S cipher-gcm-armv8-aarch64-ce.S \
crc.c crc-intel-pclmul.c crc-armv8-ce.c \
crc-armv8-aarch64-ce.S \
crc-ppc.c \
des.c des-amd64.S \
dsa.c \
elgamal.c \
ecc.c ecc-curves.c ecc-misc.c ecc-common.h \
ecc-ecdh.c ecc-ecdsa.c ecc-eddsa.c ecc-gost.c ecc-sm2.c \
idea.c \
gost28147.c gost.h \
gostr3411-94.c \
md4.c \
md5.c \
poly1305-s390x.S poly1305-amd64-avx512.S \
poly1305-p10le.s \
rijndael.c rijndael-internal.h rijndael-tables.h \
rijndael-aesni.c rijndael-padlock.c \
rijndael-amd64.S rijndael-arm.S \
rijndael-ssse3-amd64.c rijndael-ssse3-amd64-asm.S \
rijndael-vaes.c rijndael-vaes-avx2-amd64.S \
rijndael-armv8-ce.c rijndael-armv8-aarch32-ce.S \
rijndael-armv8-aarch64-ce.S rijndael-aarch64.S \
rijndael-ppc.c rijndael-ppc9le.c \
rijndael-p10le.c rijndael-gcm-p10le.s \
rijndael-ppc-common.h rijndael-ppc-functions.h \
rijndael-s390x.c \
rmd160.c \
rsa.c \
salsa20.c salsa20-amd64.S salsa20-armv7-neon.S \
scrypt.c \
seed.c \
- serpent.c serpent-sse2-amd64.S \
+ serpent.c serpent-sse2-amd64.S serpent-avx2-amd64.S \
+ serpent-avx512-x86.c serpent-armv7-neon.S \
sm4.c sm4-aesni-avx-amd64.S sm4-aesni-avx2-amd64.S \
sm4-gfni-avx2-amd64.S sm4-gfni-avx512-amd64.S \
sm4-aarch64.S sm4-armv8-aarch64-ce.S sm4-armv9-aarch64-sve-ce.S \
sm4-ppc.c \
- serpent-avx2-amd64.S serpent-armv7-neon.S \
sha1.c sha1-ssse3-amd64.S sha1-avx-amd64.S sha1-avx-bmi2-amd64.S \
sha1-avx2-bmi2-amd64.S sha1-armv7-neon.S sha1-armv8-aarch32-ce.S \
sha1-armv8-aarch64-ce.S sha1-intel-shaext.c \
sha256.c sha256-ssse3-amd64.S sha256-avx-amd64.S \
sha256-avx2-bmi2-amd64.S \
sha256-armv8-aarch32-ce.S sha256-armv8-aarch64-ce.S \
sha256-intel-shaext.c sha256-ppc.c \
sha512.c sha512-ssse3-amd64.S sha512-avx-amd64.S \
sha512-avx2-bmi2-amd64.S sha512-avx512-amd64.S \
sha512-armv7-neon.S sha512-armv8-aarch64-ce.S sha512-arm.S \
sha512-ppc.c sha512-ssse3-i386.c \
sm3.c sm3-avx-bmi2-amd64.S sm3-aarch64.S sm3-armv8-aarch64-ce.S \
keccak.c keccak_permute_32.h keccak_permute_64.h \
keccak-armv7-neon.S keccak-amd64-avx512.S \
stribog.c \
tiger.c \
whirlpool.c whirlpool-sse2-amd64.S \
twofish.c twofish-amd64.S twofish-arm.S twofish-aarch64.S \
twofish-avx2-amd64.S \
rfc2268.c \
camellia.c camellia.h camellia-glue.c camellia-aesni-avx-amd64.S \
camellia-aesni-avx2-amd64.h \
camellia-gfni-avx2-amd64.S camellia-gfni-avx512-amd64.S \
camellia-vaes-avx2-amd64.S camellia-aesni-avx2-amd64.S \
camellia-arm.S camellia-aarch64.S camellia-aarch64-ce.c \
camellia-simd128.h camellia-ppc8le.c camellia-ppc9le.c \
blake2.c \
blake2b-amd64-avx2.S blake2b-amd64-avx512.S \
blake2s-amd64-avx.S blake2s-amd64-avx512.S
gost28147.lo: gost-sb.h
gost-sb.h: gost-s-box$(EXEEXT_FOR_BUILD)
./gost-s-box$(EXEEXT_FOR_BUILD) $@
gost-s-box$(EXEEXT_FOR_BUILD): gost-s-box.c
$(CC_FOR_BUILD) $(CFLAGS_FOR_BUILD) $(LDFLAGS_FOR_BUILD) \
$(CPPFLAGS_FOR_BUILD) -o $@ $(srcdir)/gost-s-box.c
if ENABLE_O_FLAG_MUNGING
o_flag_munging = sed -e 's/-O\([2-9sgz][2-9sgz]*\)/-O1/' -e 's/-Ofast/-O1/g'
else
o_flag_munging = cat
endif
# We need to lower the optimization for this module.
tiger.o: $(srcdir)/tiger.c Makefile
`echo $(COMPILE) -c $< | $(o_flag_munging) `
tiger.lo: $(srcdir)/tiger.c Makefile
`echo $(LTCOMPILE) -c $< | $(o_flag_munging) `
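# The backquoted recipe echoes the full compile command, filters it through
# $(o_flag_munging) (e.g. turning "-O2" into "-O1"), and lets the shell run
# the filtered result via command substitution.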
# We need to disable instrumentation for these modules as they use the C
# compiler as a thin assembly front-end and do not tolerate the in-between
# function calls inserted by the compiler, as those calls may clobber the
# XMM registers.
if ENABLE_INSTRUMENTATION_MUNGING
instrumentation_munging = sed \
-e 's/-fsanitize[=,\-][=,a-z,A-Z,0-9,\,,\-]*//g' \
-e 's/-fprofile[=,\-][=,a-z,A-Z,0-9,\,,\-]*//g' \
-e 's/-fcoverage[=,\-][=,a-z,A-Z,0-9,\,,\-]*//g'
else
instrumentation_munging = cat
endif
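# For example, flags such as "-fsanitize=address" or "-fprofile-arcs" are
# stripped from the compile command; all other flags pass through unchanged.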
rijndael-aesni.o: $(srcdir)/rijndael-aesni.c Makefile
`echo $(COMPILE) -c $< | $(instrumentation_munging) `
rijndael-aesni.lo: $(srcdir)/rijndael-aesni.c Makefile
`echo $(LTCOMPILE) -c $< | $(instrumentation_munging) `
rijndael-ssse3-amd64.o: $(srcdir)/rijndael-ssse3-amd64.c Makefile
`echo $(COMPILE) -c $< | $(instrumentation_munging) `
rijndael-ssse3-amd64.lo: $(srcdir)/rijndael-ssse3-amd64.c Makefile
`echo $(LTCOMPILE) -c $< | $(instrumentation_munging) `
cipher-gcm-intel-pclmul.o: $(srcdir)/cipher-gcm-intel-pclmul.c Makefile
`echo $(COMPILE) -c $< | $(instrumentation_munging) `
cipher-gcm-intel-pclmul.lo: $(srcdir)/cipher-gcm-intel-pclmul.c Makefile
`echo $(LTCOMPILE) -c $< | $(instrumentation_munging) `
sha1-intel-shaext.o: $(srcdir)/sha1-intel-shaext.c Makefile
`echo $(COMPILE) -c $< | $(instrumentation_munging) `
sha1-intel-shaext.lo: $(srcdir)/sha1-intel-shaext.c Makefile
`echo $(LTCOMPILE) -c $< | $(instrumentation_munging) `
sha256-intel-shaext.o: $(srcdir)/sha256-intel-shaext.c Makefile
`echo $(COMPILE) -c $< | $(instrumentation_munging) `
sha256-intel-shaext.lo: $(srcdir)/sha256-intel-shaext.c Makefile
`echo $(LTCOMPILE) -c $< | $(instrumentation_munging) `
sha256-ssse3-i386.o: $(srcdir)/sha256-ssse3-i386.c Makefile
`echo $(COMPILE) -c $< | $(instrumentation_munging) `
sha256-ssse3-i386.lo: $(srcdir)/sha256-ssse3-i386.c Makefile
`echo $(LTCOMPILE) -c $< | $(instrumentation_munging) `
crc-intel-pclmul.o: $(srcdir)/crc-intel-pclmul.c Makefile
`echo $(COMPILE) -c $< | $(instrumentation_munging) `
crc-intel-pclmul.lo: $(srcdir)/crc-intel-pclmul.c Makefile
`echo $(LTCOMPILE) -c $< | $(instrumentation_munging) `
if ENABLE_PPC_VCRYPTO_EXTRA_CFLAGS
ppc_vcrypto_cflags = -O2 -maltivec -mvsx -mcrypto
else
ppc_vcrypto_cflags =
endif
if ENABLE_AARCH64_NEON_INTRINSICS_EXTRA_CFLAGS
aarch64_neon_cflags = -O2 -march=armv8-a+crypto
else
aarch64_neon_cflags =
endif
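# These per-target flags enable the vector intrinsics used by the modules
# below; the conditionals are set by configure when the compiler supports
# the flags.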
rijndael-ppc.o: $(srcdir)/rijndael-ppc.c Makefile
`echo $(COMPILE) $(ppc_vcrypto_cflags) -c $< | $(instrumentation_munging) `
rijndael-ppc.lo: $(srcdir)/rijndael-ppc.c Makefile
`echo $(LTCOMPILE) $(ppc_vcrypto_cflags) -c $< | $(instrumentation_munging) `
rijndael-ppc9le.o: $(srcdir)/rijndael-ppc9le.c Makefile
`echo $(COMPILE) $(ppc_vcrypto_cflags) -c $< | $(instrumentation_munging) `
rijndael-ppc9le.lo: $(srcdir)/rijndael-ppc9le.c Makefile
`echo $(LTCOMPILE) $(ppc_vcrypto_cflags) -c $< | $(instrumentation_munging) `
rijndael-p10le.o: $(srcdir)/rijndael-p10le.c Makefile
`echo $(COMPILE) $(ppc_vcrypto_cflags) -c $< | $(instrumentation_munging) `
rijndael-p10le.lo: $(srcdir)/rijndael-p10le.c Makefile
`echo $(LTCOMPILE) $(ppc_vcrypto_cflags) -c $< | $(instrumentation_munging) `
sha256-ppc.o: $(srcdir)/sha256-ppc.c Makefile
`echo $(COMPILE) $(ppc_vcrypto_cflags) -c $< | $(instrumentation_munging) `
sha256-ppc.lo: $(srcdir)/sha256-ppc.c Makefile
`echo $(LTCOMPILE) $(ppc_vcrypto_cflags) -c $< | $(instrumentation_munging) `
sha512-ppc.o: $(srcdir)/sha512-ppc.c Makefile
`echo $(COMPILE) $(ppc_vcrypto_cflags) -c $< | $(instrumentation_munging) `
sha512-ppc.lo: $(srcdir)/sha512-ppc.c Makefile
`echo $(LTCOMPILE) $(ppc_vcrypto_cflags) -c $< | $(instrumentation_munging) `
chacha20-ppc.o: $(srcdir)/chacha20-ppc.c Makefile
`echo $(COMPILE) $(ppc_vcrypto_cflags) -c $< | $(instrumentation_munging) `
chacha20-ppc.lo: $(srcdir)/chacha20-ppc.c Makefile
`echo $(LTCOMPILE) $(ppc_vcrypto_cflags) -c $< | $(instrumentation_munging) `
crc-ppc.o: $(srcdir)/crc-ppc.c Makefile
`echo $(COMPILE) $(ppc_vcrypto_cflags) -c $< | $(instrumentation_munging) `
crc-ppc.lo: $(srcdir)/crc-ppc.c Makefile
`echo $(LTCOMPILE) $(ppc_vcrypto_cflags) -c $< | $(instrumentation_munging) `
cipher-gcm-ppc.o: $(srcdir)/cipher-gcm-ppc.c Makefile
`echo $(COMPILE) $(ppc_vcrypto_cflags) -c $< | $(instrumentation_munging) `
cipher-gcm-ppc.lo: $(srcdir)/cipher-gcm-ppc.c Makefile
`echo $(LTCOMPILE) $(ppc_vcrypto_cflags) -c $< | $(instrumentation_munging) `
camellia-ppc8le.o: $(srcdir)/camellia-ppc8le.c Makefile
`echo $(COMPILE) $(ppc_vcrypto_cflags) -c $< | $(instrumentation_munging) `
camellia-ppc8le.lo: $(srcdir)/camellia-ppc8le.c Makefile
`echo $(LTCOMPILE) $(ppc_vcrypto_cflags) -c $< | $(instrumentation_munging) `
camellia-ppc9le.o: $(srcdir)/camellia-ppc9le.c Makefile
`echo $(COMPILE) $(ppc_vcrypto_cflags) -c $< | $(instrumentation_munging) `
camellia-ppc9le.lo: $(srcdir)/camellia-ppc9le.c Makefile
`echo $(LTCOMPILE) $(ppc_vcrypto_cflags) -c $< | $(instrumentation_munging) `
camellia-aarch64-ce.o: $(srcdir)/camellia-aarch64-ce.c Makefile
`echo $(COMPILE) $(aarch64_neon_cflags) -c $< | $(instrumentation_munging) `
camellia-aarch64-ce.lo: $(srcdir)/camellia-aarch64-ce.c Makefile
`echo $(LTCOMPILE) $(aarch64_neon_cflags) -c $< | $(instrumentation_munging) `
sm4-ppc.o: $(srcdir)/sm4-ppc.c Makefile
`echo $(COMPILE) $(ppc_vcrypto_cflags) -c $< | $(instrumentation_munging) `
sm4-ppc.lo: $(srcdir)/sm4-ppc.c Makefile
`echo $(LTCOMPILE) $(ppc_vcrypto_cflags) -c $< | $(instrumentation_munging) `
+
+
+if ENABLE_X86_AVX512_INTRINSICS_EXTRA_CFLAGS
+avx512f_cflags = -mavx512f
+else
+avx512f_cflags =
+endif
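+# -mavx512f is added only for this one intrinsics module, so the rest of
+# the library is still built without AVX512 code generation.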
+
+serpent-avx512-x86.o: $(srcdir)/serpent-avx512-x86.c Makefile
+ `echo $(COMPILE) $(avx512f_cflags) -c $< | $(instrumentation_munging) `
+
+serpent-avx512-x86.lo: $(srcdir)/serpent-avx512-x86.c Makefile
+ `echo $(LTCOMPILE) $(avx512f_cflags) -c $< | $(instrumentation_munging) `
diff --git a/cipher/serpent-avx2-amd64.S b/cipher/serpent-avx2-amd64.S
index e25e7d3b..7aba235f 100644
--- a/cipher/serpent-avx2-amd64.S
+++ b/cipher/serpent-avx2-amd64.S
@@ -1,1214 +1,1214 @@
/* serpent-avx2-amd64.S - AVX2 implementation of Serpent cipher
*
* Copyright (C) 2013-2015 Jussi Kivilinna <jussi.kivilinna@iki.fi>
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <config.h>
#ifdef __x86_64
#if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \
defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && defined(USE_SERPENT) && \
defined(ENABLE_AVX2_SUPPORT)
#include "asm-common-amd64.h"
/* struct serpent_context: */
#define ctx_keys 0
/* register macros */
#define CTX %rdi
/* vector registers */
#define RA0 %ymm0
#define RA1 %ymm1
#define RA2 %ymm2
#define RA3 %ymm3
#define RA4 %ymm4
#define RB0 %ymm5
#define RB1 %ymm6
#define RB2 %ymm7
#define RB3 %ymm8
#define RB4 %ymm9
#define RNOT %ymm10
#define RTMP0 %ymm11
#define RTMP1 %ymm12
#define RTMP2 %ymm13
#define RTMP3 %ymm14
#define RTMP4 %ymm15
#define RNOTx %xmm10
#define RTMP0x %xmm11
#define RTMP1x %xmm12
#define RTMP2x %xmm13
#define RTMP3x %xmm14
#define RTMP4x %xmm15
/**********************************************************************
helper macros
**********************************************************************/
/* vector 32-bit rotation to left */
#define vec_rol(reg, nleft, tmp) \
vpslld $(nleft), reg, tmp; \
vpsrld $(32 - (nleft)), reg, reg; \
vpor tmp, reg, reg;
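/* i.e. reg = (reg << nleft) | (reg >> (32 - nleft)) in each 32-bit lane */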
/* vector 32-bit rotation to right */
#define vec_ror(reg, nright, tmp) \
vec_rol(reg, 32 - nright, tmp)
/* 4x4 32-bit integer matrix transpose */
#define transpose_4x4(x0, x1, x2, x3, t1, t2, t3) \
vpunpckhdq x1, x0, t2; \
vpunpckldq x1, x0, x0; \
\
vpunpckldq x3, x2, t1; \
vpunpckhdq x3, x2, x2; \
\
vpunpckhqdq t1, x0, x1; \
vpunpcklqdq t1, x0, x0; \
\
vpunpckhqdq x2, t2, x3; \
vpunpcklqdq x2, t2, x2;
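/* After the transpose, register i holds the i'th 32-bit word of four
 * consecutive blocks (independently within each 128-bit lane), which is the
 * layout the S-box and linear-transformation macros below operate on. */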
/**********************************************************************
16-way serpent
**********************************************************************/
/*
 * These are the S-Boxes of Serpent from the following research paper.
 *
 * D. A. Osvik, “Speeding up Serpent,” in Third AES Candidate Conference,
 * (New York, New York, USA), pp. 317–329, National Institute of Standards
 * and Technology, 2000.
 *
 * The paper is also available at: http://www.ii.uib.no/~osvik/pub/aes3.pdf
*
*/
#define SBOX0(r0, r1, r2, r3, r4) \
vpxor r0, r3, r3; vmovdqa r1, r4; \
vpand r3, r1, r1; vpxor r2, r4, r4; \
vpxor r0, r1, r1; vpor r3, r0, r0; \
vpxor r4, r0, r0; vpxor r3, r4, r4; \
vpxor r2, r3, r3; vpor r1, r2, r2; \
vpxor r4, r2, r2; vpxor RNOT, r4, r4; \
vpor r1, r4, r4; vpxor r3, r1, r1; \
vpxor r4, r1, r1; vpor r0, r3, r3; \
vpxor r3, r1, r1; vpxor r3, r4, r4;
#define SBOX0_INVERSE(r0, r1, r2, r3, r4) \
vpxor RNOT, r2, r2; vmovdqa r1, r4; \
vpor r0, r1, r1; vpxor RNOT, r4, r4; \
vpxor r2, r1, r1; vpor r4, r2, r2; \
vpxor r3, r1, r1; vpxor r4, r0, r0; \
vpxor r0, r2, r2; vpand r3, r0, r0; \
vpxor r0, r4, r4; vpor r1, r0, r0; \
vpxor r2, r0, r0; vpxor r4, r3, r3; \
vpxor r1, r2, r2; vpxor r0, r3, r3; \
vpxor r1, r3, r3; \
vpand r3, r2, r2; \
vpxor r2, r4, r4;
#define SBOX1(r0, r1, r2, r3, r4) \
vpxor RNOT, r0, r0; vpxor RNOT, r2, r2; \
vmovdqa r0, r4; vpand r1, r0, r0; \
vpxor r0, r2, r2; vpor r3, r0, r0; \
vpxor r2, r3, r3; vpxor r0, r1, r1; \
vpxor r4, r0, r0; vpor r1, r4, r4; \
vpxor r3, r1, r1; vpor r0, r2, r2; \
vpand r4, r2, r2; vpxor r1, r0, r0; \
vpand r2, r1, r1; \
vpxor r0, r1, r1; vpand r2, r0, r0; \
vpxor r4, r0, r0;
#define SBOX1_INVERSE(r0, r1, r2, r3, r4) \
vmovdqa r1, r4; vpxor r3, r1, r1; \
vpand r1, r3, r3; vpxor r2, r4, r4; \
vpxor r0, r3, r3; vpor r1, r0, r0; \
vpxor r3, r2, r2; vpxor r4, r0, r0; \
vpor r2, r0, r0; vpxor r3, r1, r1; \
vpxor r1, r0, r0; vpor r3, r1, r1; \
vpxor r0, r1, r1; vpxor RNOT, r4, r4; \
vpxor r1, r4, r4; vpor r0, r1, r1; \
vpxor r0, r1, r1; \
vpor r4, r1, r1; \
vpxor r1, r3, r3;
#define SBOX2(r0, r1, r2, r3, r4) \
vmovdqa r0, r4; vpand r2, r0, r0; \
vpxor r3, r0, r0; vpxor r1, r2, r2; \
vpxor r0, r2, r2; vpor r4, r3, r3; \
vpxor r1, r3, r3; vpxor r2, r4, r4; \
vmovdqa r3, r1; vpor r4, r3, r3; \
vpxor r0, r3, r3; vpand r1, r0, r0; \
vpxor r0, r4, r4; vpxor r3, r1, r1; \
vpxor r4, r1, r1; vpxor RNOT, r4, r4;
#define SBOX2_INVERSE(r0, r1, r2, r3, r4) \
vpxor r3, r2, r2; vpxor r0, r3, r3; \
vmovdqa r3, r4; vpand r2, r3, r3; \
vpxor r1, r3, r3; vpor r2, r1, r1; \
vpxor r4, r1, r1; vpand r3, r4, r4; \
vpxor r3, r2, r2; vpand r0, r4, r4; \
vpxor r2, r4, r4; vpand r1, r2, r2; \
vpor r0, r2, r2; vpxor RNOT, r3, r3; \
vpxor r3, r2, r2; vpxor r3, r0, r0; \
vpand r1, r0, r0; vpxor r4, r3, r3; \
vpxor r0, r3, r3;
#define SBOX3(r0, r1, r2, r3, r4) \
vmovdqa r0, r4; vpor r3, r0, r0; \
vpxor r1, r3, r3; vpand r4, r1, r1; \
vpxor r2, r4, r4; vpxor r3, r2, r2; \
vpand r0, r3, r3; vpor r1, r4, r4; \
vpxor r4, r3, r3; vpxor r1, r0, r0; \
vpand r0, r4, r4; vpxor r3, r1, r1; \
vpxor r2, r4, r4; vpor r0, r1, r1; \
vpxor r2, r1, r1; vpxor r3, r0, r0; \
vmovdqa r1, r2; vpor r3, r1, r1; \
vpxor r0, r1, r1;
#define SBOX3_INVERSE(r0, r1, r2, r3, r4) \
vmovdqa r2, r4; vpxor r1, r2, r2; \
vpxor r2, r0, r0; vpand r2, r4, r4; \
vpxor r0, r4, r4; vpand r1, r0, r0; \
vpxor r3, r1, r1; vpor r4, r3, r3; \
vpxor r3, r2, r2; vpxor r3, r0, r0; \
vpxor r4, r1, r1; vpand r2, r3, r3; \
vpxor r1, r3, r3; vpxor r0, r1, r1; \
vpor r2, r1, r1; vpxor r3, r0, r0; \
vpxor r4, r1, r1; \
vpxor r1, r0, r0;
#define SBOX4(r0, r1, r2, r3, r4) \
vpxor r3, r1, r1; vpxor RNOT, r3, r3; \
vpxor r3, r2, r2; vpxor r0, r3, r3; \
vmovdqa r1, r4; vpand r3, r1, r1; \
vpxor r2, r1, r1; vpxor r3, r4, r4; \
vpxor r4, r0, r0; vpand r4, r2, r2; \
vpxor r0, r2, r2; vpand r1, r0, r0; \
vpxor r0, r3, r3; vpor r1, r4, r4; \
vpxor r0, r4, r4; vpor r3, r0, r0; \
vpxor r2, r0, r0; vpand r3, r2, r2; \
vpxor RNOT, r0, r0; vpxor r2, r4, r4;
#define SBOX4_INVERSE(r0, r1, r2, r3, r4) \
vmovdqa r2, r4; vpand r3, r2, r2; \
vpxor r1, r2, r2; vpor r3, r1, r1; \
vpand r0, r1, r1; vpxor r2, r4, r4; \
vpxor r1, r4, r4; vpand r2, r1, r1; \
vpxor RNOT, r0, r0; vpxor r4, r3, r3; \
vpxor r3, r1, r1; vpand r0, r3, r3; \
vpxor r2, r3, r3; vpxor r1, r0, r0; \
vpand r0, r2, r2; vpxor r0, r3, r3; \
vpxor r4, r2, r2; \
vpor r3, r2, r2; vpxor r0, r3, r3; \
vpxor r1, r2, r2;
#define SBOX5(r0, r1, r2, r3, r4) \
vpxor r1, r0, r0; vpxor r3, r1, r1; \
vpxor RNOT, r3, r3; vmovdqa r1, r4; \
vpand r0, r1, r1; vpxor r3, r2, r2; \
vpxor r2, r1, r1; vpor r4, r2, r2; \
vpxor r3, r4, r4; vpand r1, r3, r3; \
vpxor r0, r3, r3; vpxor r1, r4, r4; \
vpxor r2, r4, r4; vpxor r0, r2, r2; \
vpand r3, r0, r0; vpxor RNOT, r2, r2; \
vpxor r4, r0, r0; vpor r3, r4, r4; \
vpxor r4, r2, r2;
#define SBOX5_INVERSE(r0, r1, r2, r3, r4) \
vpxor RNOT, r1, r1; vmovdqa r3, r4; \
vpxor r1, r2, r2; vpor r0, r3, r3; \
vpxor r2, r3, r3; vpor r1, r2, r2; \
vpand r0, r2, r2; vpxor r3, r4, r4; \
vpxor r4, r2, r2; vpor r0, r4, r4; \
vpxor r1, r4, r4; vpand r2, r1, r1; \
vpxor r3, r1, r1; vpxor r2, r4, r4; \
vpand r4, r3, r3; vpxor r1, r4, r4; \
vpxor r4, r3, r3; vpxor RNOT, r4, r4; \
vpxor r0, r3, r3;
#define SBOX6(r0, r1, r2, r3, r4) \
vpxor RNOT, r2, r2; vmovdqa r3, r4; \
vpand r0, r3, r3; vpxor r4, r0, r0; \
vpxor r2, r3, r3; vpor r4, r2, r2; \
vpxor r3, r1, r1; vpxor r0, r2, r2; \
vpor r1, r0, r0; vpxor r1, r2, r2; \
vpxor r0, r4, r4; vpor r3, r0, r0; \
vpxor r2, r0, r0; vpxor r3, r4, r4; \
vpxor r0, r4, r4; vpxor RNOT, r3, r3; \
vpand r4, r2, r2; \
vpxor r3, r2, r2;
#define SBOX6_INVERSE(r0, r1, r2, r3, r4) \
vpxor r2, r0, r0; vmovdqa r2, r4; \
vpand r0, r2, r2; vpxor r3, r4, r4; \
vpxor RNOT, r2, r2; vpxor r1, r3, r3; \
vpxor r3, r2, r2; vpor r0, r4, r4; \
vpxor r2, r0, r0; vpxor r4, r3, r3; \
vpxor r1, r4, r4; vpand r3, r1, r1; \
vpxor r0, r1, r1; vpxor r3, r0, r0; \
vpor r2, r0, r0; vpxor r1, r3, r3; \
vpxor r0, r4, r4;
#define SBOX7(r0, r1, r2, r3, r4) \
vmovdqa r1, r4; vpor r2, r1, r1; \
vpxor r3, r1, r1; vpxor r2, r4, r4; \
vpxor r1, r2, r2; vpor r4, r3, r3; \
vpand r0, r3, r3; vpxor r2, r4, r4; \
vpxor r1, r3, r3; vpor r4, r1, r1; \
vpxor r0, r1, r1; vpor r4, r0, r0; \
vpxor r2, r0, r0; vpxor r4, r1, r1; \
vpxor r1, r2, r2; vpand r0, r1, r1; \
vpxor r4, r1, r1; vpxor RNOT, r2, r2; \
vpor r0, r2, r2; \
vpxor r2, r4, r4;
#define SBOX7_INVERSE(r0, r1, r2, r3, r4) \
vmovdqa r2, r4; vpxor r0, r2, r2; \
vpand r3, r0, r0; vpor r3, r4, r4; \
vpxor RNOT, r2, r2; vpxor r1, r3, r3; \
vpor r0, r1, r1; vpxor r2, r0, r0; \
vpand r4, r2, r2; vpand r4, r3, r3; \
vpxor r2, r1, r1; vpxor r0, r2, r2; \
vpor r2, r0, r0; vpxor r1, r4, r4; \
vpxor r3, r0, r0; vpxor r4, r3, r3; \
vpor r0, r4, r4; vpxor r2, r3, r3; \
vpxor r2, r4, r4;
/* Apply SBOX number WHICH to the block. */
#define SBOX(which, r0, r1, r2, r3, r4) \
SBOX##which (r0, r1, r2, r3, r4)
/* Apply inverse SBOX number WHICH to the block. */
#define SBOX_INVERSE(which, r0, r1, r2, r3, r4) \
SBOX##which##_INVERSE (r0, r1, r2, r3, r4)
/* XOR round key into block state in r0,r1,r2,r3. r4 used as temporary. */
#define BLOCK_XOR_KEY(r0, r1, r2, r3, r4, round) \
vpbroadcastd (ctx_keys + (round) * 16 + 0 * 4)(CTX), r4; \
vpxor r4, r0, r0; \
vpbroadcastd (ctx_keys + (round) * 16 + 1 * 4)(CTX), r4; \
vpxor r4, r1, r1; \
vpbroadcastd (ctx_keys + (round) * 16 + 2 * 4)(CTX), r4; \
vpxor r4, r2, r2; \
vpbroadcastd (ctx_keys + (round) * 16 + 3 * 4)(CTX), r4; \
vpxor r4, r3, r3;
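/* (vpbroadcastd replicates each 32-bit subkey word across the whole vector,
 * so a single key load is shared by all blocks processed in parallel.) */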
/* Apply the linear transformation to BLOCK. */
#define LINEAR_TRANSFORMATION(r0, r1, r2, r3, r4) \
vec_rol(r0, 13, r4); \
vec_rol(r2, 3, r4); \
vpxor r0, r1, r1; \
vpxor r2, r1, r1; \
vpslld $3, r0, r4; \
vpxor r2, r3, r3; \
vpxor r4, r3, r3; \
vec_rol(r1, 1, r4); \
vec_rol(r3, 7, r4); \
vpxor r1, r0, r0; \
vpxor r3, r0, r0; \
vpslld $7, r1, r4; \
vpxor r3, r2, r2; \
vpxor r4, r2, r2; \
vec_rol(r0, 5, r4); \
vec_rol(r2, 22, r4);
/* Apply the inverse linear transformation to BLOCK. */
#define LINEAR_TRANSFORMATION_INVERSE(r0, r1, r2, r3, r4) \
vec_ror(r2, 22, r4); \
vec_ror(r0, 5, r4); \
vpslld $7, r1, r4; \
vpxor r3, r2, r2; \
vpxor r4, r2, r2; \
vpxor r1, r0, r0; \
vpxor r3, r0, r0; \
vec_ror(r3, 7, r4); \
vec_ror(r1, 1, r4); \
vpslld $3, r0, r4; \
vpxor r2, r3, r3; \
vpxor r4, r3, r3; \
vpxor r0, r1, r1; \
vpxor r2, r1, r1; \
vec_ror(r2, 3, r4); \
vec_ror(r0, 13, r4);
/* Apply a Serpent round to sixteen parallel blocks; the round number is
   given by `round'. */
#define ROUND(round, which, a0, a1, a2, a3, a4, na0, na1, na2, na3, na4, \
b0, b1, b2, b3, b4, nb0, nb1, nb2, nb3, nb4) \
BLOCK_XOR_KEY (a0, a1, a2, a3, a4, round); \
SBOX (which, a0, a1, a2, a3, a4); \
BLOCK_XOR_KEY (b0, b1, b2, b3, b4, round); \
SBOX (which, b0, b1, b2, b3, b4); \
LINEAR_TRANSFORMATION (na0, na1, na2, na3, na4); \
LINEAR_TRANSFORMATION (nb0, nb1, nb2, nb3, nb4);
/* Apply the last Serpent round to sixteen parallel blocks; uses subkeys
   `round' and `round'+1. */
#define ROUND_LAST(round, which, a0, a1, a2, a3, a4, na0, na1, na2, na3, na4, \
b0, b1, b2, b3, b4, nb0, nb1, nb2, nb3, nb4) \
BLOCK_XOR_KEY (a0, a1, a2, a3, a4, round); \
SBOX (which, a0, a1, a2, a3, a4); \
BLOCK_XOR_KEY (b0, b1, b2, b3, b4, round); \
SBOX (which, b0, b1, b2, b3, b4); \
BLOCK_XOR_KEY (na0, na1, na2, na3, na4, ((round) + 1)); \
BLOCK_XOR_KEY (nb0, nb1, nb2, nb3, nb4, ((round) + 1));
/* Apply an inverse Serpent round to sixteen parallel blocks; the round
   number is given by `round'. */
#define ROUND_INVERSE(round, which, a0, a1, a2, a3, a4, \
na0, na1, na2, na3, na4, \
b0, b1, b2, b3, b4, \
nb0, nb1, nb2, nb3, nb4) \
LINEAR_TRANSFORMATION_INVERSE (a0, a1, a2, a3, a4); \
LINEAR_TRANSFORMATION_INVERSE (b0, b1, b2, b3, b4); \
SBOX_INVERSE (which, a0, a1, a2, a3, a4); \
BLOCK_XOR_KEY (na0, na1, na2, na3, na4, round); \
SBOX_INVERSE (which, b0, b1, b2, b3, b4); \
BLOCK_XOR_KEY (nb0, nb1, nb2, nb3, nb4, round);
/* Apply the first inverse Serpent round to sixteen parallel blocks; uses
   subkeys `round'+1 and `round'. */
#define ROUND_FIRST_INVERSE(round, which, a0, a1, a2, a3, a4, \
na0, na1, na2, na3, na4, \
b0, b1, b2, b3, b4, \
nb0, nb1, nb2, nb3, nb4) \
BLOCK_XOR_KEY (a0, a1, a2, a3, a4, ((round) + 1)); \
BLOCK_XOR_KEY (b0, b1, b2, b3, b4, ((round) + 1)); \
SBOX_INVERSE (which, a0, a1, a2, a3, a4); \
BLOCK_XOR_KEY (na0, na1, na2, na3, na4, round); \
SBOX_INVERSE (which, b0, b1, b2, b3, b4); \
BLOCK_XOR_KEY (nb0, nb1, nb2, nb3, nb4, round);
.text
.align 16
ELF(.type __serpent_enc_blk16,@function;)
__serpent_enc_blk16:
/* input:
* %rdi: ctx, CTX
* RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: sixteen parallel
* plaintext blocks
* output:
* RA4, RA1, RA2, RA0, RB4, RB1, RB2, RB0: sixteen parallel
* ciphertext blocks
*/
CFI_STARTPROC();
vpcmpeqd RNOT, RNOT, RNOT;
transpose_4x4(RA0, RA1, RA2, RA3, RA4, RTMP0, RTMP1);
transpose_4x4(RB0, RB1, RB2, RB3, RB4, RTMP0, RTMP1);
ROUND (0, 0, RA0, RA1, RA2, RA3, RA4, RA1, RA4, RA2, RA0, RA3,
RB0, RB1, RB2, RB3, RB4, RB1, RB4, RB2, RB0, RB3);
ROUND (1, 1, RA1, RA4, RA2, RA0, RA3, RA2, RA1, RA0, RA4, RA3,
RB1, RB4, RB2, RB0, RB3, RB2, RB1, RB0, RB4, RB3);
ROUND (2, 2, RA2, RA1, RA0, RA4, RA3, RA0, RA4, RA1, RA3, RA2,
RB2, RB1, RB0, RB4, RB3, RB0, RB4, RB1, RB3, RB2);
ROUND (3, 3, RA0, RA4, RA1, RA3, RA2, RA4, RA1, RA3, RA2, RA0,
RB0, RB4, RB1, RB3, RB2, RB4, RB1, RB3, RB2, RB0);
ROUND (4, 4, RA4, RA1, RA3, RA2, RA0, RA1, RA0, RA4, RA2, RA3,
RB4, RB1, RB3, RB2, RB0, RB1, RB0, RB4, RB2, RB3);
ROUND (5, 5, RA1, RA0, RA4, RA2, RA3, RA0, RA2, RA1, RA4, RA3,
RB1, RB0, RB4, RB2, RB3, RB0, RB2, RB1, RB4, RB3);
ROUND (6, 6, RA0, RA2, RA1, RA4, RA3, RA0, RA2, RA3, RA1, RA4,
RB0, RB2, RB1, RB4, RB3, RB0, RB2, RB3, RB1, RB4);
ROUND (7, 7, RA0, RA2, RA3, RA1, RA4, RA4, RA1, RA2, RA0, RA3,
RB0, RB2, RB3, RB1, RB4, RB4, RB1, RB2, RB0, RB3);
ROUND (8, 0, RA4, RA1, RA2, RA0, RA3, RA1, RA3, RA2, RA4, RA0,
RB4, RB1, RB2, RB0, RB3, RB1, RB3, RB2, RB4, RB0);
ROUND (9, 1, RA1, RA3, RA2, RA4, RA0, RA2, RA1, RA4, RA3, RA0,
RB1, RB3, RB2, RB4, RB0, RB2, RB1, RB4, RB3, RB0);
ROUND (10, 2, RA2, RA1, RA4, RA3, RA0, RA4, RA3, RA1, RA0, RA2,
RB2, RB1, RB4, RB3, RB0, RB4, RB3, RB1, RB0, RB2);
ROUND (11, 3, RA4, RA3, RA1, RA0, RA2, RA3, RA1, RA0, RA2, RA4,
RB4, RB3, RB1, RB0, RB2, RB3, RB1, RB0, RB2, RB4);
ROUND (12, 4, RA3, RA1, RA0, RA2, RA4, RA1, RA4, RA3, RA2, RA0,
RB3, RB1, RB0, RB2, RB4, RB1, RB4, RB3, RB2, RB0);
ROUND (13, 5, RA1, RA4, RA3, RA2, RA0, RA4, RA2, RA1, RA3, RA0,
RB1, RB4, RB3, RB2, RB0, RB4, RB2, RB1, RB3, RB0);
ROUND (14, 6, RA4, RA2, RA1, RA3, RA0, RA4, RA2, RA0, RA1, RA3,
RB4, RB2, RB1, RB3, RB0, RB4, RB2, RB0, RB1, RB3);
ROUND (15, 7, RA4, RA2, RA0, RA1, RA3, RA3, RA1, RA2, RA4, RA0,
RB4, RB2, RB0, RB1, RB3, RB3, RB1, RB2, RB4, RB0);
ROUND (16, 0, RA3, RA1, RA2, RA4, RA0, RA1, RA0, RA2, RA3, RA4,
RB3, RB1, RB2, RB4, RB0, RB1, RB0, RB2, RB3, RB4);
ROUND (17, 1, RA1, RA0, RA2, RA3, RA4, RA2, RA1, RA3, RA0, RA4,
RB1, RB0, RB2, RB3, RB4, RB2, RB1, RB3, RB0, RB4);
ROUND (18, 2, RA2, RA1, RA3, RA0, RA4, RA3, RA0, RA1, RA4, RA2,
RB2, RB1, RB3, RB0, RB4, RB3, RB0, RB1, RB4, RB2);
ROUND (19, 3, RA3, RA0, RA1, RA4, RA2, RA0, RA1, RA4, RA2, RA3,
RB3, RB0, RB1, RB4, RB2, RB0, RB1, RB4, RB2, RB3);
ROUND (20, 4, RA0, RA1, RA4, RA2, RA3, RA1, RA3, RA0, RA2, RA4,
RB0, RB1, RB4, RB2, RB3, RB1, RB3, RB0, RB2, RB4);
ROUND (21, 5, RA1, RA3, RA0, RA2, RA4, RA3, RA2, RA1, RA0, RA4,
RB1, RB3, RB0, RB2, RB4, RB3, RB2, RB1, RB0, RB4);
ROUND (22, 6, RA3, RA2, RA1, RA0, RA4, RA3, RA2, RA4, RA1, RA0,
RB3, RB2, RB1, RB0, RB4, RB3, RB2, RB4, RB1, RB0);
ROUND (23, 7, RA3, RA2, RA4, RA1, RA0, RA0, RA1, RA2, RA3, RA4,
RB3, RB2, RB4, RB1, RB0, RB0, RB1, RB2, RB3, RB4);
ROUND (24, 0, RA0, RA1, RA2, RA3, RA4, RA1, RA4, RA2, RA0, RA3,
RB0, RB1, RB2, RB3, RB4, RB1, RB4, RB2, RB0, RB3);
ROUND (25, 1, RA1, RA4, RA2, RA0, RA3, RA2, RA1, RA0, RA4, RA3,
RB1, RB4, RB2, RB0, RB3, RB2, RB1, RB0, RB4, RB3);
ROUND (26, 2, RA2, RA1, RA0, RA4, RA3, RA0, RA4, RA1, RA3, RA2,
RB2, RB1, RB0, RB4, RB3, RB0, RB4, RB1, RB3, RB2);
ROUND (27, 3, RA0, RA4, RA1, RA3, RA2, RA4, RA1, RA3, RA2, RA0,
RB0, RB4, RB1, RB3, RB2, RB4, RB1, RB3, RB2, RB0);
ROUND (28, 4, RA4, RA1, RA3, RA2, RA0, RA1, RA0, RA4, RA2, RA3,
RB4, RB1, RB3, RB2, RB0, RB1, RB0, RB4, RB2, RB3);
ROUND (29, 5, RA1, RA0, RA4, RA2, RA3, RA0, RA2, RA1, RA4, RA3,
RB1, RB0, RB4, RB2, RB3, RB0, RB2, RB1, RB4, RB3);
ROUND (30, 6, RA0, RA2, RA1, RA4, RA3, RA0, RA2, RA3, RA1, RA4,
RB0, RB2, RB1, RB4, RB3, RB0, RB2, RB3, RB1, RB4);
ROUND_LAST (31, 7, RA0, RA2, RA3, RA1, RA4, RA4, RA1, RA2, RA0, RA3,
RB0, RB2, RB3, RB1, RB4, RB4, RB1, RB2, RB0, RB3);
transpose_4x4(RA4, RA1, RA2, RA0, RA3, RTMP0, RTMP1);
transpose_4x4(RB4, RB1, RB2, RB0, RB3, RTMP0, RTMP1);
ret_spec_stop;
CFI_ENDPROC();
ELF(.size __serpent_enc_blk16,.-__serpent_enc_blk16;)
.align 16
ELF(.type __serpent_dec_blk16,@function;)
__serpent_dec_blk16:
/* input:
* %rdi: ctx, CTX
* RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: sixteen parallel
* ciphertext blocks
* output:
* RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: sixteen parallel
* plaintext blocks
*/
CFI_STARTPROC();
vpcmpeqd RNOT, RNOT, RNOT;
transpose_4x4(RA0, RA1, RA2, RA3, RA4, RTMP0, RTMP1);
transpose_4x4(RB0, RB1, RB2, RB3, RB4, RTMP0, RTMP1);
ROUND_FIRST_INVERSE (31, 7, RA0, RA1, RA2, RA3, RA4,
RA3, RA0, RA1, RA4, RA2,
RB0, RB1, RB2, RB3, RB4,
RB3, RB0, RB1, RB4, RB2);
ROUND_INVERSE (30, 6, RA3, RA0, RA1, RA4, RA2, RA0, RA1, RA2, RA4, RA3,
RB3, RB0, RB1, RB4, RB2, RB0, RB1, RB2, RB4, RB3);
ROUND_INVERSE (29, 5, RA0, RA1, RA2, RA4, RA3, RA1, RA3, RA4, RA2, RA0,
RB0, RB1, RB2, RB4, RB3, RB1, RB3, RB4, RB2, RB0);
ROUND_INVERSE (28, 4, RA1, RA3, RA4, RA2, RA0, RA1, RA2, RA4, RA0, RA3,
RB1, RB3, RB4, RB2, RB0, RB1, RB2, RB4, RB0, RB3);
ROUND_INVERSE (27, 3, RA1, RA2, RA4, RA0, RA3, RA4, RA2, RA0, RA1, RA3,
RB1, RB2, RB4, RB0, RB3, RB4, RB2, RB0, RB1, RB3);
ROUND_INVERSE (26, 2, RA4, RA2, RA0, RA1, RA3, RA2, RA3, RA0, RA1, RA4,
RB4, RB2, RB0, RB1, RB3, RB2, RB3, RB0, RB1, RB4);
ROUND_INVERSE (25, 1, RA2, RA3, RA0, RA1, RA4, RA4, RA2, RA1, RA0, RA3,
RB2, RB3, RB0, RB1, RB4, RB4, RB2, RB1, RB0, RB3);
ROUND_INVERSE (24, 0, RA4, RA2, RA1, RA0, RA3, RA4, RA3, RA2, RA0, RA1,
RB4, RB2, RB1, RB0, RB3, RB4, RB3, RB2, RB0, RB1);
ROUND_INVERSE (23, 7, RA4, RA3, RA2, RA0, RA1, RA0, RA4, RA3, RA1, RA2,
RB4, RB3, RB2, RB0, RB1, RB0, RB4, RB3, RB1, RB2);
ROUND_INVERSE (22, 6, RA0, RA4, RA3, RA1, RA2, RA4, RA3, RA2, RA1, RA0,
RB0, RB4, RB3, RB1, RB2, RB4, RB3, RB2, RB1, RB0);
ROUND_INVERSE (21, 5, RA4, RA3, RA2, RA1, RA0, RA3, RA0, RA1, RA2, RA4,
RB4, RB3, RB2, RB1, RB0, RB3, RB0, RB1, RB2, RB4);
ROUND_INVERSE (20, 4, RA3, RA0, RA1, RA2, RA4, RA3, RA2, RA1, RA4, RA0,
RB3, RB0, RB1, RB2, RB4, RB3, RB2, RB1, RB4, RB0);
ROUND_INVERSE (19, 3, RA3, RA2, RA1, RA4, RA0, RA1, RA2, RA4, RA3, RA0,
RB3, RB2, RB1, RB4, RB0, RB1, RB2, RB4, RB3, RB0);
ROUND_INVERSE (18, 2, RA1, RA2, RA4, RA3, RA0, RA2, RA0, RA4, RA3, RA1,
RB1, RB2, RB4, RB3, RB0, RB2, RB0, RB4, RB3, RB1);
ROUND_INVERSE (17, 1, RA2, RA0, RA4, RA3, RA1, RA1, RA2, RA3, RA4, RA0,
RB2, RB0, RB4, RB3, RB1, RB1, RB2, RB3, RB4, RB0);
ROUND_INVERSE (16, 0, RA1, RA2, RA3, RA4, RA0, RA1, RA0, RA2, RA4, RA3,
RB1, RB2, RB3, RB4, RB0, RB1, RB0, RB2, RB4, RB3);
ROUND_INVERSE (15, 7, RA1, RA0, RA2, RA4, RA3, RA4, RA1, RA0, RA3, RA2,
RB1, RB0, RB2, RB4, RB3, RB4, RB1, RB0, RB3, RB2);
ROUND_INVERSE (14, 6, RA4, RA1, RA0, RA3, RA2, RA1, RA0, RA2, RA3, RA4,
RB4, RB1, RB0, RB3, RB2, RB1, RB0, RB2, RB3, RB4);
ROUND_INVERSE (13, 5, RA1, RA0, RA2, RA3, RA4, RA0, RA4, RA3, RA2, RA1,
RB1, RB0, RB2, RB3, RB4, RB0, RB4, RB3, RB2, RB1);
ROUND_INVERSE (12, 4, RA0, RA4, RA3, RA2, RA1, RA0, RA2, RA3, RA1, RA4,
RB0, RB4, RB3, RB2, RB1, RB0, RB2, RB3, RB1, RB4);
ROUND_INVERSE (11, 3, RA0, RA2, RA3, RA1, RA4, RA3, RA2, RA1, RA0, RA4,
RB0, RB2, RB3, RB1, RB4, RB3, RB2, RB1, RB0, RB4);
ROUND_INVERSE (10, 2, RA3, RA2, RA1, RA0, RA4, RA2, RA4, RA1, RA0, RA3,
RB3, RB2, RB1, RB0, RB4, RB2, RB4, RB1, RB0, RB3);
ROUND_INVERSE (9, 1, RA2, RA4, RA1, RA0, RA3, RA3, RA2, RA0, RA1, RA4,
RB2, RB4, RB1, RB0, RB3, RB3, RB2, RB0, RB1, RB4);
ROUND_INVERSE (8, 0, RA3, RA2, RA0, RA1, RA4, RA3, RA4, RA2, RA1, RA0,
RB3, RB2, RB0, RB1, RB4, RB3, RB4, RB2, RB1, RB0);
ROUND_INVERSE (7, 7, RA3, RA4, RA2, RA1, RA0, RA1, RA3, RA4, RA0, RA2,
RB3, RB4, RB2, RB1, RB0, RB1, RB3, RB4, RB0, RB2);
ROUND_INVERSE (6, 6, RA1, RA3, RA4, RA0, RA2, RA3, RA4, RA2, RA0, RA1,
RB1, RB3, RB4, RB0, RB2, RB3, RB4, RB2, RB0, RB1);
ROUND_INVERSE (5, 5, RA3, RA4, RA2, RA0, RA1, RA4, RA1, RA0, RA2, RA3,
RB3, RB4, RB2, RB0, RB1, RB4, RB1, RB0, RB2, RB3);
ROUND_INVERSE (4, 4, RA4, RA1, RA0, RA2, RA3, RA4, RA2, RA0, RA3, RA1,
RB4, RB1, RB0, RB2, RB3, RB4, RB2, RB0, RB3, RB1);
ROUND_INVERSE (3, 3, RA4, RA2, RA0, RA3, RA1, RA0, RA2, RA3, RA4, RA1,
RB4, RB2, RB0, RB3, RB1, RB0, RB2, RB3, RB4, RB1);
ROUND_INVERSE (2, 2, RA0, RA2, RA3, RA4, RA1, RA2, RA1, RA3, RA4, RA0,
RB0, RB2, RB3, RB4, RB1, RB2, RB1, RB3, RB4, RB0);
ROUND_INVERSE (1, 1, RA2, RA1, RA3, RA4, RA0, RA0, RA2, RA4, RA3, RA1,
RB2, RB1, RB3, RB4, RB0, RB0, RB2, RB4, RB3, RB1);
ROUND_INVERSE (0, 0, RA0, RA2, RA4, RA3, RA1, RA0, RA1, RA2, RA3, RA4,
RB0, RB2, RB4, RB3, RB1, RB0, RB1, RB2, RB3, RB4);
transpose_4x4(RA0, RA1, RA2, RA3, RA4, RTMP0, RTMP1);
transpose_4x4(RB0, RB1, RB2, RB3, RB4, RTMP0, RTMP1);
ret_spec_stop;
CFI_ENDPROC();
ELF(.size __serpent_dec_blk16,.-__serpent_dec_blk16;)
.align 16
.globl _gcry_serpent_avx2_blk16
ELF(.type _gcry_serpent_avx2_blk16,@function;)
_gcry_serpent_avx2_blk16:
/* input:
* %rdi: ctx, CTX
- * %rsi: dst (8 blocks)
- * %rdx: src (8 blocks)
+ * %rsi: dst (16 blocks)
+ * %rdx: src (16 blocks)
* %ecx: encrypt
*/
CFI_STARTPROC();
vmovdqu (0 * 32)(%rdx), RA0;
vmovdqu (1 * 32)(%rdx), RA1;
vmovdqu (2 * 32)(%rdx), RA2;
vmovdqu (3 * 32)(%rdx), RA3;
vmovdqu (4 * 32)(%rdx), RB0;
vmovdqu (5 * 32)(%rdx), RB1;
vmovdqu (6 * 32)(%rdx), RB2;
vmovdqu (7 * 32)(%rdx), RB3;
testl %ecx, %ecx;
jz .Lblk16_dec;
call __serpent_enc_blk16;
vmovdqu RA4, (0 * 32)(%rsi);
vmovdqu RA1, (1 * 32)(%rsi);
vmovdqu RA2, (2 * 32)(%rsi);
vmovdqu RA0, (3 * 32)(%rsi);
vmovdqu RB4, (4 * 32)(%rsi);
vmovdqu RB1, (5 * 32)(%rsi);
vmovdqu RB2, (6 * 32)(%rsi);
vmovdqu RB0, (7 * 32)(%rsi);
jmp .Lblk16_end;
.Lblk16_dec:
call __serpent_dec_blk16;
vmovdqu RA0, (0 * 32)(%rsi);
vmovdqu RA1, (1 * 32)(%rsi);
vmovdqu RA2, (2 * 32)(%rsi);
vmovdqu RA3, (3 * 32)(%rsi);
vmovdqu RB0, (4 * 32)(%rsi);
vmovdqu RB1, (5 * 32)(%rsi);
vmovdqu RB2, (6 * 32)(%rsi);
vmovdqu RB3, (7 * 32)(%rsi);
.Lblk16_end:
vzeroall;
ret_spec_stop;
CFI_ENDPROC();
ELF(.size _gcry_serpent_avx2_blk16,.-_gcry_serpent_avx2_blk16;)
#define inc_le128(x, minus_one, tmp) \
vpcmpeqq minus_one, x, tmp; \
vpsubq minus_one, x, x; \
vpslldq $8, tmp, tmp; \
vpsubq tmp, x, x;
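/* (minus_one is expected to hold -1 in the low qword and 0 in the high qword
 * of each 128-bit lane, so the first vpsubq increments only the low qword;
 * the compare and byte-shift then propagate the carry into the high qword
 * when the low qword wraps.) */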
.align 16
.globl _gcry_serpent_avx2_ctr_enc
ELF(.type _gcry_serpent_avx2_ctr_enc,@function;)
_gcry_serpent_avx2_ctr_enc:
/* input:
* %rdi: ctx, CTX
* %rsi: dst (16 blocks)
* %rdx: src (16 blocks)
* %rcx: iv (big endian, 128bit)
*/
CFI_STARTPROC();
movq 8(%rcx), %rax;
bswapq %rax;
vzeroupper;
vbroadcasti128 .Lbswap128_mask rRIP, RTMP3;
vpcmpeqd RNOT, RNOT, RNOT;
vpsrldq $8, RNOT, RNOT; /* ab: -1:0 ; cd: -1:0 */
vpaddq RNOT, RNOT, RTMP2; /* ab: -2:0 ; cd: -2:0 */
/* load IV and byteswap */
vmovdqu (%rcx), RTMP4x;
vpshufb RTMP3x, RTMP4x, RTMP4x;
vmovdqa RTMP4x, RTMP0x;
inc_le128(RTMP4x, RNOTx, RTMP1x);
vinserti128 $1, RTMP4x, RTMP0, RTMP0;
vpshufb RTMP3, RTMP0, RA0; /* +1 ; +0 */
/* check need for handling 64-bit overflow and carry */
cmpq $(0xffffffffffffffff - 16), %rax;
ja .Lhandle_ctr_carry;
/* construct IVs */
vpsubq RTMP2, RTMP0, RTMP0; /* +3 ; +2 */
vpshufb RTMP3, RTMP0, RA1;
vpsubq RTMP2, RTMP0, RTMP0; /* +5 ; +4 */
vpshufb RTMP3, RTMP0, RA2;
vpsubq RTMP2, RTMP0, RTMP0; /* +7 ; +6 */
vpshufb RTMP3, RTMP0, RA3;
vpsubq RTMP2, RTMP0, RTMP0; /* +9 ; +8 */
vpshufb RTMP3, RTMP0, RB0;
vpsubq RTMP2, RTMP0, RTMP0; /* +11 ; +10 */
vpshufb RTMP3, RTMP0, RB1;
vpsubq RTMP2, RTMP0, RTMP0; /* +13 ; +12 */
vpshufb RTMP3, RTMP0, RB2;
vpsubq RTMP2, RTMP0, RTMP0; /* +15 ; +14 */
vpshufb RTMP3, RTMP0, RB3;
vpsubq RTMP2, RTMP0, RTMP0; /* +16 */
vpshufb RTMP3x, RTMP0x, RTMP0x;
jmp .Lctr_carry_done;
.Lhandle_ctr_carry:
/* construct IVs */
inc_le128(RTMP0, RNOT, RTMP1);
inc_le128(RTMP0, RNOT, RTMP1);
vpshufb RTMP3, RTMP0, RA1; /* +3 ; +2 */
inc_le128(RTMP0, RNOT, RTMP1);
inc_le128(RTMP0, RNOT, RTMP1);
vpshufb RTMP3, RTMP0, RA2; /* +5 ; +4 */
inc_le128(RTMP0, RNOT, RTMP1);
inc_le128(RTMP0, RNOT, RTMP1);
vpshufb RTMP3, RTMP0, RA3; /* +7 ; +6 */
inc_le128(RTMP0, RNOT, RTMP1);
inc_le128(RTMP0, RNOT, RTMP1);
vpshufb RTMP3, RTMP0, RB0; /* +9 ; +8 */
inc_le128(RTMP0, RNOT, RTMP1);
inc_le128(RTMP0, RNOT, RTMP1);
vpshufb RTMP3, RTMP0, RB1; /* +11 ; +10 */
inc_le128(RTMP0, RNOT, RTMP1);
inc_le128(RTMP0, RNOT, RTMP1);
vpshufb RTMP3, RTMP0, RB2; /* +13 ; +12 */
inc_le128(RTMP0, RNOT, RTMP1);
inc_le128(RTMP0, RNOT, RTMP1);
vpshufb RTMP3, RTMP0, RB3; /* +15 ; +14 */
inc_le128(RTMP0, RNOT, RTMP1);
vextracti128 $1, RTMP0, RTMP0x;
vpshufb RTMP3x, RTMP0x, RTMP0x; /* +16 */
.align 4
.Lctr_carry_done:
/* store new IV */
vmovdqu RTMP0x, (%rcx);
call __serpent_enc_blk16;
vpxor (0 * 32)(%rdx), RA4, RA4;
vpxor (1 * 32)(%rdx), RA1, RA1;
vpxor (2 * 32)(%rdx), RA2, RA2;
vpxor (3 * 32)(%rdx), RA0, RA0;
vpxor (4 * 32)(%rdx), RB4, RB4;
vpxor (5 * 32)(%rdx), RB1, RB1;
vpxor (6 * 32)(%rdx), RB2, RB2;
vpxor (7 * 32)(%rdx), RB0, RB0;
vmovdqu RA4, (0 * 32)(%rsi);
vmovdqu RA1, (1 * 32)(%rsi);
vmovdqu RA2, (2 * 32)(%rsi);
vmovdqu RA0, (3 * 32)(%rsi);
vmovdqu RB4, (4 * 32)(%rsi);
vmovdqu RB1, (5 * 32)(%rsi);
vmovdqu RB2, (6 * 32)(%rsi);
vmovdqu RB0, (7 * 32)(%rsi);
vzeroall;
ret_spec_stop;
CFI_ENDPROC();
ELF(.size _gcry_serpent_avx2_ctr_enc,.-_gcry_serpent_avx2_ctr_enc;)
.align 16
.globl _gcry_serpent_avx2_cbc_dec
ELF(.type _gcry_serpent_avx2_cbc_dec,@function;)
_gcry_serpent_avx2_cbc_dec:
/* input:
* %rdi: ctx, CTX
* %rsi: dst (16 blocks)
* %rdx: src (16 blocks)
* %rcx: iv
*/
CFI_STARTPROC();
vzeroupper;
vmovdqu (0 * 32)(%rdx), RA0;
vmovdqu (1 * 32)(%rdx), RA1;
vmovdqu (2 * 32)(%rdx), RA2;
vmovdqu (3 * 32)(%rdx), RA3;
vmovdqu (4 * 32)(%rdx), RB0;
vmovdqu (5 * 32)(%rdx), RB1;
vmovdqu (6 * 32)(%rdx), RB2;
vmovdqu (7 * 32)(%rdx), RB3;
call __serpent_dec_blk16;
vmovdqu (%rcx), RNOTx;
vinserti128 $1, (%rdx), RNOT, RNOT;
vpxor RNOT, RA0, RA0;
vpxor (0 * 32 + 16)(%rdx), RA1, RA1;
vpxor (1 * 32 + 16)(%rdx), RA2, RA2;
vpxor (2 * 32 + 16)(%rdx), RA3, RA3;
vpxor (3 * 32 + 16)(%rdx), RB0, RB0;
vpxor (4 * 32 + 16)(%rdx), RB1, RB1;
vpxor (5 * 32 + 16)(%rdx), RB2, RB2;
vpxor (6 * 32 + 16)(%rdx), RB3, RB3;
vmovdqu (7 * 32 + 16)(%rdx), RNOTx;
vmovdqu RNOTx, (%rcx); /* store new IV */
vmovdqu RA0, (0 * 32)(%rsi);
vmovdqu RA1, (1 * 32)(%rsi);
vmovdqu RA2, (2 * 32)(%rsi);
vmovdqu RA3, (3 * 32)(%rsi);
vmovdqu RB0, (4 * 32)(%rsi);
vmovdqu RB1, (5 * 32)(%rsi);
vmovdqu RB2, (6 * 32)(%rsi);
vmovdqu RB3, (7 * 32)(%rsi);
vzeroall;
ret_spec_stop;
CFI_ENDPROC();
ELF(.size _gcry_serpent_avx2_cbc_dec,.-_gcry_serpent_avx2_cbc_dec;)
.align 16
.globl _gcry_serpent_avx2_cfb_dec
ELF(.type _gcry_serpent_avx2_cfb_dec,@function;)
_gcry_serpent_avx2_cfb_dec:
/* input:
* %rdi: ctx, CTX
* %rsi: dst (16 blocks)
* %rdx: src (16 blocks)
* %rcx: iv
*/
CFI_STARTPROC();
vzeroupper;
/* Load input */
vmovdqu (%rcx), RNOTx;
vinserti128 $1, (%rdx), RNOT, RA0;
vmovdqu (0 * 32 + 16)(%rdx), RA1;
vmovdqu (1 * 32 + 16)(%rdx), RA2;
vmovdqu (2 * 32 + 16)(%rdx), RA3;
vmovdqu (3 * 32 + 16)(%rdx), RB0;
vmovdqu (4 * 32 + 16)(%rdx), RB1;
vmovdqu (5 * 32 + 16)(%rdx), RB2;
vmovdqu (6 * 32 + 16)(%rdx), RB3;
/* Update IV */
vmovdqu (7 * 32 + 16)(%rdx), RNOTx;
vmovdqu RNOTx, (%rcx);
call __serpent_enc_blk16;
vpxor (0 * 32)(%rdx), RA4, RA4;
vpxor (1 * 32)(%rdx), RA1, RA1;
vpxor (2 * 32)(%rdx), RA2, RA2;
vpxor (3 * 32)(%rdx), RA0, RA0;
vpxor (4 * 32)(%rdx), RB4, RB4;
vpxor (5 * 32)(%rdx), RB1, RB1;
vpxor (6 * 32)(%rdx), RB2, RB2;
vpxor (7 * 32)(%rdx), RB0, RB0;
vmovdqu RA4, (0 * 32)(%rsi);
vmovdqu RA1, (1 * 32)(%rsi);
vmovdqu RA2, (2 * 32)(%rsi);
vmovdqu RA0, (3 * 32)(%rsi);
vmovdqu RB4, (4 * 32)(%rsi);
vmovdqu RB1, (5 * 32)(%rsi);
vmovdqu RB2, (6 * 32)(%rsi);
vmovdqu RB0, (7 * 32)(%rsi);
vzeroall;
ret_spec_stop;
CFI_ENDPROC();
ELF(.size _gcry_serpent_avx2_cfb_dec,.-_gcry_serpent_avx2_cfb_dec;)
.align 16
.globl _gcry_serpent_avx2_ocb_enc
ELF(.type _gcry_serpent_avx2_ocb_enc,@function;)
_gcry_serpent_avx2_ocb_enc:
/* input:
* %rdi: ctx, CTX
* %rsi: dst (16 blocks)
* %rdx: src (16 blocks)
* %rcx: offset
* %r8 : checksum
* %r9 : L pointers (void *L[16])
*/
CFI_STARTPROC();
vzeroupper;
subq $(4 * 8), %rsp;
CFI_ADJUST_CFA_OFFSET(4 * 8);
movq %r10, (0 * 8)(%rsp);
movq %r11, (1 * 8)(%rsp);
movq %r12, (2 * 8)(%rsp);
movq %r13, (3 * 8)(%rsp);
CFI_REL_OFFSET(%r10, 0 * 8);
CFI_REL_OFFSET(%r11, 1 * 8);
CFI_REL_OFFSET(%r12, 2 * 8);
CFI_REL_OFFSET(%r13, 3 * 8);
vmovdqu (%rcx), RTMP0x;
vmovdqu (%r8), RTMP1x;
/* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
/* Checksum_i = Checksum_{i-1} xor P_i */
/* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i) */
#define OCB_INPUT(n, l0reg, l1reg, yreg) \
vmovdqu (n * 32)(%rdx), yreg; \
vpxor (l0reg), RTMP0x, RNOTx; \
vpxor (l1reg), RNOTx, RTMP0x; \
vinserti128 $1, RTMP0x, RNOT, RNOT; \
vpxor yreg, RTMP1, RTMP1; \
vpxor yreg, RNOT, yreg; \
vmovdqu RNOT, (n * 32)(%rsi);
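/* (l0reg/l1reg point at the L values for two consecutive blocks; the running
   offset is carried in RTMP0x and the two per-block offsets are packed into
   one 256-bit register for the XOR and store.) */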
movq (0 * 8)(%r9), %r10;
movq (1 * 8)(%r9), %r11;
movq (2 * 8)(%r9), %r12;
movq (3 * 8)(%r9), %r13;
OCB_INPUT(0, %r10, %r11, RA0);
OCB_INPUT(1, %r12, %r13, RA1);
movq (4 * 8)(%r9), %r10;
movq (5 * 8)(%r9), %r11;
movq (6 * 8)(%r9), %r12;
movq (7 * 8)(%r9), %r13;
OCB_INPUT(2, %r10, %r11, RA2);
OCB_INPUT(3, %r12, %r13, RA3);
movq (8 * 8)(%r9), %r10;
movq (9 * 8)(%r9), %r11;
movq (10 * 8)(%r9), %r12;
movq (11 * 8)(%r9), %r13;
OCB_INPUT(4, %r10, %r11, RB0);
OCB_INPUT(5, %r12, %r13, RB1);
movq (12 * 8)(%r9), %r10;
movq (13 * 8)(%r9), %r11;
movq (14 * 8)(%r9), %r12;
movq (15 * 8)(%r9), %r13;
OCB_INPUT(6, %r10, %r11, RB2);
OCB_INPUT(7, %r12, %r13, RB3);
#undef OCB_INPUT
vextracti128 $1, RTMP1, RNOTx;
vmovdqu RTMP0x, (%rcx);
vpxor RNOTx, RTMP1x, RTMP1x;
vmovdqu RTMP1x, (%r8);
movq (0 * 8)(%rsp), %r10;
movq (1 * 8)(%rsp), %r11;
movq (2 * 8)(%rsp), %r12;
movq (3 * 8)(%rsp), %r13;
CFI_RESTORE(%r10);
CFI_RESTORE(%r11);
CFI_RESTORE(%r12);
CFI_RESTORE(%r13);
call __serpent_enc_blk16;
addq $(4 * 8), %rsp;
CFI_ADJUST_CFA_OFFSET(-4 * 8);
vpxor (0 * 32)(%rsi), RA4, RA4;
vpxor (1 * 32)(%rsi), RA1, RA1;
vpxor (2 * 32)(%rsi), RA2, RA2;
vpxor (3 * 32)(%rsi), RA0, RA0;
vpxor (4 * 32)(%rsi), RB4, RB4;
vpxor (5 * 32)(%rsi), RB1, RB1;
vpxor (6 * 32)(%rsi), RB2, RB2;
vpxor (7 * 32)(%rsi), RB0, RB0;
vmovdqu RA4, (0 * 32)(%rsi);
vmovdqu RA1, (1 * 32)(%rsi);
vmovdqu RA2, (2 * 32)(%rsi);
vmovdqu RA0, (3 * 32)(%rsi);
vmovdqu RB4, (4 * 32)(%rsi);
vmovdqu RB1, (5 * 32)(%rsi);
vmovdqu RB2, (6 * 32)(%rsi);
vmovdqu RB0, (7 * 32)(%rsi);
vzeroall;
ret_spec_stop;
CFI_ENDPROC();
ELF(.size _gcry_serpent_avx2_ocb_enc,.-_gcry_serpent_avx2_ocb_enc;)
.align 16
.globl _gcry_serpent_avx2_ocb_dec
ELF(.type _gcry_serpent_avx2_ocb_dec,@function;)
_gcry_serpent_avx2_ocb_dec:
/* input:
* %rdi: ctx, CTX
* %rsi: dst (16 blocks)
* %rdx: src (16 blocks)
* %rcx: offset
* %r8 : checksum
* %r9 : L pointers (void *L[16])
*/
CFI_STARTPROC();
vzeroupper;
subq $(4 * 8), %rsp;
CFI_ADJUST_CFA_OFFSET(4 * 8);
movq %r10, (0 * 8)(%rsp);
movq %r11, (1 * 8)(%rsp);
movq %r12, (2 * 8)(%rsp);
movq %r13, (3 * 8)(%rsp);
CFI_REL_OFFSET(%r10, 0 * 8);
CFI_REL_OFFSET(%r11, 1 * 8);
CFI_REL_OFFSET(%r12, 2 * 8);
CFI_REL_OFFSET(%r13, 3 * 8);
vmovdqu (%rcx), RTMP0x;
/* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
/* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i) */
#define OCB_INPUT(n, l0reg, l1reg, yreg) \
vmovdqu (n * 32)(%rdx), yreg; \
vpxor (l0reg), RTMP0x, RNOTx; \
vpxor (l1reg), RNOTx, RTMP0x; \
vinserti128 $1, RTMP0x, RNOT, RNOT; \
vpxor yreg, RNOT, yreg; \
vmovdqu RNOT, (n * 32)(%rsi);
movq (0 * 8)(%r9), %r10;
movq (1 * 8)(%r9), %r11;
movq (2 * 8)(%r9), %r12;
movq (3 * 8)(%r9), %r13;
OCB_INPUT(0, %r10, %r11, RA0);
OCB_INPUT(1, %r12, %r13, RA1);
movq (4 * 8)(%r9), %r10;
movq (5 * 8)(%r9), %r11;
movq (6 * 8)(%r9), %r12;
movq (7 * 8)(%r9), %r13;
OCB_INPUT(2, %r10, %r11, RA2);
OCB_INPUT(3, %r12, %r13, RA3);
movq (8 * 8)(%r9), %r10;
movq (9 * 8)(%r9), %r11;
movq (10 * 8)(%r9), %r12;
movq (11 * 8)(%r9), %r13;
OCB_INPUT(4, %r10, %r11, RB0);
OCB_INPUT(5, %r12, %r13, RB1);
movq (12 * 8)(%r9), %r10;
movq (13 * 8)(%r9), %r11;
movq (14 * 8)(%r9), %r12;
movq (15 * 8)(%r9), %r13;
OCB_INPUT(6, %r10, %r11, RB2);
OCB_INPUT(7, %r12, %r13, RB3);
#undef OCB_INPUT
vmovdqu RTMP0x, (%rcx);
movq (0 * 8)(%rsp), %r10;
movq (1 * 8)(%rsp), %r11;
movq (2 * 8)(%rsp), %r12;
movq (3 * 8)(%rsp), %r13;
CFI_RESTORE(%r10);
CFI_RESTORE(%r11);
CFI_RESTORE(%r12);
CFI_RESTORE(%r13);
call __serpent_dec_blk16;
addq $(4 * 8), %rsp;
CFI_ADJUST_CFA_OFFSET(-4 * 8);
vmovdqu (%r8), RTMP1x;
vpxor (0 * 32)(%rsi), RA0, RA0;
vpxor (1 * 32)(%rsi), RA1, RA1;
vpxor (2 * 32)(%rsi), RA2, RA2;
vpxor (3 * 32)(%rsi), RA3, RA3;
vpxor (4 * 32)(%rsi), RB0, RB0;
vpxor (5 * 32)(%rsi), RB1, RB1;
vpxor (6 * 32)(%rsi), RB2, RB2;
vpxor (7 * 32)(%rsi), RB3, RB3;
/* Checksum_i = Checksum_{i-1} xor P_i */
vmovdqu RA0, (0 * 32)(%rsi);
vpxor RA0, RTMP1, RTMP1;
vmovdqu RA1, (1 * 32)(%rsi);
vpxor RA1, RTMP1, RTMP1;
vmovdqu RA2, (2 * 32)(%rsi);
vpxor RA2, RTMP1, RTMP1;
vmovdqu RA3, (3 * 32)(%rsi);
vpxor RA3, RTMP1, RTMP1;
vmovdqu RB0, (4 * 32)(%rsi);
vpxor RB0, RTMP1, RTMP1;
vmovdqu RB1, (5 * 32)(%rsi);
vpxor RB1, RTMP1, RTMP1;
vmovdqu RB2, (6 * 32)(%rsi);
vpxor RB2, RTMP1, RTMP1;
vmovdqu RB3, (7 * 32)(%rsi);
vpxor RB3, RTMP1, RTMP1;
vextracti128 $1, RTMP1, RNOTx;
vpxor RNOTx, RTMP1x, RTMP1x;
vmovdqu RTMP1x, (%r8);
vzeroall;
ret_spec_stop;
CFI_ENDPROC();
ELF(.size _gcry_serpent_avx2_ocb_dec,.-_gcry_serpent_avx2_ocb_dec;)
.align 16
.globl _gcry_serpent_avx2_ocb_auth
ELF(.type _gcry_serpent_avx2_ocb_auth,@function;)
_gcry_serpent_avx2_ocb_auth:
/* input:
* %rdi: ctx, CTX
* %rsi: abuf (16 blocks)
* %rdx: offset
* %rcx: checksum
* %r8 : L pointers (void *L[16])
*/
CFI_STARTPROC();
vzeroupper;
subq $(4 * 8), %rsp;
CFI_ADJUST_CFA_OFFSET(4 * 8);
movq %r10, (0 * 8)(%rsp);
movq %r11, (1 * 8)(%rsp);
movq %r12, (2 * 8)(%rsp);
movq %r13, (3 * 8)(%rsp);
CFI_REL_OFFSET(%r10, 0 * 8);
CFI_REL_OFFSET(%r11, 1 * 8);
CFI_REL_OFFSET(%r12, 2 * 8);
CFI_REL_OFFSET(%r13, 3 * 8);
vmovdqu (%rdx), RTMP0x;
/* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
/* Sum_i = Sum_{i-1} xor ENCIPHER(K, A_i xor Offset_i) */
#define OCB_INPUT(n, l0reg, l1reg, yreg) \
vmovdqu (n * 32)(%rsi), yreg; \
vpxor (l0reg), RTMP0x, RNOTx; \
vpxor (l1reg), RNOTx, RTMP0x; \
vinserti128 $1, RTMP0x, RNOT, RNOT; \
vpxor yreg, RNOT, yreg;
movq (0 * 8)(%r8), %r10;
movq (1 * 8)(%r8), %r11;
movq (2 * 8)(%r8), %r12;
movq (3 * 8)(%r8), %r13;
OCB_INPUT(0, %r10, %r11, RA0);
OCB_INPUT(1, %r12, %r13, RA1);
movq (4 * 8)(%r8), %r10;
movq (5 * 8)(%r8), %r11;
movq (6 * 8)(%r8), %r12;
movq (7 * 8)(%r8), %r13;
OCB_INPUT(2, %r10, %r11, RA2);
OCB_INPUT(3, %r12, %r13, RA3);
movq (8 * 8)(%r8), %r10;
movq (9 * 8)(%r8), %r11;
movq (10 * 8)(%r8), %r12;
movq (11 * 8)(%r8), %r13;
OCB_INPUT(4, %r10, %r11, RB0);
OCB_INPUT(5, %r12, %r13, RB1);
movq (12 * 8)(%r8), %r10;
movq (13 * 8)(%r8), %r11;
movq (14 * 8)(%r8), %r12;
movq (15 * 8)(%r8), %r13;
OCB_INPUT(6, %r10, %r11, RB2);
OCB_INPUT(7, %r12, %r13, RB3);
#undef OCB_INPUT
vmovdqu RTMP0x, (%rdx);
movq (0 * 8)(%rsp), %r10;
movq (1 * 8)(%rsp), %r11;
movq (2 * 8)(%rsp), %r12;
movq (3 * 8)(%rsp), %r13;
CFI_RESTORE(%r10);
CFI_RESTORE(%r11);
CFI_RESTORE(%r12);
CFI_RESTORE(%r13);
call __serpent_enc_blk16;
addq $(4 * 8), %rsp;
CFI_ADJUST_CFA_OFFSET(-4 * 8);
vpxor RA4, RB4, RA4;
vpxor RA1, RB1, RA1;
vpxor RA2, RB2, RA2;
vpxor RA0, RB0, RA0;
vpxor RA4, RA1, RA1;
vpxor RA2, RA0, RA0;
vpxor RA1, RA0, RTMP1;
vextracti128 $1, RTMP1, RNOTx;
vpxor (%rcx), RTMP1x, RTMP1x;
vpxor RNOTx, RTMP1x, RTMP1x;
vmovdqu RTMP1x, (%rcx);
vzeroall;
ret_spec_stop;
CFI_ENDPROC();
ELF(.size _gcry_serpent_avx2_ocb_auth,.-_gcry_serpent_avx2_ocb_auth;)
SECTION_RODATA
ELF(.type _serpent_avx2_consts,@object)
_serpent_avx2_consts:
/* For CTR-mode IV byteswap */
.align 16
.Lbswap128_mask:
.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
#endif /*defined(USE_SERPENT) && defined(ENABLE_AVX2_SUPPORT)*/
#endif /*__x86_64*/
diff --git a/cipher/serpent-avx512-x86.c b/cipher/serpent-avx512-x86.c
new file mode 100644
index 00000000..762c09e1
--- /dev/null
+++ b/cipher/serpent-avx512-x86.c
@@ -0,0 +1,994 @@
+/* serpent-avx512-x86.c - AVX512 implementation of Serpent cipher
+ *
+ * Copyright (C) 2023 Jussi Kivilinna <jussi.kivilinna@iki.fi>
+ *
+ * This file is part of Libgcrypt.
+ *
+ * Libgcrypt is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as
+ * published by the Free Software Foundation; either version 2.1 of
+ * the License, or (at your option) any later version.
+ *
+ * Libgcrypt is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <config.h>
+
+#if defined(__x86_64) || defined(__i386)
+#if defined(HAVE_COMPATIBLE_CC_X86_AVX512_INTRINSICS) && \
+ defined(USE_SERPENT) && defined(ENABLE_AVX512_SUPPORT)
+
+#include <immintrin.h>
+#include <string.h>
+#include <stdio.h>
+
+#include "g10lib.h"
+#include "types.h"
+#include "cipher.h"
+#include "bithelp.h"
+#include "bufhelp.h"
+#include "cipher-internal.h"
+#include "bulkhelp.h"
+
+#define ALWAYS_INLINE inline __attribute__((always_inline))
+#define NO_INLINE __attribute__((noinline))
+
+/* Number of rounds per Serpent encrypt/decrypt operation. */
+#define ROUNDS 32
+
+/* Serpent works on 128 bit blocks. */
+typedef unsigned int serpent_block_t[4];
+
+/* The key schedule consists of 33 128 bit subkeys. */
+typedef unsigned int serpent_subkeys_t[ROUNDS + 1][4];
+
+#define vpunpckhdq(a, b, o) ((o) = _mm512_unpackhi_epi32((b), (a)))
+#define vpunpckldq(a, b, o) ((o) = _mm512_unpacklo_epi32((b), (a)))
+#define vpunpckhqdq(a, b, o) ((o) = _mm512_unpackhi_epi64((b), (a)))
+#define vpunpcklqdq(a, b, o) ((o) = _mm512_unpacklo_epi64((b), (a)))
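+
+/* Note: the operand order in these macros mirrors the AT&T-syntax assembly
+ * implementations (source operands swapped), so the transpose macro below
+ * matches its assembly counterpart. */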
+
+#define vpbroadcastd(v) _mm512_set1_epi32(v)
+
+#define vrol(x, s) _mm512_rol_epi32((x), (s))
+#define vror(x, s) _mm512_ror_epi32((x), (s))
+#define vshl(x, s) _mm512_slli_epi32((x), (s))
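+
+/* AVX512F provides native per-lane rotate instructions (vprold/vprord),
+ * so vrol/vror map to single instructions; the AVX2 version has to emulate
+ * rotation with two shifts and an OR. */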
+
+/* 4x4 32-bit integer matrix transpose */
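+/* (operates independently within each 128-bit lane, like the assembly
+ * versions, so each output vector ends up holding the same 32-bit word
+ * position from every block in the group) */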
+#define transpose_4x4(x0, x1, x2, x3, t1, t2, t3) \
+ vpunpckhdq(x1, x0, t2); \
+ vpunpckldq(x1, x0, x0); \
+ \
+ vpunpckldq(x3, x2, t1); \
+ vpunpckhdq(x3, x2, x2); \
+ \
+ vpunpckhqdq(t1, x0, x1); \
+ vpunpcklqdq(t1, x0, x0); \
+ \
+ vpunpckhqdq(x2, t2, x3); \
+ vpunpcklqdq(x2, t2, x2);
+
+/*
+ * These are the S-Boxes of Serpent from the following research paper.
+ *
+ * D. A. Osvik, “Speeding up Serpent,” in Third AES Candidate Conference,
+ * (New York, New York, USA), pp. 317–329, National Institute of Standards
+ * and Technology, 2000.
+ *
+ * The paper is also available at: http://www.ii.uib.no/~osvik/pub/aes3.pdf
+ *
+ * --
+ *
+ * The following logic gets heavily optimized by the compiler to use the
+ * AVX512F 'vpternlogq' instruction. This gives a higher performance
+ * increase than would be expected from simply widening the vectors from
+ * AVX2/256-bit to AVX512/512-bit.
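+ *
+ * For instance, a three-input expression like (r0 ^ (r1 & r2)) can be
+ * collapsed into a single vpternlogq (truth-table immediate 0x78) instead
+ * of two separate bitwise instructions.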
+ *
+ */
+
+#define SBOX0(r0, r1, r2, r3, w, x, y, z) \
+ { \
+ __m512i r4; \
+ \
+ r3 ^= r0; r4 = r1; \
+ r1 &= r3; r4 ^= r2; \
+ r1 ^= r0; r0 |= r3; \
+ r0 ^= r4; r4 ^= r3; \
+ r3 ^= r2; r2 |= r1; \
+ r2 ^= r4; r4 = ~r4; \
+ r4 |= r1; r1 ^= r3; \
+ r1 ^= r4; r3 |= r0; \
+ r1 ^= r3; r4 ^= r3; \
+ \
+ w = r1; x = r4; y = r2; z = r0; \
+ }
+
+#define SBOX0_INVERSE(r0, r1, r2, r3, w, x, y, z) \
+ { \
+ __m512i r4; \
+ \
+ r2 = ~r2; r4 = r1; \
+ r1 |= r0; r4 = ~r4; \
+ r1 ^= r2; r2 |= r4; \
+ r1 ^= r3; r0 ^= r4; \
+ r2 ^= r0; r0 &= r3; \
+ r4 ^= r0; r0 |= r1; \
+ r0 ^= r2; r3 ^= r4; \
+ r2 ^= r1; r3 ^= r0; \
+ r3 ^= r1; \
+ r2 &= r3; \
+ r4 ^= r2; \
+ \
+ w = r0; x = r4; y = r1; z = r3; \
+ }
+
+#define SBOX1(r0, r1, r2, r3, w, x, y, z) \
+ { \
+ __m512i r4; \
+ \
+ r0 = ~r0; r2 = ~r2; \
+ r4 = r0; r0 &= r1; \
+ r2 ^= r0; r0 |= r3; \
+ r3 ^= r2; r1 ^= r0; \
+ r0 ^= r4; r4 |= r1; \
+ r1 ^= r3; r2 |= r0; \
+ r2 &= r4; r0 ^= r1; \
+ r1 &= r2; \
+ r1 ^= r0; r0 &= r2; \
+ r0 ^= r4; \
+ \
+ w = r2; x = r0; y = r3; z = r1; \
+ }
+
+#define SBOX1_INVERSE(r0, r1, r2, r3, w, x, y, z) \
+ { \
+ __m512i r4; \
+ \
+ r4 = r1; r1 ^= r3; \
+ r3 &= r1; r4 ^= r2; \
+ r3 ^= r0; r0 |= r1; \
+ r2 ^= r3; r0 ^= r4; \
+ r0 |= r2; r1 ^= r3; \
+ r0 ^= r1; r1 |= r3; \
+ r1 ^= r0; r4 = ~r4; \
+ r4 ^= r1; r1 |= r0; \
+ r1 ^= r0; \
+ r1 |= r4; \
+ r3 ^= r1; \
+ \
+ w = r4; x = r0; y = r3; z = r2; \
+ }
+
+#define SBOX2(r0, r1, r2, r3, w, x, y, z) \
+ { \
+ __m512i r4; \
+ \
+ r4 = r0; r0 &= r2; \
+ r0 ^= r3; r2 ^= r1; \
+ r2 ^= r0; r3 |= r4; \
+ r3 ^= r1; r4 ^= r2; \
+ r1 = r3; r3 |= r4; \
+ r3 ^= r0; r0 &= r1; \
+ r4 ^= r0; r1 ^= r3; \
+ r1 ^= r4; r4 = ~r4; \
+ \
+ w = r2; x = r3; y = r1; z = r4; \
+ }
+
+#define SBOX2_INVERSE(r0, r1, r2, r3, w, x, y, z) \
+ { \
+ __m512i r4; \
+ \
+ r2 ^= r3; r3 ^= r0; \
+ r4 = r3; r3 &= r2; \
+ r3 ^= r1; r1 |= r2; \
+ r1 ^= r4; r4 &= r3; \
+ r2 ^= r3; r4 &= r0; \
+ r4 ^= r2; r2 &= r1; \
+ r2 |= r0; r3 = ~r3; \
+ r2 ^= r3; r0 ^= r3; \
+ r0 &= r1; r3 ^= r4; \
+ r3 ^= r0; \
+ \
+ w = r1; x = r4; y = r2; z = r3; \
+ }
+
+#define SBOX3(r0, r1, r2, r3, w, x, y, z) \
+ { \
+ __m512i r4; \
+ \
+ r4 = r0; r0 |= r3; \
+ r3 ^= r1; r1 &= r4; \
+ r4 ^= r2; r2 ^= r3; \
+ r3 &= r0; r4 |= r1; \
+ r3 ^= r4; r0 ^= r1; \
+ r4 &= r0; r1 ^= r3; \
+ r4 ^= r2; r1 |= r0; \
+ r1 ^= r2; r0 ^= r3; \
+ r2 = r1; r1 |= r3; \
+ r1 ^= r0; \
+ \
+ w = r1; x = r2; y = r3; z = r4; \
+ }
+
+#define SBOX3_INVERSE(r0, r1, r2, r3, w, x, y, z) \
+ { \
+ __m512i r4; \
+ \
+ r4 = r2; r2 ^= r1; \
+ r0 ^= r2; r4 &= r2; \
+ r4 ^= r0; r0 &= r1; \
+ r1 ^= r3; r3 |= r4; \
+ r2 ^= r3; r0 ^= r3; \
+ r1 ^= r4; r3 &= r2; \
+ r3 ^= r1; r1 ^= r0; \
+ r1 |= r2; r0 ^= r3; \
+ r1 ^= r4; \
+ r0 ^= r1; \
+ \
+ w = r2; x = r1; y = r3; z = r0; \
+ }
+
+#define SBOX4(r0, r1, r2, r3, w, x, y, z) \
+ { \
+ __m512i r4; \
+ \
+ r1 ^= r3; r3 = ~r3; \
+ r2 ^= r3; r3 ^= r0; \
+ r4 = r1; r1 &= r3; \
+ r1 ^= r2; r4 ^= r3; \
+ r0 ^= r4; r2 &= r4; \
+ r2 ^= r0; r0 &= r1; \
+ r3 ^= r0; r4 |= r1; \
+ r4 ^= r0; r0 |= r3; \
+ r0 ^= r2; r2 &= r3; \
+ r0 = ~r0; r4 ^= r2; \
+ \
+ w = r1; x = r4; y = r0; z = r3; \
+ }
+
+#define SBOX4_INVERSE(r0, r1, r2, r3, w, x, y, z) \
+ { \
+ __m512i r4; \
+ \
+ r4 = r2; r2 &= r3; \
+ r2 ^= r1; r1 |= r3; \
+ r1 &= r0; r4 ^= r2; \
+ r4 ^= r1; r1 &= r2; \
+ r0 = ~r0; r3 ^= r4; \
+ r1 ^= r3; r3 &= r0; \
+ r3 ^= r2; r0 ^= r1; \
+ r2 &= r0; r3 ^= r0; \
+ r2 ^= r4; \
+ r2 |= r3; r3 ^= r0; \
+ r2 ^= r1; \
+ \
+ w = r0; x = r3; y = r2; z = r4; \
+ }
+
+#define SBOX5(r0, r1, r2, r3, w, x, y, z) \
+ { \
+ __m512i r4; \
+ \
+ r0 ^= r1; r1 ^= r3; \
+ r3 = ~r3; r4 = r1; \
+ r1 &= r0; r2 ^= r3; \
+ r1 ^= r2; r2 |= r4; \
+ r4 ^= r3; r3 &= r1; \
+ r3 ^= r0; r4 ^= r1; \
+ r4 ^= r2; r2 ^= r0; \
+ r0 &= r3; r2 = ~r2; \
+ r0 ^= r4; r4 |= r3; \
+ r2 ^= r4; \
+ \
+ w = r1; x = r3; y = r0; z = r2; \
+ }
+
+#define SBOX5_INVERSE(r0, r1, r2, r3, w, x, y, z) \
+ { \
+ __m512i r4; \
+ \
+ r1 = ~r1; r4 = r3; \
+ r2 ^= r1; r3 |= r0; \
+ r3 ^= r2; r2 |= r1; \
+ r2 &= r0; r4 ^= r3; \
+ r2 ^= r4; r4 |= r0; \
+ r4 ^= r1; r1 &= r2; \
+ r1 ^= r3; r4 ^= r2; \
+ r3 &= r4; r4 ^= r1; \
+ r3 ^= r4; r4 = ~r4; \
+ r3 ^= r0; \
+ \
+ w = r1; x = r4; y = r3; z = r2; \
+ }
+
+#define SBOX6(r0, r1, r2, r3, w, x, y, z) \
+ { \
+ __m512i r4; \
+ \
+ r2 = ~r2; r4 = r3; \
+ r3 &= r0; r0 ^= r4; \
+ r3 ^= r2; r2 |= r4; \
+ r1 ^= r3; r2 ^= r0; \
+ r0 |= r1; r2 ^= r1; \
+ r4 ^= r0; r0 |= r3; \
+ r0 ^= r2; r4 ^= r3; \
+ r4 ^= r0; r3 = ~r3; \
+ r2 &= r4; \
+ r2 ^= r3; \
+ \
+ w = r0; x = r1; y = r4; z = r2; \
+ }
+
+#define SBOX6_INVERSE(r0, r1, r2, r3, w, x, y, z) \
+ { \
+ __m512i r4; \
+ \
+ r0 ^= r2; r4 = r2; \
+ r2 &= r0; r4 ^= r3; \
+ r2 = ~r2; r3 ^= r1; \
+ r2 ^= r3; r4 |= r0; \
+ r0 ^= r2; r3 ^= r4; \
+ r4 ^= r1; r1 &= r3; \
+ r1 ^= r0; r0 ^= r3; \
+ r0 |= r2; r3 ^= r1; \
+ r4 ^= r0; \
+ \
+ w = r1; x = r2; y = r4; z = r3; \
+ }
+
+#define SBOX7(r0, r1, r2, r3, w, x, y, z) \
+ { \
+ __m512i r4; \
+ \
+ r4 = r1; r1 |= r2; \
+ r1 ^= r3; r4 ^= r2; \
+ r2 ^= r1; r3 |= r4; \
+ r3 &= r0; r4 ^= r2; \
+ r3 ^= r1; r1 |= r4; \
+ r1 ^= r0; r0 |= r4; \
+ r0 ^= r2; r1 ^= r4; \
+ r2 ^= r1; r1 &= r0; \
+ r1 ^= r4; r2 = ~r2; \
+ r2 |= r0; \
+ r4 ^= r2; \
+ \
+ w = r4; x = r3; y = r1; z = r0; \
+ }
+
+#define SBOX7_INVERSE(r0, r1, r2, r3, w, x, y, z) \
+ { \
+ __m512i r4; \
+ \
+ r4 = r2; r2 ^= r0; \
+ r0 &= r3; r4 |= r3; \
+ r2 = ~r2; r3 ^= r1; \
+ r1 |= r0; r0 ^= r2; \
+ r2 &= r4; r3 &= r4; \
+ r1 ^= r2; r2 ^= r0; \
+ r0 |= r2; r4 ^= r1; \
+ r0 ^= r3; r3 ^= r4; \
+ r4 |= r0; r3 ^= r2; \
+ r4 ^= r2; \
+ \
+ w = r3; x = r0; y = r1; z = r4; \
+ }
+
+/* XOR the 128-bit round key RKEY into BLOCK0, broadcasting each
+   32-bit subkey word across all vector lanes. */
+#define BLOCK_XOR_KEY(block0, rkey) \
+ { \
+ block0[0] ^= vpbroadcastd(rkey[0]); \
+ block0[1] ^= vpbroadcastd(rkey[1]); \
+ block0[2] ^= vpbroadcastd(rkey[2]); \
+ block0[3] ^= vpbroadcastd(rkey[3]); \
+ }
+
+/* Copy BLOCK_SRC to BLOCK_DST. */
+#define BLOCK_COPY(block_dst, block_src) \
+ { \
+ block_dst[0] = block_src[0]; \
+ block_dst[1] = block_src[1]; \
+ block_dst[2] = block_src[2]; \
+ block_dst[3] = block_src[3]; \
+ }
+
+/* Apply SBOX number WHICH to the block found in ARRAY0, writing
+   the output to the block found in ARRAY1. */
+#define SBOX(which, array0, array1) \
+ SBOX##which (array0[0], array0[1], array0[2], array0[3], \
+ array1[0], array1[1], array1[2], array1[3]);
+
+/* Apply inverse SBOX number WHICH to the block found in ARRAY0, writing
+   the output to the block found in ARRAY1. */
+#define SBOX_INVERSE(which, array0, array1) \
+ SBOX##which##_INVERSE (array0[0], array0[1], array0[2], array0[3], \
+ array1[0], array1[1], array1[2], array1[3]);
+
+/* Apply the linear transformation to BLOCK. */
+#define LINEAR_TRANSFORMATION(block) \
+ { \
+ block[0] = vrol (block[0], 13); \
+ block[2] = vrol (block[2], 3); \
+ block[1] = block[1] ^ block[0] ^ block[2]; \
+ block[3] = block[3] ^ block[2] ^ vshl(block[0], 3); \
+ block[1] = vrol (block[1], 1); \
+ block[3] = vrol (block[3], 7); \
+ block[0] = block[0] ^ block[1] ^ block[3]; \
+ block[2] = block[2] ^ block[3] ^ vshl(block[1], 7); \
+ block[0] = vrol (block[0], 5); \
+ block[2] = vrol (block[2], 22); \
+ }
+
+/* Apply the inverse linear transformation to BLOCK. */
+#define LINEAR_TRANSFORMATION_INVERSE(block) \
+ { \
+ block[2] = vror (block[2], 22); \
+ block[0] = vror (block[0] , 5); \
+ block[2] = block[2] ^ block[3] ^ vshl(block[1], 7); \
+ block[0] = block[0] ^ block[1] ^ block[3]; \
+ block[3] = vror (block[3], 7); \
+ block[1] = vror (block[1], 1); \
+ block[3] = block[3] ^ block[2] ^ vshl(block[0], 3); \
+ block[1] = block[1] ^ block[0] ^ block[2]; \
+ block[2] = vror (block[2], 3); \
+ block[0] = vror (block[0], 13); \
+ }
+
+/* Apply a Serpent round to BLOCK, using the SBOX number WHICH and the
+   subkeys contained in SUBKEYS.  Use BLOCK_TMP as temporary storage.
+   Unlike the scalar variant, this macro does not modify `round'; the
+   caller advances it. */
+#define ROUND(which, subkeys, block, block_tmp) \
+ { \
+ BLOCK_XOR_KEY (block, subkeys[round]); \
+ SBOX (which, block, block_tmp); \
+ LINEAR_TRANSFORMATION (block_tmp); \
+ BLOCK_COPY (block, block_tmp); \
+ }
+
+/* Apply the last Serpent round to BLOCK, using the SBOX number WHICH
+   and the subkeys contained in SUBKEYS.  Use BLOCK_TMP as temporary
+   storage.  The result will be stored in BLOCK_TMP.  This macro reads
+   subkeys `round' and `round+1' and leaves `round' unchanged. */
+#define ROUND_LAST(which, subkeys, block, block_tmp) \
+ { \
+ BLOCK_XOR_KEY (block, subkeys[round]); \
+ SBOX (which, block, block_tmp); \
+ BLOCK_XOR_KEY (block_tmp, subkeys[round+1]); \
+ }
+
+/* Apply an inverse Serpent round to BLOCK, using the SBOX number
+   WHICH and the subkeys contained in SUBKEYS.  Use BLOCK_TMP as
+   temporary storage.  The caller decrements `round'. */
+#define ROUND_INVERSE(which, subkey, block, block_tmp) \
+ { \
+ LINEAR_TRANSFORMATION_INVERSE (block); \
+ SBOX_INVERSE (which, block, block_tmp); \
+ BLOCK_XOR_KEY (block_tmp, subkey[round]); \
+ BLOCK_COPY (block, block_tmp); \
+ }
+
+/* Apply the first inverse Serpent round to BLOCK, using the SBOX
+   number WHICH and the subkeys contained in SUBKEYS.  Use BLOCK_TMP
+   as temporary storage.  The result will be stored in BLOCK_TMP.
+   This macro reads subkeys `round' and `round-1'; the caller adjusts
+   `round'. */
+#define ROUND_FIRST_INVERSE(which, subkeys, block, block_tmp) \
+ { \
+ BLOCK_XOR_KEY (block, subkeys[round]); \
+ SBOX_INVERSE (which, block, block_tmp); \
+ BLOCK_XOR_KEY (block_tmp, subkeys[round-1]); \
+ }
+
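+/* Data layout note (assuming transpose_4x4's trailing arguments are
+   scratch registers): VIN holds 32 consecutive 16-byte blocks.  The
+   lower four vectors become B, the upper four C; after the in-lane
+   4x4 transpose, vector W of B (resp. C) holds word W of 16 distinct
+   blocks, so each ROUND below works on 16 blocks and B and C together
+   cover all 32. */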
+static ALWAYS_INLINE void
+serpent_encrypt_internal_avx512 (const serpent_subkeys_t keys,
+ const __m512i vin[8], __m512i vout[8])
+{
+ __m512i b[4];
+ __m512i c[4];
+ __m512i b_next[4];
+ __m512i c_next[4];
+ int round = 0;
+
+ b_next[0] = vin[0];
+ b_next[1] = vin[1];
+ b_next[2] = vin[2];
+ b_next[3] = vin[3];
+ c_next[0] = vin[4];
+ c_next[1] = vin[5];
+ c_next[2] = vin[6];
+ c_next[3] = vin[7];
+ transpose_4x4 (b_next[0], b_next[1], b_next[2], b_next[3], b[0], b[1], b[2]);
+ transpose_4x4 (c_next[0], c_next[1], c_next[2], c_next[3], c[0], c[1], c[2]);
+
+ b[0] = b_next[0];
+ b[1] = b_next[1];
+ b[2] = b_next[2];
+ b[3] = b_next[3];
+ c[0] = c_next[0];
+ c[1] = c_next[1];
+ c[2] = c_next[2];
+ c[3] = c_next[3];
+
+ while (1)
+ {
+ ROUND (0, keys, b, b_next); ROUND (0, keys, c, c_next); round++;
+ ROUND (1, keys, b, b_next); ROUND (1, keys, c, c_next); round++;
+ ROUND (2, keys, b, b_next); ROUND (2, keys, c, c_next); round++;
+ ROUND (3, keys, b, b_next); ROUND (3, keys, c, c_next); round++;
+ ROUND (4, keys, b, b_next); ROUND (4, keys, c, c_next); round++;
+ ROUND (5, keys, b, b_next); ROUND (5, keys, c, c_next); round++;
+ ROUND (6, keys, b, b_next); ROUND (6, keys, c, c_next); round++;
+ if (round >= ROUNDS - 1)
+ break;
+ ROUND (7, keys, b, b_next); ROUND (7, keys, c, c_next); round++;
+ }
+
+ ROUND_LAST (7, keys, b, b_next); ROUND_LAST (7, keys, c, c_next);
+
+ transpose_4x4 (b_next[0], b_next[1], b_next[2], b_next[3], b[0], b[1], b[2]);
+ transpose_4x4 (c_next[0], c_next[1], c_next[2], c_next[3], c[0], c[1], c[2]);
+ vout[0] = b_next[0];
+ vout[1] = b_next[1];
+ vout[2] = b_next[2];
+ vout[3] = b_next[3];
+ vout[4] = c_next[0];
+ vout[5] = c_next[1];
+ vout[6] = c_next[2];
+ vout[7] = c_next[3];
+}
+
+static ALWAYS_INLINE void
+serpent_decrypt_internal_avx512 (const serpent_subkeys_t keys,
+ const __m512i vin[8], __m512i vout[8])
+{
+ __m512i b[4];
+ __m512i c[4];
+ __m512i b_next[4];
+ __m512i c_next[4];
+ int round = ROUNDS;
+
+ b_next[0] = vin[0];
+ b_next[1] = vin[1];
+ b_next[2] = vin[2];
+ b_next[3] = vin[3];
+ c_next[0] = vin[4];
+ c_next[1] = vin[5];
+ c_next[2] = vin[6];
+ c_next[3] = vin[7];
+ transpose_4x4 (b_next[0], b_next[1], b_next[2], b_next[3], b[0], b[1], b[2]);
+ transpose_4x4 (c_next[0], c_next[1], c_next[2], c_next[3], c[0], c[1], c[2]);
+
+ ROUND_FIRST_INVERSE (7, keys, b_next, b); ROUND_FIRST_INVERSE (7, keys, c_next, c);
+ round -= 2;
+
+ while (1)
+ {
+ ROUND_INVERSE (6, keys, b, b_next); ROUND_INVERSE (6, keys, c, c_next); round--;
+ ROUND_INVERSE (5, keys, b, b_next); ROUND_INVERSE (5, keys, c, c_next); round--;
+ ROUND_INVERSE (4, keys, b, b_next); ROUND_INVERSE (4, keys, c, c_next); round--;
+ ROUND_INVERSE (3, keys, b, b_next); ROUND_INVERSE (3, keys, c, c_next); round--;
+ ROUND_INVERSE (2, keys, b, b_next); ROUND_INVERSE (2, keys, c, c_next); round--;
+ ROUND_INVERSE (1, keys, b, b_next); ROUND_INVERSE (1, keys, c, c_next); round--;
+ ROUND_INVERSE (0, keys, b, b_next); ROUND_INVERSE (0, keys, c, c_next); round--;
+ if (round <= 0)
+ break;
+ ROUND_INVERSE (7, keys, b, b_next); ROUND_INVERSE (7, keys, c, c_next); round--;
+ }
+
+ transpose_4x4 (b_next[0], b_next[1], b_next[2], b_next[3], b[0], b[1], b[2]);
+ transpose_4x4 (c_next[0], c_next[1], c_next[2], c_next[3], c[0], c[1], c[2]);
+ vout[0] = b_next[0];
+ vout[1] = b_next[1];
+ vout[2] = b_next[2];
+ vout[3] = b_next[3];
+ vout[4] = c_next[0];
+ vout[5] = c_next[1];
+ vout[6] = c_next[2];
+ vout[7] = c_next[3];
+}
+
+enum crypt_mode_e
+{
+ ECB_ENC = 0,
+ ECB_DEC,
+ CBC_DEC,
+ CFB_DEC,
+ CTR_ENC,
+ OCB_ENC,
+ OCB_DEC
+};
+
+static ALWAYS_INLINE void
+ctr_generate(unsigned char *ctr, __m512i vin[8])
+{
+ const unsigned int blocksize = 16;
+ unsigned char ctr_low = ctr[15];
+
+ if (ctr_low + 32 <= 256)
+ {
+ const __m512i add0123 = _mm512_set_epi64(3LL << 56, 0,
+ 2LL << 56, 0,
+ 1LL << 56, 0,
+ 0LL << 56, 0);
+ const __m512i add4444 = _mm512_set_epi64(4LL << 56, 0,
+ 4LL << 56, 0,
+ 4LL << 56, 0,
+ 4LL << 56, 0);
+ const __m512i add4567 = _mm512_add_epi32(add0123, add4444);
+ const __m512i add8888 = _mm512_add_epi32(add4444, add4444);
+
+ // Fast path without carry handling.
+ __m512i vctr =
+ _mm512_broadcast_i32x4(_mm_loadu_si128((const void *)ctr));
+
+ cipher_block_add(ctr, 32, blocksize);
+ vin[0] = _mm512_add_epi32(vctr, add0123);
+ vin[1] = _mm512_add_epi32(vctr, add4567);
+ vin[2] = _mm512_add_epi32(vin[0], add8888);
+ vin[3] = _mm512_add_epi32(vin[1], add8888);
+ vin[4] = _mm512_add_epi32(vin[2], add8888);
+ vin[5] = _mm512_add_epi32(vin[3], add8888);
+ vin[6] = _mm512_add_epi32(vin[4], add8888);
+ vin[7] = _mm512_add_epi32(vin[5], add8888);
+ }
+ else
+ {
+ // Slow path.
+ u32 blocks[4][blocksize / sizeof(u32)];
+
+ cipher_block_cpy(blocks[0], ctr, blocksize);
+ cipher_block_cpy(blocks[1], ctr, blocksize);
+ cipher_block_cpy(blocks[2], ctr, blocksize);
+ cipher_block_cpy(blocks[3], ctr, blocksize);
+ cipher_block_add(ctr, 32, blocksize);
+ cipher_block_add(blocks[1], 1, blocksize);
+ cipher_block_add(blocks[2], 2, blocksize);
+ cipher_block_add(blocks[3], 3, blocksize);
+ vin[0] = _mm512_loadu_epi32 (blocks);
+ cipher_block_add(blocks[0], 4, blocksize);
+ cipher_block_add(blocks[1], 4, blocksize);
+ cipher_block_add(blocks[2], 4, blocksize);
+ cipher_block_add(blocks[3], 4, blocksize);
+ vin[1] = _mm512_loadu_epi32 (blocks);
+ cipher_block_add(blocks[0], 4, blocksize);
+ cipher_block_add(blocks[1], 4, blocksize);
+ cipher_block_add(blocks[2], 4, blocksize);
+ cipher_block_add(blocks[3], 4, blocksize);
+ vin[2] = _mm512_loadu_epi32 (blocks);
+ cipher_block_add(blocks[0], 4, blocksize);
+ cipher_block_add(blocks[1], 4, blocksize);
+ cipher_block_add(blocks[2], 4, blocksize);
+ cipher_block_add(blocks[3], 4, blocksize);
+ vin[3] = _mm512_loadu_epi32 (blocks);
+ cipher_block_add(blocks[0], 4, blocksize);
+ cipher_block_add(blocks[1], 4, blocksize);
+ cipher_block_add(blocks[2], 4, blocksize);
+ cipher_block_add(blocks[3], 4, blocksize);
+ vin[4] = _mm512_loadu_epi32 (blocks);
+ cipher_block_add(blocks[0], 4, blocksize);
+ cipher_block_add(blocks[1], 4, blocksize);
+ cipher_block_add(blocks[2], 4, blocksize);
+ cipher_block_add(blocks[3], 4, blocksize);
+ vin[5] = _mm512_loadu_epi32 (blocks);
+ cipher_block_add(blocks[0], 4, blocksize);
+ cipher_block_add(blocks[1], 4, blocksize);
+ cipher_block_add(blocks[2], 4, blocksize);
+ cipher_block_add(blocks[3], 4, blocksize);
+ vin[6] = _mm512_loadu_epi32 (blocks);
+ cipher_block_add(blocks[0], 4, blocksize);
+ cipher_block_add(blocks[1], 4, blocksize);
+ cipher_block_add(blocks[2], 4, blocksize);
+ cipher_block_add(blocks[3], 4, blocksize);
+ vin[7] = _mm512_loadu_epi32 (blocks);
+
+ wipememory(blocks, sizeof(blocks));
+ }
+}
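+
+/* For orientation, the fast-path condition above can be checked
+   against a scalar model of the counter: the counter is a 128-bit
+   big-endian integer, and when ctr[15] + 32 <= 256 none of the 32
+   per-block increments carries out of the last byte, so the additions
+   can be done directly inside the vector lanes.  The sketch below is
+   a hypothetical scalar equivalent of such a counter add; it is kept
+   for illustration only and is not used by this file. */
+#if 0 /* illustration only */
+static void
+be128_add (unsigned char ctr[16], unsigned int v)
+{
+  unsigned int carry = v;
+  int i;
+
+  /* Add V to the big-endian 128-bit integer in CTR, propagating the
+     carry from the last byte towards the first. */
+  for (i = 15; i >= 0 && carry; i--)
+    {
+      carry += ctr[i];
+      ctr[i] = carry & 0xff;
+      carry >>= 8;
+    }
+}
+#endif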
+
+static ALWAYS_INLINE __m512i
+ocb_input(__m512i *vchecksum, __m128i *voffset, const unsigned char *input,
+ unsigned char *output, const ocb_L_uintptr_t L[4])
+{
+ __m128i L0 = _mm_loadu_si128((const void *)(uintptr_t)L[0]);
+ __m128i L1 = _mm_loadu_si128((const void *)(uintptr_t)L[1]);
+ __m128i L2 = _mm_loadu_si128((const void *)(uintptr_t)L[2]);
+ __m128i L3 = _mm_loadu_si128((const void *)(uintptr_t)L[3]);
+ __m512i vin = _mm512_loadu_epi32 (input);
+ __m512i voffsets;
+
+ /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
+ /* Checksum_i = Checksum_{i-1} xor P_i */
+ /* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i) */
+
+ if (vchecksum)
+ *vchecksum ^= _mm512_loadu_epi32 (input);
+
+ *voffset ^= L0;
+ voffsets = _mm512_castsi128_si512(*voffset);
+ *voffset ^= L1;
+ voffsets = _mm512_inserti32x4(voffsets, *voffset, 1);
+ *voffset ^= L2;
+ voffsets = _mm512_inserti32x4(voffsets, *voffset, 2);
+ *voffset ^= L3;
+ voffsets = _mm512_inserti32x4(voffsets, *voffset, 3);
+ _mm512_storeu_epi32 (output, voffsets);
+
+ return vin ^ voffsets;
+}
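+
+/* The Ls[] table prepared by the caller follows the OCB pattern
+   Offset_i = Offset_{i-1} xor L_{ntz(i)}: within every aligned
+   4-block group the first three L values are L0, L1, L0, so only one
+   table entry changes per 32-block chunk.  Note also that ocb_input
+   parks the per-block offsets in OUTPUT; they are XORed onto the
+   cipher result after encryption/decryption.  The helper below is a
+   hypothetical illustration of ntz (number of trailing zero bits,
+   for i > 0) and is not used by this file. */
+#if 0 /* illustration only */
+static unsigned int
+ocb_ntz (u64 i)
+{
+  unsigned int n = 0;
+
+  while ((i & 1) == 0)
+    {
+      i >>= 1;
+      n++;
+    }
+  return n;
+}
+#endif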
+
+static NO_INLINE void
+serpent_avx512_blk32(const void *c, unsigned char *output,
+ const unsigned char *input, int mode,
+ unsigned char *iv, unsigned char *checksum,
+ const ocb_L_uintptr_t Ls[32])
+{
+ __m512i vin[8];
+ __m512i vout[8];
+ int encrypt = 1;
+
+ asm volatile ("vpxor %%ymm0, %%ymm0, %%ymm0;\n\t"
+ "vpopcntb %%zmm0, %%zmm6;\n\t" /* spec stop for old AVX512 CPUs */
+ "vpxor %%ymm6, %%ymm6, %%ymm6;\n\t"
+ :
+ : "m"(*input), "m"(*output)
+ : "xmm6", "xmm0", "memory", "cc");
+
+ // Input handling
+ switch (mode)
+ {
+ default:
+ case CBC_DEC:
+ case ECB_DEC:
+ encrypt = 0;
+ /* fall through */
+ case ECB_ENC:
+ vin[0] = _mm512_loadu_epi32 (input + 0 * 64);
+ vin[1] = _mm512_loadu_epi32 (input + 1 * 64);
+ vin[2] = _mm512_loadu_epi32 (input + 2 * 64);
+ vin[3] = _mm512_loadu_epi32 (input + 3 * 64);
+ vin[4] = _mm512_loadu_epi32 (input + 4 * 64);
+ vin[5] = _mm512_loadu_epi32 (input + 5 * 64);
+ vin[6] = _mm512_loadu_epi32 (input + 6 * 64);
+ vin[7] = _mm512_loadu_epi32 (input + 7 * 64);
+ break;
+
+ case CFB_DEC:
+ {
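+	/* CFB decryption feeds the previous ciphertext block into the
+	   cipher: the first 128-bit lane takes the IV (the masked load
+	   zeroes it), and the remaining lanes reload the input shifted
+	   back one block (offset -64 + 48 = -16 bytes). */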
+ __m128i viv = _mm_loadu_si128((const void *)iv);
+ vin[0] = _mm512_maskz_loadu_epi32(_cvtu32_mask16(0xfff0),
+ input - 1 * 64 + 48)
+ ^ _mm512_castsi128_si512(viv);
+ vin[1] = _mm512_loadu_epi32(input + 0 * 64 + 48);
+ vin[2] = _mm512_loadu_epi32(input + 1 * 64 + 48);
+ vin[3] = _mm512_loadu_epi32(input + 2 * 64 + 48);
+ vin[4] = _mm512_loadu_epi32(input + 3 * 64 + 48);
+ vin[5] = _mm512_loadu_epi32(input + 4 * 64 + 48);
+ vin[6] = _mm512_loadu_epi32(input + 5 * 64 + 48);
+ vin[7] = _mm512_loadu_epi32(input + 6 * 64 + 48);
+ viv = _mm_loadu_si128((const void *)(input + 7 * 64 + 48));
+ _mm_storeu_si128((void *)iv, viv);
+ break;
+ }
+
+ case CTR_ENC:
+ ctr_generate(iv, vin);
+ break;
+
+ case OCB_ENC:
+ {
+ const ocb_L_uintptr_t *L = Ls;
+ __m512i vchecksum = _mm512_setzero_epi32();
+ __m128i vchecksum128 = _mm_loadu_si128((const void *)checksum);
+ __m128i voffset = _mm_loadu_si128((const void *)iv);
+ vin[0] = ocb_input(&vchecksum, &voffset, input + 0 * 64, output + 0 * 64, L); L += 4;
+ vin[1] = ocb_input(&vchecksum, &voffset, input + 1 * 64, output + 1 * 64, L); L += 4;
+ vin[2] = ocb_input(&vchecksum, &voffset, input + 2 * 64, output + 2 * 64, L); L += 4;
+ vin[3] = ocb_input(&vchecksum, &voffset, input + 3 * 64, output + 3 * 64, L); L += 4;
+ vin[4] = ocb_input(&vchecksum, &voffset, input + 4 * 64, output + 4 * 64, L); L += 4;
+ vin[5] = ocb_input(&vchecksum, &voffset, input + 5 * 64, output + 5 * 64, L); L += 4;
+ vin[6] = ocb_input(&vchecksum, &voffset, input + 6 * 64, output + 6 * 64, L); L += 4;
+ vin[7] = ocb_input(&vchecksum, &voffset, input + 7 * 64, output + 7 * 64, L);
+ vchecksum128 ^= _mm512_extracti32x4_epi32(vchecksum, 0)
+ ^ _mm512_extracti32x4_epi32(vchecksum, 1)
+ ^ _mm512_extracti32x4_epi32(vchecksum, 2)
+ ^ _mm512_extracti32x4_epi32(vchecksum, 3);
+ _mm_storeu_si128((void *)checksum, vchecksum128);
+ _mm_storeu_si128((void *)iv, voffset);
+ break;
+ }
+
+ case OCB_DEC:
+ {
+ const ocb_L_uintptr_t *L = Ls;
+ __m128i voffset = _mm_loadu_si128((const void *)iv);
+ encrypt = 0;
+ vin[0] = ocb_input(NULL, &voffset, input + 0 * 64, output + 0 * 64, L); L += 4;
+ vin[1] = ocb_input(NULL, &voffset, input + 1 * 64, output + 1 * 64, L); L += 4;
+ vin[2] = ocb_input(NULL, &voffset, input + 2 * 64, output + 2 * 64, L); L += 4;
+ vin[3] = ocb_input(NULL, &voffset, input + 3 * 64, output + 3 * 64, L); L += 4;
+ vin[4] = ocb_input(NULL, &voffset, input + 4 * 64, output + 4 * 64, L); L += 4;
+ vin[5] = ocb_input(NULL, &voffset, input + 5 * 64, output + 5 * 64, L); L += 4;
+ vin[6] = ocb_input(NULL, &voffset, input + 6 * 64, output + 6 * 64, L); L += 4;
+ vin[7] = ocb_input(NULL, &voffset, input + 7 * 64, output + 7 * 64, L);
+ _mm_storeu_si128((void *)iv, voffset);
+ break;
+ }
+ }
+
+ if (encrypt)
+ serpent_encrypt_internal_avx512(c, vin, vout);
+ else
+ serpent_decrypt_internal_avx512(c, vin, vout);
+
+ switch (mode)
+ {
+ case CTR_ENC:
+ case CFB_DEC:
+ vout[0] ^= _mm512_loadu_epi32 (input + 0 * 64);
+ vout[1] ^= _mm512_loadu_epi32 (input + 1 * 64);
+ vout[2] ^= _mm512_loadu_epi32 (input + 2 * 64);
+ vout[3] ^= _mm512_loadu_epi32 (input + 3 * 64);
+ vout[4] ^= _mm512_loadu_epi32 (input + 4 * 64);
+ vout[5] ^= _mm512_loadu_epi32 (input + 5 * 64);
+ vout[6] ^= _mm512_loadu_epi32 (input + 6 * 64);
+ vout[7] ^= _mm512_loadu_epi32 (input + 7 * 64);
+ /* fall through */
+ default:
+ case ECB_DEC:
+ case ECB_ENC:
+ _mm512_storeu_epi32 (output + 0 * 64, vout[0]);
+ _mm512_storeu_epi32 (output + 1 * 64, vout[1]);
+ _mm512_storeu_epi32 (output + 2 * 64, vout[2]);
+ _mm512_storeu_epi32 (output + 3 * 64, vout[3]);
+ _mm512_storeu_epi32 (output + 4 * 64, vout[4]);
+ _mm512_storeu_epi32 (output + 5 * 64, vout[5]);
+ _mm512_storeu_epi32 (output + 6 * 64, vout[6]);
+ _mm512_storeu_epi32 (output + 7 * 64, vout[7]);
+ break;
+
+ case CBC_DEC:
+ {
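+	/* CBC decryption XORs each decrypted block with the previous
+	   ciphertext block: the first lane takes the IV, the remaining
+	   lanes reload the input one block back, using the same masked
+	   -16 byte load as in the CFB_DEC input handling. */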
+ __m128i viv = _mm_loadu_si128((const void *)iv);
+ vout[0] ^= _mm512_maskz_loadu_epi32(_cvtu32_mask16(0xfff0),
+ input - 1 * 64 + 48)
+ ^ _mm512_castsi128_si512(viv);
+ vout[1] ^= _mm512_loadu_epi32(input + 0 * 64 + 48);
+ vout[2] ^= _mm512_loadu_epi32(input + 1 * 64 + 48);
+ vout[3] ^= _mm512_loadu_epi32(input + 2 * 64 + 48);
+ vout[4] ^= _mm512_loadu_epi32(input + 3 * 64 + 48);
+ vout[5] ^= _mm512_loadu_epi32(input + 4 * 64 + 48);
+ vout[6] ^= _mm512_loadu_epi32(input + 5 * 64 + 48);
+ vout[7] ^= _mm512_loadu_epi32(input + 6 * 64 + 48);
+ viv = _mm_loadu_si128((const void *)(input + 7 * 64 + 48));
+ _mm_storeu_si128((void *)iv, viv);
+ _mm512_storeu_epi32 (output + 0 * 64, vout[0]);
+ _mm512_storeu_epi32 (output + 1 * 64, vout[1]);
+ _mm512_storeu_epi32 (output + 2 * 64, vout[2]);
+ _mm512_storeu_epi32 (output + 3 * 64, vout[3]);
+ _mm512_storeu_epi32 (output + 4 * 64, vout[4]);
+ _mm512_storeu_epi32 (output + 5 * 64, vout[5]);
+ _mm512_storeu_epi32 (output + 6 * 64, vout[6]);
+ _mm512_storeu_epi32 (output + 7 * 64, vout[7]);
+ break;
+ }
+
+ case OCB_ENC:
+ vout[0] ^= _mm512_loadu_epi32 (output + 0 * 64);
+ vout[1] ^= _mm512_loadu_epi32 (output + 1 * 64);
+ vout[2] ^= _mm512_loadu_epi32 (output + 2 * 64);
+ vout[3] ^= _mm512_loadu_epi32 (output + 3 * 64);
+ vout[4] ^= _mm512_loadu_epi32 (output + 4 * 64);
+ vout[5] ^= _mm512_loadu_epi32 (output + 5 * 64);
+ vout[6] ^= _mm512_loadu_epi32 (output + 6 * 64);
+ vout[7] ^= _mm512_loadu_epi32 (output + 7 * 64);
+ _mm512_storeu_epi32 (output + 0 * 64, vout[0]);
+ _mm512_storeu_epi32 (output + 1 * 64, vout[1]);
+ _mm512_storeu_epi32 (output + 2 * 64, vout[2]);
+ _mm512_storeu_epi32 (output + 3 * 64, vout[3]);
+ _mm512_storeu_epi32 (output + 4 * 64, vout[4]);
+ _mm512_storeu_epi32 (output + 5 * 64, vout[5]);
+ _mm512_storeu_epi32 (output + 6 * 64, vout[6]);
+ _mm512_storeu_epi32 (output + 7 * 64, vout[7]);
+ break;
+
+ case OCB_DEC:
+ {
+ __m512i vchecksum = _mm512_setzero_epi32();
+ __m128i vchecksum128 = _mm_loadu_si128((const void *)checksum);
+ vout[0] ^= _mm512_loadu_epi32 (output + 0 * 64);
+ vout[1] ^= _mm512_loadu_epi32 (output + 1 * 64);
+ vout[2] ^= _mm512_loadu_epi32 (output + 2 * 64);
+ vout[3] ^= _mm512_loadu_epi32 (output + 3 * 64);
+ vout[4] ^= _mm512_loadu_epi32 (output + 4 * 64);
+ vout[5] ^= _mm512_loadu_epi32 (output + 5 * 64);
+ vout[6] ^= _mm512_loadu_epi32 (output + 6 * 64);
+ vout[7] ^= _mm512_loadu_epi32 (output + 7 * 64);
+ vchecksum ^= vout[0];
+ vchecksum ^= vout[1];
+ vchecksum ^= vout[2];
+ vchecksum ^= vout[3];
+ vchecksum ^= vout[4];
+ vchecksum ^= vout[5];
+ vchecksum ^= vout[6];
+ vchecksum ^= vout[7];
+ _mm512_storeu_epi32 (output + 0 * 64, vout[0]);
+ _mm512_storeu_epi32 (output + 1 * 64, vout[1]);
+ _mm512_storeu_epi32 (output + 2 * 64, vout[2]);
+ _mm512_storeu_epi32 (output + 3 * 64, vout[3]);
+ _mm512_storeu_epi32 (output + 4 * 64, vout[4]);
+ _mm512_storeu_epi32 (output + 5 * 64, vout[5]);
+ _mm512_storeu_epi32 (output + 6 * 64, vout[6]);
+ _mm512_storeu_epi32 (output + 7 * 64, vout[7]);
+ vchecksum128 ^= _mm512_extracti32x4_epi32(vchecksum, 0)
+ ^ _mm512_extracti32x4_epi32(vchecksum, 1)
+ ^ _mm512_extracti32x4_epi32(vchecksum, 2)
+ ^ _mm512_extracti32x4_epi32(vchecksum, 3);
+ _mm_storeu_si128((void *)checksum, vchecksum128);
+ break;
+ }
+ }
+
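+  /* Wipe vector state so no key or data material is left in
+     registers.  vzeroall clears only (z)mm0-15; zmm16-31 are not
+     touched by it and are cleared explicitly below. */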
+ _mm256_zeroall();
+#ifdef __x86_64__
+ asm volatile (
+#define CLEAR(mm) "vpxord %%" #mm ", %%" #mm ", %%" #mm ";\n\t"
+ CLEAR(ymm16) CLEAR(ymm17) CLEAR(ymm18) CLEAR(ymm19)
+ CLEAR(ymm20) CLEAR(ymm21) CLEAR(ymm22) CLEAR(ymm23)
+ CLEAR(ymm24) CLEAR(ymm25) CLEAR(ymm26) CLEAR(ymm27)
+ CLEAR(ymm28) CLEAR(ymm29) CLEAR(ymm30) CLEAR(ymm31)
+#undef CLEAR
+ :
+ : "m"(*input), "m"(*output)
+ : "xmm16", "xmm17", "xmm18", "xmm19",
+ "xmm20", "xmm21", "xmm22", "xmm23",
+ "xmm24", "xmm25", "xmm26", "xmm27",
+ "xmm28", "xmm29", "xmm30", "xmm31",
+ "memory", "cc");
+#endif
+}
+
+void
+_gcry_serpent_avx512_blk32(const void *ctx, unsigned char *out,
+ const unsigned char *in, int encrypt)
+{
+ serpent_avx512_blk32 (ctx, out, in, encrypt ? ECB_ENC : ECB_DEC,
+ NULL, NULL, NULL);
+}
+
+void
+_gcry_serpent_avx512_cbc_dec(const void *ctx, unsigned char *out,
+ const unsigned char *in, unsigned char *iv)
+{
+ serpent_avx512_blk32 (ctx, out, in, CBC_DEC, iv, NULL, NULL);
+}
+
+void
+_gcry_serpent_avx512_cfb_dec(const void *ctx, unsigned char *out,
+ const unsigned char *in, unsigned char *iv)
+{
+ serpent_avx512_blk32 (ctx, out, in, CFB_DEC, iv, NULL, NULL);
+}
+
+void
+_gcry_serpent_avx512_ctr_enc(const void *ctx, unsigned char *out,
+ const unsigned char *in, unsigned char *iv)
+{
+ serpent_avx512_blk32 (ctx, out, in, CTR_ENC, iv, NULL, NULL);
+}
+
+void
+_gcry_serpent_avx512_ocb_crypt(const void *ctx, unsigned char *out,
+ const unsigned char *in, unsigned char *offset,
+ unsigned char *checksum,
+ const ocb_L_uintptr_t Ls[32], int encrypt)
+{
+ serpent_avx512_blk32 (ctx, out, in, encrypt ? OCB_ENC : OCB_DEC, offset,
+ checksum, Ls);
+}
+
+#endif /*defined(USE_SERPENT) && defined(ENABLE_AVX512_SUPPORT)*/
+#endif /*__x86_64 || __i386*/
diff --git a/cipher/serpent.c b/cipher/serpent.c
index 908523c2..2b951aba 100644
--- a/cipher/serpent.c
+++ b/cipher/serpent.c
@@ -1,1836 +1,2020 @@
/* serpent.c - Implementation of the Serpent encryption algorithm.
* Copyright (C) 2003, 2004, 2005 Free Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, see <https://www.gnu.org/licenses/>.
* SPDX-License-Identifier: LGPL-2.1-or-later
*/
#include <config.h>
#include <string.h>
#include <stdio.h>
#include "types.h"
#include "g10lib.h"
#include "cipher.h"
#include "bithelp.h"
#include "bufhelp.h"
#include "cipher-internal.h"
#include "bulkhelp.h"
-/* USE_SSE2 indicates whether to compile with AMD64 SSE2 code. */
+/* USE_SSE2 indicates whether to compile with x86-64 SSE2 code. */
#undef USE_SSE2
#if defined(__x86_64__) && (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \
defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS))
# define USE_SSE2 1
#endif
-/* USE_AVX2 indicates whether to compile with AMD64 AVX2 code. */
+/* USE_AVX2 indicates whether to compile with x86-64 AVX2 code. */
#undef USE_AVX2
#if defined(__x86_64__) && (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \
defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS))
# if defined(ENABLE_AVX2_SUPPORT)
# define USE_AVX2 1
# endif
#endif
+/* USE_AVX512 indicates whether to compile with x86 AVX512 code. */
+#undef USE_AVX512
+#if (defined(__x86_64) || defined(__i386)) && \
+ defined(HAVE_COMPATIBLE_CC_X86_AVX512_INTRINSICS)
+# if defined(ENABLE_AVX512_SUPPORT)
+# define USE_AVX512 1
+# endif
+#endif
+
/* USE_NEON indicates whether to enable ARM NEON assembly code. */
#undef USE_NEON
#ifdef ENABLE_NEON_SUPPORT
# if defined(HAVE_ARM_ARCH_V6) && defined(__ARMEL__) \
&& defined(HAVE_COMPATIBLE_GCC_ARM_PLATFORM_AS) \
&& defined(HAVE_GCC_INLINE_ASM_NEON)
# define USE_NEON 1
# endif
#endif /*ENABLE_NEON_SUPPORT*/
/* Number of rounds per Serpent encrypt/decrypt operation. */
#define ROUNDS 32
/* Magic number, used during generating of the subkeys. */
#define PHI 0x9E3779B9
/* Serpent works on 128-bit blocks. */
typedef u32 serpent_block_t[4];
/* Serpent key, provided by the user. If the original key is shorter
than 256 bits, it is padded. */
typedef u32 serpent_key_t[8];
/* The key schedule consists of 33 128-bit subkeys. */
typedef u32 serpent_subkeys_t[ROUNDS + 1][4];
/* A Serpent context. */
typedef struct serpent_context
{
serpent_subkeys_t keys; /* Generated subkeys. */
#ifdef USE_AVX2
int use_avx2;
#endif
+#ifdef USE_AVX512
+ int use_avx512;
+#endif
#ifdef USE_NEON
int use_neon;
#endif
} serpent_context_t;
/* Assembly implementations use the SystemV ABI; on Win64, ABI conversion
 * and additional stack space to store XMM6-XMM15 are needed. */
#undef ASM_FUNC_ABI
#if defined(USE_SSE2) || defined(USE_AVX2)
# ifdef HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS
# define ASM_FUNC_ABI __attribute__((sysv_abi))
# else
# define ASM_FUNC_ABI
# endif
#endif
#ifdef USE_SSE2
/* Assembler implementations of Serpent using SSE2.  Process 8 blocks in
parallel.
*/
extern void _gcry_serpent_sse2_ctr_enc(serpent_context_t *ctx,
unsigned char *out,
const unsigned char *in,
unsigned char *ctr) ASM_FUNC_ABI;
extern void _gcry_serpent_sse2_cbc_dec(serpent_context_t *ctx,
unsigned char *out,
const unsigned char *in,
unsigned char *iv) ASM_FUNC_ABI;
extern void _gcry_serpent_sse2_cfb_dec(serpent_context_t *ctx,
unsigned char *out,
const unsigned char *in,
unsigned char *iv) ASM_FUNC_ABI;
extern void _gcry_serpent_sse2_ocb_enc(serpent_context_t *ctx,
unsigned char *out,
const unsigned char *in,
unsigned char *offset,
unsigned char *checksum,
const u64 Ls[8]) ASM_FUNC_ABI;
extern void _gcry_serpent_sse2_ocb_dec(serpent_context_t *ctx,
unsigned char *out,
const unsigned char *in,
unsigned char *offset,
unsigned char *checksum,
const u64 Ls[8]) ASM_FUNC_ABI;
extern void _gcry_serpent_sse2_ocb_auth(serpent_context_t *ctx,
const unsigned char *abuf,
unsigned char *offset,
unsigned char *checksum,
const u64 Ls[8]) ASM_FUNC_ABI;
extern void _gcry_serpent_sse2_blk8(const serpent_context_t *c, byte *out,
const byte *in, int encrypt) ASM_FUNC_ABI;
#endif
#ifdef USE_AVX2
/* Assembler implementations of Serpent using AVX2.  Process 16 blocks in
parallel.
*/
extern void _gcry_serpent_avx2_ctr_enc(serpent_context_t *ctx,
unsigned char *out,
const unsigned char *in,
unsigned char *ctr) ASM_FUNC_ABI;
extern void _gcry_serpent_avx2_cbc_dec(serpent_context_t *ctx,
unsigned char *out,
const unsigned char *in,
unsigned char *iv) ASM_FUNC_ABI;
extern void _gcry_serpent_avx2_cfb_dec(serpent_context_t *ctx,
unsigned char *out,
const unsigned char *in,
unsigned char *iv) ASM_FUNC_ABI;
extern void _gcry_serpent_avx2_ocb_enc(serpent_context_t *ctx,
unsigned char *out,
const unsigned char *in,
unsigned char *offset,
unsigned char *checksum,
const u64 Ls[16]) ASM_FUNC_ABI;
extern void _gcry_serpent_avx2_ocb_dec(serpent_context_t *ctx,
unsigned char *out,
const unsigned char *in,
unsigned char *offset,
unsigned char *checksum,
const u64 Ls[16]) ASM_FUNC_ABI;
extern void _gcry_serpent_avx2_ocb_auth(serpent_context_t *ctx,
const unsigned char *abuf,
unsigned char *offset,
unsigned char *checksum,
const u64 Ls[16]) ASM_FUNC_ABI;
extern void _gcry_serpent_avx2_blk16(const serpent_context_t *c, byte *out,
const byte *in, int encrypt) ASM_FUNC_ABI;
#endif
+#ifdef USE_AVX512
+/* Intrinsics implementation of Serpent using AVX512.  Processes 32
+   blocks in parallel.
+ */
+extern void _gcry_serpent_avx512_cbc_dec(const void *ctx,
+ unsigned char *out,
+ const unsigned char *in,
+ unsigned char *iv);
+
+extern void _gcry_serpent_avx512_cfb_dec(const void *ctx,
+ unsigned char *out,
+ const unsigned char *in,
+ unsigned char *iv);
+
+extern void _gcry_serpent_avx512_ctr_enc(const void *ctx,
+ unsigned char *out,
+ const unsigned char *in,
+ unsigned char *ctr);
+
+extern void _gcry_serpent_avx512_ocb_crypt(const void *ctx,
+ unsigned char *out,
+ const unsigned char *in,
+ unsigned char *offset,
+ unsigned char *checksum,
+ const ocb_L_uintptr_t Ls[32],
+ int encrypt);
+
+extern void _gcry_serpent_avx512_blk32(const void *c, byte *out,
+ const byte *in,
+ int encrypt);
+#endif
+
#ifdef USE_NEON
/* Assembler implementations of Serpent using ARM NEON.  Process 8 blocks in
parallel.
*/
extern void _gcry_serpent_neon_ctr_enc(serpent_context_t *ctx,
unsigned char *out,
const unsigned char *in,
unsigned char *ctr);
extern void _gcry_serpent_neon_cbc_dec(serpent_context_t *ctx,
unsigned char *out,
const unsigned char *in,
unsigned char *iv);
extern void _gcry_serpent_neon_cfb_dec(serpent_context_t *ctx,
unsigned char *out,
const unsigned char *in,
unsigned char *iv);
extern void _gcry_serpent_neon_ocb_enc(serpent_context_t *ctx,
unsigned char *out,
const unsigned char *in,
unsigned char *offset,
unsigned char *checksum,
const void *Ls[8]);
extern void _gcry_serpent_neon_ocb_dec(serpent_context_t *ctx,
unsigned char *out,
const unsigned char *in,
unsigned char *offset,
unsigned char *checksum,
const void *Ls[8]);
extern void _gcry_serpent_neon_ocb_auth(serpent_context_t *ctx,
const unsigned char *abuf,
unsigned char *offset,
unsigned char *checksum,
const void *Ls[8]);
extern void _gcry_serpent_neon_blk8(const serpent_context_t *c, byte *out,
const byte *in, int encrypt);
#endif
/* Prototypes. */
static const char *serpent_test (void);
static void _gcry_serpent_ctr_enc (void *context, unsigned char *ctr,
void *outbuf_arg, const void *inbuf_arg,
size_t nblocks);
static void _gcry_serpent_cbc_dec (void *context, unsigned char *iv,
void *outbuf_arg, const void *inbuf_arg,
size_t nblocks);
static void _gcry_serpent_cfb_dec (void *context, unsigned char *iv,
void *outbuf_arg, const void *inbuf_arg,
size_t nblocks);
static size_t _gcry_serpent_ocb_crypt (gcry_cipher_hd_t c, void *outbuf_arg,
const void *inbuf_arg, size_t nblocks,
int encrypt);
static size_t _gcry_serpent_ocb_auth (gcry_cipher_hd_t c, const void *abuf_arg,
size_t nblocks);
static void _gcry_serpent_xts_crypt (void *context, unsigned char *tweak,
void *outbuf_arg, const void *inbuf_arg,
size_t nblocks, int encrypt);
static void _gcry_serpent_ecb_crypt (void *context, void *outbuf_arg,
const void *inbuf_arg, size_t nblocks,
int encrypt);
/*
 * These are the S-Boxes of Serpent from the following research paper.
*
* D. A. Osvik, “Speeding up Serpent,” in Third AES Candidate Conference,
 * (New York, New York, USA), pp. 317–329, National Institute of Standards and
* Technology, 2000.
*
* Paper is also available at: http://www.ii.uib.no/~osvik/pub/aes3.pdf
*
*/
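/* Each SBOX macro below evaluates the 4x4-bit S-box in bitsliced form:
   bit position J of the inputs r0..r3 together forms one 4-bit S-box
   input, so all 32 bit positions of a block are substituted in
   parallel using only boolean operations (constant time, without
   table lookups). */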
#define SBOX0(r0, r1, r2, r3, w, x, y, z) \
{ \
u32 r4; \
\
r3 ^= r0; r4 = r1; \
r1 &= r3; r4 ^= r2; \
r1 ^= r0; r0 |= r3; \
r0 ^= r4; r4 ^= r3; \
r3 ^= r2; r2 |= r1; \
r2 ^= r4; r4 = ~r4; \
r4 |= r1; r1 ^= r3; \
r1 ^= r4; r3 |= r0; \
r1 ^= r3; r4 ^= r3; \
\
w = r1; x = r4; y = r2; z = r0; \
}
#define SBOX0_INVERSE(r0, r1, r2, r3, w, x, y, z) \
{ \
u32 r4; \
\
r2 = ~r2; r4 = r1; \
r1 |= r0; r4 = ~r4; \
r1 ^= r2; r2 |= r4; \
r1 ^= r3; r0 ^= r4; \
r2 ^= r0; r0 &= r3; \
r4 ^= r0; r0 |= r1; \
r0 ^= r2; r3 ^= r4; \
r2 ^= r1; r3 ^= r0; \
r3 ^= r1; \
r2 &= r3; \
r4 ^= r2; \
\
w = r0; x = r4; y = r1; z = r3; \
}
#define SBOX1(r0, r1, r2, r3, w, x, y, z) \
{ \
u32 r4; \
\
r0 = ~r0; r2 = ~r2; \
r4 = r0; r0 &= r1; \
r2 ^= r0; r0 |= r3; \
r3 ^= r2; r1 ^= r0; \
r0 ^= r4; r4 |= r1; \
r1 ^= r3; r2 |= r0; \
r2 &= r4; r0 ^= r1; \
r1 &= r2; \
r1 ^= r0; r0 &= r2; \
r0 ^= r4; \
\
w = r2; x = r0; y = r3; z = r1; \
}
#define SBOX1_INVERSE(r0, r1, r2, r3, w, x, y, z) \
{ \
u32 r4; \
\
r4 = r1; r1 ^= r3; \
r3 &= r1; r4 ^= r2; \
r3 ^= r0; r0 |= r1; \
r2 ^= r3; r0 ^= r4; \
r0 |= r2; r1 ^= r3; \
r0 ^= r1; r1 |= r3; \
r1 ^= r0; r4 = ~r4; \
r4 ^= r1; r1 |= r0; \
r1 ^= r0; \
r1 |= r4; \
r3 ^= r1; \
\
w = r4; x = r0; y = r3; z = r2; \
}
#define SBOX2(r0, r1, r2, r3, w, x, y, z) \
{ \
u32 r4; \
\
r4 = r0; r0 &= r2; \
r0 ^= r3; r2 ^= r1; \
r2 ^= r0; r3 |= r4; \
r3 ^= r1; r4 ^= r2; \
r1 = r3; r3 |= r4; \
r3 ^= r0; r0 &= r1; \
r4 ^= r0; r1 ^= r3; \
r1 ^= r4; r4 = ~r4; \
\
w = r2; x = r3; y = r1; z = r4; \
}
#define SBOX2_INVERSE(r0, r1, r2, r3, w, x, y, z) \
{ \
u32 r4; \
\
r2 ^= r3; r3 ^= r0; \
r4 = r3; r3 &= r2; \
r3 ^= r1; r1 |= r2; \
r1 ^= r4; r4 &= r3; \
r2 ^= r3; r4 &= r0; \
r4 ^= r2; r2 &= r1; \
r2 |= r0; r3 = ~r3; \
r2 ^= r3; r0 ^= r3; \
r0 &= r1; r3 ^= r4; \
r3 ^= r0; \
\
w = r1; x = r4; y = r2; z = r3; \
}
#define SBOX3(r0, r1, r2, r3, w, x, y, z) \
{ \
u32 r4; \
\
r4 = r0; r0 |= r3; \
r3 ^= r1; r1 &= r4; \
r4 ^= r2; r2 ^= r3; \
r3 &= r0; r4 |= r1; \
r3 ^= r4; r0 ^= r1; \
r4 &= r0; r1 ^= r3; \
r4 ^= r2; r1 |= r0; \
r1 ^= r2; r0 ^= r3; \
r2 = r1; r1 |= r3; \
r1 ^= r0; \
\
w = r1; x = r2; y = r3; z = r4; \
}
#define SBOX3_INVERSE(r0, r1, r2, r3, w, x, y, z) \
{ \
u32 r4; \
\
r4 = r2; r2 ^= r1; \
r0 ^= r2; r4 &= r2; \
r4 ^= r0; r0 &= r1; \
r1 ^= r3; r3 |= r4; \
r2 ^= r3; r0 ^= r3; \
r1 ^= r4; r3 &= r2; \
r3 ^= r1; r1 ^= r0; \
r1 |= r2; r0 ^= r3; \
r1 ^= r4; \
r0 ^= r1; \
\
w = r2; x = r1; y = r3; z = r0; \
}
#define SBOX4(r0, r1, r2, r3, w, x, y, z) \
{ \
u32 r4; \
\
r1 ^= r3; r3 = ~r3; \
r2 ^= r3; r3 ^= r0; \
r4 = r1; r1 &= r3; \
r1 ^= r2; r4 ^= r3; \
r0 ^= r4; r2 &= r4; \
r2 ^= r0; r0 &= r1; \
r3 ^= r0; r4 |= r1; \
r4 ^= r0; r0 |= r3; \
r0 ^= r2; r2 &= r3; \
r0 = ~r0; r4 ^= r2; \
\
w = r1; x = r4; y = r0; z = r3; \
}
#define SBOX4_INVERSE(r0, r1, r2, r3, w, x, y, z) \
{ \
u32 r4; \
\
r4 = r2; r2 &= r3; \
r2 ^= r1; r1 |= r3; \
r1 &= r0; r4 ^= r2; \
r4 ^= r1; r1 &= r2; \
r0 = ~r0; r3 ^= r4; \
r1 ^= r3; r3 &= r0; \
r3 ^= r2; r0 ^= r1; \
r2 &= r0; r3 ^= r0; \
r2 ^= r4; \
r2 |= r3; r3 ^= r0; \
r2 ^= r1; \
\
w = r0; x = r3; y = r2; z = r4; \
}
#define SBOX5(r0, r1, r2, r3, w, x, y, z) \
{ \
u32 r4; \
\
r0 ^= r1; r1 ^= r3; \
r3 = ~r3; r4 = r1; \
r1 &= r0; r2 ^= r3; \
r1 ^= r2; r2 |= r4; \
r4 ^= r3; r3 &= r1; \
r3 ^= r0; r4 ^= r1; \
r4 ^= r2; r2 ^= r0; \
r0 &= r3; r2 = ~r2; \
r0 ^= r4; r4 |= r3; \
r2 ^= r4; \
\
w = r1; x = r3; y = r0; z = r2; \
}
#define SBOX5_INVERSE(r0, r1, r2, r3, w, x, y, z) \
{ \
u32 r4; \
\
r1 = ~r1; r4 = r3; \
r2 ^= r1; r3 |= r0; \
r3 ^= r2; r2 |= r1; \
r2 &= r0; r4 ^= r3; \
r2 ^= r4; r4 |= r0; \
r4 ^= r1; r1 &= r2; \
r1 ^= r3; r4 ^= r2; \
r3 &= r4; r4 ^= r1; \
r3 ^= r4; r4 = ~r4; \
r3 ^= r0; \
\
w = r1; x = r4; y = r3; z = r2; \
}
#define SBOX6(r0, r1, r2, r3, w, x, y, z) \
{ \
u32 r4; \
\
r2 = ~r2; r4 = r3; \
r3 &= r0; r0 ^= r4; \
r3 ^= r2; r2 |= r4; \
r1 ^= r3; r2 ^= r0; \
r0 |= r1; r2 ^= r1; \
r4 ^= r0; r0 |= r3; \
r0 ^= r2; r4 ^= r3; \
r4 ^= r0; r3 = ~r3; \
r2 &= r4; \
r2 ^= r3; \
\
w = r0; x = r1; y = r4; z = r2; \
}
#define SBOX6_INVERSE(r0, r1, r2, r3, w, x, y, z) \
{ \
u32 r4; \
\
r0 ^= r2; r4 = r2; \
r2 &= r0; r4 ^= r3; \
r2 = ~r2; r3 ^= r1; \
r2 ^= r3; r4 |= r0; \
r0 ^= r2; r3 ^= r4; \
r4 ^= r1; r1 &= r3; \
r1 ^= r0; r0 ^= r3; \
r0 |= r2; r3 ^= r1; \
r4 ^= r0; \
\
w = r1; x = r2; y = r4; z = r3; \
}
#define SBOX7(r0, r1, r2, r3, w, x, y, z) \
{ \
u32 r4; \
\
r4 = r1; r1 |= r2; \
r1 ^= r3; r4 ^= r2; \
r2 ^= r1; r3 |= r4; \
r3 &= r0; r4 ^= r2; \
r3 ^= r1; r1 |= r4; \
r1 ^= r0; r0 |= r4; \
r0 ^= r2; r1 ^= r4; \
r2 ^= r1; r1 &= r0; \
r1 ^= r4; r2 = ~r2; \
r2 |= r0; \
r4 ^= r2; \
\
w = r4; x = r3; y = r1; z = r0; \
}
#define SBOX7_INVERSE(r0, r1, r2, r3, w, x, y, z) \
{ \
u32 r4; \
\
r4 = r2; r2 ^= r0; \
r0 &= r3; r4 |= r3; \
r2 = ~r2; r3 ^= r1; \
r1 |= r0; r0 ^= r2; \
r2 &= r4; r3 &= r4; \
r1 ^= r2; r2 ^= r0; \
r0 |= r2; r4 ^= r1; \
r0 ^= r3; r3 ^= r4; \
r4 |= r0; r3 ^= r2; \
r4 ^= r2; \
\
w = r3; x = r0; y = r1; z = r4; \
}
/* XOR BLOCK1 into BLOCK0. */
#define BLOCK_XOR(block0, block1) \
{ \
block0[0] ^= block1[0]; \
block0[1] ^= block1[1]; \
block0[2] ^= block1[2]; \
block0[3] ^= block1[3]; \
}
/* Copy BLOCK_SRC to BLOCK_DST. */
#define BLOCK_COPY(block_dst, block_src) \
{ \
block_dst[0] = block_src[0]; \
block_dst[1] = block_src[1]; \
block_dst[2] = block_src[2]; \
block_dst[3] = block_src[3]; \
}
/* Apply SBOX number WHICH to the block found in ARRAY0, writing
   the output to the block found in ARRAY1. */
#define SBOX(which, array0, array1) \
SBOX##which (array0[0], array0[1], array0[2], array0[3], \
array1[0], array1[1], array1[2], array1[3]);
/* Apply inverse SBOX number WHICH to the block found in ARRAY0, writing
   the output to the block found in ARRAY1. */
#define SBOX_INVERSE(which, array0, array1) \
SBOX##which##_INVERSE (array0[0], array0[1], array0[2], array0[3], \
array1[0], array1[1], array1[2], array1[3]);
/* Apply the linear transformation to BLOCK. */
#define LINEAR_TRANSFORMATION(block) \
{ \
block[0] = rol (block[0], 13); \
block[2] = rol (block[2], 3); \
block[1] = block[1] ^ block[0] ^ block[2]; \
block[3] = block[3] ^ block[2] ^ (block[0] << 3); \
block[1] = rol (block[1], 1); \
block[3] = rol (block[3], 7); \
block[0] = block[0] ^ block[1] ^ block[3]; \
block[2] = block[2] ^ block[3] ^ (block[1] << 7); \
block[0] = rol (block[0], 5); \
block[2] = rol (block[2], 22); \
}
/* Apply the inverse linear transformation to BLOCK. */
#define LINEAR_TRANSFORMATION_INVERSE(block) \
{ \
block[2] = ror (block[2], 22); \
block[0] = ror (block[0] , 5); \
block[2] = block[2] ^ block[3] ^ (block[1] << 7); \
block[0] = block[0] ^ block[1] ^ block[3]; \
block[3] = ror (block[3], 7); \
block[1] = ror (block[1], 1); \
block[3] = block[3] ^ block[2] ^ (block[0] << 3); \
block[1] = block[1] ^ block[0] ^ block[2]; \
block[2] = ror (block[2], 3); \
block[0] = ror (block[0], 13); \
}
/* Apply a Serpent round to BLOCK, using the SBOX number WHICH and the
subkeys contained in SUBKEYS. Use BLOCK_TMP as temporary storage.
This macro increments `round'. */
#define ROUND(which, subkeys, block, block_tmp) \
{ \
BLOCK_XOR (block, subkeys[round]); \
round++; \
SBOX (which, block, block_tmp); \
LINEAR_TRANSFORMATION (block_tmp); \
BLOCK_COPY (block, block_tmp); \
}
/* Apply the last Serpent round to BLOCK, using the SBOX number WHICH
   and the subkeys contained in SUBKEYS.  Use BLOCK_TMP as temporary
   storage.  The result will be stored in BLOCK_TMP.  This macro
   increments `round' twice. */
#define ROUND_LAST(which, subkeys, block, block_tmp) \
{ \
BLOCK_XOR (block, subkeys[round]); \
round++; \
SBOX (which, block, block_tmp); \
BLOCK_XOR (block_tmp, subkeys[round]); \
round++; \
}
/* Apply an inverse Serpent round to BLOCK, using the SBOX number
   WHICH and the subkeys contained in SUBKEYS.  Use BLOCK_TMP as
   temporary storage.  This macro decrements `round'. */
#define ROUND_INVERSE(which, subkey, block, block_tmp) \
{ \
LINEAR_TRANSFORMATION_INVERSE (block); \
SBOX_INVERSE (which, block, block_tmp); \
BLOCK_XOR (block_tmp, subkey[round]); \
round--; \
BLOCK_COPY (block, block_tmp); \
}
/* Apply the first inverse Serpent round to BLOCK, using the SBOX
   number WHICH and the subkeys contained in SUBKEYS.  Use BLOCK_TMP
   as temporary storage.  The result will be stored in BLOCK_TMP.
   This macro decrements `round' twice. */
#define ROUND_FIRST_INVERSE(which, subkeys, block, block_tmp) \
{ \
BLOCK_XOR (block, subkeys[round]); \
round--; \
SBOX_INVERSE (which, block, block_tmp); \
BLOCK_XOR (block_tmp, subkeys[round]); \
round--; \
}
/* Convert the user provided key KEY of KEY_LENGTH bytes into the
internally used format. */
static void
serpent_key_prepare (const byte *key, unsigned int key_length,
serpent_key_t key_prepared)
{
int i;
/* Copy key. */
key_length /= 4;
for (i = 0; i < key_length; i++)
key_prepared[i] = buf_get_le32 (key + i * 4);
if (i < 8)
{
/* Key must be padded according to the Serpent
specification. */
key_prepared[i] = 0x00000001;
for (i++; i < 8; i++)
key_prepared[i] = 0;
}
}
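/* For example, per the padding rule above, a 128-bit key yields
   key_prepared[0..3] = the four little-endian key words,
   key_prepared[4] = 0x00000001 and key_prepared[5..7] = 0. */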
/* Derive the 33 subkeys from KEY and store them in SUBKEYS. */
static void
serpent_subkeys_generate (serpent_key_t key, serpent_subkeys_t subkeys)
{
u32 w[8]; /* The `prekey'. */
u32 ws[4];
u32 wt[4];
/* Initialize with key values. */
w[0] = key[0];
w[1] = key[1];
w[2] = key[2];
w[3] = key[3];
w[4] = key[4];
w[5] = key[5];
w[6] = key[6];
w[7] = key[7];
/* Expand to intermediate key using the affine recurrence. */
#define EXPAND_KEY4(wo, r) \
wo[0] = w[(r+0)%8] = \
rol (w[(r+0)%8] ^ w[(r+3)%8] ^ w[(r+5)%8] ^ w[(r+7)%8] ^ PHI ^ (r+0), 11); \
wo[1] = w[(r+1)%8] = \
rol (w[(r+1)%8] ^ w[(r+4)%8] ^ w[(r+6)%8] ^ w[(r+0)%8] ^ PHI ^ (r+1), 11); \
wo[2] = w[(r+2)%8] = \
rol (w[(r+2)%8] ^ w[(r+5)%8] ^ w[(r+7)%8] ^ w[(r+1)%8] ^ PHI ^ (r+2), 11); \
wo[3] = w[(r+3)%8] = \
rol (w[(r+3)%8] ^ w[(r+6)%8] ^ w[(r+0)%8] ^ w[(r+2)%8] ^ PHI ^ (r+3), 11);
#define EXPAND_KEY(r) \
EXPAND_KEY4(ws, (r)); \
EXPAND_KEY4(wt, (r + 4));
/* Calculate subkeys via S-Boxes, in bitslice mode. */
EXPAND_KEY (0); SBOX (3, ws, subkeys[0]); SBOX (2, wt, subkeys[1]);
EXPAND_KEY (8); SBOX (1, ws, subkeys[2]); SBOX (0, wt, subkeys[3]);
EXPAND_KEY (16); SBOX (7, ws, subkeys[4]); SBOX (6, wt, subkeys[5]);
EXPAND_KEY (24); SBOX (5, ws, subkeys[6]); SBOX (4, wt, subkeys[7]);
EXPAND_KEY (32); SBOX (3, ws, subkeys[8]); SBOX (2, wt, subkeys[9]);
EXPAND_KEY (40); SBOX (1, ws, subkeys[10]); SBOX (0, wt, subkeys[11]);
EXPAND_KEY (48); SBOX (7, ws, subkeys[12]); SBOX (6, wt, subkeys[13]);
EXPAND_KEY (56); SBOX (5, ws, subkeys[14]); SBOX (4, wt, subkeys[15]);
EXPAND_KEY (64); SBOX (3, ws, subkeys[16]); SBOX (2, wt, subkeys[17]);
EXPAND_KEY (72); SBOX (1, ws, subkeys[18]); SBOX (0, wt, subkeys[19]);
EXPAND_KEY (80); SBOX (7, ws, subkeys[20]); SBOX (6, wt, subkeys[21]);
EXPAND_KEY (88); SBOX (5, ws, subkeys[22]); SBOX (4, wt, subkeys[23]);
EXPAND_KEY (96); SBOX (3, ws, subkeys[24]); SBOX (2, wt, subkeys[25]);
EXPAND_KEY (104); SBOX (1, ws, subkeys[26]); SBOX (0, wt, subkeys[27]);
EXPAND_KEY (112); SBOX (7, ws, subkeys[28]); SBOX (6, wt, subkeys[29]);
EXPAND_KEY (120); SBOX (5, ws, subkeys[30]); SBOX (4, wt, subkeys[31]);
EXPAND_KEY4 (ws, 128); SBOX (3, ws, subkeys[32]);
wipememory (ws, sizeof (ws));
wipememory (wt, sizeof (wt));
wipememory (w, sizeof (w));
}
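/* Written out, the affine recurrence implemented by EXPAND_KEY4 is

     w_i = rol (w_{i-8} ^ w_{i-5} ^ w_{i-3} ^ w_{i-1} ^ PHI ^ i, 11)

   for i = 0..131, with w_{-8}..w_{-1} being the eight key words; the
   w[(r+k)%8] indexing above is a sliding window over that sequence. */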
/* Initialize CONTEXT with the key KEY of KEY_LENGTH bytes. */
static gcry_err_code_t
serpent_setkey_internal (serpent_context_t *context,
const byte *key, unsigned int key_length)
{
serpent_key_t key_prepared;
if (key_length > 32)
return GPG_ERR_INV_KEYLEN;
serpent_key_prepare (key, key_length, key_prepared);
serpent_subkeys_generate (key_prepared, context->keys);
+#ifdef USE_AVX512
+ context->use_avx512 = 0;
+ if ((_gcry_get_hw_features () & HWF_INTEL_AVX512))
+ {
+ context->use_avx512 = 1;
+ }
+#endif
+
#ifdef USE_AVX2
context->use_avx2 = 0;
if ((_gcry_get_hw_features () & HWF_INTEL_AVX2))
{
context->use_avx2 = 1;
}
#endif
#ifdef USE_NEON
context->use_neon = 0;
if ((_gcry_get_hw_features () & HWF_ARM_NEON))
{
context->use_neon = 1;
}
#endif
wipememory (key_prepared, sizeof(key_prepared));
return 0;
}
/* Initialize CTX with the key KEY of KEY_LENGTH bytes. */
static gcry_err_code_t
serpent_setkey (void *ctx,
const byte *key, unsigned int key_length,
cipher_bulk_ops_t *bulk_ops)
{
serpent_context_t *context = ctx;
static const char *serpent_test_ret;
static int serpent_init_done;
gcry_err_code_t ret = GPG_ERR_NO_ERROR;
if (! serpent_init_done)
{
/* Execute a self-test the first time Serpent is used. */
serpent_init_done = 1;
serpent_test_ret = serpent_test ();
if (serpent_test_ret)
log_error ("Serpent test failure: %s\n", serpent_test_ret);
}
/* Setup bulk encryption routines. */
memset (bulk_ops, 0, sizeof(*bulk_ops));
bulk_ops->cbc_dec = _gcry_serpent_cbc_dec;
bulk_ops->cfb_dec = _gcry_serpent_cfb_dec;
bulk_ops->ctr_enc = _gcry_serpent_ctr_enc;
bulk_ops->ocb_crypt = _gcry_serpent_ocb_crypt;
bulk_ops->ocb_auth = _gcry_serpent_ocb_auth;
bulk_ops->xts_crypt = _gcry_serpent_xts_crypt;
bulk_ops->ecb_crypt = _gcry_serpent_ecb_crypt;
if (serpent_test_ret)
ret = GPG_ERR_SELFTEST_FAILED;
else
ret = serpent_setkey_internal (context, key, key_length);
return ret;
}
static void
serpent_encrypt_internal (serpent_context_t *context,
const byte *input, byte *output)
{
serpent_block_t b, b_next;
int round = 0;
b[0] = buf_get_le32 (input + 0);
b[1] = buf_get_le32 (input + 4);
b[2] = buf_get_le32 (input + 8);
b[3] = buf_get_le32 (input + 12);
ROUND (0, context->keys, b, b_next);
ROUND (1, context->keys, b, b_next);
ROUND (2, context->keys, b, b_next);
ROUND (3, context->keys, b, b_next);
ROUND (4, context->keys, b, b_next);
ROUND (5, context->keys, b, b_next);
ROUND (6, context->keys, b, b_next);
ROUND (7, context->keys, b, b_next);
ROUND (0, context->keys, b, b_next);
ROUND (1, context->keys, b, b_next);
ROUND (2, context->keys, b, b_next);
ROUND (3, context->keys, b, b_next);
ROUND (4, context->keys, b, b_next);
ROUND (5, context->keys, b, b_next);
ROUND (6, context->keys, b, b_next);
ROUND (7, context->keys, b, b_next);
ROUND (0, context->keys, b, b_next);
ROUND (1, context->keys, b, b_next);
ROUND (2, context->keys, b, b_next);
ROUND (3, context->keys, b, b_next);
ROUND (4, context->keys, b, b_next);
ROUND (5, context->keys, b, b_next);
ROUND (6, context->keys, b, b_next);
ROUND (7, context->keys, b, b_next);
ROUND (0, context->keys, b, b_next);
ROUND (1, context->keys, b, b_next);
ROUND (2, context->keys, b, b_next);
ROUND (3, context->keys, b, b_next);
ROUND (4, context->keys, b, b_next);
ROUND (5, context->keys, b, b_next);
ROUND (6, context->keys, b, b_next);
ROUND_LAST (7, context->keys, b, b_next);
buf_put_le32 (output + 0, b_next[0]);
buf_put_le32 (output + 4, b_next[1]);
buf_put_le32 (output + 8, b_next[2]);
buf_put_le32 (output + 12, b_next[3]);
}
static void
serpent_decrypt_internal (serpent_context_t *context,
const byte *input, byte *output)
{
serpent_block_t b, b_next;
int round = ROUNDS;
b_next[0] = buf_get_le32 (input + 0);
b_next[1] = buf_get_le32 (input + 4);
b_next[2] = buf_get_le32 (input + 8);
b_next[3] = buf_get_le32 (input + 12);
ROUND_FIRST_INVERSE (7, context->keys, b_next, b);
ROUND_INVERSE (6, context->keys, b, b_next);
ROUND_INVERSE (5, context->keys, b, b_next);
ROUND_INVERSE (4, context->keys, b, b_next);
ROUND_INVERSE (3, context->keys, b, b_next);
ROUND_INVERSE (2, context->keys, b, b_next);
ROUND_INVERSE (1, context->keys, b, b_next);
ROUND_INVERSE (0, context->keys, b, b_next);
ROUND_INVERSE (7, context->keys, b, b_next);
ROUND_INVERSE (6, context->keys, b, b_next);
ROUND_INVERSE (5, context->keys, b, b_next);
ROUND_INVERSE (4, context->keys, b, b_next);
ROUND_INVERSE (3, context->keys, b, b_next);
ROUND_INVERSE (2, context->keys, b, b_next);
ROUND_INVERSE (1, context->keys, b, b_next);
ROUND_INVERSE (0, context->keys, b, b_next);
ROUND_INVERSE (7, context->keys, b, b_next);
ROUND_INVERSE (6, context->keys, b, b_next);
ROUND_INVERSE (5, context->keys, b, b_next);
ROUND_INVERSE (4, context->keys, b, b_next);
ROUND_INVERSE (3, context->keys, b, b_next);
ROUND_INVERSE (2, context->keys, b, b_next);
ROUND_INVERSE (1, context->keys, b, b_next);
ROUND_INVERSE (0, context->keys, b, b_next);
ROUND_INVERSE (7, context->keys, b, b_next);
ROUND_INVERSE (6, context->keys, b, b_next);
ROUND_INVERSE (5, context->keys, b, b_next);
ROUND_INVERSE (4, context->keys, b, b_next);
ROUND_INVERSE (3, context->keys, b, b_next);
ROUND_INVERSE (2, context->keys, b, b_next);
ROUND_INVERSE (1, context->keys, b, b_next);
ROUND_INVERSE (0, context->keys, b, b_next);
buf_put_le32 (output + 0, b_next[0]);
buf_put_le32 (output + 4, b_next[1]);
buf_put_le32 (output + 8, b_next[2]);
buf_put_le32 (output + 12, b_next[3]);
}
static unsigned int
serpent_encrypt (void *ctx, byte *buffer_out, const byte *buffer_in)
{
serpent_context_t *context = ctx;
serpent_encrypt_internal (context, buffer_in, buffer_out);
return /*burn_stack*/ (2 * sizeof (serpent_block_t));
}
static unsigned int
serpent_decrypt (void *ctx, byte *buffer_out, const byte *buffer_in)
{
serpent_context_t *context = ctx;
serpent_decrypt_internal (context, buffer_in, buffer_out);
return /*burn_stack*/ (2 * sizeof (serpent_block_t));
}
/* Bulk encryption of complete blocks in CTR mode. This function is only
intended for the bulk encryption feature of cipher.c. CTR is expected to be
of size sizeof(serpent_block_t). */
static void
_gcry_serpent_ctr_enc(void *context, unsigned char *ctr,
void *outbuf_arg, const void *inbuf_arg,
size_t nblocks)
{
serpent_context_t *ctx = context;
unsigned char *outbuf = outbuf_arg;
const unsigned char *inbuf = inbuf_arg;
unsigned char tmpbuf[sizeof(serpent_block_t)];
int burn_stack_depth = 2 * sizeof (serpent_block_t);
+#ifdef USE_AVX512
+ if (ctx->use_avx512)
+ {
+ int did_use_avx512 = 0;
+
+ /* Process data in 32 block chunks. */
+ while (nblocks >= 32)
+ {
+ _gcry_serpent_avx512_ctr_enc(ctx, outbuf, inbuf, ctr);
+
+ nblocks -= 32;
+ outbuf += 32 * sizeof(serpent_block_t);
+ inbuf += 32 * sizeof(serpent_block_t);
+ did_use_avx512 = 1;
+ }
+
+ if (did_use_avx512)
+ {
+ /* serpent-avx512 code does not use stack */
+ if (nblocks == 0)
+ burn_stack_depth = 0;
+ }
+
+ /* Use generic/avx2/sse2 code to handle smaller chunks... */
+ /* TODO: use caching instead? */
+ }
+#endif
+
#ifdef USE_AVX2
if (ctx->use_avx2)
{
int did_use_avx2 = 0;
/* Process data in 16 block chunks. */
while (nblocks >= 16)
{
_gcry_serpent_avx2_ctr_enc(ctx, outbuf, inbuf, ctr);
nblocks -= 16;
outbuf += 16 * sizeof(serpent_block_t);
inbuf += 16 * sizeof(serpent_block_t);
did_use_avx2 = 1;
}
if (did_use_avx2)
{
/* serpent-avx2 assembly code does not use stack */
if (nblocks == 0)
burn_stack_depth = 0;
}
/* Use generic/sse2 code to handle smaller chunks... */
/* TODO: use caching instead? */
}
#endif
#ifdef USE_SSE2
{
int did_use_sse2 = 0;
/* Process data in 8 block chunks. */
while (nblocks >= 8)
{
_gcry_serpent_sse2_ctr_enc(ctx, outbuf, inbuf, ctr);
nblocks -= 8;
outbuf += 8 * sizeof(serpent_block_t);
inbuf += 8 * sizeof(serpent_block_t);
did_use_sse2 = 1;
}
if (did_use_sse2)
{
/* serpent-sse2 assembly code does not use stack */
if (nblocks == 0)
burn_stack_depth = 0;
}
/* Use generic code to handle smaller chunks... */
/* TODO: use caching instead? */
}
#endif
#ifdef USE_NEON
if (ctx->use_neon)
{
int did_use_neon = 0;
/* Process data in 8 block chunks. */
while (nblocks >= 8)
{
_gcry_serpent_neon_ctr_enc(ctx, outbuf, inbuf, ctr);
nblocks -= 8;
outbuf += 8 * sizeof(serpent_block_t);
inbuf += 8 * sizeof(serpent_block_t);
did_use_neon = 1;
}
if (did_use_neon)
{
/* serpent-neon assembly code does not use stack */
if (nblocks == 0)
burn_stack_depth = 0;
}
/* Use generic code to handle smaller chunks... */
/* TODO: use caching instead? */
}
#endif
for ( ;nblocks; nblocks-- )
{
/* Encrypt the counter. */
serpent_encrypt_internal(ctx, ctr, tmpbuf);
/* XOR the input with the encrypted counter and store in output. */
cipher_block_xor(outbuf, tmpbuf, inbuf, sizeof(serpent_block_t));
outbuf += sizeof(serpent_block_t);
inbuf += sizeof(serpent_block_t);
/* Increment the counter. */
cipher_block_add(ctr, 1, sizeof(serpent_block_t));
}
wipememory(tmpbuf, sizeof(tmpbuf));
_gcry_burn_stack(burn_stack_depth);
}
/* Bulk decryption of complete blocks in CBC mode. This function is only
intended for the bulk encryption feature of cipher.c. */
static void
_gcry_serpent_cbc_dec(void *context, unsigned char *iv,
void *outbuf_arg, const void *inbuf_arg,
size_t nblocks)
{
serpent_context_t *ctx = context;
unsigned char *outbuf = outbuf_arg;
const unsigned char *inbuf = inbuf_arg;
unsigned char savebuf[sizeof(serpent_block_t)];
int burn_stack_depth = 2 * sizeof (serpent_block_t);
+#ifdef USE_AVX512
+ if (ctx->use_avx512)
+ {
+ int did_use_avx512 = 0;
+
+ /* Process data in 32 block chunks. */
+ while (nblocks >= 32)
+ {
+ _gcry_serpent_avx512_cbc_dec(ctx, outbuf, inbuf, iv);
+
+ nblocks -= 32;
+ outbuf += 32 * sizeof(serpent_block_t);
+ inbuf += 32 * sizeof(serpent_block_t);
+ did_use_avx512 = 1;
+ }
+
+ if (did_use_avx512)
+ {
+ /* serpent-avx512 code does not use stack */
+ if (nblocks == 0)
+ burn_stack_depth = 0;
+ }
+
+ /* Use generic/avx2/sse2 code to handle smaller chunks... */
+ }
+#endif
+
#ifdef USE_AVX2
if (ctx->use_avx2)
{
int did_use_avx2 = 0;
/* Process data in 16 block chunks. */
while (nblocks >= 16)
{
_gcry_serpent_avx2_cbc_dec(ctx, outbuf, inbuf, iv);
nblocks -= 16;
outbuf += 16 * sizeof(serpent_block_t);
inbuf += 16 * sizeof(serpent_block_t);
did_use_avx2 = 1;
}
if (did_use_avx2)
{
/* serpent-avx2 assembly code does not use stack */
if (nblocks == 0)
burn_stack_depth = 0;
}
/* Use generic/sse2 code to handle smaller chunks... */
}
#endif
#ifdef USE_SSE2
{
int did_use_sse2 = 0;
/* Process data in 8 block chunks. */
while (nblocks >= 8)
{
_gcry_serpent_sse2_cbc_dec(ctx, outbuf, inbuf, iv);
nblocks -= 8;
outbuf += 8 * sizeof(serpent_block_t);
inbuf += 8 * sizeof(serpent_block_t);
did_use_sse2 = 1;
}
if (did_use_sse2)
{
/* serpent-sse2 assembly code does not use stack */
if (nblocks == 0)
burn_stack_depth = 0;
}
/* Use generic code to handle smaller chunks... */
}
#endif
#ifdef USE_NEON
if (ctx->use_neon)
{
int did_use_neon = 0;
/* Process data in 8 block chunks. */
while (nblocks >= 8)
{
_gcry_serpent_neon_cbc_dec(ctx, outbuf, inbuf, iv);
nblocks -= 8;
outbuf += 8 * sizeof(serpent_block_t);
inbuf += 8 * sizeof(serpent_block_t);
did_use_neon = 1;
}
if (did_use_neon)
{
/* serpent-neon assembly code does not use stack */
if (nblocks == 0)
burn_stack_depth = 0;
}
/* Use generic code to handle smaller chunks... */
}
#endif
for ( ;nblocks; nblocks-- )
{
/* INBUF is needed later and it may be identical to OUTBUF, so store
the intermediate result to SAVEBUF. */
serpent_decrypt_internal (ctx, inbuf, savebuf);
cipher_block_xor_n_copy_2(outbuf, savebuf, iv, inbuf,
sizeof(serpent_block_t));
inbuf += sizeof(serpent_block_t);
outbuf += sizeof(serpent_block_t);
}
wipememory(savebuf, sizeof(savebuf));
_gcry_burn_stack(burn_stack_depth);
}
/* Bulk decryption of complete blocks in CFB mode. This function is only
intended for the bulk encryption feature of cipher.c. */
static void
_gcry_serpent_cfb_dec(void *context, unsigned char *iv,
void *outbuf_arg, const void *inbuf_arg,
size_t nblocks)
{
serpent_context_t *ctx = context;
unsigned char *outbuf = outbuf_arg;
const unsigned char *inbuf = inbuf_arg;
int burn_stack_depth = 2 * sizeof (serpent_block_t);
+#ifdef USE_AVX512
+ if (ctx->use_avx512)
+ {
+ int did_use_avx512 = 0;
+
+ /* Process data in 32 block chunks. */
+ while (nblocks >= 32)
+ {
+ _gcry_serpent_avx512_cfb_dec(ctx, outbuf, inbuf, iv);
+
+ nblocks -= 32;
+ outbuf += 32 * sizeof(serpent_block_t);
+ inbuf += 32 * sizeof(serpent_block_t);
+ did_use_avx512 = 1;
+ }
+
+ if (did_use_avx512)
+ {
+ /* serpent-avx512 code does not use stack */
+ if (nblocks == 0)
+ burn_stack_depth = 0;
+ }
+
+ /* Use generic/avx2/sse2 code to handle smaller chunks... */
+ }
+#endif
+
#ifdef USE_AVX2
if (ctx->use_avx2)
{
int did_use_avx2 = 0;
/* Process data in 16 block chunks. */
while (nblocks >= 16)
{
_gcry_serpent_avx2_cfb_dec(ctx, outbuf, inbuf, iv);
nblocks -= 16;
outbuf += 16 * sizeof(serpent_block_t);
inbuf += 16 * sizeof(serpent_block_t);
did_use_avx2 = 1;
}
if (did_use_avx2)
{
/* serpent-avx2 assembly code does not use stack */
if (nblocks == 0)
burn_stack_depth = 0;
}
/* Use generic/sse2 code to handle smaller chunks... */
}
#endif
#ifdef USE_SSE2
{
int did_use_sse2 = 0;
/* Process data in 8 block chunks. */
while (nblocks >= 8)
{
_gcry_serpent_sse2_cfb_dec(ctx, outbuf, inbuf, iv);
nblocks -= 8;
outbuf += 8 * sizeof(serpent_block_t);
inbuf += 8 * sizeof(serpent_block_t);
did_use_sse2 = 1;
}
if (did_use_sse2)
{
/* serpent-sse2 assembly code does not use stack */
if (nblocks == 0)
burn_stack_depth = 0;
}
/* Use generic code to handle smaller chunks... */
}
#endif
#ifdef USE_NEON
if (ctx->use_neon)
{
int did_use_neon = 0;
/* Process data in 8 block chunks. */
while (nblocks >= 8)
{
_gcry_serpent_neon_cfb_dec(ctx, outbuf, inbuf, iv);
nblocks -= 8;
outbuf += 8 * sizeof(serpent_block_t);
inbuf += 8 * sizeof(serpent_block_t);
did_use_neon = 1;
}
if (did_use_neon)
{
/* serpent-neon assembly code does not use stack */
if (nblocks == 0)
burn_stack_depth = 0;
}
/* Use generic code to handle smaller chunks... */
}
#endif
for ( ;nblocks; nblocks-- )
{
serpent_encrypt_internal(ctx, iv, iv);
cipher_block_xor_n_copy(outbuf, iv, inbuf, sizeof(serpent_block_t));
outbuf += sizeof(serpent_block_t);
inbuf += sizeof(serpent_block_t);
}
_gcry_burn_stack(burn_stack_depth);
}
/* Bulk encryption/decryption of complete blocks in OCB mode. */
static size_t
_gcry_serpent_ocb_crypt (gcry_cipher_hd_t c, void *outbuf_arg,
const void *inbuf_arg, size_t nblocks, int encrypt)
{
-#if defined(USE_AVX2) || defined(USE_SSE2) || defined(USE_NEON)
+#if defined(USE_AVX512) || defined(USE_AVX2) || defined(USE_SSE2) \
+ || defined(USE_NEON)
serpent_context_t *ctx = (void *)&c->context.c;
unsigned char *outbuf = outbuf_arg;
const unsigned char *inbuf = inbuf_arg;
int burn_stack_depth = 2 * sizeof (serpent_block_t);
u64 blkn = c->u_mode.ocb.data_nblocks;
#else
(void)c;
(void)outbuf_arg;
(void)inbuf_arg;
(void)encrypt;
#endif
+#ifdef USE_AVX512
+ if (ctx->use_avx512)
+ {
+ int did_use_avx512 = 0;
+ ocb_L_uintptr_t Ls[32];
+ ocb_L_uintptr_t *l;
+
+ if (nblocks >= 32)
+ {
+ l = bulk_ocb_prepare_L_pointers_array_blk32 (c, Ls, blkn);
+
+ /* Process data in 32 block chunks. */
+ while (nblocks >= 32)
+ {
+ blkn += 32;
+ *l = (uintptr_t)(void *)ocb_get_l(c, blkn - blkn % 32);
+
+ _gcry_serpent_avx512_ocb_crypt(ctx, outbuf, inbuf, c->u_iv.iv,
+ c->u_ctr.ctr, Ls, encrypt);
+
+ nblocks -= 32;
+ outbuf += 32 * sizeof(serpent_block_t);
+ inbuf += 32 * sizeof(serpent_block_t);
+ did_use_avx512 = 1;
+ }
+ }
+
+ if (did_use_avx512)
+ {
+ /* serpent-avx512 code does not use stack */
+ if (nblocks == 0)
+ burn_stack_depth = 0;
+ }
+
+ /* Use generic code to handle smaller chunks... */
+ }
+#endif
+
#ifdef USE_AVX2
if (ctx->use_avx2)
{
int did_use_avx2 = 0;
u64 Ls[16];
u64 *l;
if (nblocks >= 16)
{
l = bulk_ocb_prepare_L_pointers_array_blk16 (c, Ls, blkn);
/* Process data in 16 block chunks. */
while (nblocks >= 16)
{
blkn += 16;
*l = (uintptr_t)(void *)ocb_get_l(c, blkn - blkn % 16);
if (encrypt)
_gcry_serpent_avx2_ocb_enc(ctx, outbuf, inbuf, c->u_iv.iv,
c->u_ctr.ctr, Ls);
else
_gcry_serpent_avx2_ocb_dec(ctx, outbuf, inbuf, c->u_iv.iv,
c->u_ctr.ctr, Ls);
nblocks -= 16;
outbuf += 16 * sizeof(serpent_block_t);
inbuf += 16 * sizeof(serpent_block_t);
did_use_avx2 = 1;
}
}
if (did_use_avx2)
{
/* serpent-avx2 assembly code does not use stack */
if (nblocks == 0)
burn_stack_depth = 0;
}
/* Use generic code to handle smaller chunks... */
}
#endif
#ifdef USE_SSE2
{
int did_use_sse2 = 0;
u64 Ls[8];
u64 *l;
if (nblocks >= 8)
{
l = bulk_ocb_prepare_L_pointers_array_blk8 (c, Ls, blkn);
/* Process data in 8 block chunks. */
while (nblocks >= 8)
{
blkn += 8;
*l = (uintptr_t)(void *)ocb_get_l(c, blkn - blkn % 8);
if (encrypt)
_gcry_serpent_sse2_ocb_enc(ctx, outbuf, inbuf, c->u_iv.iv,
c->u_ctr.ctr, Ls);
else
_gcry_serpent_sse2_ocb_dec(ctx, outbuf, inbuf, c->u_iv.iv,
c->u_ctr.ctr, Ls);
nblocks -= 8;
outbuf += 8 * sizeof(serpent_block_t);
inbuf += 8 * sizeof(serpent_block_t);
did_use_sse2 = 1;
}
}
if (did_use_sse2)
{
/* serpent-sse2 assembly code does not use stack */
if (nblocks == 0)
burn_stack_depth = 0;
}
/* Use generic code to handle smaller chunks... */
}
#endif
#ifdef USE_NEON
if (ctx->use_neon)
{
int did_use_neon = 0;
uintptr_t Ls[8];
uintptr_t *l;
if (nblocks >= 8)
{
l = bulk_ocb_prepare_L_pointers_array_blk8 (c, Ls, blkn);
/* Process data in 8 block chunks. */
while (nblocks >= 8)
{
blkn += 8;
*l = (uintptr_t)(void *)ocb_get_l(c, blkn - blkn % 8);
if (encrypt)
_gcry_serpent_neon_ocb_enc(ctx, outbuf, inbuf, c->u_iv.iv,
c->u_ctr.ctr, (const void **)Ls);
else
_gcry_serpent_neon_ocb_dec(ctx, outbuf, inbuf, c->u_iv.iv,
c->u_ctr.ctr, (const void **)Ls);
nblocks -= 8;
outbuf += 8 * sizeof(serpent_block_t);
inbuf += 8 * sizeof(serpent_block_t);
did_use_neon = 1;
}
}
if (did_use_neon)
{
/* serpent-neon assembly code does not use stack */
if (nblocks == 0)
burn_stack_depth = 0;
}
/* Use generic code to handle smaller chunks... */
}
#endif
-#if defined(USE_AVX2) || defined(USE_SSE2) || defined(USE_NEON)
+#if defined(USE_AVX512) || defined(USE_AVX2) || defined(USE_SSE2) \
+ || defined(USE_NEON)
c->u_mode.ocb.data_nblocks = blkn;
if (burn_stack_depth)
_gcry_burn_stack (burn_stack_depth + 4 * sizeof(void *));
#endif
return nblocks;
}
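/* (A note on the Ls arrays used above: the
   bulk_ocb_prepare_L_pointers_array_blk32/16/8 helpers fill a
   chunk-sized table with pointers to the precomputed OCB L offsets and
   return the address of the single slot whose value depends on the
   absolute block number; that slot is refreshed via ocb_get_l() once
   per chunk before the assembly routine consumes the table.) */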
/* Bulk authentication of complete blocks in OCB mode. */
static size_t
_gcry_serpent_ocb_auth (gcry_cipher_hd_t c, const void *abuf_arg,
size_t nblocks)
{
#if defined(USE_AVX2) || defined(USE_SSE2) || defined(USE_NEON)
serpent_context_t *ctx = (void *)&c->context.c;
const unsigned char *abuf = abuf_arg;
int burn_stack_depth = 2 * sizeof(serpent_block_t);
u64 blkn = c->u_mode.ocb.aad_nblocks;
#else
(void)c;
(void)abuf_arg;
#endif
#ifdef USE_AVX2
if (ctx->use_avx2)
{
int did_use_avx2 = 0;
u64 Ls[16];
u64 *l;
if (nblocks >= 16)
{
l = bulk_ocb_prepare_L_pointers_array_blk16 (c, Ls, blkn);
/* Process data in 16 block chunks. */
while (nblocks >= 16)
{
blkn += 16;
*l = (uintptr_t)(void *)ocb_get_l(c, blkn - blkn % 16);
_gcry_serpent_avx2_ocb_auth(ctx, abuf, c->u_mode.ocb.aad_offset,
c->u_mode.ocb.aad_sum, Ls);
nblocks -= 16;
abuf += 16 * sizeof(serpent_block_t);
did_use_avx2 = 1;
}
}
if (did_use_avx2)
{
/* serpent-avx2 assembly code does not use stack */
if (nblocks == 0)
burn_stack_depth = 0;
}
/* Use generic code to handle smaller chunks... */
}
#endif
#ifdef USE_SSE2
{
int did_use_sse2 = 0;
u64 Ls[8];
u64 *l;
if (nblocks >= 8)
{
l = bulk_ocb_prepare_L_pointers_array_blk8 (c, Ls, blkn);
/* Process data in 8 block chunks. */
while (nblocks >= 8)
{
blkn += 8;
*l = (uintptr_t)(void *)ocb_get_l(c, blkn - blkn % 8);
_gcry_serpent_sse2_ocb_auth(ctx, abuf, c->u_mode.ocb.aad_offset,
c->u_mode.ocb.aad_sum, Ls);
nblocks -= 8;
abuf += 8 * sizeof(serpent_block_t);
did_use_sse2 = 1;
}
}
if (did_use_sse2)
{
/* serpent-sse2 assembly code does not use stack */
if (nblocks == 0)
burn_stack_depth = 0;
}
/* Use generic code to handle smaller chunks... */
}
#endif
#ifdef USE_NEON
if (ctx->use_neon)
{
int did_use_neon = 0;
uintptr_t Ls[8];
uintptr_t *l;
if (nblocks >= 8)
{
l = bulk_ocb_prepare_L_pointers_array_blk8 (c, Ls, blkn);
/* Process data in 8 block chunks. */
while (nblocks >= 8)
{
blkn += 8;
*l = (uintptr_t)(void *)ocb_get_l(c, blkn - blkn % 8);
_gcry_serpent_neon_ocb_auth(ctx, abuf, c->u_mode.ocb.aad_offset,
c->u_mode.ocb.aad_sum,
(const void **)Ls);
nblocks -= 8;
abuf += 8 * sizeof(serpent_block_t);
did_use_neon = 1;
}
}
if (did_use_neon)
{
/* serpent-neon assembly code does not use stack */
if (nblocks == 0)
burn_stack_depth = 0;
}
/* Use generic code to handle smaller chunks... */
}
#endif
#if defined(USE_AVX2) || defined(USE_SSE2) || defined(USE_NEON)
c->u_mode.ocb.aad_nblocks = blkn;
if (burn_stack_depth)
_gcry_burn_stack (burn_stack_depth + 4 * sizeof(void *));
#endif
return nblocks;
}
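/* (The AAD state advanced above follows the OCB HASH definition of
   RFC 7253: aad_offset carries Offset_i = Offset_{i-1} ^ L_{ntz(i)}
   and aad_sum carries Sum_i = Sum_{i-1} ^ E_K(A_i ^ Offset_i).) */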
static unsigned int
-serpent_crypt_blk1_16(void *context, byte *out, const byte *in,
+serpent_crypt_blk1_32(void *context, byte *out, const byte *in,
size_t num_blks, int encrypt)
{
serpent_context_t *ctx = context;
unsigned int burn, burn_stack_depth = 0;
+#ifdef USE_AVX512
+ if (num_blks == 32 && ctx->use_avx512)
+ {
+ _gcry_serpent_avx512_blk32 (ctx, out, in, encrypt);
+ return 0;
+ }
+#endif
+
#ifdef USE_AVX2
- if (num_blks == 16 && ctx->use_avx2)
+ while (num_blks == 16 && ctx->use_avx2)
{
_gcry_serpent_avx2_blk16 (ctx, out, in, encrypt);
- return 0;
+ out += 16 * sizeof(serpent_block_t);
+ in += 16 * sizeof(serpent_block_t);
+ num_blks -= 16;
}
#endif
#ifdef USE_SSE2
while (num_blks >= 8)
{
_gcry_serpent_sse2_blk8 (ctx, out, in, encrypt);
out += 8 * sizeof(serpent_block_t);
in += 8 * sizeof(serpent_block_t);
num_blks -= 8;
}
#endif
#ifdef USE_NEON
if (ctx->use_neon)
{
while (num_blks >= 8)
{
_gcry_serpent_neon_blk8 (ctx, out, in, encrypt);
out += 8 * sizeof(serpent_block_t);
in += 8 * sizeof(serpent_block_t);
num_blks -= 8;
}
}
#endif
while (num_blks >= 1)
{
if (encrypt)
serpent_encrypt_internal((void *)ctx, in, out);
else
serpent_decrypt_internal((void *)ctx, in, out);
burn = 2 * sizeof(serpent_block_t);
burn_stack_depth = (burn > burn_stack_depth) ? burn : burn_stack_depth;
out += sizeof(serpent_block_t);
in += sizeof(serpent_block_t);
num_blks--;
}
return burn_stack_depth;
}
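/* (The cascade above peels work from the widest implementation down:
   an exact 32-block call goes straight to AVX512, an exact 16-block
   chunk to AVX2, groups of 8 to SSE2 or NEON, and whatever remains is
   handled one block at a time by the generic C code.) */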
static unsigned int
-serpent_encrypt_blk1_16(void *ctx, byte *out, const byte *in,
+serpent_encrypt_blk1_32(void *ctx, byte *out, const byte *in,
size_t num_blks)
{
- return serpent_crypt_blk1_16 (ctx, out, in, num_blks, 1);
+ return serpent_crypt_blk1_32 (ctx, out, in, num_blks, 1);
}
static unsigned int
-serpent_decrypt_blk1_16(void *ctx, byte *out, const byte *in,
+serpent_decrypt_blk1_32(void *ctx, byte *out, const byte *in,
size_t num_blks)
{
- return serpent_crypt_blk1_16 (ctx, out, in, num_blks, 0);
+ return serpent_crypt_blk1_32 (ctx, out, in, num_blks, 0);
}
/* Bulk encryption/decryption of complete blocks in XTS mode. */
static void
_gcry_serpent_xts_crypt (void *context, unsigned char *tweak, void *outbuf_arg,
const void *inbuf_arg, size_t nblocks, int encrypt)
{
serpent_context_t *ctx = context;
unsigned char *outbuf = outbuf_arg;
const unsigned char *inbuf = inbuf_arg;
int burn_stack_depth = 0;
/* Process remaining blocks. */
if (nblocks)
{
- unsigned char tmpbuf[16 * 16];
+ unsigned char tmpbuf[32 * 16];
unsigned int tmp_used = 16;
size_t nburn;
- nburn = bulk_xts_crypt_128(ctx, encrypt ? serpent_encrypt_blk1_16
- : serpent_decrypt_blk1_16,
+ nburn = bulk_xts_crypt_128(ctx, encrypt ? serpent_encrypt_blk1_32
+ : serpent_decrypt_blk1_32,
outbuf, inbuf, nblocks,
tweak, tmpbuf, sizeof(tmpbuf) / 16,
&tmp_used);
burn_stack_depth = nburn > burn_stack_depth ? nburn : burn_stack_depth;
wipememory(tmpbuf, tmp_used);
}
if (burn_stack_depth)
_gcry_burn_stack(burn_stack_depth);
}
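/* (tmpbuf is scratch for bulk_xts_crypt_128, one 16-byte tweak per
   block; it grows from 16 to 32 blocks here so that full 32-block
   chunks can reach serpent_crypt_blk1_32 and thus the AVX512 path.
   sizeof(tmpbuf) / 16 passes that limit to the helper.) */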
/* Bulk encryption/decryption in ECB mode. */
static void
_gcry_serpent_ecb_crypt (void *context, void *outbuf_arg, const void *inbuf_arg,
size_t nblocks, int encrypt)
{
serpent_context_t *ctx = context;
unsigned char *outbuf = outbuf_arg;
const unsigned char *inbuf = inbuf_arg;
int burn_stack_depth = 0;
/* Process remaining blocks. */
if (nblocks)
{
size_t nburn;
- nburn = bulk_ecb_crypt_128(ctx, encrypt ? serpent_encrypt_blk1_16
- : serpent_decrypt_blk1_16,
- outbuf, inbuf, nblocks, 16);
+ nburn = bulk_ecb_crypt_128(ctx, encrypt ? serpent_encrypt_blk1_32
+ : serpent_decrypt_blk1_32,
+ outbuf, inbuf, nblocks, 32);
burn_stack_depth = nburn > burn_stack_depth ? nburn : burn_stack_depth;
}
if (burn_stack_depth)
_gcry_burn_stack(burn_stack_depth);
}
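/* (The final argument of bulk_ecb_crypt_128 is the maximum number of
   blocks handed to the callback per invocation; raising it from 16 to
   32 lets full-width chunks reach the AVX512 code.) */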
/* Serpent test. */
static const char *
serpent_test (void)
{
serpent_context_t context;
unsigned char scratch[16];
unsigned int i;
static struct test
{
int key_length;
unsigned char key[32];
unsigned char text_plain[16];
unsigned char text_cipher[16];
} test_data[] =
{
{
16,
"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00",
"\xD2\x9D\x57\x6F\xCE\xA3\xA3\xA7\xED\x90\x99\xF2\x92\x73\xD7\x8E",
"\xB2\x28\x8B\x96\x8A\xE8\xB0\x86\x48\xD1\xCE\x96\x06\xFD\x99\x2D"
},
{
24,
"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00",
"\xD2\x9D\x57\x6F\xCE\xAB\xA3\xA7\xED\x98\x99\xF2\x92\x7B\xD7\x8E",
"\x13\x0E\x35\x3E\x10\x37\xC2\x24\x05\xE8\xFA\xEF\xB2\xC3\xC3\xE9"
},
{
32,
"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00",
"\xD0\x95\x57\x6F\xCE\xA3\xE3\xA7\xED\x98\xD9\xF2\x90\x73\xD7\x8E",
"\xB9\x0E\xE5\x86\x2D\xE6\x91\x68\xF2\xBD\xD5\x12\x5B\x45\x47\x2B"
},
{
32,
"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00",
"\x00\x00\x00\x00\x01\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00",
"\x20\x61\xA4\x27\x82\xBD\x52\xEC\x69\x1E\xC3\x83\xB0\x3B\xA7\x7C"
},
{
0
},
};
for (i = 0; test_data[i].key_length; i++)
{
serpent_setkey_internal (&context, test_data[i].key,
test_data[i].key_length);
serpent_encrypt_internal (&context, test_data[i].text_plain, scratch);
if (memcmp (scratch, test_data[i].text_cipher, sizeof (serpent_block_t)))
switch (test_data[i].key_length)
{
case 16:
return "Serpent-128 test encryption failed.";
case 24:
return "Serpent-192 test encryption failed.";
case 32:
return "Serpent-256 test encryption failed.";
}
serpent_decrypt_internal (&context, test_data[i].text_cipher, scratch);
if (memcmp (scratch, test_data[i].text_plain, sizeof (serpent_block_t)))
switch (test_data[i].key_length)
{
case 16:
return "Serpent-128 test decryption failed.";
case 24:
return "Serpent-192 test decryption failed.";
case 32:
return "Serpent-256 test decryption failed.";
}
}
return NULL;
}
static const gcry_cipher_oid_spec_t serpent128_oids[] =
{
{"1.3.6.1.4.1.11591.13.2.1", GCRY_CIPHER_MODE_ECB },
{"1.3.6.1.4.1.11591.13.2.2", GCRY_CIPHER_MODE_CBC },
{"1.3.6.1.4.1.11591.13.2.3", GCRY_CIPHER_MODE_OFB },
{"1.3.6.1.4.1.11591.13.2.4", GCRY_CIPHER_MODE_CFB },
{ NULL }
};
static const gcry_cipher_oid_spec_t serpent192_oids[] =
{
{"1.3.6.1.4.1.11591.13.2.21", GCRY_CIPHER_MODE_ECB },
{"1.3.6.1.4.1.11591.13.2.22", GCRY_CIPHER_MODE_CBC },
{"1.3.6.1.4.1.11591.13.2.23", GCRY_CIPHER_MODE_OFB },
{"1.3.6.1.4.1.11591.13.2.24", GCRY_CIPHER_MODE_CFB },
{ NULL }
};
static const gcry_cipher_oid_spec_t serpent256_oids[] =
{
{"1.3.6.1.4.1.11591.13.2.41", GCRY_CIPHER_MODE_ECB },
{"1.3.6.1.4.1.11591.13.2.42", GCRY_CIPHER_MODE_CBC },
{"1.3.6.1.4.1.11591.13.2.43", GCRY_CIPHER_MODE_OFB },
{"1.3.6.1.4.1.11591.13.2.44", GCRY_CIPHER_MODE_CFB },
{ NULL }
};
static const char *serpent128_aliases[] =
{
"SERPENT",
"SERPENT-128",
NULL
};
static const char *serpent192_aliases[] =
{
"SERPENT-192",
NULL
};
static const char *serpent256_aliases[] =
{
"SERPENT-256",
NULL
};
gcry_cipher_spec_t _gcry_cipher_spec_serpent128 =
{
GCRY_CIPHER_SERPENT128, {0, 0},
"SERPENT128", serpent128_aliases, serpent128_oids, 16, 128,
sizeof (serpent_context_t),
serpent_setkey, serpent_encrypt, serpent_decrypt
};
gcry_cipher_spec_t _gcry_cipher_spec_serpent192 =
{
GCRY_CIPHER_SERPENT192, {0, 0},
"SERPENT192", serpent192_aliases, serpent192_oids, 16, 192,
sizeof (serpent_context_t),
serpent_setkey, serpent_encrypt, serpent_decrypt
};
gcry_cipher_spec_t _gcry_cipher_spec_serpent256 =
{
GCRY_CIPHER_SERPENT256, {0, 0},
"SERPENT256", serpent256_aliases, serpent256_oids, 16, 256,
sizeof (serpent_context_t),
serpent_setkey, serpent_encrypt, serpent_decrypt
};
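/* For reference, a minimal sketch of exercising these specs through
   the public libgcrypt API (illustrative only; error handling omitted
   and key/iv/in/out are caller-provided 16-byte buffers):

     gcry_cipher_hd_t hd;
     gcry_cipher_open (&hd, GCRY_CIPHER_SERPENT128, GCRY_CIPHER_MODE_CBC, 0);
     gcry_cipher_setkey (hd, key, 16);
     gcry_cipher_setiv (hd, iv, 16);
     gcry_cipher_encrypt (hd, out, 16, in, 16);
     gcry_cipher_close (hd);
*/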
diff --git a/configure.ac b/configure.ac
index 60fb1f75..572fe279 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1,3780 +1,3825 @@
# Configure.ac script for Libgcrypt
# Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2006,
# 2007, 2008, 2009, 2011 Free Software Foundation, Inc.
# Copyright (C) 2012-2021 g10 Code GmbH
#
# This file is part of Libgcrypt.
#
# Libgcrypt is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of
# the License, or (at your option) any later version.
#
# Libgcrypt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, see <http://www.gnu.org/licenses/>.
# (Process this file with autoconf to produce a configure script.)
AC_REVISION($Revision$)
AC_PREREQ([2.69])
min_automake_version="1.14"
# To build a release you need to create a tag with the version number
# (git tag -s libgcrypt-n.m.k) and run "./autogen.sh --force". Please
# bump the version number immediately after the release and do another
# commit and push so that the git magic is able to work. See below
# for the LT versions.
m4_define([mym4_package],[libgcrypt])
m4_define([mym4_major], [1])
m4_define([mym4_minor], [11])
m4_define([mym4_micro], [0])
# Below is m4 magic to extract and compute the git revision number,
# the decimalized short revision number, a beta version string and a
# flag indicating a development version (mym4_isbeta). Note that the
# m4 processing is done by autoconf and not during the configure run.
m4_define([mym4_verslist], m4_split(m4_esyscmd([./autogen.sh --find-version] \
mym4_package mym4_major mym4_minor mym4_micro),[:]))
m4_define([mym4_isbeta], m4_argn(2, mym4_verslist))
m4_define([mym4_version], m4_argn(4, mym4_verslist))
m4_define([mym4_revision], m4_argn(7, mym4_verslist))
m4_define([mym4_revision_dec], m4_argn(8, mym4_verslist))
m4_esyscmd([echo ]mym4_version[>VERSION])
AC_INIT([mym4_package],[mym4_version],[https://bugs.gnupg.org])
# LT Version numbers, remember to change them just *before* a release.
# NOTE NOTE - Already updated for the 1.11 series - NOTE NOTE
# (Code changed: REVISION++)
# (Interfaces added/removed/changed: CURRENT++, REVISION=0)
# (Interfaces added: AGE++)
# (Interfaces removed: AGE=0)
#
# (Interfaces removed: CURRENT++, AGE=0, REVISION=0)
# (Interfaces added: CURRENT++, AGE++, REVISION=0)
# (No interfaces changed: REVISION++)
LIBGCRYPT_LT_CURRENT=25
LIBGCRYPT_LT_AGE=5
LIBGCRYPT_LT_REVISION=0
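# As a worked example of the rules above: CURRENT=25, REVISION=0, AGE=5
# is passed to libtool as -version-info 25:0:5, giving a SONAME major of
# CURRENT - AGE = 20 and (on GNU/Linux) a file named libgcrypt.so.20.5.0.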
################################################
AC_SUBST(LIBGCRYPT_LT_CURRENT)
AC_SUBST(LIBGCRYPT_LT_AGE)
AC_SUBST(LIBGCRYPT_LT_REVISION)
# If the API is changed in an incompatible way: increment the next counter.
#
# 1.6: ABI and API change but the change is to most users irrelevant
# and thus the API version number has not been incremented.
LIBGCRYPT_CONFIG_API_VERSION=1
# If you change the required gpg-error version, please remove
# unnecessary error code defines in src/gcrypt-int.h.
NEED_GPG_ERROR_VERSION=1.27
AC_CONFIG_AUX_DIR([build-aux])
AC_CONFIG_SRCDIR([src/libgcrypt.vers])
AM_INIT_AUTOMAKE([serial-tests dist-bzip2])
AC_CONFIG_HEADERS([config.h])
AC_CONFIG_MACRO_DIR([m4])
AC_CONFIG_LIBOBJ_DIR([compat])
AC_CANONICAL_HOST
AM_MAINTAINER_MODE
AM_SILENT_RULES
AC_USE_SYSTEM_EXTENSIONS
AC_ARG_VAR(SYSROOT,[locate config scripts also below that directory])
AH_TOP([
#ifndef _GCRYPT_CONFIG_H_INCLUDED
#define _GCRYPT_CONFIG_H_INCLUDED
/* Enable gpg-error's strerror macro for W32CE. */
#define GPG_ERR_ENABLE_ERRNO_MACROS 1
])
AH_BOTTOM([
#define _GCRYPT_IN_LIBGCRYPT 1
/* Add .note.gnu.property section for Intel CET in assembler sources
when CET is enabled. */
#if defined(__ASSEMBLER__) && defined(__CET__)
# include <cet.h>
#endif
/* If the configure check for endianness has been disabled, get it from
OS macros. This is intended for making fat binary builds on OS X. */
#ifdef DISABLED_ENDIAN_CHECK
# if defined(__BIG_ENDIAN__)
# define WORDS_BIGENDIAN 1
# elif defined(__LITTLE_ENDIAN__)
# undef WORDS_BIGENDIAN
# else
# error "No endianness found"
# endif
#endif /*DISABLED_ENDIAN_CHECK*/
/* We basically use the original Camellia source. Make sure the symbols
are properly prefixed. */
#define CAMELLIA_EXT_SYM_PREFIX _gcry_
#endif /*_GCRYPT_CONFIG_H_INCLUDED*/
])
AH_VERBATIM([_REENTRANT],
[/* To allow the use of Libgcrypt in multithreaded programs we have to use
special features from the library. */
#ifndef _REENTRANT
# define _REENTRANT 1
#endif
])
######################
## Basic checks. ## (we need some results later on, e.g. $GCC)
######################
AC_PROG_MAKE_SET
missing_dir=`cd $ac_aux_dir && pwd`
AM_MISSING_PROG(ACLOCAL, aclocal, $missing_dir)
AM_MISSING_PROG(AUTOCONF, autoconf, $missing_dir)
AM_MISSING_PROG(AUTOMAKE, automake, $missing_dir)
AM_MISSING_PROG(AUTOHEADER, autoheader, $missing_dir)
# AM_MISSING_PROG(MAKEINFO, makeinfo, $missing_dir)
AC_PROG_CC
AC_PROG_CPP
AM_PROG_CC_C_O
AM_PROG_AS
AC_SEARCH_LIBS([strerror],[cposix])
AC_PROG_INSTALL
AC_PROG_AWK
# Taken from mpfr-4.0.1, then modified for LDADD_FOR_TESTS_KLUDGE
dnl Under Linux, make sure that the old dtags are used if LD_LIBRARY_PATH
dnl is defined. The issue is that with the new dtags, LD_LIBRARY_PATH has
dnl the precedence over the run path, so that if a compatible MPFR library
dnl is installed in some directory from $LD_LIBRARY_PATH, then the tested
dnl MPFR library will be this library instead of the MPFR library from the
dnl build tree. Other OS with the same issue might be added later.
dnl
dnl References:
dnl https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=859732
dnl http://lists.gnu.org/archive/html/libtool/2017-05/msg00000.html
dnl
dnl We need to check whether --disable-new-dtags is supported as alternate
dnl linkers may be used (e.g., with tcc: CC=tcc LD=tcc).
dnl
case $host in
*-*-linux*)
if test -n "$LD_LIBRARY_PATH"; then
saved_LDFLAGS="$LDFLAGS"
LDADD_FOR_TESTS_KLUDGE="-Wl,--disable-new-dtags"
LDFLAGS="$LDFLAGS $LDADD_FOR_TESTS_KLUDGE"
AC_MSG_CHECKING(whether --disable-new-dtags is supported by the linker)
AC_LINK_IFELSE([AC_LANG_SOURCE([[
int main (void) { return 0; }
]])],
[AC_MSG_RESULT(yes (use it since LD_LIBRARY_PATH is set))],
[AC_MSG_RESULT(no)
LDADD_FOR_TESTS_KLUDGE=""
])
LDFLAGS="$saved_LDFLAGS"
fi
;;
esac
AC_SUBST([LDADD_FOR_TESTS_KLUDGE])
VERSION_NUMBER=m4_esyscmd(printf "0x%02x%02x%02x" mym4_major \
mym4_minor mym4_micro)
AC_SUBST(VERSION_NUMBER)
# We need to compile and run a program on the build machine.
AX_CC_FOR_BUILD
LT_PREREQ([2.2.6])
LT_INIT([win32-dll disable-static])
LT_LANG([Windows Resource])
##########################
## General definitions. ##
##########################
# Used by libgcrypt-config
LIBGCRYPT_CONFIG_LIBS="-lgcrypt"
LIBGCRYPT_CONFIG_CFLAGS=""
LIBGCRYPT_CONFIG_HOST="$host"
# Definitions for symmetric ciphers.
available_ciphers="arcfour blowfish cast5 des aes twofish serpent rfc2268 seed"
available_ciphers="$available_ciphers camellia idea salsa20 gost28147 chacha20"
available_ciphers="$available_ciphers sm4 aria"
enabled_ciphers=""
# Definitions for public-key ciphers.
available_pubkey_ciphers="dsa elgamal rsa ecc"
enabled_pubkey_ciphers=""
# Definitions for message digests.
available_digests="crc gostr3411-94 md2 md4 md5 rmd160 sha1 sha256 sha512"
available_digests="$available_digests sha3 tiger whirlpool stribog blake2"
available_digests="$available_digests sm3"
enabled_digests=""
# Definitions for kdfs (optional ones)
available_kdfs="s2k pkdf2 scrypt"
enabled_kdfs=""
# Definitions for random modules.
available_random_modules="getentropy linux egd unix"
auto_random_modules="$available_random_modules"
# Supported thread backends.
LIBGCRYPT_THREAD_MODULES=""
# Other definitions.
have_w32_system=no
have_w32ce_system=no
have_pthread=no
# Setup some stuff depending on host.
case "${host}" in
*-*-mingw32*)
ac_cv_have_dev_random=no
have_w32_system=yes
case "${host}" in
*-mingw32ce*)
have_w32ce_system=yes
available_random_modules="w32ce"
;;
*)
available_random_modules="w32"
;;
esac
AC_DEFINE(USE_ONLY_8DOT3,1,
[set this to limit filenames to the 8.3 format])
AC_DEFINE(HAVE_DRIVE_LETTERS,1,
[defined if we must run on a stupid file system])
AC_DEFINE(HAVE_DOSISH_SYSTEM,1,
[defined if we run on some of the PCDOS like systems
(DOS, Windoze, OS/2) with special properties like
no file modes])
;;
i?86-emx-os2 | i?86-*-os2*emx)
# OS/2 with the EMX environment
ac_cv_have_dev_random=no
AC_DEFINE(HAVE_DRIVE_LETTERS)
AC_DEFINE(HAVE_DOSISH_SYSTEM)
;;
i?86-*-msdosdjgpp*)
# DOS with the DJGPP environment
ac_cv_have_dev_random=no
AC_DEFINE(HAVE_DRIVE_LETTERS)
AC_DEFINE(HAVE_DOSISH_SYSTEM)
;;
*-*-hpux*)
if test -z "$GCC" ; then
CFLAGS="$CFLAGS -Ae -D_HPUX_SOURCE"
fi
;;
*-dec-osf4*)
if test -z "$GCC" ; then
# Suppress all warnings
# to get rid of the unsigned/signed char mismatch warnings.
CFLAGS="$CFLAGS -w"
fi
;;
m68k-atari-mint)
;;
*-apple-darwin*)
AC_DEFINE(_DARWIN_C_SOURCE, 1,
Expose all libc features (__DARWIN_C_FULL).)
AC_DEFINE(USE_POSIX_SPAWN_FOR_TESTS, 1,
[defined if we use posix_spawn in test program])
AC_CHECK_HEADERS(spawn.h)
;;
*)
;;
esac
if test "$have_w32_system" = yes; then
AC_DEFINE(HAVE_W32_SYSTEM,1, [Defined if we run on a W32 API based system])
if test "$have_w32ce_system" = yes; then
AC_DEFINE(HAVE_W32CE_SYSTEM,1,[Defined if we run on WindowsCE])
fi
fi
AM_CONDITIONAL(HAVE_W32_SYSTEM, test "$have_w32_system" = yes)
AM_CONDITIONAL(HAVE_W32CE_SYSTEM, test "$have_w32ce_system" = yes)
# A printable OS Name is sometimes useful.
case "${host}" in
*-*-mingw32ce*)
PRINTABLE_OS_NAME="W32CE"
;;
*-*-mingw32*)
PRINTABLE_OS_NAME="W32"
;;
i?86-emx-os2 | i?86-*-os2*emx )
PRINTABLE_OS_NAME="OS/2"
;;
i?86-*-msdosdjgpp*)
PRINTABLE_OS_NAME="MSDOS/DJGPP"
;;
*-linux*)
PRINTABLE_OS_NAME="GNU/Linux"
;;
*)
PRINTABLE_OS_NAME=`uname -s || echo "Unknown"`
;;
esac
NAME_OF_DEV_RANDOM="/dev/random"
NAME_OF_DEV_URANDOM="/dev/urandom"
AC_ARG_ENABLE(endian-check,
AS_HELP_STRING([--disable-endian-check],
[disable the endian check and trust the OS provided macros]),
endiancheck=$enableval,endiancheck=yes)
if test x"$endiancheck" = xyes ; then
AC_C_BIGENDIAN
else
AC_DEFINE(DISABLED_ENDIAN_CHECK,1,[configure did not test for endianness])
fi
AC_CHECK_SIZEOF(unsigned short, 2)
AC_CHECK_SIZEOF(unsigned int, 4)
AC_CHECK_SIZEOF(unsigned long, 4)
AC_CHECK_SIZEOF(unsigned long long, 0)
AC_CHECK_SIZEOF(unsigned __int128, 0)
AC_CHECK_SIZEOF(void *, 0)
AC_TYPE_UINTPTR_T
if test "$ac_cv_sizeof_unsigned_short" = "0" \
|| test "$ac_cv_sizeof_unsigned_int" = "0" \
|| test "$ac_cv_sizeof_unsigned_long" = "0"; then
AC_MSG_WARN([Hmmm, something is wrong with the sizes - using defaults]);
fi
# Ensure that we have UINT64_C before we bother to check for uint64_t
AC_CACHE_CHECK([for UINT64_C],[gnupg_cv_uint64_c_works],
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <inttypes.h>]],
[[uint64_t foo=UINT64_C(42);]])],
gnupg_cv_uint64_c_works=yes,gnupg_cv_uint64_c_works=no))
if test "$gnupg_cv_uint64_c_works" = "yes" ; then
AC_CHECK_SIZEOF(uint64_t)
fi
# Do we have any 64-bit data types?
if test "$ac_cv_sizeof_unsigned_int" != "8" \
&& test "$ac_cv_sizeof_unsigned_long" != "8" \
&& test "$ac_cv_sizeof_unsigned_long_long" != "8" \
&& test "$ac_cv_sizeof_uint64_t" != "8"; then
AC_MSG_ERROR([[
***
*** No 64-bit integer type available.
*** It is not possible to build Libgcrypt on this platform.
***]])
fi
# If not specified otherwise, all available algorithms will be
# included.
default_ciphers="$available_ciphers"
default_pubkey_ciphers="$available_pubkey_ciphers"
default_digests="$available_digests"
default_kdfs="$available_kdfs"
# Blacklist MD2 by default
default_digests=`echo $default_digests | sed -e 's/md2//g'`
# Substitutions to set generated files in a Emacs buffer to read-only.
AC_SUBST(emacs_local_vars_begin, ['Local Variables:'])
AC_SUBST(emacs_local_vars_read_only, ['buffer-read-only: t'])
AC_SUBST(emacs_local_vars_end, ['End:'])
############################
## Command line switches. ##
############################
# Implementation of the --enable-ciphers switch.
AC_ARG_ENABLE(ciphers,
AS_HELP_STRING([--enable-ciphers=ciphers],
[select the symmetric ciphers to include]),
[enabled_ciphers=`echo $enableval | tr ',:' ' ' | tr '[A-Z]' '[a-z]'`],
[enabled_ciphers=""])
if test "x$enabled_ciphers" = "x" \
-o "$enabled_ciphers" = "yes" \
-o "$enabled_ciphers" = "no"; then
enabled_ciphers=$default_ciphers
fi
AC_MSG_CHECKING([which symmetric ciphers to include])
for cipher in $enabled_ciphers; do
LIST_MEMBER($cipher, $available_ciphers)
if test "$found" = "0"; then
AC_MSG_ERROR([unsupported cipher "$cipher" specified])
fi
done
AC_MSG_RESULT([$enabled_ciphers])
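# For example, `./configure --enable-ciphers=AES,serpent:chacha20' selects
# just those three; the tr invocation above accepts comma, colon or space
# separated lists and lowercases the names before validation.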
# Implementation of the --enable-pubkey-ciphers switch.
AC_ARG_ENABLE(pubkey-ciphers,
AS_HELP_STRING([--enable-pubkey-ciphers=ciphers],
[select the public-key ciphers to include]),
[enabled_pubkey_ciphers=`echo $enableval | tr ',:' ' ' | tr '[A-Z]' '[a-z]'`],
[enabled_pubkey_ciphers=""])
if test "x$enabled_pubkey_ciphers" = "x" \
-o "$enabled_pubkey_ciphers" = "yes" \
-o "$enabled_pubkey_ciphers" = "no"; then
enabled_pubkey_ciphers=$default_pubkey_ciphers
fi
AC_MSG_CHECKING([which public-key ciphers to include])
for cipher in $enabled_pubkey_ciphers; do
LIST_MEMBER($cipher, $available_pubkey_ciphers)
if test "$found" = "0"; then
AC_MSG_ERROR([unsupported public-key cipher specified])
fi
done
AC_MSG_RESULT([$enabled_pubkey_ciphers])
# Implementation of the --enable-digests switch.
AC_ARG_ENABLE(digests,
AS_HELP_STRING([--enable-digests=digests],
[select the message digests to include]),
[enabled_digests=`echo $enableval | tr ',:' ' ' | tr '[A-Z]' '[a-z]'`],
[enabled_digests=""])
if test "x$enabled_digests" = "x" \
-o "$enabled_digests" = "yes" \
-o "$enabled_digests" = "no"; then
enabled_digests=$default_digests
fi
AC_MSG_CHECKING([which message digests to include])
for digest in $enabled_digests; do
LIST_MEMBER($digest, $available_digests)
if test "$found" = "0"; then
AC_MSG_ERROR([unsupported message digest specified])
fi
done
AC_MSG_RESULT([$enabled_digests])
# Implementation of the --enable-kdfs switch.
AC_ARG_ENABLE(kdfs,
AS_HELP_STRING([--enable-kdfs=kdfs],
[select the KDFs to include]),
[enabled_kdfs=`echo $enableval | tr ',:' ' ' | tr '[A-Z]' '[a-z]'`],
[enabled_kdfs=""])
if test "x$enabled_kdfs" = "x" \
-o "$enabled_kdfs" = "yes" \
-o "$enabled_kdfs" = "no"; then
enabled_kdfs=$default_kdfs
fi
AC_MSG_CHECKING([which key derivation functions to include])
for kdf in $enabled_kdfs; do
LIST_MEMBER($kdf, $available_kdfs)
if test "$found" = "0"; then
AC_MSG_ERROR([unsupported key derivation function specified])
fi
done
AC_MSG_RESULT([$enabled_kdfs])
# Implementation of the --enable-random switch.
AC_ARG_ENABLE(random,
AS_HELP_STRING([--enable-random=name],
[select which random number generator to use]),
[random=`echo $enableval | tr '[A-Z]' '[a-z]'`],
[])
if test "x$random" = "x" -o "$random" = "yes" -o "$random" = "no"; then
random=default
fi
AC_MSG_CHECKING([which random module to use])
if test "$random" != "default" -a "$random" != "auto"; then
LIST_MEMBER($random, $available_random_modules)
if test "$found" = "0"; then
AC_MSG_ERROR([unsupported random module specified])
fi
fi
AC_MSG_RESULT($random)
# Implementation of the --disable-dev-random switch.
AC_MSG_CHECKING([whether use of /dev/random is requested])
AC_ARG_ENABLE(dev-random,
[ --disable-dev-random disable the use of /dev/random],
try_dev_random=$enableval, try_dev_random=yes)
AC_MSG_RESULT($try_dev_random)
# Implementation of the --with-egd-socket switch.
AC_ARG_WITH(egd-socket,
[ --with-egd-socket=NAME Use NAME for the EGD socket],
egd_socket_name="$withval", egd_socket_name="" )
AC_DEFINE_UNQUOTED(EGD_SOCKET_NAME, "$egd_socket_name",
[Define if you don't want the default EGD socket name.
For details see cipher/rndegd.c])
# Implementation of --disable-asm.
AC_MSG_CHECKING([whether MPI and cipher assembler modules are requested])
AC_ARG_ENABLE([asm],
AS_HELP_STRING([--disable-asm],
[Disable MPI and cipher assembler modules]),
[try_asm_modules=$enableval],
[try_asm_modules=yes])
AC_MSG_RESULT($try_asm_modules)
if test "$try_asm_modules" != yes ; then
AC_DEFINE(ASM_DISABLED,1,[Defined if --disable-asm was used to configure])
fi
# Implementation of the --enable-large-data-tests switch.
AC_MSG_CHECKING([whether to run large data tests])
AC_ARG_ENABLE(large-data-tests,
AS_HELP_STRING([--enable-large-data-tests],
[Enable the really long running large data tests]),
large_data_tests=$enableval,large_data_tests=no)
AC_MSG_RESULT($large_data_tests)
AC_SUBST(RUN_LARGE_DATA_TESTS, $large_data_tests)
# Implementation of --enable-force-soft-hwfeatures
AC_MSG_CHECKING([whether 'soft' HW feature bits are forced on])
AC_ARG_ENABLE([force-soft-hwfeatures],
AS_HELP_STRING([--enable-force-soft-hwfeatures],
[Enable forcing 'soft' HW feature bits on]),
[force_soft_hwfeatures=$enableval],
[force_soft_hwfeatures=no])
AC_MSG_RESULT($force_soft_hwfeatures)
# Implementation of the --with-capabilities switch.
# Check whether we want to use Linux capabilities
AC_MSG_CHECKING([whether use of capabilities is requested])
AC_ARG_WITH(capabilities,
AS_HELP_STRING([--with-capabilities],
[Use linux capabilities [default=no]]),
[use_capabilities="$withval"],[use_capabilities=no])
AC_MSG_RESULT($use_capabilities)
# Implementation of the --enable-hmac-binary-check.
AC_MSG_CHECKING([whether an HMAC binary check is requested])
AC_ARG_ENABLE(hmac-binary-check,
AS_HELP_STRING([--enable-hmac-binary-check],
[Enable library integrity check]),
[use_hmac_binary_check="$enableval"],
[use_hmac_binary_check=no])
AC_MSG_RESULT($use_hmac_binary_check)
if test "$use_hmac_binary_check" = no ; then
DEF_HMAC_BINARY_CHECK=''
else
AC_DEFINE(ENABLE_HMAC_BINARY_CHECK,1,
[Define to support an HMAC based integrity check])
AC_CHECK_TOOL(OBJCOPY, [objcopy])
AC_CHECK_TOOL(READELF, [readelf])
if test "$use_hmac_binary_check" != yes ; then
DEF_HMAC_BINARY_CHECK=-DKEY_FOR_BINARY_CHECK="'\"$use_hmac_binary_check\"'"
fi
fi
AM_CONDITIONAL(USE_HMAC_BINARY_CHECK, test "x$use_hmac_binary_check" != xno)
AC_SUBST(DEF_HMAC_BINARY_CHECK)
# Implementation of the --with-fips-module-version.
AC_ARG_WITH(fips-module-version,
AS_HELP_STRING([--with-fips-module-version=VERSION],
[Specify the FIPS module version for the build]),
fips_module_version="$withval", fips_module_version="" )
AC_DEFINE_UNQUOTED(FIPS_MODULE_VERSION, "$fips_module_version",
[Define FIPS module version for certification])
# Implementation of the --disable-jent-support switch.
AC_MSG_CHECKING([whether jitter entropy support is requested])
AC_ARG_ENABLE(jent-support,
AS_HELP_STRING([--disable-jent-support],
[Disable support for the Jitter entropy collector]),
jentsupport=$enableval,jentsupport=yes)
AC_MSG_RESULT($jentsupport)
# Implementation of the --disable-padlock-support switch.
AC_MSG_CHECKING([whether padlock support is requested])
AC_ARG_ENABLE(padlock-support,
AS_HELP_STRING([--disable-padlock-support],
[Disable support for the PadLock Engine of VIA processors]),
padlocksupport=$enableval,padlocksupport=yes)
AC_MSG_RESULT($padlocksupport)
# Implementation of the --disable-aesni-support switch.
AC_MSG_CHECKING([whether AESNI support is requested])
AC_ARG_ENABLE(aesni-support,
AS_HELP_STRING([--disable-aesni-support],
[Disable support for the Intel AES-NI instructions]),
aesnisupport=$enableval,aesnisupport=yes)
AC_MSG_RESULT($aesnisupport)
# Implementation of the --disable-shaext-support switch.
AC_MSG_CHECKING([whether SHAEXT support is requested])
AC_ARG_ENABLE(shaext-support,
AS_HELP_STRING([--disable-shaext-support],
[Disable support for the Intel SHAEXT instructions]),
shaextsupport=$enableval,shaextsupport=yes)
AC_MSG_RESULT($shaextsupport)
# Implementation of the --disable-pclmul-support switch.
AC_MSG_CHECKING([whether PCLMUL support is requested])
AC_ARG_ENABLE(pclmul-support,
AS_HELP_STRING([--disable-pclmul-support],
[Disable support for the Intel PCLMUL instructions]),
pclmulsupport=$enableval,pclmulsupport=yes)
AC_MSG_RESULT($pclmulsupport)
# Implementation of the --disable-sse41-support switch.
AC_MSG_CHECKING([whether SSE4.1 support is requested])
AC_ARG_ENABLE(sse41-support,
AS_HELP_STRING([--disable-sse41-support],
[Disable support for the Intel SSE4.1 instructions]),
sse41support=$enableval,sse41support=yes)
AC_MSG_RESULT($sse41support)
# Implementation of the --disable-drng-support switch.
AC_MSG_CHECKING([whether DRNG support is requested])
AC_ARG_ENABLE(drng-support,
AS_HELP_STRING([--disable-drng-support],
[Disable support for the Intel DRNG (RDRAND instruction)]),
drngsupport=$enableval,drngsupport=yes)
AC_MSG_RESULT($drngsupport)
# Implementation of the --disable-avx-support switch.
AC_MSG_CHECKING([whether AVX support is requested])
AC_ARG_ENABLE(avx-support,
AS_HELP_STRING([--disable-avx-support],
[Disable support for the Intel AVX instructions]),
avxsupport=$enableval,avxsupport=yes)
AC_MSG_RESULT($avxsupport)
# Implementation of the --disable-avx2-support switch.
AC_MSG_CHECKING([whether AVX2 support is requested])
AC_ARG_ENABLE(avx2-support,
AS_HELP_STRING([--disable-avx2-support],
[Disable support for the Intel AVX2 instructions]),
avx2support=$enableval,avx2support=yes)
AC_MSG_RESULT($avx2support)
# Implementation of the --disable-avx512-support switch.
AC_MSG_CHECKING([whether AVX512 support is requested])
AC_ARG_ENABLE(avx512-support,
AS_HELP_STRING([--disable-avx512-support],
[Disable support for the Intel AVX512 instructions]),
avx512support=$enableval,avx512support=yes)
AC_MSG_RESULT($avx512support)
# Implementation of the --disable-gfni-support switch.
AC_MSG_CHECKING([whether GFNI support is requested])
AC_ARG_ENABLE(gfni-support,
AS_HELP_STRING([--disable-gfni-support],
[Disable support for the Intel GFNI instructions]),
gfnisupport=$enableval,gfnisupport=yes)
AC_MSG_RESULT($gfnisupport)
# Implementation of the --disable-neon-support switch.
AC_MSG_CHECKING([whether NEON support is requested])
AC_ARG_ENABLE(neon-support,
AS_HELP_STRING([--disable-neon-support],
[Disable support for the ARM NEON instructions]),
neonsupport=$enableval,neonsupport=yes)
AC_MSG_RESULT($neonsupport)
# Implementation of the --disable-arm-crypto-support switch.
AC_MSG_CHECKING([whether ARMv8 Crypto Extension support is requested])
AC_ARG_ENABLE(arm-crypto-support,
AS_HELP_STRING([--disable-arm-crypto-support],
[Disable support for the ARMv8 Crypto Extension instructions]),
armcryptosupport=$enableval,armcryptosupport=yes)
AC_MSG_RESULT($armcryptosupport)
# Implementation of the --disable-sve-support switch.
AC_MSG_CHECKING([whether SVE support is requested])
AC_ARG_ENABLE(sve-support,
AS_HELP_STRING([--disable-sve-support],
[Disable support for the ARMv8 SVE instructions]),
svesupport=$enableval,svesupport=yes)
AC_MSG_RESULT($svesupport)
# Implementation of the --disable-sve2-support switch.
AC_MSG_CHECKING([whether SVE2 support is requested])
AC_ARG_ENABLE(sve2-support,
AS_HELP_STRING([--disable-sve2-support],
[Disable support for the ARMv9 SVE2 instructions]),
sve2support=$enableval,sve2support=yes)
AC_MSG_RESULT($sve2support)
# Implementation of the --disable-ppc-crypto-support switch.
AC_MSG_CHECKING([whether PPC crypto support is requested])
AC_ARG_ENABLE(ppc-crypto-support,
AS_HELP_STRING([--disable-ppc-crypto-support],
[Disable support for the PPC crypto instructions introduced in POWER 8 (PowerISA 2.07)]),
ppccryptosupport=$enableval,ppccryptosupport=yes)
AC_MSG_RESULT($ppccryptosupport)
# Implementation of the --disable-O-flag-munging switch.
AC_MSG_CHECKING([whether a -O flag munging is requested])
AC_ARG_ENABLE([O-flag-munging],
AS_HELP_STRING([--disable-O-flag-munging],
[Disable modification of the cc -O flag]),
[enable_o_flag_munging=$enableval],
[enable_o_flag_munging=yes])
AC_MSG_RESULT($enable_o_flag_munging)
AM_CONDITIONAL(ENABLE_O_FLAG_MUNGING, test "$enable_o_flag_munging" = "yes")
# Implementation of the --disable-instrumentation-munging switch.
AC_MSG_CHECKING([whether instrumentation (-fprofile, -fsanitize) munging is requested])
AC_ARG_ENABLE([instrumentation-munging],
AS_HELP_STRING([--disable-instrumentation-munging],
[Disable modification of the cc instrumentation options]),
[enable_instrumentation_munging=$enableval],
[enable_instrumentation_munging=yes])
AC_MSG_RESULT($enable_instrumentation_munging)
AM_CONDITIONAL(ENABLE_INSTRUMENTATION_MUNGING,
test "$enable_instrumentation_munging" = "yes")
# Implementation of the --disable-amd64-as-feature-detection switch.
AC_MSG_CHECKING([whether to enable AMD64 as(1) feature detection])
AC_ARG_ENABLE(amd64-as-feature-detection,
AS_HELP_STRING([--disable-amd64-as-feature-detection],
[Disable the auto-detection of AMD64 as(1) features]),
amd64_as_feature_detection=$enableval,
amd64_as_feature_detection=yes)
AC_MSG_RESULT($amd64_as_feature_detection)
AC_DEFINE_UNQUOTED(PRINTABLE_OS_NAME, "$PRINTABLE_OS_NAME",
[A human readable text with the name of the OS])
# For some systems we know that we have ld_version scripts.
# Use it then as default.
have_ld_version_script=no
case "${host}" in
*-*-linux*)
have_ld_version_script=yes
;;
*-*-gnu*)
have_ld_version_script=yes
;;
esac
AC_ARG_ENABLE([ld-version-script],
AS_HELP_STRING([--enable-ld-version-script],
[enable/disable use of linker version script.
(default is system dependent)]),
[have_ld_version_script=$enableval],
[ : ] )
AM_CONDITIONAL(HAVE_LD_VERSION_SCRIPT, test "$have_ld_version_script" = "yes")
AC_DEFINE_UNQUOTED(NAME_OF_DEV_RANDOM, "$NAME_OF_DEV_RANDOM",
[defined to the name of the strong random device])
AC_DEFINE_UNQUOTED(NAME_OF_DEV_URANDOM, "$NAME_OF_DEV_URANDOM",
[defined to the name of the weaker random device])
###############################
#### Checks for libraries. ####
###############################
#
# gpg-error is required.
#
AM_PATH_GPG_ERROR("$NEED_GPG_ERROR_VERSION")
if test "x$GPG_ERROR_LIBS" = "x"; then
AC_MSG_ERROR([libgpg-error is needed.
See ftp://ftp.gnupg.org/gcrypt/libgpg-error/ .])
fi
AC_DEFINE(GPG_ERR_SOURCE_DEFAULT, GPG_ERR_SOURCE_GCRYPT,
[The default error source for libgcrypt.])
AM_CONDITIONAL(USE_GPGRT_CONFIG, [test -n "$GPGRT_CONFIG" \
-a "$ac_cv_path_GPG_ERROR_CONFIG" = no])
#
# Check whether pthreads is available
#
if test "$have_w32_system" != yes; then
AC_CHECK_LIB(pthread,pthread_create,have_pthread=yes)
if test "$have_pthread" = yes; then
AC_DEFINE(HAVE_PTHREAD, 1 ,[Define if we have pthread.])
fi
fi
# Solaris needs -lsocket and -lnsl. Unisys system includes
# gethostbyname in libsocket but needs libnsl for socket.
AC_SEARCH_LIBS(setsockopt, [socket], ,
[AC_SEARCH_LIBS(setsockopt, [socket], , , [-lnsl])])
AC_SEARCH_LIBS(setsockopt, [nsl])
##################################
#### Checks for header files. ####
##################################
AC_CHECK_HEADERS(unistd.h sys/auxv.h sys/random.h sys/sysctl.h)
##########################################
#### Checks for typedefs, structures, ####
#### and compiler characteristics. ####
##########################################
AC_C_CONST
AC_C_INLINE
AC_TYPE_SIZE_T
AC_TYPE_PID_T
AC_CHECK_TYPES([byte, ushort, u16, u32, u64])
#
# Check for __builtin_bswap32 intrinsic.
#
AC_CACHE_CHECK(for __builtin_bswap32,
[gcry_cv_have_builtin_bswap32],
[gcry_cv_have_builtin_bswap32=no
AC_LINK_IFELSE([AC_LANG_PROGRAM([],
[int x = 0; int y = __builtin_bswap32(x); return y;])],
[gcry_cv_have_builtin_bswap32=yes])])
if test "$gcry_cv_have_builtin_bswap32" = "yes" ; then
AC_DEFINE(HAVE_BUILTIN_BSWAP32,1,
[Defined if compiler has '__builtin_bswap32' intrinsic])
fi
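# (For reference, __builtin_bswap32 reverses the byte order of its
# argument, e.g. __builtin_bswap32(0x11223344) == 0x44332211.)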
#
# Check for __builtin_bswap64 intrinsic.
#
AC_CACHE_CHECK(for __builtin_bswap64,
[gcry_cv_have_builtin_bswap64],
[gcry_cv_have_builtin_bswap64=no
AC_LINK_IFELSE([AC_LANG_PROGRAM([],
[long long x = 0; long long y = __builtin_bswap64(x); return y;])],
[gcry_cv_have_builtin_bswap64=yes])])
if test "$gcry_cv_have_builtin_bswap64" = "yes" ; then
AC_DEFINE(HAVE_BUILTIN_BSWAP64,1,
[Defined if compiler has '__builtin_bswap64' intrinsic])
fi
#
# Check for __builtin_ctz intrinsic.
#
AC_CACHE_CHECK(for __builtin_ctz,
[gcry_cv_have_builtin_ctz],
[gcry_cv_have_builtin_ctz=no
AC_LINK_IFELSE([AC_LANG_PROGRAM([],
[unsigned int x = 0; int y = __builtin_ctz(x); return y;])],
[gcry_cv_have_builtin_ctz=yes])])
if test "$gcry_cv_have_builtin_ctz" = "yes" ; then
AC_DEFINE(HAVE_BUILTIN_CTZ, 1,
[Defined if compiler has '__builtin_ctz' intrinsic])
fi
#
# Check for __builtin_ctzl intrinsic.
#
AC_CACHE_CHECK(for __builtin_ctzl,
[gcry_cv_have_builtin_ctzl],
[gcry_cv_have_builtin_ctzl=no
AC_LINK_IFELSE([AC_LANG_PROGRAM([],
[unsigned long x = 0; long y = __builtin_ctzl(x); return y;])],
[gcry_cv_have_builtin_ctzl=yes])])
if test "$gcry_cv_have_builtin_ctzl" = "yes" ; then
AC_DEFINE(HAVE_BUILTIN_CTZL, 1,
[Defined if compiler has '__builtin_ctzl' intrinsic])
fi
#
# Check for __builtin_clz intrinsic.
#
AC_CACHE_CHECK(for __builtin_clz,
[gcry_cv_have_builtin_clz],
[gcry_cv_have_builtin_clz=no
AC_LINK_IFELSE([AC_LANG_PROGRAM([],
[unsigned int x = 0; int y = __builtin_clz(x); return y;])],
[gcry_cv_have_builtin_clz=yes])])
if test "$gcry_cv_have_builtin_clz" = "yes" ; then
AC_DEFINE(HAVE_BUILTIN_CLZ, 1,
[Defined if compiler has '__builtin_clz' intrinsic])
fi
#
# Check for __builtin_clzl intrinsic.
#
AC_CACHE_CHECK(for __builtin_clzl,
[gcry_cv_have_builtin_clzl],
[gcry_cv_have_builtin_clzl=no
AC_LINK_IFELSE([AC_LANG_PROGRAM([],
[unsigned long x = 0; long y = __builtin_clzl(x); return y;])],
[gcry_cv_have_builtin_clzl=yes])])
if test "$gcry_cv_have_builtin_clzl" = "yes" ; then
AC_DEFINE(HAVE_BUILTIN_CLZL, 1,
[Defined if compiler has '__builtin_clzl' intrinsic])
fi
#
# Check for __sync_synchronize intrinsic.
#
AC_CACHE_CHECK(for __sync_synchronize,
[gcry_cv_have_sync_synchronize],
[gcry_cv_have_sync_synchronize=no
AC_LINK_IFELSE([AC_LANG_PROGRAM([],
[__sync_synchronize(); return 0;])],
[gcry_cv_have_sync_synchronize=yes])])
if test "$gcry_cv_have_sync_synchronize" = "yes" ; then
AC_DEFINE(HAVE_SYNC_SYNCHRONIZE, 1,
[Defined if compiler has '__sync_synchronize' intrinsic])
fi
#
# Check for VLA support (variable length arrays).
#
AC_CACHE_CHECK(whether variable length arrays are supported,
[gcry_cv_have_vla],
[gcry_cv_have_vla=no
AC_COMPILE_IFELSE([AC_LANG_SOURCE(
[[void f1(char *, int);
char foo(int i) {
char b[(i < 0 ? 0 : i) + 1];
f1(b, sizeof b); return b[0];}]])],
[gcry_cv_have_vla=yes])])
if test "$gcry_cv_have_vla" = "yes" ; then
AC_DEFINE(HAVE_VLA,1, [Defined if variable length arrays are supported])
fi
#
# Check for ELF visibility support.
#
AC_CACHE_CHECK(whether the visibility attribute is supported,
gcry_cv_visibility_attribute,
[gcry_cv_visibility_attribute=no
AC_LANG_CONFTEST([AC_LANG_SOURCE(
[[int foo __attribute__ ((visibility ("hidden"))) = 1;
int bar __attribute__ ((visibility ("protected"))) = 1;
]])])
if ${CC-cc} -Werror -S conftest.c -o conftest.s \
1>&AS_MESSAGE_LOG_FD 2>&AS_MESSAGE_LOG_FD ; then
if grep '\.hidden.*foo' conftest.s >/dev/null 2>&1 ; then
if grep '\.protected.*bar' conftest.s >/dev/null 2>&1; then
gcry_cv_visibility_attribute=yes
fi
fi
fi
])
if test "$gcry_cv_visibility_attribute" = "yes"; then
AC_CACHE_CHECK(for broken visibility attribute,
gcry_cv_broken_visibility_attribute,
[gcry_cv_broken_visibility_attribute=yes
AC_LANG_CONFTEST([AC_LANG_SOURCE(
[[int foo (int x);
int bar (int x) __asm__ ("foo")
__attribute__ ((visibility ("hidden")));
int bar (int x) { return x; }
]])])
if ${CC-cc} -Werror -S conftest.c -o conftest.s \
1>&AS_MESSAGE_LOG_FD 2>&AS_MESSAGE_LOG_FD ; then
if grep '\.hidden@<:@ _@:>@foo' conftest.s >/dev/null 2>&1;
then
gcry_cv_broken_visibility_attribute=no
fi
fi
])
fi
if test "$gcry_cv_visibility_attribute" = "yes"; then
AC_CACHE_CHECK(for broken alias attribute,
gcry_cv_broken_alias_attribute,
[gcry_cv_broken_alias_attribute=yes
AC_LANG_CONFTEST([AC_LANG_SOURCE(
[[extern int foo (int x) __asm ("xyzzy");
int bar (int x) { return x; }
extern __typeof (bar) foo __attribute ((weak, alias ("bar")));
extern int dfoo;
extern __typeof (dfoo) dfoo __asm ("abccb");
int dfoo = 1;
]])])
if ${CC-cc} -Werror -S conftest.c -o conftest.s \
1>&AS_MESSAGE_LOG_FD 2>&AS_MESSAGE_LOG_FD ; then
if grep 'xyzzy' conftest.s >/dev/null 2>&1 && \
grep 'abccb' conftest.s >/dev/null 2>&1; then
gcry_cv_broken_alias_attribute=no
fi
fi
])
fi
if test "$gcry_cv_visibility_attribute" = "yes"; then
AC_CACHE_CHECK(if gcc supports -fvisibility=hidden,
gcry_cv_gcc_has_f_visibility,
[gcry_cv_gcc_has_f_visibility=no
_gcc_cflags_save=$CFLAGS
CFLAGS="-fvisibility=hidden"
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([],[])],
gcry_cv_gcc_has_f_visibility=yes)
CFLAGS=$_gcc_cflags_save;
])
fi
if test "$gcry_cv_visibility_attribute" = "yes" \
&& test "$gcry_cv_broken_visibility_attribute" != "yes" \
&& test "$gcry_cv_broken_alias_attribute" != "yes" \
&& test "$gcry_cv_gcc_has_f_visibility" = "yes"
then
AC_DEFINE(GCRY_USE_VISIBILITY, 1,
[Define to use the GNU C visibility attribute.])
CFLAGS="$CFLAGS -fvisibility=hidden"
fi
# Following attribute tests depend on warnings to cause compile to fail,
# so set -Werror temporarily.
_gcc_cflags_save=$CFLAGS
CFLAGS="$CFLAGS -Werror"
#
# Check whether the compiler supports the GCC style aligned attribute
#
AC_CACHE_CHECK([whether the GCC style aligned attribute is supported],
[gcry_cv_gcc_attribute_aligned],
[gcry_cv_gcc_attribute_aligned=no
AC_COMPILE_IFELSE([AC_LANG_SOURCE(
[[struct { int a; } foo __attribute__ ((aligned (16)));]])],
[gcry_cv_gcc_attribute_aligned=yes])])
if test "$gcry_cv_gcc_attribute_aligned" = "yes" ; then
AC_DEFINE(HAVE_GCC_ATTRIBUTE_ALIGNED,1,
[Defined if a GCC style "__attribute__ ((aligned (n))" is supported])
fi
#
# Check whether the compiler supports the GCC style packed attribute
#
AC_CACHE_CHECK([whether the GCC style packed attribute is supported],
[gcry_cv_gcc_attribute_packed],
[gcry_cv_gcc_attribute_packed=no
AC_COMPILE_IFELSE([AC_LANG_SOURCE(
[[struct foolong_s { long b; } __attribute__ ((packed));
struct foo_s { char a; struct foolong_s b; }
__attribute__ ((packed));
enum bar {
FOO = 1 / (sizeof(struct foo_s) == (sizeof(char) + sizeof(long))),
};]])],
[gcry_cv_gcc_attribute_packed=yes])])
if test "$gcry_cv_gcc_attribute_packed" = "yes" ; then
AC_DEFINE(HAVE_GCC_ATTRIBUTE_PACKED,1,
[Defined if a GCC style "__attribute__ ((packed))" is supported])
fi
#
# Check whether the compiler supports the GCC style may_alias attribute
#
AC_CACHE_CHECK([whether the GCC style may_alias attribute is supported],
[gcry_cv_gcc_attribute_may_alias],
[gcry_cv_gcc_attribute_may_alias=no
AC_COMPILE_IFELSE([AC_LANG_SOURCE(
[[typedef struct foo_s { int a; }
__attribute__ ((may_alias)) foo_t;]])],
[gcry_cv_gcc_attribute_may_alias=yes])])
if test "$gcry_cv_gcc_attribute_may_alias" = "yes" ; then
AC_DEFINE(HAVE_GCC_ATTRIBUTE_MAY_ALIAS,1,
[Defined if a GCC style "__attribute__ ((may_alias))" is supported])
fi
# Restore flags.
CFLAGS=$_gcc_cflags_save;
#
# Check whether the compiler supports 'asm' or '__asm__' keyword for
# assembler blocks.
#
AC_CACHE_CHECK([whether 'asm' assembler keyword is supported],
[gcry_cv_have_asm],
[gcry_cv_have_asm=no
AC_COMPILE_IFELSE([AC_LANG_SOURCE(
[[void a(void) { asm("":::"memory"); }]])],
[gcry_cv_have_asm=yes])])
AC_CACHE_CHECK([whether '__asm__' assembler keyword is supported],
[gcry_cv_have___asm__],
[gcry_cv_have___asm__=no
AC_COMPILE_IFELSE([AC_LANG_SOURCE(
[[void a(void) { __asm__("":::"memory"); }]])],
[gcry_cv_have___asm__=yes])])
if test "$gcry_cv_have_asm" = "no" ; then
if test "$gcry_cv_have___asm__" = "yes" ; then
AC_DEFINE(asm,__asm__,
[Define to the supported assembler block keyword, if plain 'asm' is not
supported])
fi
fi
#
# Check whether the compiler supports inline assembly memory barrier.
#
if test "$gcry_cv_have_asm" = "no" ; then
if test "$gcry_cv_have___asm__" = "yes" ; then
AC_CACHE_CHECK([whether inline assembly memory barrier is supported],
[gcry_cv_have_asm_volatile_memory],
[gcry_cv_have_asm_volatile_memory=no
AC_COMPILE_IFELSE([AC_LANG_SOURCE(
[[void a(int x)
{
__asm__ volatile("":::"memory");
__asm__ volatile("":"+r"(x)::"memory");
}]])],
[gcry_cv_have_asm_volatile_memory=yes])])
fi
else
AC_CACHE_CHECK([whether inline assembly memory barrier is supported],
[gcry_cv_have_asm_volatile_memory],
[gcry_cv_have_asm_volatile_memory=no
AC_COMPILE_IFELSE([AC_LANG_SOURCE(
[[void a(int x)
{
asm volatile("":::"memory");
asm volatile("":"+r"(x)::"memory"); }]])],
[gcry_cv_have_asm_volatile_memory=yes])])
fi
if test "$gcry_cv_have_asm_volatile_memory" = "yes" ; then
AC_DEFINE(HAVE_GCC_ASM_VOLATILE_MEMORY,1,
[Define if inline asm memory barrier is supported])
fi
#
# Check whether GCC assembler supports features needed for our ARM
# implementations. This needs to be done before setting up the
# assembler stuff.
#
AC_CACHE_CHECK([whether GCC assembler is compatible with ARM assembly implementations],
[gcry_cv_gcc_arm_platform_as_ok],
[if test "$try_asm_modules" != "yes" ; then
gcry_cv_gcc_arm_platform_as_ok="n/a"
else
gcry_cv_gcc_arm_platform_as_ok=no
AC_LINK_IFELSE([AC_LANG_PROGRAM(
[[__asm__(
/* Test if assembler supports UAL syntax. */
".syntax unified\n\t"
".arm\n\t" /* our assembly code is in ARM mode */
".text\n\t"
/* Following causes error if assembler ignored '.syntax unified'. */
"asmfunc:\n\t"
"add r0, r0, r4, ror #12;\n\t"
/* Test if '.type' and '.size' are supported. */
".size asmfunc,.-asmfunc;\n\t"
".type asmfunc,%function;\n\t"
);
void asmfunc(void);]], [ asmfunc(); ] )],
[gcry_cv_gcc_arm_platform_as_ok=yes])
fi])
if test "$gcry_cv_gcc_arm_platform_as_ok" = "yes" ; then
AC_DEFINE(HAVE_COMPATIBLE_GCC_ARM_PLATFORM_AS,1,
[Defined if underlying assembler is compatible with ARM assembly implementations])
fi
#
# Check whether GCC assembler supports features needed for our ARMv8/Aarch64
# implementations. This needs to be done before setting up the
# assembler stuff.
#
AC_CACHE_CHECK([whether GCC assembler is compatible with ARMv8/Aarch64 assembly implementations],
[gcry_cv_gcc_aarch64_platform_as_ok],
[if test "$try_asm_modules" != "yes" ; then
gcry_cv_gcc_aarch64_platform_as_ok="n/a"
else
gcry_cv_gcc_aarch64_platform_as_ok=no
AC_LINK_IFELSE([AC_LANG_PROGRAM(
[[__asm__(
".text\n\t"
"asmfunc:\n\t"
"eor x0, x0, x30, ror #12;\n\t"
"add x0, x0, x30, asr #12;\n\t"
"eor v0.16b, v0.16b, v31.16b;\n\t"
);
void asmfunc(void);]], [ asmfunc(); ] )],
[gcry_cv_gcc_aarch64_platform_as_ok=yes])
fi])
if test "$gcry_cv_gcc_aarch64_platform_as_ok" = "yes" ; then
AC_DEFINE(HAVE_COMPATIBLE_GCC_AARCH64_PLATFORM_AS,1,
[Defined if underlying assembler is compatible with ARMv8/Aarch64 assembly implementations])
fi
#
# Check whether GCC assembler supports CFI directives.
#
AC_CACHE_CHECK([whether GCC assembler supports CFI directives],
[gcry_cv_gcc_asm_cfi_directives],
[gcry_cv_gcc_asm_cfi_directives=no
AC_LINK_IFELSE([AC_LANG_PROGRAM(
[[__asm__(
".text\n\t"
"ac_test:\n\t"
".cfi_startproc\n\t"
".cfi_remember_state\n\t"
".cfi_adjust_cfa_offset 8\n\t"
".cfi_rel_offset 0, 8\n\t"
".cfi_def_cfa_register 1\n\t"
".cfi_register 2, 3\n\t"
".cfi_restore 2\n\t"
".cfi_escape 0x0f, 0x02, 0x11, 0x00\n\t"
".cfi_restore_state\n\t"
".long 0\n\t"
".cfi_endproc\n\t"
);
void asmfunc(void);]])],
[gcry_cv_gcc_asm_cfi_directives=yes])])
if test "$gcry_cv_gcc_asm_cfi_directives" = "yes" ; then
AC_DEFINE(HAVE_GCC_ASM_CFI_DIRECTIVES,1,
[Defined if underlying assembler supports CFI directives])
fi
#
# Check whether GCC assembler supports ELF directives.
#
AC_CACHE_CHECK([whether GCC assembler supports ELF directives],
[gcry_cv_gcc_asm_elf_directives],
[gcry_cv_gcc_asm_elf_directives=no
AC_LINK_IFELSE([AC_LANG_PROGRAM(
[[__asm__(
/* Test if ELF directives '.type' and '.size' are supported. */
".text\n\t"
"asmfunc:\n\t"
".size asmfunc,.-asmfunc;\n\t"
".type asmfunc,STT_FUNC;\n\t"
);]])],
[gcry_cv_gcc_asm_elf_directives=yes])])
if test "$gcry_cv_gcc_asm_elf_directives" = "yes" ; then
AC_DEFINE(HAVE_GCC_ASM_ELF_DIRECTIVES,1,
[Defined if underlying assembler supports ELF directives])
fi
#
# Check whether underscores in symbols are required. This needs to be
# done before setting up the assembler stuff.
#
GNUPG_SYS_SYMBOL_UNDERSCORE()
#################################
#### ####
#### Setup assembler stuff. ####
#### Define mpi_cpu_arch. ####
#### ####
#################################
AC_ARG_ENABLE(mpi-path,
AS_HELP_STRING([--enable-mpi-path=EXTRA_PATH],
[prepend EXTRA_PATH to list of CPU specific optimizations]),
mpi_extra_path="$enableval",mpi_extra_path="")
AC_MSG_CHECKING(architecture and mpi assembler functions)
if test -f $srcdir/mpi/config.links ; then
. $srcdir/mpi/config.links
AC_CONFIG_LINKS("$mpi_ln_list")
ac_cv_mpi_sflags="$mpi_sflags"
AC_MSG_RESULT($mpi_cpu_arch)
else
AC_MSG_RESULT(failed)
AC_MSG_ERROR([mpi/config.links missing!])
fi
MPI_SFLAGS="$ac_cv_mpi_sflags"
AC_SUBST(MPI_SFLAGS)
AM_CONDITIONAL(MPI_MOD_ASM_MPIH_ADD1, test "$mpi_mod_asm_mpih_add1" = yes)
AM_CONDITIONAL(MPI_MOD_ASM_MPIH_SUB1, test "$mpi_mod_asm_mpih_sub1" = yes)
AM_CONDITIONAL(MPI_MOD_ASM_MPIH_MUL1, test "$mpi_mod_asm_mpih_mul1" = yes)
AM_CONDITIONAL(MPI_MOD_ASM_MPIH_MUL2, test "$mpi_mod_asm_mpih_mul2" = yes)
AM_CONDITIONAL(MPI_MOD_ASM_MPIH_MUL3, test "$mpi_mod_asm_mpih_mul3" = yes)
AM_CONDITIONAL(MPI_MOD_ASM_MPIH_LSHIFT, test "$mpi_mod_asm_mpih_lshift" = yes)
AM_CONDITIONAL(MPI_MOD_ASM_MPIH_RSHIFT, test "$mpi_mod_asm_mpih_rshift" = yes)
AM_CONDITIONAL(MPI_MOD_ASM_UDIV, test "$mpi_mod_asm_udiv" = yes)
AM_CONDITIONAL(MPI_MOD_ASM_UDIV_QRNND, test "$mpi_mod_asm_udiv_qrnnd" = yes)
AM_CONDITIONAL(MPI_MOD_C_MPIH_ADD1, test "$mpi_mod_c_mpih_add1" = yes)
AM_CONDITIONAL(MPI_MOD_C_MPIH_SUB1, test "$mpi_mod_c_mpih_sub1" = yes)
AM_CONDITIONAL(MPI_MOD_C_MPIH_MUL1, test "$mpi_mod_c_mpih_mul1" = yes)
AM_CONDITIONAL(MPI_MOD_C_MPIH_MUL2, test "$mpi_mod_c_mpih_mul2" = yes)
AM_CONDITIONAL(MPI_MOD_C_MPIH_MUL3, test "$mpi_mod_c_mpih_mul3" = yes)
AM_CONDITIONAL(MPI_MOD_C_MPIH_LSHIFT, test "$mpi_mod_c_mpih_lshift" = yes)
AM_CONDITIONAL(MPI_MOD_C_MPIH_RSHIFT, test "$mpi_mod_c_mpih_rshift" = yes)
AM_CONDITIONAL(MPI_MOD_C_UDIV, test "$mpi_mod_c_udiv" = yes)
AM_CONDITIONAL(MPI_MOD_C_UDIV_QRNND, test "$mpi_mod_c_udiv_qrnnd" = yes)
# Reset non applicable feature flags.
if test "$mpi_cpu_arch" != "x86" ; then
aesnisupport="n/a"
shaextsupport="n/a"
pclmulsupport="n/a"
sse41support="n/a"
avxsupport="n/a"
avx2support="n/a"
avx512support="n/a"
gfnisupport="n/a"
padlocksupport="n/a"
drngsupport="n/a"
fi
if test "$mpi_cpu_arch" != "arm" ; then
if test "$mpi_cpu_arch" != "aarch64" ; then
neonsupport="n/a"
armcryptosupport="n/a"
svesupport="n/a"
sve2support="n/a"
fi
fi
if test "$mpi_cpu_arch" != "ppc"; then
ppccryptosupport="n/a"
fi
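# Setting a flag to "n/a" (rather than "no") records that the feature
# cannot exist on this architecture at all: the later `test ... = xyes'
# checks then skip the support code, and the summary output can
# distinguish "not applicable" from "disabled".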
#############################################
#### ####
#### Platform specific compiler checks. ####
#### ####
#############################################
# Following tests depend on warnings to cause compile to fail, so set -Werror
# temporarily.
_gcc_cflags_save=$CFLAGS
CFLAGS="$CFLAGS -Werror"
#
# Check whether compiler supports 'optimize' function attribute
#
AC_CACHE_CHECK([whether compiler supports 'optimize' function attribute],
[gcry_cv_gcc_attribute_optimize],
[gcry_cv_gcc_attribute_optimize=no
AC_COMPILE_IFELSE([AC_LANG_SOURCE(
[[int __attribute__ ((optimize("-O2"))) fn(int i){return i;}]])],
[gcry_cv_gcc_attribute_optimize=yes])])
if test "$gcry_cv_gcc_attribute_optimize" = "yes" ; then
AC_DEFINE(HAVE_GCC_ATTRIBUTE_OPTIMIZE,1,
[Defined if compiler supports "__attribute__ ((optimize))" function attribute])
fi
#
# Check whether compiler supports 'ms_abi' function attribute.
#
AC_CACHE_CHECK([whether compiler supports 'ms_abi' function attribute],
[gcry_cv_gcc_attribute_ms_abi],
[gcry_cv_gcc_attribute_ms_abi=no
AC_COMPILE_IFELSE([AC_LANG_SOURCE(
[[int __attribute__ ((ms_abi)) proto(int);]])],
[gcry_cv_gcc_attribute_ms_abi=yes])])
if test "$gcry_cv_gcc_attribute_ms_abi" = "yes" ; then
AC_DEFINE(HAVE_GCC_ATTRIBUTE_MS_ABI,1,
[Defined if compiler supports "__attribute__ ((ms_abi))" function attribute])
fi
#
# Check whether compiler supports 'sysv_abi' function attribute.
#
AC_CACHE_CHECK([whether compiler supports 'sysv_abi' function attribute],
[gcry_cv_gcc_attribute_sysv_abi],
[gcry_cv_gcc_attribute_sysv_abi=no
AC_COMPILE_IFELSE([AC_LANG_SOURCE(
[[int __attribute__ ((sysv_abi)) proto(int);]])],
[gcry_cv_gcc_attribute_sysv_abi=yes])])
if test "$gcry_cv_gcc_attribute_sysv_abi" = "yes" ; then
AC_DEFINE(HAVE_GCC_ATTRIBUTE_SYSV_ABI,1,
[Defined if compiler supports "__attribute__ ((sysv_abi))" function attribute])
fi
#
# Check whether default calling convention is 'ms_abi'.
#
if test "$gcry_cv_gcc_attribute_ms_abi" = "yes" ; then
AC_CACHE_CHECK([whether default calling convention is 'ms_abi'],
[gcry_cv_gcc_default_abi_is_ms_abi],
[gcry_cv_gcc_default_abi_is_ms_abi=no
AC_COMPILE_IFELSE([AC_LANG_SOURCE(
[[void *test(void) {
void *(*def_func)(void) = test;
void *__attribute__((ms_abi))(*msabi_func)(void);
/* warning on SysV abi targets, passes on Windows based targets */
msabi_func = def_func;
return msabi_func;
}]])],
[gcry_cv_gcc_default_abi_is_ms_abi=yes])])
if test "$gcry_cv_gcc_default_abi_is_ms_abi" = "yes" ; then
AC_DEFINE(HAVE_GCC_DEFAULT_ABI_IS_MS_ABI,1,
[Defined if default calling convention is 'ms_abi'])
fi
fi
#
# Check whether default calling convention is 'sysv_abi'.
#
if test "$gcry_cv_gcc_attribute_sysv_abi" = "yes" ; then
AC_CACHE_CHECK([whether default calling convention is 'sysv_abi'],
[gcry_cv_gcc_default_abi_is_sysv_abi],
[gcry_cv_gcc_default_abi_is_sysv_abi=no
AC_COMPILE_IFELSE([AC_LANG_SOURCE(
[[void *test(void) {
void *(*def_func)(void) = test;
void *__attribute__((sysv_abi))(*sysvabi_func)(void);
/* warning on MS ABI targets, passes on SysV ABI targets */
sysvabi_func = def_func;
return sysvabi_func;
}]])],
[gcry_cv_gcc_default_abi_is_sysv_abi=yes])])
if test "$gcry_cv_gcc_default_abi_is_sysv_abi" = "yes" ; then
AC_DEFINE(HAVE_GCC_DEFAULT_ABI_IS_SYSV_ABI,1,
[Defined if default calling convention is 'sysv_abi'])
fi
fi
# Restore flags.
CFLAGS=$_gcc_cflags_save;
#
# Check whether GCC inline assembler supports SSSE3 instructions
# This is required for the AES-NI instructions.
#
AC_CACHE_CHECK([whether GCC inline assembler supports SSSE3 instructions],
[gcry_cv_gcc_inline_asm_ssse3],
[if test "$mpi_cpu_arch" != "x86" ||
test "$try_asm_modules" != "yes" ; then
gcry_cv_gcc_inline_asm_ssse3="n/a"
else
gcry_cv_gcc_inline_asm_ssse3=no
AC_LINK_IFELSE([AC_LANG_PROGRAM(
[[static unsigned char be_mask[16] __attribute__ ((aligned (16))) =
{ 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 };
void a(void) {
__asm__("pshufb %[mask], %%xmm2\n\t"::[mask]"m"(*be_mask):);
}]], [ a(); ] )],
[gcry_cv_gcc_inline_asm_ssse3=yes])
fi])
if test "$gcry_cv_gcc_inline_asm_ssse3" = "yes" ; then
AC_DEFINE(HAVE_GCC_INLINE_ASM_SSSE3,1,
[Defined if inline assembler supports SSSE3 instructions])
fi
#
# Check whether GCC inline assembler supports PCLMUL instructions.
#
AC_CACHE_CHECK([whether GCC inline assembler supports PCLMUL instructions],
[gcry_cv_gcc_inline_asm_pclmul],
[if test "$mpi_cpu_arch" != "x86" ||
test "$try_asm_modules" != "yes" ; then
gcry_cv_gcc_inline_asm_pclmul="n/a"
else
gcry_cv_gcc_inline_asm_pclmul=no
AC_LINK_IFELSE([AC_LANG_PROGRAM(
[[void a(void) {
__asm__("pclmulqdq \$0, %%xmm1, %%xmm3\n\t":::"cc");
}]], [ a(); ] )],
[gcry_cv_gcc_inline_asm_pclmul=yes])
fi])
if test "$gcry_cv_gcc_inline_asm_pclmul" = "yes" ; then
AC_DEFINE(HAVE_GCC_INLINE_ASM_PCLMUL,1,
[Defined if inline assembler supports PCLMUL instructions])
fi
#
# Check whether GCC inline assembler supports SHA Extensions instructions.
#
AC_CACHE_CHECK([whether GCC inline assembler supports SHA Extensions instructions],
[gcry_cv_gcc_inline_asm_shaext],
[if test "$mpi_cpu_arch" != "x86" ||
test "$try_asm_modules" != "yes" ; then
gcry_cv_gcc_inline_asm_shaext="n/a"
else
gcry_cv_gcc_inline_asm_shaext=no
AC_LINK_IFELSE([AC_LANG_PROGRAM(
[[void a(void) {
__asm__("sha1rnds4 \$0, %%xmm1, %%xmm3\n\t":::"cc");
__asm__("sha1nexte %%xmm1, %%xmm3\n\t":::"cc");
__asm__("sha1msg1 %%xmm1, %%xmm3\n\t":::"cc");
__asm__("sha1msg2 %%xmm1, %%xmm3\n\t":::"cc");
__asm__("sha256rnds2 %%xmm0, %%xmm1, %%xmm3\n\t":::"cc");
__asm__("sha256msg1 %%xmm1, %%xmm3\n\t":::"cc");
__asm__("sha256msg2 %%xmm1, %%xmm3\n\t":::"cc");
}]], [ a(); ] )],
[gcry_cv_gcc_inline_asm_shaext=yes])
fi])
if test "$gcry_cv_gcc_inline_asm_shaext" = "yes" ; then
AC_DEFINE(HAVE_GCC_INLINE_ASM_SHAEXT,1,
[Defined if inline assembler supports SHA Extensions instructions])
fi
#
# Check whether GCC inline assembler supports SSE4.1 instructions.
#
AC_CACHE_CHECK([whether GCC inline assembler supports SSE4.1 instructions],
[gcry_cv_gcc_inline_asm_sse41],
[if test "$mpi_cpu_arch" != "x86" ||
test "$try_asm_modules" != "yes" ; then
gcry_cv_gcc_inline_asm_sse41="n/a"
else
gcry_cv_gcc_inline_asm_sse41=no
AC_LINK_IFELSE([AC_LANG_PROGRAM(
[[void a(void) {
int i;
__asm__("pextrd \$2, %%xmm0, %[out]\n\t" : [out] "=m" (i));
}]], [ a(); ] )],
[gcry_cv_gcc_inline_asm_sse41=yes])
fi])
if test "$gcry_cv_gcc_inline_asm_sse41" = "yes" ; then
AC_DEFINE(HAVE_GCC_INLINE_ASM_SSE41,1,
[Defined if inline assembler supports SSE4.1 instructions])
fi
#
# Check whether GCC inline assembler supports AVX instructions
#
AC_CACHE_CHECK([whether GCC inline assembler supports AVX instructions],
[gcry_cv_gcc_inline_asm_avx],
[if test "$mpi_cpu_arch" != "x86" ||
test "$try_asm_modules" != "yes" ; then
gcry_cv_gcc_inline_asm_avx="n/a"
else
gcry_cv_gcc_inline_asm_avx=no
AC_LINK_IFELSE([AC_LANG_PROGRAM(
[[void a(void) {
__asm__("xgetbv; vaesdeclast (%[mem]),%%xmm0,%%xmm7\n\t"::[mem]"r"(0):);
}]], [ a(); ] )],
[gcry_cv_gcc_inline_asm_avx=yes])
fi])
if test "$gcry_cv_gcc_inline_asm_avx" = "yes" ; then
AC_DEFINE(HAVE_GCC_INLINE_ASM_AVX,1,
[Defined if inline assembler supports AVX instructions])
fi
#
# Check whether GCC inline assembler supports AVX2 instructions
#
AC_CACHE_CHECK([whether GCC inline assembler supports AVX2 instructions],
[gcry_cv_gcc_inline_asm_avx2],
[if test "$mpi_cpu_arch" != "x86" ||
test "$try_asm_modules" != "yes" ; then
gcry_cv_gcc_inline_asm_avx2="n/a"
else
gcry_cv_gcc_inline_asm_avx2=no
AC_LINK_IFELSE([AC_LANG_PROGRAM(
[[void a(void) {
__asm__("xgetbv; vpbroadcastb %%xmm7,%%ymm1\n\t":::"cc");
}]], [ a(); ] )],
[gcry_cv_gcc_inline_asm_avx2=yes])
fi])
if test "$gcry_cv_gcc_inline_asm_avx2" = "yes" ; then
AC_DEFINE(HAVE_GCC_INLINE_ASM_AVX2,1,
[Defined if inline assembler supports AVX2 instructions])
fi
#
# Check whether GCC inline assembler supports AVX512 instructions
#
AC_CACHE_CHECK([whether GCC inline assembler supports AVX512 instructions],
[gcry_cv_gcc_inline_asm_avx512],
[if test "$mpi_cpu_arch" != "x86" ||
test "$try_asm_modules" != "yes" ; then
gcry_cv_gcc_inline_asm_avx512="n/a"
else
gcry_cv_gcc_inline_asm_avx512=no
AC_LINK_IFELSE([AC_LANG_PROGRAM(
[[void a(void) {
__asm__("xgetbv; vpopcntq %%zmm7, %%zmm1%{%%k1%}%{z%};\n\t":::"cc");
__asm__("vpexpandb %%zmm3, %%zmm1;\n\t":::"cc");
__asm__("vpxorq %%xmm7, %%xmm7, %%xmm7;\n\t":::"cc");
__asm__("vpxorq %%ymm7, %%ymm7, %%ymm7;\n\t":::"cc");
__asm__("vpxorq (%%eax)%{1to8%}, %%zmm7, %%zmm7;\n\t":::"cc");
}]], [ a(); ] )],
[gcry_cv_gcc_inline_asm_avx512=yes])
fi])
if test "$gcry_cv_gcc_inline_asm_avx512" = "yes" ; then
AC_DEFINE(HAVE_GCC_INLINE_ASM_AVX512,1,
[Defined if inline assembler supports AVX512 instructions])
fi
#
# Check whether GCC inline assembler supports VAES and VPCLMUL instructions
#
AC_CACHE_CHECK([whether GCC inline assembler supports VAES and VPCLMUL instructions],
[gcry_cv_gcc_inline_asm_vaes_vpclmul],
[if test "$mpi_cpu_arch" != "x86" ||
test "$try_asm_modules" != "yes" ; then
gcry_cv_gcc_inline_asm_vaes_vpclmul="n/a"
else
gcry_cv_gcc_inline_asm_vaes_vpclmul=no
AC_LINK_IFELSE([AC_LANG_PROGRAM(
[[void a(void) {
__asm__("vaesenclast %%ymm7,%%ymm7,%%ymm1\n\t":::"cc");/*256-bit*/
__asm__("vaesenclast %%zmm7,%%zmm7,%%zmm1\n\t":::"cc");/*512-bit*/
__asm__("vpclmulqdq \$0,%%ymm7,%%ymm7,%%ymm1\n\t":::"cc");/*256-bit*/
__asm__("vpclmulqdq \$0,%%zmm7,%%zmm7,%%zmm1\n\t":::"cc");/*512-bit*/
}]], [ a(); ] )],
[gcry_cv_gcc_inline_asm_vaes_vpclmul=yes])
fi])
if test "$gcry_cv_gcc_inline_asm_vaes_vpclmul" = "yes" ; then
AC_DEFINE(HAVE_GCC_INLINE_ASM_VAES_VPCLMUL,1,
[Defined if inline assembler supports VAES and VPCLMUL instructions])
fi
#
# Check whether GCC inline assembler supports GFNI instructions
#
AC_CACHE_CHECK([whether GCC inline assembler supports GFNI instructions],
[gcry_cv_gcc_inline_asm_gfni],
[if test "$mpi_cpu_arch" != "x86" ||
test "$try_asm_modules" != "yes" ; then
gcry_cv_gcc_inline_asm_gfni="n/a"
else
gcry_cv_gcc_inline_asm_gfni=no
AC_LINK_IFELSE([AC_LANG_PROGRAM(
[[void a(void) {
__asm__("gf2p8affineqb \$123, %%xmm0, %%xmm0;\n\t":::"cc"); /* SSE */
__asm__("vgf2p8affineinvqb \$234, %%ymm1, %%ymm1, %%ymm1;\n\t":::"cc"); /* AVX */
__asm__("vgf2p8mulb (%%eax), %%zmm2, %%zmm2;\n\t":::"cc"); /* AVX512 */
}]], [ a(); ] )],
[gcry_cv_gcc_inline_asm_gfni=yes])
fi])
if test "$gcry_cv_gcc_inline_asm_gfni" = "yes" ; then
AC_DEFINE(HAVE_GCC_INLINE_ASM_GFNI,1,
[Defined if inline assembler supports GFNI instructions])
fi
#
# Check whether GCC inline assembler supports BMI2 instructions
#
AC_CACHE_CHECK([whether GCC inline assembler supports BMI2 instructions],
[gcry_cv_gcc_inline_asm_bmi2],
[if test "$mpi_cpu_arch" != "x86" ||
test "$try_asm_modules" != "yes" ; then
gcry_cv_gcc_inline_asm_bmi2="n/a"
else
gcry_cv_gcc_inline_asm_bmi2=no
AC_LINK_IFELSE([AC_LANG_PROGRAM(
[[unsigned int a(unsigned int x, unsigned int y) {
unsigned int tmp1, tmp2;
asm ("rorxl %2, %1, %0"
: "=r" (tmp1)
: "rm0" (x), "J" (32 - ((23) & 31)));
asm ("andnl %2, %1, %0"
: "=r" (tmp2)
: "r0" (x), "rm" (y));
return tmp1 + tmp2;
}]], [ a(1, 2); ] )],
[gcry_cv_gcc_inline_asm_bmi2=yes])
fi])
if test "$gcry_cv_gcc_inline_asm_bmi2" = "yes" ; then
AC_DEFINE(HAVE_GCC_INLINE_ASM_BMI2,1,
[Defined if inline assembler supports BMI2 instructions])
fi
+#
+# Check whether compiler supports x86/AVX512 intrinsics
+#
+_gcc_cflags_save=$CFLAGS
+CFLAGS="$CFLAGS -mavx512f"
+
+AC_CACHE_CHECK([whether compiler supports x86/AVX512 intrinsics],
+ [gcry_cv_cc_x86_avx512_intrinsics],
+ [if test "$mpi_cpu_arch" != "x86" ||
+ test "$try_asm_modules" != "yes" ; then
+ gcry_cv_cc_x86_avx512_intrinsics="n/a"
+ else
+ gcry_cv_cc_x86_avx512_intrinsics=no
+ AC_COMPILE_IFELSE([AC_LANG_SOURCE(
+ [[#include <immintrin.h>
+ __m512i fn(void *in, __m128i y)
+ {
+ __m512i x;
+ x = _mm512_maskz_loadu_epi32(_cvtu32_mask16(0xfff0), in)
+ ^ _mm512_castsi128_si512(y);
+ asm volatile ("vinserti32x4 \$3, %0, %%zmm6, %%zmm6;\n\t"
+ "vpxord %%zmm6, %%zmm6, %%zmm6"
+ ::"x"(y),"r"(in):"memory","xmm6");
+ return x;
+ }
+ ]])],
+ [gcry_cv_cc_x86_avx512_intrinsics=yes])
+ fi])
+if test "$gcry_cv_cc_x86_avx512_intrinsics" = "yes" ; then
+ AC_DEFINE(HAVE_COMPATIBLE_CC_X86_AVX512_INTRINSICS,1,
+ [Defined if underlying compiler supports x86/AVX512 intrinsics])
+fi
+
+AM_CONDITIONAL(ENABLE_X86_AVX512_INTRINSICS_EXTRA_CFLAGS,
+ test "$gcry_cv_cc_x86_avx512_intrinsics" = "yes")
+
+# Restore flags.
+CFLAGS=$_gcc_cflags_save;
+
+
#
# Check whether GCC assembler needs "-Wa,--divide" to correctly handle
# constant division
#
if test $amd64_as_feature_detection = yes; then
AC_CACHE_CHECK([whether GCC assembler handles division correctly],
[gcry_cv_gcc_as_const_division_ok],
[gcry_cv_gcc_as_const_division_ok=no
AC_LINK_IFELSE([AC_LANG_PROGRAM(
[[__asm__(".text\n\tfn:\n\t xorl \$(123456789/12345678), %ebp;\n\t");
void fn(void);]],
[fn();])],
[gcry_cv_gcc_as_const_division_ok=yes])])
if test "$gcry_cv_gcc_as_const_division_ok" = "no" ; then
#
# Add '-Wa,--divide' to CPPFLAGS and try check again.
#
_gcc_cppflags_save="$CPPFLAGS"
CPPFLAGS="$CPPFLAGS -Wa,--divide"
AC_CACHE_CHECK([whether GCC assembler handles division correctly with "-Wa,--divide"],
[gcry_cv_gcc_as_const_division_with_wadivide_ok],
[gcry_cv_gcc_as_const_division_with_wadivide_ok=no
AC_LINK_IFELSE([AC_LANG_PROGRAM(
[[__asm__(".text\n\tfn:\n\t xorl \$(123456789/12345678), %ebp;\n\t");
void fn(void);]],
[fn();])],
[gcry_cv_gcc_as_const_division_with_wadivide_ok=yes])])
if test "$gcry_cv_gcc_as_const_division_with_wadivide_ok" = "no" ; then
# '-Wa,--divide' did not work, restore old flags.
CPPFLAGS="$_gcc_cppflags_save"
fi
fi
fi
#
# Check whether GCC assembler supports features needed for our amd64
# implementations
#
if test $amd64_as_feature_detection = yes; then
AC_CACHE_CHECK([whether GCC assembler is compatible for amd64 assembly implementations],
[gcry_cv_gcc_amd64_platform_as_ok],
[if test "$mpi_cpu_arch" != "x86" ||
test "$try_asm_modules" != "yes" ; then
gcry_cv_gcc_amd64_platform_as_ok="n/a"
else
gcry_cv_gcc_amd64_platform_as_ok=no
AC_LINK_IFELSE([AC_LANG_PROGRAM(
[[__asm__(
/* Test if '.type' and '.size' are supported. */
/* These work only on ELF targets. */
".text\n\t"
"asmfunc:\n\t"
".size asmfunc,.-asmfunc;\n\t"
".type asmfunc,@function;\n\t"
/* Test if assembler allows use of '/' for constant division
* (Solaris/x86 issue). If previous constant division check
* and "-Wa,--divide" workaround failed, this causes assembly
* to be disable on this machine. */
"xorl \$(123456789/12345678), %ebp;\n\t"
);
void asmfunc(void);]], [ asmfunc(); ])],
[gcry_cv_gcc_amd64_platform_as_ok=yes])
fi])
if test "$gcry_cv_gcc_amd64_platform_as_ok" = "yes" ; then
AC_DEFINE(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS,1,
[Defined if underlying assembler is compatible with amd64 assembly implementations])
fi
if test "$gcry_cv_gcc_amd64_platform_as_ok" = "no" &&
test "$gcry_cv_gcc_attribute_sysv_abi" = "yes" &&
test "$gcry_cv_gcc_default_abi_is_ms_abi" = "yes"; then
AC_CACHE_CHECK([whether GCC assembler is compatible for WIN64 assembly implementations],
[gcry_cv_gcc_win64_platform_as_ok],
[gcry_cv_gcc_win64_platform_as_ok=no
AC_LINK_IFELSE([AC_LANG_PROGRAM(
[[__asm__(
".text\n\t"
".globl asmfunc\n\t"
"asmfunc:\n\t"
"xorq \$(1234), %rbp;\n\t"
);
void asmfunc(void);]], [ asmfunc(); ])],
[gcry_cv_gcc_win64_platform_as_ok=yes])])
if test "$gcry_cv_gcc_win64_platform_as_ok" = "yes" ; then
AC_DEFINE(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS,1,
[Defined if underlying assembler is compatible with WIN64 assembly implementations])
fi
fi
fi
#
# Check whether GCC assembler supports features needed for assembly
# implementations that use Intel syntax
#
AC_CACHE_CHECK([whether GCC assembler is compatible for Intel syntax assembly implementations],
[gcry_cv_gcc_platform_as_ok_for_intel_syntax],
[if test "$mpi_cpu_arch" != "x86" ||
test "$try_asm_modules" != "yes" ; then
gcry_cv_gcc_platform_as_ok_for_intel_syntax="n/a"
else
gcry_cv_gcc_platform_as_ok_for_intel_syntax=no
AC_LINK_IFELSE([AC_LANG_PROGRAM(
[[__asm__(
".intel_syntax noprefix\n\t"
".text\n\t"
"actest:\n\t"
"pxor xmm1, xmm7;\n\t"
"vperm2i128 ymm2, ymm3, ymm0, 1;\n\t"
"add eax, ebp;\n\t"
"rorx eax, ebp, 1;\n\t"
"sub eax, [esp + 4];\n\t"
"add dword ptr [esp + eax], 0b10101;\n\t"
".att_syntax prefix\n\t"
);
void actest(void);]], [ actest(); ])],
[gcry_cv_gcc_platform_as_ok_for_intel_syntax=yes])
fi])
if test "$gcry_cv_gcc_platform_as_ok_for_intel_syntax" = "yes" ; then
AC_DEFINE(HAVE_INTEL_SYNTAX_PLATFORM_AS,1,
[Defined if underlying assembler is compatible with Intel syntax assembly implementations])
fi
#
# Check whether compiler is configured for ARMv6 or newer architecture
#
AC_CACHE_CHECK([whether compiler is configured for ARMv6 or newer architecture],
[gcry_cv_cc_arm_arch_is_v6],
[if test "$mpi_cpu_arch" != "arm" ||
test "$try_asm_modules" != "yes" ; then
gcry_cv_cc_arm_arch_is_v6="n/a"
else
gcry_cv_cc_arm_arch_is_v6=no
AC_COMPILE_IFELSE([AC_LANG_SOURCE(
[[
#if defined(__arm__) && \
((defined(__ARM_ARCH) && __ARM_ARCH >= 6) \
|| defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \
|| defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) \
|| defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6T2__) \
|| defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \
|| defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) \
|| defined(__ARM_ARCH_7EM__))
/* empty */
#else
/* fail compile if not ARMv6. */
not_armv6 not_armv6 = (not_armv6)not_armv6;
#endif
]])],
[gcry_cv_cc_arm_arch_is_v6=yes])
fi])
if test "$gcry_cv_cc_arm_arch_is_v6" = "yes" ; then
AC_DEFINE(HAVE_ARM_ARCH_V6,1,
[Defined if ARM architecture is v6 or newer])
fi
#
# Check whether GCC inline assembler supports NEON instructions
#
AC_CACHE_CHECK([whether GCC inline assembler supports NEON instructions],
[gcry_cv_gcc_inline_asm_neon],
[if test "$mpi_cpu_arch" != "arm" ||
test "$try_asm_modules" != "yes" ; then
gcry_cv_gcc_inline_asm_neon="n/a"
else
gcry_cv_gcc_inline_asm_neon=no
AC_LINK_IFELSE([AC_LANG_PROGRAM(
[[__asm__(
".syntax unified\n\t"
".arm\n\t"
".fpu neon\n\t"
".text\n\t"
"testfn:\n\t"
"vld1.64 {q0-q1}, [r0]!;\n\t"
"vrev64.8 q0, q3;\n\t"
"vadd.u64 q0, q1;\n\t"
"vadd.s64 d3, d2, d3;\n\t"
);
void testfn(void);
]], [ testfn(); ])],
[gcry_cv_gcc_inline_asm_neon=yes])
fi])
if test "$gcry_cv_gcc_inline_asm_neon" = "yes" ; then
AC_DEFINE(HAVE_GCC_INLINE_ASM_NEON,1,
[Defined if inline assembler supports NEON instructions])
fi
#
# Check whether GCC inline assembler supports AArch32 Crypto Extension instructions
#
AC_CACHE_CHECK([whether GCC inline assembler supports AArch32 Crypto Extension instructions],
[gcry_cv_gcc_inline_asm_aarch32_crypto],
[if test "$mpi_cpu_arch" != "arm" ||
test "$try_asm_modules" != "yes" ; then
gcry_cv_gcc_inline_asm_aarch32_crypto="n/a"
else
gcry_cv_gcc_inline_asm_aarch32_crypto=no
AC_LINK_IFELSE([AC_LANG_PROGRAM(
[[__asm__(
".syntax unified\n\t"
".arch armv8-a\n\t"
".arm\n\t"
".fpu crypto-neon-fp-armv8\n\t"
".text\n\t"
"testfn:\n\t"
"sha1h.32 q0, q0;\n\t"
"sha1c.32 q0, q0, q0;\n\t"
"sha1p.32 q0, q0, q0;\n\t"
"sha1su0.32 q0, q0, q0;\n\t"
"sha1su1.32 q0, q0;\n\t"
"sha256h.32 q0, q0, q0;\n\t"
"sha256h2.32 q0, q0, q0;\n\t"
"sha1p.32 q0, q0, q0;\n\t"
"sha256su0.32 q0, q0;\n\t"
"sha256su1.32 q0, q0, q15;\n\t"
"aese.8 q0, q0;\n\t"
"aesd.8 q0, q0;\n\t"
"aesmc.8 q0, q0;\n\t"
"aesimc.8 q0, q0;\n\t"
"vmull.p64 q0, d0, d0;\n\t"
);
void testfn(void);
]], [ testfn(); ])],
[gcry_cv_gcc_inline_asm_aarch32_crypto=yes])
fi])
if test "$gcry_cv_gcc_inline_asm_aarch32_crypto" = "yes" ; then
AC_DEFINE(HAVE_GCC_INLINE_ASM_AARCH32_CRYPTO,1,
[Defined if inline assembler supports AArch32 Crypto Extension instructions])
fi
#
# Check whether GCC inline assembler supports AArch64 NEON instructions
#
AC_CACHE_CHECK([whether GCC inline assembler supports AArch64 NEON instructions],
[gcry_cv_gcc_inline_asm_aarch64_neon],
[if test "$mpi_cpu_arch" != "aarch64" ||
test "$try_asm_modules" != "yes" ; then
gcry_cv_gcc_inline_asm_aarch64_neon="n/a"
else
gcry_cv_gcc_inline_asm_aarch64_neon=no
AC_LINK_IFELSE([AC_LANG_PROGRAM(
[[__asm__(
".cpu generic+simd\n\t"
".text\n\t"
"testfn:\n\t"
"mov w0, \#42;\n\t"
"dup v0.8b, w0;\n\t"
"ld4 {v0.8b,v1.8b,v2.8b,v3.8b},[x0],\#32;\n\t"
);
void testfn(void);
]], [ testfn(); ])],
[gcry_cv_gcc_inline_asm_aarch64_neon=yes])
fi])
if test "$gcry_cv_gcc_inline_asm_aarch64_neon" = "yes" ; then
AC_DEFINE(HAVE_GCC_INLINE_ASM_AARCH64_NEON,1,
[Defined if inline assembler supports AArch64 NEON instructions])
fi
#
# Check whether GCC inline assembler supports AArch64 Crypto Extension instructions
#
AC_CACHE_CHECK([whether GCC inline assembler supports AArch64 Crypto Extension instructions],
[gcry_cv_gcc_inline_asm_aarch64_crypto],
[if test "$mpi_cpu_arch" != "aarch64" ||
test "$try_asm_modules" != "yes" ; then
gcry_cv_gcc_inline_asm_aarch64_crypto="n/a"
else
gcry_cv_gcc_inline_asm_aarch64_crypto=no
AC_LINK_IFELSE([AC_LANG_PROGRAM(
[[__asm__(
".cpu generic+simd+crypto\n\t"
".text\n\t"
"testfn:\n\t"
"mov w0, \#42;\n\t"
"dup v0.8b, w0;\n\t"
"ld4 {v0.8b,v1.8b,v2.8b,v3.8b},[x0],\#32;\n\t"
"sha1h s0, s0;\n\t"
"sha1c q0, s0, v0.4s;\n\t"
"sha1p q0, s0, v0.4s;\n\t"
"sha1su0 v0.4s, v0.4s, v0.4s;\n\t"
"sha1su1 v0.4s, v0.4s;\n\t"
"sha256h q0, q0, v0.4s;\n\t"
"sha256h2 q0, q0, v0.4s;\n\t"
"sha1p q0, s0, v0.4s;\n\t"
"sha256su0 v0.4s, v0.4s;\n\t"
"sha256su1 v0.4s, v0.4s, v31.4s;\n\t"
"aese v0.16b, v0.16b;\n\t"
"aesd v0.16b, v0.16b;\n\t"
"aesmc v0.16b, v0.16b;\n\t"
"aesimc v0.16b, v0.16b;\n\t"
"pmull v0.1q, v0.1d, v31.1d;\n\t"
"pmull2 v0.1q, v0.2d, v31.2d;\n\t"
);
void testfn(void);
]], [ testfn(); ])],
[gcry_cv_gcc_inline_asm_aarch64_crypto=yes])
fi])
if test "$gcry_cv_gcc_inline_asm_aarch64_crypto" = "yes" ; then
AC_DEFINE(HAVE_GCC_INLINE_ASM_AARCH64_CRYPTO,1,
[Defined if inline assembler supports AArch64 Crypto Extension instructions])
fi
#
# Check whether GCC inline assembler supports AArch64 SVE instructions
#
AC_CACHE_CHECK([whether GCC inline assembler supports AArch64 SVE instructions],
[gcry_cv_gcc_inline_asm_aarch64_sve],
[if test "$mpi_cpu_arch" != "aarch64" ||
test "$try_asm_modules" != "yes" ; then
gcry_cv_gcc_inline_asm_aarch64_sve="n/a"
else
gcry_cv_gcc_inline_asm_aarch64_sve=no
AC_LINK_IFELSE([AC_LANG_PROGRAM(
[[__asm__(
".cpu generic+simd+sve\n\t"
".text\n\t"
"testfn:\n\t"
"mov x0, \#60;\n\t"
"whilelo p0.s, xzr, x0;\n\t"
"mov z0.s, p0/z, \#55;\n\t"
"ld1b {z0.b}, p0/z, [x1];\n\t"
);
void testfn(void);
]], [ testfn(); ])],
[gcry_cv_gcc_inline_asm_aarch64_sve=yes])
fi])
if test "$gcry_cv_gcc_inline_asm_aarch64_sve" = "yes" ; then
AC_DEFINE(HAVE_GCC_INLINE_ASM_AARCH64_SVE,1,
[Defined if inline assembler supports AArch64 SVE instructions])
fi
#
# Check whether GCC inline assembler supports AArch64 SVE2 instructions
#
AC_CACHE_CHECK([whether GCC inline assembler supports AArch64 SVE2 instructions],
[gcry_cv_gcc_inline_asm_aarch64_sve2],
[if test "$mpi_cpu_arch" != "aarch64" ||
test "$try_asm_modules" != "yes" ; then
gcry_cv_gcc_inline_asm_aarch64_sve2="n/a"
else
gcry_cv_gcc_inline_asm_aarch64_sve2=no
AC_LINK_IFELSE([AC_LANG_PROGRAM(
[[__asm__(
".cpu generic+simd+sve2\n\t"
".text\n\t"
"testfn:\n\t"
";\n\t"
"eor3 z0.d, z0.d, z1.d, z2.d;\n\t"
"ext z8.b, {z20.b, z21.b}, \#3;\n\t"
"adclt z0.d, z1.d, z2.d;\n\t"
"tbl z0.b, {z8.b, z9.b}, z1.b;\n\t"
"addhnb z16.s, z17.d, z18.d;\n\t"
"mov z0.s, p0/z, \#55;\n\t"
"ld1b {z0.b}, p0/z, [x1];\n\t"
);
void testfn(void);
]], [ testfn(); ])],
[gcry_cv_gcc_inline_asm_aarch64_sve2=yes])
fi])
if test "$gcry_cv_gcc_inline_asm_aarch64_sve2" = "yes" ; then
AC_DEFINE(HAVE_GCC_INLINE_ASM_AARCH64_SVE2,1,
[Defined if inline assembler supports AArch64 SVE2 instructions])
fi
#
# Check whether GCC inline assembler supports AArch64 SHA3/SHA512/SM3/SM4 instructions
#
AC_CACHE_CHECK([whether GCC inline assembler supports AArch64 SHA3/SHA512/SM3/SM4 instructions],
[gcry_cv_gcc_inline_asm_aarch64_sha3_sha512_sm3_sm4],
[if test "$mpi_cpu_arch" != "aarch64" ||
test "$try_asm_modules" != "yes" ; then
gcry_cv_gcc_inline_asm_aarch64_sha3_sha512_sm3_sm4="n/a"
else
gcry_cv_gcc_inline_asm_aarch64_sha3_sha512_sm3_sm4=no
AC_LINK_IFELSE([AC_LANG_PROGRAM(
[[__asm__(
".arch armv8.2-a+sha3+sm4\n\t"
".text\n\t"
"testfn:\n\t"
/* Test for SHA512 instructions */
"sha512h q0, q0, v0.2d;\n\t"
"sha512h2 q0, q0, v0.2d;\n\t"
"sha512su0 v0.2d, v0.2d;\n\t"
"sha512su1 v0.2d, v0.2d, v31.2d;\n\t"
/* Test for SHA3 instructions */
"bcax v0.16b, v1.16b, v2.16b, v3.16b;\n\t"
"eor3 v0.16b, v1.16b, v2.16b, v3.16b;\n\t"
"rax1 v0.2d, v1.2d, v2.2d;\n\t"
"xar v0.2d, v1.2d, v2.2d, \#1;\n\t"
/* Test for SM3 instructions */
"sm3partw1 v0.4s, v1.4s, v2.4s;\n\t"
"sm3partw2 v0.4s, v1.4s, v2.4s;\n\t"
"sm3ss1 v0.4s, v1.4s, v2.4s, v3.4s;\n\t"
"sm3tt1a v0.4s, v1.4s, v2.s[0];\n\t"
"sm3tt1b v0.4s, v1.4s, v2.s[0];\n\t"
"sm3tt2a v0.4s, v1.4s, v2.s[0];\n\t"
"sm3tt2b v0.4s, v1.4s, v2.s[0];\n\t"
/* Test for SM4 instructions */
"sm4e v0.4s, v1.4s;\n\t"
"sm4ekey v0.4s, v1.4s, v2.4s;\n\t"
);
void testfn(void);
]], [ testfn(); ])],
[gcry_cv_gcc_inline_asm_aarch64_sha3_sha512_sm3_sm4=yes])
fi])
if test "$gcry_cv_gcc_inline_asm_aarch64_sha3_sha512_sm3_sm4" = "yes" ; then
AC_DEFINE(HAVE_GCC_INLINE_ASM_AARCH64_SHA3_SHA512_SM3_SM4,1,
[Defined if inline assembler supports AArch64 SHA3/SHA512/SM3/SM4 instructions])
fi
#
# Check whether compiler supports AArch64/NEON/crypto intrinsics
#
AC_CACHE_CHECK([whether compiler supports AArch64/NEON/crypto intrinsics],
[gcry_cv_cc_aarch64_neon_intrinsics],
[if test "$mpi_cpu_arch" != "aarch64" ||
test "$try_asm_modules" != "yes" ; then
gcry_cv_cc_aarch64_neon_intrinsics="n/a"
else
gcry_cv_cc_aarch64_neon_intrinsics=no
AC_COMPILE_IFELSE([AC_LANG_SOURCE(
[[#include <arm_neon.h>
#define __m128i uint64x2_t
#define vpsrldq128(s, a, o) \
({ uint64x2_t __tmp = { 0, 0 }; \
o = (__m128i)vextq_u8((uint8x16_t)a, \
(uint8x16_t)__tmp, (s) & 15); })
#define vaesenclast128(a, b, o) \
(o = (__m128i)vaeseq_u8((uint8x16_t)b, (uint8x16_t)a))
#define memory_barrier_with_vec(a) __asm__("" : "+w"(a) :: "memory")
static inline __attribute__((always_inline)) __m128i
fn2(__m128i a)
{
vpsrldq128(2, a, a);
return a;
}
__m128i fn(__m128i in)
{
__m128i x;
memory_barrier_with_vec(in);
x = fn2(in);
memory_barrier_with_vec(x);
vaesenclast128(in, x, in);
memory_barrier_with_vec(in);
return in;
}
]])],
[gcry_cv_cc_aarch64_neon_intrinsics=yes])
fi])
if test "$gcry_cv_cc_aarch64_neon_intrinsics" = "yes" ; then
AC_DEFINE(HAVE_COMPATIBLE_CC_AARCH64_NEON_INTRINSICS,1,
[Defined if underlying compiler supports AArch64/NEON/crypto intrinsics])
fi
_gcc_cflags_save=$CFLAGS
CFLAGS="$CFLAGS -O2 -march=armv8-a+crypto"
if test "$gcry_cv_cc_aarch64_neon_intrinsics" = "no" &&
test "$mpi_cpu_arch" = "aarch64" &&
test "$try_asm_modules" = "yes" ; then
AC_CACHE_CHECK([whether compiler supports AArch64/NEON/crypto intrinsics with extra GCC flags],
[gcry_cv_cc_aarch64_neon_intrinsics_cflags],
[gcry_cv_cc_aarch64_neon_intrinsics_cflags=no
AC_COMPILE_IFELSE([AC_LANG_SOURCE(
[[#include <arm_neon.h>
#define __m128i uint64x2_t
#define vpsrldq128(s, a, o) \
({ uint64x2_t __tmp = { 0, 0 }; \
o = (__m128i)vextq_u8((uint8x16_t)a, \
(uint8x16_t)__tmp, (s) & 15); })
#define vaesenclast128(a, b, o) \
(o = (__m128i)vaeseq_u8((uint8x16_t)b, (uint8x16_t)a))
#define memory_barrier_with_vec(a) __asm__("" : "+w"(a) :: "memory")
static inline __attribute__((always_inline)) __m128i
fn2(__m128i a)
{
vpsrldq128(2, a, a);
return a;
}
__m128i fn(__m128i in)
{
__m128i x;
memory_barrier_with_vec(in);
x = fn2(in);
memory_barrier_with_vec(x);
vaesenclast128(in, x, in);
memory_barrier_with_vec(in);
return in;
}
]])],
[gcry_cv_cc_aarch64_neon_intrinsics_cflags=yes])])
if test "$gcry_cv_cc_aarch64_neon_intrinsics_cflags" = "yes" ; then
AC_DEFINE(HAVE_COMPATIBLE_CC_AARCH64_NEON_INTRINSICS,1,
[Defined if underlying compiler supports AArch64/NEON/crypto intrinsics])
AC_DEFINE(HAVE_COMPATIBLE_CC_AARCH64_NEON_INTRINSICS_WITH_CFLAGS,1,
[Defined if underlying compiler supports AArch64/NEON/crypto intrinsics with extra GCC flags])
fi
fi
AM_CONDITIONAL(ENABLE_AARCH64_NEON_INTRINSICS_EXTRA_CFLAGS,
test "$gcry_cv_cc_aarch64_neon_intrinsics_cflags" = "yes")
# Restore flags.
CFLAGS=$_gcc_cflags_save;
#
# Check whether compiler supports PowerPC AltiVec/VSX intrinsics
#
AC_CACHE_CHECK([whether compiler supports PowerPC AltiVec/VSX/crypto intrinsics],
[gcry_cv_cc_ppc_altivec],
[if test "$mpi_cpu_arch" != "ppc" ||
test "$try_asm_modules" != "yes" ; then
gcry_cv_cc_ppc_altivec="n/a"
else
gcry_cv_cc_ppc_altivec=no
AC_COMPILE_IFELSE([AC_LANG_SOURCE(
[[#include <altivec.h>
typedef vector unsigned char block;
typedef vector unsigned int vecu32;
static inline __attribute__((always_inline)) vecu32
vec_sld_u32(vecu32 a, vecu32 b, unsigned int idx)
{
return vec_sld (a, b, (4 * idx) & 15);
}
block fn(block in)
{
block t = vec_perm (in, in, vec_vsx_ld (0, (unsigned char*)0));
vecu32 y = vec_vsx_ld (0, (unsigned int*)0);
y = vec_sld_u32 (y, y, 3);
return vec_cipher_be (t, in) ^ (block)y;
}
]])],
[gcry_cv_cc_ppc_altivec=yes])
fi])
if test "$gcry_cv_cc_ppc_altivec" = "yes" ; then
AC_DEFINE(HAVE_COMPATIBLE_CC_PPC_ALTIVEC,1,
[Defined if underlying compiler supports PowerPC AltiVec/VSX/crypto intrinsics])
fi
_gcc_cflags_save=$CFLAGS
CFLAGS="$CFLAGS -O2 -maltivec -mvsx -mcrypto"
if test "$gcry_cv_cc_ppc_altivec" = "no" &&
test "$mpi_cpu_arch" = "ppc" &&
test "$try_asm_modules" = "yes" ; then
AC_CACHE_CHECK([whether compiler supports PowerPC AltiVec/VSX/crypto intrinsics with extra GCC flags],
[gcry_cv_cc_ppc_altivec_cflags],
[gcry_cv_cc_ppc_altivec_cflags=no
AC_COMPILE_IFELSE([AC_LANG_SOURCE(
[[#include <altivec.h>
typedef vector unsigned char block;
typedef vector unsigned int vecu32;
static inline __attribute__((always_inline)) vecu32
vec_sld_u32(vecu32 a, vecu32 b, unsigned int idx)
{
return vec_sld (a, b, (4 * idx) & 15);
}
block fn(block in)
{
block t = vec_perm (in, in, vec_vsx_ld (0, (unsigned char*)0));
vecu32 y = vec_vsx_ld (0, (unsigned int*)0);
y = vec_sld_u32 (y, y, 3);
return vec_cipher_be (t, in) ^ (block)y;
}
]])],
[gcry_cv_cc_ppc_altivec_cflags=yes])])
if test "$gcry_cv_cc_ppc_altivec_cflags" = "yes" ; then
AC_DEFINE(HAVE_COMPATIBLE_CC_PPC_ALTIVEC,1,
[Defined if underlying compiler supports PowerPC AltiVec/VSX/crypto intrinsics])
AC_DEFINE(HAVE_COMPATIBLE_CC_PPC_ALTIVEC_WITH_CFLAGS,1,
[Defined if underlying compiler supports PowerPC AltiVec/VSX/crypto intrinsics with extra GCC flags])
fi
fi
AM_CONDITIONAL(ENABLE_PPC_VCRYPTO_EXTRA_CFLAGS,
test "$gcry_cv_cc_ppc_altivec_cflags" = "yes")
# Restore flags.
CFLAGS=$_gcc_cflags_save;
#
# Check whether GCC inline assembler supports PowerPC AltiVec/VSX/crypto instructions
#
AC_CACHE_CHECK([whether GCC inline assembler supports PowerPC AltiVec/VSX/crypto instructions],
[gcry_cv_gcc_inline_asm_ppc_altivec],
[if test "$mpi_cpu_arch" != "ppc" ||
test "$try_asm_modules" != "yes" ; then
gcry_cv_gcc_inline_asm_ppc_altivec="n/a"
else
gcry_cv_gcc_inline_asm_ppc_altivec=no
AC_LINK_IFELSE([AC_LANG_PROGRAM(
[[__asm__(".globl testfn;\n"
".text\n\t"
"testfn:\n"
"stvx %v31,%r12,%r0;\n"
"lvx %v20,%r12,%r0;\n"
"vcipher %v0, %v1, %v22;\n"
"lxvw4x %vs32, %r0, %r1;\n"
"vadduwm %v0, %v1, %v22;\n"
"vshasigmaw %v0, %v1, 0, 15;\n"
"vshasigmad %v0, %v1, 0, 15;\n"
"vpmsumd %v11, %v11, %v11;\n"
);
void testfn(void);
]], [ testfn(); ] )],
[gcry_cv_gcc_inline_asm_ppc_altivec=yes])
fi])
if test "$gcry_cv_gcc_inline_asm_ppc_altivec" = "yes" ; then
AC_DEFINE(HAVE_GCC_INLINE_ASM_PPC_ALTIVEC,1,
[Defined if inline assembler supports PowerPC AltiVec/VSX/crypto instructions])
fi
#
# Check whether GCC inline assembler supports PowerISA 3.00 instructions
#
AC_CACHE_CHECK([whether GCC inline assembler supports PowerISA 3.00 instructions],
[gcry_cv_gcc_inline_asm_ppc_arch_3_00],
[if test "$mpi_cpu_arch" != "ppc" ||
test "$try_asm_modules" != "yes" ; then
gcry_cv_gcc_inline_asm_ppc_arch_3_00="n/a"
else
gcry_cv_gcc_inline_asm_ppc_arch_3_00=no
AC_LINK_IFELSE([AC_LANG_PROGRAM(
[[__asm__(".text\n\t"
".globl testfn;\n"
"testfn:\n"
"stxvb16x %r1,%v12,%v30;\n"
);
void testfn(void);
]], [ testfn(); ])],
[gcry_cv_gcc_inline_asm_ppc_arch_3_00=yes])
fi])
if test "$gcry_cv_gcc_inline_asm_ppc_arch_3_00" = "yes" ; then
AC_DEFINE(HAVE_GCC_INLINE_ASM_PPC_ARCH_3_00,1,
[Defined if inline assembler supports PowerISA 3.00 instructions])
fi
#
# Check whether compiler supports GCC PowerPC target attributes
#
AC_CACHE_CHECK([whether compiler supports GCC PowerPC target attributes],
[gcry_cv_gcc_attribute_ppc_target],
[if test "$mpi_cpu_arch" != "ppc" ; then
gcry_cv_gcc_attribute_ppc_target="n/a"
else
gcry_cv_gcc_attribute_ppc_target=no
AC_LINK_IFELSE([AC_LANG_PROGRAM(
[[void __attribute__((always_inline)) inline aifn(void) {}
void __attribute__((target("cpu=power8"))) testfn8(void) {aifn();}
void __attribute__((target("cpu=power9"))) testfn9(void)
{ testfn8(); aifn(); }
]], [ testfn9(); aifn(); ])],
[gcry_cv_gcc_attribute_ppc_target=yes])
fi])
if test "$gcry_cv_gcc_attribute_ppc_target" = "yes" ; then
AC_DEFINE(HAVE_GCC_ATTRIBUTE_PPC_TARGET,1,
[Defined if compiler supports GCC PowerPC target attributes])
fi
#
# Check whether compiler supports clang PowerPC target attributes
#
AC_CACHE_CHECK([whether compiler supports clang PowerPC target attributes],
[gcry_cv_clang_attribute_ppc_target],
[if test "$mpi_cpu_arch" != "ppc" ; then
gcry_cv_clang_attribute_ppc_target="n/a"
else
gcry_cv_clang_attribute_ppc_target=no
AC_LINK_IFELSE([AC_LANG_PROGRAM(
[[void __attribute__((always_inline)) inline aifn(void) {}
void __attribute__((target("arch=pwr8"))) testfn8(void) {aifn();}
void __attribute__((target("arch=pwr9"))) testfn9(void)
{ testfn8(); aifn(); }
]], [ testfn9(); aifn(); ])],
[gcry_cv_clang_attribute_ppc_target=yes])
fi])
if test "$gcry_cv_clang_attribute_ppc_target" = "yes" ; then
AC_DEFINE(HAVE_CLANG_ATTRIBUTE_PPC_TARGET,1,
[Defined if compiler supports clang PowerPC target attributes])
fi
#
# Check whether GCC inline assembler supports zSeries instructions
#
AC_CACHE_CHECK([whether GCC inline assembler supports zSeries instructions],
[gcry_cv_gcc_inline_asm_s390x],
[if test "$mpi_cpu_arch" != "s390x" ||
test "$try_asm_modules" != "yes" ; then
gcry_cv_gcc_inline_asm_s390x="n/a"
else
gcry_cv_gcc_inline_asm_s390x=no
AC_LINK_IFELSE([AC_LANG_PROGRAM(
[[typedef unsigned int u128_t __attribute__ ((mode (TI)));
unsigned int testfunc(unsigned int x, void *y, unsigned int z)
{
unsigned long fac[8];
register unsigned long reg0 asm("0") = 0;
register unsigned long reg1 asm("1") = x;
u128_t r1 = ((u128_t)(unsigned long)y << 64) | (unsigned long)z;
u128_t r2 = 0;
u128_t r3 = 0;
asm volatile (".insn rre,0xb92e << 16, %[r1], %[r2]\n\t"
: [r1] "+a" (r1), [r2] "+a" (r2)
: "r" (reg0), "r" (reg1)
: "cc", "memory");
asm volatile (".insn rrf,0xb929 << 16, %[r1], %[r2], %[r3], 0\n\t"
: [r1] "+a" (r1), [r2] "+a" (r2), [r3] "+a" (r3)
: "r" (reg0), "r" (reg1)
: "cc", "memory");
reg0 = 8 - 1;
asm ("stfle %1\n\t"
: "+d" (reg0), "=Q" (fac[0])
:
: "cc", "memory");
asm volatile ("mvc 0(16, %0), 0(%1)\n\t"
:
: "a" (y), "a" (fac)
: "memory");
asm volatile ("xc 0(16, %0), 0(%0)\n\t"
:
: "a" (fac)
: "memory");
asm volatile ("risbgn %%r11, %%r11, 0, 129, 0\n\t"
:
:
: "memory", "r11");
asm volatile ("algrk %%r14, %%r14, %%r14\n\t"
:
:
: "memory", "r14");
return (unsigned int)r1 ^ reg0;
}
]] , [ testfunc(0, 0, 0); ])],
[gcry_cv_gcc_inline_asm_s390x=yes])
fi])
if test "$gcry_cv_gcc_inline_asm_s390x" = "yes" ; then
AC_DEFINE(HAVE_GCC_INLINE_ASM_S390X,1,
[Defined if inline assembler supports zSeries instructions])
fi
#
# Check whether GCC inline assembler supports zSeries vector instructions
#
AC_CACHE_CHECK([whether GCC inline assembler supports zSeries vector instructions],
[gcry_cv_gcc_inline_asm_s390x_vx],
[if test "$mpi_cpu_arch" != "s390x" ||
test "$try_asm_modules" != "yes" ; then
gcry_cv_gcc_inline_asm_s390x_vx="n/a"
else
gcry_cv_gcc_inline_asm_s390x_vx=no
if test "$gcry_cv_gcc_inline_asm_s390x" = "yes" ; then
AC_LINK_IFELSE([AC_LANG_PROGRAM(
[[void testfunc(void)
{
asm volatile (".machine \"z13+vx\"\n\t"
"vx %%v0, %%v1, %%v31\n\t"
"verllf %%v11, %%v11, (16)(0)\n\t"
:
:
: "memory");
}
]], [ testfunc(); ])],
[gcry_cv_gcc_inline_asm_s390x_vx=yes])
fi
fi])
if test "$gcry_cv_gcc_inline_asm_s390x_vx" = "yes" ; then
AC_DEFINE(HAVE_GCC_INLINE_ASM_S390X_VX,1,
[Defined if inline assembler supports zSeries vector instructions])
fi
#######################################
#### Checks for library functions. ####
#######################################
AC_FUNC_VPRINTF
# We have replacements for these in src/missing-string.c
AC_CHECK_FUNCS(stpcpy strcasecmp)
# We have replacements for these in src/g10lib.h
AC_CHECK_FUNCS(strtoul memmove stricmp atexit raise)
# Other checks
AC_CHECK_FUNCS(strerror rand mmap getpagesize sysconf waitpid wait4)
AC_CHECK_FUNCS(gettimeofday getrusage gethrtime clock_gettime syslog)
AC_CHECK_FUNCS(syscall fcntl ftruncate flockfile getauxval elf_aux_info)
AC_CHECK_FUNCS(explicit_bzero explicit_memset getentropy sysctlbyname)
GNUPG_CHECK_MLOCK
#
# Replacement functions.
#
AC_REPLACE_FUNCS([getpid clock])
#
# Check whether it is necessary to link against libdl.
#
DL_LIBS=""
if test "$use_hmac_binary_check" != no ; then
_gcry_save_libs="$LIBS"
LIBS=""
AC_SEARCH_LIBS(dlopen, c dl,,,)
DL_LIBS=$LIBS
LIBS="$_gcry_save_libs"
fi
AC_SUBST(DL_LIBS)
#
# Check whether we can use Linux capabilities as requested.
#
if test "$use_capabilities" = "yes" ; then
use_capabilities=no
AC_CHECK_HEADERS(sys/capability.h)
if test "$ac_cv_header_sys_capability_h" = "yes" ; then
AC_CHECK_LIB(cap, cap_init, ac_need_libcap=1)
if test "$ac_cv_lib_cap_cap_init" = "yes"; then
AC_DEFINE(USE_CAPABILITIES,1,
[define if capabilities should be used])
LIBS="$LIBS -lcap"
use_capabilities=yes
fi
fi
if test "$use_capabilities" = "no" ; then
AC_MSG_WARN([[
***
*** The use of capabilities on this system is not possible.
*** You need a recent Linux kernel and some patches:
*** fcaps-2.2.9-990610.patch (kernel patch for 2.2.9)
*** fcap-module-990613.tar.gz (kernel module)
*** libcap-1.92.tar.gz (user mode library and utilities)
*** And you have to configure the kernel with CONFIG_VFS_CAP_PLUGIN
*** set (filesystems menu). Be warned: This code is *really* ALPHA.
***]])
fi
fi
# Check whether a random device is available.
if test "$try_dev_random" = yes ; then
AC_CACHE_CHECK(for random device, ac_cv_have_dev_random,
[if test -r "$NAME_OF_DEV_RANDOM" && test -r "$NAME_OF_DEV_URANDOM" ; then
ac_cv_have_dev_random=yes; else ac_cv_have_dev_random=no; fi])
if test "$ac_cv_have_dev_random" = yes; then
AC_DEFINE(HAVE_DEV_RANDOM,1,
[defined if the system supports a random device] )
fi
else
AC_MSG_CHECKING(for random device)
ac_cv_have_dev_random=no
AC_MSG_RESULT(has been disabled)
fi
# Figure out the random modules for this configuration.
if test "$random" = "default"; then
# Select default value.
if test "$ac_cv_func_getentropy" = yes; then
random_modules="getentropy"
elif test "$ac_cv_have_dev_random" = yes; then
# Try Linuxish random device.
random_modules="linux"
else
case "${host}" in
*-*-mingw32ce*)
# WindowsCE random device.
random_modules="w32ce"
;;
*-*-mingw32*|*-*-cygwin*)
# Windows random device.
random_modules="w32"
;;
*)
# Build everything, allow to select at runtime.
random_modules="$auto_random_modules"
;;
esac
fi
else
if test "$random" = "auto"; then
# Build everything, allow to select at runtime.
random_modules="$auto_random_modules"
else
random_modules="$random"
fi
fi
#
# Other defines
#
if test mym4_isgit = "yes"; then
AC_DEFINE(IS_DEVELOPMENT_VERSION,1,
[Defined if this is not a regular release])
fi
AM_CONDITIONAL(CROSS_COMPILING, test x$cross_compiling = xyes)
# This is handy for debugging so the compiler doesn't rearrange
# things and eliminate variables.
AC_ARG_ENABLE(optimization,
AS_HELP_STRING([--disable-optimization],
[disable compiler optimization]),
[if test $enableval = no ; then
CFLAGS=`echo $CFLAGS | sed 's/-O[[0-9]]//'`
fi])
AC_MSG_NOTICE([checking for cc features])
# CFLAGS mangling when using gcc.
if test "$GCC" = yes; then
AC_MSG_CHECKING([if gcc supports -fno-delete-null-pointer-checks])
_gcc_cflags_save=$CFLAGS
CFLAGS="-fno-delete-null-pointer-checks"
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([],[])],_gcc_wopt=yes,_gcc_wopt=no)
AC_MSG_RESULT($_gcc_wopt)
CFLAGS=$_gcc_cflags_save;
if test x"$_gcc_wopt" = xyes ; then
CFLAGS="$CFLAGS -fno-delete-null-pointer-checks"
fi
CFLAGS="$CFLAGS -Wall"
if test "$USE_MAINTAINER_MODE" = "yes"; then
CFLAGS="$CFLAGS -Wcast-align -Wshadow -Wstrict-prototypes"
CFLAGS="$CFLAGS -Wformat -Wno-format-y2k -Wformat-security"
# If -Wno-missing-field-initializers is supported we can enable a
# a bunch of really useful warnings.
AC_MSG_CHECKING([if gcc supports -Wno-missing-field-initializers])
_gcc_cflags_save=$CFLAGS
CFLAGS="-Wno-missing-field-initializers"
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([],[])],_gcc_wopt=yes,_gcc_wopt=no)
AC_MSG_RESULT($_gcc_wopt)
CFLAGS=$_gcc_cflags_save;
if test x"$_gcc_wopt" = xyes ; then
CFLAGS="$CFLAGS -W -Wextra -Wbad-function-cast"
CFLAGS="$CFLAGS -Wwrite-strings"
CFLAGS="$CFLAGS -Wdeclaration-after-statement"
CFLAGS="$CFLAGS -Wno-missing-field-initializers"
CFLAGS="$CFLAGS -Wno-sign-compare"
fi
AC_MSG_CHECKING([if gcc supports -Wpointer-arith])
_gcc_cflags_save=$CFLAGS
CFLAGS="-Wpointer-arith"
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([],[])],_gcc_wopt=yes,_gcc_wopt=no)
AC_MSG_RESULT($_gcc_wopt)
CFLAGS=$_gcc_cflags_save;
if test x"$_gcc_wopt" = xyes ; then
CFLAGS="$CFLAGS -Wpointer-arith"
fi
fi
fi
# Check whether as(1) supports a noeexecstack feature. This test
# includes an override option.
CL_AS_NOEXECSTACK
AC_SUBST(LIBGCRYPT_CONFIG_API_VERSION)
AC_SUBST(LIBGCRYPT_CONFIG_LIBS)
AC_SUBST(LIBGCRYPT_CONFIG_CFLAGS)
AC_SUBST(LIBGCRYPT_CONFIG_HOST)
AC_SUBST(LIBGCRYPT_THREAD_MODULES)
AC_CONFIG_COMMANDS([gcrypt-conf],[[
chmod +x src/libgcrypt-config
]],[[
prefix=$prefix
exec_prefix=$exec_prefix
libdir=$libdir
datadir=$datadir
DATADIRNAME=$DATADIRNAME
]])
#####################
#### Conclusion. ####
#####################
# Check that requested feature can actually be used and define
# ENABLE_foo_SUPPORT macros.
if test x"$aesnisupport" = xyes ; then
if test "$gcry_cv_gcc_inline_asm_ssse3" != "yes" ; then
aesnisupport="no (unsupported by compiler)"
fi
fi
if test x"$shaextsupport" = xyes ; then
if test "$gcry_cv_gcc_inline_asm_shaext" != "yes" ; then
shaextsupport="no (unsupported by compiler)"
fi
fi
if test x"$pclmulsupport" = xyes ; then
if test "$gcry_cv_gcc_inline_asm_pclmul" != "yes" ; then
pclmulsupport="no (unsupported by compiler)"
fi
fi
if test x"$sse41support" = xyes ; then
if test "$gcry_cv_gcc_inline_asm_sse41" != "yes" ; then
sse41support="no (unsupported by compiler)"
fi
fi
if test x"$avxsupport" = xyes ; then
if test "$gcry_cv_gcc_inline_asm_avx" != "yes" ; then
avxsupport="no (unsupported by compiler)"
fi
fi
if test x"$avx2support" = xyes ; then
if test "$gcry_cv_gcc_inline_asm_avx2" != "yes" ; then
avx2support="no (unsupported by compiler)"
fi
fi
if test x"$avx512support" = xyes ; then
if test "$gcry_cv_gcc_inline_asm_avx512" != "yes" ; then
avx512support="no (unsupported by compiler)"
fi
fi
if test x"$gfnisupport" = xyes ; then
if test "$gcry_cv_gcc_inline_asm_gfni" != "yes" ; then
gfnisupport="no (unsupported by compiler)"
fi
fi
if test x"$neonsupport" = xyes ; then
if test "$gcry_cv_gcc_inline_asm_neon" != "yes" ; then
if test "$gcry_cv_gcc_inline_asm_aarch64_neon" != "yes" ; then
neonsupport="no (unsupported by compiler)"
fi
fi
fi
if test x"$armcryptosupport" = xyes ; then
if test "$gcry_cv_gcc_inline_asm_aarch32_crypto" != "yes" ; then
if test "$gcry_cv_gcc_inline_asm_aarch64_crypto" != "yes" ; then
armcryptosupport="no (unsupported by compiler)"
fi
fi
fi
if test x"$svesupport" = xyes ; then
if test "$gcry_cv_gcc_inline_asm_sve" != "yes" ; then
if test "$gcry_cv_gcc_inline_asm_aarch64_sve" != "yes" ; then
svesupport="no (unsupported by compiler)"
fi
fi
fi
if test x"$sve2support" = xyes ; then
if test "$gcry_cv_gcc_inline_asm_sve2" != "yes" ; then
if test "$gcry_cv_gcc_inline_asm_aarch64_sve2" != "yes" ; then
sve2support="no (unsupported by compiler)"
fi
fi
fi
if test x"$aesnisupport" = xyes ; then
AC_DEFINE(ENABLE_AESNI_SUPPORT, 1,
[Enable support for Intel AES-NI instructions.])
fi
if test x"$shaextsupport" = xyes ; then
AC_DEFINE(ENABLE_SHAEXT_SUPPORT, 1,
[Enable support for Intel SHAEXT instructions.])
fi
if test x"$pclmulsupport" = xyes ; then
AC_DEFINE(ENABLE_PCLMUL_SUPPORT, 1,
[Enable support for Intel PCLMUL instructions.])
fi
if test x"$sse41support" = xyes ; then
AC_DEFINE(ENABLE_SSE41_SUPPORT, 1,
[Enable support for Intel SSE4.1 instructions.])
fi
if test x"$avxsupport" = xyes ; then
AC_DEFINE(ENABLE_AVX_SUPPORT,1,
[Enable support for Intel AVX instructions.])
fi
if test x"$avx2support" = xyes ; then
AC_DEFINE(ENABLE_AVX2_SUPPORT,1,
[Enable support for Intel AVX2 instructions.])
fi
if test x"$avx512support" = xyes ; then
AC_DEFINE(ENABLE_AVX512_SUPPORT,1,
[Enable support for Intel AVX512 instructions.])
fi
if test x"$gfnisupport" = xyes ; then
AC_DEFINE(ENABLE_GFNI_SUPPORT,1,
[Enable support for Intel GFNI instructions.])
fi
if test x"$neonsupport" = xyes ; then
AC_DEFINE(ENABLE_NEON_SUPPORT,1,
[Enable support for ARM NEON instructions.])
fi
if test x"$armcryptosupport" = xyes ; then
AC_DEFINE(ENABLE_ARM_CRYPTO_SUPPORT,1,
[Enable support for ARMv8 Crypto Extension instructions.])
fi
if test x"$svesupport" = xyes ; then
AC_DEFINE(ENABLE_SVE_SUPPORT,1,
[Enable support for ARMv8 SVE instructions.])
fi
if test x"$sve2support" = xyes ; then
AC_DEFINE(ENABLE_SVE2_SUPPORT,1,
[Enable support for ARMv9 SVE2 instructions.])
fi
if test x"$ppccryptosupport" = xyes ; then
AC_DEFINE(ENABLE_PPC_CRYPTO_SUPPORT,1,
[Enable support for POWER 8 (PowerISA 2.07) crypto extension.])
fi
if test x"$jentsupport" = xyes ; then
AC_DEFINE(ENABLE_JENT_SUPPORT, 1,
[Enable support for the jitter entropy collector.])
fi
if test x"$padlocksupport" = xyes ; then
AC_DEFINE(ENABLE_PADLOCK_SUPPORT, 1,
[Enable support for the PadLock engine.])
fi
if test x"$drngsupport" = xyes ; then
AC_DEFINE(ENABLE_DRNG_SUPPORT, 1,
[Enable support for Intel DRNG (RDRAND instruction).])
fi
if test x"$force_soft_hwfeatures" = xyes ; then
AC_DEFINE(ENABLE_FORCE_SOFT_HWFEATURES, 1,
[Enable forcing 'soft' HW feature bits on (for testing).])
fi
# Define conditional sources and config.h symbols depending on the
# selected ciphers, pubkey-ciphers, digests, kdfs, and random modules.
LIST_MEMBER(arcfour, $enabled_ciphers)
if test "$found" = "1"; then
GCRYPT_CIPHERS="$GCRYPT_CIPHERS arcfour.lo"
AC_DEFINE(USE_ARCFOUR, 1, [Defined if this module should be included])
case "${host}" in
x86_64-*-*)
# Build with the assembly implementation
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS arcfour-amd64.lo"
;;
esac
fi
LIST_MEMBER(blowfish, $enabled_ciphers)
if test "$found" = "1" ; then
GCRYPT_CIPHERS="$GCRYPT_CIPHERS blowfish.lo"
AC_DEFINE(USE_BLOWFISH, 1, [Defined if this module should be included])
case "${host}" in
x86_64-*-*)
# Build with the assembly implementation
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS blowfish-amd64.lo"
;;
arm*-*-*)
# Build with the assembly implementation
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS blowfish-arm.lo"
;;
esac
fi
LIST_MEMBER(cast5, $enabled_ciphers)
if test "$found" = "1" ; then
GCRYPT_CIPHERS="$GCRYPT_CIPHERS cast5.lo"
AC_DEFINE(USE_CAST5, 1, [Defined if this module should be included])
case "${host}" in
x86_64-*-*)
# Build with the assembly implementation
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS cast5-amd64.lo"
;;
arm*-*-*)
# Build with the assembly implementation
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS cast5-arm.lo"
;;
esac
fi
LIST_MEMBER(des, $enabled_ciphers)
if test "$found" = "1" ; then
GCRYPT_CIPHERS="$GCRYPT_CIPHERS des.lo"
AC_DEFINE(USE_DES, 1, [Defined if this module should be included])
case "${host}" in
x86_64-*-*)
# Build with the assembly implementation
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS des-amd64.lo"
;;
esac
fi
LIST_MEMBER(aes, $enabled_ciphers)
if test "$found" = "1" ; then
GCRYPT_CIPHERS="$GCRYPT_CIPHERS rijndael.lo"
AC_DEFINE(USE_AES, 1, [Defined if this module should be included])
case "${host}" in
x86_64-*-*)
# Build with the assembly implementation
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS rijndael-amd64.lo"
# Build with the SSSE3 implementation
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS rijndael-ssse3-amd64.lo"
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS rijndael-ssse3-amd64-asm.lo"
# Build with the VAES/AVX2 implementation
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS rijndael-vaes.lo"
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS rijndael-vaes-avx2-amd64.lo"
;;
arm*-*-*)
# Build with the assembly implementation
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS rijndael-arm.lo"
# Build with the ARMv8/AArch32 CE implementation
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS rijndael-armv8-ce.lo"
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS rijndael-armv8-aarch32-ce.lo"
;;
aarch64-*-*)
# Build with the assembly implementation
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS rijndael-aarch64.lo"
# Build with the ARMv8/AArch64 CE implementation
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS rijndael-armv8-ce.lo"
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS rijndael-armv8-aarch64-ce.lo"
;;
powerpc64le-*-*)
# Build with the crypto extension implementation
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS rijndael-ppc.lo"
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS rijndael-ppc9le.lo"
if test "$gcry_cv_gcc_inline_asm_ppc_altivec" = "yes" &&
test "$gcry_cv_gcc_inline_asm_ppc_arch_3_00" = "yes" ; then
# Build with AES-GCM bulk implementation for P10
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS rijndael-gcm-p10le.lo"
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS rijndael-p10le.lo"
fi
;;
powerpc64-*-*)
# Big-Endian.
# Build with the crypto extension implementation
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS rijndael-ppc.lo"
;;
powerpc-*-*)
# Big-Endian.
# Build with the crypto extension implementation
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS rijndael-ppc.lo"
;;
s390x-*-*)
# Big-Endian.
# Build with the crypto extension implementation
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS rijndael-s390x.lo"
;;
esac
case "$mpi_cpu_arch" in
x86)
# Build with the AES-NI implementation
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS rijndael-aesni.lo"
# Build with the Padlock implementation
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS rijndael-padlock.lo"
;;
esac
fi
LIST_MEMBER(twofish, $enabled_ciphers)
if test "$found" = "1" ; then
GCRYPT_CIPHERS="$GCRYPT_CIPHERS twofish.lo"
AC_DEFINE(USE_TWOFISH, 1, [Defined if this module should be included])
case "${host}" in
x86_64-*-*)
# Build with the assembly implementation
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS twofish-amd64.lo"
if test x"$avx2support" = xyes ; then
# Build with the AVX2 implementation
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS twofish-avx2-amd64.lo"
fi
;;
arm*-*-*)
# Build with the assembly implementation
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS twofish-arm.lo"
;;
aarch64-*-*)
# Build with the assembly implementation
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS twofish-aarch64.lo"
;;
esac
fi
LIST_MEMBER(serpent, $enabled_ciphers)
if test "$found" = "1" ; then
GCRYPT_CIPHERS="$GCRYPT_CIPHERS serpent.lo"
AC_DEFINE(USE_SERPENT, 1, [Defined if this module should be included])
case "${host}" in
x86_64-*-*)
# Build with the SSE2 implementation
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS serpent-sse2-amd64.lo"
;;
esac
if test x"$avx2support" = xyes ; then
# Build with the AVX2 implementation
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS serpent-avx2-amd64.lo"
fi
+ if test x"$avx512support" = xyes ; then
+ # Build with the AVX512 implementation
+ GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS serpent-avx512-x86.lo"
+ fi
+
if test x"$neonsupport" = xyes ; then
# Build with the NEON implementation
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS serpent-armv7-neon.lo"
fi
fi
LIST_MEMBER(rfc2268, $enabled_ciphers)
if test "$found" = "1" ; then
GCRYPT_CIPHERS="$GCRYPT_CIPHERS rfc2268.lo"
AC_DEFINE(USE_RFC2268, 1, [Defined if this module should be included])
fi
LIST_MEMBER(seed, $enabled_ciphers)
if test "$found" = "1" ; then
GCRYPT_CIPHERS="$GCRYPT_CIPHERS seed.lo"
AC_DEFINE(USE_SEED, 1, [Defined if this module should be included])
fi
LIST_MEMBER(camellia, $enabled_ciphers)
if test "$found" = "1" ; then
GCRYPT_CIPHERS="$GCRYPT_CIPHERS camellia.lo camellia-glue.lo"
AC_DEFINE(USE_CAMELLIA, 1, [Defined if this module should be included])
case "${host}" in
arm*-*-*)
# Build with the assembly implementation
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS camellia-arm.lo"
;;
aarch64-*-*)
# Build with the assembly implementation
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS camellia-aarch64.lo"
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS camellia-aarch64-ce.lo"
;;
powerpc64le-*-*)
# Build with the POWER vector implementations
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS camellia-ppc8le.lo"
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS camellia-ppc9le.lo"
;;
esac
if test x"$avxsupport" = xyes ; then
if test x"$aesnisupport" = xyes ; then
# Build with the AES-NI/AVX implementation
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS camellia-aesni-avx-amd64.lo"
fi
fi
if test x"$avx2support" = xyes ; then
if test x"$aesnisupport" = xyes ; then
# Build with the AES-NI/AVX2 implementation
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS camellia-aesni-avx2-amd64.lo"
# Build with the VAES/AVX2 implementation
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS camellia-vaes-avx2-amd64.lo"
# Build with the GFNI/AVX2 implementation
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS camellia-gfni-avx2-amd64.lo"
# Build with the GFNI/AVX512 implementation
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS camellia-gfni-avx512-amd64.lo"
fi
fi
fi
LIST_MEMBER(idea, $enabled_ciphers)
if test "$found" = "1" ; then
GCRYPT_CIPHERS="$GCRYPT_CIPHERS idea.lo"
AC_DEFINE(USE_IDEA, 1, [Defined if this module should be included])
fi
LIST_MEMBER(salsa20, $enabled_ciphers)
if test "$found" = "1" ; then
GCRYPT_CIPHERS="$GCRYPT_CIPHERS salsa20.lo"
AC_DEFINE(USE_SALSA20, 1, [Defined if this module should be included])
case "${host}" in
x86_64-*-*)
# Build with the assembly implementation
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS salsa20-amd64.lo"
;;
esac
if test x"$neonsupport" = xyes ; then
# Build with the NEON implementation
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS salsa20-armv7-neon.lo"
fi
fi
LIST_MEMBER(gost28147, $enabled_ciphers)
if test "$found" = "1" ; then
GCRYPT_CIPHERS="$GCRYPT_CIPHERS gost28147.lo"
AC_DEFINE(USE_GOST28147, 1, [Defined if this module should be included])
fi
LIST_MEMBER(chacha20, $enabled_ciphers)
if test "$found" = "1" ; then
GCRYPT_CIPHERS="$GCRYPT_CIPHERS chacha20.lo"
AC_DEFINE(USE_CHACHA20, 1, [Defined if this module should be included])
case "${host}" in
x86_64-*-*)
# Build with the assembly implementation
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS chacha20-amd64-ssse3.lo"
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS chacha20-amd64-avx2.lo"
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS chacha20-amd64-avx512.lo"
;;
aarch64-*-*)
# Build with the assembly implementation
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS chacha20-aarch64.lo"
;;
powerpc64le-*-*)
# Build with the ppc8 vector implementation
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS chacha20-ppc.lo"
# Build with the assembly implementation
if test "$gcry_cv_gcc_inline_asm_ppc_altivec" = "yes" &&
test "$gcry_cv_gcc_inline_asm_ppc_arch_3_00" = "yes" ; then
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS chacha20-p10le-8x.lo"
fi
;;
powerpc64-*-*)
# Build with the ppc8 vector implementation
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS chacha20-ppc.lo"
;;
powerpc-*-*)
# Build with the ppc8 vector implementation
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS chacha20-ppc.lo"
;;
s390x-*-*)
# Build with the s390x/zSeries vector implementation
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS chacha20-s390x.lo"
;;
esac
if test x"$neonsupport" = xyes ; then
# Build with the NEON implementation
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS chacha20-armv7-neon.lo"
fi
fi
LIST_MEMBER(sm4, $enabled_ciphers)
if test "$found" = "1" ; then
GCRYPT_CIPHERS="$GCRYPT_CIPHERS sm4.lo"
AC_DEFINE(USE_SM4, 1, [Defined if this module should be included])
case "${host}" in
x86_64-*-*)
# Build with the assembly implementation
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS sm4-aesni-avx-amd64.lo"
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS sm4-aesni-avx2-amd64.lo"
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS sm4-gfni-avx2-amd64.lo"
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS sm4-gfni-avx512-amd64.lo"
;;
aarch64-*-*)
# Build with the assembly implementation
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS sm4-aarch64.lo"
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS sm4-armv8-aarch64-ce.lo"
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS sm4-armv9-aarch64-sve-ce.lo"
;;
powerpc64le-*-*)
# Build with the ppc64le vector implementation
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS sm4-ppc.lo"
;;
esac
fi
LIST_MEMBER(aria, $enabled_ciphers)
if test "$found" = "1" ; then
GCRYPT_CIPHERS="$GCRYPT_CIPHERS aria.lo"
AC_DEFINE(USE_ARIA, 1, [Defined if this module should be included])
case "${host}" in
x86_64-*-*)
# Build with the assembly implementation
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS aria-aesni-avx-amd64.lo"
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS aria-aesni-avx2-amd64.lo"
GCRYPT_ASM_CIPHERS="$GCRYPT_ASM_CIPHERS aria-gfni-avx512-amd64.lo"
;;
esac
fi
LIST_MEMBER(dsa, $enabled_pubkey_ciphers)
AM_CONDITIONAL(USE_DSA, [test "$found" = "1"])
if test "$found" = "1" ; then
GCRYPT_PUBKEY_CIPHERS="$GCRYPT_PUBKEY_CIPHERS dsa.lo"
AC_DEFINE(USE_DSA, 1, [Defined if this module should be included])
fi
LIST_MEMBER(rsa, $enabled_pubkey_ciphers)
AM_CONDITIONAL(USE_RSA, [test "$found" = "1"])
if test "$found" = "1" ; then
GCRYPT_PUBKEY_CIPHERS="$GCRYPT_PUBKEY_CIPHERS rsa.lo"
AC_DEFINE(USE_RSA, 1, [Defined if this module should be included])
fi
LIST_MEMBER(elgamal, $enabled_pubkey_ciphers)
AM_CONDITIONAL(USE_ELGAMAL, [test "$found" = "1"])
if test "$found" = "1" ; then
GCRYPT_PUBKEY_CIPHERS="$GCRYPT_PUBKEY_CIPHERS elgamal.lo"
AC_DEFINE(USE_ELGAMAL, 1, [Defined if this module should be included])
fi
LIST_MEMBER(ecc, $enabled_pubkey_ciphers)
AM_CONDITIONAL(USE_ECC, [test "$found" = "1"])
if test "$found" = "1" ; then
GCRYPT_PUBKEY_CIPHERS="$GCRYPT_PUBKEY_CIPHERS \
ecc.lo ecc-curves.lo ecc-misc.lo \
ecc-ecdh.lo ecc-ecdsa.lo ecc-eddsa.lo ecc-gost.lo \
ecc-sm2.lo"
AC_DEFINE(USE_ECC, 1, [Defined if this module should be included])
fi
LIST_MEMBER(crc, $enabled_digests)
if test "$found" = "1" ; then
GCRYPT_DIGESTS="$GCRYPT_DIGESTS crc.lo"
AC_DEFINE(USE_CRC, 1, [Defined if this module should be included])
case "${host}" in
i?86-*-* | x86_64-*-*)
# Build with the assembly implementation
GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS crc-intel-pclmul.lo"
;;
aarch64-*-*)
# Build with the assembly implementation
GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS crc-armv8-ce.lo"
GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS crc-armv8-aarch64-ce.lo"
;;
powerpc64le-*-* | powerpc64-*-* | powerpc-*-*)
GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS crc-ppc.lo"
;;
esac
fi
LIST_MEMBER(gostr3411-94, $enabled_digests)
if test "$found" = "1" ; then
# GOST R 34.11-94 internally uses GOST 28147-89
LIST_MEMBER(gost28147, $enabled_ciphers)
if test "$found" = "1" ; then
GCRYPT_DIGESTS="$GCRYPT_DIGESTS gostr3411-94.lo"
AC_DEFINE(USE_GOST_R_3411_94, 1, [Defined if this module should be included])
fi
fi
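# (The nested LIST_MEMBER call above overwrites $found, so the
# GOST R 34.11-94 digest is only built when the gost28147 cipher
# module is enabled as well.)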
LIST_MEMBER(stribog, $enabled_digests)
if test "$found" = "1" ; then
GCRYPT_DIGESTS="$GCRYPT_DIGESTS stribog.lo"
AC_DEFINE(USE_GOST_R_3411_12, 1, [Defined if this module should be included])
fi
LIST_MEMBER(md2, $enabled_digests)
if test "$found" = "1" ; then
GCRYPT_DIGESTS="$GCRYPT_DIGESTS md2.lo"
AC_DEFINE(USE_MD2, 1, [Defined if this module should be included])
fi
LIST_MEMBER(md4, $enabled_digests)
if test "$found" = "1" ; then
GCRYPT_DIGESTS="$GCRYPT_DIGESTS md4.lo"
AC_DEFINE(USE_MD4, 1, [Defined if this module should be included])
fi
LIST_MEMBER(md5, $enabled_digests)
if test "$found" = "1" ; then
GCRYPT_DIGESTS="$GCRYPT_DIGESTS md5.lo"
AC_DEFINE(USE_MD5, 1, [Defined if this module should be included])
fi
LIST_MEMBER(rmd160, $enabled_digests)
if test "$found" = "1" ; then
GCRYPT_DIGESTS="$GCRYPT_DIGESTS rmd160.lo"
AC_DEFINE(USE_RMD160, 1, [Defined if this module should be included])
fi
LIST_MEMBER(sha256, $enabled_digests)
if test "$found" = "1" ; then
GCRYPT_DIGESTS="$GCRYPT_DIGESTS sha256.lo"
AC_DEFINE(USE_SHA256, 1, [Defined if this module should be included])
case "${host}" in
x86_64-*-*)
# Build with the assembly implementation
GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS sha256-ssse3-amd64.lo"
GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS sha256-avx-amd64.lo"
GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS sha256-avx2-bmi2-amd64.lo"
;;
arm*-*-*)
# Build with the assembly implementation
GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS sha256-armv8-aarch32-ce.lo"
;;
aarch64-*-*)
# Build with the assembly implementation
GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS sha256-armv8-aarch64-ce.lo"
;;
powerpc64le-*-*)
# Build with the crypto extension implementation
GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS sha256-ppc.lo"
;;
powerpc64-*-* | powerpc-*-*)
# Big-Endian.
# Build with the crypto extension implementation
GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS sha256-ppc.lo"
;;
esac
case "$mpi_cpu_arch" in
x86)
# Build with the SHAEXT implementation
GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS sha256-intel-shaext.lo"
;;
esac
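# ($mpi_cpu_arch is "x86" for both i?86 and x86_64 hosts, so this
# check enables the SHAEXT module on 32-bit and 64-bit x86 alike,
# in contrast to the ${host} matches above.)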
fi
LIST_MEMBER(sha512, $enabled_digests)
if test "$found" = "1" ; then
GCRYPT_DIGESTS="$GCRYPT_DIGESTS sha512.lo"
AC_DEFINE(USE_SHA512, 1, [Defined if this module should be included])
case "${host}" in
x86_64-*-*)
# Build with the assembly implementation
GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS sha512-ssse3-amd64.lo"
GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS sha512-avx-amd64.lo"
GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS sha512-avx2-bmi2-amd64.lo"
GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS sha512-avx512-amd64.lo"
;;
i?86-*-*)
# Build with the assembly implementation
GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS sha512-ssse3-i386.lo"
;;
arm*-*-*)
# Build with the assembly implementation
GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS sha512-arm.lo"
;;
aarch64-*-*)
# Build with the assembly implementation
GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS sha512-armv8-aarch64-ce.lo"
;;
powerpc64le-*-*)
# Build with the crypto extension implementation
GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS sha512-ppc.lo"
;;
powerpc64-*-* | powerpc-*-*)
# Big-Endian.
# Build with the crypto extension implementation
GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS sha512-ppc.lo"
;;
esac
if test x"$neonsupport" = xyes ; then
# Build with the NEON implementation
GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS sha512-armv7-neon.lo"
fi
fi
LIST_MEMBER(sha3, $enabled_digests)
if test "$found" = "1" ; then
GCRYPT_DIGESTS="$GCRYPT_DIGESTS keccak.lo"
AC_DEFINE(USE_SHA3, 1, [Defined if this module should be included])
case "${host}" in
x86_64-*-*)
# Build with the assembly implementation
GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS keccak-amd64-avx512.lo"
;;
esac
if test x"$neonsupport" = xyes ; then
# Build with the NEON implementation
GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS keccak-armv7-neon.lo"
fi
fi
LIST_MEMBER(tiger, $enabled_digests)
if test "$found" = "1" ; then
GCRYPT_DIGESTS="$GCRYPT_DIGESTS tiger.lo"
AC_DEFINE(USE_TIGER, 1, [Defined if this module should be included])
fi
LIST_MEMBER(whirlpool, $enabled_digests)
if test "$found" = "1" ; then
GCRYPT_DIGESTS="$GCRYPT_DIGESTS whirlpool.lo"
AC_DEFINE(USE_WHIRLPOOL, 1, [Defined if this module should be included])
case "${host}" in
x86_64-*-*)
# Build with the assembly implementation
GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS whirlpool-sse2-amd64.lo"
;;
esac
fi
LIST_MEMBER(blake2, $enabled_digests)
if test "$found" = "1" ; then
GCRYPT_DIGESTS="$GCRYPT_DIGESTS blake2.lo"
AC_DEFINE(USE_BLAKE2, 1, [Defined if this module should be included])
case "${host}" in
x86_64-*-*)
# Build with the assembly implementation
GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS blake2b-amd64-avx2.lo"
GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS blake2b-amd64-avx512.lo"
GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS blake2s-amd64-avx.lo"
GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS blake2s-amd64-avx512.lo"
;;
esac
fi
LIST_MEMBER(sm3, $enabled_digests)
if test "$found" = "1" ; then
GCRYPT_DIGESTS="$GCRYPT_DIGESTS sm3.lo"
AC_DEFINE(USE_SM3, 1, [Defined if this module should be included])
case "${host}" in
x86_64-*-*)
# Build with the assembly implementation
GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS sm3-avx-bmi2-amd64.lo"
;;
aarch64-*-*)
# Build with the assembly implementation
GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS sm3-aarch64.lo"
GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS sm3-armv8-aarch64-ce.lo"
;;
esac
fi
# SHA-1 is always included because it is used, for example, by
# random-csprng.c.
GCRYPT_DIGESTS="$GCRYPT_DIGESTS sha1.lo"
AC_DEFINE(USE_SHA1, 1, [Defined if this module should be included])
case "${host}" in
x86_64-*-*)
# Build with the assembly implementation
GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS sha1-ssse3-amd64.lo"
GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS sha1-avx-amd64.lo"
GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS sha1-avx-bmi2-amd64.lo"
GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS sha1-avx2-bmi2-amd64.lo"
;;
arm*-*-*)
# Build with the assembly implementation
GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS sha1-armv7-neon.lo"
GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS sha1-armv8-aarch32-ce.lo"
;;
aarch64-*-*)
# Build with the assembly implementation
GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS sha1-armv8-aarch64-ce.lo"
;;
esac
case "$mpi_cpu_arch" in
x86)
# Build with the SHAEXT implementation
GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS sha1-intel-shaext.lo"
;;
esac
# Arch specific GCM implementations
case "${host}" in
i?86-*-* | x86_64-*-*)
GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS cipher-gcm-intel-pclmul.lo"
;;
arm*-*-*)
GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS cipher-gcm-armv7-neon.lo"
GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS cipher-gcm-armv8-aarch32-ce.lo"
;;
aarch64-*-*)
GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS cipher-gcm-armv8-aarch64-ce.lo"
;;
powerpc64le-*-* | powerpc64-*-* | powerpc-*-*)
GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS cipher-gcm-ppc.lo"
;;
esac
# Arch specific MAC implementations
case "${host}" in
s390x-*-*)
GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS poly1305-s390x.lo"
;;
x86_64-*-*)
GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS poly1305-amd64-avx512.lo"
;;
powerpc64le-*-*)
# Build with the assembly implementation
if test "$gcry_cv_gcc_inline_asm_ppc_altivec" = "yes" &&
test "$gcry_cv_gcc_inline_asm_ppc_arch_3_00" = "yes" ; then
GCRYPT_ASM_DIGESTS="$GCRYPT_ASM_DIGESTS poly1305-p10le.lo"
fi
;;
esac
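# (The poly1305 assembly helpers accumulate in GCRYPT_ASM_DIGESTS and
# are merged into GCRYPT_DIGESTS together with the other assembly
# modules further below.)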
LIST_MEMBER(scrypt, $enabled_kdfs)
if test "$found" = "1" ; then
GCRYPT_KDFS="$GCRYPT_KDFS scrypt.lo"
AC_DEFINE(USE_SCRYPT, 1, [Defined if this module should be included])
fi
LIST_MEMBER(getentropy, $random_modules)
if test "$found" = "1" ; then
GCRYPT_RANDOM="$GCRYPT_RANDOM rndgetentropy.lo"
AC_DEFINE(USE_RNDGETENTROPY, 1, [Defined if the getentropy RNG should be used.])
fi
LIST_MEMBER(linux, $random_modules)
if test "$found" = "1" ; then
GCRYPT_RANDOM="$GCRYPT_RANDOM rndoldlinux.lo"
AC_DEFINE(USE_RNDOLDLINUX, 1, [Defined if the /dev/random RNG should be used.])
fi
LIST_MEMBER(unix, $random_modules)
if test "$found" = "1" ; then
GCRYPT_RANDOM="$GCRYPT_RANDOM rndunix.lo"
AC_DEFINE(USE_RNDUNIX, 1, [Defined if the default Unix RNG should be used.])
fi
LIST_MEMBER(egd, $random_modules)
if test "$found" = "1" ; then
GCRYPT_RANDOM="$GCRYPT_RANDOM rndegd.lo"
AC_DEFINE(USE_RNDEGD, 1, [Defined if the EGD based RNG should be used.])
fi
LIST_MEMBER(w32, $random_modules)
if test "$found" = "1" ; then
GCRYPT_RANDOM="$GCRYPT_RANDOM rndw32.lo"
AC_DEFINE(USE_RNDW32, 1,
[Defined if the Windows specific RNG should be used.])
fi
LIST_MEMBER(w32ce, $random_modules)
if test "$found" = "1" ; then
GCRYPT_RANDOM="$GCRYPT_RANDOM rndw32ce.lo"
AC_DEFINE(USE_RNDW32CE, 1,
[Defined if the WindowsCE specific RNG should be used.])
fi
if test "$try_asm_modules" = yes ; then
# Build with assembly implementations
GCRYPT_CIPHERS="$GCRYPT_CIPHERS $GCRYPT_ASM_CIPHERS"
GCRYPT_DIGESTS="$GCRYPT_DIGESTS $GCRYPT_ASM_DIGESTS"
fi
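# (If assembly modules were disabled, e.g. via --disable-asm, the
# GCRYPT_ASM_* lists collected above are simply discarded here.)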
AC_SUBST([GCRYPT_CIPHERS])
AC_SUBST([GCRYPT_PUBKEY_CIPHERS])
AC_SUBST([GCRYPT_DIGESTS])
AC_SUBST([GCRYPT_KDFS])
AC_SUBST([GCRYPT_RANDOM])
AC_SUBST(LIBGCRYPT_CIPHERS, $enabled_ciphers)
AC_SUBST(LIBGCRYPT_PUBKEY_CIPHERS, $enabled_pubkey_ciphers)
AC_SUBST(LIBGCRYPT_DIGESTS, $enabled_digests)
# For printing the configuration we need a colon-separated list of
# algorithm names.
tmp=`echo "$enabled_ciphers" | tr ' ' : `
AC_DEFINE_UNQUOTED(LIBGCRYPT_CIPHERS, "$tmp",
[List of available cipher algorithms])
tmp=`echo "$enabled_pubkey_ciphers" | tr ' ' : `
AC_DEFINE_UNQUOTED(LIBGCRYPT_PUBKEY_CIPHERS, "$tmp",
[List of available public key cipher algorithms])
tmp=`echo "$enabled_digests" | tr ' ' : `
AC_DEFINE_UNQUOTED(LIBGCRYPT_DIGESTS, "$tmp",
[List of available digest algorithms])
tmp=`echo "$enabled_kdfs" | tr ' ' : `
AC_DEFINE_UNQUOTED(LIBGCRYPT_KDFS, "$tmp",
[List of available KDF algorithms])
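# As an illustration with a hypothetical list: enabled_ciphers set to
# "arcfour blowfish cast5" would be recorded as "arcfour:blowfish:cast5".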
#
# Define conditional sources depending on the used hardware platform.
# Note that all possible modules must also be listed in
# src/Makefile.am (EXTRA_libgcrypt_la_SOURCES).
#
GCRYPT_HWF_MODULES=
case "$mpi_cpu_arch" in
x86)
AC_DEFINE(HAVE_CPU_ARCH_X86, 1, [Defined for the x86 platforms])
GCRYPT_HWF_MODULES="libgcrypt_la-hwf-x86.lo"
;;
alpha)
AC_DEFINE(HAVE_CPU_ARCH_ALPHA, 1, [Defined for Alpha platforms])
;;
sparc)
AC_DEFINE(HAVE_CPU_ARCH_SPARC, 1, [Defined for SPARC platforms])
;;
mips)
AC_DEFINE(HAVE_CPU_ARCH_MIPS, 1, [Defined for MIPS platforms])
;;
m68k)
AC_DEFINE(HAVE_CPU_ARCH_M68K, 1, [Defined for M68k platforms])
;;
ppc)
AC_DEFINE(HAVE_CPU_ARCH_PPC, 1, [Defined for PPC platforms])
GCRYPT_HWF_MODULES="libgcrypt_la-hwf-ppc.lo"
;;
arm)
AC_DEFINE(HAVE_CPU_ARCH_ARM, 1, [Defined for ARM platforms])
GCRYPT_HWF_MODULES="libgcrypt_la-hwf-arm.lo"
;;
aarch64)
AC_DEFINE(HAVE_CPU_ARCH_ARM, 1, [Defined for ARM AArch64 platforms])
GCRYPT_HWF_MODULES="libgcrypt_la-hwf-arm.lo"
;;
s390x)
AC_DEFINE(HAVE_CPU_ARCH_S390X, 1, [Defined for s390x/zSeries platforms])
GCRYPT_HWF_MODULES="libgcrypt_la-hwf-s390x.lo"
;;
esac
AC_SUBST([GCRYPT_HWF_MODULES])
#
# Option to disable building of doc file
#
build_doc=yes
AC_ARG_ENABLE([doc], AS_HELP_STRING([--disable-doc],
[do not build the documentation]),
build_doc=$enableval, build_doc=yes)
AM_CONDITIONAL([BUILD_DOC], [test "x$build_doc" != xno])
#
# Provide information about the build.
#
BUILD_REVISION="mym4_revision"
AC_SUBST(BUILD_REVISION)
AC_DEFINE_UNQUOTED(BUILD_REVISION, "$BUILD_REVISION",
[GIT commit id revision used to build this package])
changequote(,)dnl
BUILD_VERSION=`echo "$PACKAGE_VERSION" | sed 's/\([0-9.]*\).*/\1./'`
changequote([,])dnl
BUILD_VERSION="${BUILD_VERSION}mym4_revision_dec"
BUILD_FILEVERSION=`echo "${BUILD_VERSION}" | tr . ,`
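# (BUILD_FILEVERSION is the comma-separated form expected by Windows
# VERSIONINFO resources; e.g. a hypothetical "1.11.0.4711" becomes
# "1,11,0,4711".  It is substituted into src/versioninfo.rc below.)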
AC_SUBST(BUILD_VERSION)
AC_SUBST(BUILD_FILEVERSION)
AC_ARG_ENABLE([build-timestamp],
AS_HELP_STRING([--enable-build-timestamp],
[set an explicit build timestamp for reproducibility.
(default is the current time in ISO-8601 format)]),
[if test "$enableval" = "yes"; then
BUILD_TIMESTAMP=`date -u +%Y-%m-%dT%H:%M+0000 2>/dev/null || date`
else
BUILD_TIMESTAMP="$enableval"
fi],
[BUILD_TIMESTAMP="<none>"])
AC_SUBST(BUILD_TIMESTAMP)
AC_DEFINE_UNQUOTED(BUILD_TIMESTAMP, "$BUILD_TIMESTAMP",
[The time this package was configured for a build])
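# Usage sketch (hypothetical invocations):
#   ./configure --enable-build-timestamp                  -> current UTC time
#   ./configure --enable-build-timestamp=2020-01-01T00:00+0000
#                                                         -> fixed, reproducible
#   ./configure                                           -> "<none>"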
# And create the files.
AC_CONFIG_FILES([
Makefile
m4/Makefile
compat/Makefile
mpi/Makefile
cipher/Makefile
random/Makefile
doc/Makefile
src/Makefile
src/gcrypt.h
src/libgcrypt-config
src/libgcrypt.pc
src/versioninfo.rc
tests/Makefile
])
AC_CONFIG_FILES([tests/hashtest-6g], [chmod +x tests/hashtest-6g])
AC_CONFIG_FILES([tests/hashtest-256g], [chmod +x tests/hashtest-256g])
AC_CONFIG_FILES([tests/basic-disable-all-hwf], [chmod +x tests/basic-disable-all-hwf])
AC_OUTPUT
detection_module="${GCRYPT_HWF_MODULES%.lo}"
test -n "$detection_module" || detection_module="none"
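# (The "%.lo" parameter expansion merely strips the libtool object
# suffix for display, e.g. "libgcrypt_la-hwf-x86.lo" is shown as
# "libgcrypt_la-hwf-x86".)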
# Give some feedback
GCRY_MSG_SHOW([],[])
GCRY_MSG_SHOW([Libgcrypt],[v${VERSION} has been configured as follows:])
GCRY_MSG_SHOW([],[])
GCRY_MSG_SHOW([Platform: ],[$PRINTABLE_OS_NAME ($host)])
GCRY_MSG_SHOW([Hardware detection module:],[$detection_module])
GCRY_MSG_WRAP([Enabled cipher algorithms:],[$enabled_ciphers])
GCRY_MSG_WRAP([Enabled digest algorithms:],[$enabled_digests])
GCRY_MSG_WRAP([Enabled kdf algorithms: ],[$enabled_kdfs])
GCRY_MSG_WRAP([Enabled pubkey algorithms:],[$enabled_pubkey_ciphers])
GCRY_MSG_SHOW([Random number generator: ],[$random])
GCRY_MSG_SHOW([Try using jitter entropy: ],[$jentsupport])
GCRY_MSG_SHOW([Using linux capabilities: ],[$use_capabilities])
GCRY_MSG_SHOW([FIPS module version: ],[$fips_module_version])
GCRY_MSG_SHOW([Try using Padlock crypto: ],[$padlocksupport])
GCRY_MSG_SHOW([Try using AES-NI crypto: ],[$aesnisupport])
GCRY_MSG_SHOW([Try using Intel SHAEXT: ],[$shaextsupport])
GCRY_MSG_SHOW([Try using Intel PCLMUL: ],[$pclmulsupport])
GCRY_MSG_SHOW([Try using Intel SSE4.1: ],[$sse41support])
GCRY_MSG_SHOW([Try using DRNG (RDRAND): ],[$drngsupport])
GCRY_MSG_SHOW([Try using Intel AVX: ],[$avxsupport])
GCRY_MSG_SHOW([Try using Intel AVX2: ],[$avx2support])
GCRY_MSG_SHOW([Try using Intel AVX512: ],[$avx512support])
GCRY_MSG_SHOW([Try using Intel GFNI: ],[$gfnisupport])
GCRY_MSG_SHOW([Try using ARM NEON: ],[$neonsupport])
GCRY_MSG_SHOW([Try using ARMv8 crypto: ],[$armcryptosupport])
GCRY_MSG_SHOW([Try using ARMv8 SVE: ],[$svesupport])
GCRY_MSG_SHOW([Try using ARMv9 SVE2: ],[$sve2support])
GCRY_MSG_SHOW([Try using PPC crypto: ],[$ppccryptosupport])
GCRY_MSG_SHOW([],[])
if test "x${gpg_config_script_warn}" != x; then
cat <<G10EOF
Mismatches between the target platform and the libraries to be
used have been detected for:
${gpg_config_script_warn}
Please check above for warning messages.
G10EOF
fi
if test "$gcry_cv_gcc_attribute_aligned" != "yes" ; then
cat <<G10EOF
Please note that your compiler does not support the GCC style
aligned attribute.  Using this software may cause bus errors.
G10EOF
fi
if test -n "$gpl"; then
echo "Please note that you are building a version of Libgcrypt with"
echo " $gpl"
echo "included. These parts are licensed under the GPL and thus the"
echo "use of this library has to comply with the conditions of the GPL."
echo ""
fi