diff --git a/cipher/cipher-internal.h b/cipher/cipher-internal.h index 66b75955..4e022f38 100644 --- a/cipher/cipher-internal.h +++ b/cipher/cipher-internal.h @@ -1,973 +1,975 @@ /* cipher-internal.h - Internal defs for cipher.c * Copyright (C) 2011 Free Software Foundation, Inc. * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser general Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . */ #ifndef G10_CIPHER_INTERNAL_H #define G10_CIPHER_INTERNAL_H #include "./poly1305-internal.h" /* The maximum supported size of a block in bytes. */ #define MAX_BLOCKSIZE 16 /* The length for an OCB block. Although OCB supports any block length it does not make sense to use a 64 bit blocklen (and cipher) because this reduces the security margin to an unacceptable state. Thus we require a cipher with 128 bit blocklength. */ #define OCB_BLOCK_LEN (128/8) /* The size of the pre-computed L table for OCB. This takes the same size as the table used for GCM and thus we don't save anything by not using such a table. */ #define OCB_L_TABLE_SIZE 16 /* Check the above constants. */ #if OCB_BLOCK_LEN > MAX_BLOCKSIZE # error OCB_BLOCKLEN > MAX_BLOCKSIZE #endif /* Magic values for the context structure. */ #define CTX_MAGIC_NORMAL 0x24091964 #define CTX_MAGIC_SECURE 0x46919042 /* Try to use 16 byte aligned cipher context for better performance. We use the aligned attribute, thus it is only possible to implement this with gcc. 
*/ #undef NEED_16BYTE_ALIGNED_CONTEXT #ifdef HAVE_GCC_ATTRIBUTE_ALIGNED # define NEED_16BYTE_ALIGNED_CONTEXT 1 #endif /* Undef this symbol to trade GCM speed for 256 bytes of memory per context */ #define GCM_USE_TABLES 1 /* GCM_USE_INTEL_PCLMUL indicates whether to compile GCM with Intel PCLMUL code. */ #undef GCM_USE_INTEL_PCLMUL #if defined(ENABLE_PCLMUL_SUPPORT) && defined(GCM_USE_TABLES) # if ((defined(__i386__) && SIZEOF_UNSIGNED_LONG == 4) || defined(__x86_64__)) # if __GNUC__ >= 4 # define GCM_USE_INTEL_PCLMUL 1 # endif # endif #endif /* GCM_USE_INTEL_PCLMUL */ /* GCM_USE_INTEL_VPCLMUL_AVX2 indicates whether to compile GCM with Intel VPCLMUL/AVX2 code. */ #undef GCM_USE_INTEL_VPCLMUL_AVX2 #if defined(__x86_64__) && defined(GCM_USE_INTEL_PCLMUL) && \ defined(ENABLE_AVX2_SUPPORT) && defined(HAVE_GCC_INLINE_ASM_VAES_VPCLMUL) # define GCM_USE_INTEL_VPCLMUL_AVX2 1 #endif /* GCM_USE_INTEL_VPCLMUL_AVX2 */ /* GCM_USE_INTEL_VPCLMUL_AVX512 indicates whether to compile GCM with Intel VPCLMUL/AVX512 code. */ #undef GCM_USE_INTEL_VPCLMUL_AVX512 #if defined(__x86_64__) && defined(GCM_USE_INTEL_VPCLMUL_AVX2) && \ defined(ENABLE_AVX512_SUPPORT) && defined(HAVE_GCC_INLINE_ASM_AVX512) # define GCM_USE_INTEL_VPCLMUL_AVX512 1 #endif /* GCM_USE_INTEL_VPCLMUL_AVX512 */ /* GCM_USE_ARM_PMULL indicates whether to compile GCM with ARMv8 PMULL code. */ #undef GCM_USE_ARM_PMULL #if defined(ENABLE_ARM_CRYPTO_SUPPORT) && defined(GCM_USE_TABLES) # if defined(HAVE_ARM_ARCH_V6) && defined(__ARMEL__) \ && defined(HAVE_COMPATIBLE_GCC_ARM_PLATFORM_AS) \ && defined(HAVE_GCC_INLINE_ASM_AARCH32_CRYPTO) # define GCM_USE_ARM_PMULL 1 # elif defined(__AARCH64EL__) && \ defined(HAVE_COMPATIBLE_GCC_AARCH64_PLATFORM_AS) && \ defined(HAVE_GCC_INLINE_ASM_AARCH64_CRYPTO) # define GCM_USE_ARM_PMULL 1 # endif #endif /* GCM_USE_ARM_PMULL */ /* GCM_USE_ARM_NEON indicates whether to compile GCM with ARMv7 NEON code. 
*/ #undef GCM_USE_ARM_NEON #if defined(GCM_USE_TABLES) #if defined(HAVE_ARM_ARCH_V6) && defined(__ARMEL__) && \ defined(HAVE_COMPATIBLE_GCC_ARM_PLATFORM_AS) && \ defined(HAVE_GCC_INLINE_ASM_NEON) # define GCM_USE_ARM_NEON 1 #endif #endif /* GCM_USE_ARM_NEON */ /* GCM_USE_S390X_CRYPTO indicates whether to enable zSeries code. */ #undef GCM_USE_S390X_CRYPTO #if defined(HAVE_GCC_INLINE_ASM_S390X) # define GCM_USE_S390X_CRYPTO 1 #endif /* GCM_USE_S390X_CRYPTO */ /* GCM_USE_PPC_VPMSUM indicates whether to compile GCM with PPC Power 8 * polynomial multiplication instruction. */ #undef GCM_USE_PPC_VPMSUM #if defined(GCM_USE_TABLES) #if defined(ENABLE_PPC_CRYPTO_SUPPORT) && defined(__powerpc64__) && \ defined(HAVE_COMPATIBLE_CC_PPC_ALTIVEC) && \ defined(HAVE_GCC_INLINE_ASM_PPC_ALTIVEC) && __GNUC__ >= 4 # define GCM_USE_PPC_VPMSUM 1 # define NEED_16BYTE_ALIGNED_CONTEXT 1 /* this also aligns gcm_table */ #endif #endif /* GCM_USE_PPC_VPMSUM */ typedef unsigned int (*ghash_fn_t) (gcry_cipher_hd_t c, byte *result, const byte *buf, size_t nblocks); /* A structure with function pointers for mode operations. */ typedef struct cipher_mode_ops { gcry_err_code_t (*encrypt)(gcry_cipher_hd_t c, unsigned char *outbuf, size_t outbuflen, const unsigned char *inbuf, size_t inbuflen); gcry_err_code_t (*decrypt)(gcry_cipher_hd_t c, unsigned char *outbuf, size_t outbuflen, const unsigned char *inbuf, size_t inbuflen); gcry_err_code_t (*setiv)(gcry_cipher_hd_t c, const unsigned char *iv, size_t ivlen); gcry_err_code_t (*authenticate)(gcry_cipher_hd_t c, const unsigned char *abuf, size_t abuflen); gcry_err_code_t (*get_tag)(gcry_cipher_hd_t c, unsigned char *outtag, size_t taglen); gcry_err_code_t (*check_tag)(gcry_cipher_hd_t c, const unsigned char *intag, size_t taglen); } cipher_mode_ops_t; /* A structure with function pointers for bulk operations. 
The cipher algorithm setkey function initializes them when bulk operations are available and the actual encryption routines use them if they are not NULL. */ typedef struct cipher_bulk_ops { + void (*ecb_crypt)(void *context, void *outbuf_arg, const void *inbuf_arg, + size_t nblocks, int encrypt); void (*cfb_enc)(void *context, unsigned char *iv, void *outbuf_arg, const void *inbuf_arg, size_t nblocks); void (*cfb_dec)(void *context, unsigned char *iv, void *outbuf_arg, const void *inbuf_arg, size_t nblocks); void (*cbc_enc)(void *context, unsigned char *iv, void *outbuf_arg, const void *inbuf_arg, size_t nblocks, int cbc_mac); void (*cbc_dec)(void *context, unsigned char *iv, void *outbuf_arg, const void *inbuf_arg, size_t nblocks); void (*ofb_enc)(void *context, unsigned char *iv, void *outbuf_arg, const void *inbuf_arg, size_t nblocks); void (*ctr_enc)(void *context, unsigned char *iv, void *outbuf_arg, const void *inbuf_arg, size_t nblocks); void (*ctr32le_enc)(void *context, unsigned char *iv, void *outbuf_arg, const void *inbuf_arg, size_t nblocks); size_t (*ocb_crypt)(gcry_cipher_hd_t c, void *outbuf_arg, const void *inbuf_arg, size_t nblocks, int encrypt); size_t (*ocb_auth)(gcry_cipher_hd_t c, const void *abuf_arg, size_t nblocks); void (*xts_crypt)(void *context, unsigned char *tweak, void *outbuf_arg, const void *inbuf_arg, size_t nblocks, int encrypt); size_t (*gcm_crypt)(gcry_cipher_hd_t c, void *outbuf_arg, const void *inbuf_arg, size_t nblocks, int encrypt); } cipher_bulk_ops_t; /* A VIA processor with the Padlock engine as well as the Intel AES_NI instructions require an alignment of most data on a 16 byte boundary. Because we trick out the compiler while allocating the context, the align attribute as used in rijndael.c does not work on its own. Thus we need to make sure that the entire context structure is a aligned on that boundary. We achieve this by defining a new type and use that instead of our usual alignment type. 
*/ typedef union { PROPERLY_ALIGNED_TYPE foo; #ifdef NEED_16BYTE_ALIGNED_CONTEXT char bar[16] __attribute__ ((aligned (16))); #endif char c[1]; } cipher_context_alignment_t; /* Storage structure for CMAC, for CMAC and EAX modes. */ typedef struct { /* The initialization vector. Also contains tag after finalization. */ union { cipher_context_alignment_t iv_align; unsigned char iv[MAX_BLOCKSIZE]; } u_iv; /* Subkeys for tag creation, not cleared by gcry_cipher_reset. */ unsigned char subkeys[2][MAX_BLOCKSIZE]; /* Space to save partial input lengths for MAC. */ unsigned char macbuf[MAX_BLOCKSIZE]; int mac_unused; /* Number of unprocessed bytes in MACBUF. */ unsigned int tag:1; /* Set to 1 if tag has been finalized. */ } gcry_cmac_context_t; /* The handle structure. */ struct gcry_cipher_handle { int magic; size_t actual_handle_size; /* Allocated size of this handle. */ size_t handle_offset; /* Offset to the malloced block. */ gcry_cipher_spec_t *spec; /* The algorithm id. This is a hack required because the module interface does not easily allow to retrieve this value. */ int algo; /* A structure with function pointers for mode operations. */ cipher_mode_ops_t mode_ops; /* A structure with function pointers for bulk operations. Due to limitations of the module system (we don't want to change the API) we need to keep these function pointers here. */ cipher_bulk_ops_t bulk; int mode; unsigned int flags; struct { int geniv_method; unsigned char fixed[MAX_BLOCKSIZE]; unsigned char dynamic[MAX_BLOCKSIZE]; size_t fixed_iv_len; size_t dynamic_iv_len; } aead; struct { unsigned int key:1; /* Set to 1 if a key has been set. */ unsigned int iv:1; /* Set to 1 if a IV has been set. */ unsigned int tag:1; /* Set to 1 if a tag is finalized. */ unsigned int finalize:1; /* Next encrypt/decrypt has the final data. */ unsigned int allow_weak_key:1; /* Set to 1 if weak keys are allowed. */ } marks; /* The initialization vector. 
For best performance we make sure that it is properly aligned. In particular some implementations of bulk operations expect an 16 byte aligned IV. IV is also used to store CBC-MAC in CCM mode; counter IV is stored in U_CTR. For OCB mode it is used for the offset value. */ union { cipher_context_alignment_t iv_align; unsigned char iv[MAX_BLOCKSIZE]; } u_iv; /* The counter for CTR mode. This field is also used by AESWRAP and thus we can't use the U_IV union. For OCB mode it is used for the checksum. */ union { cipher_context_alignment_t iv_align; unsigned char ctr[MAX_BLOCKSIZE]; } u_ctr; /* Space to save an IV or CTR for chaining operations. */ unsigned char lastiv[MAX_BLOCKSIZE]; int unused; /* Number of unused bytes in LASTIV. */ union { /* Mode specific storage for CCM mode. */ struct { u64 encryptlen; u64 aadlen; unsigned int authlen; /* Space to save partial input lengths for MAC. */ unsigned char macbuf[GCRY_CCM_BLOCK_LEN]; int mac_unused; /* Number of unprocessed bytes in MACBUF. */ unsigned char s0[GCRY_CCM_BLOCK_LEN]; unsigned int nonce:1; /* Set to 1 if nonce has been set. */ unsigned int lengths:1; /* Set to 1 if CCM length parameters has been processed. */ } ccm; /* Mode specific storage for Poly1305 mode. */ struct { /* byte counter for AAD. */ u32 aadcount[2]; /* byte counter for data. */ u32 datacount[2]; unsigned int aad_finalized:1; unsigned int bytecount_over_limits:1; poly1305_context_t ctx; } poly1305; /* Mode specific storage for CMAC mode. */ gcry_cmac_context_t cmac; /* Mode specific storage for EAX mode. */ struct { /* CMAC for header (AAD). */ gcry_cmac_context_t cmac_header; /* CMAC for ciphertext. */ gcry_cmac_context_t cmac_ciphertext; } eax; /* Mode specific storage for GCM mode and GCM-SIV mode. */ struct { /* The interim tag for GCM mode. */ union { cipher_context_alignment_t iv_align; unsigned char tag[MAX_BLOCKSIZE]; } u_tag; /* Space to save partial input lengths for MAC. 
*/ unsigned char macbuf[GCRY_CCM_BLOCK_LEN]; int mac_unused; /* Number of unprocessed bytes in MACBUF. */ /* byte counters for GCM */ u32 aadlen[2]; u32 datalen[2]; /* encrypted tag counter */ unsigned char tagiv[MAX_BLOCKSIZE]; unsigned int ghash_data_finalized:1; unsigned int ghash_aad_finalized:1; unsigned int datalen_over_limits:1; unsigned int disallow_encryption_because_of_setiv_in_fips_mode:1; /* --- Following members are not cleared in gcry_cipher_reset --- */ /* GHASH multiplier from key. */ union { cipher_context_alignment_t iv_align; unsigned char key[MAX_BLOCKSIZE]; } u_ghash_key; /* Pre-calculated table for GCM. */ #ifdef GCM_USE_TABLES #if (SIZEOF_UNSIGNED_LONG == 8 || defined(__x86_64__)) #define GCM_TABLES_USE_U64 1 u64 gcm_table[4 * 16]; #else #undef GCM_TABLES_USE_U64 u32 gcm_table[8 * 16]; #endif #endif /* GHASH implementation in use. */ ghash_fn_t ghash_fn; /* POLYVAL implementation in use (GCM-SIV). */ ghash_fn_t polyval_fn; /* Key length used for GCM-SIV key generating key. */ unsigned int siv_keylen; /* Flags for accelerated implementations. */ unsigned int hw_impl_flags; } gcm; /* Mode specific storage for OCB mode. */ struct { /* --- Following members are not cleared in gcry_cipher_reset --- */ /* Helper variables and pre-computed table of L values. */ unsigned char L_star[OCB_BLOCK_LEN]; unsigned char L_dollar[OCB_BLOCK_LEN]; unsigned char L0L1[OCB_BLOCK_LEN]; unsigned char L[OCB_L_TABLE_SIZE][OCB_BLOCK_LEN]; /* --- Following members are cleared in gcry_cipher_reset --- */ /* The tag is valid if marks.tag has been set. */ unsigned char tag[OCB_BLOCK_LEN]; /* A buffer to hold the offset for the AAD processing. */ unsigned char aad_offset[OCB_BLOCK_LEN]; /* A buffer to hold the current sum of AAD processing. We can't use tag here because tag may already hold the preprocessed checksum of the data. */ unsigned char aad_sum[OCB_BLOCK_LEN]; /* A buffer to store AAD data not yet processed. 
*/ unsigned char aad_leftover[OCB_BLOCK_LEN]; /* Number of data/aad blocks processed so far. */ u64 data_nblocks; u64 aad_nblocks; /* Number of valid bytes in AAD_LEFTOVER. */ unsigned char aad_nleftover; /* Length of the tag. Fixed for now but may eventually be specified using a set of gcry_cipher_flags. */ unsigned char taglen; /* Flags indicating that the final data/aad block has been processed. */ unsigned int data_finalized:1; unsigned int aad_finalized:1; } ocb; /* Mode specific storage for XTS mode. */ struct { /* Pointer to tweak cipher context, allocated after actual * cipher context. */ char *tweak_context; } xts; /* Mode specific storage for SIV mode. */ struct { /* Tag used for decryption. */ unsigned char dec_tag[GCRY_SIV_BLOCK_LEN]; /* S2V state. */ unsigned char s2v_d[GCRY_SIV_BLOCK_LEN]; /* Number of AAD elements processed. */ unsigned int aad_count:8; /* Flags for SIV state. */ unsigned int dec_tag_set:1; /* --- Following members are not cleared in gcry_cipher_reset --- */ /* S2V CMAC state. */ gcry_cmac_context_t s2v_cmac; unsigned char s2v_zero_block[GCRY_SIV_BLOCK_LEN]; /* Pointer to CTR cipher context, allocated after actual * cipher context. */ char *ctr_context; } siv; /* Mode specific storage for WRAP mode. */ struct { unsigned char plen[4]; } wrap; } u_mode; /* What follows are two contexts of the cipher in use. The first one needs to be aligned well enough for the cipher operation whereas the second one is a copy created by cipher_setkey and used by cipher_reset. That second copy has no need for proper aligment because it is only accessed by memcpy. 
*/ cipher_context_alignment_t context; }; /*-- cipher-cbc.c --*/ gcry_err_code_t _gcry_cipher_cbc_encrypt /* */ (gcry_cipher_hd_t c, unsigned char *outbuf, size_t outbuflen, const unsigned char *inbuf, size_t inbuflen); gcry_err_code_t _gcry_cipher_cbc_decrypt /* */ (gcry_cipher_hd_t c, unsigned char *outbuf, size_t outbuflen, const unsigned char *inbuf, size_t inbuflen); gcry_err_code_t _gcry_cipher_cbc_cts_encrypt /* */ (gcry_cipher_hd_t c, unsigned char *outbuf, size_t outbuflen, const unsigned char *inbuf, size_t inbuflen); gcry_err_code_t _gcry_cipher_cbc_cts_decrypt /* */ (gcry_cipher_hd_t c, unsigned char *outbuf, size_t outbuflen, const unsigned char *inbuf, size_t inbuflen); /*-- cipher-cfb.c --*/ gcry_err_code_t _gcry_cipher_cfb_encrypt /* */ (gcry_cipher_hd_t c, unsigned char *outbuf, size_t outbuflen, const unsigned char *inbuf, size_t inbuflen); gcry_err_code_t _gcry_cipher_cfb_decrypt /* */ (gcry_cipher_hd_t c, unsigned char *outbuf, size_t outbuflen, const unsigned char *inbuf, size_t inbuflen); gcry_err_code_t _gcry_cipher_cfb8_encrypt /* */ (gcry_cipher_hd_t c, unsigned char *outbuf, size_t outbuflen, const unsigned char *inbuf, size_t inbuflen); gcry_err_code_t _gcry_cipher_cfb8_decrypt /* */ (gcry_cipher_hd_t c, unsigned char *outbuf, size_t outbuflen, const unsigned char *inbuf, size_t inbuflen); /*-- cipher-ofb.c --*/ gcry_err_code_t _gcry_cipher_ofb_encrypt /* */ (gcry_cipher_hd_t c, unsigned char *outbuf, size_t outbuflen, const unsigned char *inbuf, size_t inbuflen); /*-- cipher-ctr.c --*/ gcry_err_code_t _gcry_cipher_ctr_encrypt_ctx /* */ (gcry_cipher_hd_t c, unsigned char *outbuf, size_t outbuflen, const unsigned char *inbuf, size_t inbuflen, void *algo_context); gcry_err_code_t _gcry_cipher_ctr_encrypt /* */ (gcry_cipher_hd_t c, unsigned char *outbuf, size_t outbuflen, const unsigned char *inbuf, size_t inbuflen); /*-- cipher-aeswrap.c --*/ gcry_err_code_t _gcry_cipher_keywrap_encrypt /* */ (gcry_cipher_hd_t c, byte *outbuf, size_t 
outbuflen, const byte *inbuf, size_t inbuflen); gcry_err_code_t _gcry_cipher_keywrap_encrypt_padding /* */ (gcry_cipher_hd_t c, byte *outbuf, size_t outbuflen, const byte *inbuf, size_t inbuflen); gcry_err_code_t _gcry_cipher_keywrap_decrypt_auto /* */ (gcry_cipher_hd_t c, byte *outbuf, size_t outbuflen, const byte *inbuf, size_t inbuflen); /*-- cipher-ccm.c --*/ gcry_err_code_t _gcry_cipher_ccm_encrypt /* */ (gcry_cipher_hd_t c, unsigned char *outbuf, size_t outbuflen, const unsigned char *inbuf, size_t inbuflen); gcry_err_code_t _gcry_cipher_ccm_decrypt /* */ (gcry_cipher_hd_t c, unsigned char *outbuf, size_t outbuflen, const unsigned char *inbuf, size_t inbuflen); gcry_err_code_t _gcry_cipher_ccm_set_nonce /* */ (gcry_cipher_hd_t c, const unsigned char *nonce, size_t noncelen); gcry_err_code_t _gcry_cipher_ccm_authenticate /* */ (gcry_cipher_hd_t c, const unsigned char *abuf, size_t abuflen); gcry_err_code_t _gcry_cipher_ccm_set_lengths /* */ (gcry_cipher_hd_t c, u64 encryptedlen, u64 aadlen, u64 taglen); gcry_err_code_t _gcry_cipher_ccm_get_tag /* */ (gcry_cipher_hd_t c, unsigned char *outtag, size_t taglen); gcry_err_code_t _gcry_cipher_ccm_check_tag /* */ (gcry_cipher_hd_t c, const unsigned char *intag, size_t taglen); /*-- cipher-cmac.c --*/ gcry_err_code_t _gcry_cmac_generate_subkeys /* */ (gcry_cipher_hd_t c, gcry_cmac_context_t *ctx); gcry_err_code_t _gcry_cmac_write /* */ (gcry_cipher_hd_t c, gcry_cmac_context_t *ctx, const byte * inbuf, size_t inlen); gcry_err_code_t _gcry_cmac_final /* */ (gcry_cipher_hd_t c, gcry_cmac_context_t *ctx); void _gcry_cmac_reset (gcry_cmac_context_t *ctx); /*-- cipher-eax.c --*/ gcry_err_code_t _gcry_cipher_eax_encrypt /* */ (gcry_cipher_hd_t c, unsigned char *outbuf, size_t outbuflen, const unsigned char *inbuf, size_t inbuflen); gcry_err_code_t _gcry_cipher_eax_decrypt /* */ (gcry_cipher_hd_t c, unsigned char *outbuf, size_t outbuflen, const unsigned char *inbuf, size_t inbuflen); gcry_err_code_t 
_gcry_cipher_eax_set_nonce /* */ (gcry_cipher_hd_t c, const unsigned char *nonce, size_t noncelen); gcry_err_code_t _gcry_cipher_eax_authenticate /* */ (gcry_cipher_hd_t c, const unsigned char *aadbuf, size_t aadbuflen); gcry_err_code_t _gcry_cipher_eax_get_tag /* */ (gcry_cipher_hd_t c, unsigned char *outtag, size_t taglen); gcry_err_code_t _gcry_cipher_eax_check_tag /* */ (gcry_cipher_hd_t c, const unsigned char *intag, size_t taglen); gcry_err_code_t _gcry_cipher_eax_setkey /* */ (gcry_cipher_hd_t c); /*-- cipher-gcm.c --*/ gcry_err_code_t _gcry_cipher_gcm_encrypt /* */ (gcry_cipher_hd_t c, unsigned char *outbuf, size_t outbuflen, const unsigned char *inbuf, size_t inbuflen); gcry_err_code_t _gcry_cipher_gcm_decrypt /* */ (gcry_cipher_hd_t c, unsigned char *outbuf, size_t outbuflen, const unsigned char *inbuf, size_t inbuflen); gcry_err_code_t _gcry_cipher_gcm_setiv /* */ (gcry_cipher_hd_t c, const unsigned char *iv, size_t ivlen); gcry_err_code_t _gcry_cipher_gcm_authenticate /* */ (gcry_cipher_hd_t c, const unsigned char *aadbuf, size_t aadbuflen); gcry_err_code_t _gcry_cipher_gcm_get_tag /* */ (gcry_cipher_hd_t c, unsigned char *outtag, size_t taglen); gcry_err_code_t _gcry_cipher_gcm_check_tag /* */ (gcry_cipher_hd_t c, const unsigned char *intag, size_t taglen); void _gcry_cipher_gcm_setkey /* */ (gcry_cipher_hd_t c); void _gcry_cipher_gcm_setupM /* */ (gcry_cipher_hd_t c); /*-- cipher-poly1305.c --*/ gcry_err_code_t _gcry_cipher_poly1305_encrypt /* */ (gcry_cipher_hd_t c, unsigned char *outbuf, size_t outbuflen, const unsigned char *inbuf, size_t inbuflen); gcry_err_code_t _gcry_cipher_poly1305_decrypt /* */ (gcry_cipher_hd_t c, unsigned char *outbuf, size_t outbuflen, const unsigned char *inbuf, size_t inbuflen); gcry_err_code_t _gcry_cipher_poly1305_setiv /* */ (gcry_cipher_hd_t c, const unsigned char *iv, size_t ivlen); gcry_err_code_t _gcry_cipher_poly1305_authenticate /* */ (gcry_cipher_hd_t c, const unsigned char *aadbuf, size_t aadbuflen); 
gcry_err_code_t _gcry_cipher_poly1305_get_tag /* */ (gcry_cipher_hd_t c, unsigned char *outtag, size_t taglen); gcry_err_code_t _gcry_cipher_poly1305_check_tag /* */ (gcry_cipher_hd_t c, const unsigned char *intag, size_t taglen); void _gcry_cipher_poly1305_setkey /* */ (gcry_cipher_hd_t c); /*-- chacha20.c --*/ gcry_err_code_t _gcry_chacha20_poly1305_encrypt /* */ (gcry_cipher_hd_t c, byte *outbuf, const byte *inbuf, size_t length); gcry_err_code_t _gcry_chacha20_poly1305_decrypt /* */ (gcry_cipher_hd_t c, byte *outbuf, const byte *inbuf, size_t length); /*-- cipher-ocb.c --*/ gcry_err_code_t _gcry_cipher_ocb_encrypt /* */ (gcry_cipher_hd_t c, unsigned char *outbuf, size_t outbuflen, const unsigned char *inbuf, size_t inbuflen); gcry_err_code_t _gcry_cipher_ocb_decrypt /* */ (gcry_cipher_hd_t c, unsigned char *outbuf, size_t outbuflen, const unsigned char *inbuf, size_t inbuflen); gcry_err_code_t _gcry_cipher_ocb_set_nonce /* */ (gcry_cipher_hd_t c, const unsigned char *nonce, size_t noncelen); gcry_err_code_t _gcry_cipher_ocb_authenticate /* */ (gcry_cipher_hd_t c, const unsigned char *abuf, size_t abuflen); gcry_err_code_t _gcry_cipher_ocb_get_tag /* */ (gcry_cipher_hd_t c, unsigned char *outtag, size_t taglen); gcry_err_code_t _gcry_cipher_ocb_check_tag /* */ (gcry_cipher_hd_t c, const unsigned char *intag, size_t taglen); void _gcry_cipher_ocb_setkey /* */ (gcry_cipher_hd_t c); /*-- cipher-xts.c --*/ gcry_err_code_t _gcry_cipher_xts_encrypt /* */ (gcry_cipher_hd_t c, unsigned char *outbuf, size_t outbuflen, const unsigned char *inbuf, size_t inbuflen); gcry_err_code_t _gcry_cipher_xts_decrypt /* */ (gcry_cipher_hd_t c, unsigned char *outbuf, size_t outbuflen, const unsigned char *inbuf, size_t inbuflen); /*-- cipher-siv.c --*/ gcry_err_code_t _gcry_cipher_siv_encrypt /* */ (gcry_cipher_hd_t c, unsigned char *outbuf, size_t outbuflen, const unsigned char *inbuf, size_t inbuflen); gcry_err_code_t _gcry_cipher_siv_decrypt /* */ (gcry_cipher_hd_t c, unsigned char 
*outbuf, size_t outbuflen, const unsigned char *inbuf, size_t inbuflen); gcry_err_code_t _gcry_cipher_siv_set_nonce /* */ (gcry_cipher_hd_t c, const unsigned char *nonce, size_t noncelen); gcry_err_code_t _gcry_cipher_siv_authenticate /* */ (gcry_cipher_hd_t c, const unsigned char *abuf, size_t abuflen); gcry_err_code_t _gcry_cipher_siv_set_decryption_tag /* */ (gcry_cipher_hd_t c, const byte *tag, size_t taglen); gcry_err_code_t _gcry_cipher_siv_get_tag /* */ (gcry_cipher_hd_t c, unsigned char *outtag, size_t taglen); gcry_err_code_t _gcry_cipher_siv_check_tag /* */ (gcry_cipher_hd_t c, const unsigned char *intag, size_t taglen); gcry_err_code_t _gcry_cipher_siv_setkey /* */ (gcry_cipher_hd_t c, const unsigned char *ctrkey, size_t ctrkeylen); /*-- cipher-gcm-siv.c --*/ gcry_err_code_t _gcry_cipher_gcm_siv_encrypt /* */ (gcry_cipher_hd_t c, unsigned char *outbuf, size_t outbuflen, const unsigned char *inbuf, size_t inbuflen); gcry_err_code_t _gcry_cipher_gcm_siv_decrypt /* */ (gcry_cipher_hd_t c, unsigned char *outbuf, size_t outbuflen, const unsigned char *inbuf, size_t inbuflen); gcry_err_code_t _gcry_cipher_gcm_siv_set_nonce /* */ (gcry_cipher_hd_t c, const unsigned char *nonce, size_t noncelen); gcry_err_code_t _gcry_cipher_gcm_siv_authenticate /* */ (gcry_cipher_hd_t c, const unsigned char *abuf, size_t abuflen); gcry_err_code_t _gcry_cipher_gcm_siv_set_decryption_tag /* */ (gcry_cipher_hd_t c, const byte *tag, size_t taglen); gcry_err_code_t _gcry_cipher_gcm_siv_get_tag /* */ (gcry_cipher_hd_t c, unsigned char *outtag, size_t taglen); gcry_err_code_t _gcry_cipher_gcm_siv_check_tag /* */ (gcry_cipher_hd_t c, const unsigned char *intag, size_t taglen); gcry_err_code_t _gcry_cipher_gcm_siv_setkey /* */ (gcry_cipher_hd_t c, unsigned int keylen); /* Return the L-value for block N. 
Note: 'cipher_ocb.c' ensures that N * will never be multiple of 65536 (1 << OCB_L_TABLE_SIZE), thus N can * be directly passed to _gcry_ctz() function and resulting index will * never overflow the table. */ static inline const unsigned char * ocb_get_l (gcry_cipher_hd_t c, u64 n) { unsigned long ntz; #if ((defined(__i386__) || defined(__x86_64__)) && __GNUC__ >= 4) /* Assumes that N != 0. */ asm ("rep;bsfl %k[low], %k[ntz]\n\t" : [ntz] "=r" (ntz) : [low] "r" ((unsigned long)n) : "cc"); #else ntz = _gcry_ctz (n); #endif return c->u_mode.ocb.L[ntz]; } /* Return bit-shift of blocksize. */ static inline unsigned int _gcry_blocksize_shift(gcry_cipher_hd_t c) { /* Only blocksizes 8 and 16 are used. Return value in such way * that compiler can optimize calling functions based on this. */ return c->spec->blocksize == 8 ? 3 : 4; } /* Optimized function for adding value to cipher block. */ static inline void cipher_block_add(void *_dstsrc, unsigned int add, size_t blocksize) { byte *dstsrc = _dstsrc; u64 s[2]; if (blocksize == 8) { buf_put_be64(dstsrc + 0, buf_get_be64(dstsrc + 0) + add); } else /* blocksize == 16 */ { s[0] = buf_get_be64(dstsrc + 8); s[1] = buf_get_be64(dstsrc + 0); s[0] += add; s[1] += (s[0] < add); buf_put_be64(dstsrc + 8, s[0]); buf_put_be64(dstsrc + 0, s[1]); } } /* Optimized function for cipher block copying */ static inline void cipher_block_cpy(void *_dst, const void *_src, size_t blocksize) { byte *dst = _dst; const byte *src = _src; u64 s[2]; if (blocksize == 8) { buf_put_he64(dst + 0, buf_get_he64(src + 0)); } else /* blocksize == 16 */ { s[0] = buf_get_he64(src + 0); s[1] = buf_get_he64(src + 8); buf_put_he64(dst + 0, s[0]); buf_put_he64(dst + 8, s[1]); } } /* Optimized function for cipher block xoring */ static inline void cipher_block_xor(void *_dst, const void *_src1, const void *_src2, size_t blocksize) { byte *dst = _dst; const byte *src1 = _src1; const byte *src2 = _src2; u64 s1[2]; u64 s2[2]; if (blocksize == 8) { buf_put_he64(dst + 0, 
buf_get_he64(src1 + 0) ^ buf_get_he64(src2 + 0)); } else /* blocksize == 16 */ { s1[0] = buf_get_he64(src1 + 0); s1[1] = buf_get_he64(src1 + 8); s2[0] = buf_get_he64(src2 + 0); s2[1] = buf_get_he64(src2 + 8); buf_put_he64(dst + 0, s1[0] ^ s2[0]); buf_put_he64(dst + 8, s1[1] ^ s2[1]); } } /* Optimized function for in-place cipher block xoring */ static inline void cipher_block_xor_1(void *_dst, const void *_src, size_t blocksize) { cipher_block_xor (_dst, _dst, _src, blocksize); } /* Optimized function for cipher block xoring with two destination cipher blocks. Used mainly by CFB mode encryption. */ static inline void cipher_block_xor_2dst(void *_dst1, void *_dst2, const void *_src, size_t blocksize) { byte *dst1 = _dst1; byte *dst2 = _dst2; const byte *src = _src; u64 d2[2]; u64 s[2]; if (blocksize == 8) { d2[0] = buf_get_he64(dst2 + 0) ^ buf_get_he64(src + 0); buf_put_he64(dst2 + 0, d2[0]); buf_put_he64(dst1 + 0, d2[0]); } else /* blocksize == 16 */ { s[0] = buf_get_he64(src + 0); s[1] = buf_get_he64(src + 8); d2[0] = buf_get_he64(dst2 + 0); d2[1] = buf_get_he64(dst2 + 8); d2[0] = d2[0] ^ s[0]; d2[1] = d2[1] ^ s[1]; buf_put_he64(dst2 + 0, d2[0]); buf_put_he64(dst2 + 8, d2[1]); buf_put_he64(dst1 + 0, d2[0]); buf_put_he64(dst1 + 8, d2[1]); } } /* Optimized function for combined cipher block xoring and copying. Used by mainly CBC mode decryption. 
*/ static inline void cipher_block_xor_n_copy_2(void *_dst_xor, const void *_src_xor, void *_srcdst_cpy, const void *_src_cpy, size_t blocksize) { byte *dst_xor = _dst_xor; byte *srcdst_cpy = _srcdst_cpy; const byte *src_xor = _src_xor; const byte *src_cpy = _src_cpy; u64 sc[2]; u64 sx[2]; u64 sdc[2]; if (blocksize == 8) { sc[0] = buf_get_he64(src_cpy + 0); buf_put_he64(dst_xor + 0, buf_get_he64(srcdst_cpy + 0) ^ buf_get_he64(src_xor + 0)); buf_put_he64(srcdst_cpy + 0, sc[0]); } else /* blocksize == 16 */ { sc[0] = buf_get_he64(src_cpy + 0); sc[1] = buf_get_he64(src_cpy + 8); sx[0] = buf_get_he64(src_xor + 0); sx[1] = buf_get_he64(src_xor + 8); sdc[0] = buf_get_he64(srcdst_cpy + 0); sdc[1] = buf_get_he64(srcdst_cpy + 8); sx[0] ^= sdc[0]; sx[1] ^= sdc[1]; buf_put_he64(dst_xor + 0, sx[0]); buf_put_he64(dst_xor + 8, sx[1]); buf_put_he64(srcdst_cpy + 0, sc[0]); buf_put_he64(srcdst_cpy + 8, sc[1]); } } /* Optimized function for combined cipher block byte-swapping. */ static inline void cipher_block_bswap (void *_dst_bswap, const void *_src_bswap, size_t blocksize) { byte *dst_bswap = _dst_bswap; const byte *src_bswap = _src_bswap; u64 t[2]; if (blocksize == 8) { buf_put_le64(dst_bswap, buf_get_be64(src_bswap)); } else { t[0] = buf_get_be64(src_bswap + 0); t[1] = buf_get_be64(src_bswap + 8); buf_put_le64(dst_bswap + 8, t[0]); buf_put_le64(dst_bswap + 0, t[1]); } } /* Optimized function for combined cipher block xoring and copying. Used by mainly CFB mode decryption. 
*/ static inline void cipher_block_xor_n_copy(void *_dst_xor, void *_srcdst_cpy, const void *_src, size_t blocksize) { cipher_block_xor_n_copy_2(_dst_xor, _src, _srcdst_cpy, _src, blocksize); } #endif /*G10_CIPHER_INTERNAL_H*/ diff --git a/cipher/cipher.c b/cipher/cipher.c index 6c335aec..026c1511 100644 --- a/cipher/cipher.c +++ b/cipher/cipher.c @@ -1,2003 +1,2014 @@ /* cipher.c - cipher dispatcher * Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003 * 2005, 2007, 2008, 2009, 2011 Free Software Foundation, Inc. * Copyright (C) 2013 g10 Code GmbH * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser general Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . */ #include #include #include #include #include #include "g10lib.h" #include "../src/gcrypt-testapi.h" #include "cipher.h" #include "./cipher-internal.h" /* This is the list of the default ciphers, which are included in libgcrypt. 
*/ static gcry_cipher_spec_t * const cipher_list[] = { #if USE_BLOWFISH &_gcry_cipher_spec_blowfish, #endif #if USE_DES &_gcry_cipher_spec_des, &_gcry_cipher_spec_tripledes, #endif #if USE_ARCFOUR &_gcry_cipher_spec_arcfour, #endif #if USE_CAST5 &_gcry_cipher_spec_cast5, #endif #if USE_AES &_gcry_cipher_spec_aes, &_gcry_cipher_spec_aes192, &_gcry_cipher_spec_aes256, #endif #if USE_TWOFISH &_gcry_cipher_spec_twofish, &_gcry_cipher_spec_twofish128, #endif #if USE_SERPENT &_gcry_cipher_spec_serpent128, &_gcry_cipher_spec_serpent192, &_gcry_cipher_spec_serpent256, #endif #if USE_RFC2268 &_gcry_cipher_spec_rfc2268_40, &_gcry_cipher_spec_rfc2268_128, #endif #if USE_SEED &_gcry_cipher_spec_seed, #endif #if USE_CAMELLIA &_gcry_cipher_spec_camellia128, &_gcry_cipher_spec_camellia192, &_gcry_cipher_spec_camellia256, #endif #if USE_IDEA &_gcry_cipher_spec_idea, #endif #if USE_SALSA20 &_gcry_cipher_spec_salsa20, &_gcry_cipher_spec_salsa20r12, #endif #if USE_GOST28147 &_gcry_cipher_spec_gost28147, &_gcry_cipher_spec_gost28147_mesh, #endif #if USE_CHACHA20 &_gcry_cipher_spec_chacha20, #endif #if USE_SM4 &_gcry_cipher_spec_sm4, #endif NULL }; /* Cipher implementations starting with index 0 (enum gcry_cipher_algos) */ static gcry_cipher_spec_t * const cipher_list_algo0[] = { NULL, /* GCRY_CIPHER_NONE */ #if USE_IDEA &_gcry_cipher_spec_idea, #else NULL, #endif #if USE_DES &_gcry_cipher_spec_tripledes, #else NULL, #endif #if USE_CAST5 &_gcry_cipher_spec_cast5, #else NULL, #endif #if USE_BLOWFISH &_gcry_cipher_spec_blowfish, #else NULL, #endif NULL, /* GCRY_CIPHER_SAFER_SK128 */ NULL, /* GCRY_CIPHER_DES_SK */ #if USE_AES &_gcry_cipher_spec_aes, &_gcry_cipher_spec_aes192, &_gcry_cipher_spec_aes256, #else NULL, NULL, NULL, #endif #if USE_TWOFISH &_gcry_cipher_spec_twofish #else NULL #endif }; /* Cipher implementations starting with index 301 (enum gcry_cipher_algos) */ static gcry_cipher_spec_t * const cipher_list_algo301[] = { #if USE_ARCFOUR &_gcry_cipher_spec_arcfour, #else NULL, 
#endif #if USE_DES &_gcry_cipher_spec_des, #else NULL, #endif #if USE_TWOFISH &_gcry_cipher_spec_twofish128, #else NULL, #endif #if USE_SERPENT &_gcry_cipher_spec_serpent128, &_gcry_cipher_spec_serpent192, &_gcry_cipher_spec_serpent256, #else NULL, NULL, NULL, #endif #if USE_RFC2268 &_gcry_cipher_spec_rfc2268_40, &_gcry_cipher_spec_rfc2268_128, #else NULL, NULL, #endif #if USE_SEED &_gcry_cipher_spec_seed, #else NULL, #endif #if USE_CAMELLIA &_gcry_cipher_spec_camellia128, &_gcry_cipher_spec_camellia192, &_gcry_cipher_spec_camellia256, #else NULL, NULL, NULL, #endif #if USE_SALSA20 &_gcry_cipher_spec_salsa20, &_gcry_cipher_spec_salsa20r12, #else NULL, NULL, #endif #if USE_GOST28147 &_gcry_cipher_spec_gost28147, #else NULL, #endif #if USE_CHACHA20 &_gcry_cipher_spec_chacha20, #else NULL, #endif #if USE_GOST28147 &_gcry_cipher_spec_gost28147_mesh, #else NULL, #endif #if USE_SM4 &_gcry_cipher_spec_sm4, #else NULL, #endif }; static void _gcry_cipher_setup_mode_ops(gcry_cipher_hd_t c, int mode); static int map_algo (int algo) { return algo; } /* Return the spec structure for the cipher algorithm ALGO. For an unknown algorithm NULL is returned. */ static gcry_cipher_spec_t * spec_from_algo (int algo) { gcry_cipher_spec_t *spec = NULL; algo = map_algo (algo); if (algo >= 0 && algo < DIM(cipher_list_algo0)) spec = cipher_list_algo0[algo]; else if (algo >= 301 && algo < 301 + DIM(cipher_list_algo301)) spec = cipher_list_algo301[algo - 301]; if (spec) gcry_assert (spec->algo == algo); return spec; } /* Lookup a cipher's spec by its name. */ static gcry_cipher_spec_t * spec_from_name (const char *name) { gcry_cipher_spec_t *spec; int idx; const char **aliases; for (idx=0; (spec = cipher_list[idx]); idx++) { if (!stricmp (name, spec->name)) return spec; if (spec->aliases) { for (aliases = spec->aliases; *aliases; aliases++) if (!stricmp (name, *aliases)) return spec; } } return NULL; } /* Lookup a cipher's spec by its OID. 
*/ static gcry_cipher_spec_t * spec_from_oid (const char *oid) { gcry_cipher_spec_t *spec; const gcry_cipher_oid_spec_t *oid_specs; int idx, j; for (idx=0; (spec = cipher_list[idx]); idx++) { oid_specs = spec->oids; if (oid_specs) { for (j = 0; oid_specs[j].oid; j++) if (!stricmp (oid, oid_specs[j].oid)) return spec; } } return NULL; } /* Locate the OID in the oid table and return the spec or NULL if not found. An optional "oid." or "OID." prefix in OID is ignored, the OID is expected to be in standard IETF dotted notation. A pointer to the OID specification of the module implementing this algorithm is return in OID_SPEC unless passed as NULL.*/ static gcry_cipher_spec_t * search_oid (const char *oid, gcry_cipher_oid_spec_t *oid_spec) { gcry_cipher_spec_t *spec; int i; if (!oid) return NULL; if (!strncmp (oid, "oid.", 4) || !strncmp (oid, "OID.", 4)) oid += 4; spec = spec_from_oid (oid); if (spec && spec->oids) { for (i = 0; spec->oids[i].oid; i++) if (!stricmp (oid, spec->oids[i].oid)) { if (oid_spec) *oid_spec = spec->oids[i]; return spec; } } return NULL; } /* Map STRING to the cipher algorithm identifier. Returns the algorithm ID of the cipher for the given name or 0 if the name is not known. It is valid to pass NULL for STRING which results in a return value of 0. */ int _gcry_cipher_map_name (const char *string) { gcry_cipher_spec_t *spec; if (!string) return 0; /* If the string starts with a digit (optionally prefixed with either "OID." or "oid."), we first look into our table of ASN.1 object identifiers to figure out the algorithm */ spec = search_oid (string, NULL); if (spec) return spec->algo; spec = spec_from_name (string); if (spec) return spec->algo; return 0; } /* Given a STRING with an OID in dotted decimal notation, this function returns the cipher mode (GCRY_CIPHER_MODE_*) associated with that OID or 0 if no mode is known. Passing NULL for string yields a return value of 0. 
 */
int
_gcry_cipher_mode_from_oid (const char *string)
{
  gcry_cipher_spec_t *spec;
  gcry_cipher_oid_spec_t oid_spec;

  if (!string)
    return 0;

  spec = search_oid (string, &oid_spec);
  if (spec)
    return oid_spec.mode;

  return 0;
}


/* Map the cipher algorithm identifier ALGORITHM to a string
   representing this algorithm.  This string is the default name as
   used by Libgcrypt.  A "?" is returned for an unknown algorithm.
   NULL is never returned. */
const char *
_gcry_cipher_algo_name (int algorithm)
{
  gcry_cipher_spec_t *spec;

  spec = spec_from_algo (algorithm);
  return spec? spec->name : "?";
}


/* Flag the cipher algorithm with the identifier ALGORITHM as
   disabled.  There is no error return, the function does nothing for
   unknown algorithms.  Disabled algorithms are virtually not
   available in Libgcrypt.  This is not thread safe and should thus
   be called early. */
static void
disable_cipher_algo (int algo)
{
  gcry_cipher_spec_t *spec = spec_from_algo (algo);

  if (spec)
    spec->flags.disabled = 1;
}


/* Return 0 if the cipher algorithm with identifier ALGORITHM is
   available.  Returns a basic error code value if it is not
   available.  */
static gcry_err_code_t
check_cipher_algo (int algorithm)
{
  gcry_cipher_spec_t *spec;

  spec = spec_from_algo (algorithm);
  /* A disabled algorithm is never usable; in FIPS mode only
     algorithms flagged as FIPS approved are usable.  */
  if (spec && !spec->flags.disabled && (spec->flags.fips || !fips_mode ()))
    return 0;

  return GPG_ERR_CIPHER_ALGO;
}


/* Return the standard length in bits of the key for the cipher
   algorithm with the identifier ALGORITHM.  */
static unsigned int
cipher_get_keylen (int algorithm)
{
  gcry_cipher_spec_t *spec;
  unsigned len = 0;

  spec = spec_from_algo (algorithm);
  if (spec)
    {
      len = spec->keylen;
      if (!len)
        log_bug ("cipher %d w/o key length\n", algorithm);
    }

  return len;
}


/* Return the block length of the cipher algorithm with the
   identifier ALGORITHM.  This function returns 0 for an invalid
   algorithm.
*/ static unsigned int cipher_get_blocksize (int algorithm) { gcry_cipher_spec_t *spec; unsigned len = 0; spec = spec_from_algo (algorithm); if (spec) { len = spec->blocksize; if (!len) log_bug ("cipher %d w/o blocksize\n", algorithm); } return len; } /* Open a cipher handle for use with cipher algorithm ALGORITHM, using the cipher mode MODE (one of the GCRY_CIPHER_MODE_*) and return a handle in HANDLE. Put NULL into HANDLE and return an error code if something goes wrong. FLAGS may be used to modify the operation. The defined flags are: GCRY_CIPHER_SECURE: allocate all internal buffers in secure memory. GCRY_CIPHER_ENABLE_SYNC: Enable the sync operation as used in OpenPGP. GCRY_CIPHER_CBC_CTS: Enable CTS mode. GCRY_CIPHER_CBC_MAC: Enable MAC mode. Values for these flags may be combined using OR. */ gcry_err_code_t _gcry_cipher_open (gcry_cipher_hd_t *handle, int algo, int mode, unsigned int flags) { gcry_err_code_t rc; gcry_cipher_hd_t h = NULL; if (mode >= GCRY_CIPHER_MODE_INTERNAL) rc = GPG_ERR_INV_CIPHER_MODE; else rc = _gcry_cipher_open_internal (&h, algo, mode, flags); *handle = rc ? NULL : h; return rc; } gcry_err_code_t _gcry_cipher_open_internal (gcry_cipher_hd_t *handle, int algo, int mode, unsigned int flags) { int secure = (flags & GCRY_CIPHER_SECURE); gcry_cipher_spec_t *spec; gcry_cipher_hd_t h = NULL; gcry_err_code_t err; /* If the application missed to call the random poll function, we do it here to ensure that it is used once in a while. */ _gcry_fast_random_poll (); spec = spec_from_algo (algo); if (!spec) err = GPG_ERR_CIPHER_ALGO; else if (spec->flags.disabled) err = GPG_ERR_CIPHER_ALGO; else if (!spec->flags.fips && fips_mode ()) err = GPG_ERR_CIPHER_ALGO; else err = 0; /* check flags */ if ((! 
err) && ((flags & ~(0 | GCRY_CIPHER_SECURE | GCRY_CIPHER_ENABLE_SYNC | GCRY_CIPHER_CBC_CTS | GCRY_CIPHER_CBC_MAC | GCRY_CIPHER_EXTENDED)) || ((flags & GCRY_CIPHER_CBC_CTS) && (flags & GCRY_CIPHER_CBC_MAC)))) err = GPG_ERR_CIPHER_ALGO; /* check that a valid mode has been requested */ if (! err) switch (mode) { case GCRY_CIPHER_MODE_ECB: case GCRY_CIPHER_MODE_CBC: case GCRY_CIPHER_MODE_CFB: case GCRY_CIPHER_MODE_CFB8: case GCRY_CIPHER_MODE_OFB: case GCRY_CIPHER_MODE_CTR: case GCRY_CIPHER_MODE_AESWRAP: case GCRY_CIPHER_MODE_CMAC: case GCRY_CIPHER_MODE_EAX: if (!spec->encrypt || !spec->decrypt) err = GPG_ERR_INV_CIPHER_MODE; break; case GCRY_CIPHER_MODE_CCM: if (!spec->encrypt || !spec->decrypt) err = GPG_ERR_INV_CIPHER_MODE; else if (spec->blocksize != GCRY_CCM_BLOCK_LEN) err = GPG_ERR_INV_CIPHER_MODE; break; case GCRY_CIPHER_MODE_XTS: if (!spec->encrypt || !spec->decrypt) err = GPG_ERR_INV_CIPHER_MODE; else if (spec->blocksize != GCRY_XTS_BLOCK_LEN) err = GPG_ERR_INV_CIPHER_MODE; break; case GCRY_CIPHER_MODE_GCM: if (!spec->encrypt || !spec->decrypt) err = GPG_ERR_INV_CIPHER_MODE; else if (spec->blocksize != GCRY_GCM_BLOCK_LEN) err = GPG_ERR_INV_CIPHER_MODE; break; case GCRY_CIPHER_MODE_SIV: case GCRY_CIPHER_MODE_GCM_SIV: if (!spec->encrypt || !spec->decrypt) err = GPG_ERR_INV_CIPHER_MODE; else if (spec->blocksize != GCRY_SIV_BLOCK_LEN) err = GPG_ERR_INV_CIPHER_MODE; break; case GCRY_CIPHER_MODE_POLY1305: if (!spec->stencrypt || !spec->stdecrypt || !spec->setiv) err = GPG_ERR_INV_CIPHER_MODE; else if (spec->algo != GCRY_CIPHER_CHACHA20) err = GPG_ERR_INV_CIPHER_MODE; break; case GCRY_CIPHER_MODE_OCB: /* Note that our implementation allows only for 128 bit block length algorithms. Lower block lengths would be possible but we do not implement them because they limit the security too much. 
*/ if (!spec->encrypt || !spec->decrypt) err = GPG_ERR_INV_CIPHER_MODE; else if (spec->blocksize != GCRY_OCB_BLOCK_LEN) err = GPG_ERR_INV_CIPHER_MODE; break; case GCRY_CIPHER_MODE_STREAM: if (!spec->stencrypt || !spec->stdecrypt) err = GPG_ERR_INV_CIPHER_MODE; break; case GCRY_CIPHER_MODE_NONE: /* This mode may be used for debugging. It copies the main text verbatim to the ciphertext. We do not allow this in fips mode or if no debug flag has been set. */ if (fips_mode () || !_gcry_get_debug_flag (0)) err = GPG_ERR_INV_CIPHER_MODE; break; default: err = GPG_ERR_INV_CIPHER_MODE; } /* Perform selftest here and mark this with a flag in cipher_table? No, we should not do this as it takes too long. Further it does not make sense to exclude algorithms with failing selftests at runtime: If a selftest fails there is something seriously wrong with the system and thus we better die immediately. */ if (! err) { size_t size = (sizeof (*h) + 2 * spec->contextsize - sizeof (cipher_context_alignment_t) #ifdef NEED_16BYTE_ALIGNED_CONTEXT + 15 /* Space for leading alignment gap. */ #endif /*NEED_16BYTE_ALIGNED_CONTEXT*/ ); /* Space needed per mode. */ switch (mode) { case GCRY_CIPHER_MODE_XTS: case GCRY_CIPHER_MODE_SIV: /* Additional cipher context for tweak. */ size += 2 * spec->contextsize + 15; break; default: break; } if (secure) h = xtrycalloc_secure (1, size); else h = xtrycalloc (1, size); if (! h) err = gpg_err_code_from_syserror (); else { size_t off = 0; char *tc; #ifdef NEED_16BYTE_ALIGNED_CONTEXT if ( ((uintptr_t)h & 0x0f) ) { /* The malloced block is not aligned on a 16 byte boundary. Correct for this. */ off = 16 - ((uintptr_t)h & 0x0f); h = (void*)((char*)h + off); } #endif /*NEED_16BYTE_ALIGNED_CONTEXT*/ h->magic = secure ? CTX_MAGIC_SECURE : CTX_MAGIC_NORMAL; h->actual_handle_size = size - off; h->handle_offset = off; h->spec = spec; h->algo = algo; h->mode = mode; h->flags = flags; /* Setup mode routines. 
*/ _gcry_cipher_setup_mode_ops(h, mode); /* Setup defaults depending on the mode. */ switch (mode) { case GCRY_CIPHER_MODE_OCB: h->u_mode.ocb.taglen = 16; /* Bytes. */ break; case GCRY_CIPHER_MODE_XTS: tc = h->context.c + spec->contextsize * 2; tc += (16 - (uintptr_t)tc % 16) % 16; h->u_mode.xts.tweak_context = tc; break; case GCRY_CIPHER_MODE_SIV: tc = h->context.c + spec->contextsize * 2; tc += (16 - (uintptr_t)tc % 16) % 16; h->u_mode.siv.ctr_context = tc; break; default: break; } } } /* Done. */ *handle = err ? NULL : h; return err; } /* Release all resources associated with the cipher handle H. H may be NULL in which case this is a no-operation. */ void _gcry_cipher_close (gcry_cipher_hd_t h) { size_t off; if (!h) return; if ((h->magic != CTX_MAGIC_SECURE) && (h->magic != CTX_MAGIC_NORMAL)) _gcry_fatal_error(GPG_ERR_INTERNAL, "gcry_cipher_close: already closed/invalid handle"); else h->magic = 0; /* We always want to wipe out the memory even when the context has been allocated in secure memory. The user might have disabled secure memory or is using his own implementation which does not do the wiping. To accomplish this we need to keep track of the actual size of this structure because we have no way to known how large the allocated area was when using a standard malloc. */ off = h->handle_offset; wipememory (h, h->actual_handle_size); xfree ((char*)h - off); } /* Set the key to be used for the encryption context C to KEY with length KEYLEN. The length should match the required length. */ static gcry_err_code_t cipher_setkey (gcry_cipher_hd_t c, byte *key, size_t keylen) { gcry_err_code_t rc; if (c->mode == GCRY_CIPHER_MODE_XTS) { /* XTS uses two keys. */ if (keylen % 2) return GPG_ERR_INV_KEYLEN; keylen /= 2; if (fips_mode ()) { /* Reject key if subkeys Key_1 and Key_2 are equal. See "Implementation Guidance for FIPS 140-2, A.9 XTS-AES Key Generation Requirements" for details. 
*/ if (buf_eq_const (key, key + keylen, keylen)) return GPG_ERR_WEAK_KEY; } } else if (c->mode == GCRY_CIPHER_MODE_SIV) { /* SIV uses two keys. */ if (keylen % 2) return GPG_ERR_INV_KEYLEN; keylen /= 2; } rc = c->spec->setkey (&c->context.c, key, keylen, &c->bulk); if (!rc || (c->marks.allow_weak_key && rc == GPG_ERR_WEAK_KEY)) { /* Duplicate initial context. */ memcpy ((void *) ((char *) &c->context.c + c->spec->contextsize), (void *) &c->context.c, c->spec->contextsize); c->marks.key = 1; switch (c->mode) { case GCRY_CIPHER_MODE_CMAC: rc = _gcry_cipher_cmac_set_subkeys (c); break; case GCRY_CIPHER_MODE_EAX: rc = _gcry_cipher_eax_setkey (c); break; case GCRY_CIPHER_MODE_GCM: _gcry_cipher_gcm_setkey (c); break; case GCRY_CIPHER_MODE_GCM_SIV: rc = _gcry_cipher_gcm_siv_setkey (c, keylen); if (rc) c->marks.key = 0; break; case GCRY_CIPHER_MODE_OCB: _gcry_cipher_ocb_setkey (c); break; case GCRY_CIPHER_MODE_POLY1305: _gcry_cipher_poly1305_setkey (c); break; case GCRY_CIPHER_MODE_XTS: /* Setup tweak cipher with second part of XTS key. */ rc = c->spec->setkey (c->u_mode.xts.tweak_context, key + keylen, keylen, &c->bulk); if (!rc || (c->marks.allow_weak_key && rc == GPG_ERR_WEAK_KEY)) { /* Duplicate initial tweak context. */ memcpy (c->u_mode.xts.tweak_context + c->spec->contextsize, c->u_mode.xts.tweak_context, c->spec->contextsize); } else c->marks.key = 0; break; case GCRY_CIPHER_MODE_SIV: /* Setup CTR cipher with second part of SIV key. */ rc = _gcry_cipher_siv_setkey (c, key + keylen, keylen); if (!rc || (c->marks.allow_weak_key && rc == GPG_ERR_WEAK_KEY)) { /* Duplicate initial CTR context. */ memcpy (c->u_mode.siv.ctr_context + c->spec->contextsize, c->u_mode.siv.ctr_context, c->spec->contextsize); } else c->marks.key = 0; break; default: break; } } else c->marks.key = 0; return rc; } /* Set the IV to be used for the encryption context C to IV with length IVLEN. The length should match the required length. 
*/ static gcry_err_code_t cipher_setiv (gcry_cipher_hd_t c, const byte *iv, size_t ivlen) { /* If the cipher has its own IV handler, we use only this one. This is currently used for stream ciphers requiring a nonce. */ if (c->spec->setiv) { c->spec->setiv (&c->context.c, iv, ivlen); return 0; } memset (c->u_iv.iv, 0, c->spec->blocksize); if (iv) { if (ivlen != c->spec->blocksize) { log_info ("WARNING: cipher_setiv: ivlen=%u blklen=%u\n", (unsigned int)ivlen, (unsigned int)c->spec->blocksize); fips_signal_error ("IV length does not match blocklength"); } if (ivlen > c->spec->blocksize) ivlen = c->spec->blocksize; memcpy (c->u_iv.iv, iv, ivlen); c->marks.iv = 1; } else c->marks.iv = 0; c->unused = 0; return 0; } /* Reset the cipher context to the initial context. This is basically the same as an release followed by a new. */ static void cipher_reset (gcry_cipher_hd_t c) { unsigned int marks_key, marks_allow_weak_key; marks_key = c->marks.key; marks_allow_weak_key = c->marks.allow_weak_key; memcpy (&c->context.c, (char *) &c->context.c + c->spec->contextsize, c->spec->contextsize); memset (&c->marks, 0, sizeof c->marks); memset (c->u_iv.iv, 0, c->spec->blocksize); memset (c->lastiv, 0, c->spec->blocksize); memset (c->u_ctr.ctr, 0, c->spec->blocksize); c->unused = 0; c->marks.key = marks_key; c->marks.allow_weak_key = marks_allow_weak_key; switch (c->mode) { case GCRY_CIPHER_MODE_CMAC: _gcry_cmac_reset(&c->u_mode.cmac); break; case GCRY_CIPHER_MODE_EAX: _gcry_cmac_reset(&c->u_mode.eax.cmac_header); _gcry_cmac_reset(&c->u_mode.eax.cmac_ciphertext); break; case GCRY_CIPHER_MODE_GCM: case GCRY_CIPHER_MODE_GCM_SIV: /* Only clear head of u_mode, keep ghash_key and gcm_table. 
*/ { byte *u_mode_pos = (void *)&c->u_mode; byte *ghash_key_pos = c->u_mode.gcm.u_ghash_key.key; size_t u_mode_head_length = ghash_key_pos - u_mode_pos; memset (&c->u_mode, 0, u_mode_head_length); } break; case GCRY_CIPHER_MODE_POLY1305: memset (&c->u_mode.poly1305, 0, sizeof c->u_mode.poly1305); break; case GCRY_CIPHER_MODE_CCM: memset (&c->u_mode.ccm, 0, sizeof c->u_mode.ccm); break; case GCRY_CIPHER_MODE_OCB: { const size_t table_maxblks = 1 << OCB_L_TABLE_SIZE; byte *u_mode_head_pos = (void *)&c->u_mode.ocb; byte *u_mode_tail_pos = (void *)&c->u_mode.ocb.tag; size_t u_mode_head_length = u_mode_tail_pos - u_mode_head_pos; size_t u_mode_tail_length = sizeof(c->u_mode.ocb) - u_mode_head_length; if (c->u_mode.ocb.aad_nblocks < table_maxblks) { /* Precalculated L-values are still ok after reset, no need * to clear. */ memset (u_mode_tail_pos, 0, u_mode_tail_length); } else { /* Reinitialize L table. */ memset (&c->u_mode.ocb, 0, sizeof(c->u_mode.ocb)); _gcry_cipher_ocb_setkey (c); } /* Setup default taglen. */ c->u_mode.ocb.taglen = 16; } break; case GCRY_CIPHER_MODE_XTS: memcpy (c->u_mode.xts.tweak_context, c->u_mode.xts.tweak_context + c->spec->contextsize, c->spec->contextsize); break; case GCRY_CIPHER_MODE_SIV: /* Only clear head of u_mode, keep s2v_cmac and ctr_context. */ { byte *u_mode_pos = (void *)&c->u_mode; byte *tail_pos = (void *)&c->u_mode.siv.s2v_cmac; size_t u_mode_head_length = tail_pos - u_mode_pos; memset (&c->u_mode, 0, u_mode_head_length); memcpy (c->u_mode.siv.ctr_context, c->u_mode.siv.ctr_context + c->spec->contextsize, c->spec->contextsize); memcpy (c->u_mode.siv.s2v_d, c->u_mode.siv.s2v_zero_block, GCRY_SIV_BLOCK_LEN); } break; default: break; /* u_mode unused by other modes. 
*/ } } static gcry_err_code_t -do_ecb_crypt (gcry_cipher_hd_t c, - unsigned char *outbuf, size_t outbuflen, - const unsigned char *inbuf, size_t inbuflen, - gcry_cipher_encrypt_t crypt_fn) +do_ecb_crypt (gcry_cipher_hd_t c, unsigned char *outbuf, size_t outbuflen, + const unsigned char *inbuf, size_t inbuflen, int encrypt) { unsigned int blocksize = c->spec->blocksize; size_t n, nblocks; - unsigned int burn, nburn; if (outbuflen < inbuflen) return GPG_ERR_BUFFER_TOO_SHORT; if ((inbuflen % blocksize)) return GPG_ERR_INV_LENGTH; nblocks = inbuflen / blocksize; - burn = 0; - for (n=0; n < nblocks; n++ ) + if (nblocks == 0) + return 0; + + if (c->bulk.ecb_crypt) { - nburn = crypt_fn (&c->context.c, outbuf, inbuf); - burn = nburn > burn ? nburn : burn; - inbuf += blocksize; - outbuf += blocksize; + c->bulk.ecb_crypt (&c->context.c, outbuf, inbuf, nblocks, encrypt); } + else + { + gcry_cipher_encrypt_t crypt_fn = + encrypt ? c->spec->encrypt : c->spec->decrypt; + unsigned int burn = 0; + unsigned int nburn; - if (burn > 0) - _gcry_burn_stack (burn + 4 * sizeof(void *)); + for (n = 0; n < nblocks; n++) + { + nburn = crypt_fn (&c->context.c, outbuf, inbuf); + burn = nburn > burn ? 
nburn : burn; + inbuf += blocksize; + outbuf += blocksize; + } + + if (burn > 0) + _gcry_burn_stack (burn + 4 * sizeof(void *)); + } return 0; } static gcry_err_code_t do_ecb_encrypt (gcry_cipher_hd_t c, unsigned char *outbuf, size_t outbuflen, const unsigned char *inbuf, size_t inbuflen) { - return do_ecb_crypt (c, outbuf, outbuflen, inbuf, inbuflen, c->spec->encrypt); + return do_ecb_crypt (c, outbuf, outbuflen, inbuf, inbuflen, 1); } static gcry_err_code_t do_ecb_decrypt (gcry_cipher_hd_t c, unsigned char *outbuf, size_t outbuflen, const unsigned char *inbuf, size_t inbuflen) { - return do_ecb_crypt (c, outbuf, outbuflen, inbuf, inbuflen, c->spec->decrypt); + return do_ecb_crypt (c, outbuf, outbuflen, inbuf, inbuflen, 0); } static gcry_err_code_t do_stream_encrypt (gcry_cipher_hd_t c, unsigned char *outbuf, size_t outbuflen, const unsigned char *inbuf, size_t inbuflen) { (void)outbuflen; c->spec->stencrypt (&c->context.c, outbuf, (void *)inbuf, inbuflen); return 0; } static gcry_err_code_t do_stream_decrypt (gcry_cipher_hd_t c, unsigned char *outbuf, size_t outbuflen, const unsigned char *inbuf, size_t inbuflen) { (void)outbuflen; c->spec->stdecrypt (&c->context.c, outbuf, (void *)inbuf, inbuflen); return 0; } static gcry_err_code_t do_encrypt_none_unknown (gcry_cipher_hd_t c, byte *outbuf, size_t outbuflen, const byte *inbuf, size_t inbuflen) { gcry_err_code_t rc; (void)outbuflen; switch (c->mode) { case GCRY_CIPHER_MODE_CMAC: rc = GPG_ERR_INV_CIPHER_MODE; break; case GCRY_CIPHER_MODE_NONE: if (fips_mode () || !_gcry_get_debug_flag (0)) { fips_signal_error ("cipher mode NONE used"); rc = GPG_ERR_INV_CIPHER_MODE; } else { if (inbuf != outbuf) memmove (outbuf, inbuf, inbuflen); rc = 0; } break; default: log_fatal ("cipher_encrypt: invalid mode %d\n", c->mode ); rc = GPG_ERR_INV_CIPHER_MODE; break; } return rc; } static gcry_err_code_t do_decrypt_none_unknown (gcry_cipher_hd_t c, byte *outbuf, size_t outbuflen, const byte *inbuf, size_t inbuflen) { gcry_err_code_t 
rc; (void)outbuflen; switch (c->mode) { case GCRY_CIPHER_MODE_CMAC: rc = GPG_ERR_INV_CIPHER_MODE; break; case GCRY_CIPHER_MODE_NONE: if (fips_mode () || !_gcry_get_debug_flag (0)) { fips_signal_error ("cipher mode NONE used"); rc = GPG_ERR_INV_CIPHER_MODE; } else { if (inbuf != outbuf) memmove (outbuf, inbuf, inbuflen); rc = 0; } break; default: log_fatal ("cipher_decrypt: invalid mode %d\n", c->mode ); rc = GPG_ERR_INV_CIPHER_MODE; break; } return rc; } /**************** * Encrypt IN and write it to OUT. If IN is NULL, in-place encryption has * been requested. */ gcry_err_code_t _gcry_cipher_encrypt (gcry_cipher_hd_t h, void *out, size_t outsize, const void *in, size_t inlen) { gcry_err_code_t rc; if (!in) /* Caller requested in-place encryption. */ { in = out; inlen = outsize; } if (h->mode != GCRY_CIPHER_MODE_NONE && !h->marks.key) { log_error ("cipher_encrypt: key not set\n"); return GPG_ERR_MISSING_KEY; } rc = h->mode_ops.encrypt (h, out, outsize, in, inlen); /* Failsafe: Make sure that the plaintext will never make it into OUT if the encryption returned an error. */ if (rc && out) memset (out, 0x42, outsize); return rc; } /**************** * Decrypt IN and write it to OUT. If IN is NULL, in-place encryption has * been requested. */ gcry_err_code_t _gcry_cipher_decrypt (gcry_cipher_hd_t h, void *out, size_t outsize, const void *in, size_t inlen) { if (!in) /* Caller requested in-place encryption. */ { in = out; inlen = outsize; } if (h->mode != GCRY_CIPHER_MODE_NONE && !h->marks.key) { log_error ("cipher_decrypt: key not set\n"); return GPG_ERR_MISSING_KEY; } return h->mode_ops.decrypt (h, out, outsize, in, inlen); } /**************** * Used for PGP's somewhat strange CFB mode. Only works if * the corresponding flag is set. 
*/ static void cipher_sync (gcry_cipher_hd_t c) { if ((c->flags & GCRY_CIPHER_ENABLE_SYNC) && c->unused) { memmove (c->u_iv.iv + c->unused, c->u_iv.iv, c->spec->blocksize - c->unused); memcpy (c->u_iv.iv, c->lastiv + c->spec->blocksize - c->unused, c->unused); c->unused = 0; } } gcry_err_code_t _gcry_cipher_setkey (gcry_cipher_hd_t hd, const void *key, size_t keylen) { return cipher_setkey (hd, (void*)key, keylen); } gcry_err_code_t _gcry_cipher_setiv (gcry_cipher_hd_t c, const void *iv, size_t ivlen) { if (c->mode == GCRY_CIPHER_MODE_GCM) { c->u_mode.gcm.disallow_encryption_because_of_setiv_in_fips_mode = 0; if (fips_mode ()) { /* Direct invocation of GCM setiv in FIPS mode disables encryption. */ c->u_mode.gcm.disallow_encryption_because_of_setiv_in_fips_mode = 1; } } return c->mode_ops.setiv (c, iv, ivlen); } /* Set counter for CTR mode. (CTR,CTRLEN) must denote a buffer of block size length, or (NULL,0) to set the CTR to the all-zero block. */ gpg_err_code_t _gcry_cipher_setctr (gcry_cipher_hd_t hd, const void *ctr, size_t ctrlen) { if (ctr && ctrlen == hd->spec->blocksize) { memcpy (hd->u_ctr.ctr, ctr, hd->spec->blocksize); hd->unused = 0; } else if (!ctr || !ctrlen) { memset (hd->u_ctr.ctr, 0, hd->spec->blocksize); hd->unused = 0; } else return GPG_ERR_INV_ARG; return 0; } gpg_err_code_t _gcry_cipher_getctr (gcry_cipher_hd_t hd, void *ctr, size_t ctrlen) { if (ctr && ctrlen == hd->spec->blocksize) memcpy (ctr, hd->u_ctr.ctr, hd->spec->blocksize); else return GPG_ERR_INV_ARG; return 0; } gcry_err_code_t _gcry_cipher_setup_geniv (gcry_cipher_hd_t hd, int method, const void *fixed_iv, size_t fixed_iv_len, const void *dyn_iv, size_t dyn_iv_len) { gcry_err_code_t rc = 0; if (method != GCRY_CIPHER_GENIV_METHOD_CONCAT) return GPG_ERR_INV_ARG; if (fixed_iv_len + dyn_iv_len > MAX_BLOCKSIZE) return GPG_ERR_INV_ARG; hd->aead.geniv_method = GCRY_CIPHER_GENIV_METHOD_CONCAT; hd->aead.fixed_iv_len = fixed_iv_len; hd->aead.dynamic_iv_len = dyn_iv_len; memset (hd->aead.fixed, 
0, MAX_BLOCKSIZE); memset (hd->aead.dynamic, 0, MAX_BLOCKSIZE); memcpy (hd->aead.fixed, fixed_iv, fixed_iv_len); memcpy (hd->aead.dynamic, dyn_iv, dyn_iv_len); return rc; } gcry_err_code_t _gcry_cipher_geniv (gcry_cipher_hd_t hd, void *iv, size_t iv_len) { gcry_err_code_t rc = 0; int i; if (hd->aead.geniv_method != GCRY_CIPHER_GENIV_METHOD_CONCAT) return GPG_ERR_INV_ARG; if (iv_len != hd->aead.fixed_iv_len + hd->aead.dynamic_iv_len) return GPG_ERR_INV_ARG; memcpy (iv, hd->aead.fixed, hd->aead.fixed_iv_len); memcpy ((byte *)iv+hd->aead.fixed_iv_len, hd->aead.dynamic, hd->aead.dynamic_iv_len); rc = hd->mode_ops.setiv (hd, iv, iv_len); for (i = hd->aead.dynamic_iv_len; i > 0; i--) if (++hd->aead.dynamic[i - 1] != 0) break; return rc; } gcry_err_code_t _gcry_cipher_authenticate (gcry_cipher_hd_t hd, const void *abuf, size_t abuflen) { gcry_err_code_t rc; if (hd->mode_ops.authenticate) { rc = hd->mode_ops.authenticate (hd, abuf, abuflen); } else { log_error ("gcry_cipher_authenticate: invalid mode %d\n", hd->mode); rc = GPG_ERR_INV_CIPHER_MODE; } return rc; } gcry_err_code_t _gcry_cipher_gettag (gcry_cipher_hd_t hd, void *outtag, size_t taglen) { gcry_err_code_t rc; if (hd->mode_ops.get_tag) { rc = hd->mode_ops.get_tag (hd, outtag, taglen); } else { log_error ("gcry_cipher_gettag: invalid mode %d\n", hd->mode); rc = GPG_ERR_INV_CIPHER_MODE; } return rc; } gcry_err_code_t _gcry_cipher_checktag (gcry_cipher_hd_t hd, const void *intag, size_t taglen) { gcry_err_code_t rc; if (hd->mode_ops.check_tag) { rc = hd->mode_ops.check_tag (hd, intag, taglen); } else { log_error ("gcry_cipher_checktag: invalid mode %d\n", hd->mode); rc = GPG_ERR_INV_CIPHER_MODE; } return rc; } static void _gcry_cipher_setup_mode_ops(gcry_cipher_hd_t c, int mode) { /* Setup encryption and decryption routines. 
*/ switch (mode) { case GCRY_CIPHER_MODE_STREAM: c->mode_ops.encrypt = do_stream_encrypt; c->mode_ops.decrypt = do_stream_decrypt; break; case GCRY_CIPHER_MODE_ECB: c->mode_ops.encrypt = do_ecb_encrypt; c->mode_ops.decrypt = do_ecb_decrypt; break; case GCRY_CIPHER_MODE_CBC: if (!(c->flags & GCRY_CIPHER_CBC_CTS)) { c->mode_ops.encrypt = _gcry_cipher_cbc_encrypt; c->mode_ops.decrypt = _gcry_cipher_cbc_decrypt; } else { c->mode_ops.encrypt = _gcry_cipher_cbc_cts_encrypt; c->mode_ops.decrypt = _gcry_cipher_cbc_cts_decrypt; } break; case GCRY_CIPHER_MODE_CFB: c->mode_ops.encrypt = _gcry_cipher_cfb_encrypt; c->mode_ops.decrypt = _gcry_cipher_cfb_decrypt; break; case GCRY_CIPHER_MODE_CFB8: c->mode_ops.encrypt = _gcry_cipher_cfb8_encrypt; c->mode_ops.decrypt = _gcry_cipher_cfb8_decrypt; break; case GCRY_CIPHER_MODE_OFB: c->mode_ops.encrypt = _gcry_cipher_ofb_encrypt; c->mode_ops.decrypt = _gcry_cipher_ofb_encrypt; break; case GCRY_CIPHER_MODE_CTR: c->mode_ops.encrypt = _gcry_cipher_ctr_encrypt; c->mode_ops.decrypt = _gcry_cipher_ctr_encrypt; break; case GCRY_CIPHER_MODE_AESWRAP: c->mode_ops.decrypt = _gcry_cipher_keywrap_decrypt_auto; if (!(c->flags & GCRY_CIPHER_EXTENDED)) c->mode_ops.encrypt = _gcry_cipher_keywrap_encrypt; else c->mode_ops.encrypt = _gcry_cipher_keywrap_encrypt_padding; break; case GCRY_CIPHER_MODE_CCM: c->mode_ops.encrypt = _gcry_cipher_ccm_encrypt; c->mode_ops.decrypt = _gcry_cipher_ccm_decrypt; break; case GCRY_CIPHER_MODE_EAX: c->mode_ops.encrypt = _gcry_cipher_eax_encrypt; c->mode_ops.decrypt = _gcry_cipher_eax_decrypt; break; case GCRY_CIPHER_MODE_GCM: c->mode_ops.encrypt = _gcry_cipher_gcm_encrypt; c->mode_ops.decrypt = _gcry_cipher_gcm_decrypt; break; case GCRY_CIPHER_MODE_POLY1305: c->mode_ops.encrypt = _gcry_cipher_poly1305_encrypt; c->mode_ops.decrypt = _gcry_cipher_poly1305_decrypt; break; case GCRY_CIPHER_MODE_OCB: c->mode_ops.encrypt = _gcry_cipher_ocb_encrypt; c->mode_ops.decrypt = _gcry_cipher_ocb_decrypt; break; case 
GCRY_CIPHER_MODE_XTS: c->mode_ops.encrypt = _gcry_cipher_xts_encrypt; c->mode_ops.decrypt = _gcry_cipher_xts_decrypt; break; case GCRY_CIPHER_MODE_SIV: c->mode_ops.encrypt = _gcry_cipher_siv_encrypt; c->mode_ops.decrypt = _gcry_cipher_siv_decrypt; break; case GCRY_CIPHER_MODE_GCM_SIV: c->mode_ops.encrypt = _gcry_cipher_gcm_siv_encrypt; c->mode_ops.decrypt = _gcry_cipher_gcm_siv_decrypt; break; default: c->mode_ops.encrypt = do_encrypt_none_unknown; c->mode_ops.decrypt = do_decrypt_none_unknown; break; } /* Setup IV setting routine. */ switch (mode) { case GCRY_CIPHER_MODE_CCM: c->mode_ops.setiv = _gcry_cipher_ccm_set_nonce; break; case GCRY_CIPHER_MODE_EAX: c->mode_ops.setiv = _gcry_cipher_eax_set_nonce; break; case GCRY_CIPHER_MODE_GCM: c->mode_ops.setiv = _gcry_cipher_gcm_setiv; break; case GCRY_CIPHER_MODE_POLY1305: c->mode_ops.setiv = _gcry_cipher_poly1305_setiv; break; case GCRY_CIPHER_MODE_OCB: c->mode_ops.setiv = _gcry_cipher_ocb_set_nonce; break; case GCRY_CIPHER_MODE_SIV: c->mode_ops.setiv = _gcry_cipher_siv_set_nonce; break; case GCRY_CIPHER_MODE_GCM_SIV: c->mode_ops.setiv = _gcry_cipher_gcm_siv_set_nonce; break; default: c->mode_ops.setiv = cipher_setiv; break; } /* Setup authentication routines for AEAD modes. 
*/ switch (mode) { case GCRY_CIPHER_MODE_CCM: c->mode_ops.authenticate = _gcry_cipher_ccm_authenticate; c->mode_ops.get_tag = _gcry_cipher_ccm_get_tag; c->mode_ops.check_tag = _gcry_cipher_ccm_check_tag; break; case GCRY_CIPHER_MODE_CMAC: c->mode_ops.authenticate = _gcry_cipher_cmac_authenticate; c->mode_ops.get_tag = _gcry_cipher_cmac_get_tag; c->mode_ops.check_tag = _gcry_cipher_cmac_check_tag; break; case GCRY_CIPHER_MODE_EAX: c->mode_ops.authenticate = _gcry_cipher_eax_authenticate; c->mode_ops.get_tag = _gcry_cipher_eax_get_tag; c->mode_ops.check_tag = _gcry_cipher_eax_check_tag; break; case GCRY_CIPHER_MODE_GCM: c->mode_ops.authenticate = _gcry_cipher_gcm_authenticate; c->mode_ops.get_tag = _gcry_cipher_gcm_get_tag; c->mode_ops.check_tag = _gcry_cipher_gcm_check_tag; break; case GCRY_CIPHER_MODE_POLY1305: c->mode_ops.authenticate = _gcry_cipher_poly1305_authenticate; c->mode_ops.get_tag = _gcry_cipher_poly1305_get_tag; c->mode_ops.check_tag = _gcry_cipher_poly1305_check_tag; break; case GCRY_CIPHER_MODE_OCB: c->mode_ops.authenticate = _gcry_cipher_ocb_authenticate; c->mode_ops.get_tag = _gcry_cipher_ocb_get_tag; c->mode_ops.check_tag = _gcry_cipher_ocb_check_tag; break; case GCRY_CIPHER_MODE_SIV: c->mode_ops.authenticate = _gcry_cipher_siv_authenticate; c->mode_ops.get_tag = _gcry_cipher_siv_get_tag; c->mode_ops.check_tag = _gcry_cipher_siv_check_tag; break; case GCRY_CIPHER_MODE_GCM_SIV: c->mode_ops.authenticate = _gcry_cipher_gcm_siv_authenticate; c->mode_ops.get_tag = _gcry_cipher_gcm_siv_get_tag; c->mode_ops.check_tag = _gcry_cipher_gcm_siv_check_tag; break; default: c->mode_ops.authenticate = NULL; c->mode_ops.get_tag = NULL; c->mode_ops.check_tag = NULL; break; } } gcry_err_code_t _gcry_cipher_ctl (gcry_cipher_hd_t h, int cmd, void *buffer, size_t buflen) { gcry_err_code_t rc = 0; switch (cmd) { case GCRYCTL_RESET: cipher_reset (h); break; case GCRYCTL_FINALIZE: if (!h || buffer || buflen) return GPG_ERR_INV_ARG; h->marks.finalize = 1; break; case 
GCRYCTL_CFB_SYNC: cipher_sync( h ); break; case GCRYCTL_SET_CBC_CTS: if (buflen) if (h->flags & GCRY_CIPHER_CBC_MAC) rc = GPG_ERR_INV_FLAG; else h->flags |= GCRY_CIPHER_CBC_CTS; else h->flags &= ~GCRY_CIPHER_CBC_CTS; break; case GCRYCTL_SET_CBC_MAC: if (buflen) if (h->flags & GCRY_CIPHER_CBC_CTS) rc = GPG_ERR_INV_FLAG; else h->flags |= GCRY_CIPHER_CBC_MAC; else h->flags &= ~GCRY_CIPHER_CBC_MAC; break; case GCRYCTL_SET_CCM_LENGTHS: { u64 params[3]; size_t encryptedlen; size_t aadlen; size_t authtaglen; if (h->mode != GCRY_CIPHER_MODE_CCM) return GPG_ERR_INV_CIPHER_MODE; if (!buffer || buflen != 3 * sizeof(u64)) return GPG_ERR_INV_ARG; /* This command is used to pass additional length parameters needed by CCM mode to initialize CBC-MAC. */ memcpy (params, buffer, sizeof(params)); encryptedlen = params[0]; aadlen = params[1]; authtaglen = params[2]; rc = _gcry_cipher_ccm_set_lengths (h, encryptedlen, aadlen, authtaglen); } break; case GCRYCTL_SET_DECRYPTION_TAG: { if (!buffer) return GPG_ERR_INV_ARG; if (h->mode == GCRY_CIPHER_MODE_SIV) rc = _gcry_cipher_siv_set_decryption_tag (h, buffer, buflen); else if (h->mode == GCRY_CIPHER_MODE_GCM_SIV) rc = _gcry_cipher_gcm_siv_set_decryption_tag (h, buffer, buflen); else rc = GPG_ERR_INV_CIPHER_MODE; } break; case GCRYCTL_SET_TAGLEN: if (!h || !buffer || buflen != sizeof(int) ) return GPG_ERR_INV_ARG; switch (h->mode) { case GCRY_CIPHER_MODE_OCB: switch (*(int*)buffer) { case 8: case 12: case 16: h->u_mode.ocb.taglen = *(int*)buffer; break; default: rc = GPG_ERR_INV_LENGTH; /* Invalid tag length. */ break; } break; default: rc =GPG_ERR_INV_CIPHER_MODE; break; } break; case GCRYCTL_DISABLE_ALGO: /* This command expects NULL for H and BUFFER to point to an integer with the algo number. 
*/ if( h || !buffer || buflen != sizeof(int) ) return GPG_ERR_CIPHER_ALGO; disable_cipher_algo( *(int*)buffer ); break; case PRIV_CIPHERCTL_DISABLE_WEAK_KEY: /* (private) */ if (h->spec->set_extra_info) rc = h->spec->set_extra_info (&h->context.c, CIPHER_INFO_NO_WEAK_KEY, NULL, 0); else rc = GPG_ERR_NOT_SUPPORTED; break; case PRIV_CIPHERCTL_GET_INPUT_VECTOR: /* (private) */ /* This is the input block as used in CFB and OFB mode which has initially been set as IV. The returned format is: 1 byte Actual length of the block in bytes. n byte The block. If the provided buffer is too short, an error is returned. */ if (buflen < (1 + h->spec->blocksize)) rc = GPG_ERR_TOO_SHORT; else { unsigned char *ivp; unsigned char *dst = buffer; int n = h->unused; if (!n) n = h->spec->blocksize; gcry_assert (n <= h->spec->blocksize); *dst++ = n; ivp = h->u_iv.iv + h->spec->blocksize - n; while (n--) *dst++ = *ivp++; } break; case PRIV_CIPHERCTL_GET_COUNTER: /* (private) */ /* This is the input block as used in CTR mode which has initially been set as IV. The returned format is: 1 byte Actual length of the block in bytes. n byte The block. If the provided buffer is too short, an error is returned. */ if (buflen < (1 + h->spec->blocksize)) rc = GPG_ERR_TOO_SHORT; else { unsigned char *ctrp; unsigned char *dst = buffer; int n = h->unused; if (!n) n = h->spec->blocksize; gcry_assert (n <= h->spec->blocksize); *dst++ = n; ctrp = h->u_ctr.ctr + h->spec->blocksize - n; while (n--) *dst++ = *ctrp++; } break; case GCRYCTL_SET_SBOX: if (h->spec->set_extra_info) rc = h->spec->set_extra_info (&h->context.c, GCRYCTL_SET_SBOX, buffer, buflen); else rc = GPG_ERR_NOT_SUPPORTED; break; case GCRYCTL_SET_ALLOW_WEAK_KEY: /* Expecting BUFFER to be NULL and buflen to be on/off flag (0 or 1). */ if (!h || buffer || buflen > 1) return GPG_ERR_CIPHER_ALGO; h->marks.allow_weak_key = buflen ? 1 : 0; break; default: rc = GPG_ERR_INV_OP; } return rc; } /* Return information about the cipher handle H. 
CMD is the kind of
 * information requested.
 *
 * CMD may be one of:
 *
 *  GCRYCTL_GET_TAGLEN:
 *      Return the length of the tag for an AE algorithm mode.  An
 *      error is returned for modes which do not support a tag.
 *      BUFFER must be given as NULL.  On success the result is stored
 *      at NBYTES.  The taglen is returned in bytes.
 *
 *  GCRYCTL_GET_KEYLEN:
 *      Return the length of the key wrapped for AES-WRAP mode.  The
 *      length is encoded in big-endian 4 bytes, when the key is
 *      unwrapped with KWP.  Return 00 00 00 00, when the key is
 *      unwrapped with KW.
 *
 * The function returns 0 on success or an error code.
 */
gcry_err_code_t
_gcry_cipher_info (gcry_cipher_hd_t h, int cmd, void *buffer, size_t *nbytes)
{
  gcry_err_code_t ec = 0;

  switch (cmd)
    {
    case GCRYCTL_GET_TAGLEN:
      /* BUFFER must be NULL; the tag length is reported via *NBYTES.  */
      if (!h || buffer || !nbytes)
        ec = GPG_ERR_INV_ARG;
      else if (h->mode == GCRY_CIPHER_MODE_OCB)
        *nbytes = h->u_mode.ocb.taglen;
      else if (h->mode == GCRY_CIPHER_MODE_CCM)
        *nbytes = h->u_mode.ccm.authlen;
      else if (h->mode == GCRY_CIPHER_MODE_EAX)
        *nbytes = h->spec->blocksize;
      else if (h->mode == GCRY_CIPHER_MODE_GCM)
        *nbytes = GCRY_GCM_BLOCK_LEN;
      else if (h->mode == GCRY_CIPHER_MODE_POLY1305)
        *nbytes = POLY1305_TAGLEN;
      else if (h->mode == GCRY_CIPHER_MODE_SIV
               || h->mode == GCRY_CIPHER_MODE_GCM_SIV)
        *nbytes = GCRY_SIV_BLOCK_LEN;
      else
        ec = GPG_ERR_INV_CIPHER_MODE;
      break;

    case GCRYCTL_GET_KEYLEN:
      /* Only meaningful for AES-WRAP; copies the 4 byte big-endian
         wrapped-key length into BUFFER.  */
      if (!h || !buffer || !nbytes)
        ec = GPG_ERR_INV_ARG;
      else if (h->mode == GCRY_CIPHER_MODE_AESWRAP)
        {
          *nbytes = 4;
          memcpy (buffer, h->u_mode.wrap.plen, 4);
        }
      else
        ec = GPG_ERR_INV_CIPHER_MODE;
      break;

    default:
      ec = GPG_ERR_INV_OP;
      break;
    }

  return ec;
}


/* Return information about the given cipher algorithm ALGO.

   WHAT selects the kind of information returned:

    GCRYCTL_GET_KEYLEN:
        Return the length of the key.  If the algorithm ALGO supports
        multiple key lengths, the maximum supported key length is
        returned.  The key length is returned as number of octets.
        BUFFER and NBYTES must be zero.
GCRYCTL_GET_BLKLEN: Return the blocklength of the algorithm ALGO counted in octets. BUFFER and NBYTES must be zero. GCRYCTL_TEST_ALGO: Returns 0 if the specified algorithm ALGO is available for use. BUFFER and NBYTES must be zero. Note: Because this function is in most cases used to return an integer value, we can make it easier for the caller to just look at the return value. The caller will in all cases consult the value and thereby detecting whether a error occurred or not (i.e. while checking the block size) */ gcry_err_code_t _gcry_cipher_algo_info (int algo, int what, void *buffer, size_t *nbytes) { gcry_err_code_t rc = 0; unsigned int ui; switch (what) { case GCRYCTL_GET_KEYLEN: if (buffer || (! nbytes)) rc = GPG_ERR_CIPHER_ALGO; else { ui = cipher_get_keylen (algo); if ((ui > 0) && (ui <= 512)) *nbytes = (size_t) ui / 8; else /* The only reason for an error is an invalid algo. */ rc = GPG_ERR_CIPHER_ALGO; } break; case GCRYCTL_GET_BLKLEN: if (buffer || (! nbytes)) rc = GPG_ERR_CIPHER_ALGO; else { ui = cipher_get_blocksize (algo); if ((ui > 0) && (ui < 10000)) *nbytes = ui; else { /* The only reason is an invalid algo or a strange blocksize. */ rc = GPG_ERR_CIPHER_ALGO; } } break; case GCRYCTL_TEST_ALGO: if (buffer || nbytes) rc = GPG_ERR_INV_ARG; else rc = check_cipher_algo (algo); break; default: rc = GPG_ERR_INV_OP; } return rc; } /* This function returns length of the key for algorithm ALGO. If the algorithm supports multiple key lengths, the maximum supported key length is returned. On error 0 is returned. The key length is returned as number of octets. This is a convenience functions which should be preferred over gcry_cipher_algo_info because it allows for proper type checking. */ size_t _gcry_cipher_get_algo_keylen (int algo) { size_t n; if (_gcry_cipher_algo_info (algo, GCRYCTL_GET_KEYLEN, NULL, &n)) n = 0; return n; } /* This functions returns the blocklength of the algorithm ALGO counted in octets. On error 0 is returned. 
   This is a convenience functions which should be preferred over
   gcry_cipher_algo_info because it allows for proper type
   checking.  */
size_t
_gcry_cipher_get_algo_blklen (int algo)
{
  size_t n;

  /* On any error report a zero block length.  */
  if (_gcry_cipher_algo_info( algo, GCRYCTL_GET_BLKLEN, NULL, &n))
    n = 0;
  return n;
}


/* Explicitly initialize this module.  Currently a no-op; kept so that
   callers have a uniform subsystem init entry point.  */
gcry_err_code_t
_gcry_cipher_init (void)
{
  return 0;
}


/* Run the selftests for cipher algorithm ALGO with optional reporting
   function REPORT.  Returns GPG_ERR_CIPHER_ALGO when the algorithm is
   unknown, disabled, not FIPS-approved in FIPS mode, or provides no
   selftest; in that case REPORT (if given) is called with a string
   describing which of those situations applies.  */
gpg_error_t
_gcry_cipher_selftest (int algo, int extended, selftest_report_func_t report)
{
  gcry_err_code_t ec = 0;
  gcry_cipher_spec_t *spec;

  spec = spec_from_algo (algo);
  if (spec && !spec->flags.disabled
      && (spec->flags.fips || !fips_mode ())
      && spec->selftest)
    ec = spec->selftest (algo, extended, report);
  else
    {
      ec = GPG_ERR_CIPHER_ALGO;
      if (report)
        report ("cipher", algo, "module",
                spec && !spec->flags.disabled
                && (spec->flags.fips || !fips_mode ())?
                "no selftest available" :
                spec? "algorithm disabled" :
                "algorithm not found");
    }

  return gpg_error (ec);
}
diff --git a/cipher/rijndael-aesni.c b/cipher/rijndael-aesni.c
index 156af015..906737a6 100644
--- a/cipher/rijndael-aesni.c
+++ b/cipher/rijndael-aesni.c
@@ -1,4879 +1,5033 @@
/* AES-NI accelerated AES for Libgcrypt
 * Copyright (C) 2000, 2001, 2002, 2003, 2007,
 *               2008, 2011, 2012 Free Software Foundation, Inc.
 *
 * This file is part of Libgcrypt.
 *
 * Libgcrypt is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as
 * published by the Free Software Foundation; either version 2.1 of
 * the License, or (at your option) any later version.
 *
 * Libgcrypt is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses/>.
*/ #include #include #include #include /* for memcmp() */ #include "types.h" /* for byte and u32 typedefs */ #include "g10lib.h" #include "cipher.h" #include "bufhelp.h" #include "rijndael-internal.h" #include "./cipher-internal.h" #ifdef USE_AESNI #if _GCRY_GCC_VERSION >= 40400 /* 4.4 */ /* Prevent compiler from issuing SSE instructions between asm blocks. */ # pragma GCC target("no-sse") #endif #if __clang__ # pragma clang attribute push (__attribute__((target("no-sse"))), apply_to = function) #endif #define ALWAYS_INLINE inline __attribute__((always_inline)) #define NO_INLINE __attribute__((noinline)) #define NO_INSTRUMENT_FUNCTION __attribute__((no_instrument_function)) #define ASM_FUNC_ATTR NO_INSTRUMENT_FUNCTION #define ASM_FUNC_ATTR_INLINE ASM_FUNC_ATTR ALWAYS_INLINE #define ASM_FUNC_ATTR_NOINLINE ASM_FUNC_ATTR NO_INLINE typedef struct u128_s { u32 a, b, c, d; } __attribute__((packed, aligned(1), may_alias)) u128_t; /* Copy of ocb_get_l needed here as GCC is unable to inline ocb_get_l because of 'pragma target'. */ static ASM_FUNC_ATTR_INLINE const unsigned char * aes_ocb_get_l (gcry_cipher_hd_t c, u64 n) { unsigned long ntz; /* Assumes that N != 0. */ asm ("rep;bsfl %k[low], %k[ntz]\n\t" : [ntz] "=r" (ntz) : [low] "r" ((unsigned long)n) : "cc"); return c->u_mode.ocb.L[ntz]; } /* Two macros to be called prior and after the use of AESNI instructions. There should be no external function calls between the use of these macros. There purpose is to make sure that the SSE regsiters are cleared and won't reveal any information about the key or the data. */ #ifdef __WIN64__ /* XMM6-XMM15 are callee-saved registers on WIN64. 
*/ # define aesni_prepare_2_7_variable char win64tmp[16 * 2] # define aesni_prepare_8_15_variable char win64tmp8_15[16 * 8] # define aesni_prepare() do { } while (0) # define aesni_prepare_2_7() \ do { asm volatile ("movdqu %%xmm6, %0\n\t" \ "movdqu %%xmm7, %1\n\t" \ : "=m" (*win64tmp), "=m" (*(win64tmp+16)) \ : \ : "memory"); \ } while (0) # define aesni_prepare_8_15() \ do { asm volatile ("movdqu %%xmm8, 0*16(%0)\n\t" \ "movdqu %%xmm9, 1*16(%0)\n\t" \ "movdqu %%xmm10, 2*16(%0)\n\t" \ "movdqu %%xmm11, 3*16(%0)\n\t" \ "movdqu %%xmm12, 4*16(%0)\n\t" \ "movdqu %%xmm13, 5*16(%0)\n\t" \ "movdqu %%xmm14, 6*16(%0)\n\t" \ "movdqu %%xmm15, 7*16(%0)\n\t" \ : \ : "r" (win64tmp8_15) \ : "memory"); \ } while (0) # define aesni_cleanup() \ do { asm volatile ("pxor %%xmm0, %%xmm0\n\t" \ "pxor %%xmm1, %%xmm1\n" :: ); \ } while (0) # define aesni_cleanup_2_7() \ do { asm volatile ("movdqu %0, %%xmm6\n\t" \ "movdqu %1, %%xmm7\n\t" \ "pxor %%xmm2, %%xmm2\n" \ "pxor %%xmm3, %%xmm3\n" \ "pxor %%xmm4, %%xmm4\n" \ "pxor %%xmm5, %%xmm5\n" \ : \ : "m" (*win64tmp), "m" (*(win64tmp+16)) \ : "memory"); \ } while (0) # define aesni_cleanup_8_15() \ do { asm volatile ("movdqu 0*16(%0), %%xmm8\n\t" \ "movdqu 1*16(%0), %%xmm9\n\t" \ "movdqu 2*16(%0), %%xmm10\n\t" \ "movdqu 3*16(%0), %%xmm11\n\t" \ "movdqu 4*16(%0), %%xmm12\n\t" \ "movdqu 5*16(%0), %%xmm13\n\t" \ "movdqu 6*16(%0), %%xmm14\n\t" \ "movdqu 7*16(%0), %%xmm15\n\t" \ : \ : "r" (win64tmp8_15) \ : "memory"); \ } while (0) #else # define aesni_prepare_2_7_variable # define aesni_prepare() do { } while (0) # define aesni_prepare_2_7() do { } while (0) # define aesni_cleanup() \ do { asm volatile ("pxor %%xmm0, %%xmm0\n\t" \ "pxor %%xmm1, %%xmm1\n" :: ); \ } while (0) # define aesni_cleanup_2_7() \ do { asm volatile ("pxor %%xmm7, %%xmm7\n\t" \ "pxor %%xmm2, %%xmm2\n\t" \ "pxor %%xmm3, %%xmm3\n" \ "pxor %%xmm4, %%xmm4\n" \ "pxor %%xmm5, %%xmm5\n" \ "pxor %%xmm6, %%xmm6\n":: ); \ } while (0) # ifdef __x86_64__ # define 
aesni_prepare_8_15_variable # define aesni_prepare_8_15() do { } while (0) # define aesni_cleanup_8_15() \ do { asm volatile ("pxor %%xmm8, %%xmm8\n" \ "pxor %%xmm9, %%xmm9\n" \ "pxor %%xmm10, %%xmm10\n" \ "pxor %%xmm11, %%xmm11\n" \ "pxor %%xmm12, %%xmm12\n" \ "pxor %%xmm13, %%xmm13\n" \ "pxor %%xmm14, %%xmm14\n" \ "pxor %%xmm15, %%xmm15\n":: ); \ } while (0) # endif #endif void ASM_FUNC_ATTR _gcry_aes_aesni_do_setkey (RIJNDAEL_context *ctx, const byte *key) { aesni_prepare_2_7_variable; aesni_prepare(); aesni_prepare_2_7(); if (ctx->rounds < 12) { /* 128-bit key */ #define AESKEYGENASSIST_xmm1_xmm2(imm8) \ ".byte 0x66, 0x0f, 0x3a, 0xdf, 0xd1, " #imm8 " \n\t" #define AESKEY_EXPAND128 \ "pshufd $0xff, %%xmm2, %%xmm2\n\t" \ "movdqa %%xmm1, %%xmm3\n\t" \ "pslldq $4, %%xmm3\n\t" \ "pxor %%xmm3, %%xmm1\n\t" \ "pslldq $4, %%xmm3\n\t" \ "pxor %%xmm3, %%xmm1\n\t" \ "pslldq $4, %%xmm3\n\t" \ "pxor %%xmm3, %%xmm2\n\t" \ "pxor %%xmm2, %%xmm1\n\t" asm volatile ("movdqu (%[key]), %%xmm1\n\t" /* xmm1 := key */ "movdqa %%xmm1, (%[ksch])\n\t" /* ksch[0] := xmm1 */ AESKEYGENASSIST_xmm1_xmm2(0x01) AESKEY_EXPAND128 "movdqa %%xmm1, 0x10(%[ksch])\n\t" /* ksch[1] := xmm1 */ AESKEYGENASSIST_xmm1_xmm2(0x02) AESKEY_EXPAND128 "movdqa %%xmm1, 0x20(%[ksch])\n\t" /* ksch[2] := xmm1 */ AESKEYGENASSIST_xmm1_xmm2(0x04) AESKEY_EXPAND128 "movdqa %%xmm1, 0x30(%[ksch])\n\t" /* ksch[3] := xmm1 */ AESKEYGENASSIST_xmm1_xmm2(0x08) AESKEY_EXPAND128 "movdqa %%xmm1, 0x40(%[ksch])\n\t" /* ksch[4] := xmm1 */ AESKEYGENASSIST_xmm1_xmm2(0x10) AESKEY_EXPAND128 "movdqa %%xmm1, 0x50(%[ksch])\n\t" /* ksch[5] := xmm1 */ AESKEYGENASSIST_xmm1_xmm2(0x20) AESKEY_EXPAND128 "movdqa %%xmm1, 0x60(%[ksch])\n\t" /* ksch[6] := xmm1 */ AESKEYGENASSIST_xmm1_xmm2(0x40) AESKEY_EXPAND128 "movdqa %%xmm1, 0x70(%[ksch])\n\t" /* ksch[7] := xmm1 */ AESKEYGENASSIST_xmm1_xmm2(0x80) AESKEY_EXPAND128 "movdqa %%xmm1, 0x80(%[ksch])\n\t" /* ksch[8] := xmm1 */ AESKEYGENASSIST_xmm1_xmm2(0x1b) AESKEY_EXPAND128 "movdqa %%xmm1, 0x90(%[ksch])\n\t" 
/* ksch[9] := xmm1 */ AESKEYGENASSIST_xmm1_xmm2(0x36) AESKEY_EXPAND128 "movdqa %%xmm1, 0xa0(%[ksch])\n\t" /* ksch[10] := xmm1 */ : : [key] "r" (key), [ksch] "r" (ctx->keyschenc) : "cc", "memory" ); #undef AESKEYGENASSIST_xmm1_xmm2 #undef AESKEY_EXPAND128 } else if (ctx->rounds == 12) { /* 192-bit key */ #define AESKEYGENASSIST_xmm3_xmm2(imm8) \ ".byte 0x66, 0x0f, 0x3a, 0xdf, 0xd3, " #imm8 " \n\t" #define AESKEY_EXPAND192 \ "pshufd $0x55, %%xmm2, %%xmm2\n\t" \ "movdqu %%xmm1, %%xmm4\n\t" \ "pslldq $4, %%xmm4\n\t" \ "pxor %%xmm4, %%xmm1\n\t" \ "pslldq $4, %%xmm4\n\t" \ "pxor %%xmm4, %%xmm1\n\t" \ "pslldq $4, %%xmm4\n\t" \ "pxor %%xmm4, %%xmm1\n\t" \ "pxor %%xmm2, %%xmm1\n\t" \ "pshufd $0xff, %%xmm1, %%xmm2\n\t" \ "movdqu %%xmm3, %%xmm4\n\t" \ "pslldq $4, %%xmm4\n\t" \ "pxor %%xmm4, %%xmm3\n\t" \ "pxor %%xmm2, %%xmm3\n\t" asm volatile ("movdqu (%[key]), %%xmm1\n\t" /* xmm1 := key[0..15] */ "movq 16(%[key]), %%xmm3\n\t" /* xmm3 := key[16..23] */ "movdqa %%xmm1, (%[ksch])\n\t" /* ksch[0] := xmm1 */ "movdqa %%xmm3, %%xmm5\n\t" AESKEYGENASSIST_xmm3_xmm2(0x01) AESKEY_EXPAND192 "shufpd $0, %%xmm1, %%xmm5\n\t" "movdqa %%xmm5, 0x10(%[ksch])\n\t" /* ksch[1] := xmm5 */ "movdqa %%xmm1, %%xmm6\n\t" "shufpd $1, %%xmm3, %%xmm6\n\t" "movdqa %%xmm6, 0x20(%[ksch])\n\t" /* ksch[2] := xmm6 */ AESKEYGENASSIST_xmm3_xmm2(0x02) AESKEY_EXPAND192 "movdqa %%xmm1, 0x30(%[ksch])\n\t" /* ksch[3] := xmm1 */ "movdqa %%xmm3, %%xmm5\n\t" AESKEYGENASSIST_xmm3_xmm2(0x04) AESKEY_EXPAND192 "shufpd $0, %%xmm1, %%xmm5\n\t" "movdqa %%xmm5, 0x40(%[ksch])\n\t" /* ksch[4] := xmm5 */ "movdqa %%xmm1, %%xmm6\n\t" "shufpd $1, %%xmm3, %%xmm6\n\t" "movdqa %%xmm6, 0x50(%[ksch])\n\t" /* ksch[5] := xmm6 */ AESKEYGENASSIST_xmm3_xmm2(0x08) AESKEY_EXPAND192 "movdqa %%xmm1, 0x60(%[ksch])\n\t" /* ksch[6] := xmm1 */ "movdqa %%xmm3, %%xmm5\n\t" AESKEYGENASSIST_xmm3_xmm2(0x10) AESKEY_EXPAND192 "shufpd $0, %%xmm1, %%xmm5\n\t" "movdqa %%xmm5, 0x70(%[ksch])\n\t" /* ksch[7] := xmm5 */ "movdqa %%xmm1, %%xmm6\n\t" "shufpd $1, 
%%xmm3, %%xmm6\n\t" "movdqa %%xmm6, 0x80(%[ksch])\n\t" /* ksch[8] := xmm6 */ AESKEYGENASSIST_xmm3_xmm2(0x20) AESKEY_EXPAND192 "movdqa %%xmm1, 0x90(%[ksch])\n\t" /* ksch[9] := xmm1 */ "movdqa %%xmm3, %%xmm5\n\t" AESKEYGENASSIST_xmm3_xmm2(0x40) AESKEY_EXPAND192 "shufpd $0, %%xmm1, %%xmm5\n\t" "movdqa %%xmm5, 0xa0(%[ksch])\n\t" /* ksch[10] := xmm5 */ "movdqa %%xmm1, %%xmm6\n\t" "shufpd $1, %%xmm3, %%xmm6\n\t" "movdqa %%xmm6, 0xb0(%[ksch])\n\t" /* ksch[11] := xmm6 */ AESKEYGENASSIST_xmm3_xmm2(0x80) AESKEY_EXPAND192 "movdqa %%xmm1, 0xc0(%[ksch])\n\t" /* ksch[12] := xmm1 */ : : [key] "r" (key), [ksch] "r" (ctx->keyschenc) : "cc", "memory" ); #undef AESKEYGENASSIST_xmm3_xmm2 #undef AESKEY_EXPAND192 } else if (ctx->rounds > 12) { /* 256-bit key */ #define AESKEYGENASSIST_xmm1_xmm2(imm8) \ ".byte 0x66, 0x0f, 0x3a, 0xdf, 0xd1, " #imm8 " \n\t" #define AESKEYGENASSIST_xmm3_xmm2(imm8) \ ".byte 0x66, 0x0f, 0x3a, 0xdf, 0xd3, " #imm8 " \n\t" #define AESKEY_EXPAND256_A \ "pshufd $0xff, %%xmm2, %%xmm2\n\t" \ "movdqa %%xmm1, %%xmm4\n\t" \ "pslldq $4, %%xmm4\n\t" \ "pxor %%xmm4, %%xmm1\n\t" \ "pslldq $4, %%xmm4\n\t" \ "pxor %%xmm4, %%xmm1\n\t" \ "pslldq $4, %%xmm4\n\t" \ "pxor %%xmm4, %%xmm1\n\t" \ "pxor %%xmm2, %%xmm1\n\t" #define AESKEY_EXPAND256_B \ "pshufd $0xaa, %%xmm2, %%xmm2\n\t" \ "movdqa %%xmm3, %%xmm4\n\t" \ "pslldq $4, %%xmm4\n\t" \ "pxor %%xmm4, %%xmm3\n\t" \ "pslldq $4, %%xmm4\n\t" \ "pxor %%xmm4, %%xmm3\n\t" \ "pslldq $4, %%xmm4\n\t" \ "pxor %%xmm4, %%xmm3\n\t" \ "pxor %%xmm2, %%xmm3\n\t" asm volatile ("movdqu (%[key]), %%xmm1\n\t" /* xmm1 := key[0..15] */ "movdqu 16(%[key]), %%xmm3\n\t" /* xmm3 := key[16..31] */ "movdqa %%xmm1, (%[ksch])\n\t" /* ksch[0] := xmm1 */ "movdqa %%xmm3, 0x10(%[ksch])\n\t" /* ksch[1] := xmm3 */ AESKEYGENASSIST_xmm3_xmm2(0x01) AESKEY_EXPAND256_A "movdqa %%xmm1, 0x20(%[ksch])\n\t" /* ksch[2] := xmm1 */ AESKEYGENASSIST_xmm1_xmm2(0x00) AESKEY_EXPAND256_B "movdqa %%xmm3, 0x30(%[ksch])\n\t" /* ksch[3] := xmm3 */ AESKEYGENASSIST_xmm3_xmm2(0x02) 
AESKEY_EXPAND256_A "movdqa %%xmm1, 0x40(%[ksch])\n\t" /* ksch[4] := xmm1 */ AESKEYGENASSIST_xmm1_xmm2(0x00) AESKEY_EXPAND256_B "movdqa %%xmm3, 0x50(%[ksch])\n\t" /* ksch[5] := xmm3 */ AESKEYGENASSIST_xmm3_xmm2(0x04) AESKEY_EXPAND256_A "movdqa %%xmm1, 0x60(%[ksch])\n\t" /* ksch[6] := xmm1 */ AESKEYGENASSIST_xmm1_xmm2(0x00) AESKEY_EXPAND256_B "movdqa %%xmm3, 0x70(%[ksch])\n\t" /* ksch[7] := xmm3 */ AESKEYGENASSIST_xmm3_xmm2(0x08) AESKEY_EXPAND256_A "movdqa %%xmm1, 0x80(%[ksch])\n\t" /* ksch[8] := xmm1 */ AESKEYGENASSIST_xmm1_xmm2(0x00) AESKEY_EXPAND256_B "movdqa %%xmm3, 0x90(%[ksch])\n\t" /* ksch[9] := xmm3 */ AESKEYGENASSIST_xmm3_xmm2(0x10) AESKEY_EXPAND256_A "movdqa %%xmm1, 0xa0(%[ksch])\n\t" /* ksch[10] := xmm1 */ AESKEYGENASSIST_xmm1_xmm2(0x00) AESKEY_EXPAND256_B "movdqa %%xmm3, 0xb0(%[ksch])\n\t" /* ksch[11] := xmm3 */ AESKEYGENASSIST_xmm3_xmm2(0x20) AESKEY_EXPAND256_A "movdqa %%xmm1, 0xc0(%[ksch])\n\t" /* ksch[12] := xmm1 */ AESKEYGENASSIST_xmm1_xmm2(0x00) AESKEY_EXPAND256_B "movdqa %%xmm3, 0xd0(%[ksch])\n\t" /* ksch[13] := xmm3 */ AESKEYGENASSIST_xmm3_xmm2(0x40) AESKEY_EXPAND256_A "movdqa %%xmm1, 0xe0(%[ksch])\n\t" /* ksch[14] := xmm1 */ : : [key] "r" (key), [ksch] "r" (ctx->keyschenc) : "cc", "memory" ); #undef AESKEYGENASSIST_xmm1_xmm2 #undef AESKEYGENASSIST_xmm3_xmm2 #undef AESKEY_EXPAND256_A #undef AESKEY_EXPAND256_B } aesni_cleanup(); aesni_cleanup_2_7(); } /* Make a decryption key from an encryption key. */ static ASM_FUNC_ATTR_INLINE void do_aesni_prepare_decryption (RIJNDAEL_context *ctx) { /* The AES-NI decrypt instructions use the Equivalent Inverse Cipher, thus we can't use the the standard decrypt key preparation. 
*/ u128_t *ekey = (u128_t *)ctx->keyschenc; u128_t *dkey = (u128_t *)ctx->keyschdec; int rr; int r; #define DO_AESNI_AESIMC() \ asm volatile ("movdqa %[ekey], %%xmm1\n\t" \ /*"aesimc %%xmm1, %%xmm1\n\t"*/ \ ".byte 0x66, 0x0f, 0x38, 0xdb, 0xc9\n\t" \ "movdqa %%xmm1, %[dkey]" \ : [dkey] "=m" (dkey[r]) \ : [ekey] "m" (ekey[rr]) \ : "memory") dkey[0] = ekey[ctx->rounds]; r=1; rr=ctx->rounds-1; DO_AESNI_AESIMC(); r++; rr--; /* round 1 */ DO_AESNI_AESIMC(); r++; rr--; /* round 2 */ DO_AESNI_AESIMC(); r++; rr--; /* round 3 */ DO_AESNI_AESIMC(); r++; rr--; /* round 4 */ DO_AESNI_AESIMC(); r++; rr--; /* round 5 */ DO_AESNI_AESIMC(); r++; rr--; /* round 6 */ DO_AESNI_AESIMC(); r++; rr--; /* round 7 */ DO_AESNI_AESIMC(); r++; rr--; /* round 8 */ DO_AESNI_AESIMC(); r++; rr--; /* round 9 */ if (ctx->rounds > 10) { DO_AESNI_AESIMC(); r++; rr--; /* round 10 */ DO_AESNI_AESIMC(); r++; rr--; /* round 11 */ if (ctx->rounds > 12) { DO_AESNI_AESIMC(); r++; rr--; /* round 12 */ DO_AESNI_AESIMC(); r++; rr--; /* round 13 */ } } dkey[r] = ekey[0]; #undef DO_AESNI_AESIMC } void ASM_FUNC_ATTR _gcry_aes_aesni_prepare_decryption (RIJNDAEL_context *ctx) { aesni_prepare(); do_aesni_prepare_decryption (ctx); aesni_cleanup(); } /* Encrypt one block using the Intel AES-NI instructions. Block is input * and output through SSE register xmm0. 
 */
static ASM_FUNC_ATTR_INLINE void
do_aesni_enc (const RIJNDAEL_context *ctx)
{
/* aesenc/aesenclast encoded as raw bytes so that pre-AES-NI
   assemblers can still build this file.  */
#define aesenc_xmm1_xmm0      ".byte 0x66, 0x0f, 0x38, 0xdc, 0xc1\n\t"
#define aesenclast_xmm1_xmm0  ".byte 0x66, 0x0f, 0x38, 0xdd, 0xc1\n\t"
  /* Round 0 is the initial AddRoundKey (pxor); the two cmpl/jz pairs
     cut the schedule short after 10 or 12 rounds (AES-128/AES-192),
     falling through to 14 rounds for AES-256.  */
  asm volatile ("movdqa (%[key]), %%xmm1\n\t"    /* xmm1 := key[0] */
                "pxor %%xmm1, %%xmm0\n\t"     /* xmm0 ^= key[0] */
                "movdqa 0x10(%[key]), %%xmm1\n\t"
                aesenc_xmm1_xmm0
                "movdqa 0x20(%[key]), %%xmm1\n\t"
                aesenc_xmm1_xmm0
                "movdqa 0x30(%[key]), %%xmm1\n\t"
                aesenc_xmm1_xmm0
                "movdqa 0x40(%[key]), %%xmm1\n\t"
                aesenc_xmm1_xmm0
                "movdqa 0x50(%[key]), %%xmm1\n\t"
                aesenc_xmm1_xmm0
                "movdqa 0x60(%[key]), %%xmm1\n\t"
                aesenc_xmm1_xmm0
                "movdqa 0x70(%[key]), %%xmm1\n\t"
                aesenc_xmm1_xmm0
                "movdqa 0x80(%[key]), %%xmm1\n\t"
                aesenc_xmm1_xmm0
                "movdqa 0x90(%[key]), %%xmm1\n\t"
                aesenc_xmm1_xmm0
                "movdqa 0xa0(%[key]), %%xmm1\n\t"
                "cmpl $10, %[rounds]\n\t"
                "jz .Lenclast%=\n\t"
                aesenc_xmm1_xmm0
                "movdqa 0xb0(%[key]), %%xmm1\n\t"
                aesenc_xmm1_xmm0
                "movdqa 0xc0(%[key]), %%xmm1\n\t"
                "cmpl $12, %[rounds]\n\t"
                "jz .Lenclast%=\n\t"
                aesenc_xmm1_xmm0
                "movdqa 0xd0(%[key]), %%xmm1\n\t"
                aesenc_xmm1_xmm0
                "movdqa 0xe0(%[key]), %%xmm1\n"
                ".Lenclast%=:\n\t"
                aesenclast_xmm1_xmm0
                "\n"
                :
                : [key] "r" (ctx->keyschenc),
                  [rounds] "r" (ctx->rounds)
                : "cc", "memory");
#undef aesenc_xmm1_xmm0
#undef aesenclast_xmm1_xmm0
}


/* Decrypt one block using the Intel AES-NI instructions.  Block is input
 * and output through SSE register xmm0.
 */
static ASM_FUNC_ATTR_INLINE void
do_aesni_dec (const RIJNDAEL_context *ctx)
{
/* aesdec/aesdeclast encoded as raw bytes so that pre-AES-NI
   assemblers can still build this file.  */
#define aesdec_xmm1_xmm0      ".byte 0x66, 0x0f, 0x38, 0xde, 0xc1\n\t"
#define aesdeclast_xmm1_xmm0  ".byte 0x66, 0x0f, 0x38, 0xdf, 0xc1\n\t"
  /* Uses the Equivalent Inverse Cipher key schedule (keyschdec); the
     cmpl/jz pairs shorten the run to 10 or 12 rounds for AES-128/192.  */
  asm volatile ("movdqa (%[key]), %%xmm1\n\t"
                "pxor %%xmm1, %%xmm0\n\t"     /* xmm0 ^= key[0] */
                "movdqa 0x10(%[key]), %%xmm1\n\t"
                aesdec_xmm1_xmm0
                "movdqa 0x20(%[key]), %%xmm1\n\t"
                aesdec_xmm1_xmm0
                "movdqa 0x30(%[key]), %%xmm1\n\t"
                aesdec_xmm1_xmm0
                "movdqa 0x40(%[key]), %%xmm1\n\t"
                aesdec_xmm1_xmm0
                "movdqa 0x50(%[key]), %%xmm1\n\t"
                aesdec_xmm1_xmm0
                "movdqa 0x60(%[key]), %%xmm1\n\t"
                aesdec_xmm1_xmm0
                "movdqa 0x70(%[key]), %%xmm1\n\t"
                aesdec_xmm1_xmm0
                "movdqa 0x80(%[key]), %%xmm1\n\t"
                aesdec_xmm1_xmm0
                "movdqa 0x90(%[key]), %%xmm1\n\t"
                aesdec_xmm1_xmm0
                "movdqa 0xa0(%[key]), %%xmm1\n\t"
                "cmpl $10, %[rounds]\n\t"
                "jz .Ldeclast%=\n\t"
                aesdec_xmm1_xmm0
                "movdqa 0xb0(%[key]), %%xmm1\n\t"
                aesdec_xmm1_xmm0
                "movdqa 0xc0(%[key]), %%xmm1\n\t"
                "cmpl $12, %[rounds]\n\t"
                "jz .Ldeclast%=\n\t"
                aesdec_xmm1_xmm0
                "movdqa 0xd0(%[key]), %%xmm1\n\t"
                aesdec_xmm1_xmm0
                "movdqa 0xe0(%[key]), %%xmm1\n"
                ".Ldeclast%=:\n\t"
                aesdeclast_xmm1_xmm0
                "\n"
                :
                : [key] "r" (ctx->keyschdec),
                  [rounds] "r" (ctx->rounds)
                : "cc", "memory");
#undef aesdec_xmm1_xmm0
#undef aesdeclast_xmm1_xmm0
}


/* Encrypt four blocks using the Intel AES-NI instructions.  Blocks are input
 * and output through SSE registers xmm1 to xmm4.
*/ static ASM_FUNC_ATTR_INLINE void do_aesni_enc_vec4 (const RIJNDAEL_context *ctx) { #define aesenc_xmm0_xmm1 ".byte 0x66, 0x0f, 0x38, 0xdc, 0xc8\n\t" #define aesenc_xmm0_xmm2 ".byte 0x66, 0x0f, 0x38, 0xdc, 0xd0\n\t" #define aesenc_xmm0_xmm3 ".byte 0x66, 0x0f, 0x38, 0xdc, 0xd8\n\t" #define aesenc_xmm0_xmm4 ".byte 0x66, 0x0f, 0x38, 0xdc, 0xe0\n\t" #define aesenclast_xmm0_xmm1 ".byte 0x66, 0x0f, 0x38, 0xdd, 0xc8\n\t" #define aesenclast_xmm0_xmm2 ".byte 0x66, 0x0f, 0x38, 0xdd, 0xd0\n\t" #define aesenclast_xmm0_xmm3 ".byte 0x66, 0x0f, 0x38, 0xdd, 0xd8\n\t" #define aesenclast_xmm0_xmm4 ".byte 0x66, 0x0f, 0x38, 0xdd, 0xe0\n\t" asm volatile ("movdqa (%[key]), %%xmm0\n\t" "pxor %%xmm0, %%xmm1\n\t" /* xmm1 ^= key[0] */ "pxor %%xmm0, %%xmm2\n\t" /* xmm2 ^= key[0] */ "pxor %%xmm0, %%xmm3\n\t" /* xmm3 ^= key[0] */ "pxor %%xmm0, %%xmm4\n\t" /* xmm4 ^= key[0] */ "movdqa 0x10(%[key]), %%xmm0\n\t" aesenc_xmm0_xmm1 aesenc_xmm0_xmm2 aesenc_xmm0_xmm3 aesenc_xmm0_xmm4 "movdqa 0x20(%[key]), %%xmm0\n\t" aesenc_xmm0_xmm1 aesenc_xmm0_xmm2 aesenc_xmm0_xmm3 aesenc_xmm0_xmm4 "movdqa 0x30(%[key]), %%xmm0\n\t" aesenc_xmm0_xmm1 aesenc_xmm0_xmm2 aesenc_xmm0_xmm3 aesenc_xmm0_xmm4 "movdqa 0x40(%[key]), %%xmm0\n\t" aesenc_xmm0_xmm1 aesenc_xmm0_xmm2 aesenc_xmm0_xmm3 aesenc_xmm0_xmm4 "movdqa 0x50(%[key]), %%xmm0\n\t" aesenc_xmm0_xmm1 aesenc_xmm0_xmm2 aesenc_xmm0_xmm3 aesenc_xmm0_xmm4 "movdqa 0x60(%[key]), %%xmm0\n\t" aesenc_xmm0_xmm1 aesenc_xmm0_xmm2 aesenc_xmm0_xmm3 aesenc_xmm0_xmm4 "movdqa 0x70(%[key]), %%xmm0\n\t" aesenc_xmm0_xmm1 aesenc_xmm0_xmm2 aesenc_xmm0_xmm3 aesenc_xmm0_xmm4 "movdqa 0x80(%[key]), %%xmm0\n\t" aesenc_xmm0_xmm1 aesenc_xmm0_xmm2 aesenc_xmm0_xmm3 aesenc_xmm0_xmm4 "movdqa 0x90(%[key]), %%xmm0\n\t" aesenc_xmm0_xmm1 aesenc_xmm0_xmm2 aesenc_xmm0_xmm3 aesenc_xmm0_xmm4 "movdqa 0xa0(%[key]), %%xmm0\n\t" "cmpl $10, %[rounds]\n\t" "jz .Ldeclast%=\n\t" aesenc_xmm0_xmm1 aesenc_xmm0_xmm2 aesenc_xmm0_xmm3 aesenc_xmm0_xmm4 "movdqa 0xb0(%[key]), %%xmm0\n\t" aesenc_xmm0_xmm1 aesenc_xmm0_xmm2 
aesenc_xmm0_xmm3 aesenc_xmm0_xmm4 "movdqa 0xc0(%[key]), %%xmm0\n\t" "cmpl $12, %[rounds]\n\t" "jz .Ldeclast%=\n\t" aesenc_xmm0_xmm1 aesenc_xmm0_xmm2 aesenc_xmm0_xmm3 aesenc_xmm0_xmm4 "movdqa 0xd0(%[key]), %%xmm0\n\t" aesenc_xmm0_xmm1 aesenc_xmm0_xmm2 aesenc_xmm0_xmm3 aesenc_xmm0_xmm4 "movdqa 0xe0(%[key]), %%xmm0\n" ".Ldeclast%=:\n\t" aesenclast_xmm0_xmm1 aesenclast_xmm0_xmm2 aesenclast_xmm0_xmm3 aesenclast_xmm0_xmm4 : /* no output */ : [key] "r" (ctx->keyschenc), [rounds] "r" (ctx->rounds) : "cc", "memory"); #undef aesenc_xmm0_xmm1 #undef aesenc_xmm0_xmm2 #undef aesenc_xmm0_xmm3 #undef aesenc_xmm0_xmm4 #undef aesenclast_xmm0_xmm1 #undef aesenclast_xmm0_xmm2 #undef aesenclast_xmm0_xmm3 #undef aesenclast_xmm0_xmm4 } /* Decrypt four blocks using the Intel AES-NI instructions. Blocks are input * and output through SSE registers xmm1 to xmm4. */ static ASM_FUNC_ATTR_INLINE void do_aesni_dec_vec4 (const RIJNDAEL_context *ctx) { #define aesdec_xmm0_xmm1 ".byte 0x66, 0x0f, 0x38, 0xde, 0xc8\n\t" #define aesdec_xmm0_xmm2 ".byte 0x66, 0x0f, 0x38, 0xde, 0xd0\n\t" #define aesdec_xmm0_xmm3 ".byte 0x66, 0x0f, 0x38, 0xde, 0xd8\n\t" #define aesdec_xmm0_xmm4 ".byte 0x66, 0x0f, 0x38, 0xde, 0xe0\n\t" #define aesdeclast_xmm0_xmm1 ".byte 0x66, 0x0f, 0x38, 0xdf, 0xc8\n\t" #define aesdeclast_xmm0_xmm2 ".byte 0x66, 0x0f, 0x38, 0xdf, 0xd0\n\t" #define aesdeclast_xmm0_xmm3 ".byte 0x66, 0x0f, 0x38, 0xdf, 0xd8\n\t" #define aesdeclast_xmm0_xmm4 ".byte 0x66, 0x0f, 0x38, 0xdf, 0xe0\n\t" asm volatile ("movdqa (%[key]), %%xmm0\n\t" "pxor %%xmm0, %%xmm1\n\t" /* xmm1 ^= key[0] */ "pxor %%xmm0, %%xmm2\n\t" /* xmm2 ^= key[0] */ "pxor %%xmm0, %%xmm3\n\t" /* xmm3 ^= key[0] */ "pxor %%xmm0, %%xmm4\n\t" /* xmm4 ^= key[0] */ "movdqa 0x10(%[key]), %%xmm0\n\t" aesdec_xmm0_xmm1 aesdec_xmm0_xmm2 aesdec_xmm0_xmm3 aesdec_xmm0_xmm4 "movdqa 0x20(%[key]), %%xmm0\n\t" aesdec_xmm0_xmm1 aesdec_xmm0_xmm2 aesdec_xmm0_xmm3 aesdec_xmm0_xmm4 "movdqa 0x30(%[key]), %%xmm0\n\t" aesdec_xmm0_xmm1 aesdec_xmm0_xmm2 
aesdec_xmm0_xmm3 aesdec_xmm0_xmm4 "movdqa 0x40(%[key]), %%xmm0\n\t" aesdec_xmm0_xmm1 aesdec_xmm0_xmm2 aesdec_xmm0_xmm3 aesdec_xmm0_xmm4 "movdqa 0x50(%[key]), %%xmm0\n\t" aesdec_xmm0_xmm1 aesdec_xmm0_xmm2 aesdec_xmm0_xmm3 aesdec_xmm0_xmm4 "movdqa 0x60(%[key]), %%xmm0\n\t" aesdec_xmm0_xmm1 aesdec_xmm0_xmm2 aesdec_xmm0_xmm3 aesdec_xmm0_xmm4 "movdqa 0x70(%[key]), %%xmm0\n\t" aesdec_xmm0_xmm1 aesdec_xmm0_xmm2 aesdec_xmm0_xmm3 aesdec_xmm0_xmm4 "movdqa 0x80(%[key]), %%xmm0\n\t" aesdec_xmm0_xmm1 aesdec_xmm0_xmm2 aesdec_xmm0_xmm3 aesdec_xmm0_xmm4 "movdqa 0x90(%[key]), %%xmm0\n\t" aesdec_xmm0_xmm1 aesdec_xmm0_xmm2 aesdec_xmm0_xmm3 aesdec_xmm0_xmm4 "movdqa 0xa0(%[key]), %%xmm0\n\t" "cmpl $10, %[rounds]\n\t" "jz .Ldeclast%=\n\t" aesdec_xmm0_xmm1 aesdec_xmm0_xmm2 aesdec_xmm0_xmm3 aesdec_xmm0_xmm4 "movdqa 0xb0(%[key]), %%xmm0\n\t" aesdec_xmm0_xmm1 aesdec_xmm0_xmm2 aesdec_xmm0_xmm3 aesdec_xmm0_xmm4 "movdqa 0xc0(%[key]), %%xmm0\n\t" "cmpl $12, %[rounds]\n\t" "jz .Ldeclast%=\n\t" aesdec_xmm0_xmm1 aesdec_xmm0_xmm2 aesdec_xmm0_xmm3 aesdec_xmm0_xmm4 "movdqa 0xd0(%[key]), %%xmm0\n\t" aesdec_xmm0_xmm1 aesdec_xmm0_xmm2 aesdec_xmm0_xmm3 aesdec_xmm0_xmm4 "movdqa 0xe0(%[key]), %%xmm0\n" ".Ldeclast%=:\n\t" aesdeclast_xmm0_xmm1 aesdeclast_xmm0_xmm2 aesdeclast_xmm0_xmm3 aesdeclast_xmm0_xmm4 : /* no output */ : [key] "r" (ctx->keyschdec), [rounds] "r" (ctx->rounds) : "cc", "memory"); #undef aesdec_xmm0_xmm1 #undef aesdec_xmm0_xmm2 #undef aesdec_xmm0_xmm3 #undef aesdec_xmm0_xmm4 #undef aesdeclast_xmm0_xmm1 #undef aesdeclast_xmm0_xmm2 #undef aesdeclast_xmm0_xmm3 #undef aesdeclast_xmm0_xmm4 } #ifdef __x86_64__ /* Encrypt eight blocks using the Intel AES-NI instructions. Blocks are input * and output through SSE registers xmm1 to xmm4 and xmm8 to xmm11. 
*/ static ASM_FUNC_ATTR_INLINE void do_aesni_enc_vec8 (const RIJNDAEL_context *ctx) { asm volatile ("movdqa 0x10(%[key]), %%xmm0\n\t" "aesenc %%xmm0, %%xmm1\n\t" "aesenc %%xmm0, %%xmm2\n\t" "aesenc %%xmm0, %%xmm3\n\t" "aesenc %%xmm0, %%xmm4\n\t" "aesenc %%xmm0, %%xmm8\n\t" "aesenc %%xmm0, %%xmm9\n\t" "aesenc %%xmm0, %%xmm10\n\t" "aesenc %%xmm0, %%xmm11\n\t" "movdqa 0x20(%[key]), %%xmm0\n\t" "cmpl $12, %[rounds]\n\t" "aesenc %%xmm0, %%xmm1\n\t" "aesenc %%xmm0, %%xmm2\n\t" "aesenc %%xmm0, %%xmm3\n\t" "aesenc %%xmm0, %%xmm4\n\t" "aesenc %%xmm0, %%xmm8\n\t" "aesenc %%xmm0, %%xmm9\n\t" "aesenc %%xmm0, %%xmm10\n\t" "aesenc %%xmm0, %%xmm11\n\t" "movdqa 0x30(%[key]), %%xmm0\n\t" "aesenc %%xmm0, %%xmm1\n\t" "aesenc %%xmm0, %%xmm2\n\t" "aesenc %%xmm0, %%xmm3\n\t" "aesenc %%xmm0, %%xmm4\n\t" "aesenc %%xmm0, %%xmm8\n\t" "aesenc %%xmm0, %%xmm9\n\t" "aesenc %%xmm0, %%xmm10\n\t" "aesenc %%xmm0, %%xmm11\n\t" "movdqa 0x40(%[key]), %%xmm0\n\t" "aesenc %%xmm0, %%xmm1\n\t" "aesenc %%xmm0, %%xmm2\n\t" "aesenc %%xmm0, %%xmm3\n\t" "aesenc %%xmm0, %%xmm4\n\t" "aesenc %%xmm0, %%xmm8\n\t" "aesenc %%xmm0, %%xmm9\n\t" "aesenc %%xmm0, %%xmm10\n\t" "aesenc %%xmm0, %%xmm11\n\t" "movdqa 0x50(%[key]), %%xmm0\n\t" "aesenc %%xmm0, %%xmm1\n\t" "aesenc %%xmm0, %%xmm2\n\t" "aesenc %%xmm0, %%xmm3\n\t" "aesenc %%xmm0, %%xmm4\n\t" "aesenc %%xmm0, %%xmm8\n\t" "aesenc %%xmm0, %%xmm9\n\t" "aesenc %%xmm0, %%xmm10\n\t" "aesenc %%xmm0, %%xmm11\n\t" "movdqa 0x60(%[key]), %%xmm0\n\t" "aesenc %%xmm0, %%xmm1\n\t" "aesenc %%xmm0, %%xmm2\n\t" "aesenc %%xmm0, %%xmm3\n\t" "aesenc %%xmm0, %%xmm4\n\t" "aesenc %%xmm0, %%xmm8\n\t" "aesenc %%xmm0, %%xmm9\n\t" "aesenc %%xmm0, %%xmm10\n\t" "aesenc %%xmm0, %%xmm11\n\t" "movdqa 0x70(%[key]), %%xmm0\n\t" "aesenc %%xmm0, %%xmm1\n\t" "aesenc %%xmm0, %%xmm2\n\t" "aesenc %%xmm0, %%xmm3\n\t" "aesenc %%xmm0, %%xmm4\n\t" "aesenc %%xmm0, %%xmm8\n\t" "aesenc %%xmm0, %%xmm9\n\t" "aesenc %%xmm0, %%xmm10\n\t" "aesenc %%xmm0, %%xmm11\n\t" "movdqa 0x80(%[key]), %%xmm0\n\t" "aesenc %%xmm0, 
%%xmm1\n\t" "aesenc %%xmm0, %%xmm2\n\t" "aesenc %%xmm0, %%xmm3\n\t" "aesenc %%xmm0, %%xmm4\n\t" "aesenc %%xmm0, %%xmm8\n\t" "aesenc %%xmm0, %%xmm9\n\t" "aesenc %%xmm0, %%xmm10\n\t" "aesenc %%xmm0, %%xmm11\n\t" "movdqa 0x90(%[key]), %%xmm0\n\t" "aesenc %%xmm0, %%xmm1\n\t" "aesenc %%xmm0, %%xmm2\n\t" "aesenc %%xmm0, %%xmm3\n\t" "aesenc %%xmm0, %%xmm4\n\t" "aesenc %%xmm0, %%xmm8\n\t" "aesenc %%xmm0, %%xmm9\n\t" "aesenc %%xmm0, %%xmm10\n\t" "aesenc %%xmm0, %%xmm11\n\t" "movdqa 0xa0(%[key]), %%xmm0\n\t" - "jb .Ldeclast%=\n\t" + "jb .Lenclast%=\n\t" "aesenc %%xmm0, %%xmm1\n\t" "aesenc %%xmm0, %%xmm2\n\t" "aesenc %%xmm0, %%xmm3\n\t" "aesenc %%xmm0, %%xmm4\n\t" "aesenc %%xmm0, %%xmm8\n\t" "aesenc %%xmm0, %%xmm9\n\t" "aesenc %%xmm0, %%xmm10\n\t" "aesenc %%xmm0, %%xmm11\n\t" "movdqa 0xb0(%[key]), %%xmm0\n\t" "aesenc %%xmm0, %%xmm1\n\t" "aesenc %%xmm0, %%xmm2\n\t" "aesenc %%xmm0, %%xmm3\n\t" "aesenc %%xmm0, %%xmm4\n\t" "aesenc %%xmm0, %%xmm8\n\t" "aesenc %%xmm0, %%xmm9\n\t" "aesenc %%xmm0, %%xmm10\n\t" "aesenc %%xmm0, %%xmm11\n\t" "movdqa 0xc0(%[key]), %%xmm0\n\t" - "je .Ldeclast%=\n\t" + "je .Lenclast%=\n\t" "aesenc %%xmm0, %%xmm1\n\t" "aesenc %%xmm0, %%xmm2\n\t" "aesenc %%xmm0, %%xmm3\n\t" "aesenc %%xmm0, %%xmm4\n\t" "aesenc %%xmm0, %%xmm8\n\t" "aesenc %%xmm0, %%xmm9\n\t" "aesenc %%xmm0, %%xmm10\n\t" "aesenc %%xmm0, %%xmm11\n\t" "movdqa 0xd0(%[key]), %%xmm0\n\t" "aesenc %%xmm0, %%xmm1\n\t" "aesenc %%xmm0, %%xmm2\n\t" "aesenc %%xmm0, %%xmm3\n\t" "aesenc %%xmm0, %%xmm4\n\t" "aesenc %%xmm0, %%xmm8\n\t" "aesenc %%xmm0, %%xmm9\n\t" "aesenc %%xmm0, %%xmm10\n\t" "aesenc %%xmm0, %%xmm11\n\t" "movdqa 0xe0(%[key]), %%xmm0\n" - ".Ldeclast%=:\n\t" + ".Lenclast%=:\n\t" : /* no output */ : [key] "r" (ctx->keyschenc), [rounds] "r" (ctx->rounds) : "cc", "memory"); } /* Decrypt eight blocks using the Intel AES-NI instructions. Blocks are input * and output through SSE registers xmm1 to xmm4 and xmm8 to xmm11. 
*/
static ASM_FUNC_ATTR_INLINE void
do_aesni_dec_vec8 (const RIJNDAEL_context *ctx)
{
  /* Run the middle AES decryption rounds over the eight blocks held in
     registers xmm1..xmm4 and xmm8..xmm11.  The caller must already have
     XORed round key 0 (of the decryption key schedule) into the blocks;
     on return the LAST round key is left in xmm0 so the caller issues
     the final AESDECLAST itself (see the ECB path, which does exactly
     that).  The jb/je below consume the flags set by the cmpl right
     after the first key load: jb exits after 10 rounds (AES-128), je
     after 12 rounds (AES-192), fall-through does all 14 (AES-256).  */
  asm volatile ("movdqa 0x10(%[key]), %%xmm0\n\t"
                "cmpl $12, %[rounds]\n\t" /* Flags used at 0xa0/0xc0.  */
                "aesdec %%xmm0, %%xmm1\n\t"
                "aesdec %%xmm0, %%xmm2\n\t"
                "aesdec %%xmm0, %%xmm3\n\t"
                "aesdec %%xmm0, %%xmm4\n\t"
                "aesdec %%xmm0, %%xmm8\n\t"
                "aesdec %%xmm0, %%xmm9\n\t"
                "aesdec %%xmm0, %%xmm10\n\t"
                "aesdec %%xmm0, %%xmm11\n\t"
                "movdqa 0x20(%[key]), %%xmm0\n\t"
                "aesdec %%xmm0, %%xmm1\n\t"
                "aesdec %%xmm0, %%xmm2\n\t"
                "aesdec %%xmm0, %%xmm3\n\t"
                "aesdec %%xmm0, %%xmm4\n\t"
                "aesdec %%xmm0, %%xmm8\n\t"
                "aesdec %%xmm0, %%xmm9\n\t"
                "aesdec %%xmm0, %%xmm10\n\t"
                "aesdec %%xmm0, %%xmm11\n\t"
                "movdqa 0x30(%[key]), %%xmm0\n\t"
                "aesdec %%xmm0, %%xmm1\n\t"
                "aesdec %%xmm0, %%xmm2\n\t"
                "aesdec %%xmm0, %%xmm3\n\t"
                "aesdec %%xmm0, %%xmm4\n\t"
                "aesdec %%xmm0, %%xmm8\n\t"
                "aesdec %%xmm0, %%xmm9\n\t"
                "aesdec %%xmm0, %%xmm10\n\t"
                "aesdec %%xmm0, %%xmm11\n\t"
                "movdqa 0x40(%[key]), %%xmm0\n\t"
                "aesdec %%xmm0, %%xmm1\n\t"
                "aesdec %%xmm0, %%xmm2\n\t"
                "aesdec %%xmm0, %%xmm3\n\t"
                "aesdec %%xmm0, %%xmm4\n\t"
                "aesdec %%xmm0, %%xmm8\n\t"
                "aesdec %%xmm0, %%xmm9\n\t"
                "aesdec %%xmm0, %%xmm10\n\t"
                "aesdec %%xmm0, %%xmm11\n\t"
                "movdqa 0x50(%[key]), %%xmm0\n\t"
                "aesdec %%xmm0, %%xmm1\n\t"
                "aesdec %%xmm0, %%xmm2\n\t"
                "aesdec %%xmm0, %%xmm3\n\t"
                "aesdec %%xmm0, %%xmm4\n\t"
                "aesdec %%xmm0, %%xmm8\n\t"
                "aesdec %%xmm0, %%xmm9\n\t"
                "aesdec %%xmm0, %%xmm10\n\t"
                "aesdec %%xmm0, %%xmm11\n\t"
                "movdqa 0x60(%[key]), %%xmm0\n\t"
                "aesdec %%xmm0, %%xmm1\n\t"
                "aesdec %%xmm0, %%xmm2\n\t"
                "aesdec %%xmm0, %%xmm3\n\t"
                "aesdec %%xmm0, %%xmm4\n\t"
                "aesdec %%xmm0, %%xmm8\n\t"
                "aesdec %%xmm0, %%xmm9\n\t"
                "aesdec %%xmm0, %%xmm10\n\t"
                "aesdec %%xmm0, %%xmm11\n\t"
                "movdqa 0x70(%[key]), %%xmm0\n\t"
                "aesdec %%xmm0, %%xmm1\n\t"
                "aesdec %%xmm0, %%xmm2\n\t"
                "aesdec %%xmm0, %%xmm3\n\t"
                "aesdec %%xmm0, %%xmm4\n\t"
                "aesdec %%xmm0, %%xmm8\n\t"
                "aesdec %%xmm0, %%xmm9\n\t"
                "aesdec %%xmm0, %%xmm10\n\t"
                "aesdec %%xmm0, %%xmm11\n\t"
                "movdqa 0x80(%[key]), %%xmm0\n\t"
                "aesdec %%xmm0, %%xmm1\n\t"
                "aesdec %%xmm0, %%xmm2\n\t"
                "aesdec %%xmm0, %%xmm3\n\t"
                "aesdec %%xmm0, %%xmm4\n\t"
                "aesdec %%xmm0, %%xmm8\n\t"
                "aesdec %%xmm0, %%xmm9\n\t"
                "aesdec %%xmm0, %%xmm10\n\t"
                "aesdec %%xmm0, %%xmm11\n\t"
                "movdqa 0x90(%[key]), %%xmm0\n\t"
                "aesdec %%xmm0, %%xmm1\n\t"
                "aesdec %%xmm0, %%xmm2\n\t"
                "aesdec %%xmm0, %%xmm3\n\t"
                "aesdec %%xmm0, %%xmm4\n\t"
                "aesdec %%xmm0, %%xmm8\n\t"
                "aesdec %%xmm0, %%xmm9\n\t"
                "aesdec %%xmm0, %%xmm10\n\t"
                "aesdec %%xmm0, %%xmm11\n\t"
                "movdqa 0xa0(%[key]), %%xmm0\n\t"
                "jb .Ldeclast%=\n\t"   /* rounds < 12: AES-128, done.  */
                "aesdec %%xmm0, %%xmm1\n\t"
                "aesdec %%xmm0, %%xmm2\n\t"
                "aesdec %%xmm0, %%xmm3\n\t"
                "aesdec %%xmm0, %%xmm4\n\t"
                "aesdec %%xmm0, %%xmm8\n\t"
                "aesdec %%xmm0, %%xmm9\n\t"
                "aesdec %%xmm0, %%xmm10\n\t"
                "aesdec %%xmm0, %%xmm11\n\t"
                "movdqa 0xb0(%[key]), %%xmm0\n\t"
                "aesdec %%xmm0, %%xmm1\n\t"
                "aesdec %%xmm0, %%xmm2\n\t"
                "aesdec %%xmm0, %%xmm3\n\t"
                "aesdec %%xmm0, %%xmm4\n\t"
                "aesdec %%xmm0, %%xmm8\n\t"
                "aesdec %%xmm0, %%xmm9\n\t"
                "aesdec %%xmm0, %%xmm10\n\t"
                "aesdec %%xmm0, %%xmm11\n\t"
                "movdqa 0xc0(%[key]), %%xmm0\n\t"
                "je .Ldeclast%=\n\t"   /* rounds == 12: AES-192, done.  */
                "aesdec %%xmm0, %%xmm1\n\t"
                "aesdec %%xmm0, %%xmm2\n\t"
                "aesdec %%xmm0, %%xmm3\n\t"
                "aesdec %%xmm0, %%xmm4\n\t"
                "aesdec %%xmm0, %%xmm8\n\t"
                "aesdec %%xmm0, %%xmm9\n\t"
                "aesdec %%xmm0, %%xmm10\n\t"
                "aesdec %%xmm0, %%xmm11\n\t"
                "movdqa 0xd0(%[key]), %%xmm0\n\t"
                "aesdec %%xmm0, %%xmm1\n\t"
                "aesdec %%xmm0, %%xmm2\n\t"
                "aesdec %%xmm0, %%xmm3\n\t"
                "aesdec %%xmm0, %%xmm4\n\t"
                "aesdec %%xmm0, %%xmm8\n\t"
                "aesdec %%xmm0, %%xmm9\n\t"
                "aesdec %%xmm0, %%xmm10\n\t"
                "aesdec %%xmm0, %%xmm11\n\t"
                "movdqa 0xe0(%[key]), %%xmm0\n" /* AES-256 last round key.  */
                ".Ldeclast%=:\n\t"
                : /* no output */
                : [key] "r" (ctx->keyschdec),
                  [rounds] "r" (ctx->rounds)
                : "cc", "memory");
}

#endif /* __x86_64__ */


/* Perform a CTR encryption round using the counter CTR and the input
   block A.  Write the result to the output block B and update CTR.
   CTR needs to be a 16 byte aligned little-endian value.
*/ static ASM_FUNC_ATTR_INLINE void do_aesni_ctr (const RIJNDAEL_context *ctx, unsigned char *ctr, unsigned char *b, const unsigned char *a) { #define aesenc_xmm1_xmm0 ".byte 0x66, 0x0f, 0x38, 0xdc, 0xc1\n\t" #define aesenclast_xmm1_xmm0 ".byte 0x66, 0x0f, 0x38, 0xdd, 0xc1\n\t" asm volatile ("movdqa %%xmm5, %%xmm0\n\t" /* xmm0 := CTR (xmm5) */ "pcmpeqd %%xmm1, %%xmm1\n\t" "psrldq $8, %%xmm1\n\t" /* xmm1 = -1 */ "pshufb %%xmm6, %%xmm5\n\t" "psubq %%xmm1, %%xmm5\n\t" /* xmm5++ (big endian) */ /* detect if 64-bit carry handling is needed */ "cmpl $0xffffffff, 8(%[ctr])\n\t" "jne .Lno_carry%=\n\t" "cmpl $0xffffffff, 12(%[ctr])\n\t" "jne .Lno_carry%=\n\t" "pslldq $8, %%xmm1\n\t" /* move lower 64-bit to high */ "psubq %%xmm1, %%xmm5\n\t" /* add carry to upper 64bits */ ".Lno_carry%=:\n\t" "pshufb %%xmm6, %%xmm5\n\t" "movdqa %%xmm5, (%[ctr])\n\t" /* Update CTR (mem). */ "pxor (%[key]), %%xmm0\n\t" /* xmm1 ^= key[0] */ "movdqa 0x10(%[key]), %%xmm1\n\t" aesenc_xmm1_xmm0 "movdqa 0x20(%[key]), %%xmm1\n\t" aesenc_xmm1_xmm0 "movdqa 0x30(%[key]), %%xmm1\n\t" aesenc_xmm1_xmm0 "movdqa 0x40(%[key]), %%xmm1\n\t" aesenc_xmm1_xmm0 "movdqa 0x50(%[key]), %%xmm1\n\t" aesenc_xmm1_xmm0 "movdqa 0x60(%[key]), %%xmm1\n\t" aesenc_xmm1_xmm0 "movdqa 0x70(%[key]), %%xmm1\n\t" aesenc_xmm1_xmm0 "movdqa 0x80(%[key]), %%xmm1\n\t" aesenc_xmm1_xmm0 "movdqa 0x90(%[key]), %%xmm1\n\t" aesenc_xmm1_xmm0 "movdqa 0xa0(%[key]), %%xmm1\n\t" "cmpl $10, %[rounds]\n\t" "jz .Lenclast%=\n\t" aesenc_xmm1_xmm0 "movdqa 0xb0(%[key]), %%xmm1\n\t" aesenc_xmm1_xmm0 "movdqa 0xc0(%[key]), %%xmm1\n\t" "cmpl $12, %[rounds]\n\t" "jz .Lenclast%=\n\t" aesenc_xmm1_xmm0 "movdqa 0xd0(%[key]), %%xmm1\n\t" aesenc_xmm1_xmm0 "movdqa 0xe0(%[key]), %%xmm1\n" ".Lenclast%=:\n\t" aesenclast_xmm1_xmm0 "movdqu %[src], %%xmm1\n\t" /* xmm1 := input */ "pxor %%xmm1, %%xmm0\n\t" /* EncCTR ^= input */ "movdqu %%xmm0, %[dst]" /* Store EncCTR. 
*/ : [dst] "=m" (*b) : [src] "m" (*a), [ctr] "r" (ctr), [key] "r" (ctx->keyschenc), [rounds] "g" (ctx->rounds) : "cc", "memory"); #undef aesenc_xmm1_xmm0 #undef aesenclast_xmm1_xmm0 } /* Four blocks at a time variant of do_aesni_ctr. */ static ASM_FUNC_ATTR_INLINE void do_aesni_ctr_4 (const RIJNDAEL_context *ctx, unsigned char *ctr, unsigned char *b, const unsigned char *a) { static const byte bige_addb_const[4][16] __attribute__ ((aligned (16))) = { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4 } }; const void *bige_addb = bige_addb_const; #define aesenc_xmm1_xmm0 ".byte 0x66, 0x0f, 0x38, 0xdc, 0xc1\n\t" #define aesenc_xmm1_xmm2 ".byte 0x66, 0x0f, 0x38, 0xdc, 0xd1\n\t" #define aesenc_xmm1_xmm3 ".byte 0x66, 0x0f, 0x38, 0xdc, 0xd9\n\t" #define aesenc_xmm1_xmm4 ".byte 0x66, 0x0f, 0x38, 0xdc, 0xe1\n\t" #define aesenclast_xmm1_xmm0 ".byte 0x66, 0x0f, 0x38, 0xdd, 0xc1\n\t" #define aesenclast_xmm1_xmm2 ".byte 0x66, 0x0f, 0x38, 0xdd, 0xd1\n\t" #define aesenclast_xmm1_xmm3 ".byte 0x66, 0x0f, 0x38, 0xdd, 0xd9\n\t" #define aesenclast_xmm1_xmm4 ".byte 0x66, 0x0f, 0x38, 0xdd, 0xe1\n\t" /* Register usage: [key] keyschedule xmm0 CTR-0 xmm1 temp / round key xmm2 CTR-1 xmm3 CTR-2 xmm4 CTR-3 xmm5 copy of *ctr xmm6 endian swapping mask */ asm volatile (/* detect if 8-bit carry handling is needed */ "addb $4, 15(%[ctr])\n\t" "jc .Ladd32bit%=\n\t" "movdqa %%xmm5, %%xmm0\n\t" /* xmm0 := CTR (xmm5) */ "movdqa 0*16(%[addb]), %%xmm2\n\t" /* xmm2 := be(1) */ "movdqa 1*16(%[addb]), %%xmm3\n\t" /* xmm3 := be(2) */ "movdqa 2*16(%[addb]), %%xmm4\n\t" /* xmm4 := be(3) */ "movdqa 3*16(%[addb]), %%xmm5\n\t" /* xmm5 := be(4) */ "paddb %%xmm0, %%xmm2\n\t" /* xmm2 := be(1) + CTR (xmm0) */ "paddb %%xmm0, %%xmm3\n\t" /* xmm3 := be(2) + CTR (xmm0) */ "paddb %%xmm0, %%xmm4\n\t" /* xmm4 := be(3) + CTR (xmm0) */ "paddb %%xmm0, %%xmm5\n\t" /* xmm5 := 
be(4) + CTR (xmm0) */ "movdqa (%[key]), %%xmm1\n\t" /* xmm1 := key[0] */ "jmp .Ldone_ctr%=\n\t" ".Ladd32bit%=:\n\t" "movdqa %%xmm5, (%[ctr])\n\t" /* Restore CTR. */ "movdqa %%xmm5, %%xmm0\n\t" /* xmm0, xmm2 := CTR (xmm5) */ "movdqa %%xmm0, %%xmm2\n\t" "pcmpeqd %%xmm1, %%xmm1\n\t" "psrldq $8, %%xmm1\n\t" /* xmm1 = -1 */ "pshufb %%xmm6, %%xmm2\n\t" /* xmm2 := le(xmm2) */ "psubq %%xmm1, %%xmm2\n\t" /* xmm2++ */ "movdqa %%xmm2, %%xmm3\n\t" /* xmm3 := xmm2 */ "psubq %%xmm1, %%xmm3\n\t" /* xmm3++ */ "movdqa %%xmm3, %%xmm4\n\t" /* xmm4 := xmm3 */ "psubq %%xmm1, %%xmm4\n\t" /* xmm4++ */ "movdqa %%xmm4, %%xmm5\n\t" /* xmm5 := xmm4 */ "psubq %%xmm1, %%xmm5\n\t" /* xmm5++ */ /* detect if 64-bit carry handling is needed */ "cmpl $0xffffffff, 8(%[ctr])\n\t" "jne .Lno_carry%=\n\t" "movl 12(%[ctr]), %%esi\n\t" "bswapl %%esi\n\t" "cmpl $0xfffffffc, %%esi\n\t" "jb .Lno_carry%=\n\t" /* no carry */ "pslldq $8, %%xmm1\n\t" /* move lower 64-bit to high */ "je .Lcarry_xmm5%=\n\t" /* esi == 0xfffffffc */ "cmpl $0xfffffffe, %%esi\n\t" "jb .Lcarry_xmm4%=\n\t" /* esi == 0xfffffffd */ "je .Lcarry_xmm3%=\n\t" /* esi == 0xfffffffe */ /* esi == 0xffffffff */ "psubq %%xmm1, %%xmm2\n\t" ".Lcarry_xmm3%=:\n\t" "psubq %%xmm1, %%xmm3\n\t" ".Lcarry_xmm4%=:\n\t" "psubq %%xmm1, %%xmm4\n\t" ".Lcarry_xmm5%=:\n\t" "psubq %%xmm1, %%xmm5\n\t" ".Lno_carry%=:\n\t" "movdqa (%[key]), %%xmm1\n\t" /* xmm1 := key[0] */ "pshufb %%xmm6, %%xmm2\n\t" /* xmm2 := be(xmm2) */ "pshufb %%xmm6, %%xmm3\n\t" /* xmm3 := be(xmm3) */ "pshufb %%xmm6, %%xmm4\n\t" /* xmm4 := be(xmm4) */ "pshufb %%xmm6, %%xmm5\n\t" /* xmm5 := be(xmm5) */ "movdqa %%xmm5, (%[ctr])\n\t" /* Update CTR (mem). 
*/ ".Ldone_ctr%=:\n\t" : : [ctr] "r" (ctr), [key] "r" (ctx->keyschenc), [addb] "r" (bige_addb) : "%esi", "cc", "memory"); asm volatile ("pxor %%xmm1, %%xmm0\n\t" /* xmm0 ^= key[0] */ "pxor %%xmm1, %%xmm2\n\t" /* xmm2 ^= key[0] */ "pxor %%xmm1, %%xmm3\n\t" /* xmm3 ^= key[0] */ "pxor %%xmm1, %%xmm4\n\t" /* xmm4 ^= key[0] */ "movdqa 0x10(%[key]), %%xmm1\n\t" aesenc_xmm1_xmm0 aesenc_xmm1_xmm2 aesenc_xmm1_xmm3 aesenc_xmm1_xmm4 "movdqa 0x20(%[key]), %%xmm1\n\t" aesenc_xmm1_xmm0 aesenc_xmm1_xmm2 aesenc_xmm1_xmm3 aesenc_xmm1_xmm4 "movdqa 0x30(%[key]), %%xmm1\n\t" aesenc_xmm1_xmm0 aesenc_xmm1_xmm2 aesenc_xmm1_xmm3 aesenc_xmm1_xmm4 "movdqa 0x40(%[key]), %%xmm1\n\t" aesenc_xmm1_xmm0 aesenc_xmm1_xmm2 aesenc_xmm1_xmm3 aesenc_xmm1_xmm4 "movdqa 0x50(%[key]), %%xmm1\n\t" aesenc_xmm1_xmm0 aesenc_xmm1_xmm2 aesenc_xmm1_xmm3 aesenc_xmm1_xmm4 "movdqa 0x60(%[key]), %%xmm1\n\t" aesenc_xmm1_xmm0 aesenc_xmm1_xmm2 aesenc_xmm1_xmm3 aesenc_xmm1_xmm4 "movdqa 0x70(%[key]), %%xmm1\n\t" aesenc_xmm1_xmm0 aesenc_xmm1_xmm2 aesenc_xmm1_xmm3 aesenc_xmm1_xmm4 "movdqa 0x80(%[key]), %%xmm1\n\t" aesenc_xmm1_xmm0 aesenc_xmm1_xmm2 aesenc_xmm1_xmm3 aesenc_xmm1_xmm4 "movdqa 0x90(%[key]), %%xmm1\n\t" aesenc_xmm1_xmm0 aesenc_xmm1_xmm2 aesenc_xmm1_xmm3 aesenc_xmm1_xmm4 "movdqa 0xa0(%[key]), %%xmm1\n\t" "cmpl $10, %[rounds]\n\t" "jz .Lenclast%=\n\t" aesenc_xmm1_xmm0 aesenc_xmm1_xmm2 aesenc_xmm1_xmm3 aesenc_xmm1_xmm4 "movdqa 0xb0(%[key]), %%xmm1\n\t" aesenc_xmm1_xmm0 aesenc_xmm1_xmm2 aesenc_xmm1_xmm3 aesenc_xmm1_xmm4 "movdqa 0xc0(%[key]), %%xmm1\n\t" "cmpl $12, %[rounds]\n\t" "jz .Lenclast%=\n\t" aesenc_xmm1_xmm0 aesenc_xmm1_xmm2 aesenc_xmm1_xmm3 aesenc_xmm1_xmm4 "movdqa 0xd0(%[key]), %%xmm1\n\t" aesenc_xmm1_xmm0 aesenc_xmm1_xmm2 aesenc_xmm1_xmm3 aesenc_xmm1_xmm4 "movdqa 0xe0(%[key]), %%xmm1\n" ".Lenclast%=:\n\t" aesenclast_xmm1_xmm0 aesenclast_xmm1_xmm2 aesenclast_xmm1_xmm3 aesenclast_xmm1_xmm4 : : [key] "r" (ctx->keyschenc), [rounds] "r" (ctx->rounds) : "cc", "memory"); asm volatile ("movdqu (%[src]), 
%%xmm1\n\t" /* Get block 1. */ "pxor %%xmm1, %%xmm0\n\t" /* EncCTR-1 ^= input */ "movdqu %%xmm0, (%[dst])\n\t" /* Store block 1 */ "movdqu 16(%[src]), %%xmm1\n\t" /* Get block 2. */ "pxor %%xmm1, %%xmm2\n\t" /* EncCTR-2 ^= input */ "movdqu %%xmm2, 16(%[dst])\n\t" /* Store block 2. */ "movdqu 32(%[src]), %%xmm1\n\t" /* Get block 3. */ "pxor %%xmm1, %%xmm3\n\t" /* EncCTR-3 ^= input */ "movdqu %%xmm3, 32(%[dst])\n\t" /* Store block 3. */ "movdqu 48(%[src]), %%xmm1\n\t" /* Get block 4. */ "pxor %%xmm1, %%xmm4\n\t" /* EncCTR-4 ^= input */ "movdqu %%xmm4, 48(%[dst])" /* Store block 4. */ : : [src] "r" (a), [dst] "r" (b) : "memory"); #undef aesenc_xmm1_xmm0 #undef aesenc_xmm1_xmm2 #undef aesenc_xmm1_xmm3 #undef aesenc_xmm1_xmm4 #undef aesenclast_xmm1_xmm0 #undef aesenclast_xmm1_xmm2 #undef aesenclast_xmm1_xmm3 #undef aesenclast_xmm1_xmm4 } #ifdef __x86_64__ /* Eight blocks at a time variant of do_aesni_ctr. */ static ASM_FUNC_ATTR_INLINE void do_aesni_ctr_8 (const RIJNDAEL_context *ctx, unsigned char *ctr, unsigned char *b, const unsigned char *a) { static const byte bige_addb_const[8][16] __attribute__ ((aligned (16))) = { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8 } }; const void *bige_addb = bige_addb_const; /* Register usage: [key] keyschedule xmm0 CTR-0 xmm1 temp / round key xmm2 CTR-1 xmm3 CTR-2 xmm4 CTR-3 xmm5 copy of *ctr xmm6 endian swapping mask xmm8 CTR-4 xmm9 CTR-5 xmm10 CTR-6 xmm11 CTR-7 xmm12 temp xmm13 temp xmm14 temp xmm15 temp */ asm volatile (/* detect if 8-bit carry handling is needed */ "addb $8, 15(%[ctr])\n\t" "jc .Ladd32bit%=\n\t" "movdqa (%[key]), %%xmm1\n\t" /* xmm1 := key[0] */ 
"movdqa 16(%[key]), %%xmm7\n\t" /* xmm7 := key[1] */ "movdqa %%xmm5, %%xmm0\n\t" /* xmm0 := CTR (xmm5) */ "movdqa %%xmm5, %%xmm2\n\t" /* xmm2 := CTR (xmm5) */ "movdqa %%xmm5, %%xmm3\n\t" /* xmm3 := CTR (xmm5) */ "movdqa %%xmm5, %%xmm4\n\t" /* xmm4 := CTR (xmm5) */ "paddb 0*16(%[addb]), %%xmm2\n\t" /* xmm2 := be(1) + CTR */ "paddb 1*16(%[addb]), %%xmm3\n\t" /* xmm3 := be(2) + CTR */ "paddb 2*16(%[addb]), %%xmm4\n\t" /* xmm4 := be(3) + CTR */ "pxor %%xmm1, %%xmm0\n\t" /* xmm0 ^= key[0] */ "pxor %%xmm1, %%xmm2\n\t" /* xmm2 ^= key[0] */ "pxor %%xmm1, %%xmm3\n\t" /* xmm3 ^= key[0] */ "pxor %%xmm1, %%xmm4\n\t" /* xmm4 ^= key[0] */ "aesenc %%xmm7, %%xmm0\n\t" "aesenc %%xmm7, %%xmm2\n\t" "aesenc %%xmm7, %%xmm3\n\t" "aesenc %%xmm7, %%xmm4\n\t" "movdqa %%xmm5, %%xmm8\n\t" /* xmm8 := CTR (xmm5) */ "movdqa %%xmm5, %%xmm9\n\t" /* xmm9 := CTR (xmm5) */ "movdqa %%xmm5, %%xmm10\n\t" /* xmm10 := CTR (xmm5) */ "movdqa %%xmm5, %%xmm11\n\t" /* xmm11 := CTR (xmm5) */ "paddb 3*16(%[addb]), %%xmm8\n\t" /* xmm8 := be(4) + CTR */ "paddb 4*16(%[addb]), %%xmm9\n\t" /* xmm9 := be(5) + CTR */ "paddb 5*16(%[addb]), %%xmm10\n\t" /* xmm10 := be(6) + CTR */ "paddb 6*16(%[addb]), %%xmm11\n\t" /* xmm11 := be(7) + CTR */ "pxor %%xmm1, %%xmm8\n\t" /* xmm8 ^= key[0] */ "pxor %%xmm1, %%xmm9\n\t" /* xmm9 ^= key[0] */ "pxor %%xmm1, %%xmm10\n\t" /* xmm10 ^= key[0] */ "pxor %%xmm1, %%xmm11\n\t" /* xmm11 ^= key[0] */ "aesenc %%xmm7, %%xmm8\n\t" "aesenc %%xmm7, %%xmm9\n\t" "aesenc %%xmm7, %%xmm10\n\t" "aesenc %%xmm7, %%xmm11\n\t" "paddb 7*16(%[addb]), %%xmm5\n\t" /* xmm5 := be(8) + CTR */ "jmp .Ldone_ctr%=\n\t" ".Ladd32bit%=:\n\t" "movdqa %%xmm5, (%[ctr])\n\t" /* Restore CTR. 
*/ "movdqa %%xmm5, %%xmm0\n\t" /* xmm0, xmm2 := CTR (xmm5) */ "movdqa %%xmm0, %%xmm2\n\t" "pcmpeqd %%xmm1, %%xmm1\n\t" "psrldq $8, %%xmm1\n\t" /* xmm1 = -1 */ "pshufb %%xmm6, %%xmm2\n\t" /* xmm2 := le(xmm2) */ "psubq %%xmm1, %%xmm2\n\t" /* xmm2++ */ "movdqa %%xmm2, %%xmm3\n\t" /* xmm3 := xmm2 */ "psubq %%xmm1, %%xmm3\n\t" /* xmm3++ */ "movdqa %%xmm3, %%xmm4\n\t" /* xmm4 := xmm3 */ "psubq %%xmm1, %%xmm4\n\t" /* xmm4++ */ "movdqa %%xmm4, %%xmm8\n\t" /* xmm8 := xmm4 */ "psubq %%xmm1, %%xmm8\n\t" /* xmm8++ */ "movdqa %%xmm8, %%xmm9\n\t" /* xmm9 := xmm8 */ "psubq %%xmm1, %%xmm9\n\t" /* xmm9++ */ "movdqa %%xmm9, %%xmm10\n\t" /* xmm10 := xmm9 */ "psubq %%xmm1, %%xmm10\n\t" /* xmm10++ */ "movdqa %%xmm10, %%xmm11\n\t" /* xmm11 := xmm10 */ "psubq %%xmm1, %%xmm11\n\t" /* xmm11++ */ "movdqa %%xmm11, %%xmm5\n\t" /* xmm5 := xmm11 */ "psubq %%xmm1, %%xmm5\n\t" /* xmm5++ */ /* detect if 64-bit carry handling is needed */ "cmpl $0xffffffff, 8(%[ctr])\n\t" "jne .Lno_carry%=\n\t" "movl 12(%[ctr]), %%esi\n\t" "bswapl %%esi\n\t" "cmpl $0xfffffff8, %%esi\n\t" "jb .Lno_carry%=\n\t" /* no carry */ "pslldq $8, %%xmm1\n\t" /* move lower 64-bit to high */ "je .Lcarry_xmm5%=\n\t" /* esi == 0xfffffff8 */ "cmpl $0xfffffffa, %%esi\n\t" "jb .Lcarry_xmm11%=\n\t" /* esi == 0xfffffff9 */ "je .Lcarry_xmm10%=\n\t" /* esi == 0xfffffffa */ "cmpl $0xfffffffc, %%esi\n\t" "jb .Lcarry_xmm9%=\n\t" /* esi == 0xfffffffb */ "je .Lcarry_xmm8%=\n\t" /* esi == 0xfffffffc */ "cmpl $0xfffffffe, %%esi\n\t" "jb .Lcarry_xmm4%=\n\t" /* esi == 0xfffffffd */ "je .Lcarry_xmm3%=\n\t" /* esi == 0xfffffffe */ /* esi == 0xffffffff */ "psubq %%xmm1, %%xmm2\n\t" ".Lcarry_xmm3%=:\n\t" "psubq %%xmm1, %%xmm3\n\t" ".Lcarry_xmm4%=:\n\t" "psubq %%xmm1, %%xmm4\n\t" ".Lcarry_xmm8%=:\n\t" "psubq %%xmm1, %%xmm8\n\t" ".Lcarry_xmm9%=:\n\t" "psubq %%xmm1, %%xmm9\n\t" ".Lcarry_xmm10%=:\n\t" "psubq %%xmm1, %%xmm10\n\t" ".Lcarry_xmm11%=:\n\t" "psubq %%xmm1, %%xmm11\n\t" ".Lcarry_xmm5%=:\n\t" "psubq %%xmm1, %%xmm5\n\t" ".Lno_carry%=:\n\t" 
"movdqa (%[key]), %%xmm1\n\t" /* xmm1 := key[0] */ "movdqa 16(%[key]), %%xmm7\n\t" /* xmm7 := key[1] */ "pshufb %%xmm6, %%xmm2\n\t" /* xmm2 := be(xmm2) */ "pshufb %%xmm6, %%xmm3\n\t" /* xmm3 := be(xmm3) */ "pshufb %%xmm6, %%xmm4\n\t" /* xmm4 := be(xmm4) */ "pxor %%xmm1, %%xmm0\n\t" /* xmm0 ^= key[0] */ "pxor %%xmm1, %%xmm2\n\t" /* xmm2 ^= key[0] */ "pxor %%xmm1, %%xmm3\n\t" /* xmm3 ^= key[0] */ "pxor %%xmm1, %%xmm4\n\t" /* xmm4 ^= key[0] */ "aesenc %%xmm7, %%xmm0\n\t" "aesenc %%xmm7, %%xmm2\n\t" "aesenc %%xmm7, %%xmm3\n\t" "aesenc %%xmm7, %%xmm4\n\t" "pshufb %%xmm6, %%xmm8\n\t" /* xmm8 := be(xmm8) */ "pshufb %%xmm6, %%xmm9\n\t" /* xmm9 := be(xmm9) */ "pshufb %%xmm6, %%xmm10\n\t" /* xmm10 := be(xmm10) */ "pshufb %%xmm6, %%xmm11\n\t" /* xmm11 := be(xmm11) */ "pxor %%xmm1, %%xmm8\n\t" /* xmm8 ^= key[0] */ "pxor %%xmm1, %%xmm9\n\t" /* xmm9 ^= key[0] */ "pxor %%xmm1, %%xmm10\n\t" /* xmm10 ^= key[0] */ "pxor %%xmm1, %%xmm11\n\t" /* xmm11 ^= key[0] */ "aesenc %%xmm7, %%xmm8\n\t" "aesenc %%xmm7, %%xmm9\n\t" "aesenc %%xmm7, %%xmm10\n\t" "aesenc %%xmm7, %%xmm11\n\t" "pshufb %%xmm6, %%xmm5\n\t" /* xmm5 := be(xmm5) */ "movdqa %%xmm5, (%[ctr])\n\t" /* Update CTR (mem). */ ".align 16\n\t" ".Ldone_ctr%=:\n\t" : : [ctr] "r" (ctr), [key] "r" (ctx->keyschenc), [addb] "r" (bige_addb) : "%esi", "cc", "memory"); asm volatile ("movdqa 0x20(%[key]), %%xmm1\n\t" "movdqu 0*16(%[src]), %%xmm12\n\t" /* Get block 1. */ "movdqu 1*16(%[src]), %%xmm13\n\t" /* Get block 2. */ "movdqu 2*16(%[src]), %%xmm14\n\t" /* Get block 3. */ "movdqu 3*16(%[src]), %%xmm15\n\t" /* Get block 4. */ "movdqu 4*16(%[src]), %%xmm7\n\t" /* Get block 5. 
*/ "aesenc %%xmm1, %%xmm0\n\t" "aesenc %%xmm1, %%xmm2\n\t" "aesenc %%xmm1, %%xmm3\n\t" "aesenc %%xmm1, %%xmm4\n\t" "aesenc %%xmm1, %%xmm8\n\t" "aesenc %%xmm1, %%xmm9\n\t" "aesenc %%xmm1, %%xmm10\n\t" "aesenc %%xmm1, %%xmm11\n\t" "cmpl $12, %[rounds]\n\t" "movdqa 0x30(%[key]), %%xmm1\n\t" "aesenc %%xmm1, %%xmm0\n\t" "aesenc %%xmm1, %%xmm2\n\t" "aesenc %%xmm1, %%xmm3\n\t" "aesenc %%xmm1, %%xmm4\n\t" "aesenc %%xmm1, %%xmm8\n\t" "aesenc %%xmm1, %%xmm9\n\t" "aesenc %%xmm1, %%xmm10\n\t" "aesenc %%xmm1, %%xmm11\n\t" "movdqa 0x40(%[key]), %%xmm1\n\t" "aesenc %%xmm1, %%xmm0\n\t" "aesenc %%xmm1, %%xmm2\n\t" "aesenc %%xmm1, %%xmm3\n\t" "aesenc %%xmm1, %%xmm4\n\t" "aesenc %%xmm1, %%xmm8\n\t" "aesenc %%xmm1, %%xmm9\n\t" "aesenc %%xmm1, %%xmm10\n\t" "aesenc %%xmm1, %%xmm11\n\t" "movdqa 0x50(%[key]), %%xmm1\n\t" "aesenc %%xmm1, %%xmm0\n\t" "aesenc %%xmm1, %%xmm2\n\t" "aesenc %%xmm1, %%xmm3\n\t" "aesenc %%xmm1, %%xmm4\n\t" "aesenc %%xmm1, %%xmm8\n\t" "aesenc %%xmm1, %%xmm9\n\t" "aesenc %%xmm1, %%xmm10\n\t" "aesenc %%xmm1, %%xmm11\n\t" "movdqa 0x60(%[key]), %%xmm1\n\t" "aesenc %%xmm1, %%xmm0\n\t" "aesenc %%xmm1, %%xmm2\n\t" "aesenc %%xmm1, %%xmm3\n\t" "aesenc %%xmm1, %%xmm4\n\t" "aesenc %%xmm1, %%xmm8\n\t" "aesenc %%xmm1, %%xmm9\n\t" "aesenc %%xmm1, %%xmm10\n\t" "aesenc %%xmm1, %%xmm11\n\t" "movdqa 0x70(%[key]), %%xmm1\n\t" "aesenc %%xmm1, %%xmm0\n\t" "aesenc %%xmm1, %%xmm2\n\t" "aesenc %%xmm1, %%xmm3\n\t" "aesenc %%xmm1, %%xmm4\n\t" "aesenc %%xmm1, %%xmm8\n\t" "aesenc %%xmm1, %%xmm9\n\t" "aesenc %%xmm1, %%xmm10\n\t" "aesenc %%xmm1, %%xmm11\n\t" "movdqa 0x80(%[key]), %%xmm1\n\t" "aesenc %%xmm1, %%xmm0\n\t" "aesenc %%xmm1, %%xmm2\n\t" "aesenc %%xmm1, %%xmm3\n\t" "aesenc %%xmm1, %%xmm4\n\t" "aesenc %%xmm1, %%xmm8\n\t" "aesenc %%xmm1, %%xmm9\n\t" "aesenc %%xmm1, %%xmm10\n\t" "aesenc %%xmm1, %%xmm11\n\t" "movdqa 0x90(%[key]), %%xmm1\n\t" "aesenc %%xmm1, %%xmm0\n\t" "aesenc %%xmm1, %%xmm2\n\t" "aesenc %%xmm1, %%xmm3\n\t" "aesenc %%xmm1, %%xmm4\n\t" "aesenc %%xmm1, %%xmm8\n\t" "aesenc 
%%xmm1, %%xmm9\n\t" "aesenc %%xmm1, %%xmm10\n\t" "aesenc %%xmm1, %%xmm11\n\t" "movdqa 0xa0(%[key]), %%xmm1\n\t" "jb .Lenclast%=\n\t" "aesenc %%xmm1, %%xmm0\n\t" "aesenc %%xmm1, %%xmm2\n\t" "aesenc %%xmm1, %%xmm3\n\t" "aesenc %%xmm1, %%xmm4\n\t" "aesenc %%xmm1, %%xmm8\n\t" "aesenc %%xmm1, %%xmm9\n\t" "aesenc %%xmm1, %%xmm10\n\t" "aesenc %%xmm1, %%xmm11\n\t" "movdqa 0xb0(%[key]), %%xmm1\n\t" "aesenc %%xmm1, %%xmm0\n\t" "aesenc %%xmm1, %%xmm2\n\t" "aesenc %%xmm1, %%xmm3\n\t" "aesenc %%xmm1, %%xmm4\n\t" "aesenc %%xmm1, %%xmm8\n\t" "aesenc %%xmm1, %%xmm9\n\t" "aesenc %%xmm1, %%xmm10\n\t" "aesenc %%xmm1, %%xmm11\n\t" "movdqa 0xc0(%[key]), %%xmm1\n\t" "je .Lenclast%=\n\t" "aesenc %%xmm1, %%xmm0\n\t" "aesenc %%xmm1, %%xmm2\n\t" "aesenc %%xmm1, %%xmm3\n\t" "aesenc %%xmm1, %%xmm4\n\t" "aesenc %%xmm1, %%xmm8\n\t" "aesenc %%xmm1, %%xmm9\n\t" "aesenc %%xmm1, %%xmm10\n\t" "aesenc %%xmm1, %%xmm11\n\t" "movdqa 0xd0(%[key]), %%xmm1\n\t" "aesenc %%xmm1, %%xmm0\n\t" "aesenc %%xmm1, %%xmm2\n\t" "aesenc %%xmm1, %%xmm3\n\t" "aesenc %%xmm1, %%xmm4\n\t" "aesenc %%xmm1, %%xmm8\n\t" "aesenc %%xmm1, %%xmm9\n\t" "aesenc %%xmm1, %%xmm10\n\t" "aesenc %%xmm1, %%xmm11\n\t" "movdqa 0xe0(%[key]), %%xmm1\n" ".Lenclast%=:\n\t" : : [key] "r" (ctx->keyschenc), [rounds] "r" (ctx->rounds), [src] "r" (a) : "cc", "memory"); asm volatile ("pxor %%xmm1, %%xmm12\n\t" /* block1 ^= lastkey */ "pxor %%xmm1, %%xmm13\n\t" /* block2 ^= lastkey */ "pxor %%xmm1, %%xmm14\n\t" /* block3 ^= lastkey */ "pxor %%xmm1, %%xmm15\n\t" /* block4 ^= lastkey */ "aesenclast %%xmm12, %%xmm0\n\t" "aesenclast %%xmm13, %%xmm2\n\t" "aesenclast %%xmm14, %%xmm3\n\t" "aesenclast %%xmm15, %%xmm4\n\t" "movdqu 5*16(%[src]), %%xmm12\n\t" /* Get block 6. */ "movdqu 6*16(%[src]), %%xmm13\n\t" /* Get block 7. */ "movdqu 7*16(%[src]), %%xmm14\n\t" /* Get block 8. */ "movdqu %%xmm0, 0*16(%[dst])\n\t" /* Store block 1. */ "movdqu %%xmm2, 1*16(%[dst])\n\t" /* Store block 2. */ "movdqu %%xmm3, 2*16(%[dst])\n\t" /* Store block 3. 
*/ "movdqu %%xmm4, 3*16(%[dst])\n\t" /* Store block 4. */ "pxor %%xmm1, %%xmm7\n\t" /* block5 ^= lastkey */ "pxor %%xmm1, %%xmm12\n\t" /* block6 ^= lastkey */ "pxor %%xmm1, %%xmm13\n\t" /* block7 ^= lastkey */ "pxor %%xmm1, %%xmm14\n\t" /* block8 ^= lastkey */ "aesenclast %%xmm7, %%xmm8\n\t" "aesenclast %%xmm12, %%xmm9\n\t" "aesenclast %%xmm13, %%xmm10\n\t" "aesenclast %%xmm14, %%xmm11\n\t" "movdqu %%xmm8, 4*16(%[dst])\n\t" /* Store block 8. */ "movdqu %%xmm9, 5*16(%[dst])\n\t" /* Store block 9. */ "movdqu %%xmm10, 6*16(%[dst])\n\t" /* Store block 10. */ "movdqu %%xmm11, 7*16(%[dst])\n\t" /* Store block 11. */ : : [src] "r" (a), [dst] "r" (b) : "memory"); } #endif /* __x86_64__ */ unsigned int ASM_FUNC_ATTR _gcry_aes_aesni_encrypt (const RIJNDAEL_context *ctx, unsigned char *dst, const unsigned char *src) { aesni_prepare (); asm volatile ("movdqu %[src], %%xmm0\n\t" : : [src] "m" (*src) : "memory" ); do_aesni_enc (ctx); asm volatile ("movdqu %%xmm0, %[dst]\n\t" : [dst] "=m" (*dst) : : "memory" ); aesni_cleanup (); return 0; } +void ASM_FUNC_ATTR +_gcry_aes_aesni_ecb_crypt (RIJNDAEL_context *ctx, unsigned char *dst, + const unsigned char *src, size_t nblocks, + int encrypt) +{ + aesni_prepare_2_7_variable; + + aesni_prepare (); + aesni_prepare_2_7(); + + if (!encrypt && !ctx->decryption_prepared) + { + do_aesni_prepare_decryption ( ctx ); + ctx->decryption_prepared = 1; + } + +#ifdef __x86_64__ + if (nblocks >= 8) + { + const void *key = encrypt ? 
ctx->keyschenc : ctx->keyschdec; + aesni_prepare_8_15_variable; + + aesni_prepare_8_15(); + + for (; nblocks >= 8; nblocks -= 8) + { + asm volatile + ("movdqa (%[key]), %%xmm0\n\t" + "movdqu 0*16(%[src]), %%xmm1\n\t" + "movdqu 1*16(%[src]), %%xmm2\n\t" + "movdqu 2*16(%[src]), %%xmm3\n\t" + "movdqu 3*16(%[src]), %%xmm4\n\t" + "movdqu 4*16(%[src]), %%xmm8\n\t" + "movdqu 5*16(%[src]), %%xmm9\n\t" + "movdqu 6*16(%[src]), %%xmm10\n\t" + "movdqu 7*16(%[src]), %%xmm11\n\t" + "pxor %%xmm0, %%xmm1\n\t" + "pxor %%xmm0, %%xmm2\n\t" + "pxor %%xmm0, %%xmm3\n\t" + "pxor %%xmm0, %%xmm4\n\t" + "pxor %%xmm0, %%xmm8\n\t" + "pxor %%xmm0, %%xmm9\n\t" + "pxor %%xmm0, %%xmm10\n\t" + "pxor %%xmm0, %%xmm11\n\t" + : /* No output */ + : [src] "r" (src), + [key] "r" (key) + : "memory"); + + if (encrypt) + { + do_aesni_enc_vec8 (ctx); + asm volatile + ("aesenclast %%xmm0, %%xmm1\n\t" + "aesenclast %%xmm0, %%xmm2\n\t" + "aesenclast %%xmm0, %%xmm3\n\t" + "aesenclast %%xmm0, %%xmm4\n\t" + "aesenclast %%xmm0, %%xmm8\n\t" + "aesenclast %%xmm0, %%xmm9\n\t" + "aesenclast %%xmm0, %%xmm10\n\t" + "aesenclast %%xmm0, %%xmm11\n\t" + ::: "memory" ); + } + else + { + do_aesni_dec_vec8 (ctx); + asm volatile + ("aesdeclast %%xmm0, %%xmm1\n\t" + "aesdeclast %%xmm0, %%xmm2\n\t" + "aesdeclast %%xmm0, %%xmm3\n\t" + "aesdeclast %%xmm0, %%xmm4\n\t" + "aesdeclast %%xmm0, %%xmm8\n\t" + "aesdeclast %%xmm0, %%xmm9\n\t" + "aesdeclast %%xmm0, %%xmm10\n\t" + "aesdeclast %%xmm0, %%xmm11\n\t" + ::: "memory" ); + } + + asm volatile + ("movdqu %%xmm1, 0*16(%[dst])\n\t" + "movdqu %%xmm2, 1*16(%[dst])\n\t" + "movdqu %%xmm3, 2*16(%[dst])\n\t" + "movdqu %%xmm4, 3*16(%[dst])\n\t" + "movdqu %%xmm8, 4*16(%[dst])\n\t" + "movdqu %%xmm9, 5*16(%[dst])\n\t" + "movdqu %%xmm10, 6*16(%[dst])\n\t" + "movdqu %%xmm11, 7*16(%[dst])\n\t" + : /* No output */ + : [dst] "r" (dst) + : "memory"); + + dst += 8*BLOCKSIZE; + src += 8*BLOCKSIZE; + } + + aesni_cleanup_8_15(); + } +#endif + + for (; nblocks >= 4; nblocks -= 4) + { + asm volatile + 
("movdqu 0*16(%[src]), %%xmm1\n\t" + "movdqu 1*16(%[src]), %%xmm2\n\t" + "movdqu 2*16(%[src]), %%xmm3\n\t" + "movdqu 3*16(%[src]), %%xmm4\n\t" + : /* No output */ + : [src] "r" (src) + : "memory"); + + if (encrypt) + do_aesni_enc_vec4 (ctx); + else + do_aesni_dec_vec4 (ctx); + + asm volatile + ("movdqu %%xmm1, 0*16(%[dst])\n\t" + "movdqu %%xmm2, 1*16(%[dst])\n\t" + "movdqu %%xmm3, 2*16(%[dst])\n\t" + "movdqu %%xmm4, 3*16(%[dst])\n\t" + : /* No output */ + : [dst] "r" (dst) + : "memory"); + + dst += 4*BLOCKSIZE; + src += 4*BLOCKSIZE; + } + + for (; nblocks; nblocks--) + { + asm volatile ("movdqu %[src], %%xmm0\n\t" + : + : [src] "m" (*src) + : "memory" ); + + if (encrypt) + do_aesni_enc (ctx); + else + do_aesni_dec (ctx); + + asm volatile ("movdqu %%xmm0, %[dst]\n\t" + : [dst] "=m" (*dst) + : + : "memory" ); + + dst += BLOCKSIZE; + src += BLOCKSIZE; + } + + aesni_cleanup (); + aesni_cleanup_2_7 (); +} + + void ASM_FUNC_ATTR _gcry_aes_aesni_cfb_enc (RIJNDAEL_context *ctx, unsigned char *iv, unsigned char *outbuf, const unsigned char *inbuf, size_t nblocks) { unsigned int rounds = ctx->rounds; aesni_prepare_2_7_variable; aesni_prepare (); aesni_prepare_2_7(); asm volatile ("movdqu %[iv], %%xmm0\n\t" : /* No output */ : [iv] "m" (*iv) : "memory" ); asm volatile ("movdqa %[key0], %%xmm2\n\t" /* xmm2 = key[0] */ "movdqa %[keylast], %%xmm4\n\t" /* xmm4 = key[last] */ "movdqa %%xmm0, %%xmm3\n" "pxor %%xmm2, %%xmm4\n\t" /* xmm4 = key[0] ^ key[last] */ "pxor %%xmm2, %%xmm0\n\t" /* xmm0 = IV ^ key[0] */ : /* No output */ : [key0] "m" (ctx->keyschenc[0][0][0]), [keylast] "m" (ctx->keyschenc[rounds][0][0]) : "memory" ); for ( ;nblocks; nblocks-- ) { asm volatile ("movdqu %[inbuf], %%xmm5\n\t" "movdqa %%xmm2, %%xmm3\n\t" "pxor %%xmm4, %%xmm5\n\t" /* xmm5 = input ^ key[last] ^ key[0] */ : : [inbuf] "m" (*inbuf) : "memory" ); #define aesenc_xmm1_xmm0 ".byte 0x66, 0x0f, 0x38, 0xdc, 0xc1\n\t" #define aesenclast_xmm1_xmm0 ".byte 0x66, 0x0f, 0x38, 0xdd, 0xc1\n\t" #define 
aesenclast_xmm5_xmm0 ".byte 0x66, 0x0f, 0x38, 0xdd, 0xc5\n\t" asm volatile ("movdqa 0x10(%[key]), %%xmm1\n\t" aesenc_xmm1_xmm0 "movdqa 0x20(%[key]), %%xmm1\n\t" aesenc_xmm1_xmm0 "movdqa 0x30(%[key]), %%xmm1\n\t" aesenc_xmm1_xmm0 "movdqa 0x40(%[key]), %%xmm1\n\t" aesenc_xmm1_xmm0 "movdqa 0x50(%[key]), %%xmm1\n\t" aesenc_xmm1_xmm0 "movdqa 0x60(%[key]), %%xmm1\n\t" aesenc_xmm1_xmm0 "movdqa 0x70(%[key]), %%xmm1\n\t" aesenc_xmm1_xmm0 "movdqa 0x80(%[key]), %%xmm1\n\t" aesenc_xmm1_xmm0 "movdqa 0x90(%[key]), %%xmm1\n\t" aesenc_xmm1_xmm0 "cmpl $10, %[rounds]\n\t" "jz .Lenclast%=\n\t" "movdqa 0xa0(%[key]), %%xmm1\n\t" aesenc_xmm1_xmm0 "movdqa 0xb0(%[key]), %%xmm1\n\t" aesenc_xmm1_xmm0 "cmpl $12, %[rounds]\n\t" "jz .Lenclast%=\n\t" "movdqa 0xc0(%[key]), %%xmm1\n\t" aesenc_xmm1_xmm0 "movdqa 0xd0(%[key]), %%xmm1\n\t" aesenc_xmm1_xmm0 ".Lenclast%=:\n\t" aesenclast_xmm5_xmm0 : : [key] "r" (ctx->keyschenc), [rounds] "r" (rounds) : "cc", "memory"); #undef aesenc_xmm1_xmm0 #undef aesenclast_xmm1_xmm0 #undef aesenclast_xmm5_xmm0 asm volatile ("pxor %%xmm0, %%xmm3\n\t" "movdqu %%xmm3, %[outbuf]\n\t" : [outbuf] "=m" (*outbuf) : [inbuf] "m" (*inbuf) : "memory" ); outbuf += BLOCKSIZE; inbuf += BLOCKSIZE; } asm volatile ("movdqu %%xmm3, %[iv]\n\t" : [iv] "=m" (*iv) : : "memory" ); aesni_cleanup (); aesni_cleanup_2_7 (); } void ASM_FUNC_ATTR _gcry_aes_aesni_cbc_enc (RIJNDAEL_context *ctx, unsigned char *iv, unsigned char *outbuf, const unsigned char *inbuf, size_t nblocks, int cbc_mac) { unsigned int rounds = ctx->rounds; aesni_prepare_2_7_variable; if (nblocks == 0) /* CMAC may call with nblocks 0. 
*/ return; aesni_prepare (); aesni_prepare_2_7(); asm volatile ("movdqu %[iv], %%xmm0\n\t" : /* No output */ : [iv] "m" (*iv) : "memory" ); asm volatile ("movdqa %[key0], %%xmm2\n\t" /* xmm2 = key[0] */ "movdqa %[keylast], %%xmm3\n\t" /* xmm3 = key[last] */ "pxor %%xmm2, %%xmm0\n\t" /* xmm0 = IV ^ key[0] */ "pxor %%xmm3, %%xmm2\n\t" /* xmm2 = key[0] ^ key[last] */ : /* No output */ : [key0] "m" (ctx->keyschenc[0][0][0]), [keylast] "m" (ctx->keyschenc[rounds][0][0]) : "memory" ); asm volatile ("movdqu %[inbuf], %%xmm4\n\t" "pxor %%xmm4, %%xmm0\n\t" /* xmm0 = IV ^ key[0] ^ input */ : : [inbuf] "m" (*inbuf) : "memory" ); inbuf += BLOCKSIZE; for ( ;nblocks; ) { if (--nblocks) { asm volatile ("movdqu %[inbuf], %%xmm4\n\t" /* xmm4 = IV ^ key[0] ^ key[last] ^ input: */ "pxor %%xmm2, %%xmm4\n\t" : : [inbuf] "m" (*inbuf) : "memory" ); inbuf += BLOCKSIZE; } #define aesenc_xmm1_xmm0 ".byte 0x66, 0x0f, 0x38, 0xdc, 0xc1\n\t" #define aesenclast_xmm4_xmm0 ".byte 0x66, 0x0f, 0x38, 0xdd, 0xc4\n\t" #define aesenclast_xmm3_xmm5 ".byte 0x66, 0x0f, 0x38, 0xdd, 0xeb\n\t" asm volatile ("movdqa 0x10(%[key]), %%xmm1\n\t" aesenc_xmm1_xmm0 "movdqa 0x20(%[key]), %%xmm1\n\t" aesenc_xmm1_xmm0 "movdqa 0x30(%[key]), %%xmm1\n\t" aesenc_xmm1_xmm0 "movdqa 0x40(%[key]), %%xmm1\n\t" aesenc_xmm1_xmm0 "movdqa 0x50(%[key]), %%xmm1\n\t" aesenc_xmm1_xmm0 "movdqa 0x60(%[key]), %%xmm1\n\t" aesenc_xmm1_xmm0 "movdqa 0x70(%[key]), %%xmm1\n\t" aesenc_xmm1_xmm0 "movdqa 0x80(%[key]), %%xmm1\n\t" aesenc_xmm1_xmm0 "movdqa 0x90(%[key]), %%xmm1\n\t" aesenc_xmm1_xmm0 "cmpl $10, %[rounds]\n\t" "jz .Lenclast%=\n\t" "movdqa 0xa0(%[key]), %%xmm1\n\t" aesenc_xmm1_xmm0 "movdqa 0xb0(%[key]), %%xmm1\n\t" aesenc_xmm1_xmm0 "cmpl $12, %[rounds]\n\t" "jz .Lenclast%=\n\t" "movdqa 0xc0(%[key]), %%xmm1\n\t" aesenc_xmm1_xmm0 "movdqa 0xd0(%[key]), %%xmm1\n\t" aesenc_xmm1_xmm0 ".Lenclast%=:\n\t" "movdqa %%xmm0, %%xmm5\n" aesenclast_xmm4_xmm0 /* xmm0 = IV ^ key[0] */ aesenclast_xmm3_xmm5 /* xmm5 = IV */ : : [key] "r" (ctx->keyschenc), 
[rounds] "r" (rounds) : "cc", "memory"); #undef aesenc_xmm1_xmm0 #undef aesenclast_xmm4_xmm0 #undef aesenclast_xmm3_xmm5 asm volatile ("movdqu %%xmm5, %[outbuf]\n\t" : [outbuf] "=m" (*outbuf) : : "memory" ); outbuf += -(!cbc_mac) & BLOCKSIZE; } asm volatile ("movdqu %%xmm5, %[iv]\n\t" : [iv] "=m" (*iv) : : "memory" ); aesni_cleanup (); aesni_cleanup_2_7 (); } void ASM_FUNC_ATTR _gcry_aes_aesni_ctr_enc (RIJNDAEL_context *ctx, unsigned char *ctr, unsigned char *outbuf, const unsigned char *inbuf, size_t nblocks) { static const unsigned char be_mask[16] __attribute__ ((aligned (16))) = { 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 }; aesni_prepare_2_7_variable; aesni_prepare (); aesni_prepare_2_7(); asm volatile ("movdqa %[mask], %%xmm6\n\t" /* Preload mask */ "movdqa %[ctr], %%xmm5\n\t" /* Preload CTR */ : /* No output */ : [mask] "m" (*be_mask), [ctr] "m" (*ctr) : "memory"); #ifdef __x86_64__ if (nblocks >= 8) { aesni_prepare_8_15_variable; aesni_prepare_8_15(); for ( ;nblocks >= 8 ; nblocks -= 8 ) { do_aesni_ctr_8 (ctx, ctr, outbuf, inbuf); outbuf += 8*BLOCKSIZE; inbuf += 8*BLOCKSIZE; } aesni_cleanup_8_15(); } #endif for ( ;nblocks >= 4 ; nblocks -= 4 ) { do_aesni_ctr_4 (ctx, ctr, outbuf, inbuf); outbuf += 4*BLOCKSIZE; inbuf += 4*BLOCKSIZE; } for ( ;nblocks; nblocks-- ) { do_aesni_ctr (ctx, ctr, outbuf, inbuf); outbuf += BLOCKSIZE; inbuf += BLOCKSIZE; } aesni_cleanup (); aesni_cleanup_2_7 (); } void ASM_FUNC_ATTR _gcry_aes_aesni_ctr32le_enc (RIJNDAEL_context *ctx, unsigned char *ctr, unsigned char *outbuf, const unsigned char *inbuf, size_t nblocks) { static const byte le_addd_const[8][16] __attribute__ ((aligned (16))) = { { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 7, 0, 0, 0, 0, 0, 0, 0, 
        0, 0, 0, 0, 0, 0, 0, 0 },
      { 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
    };
  aesni_prepare_2_7_variable;

  aesni_prepare ();
  aesni_prepare_2_7();

  asm volatile ("movdqa %[ctr], %%xmm5\n\t" /* Preload CTR */
                : /* No output */
                : [ctr] "m" (*ctr)
                : "memory");

#ifdef __x86_64__
  if (nblocks >= 8)
    {
      aesni_prepare_8_15_variable;

      aesni_prepare_8_15();

      for ( ;nblocks >= 8 ; nblocks -= 8 )
        {
          /* Materialize ctr, ctr+1 .. ctr+7 in xmm1..4,8..11, xor each
             with key[0], preload six plaintext blocks, and step the
             running counter by 8.  */
          asm volatile ("movdqa (%[key]), %%xmm0\n\t"
                        "movdqa %%xmm5, %%xmm1\n\t" /* load input blocks */
                        "movdqa %%xmm5, %%xmm2\n\t"
                        "movdqa %%xmm5, %%xmm3\n\t"
                        "movdqa %%xmm5, %%xmm4\n\t"
                        "movdqa %%xmm5, %%xmm8\n\t"
                        "movdqa %%xmm5, %%xmm9\n\t"
                        "movdqa %%xmm5, %%xmm10\n\t"
                        "movdqa %%xmm5, %%xmm11\n\t"
                        "paddd 0*16(%[addd]), %%xmm2\n\t"
                        "paddd 1*16(%[addd]), %%xmm3\n\t"
                        "paddd 2*16(%[addd]), %%xmm4\n\t"
                        "paddd 3*16(%[addd]), %%xmm8\n\t"
                        "paddd 4*16(%[addd]), %%xmm9\n\t"
                        "paddd 5*16(%[addd]), %%xmm10\n\t"
                        "paddd 6*16(%[addd]), %%xmm11\n\t"
                        "pxor %%xmm0, %%xmm1\n\t"  /* xmm1 ^= key[0] */
                        "pxor %%xmm0, %%xmm2\n\t"  /* xmm2 ^= key[0] */
                        "pxor %%xmm0, %%xmm3\n\t"  /* xmm3 ^= key[0] */
                        "pxor %%xmm0, %%xmm4\n\t"  /* xmm4 ^= key[0] */
                        "pxor %%xmm0, %%xmm8\n\t"  /* xmm8 ^= key[0] */
                        "pxor %%xmm0, %%xmm9\n\t"  /* xmm9 ^= key[0] */
                        "pxor %%xmm0, %%xmm10\n\t" /* xmm10 ^= key[0] */
                        "pxor %%xmm0, %%xmm11\n\t" /* xmm11 ^= key[0] */
                        "movdqu 0*16(%[inbuf]), %%xmm6\n\t"
                        "movdqu 1*16(%[inbuf]), %%xmm7\n\t"
                        "movdqu 2*16(%[inbuf]), %%xmm12\n\t"
                        "movdqu 3*16(%[inbuf]), %%xmm13\n\t"
                        "movdqu 4*16(%[inbuf]), %%xmm14\n\t"
                        "movdqu 5*16(%[inbuf]), %%xmm15\n\t"
                        "paddd 7*16(%[addd]), %%xmm5\n\t"
                        : /* No output */
                        : [addd] "r" (&le_addd_const[0][0]),
                          [inbuf] "r" (inbuf),
                          [key] "r" (ctx->keyschenc)
                        : "memory");

          do_aesni_enc_vec8 (ctx);

          /* Fold key[last] into the plaintext and let aesenclast apply
             it, completing the keystream xor in one step.  */
          asm volatile ("pxor %%xmm0, %%xmm6\n\t"
                        "pxor %%xmm0, %%xmm7\n\t"
                        "pxor %%xmm0, %%xmm12\n\t"
                        "pxor %%xmm0, %%xmm13\n\t"
                        "pxor %%xmm0, %%xmm14\n\t"
                        "pxor %%xmm0, %%xmm15\n\t"
                        "aesenclast %%xmm6, %%xmm1\n\t"
                        "aesenclast %%xmm7, %%xmm2\n\t"
                        "movdqu 6*16(%[inbuf]), %%xmm6\n\t"
                        "movdqu 7*16(%[inbuf]), %%xmm7\n\t"
                        "aesenclast %%xmm12, %%xmm3\n\t"
                        "aesenclast %%xmm13, %%xmm4\n\t"
                        "pxor %%xmm0, %%xmm6\n\t"
                        "pxor %%xmm0, %%xmm7\n\t"
                        "aesenclast %%xmm14, %%xmm8\n\t"
                        "aesenclast %%xmm15, %%xmm9\n\t"
                        "aesenclast %%xmm6, %%xmm10\n\t"
                        "aesenclast %%xmm7, %%xmm11\n\t"
                        "movdqu %%xmm1, 0*16(%[outbuf])\n\t"
                        "movdqu %%xmm2, 1*16(%[outbuf])\n\t"
                        "movdqu %%xmm3, 2*16(%[outbuf])\n\t"
                        "movdqu %%xmm4, 3*16(%[outbuf])\n\t"
                        "movdqu %%xmm8, 4*16(%[outbuf])\n\t"
                        "movdqu %%xmm9, 5*16(%[outbuf])\n\t"
                        "movdqu %%xmm10, 6*16(%[outbuf])\n\t"
                        "movdqu %%xmm11, 7*16(%[outbuf])\n\t"
                        : /* No output */
                        : [inbuf] "r" (inbuf),
                          [outbuf] "r" (outbuf)
                        : "memory");

          outbuf += 8*BLOCKSIZE;
          inbuf += 8*BLOCKSIZE;
        }

      aesni_cleanup_8_15();
    }
#endif

  for ( ;nblocks >= 4 ; nblocks -= 4 )
    {
      asm volatile ("movdqa %%xmm5, %%xmm1\n\t" /* load input blocks */
                    "movdqa %%xmm5, %%xmm2\n\t"
                    "movdqa %%xmm5, %%xmm3\n\t"
                    "movdqa %%xmm5, %%xmm4\n\t"
                    "paddd 0*16(%[addd]), %%xmm2\n\t"
                    "paddd 1*16(%[addd]), %%xmm3\n\t"
                    "paddd 2*16(%[addd]), %%xmm4\n\t"
                    "paddd 3*16(%[addd]), %%xmm5\n\t"
                    "movdqu 0*16(%[inbuf]), %%xmm6\n\t"
                    "movdqu 1*16(%[inbuf]), %%xmm7\n\t"
                    : /* No output */
                    : [addd] "r" (&le_addd_const[0][0]),
                      [inbuf] "r" (inbuf)
                    : "memory");

      do_aesni_enc_vec4 (ctx);

      asm volatile ("pxor %%xmm6, %%xmm1\n\t"
                    "pxor %%xmm7, %%xmm2\n\t"
                    "movdqu 2*16(%[inbuf]), %%xmm6\n\t"
                    "movdqu 3*16(%[inbuf]), %%xmm7\n\t"
                    "movdqu %%xmm1, 0*16(%[outbuf])\n\t"
                    "movdqu %%xmm2, 1*16(%[outbuf])\n\t"
                    "pxor %%xmm6, %%xmm3\n\t"
                    "pxor %%xmm7, %%xmm4\n\t"
                    "movdqu %%xmm3, 2*16(%[outbuf])\n\t"
                    "movdqu %%xmm4, 3*16(%[outbuf])\n\t"
                    : /* No output */
                    : [inbuf] "r" (inbuf),
                      [outbuf] "r" (outbuf)
                    : "memory");

      outbuf += 4*BLOCKSIZE;
      inbuf += 4*BLOCKSIZE;
    }

  for ( ;nblocks; nblocks-- )
    {
      asm volatile ("movdqa %%xmm5, %%xmm0\n\t"
                    "paddd %[add_one], %%xmm5\n\t"
                    "movdqu %[inbuf], %%xmm6\n\t"
                    :
                    : [add_one] "m" (*le_addd_const[0]),
                      [inbuf] "m" (*inbuf)
                    : "memory" );

      do_aesni_enc (ctx);

      asm volatile ("pxor %%xmm0, %%xmm6\n\t"
                    "movdqu %%xmm6, %[outbuf]\n\t"
                    : [outbuf] "=m" (*outbuf)
                    :
                    : "memory" );

      outbuf += BLOCKSIZE;
      inbuf += BLOCKSIZE;
    }

  /* Write back the advanced counter.  */
  asm volatile ("movdqa %%xmm5, %[ctr]\n\t"
                : [ctr] "=m" (*ctr)
                :
                : "memory" );

  aesni_cleanup ();
  aesni_cleanup_2_7 ();
}


/* Decrypt one 16-byte block from *src into *dst.  Returns 0 (no extra
   stack burn needed).  Assumes the decryption key schedule is already
   prepared — NOTE(review): no decryption_prepared check here, unlike
   _gcry_aes_aesni_cbc_dec below; presumably guaranteed by the caller.  */
unsigned int ASM_FUNC_ATTR
_gcry_aes_aesni_decrypt (const RIJNDAEL_context *ctx, unsigned char *dst,
                         const unsigned char *src)
{
  aesni_prepare ();
  asm volatile ("movdqu %[src], %%xmm0\n\t"
                :
                : [src] "m" (*src)
                : "memory" );
  do_aesni_dec (ctx);
  asm volatile ("movdqu %%xmm0, %[dst]\n\t"
                : [dst] "=m" (*dst)
                :
                : "memory" );
  aesni_cleanup ();
  return 0;
}


/* CFB-mode decryption.  Decryption only ever ENCRYPTS the previous
   ciphertext block (the IV chain), so it parallelizes; xmm6 carries the
   IV between iterations.  */
void ASM_FUNC_ATTR
_gcry_aes_aesni_cfb_dec (RIJNDAEL_context *ctx, unsigned char *iv,
                         unsigned char *outbuf, const unsigned char *inbuf,
                         size_t nblocks)
{
  aesni_prepare_2_7_variable;

  aesni_prepare ();
  aesni_prepare_2_7();

  asm volatile ("movdqu %[iv], %%xmm6\n\t"
                : /* No output */
                : [iv] "m" (*iv)
                : "memory" );

  /* CFB decryption can be parallelized */

#ifdef __x86_64__
  if (nblocks >= 8)
    {
      aesni_prepare_8_15_variable;

      aesni_prepare_8_15();

      for ( ;nblocks >= 8; nblocks -= 8)
        {
          /* xmm1 = IV, xmm2..xmm11 = the 8 ciphertext blocks (the last
             one also becomes the next IV); xmm12..15 save the first
             four ciphertexts for the post-encryption xor.  */
          asm volatile ("movdqa (%[key]), %%xmm0\n\t"
                        "movdqu %%xmm6, %%xmm1\n\t" /* load input blocks */
                        "movdqu 0*16(%[inbuf]), %%xmm2\n\t"
                        "movdqu 1*16(%[inbuf]), %%xmm3\n\t"
                        "movdqu 2*16(%[inbuf]), %%xmm4\n\t"
                        "movdqu 3*16(%[inbuf]), %%xmm8\n\t"
                        "movdqu 4*16(%[inbuf]), %%xmm9\n\t"
                        "movdqu 5*16(%[inbuf]), %%xmm10\n\t"
                        "movdqu 6*16(%[inbuf]), %%xmm11\n\t"
                        "movdqu 7*16(%[inbuf]), %%xmm6\n\t" /* update IV */
                        "movdqa %%xmm2, %%xmm12\n\t"
                        "movdqa %%xmm3, %%xmm13\n\t"
                        "movdqa %%xmm4, %%xmm14\n\t"
                        "movdqa %%xmm8, %%xmm15\n\t"
                        "pxor %%xmm0, %%xmm1\n\t"  /* xmm1 ^= key[0] */
                        "pxor %%xmm0, %%xmm2\n\t"  /* xmm2 ^= key[0] */
                        "pxor %%xmm0, %%xmm3\n\t"  /* xmm3 ^= key[0] */
                        "pxor %%xmm0, %%xmm4\n\t"  /* xmm4 ^= key[0] */
                        "pxor %%xmm0, %%xmm8\n\t"  /* xmm8 ^= key[0] */
                        "pxor %%xmm0, %%xmm9\n\t"  /* xmm9 ^= key[0] */
                        "pxor %%xmm0, %%xmm10\n\t" /* xmm10 ^= key[0] */
                        "pxor %%xmm0, %%xmm11\n\t" /* xmm11 ^= key[0] */
                        : /* No output */
                        : [inbuf] "r" (inbuf),
                          [key] "r" (ctx->keyschenc)
                        : "memory");
do_aesni_enc_vec8 (ctx); asm volatile ( "pxor %%xmm0, %%xmm12\n\t" "pxor %%xmm0, %%xmm13\n\t" "pxor %%xmm0, %%xmm14\n\t" "pxor %%xmm0, %%xmm15\n\t" "aesenclast %%xmm12, %%xmm1\n\t" "aesenclast %%xmm13, %%xmm2\n\t" "aesenclast %%xmm14, %%xmm3\n\t" "aesenclast %%xmm15, %%xmm4\n\t" "movdqu 4*16(%[inbuf]), %%xmm12\n\t" "movdqu 5*16(%[inbuf]), %%xmm13\n\t" "movdqu 6*16(%[inbuf]), %%xmm14\n\t" "movdqu 7*16(%[inbuf]), %%xmm15\n\t" "pxor %%xmm0, %%xmm12\n\t" "pxor %%xmm0, %%xmm13\n\t" "pxor %%xmm0, %%xmm14\n\t" "pxor %%xmm0, %%xmm15\n\t" "aesenclast %%xmm12, %%xmm8\n\t" "aesenclast %%xmm13, %%xmm9\n\t" "aesenclast %%xmm14, %%xmm10\n\t" "aesenclast %%xmm15, %%xmm11\n\t" "movdqu %%xmm1, 0*16(%[outbuf])\n\t" "movdqu %%xmm2, 1*16(%[outbuf])\n\t" "movdqu %%xmm3, 2*16(%[outbuf])\n\t" "movdqu %%xmm4, 3*16(%[outbuf])\n\t" "movdqu %%xmm8, 4*16(%[outbuf])\n\t" "movdqu %%xmm9, 5*16(%[outbuf])\n\t" "movdqu %%xmm10, 6*16(%[outbuf])\n\t" "movdqu %%xmm11, 7*16(%[outbuf])\n\t" : /* No output */ : [inbuf] "r" (inbuf), [outbuf] "r" (outbuf) : "memory"); outbuf += 8*BLOCKSIZE; inbuf += 8*BLOCKSIZE; } aesni_cleanup_8_15(); } #endif for ( ;nblocks >= 4; nblocks -= 4) { asm volatile ("movdqu %%xmm6, %%xmm1\n\t" /* load input blocks */ "movdqu 0*16(%[inbuf]), %%xmm2\n\t" "movdqu 1*16(%[inbuf]), %%xmm3\n\t" "movdqu 2*16(%[inbuf]), %%xmm4\n\t" "movdqu 3*16(%[inbuf]), %%xmm6\n\t" /* update IV */ : /* No output */ : [inbuf] "r" (inbuf) : "memory"); do_aesni_enc_vec4 (ctx); asm volatile ("movdqu 0*16(%[inbuf]), %%xmm5\n\t" "pxor %%xmm5, %%xmm1\n\t" "movdqu %%xmm1, 0*16(%[outbuf])\n\t" "movdqu 1*16(%[inbuf]), %%xmm5\n\t" "pxor %%xmm5, %%xmm2\n\t" "movdqu %%xmm2, 1*16(%[outbuf])\n\t" "movdqu 2*16(%[inbuf]), %%xmm5\n\t" "pxor %%xmm5, %%xmm3\n\t" "movdqu %%xmm3, 2*16(%[outbuf])\n\t" "movdqu 3*16(%[inbuf]), %%xmm5\n\t" "pxor %%xmm5, %%xmm4\n\t" "movdqu %%xmm4, 3*16(%[outbuf])\n\t" : /* No output */ : [inbuf] "r" (inbuf), [outbuf] "r" (outbuf) : "memory"); outbuf += 4*BLOCKSIZE; inbuf += 4*BLOCKSIZE; } asm 
volatile ("movdqu %%xmm6, %%xmm0\n\t" ::: "cc");

  for ( ;nblocks; nblocks-- )
    {
      do_aesni_enc (ctx);

      asm volatile ("movdqa %%xmm0, %%xmm6\n\t"
                    "movdqu %[inbuf], %%xmm0\n\t"
                    "pxor %%xmm0, %%xmm6\n\t"
                    "movdqu %%xmm6, %[outbuf]\n\t"
                    : [outbuf] "=m" (*outbuf)
                    : [inbuf] "m" (*inbuf)
                    : "memory" );

      outbuf += BLOCKSIZE;
      inbuf += BLOCKSIZE;
    }

  /* Write back the chaining value.  */
  asm volatile ("movdqu %%xmm0, %[iv]\n\t"
                : [iv] "=m" (*iv)
                :
                : "memory" );

  aesni_cleanup ();
  aesni_cleanup_2_7 ();
}


/* CBC-mode decryption.  Prepares the decryption key schedule lazily on
   first use; xmm5 carries the IV (previous ciphertext block) between
   iterations and is written back to *iv at the end.  */
void ASM_FUNC_ATTR
_gcry_aes_aesni_cbc_dec (RIJNDAEL_context *ctx, unsigned char *iv,
                         unsigned char *outbuf, const unsigned char *inbuf,
                         size_t nblocks)
{
  aesni_prepare_2_7_variable;

  aesni_prepare ();
  aesni_prepare_2_7();

  if ( !ctx->decryption_prepared )
    {
      do_aesni_prepare_decryption ( ctx );
      ctx->decryption_prepared = 1;
    }

  asm volatile ("movdqu %[iv], %%xmm5\n\t"	/* use xmm5 as fast IV storage */
                : /* No output */
                : [iv] "m" (*iv)
                : "memory");

#ifdef __x86_64__
  if (nblocks >= 8)
    {
      aesni_prepare_8_15_variable;

      aesni_prepare_8_15();

      for ( ;nblocks >= 8 ; nblocks -= 8 )
        {
          asm volatile ("movdqa (%[key]), %%xmm0\n\t"
                        "movdqu 0*16(%[inbuf]), %%xmm1\n\t" /* load input blocks */
                        "movdqu 1*16(%[inbuf]), %%xmm2\n\t"
                        "movdqu 2*16(%[inbuf]), %%xmm3\n\t"
                        "movdqu 3*16(%[inbuf]), %%xmm4\n\t"
                        "movdqu 4*16(%[inbuf]), %%xmm8\n\t"
                        "movdqu 5*16(%[inbuf]), %%xmm9\n\t"
                        "movdqu 6*16(%[inbuf]), %%xmm10\n\t"
                        "movdqu 7*16(%[inbuf]), %%xmm11\n\t"
                        "movdqa %%xmm1, %%xmm12\n\t"
                        "movdqa %%xmm2, %%xmm13\n\t"
                        "movdqa %%xmm3, %%xmm14\n\t"
                        "movdqa %%xmm4, %%xmm15\n\t"
                        "pxor %%xmm0, %%xmm1\n\t"  /* xmm1 ^= key[0] */
                        "pxor %%xmm0, %%xmm2\n\t"  /* xmm2 ^= key[0] */
                        "pxor %%xmm0, %%xmm3\n\t"  /* xmm3 ^= key[0] */
                        "pxor %%xmm0, %%xmm4\n\t"  /* xmm4 ^= key[0] */
                        "pxor %%xmm0, %%xmm8\n\t"  /* xmm8 ^= key[0] */
                        "pxor %%xmm0, %%xmm9\n\t"  /* xmm9 ^= key[0] */
                        "pxor %%xmm0, %%xmm10\n\t" /* xmm10 ^= key[0] */
                        "pxor %%xmm0, %%xmm11\n\t" /* xmm11 ^= key[0] */
                        : /* No output */
                        : [inbuf] "r" (inbuf),
                          [key] "r" (ctx->keyschdec)
                        : "memory");

          do_aesni_dec_vec8 (ctx);

          /* key[last] is folded into the saved ciphertexts/IV so that
             aesdeclast applies both the last round key and the CBC xor
             with the previous ciphertext in one instruction.  */
          asm volatile ("pxor %%xmm0, %%xmm5\n\t"  /* xor IV with key */
                        "pxor %%xmm0, %%xmm12\n\t" /* xor IV with key */
                        "pxor %%xmm0, %%xmm13\n\t" /* xor IV with key */
                        "pxor %%xmm0, %%xmm14\n\t" /* xor IV with key */
                        "pxor %%xmm0, %%xmm15\n\t" /* xor IV with key */
                        "aesdeclast %%xmm5, %%xmm1\n\t"
                        "aesdeclast %%xmm12, %%xmm2\n\t"
                        "aesdeclast %%xmm13, %%xmm3\n\t"
                        "aesdeclast %%xmm14, %%xmm4\n\t"
                        "movdqu 4*16(%[inbuf]), %%xmm12\n\t"
                        "movdqu 5*16(%[inbuf]), %%xmm13\n\t"
                        "movdqu 6*16(%[inbuf]), %%xmm14\n\t"
                        "movdqu 7*16(%[inbuf]), %%xmm5\n\t"
                        "pxor %%xmm0, %%xmm12\n\t" /* xor IV with key */
                        "pxor %%xmm0, %%xmm13\n\t" /* xor IV with key */
                        "pxor %%xmm0, %%xmm14\n\t" /* xor IV with key */
                        "aesdeclast %%xmm15, %%xmm8\n\t"
                        "aesdeclast %%xmm12, %%xmm9\n\t"
                        "aesdeclast %%xmm13, %%xmm10\n\t"
                        "aesdeclast %%xmm14, %%xmm11\n\t"
                        "movdqu %%xmm1, 0*16(%[outbuf])\n\t"
                        "movdqu %%xmm2, 1*16(%[outbuf])\n\t"
                        "movdqu %%xmm3, 2*16(%[outbuf])\n\t"
                        "movdqu %%xmm4, 3*16(%[outbuf])\n\t"
                        "movdqu %%xmm8, 4*16(%[outbuf])\n\t"
                        "movdqu %%xmm9, 5*16(%[outbuf])\n\t"
                        "movdqu %%xmm10, 6*16(%[outbuf])\n\t"
                        "movdqu %%xmm11, 7*16(%[outbuf])\n\t"
                        : /* No output */
                        : [inbuf] "r" (inbuf),
                          [outbuf] "r" (outbuf)
                        : "memory");

          outbuf += 8*BLOCKSIZE;
          inbuf += 8*BLOCKSIZE;
        }

      aesni_cleanup_8_15();
    }
#endif

  for ( ;nblocks >= 4 ; nblocks -= 4 )
    {
      asm volatile ("movdqu 0*16(%[inbuf]), %%xmm1\n\t" /* load input blocks */
                    "movdqu 1*16(%[inbuf]), %%xmm2\n\t"
                    "movdqu 2*16(%[inbuf]), %%xmm3\n\t"
                    "movdqu 3*16(%[inbuf]), %%xmm4\n\t"
                    : /* No output */
                    : [inbuf] "r" (inbuf)
                    : "memory");

      do_aesni_dec_vec4 (ctx);

      asm volatile ("pxor %%xmm5, %%xmm1\n\t"             /* xor IV with output */
                    "movdqu 0*16(%[inbuf]), %%xmm5\n\t"   /* load new IV */
                    "movdqu %%xmm1, 0*16(%[outbuf])\n\t"
                    "pxor %%xmm5, %%xmm2\n\t"             /* xor IV with output */
                    "movdqu 1*16(%[inbuf]), %%xmm5\n\t"   /* load new IV */
                    "movdqu %%xmm2, 1*16(%[outbuf])\n\t"
                    "pxor %%xmm5, %%xmm3\n\t"             /* xor IV with output */
                    "movdqu 2*16(%[inbuf]), %%xmm5\n\t"   /* load new IV */
                    "movdqu %%xmm3, 2*16(%[outbuf])\n\t"
                    "pxor %%xmm5, %%xmm4\n\t"             /* xor IV with output */
                    "movdqu 3*16(%[inbuf]), %%xmm5\n\t"   /* load new IV */
                    "movdqu %%xmm4, 3*16(%[outbuf])\n\t"
                    : /* No output */
                    : [inbuf] "r" (inbuf),
                      [outbuf] "r" (outbuf)
                    : "memory");

      outbuf += 4*BLOCKSIZE;
      inbuf += 4*BLOCKSIZE;
    }

  for ( ;nblocks; nblocks-- )
    {
      asm volatile ("movdqu %[inbuf], %%xmm0\n\t"
                    "movdqa %%xmm0, %%xmm2\n\t"    /* use xmm2 as savebuf */
                    : /* No output */
                    : [inbuf] "m" (*inbuf)
                    : "memory");

      /* uses only xmm0 and xmm1 */
      do_aesni_dec (ctx);

      asm volatile ("pxor %%xmm5, %%xmm0\n\t"	/* xor IV with output */
                    "movdqu %%xmm0, %[outbuf]\n\t"
                    "movdqu %%xmm2, %%xmm5\n\t"	/* store savebuf as new IV */
                    : [outbuf] "=m" (*outbuf)
                    :
                    : "memory");

      outbuf += BLOCKSIZE;
      inbuf += BLOCKSIZE;
    }

  asm volatile ("movdqu %%xmm5, %[iv]\n\t"	/* store IV */
                : /* No output */
                : [iv] "m" (*iv)
                : "memory");

  aesni_cleanup ();
  aesni_cleanup_2_7 ();
}


/* Fold NBLOCKS plaintext blocks into the OCB checksum at c->u_ctr.ctr.
   The checksum is the plain xor of all plaintext blocks; four (or, with
   AVX/AVX2, eight ymm) independent accumulators hide xor latency and
   are folded together at the end.  */
static ASM_FUNC_ATTR_INLINE void
aesni_ocb_checksum (gcry_cipher_hd_t c, const unsigned char *plaintext,
                    size_t nblocks)
{
  RIJNDAEL_context *ctx = (void *)&c->context.c;

  /* Calculate checksum */
  asm volatile ("movdqu %[checksum], %%xmm6\n\t"
                "pxor %%xmm1, %%xmm1\n\t"
                "pxor %%xmm2, %%xmm2\n\t"
                "pxor %%xmm3, %%xmm3\n\t"
                :
                :[checksum] "m" (*c->u_ctr.ctr)
                : "memory" );

  if (0) {}
#if defined(HAVE_GCC_INLINE_ASM_AVX2)
  else if (nblocks >= 16 && ctx->use_avx2)
    {
      /* Use wider 256-bit registers for fast xoring of plaintext. */
      asm volatile ("vzeroupper\n\t"
                    "vpxor %%xmm0, %%xmm0, %%xmm0\n\t"
                    "vpxor %%xmm4, %%xmm4, %%xmm4\n\t"
                    "vpxor %%xmm5, %%xmm5, %%xmm5\n\t"
                    "vpxor %%xmm7, %%xmm7, %%xmm7\n\t"
                    :
                    :
                    : "memory");

      for (;nblocks >= 16; nblocks -= 16)
        {
          asm volatile ("vpxor %[ptr0], %%ymm6, %%ymm6\n\t"
                        "vpxor %[ptr1], %%ymm1, %%ymm1\n\t"
                        "vpxor %[ptr2], %%ymm2, %%ymm2\n\t"
                        "vpxor %[ptr3], %%ymm3, %%ymm3\n\t"
                        :
                        : [ptr0] "m" (*(plaintext + 0 * BLOCKSIZE * 2)),
                          [ptr1] "m" (*(plaintext + 1 * BLOCKSIZE * 2)),
                          [ptr2] "m" (*(plaintext + 2 * BLOCKSIZE * 2)),
                          [ptr3] "m" (*(plaintext + 3 * BLOCKSIZE * 2))
                        : "memory" );
          asm volatile ("vpxor %[ptr4], %%ymm0, %%ymm0\n\t"
                        "vpxor %[ptr5], %%ymm4, %%ymm4\n\t"
                        "vpxor %[ptr6], %%ymm5, %%ymm5\n\t"
                        "vpxor %[ptr7], %%ymm7, %%ymm7\n\t"
                        :
                        : [ptr4] "m" (*(plaintext + 4 * BLOCKSIZE * 2)),
                          [ptr5] "m" (*(plaintext + 5 * BLOCKSIZE * 2)),
                          [ptr6] "m" (*(plaintext + 6 * BLOCKSIZE * 2)),
                          [ptr7] "m" (*(plaintext + 7 * BLOCKSIZE * 2))
                        : "memory" );
          plaintext += BLOCKSIZE * 16;
        }

      /* Fold the eight 256-bit accumulators down to four xmm regs.  */
      asm volatile ("vpxor %%ymm0, %%ymm6, %%ymm6\n\t"
                    "vpxor %%ymm4, %%ymm1, %%ymm1\n\t"
                    "vpxor %%ymm5, %%ymm2, %%ymm2\n\t"
                    "vpxor %%ymm7, %%ymm3, %%ymm3\n\t"
                    "vextracti128 $1, %%ymm6, %%xmm0\n\t"
                    "vextracti128 $1, %%ymm1, %%xmm4\n\t"
                    "vextracti128 $1, %%ymm2, %%xmm5\n\t"
                    "vextracti128 $1, %%ymm3, %%xmm7\n\t"
                    "vpxor %%xmm0, %%xmm6, %%xmm6\n\t"
                    "vpxor %%xmm4, %%xmm1, %%xmm1\n\t"
                    "vpxor %%xmm5, %%xmm2, %%xmm2\n\t"
                    "vpxor %%xmm7, %%xmm3, %%xmm3\n\t"
                    "vzeroupper\n\t"
                    :
                    :
                    : "memory" );
    }
#endif
#if defined(HAVE_GCC_INLINE_ASM_AVX)
  else if (nblocks >= 16 && ctx->use_avx)
    {
      /* Same as AVX2, except using 256-bit floating point instructions. */
      asm volatile ("vzeroupper\n\t"
                    "vxorpd %%xmm0, %%xmm0, %%xmm0\n\t"
                    "vxorpd %%xmm4, %%xmm4, %%xmm4\n\t"
                    "vxorpd %%xmm5, %%xmm5, %%xmm5\n\t"
                    "vxorpd %%xmm7, %%xmm7, %%xmm7\n\t"
                    :
                    :
                    : "memory");

      for (;nblocks >= 16; nblocks -= 16)
        {
          asm volatile ("vxorpd %[ptr0], %%ymm6, %%ymm6\n\t"
                        "vxorpd %[ptr1], %%ymm1, %%ymm1\n\t"
                        "vxorpd %[ptr2], %%ymm2, %%ymm2\n\t"
                        "vxorpd %[ptr3], %%ymm3, %%ymm3\n\t"
                        :
                        : [ptr0] "m" (*(plaintext + 0 * BLOCKSIZE * 2)),
                          [ptr1] "m" (*(plaintext + 1 * BLOCKSIZE * 2)),
                          [ptr2] "m" (*(plaintext + 2 * BLOCKSIZE * 2)),
                          [ptr3] "m" (*(plaintext + 3 * BLOCKSIZE * 2))
                        : "memory" );
          asm volatile ("vxorpd %[ptr4], %%ymm0, %%ymm0\n\t"
                        "vxorpd %[ptr5], %%ymm4, %%ymm4\n\t"
                        "vxorpd %[ptr6], %%ymm5, %%ymm5\n\t"
                        "vxorpd %[ptr7], %%ymm7, %%ymm7\n\t"
                        :
                        : [ptr4] "m" (*(plaintext + 4 * BLOCKSIZE * 2)),
                          [ptr5] "m" (*(plaintext + 5 * BLOCKSIZE * 2)),
                          [ptr6] "m" (*(plaintext + 6 * BLOCKSIZE * 2)),
                          [ptr7] "m" (*(plaintext + 7 * BLOCKSIZE * 2))
                        : "memory" );
          plaintext += BLOCKSIZE * 16;
        }

      asm volatile ("vxorpd %%ymm0, %%ymm6, %%ymm6\n\t"
                    "vxorpd %%ymm4, %%ymm1, %%ymm1\n\t"
                    "vxorpd %%ymm5, %%ymm2, %%ymm2\n\t"
                    "vxorpd %%ymm7, %%ymm3, %%ymm3\n\t"
                    "vextractf128 $1, %%ymm6, %%xmm0\n\t"
                    "vextractf128 $1, %%ymm1, %%xmm4\n\t"
                    "vextractf128 $1, %%ymm2, %%xmm5\n\t"
                    "vextractf128 $1, %%ymm3, %%xmm7\n\t"
                    "vxorpd %%xmm0, %%xmm6, %%xmm6\n\t"
                    "vxorpd %%xmm4, %%xmm1, %%xmm1\n\t"
                    "vxorpd %%xmm5, %%xmm2, %%xmm2\n\t"
                    "vxorpd %%xmm7, %%xmm3, %%xmm3\n\t"
                    "vzeroupper\n\t"
                    :
                    :
                    : "memory" );
    }
#endif

  for (;nblocks >= 4; nblocks -= 4)
    {
      asm volatile ("movdqu %[ptr0], %%xmm0\n\t"
                    "movdqu %[ptr1], %%xmm4\n\t"
                    "movdqu %[ptr2], %%xmm5\n\t"
                    "movdqu %[ptr3], %%xmm7\n\t"
                    "pxor %%xmm0, %%xmm6\n\t"
                    "pxor %%xmm4, %%xmm1\n\t"
                    "pxor %%xmm5, %%xmm2\n\t"
                    "pxor %%xmm7, %%xmm3\n\t"
                    :
                    : [ptr0] "m" (*(plaintext + 0 * BLOCKSIZE)),
                      [ptr1] "m" (*(plaintext + 1 * BLOCKSIZE)),
                      [ptr2] "m" (*(plaintext + 2 * BLOCKSIZE)),
                      [ptr3] "m" (*(plaintext + 3 * BLOCKSIZE))
                    : "memory" );
      plaintext += BLOCKSIZE * 4;
    }

  for (;nblocks
>= 1; nblocks -= 1)
    {
      asm volatile ("movdqu %[ptr0], %%xmm0\n\t"
                    "pxor %%xmm0, %%xmm6\n\t"
                    :
                    : [ptr0] "m" (*(plaintext + 0 * BLOCKSIZE))
                    : "memory" );
      plaintext += BLOCKSIZE;
    }

  /* Fold the four accumulators and store the updated checksum.  */
  asm volatile ("pxor %%xmm1, %%xmm6\n\t"
                "pxor %%xmm2, %%xmm6\n\t"
                "pxor %%xmm3, %%xmm6\n\t"
                "movdqu %%xmm6, %[checksum]\n\t"
                : [checksum] "=m" (*c->u_ctr.ctr)
                :
                : "memory" );
}


/* OCB-mode encryption of NBLOCKS blocks.  xmm5 carries the running
   Offset, xmm7 the running Checksum; both are written back at the end.
   Returns 0.  */
static unsigned int ASM_FUNC_ATTR_NOINLINE
aesni_ocb_enc (gcry_cipher_hd_t c, void *outbuf_arg, const void *inbuf_arg,
               size_t nblocks)
{
  RIJNDAEL_context *ctx = (void *)&c->context.c;
  unsigned char *outbuf = outbuf_arg;
  const unsigned char *inbuf = inbuf_arg;
  u64 n = c->u_mode.ocb.data_nblocks;
  const unsigned char *l;
  byte tmpbuf_store[3 * 16 + 15];
  byte *tmpbuf;
  aesni_prepare_2_7_variable;

  /* Align the scratch buffer to 16 bytes; the empty asm hides the
     provenance from the optimizer.  */
  asm volatile ("" : "=r" (tmpbuf) : "0" (tmpbuf_store) : "memory");
  tmpbuf = tmpbuf + (-(uintptr_t)tmpbuf & 15);

  aesni_prepare ();
  aesni_prepare_2_7 ();

  /* Preload Offset */
  asm volatile ("movdqu %[iv], %%xmm5\n\t"
                "movdqu %[ctr], %%xmm7\n\t"
                : /* No output */
                : [iv] "m" (*c->u_iv.iv),
                  [ctr] "m" (*c->u_ctr.ctr)
                : "memory" );

  /* Consume blocks until the block counter is 4-aligned, so the main
     loops can use the precomputed L0L1 combinations.  */
  for ( ;nblocks && n % 4; nblocks-- )
    {
      l = aes_ocb_get_l(c, ++n);

      /* Checksum_i = Checksum_{i-1} xor P_i  */
      /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
      /* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i)  */
      asm volatile ("movdqu %[l],     %%xmm1\n\t"
                    "movdqu %[inbuf], %%xmm0\n\t"
                    "pxor   %%xmm1,   %%xmm5\n\t"
                    "pxor   %%xmm0,   %%xmm7\n\t"
                    "pxor   %%xmm5,   %%xmm0\n\t"
                    :
                    : [l] "m" (*l),
                      [inbuf] "m" (*inbuf)
                    : "memory" );

      do_aesni_enc (ctx);

      asm volatile ("pxor   %%xmm5, %%xmm0\n\t"
                    "movdqu %%xmm0, %[outbuf]\n\t"
                    : [outbuf] "=m" (*outbuf)
                    :
                    : "memory" );

      inbuf += BLOCKSIZE;
      outbuf += BLOCKSIZE;
    }

#ifdef __x86_64__
  if (nblocks >= 8)
    {
      unsigned char last_xor_first_key_store[16 + 15];
      unsigned char *lxf_key;
      aesni_prepare_8_15_variable;

      asm volatile (""
                    : "=r" (lxf_key)
                    : "0" (last_xor_first_key_store)
                    : "memory");
      lxf_key = lxf_key + (-(uintptr_t)lxf_key & 15);

      aesni_prepare_8_15();

      /* Cache key[0] ^ key[last]; offsets pre-xored with it let the
         first round's pxor and aesenclast absorb the whitening keys.  */
      asm volatile ("movdqu %[l0], %%xmm6\n\t"
                    "movdqa %[last_key], %%xmm0\n\t"
                    "pxor %[first_key], %%xmm5\n\t"
                    "pxor %[first_key], %%xmm0\n\t"
                    "movdqa %%xmm0, %[lxfkey]\n\t"
                    : [lxfkey] "=m" (*lxf_key)
                    : [l0] "m" (*c->u_mode.ocb.L[0]),
                      [last_key] "m" (ctx->keyschenc[ctx->rounds][0][0]),
                      [first_key] "m" (ctx->keyschenc[0][0][0])
                    : "memory" );

      for ( ;nblocks >= 8 ; nblocks -= 8 )
        {
          n += 4;
          l = aes_ocb_get_l(c, n);

          asm volatile ("movdqu %[l0l1], %%xmm10\n\t"
                        "movdqu %[l1],   %%xmm11\n\t"
                        "movdqu %[l3],   %%xmm15\n\t"
                        :
                        : [l0l1] "m" (*c->u_mode.ocb.L0L1),
                          [l1] "m" (*c->u_mode.ocb.L[1]),
                          [l3] "m" (*l)
                        : "memory" );

          n += 4;
          l = aes_ocb_get_l(c, n);

          /* Checksum_i = Checksum_{i-1} xor P_i  */
          /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
          /* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i)  */
          asm volatile ("movdqu %[inbuf0], %%xmm1\n\t"
                        "movdqu %[inbuf1], %%xmm2\n\t"
                        "movdqu %[inbuf2], %%xmm3\n\t"
                        :
                        : [inbuf0] "m" (*(inbuf + 0 * BLOCKSIZE)),
                          [inbuf1] "m" (*(inbuf + 1 * BLOCKSIZE)),
                          [inbuf2] "m" (*(inbuf + 2 * BLOCKSIZE))
                        : "memory" );
          asm volatile ("movdqu %[inbuf3], %%xmm4\n\t"
                        "movdqu %[inbuf4], %%xmm8\n\t"
                        "movdqu %[inbuf5], %%xmm9\n\t"
                        :
                        : [inbuf3] "m" (*(inbuf + 3 * BLOCKSIZE)),
                          [inbuf4] "m" (*(inbuf + 4 * BLOCKSIZE)),
                          [inbuf5] "m" (*(inbuf + 5 * BLOCKSIZE))
                        : "memory" );
          /* Compute the eight per-block offsets (xmm12..15 and tmpbuf),
             accumulate the checksum in xmm7, and whiten each input.  */
          asm volatile ("movdqa %[lxfkey], %%xmm0\n\t"
                        "movdqa %%xmm6,    %%xmm12\n\t"
                        "pxor   %%xmm5,    %%xmm12\n\t"
                        "pxor   %%xmm1,    %%xmm7\n\t"
                        "pxor   %%xmm12,   %%xmm1\n\t"
                        "pxor   %%xmm0,    %%xmm12\n\t"
                        "movdqa %%xmm10,   %%xmm13\n\t"
                        "pxor   %%xmm5,    %%xmm13\n\t"
                        "pxor   %%xmm2,    %%xmm7\n\t"
                        "pxor   %%xmm13,   %%xmm2\n\t"
                        "pxor   %%xmm0,    %%xmm13\n\t"
                        "movdqa %%xmm11,   %%xmm14\n\t"
                        "pxor   %%xmm5,    %%xmm14\n\t"
                        "pxor   %%xmm3,    %%xmm7\n\t"
                        "pxor   %%xmm14,   %%xmm3\n\t"
                        "pxor   %%xmm0,    %%xmm14\n\t"
                        "pxor   %%xmm11,   %%xmm5\n\t"
                        "pxor   %%xmm15,   %%xmm5\n\t"
                        "pxor   %%xmm4,    %%xmm7\n\t"
                        "pxor   %%xmm5,    %%xmm4\n\t"
                        "movdqa %%xmm5,    %%xmm15\n\t"
                        "pxor   %%xmm0,    %%xmm15\n\t"
                        "movdqa %%xmm5,    %%xmm0\n\t"
                        "pxor   %%xmm6,    %%xmm0\n\t"
                        "pxor   %%xmm8,    %%xmm7\n\t"
                        "pxor   %%xmm0,    %%xmm8\n\t"
                        "pxor   %[lxfkey], %%xmm0\n\t"
                        "movdqa %%xmm0,    %[tmpbuf0]\n\t"
                        "movdqa %%xmm10,   %%xmm0\n\t"
                        "pxor   %%xmm5,    %%xmm0\n\t"
                        "pxor   %%xmm9,    %%xmm7\n\t"
                        "pxor   %%xmm0,    %%xmm9\n\t"
                        "pxor   %[lxfkey], %%xmm0\n"
                        "movdqa %%xmm0,    %[tmpbuf1]\n\t"
                        : [tmpbuf0] "=m" (*(tmpbuf + 0 * BLOCKSIZE)),
                          [tmpbuf1] "=m" (*(tmpbuf + 1 * BLOCKSIZE))
                        : [lxfkey] "m" (*lxf_key)
                        : "memory" );
          asm volatile ("movdqu %[inbuf6], %%xmm10\n\t"
                        "movdqa %%xmm11,   %%xmm0\n\t"
                        "pxor   %%xmm5,    %%xmm0\n\t"
                        "pxor   %%xmm10,   %%xmm7\n\t"
                        "pxor   %%xmm0,    %%xmm10\n\t"
                        "pxor   %[lxfkey], %%xmm0\n\t"
                        "movdqa %%xmm0,    %[tmpbuf2]\n\t"
                        : [tmpbuf2] "=m" (*(tmpbuf + 2 * BLOCKSIZE))
                        : [inbuf6] "m" (*(inbuf + 6 * BLOCKSIZE)),
                          [lxfkey] "m" (*lxf_key)
                        : "memory" );
          asm volatile ("movdqu %[l7],     %%xmm0\n\t"
                        "pxor   %%xmm11,   %%xmm5\n\t"
                        "pxor   %%xmm0,    %%xmm5\n\t"
                        "movdqa 0x10(%[key]), %%xmm0\n\t"
                        "movdqu %[inbuf7], %%xmm11\n\t"
                        "pxor   %%xmm11,   %%xmm7\n\t"
                        "pxor   %%xmm5,    %%xmm11\n\t"
                        :
                        : [l7] "m" (*l),
                          [inbuf7] "m" (*(inbuf + 7 * BLOCKSIZE)),
                          [key] "r" (ctx->keyschenc)
                        : "memory" );

          /* Eight-wide AES round sequence; the cmpl result steers jb/je
             past the extra AES-192/AES-256 rounds.  (Label is named
             .Ldeclast even on this encryption path.)  */
          asm volatile ("cmpl $12, %[rounds]\n\t"
                        "aesenc %%xmm0, %%xmm1\n\t" "aesenc %%xmm0, %%xmm2\n\t"
                        "aesenc %%xmm0, %%xmm3\n\t" "aesenc %%xmm0, %%xmm4\n\t"
                        "aesenc %%xmm0, %%xmm8\n\t" "aesenc %%xmm0, %%xmm9\n\t"
                        "aesenc %%xmm0, %%xmm10\n\t" "aesenc %%xmm0, %%xmm11\n\t"
                        "movdqa 0x20(%[key]), %%xmm0\n\t"
                        "aesenc %%xmm0, %%xmm1\n\t" "aesenc %%xmm0, %%xmm2\n\t"
                        "aesenc %%xmm0, %%xmm3\n\t" "aesenc %%xmm0, %%xmm4\n\t"
                        "aesenc %%xmm0, %%xmm8\n\t" "aesenc %%xmm0, %%xmm9\n\t"
                        "aesenc %%xmm0, %%xmm10\n\t" "aesenc %%xmm0, %%xmm11\n\t"
                        "movdqa 0x30(%[key]), %%xmm0\n\t"
                        "aesenc %%xmm0, %%xmm1\n\t" "aesenc %%xmm0, %%xmm2\n\t"
                        "aesenc %%xmm0, %%xmm3\n\t" "aesenc %%xmm0, %%xmm4\n\t"
                        "aesenc %%xmm0, %%xmm8\n\t" "aesenc %%xmm0, %%xmm9\n\t"
                        "aesenc %%xmm0, %%xmm10\n\t" "aesenc %%xmm0, %%xmm11\n\t"
                        "movdqa 0x40(%[key]), %%xmm0\n\t"
                        "aesenc %%xmm0, %%xmm1\n\t" "aesenc %%xmm0, %%xmm2\n\t"
                        "aesenc %%xmm0, %%xmm3\n\t" "aesenc %%xmm0, %%xmm4\n\t"
                        "aesenc %%xmm0, %%xmm8\n\t" "aesenc %%xmm0, %%xmm9\n\t"
                        "aesenc %%xmm0, %%xmm10\n\t" "aesenc %%xmm0, %%xmm11\n\t"
                        "movdqa 0x50(%[key]), %%xmm0\n\t"
                        "aesenc %%xmm0, %%xmm1\n\t" "aesenc %%xmm0, %%xmm2\n\t"
                        "aesenc %%xmm0, %%xmm3\n\t" "aesenc %%xmm0, %%xmm4\n\t"
                        "aesenc %%xmm0, %%xmm8\n\t" "aesenc %%xmm0, %%xmm9\n\t"
                        "aesenc %%xmm0, %%xmm10\n\t" "aesenc %%xmm0, %%xmm11\n\t"
                        "movdqa 0x60(%[key]), %%xmm0\n\t"
                        "aesenc %%xmm0, %%xmm1\n\t" "aesenc %%xmm0, %%xmm2\n\t"
                        "aesenc %%xmm0, %%xmm3\n\t" "aesenc %%xmm0, %%xmm4\n\t"
                        "aesenc %%xmm0, %%xmm8\n\t" "aesenc %%xmm0, %%xmm9\n\t"
                        "aesenc %%xmm0, %%xmm10\n\t" "aesenc %%xmm0, %%xmm11\n\t"
                        "movdqa 0x70(%[key]), %%xmm0\n\t"
                        "aesenc %%xmm0, %%xmm1\n\t" "aesenc %%xmm0, %%xmm2\n\t"
                        "aesenc %%xmm0, %%xmm3\n\t" "aesenc %%xmm0, %%xmm4\n\t"
                        "aesenc %%xmm0, %%xmm8\n\t" "aesenc %%xmm0, %%xmm9\n\t"
                        "aesenc %%xmm0, %%xmm10\n\t" "aesenc %%xmm0, %%xmm11\n\t"
                        "movdqa 0x80(%[key]), %%xmm0\n\t"
                        "aesenc %%xmm0, %%xmm1\n\t" "aesenc %%xmm0, %%xmm2\n\t"
                        "aesenc %%xmm0, %%xmm3\n\t" "aesenc %%xmm0, %%xmm4\n\t"
                        "aesenc %%xmm0, %%xmm8\n\t" "aesenc %%xmm0, %%xmm9\n\t"
                        "aesenc %%xmm0, %%xmm10\n\t" "aesenc %%xmm0, %%xmm11\n\t"
                        "movdqa 0x90(%[key]), %%xmm0\n\t"
                        "aesenc %%xmm0, %%xmm1\n\t" "aesenc %%xmm0, %%xmm2\n\t"
                        "aesenc %%xmm0, %%xmm3\n\t" "aesenc %%xmm0, %%xmm4\n\t"
                        "aesenc %%xmm0, %%xmm8\n\t" "aesenc %%xmm0, %%xmm9\n\t"
                        "aesenc %%xmm0, %%xmm10\n\t" "aesenc %%xmm0, %%xmm11\n\t"
                        "jb .Ldeclast%=\n\t"
                        "movdqa 0xa0(%[key]), %%xmm0\n\t"
                        "aesenc %%xmm0, %%xmm1\n\t" "aesenc %%xmm0, %%xmm2\n\t"
                        "aesenc %%xmm0, %%xmm3\n\t" "aesenc %%xmm0, %%xmm4\n\t"
                        "aesenc %%xmm0, %%xmm8\n\t" "aesenc %%xmm0, %%xmm9\n\t"
                        "aesenc %%xmm0, %%xmm10\n\t" "aesenc %%xmm0, %%xmm11\n\t"
                        "movdqa 0xb0(%[key]), %%xmm0\n\t"
                        "aesenc %%xmm0, %%xmm1\n\t" "aesenc %%xmm0, %%xmm2\n\t"
                        "aesenc %%xmm0, %%xmm3\n\t" "aesenc %%xmm0, %%xmm4\n\t"
                        "aesenc %%xmm0, %%xmm8\n\t" "aesenc %%xmm0, %%xmm9\n\t"
                        "aesenc %%xmm0, %%xmm10\n\t" "aesenc %%xmm0, %%xmm11\n\t"
                        "je .Ldeclast%=\n\t"
                        "movdqa 0xc0(%[key]), %%xmm0\n\t"
                        "aesenc %%xmm0, %%xmm1\n\t" "aesenc %%xmm0, %%xmm2\n\t"
                        "aesenc %%xmm0, %%xmm3\n\t" "aesenc %%xmm0, %%xmm4\n\t"
                        "aesenc %%xmm0, %%xmm8\n\t" "aesenc %%xmm0, %%xmm9\n\t"
                        "aesenc %%xmm0, %%xmm10\n\t" "aesenc %%xmm0, %%xmm11\n\t"
                        "movdqa 0xd0(%[key]), %%xmm0\n\t"
                        "aesenc %%xmm0, %%xmm1\n\t" "aesenc %%xmm0, %%xmm2\n\t"
                        "aesenc %%xmm0, %%xmm3\n\t" "aesenc %%xmm0, %%xmm4\n\t"
                        "aesenc %%xmm0, %%xmm8\n\t" "aesenc %%xmm0, %%xmm9\n\t"
                        "aesenc %%xmm0, %%xmm10\n\t" "aesenc %%xmm0, %%xmm11\n\t"
                        ".Ldeclast%=:\n\t"
                        :
                        : [key] "r" (ctx->keyschenc),
                          [rounds] "r" (ctx->rounds)
                        : "cc", "memory");

          /* Last round: the per-block offsets already carry
             key[last]^key[0], so aesenclast also applies the
             Offset_i xor.  */
          asm volatile ("aesenclast %%xmm12,   %%xmm1\n\t"
                        "aesenclast %%xmm13,   %%xmm2\n\t"
                        "aesenclast %%xmm14,   %%xmm3\n\t"
                        "aesenclast %%xmm15,   %%xmm4\n\t"
                        "aesenclast %[tmpbuf0],%%xmm8\n\t"
                        "aesenclast %[tmpbuf1],%%xmm9\n\t"
                        "aesenclast %[tmpbuf2],%%xmm10\n\t"
                        :
                        : [tmpbuf0] "m" (*(tmpbuf + 0 * BLOCKSIZE)),
                          [tmpbuf1] "m" (*(tmpbuf + 1 * BLOCKSIZE)),
                          [tmpbuf2] "m" (*(tmpbuf + 2 * BLOCKSIZE)),
                          [lxfkey] "m" (*lxf_key)
                        : "memory" );
          asm volatile ("aesenclast %%xmm5,    %%xmm11\n\t"
                        "pxor   %[lxfkey], %%xmm11\n\t"
                        "movdqu %%xmm1,    %[outbuf0]\n\t"
                        "movdqu %%xmm2,    %[outbuf1]\n\t"
                        : [outbuf0] "=m" (*(outbuf + 0 * BLOCKSIZE)),
                          [outbuf1] "=m" (*(outbuf + 1 * BLOCKSIZE))
                        : [lxfkey] "m" (*lxf_key)
                        : "memory" );
          asm volatile ("movdqu %%xmm3,    %[outbuf2]\n\t"
                        "movdqu %%xmm4,    %[outbuf3]\n\t"
                        "movdqu %%xmm8,    %[outbuf4]\n\t"
                        : [outbuf2] "=m" (*(outbuf + 2 * BLOCKSIZE)),
                          [outbuf3] "=m" (*(outbuf + 3 * BLOCKSIZE)),
                          [outbuf4] "=m" (*(outbuf + 4 * BLOCKSIZE))
                        :
                        : "memory" );
          asm volatile ("movdqu %%xmm9,    %[outbuf5]\n\t"
                        "movdqu %%xmm10,   %[outbuf6]\n\t"
                        "movdqu %%xmm11,   %[outbuf7]\n\t"
                        : [outbuf5] "=m" (*(outbuf + 5 * BLOCKSIZE)),
                          [outbuf6] "=m" (*(outbuf + 6 * BLOCKSIZE)),
                          [outbuf7] "=m" (*(outbuf + 7 * BLOCKSIZE))
                        :
                        : "memory" );

          outbuf += 8*BLOCKSIZE;
          inbuf  += 8*BLOCKSIZE;
        }

      /* Undo the key[0] pre-whitening of the Offset and wipe the cached
         key material from the stack.  */
      asm volatile ("pxor %[first_key], %%xmm5\n\t"
                    "pxor %%xmm0, %%xmm0\n\t"
                    "movdqu %%xmm0, %[lxfkey]\n\t"
                    : [lxfkey] "=m" (*lxf_key)
                    : [first_key] "m" (ctx->keyschenc[0][0][0])
                    : "memory" );

      aesni_cleanup_8_15();
    }
#endif

  for (
;nblocks >= 4 ; nblocks -= 4 ) { n += 4; l = aes_ocb_get_l(c, n); /* Checksum_i = Checksum_{i-1} xor P_i */ /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ /* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i) */ asm volatile ("movdqu %[l0], %%xmm0\n\t" "movdqu %[inbuf0], %%xmm1\n\t" "movdqu %[l0l1], %%xmm3\n\t" : : [l0] "m" (*c->u_mode.ocb.L[0]), [l0l1] "m" (*c->u_mode.ocb.L0L1), [inbuf0] "m" (*(inbuf + 0 * BLOCKSIZE)) : "memory" ); asm volatile ("movdqu %[l1], %%xmm4\n\t" "movdqu %[l3], %%xmm6\n\t" "pxor %%xmm5, %%xmm0\n\t" "pxor %%xmm1, %%xmm7\n\t" "pxor %%xmm0, %%xmm1\n\t" "movdqa %%xmm0, %[tmpbuf0]\n\t" : [tmpbuf0] "=m" (*(tmpbuf + 0 * BLOCKSIZE)) : [l1] "m" (*c->u_mode.ocb.L[1]), [l3] "m" (*l) : "memory" ); asm volatile ("movdqu %[inbuf1], %%xmm2\n\t" "pxor %%xmm5, %%xmm3\n\t" "pxor %%xmm2, %%xmm7\n\t" "pxor %%xmm3, %%xmm2\n\t" "movdqa %%xmm3, %[tmpbuf1]\n\t" : [tmpbuf1] "=m" (*(tmpbuf + 1 * BLOCKSIZE)) : [inbuf1] "m" (*(inbuf + 1 * BLOCKSIZE)) : "memory" ); asm volatile ("movdqa %%xmm4, %%xmm0\n\t" "movdqu %[inbuf2], %%xmm3\n\t" "pxor %%xmm5, %%xmm0\n\t" "pxor %%xmm3, %%xmm7\n\t" "pxor %%xmm0, %%xmm3\n\t" "movdqa %%xmm0, %[tmpbuf2]\n\t" : [tmpbuf2] "=m" (*(tmpbuf + 2 * BLOCKSIZE)) : [inbuf2] "m" (*(inbuf + 2 * BLOCKSIZE)) : "memory" ); asm volatile ("pxor %%xmm6, %%xmm5\n\t" "pxor %%xmm4, %%xmm5\n\t" "movdqu %[inbuf3], %%xmm4\n\t" "pxor %%xmm4, %%xmm7\n\t" "pxor %%xmm5, %%xmm4\n\t" : : [inbuf3] "m" (*(inbuf + 3 * BLOCKSIZE)) : "memory" ); do_aesni_enc_vec4 (ctx); asm volatile ("pxor %[tmpbuf0],%%xmm1\n\t" "movdqu %%xmm1, %[outbuf0]\n\t" "pxor %[tmpbuf1],%%xmm2\n\t" "movdqu %%xmm2, %[outbuf1]\n\t" : [outbuf0] "=m" (*(outbuf + 0 * BLOCKSIZE)), [outbuf1] "=m" (*(outbuf + 1 * BLOCKSIZE)) : [tmpbuf0] "m" (*(tmpbuf + 0 * BLOCKSIZE)), [tmpbuf1] "m" (*(tmpbuf + 1 * BLOCKSIZE)) : "memory" ); asm volatile ("pxor %[tmpbuf2],%%xmm3\n\t" "movdqu %%xmm3, %[outbuf2]\n\t" "pxor %%xmm5, %%xmm4\n\t" "movdqu %%xmm4, %[outbuf3]\n\t" : [outbuf2] "=m" (*(outbuf + 2 * BLOCKSIZE)), 
[outbuf3] "=m" (*(outbuf + 3 * BLOCKSIZE)) : [tmpbuf2] "m" (*(tmpbuf + 2 * BLOCKSIZE)) : "memory" ); outbuf += 4*BLOCKSIZE; inbuf += 4*BLOCKSIZE; } for ( ;nblocks; nblocks-- ) { l = aes_ocb_get_l(c, ++n); /* Checksum_i = Checksum_{i-1} xor P_i */ /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ /* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i) */ asm volatile ("movdqu %[l], %%xmm1\n\t" "movdqu %[inbuf], %%xmm0\n\t" "pxor %%xmm1, %%xmm5\n\t" "pxor %%xmm0, %%xmm7\n\t" "pxor %%xmm5, %%xmm0\n\t" : : [l] "m" (*l), [inbuf] "m" (*inbuf) : "memory" ); do_aesni_enc (ctx); asm volatile ("pxor %%xmm5, %%xmm0\n\t" "movdqu %%xmm0, %[outbuf]\n\t" : [outbuf] "=m" (*outbuf) : : "memory" ); inbuf += BLOCKSIZE; outbuf += BLOCKSIZE; } c->u_mode.ocb.data_nblocks = n; asm volatile ("movdqu %%xmm5, %[iv]\n\t" "movdqu %%xmm7, %[ctr]\n\t" : [iv] "=m" (*c->u_iv.iv), [ctr] "=m" (*c->u_ctr.ctr) : : "memory" ); asm volatile ("pxor %%xmm0, %%xmm0\n\t" "movdqa %%xmm0, %[tmpbuf0]\n\t" "movdqa %%xmm0, %[tmpbuf1]\n\t" "movdqa %%xmm0, %[tmpbuf2]\n\t" : [tmpbuf0] "=m" (*(tmpbuf + 0 * BLOCKSIZE)), [tmpbuf1] "=m" (*(tmpbuf + 1 * BLOCKSIZE)), [tmpbuf2] "=m" (*(tmpbuf + 2 * BLOCKSIZE)) : : "memory" ); aesni_cleanup (); aesni_cleanup_2_7 (); return 0; } static unsigned int ASM_FUNC_ATTR_NOINLINE aesni_ocb_dec (gcry_cipher_hd_t c, void *outbuf_arg, const void *inbuf_arg, size_t nblocks_arg) { RIJNDAEL_context *ctx = (void *)&c->context.c; unsigned char *outbuf = outbuf_arg; const unsigned char *inbuf = inbuf_arg; u64 n = c->u_mode.ocb.data_nblocks; const unsigned char *l; size_t nblocks = nblocks_arg; byte tmpbuf_store[3 * 16 + 15]; byte *tmpbuf; aesni_prepare_2_7_variable; asm volatile ("" : "=r" (tmpbuf) : "0" (tmpbuf_store) : "memory"); tmpbuf = tmpbuf + (-(uintptr_t)tmpbuf & 15); aesni_prepare (); aesni_prepare_2_7 (); if ( !ctx->decryption_prepared ) { do_aesni_prepare_decryption ( ctx ); ctx->decryption_prepared = 1; } /* Preload Offset */ asm volatile ("movdqu %[iv], %%xmm5\n\t" : /* No output */ : 
                  [iv] "m" (*c->u_iv.iv)
                : "memory" );

  /* Consume blocks singly until the block index N is 4-aligned so the
     wide loops below can use the precomputed L0/L1/L0L1 offsets.  */
  for ( ;nblocks && n % 4; nblocks-- )
    {
      l = aes_ocb_get_l(c, ++n);

      /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
      /* P_i = Offset_i xor DECIPHER(K, C_i xor Offset_i)  */
      asm volatile ("movdqu %[l], %%xmm1\n\t"
                    "movdqu %[inbuf], %%xmm0\n\t"
                    "pxor %%xmm1, %%xmm5\n\t"
                    "pxor %%xmm5, %%xmm0\n\t"
                    :
                    : [l] "m" (*l),
                      [inbuf] "m" (*inbuf)
                    : "memory" );

      do_aesni_dec (ctx);

      asm volatile ("pxor %%xmm5, %%xmm0\n\t"
                    "movdqu %%xmm0, %[outbuf]\n\t"
                    : [outbuf] "=m" (*outbuf)
                    :
                    : "memory" );

      inbuf += BLOCKSIZE;
      outbuf += BLOCKSIZE;
    }

#ifdef __x86_64__
  if (nblocks >= 8)
    {
      unsigned char last_xor_first_key_store[16 + 15];
      unsigned char *lxf_key;
      aesni_prepare_8_15_variable;

      /* 16-byte align lxf_key, same trick as for tmpbuf above.  */
      asm volatile (""
                    : "=r" (lxf_key)
                    : "0" (last_xor_first_key_store)
                    : "memory");
      lxf_key = lxf_key + (-(uintptr_t)lxf_key & 15);

      aesni_prepare_8_15();

      /* lxf_key = last-round key XOR first-round key.  The running
         Offset (xmm5) is pre-XORed with the first-round key so that
         the initial AddRoundKey is folded into the per-block offset
         XOR; lxf_key undoes that fold for the aesdeclast operands.  */
      asm volatile ("movdqu %[l0], %%xmm6\n\t"
                    "movdqa %[last_key], %%xmm0\n\t"
                    "pxor %[first_key], %%xmm5\n\t"
                    "pxor %[first_key], %%xmm0\n\t"
                    "movdqa %%xmm0, %[lxfkey]\n\t"
                    : [lxfkey] "=m" (*lxf_key)
                    : [l0] "m" (*c->u_mode.ocb.L[0]),
                      [last_key] "m" (ctx->keyschdec[ctx->rounds][0][0]),
                      [first_key] "m" (ctx->keyschdec[0][0][0])
                    : "memory" );

      for ( ;nblocks >= 8 ; nblocks -= 8 )
        {
          n += 4;
          l = aes_ocb_get_l(c, n);

          asm volatile ("movdqu %[l0l1], %%xmm10\n\t"
                        "movdqu %[l1], %%xmm11\n\t"
                        "movdqu %[l3], %%xmm15\n\t"
                        :
                        : [l0l1] "m" (*c->u_mode.ocb.L0L1),
                          [l1] "m" (*c->u_mode.ocb.L[1]),
                          [l3] "m" (*l)
                        : "memory" );

          n += 4;
          l = aes_ocb_get_l(c, n);

          /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
          /* P_i = Offset_i xor ENCIPHER(K, C_i xor Offset_i)  */
          asm volatile ("movdqu %[inbuf0], %%xmm1\n\t"
                        "movdqu %[inbuf1], %%xmm2\n\t"
                        "movdqu %[inbuf2], %%xmm3\n\t"
                        :
                        : [inbuf0] "m" (*(inbuf + 0 * BLOCKSIZE)),
                          [inbuf1] "m" (*(inbuf + 1 * BLOCKSIZE)),
                          [inbuf2] "m" (*(inbuf + 2 * BLOCKSIZE))
                        : "memory" );
          asm volatile ("movdqu %[inbuf3], %%xmm4\n\t"
                        "movdqu %[inbuf4], %%xmm8\n\t"
                        "movdqu %[inbuf5], %%xmm9\n\t"
                        :
                        : [inbuf3] "m" (*(inbuf + 3 * BLOCKSIZE)),
                          [inbuf4] "m" (*(inbuf + 4 * BLOCKSIZE)),
                          [inbuf5] "m" (*(inbuf + 5 * BLOCKSIZE))
                        : "memory" );
          /* Build the eight per-block offsets, XOR them into the
             ciphertext blocks, and keep copies pre-XORed with lxf_key
             (in xmm12..15 and tmpbuf) for the final aesdeclast.  */
          asm volatile ("movdqa %[lxfkey], %%xmm0\n\t"
                        "movdqa %%xmm6, %%xmm12\n\t"
                        "pxor %%xmm5, %%xmm12\n\t"
                        "pxor %%xmm12, %%xmm1\n\t"
                        "pxor %%xmm0, %%xmm12\n\t"
                        "movdqa %%xmm10, %%xmm13\n\t"
                        "pxor %%xmm5, %%xmm13\n\t"
                        "pxor %%xmm13, %%xmm2\n\t"
                        "pxor %%xmm0, %%xmm13\n\t"
                        "movdqa %%xmm11, %%xmm14\n\t"
                        "pxor %%xmm5, %%xmm14\n\t"
                        "pxor %%xmm14, %%xmm3\n\t"
                        "pxor %%xmm0, %%xmm14\n\t"
                        "pxor %%xmm11, %%xmm5\n\t"
                        "pxor %%xmm15, %%xmm5\n\t"
                        "pxor %%xmm5, %%xmm4\n\t"
                        "movdqa %%xmm5, %%xmm15\n\t"
                        "pxor %%xmm0, %%xmm15\n\t"
                        "movdqa %%xmm5, %%xmm0\n\t"
                        "pxor %%xmm6, %%xmm0\n\t"
                        "pxor %%xmm0, %%xmm8\n\t"
                        "pxor %[lxfkey], %%xmm0\n\t"
                        "movdqa %%xmm0, %[tmpbuf0]\n\t"
                        "movdqa %%xmm10, %%xmm0\n\t"
                        "pxor %%xmm5, %%xmm0\n\t"
                        "pxor %%xmm0, %%xmm9\n\t"
                        "pxor %[lxfkey], %%xmm0\n"
                        "movdqa %%xmm0, %[tmpbuf1]\n\t"
                        : [tmpbuf0] "=m" (*(tmpbuf + 0 * BLOCKSIZE)),
                          [tmpbuf1] "=m" (*(tmpbuf + 1 * BLOCKSIZE))
                        : [lxfkey] "m" (*lxf_key)
                        : "memory" );
          asm volatile ("movdqu %[inbuf6], %%xmm10\n\t"
                        "movdqa %%xmm11, %%xmm0\n\t"
                        "pxor %%xmm5, %%xmm0\n\t"
                        "pxor %%xmm0, %%xmm10\n\t"
                        "pxor %[lxfkey], %%xmm0\n\t"
                        "movdqa %%xmm0, %[tmpbuf2]\n\t"
                        : [tmpbuf2] "=m" (*(tmpbuf + 2 * BLOCKSIZE))
                        : [inbuf6] "m" (*(inbuf + 6 * BLOCKSIZE)),
                          [lxfkey] "m" (*lxf_key)
                        : "memory" );
          asm volatile ("movdqu %[l7], %%xmm0\n\t"
                        "pxor %%xmm11, %%xmm5\n\t"
                        "pxor %%xmm0, %%xmm5\n\t"
                        "movdqa 0x10(%[key]), %%xmm0\n\t"
                        "movdqu %[inbuf7], %%xmm11\n\t"
                        "pxor %%xmm5, %%xmm11\n\t"
                        :
                        : [l7] "m" (*l),
                          [inbuf7] "m" (*(inbuf + 7 * BLOCKSIZE)),
                          [key] "r" (ctx->keyschdec)
                        : "memory" );

          /* Eight-way unrolled AES rounds; the cmpl/jb/je skip the
             extra rounds for 128- and 192-bit keys.  The first
             AddRoundKey was already folded into the offsets, and the
             last round key is folded into the aesdeclast operands.  */
          asm volatile ("cmpl $12, %[rounds]\n\t"
                        "aesdec %%xmm0, %%xmm1\n\t"
                        "aesdec %%xmm0, %%xmm2\n\t"
                        "aesdec %%xmm0, %%xmm3\n\t"
                        "aesdec %%xmm0, %%xmm4\n\t"
                        "aesdec %%xmm0, %%xmm8\n\t"
                        "aesdec %%xmm0, %%xmm9\n\t"
                        "aesdec %%xmm0, %%xmm10\n\t"
                        "aesdec %%xmm0, %%xmm11\n\t"
                        "movdqa 0x20(%[key]), %%xmm0\n\t"
                        "aesdec %%xmm0, %%xmm1\n\t"
                        "aesdec %%xmm0, %%xmm2\n\t"
                        "aesdec %%xmm0, %%xmm3\n\t"
                        "aesdec %%xmm0, %%xmm4\n\t"
                        "aesdec %%xmm0, %%xmm8\n\t"
                        "aesdec %%xmm0, %%xmm9\n\t"
                        "aesdec %%xmm0, %%xmm10\n\t"
                        "aesdec %%xmm0, %%xmm11\n\t"
                        "movdqa 0x30(%[key]), %%xmm0\n\t"
                        "aesdec %%xmm0, %%xmm1\n\t"
                        "aesdec %%xmm0, %%xmm2\n\t"
                        "aesdec %%xmm0, %%xmm3\n\t"
                        "aesdec %%xmm0, %%xmm4\n\t"
                        "aesdec %%xmm0, %%xmm8\n\t"
                        "aesdec %%xmm0, %%xmm9\n\t"
                        "aesdec %%xmm0, %%xmm10\n\t"
                        "aesdec %%xmm0, %%xmm11\n\t"
                        "movdqa 0x40(%[key]), %%xmm0\n\t"
                        "aesdec %%xmm0, %%xmm1\n\t"
                        "aesdec %%xmm0, %%xmm2\n\t"
                        "aesdec %%xmm0, %%xmm3\n\t"
                        "aesdec %%xmm0, %%xmm4\n\t"
                        "aesdec %%xmm0, %%xmm8\n\t"
                        "aesdec %%xmm0, %%xmm9\n\t"
                        "aesdec %%xmm0, %%xmm10\n\t"
                        "aesdec %%xmm0, %%xmm11\n\t"
                        "movdqa 0x50(%[key]), %%xmm0\n\t"
                        "aesdec %%xmm0, %%xmm1\n\t"
                        "aesdec %%xmm0, %%xmm2\n\t"
                        "aesdec %%xmm0, %%xmm3\n\t"
                        "aesdec %%xmm0, %%xmm4\n\t"
                        "aesdec %%xmm0, %%xmm8\n\t"
                        "aesdec %%xmm0, %%xmm9\n\t"
                        "aesdec %%xmm0, %%xmm10\n\t"
                        "aesdec %%xmm0, %%xmm11\n\t"
                        "movdqa 0x60(%[key]), %%xmm0\n\t"
                        "aesdec %%xmm0, %%xmm1\n\t"
                        "aesdec %%xmm0, %%xmm2\n\t"
                        "aesdec %%xmm0, %%xmm3\n\t"
                        "aesdec %%xmm0, %%xmm4\n\t"
                        "aesdec %%xmm0, %%xmm8\n\t"
                        "aesdec %%xmm0, %%xmm9\n\t"
                        "aesdec %%xmm0, %%xmm10\n\t"
                        "aesdec %%xmm0, %%xmm11\n\t"
                        "movdqa 0x70(%[key]), %%xmm0\n\t"
                        "aesdec %%xmm0, %%xmm1\n\t"
                        "aesdec %%xmm0, %%xmm2\n\t"
                        "aesdec %%xmm0, %%xmm3\n\t"
                        "aesdec %%xmm0, %%xmm4\n\t"
                        "aesdec %%xmm0, %%xmm8\n\t"
                        "aesdec %%xmm0, %%xmm9\n\t"
                        "aesdec %%xmm0, %%xmm10\n\t"
                        "aesdec %%xmm0, %%xmm11\n\t"
                        "movdqa 0x80(%[key]), %%xmm0\n\t"
                        "aesdec %%xmm0, %%xmm1\n\t"
                        "aesdec %%xmm0, %%xmm2\n\t"
                        "aesdec %%xmm0, %%xmm3\n\t"
                        "aesdec %%xmm0, %%xmm4\n\t"
                        "aesdec %%xmm0, %%xmm8\n\t"
                        "aesdec %%xmm0, %%xmm9\n\t"
                        "aesdec %%xmm0, %%xmm10\n\t"
                        "aesdec %%xmm0, %%xmm11\n\t"
                        "movdqa 0x90(%[key]), %%xmm0\n\t"
                        "aesdec %%xmm0, %%xmm1\n\t"
                        "aesdec %%xmm0, %%xmm2\n\t"
                        "aesdec %%xmm0, %%xmm3\n\t"
                        "aesdec %%xmm0, %%xmm4\n\t"
                        "aesdec %%xmm0, %%xmm8\n\t"
                        "aesdec %%xmm0, %%xmm9\n\t"
                        "aesdec %%xmm0, %%xmm10\n\t"
                        "aesdec %%xmm0, %%xmm11\n\t"
                        "jb .Ldeclast%=\n\t"
                        "movdqa 0xa0(%[key]), %%xmm0\n\t"
                        "aesdec %%xmm0, %%xmm1\n\t"
                        "aesdec %%xmm0, %%xmm2\n\t"
                        "aesdec %%xmm0, %%xmm3\n\t"
                        "aesdec %%xmm0, %%xmm4\n\t"
                        "aesdec %%xmm0, %%xmm8\n\t"
                        "aesdec %%xmm0, %%xmm9\n\t"
                        "aesdec %%xmm0, %%xmm10\n\t"
                        "aesdec %%xmm0, %%xmm11\n\t"
                        "movdqa 0xb0(%[key]), %%xmm0\n\t"
                        "aesdec %%xmm0, %%xmm1\n\t"
                        "aesdec %%xmm0, %%xmm2\n\t"
                        "aesdec %%xmm0, %%xmm3\n\t"
                        "aesdec %%xmm0, %%xmm4\n\t"
                        "aesdec %%xmm0, %%xmm8\n\t"
                        "aesdec %%xmm0, %%xmm9\n\t"
                        "aesdec %%xmm0, %%xmm10\n\t"
                        "aesdec %%xmm0, %%xmm11\n\t"
                        "je .Ldeclast%=\n\t"
                        "movdqa 0xc0(%[key]), %%xmm0\n\t"
                        "aesdec %%xmm0, %%xmm1\n\t"
                        "aesdec %%xmm0, %%xmm2\n\t"
                        "aesdec %%xmm0, %%xmm3\n\t"
                        "aesdec %%xmm0, %%xmm4\n\t"
                        "aesdec %%xmm0, %%xmm8\n\t"
                        "aesdec %%xmm0, %%xmm9\n\t"
                        "aesdec %%xmm0, %%xmm10\n\t"
                        "aesdec %%xmm0, %%xmm11\n\t"
                        "movdqa 0xd0(%[key]), %%xmm0\n\t"
                        "aesdec %%xmm0, %%xmm1\n\t"
                        "aesdec %%xmm0, %%xmm2\n\t"
                        "aesdec %%xmm0, %%xmm3\n\t"
                        "aesdec %%xmm0, %%xmm4\n\t"
                        "aesdec %%xmm0, %%xmm8\n\t"
                        "aesdec %%xmm0, %%xmm9\n\t"
                        "aesdec %%xmm0, %%xmm10\n\t"
                        "aesdec %%xmm0, %%xmm11\n\t"
                        ".Ldeclast%=:\n\t"
                        :
                        : [key] "r" (ctx->keyschdec),
                          [rounds] "r" (ctx->rounds)
                        : "cc", "memory");

          /* Last round: operands carry offset ^ lxf_key, yielding
             offset ^ last_key as required by aesdeclast.  */
          asm volatile ("aesdeclast %%xmm12, %%xmm1\n\t"
                        "aesdeclast %%xmm13, %%xmm2\n\t"
                        "aesdeclast %%xmm14, %%xmm3\n\t"
                        "aesdeclast %%xmm15, %%xmm4\n\t"
                        "aesdeclast %[tmpbuf0],%%xmm8\n\t"
                        "aesdeclast %[tmpbuf1],%%xmm9\n\t"
                        "aesdeclast %[tmpbuf2],%%xmm10\n\t"
                        :
                        : [tmpbuf0] "m" (*(tmpbuf + 0 * BLOCKSIZE)),
                          [tmpbuf1] "m" (*(tmpbuf + 1 * BLOCKSIZE)),
                          [tmpbuf2] "m" (*(tmpbuf + 2 * BLOCKSIZE))
                        : "memory" );
          asm volatile ("aesdeclast %%xmm5, %%xmm11\n\t"
                        "pxor %[lxfkey], %%xmm11\n\t"
                        "movdqu %%xmm1, %[outbuf0]\n\t"
                        "movdqu %%xmm2, %[outbuf1]\n\t"
                        : [outbuf0] "=m" (*(outbuf + 0 * BLOCKSIZE)),
                          [outbuf1] "=m" (*(outbuf + 1 * BLOCKSIZE))
                        : [lxfkey] "m" (*lxf_key)
                        : "memory" );
          asm volatile ("movdqu %%xmm3, %[outbuf2]\n\t"
                        "movdqu %%xmm4, %[outbuf3]\n\t"
                        "movdqu %%xmm8, %[outbuf4]\n\t"
                        : [outbuf2] "=m" (*(outbuf + 2 * BLOCKSIZE)),
                          [outbuf3] "=m" (*(outbuf + 3 * BLOCKSIZE)),
                          [outbuf4] "=m" (*(outbuf + 4 * BLOCKSIZE))
                        :
                        : "memory" );
          asm volatile ("movdqu %%xmm9, %[outbuf5]\n\t"
                        "movdqu %%xmm10, %[outbuf6]\n\t"
                        "movdqu %%xmm11, %[outbuf7]\n\t"
                        : [outbuf5] "=m" (*(outbuf + 5 * BLOCKSIZE)),
                          [outbuf6] "=m" (*(outbuf + 6 * BLOCKSIZE)),
                          [outbuf7] "=m" (*(outbuf + 7 * BLOCKSIZE))
                        :
                        : "memory" );

          outbuf += 8*BLOCKSIZE;
          inbuf += 8*BLOCKSIZE;
        }

      /* Undo the first-round-key fold on the Offset and wipe the
         on-stack key material.  */
      asm volatile ("pxor %[first_key], %%xmm5\n\t"
                    "pxor %%xmm0, %%xmm0\n\t"
                    "movdqu %%xmm0, %[lxfkey]\n\t"
                    : [lxfkey] "=m" (*lxf_key)
                    : [first_key] "m" (ctx->keyschdec[0][0][0])
                    : "memory" );

      aesni_cleanup_8_15();
    }
#endif

  for ( ;nblocks >= 4 ; nblocks -= 4 )
    {
      n += 4;
      l = aes_ocb_get_l(c, n);

      /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
      /* C_i = Offset_i xor DECIPHER(K, P_i xor Offset_i)  */
      asm volatile ("movdqu %[l0], %%xmm0\n\t"
                    "movdqu %[inbuf0], %%xmm1\n\t"
                    "movdqu %[l0l1], %%xmm3\n\t"
                    :
                    : [l0] "m" (*c->u_mode.ocb.L[0]),
                      [l0l1] "m" (*c->u_mode.ocb.L0L1),
                      [inbuf0] "m" (*(inbuf + 0 * BLOCKSIZE))
                    : "memory" );
      asm volatile ("movdqu %[l1], %%xmm4\n\t"
                    "movdqu %[l3], %%xmm6\n\t"
                    "pxor %%xmm5, %%xmm0\n\t"
                    "pxor %%xmm0, %%xmm1\n\t"
                    "movdqa %%xmm0, %[tmpbuf0]\n\t"
                    : [tmpbuf0] "=m" (*(tmpbuf + 0 * BLOCKSIZE))
                    : [l1] "m" (*c->u_mode.ocb.L[1]),
                      [l3] "m" (*l)
                    : "memory" );
      asm volatile ("movdqu %[inbuf1], %%xmm2\n\t"
                    "pxor %%xmm5, %%xmm3\n\t"
                    "pxor %%xmm3, %%xmm2\n\t"
                    "movdqa %%xmm3, %[tmpbuf1]\n\t"
                    : [tmpbuf1] "=m" (*(tmpbuf + 1 * BLOCKSIZE))
                    : [inbuf1] "m" (*(inbuf + 1 * BLOCKSIZE))
                    : "memory" );
      asm volatile ("movdqa %%xmm4, %%xmm0\n\t"
                    "movdqu %[inbuf2], %%xmm3\n\t"
                    "pxor %%xmm5, %%xmm0\n\t"
                    "pxor %%xmm0, %%xmm3\n\t"
                    "movdqa %%xmm0, %[tmpbuf2]\n\t"
                    : [tmpbuf2] "=m" (*(tmpbuf + 2 * BLOCKSIZE))
                    : [inbuf2] "m" (*(inbuf + 2 * BLOCKSIZE))
                    : "memory" );
      asm volatile ("pxor %%xmm6, %%xmm5\n\t"
                    "pxor %%xmm4, %%xmm5\n\t"
                    "movdqu %[inbuf3], %%xmm4\n\t"
                    "pxor %%xmm5, %%xmm4\n\t"
                    :
                    : [inbuf3] "m" (*(inbuf + 3 * BLOCKSIZE))
                    : "memory" );

      do_aesni_dec_vec4 (ctx);

      asm volatile ("pxor %[tmpbuf0],%%xmm1\n\t"
                    "movdqu %%xmm1, %[outbuf0]\n\t"
                    "pxor %[tmpbuf1],%%xmm2\n\t"
                    "movdqu %%xmm2, %[outbuf1]\n\t"
                    : [outbuf0] "=m" (*(outbuf + 0 * BLOCKSIZE)),
                      [outbuf1] "=m" (*(outbuf + 1 * BLOCKSIZE))
                    : [tmpbuf0] "m" (*(tmpbuf + 0 * BLOCKSIZE)),
                      [tmpbuf1] "m" (*(tmpbuf + 1 * BLOCKSIZE))
                    : "memory" );
      asm volatile ("pxor %[tmpbuf2],%%xmm3\n\t"
                    "movdqu %%xmm3, %[outbuf2]\n\t"
                    "pxor %%xmm5, %%xmm4\n\t"
                    "movdqu %%xmm4, %[outbuf3]\n\t"
                    : [outbuf2] "=m" (*(outbuf + 2 * BLOCKSIZE)),
                      [outbuf3] "=m" (*(outbuf + 3 * BLOCKSIZE))
                    : [tmpbuf2] "m" (*(tmpbuf + 2 * BLOCKSIZE))
                    : "memory" );

      outbuf += 4*BLOCKSIZE;
      inbuf += 4*BLOCKSIZE;
    }

  for ( ;nblocks; nblocks-- )
    {
      l = aes_ocb_get_l(c, ++n);

      /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
      /* P_i = Offset_i xor DECIPHER(K, C_i xor Offset_i)  */
      /* Checksum_i = Checksum_{i-1} xor P_i  */
      asm volatile ("movdqu %[l], %%xmm1\n\t"
                    "movdqu %[inbuf], %%xmm0\n\t"
                    "pxor %%xmm1, %%xmm5\n\t"
                    "pxor %%xmm5, %%xmm0\n\t"
                    :
                    : [l] "m" (*l),
                      [inbuf] "m" (*inbuf)
                    : "memory" );

      do_aesni_dec (ctx);

      asm volatile ("pxor %%xmm5, %%xmm0\n\t"
                    "movdqu %%xmm0, %[outbuf]\n\t"
                    : [outbuf] "=m" (*outbuf)
                    :
                    : "memory" );

      inbuf += BLOCKSIZE;
      outbuf += BLOCKSIZE;
    }

  c->u_mode.ocb.data_nblocks = n;
  asm volatile ("movdqu %%xmm5, %[iv]\n\t"
                : [iv] "=m" (*c->u_iv.iv)
                :
                : "memory" );

  /* Wipe the on-stack offset scratch area.  */
  asm volatile ("pxor %%xmm0, %%xmm0\n\t"
                "movdqa %%xmm0, %[tmpbuf0]\n\t"
                "movdqa %%xmm0, %[tmpbuf1]\n\t"
                "movdqa %%xmm0, %[tmpbuf2]\n\t"
                : [tmpbuf0] "=m" (*(tmpbuf + 0 * BLOCKSIZE)),
                  [tmpbuf1] "=m" (*(tmpbuf + 1 * BLOCKSIZE)),
                  [tmpbuf2] "=m" (*(tmpbuf + 2 * BLOCKSIZE))
                :
                : "memory" );

  /* The OCB checksum over the produced plaintext is computed in a
     separate pass.  */
  aesni_ocb_checksum (c, outbuf_arg, nblocks_arg);

  aesni_cleanup ();
  aesni_cleanup_2_7 ();

  return 0;
}


/* Dispatch OCB data processing to the encrypt or decrypt worker.  */
size_t ASM_FUNC_ATTR
_gcry_aes_aesni_ocb_crypt(gcry_cipher_hd_t c, void *outbuf_arg,
                          const void *inbuf_arg, size_t nblocks,
                          int encrypt)
{
  if (encrypt)
    return aesni_ocb_enc(c, outbuf_arg, inbuf_arg, nblocks);
  else
    return aesni_ocb_dec(c, outbuf_arg, inbuf_arg, nblocks);
}


/* Hash NBLOCKS of additional authenticated data into the OCB AAD sum
   using AES-NI.  */
size_t ASM_FUNC_ATTR
_gcry_aes_aesni_ocb_auth (gcry_cipher_hd_t c, const
void *abuf_arg, size_t nblocks)
{
  RIJNDAEL_context *ctx = (void *)&c->context.c;
  const unsigned char *abuf = abuf_arg;
  u64 n = c->u_mode.ocb.aad_nblocks;
  const unsigned char *l;
  aesni_prepare_2_7_variable;

  aesni_prepare ();
  aesni_prepare_2_7 ();

  /* Preload Offset and Sum */
  asm volatile ("movdqu %[iv], %%xmm5\n\t"
                "movdqu %[ctr], %%xmm6\n\t"
                : /* No output */
                : [iv] "m" (*c->u_mode.ocb.aad_offset),
                  [ctr] "m" (*c->u_mode.ocb.aad_sum)
                : "memory" );

  /* Consume blocks singly until the AAD block index is 4-aligned.  */
  for ( ;nblocks && n % 4; nblocks-- )
    {
      l = aes_ocb_get_l(c, ++n);

      /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
      /* Sum_i = Sum_{i-1} xor ENCIPHER(K, A_i xor Offset_i)  */
      asm volatile ("movdqu %[l], %%xmm1\n\t"
                    "movdqu %[abuf], %%xmm0\n\t"
                    "pxor %%xmm1, %%xmm5\n\t"
                    "pxor %%xmm5, %%xmm0\n\t"
                    :
                    : [l] "m" (*l),
                      [abuf] "m" (*abuf)
                    : "memory" );

      do_aesni_enc (ctx);

      asm volatile ("pxor %%xmm0, %%xmm6\n\t"
                    :
                    :
                    : "memory" );

      abuf += BLOCKSIZE;
    }

#ifdef __x86_64__
  if (nblocks >= 8)
    {
      aesni_prepare_8_15_variable;

      aesni_prepare_8_15();

      /* Keep the frequently-needed offsets resident: L0 in xmm7,
         L0^L1 in xmm12, L1 in xmm13.  */
      asm volatile ("movdqu %[l0], %%xmm7\n\t"
                    "movdqu %[l0l1], %%xmm12\n\t"
                    "movdqu %[l1], %%xmm13\n\t"
                    :
                    : [l0] "m" (*c->u_mode.ocb.L[0]),
                      [l0l1] "m" (*c->u_mode.ocb.L0L1),
                      [l1] "m" (*c->u_mode.ocb.L[1])
                    : "memory" );

      for ( ;nblocks >= 8 ; nblocks -= 8 )
        {
          n += 4;
          l = aes_ocb_get_l(c, n);

          asm volatile ("movdqu %[l3], %%xmm0\n\t"
                        "pxor %%xmm13, %%xmm0\n\t"
                        :
                        : [l3] "m" (*l)
                        : "memory" );

          n += 4;
          l = aes_ocb_get_l(c, n);

          asm volatile ("movdqu %[l7], %%xmm14\n\t"
                        "pxor %%xmm13, %%xmm14\n\t"
                        :
                        : [l7] "m" (*l)
                        : "memory" );

          /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
          /* Sum_i = Sum_{i-1} xor ENCIPHER(K, A_i xor Offset_i)  */
          asm volatile ("movdqu %[abuf0], %%xmm1\n\t"
                        "movdqu %[abuf1], %%xmm2\n\t"
                        "movdqu %[abuf2], %%xmm3\n\t"
                        "movdqu %[abuf3], %%xmm4\n\t"
                        :
                        : [abuf0] "m" (*(abuf + 0 * BLOCKSIZE)),
                          [abuf1] "m" (*(abuf + 1 * BLOCKSIZE)),
                          [abuf2] "m" (*(abuf + 2 * BLOCKSIZE)),
                          [abuf3] "m" (*(abuf + 3 * BLOCKSIZE))
                        : "memory" );
          asm volatile ("movdqu %[abuf4], %%xmm8\n\t"
                        "movdqu %[abuf5], %%xmm9\n\t"
                        "movdqu %[abuf6], %%xmm10\n\t"
                        "movdqu %[abuf7], %%xmm11\n\t"
                        :
                        : [abuf4] "m" (*(abuf + 4 * BLOCKSIZE)),
                          [abuf5] "m" (*(abuf + 5 * BLOCKSIZE)),
                          [abuf6] "m" (*(abuf + 6 * BLOCKSIZE)),
                          [abuf7] "m" (*(abuf + 7 * BLOCKSIZE))
                        : "memory" );
          /* XOR the eight offsets into the AAD blocks and fold in the
             first round key up front.  */
          asm volatile ("pxor %%xmm7, %%xmm1\n\t"
                        "pxor %%xmm5, %%xmm1\n\t"
                        "pxor %%xmm12, %%xmm2\n\t"
                        "pxor %%xmm5, %%xmm2\n\t"
                        "pxor %%xmm13, %%xmm3\n\t"
                        "pxor %%xmm5, %%xmm3\n\t"
                        "pxor %%xmm0, %%xmm5\n\t"
                        "movdqa (%[key]), %%xmm0\n\t"
                        "pxor %%xmm5, %%xmm4\n\t"
                        "pxor %%xmm0, %%xmm1\n\t"   /* xmm1 ^= key[0] */
                        "pxor %%xmm0, %%xmm2\n\t"   /* xmm2 ^= key[0] */
                        "pxor %%xmm0, %%xmm3\n\t"   /* xmm3 ^= key[0] */
                        "pxor %%xmm0, %%xmm4\n\t"   /* xmm4 ^= key[0] */
                        "pxor %%xmm7, %%xmm8\n\t"
                        "pxor %%xmm5, %%xmm8\n\t"
                        "pxor %%xmm12, %%xmm9\n\t"
                        "pxor %%xmm5, %%xmm9\n\t"
                        "pxor %%xmm13, %%xmm10\n\t"
                        "pxor %%xmm5, %%xmm10\n\t"
                        "pxor %%xmm14, %%xmm5\n\t"
                        "pxor %%xmm5, %%xmm11\n\t"
                        "pxor %%xmm0, %%xmm8\n\t"   /* xmm8 ^= key[0] */
                        "pxor %%xmm0, %%xmm9\n\t"   /* xmm9 ^= key[0] */
                        "pxor %%xmm0, %%xmm10\n\t"  /* xmm10 ^= key[0] */
                        "pxor %%xmm0, %%xmm11\n\t"  /* xmm11 ^= key[0] */
                        :
                        : [key] "r" (ctx->keyschenc)
                        : "memory" );

          do_aesni_enc_vec8 (ctx);

          /* NOTE(review): xmm0 is expected to hold the last round key
             after do_aesni_enc_vec8 — confirm against that macro.  */
          asm volatile ( "aesenclast %%xmm0, %%xmm1\n\t"
                        "aesenclast %%xmm0, %%xmm2\n\t"
                        "aesenclast %%xmm0, %%xmm3\n\t"
                        "aesenclast %%xmm0, %%xmm4\n\t"
                        "aesenclast %%xmm0, %%xmm8\n\t"
                        "aesenclast %%xmm0, %%xmm9\n\t"
                        "aesenclast %%xmm0, %%xmm10\n\t"
                        "aesenclast %%xmm0, %%xmm11\n\t"
                        "pxor %%xmm2, %%xmm1\n\t"
                        "pxor %%xmm3, %%xmm1\n\t"
                        "pxor %%xmm4, %%xmm1\n\t"
                        "pxor %%xmm8, %%xmm1\n\t"
                        "pxor %%xmm9, %%xmm6\n\t"
                        "pxor %%xmm10, %%xmm6\n\t"
                        "pxor %%xmm11, %%xmm6\n\t"
                        "pxor %%xmm1, %%xmm6\n\t"
                        :
                        :
                        : "memory" );

          abuf += 8*BLOCKSIZE;
        }

      aesni_cleanup_8_15();
    }
#endif

  for ( ;nblocks >= 4 ; nblocks -= 4 )
    {
      n += 4;
      l = aes_ocb_get_l(c, n);

      /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
      /* Sum_i = Sum_{i-1} xor ENCIPHER(K, A_i xor Offset_i)  */
      asm volatile ("movdqu %[l0], %%xmm0\n\t"
                    "movdqu %[abuf0], %%xmm1\n\t"
                    "movdqu %[l0l1], %%xmm3\n\t"
                    :
                    : [l0] "m"
                      (*c->u_mode.ocb.L[0]),
                      [l0l1] "m" (*c->u_mode.ocb.L0L1),
                      [abuf0] "m" (*(abuf + 0 * BLOCKSIZE))
                    : "memory" );
      asm volatile ("movdqu %[l1], %%xmm4\n\t"
                    "movdqu %[l3], %%xmm7\n\t"
                    "pxor %%xmm5, %%xmm0\n\t"
                    "pxor %%xmm0, %%xmm1\n\t"
                    :
                    : [l1] "m" (*c->u_mode.ocb.L[1]),
                      [l3] "m" (*l)
                    : "memory" );
      asm volatile ("movdqu %[abuf1], %%xmm2\n\t"
                    "pxor %%xmm5, %%xmm3\n\t"
                    "pxor %%xmm3, %%xmm2\n\t"
                    :
                    : [abuf1] "m" (*(abuf + 1 * BLOCKSIZE))
                    : "memory" );
      asm volatile ("movdqa %%xmm4, %%xmm0\n\t"
                    "movdqu %[abuf2], %%xmm3\n\t"
                    "pxor %%xmm5, %%xmm0\n\t"
                    "pxor %%xmm0, %%xmm3\n\t"
                    :
                    : [abuf2] "m" (*(abuf + 2 * BLOCKSIZE))
                    : "memory" );
      asm volatile ("pxor %%xmm7, %%xmm5\n\t"
                    "pxor %%xmm4, %%xmm5\n\t"
                    "movdqu %[abuf3], %%xmm4\n\t"
                    "pxor %%xmm5, %%xmm4\n\t"
                    :
                    : [abuf3] "m" (*(abuf + 3 * BLOCKSIZE))
                    : "memory" );

      do_aesni_enc_vec4 (ctx);

      /* Fold the four encrypted blocks into the running Sum (xmm6).  */
      asm volatile ("pxor %%xmm1, %%xmm6\n\t"
                    "pxor %%xmm2, %%xmm6\n\t"
                    "pxor %%xmm3, %%xmm6\n\t"
                    "pxor %%xmm4, %%xmm6\n\t"
                    :
                    :
                    : "memory" );

      abuf += 4*BLOCKSIZE;
    }

  for ( ;nblocks; nblocks-- )
    {
      l = aes_ocb_get_l(c, ++n);

      /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
      /* Sum_i = Sum_{i-1} xor ENCIPHER(K, A_i xor Offset_i)  */
      asm volatile ("movdqu %[l], %%xmm1\n\t"
                    "movdqu %[abuf], %%xmm0\n\t"
                    "pxor %%xmm1, %%xmm5\n\t"
                    "pxor %%xmm5, %%xmm0\n\t"
                    :
                    : [l] "m" (*l),
                      [abuf] "m" (*abuf)
                    : "memory" );

      do_aesni_enc (ctx);

      asm volatile ("pxor %%xmm0, %%xmm6\n\t"
                    :
                    :
                    : "memory" );

      abuf += BLOCKSIZE;
    }

  c->u_mode.ocb.aad_nblocks = n;
  /* Write back the AAD Offset (xmm5) and Sum (xmm6).  */
  asm volatile ("movdqu %%xmm5, %[iv]\n\t"
                "movdqu %%xmm6, %[ctr]\n\t"
                : [iv] "=m" (*c->u_mode.ocb.aad_offset),
                  [ctr] "=m" (*c->u_mode.ocb.aad_sum)
                :
                : "memory" );

  aesni_cleanup ();
  aesni_cleanup_2_7 ();

  return 0;
}


/* Constant for the XTS tweak multiplication by x in GF(2^128):
   low quadword 0x87 (the reduction polynomial byte), high quadword
   0x01; laid out for use with pand after psrad sign-propagation.  */
static const u64 xts_gfmul_const[2] __attribute__ ((aligned (16))) =
  { 0x87, 0x01 };


/* Encrypt NBLOCKS full 16-byte blocks from INBUF to OUTBUF in XTS
   mode with AES-NI; TWEAK (16 bytes) is consumed and updated in
   place.  */
static void ASM_FUNC_ATTR
_gcry_aes_aesni_xts_enc (RIJNDAEL_context *ctx, unsigned char *tweak,
                         unsigned char *outbuf, const unsigned char *inbuf,
                         size_t nblocks)
{
  aesni_prepare_2_7_variable;

  aesni_prepare ();
  aesni_prepare_2_7 ();

  /* Preload Tweak
*/
  asm volatile ("movdqu %[tweak], %%xmm5\n\t"
                "movdqa %[gfmul], %%xmm6\n\t"
                :
                : [tweak] "m" (*tweak),
                  [gfmul] "m" (*xts_gfmul_const)
                : "memory" );

#ifdef __x86_64__
  if (nblocks >= 8)
    {
      aesni_prepare_8_15_variable;

      aesni_prepare_8_15();

      for ( ;nblocks >= 8 ; nblocks -= 8 )
        {
          /* Generate the next eight tweaks (tweak *= x in GF(2^128),
             via the pshufd/psrad/paddq/pand/pxor sequence) while XORing
             each into its plaintext block.  Tweaks 0..4 stay in
             xmm7/xmm12..15; tweaks 5..7 are spilled to the output
             buffer temporarily.  */
          asm volatile ("pshufd $0x13, %%xmm5, %%xmm11\n\t"
                        "movdqu %[inbuf0], %%xmm1\n\t"
                        "pxor %%xmm5, %%xmm1\n\t"
                        "movdqa %%xmm5, %%xmm7\n\t"
                        "movdqa %%xmm11, %%xmm0\n\t"
                        "paddd %%xmm11, %%xmm11\n\t"
                        "psrad $31, %%xmm0\n\t"
                        "paddq %%xmm5, %%xmm5\n\t"
                        "pand %%xmm6, %%xmm0\n\t"
                        "pxor %%xmm0, %%xmm5\n\t"
                        :
                        : [inbuf0] "m" (*(inbuf + 0 * 16))
                        : "memory" );
          asm volatile ("movdqu %[inbuf1], %%xmm2\n\t"
                        "pxor %%xmm5, %%xmm2\n\t"
                        "movdqa %%xmm5, %%xmm12\n\t"
                        "movdqa %%xmm11, %%xmm0\n\t"
                        "paddd %%xmm11, %%xmm11\n\t"
                        "psrad $31, %%xmm0\n\t"
                        "paddq %%xmm5, %%xmm5\n\t"
                        "pand %%xmm6, %%xmm0\n\t"
                        "pxor %%xmm0, %%xmm5\n\t"
                        :
                        : [inbuf1] "m" (*(inbuf + 1 * 16))
                        : "memory" );
          asm volatile ("movdqu %[inbuf2], %%xmm3\n\t"
                        "pxor %%xmm5, %%xmm3\n\t"
                        "movdqa %%xmm5, %%xmm13\n\t"
                        "movdqa %%xmm11, %%xmm0\n\t"
                        "paddd %%xmm11, %%xmm11\n\t"
                        "psrad $31, %%xmm0\n\t"
                        "paddq %%xmm5, %%xmm5\n\t"
                        "pand %%xmm6, %%xmm0\n\t"
                        "pxor %%xmm0, %%xmm5\n\t"
                        :
                        : [inbuf2] "m" (*(inbuf + 2 * 16))
                        : "memory" );
          asm volatile ("movdqu %[inbuf3], %%xmm4\n\t"
                        "pxor %%xmm5, %%xmm4\n\t"
                        "movdqa %%xmm5, %%xmm14\n\t"
                        "movdqa %%xmm11, %%xmm0\n\t"
                        "paddd %%xmm11, %%xmm11\n\t"
                        "psrad $31, %%xmm0\n\t"
                        "paddq %%xmm5, %%xmm5\n\t"
                        "pand %%xmm6, %%xmm0\n\t"
                        "pxor %%xmm0, %%xmm5\n\t"
                        :
                        : [inbuf3] "m" (*(inbuf + 3 * 16))
                        : "memory" );
          asm volatile ("movdqu %[inbuf4], %%xmm8\n\t"
                        "pxor %%xmm5, %%xmm8\n\t"
                        "movdqa %%xmm5, %%xmm15\n\t"
                        "movdqa %%xmm11, %%xmm0\n\t"
                        "paddd %%xmm11, %%xmm11\n\t"
                        "psrad $31, %%xmm0\n\t"
                        "paddq %%xmm5, %%xmm5\n\t"
                        "pand %%xmm6, %%xmm0\n\t"
                        "pxor %%xmm0, %%xmm5\n\t"
                        :
                        : [inbuf4] "m" (*(inbuf + 4 * 16))
                        : "memory" );
          asm volatile ("movdqu %[inbuf5], %%xmm9\n\t"
                        "pxor %%xmm5, %%xmm9\n\t"
                        "movdqu %%xmm5, %[outbuf5]\n\t"
                        "movdqa %%xmm11, %%xmm0\n\t"
                        "paddd %%xmm11, %%xmm11\n\t"
                        "psrad $31, %%xmm0\n\t"
                        "paddq %%xmm5, %%xmm5\n\t"
                        "pand %%xmm6, %%xmm0\n\t"
                        "pxor %%xmm0, %%xmm5\n\t"
                        : [outbuf5] "=m" (*(outbuf + 5 * 16))
                        : [inbuf5] "m" (*(inbuf + 5 * 16))
                        : "memory" );
          asm volatile ("movdqu %[inbuf6], %%xmm10\n\t"
                        "pxor %%xmm5, %%xmm10\n\t"
                        "movdqu %%xmm5, %[outbuf6]\n\t"
                        "movdqa %%xmm11, %%xmm0\n\t"
                        "paddd %%xmm11, %%xmm11\n\t"
                        "psrad $31, %%xmm0\n\t"
                        "paddq %%xmm5, %%xmm5\n\t"
                        "pand %%xmm6, %%xmm0\n\t"
                        "pxor %%xmm0, %%xmm5\n\t"
                        : [outbuf6] "=m" (*(outbuf + 6 * 16))
                        : [inbuf6] "m" (*(inbuf + 6 * 16))
                        : "memory" );
          asm volatile ("movdqa %%xmm11, %%xmm0\n\t"
                        "movdqu %[inbuf7], %%xmm11\n\t"
                        "pxor %%xmm5, %%xmm11\n\t"
                        "movdqu %%xmm5, %[outbuf7]\n\t"
                        "psrad $31, %%xmm0\n\t"
                        "paddq %%xmm5, %%xmm5\n\t"
                        "pand %%xmm6, %%xmm0\n\t"
                        "pxor %%xmm0, %%xmm5\n\t"
                        : [outbuf7] "=m" (*(outbuf + 7 * 16))
                        : [inbuf7] "m" (*(inbuf + 7 * 16))
                        : "memory" );

          /* Eight-way unrolled AES encryption; cmpl/jb/je skip the
             extra rounds for 128-/192-bit keys.  */
          asm volatile ("cmpl $12, %[rounds]\n\t"
                        "movdqa (%[key]), %%xmm0\n\t"
                        "pxor %%xmm0, %%xmm1\n\t"
                        "pxor %%xmm0, %%xmm2\n\t"
                        "pxor %%xmm0, %%xmm3\n\t"
                        "pxor %%xmm0, %%xmm4\n\t"
                        "pxor %%xmm0, %%xmm8\n\t"
                        "pxor %%xmm0, %%xmm9\n\t"
                        "pxor %%xmm0, %%xmm10\n\t"
                        "pxor %%xmm0, %%xmm11\n\t"
                        "movdqa 0x10(%[key]), %%xmm0\n\t"
                        "aesenc %%xmm0, %%xmm1\n\t"
                        "aesenc %%xmm0, %%xmm2\n\t"
                        "aesenc %%xmm0, %%xmm3\n\t"
                        "aesenc %%xmm0, %%xmm4\n\t"
                        "aesenc %%xmm0, %%xmm8\n\t"
                        "aesenc %%xmm0, %%xmm9\n\t"
                        "aesenc %%xmm0, %%xmm10\n\t"
                        "aesenc %%xmm0, %%xmm11\n\t"
                        "movdqa 0x20(%[key]), %%xmm0\n\t"
                        "aesenc %%xmm0, %%xmm1\n\t"
                        "aesenc %%xmm0, %%xmm2\n\t"
                        "aesenc %%xmm0, %%xmm3\n\t"
                        "aesenc %%xmm0, %%xmm4\n\t"
                        "aesenc %%xmm0, %%xmm8\n\t"
                        "aesenc %%xmm0, %%xmm9\n\t"
                        "aesenc %%xmm0, %%xmm10\n\t"
                        "aesenc %%xmm0, %%xmm11\n\t"
                        "movdqa 0x30(%[key]), %%xmm0\n\t"
                        "aesenc %%xmm0, %%xmm1\n\t"
                        "aesenc %%xmm0, %%xmm2\n\t"
                        "aesenc %%xmm0, %%xmm3\n\t"
                        "aesenc %%xmm0, %%xmm4\n\t"
                        "aesenc %%xmm0, %%xmm8\n\t"
                        "aesenc %%xmm0, %%xmm9\n\t"
                        "aesenc %%xmm0, %%xmm10\n\t"
                        "aesenc %%xmm0, %%xmm11\n\t"
                        "movdqa 0x40(%[key]), %%xmm0\n\t"
                        "aesenc %%xmm0, %%xmm1\n\t"
                        "aesenc %%xmm0, %%xmm2\n\t"
                        "aesenc %%xmm0, %%xmm3\n\t"
                        "aesenc %%xmm0, %%xmm4\n\t"
                        "aesenc %%xmm0, %%xmm8\n\t"
                        "aesenc %%xmm0, %%xmm9\n\t"
                        "aesenc %%xmm0, %%xmm10\n\t"
                        "aesenc %%xmm0, %%xmm11\n\t"
                        "movdqa 0x50(%[key]), %%xmm0\n\t"
                        "aesenc %%xmm0, %%xmm1\n\t"
                        "aesenc %%xmm0, %%xmm2\n\t"
                        "aesenc %%xmm0, %%xmm3\n\t"
                        "aesenc %%xmm0, %%xmm4\n\t"
                        "aesenc %%xmm0, %%xmm8\n\t"
                        "aesenc %%xmm0, %%xmm9\n\t"
                        "aesenc %%xmm0, %%xmm10\n\t"
                        "aesenc %%xmm0, %%xmm11\n\t"
                        "movdqa 0x60(%[key]), %%xmm0\n\t"
                        "aesenc %%xmm0, %%xmm1\n\t"
                        "aesenc %%xmm0, %%xmm2\n\t"
                        "aesenc %%xmm0, %%xmm3\n\t"
                        "aesenc %%xmm0, %%xmm4\n\t"
                        "aesenc %%xmm0, %%xmm8\n\t"
                        "aesenc %%xmm0, %%xmm9\n\t"
                        "aesenc %%xmm0, %%xmm10\n\t"
                        "aesenc %%xmm0, %%xmm11\n\t"
                        "movdqa 0x70(%[key]), %%xmm0\n\t"
                        "aesenc %%xmm0, %%xmm1\n\t"
                        "aesenc %%xmm0, %%xmm2\n\t"
                        "aesenc %%xmm0, %%xmm3\n\t"
                        "aesenc %%xmm0, %%xmm4\n\t"
                        "aesenc %%xmm0, %%xmm8\n\t"
                        "aesenc %%xmm0, %%xmm9\n\t"
                        "aesenc %%xmm0, %%xmm10\n\t"
                        "aesenc %%xmm0, %%xmm11\n\t"
                        "movdqa 0x80(%[key]), %%xmm0\n\t"
                        "aesenc %%xmm0, %%xmm1\n\t"
                        "aesenc %%xmm0, %%xmm2\n\t"
                        "aesenc %%xmm0, %%xmm3\n\t"
                        "aesenc %%xmm0, %%xmm4\n\t"
                        "aesenc %%xmm0, %%xmm8\n\t"
                        "aesenc %%xmm0, %%xmm9\n\t"
                        "aesenc %%xmm0, %%xmm10\n\t"
                        "aesenc %%xmm0, %%xmm11\n\t"
                        "movdqa 0x90(%[key]), %%xmm0\n\t"
                        "aesenc %%xmm0, %%xmm1\n\t"
                        "aesenc %%xmm0, %%xmm2\n\t"
                        "aesenc %%xmm0, %%xmm3\n\t"
                        "aesenc %%xmm0, %%xmm4\n\t"
                        "aesenc %%xmm0, %%xmm8\n\t"
                        "aesenc %%xmm0, %%xmm9\n\t"
                        "aesenc %%xmm0, %%xmm10\n\t"
                        "aesenc %%xmm0, %%xmm11\n\t"
                        "movdqa 0xa0(%[key]), %%xmm0\n\t"
                        "jb .Lenclast%=\n\t"
                        "aesenc %%xmm0, %%xmm1\n\t"
                        "aesenc %%xmm0, %%xmm2\n\t"
                        "aesenc %%xmm0, %%xmm3\n\t"
                        "aesenc %%xmm0, %%xmm4\n\t"
                        "aesenc %%xmm0, %%xmm8\n\t"
                        "aesenc %%xmm0, %%xmm9\n\t"
                        "aesenc %%xmm0, %%xmm10\n\t"
                        "aesenc %%xmm0, %%xmm11\n\t"
                        "movdqa 0xb0(%[key]), %%xmm0\n\t"
                        "aesenc %%xmm0, %%xmm1\n\t"
                        "aesenc %%xmm0, %%xmm2\n\t"
                        "aesenc %%xmm0, %%xmm3\n\t"
                        "aesenc %%xmm0, %%xmm4\n\t"
                        "aesenc %%xmm0, %%xmm8\n\t"
                        "aesenc %%xmm0, %%xmm9\n\t"
                        "aesenc %%xmm0, %%xmm10\n\t"
                        "aesenc %%xmm0, %%xmm11\n\t"
                        "movdqa 0xc0(%[key]), %%xmm0\n\t"
                        "je .Lenclast%=\n\t"
                        "aesenc %%xmm0, %%xmm1\n\t"
                        "aesenc %%xmm0, %%xmm2\n\t"
                        "aesenc %%xmm0, %%xmm3\n\t"
                        "aesenc %%xmm0, %%xmm4\n\t"
                        "aesenc %%xmm0, %%xmm8\n\t"
                        "aesenc %%xmm0, %%xmm9\n\t"
                        "aesenc %%xmm0, %%xmm10\n\t"
                        "aesenc %%xmm0, %%xmm11\n\t"
                        "movdqa 0xd0(%[key]), %%xmm0\n\t"
                        "aesenc %%xmm0, %%xmm1\n\t"
                        "aesenc %%xmm0, %%xmm2\n\t"
                        "aesenc %%xmm0, %%xmm3\n\t"
                        "aesenc %%xmm0, %%xmm4\n\t"
                        "aesenc %%xmm0, %%xmm8\n\t"
                        "aesenc %%xmm0, %%xmm9\n\t"
                        "aesenc %%xmm0, %%xmm10\n\t"
                        "aesenc %%xmm0, %%xmm11\n\t"
                        "movdqa 0xe0(%[key]), %%xmm0\n\t"
                        ".Lenclast%=:\n\t"
                        :
                        : [key] "r" (ctx->keyschenc),
                          [rounds] "rm" (ctx->rounds)
                        : "cc", "memory");

          /* Last round with the per-block tweak XORed into the last
             round key (xmm0), then store the eight ciphertext blocks;
             tweaks 5..7 are reloaded from the output buffer.  */
          asm volatile ("pxor %%xmm0, %%xmm7\n\t"
                        "pxor %%xmm0, %%xmm12\n\t"
                        "pxor %%xmm0, %%xmm13\n\t"
                        "pxor %%xmm0, %%xmm14\n\t"
                        "aesenclast %%xmm7, %%xmm1\n\t"
                        "aesenclast %%xmm12, %%xmm2\n\t"
                        "aesenclast %%xmm13, %%xmm3\n\t"
                        "aesenclast %%xmm14, %%xmm4\n\t"
                        "movdqu 5*16(%[outbuf]), %%xmm12\n\t"
                        "movdqu 6*16(%[outbuf]), %%xmm13\n\t"
                        "movdqu 7*16(%[outbuf]), %%xmm14\n\t"
                        "pxor %%xmm0, %%xmm15\n\t"
                        "pxor %%xmm0, %%xmm12\n\t"
                        "pxor %%xmm0, %%xmm13\n\t"
                        "pxor %%xmm0, %%xmm14\n\t"
                        "aesenclast %%xmm15, %%xmm8\n\t"
                        "aesenclast %%xmm12, %%xmm9\n\t"
                        "aesenclast %%xmm13, %%xmm10\n\t"
                        "aesenclast %%xmm14, %%xmm11\n\t"
                        "movdqu %%xmm1, 0*16(%[outbuf])\n\t"
                        "movdqu %%xmm2, 1*16(%[outbuf])\n\t"
                        "movdqu %%xmm3, 2*16(%[outbuf])\n\t"
                        "movdqu %%xmm4, 3*16(%[outbuf])\n\t"
                        "movdqu %%xmm8, 4*16(%[outbuf])\n\t"
                        "movdqu %%xmm9, 5*16(%[outbuf])\n\t"
                        "movdqu %%xmm10, 6*16(%[outbuf])\n\t"
                        "movdqu %%xmm11, 7*16(%[outbuf])\n\t"
                        :
                        : [outbuf] "r" (outbuf)
                        : "memory" );

          outbuf += 8*BLOCKSIZE;
          inbuf += 8*BLOCKSIZE;
        }

      aesni_cleanup_8_15();
    }
#endif

  for ( ;nblocks >= 4; nblocks -= 4 )
    {
      /* Per-block tweaks are spilled to the output buffer and XORed
         back in after the 4-way encryption.  */
      asm volatile ("pshufd $0x13, %%xmm5, %%xmm4\n\t"
                    "movdqu %[inbuf0], %%xmm1\n\t"
                    "pxor %%xmm5, %%xmm1\n\t"
                    "movdqu %%xmm5, %[outbuf0]\n\t"
                    "movdqa %%xmm4, %%xmm0\n\t"
                    "paddd %%xmm4, %%xmm4\n\t"
                    "psrad $31, %%xmm0\n\t"
                    "paddq %%xmm5, %%xmm5\n\t"
                    "pand %%xmm6, %%xmm0\n\t"
                    "pxor %%xmm0, %%xmm5\n\t"
                    : [outbuf0] "=m" (*(outbuf + 0 * 16))
                    : [inbuf0] "m" (*(inbuf + 0 * 16))
                    : "memory" );
      asm volatile ("movdqu %[inbuf1], %%xmm2\n\t"
                    "pxor %%xmm5, %%xmm2\n\t"
                    "movdqu %%xmm5, %[outbuf1]\n\t"
                    "movdqa %%xmm4, %%xmm0\n\t"
                    "paddd %%xmm4, %%xmm4\n\t"
                    "psrad $31, %%xmm0\n\t"
                    "paddq %%xmm5, %%xmm5\n\t"
                    "pand %%xmm6, %%xmm0\n\t"
                    "pxor %%xmm0, %%xmm5\n\t"
                    : [outbuf1] "=m" (*(outbuf + 1 * 16))
                    : [inbuf1] "m" (*(inbuf + 1 * 16))
                    : "memory" );
      asm volatile ("movdqu %[inbuf2], %%xmm3\n\t"
                    "pxor %%xmm5, %%xmm3\n\t"
                    "movdqu %%xmm5, %[outbuf2]\n\t"
                    "movdqa %%xmm4, %%xmm0\n\t"
                    "paddd %%xmm4, %%xmm4\n\t"
                    "psrad $31, %%xmm0\n\t"
                    "paddq %%xmm5, %%xmm5\n\t"
                    "pand %%xmm6, %%xmm0\n\t"
                    "pxor %%xmm0, %%xmm5\n\t"
                    : [outbuf2] "=m" (*(outbuf + 2 * 16))
                    : [inbuf2] "m" (*(inbuf + 2 * 16))
                    : "memory" );
      asm volatile ("movdqa %%xmm4, %%xmm0\n\t"
                    "movdqu %[inbuf3], %%xmm4\n\t"
                    "pxor %%xmm5, %%xmm4\n\t"
                    "movdqu %%xmm5, %[outbuf3]\n\t"
                    "psrad $31, %%xmm0\n\t"
                    "paddq %%xmm5, %%xmm5\n\t"
                    "pand %%xmm6, %%xmm0\n\t"
                    "pxor %%xmm0, %%xmm5\n\t"
                    : [outbuf3] "=m" (*(outbuf + 3 * 16))
                    : [inbuf3] "m" (*(inbuf + 3 * 16))
                    : "memory" );

      do_aesni_enc_vec4 (ctx);

      asm volatile ("movdqu %[outbuf0], %%xmm0\n\t"
                    "pxor %%xmm0, %%xmm1\n\t"
                    "movdqu %[outbuf1], %%xmm0\n\t"
                    "movdqu %%xmm1, %[outbuf0]\n\t"
                    "movdqu %[outbuf2], %%xmm1\n\t"
                    "pxor %%xmm0, %%xmm2\n\t"
                    "movdqu %[outbuf3], %%xmm0\n\t"
                    "pxor %%xmm1, %%xmm3\n\t"
                    "pxor %%xmm0, %%xmm4\n\t"
                    "movdqu %%xmm2, %[outbuf1]\n\t"
                    "movdqu %%xmm3, %[outbuf2]\n\t"
                    "movdqu %%xmm4, %[outbuf3]\n\t"
                    : [outbuf0] "+m" (*(outbuf + 0 * 16)),
                      [outbuf1] "+m" (*(outbuf + 1 * 16)),
                      [outbuf2] "+m" (*(outbuf + 2 * 16)),
                      [outbuf3] "+m" (*(outbuf + 3 * 16))
                    :
                    : "memory" );

      outbuf += BLOCKSIZE * 4;
      inbuf += BLOCKSIZE * 4;
    }

  for ( ;nblocks; nblocks-- )
    {
      asm volatile ("movdqu %[inbuf], %%xmm0\n\t"
                    "pxor %%xmm5, %%xmm0\n\t"
                    "movdqa %%xmm5, %%xmm4\n\t"
                    "pshufd $0x13, %%xmm5, %%xmm1\n\t"
                    "psrad $31, %%xmm1\n\t"
                    "paddq %%xmm5, %%xmm5\n\t"
                    "pand %%xmm6, %%xmm1\n\t"
                    "pxor %%xmm1, %%xmm5\n\t"
                    :
                    : [inbuf] "m" (*inbuf)
                    : "memory" );

      do_aesni_enc (ctx);

      asm volatile ("pxor %%xmm4, %%xmm0\n\t"
                    "movdqu %%xmm0, %[outbuf]\n\t"
                    : [outbuf] "=m" (*outbuf)
                    :
                    : "memory" );

      outbuf += BLOCKSIZE;
      inbuf += BLOCKSIZE;
    }

  /* Write the advanced tweak back for the caller.  */
  asm volatile ("movdqu %%xmm5, %[tweak]\n\t"
                : [tweak] "=m" (*tweak)
                :
                : "memory" );

  aesni_cleanup ();
  aesni_cleanup_2_7 ();
}


/* Decrypt NBLOCKS full 16-byte blocks from INBUF to OUTBUF in XTS
   mode with AES-NI; TWEAK (16 bytes) is consumed and updated in
   place.  */
static void ASM_FUNC_ATTR
_gcry_aes_aesni_xts_dec (RIJNDAEL_context *ctx, unsigned char *tweak,
                         unsigned char *outbuf, const unsigned char *inbuf,
                         size_t nblocks)
{
  aesni_prepare_2_7_variable;

  aesni_prepare ();
  aesni_prepare_2_7 ();

  if ( !ctx->decryption_prepared )
    {
      do_aesni_prepare_decryption ( ctx );
      ctx->decryption_prepared = 1;
    }

  /* Preload Tweak */
  asm volatile ("movdqu %[tweak], %%xmm5\n\t"
                "movdqa %[gfmul], %%xmm6\n\t"
                :
                : [tweak] "m" (*tweak),
                  [gfmul] "m" (*xts_gfmul_const)
                : "memory" );

#ifdef __x86_64__
  if (nblocks >= 8)
    {
      aesni_prepare_8_15_variable;

      aesni_prepare_8_15();

      for ( ;nblocks >= 8 ; nblocks -= 8 )
        {
          /* Same eight-tweak generation as in the encrypt path.  */
          asm volatile ("pshufd $0x13, %%xmm5, %%xmm11\n\t"
                        "movdqu %[inbuf0], %%xmm1\n\t"
                        "pxor %%xmm5, %%xmm1\n\t"
                        "movdqa %%xmm5, %%xmm7\n\t"
                        "movdqa %%xmm11, %%xmm0\n\t"
                        "paddd %%xmm11, %%xmm11\n\t"
                        "psrad $31, %%xmm0\n\t"
                        "paddq %%xmm5, %%xmm5\n\t"
                        "pand %%xmm6, %%xmm0\n\t"
                        "pxor %%xmm0, %%xmm5\n\t"
                        :
                        : [inbuf0] "m" (*(inbuf + 0 * 16))
                        : "memory" );
          asm volatile ("movdqu %[inbuf1], %%xmm2\n\t"
                        "pxor %%xmm5, %%xmm2\n\t"
                        "movdqa %%xmm5, %%xmm12\n\t"
                        "movdqa %%xmm11, %%xmm0\n\t"
                        "paddd %%xmm11, %%xmm11\n\t"
                        "psrad $31, %%xmm0\n\t"
                        "paddq %%xmm5, %%xmm5\n\t"
                        "pand %%xmm6, %%xmm0\n\t"
                        "pxor %%xmm0, %%xmm5\n\t"
                        :
                        : [inbuf1] "m" (*(inbuf + 1 * 16))
                        : "memory" );
          asm volatile ("movdqu %[inbuf2], %%xmm3\n\t"
                        "pxor %%xmm5, %%xmm3\n\t"
                        "movdqa %%xmm5, %%xmm13\n\t"
                        "movdqa %%xmm11, %%xmm0\n\t"
                        "paddd %%xmm11, %%xmm11\n\t"
                        "psrad $31, %%xmm0\n\t"
                        "paddq %%xmm5, %%xmm5\n\t"
                        "pand %%xmm6, %%xmm0\n\t"
                        "pxor %%xmm0,
%%xmm5\n\t"
                        :
                        : [inbuf2] "m" (*(inbuf + 2 * 16))
                        : "memory" );
          asm volatile ("movdqu %[inbuf3], %%xmm4\n\t"
                        "pxor %%xmm5, %%xmm4\n\t"
                        "movdqa %%xmm5, %%xmm14\n\t"
                        "movdqa %%xmm11, %%xmm0\n\t"
                        "paddd %%xmm11, %%xmm11\n\t"
                        "psrad $31, %%xmm0\n\t"
                        "paddq %%xmm5, %%xmm5\n\t"
                        "pand %%xmm6, %%xmm0\n\t"
                        "pxor %%xmm0, %%xmm5\n\t"
                        :
                        : [inbuf3] "m" (*(inbuf + 3 * 16))
                        : "memory" );
          asm volatile ("movdqu %[inbuf4], %%xmm8\n\t"
                        "pxor %%xmm5, %%xmm8\n\t"
                        "movdqa %%xmm5, %%xmm15\n\t"
                        "movdqa %%xmm11, %%xmm0\n\t"
                        "paddd %%xmm11, %%xmm11\n\t"
                        "psrad $31, %%xmm0\n\t"
                        "paddq %%xmm5, %%xmm5\n\t"
                        "pand %%xmm6, %%xmm0\n\t"
                        "pxor %%xmm0, %%xmm5\n\t"
                        :
                        : [inbuf4] "m" (*(inbuf + 4 * 16))
                        : "memory" );
          /* Tweaks for blocks 5..7 are spilled to the output buffer
             temporarily.  */
          asm volatile ("movdqu %[inbuf5], %%xmm9\n\t"
                        "pxor %%xmm5, %%xmm9\n\t"
                        "movdqu %%xmm5, %[outbuf5]\n\t"
                        "movdqa %%xmm11, %%xmm0\n\t"
                        "paddd %%xmm11, %%xmm11\n\t"
                        "psrad $31, %%xmm0\n\t"
                        "paddq %%xmm5, %%xmm5\n\t"
                        "pand %%xmm6, %%xmm0\n\t"
                        "pxor %%xmm0, %%xmm5\n\t"
                        : [outbuf5] "=m" (*(outbuf + 5 * 16))
                        : [inbuf5] "m" (*(inbuf + 5 * 16))
                        : "memory" );
          asm volatile ("movdqu %[inbuf6], %%xmm10\n\t"
                        "pxor %%xmm5, %%xmm10\n\t"
                        "movdqu %%xmm5, %[outbuf6]\n\t"
                        "movdqa %%xmm11, %%xmm0\n\t"
                        "paddd %%xmm11, %%xmm11\n\t"
                        "psrad $31, %%xmm0\n\t"
                        "paddq %%xmm5, %%xmm5\n\t"
                        "pand %%xmm6, %%xmm0\n\t"
                        "pxor %%xmm0, %%xmm5\n\t"
                        : [outbuf6] "=m" (*(outbuf + 6 * 16))
                        : [inbuf6] "m" (*(inbuf + 6 * 16))
                        : "memory" );
          asm volatile ("movdqa %%xmm11, %%xmm0\n\t"
                        "movdqu %[inbuf7], %%xmm11\n\t"
                        "pxor %%xmm5, %%xmm11\n\t"
                        "movdqu %%xmm5, %[outbuf7]\n\t"
                        "psrad $31, %%xmm0\n\t"
                        "paddq %%xmm5, %%xmm5\n\t"
                        "pand %%xmm6, %%xmm0\n\t"
                        "pxor %%xmm0, %%xmm5\n\t"
                        : [outbuf7] "=m" (*(outbuf + 7 * 16))
                        : [inbuf7] "m" (*(inbuf + 7 * 16))
                        : "memory" );

          /* Eight-way unrolled AES decryption; cmpl/jb/je skip the
             extra rounds for 128-/192-bit keys.  */
          asm volatile ("cmpl $12, %[rounds]\n\t"
                        "movdqa (%[key]), %%xmm0\n\t"
                        "pxor %%xmm0, %%xmm1\n\t"
                        "pxor %%xmm0, %%xmm2\n\t"
                        "pxor %%xmm0, %%xmm3\n\t"
                        "pxor %%xmm0, %%xmm4\n\t"
                        "pxor %%xmm0, %%xmm8\n\t"
                        "pxor %%xmm0, %%xmm9\n\t"
                        "pxor %%xmm0, %%xmm10\n\t"
                        "pxor %%xmm0, %%xmm11\n\t"
                        "movdqa 0x10(%[key]), %%xmm0\n\t"
                        "aesdec %%xmm0, %%xmm1\n\t"
                        "aesdec %%xmm0, %%xmm2\n\t"
                        "aesdec %%xmm0, %%xmm3\n\t"
                        "aesdec %%xmm0, %%xmm4\n\t"
                        "aesdec %%xmm0, %%xmm8\n\t"
                        "aesdec %%xmm0, %%xmm9\n\t"
                        "aesdec %%xmm0, %%xmm10\n\t"
                        "aesdec %%xmm0, %%xmm11\n\t"
                        "movdqa 0x20(%[key]), %%xmm0\n\t"
                        "aesdec %%xmm0, %%xmm1\n\t"
                        "aesdec %%xmm0, %%xmm2\n\t"
                        "aesdec %%xmm0, %%xmm3\n\t"
                        "aesdec %%xmm0, %%xmm4\n\t"
                        "aesdec %%xmm0, %%xmm8\n\t"
                        "aesdec %%xmm0, %%xmm9\n\t"
                        "aesdec %%xmm0, %%xmm10\n\t"
                        "aesdec %%xmm0, %%xmm11\n\t"
                        "movdqa 0x30(%[key]), %%xmm0\n\t"
                        "aesdec %%xmm0, %%xmm1\n\t"
                        "aesdec %%xmm0, %%xmm2\n\t"
                        "aesdec %%xmm0, %%xmm3\n\t"
                        "aesdec %%xmm0, %%xmm4\n\t"
                        "aesdec %%xmm0, %%xmm8\n\t"
                        "aesdec %%xmm0, %%xmm9\n\t"
                        "aesdec %%xmm0, %%xmm10\n\t"
                        "aesdec %%xmm0, %%xmm11\n\t"
                        "movdqa 0x40(%[key]), %%xmm0\n\t"
                        "aesdec %%xmm0, %%xmm1\n\t"
                        "aesdec %%xmm0, %%xmm2\n\t"
                        "aesdec %%xmm0, %%xmm3\n\t"
                        "aesdec %%xmm0, %%xmm4\n\t"
                        "aesdec %%xmm0, %%xmm8\n\t"
                        "aesdec %%xmm0, %%xmm9\n\t"
                        "aesdec %%xmm0, %%xmm10\n\t"
                        "aesdec %%xmm0, %%xmm11\n\t"
                        "movdqa 0x50(%[key]), %%xmm0\n\t"
                        "aesdec %%xmm0, %%xmm1\n\t"
                        "aesdec %%xmm0, %%xmm2\n\t"
                        "aesdec %%xmm0, %%xmm3\n\t"
                        "aesdec %%xmm0, %%xmm4\n\t"
                        "aesdec %%xmm0, %%xmm8\n\t"
                        "aesdec %%xmm0, %%xmm9\n\t"
                        "aesdec %%xmm0, %%xmm10\n\t"
                        "aesdec %%xmm0, %%xmm11\n\t"
                        "movdqa 0x60(%[key]), %%xmm0\n\t"
                        "aesdec %%xmm0, %%xmm1\n\t"
                        "aesdec %%xmm0, %%xmm2\n\t"
                        "aesdec %%xmm0, %%xmm3\n\t"
                        "aesdec %%xmm0, %%xmm4\n\t"
                        "aesdec %%xmm0, %%xmm8\n\t"
                        "aesdec %%xmm0, %%xmm9\n\t"
                        "aesdec %%xmm0, %%xmm10\n\t"
                        "aesdec %%xmm0, %%xmm11\n\t"
                        "movdqa 0x70(%[key]), %%xmm0\n\t"
                        "aesdec %%xmm0, %%xmm1\n\t"
                        "aesdec %%xmm0, %%xmm2\n\t"
                        "aesdec %%xmm0, %%xmm3\n\t"
                        "aesdec %%xmm0, %%xmm4\n\t"
                        "aesdec %%xmm0, %%xmm8\n\t"
                        "aesdec %%xmm0, %%xmm9\n\t"
                        "aesdec %%xmm0, %%xmm10\n\t"
                        "aesdec %%xmm0, %%xmm11\n\t"
                        "movdqa 0x80(%[key]), %%xmm0\n\t"
                        "aesdec %%xmm0, %%xmm1\n\t"
                        "aesdec %%xmm0, %%xmm2\n\t"
                        "aesdec %%xmm0, %%xmm3\n\t"
                        "aesdec %%xmm0, %%xmm4\n\t"
                        "aesdec %%xmm0, %%xmm8\n\t"
                        "aesdec %%xmm0, %%xmm9\n\t"
                        "aesdec %%xmm0, %%xmm10\n\t"
                        "aesdec %%xmm0, %%xmm11\n\t"
                        "movdqa 0x90(%[key]), %%xmm0\n\t"
                        "aesdec %%xmm0, %%xmm1\n\t"
                        "aesdec %%xmm0, %%xmm2\n\t"
                        "aesdec %%xmm0, %%xmm3\n\t"
                        "aesdec %%xmm0, %%xmm4\n\t"
                        "aesdec %%xmm0, %%xmm8\n\t"
                        "aesdec %%xmm0, %%xmm9\n\t"
                        "aesdec %%xmm0, %%xmm10\n\t"
                        "aesdec %%xmm0, %%xmm11\n\t"
                        "movdqa 0xa0(%[key]), %%xmm0\n\t"
                        "jb .Ldeclast%=\n\t"
                        "aesdec %%xmm0, %%xmm1\n\t"
                        "aesdec %%xmm0, %%xmm2\n\t"
                        "aesdec %%xmm0, %%xmm3\n\t"
                        "aesdec %%xmm0, %%xmm4\n\t"
                        "aesdec %%xmm0, %%xmm8\n\t"
                        "aesdec %%xmm0, %%xmm9\n\t"
                        "aesdec %%xmm0, %%xmm10\n\t"
                        "aesdec %%xmm0, %%xmm11\n\t"
                        "movdqa 0xb0(%[key]), %%xmm0\n\t"
                        "aesdec %%xmm0, %%xmm1\n\t"
                        "aesdec %%xmm0, %%xmm2\n\t"
                        "aesdec %%xmm0, %%xmm3\n\t"
                        "aesdec %%xmm0, %%xmm4\n\t"
                        "aesdec %%xmm0, %%xmm8\n\t"
                        "aesdec %%xmm0, %%xmm9\n\t"
                        "aesdec %%xmm0, %%xmm10\n\t"
                        "aesdec %%xmm0, %%xmm11\n\t"
                        "movdqa 0xc0(%[key]), %%xmm0\n\t"
                        "je .Ldeclast%=\n\t"
                        "aesdec %%xmm0, %%xmm1\n\t"
                        "aesdec %%xmm0, %%xmm2\n\t"
                        "aesdec %%xmm0, %%xmm3\n\t"
                        "aesdec %%xmm0, %%xmm4\n\t"
                        "aesdec %%xmm0, %%xmm8\n\t"
                        "aesdec %%xmm0, %%xmm9\n\t"
                        "aesdec %%xmm0, %%xmm10\n\t"
                        "aesdec %%xmm0, %%xmm11\n\t"
                        "movdqa 0xd0(%[key]), %%xmm0\n\t"
                        "aesdec %%xmm0, %%xmm1\n\t"
                        "aesdec %%xmm0, %%xmm2\n\t"
                        "aesdec %%xmm0, %%xmm3\n\t"
                        "aesdec %%xmm0, %%xmm4\n\t"
                        "aesdec %%xmm0, %%xmm8\n\t"
                        "aesdec %%xmm0, %%xmm9\n\t"
                        "aesdec %%xmm0, %%xmm10\n\t"
                        "aesdec %%xmm0, %%xmm11\n\t"
                        "movdqa 0xe0(%[key]), %%xmm0\n\t"
                        ".Ldeclast%=:\n\t"
                        :
                        : [key] "r" (ctx->keyschdec),
                          [rounds] "rm" (ctx->rounds)
                        : "cc", "memory");

          /* Last round with the per-block tweak XORed into the last
             round key (xmm0), then store the eight plaintext blocks;
             tweaks 5..7 are reloaded from the output buffer.  */
          asm volatile ("pxor %%xmm0, %%xmm7\n\t"
                        "pxor %%xmm0, %%xmm12\n\t"
                        "pxor %%xmm0, %%xmm13\n\t"
                        "pxor %%xmm0, %%xmm14\n\t"
                        "aesdeclast %%xmm7, %%xmm1\n\t"
                        "aesdeclast %%xmm12, %%xmm2\n\t"
                        "aesdeclast %%xmm13, %%xmm3\n\t"
                        "aesdeclast %%xmm14, %%xmm4\n\t"
                        "movdqu 5*16(%[outbuf]), %%xmm12\n\t"
                        "movdqu 6*16(%[outbuf]), %%xmm13\n\t"
                        "movdqu 7*16(%[outbuf]), %%xmm14\n\t"
                        "pxor %%xmm0, %%xmm15\n\t"
                        "pxor %%xmm0, %%xmm12\n\t"
                        "pxor %%xmm0, %%xmm13\n\t"
                        "pxor %%xmm0, %%xmm14\n\t"
                        "aesdeclast %%xmm15, %%xmm8\n\t"
                        "aesdeclast %%xmm12, %%xmm9\n\t"
                        "aesdeclast %%xmm13, %%xmm10\n\t"
                        "aesdeclast %%xmm14, %%xmm11\n\t"
                        "movdqu %%xmm1, 0*16(%[outbuf])\n\t"
                        "movdqu %%xmm2, 1*16(%[outbuf])\n\t"
                        "movdqu %%xmm3, 2*16(%[outbuf])\n\t"
                        "movdqu %%xmm4, 3*16(%[outbuf])\n\t"
                        "movdqu %%xmm8, 4*16(%[outbuf])\n\t"
                        "movdqu %%xmm9, 5*16(%[outbuf])\n\t"
                        "movdqu %%xmm10, 6*16(%[outbuf])\n\t"
                        "movdqu %%xmm11, 7*16(%[outbuf])\n\t"
                        :
                        : [outbuf] "r" (outbuf)
                        : "memory" );

          outbuf += 8*BLOCKSIZE;
          inbuf += 8*BLOCKSIZE;
        }

      aesni_cleanup_8_15();
    }
#endif

  for ( ;nblocks >= 4; nblocks -= 4 )
    {
      /* Per-block tweaks are spilled to the output buffer and XORed
         back in after the 4-way decryption.  */
      asm volatile ("pshufd $0x13, %%xmm5, %%xmm4\n\t"
                    "movdqu %[inbuf0], %%xmm1\n\t"
                    "pxor %%xmm5, %%xmm1\n\t"
                    "movdqu %%xmm5, %[outbuf0]\n\t"
                    "movdqa %%xmm4, %%xmm0\n\t"
                    "paddd %%xmm4, %%xmm4\n\t"
                    "psrad $31, %%xmm0\n\t"
                    "paddq %%xmm5, %%xmm5\n\t"
                    "pand %%xmm6, %%xmm0\n\t"
                    "pxor %%xmm0, %%xmm5\n\t"
                    : [outbuf0] "=m" (*(outbuf + 0 * 16))
                    : [inbuf0] "m" (*(inbuf + 0 * 16))
                    : "memory" );
      asm volatile ("movdqu %[inbuf1], %%xmm2\n\t"
                    "pxor %%xmm5, %%xmm2\n\t"
                    "movdqu %%xmm5, %[outbuf1]\n\t"
                    "movdqa %%xmm4, %%xmm0\n\t"
                    "paddd %%xmm4, %%xmm4\n\t"
                    "psrad $31, %%xmm0\n\t"
                    "paddq %%xmm5, %%xmm5\n\t"
                    "pand %%xmm6, %%xmm0\n\t"
                    "pxor %%xmm0, %%xmm5\n\t"
                    : [outbuf1] "=m" (*(outbuf + 1 * 16))
                    : [inbuf1] "m" (*(inbuf + 1 * 16))
                    : "memory" );
      asm volatile ("movdqu %[inbuf2], %%xmm3\n\t"
                    "pxor %%xmm5, %%xmm3\n\t"
                    "movdqu %%xmm5, %[outbuf2]\n\t"
                    "movdqa %%xmm4, %%xmm0\n\t"
                    "paddd %%xmm4, %%xmm4\n\t"
                    "psrad $31, %%xmm0\n\t"
                    "paddq %%xmm5, %%xmm5\n\t"
                    "pand %%xmm6, %%xmm0\n\t"
                    "pxor %%xmm0, %%xmm5\n\t"
                    : [outbuf2] "=m" (*(outbuf + 2 * 16))
                    : [inbuf2] "m" (*(inbuf + 2 * 16))
                    : "memory" );
      asm volatile ("movdqa %%xmm4, %%xmm0\n\t"
                    "movdqu %[inbuf3], %%xmm4\n\t"
                    "pxor %%xmm5, %%xmm4\n\t"
                    "movdqu %%xmm5, %[outbuf3]\n\t"
                    "psrad $31, %%xmm0\n\t"
                    "paddq %%xmm5, %%xmm5\n\t"
                    "pand %%xmm6, %%xmm0\n\t"
                    "pxor %%xmm0, %%xmm5\n\t"
                    : [outbuf3] "=m" (*(outbuf + 3 * 16))
                    : [inbuf3] "m"
(*(inbuf + 3 * 16)) : "memory" ); do_aesni_dec_vec4 (ctx); asm volatile ("movdqu %[outbuf0], %%xmm0\n\t" "pxor %%xmm0, %%xmm1\n\t" "movdqu %[outbuf1], %%xmm0\n\t" "movdqu %%xmm1, %[outbuf0]\n\t" "movdqu %[outbuf2], %%xmm1\n\t" "pxor %%xmm0, %%xmm2\n\t" "movdqu %[outbuf3], %%xmm0\n\t" "pxor %%xmm1, %%xmm3\n\t" "pxor %%xmm0, %%xmm4\n\t" "movdqu %%xmm2, %[outbuf1]\n\t" "movdqu %%xmm3, %[outbuf2]\n\t" "movdqu %%xmm4, %[outbuf3]\n\t" : [outbuf0] "+m" (*(outbuf + 0 * 16)), [outbuf1] "+m" (*(outbuf + 1 * 16)), [outbuf2] "+m" (*(outbuf + 2 * 16)), [outbuf3] "+m" (*(outbuf + 3 * 16)) : : "memory" ); outbuf += BLOCKSIZE * 4; inbuf += BLOCKSIZE * 4; } for ( ;nblocks; nblocks-- ) { asm volatile ("movdqu %[inbuf], %%xmm0\n\t" "pxor %%xmm5, %%xmm0\n\t" "movdqa %%xmm5, %%xmm4\n\t" "pshufd $0x13, %%xmm5, %%xmm1\n\t" "psrad $31, %%xmm1\n\t" "paddq %%xmm5, %%xmm5\n\t" "pand %%xmm6, %%xmm1\n\t" "pxor %%xmm1, %%xmm5\n\t" : : [inbuf] "m" (*inbuf) : "memory" ); do_aesni_dec (ctx); asm volatile ("pxor %%xmm4, %%xmm0\n\t" "movdqu %%xmm0, %[outbuf]\n\t" : [outbuf] "=m" (*outbuf) : : "memory" ); outbuf += BLOCKSIZE; inbuf += BLOCKSIZE; } asm volatile ("movdqu %%xmm5, %[tweak]\n\t" : [tweak] "=m" (*tweak) : : "memory" ); aesni_cleanup (); aesni_cleanup_2_7 (); } void ASM_FUNC_ATTR _gcry_aes_aesni_xts_crypt (RIJNDAEL_context *ctx, unsigned char *tweak, unsigned char *outbuf, const unsigned char *inbuf, size_t nblocks, int encrypt) { if (encrypt) _gcry_aes_aesni_xts_enc(ctx, tweak, outbuf, inbuf, nblocks); else _gcry_aes_aesni_xts_dec(ctx, tweak, outbuf, inbuf, nblocks); } #if __clang__ # pragma clang attribute pop #endif #endif /* USE_AESNI */ diff --git a/cipher/rijndael-armv8-aarch32-ce.S b/cipher/rijndael-armv8-aarch32-ce.S index 1eafa93e..6208652b 100644 --- a/cipher/rijndael-armv8-aarch32-ce.S +++ b/cipher/rijndael-armv8-aarch32-ce.S @@ -1,1988 +1,2134 @@ /* rijndael-armv8-aarch32-ce.S - ARMv8/CE accelerated AES * Copyright (C) 2016 Jussi Kivilinna * * This file is part of Libgcrypt. 
* * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . */ #include #if defined(HAVE_ARM_ARCH_V6) && defined(__ARMEL__) && \ defined(HAVE_COMPATIBLE_GCC_ARM_PLATFORM_AS) && \ defined(HAVE_GCC_INLINE_ASM_AARCH32_CRYPTO) .syntax unified .arch armv8-a .fpu crypto-neon-fp-armv8 .arm .text #ifdef __PIC__ # define GET_DATA_POINTER(reg, name, rtmp) \ ldr reg, 1f; \ ldr rtmp, 2f; \ b 3f; \ 1: .word _GLOBAL_OFFSET_TABLE_-(3f+8); \ 2: .word name(GOT); \ 3: add reg, pc, reg; \ ldr reg, [reg, rtmp]; #else # define GET_DATA_POINTER(reg, name, rtmp) ldr reg, =name #endif /* AES macros */ #define aes_preload_keys(keysched, rekeysched) \ vldmia keysched!, {q5-q7}; \ mov rekeysched, keysched; \ vldmialo keysched!, {q8-q15}; /* 128-bit */ \ addeq keysched, #(2*16); \ vldmiaeq keysched!, {q10-q15}; /* 192-bit */ \ addhi keysched, #(4*16); \ vldmiahi keysched!, {q12-q15}; /* 256-bit */ \ #define do_aes_one128(ed, mcimc, qo, qb) \ aes##ed.8 qb, q5; \ aes##mcimc.8 qb, qb; \ aes##ed.8 qb, q6; \ aes##mcimc.8 qb, qb; \ aes##ed.8 qb, q7; \ aes##mcimc.8 qb, qb; \ aes##ed.8 qb, q8; \ aes##mcimc.8 qb, qb; \ aes##ed.8 qb, q9; \ aes##mcimc.8 qb, qb; \ aes##ed.8 qb, q10; \ aes##mcimc.8 qb, qb; \ aes##ed.8 qb, q11; \ aes##mcimc.8 qb, qb; \ aes##ed.8 qb, q12; \ aes##mcimc.8 qb, qb; \ aes##ed.8 qb, q13; \ aes##mcimc.8 qb, qb; \ aes##ed.8 qb, q14; \ veor qo, qb, q15; #define do_aes_one128re(ed, mcimc, qo, qb, keysched, rekeysched) \ vldm 
rekeysched, {q8-q9}; \ do_aes_one128(ed, mcimc, qo, qb); #define do_aes_one192(ed, mcimc, qo, qb, keysched, rekeysched) \ vldm rekeysched!, {q8}; \ aes##ed.8 qb, q5; \ aes##mcimc.8 qb, qb; \ vldm rekeysched, {q9}; \ aes##ed.8 qb, q6; \ aes##mcimc.8 qb, qb; \ aes##ed.8 qb, q7; \ aes##mcimc.8 qb, qb; \ aes##ed.8 qb, q8; \ aes##mcimc.8 qb, qb; \ vldmia keysched!, {q8}; \ aes##ed.8 qb, q9; \ aes##mcimc.8 qb, qb; \ sub rekeysched, #(1*16); \ aes##ed.8 qb, q10; \ aes##mcimc.8 qb, qb; \ vldm keysched, {q9}; \ aes##ed.8 qb, q11; \ aes##mcimc.8 qb, qb; \ aes##ed.8 qb, q12; \ aes##mcimc.8 qb, qb; \ sub keysched, #16; \ aes##ed.8 qb, q13; \ aes##mcimc.8 qb, qb; \ aes##ed.8 qb, q14; \ aes##mcimc.8 qb, qb; \ aes##ed.8 qb, q15; \ aes##mcimc.8 qb, qb; \ aes##ed.8 qb, q8; \ veor qo, qb, q9; \ #define do_aes_one256(ed, mcimc, qo, qb, keysched, rekeysched) \ vldmia rekeysched!, {q8}; \ aes##ed.8 qb, q5; \ aes##mcimc.8 qb, qb; \ vldmia rekeysched!, {q9}; \ aes##ed.8 qb, q6; \ aes##mcimc.8 qb, qb; \ vldmia rekeysched!, {q10}; \ aes##ed.8 qb, q7; \ aes##mcimc.8 qb, qb; \ vldm rekeysched, {q11}; \ aes##ed.8 qb, q8; \ aes##mcimc.8 qb, qb; \ vldmia keysched!, {q8}; \ aes##ed.8 qb, q9; \ aes##mcimc.8 qb, qb; \ aes##ed.8 qb, q10; \ aes##mcimc.8 qb, qb; \ vldmia keysched!, {q9}; \ aes##ed.8 qb, q11; \ aes##mcimc.8 qb, qb; \ sub rekeysched, #(3*16); \ aes##ed.8 qb, q12; \ aes##mcimc.8 qb, qb; \ vldmia keysched!, {q10}; \ aes##ed.8 qb, q13; \ aes##mcimc.8 qb, qb; \ aes##ed.8 qb, q14; \ aes##mcimc.8 qb, qb; \ vldm keysched, {q11}; \ aes##ed.8 qb, q15; \ aes##mcimc.8 qb, qb; \ aes##ed.8 qb, q8; \ aes##mcimc.8 qb, qb; \ aes##ed.8 qb, q9; \ aes##mcimc.8 qb, qb; \ aes##ed.8 qb, q10; \ veor qo, qb, q11; \ sub keysched, #(3*16); \ #define aes_round_4(ed, mcimc, b0, b1, b2, b3, key) \ aes##ed.8 b0, key; \ aes##mcimc.8 b0, b0; \ aes##ed.8 b1, key; \ aes##mcimc.8 b1, b1; \ aes##ed.8 b2, key; \ aes##mcimc.8 b2, b2; \ aes##ed.8 b3, key; \ aes##mcimc.8 b3, b3; #define do_aes_4_128(ed, mcimc, b0, b1, b2, 
b3) \ aes_round_4(ed, mcimc, b0, b1, b2, b3, q5); \ aes_round_4(ed, mcimc, b0, b1, b2, b3, q6); \ aes_round_4(ed, mcimc, b0, b1, b2, b3, q7); \ aes_round_4(ed, mcimc, b0, b1, b2, b3, q8); \ aes_round_4(ed, mcimc, b0, b1, b2, b3, q9); \ aes_round_4(ed, mcimc, b0, b1, b2, b3, q10); \ aes_round_4(ed, mcimc, b0, b1, b2, b3, q11); \ aes_round_4(ed, mcimc, b0, b1, b2, b3, q12); \ aes_round_4(ed, mcimc, b0, b1, b2, b3, q13); \ aes##ed.8 b0, q14; \ veor b0, b0, q15; \ aes##ed.8 b1, q14; \ veor b1, b1, q15; \ aes##ed.8 b2, q14; \ veor b2, b2, q15; \ aes##ed.8 b3, q14; \ veor b3, b3, q15; #define do_aes_4_128re(ed, mcimc, b0, b1, b2, b3, keysched, rekeysched) \ vldm rekeysched, {q8-q9}; \ do_aes_4_128(ed, mcimc, b0, b1, b2, b3); #define do_aes_4_192(ed, mcimc, b0, b1, b2, b3, keysched, rekeysched) \ vldm rekeysched!, {q8}; \ aes_round_4(ed, mcimc, b0, b1, b2, b3, q5); \ vldm rekeysched, {q9}; \ aes_round_4(ed, mcimc, b0, b1, b2, b3, q6); \ aes_round_4(ed, mcimc, b0, b1, b2, b3, q7); \ aes_round_4(ed, mcimc, b0, b1, b2, b3, q8); \ vldmia keysched!, {q8}; \ aes_round_4(ed, mcimc, b0, b1, b2, b3, q9); \ sub rekeysched, #(1*16); \ aes_round_4(ed, mcimc, b0, b1, b2, b3, q10); \ vldm keysched, {q9}; \ aes_round_4(ed, mcimc, b0, b1, b2, b3, q11); \ aes_round_4(ed, mcimc, b0, b1, b2, b3, q12); \ sub keysched, #16; \ aes_round_4(ed, mcimc, b0, b1, b2, b3, q13); \ aes_round_4(ed, mcimc, b0, b1, b2, b3, q14); \ aes_round_4(ed, mcimc, b0, b1, b2, b3, q15); \ aes##ed.8 b0, q8; \ veor b0, b0, q9; \ aes##ed.8 b1, q8; \ veor b1, b1, q9; \ aes##ed.8 b2, q8; \ veor b2, b2, q9; \ aes##ed.8 b3, q8; \ veor b3, b3, q9; #define do_aes_4_256(ed, mcimc, b0, b1, b2, b3, keysched, rekeysched) \ vldmia rekeysched!, {q8}; \ aes_round_4(ed, mcimc, b0, b1, b2, b3, q5); \ vldmia rekeysched!, {q9}; \ aes_round_4(ed, mcimc, b0, b1, b2, b3, q6); \ vldmia rekeysched!, {q10}; \ aes_round_4(ed, mcimc, b0, b1, b2, b3, q7); \ vldm rekeysched, {q11}; \ aes_round_4(ed, mcimc, b0, b1, b2, b3, q8); \ vldmia keysched!, 
{q8}; \ aes_round_4(ed, mcimc, b0, b1, b2, b3, q9); \ aes_round_4(ed, mcimc, b0, b1, b2, b3, q10); \ vldmia keysched!, {q9}; \ aes_round_4(ed, mcimc, b0, b1, b2, b3, q11); \ sub rekeysched, #(3*16); \ aes_round_4(ed, mcimc, b0, b1, b2, b3, q12); \ vldmia keysched!, {q10}; \ aes_round_4(ed, mcimc, b0, b1, b2, b3, q13); \ aes_round_4(ed, mcimc, b0, b1, b2, b3, q14); \ vldm keysched, {q11}; \ aes_round_4(ed, mcimc, b0, b1, b2, b3, q15); \ aes_round_4(ed, mcimc, b0, b1, b2, b3, q8); \ aes_round_4(ed, mcimc, b0, b1, b2, b3, q9); \ sub keysched, #(3*16); \ aes##ed.8 b0, q10; \ veor b0, b0, q11; \ aes##ed.8 b1, q10; \ veor b1, b1, q11; \ aes##ed.8 b2, q10; \ veor b2, b2, q11; \ aes##ed.8 b3, q10; \ veor b3, b3, q11; /* Other functional macros */ #define CLEAR_REG(reg) vmov.i8 reg, #0; /* * unsigned int _gcry_aes_enc_armv8_ce(void *keysched, byte *dst, * const byte *src, * unsigned int nrounds); */ .align 3 .globl _gcry_aes_enc_armv8_ce .type _gcry_aes_enc_armv8_ce,%function; _gcry_aes_enc_armv8_ce: /* input: * r0: keysched * r1: dst * r2: src * r3: nrounds */ vldmia r0!, {q1-q3} /* load 3 round keys */ cmp r3, #12 vld1.8 {q0}, [r2] bhi .Lenc1_256 beq .Lenc1_192 .Lenc1_128: .Lenc1_tail: vldmia r0, {q8-q15} /* load 8 round keys */ aese.8 q0, q1 aesmc.8 q0, q0 CLEAR_REG(q1) aese.8 q0, q2 aesmc.8 q0, q0 CLEAR_REG(q2) aese.8 q0, q3 aesmc.8 q0, q0 CLEAR_REG(q3) aese.8 q0, q8 aesmc.8 q0, q0 CLEAR_REG(q8) aese.8 q0, q9 aesmc.8 q0, q0 CLEAR_REG(q9) aese.8 q0, q10 aesmc.8 q0, q0 CLEAR_REG(q10) aese.8 q0, q11 aesmc.8 q0, q0 CLEAR_REG(q11) aese.8 q0, q12 aesmc.8 q0, q0 CLEAR_REG(q12) aese.8 q0, q13 aesmc.8 q0, q0 CLEAR_REG(q13) aese.8 q0, q14 veor q0, q15 CLEAR_REG(q14) CLEAR_REG(q15) vst1.8 {q0}, [r1] CLEAR_REG(q0) mov r0, #0 bx lr .Lenc1_192: aese.8 q0, q1 aesmc.8 q0, q0 vmov q1, q3 aese.8 q0, q2 aesmc.8 q0, q0 vldm r0!, {q2-q3} /* load 3 round keys */ b .Lenc1_tail .Lenc1_256: vldm r0!, {q15} /* load 1 round key */ aese.8 q0, q1 aesmc.8 q0, q0 aese.8 q0, q2 aesmc.8 q0, q0 aese.8 
q0, q3 aesmc.8 q0, q0 vldm r0!, {q1-q3} /* load 3 round keys */ aese.8 q0, q15 aesmc.8 q0, q0 b .Lenc1_tail .size _gcry_aes_enc_armv8_ce,.-_gcry_aes_enc_armv8_ce; /* * unsigned int _gcry_aes_dec_armv8_ce(void *keysched, byte *dst, * const byte *src, * unsigned int nrounds); */ .align 3 .globl _gcry_aes_dec_armv8_ce .type _gcry_aes_dec_armv8_ce,%function; _gcry_aes_dec_armv8_ce: /* input: * r0: keysched * r1: dst * r2: src * r3: nrounds */ vldmia r0!, {q1-q3} /* load 3 round keys */ cmp r3, #12 vld1.8 {q0}, [r2] bhi .Ldec1_256 beq .Ldec1_192 .Ldec1_128: .Ldec1_tail: vldmia r0, {q8-q15} /* load 8 round keys */ aesd.8 q0, q1 aesimc.8 q0, q0 CLEAR_REG(q1) aesd.8 q0, q2 aesimc.8 q0, q0 CLEAR_REG(q2) aesd.8 q0, q3 aesimc.8 q0, q0 CLEAR_REG(q3) aesd.8 q0, q8 aesimc.8 q0, q0 CLEAR_REG(q8) aesd.8 q0, q9 aesimc.8 q0, q0 CLEAR_REG(q9) aesd.8 q0, q10 aesimc.8 q0, q0 CLEAR_REG(q10) aesd.8 q0, q11 aesimc.8 q0, q0 CLEAR_REG(q11) aesd.8 q0, q12 aesimc.8 q0, q0 CLEAR_REG(q12) aesd.8 q0, q13 aesimc.8 q0, q0 CLEAR_REG(q13) aesd.8 q0, q14 veor q0, q15 CLEAR_REG(q14) CLEAR_REG(q15) vst1.8 {q0}, [r1] CLEAR_REG(q0) mov r0, #0 bx lr .Ldec1_192: aesd.8 q0, q1 aesimc.8 q0, q0 vmov q1, q3 aesd.8 q0, q2 aesimc.8 q0, q0 vldm r0!, {q2-q3} /* load 3 round keys */ b .Ldec1_tail .Ldec1_256: vldm r0!, {q15} /* load 1 round key */ aesd.8 q0, q1 aesimc.8 q0, q0 aesd.8 q0, q2 aesimc.8 q0, q0 aesd.8 q0, q3 aesimc.8 q0, q0 vldm r0!, {q1-q3} /* load 3 round keys */ aesd.8 q0, q15 aesimc.8 q0, q0 b .Ldec1_tail .size _gcry_aes_dec_armv8_ce,.-_gcry_aes_dec_armv8_ce; /* * void _gcry_aes_cbc_enc_armv8_ce (const void *keysched, * unsigned char *outbuf, * const unsigned char *inbuf, * unsigned char *iv, size_t nblocks, * int cbc_mac, unsigned int nrounds); */ .align 3 .globl _gcry_aes_cbc_enc_armv8_ce .type _gcry_aes_cbc_enc_armv8_ce,%function; _gcry_aes_cbc_enc_armv8_ce: /* input: * r0: keysched * r1: outbuf * r2: inbuf * r3: iv * %st+0: nblocks => r4 * %st+4: cbc_mac => r5 * %st+8: nrounds => r6 */ push 
{r4-r6,lr} /* 4*4 = 16b */ ldr r4, [sp, #(16+0)] ldr r5, [sp, #(16+4)] cmp r4, #0 ldr r6, [sp, #(16+8)] beq .Lcbc_enc_skip cmp r5, #0 vpush {q4-q7} moveq r5, #16 movne r5, #0 cmp r6, #12 vld1.8 {q1}, [r3] /* load IV */ aes_preload_keys(r0, lr); beq .Lcbc_enc_loop192 bhi .Lcbc_enc_loop256 #define CBC_ENC(bits, ...) \ .Lcbc_enc_loop##bits: \ vld1.8 {q0}, [r2]!; /* load plaintext */ \ veor q1, q0, q1; \ subs r4, r4, #1; \ \ do_aes_one##bits(e, mc, q1, q1, ##__VA_ARGS__); \ \ vst1.8 {q1}, [r1], r5; /* store ciphertext */ \ \ bne .Lcbc_enc_loop##bits; \ b .Lcbc_enc_done; CBC_ENC(128) CBC_ENC(192, r0, lr) CBC_ENC(256, r0, lr) #undef CBC_ENC .Lcbc_enc_done: vst1.8 {q1}, [r3] /* store IV */ CLEAR_REG(q0) CLEAR_REG(q1) CLEAR_REG(q2) CLEAR_REG(q3) CLEAR_REG(q8) CLEAR_REG(q9) vpop {q4-q7} CLEAR_REG(q10) CLEAR_REG(q11) CLEAR_REG(q12) CLEAR_REG(q13) CLEAR_REG(q14) .Lcbc_enc_skip: pop {r4-r6,pc} .size _gcry_aes_cbc_enc_armv8_ce,.-_gcry_aes_cbc_enc_armv8_ce; /* * void _gcry_aes_cbc_dec_armv8_ce (const void *keysched, * unsigned char *outbuf, * const unsigned char *inbuf, * unsigned char *iv, unsigned int nrounds); */ .align 3 .globl _gcry_aes_cbc_dec_armv8_ce .type _gcry_aes_cbc_dec_armv8_ce,%function; _gcry_aes_cbc_dec_armv8_ce: /* input: * r0: keysched * r1: outbuf * r2: inbuf * r3: iv * %st+0: nblocks => r4 * %st+4: nrounds => r5 */ push {r4-r6,lr} /* 4*4 = 16b */ ldr r4, [sp, #(16+0)] ldr r5, [sp, #(16+4)] cmp r4, #0 beq .Lcbc_dec_skip vpush {q4-q7} cmp r5, #12 vld1.8 {q0}, [r3] /* load IV */ aes_preload_keys(r0, r6); beq .Lcbc_dec_entry_192 bhi .Lcbc_dec_entry_256 #define CBC_DEC(bits, ...) 
\ .Lcbc_dec_entry_##bits: \ cmp r4, #4; \ blo .Lcbc_dec_loop_##bits; \ \ .Lcbc_dec_loop4_##bits: \ \ vld1.8 {q1-q2}, [r2]!; /* load ciphertext */ \ sub r4, r4, #4; \ vld1.8 {q3-q4}, [r2]; /* load ciphertext */ \ cmp r4, #4; \ sub r2, #32; \ \ do_aes_4_##bits(d, imc, q1, q2, q3, q4, ##__VA_ARGS__); \ \ veor q1, q1, q0; \ vld1.8 {q0}, [r2]!; /* load next IV */ \ veor q2, q2, q0; \ vld1.8 {q0}, [r2]!; /* load next IV */ \ vst1.8 {q1-q2}, [r1]!; /* store plaintext */ \ veor q3, q3, q0; \ vld1.8 {q0}, [r2]!; /* load next IV */ \ veor q4, q4, q0; \ vld1.8 {q0}, [r2]!; /* load next IV */ \ vst1.8 {q3-q4}, [r1]!; /* store plaintext */ \ \ bhs .Lcbc_dec_loop4_##bits; \ cmp r4, #0; \ beq .Lcbc_dec_done; \ \ .Lcbc_dec_loop_##bits: \ vld1.8 {q1}, [r2]!; /* load ciphertext */ \ subs r4, r4, #1; \ vmov q2, q1; \ \ do_aes_one##bits(d, imc, q1, q1, ##__VA_ARGS__); \ \ veor q1, q1, q0; \ vmov q0, q2; \ vst1.8 {q1}, [r1]!; /* store plaintext */ \ \ bne .Lcbc_dec_loop_##bits; \ b .Lcbc_dec_done; CBC_DEC(128) CBC_DEC(192, r0, r6) CBC_DEC(256, r0, r6) #undef CBC_DEC .Lcbc_dec_done: vst1.8 {q0}, [r3] /* store IV */ CLEAR_REG(q0) CLEAR_REG(q1) CLEAR_REG(q2) CLEAR_REG(q3) CLEAR_REG(q8) CLEAR_REG(q9) vpop {q4-q7} CLEAR_REG(q10) CLEAR_REG(q11) CLEAR_REG(q12) CLEAR_REG(q13) CLEAR_REG(q14) .Lcbc_dec_skip: pop {r4-r6,pc} .size _gcry_aes_cbc_dec_armv8_ce,.-_gcry_aes_cbc_dec_armv8_ce; +/* + * void _gcry_aes_ecb_enc_armv8_ce (const void *keysched, + * unsigned char *outbuf, + * const unsigned char *inbuf, + * size_t nblocks, + * unsigned int nrounds); + */ + +.align 3 +.globl _gcry_aes_ecb_enc_armv8_ce +.type _gcry_aes_ecb_enc_armv8_ce,%function; +_gcry_aes_ecb_enc_armv8_ce: + /* input: + * r0: keysched + * r1: outbuf + * r2: inbuf + * r3: nblocks + * %st+0: nrounds => r4 + */ + + push {r4-r6,lr} /* 4*4 = 16b */ + cmp r3, #0 + beq .Lecb_enc_skip + ldr r4, [sp, #(16+0)] + vpush {q4-q7} + + cmp r4, #12 + aes_preload_keys(r0, lr); + + beq .Lecb_entry_192e + bhi .Lecb_entry_256e + +#define 
ECB_CRYPT(bits, e_d, mc_imc, ...) \ + .Lecb_entry_##bits##e_d: \ + cmp r3, #4; \ + blo .Lecb_loop_##bits##e_d; \ + \ + .Lecb_loop4_##bits##e_d: \ + vld1.8 {q1-q2}, [r2]!; /* load plaintext */ \ + sub r3, r3, #4; \ + vld1.8 {q3-q4}, [r2]!; /* load plaintext */ \ + cmp r3, #4; \ + \ + do_aes_4_##bits(e_d, mc_imc, q1, q2, q3, q4, ##__VA_ARGS__); \ + \ + vst1.8 {q1-q2}, [r1]!; /* store ciphertext */ \ + vst1.8 {q3-q4}, [r1]!; /* store ciphertext */ \ + \ + bhs .Lecb_loop4_##bits##e_d; \ + cmp r3, #0; \ + beq .Lecb_done_##e_d; \ + \ + .Lecb_loop_##bits##e_d: \ + vld1.8 {q1}, [r2]!; /* load ciphertext */ \ + subs r3, r3, #1; \ + \ + do_aes_one##bits(e_d, mc_imc, q1, q1, ##__VA_ARGS__); \ + \ + vst1.8 {q1}, [r1]!; /* store plaintext */ \ + bne .Lecb_loop_##bits##e_d; \ + b .Lecb_done_##e_d; + + ECB_CRYPT(128, e, mc) + ECB_CRYPT(192, e, mc, r0, lr) + ECB_CRYPT(256, e, mc, r0, lr) + +.Lecb_done_e: + CLEAR_REG(q0) + CLEAR_REG(q1) + CLEAR_REG(q2) + CLEAR_REG(q3) + CLEAR_REG(q8) + CLEAR_REG(q9) + vpop {q4-q7} + CLEAR_REG(q10) + CLEAR_REG(q11) + CLEAR_REG(q12) + CLEAR_REG(q13) + CLEAR_REG(q14) + +.Lecb_enc_skip: + pop {r4-r6,pc} +.size _gcry_aes_ecb_enc_armv8_ce,.-_gcry_aes_ecb_enc_armv8_ce; + + +/* + * void _gcry_aes_ecb_dec_armv8_ce (const void *keysched, + * unsigned char *outbuf, + * const unsigned char *inbuf, + * size_t nblocks, + * unsigned int nrounds); + */ + +.align 3 +.globl _gcry_aes_ecb_dec_armv8_ce +.type _gcry_aes_ecb_dec_armv8_ce,%function; +_gcry_aes_ecb_dec_armv8_ce: + /* input: + * r0: keysched + * r1: outbuf + * r2: inbuf + * r3: nblocks + * %st+0: nrounds => r4 + */ + + push {r4-r6,lr} /* 4*4 = 16b */ + cmp r3, #0 + beq .Lecb_enc_skip + ldr r4, [sp, #(16+0)] + vpush {q4-q7} + + cmp r4, #12 + + aes_preload_keys(r0, lr); + + beq .Lecb_entry_192d + bhi .Lecb_entry_256d + + ECB_CRYPT(128, d, imc) + ECB_CRYPT(192, d, imc, r0, lr) + ECB_CRYPT(256, d, imc, r0, lr) + +#undef ECB_CRYPT + +.Lecb_done_d: + CLEAR_REG(q0) + CLEAR_REG(q1) + CLEAR_REG(q2) + CLEAR_REG(q3) 
+ CLEAR_REG(q8) + CLEAR_REG(q9) + vpop {q4-q7} + CLEAR_REG(q10) + CLEAR_REG(q11) + CLEAR_REG(q12) + CLEAR_REG(q13) + CLEAR_REG(q14) + +.Lecb_dec_skip: + pop {r4-r6,pc} +.size _gcry_aes_ecb_dec_armv8_ce,.-_gcry_aes_ecb_dec_armv8_ce; + + /* * void _gcry_aes_cfb_enc_armv8_ce (const void *keysched, * unsigned char *outbuf, * const unsigned char *inbuf, * unsigned char *iv, unsigned int nrounds); */ .align 3 .globl _gcry_aes_cfb_enc_armv8_ce .type _gcry_aes_cfb_enc_armv8_ce,%function; _gcry_aes_cfb_enc_armv8_ce: /* input: * r0: keysched * r1: outbuf * r2: inbuf * r3: iv * %st+0: nblocks => r4 * %st+4: nrounds => r5 */ push {r4-r6,lr} /* 4*4 = 16b */ ldr r4, [sp, #(16+0)] ldr r5, [sp, #(16+4)] cmp r4, #0 beq .Lcfb_enc_skip vpush {q4-q7} cmp r5, #12 vld1.8 {q0}, [r3] /* load IV */ aes_preload_keys(r0, r6); beq .Lcfb_enc_entry_192 bhi .Lcfb_enc_entry_256 #define CFB_ENC(bits, ...) \ .Lcfb_enc_entry_##bits: \ .Lcfb_enc_loop_##bits: \ vld1.8 {q1}, [r2]!; /* load plaintext */ \ subs r4, r4, #1; \ \ do_aes_one##bits(e, mc, q0, q0, ##__VA_ARGS__); \ \ veor q0, q1, q0; \ vst1.8 {q0}, [r1]!; /* store ciphertext */ \ \ bne .Lcfb_enc_loop_##bits; \ b .Lcfb_enc_done; CFB_ENC(128) CFB_ENC(192, r0, r6) CFB_ENC(256, r0, r6) #undef CFB_ENC .Lcfb_enc_done: vst1.8 {q0}, [r3] /* store IV */ CLEAR_REG(q0) CLEAR_REG(q1) CLEAR_REG(q2) CLEAR_REG(q3) CLEAR_REG(q8) CLEAR_REG(q9) vpop {q4-q7} CLEAR_REG(q10) CLEAR_REG(q11) CLEAR_REG(q12) CLEAR_REG(q13) CLEAR_REG(q14) .Lcfb_enc_skip: pop {r4-r6,pc} .size _gcry_aes_cfb_enc_armv8_ce,.-_gcry_aes_cfb_enc_armv8_ce; /* * void _gcry_aes_cfb_dec_armv8_ce (const void *keysched, * unsigned char *outbuf, * const unsigned char *inbuf, * unsigned char *iv, unsigned int nrounds); */ .align 3 .globl _gcry_aes_cfb_dec_armv8_ce .type _gcry_aes_cfb_dec_armv8_ce,%function; _gcry_aes_cfb_dec_armv8_ce: /* input: * r0: keysched * r1: outbuf * r2: inbuf * r3: iv * %st+0: nblocks => r4 * %st+4: nrounds => r5 */ push {r4-r6,lr} /* 4*4 = 16b */ ldr r4, [sp, #(16+0)] ldr r5, 
[sp, #(16+4)] cmp r4, #0 beq .Lcfb_dec_skip vpush {q4-q7} cmp r5, #12 vld1.8 {q0}, [r3] /* load IV */ aes_preload_keys(r0, r6); beq .Lcfb_dec_entry_192 bhi .Lcfb_dec_entry_256 #define CFB_DEC(bits, ...) \ .Lcfb_dec_entry_##bits: \ cmp r4, #4; \ blo .Lcfb_dec_loop_##bits; \ \ .Lcfb_dec_loop4_##bits: \ \ vld1.8 {q2-q3}, [r2]!; /* load ciphertext */ \ vmov q1, q0; \ sub r4, r4, #4; \ vld1.8 {q4}, [r2]; /* load ciphertext */ \ sub r2, #32; \ cmp r4, #4; \ \ do_aes_4_##bits(e, mc, q1, q2, q3, q4, ##__VA_ARGS__); \ \ vld1.8 {q0}, [r2]!; /* load ciphertext */ \ veor q1, q1, q0; \ vld1.8 {q0}, [r2]!; /* load ciphertext */ \ veor q2, q2, q0; \ vst1.8 {q1-q2}, [r1]!; /* store plaintext */ \ vld1.8 {q0}, [r2]!; \ veor q3, q3, q0; \ vld1.8 {q0}, [r2]!; /* load next IV / ciphertext */ \ veor q4, q4, q0; \ vst1.8 {q3-q4}, [r1]!; /* store plaintext */ \ \ bhs .Lcfb_dec_loop4_##bits; \ cmp r4, #0; \ beq .Lcfb_dec_done; \ \ .Lcfb_dec_loop_##bits: \ \ vld1.8 {q1}, [r2]!; /* load ciphertext */ \ \ subs r4, r4, #1; \ \ do_aes_one##bits(e, mc, q0, q0, ##__VA_ARGS__); \ \ veor q2, q1, q0; \ vmov q0, q1; \ vst1.8 {q2}, [r1]!; /* store plaintext */ \ \ bne .Lcfb_dec_loop_##bits; \ b .Lcfb_dec_done; CFB_DEC(128) CFB_DEC(192, r0, r6) CFB_DEC(256, r0, r6) #undef CFB_DEC .Lcfb_dec_done: vst1.8 {q0}, [r3] /* store IV */ CLEAR_REG(q0) CLEAR_REG(q1) CLEAR_REG(q2) CLEAR_REG(q3) CLEAR_REG(q8) CLEAR_REG(q9) vpop {q4-q7} CLEAR_REG(q10) CLEAR_REG(q11) CLEAR_REG(q12) CLEAR_REG(q13) CLEAR_REG(q14) .Lcfb_dec_skip: pop {r4-r6,pc} .size _gcry_aes_cfb_dec_armv8_ce,.-_gcry_aes_cfb_dec_armv8_ce; /* * void _gcry_aes_ctr_enc_armv8_ce (const void *keysched, * unsigned char *outbuf, * const unsigned char *inbuf, * unsigned char *iv, unsigned int nrounds); */ .align 3 .globl _gcry_aes_ctr_enc_armv8_ce .type _gcry_aes_ctr_enc_armv8_ce,%function; _gcry_aes_ctr_enc_armv8_ce: /* input: * r0: keysched * r1: outbuf * r2: inbuf * r3: iv * %st+0: nblocks => r4 * %st+4: nrounds => r5 */ vpush {q4-q7} push {r4-r12,lr} /* 
4*16 + 4*10 = 104b */ ldr r4, [sp, #(104+0)] ldr r5, [sp, #(104+4)] cmp r4, #0 beq .Lctr_enc_skip cmp r5, #12 ldm r3, {r7-r10} vld1.8 {q0}, [r3] /* load IV */ rev r7, r7 rev r8, r8 rev r9, r9 rev r10, r10 aes_preload_keys(r0, r6); beq .Lctr_enc_entry_192 bhi .Lctr_enc_entry_256 #define CTR_ENC(bits, ...) \ .Lctr_enc_entry_##bits: \ cmp r4, #4; \ blo .Lctr_enc_loop_##bits; \ \ .Lctr_enc_loop4_##bits: \ cmp r10, #0xfffffffc; \ sub r4, r4, #4; \ blo .Lctr_enc_loop4_##bits##_nocarry; \ cmp r9, #0xffffffff; \ bne .Lctr_enc_loop4_##bits##_nocarry; \ \ adds r10, #1; \ vmov q1, q0; \ blcs .Lctr_overflow_one; \ rev r11, r10; \ vmov.32 d1[1], r11; \ \ adds r10, #1; \ vmov q2, q0; \ blcs .Lctr_overflow_one; \ rev r11, r10; \ vmov.32 d1[1], r11; \ \ adds r10, #1; \ vmov q3, q0; \ blcs .Lctr_overflow_one; \ rev r11, r10; \ vmov.32 d1[1], r11; \ \ adds r10, #1; \ vmov q4, q0; \ blcs .Lctr_overflow_one; \ rev r11, r10; \ vmov.32 d1[1], r11; \ \ b .Lctr_enc_loop4_##bits##_store_ctr; \ \ .Lctr_enc_loop4_##bits##_nocarry: \ \ veor q2, q2; \ vrev64.8 q1, q0; \ vceq.u32 d5, d5; \ vadd.u64 q3, q2, q2; \ vadd.u64 q4, q3, q2; \ vadd.u64 q0, q3, q3; \ vsub.u64 q2, q1, q2; \ vsub.u64 q3, q1, q3; \ vsub.u64 q4, q1, q4; \ vsub.u64 q0, q1, q0; \ vrev64.8 q1, q1; \ vrev64.8 q2, q2; \ vrev64.8 q3, q3; \ vrev64.8 q0, q0; \ vrev64.8 q4, q4; \ add r10, #4; \ \ .Lctr_enc_loop4_##bits##_store_ctr: \ \ vst1.8 {q0}, [r3]; \ cmp r4, #4; \ vld1.8 {q0}, [r2]!; /* load ciphertext */ \ \ do_aes_4_##bits(e, mc, q1, q2, q3, q4, ##__VA_ARGS__); \ \ veor q1, q1, q0; \ vld1.8 {q0}, [r2]!; /* load ciphertext */ \ vst1.8 {q1}, [r1]!; /* store plaintext */ \ vld1.8 {q1}, [r2]!; /* load ciphertext */ \ veor q2, q2, q0; \ veor q3, q3, q1; \ vld1.8 {q0}, [r2]!; /* load ciphertext */ \ vst1.8 {q2}, [r1]!; /* store plaintext */ \ veor q4, q4, q0; \ vld1.8 {q0}, [r3]; /* reload IV */ \ vst1.8 {q3-q4}, [r1]!; /* store plaintext */ \ \ bhs .Lctr_enc_loop4_##bits; \ cmp r4, #0; \ beq .Lctr_enc_done; \ \ 
.Lctr_enc_loop_##bits: \ \ adds r10, #1; \ vmov q1, q0; \ blcs .Lctr_overflow_one; \ rev r11, r10; \ subs r4, r4, #1; \ vld1.8 {q2}, [r2]!; /* load ciphertext */ \ vmov.32 d1[1], r11; \ \ do_aes_one##bits(e, mc, q1, q1, ##__VA_ARGS__); \ \ veor q1, q2, q1; \ vst1.8 {q1}, [r1]!; /* store plaintext */ \ \ bne .Lctr_enc_loop_##bits; \ b .Lctr_enc_done; CTR_ENC(128) CTR_ENC(192, r0, r6) CTR_ENC(256, r0, r6) #undef CTR_ENC .Lctr_enc_done: vst1.8 {q0}, [r3] /* store IV */ CLEAR_REG(q0) CLEAR_REG(q1) CLEAR_REG(q2) CLEAR_REG(q3) CLEAR_REG(q8) CLEAR_REG(q9) CLEAR_REG(q10) CLEAR_REG(q11) CLEAR_REG(q12) CLEAR_REG(q13) CLEAR_REG(q14) .Lctr_enc_skip: pop {r4-r12,lr} vpop {q4-q7} bx lr .Lctr_overflow_one: adcs r9, #0 adcs r8, #0 adc r7, #0 rev r11, r9 rev r12, r8 vmov.32 d1[0], r11 rev r11, r7 vmov.32 d0[1], r12 vmov.32 d0[0], r11 bx lr .size _gcry_aes_ctr_enc_armv8_ce,.-_gcry_aes_ctr_enc_armv8_ce; /* * void _gcry_aes_ctr32le_enc_armv8_ce (const void *keysched, * unsigned char *outbuf, * const unsigned char *inbuf, * unsigned char *iv, * unsigned int nrounds); */ .align 3 .globl _gcry_aes_ctr32le_enc_armv8_ce .type _gcry_aes_ctr32le_enc_armv8_ce,%function; _gcry_aes_ctr32le_enc_armv8_ce: /* input: * r0: keysched * r1: outbuf * r2: inbuf * r3: iv * %st+0: nblocks => r4 * %st+4: nrounds => r5 */ vpush {q4-q7} push {r4-r12,lr} /* 4*16 + 4*10 = 104b */ ldr r4, [sp, #(104+0)] ldr r5, [sp, #(104+4)] cmp r4, #0 beq .Lctr32le_enc_skip cmp r5, #12 vld1.8 {q0}, [r3] /* load IV */ aes_preload_keys(r0, r6); beq .Lctr32le_enc_entry_192 bhi .Lctr32le_enc_entry_256 #define CTR_ENC(bits, ...) 
\ .Lctr32le_enc_entry_##bits: \ cmp r4, #4; \ blo .Lctr32le_enc_loop_##bits; \ \ .Lctr32le_enc_loop4_##bits: \ veor q2, q2; \ sub r4, r4, #4; \ vmov.i64 d4, #0xffffffff; /* q2 <= -1:0:0:0 */ \ vmov q1, q0; \ vadd.u32 q3, q2, q2; /* q3 <= -2:0:0:0 */ \ vadd.u32 q0, q3, q3; /* q0 <= -4:0:0:0 */ \ vadd.u32 q4, q3, q2; /* q4 <= -3:0:0:0 */ \ vsub.u32 q0, q1, q0; \ vsub.u32 q2, q1, q2; \ vst1.8 {q0}, [r3]; \ vsub.u32 q3, q1, q3; \ vsub.u32 q4, q1, q4; \ \ cmp r4, #4; \ vld1.8 {q0}, [r2]!; /* load ciphertext */ \ \ do_aes_4_##bits(e, mc, q1, q2, q3, q4, ##__VA_ARGS__); \ \ veor q1, q1, q0; \ vld1.8 {q0}, [r2]!; /* load ciphertext */ \ vst1.8 {q1}, [r1]!; /* store plaintext */ \ vld1.8 {q1}, [r2]!; /* load ciphertext */ \ veor q2, q2, q0; \ veor q3, q3, q1; \ vld1.8 {q0}, [r2]!; /* load ciphertext */ \ vst1.8 {q2}, [r1]!; /* store plaintext */ \ veor q4, q4, q0; \ vld1.8 {q0}, [r3]; /* reload IV */ \ vst1.8 {q3-q4}, [r1]!; /* store plaintext */ \ \ bhs .Lctr32le_enc_loop4_##bits; \ cmp r4, #0; \ beq .Lctr32le_enc_done; \ \ .Lctr32le_enc_loop_##bits: \ \ veor q2, q2; \ vmov q1, q0; \ vmov.i64 d4, #0xffffffff; /* q2 <= -1:0:0:0 */ \ subs r4, r4, #1; \ vsub.u32 q0, q0, q2; \ vld1.8 {q2}, [r2]!; /* load ciphertext */ \ \ do_aes_one##bits(e, mc, q1, q1, ##__VA_ARGS__); \ \ veor q1, q2, q1; \ vst1.8 {q1}, [r1]!; /* store plaintext */ \ \ bne .Lctr32le_enc_loop_##bits; \ b .Lctr32le_enc_done; CTR_ENC(128) CTR_ENC(192, r0, r6) CTR_ENC(256, r0, r6) #undef CTR_ENC .Lctr32le_enc_done: vst1.8 {q0}, [r3] /* store IV */ CLEAR_REG(q0) CLEAR_REG(q1) CLEAR_REG(q2) CLEAR_REG(q3) CLEAR_REG(q8) CLEAR_REG(q9) CLEAR_REG(q10) CLEAR_REG(q11) CLEAR_REG(q12) CLEAR_REG(q13) CLEAR_REG(q14) .Lctr32le_enc_skip: pop {r4-r12,lr} vpop {q4-q7} bx lr .size _gcry_aes_ctr32le_enc_armv8_ce,.-_gcry_aes_ctr32le_enc_armv8_ce; /* - * void _gcry_aes_ocb_enc_armv8_ce (const void *keysched, + * long _gcry_aes_ocb_enc_armv8_ce (const void *keysched, * unsigned char *outbuf, * const unsigned char *inbuf, * unsigned 
char *offset, * unsigned char *checksum, * unsigned char *L_table, * size_t nblocks, * unsigned int nrounds, * unsigned int blkn); */ .align 3 .globl _gcry_aes_ocb_enc_armv8_ce .type _gcry_aes_ocb_enc_armv8_ce,%function; _gcry_aes_ocb_enc_armv8_ce: /* input: * r0: keysched * r1: outbuf * r2: inbuf * r3: offset * %st+0: checksum => r4 * %st+4: Ls => r5 * %st+8: nblocks => r6 (0 < nblocks <= 32) * %st+12: nrounds => r7 * %st+16: blkn => lr */ vpush {q4-q7} push {r4-r12,lr} /* 4*16 + 4*10 = 104b */ ldr r7, [sp, #(104+12)] ldr r4, [sp, #(104+0)] ldr r5, [sp, #(104+4)] ldr r6, [sp, #(104+8)] ldr lr, [sp, #(104+16)] cmp r7, #12 vld1.8 {q0}, [r3] /* load offset */ aes_preload_keys(r0, r12); beq .Locb_enc_entry_192 bhi .Locb_enc_entry_256 #define OCB_ENC(bits, ...) \ .Locb_enc_entry_##bits: \ cmp r6, #4; \ add lr, #1; \ blo .Locb_enc_loop_##bits; \ \ .Locb_enc_loop4_##bits: \ \ /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ \ /* Checksum_i = Checksum_{i-1} xor P_i */ \ /* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i) */ \ \ add r9, lr, #1; \ add r10, lr, #2; \ add r11, lr, #3; \ rbit r8, lr; \ add lr, lr, #4; \ rbit r9, r9; \ rbit r10, r10; \ rbit r11, r11; \ clz r8, r8; /* ntz(i+0) */ \ clz r9, r9; /* ntz(i+1) */ \ clz r10, r10; /* ntz(i+2) */ \ clz r11, r11; /* ntz(i+3) */ \ add r8, r5, r8, lsl #4; \ add r9, r5, r9, lsl #4; \ add r10, r5, r10, lsl #4; \ add r11, r5, r11, lsl #4; \ \ sub r6, #4; \ \ vld1.8 {q9}, [r8]; /* load L_{ntz(i+0)} */ \ vld1.8 {q1-q2}, [r2]!; /* load P_i+<0-1> */ \ vld1.8 {q8}, [r4]; /* load Checksum_{i-1} */ \ veor q0, q0, q9; /* Offset_i+0 */ \ vld1.8 {q9}, [r9]; /* load L_{ntz(i+1)} */ \ veor q8, q8, q1; /* Checksum_i+0 */ \ veor q1, q1, q0; /* P_i+0 xor Offset_i+0 */\ vld1.8 {q3-q4}, [r2]!; /* load P_i+<2-3> */ \ vst1.8 {q0}, [r1]!; /* store Offset_i+0 */\ veor q0, q0, q9; /* Offset_i+1 */ \ vld1.8 {q9}, [r10]; /* load L_{ntz(i+2)} */ \ veor q8, q8, q2; /* Checksum_i+1 */ \ veor q2, q2, q0; /* P_i+1 xor Offset_i+1 */\ vst1.8 {q0}, [r1]!; /* 
store Offset_i+1 */\ veor q0, q0, q9; /* Offset_i+2 */ \ vld1.8 {q9}, [r11]; /* load L_{ntz(i+3)} */ \ veor q8, q8, q3; /* Checksum_i+2 */ \ veor q3, q3, q0; /* P_i+2 xor Offset_i+2 */\ vst1.8 {q0}, [r1]!; /* store Offset_i+2 */\ veor q0, q0, q9; /* Offset_i+3 */ \ veor q8, q8, q4; /* Checksum_i+3 */ \ veor q4, q4, q0; /* P_i+3 xor Offset_i+3 */\ vst1.8 {q0}, [r1]; /* store Offset_i+3 */\ sub r1, #(3*16); \ vst1.8 {q8}, [r4]; /* store Checksum_i+3 */\ \ cmp r6, #4; \ \ do_aes_4_##bits(e, mc, q1, q2, q3, q4, ##__VA_ARGS__); \ \ mov r8, r1; \ vld1.8 {q8-q9}, [r1]!; \ veor q1, q1, q8; \ veor q2, q2, q9; \ vld1.8 {q8-q9}, [r1]!; \ vst1.8 {q1-q2}, [r8]!; \ veor q3, q3, q8; \ veor q4, q4, q9; \ vst1.8 {q3-q4}, [r8]; \ \ bhs .Locb_enc_loop4_##bits; \ cmp r6, #0; \ beq .Locb_enc_done; \ \ .Locb_enc_loop_##bits: \ \ /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ \ /* Checksum_i = Checksum_{i-1} xor P_i */ \ /* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i) */ \ \ rbit r8, lr; \ add lr, #1; \ clz r8, r8; /* ntz(i) */ \ add r8, r5, r8, lsl #4; \ \ vld1.8 {q1}, [r2]!; /* load plaintext */ \ vld1.8 {q2}, [r8]; /* load L_{ntz(i)} */ \ vld1.8 {q3}, [r4]; /* load checksum */ \ subs r6, #1; \ veor q0, q0, q2; \ veor q3, q3, q1; \ veor q1, q1, q0; \ vst1.8 {q3}, [r4]; /* store checksum */ \ \ do_aes_one##bits(e, mc, q1, q1, ##__VA_ARGS__); \ \ veor q1, q1, q0; \ vst1.8 {q1}, [r1]!; /* store ciphertext */ \ \ bne .Locb_enc_loop_##bits; \ b .Locb_enc_done; OCB_ENC(128re, r0, r12) OCB_ENC(192, r0, r12) OCB_ENC(256, r0, r12) #undef OCB_ENC .Locb_enc_done: vst1.8 {q0}, [r3] /* store offset */ CLEAR_REG(q0) CLEAR_REG(q1) CLEAR_REG(q2) CLEAR_REG(q3) CLEAR_REG(q8) CLEAR_REG(q9) CLEAR_REG(q10) CLEAR_REG(q11) CLEAR_REG(q12) CLEAR_REG(q13) CLEAR_REG(q14) + mov r0, #0 pop {r4-r12,lr} vpop {q4-q7} bx lr .size _gcry_aes_ocb_enc_armv8_ce,.-_gcry_aes_ocb_enc_armv8_ce; /* - * void _gcry_aes_ocb_dec_armv8_ce (const void *keysched, + * long _gcry_aes_ocb_dec_armv8_ce (const void *keysched, * unsigned 
char *outbuf, * const unsigned char *inbuf, * unsigned char *offset, * unsigned char *checksum, * unsigned char *L_table, * size_t nblocks, * unsigned int nrounds, * unsigned int blkn); */ .align 3 .globl _gcry_aes_ocb_dec_armv8_ce .type _gcry_aes_ocb_dec_armv8_ce,%function; _gcry_aes_ocb_dec_armv8_ce: /* input: * r0: keysched * r1: outbuf * r2: inbuf * r3: offset * %st+0: checksum => r4 * %st+4: Ls => r5 * %st+8: nblocks => r6 (0 < nblocks <= 32) * %st+12: nrounds => r7 * %st+16: blkn => lr */ vpush {q4-q7} push {r4-r12,lr} /* 4*16 + 4*10 = 104b */ ldr r7, [sp, #(104+12)] ldr r4, [sp, #(104+0)] ldr r5, [sp, #(104+4)] ldr r6, [sp, #(104+8)] ldr lr, [sp, #(104+16)] cmp r7, #12 vld1.8 {q0}, [r3] /* load offset */ aes_preload_keys(r0, r12); beq .Locb_dec_entry_192 bhi .Locb_dec_entry_256 #define OCB_DEC(bits, ...) \ .Locb_dec_entry_##bits: \ cmp r6, #4; \ add lr, #1; \ blo .Locb_dec_loop_##bits; \ \ .Locb_dec_loop4_##bits: \ \ /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ \ /* P_i = Offset_i xor DECIPHER(K, C_i xor Offset_i) */ \ /* Checksum_i = Checksum_{i-1} xor P_i */ \ \ add r9, lr, #1; \ add r10, lr, #2; \ add r11, lr, #3; \ rbit r8, lr; \ add lr, lr, #4; \ rbit r9, r9; \ rbit r10, r10; \ rbit r11, r11; \ clz r8, r8; /* ntz(i+0) */ \ clz r9, r9; /* ntz(i+1) */ \ clz r10, r10; /* ntz(i+2) */ \ clz r11, r11; /* ntz(i+3) */ \ add r8, r5, r8, lsl #4; \ add r9, r5, r9, lsl #4; \ add r10, r5, r10, lsl #4; \ add r11, r5, r11, lsl #4; \ \ sub r6, #4; \ \ vld1.8 {q9}, [r8]; /* load L_{ntz(i+0)} */ \ vld1.8 {q1-q2}, [r2]!; /* load P_i+<0-1> */ \ veor q0, q0, q9; /* Offset_i+0 */ \ vld1.8 {q9}, [r9]; /* load L_{ntz(i+1)} */ \ veor q1, q1, q0; /* P_i+0 xor Offset_i+0 */\ vld1.8 {q3-q4}, [r2]!; /* load P_i+<2-3> */ \ vst1.8 {q0}, [r1]!; /* store Offset_i+0 */\ veor q0, q0, q9; /* Offset_i+1 */ \ vld1.8 {q9}, [r10]; /* load L_{ntz(i+2)} */ \ veor q2, q2, q0; /* P_i+1 xor Offset_i+1 */\ vst1.8 {q0}, [r1]!; /* store Offset_i+1 */\ veor q0, q0, q9; /* Offset_i+2 */ \ vld1.8 {q9}, 
[r11]; /* load L_{ntz(i+3)} */ \ veor q3, q3, q0; /* P_i+2 xor Offset_i+2 */\ vst1.8 {q0}, [r1]!; /* store Offset_i+2 */\ veor q0, q0, q9; /* Offset_i+3 */ \ veor q4, q4, q0; /* P_i+3 xor Offset_i+3 */\ vst1.8 {q0}, [r1]; /* store Offset_i+3 */\ sub r1, #(3*16); \ \ cmp r6, #4; \ \ do_aes_4_##bits(d, imc, q1, q2, q3, q4, ##__VA_ARGS__); \ \ mov r8, r1; \ vld1.8 {q8-q9}, [r1]!; \ veor q1, q1, q8; \ veor q2, q2, q9; \ vld1.8 {q8-q9}, [r1]!; \ vst1.8 {q1-q2}, [r8]!; \ veor q1, q1, q2; \ vld1.8 {q2}, [r4]; /* load Checksum_{i-1} */ \ veor q3, q3, q8; \ veor q1, q1, q3; \ veor q4, q4, q9; \ veor q1, q1, q4; \ vst1.8 {q3-q4}, [r8]; \ veor q2, q2, q1; \ vst1.8 {q2}, [r4]; /* store Checksum_i+3 */ \ \ bhs .Locb_dec_loop4_##bits; \ cmp r6, #0; \ beq .Locb_dec_done; \ \ .Locb_dec_loop_##bits: \ \ /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ \ /* P_i = Offset_i xor DECIPHER(K, C_i xor Offset_i) */ \ /* Checksum_i = Checksum_{i-1} xor P_i */ \ \ rbit r8, lr; \ add lr, #1; \ clz r8, r8; /* ntz(i) */ \ add r8, r5, r8, lsl #4; \ \ vld1.8 {q2}, [r8]; /* load L_{ntz(i)} */ \ vld1.8 {q1}, [r2]!; /* load ciphertext */ \ subs r6, #1; \ veor q0, q0, q2; \ veor q1, q1, q0; \ \ do_aes_one##bits(d, imc, q1, q1, ##__VA_ARGS__) \ \ vld1.8 {q2}, [r4]; /* load checksum */ \ veor q1, q1, q0; \ vst1.8 {q1}, [r1]!; /* store plaintext */ \ veor q2, q2, q1; \ vst1.8 {q2}, [r4]; /* store checksum */ \ \ bne .Locb_dec_loop_##bits; \ b .Locb_dec_done; OCB_DEC(128re, r0, r12) OCB_DEC(192, r0, r12) OCB_DEC(256, r0, r12) #undef OCB_DEC .Locb_dec_done: vst1.8 {q0}, [r3] /* store offset */ CLEAR_REG(q0) CLEAR_REG(q1) CLEAR_REG(q2) CLEAR_REG(q3) CLEAR_REG(q8) CLEAR_REG(q9) CLEAR_REG(q10) CLEAR_REG(q11) CLEAR_REG(q12) CLEAR_REG(q13) CLEAR_REG(q14) + mov r0, #0 pop {r4-r12,lr} vpop {q4-q7} bx lr .size _gcry_aes_ocb_dec_armv8_ce,.-_gcry_aes_ocb_dec_armv8_ce; /* - * void _gcry_aes_ocb_auth_armv8_ce (const void *keysched, + * long _gcry_aes_ocb_auth_armv8_ce (const void *keysched, * const unsigned char *abuf, * 
unsigned char *offset, * unsigned char *checksum, * unsigned char *L_table, * size_t nblocks, * unsigned int nrounds, * unsigned int blkn); */ .align 3 .globl _gcry_aes_ocb_auth_armv8_ce .type _gcry_aes_ocb_auth_armv8_ce,%function; _gcry_aes_ocb_auth_armv8_ce: /* input: * r0: keysched * r1: abuf * r2: offset * r3: checksum * %st+0: Ls => r5 * %st+4: nblocks => r6 (0 < nblocks <= 32) * %st+8: nrounds => r7 * %st+12: blkn => lr */ vpush {q4-q7} push {r4-r12,lr} /* 4*16 + 4*10 = 104b */ ldr r7, [sp, #(104+8)] ldr r5, [sp, #(104+0)] ldr r6, [sp, #(104+4)] ldr lr, [sp, #(104+12)] cmp r7, #12 vld1.8 {q0}, [r2] /* load offset */ aes_preload_keys(r0, r12); beq .Locb_auth_entry_192 bhi .Locb_auth_entry_256 #define OCB_AUTH(bits, ...) \ .Locb_auth_entry_##bits: \ cmp r6, #4; \ add lr, #1; \ blo .Locb_auth_loop_##bits; \ \ .Locb_auth_loop4_##bits: \ \ /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ \ /* Sum_i = Sum_{i-1} xor ENCIPHER(K, A_i xor Offset_i) */ \ \ add r9, lr, #1; \ add r10, lr, #2; \ add r11, lr, #3; \ rbit r8, lr; \ add lr, lr, #4; \ rbit r9, r9; \ rbit r10, r10; \ rbit r11, r11; \ clz r8, r8; /* ntz(i+0) */ \ clz r9, r9; /* ntz(i+1) */ \ clz r10, r10; /* ntz(i+2) */ \ clz r11, r11; /* ntz(i+3) */ \ add r8, r5, r8, lsl #4; \ add r9, r5, r9, lsl #4; \ add r10, r5, r10, lsl #4; \ add r11, r5, r11, lsl #4; \ \ sub r6, #4; \ \ vld1.8 {q9}, [r8]; /* load L_{ntz(i+0)} */ \ vld1.8 {q1-q2}, [r1]!; /* load A_i+<0-1> */ \ veor q0, q0, q9; /* Offset_i+0 */ \ vld1.8 {q9}, [r9]; /* load L_{ntz(i+1)} */ \ veor q1, q1, q0; /* A_i+0 xor Offset_i+0 */\ vld1.8 {q3-q4}, [r1]!; /* load A_i+<2-3> */ \ veor q0, q0, q9; /* Offset_i+1 */ \ vld1.8 {q9}, [r10]; /* load L_{ntz(i+2)} */ \ veor q2, q2, q0; /* A_i+1 xor Offset_i+1 */\ veor q0, q0, q9; /* Offset_i+2 */ \ vld1.8 {q9}, [r11]; /* load L_{ntz(i+3)} */ \ veor q3, q3, q0; /* A_i+2 xor Offset_i+2 */\ veor q0, q0, q9; /* Offset_i+3 */ \ veor q4, q4, q0; /* A_i+3 xor Offset_i+3 */\ \ cmp r6, #4; \ \ do_aes_4_##bits(e, mc, q1, q2, q3, 
q4, ##__VA_ARGS__); \ \ veor q1, q1, q2; \ veor q3, q3, q4; \ vld1.8 {q2}, [r3]; \ veor q1, q1, q3; \ veor q2, q2, q1; \ vst1.8 {q2}, [r3]; \ \ bhs .Locb_auth_loop4_##bits; \ cmp r6, #0; \ beq .Locb_auth_done; \ \ .Locb_auth_loop_##bits: \ \ /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ \ /* Sum_i = Sum_{i-1} xor ENCIPHER(K, A_i xor Offset_i) */ \ \ rbit r8, lr; \ add lr, #1; \ clz r8, r8; /* ntz(i) */ \ add r8, r5, r8, lsl #4; \ \ vld1.8 {q2}, [r8]; /* load L_{ntz(i)} */ \ vld1.8 {q1}, [r1]!; /* load aadtext */ \ subs r6, #1; \ veor q0, q0, q2; \ vld1.8 {q2}, [r3]; /* load checksum */ \ veor q1, q1, q0; \ \ do_aes_one##bits(e, mc, q1, q1, ##__VA_ARGS__) \ \ veor q2, q2, q1; \ vst1.8 {q2}, [r3]; /* store checksum */ \ \ bne .Locb_auth_loop_##bits; \ b .Locb_auth_done; OCB_AUTH(128re, r0, r12) OCB_AUTH(192, r0, r12) OCB_AUTH(256, r0, r12) #undef OCB_AUTH .Locb_auth_done: vst1.8 {q0}, [r2] /* store offset */ CLEAR_REG(q0) CLEAR_REG(q1) CLEAR_REG(q2) CLEAR_REG(q3) CLEAR_REG(q8) CLEAR_REG(q9) CLEAR_REG(q10) CLEAR_REG(q11) CLEAR_REG(q12) CLEAR_REG(q13) CLEAR_REG(q14) + mov r0, #0 pop {r4-r12,lr} vpop {q4-q7} bx lr .size _gcry_aes_ocb_auth_armv8_ce,.-_gcry_aes_ocb_auth_armv8_ce; /* * void _gcry_aes_xts_enc_armv8_ce (const void *keysched, * unsigned char *outbuf, * const unsigned char *inbuf, * unsigned char *iv, unsigned int nrounds); */ .align 3 .globl _gcry_aes_xts_enc_armv8_ce .type _gcry_aes_xts_enc_armv8_ce,%function; _gcry_aes_xts_enc_armv8_ce: /* input: * r0: keysched * r1: outbuf * r2: inbuf * r3: iv * %st+0: nblocks => r4 * %st+4: nrounds => r5 */ vpush {q4-q7} push {r4-r12,lr} /* 4*16 + 4*10 = 104b */ ldr r4, [sp, #(104+0)] ldr r5, [sp, #(104+4)] cmp r4, #0 beq .Lxts_enc_skip cmp r5, #12 vld1.8 {q0}, [r3] /* load tweak */ mov r7, #0x87; aes_preload_keys(r0, r6); beq .Lxts_enc_entry_192 bhi .Lxts_enc_entry_256 #define CTR_XTS(bits, ...) 
\ .Lxts_enc_entry_##bits: \ cmp r4, #4; \ blo .Lxts_enc_loop_##bits; \ \ .Lxts_enc_loop4_##bits: \ sub r4, r4, #4; \ veor q9, q9, q9; \ \ vld1.8 {q1-q2}, [r2]!; /* load plaintext */ \ veor q1, q1, q0; \ cmp r4, #4; \ vmov.u32 d18[0], r7; \ vst1.8 {q0}, [r1]!; /* store tweak0 to temp */ \ \ vshr.s64 d16, d1, #63; \ vshr.u64 d17, d0, #63; \ vadd.u64 q0, q0, q0; \ vand d16, d16, d18; \ veor q0, q0, q8; \ \ vld1.8 {q3-q4}, [r2]!; /* load plaintext */ \ veor q2, q2, q0; \ vst1.8 {q0}, [r1]!; /* store tweak1 to temp */ \ \ vshr.s64 d16, d1, #63; \ vshr.u64 d17, d0, #63; \ vadd.u64 q0, q0, q0; \ vand d16, d16, d18; \ veor q0, q0, q8; \ \ veor q3, q3, q0; \ vst1.8 {q0}, [r1]!; /* store tweak2 to temp */ \ \ vshr.s64 d16, d1, #63; \ vshr.u64 d17, d0, #63; \ vadd.u64 q0, q0, q0; \ vand d16, d16, d18; \ veor q0, q0, q8; \ \ veor q4, q4, q0; \ vst1.8 {q0}, [r1]; /* store tweak3 to temp */ \ sub r1, r1, #48; \ \ vshr.s64 d16, d1, #63; \ vshr.u64 d17, d0, #63; \ vadd.u64 q0, q0, q0; \ vand d16, d16, d18; \ veor q0, q0, q8; \ \ do_aes_4_##bits(e, mc, q1, q2, q3, q4, ##__VA_ARGS__); \ \ vld1.8 {q8-q9}, [r1]!; /* load tweak from temp */ \ veor q1, q1, q8; \ veor q2, q2, q9; \ vld1.8 {q8-q9}, [r1]; /* load tweak from temp */ \ sub r1, r1, #32; \ veor q3, q3, q8; \ veor q4, q4, q9; \ vst1.8 {q1-q2}, [r1]!; /* store plaintext */ \ vst1.8 {q3-q4}, [r1]!; /* store plaintext */ \ \ bhs .Lxts_enc_loop4_##bits; \ cmp r4, #0; \ beq .Lxts_enc_done; \ \ .Lxts_enc_loop_##bits: \ \ vld1.8 {q1}, [r2]!; /* load ciphertext */ \ \ veor q9, q9, q9; \ veor q1, q1, q0; \ vmov.u32 d18[0], r7; \ vmov q2, q0; \ \ vshr.s64 d16, d1, #63; \ vshr.u64 d17, d0, #63; \ vadd.u64 q0, q0, q0; \ vand d16, d16, d18; \ veor q0, q0, q8; \ subs r4, r4, #1; \ \ do_aes_one##bits(e, mc, q1, q1, ##__VA_ARGS__); \ \ veor q1, q1, q2; \ vst1.8 {q1}, [r1]!; /* store plaintext */ \ \ bne .Lxts_enc_loop_##bits; \ b .Lxts_enc_done; CTR_XTS(128re, r0, r6) CTR_XTS(192, r0, r6) CTR_XTS(256, r0, r6) #undef CTR_XTS .Lxts_enc_done: 
vst1.8 {q0}, [r3] /* store tweak */ CLEAR_REG(q0) CLEAR_REG(q1) CLEAR_REG(q2) CLEAR_REG(q3) CLEAR_REG(q8) CLEAR_REG(q9) CLEAR_REG(q10) CLEAR_REG(q11) CLEAR_REG(q12) CLEAR_REG(q13) CLEAR_REG(q14) .Lxts_enc_skip: pop {r4-r12,lr} vpop {q4-q7} bx lr .size _gcry_aes_xts_enc_armv8_ce,.-_gcry_aes_xts_enc_armv8_ce; /* * void _gcry_aes_xts_dec_armv8_ce (const void *keysched, * unsigned char *outbuf, * const unsigned char *inbuf, * unsigned char *iv, unsigned int nrounds); */ .align 3 .globl _gcry_aes_xts_dec_armv8_ce .type _gcry_aes_xts_dec_armv8_ce,%function; _gcry_aes_xts_dec_armv8_ce: /* input: * r0: keysched * r1: outbuf * r2: inbuf * r3: iv * %st+0: nblocks => r4 * %st+4: nrounds => r5 */ vpush {q4-q7} push {r4-r12,lr} /* 4*16 + 4*10 = 104b */ ldr r4, [sp, #(104+0)] ldr r5, [sp, #(104+4)] cmp r4, #0 beq .Lxts_dec_skip cmp r5, #12 vld1.8 {q0}, [r3] /* load tweak */ mov r7, #0x87; aes_preload_keys(r0, r6); beq .Lxts_dec_entry_192 bhi .Lxts_dec_entry_256 #define CTR_XTS(bits, ...) \ .Lxts_dec_entry_##bits: \ cmp r4, #4; \ blo .Lxts_dec_loop_##bits; \ \ .Lxts_dec_loop4_##bits: \ sub r4, r4, #4; \ veor q9, q9, q9; \ \ vld1.8 {q1-q2}, [r2]!; /* load plaintext */ \ veor q1, q1, q0; \ cmp r4, #4; \ vmov.u32 d18[0], r7; \ vst1.8 {q0}, [r1]!; /* store tweak0 to temp */ \ \ vshr.s64 d16, d1, #63; \ vshr.u64 d17, d0, #63; \ vadd.u64 q0, q0, q0; \ vand d16, d16, d18; \ veor q0, q0, q8; \ \ vld1.8 {q3-q4}, [r2]!; /* load plaintext */ \ veor q2, q2, q0; \ vst1.8 {q0}, [r1]!; /* store tweak1 to temp */ \ \ vshr.s64 d16, d1, #63; \ vshr.u64 d17, d0, #63; \ vadd.u64 q0, q0, q0; \ vand d16, d16, d18; \ veor q0, q0, q8; \ \ veor q3, q3, q0; \ vst1.8 {q0}, [r1]!; /* store tweak2 to temp */ \ \ vshr.s64 d16, d1, #63; \ vshr.u64 d17, d0, #63; \ vadd.u64 q0, q0, q0; \ vand d16, d16, d18; \ veor q0, q0, q8; \ \ veor q4, q4, q0; \ vst1.8 {q0}, [r1]; /* store tweak3 to temp */ \ sub r1, r1, #48; \ \ vshr.s64 d16, d1, #63; \ vshr.u64 d17, d0, #63; \ vadd.u64 q0, q0, q0; \ vand d16, d16, d18; \ 
veor q0, q0, q8; \ \ do_aes_4_##bits(d, imc, q1, q2, q3, q4, ##__VA_ARGS__); \ \ vld1.8 {q8-q9}, [r1]!; /* load tweak from temp */ \ veor q1, q1, q8; \ veor q2, q2, q9; \ vld1.8 {q8-q9}, [r1]; /* load tweak from temp */ \ sub r1, r1, #32; \ veor q3, q3, q8; \ veor q4, q4, q9; \ vst1.8 {q1-q2}, [r1]!; /* store plaintext */ \ vst1.8 {q3-q4}, [r1]!; /* store plaintext */ \ \ bhs .Lxts_dec_loop4_##bits; \ cmp r4, #0; \ beq .Lxts_dec_done; \ \ .Lxts_dec_loop_##bits: \ \ vld1.8 {q1}, [r2]!; /* load ciphertext */ \ \ veor q9, q9, q9; \ veor q1, q1, q0; \ vmov.u32 d18[0], r7; \ vmov q2, q0; \ \ vshr.s64 d16, d1, #63; \ vshr.u64 d17, d0, #63; \ vadd.u64 q0, q0, q0; \ vand d16, d16, d18; \ veor q0, q0, q8; \ subs r4, r4, #1; \ \ do_aes_one##bits(d, imc, q1, q1, ##__VA_ARGS__); \ \ veor q1, q1, q2; \ vst1.8 {q1}, [r1]!; /* store plaintext */ \ \ bne .Lxts_dec_loop_##bits; \ b .Lxts_dec_done; CTR_XTS(128re, r0, r6) CTR_XTS(192, r0, r6) CTR_XTS(256, r0, r6) #undef CTR_XTS .Lxts_dec_done: vst1.8 {q0}, [r3] /* store tweak */ CLEAR_REG(q0) CLEAR_REG(q1) CLEAR_REG(q2) CLEAR_REG(q3) CLEAR_REG(q8) CLEAR_REG(q9) CLEAR_REG(q10) CLEAR_REG(q11) CLEAR_REG(q12) CLEAR_REG(q13) CLEAR_REG(q14) .Lxts_dec_skip: pop {r4-r12,lr} vpop {q4-q7} bx lr .size _gcry_aes_xts_dec_armv8_ce,.-_gcry_aes_xts_dec_armv8_ce; /* * u32 _gcry_aes_sbox4_armv8_ce(u32 in4b); */ .align 3 .globl _gcry_aes_sbox4_armv8_ce .type _gcry_aes_sbox4_armv8_ce,%function; _gcry_aes_sbox4_armv8_ce: /* See "Gouvêa, C. P. L. & López, J. Implementing GCM on ARMv8. Topics in * Cryptology — CT-RSA 2015" for details. 
*/ vmov.i8 q0, #0x52 vmov.i8 q1, #0 vmov s0, r0 aese.8 q0, q1 veor d0, d1 vpadd.i32 d0, d0, d1 vmov r0, s0 CLEAR_REG(q0) bx lr .size _gcry_aes_sbox4_armv8_ce,.-_gcry_aes_sbox4_armv8_ce; /* * void _gcry_aes_invmixcol_armv8_ce(void *dst, const void *src); */ .align 3 .globl _gcry_aes_invmixcol_armv8_ce .type _gcry_aes_invmixcol_armv8_ce,%function; _gcry_aes_invmixcol_armv8_ce: vld1.8 {q0}, [r1] aesimc.8 q0, q0 vst1.8 {q0}, [r0] CLEAR_REG(q0) bx lr .size _gcry_aes_invmixcol_armv8_ce,.-_gcry_aes_invmixcol_armv8_ce; #endif diff --git a/cipher/rijndael-armv8-aarch64-ce.S b/cipher/rijndael-armv8-aarch64-ce.S index 4fef0345..97d3d7eb 100644 --- a/cipher/rijndael-armv8-aarch64-ce.S +++ b/cipher/rijndael-armv8-aarch64-ce.S @@ -1,1921 +1,2038 @@ /* rijndael-armv8-aarch64-ce.S - ARMv8/CE accelerated AES * Copyright (C) 2016 Jussi Kivilinna * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . 
*/ #include "asm-common-aarch64.h" #if defined(__AARCH64EL__) && \ defined(HAVE_COMPATIBLE_GCC_AARCH64_PLATFORM_AS) && \ defined(HAVE_GCC_INLINE_ASM_AARCH64_CRYPTO) .cpu generic+simd+crypto .text /* Register macros */ #define vk0 v17 #define vk1 v18 #define vk2 v19 #define vk3 v20 #define vk4 v21 #define vk5 v22 #define vk6 v23 #define vk7 v24 #define vk8 v25 #define vk9 v26 #define vk10 v27 #define vk11 v28 #define vk12 v29 #define vk13 v30 #define vklast v31 /* Helper macros */ #define __ /*_*/ #define _(...) __VA_ARGS__ /* AES macros */ #define aes_preload_keys(keysched, nrounds) \ cmp nrounds, #12; \ ld1 {vk0.16b-vk3.16b}, [keysched], #64; \ ld1 {vk4.16b-vk7.16b}, [keysched], #64; \ ld1 {vk8.16b-vk10.16b}, [keysched], #48; \ mov vklast.16b, vk10.16b; \ b.lo 1f; \ ld1 {vk11.16b-vk12.16b}, [keysched], #32; \ mov vklast.16b, vk12.16b; \ b.eq 1f; \ ld1 {vk13.16b-vklast.16b}, [keysched]; \ 1: ; #define do_aes_one_part1(ed, mcimc, vb, vkfirst) \ aes##ed vb.16b, vkfirst.16b; \ aes##mcimc vb.16b, vb.16b; \ aes##ed vb.16b, vk1.16b; \ aes##mcimc vb.16b, vb.16b; #define do_aes_one_part2_128(ed, mcimc, vb, iop1, iop2) \ aes##ed vb.16b, vk2.16b; \ aes##mcimc vb.16b, vb.16b; \ aes##ed vb.16b, vk3.16b; \ aes##mcimc vb.16b, vb.16b; \ iop1; \ aes##ed vb.16b, vk4.16b; \ aes##mcimc vb.16b, vb.16b; \ aes##ed vb.16b, vk5.16b; \ aes##mcimc vb.16b, vb.16b; \ aes##ed vb.16b, vk6.16b; \ aes##mcimc vb.16b, vb.16b; \ iop2; \ aes##ed vb.16b, vk7.16b; \ aes##mcimc vb.16b, vb.16b; \ aes##ed vb.16b, vk8.16b; \ aes##mcimc vb.16b, vb.16b; \ aes##ed vb.16b, vk9.16b; #define do_aes_one_part2_192(ed, mcimc, vb, iop1, iop2) \ aes##ed vb.16b, vk2.16b; \ aes##mcimc vb.16b, vb.16b; \ aes##ed vb.16b, vk3.16b; \ aes##mcimc vb.16b, vb.16b; \ aes##ed vb.16b, vk4.16b; \ aes##mcimc vb.16b, vb.16b; \ aes##ed vb.16b, vk5.16b; \ aes##mcimc vb.16b, vb.16b; \ iop1; \ aes##ed vb.16b, vk6.16b; \ aes##mcimc vb.16b, vb.16b; \ aes##ed vb.16b, vk7.16b; \ aes##mcimc vb.16b, vb.16b; \ aes##ed vb.16b, vk8.16b; \ 
aes##mcimc vb.16b, vb.16b; \ iop2; \ aes##ed vb.16b, vk9.16b; \ aes##mcimc vb.16b, vb.16b; \ aes##ed vb.16b, vk10.16b; \ aes##mcimc vb.16b, vb.16b; \ aes##ed vb.16b, vk11.16b; #define do_aes_one_part2_256(ed, mcimc, vb, iop1, iop2) \ aes##ed vb.16b, vk2.16b; \ aes##mcimc vb.16b, vb.16b; \ aes##ed vb.16b, vk3.16b; \ aes##mcimc vb.16b, vb.16b; \ aes##ed vb.16b, vk4.16b; \ aes##mcimc vb.16b, vb.16b; \ aes##ed vb.16b, vk5.16b; \ aes##mcimc vb.16b, vb.16b; \ aes##ed vb.16b, vk6.16b; \ aes##mcimc vb.16b, vb.16b; \ aes##ed vb.16b, vk7.16b; \ aes##mcimc vb.16b, vb.16b; \ iop1; \ aes##ed vb.16b, vk8.16b; \ aes##mcimc vb.16b, vb.16b; \ aes##ed vb.16b, vk9.16b; \ aes##mcimc vb.16b, vb.16b; \ aes##ed vb.16b, vk10.16b; \ aes##mcimc vb.16b, vb.16b; \ iop2; \ aes##ed vb.16b, vk11.16b; \ aes##mcimc vb.16b, vb.16b; \ aes##ed vb.16b, vk12.16b; \ aes##mcimc vb.16b, vb.16b; \ aes##ed vb.16b, vk13.16b; #define do_aes_one128(ed, mcimc, vo, vb, vkfirst) \ do_aes_one_part1(ed, mcimc, vb, vkfirst); \ do_aes_one_part2_128(ed, mcimc, vb, __, __); \ eor vo.16b, vb.16b, vklast.16b; #define do_aes_one192(ed, mcimc, vo, vb, vkfirst) \ do_aes_one_part1(ed, mcimc, vb, vkfirst); \ do_aes_one_part2_192(ed, mcimc, vb, __, __); \ eor vo.16b, vb.16b, vklast.16b; #define do_aes_one256(ed, mcimc, vo, vb, vkfirst) \ do_aes_one_part1(ed, mcimc, vb, vkfirst); \ do_aes_one_part2_256(ed, mcimc, vb, __, __); \ eor vo.16b, vb.16b, vklast.16b; #define aes_round_4_multikey(ed, mcimc, b0, b1, b2, b3, key0, key1, key2, key3) \ aes##ed b0.16b, key0.16b; \ aes##mcimc b0.16b, b0.16b; \ aes##ed b1.16b, key1.16b; \ aes##mcimc b1.16b, b1.16b; \ aes##ed b2.16b, key2.16b; \ aes##mcimc b2.16b, b2.16b; \ aes##ed b3.16b, key3.16b; \ aes##mcimc b3.16b, b3.16b; #define aes_round_4(ed, mcimc, b0, b1, b2, b3, key) \ aes_round_4_multikey(ed, mcimc, b0, b1, b2, b3, key, key, key, key); #define aes_lastround_4(ed, o0, o1, o2, o3, b0, b1, b2, b3, key1, b0_key2, b1_key2, b2_key2, b3_key2) \ aes##ed b0.16b, key1.16b; \ aes##ed b1.16b, 
key1.16b; \ aes##ed b2.16b, key1.16b; \ aes##ed b3.16b, key1.16b; \ eor o0.16b, b0.16b, b0_key2.16b; \ eor o1.16b, b1.16b, b1_key2.16b; \ eor o2.16b, b2.16b, b2_key2.16b; \ eor o3.16b, b3.16b, b3_key2.16b; #define do_aes_4_part1_multikey(ed, mcimc, b0, b1, b2, b3, key0, key1, key2, key3) \ aes_round_4_multikey(ed, mcimc, b0, b1, b2, b3, key0, key1, key2, key3); \ aes_round_4(ed, mcimc, b0, b1, b2, b3, vk1); \ aes_round_4(ed, mcimc, b0, b1, b2, b3, vk2); \ aes_round_4(ed, mcimc, b0, b1, b2, b3, vk3); #define do_aes_4_part1(ed, mcimc, b0, b1, b2, b3, vkfirst) \ do_aes_4_part1_multikey(ed, mcimc, b0, b1, b2, b3, vkfirst, vkfirst, vkfirst, vkfirst); #define do_aes_4_part2_128(ed, mcimc, o0, o1, o2, o3, b0, b1, b2, b3, \ b0_key, b1_key, b2_key, b3_key) \ aes_round_4(ed, mcimc, b0, b1, b2, b3, vk4); \ aes_round_4(ed, mcimc, b0, b1, b2, b3, vk5); \ aes_round_4(ed, mcimc, b0, b1, b2, b3, vk6); \ aes_round_4(ed, mcimc, b0, b1, b2, b3, vk7); \ aes_round_4(ed, mcimc, b0, b1, b2, b3, vk8); \ aes_lastround_4(ed, o0, o1, o2, o3, b0, b1, b2, b3, vk9, b0_key, b1_key, b2_key, b3_key); #define do_aes_4_part2_192(ed, mcimc, o0, o1, o2, o3, b0, b1, b2, b3, \ b0_key, b1_key, b2_key, b3_key) \ aes_round_4(ed, mcimc, b0, b1, b2, b3, vk4); \ aes_round_4(ed, mcimc, b0, b1, b2, b3, vk5); \ aes_round_4(ed, mcimc, b0, b1, b2, b3, vk6); \ aes_round_4(ed, mcimc, b0, b1, b2, b3, vk7); \ aes_round_4(ed, mcimc, b0, b1, b2, b3, vk8); \ aes_round_4(ed, mcimc, b0, b1, b2, b3, vk9); \ aes_round_4(ed, mcimc, b0, b1, b2, b3, vk10); \ aes_lastround_4(ed, o0, o1, o2, o3, b0, b1, b2, b3, vk11, b0_key, b1_key, b2_key, b3_key); #define do_aes_4_part2_256(ed, mcimc, o0, o1, o2, o3, b0, b1, b2, b3, \ b0_key, b1_key, b2_key, b3_key) \ aes_round_4(ed, mcimc, b0, b1, b2, b3, vk4); \ aes_round_4(ed, mcimc, b0, b1, b2, b3, vk5); \ aes_round_4(ed, mcimc, b0, b1, b2, b3, vk6); \ aes_round_4(ed, mcimc, b0, b1, b2, b3, vk7); \ aes_round_4(ed, mcimc, b0, b1, b2, b3, vk8); \ aes_round_4(ed, mcimc, b0, b1, b2, b3, vk9); \ 
aes_round_4(ed, mcimc, b0, b1, b2, b3, vk10); \ aes_round_4(ed, mcimc, b0, b1, b2, b3, vk11); \ aes_round_4(ed, mcimc, b0, b1, b2, b3, vk12); \ aes_lastround_4(ed, o0, o1, o2, o3, b0, b1, b2, b3, vk13, b0_key, b1_key, b2_key, b3_key); #define do_aes_4_128(ed, mcimc, b0, b1, b2, b3) \ do_aes_4_part1(ed, mcimc, b0, b1, b2, b3, vk0); \ do_aes_4_part2_128(ed, mcimc, b0, b1, b2, b3, b0, b1, b2, b3, vklast, vklast, vklast, vklast); #define do_aes_4_192(ed, mcimc, b0, b1, b2, b3) \ do_aes_4_part1(ed, mcimc, b0, b1, b2, b3, vk0); \ do_aes_4_part2_192(ed, mcimc, b0, b1, b2, b3, b0, b1, b2, b3, vklast, vklast, vklast, vklast); #define do_aes_4_256(ed, mcimc, b0, b1, b2, b3) \ do_aes_4_part1(ed, mcimc, b0, b1, b2, b3, vk0); \ do_aes_4_part2_256(ed, mcimc, b0, b1, b2, b3, b0, b1, b2, b3, vklast, vklast, vklast, vklast); /* Other functional macros */ #define CLEAR_REG(reg) movi reg.16b, #0; #define aes_clear_keys(nrounds) \ CLEAR_REG(vk0); \ CLEAR_REG(vk1); \ CLEAR_REG(vk2); \ CLEAR_REG(vk3); \ CLEAR_REG(vk4); \ CLEAR_REG(vk5); \ CLEAR_REG(vk6); \ CLEAR_REG(vk7); \ CLEAR_REG(vk9); \ CLEAR_REG(vk8); \ CLEAR_REG(vk10); \ CLEAR_REG(vk11); \ CLEAR_REG(vk12); \ CLEAR_REG(vk13); \ CLEAR_REG(vklast); /* * unsigned int _gcry_aes_enc_armv8_ce(void *keysched, byte *dst, * const byte *src, * unsigned int nrounds); */ .align 3 .globl _gcry_aes_enc_armv8_ce ELF(.type _gcry_aes_enc_armv8_ce,%function;) _gcry_aes_enc_armv8_ce: /* input: * x0: keysched * x1: dst * x2: src * w3: nrounds */ CFI_STARTPROC(); aes_preload_keys(x0, w3); ld1 {v0.16b}, [x2] b.hi .Lenc1_256 b.eq .Lenc1_192 .Lenc1_128: do_aes_one128(e, mc, v0, v0, vk0); .Lenc1_tail: CLEAR_REG(vk0) CLEAR_REG(vk1) CLEAR_REG(vk2) CLEAR_REG(vk3) CLEAR_REG(vk4) CLEAR_REG(vk5) CLEAR_REG(vk6) CLEAR_REG(vk7) CLEAR_REG(vk8) CLEAR_REG(vk9) CLEAR_REG(vk10) CLEAR_REG(vklast) st1 {v0.16b}, [x1] CLEAR_REG(v0) mov x0, #0 ret_spec_stop .Lenc1_192: do_aes_one192(e, mc, v0, v0, vk0); CLEAR_REG(vk11) CLEAR_REG(vk12) b .Lenc1_tail .Lenc1_256: 
do_aes_one256(e, mc, v0, v0, vk0); CLEAR_REG(vk11) CLEAR_REG(vk12) CLEAR_REG(vk13) b .Lenc1_tail CFI_ENDPROC(); ELF(.size _gcry_aes_enc_armv8_ce,.-_gcry_aes_enc_armv8_ce;) /* * unsigned int _gcry_aes_dec_armv8_ce(void *keysched, byte *dst, * const byte *src, * unsigned int nrounds); */ .align 3 .globl _gcry_aes_dec_armv8_ce ELF(.type _gcry_aes_dec_armv8_ce,%function;) _gcry_aes_dec_armv8_ce: /* input: * x0: keysched * x1: dst * x2: src * w3: nrounds */ CFI_STARTPROC(); aes_preload_keys(x0, w3); ld1 {v0.16b}, [x2] b.hi .Ldec1_256 b.eq .Ldec1_192 .Ldec1_128: do_aes_one128(d, imc, v0, v0, vk0); .Ldec1_tail: CLEAR_REG(vk0) CLEAR_REG(vk1) CLEAR_REG(vk2) CLEAR_REG(vk3) CLEAR_REG(vk4) CLEAR_REG(vk5) CLEAR_REG(vk6) CLEAR_REG(vk7) CLEAR_REG(vk8) CLEAR_REG(vk9) CLEAR_REG(vk10) CLEAR_REG(vklast) st1 {v0.16b}, [x1] CLEAR_REG(v0) mov x0, #0 ret_spec_stop .Ldec1_192: do_aes_one192(d, imc, v0, v0, vk0); CLEAR_REG(vk11) CLEAR_REG(vk12) b .Ldec1_tail .Ldec1_256: do_aes_one256(d, imc, v0, v0, vk0); CLEAR_REG(vk11) CLEAR_REG(vk12) CLEAR_REG(vk13) b .Ldec1_tail CFI_ENDPROC(); ELF(.size _gcry_aes_dec_armv8_ce,.-_gcry_aes_dec_armv8_ce;) +/* + * void _gcry_aes_ecb_enc_armv8_ce (const void *keysched, + * unsigned char *outbuf, + * const unsigned char *inbuf, + * size_t nblocks, unsigned int nrounds); + */ + +.align 3 +.globl _gcry_aes_ecb_enc_armv8_ce +ELF(.type _gcry_aes_ecb_enc_armv8_ce,%function;) +_gcry_aes_ecb_enc_armv8_ce: + /* input: + * x0: keysched + * x1: outbuf + * x2: inbuf + * x3: nblocks + * w4: nrounds + */ + CFI_STARTPROC(); + + cbz x3, .Lecb_enc_skip + + aes_preload_keys(x0, w4); + + b.eq .Lecb_entry_192e + b.hi .Lecb_entry_256e + +#define ECB_CRYPT(bits, e_d, mc_imc) \ + .Lecb_entry_##bits##e_d: \ + cmp x3, #4; \ + b.lo .Lecb_loop_##bits##e_d; \ + \ + .Lecb_loop4_##bits##e_d: \ + sub x3, x3, #4; \ + ld1 {v0.16b-v3.16b}, [x2], #64; /* load ciphertext */ \ + cmp x3, #4; \ + do_aes_4_##bits(e_d, mc_imc, v0, v1, v2, v3); \ + st1 {v0.16b-v3.16b}, [x1], #64; /* store plaintext 
*/ \ + \ + b.hs .Lecb_loop4_##bits##e_d; \ + CLEAR_REG(v1); \ + CLEAR_REG(v2); \ + CLEAR_REG(v3); \ + cbz x3, .Lecb_done_##e_d; \ + \ + .Lecb_loop_##bits##e_d: \ + ld1 {v0.16b}, [x2], #16; /* load ciphertext */ \ + sub x3, x3, #1; \ + do_aes_one##bits(e_d, mc_imc, v0, v0, vk0); \ + st1 {v0.16b}, [x1], #16; /* store plaintext */ \ + \ + cbnz x3, .Lecb_loop_##bits##e_d; \ + b .Lecb_done_##e_d; + + ECB_CRYPT(128, e, mc) + ECB_CRYPT(192, e, mc) + ECB_CRYPT(256, e, mc) + +.Lecb_done_e: + aes_clear_keys(w4) + + CLEAR_REG(v0) + +.Lecb_enc_skip: + ret_spec_stop + CFI_ENDPROC(); +ELF(.size _gcry_aes_ecb_enc_armv8_ce,.-_gcry_aes_ecb_enc_armv8_ce;) + + +/* + * void _gcry_aes_ecb_dec_armv8_ce (const void *keysched, + * unsigned char *outbuf, + * const unsigned char *inbuf, + * size_t nblocks, unsigned int nrounds); + */ + +.align 3 +.globl _gcry_aes_ecb_dec_armv8_ce +ELF(.type _gcry_aes_ecb_dec_armv8_ce,%function;) +_gcry_aes_ecb_dec_armv8_ce: + /* input: + * x0: keysched + * x1: outbuf + * x2: inbuf + * x3: nblocks + * w4: nrounds + */ + CFI_STARTPROC(); + + cbz x3, .Lecb_enc_skip + + aes_preload_keys(x0, w4); + + b.eq .Lecb_entry_192d + b.hi .Lecb_entry_256d + + ECB_CRYPT(128, d, imc) + ECB_CRYPT(192, d, imc) + ECB_CRYPT(256, d, imc) + +#undef ECB_CRYPT + +.Lecb_done_d: + aes_clear_keys(w4) + + CLEAR_REG(v0) + +.Lecb_dec_skip: + ret_spec_stop + CFI_ENDPROC(); +ELF(.size _gcry_aes_ecb_dec_armv8_ce,.-_gcry_aes_ecb_dec_armv8_ce;) + + /* * void _gcry_aes_cbc_enc_armv8_ce (const void *keysched, * unsigned char *outbuf, * const unsigned char *inbuf, * unsigned char *iv, size_t nblocks, * int cbc_mac, unsigned int nrounds); */ .align 3 .globl _gcry_aes_cbc_enc_armv8_ce ELF(.type _gcry_aes_cbc_enc_armv8_ce,%function;) _gcry_aes_cbc_enc_armv8_ce: /* input: * x0: keysched * x1: outbuf * x2: inbuf * x3: iv * x4: nblocks * w5: cbc_mac * w6: nrounds */ CFI_STARTPROC(); cbz x4, .Lcbc_enc_skip cmp w5, #0 ld1 {v4.16b}, [x3] /* load IV */ csetm x5, eq aes_preload_keys(x0, w6); and x5, x5, 
#16 ld1 {v3.16b}, [x2], #16; /* load plaintext */ mov v0.16b, vk0.16b; sub x4, x4, #1; eor v16.16b, vk0.16b, vklast.16b; eor v4.16b, v4.16b, v3.16b; do_aes_one_part1(e, mc, v4, v0); b.eq .Lcbc_enc_entry_192 b.hi .Lcbc_enc_entry_256 #define CBC_ENC(bits) \ .Lcbc_enc_entry_##bits: \ cbz x4, .Lcbc_enc_done_##bits; \ \ .Lcbc_enc_loop_##bits: \ do_aes_one_part2_##bits(e, mc, v4, \ _(ld1 {v0.16b}, [x2], #16 /* load plaintext */), \ _(eor v0.16b, v0.16b, v16.16b)); \ sub x4, x4, #1; \ eor v3.16b, v4.16b, vklast.16b; \ do_aes_one_part1(e, mc, v4, v0); \ st1 {v3.16b}, [x1], x5; /* store ciphertext */ \ cbnz x4, .Lcbc_enc_loop_##bits; \ \ .Lcbc_enc_done_##bits: \ do_aes_one_part2_##bits(e, mc, v4, __, __); \ b .Lcbc_enc_done; CBC_ENC(128) CBC_ENC(192) CBC_ENC(256) #undef CBC_ENC .Lcbc_enc_done: eor v3.16b, v4.16b, vklast.16b; st1 {v3.16b}, [x1]; /* store ciphertext */ aes_clear_keys(w6) st1 {v3.16b}, [x3] /* store IV */ CLEAR_REG(v16) CLEAR_REG(v4) CLEAR_REG(v3) CLEAR_REG(v0) .Lcbc_enc_skip: ret_spec_stop CFI_ENDPROC(); ELF(.size _gcry_aes_cbc_enc_armv8_ce,.-_gcry_aes_cbc_enc_armv8_ce;) /* * void _gcry_aes_cbc_dec_armv8_ce (const void *keysched, * unsigned char *outbuf, * const unsigned char *inbuf, - * unsigned char *iv, unsigned int nrounds); + * unsigned char *iv, + * size_t nblocks, unsigned int nrounds); */ .align 3 .globl _gcry_aes_cbc_dec_armv8_ce ELF(.type _gcry_aes_cbc_dec_armv8_ce,%function;) _gcry_aes_cbc_dec_armv8_ce: /* input: * x0: keysched * x1: outbuf * x2: inbuf * x3: iv * x4: nblocks * w5: nrounds */ CFI_STARTPROC(); cbz x4, .Lcbc_dec_skip add sp, sp, #-64; CFI_ADJUST_CFA_OFFSET(64); ld1 {v16.16b}, [x3] /* load IV */ aes_preload_keys(x0, w5); b.eq .Lcbc_dec_entry_192 b.hi .Lcbc_dec_entry_256 #define CBC_DEC(bits) \ .Lcbc_dec_entry_##bits: \ cmp x4, #4; \ b.lo .Lcbc_dec_loop_##bits; \ \ ld1 {v0.16b-v3.16b}, [x2], #64; /* load ciphertext */ \ cmp x4, #8; \ sub x4, x4, #4; \ eor v4.16b, v16.16b, vklast.16b; \ eor v5.16b, v0.16b, vklast.16b; \ eor v6.16b, 
v1.16b, vklast.16b; \ eor v7.16b, v2.16b, vklast.16b; \ mov v16.16b, v3.16b; /* next IV */ \ \ do_aes_4_part1(d, imc, v0, v1, v2, v3, vk0); \ b.lo .Lcbc_dec_done4_##bits; \ \ st1 {v8.16b-v11.16b}, [sp]; /* store callee saved registers */ \ \ .Lcbc_dec_loop4_##bits: \ do_aes_4_part2_##bits(d, imc, v8, v9, v10, v11, v0, v1, v2, v3, v4, v5, v6, v7); \ ld1 {v0.16b-v3.16b}, [x2], #64; /* load ciphertext */ \ cmp x4, #8; \ sub x4, x4, #4; \ eor v4.16b, v16.16b, vklast.16b; \ eor v5.16b, v0.16b, vklast.16b; \ eor v6.16b, v1.16b, vklast.16b; \ eor v7.16b, v2.16b, vklast.16b; \ mov v16.16b, v3.16b; /* next IV */ \ \ do_aes_4_part1(d, imc, v0, v1, v2, v3, vk0); \ st1 {v8.16b-v11.16b}, [x1], #64; /* store plaintext */ \ \ b.hs .Lcbc_dec_loop4_##bits; \ \ ld1 {v8.16b-v11.16b}, [sp]; /* restore callee saved registers */ \ \ .Lcbc_dec_done4_##bits: \ do_aes_4_part2_##bits(d, imc, v0, v1, v2, v3, v0, v1, v2, v3, v4, v5, v6, v7); \ \ CLEAR_REG(v4); \ CLEAR_REG(v5); \ CLEAR_REG(v6); \ CLEAR_REG(v7); \ st1 {v0.16b-v3.16b}, [x1], #64; /* store plaintext */ \ CLEAR_REG(v0); \ CLEAR_REG(v3); \ cbz x4, .Lcbc_dec_done; \ \ .Lcbc_dec_loop_##bits: \ ld1 {v1.16b}, [x2], #16; /* load ciphertext */ \ sub x4, x4, #1; \ eor v16.16b, v16.16b, vklast.16b; \ mov v2.16b, v1.16b; \ \ do_aes_one_part1(d, imc, v1, vk0); \ do_aes_one_part2_##bits(d, imc, v1, __, __); \ eor v1.16b, v1.16b, v16.16b; \ \ mov v16.16b, v2.16b; \ st1 {v1.16b}, [x1], #16; /* store plaintext */ \ \ cbnz x4, .Lcbc_dec_loop_##bits; \ b .Lcbc_dec_done; CBC_DEC(128) CBC_DEC(192) CBC_DEC(256) #undef CBC_DEC .Lcbc_dec_done: aes_clear_keys(w5) st1 {v16.16b}, [x3] /* store IV */ CLEAR_REG(v16) CLEAR_REG(v1) CLEAR_REG(v2) add sp, sp, #64; CFI_ADJUST_CFA_OFFSET(-64); .Lcbc_dec_skip: ret_spec_stop CFI_ENDPROC(); ELF(.size _gcry_aes_cbc_dec_armv8_ce,.-_gcry_aes_cbc_dec_armv8_ce;) /* * void _gcry_aes_ctr_enc_armv8_ce (const void *keysched, * unsigned char *outbuf, * const unsigned char *inbuf, * unsigned char *iv, unsigned int nrounds); */ 
.align 3
.globl _gcry_aes_ctr_enc_armv8_ce
ELF(.type _gcry_aes_ctr_enc_armv8_ce,%function;)
_gcry_aes_ctr_enc_armv8_ce:
  /* input:
   *    r0: keysched
   *    r1: outbuf
   *    r2: inbuf
   *    r3: iv
   *    x4: nblocks
   *    w5: nrounds
   */
  CFI_STARTPROC();

  cbz x4, .Lctr_enc_skip

  add x8, sp, #-64
  add sp, sp, #-128;
  CFI_ADJUST_CFA_OFFSET(128);

  mov w6, #(1 << 24)
  movi v16.16b, #0
  mov v16.S[3], w6 /* 1 */

  /* load IV */
  ldp x9, x10, [x3]
  ld1 {v0.16b}, [x3]
  rev x9, x9
  rev x10, x10

  mov x12, #(4 << 56)
  lsl x11, x10, #56

  aes_preload_keys(x0, w5);

  b.eq .Lctr_enc_entry_192
  b.hi .Lctr_enc_entry_256

#define CTR_ENC(bits) \
  .Lctr_enc_entry_##bits: \
    cmp x4, #4; \
    b.lo .Lctr_enc_loop_##bits; \
    \
    st1 {v8.16b-v11.16b}, [sp]; /* store callee saved registers */ \
    \
    adds x11, x11, x12; \
    add v9.4s, v16.4s, v16.4s; /* 2 */ \
    add v10.4s, v16.4s, v9.4s; /* 3 */ \
    add v11.4s, v9.4s, v9.4s; /* 4 */ \
    mov x7, #1; \
    sub x4, x4, #4; \
    ld1 {v5.16b-v8.16b}, [x2], #64; /* preload ciphertext */ \
    b.cs .Lctr_enc_carry4_##bits; \
    \
    mov v1.16b, v0.16b; \
    add x10, x10, #4; \
    add v2.16b, v0.16b, v16.16b; \
    add v3.8h, v0.8h, v9.8h; \
    add v4.4s, v0.4s, v10.4s; \
    add v0.2d, v0.2d, v11.2d; \
    \
  .Lctr_enc_entry4_##bits##_carry_done: \
    mov x7, #0; \
    cmp x4, #4; \
    do_aes_4_part1(e, mc, v1, v2, v3, v4, vk0); \
    b.lo .Lctr_enc_done4_##bits; \
    \
    st1 {v12.16b-v15.16b}, [x8]; /* store callee saved registers */ \
    \
  .Lctr_enc_loop4_##bits: \
    eor v5.16b, v5.16b, vklast.16b; \
    eor v6.16b, v6.16b, vklast.16b; \
    eor v7.16b, v7.16b, vklast.16b; \
    eor v8.16b, v8.16b, vklast.16b; \
    do_aes_4_part2_##bits(e, mc, v12, v13, v14, v15, v1, v2, v3, v4, v5, v6, v7, v8); \
    ld1 {v5.16b-v8.16b}, [x2], #64; /* preload ciphertext */ \
    adds x11, x11, x12; \
    sub x4, x4, #4; \
    b.cs .Lctr_enc_carry4_##bits; \
    \
    mov v1.16b, v0.16b; \
    add x10, x10, #4; \
    add v2.16b, v0.16b, v16.16b; \
    add v3.8h, v0.8h, v9.8h; \
    add v4.4s, v0.4s, v10.4s; \
    add v0.2d, v0.2d, v11.2d; \
    \
  .Lctr_enc_loop4_##bits##_carry_done: \
    cmp x4, #4; \
    do_aes_4_part1(e, mc, v1, v2, v3, v4, vk0); \
    st1 {v12.16b-v15.16b}, [x1], #64; /* store plaintext */ \
    \
    b.hs .Lctr_enc_loop4_##bits; \
    \
    ld1 {v12.16b-v15.16b}, [x8]; /* restore callee saved registers */ \
    \
  .Lctr_enc_done4_##bits: \
    eor v5.16b, v5.16b, vklast.16b; \
    eor v6.16b, v6.16b, vklast.16b; \
    eor v7.16b, v7.16b, vklast.16b; \
    eor v8.16b, v8.16b, vklast.16b; \
    do_aes_4_part2_##bits(e, mc, v5, v6, v7, v8, v1, v2, v3, v4, v5, v6, v7, v8); \
    \
    st1 {v5.16b-v8.16b}, [x1], #64; /* store plaintext */ \
    \
    CLEAR_REG(v3); \
    CLEAR_REG(v4); \
    ld1 {v8.16b-v11.16b}, [sp]; /* restore callee saved registers */ \
    CLEAR_REG(v5); \
    CLEAR_REG(v6); \
    CLEAR_REG(v7); \
    cbz x4, .Lctr_enc_done; \
    \
  .Lctr_enc_loop_##bits: \
    \
    adds x10, x10, #1; \
    mov v1.16b, v0.16b; \
    adc x9, x9, xzr; \
    dup v0.2d, x10; \
    sub x4, x4, #1; \
    ins v0.D[0], x9; \
    ld1 {v2.16b}, [x2], #16; /* load ciphertext */ \
    rev64 v0.16b, v0.16b; \
    \
    do_aes_one_part1(e, mc, v1, vk0); \
    eor v2.16b, v2.16b, vklast.16b; \
    do_aes_one_part2_##bits(e, mc, v1, __, __); \
    \
    eor v1.16b, v1.16b, v2.16b; \
    st1 {v1.16b}, [x1], #16; /* store plaintext */ \
    \
    cbnz x4, .Lctr_enc_loop_##bits; \
    b .Lctr_enc_done; \
    \
  .Lctr_enc_carry4_##bits: \
    \
    adds x13, x10, #1; \
    mov v1.16b, v0.16b; \
    adc x14, x9, xzr; \
    dup v2.2d, x13; \
    adds x13, x10, #2; \
    ins v2.D[0], x14; \
    adc x14, x9, xzr; \
    rev64 v2.16b, v2.16b; \
    dup v3.2d, x13; \
    adds x13, x10, #3; \
    ins v3.D[0], x14; \
    adc x14, x9, xzr; \
    rev64 v3.16b, v3.16b; \
    dup v4.2d, x13; \
    adds x10, x10, #4; \
    ins v4.D[0], x14; \
    adc x9, x9, xzr; \
    rev64 v4.16b, v4.16b; \
    dup v0.2d, x10; \
    ins v0.D[0], x9; \
    rev64 v0.16b, v0.16b; \
    \
    cbz x7, .Lctr_enc_loop4_##bits##_carry_done; \
    b .Lctr_enc_entry4_##bits##_carry_done;

CTR_ENC(128)
CTR_ENC(192)
CTR_ENC(256)

#undef CTR_ENC

.Lctr_enc_done:
  aes_clear_keys(w5)

  st1 {v0.16b}, [x3] /* store IV */

  CLEAR_REG(v0)
  CLEAR_REG(v1)
  CLEAR_REG(v2)
  CLEAR_REG(v16)

  add sp, sp, #128;
  CFI_ADJUST_CFA_OFFSET(-128);

.Lctr_enc_skip:
  ret_spec_stop
  CFI_ENDPROC();
ELF(.size _gcry_aes_ctr_enc_armv8_ce,.-_gcry_aes_ctr_enc_armv8_ce;)

/*
 * void _gcry_aes_ctr32le_enc_armv8_ce (const void *keysched,
 *                                      unsigned char *outbuf,
 *                                      const unsigned char *inbuf,
 *                                      unsigned char *iv,
 *                                      size_t nblocks, unsigned int nrounds);
 */

.align 3
.globl _gcry_aes_ctr32le_enc_armv8_ce
ELF(.type _gcry_aes_ctr32le_enc_armv8_ce,%function;)
_gcry_aes_ctr32le_enc_armv8_ce:
  /* input:
   *    r0: keysched
   *    r1: outbuf
   *    r2: inbuf
   *    r3: iv
   *    x4: nblocks
   *    w5: nrounds
   */
  CFI_STARTPROC();

  cbz x4, .Lctr32le_enc_skip

  add x8, sp, #-64
  add sp, sp, #-128;
  CFI_ADJUST_CFA_OFFSET(128);

  mov w6, #1
  movi v16.16b, #0
  mov v16.S[0], w6

  /* load IV */
  ld1 {v0.16b}, [x3]

  aes_preload_keys(x0, w5);

  b.eq .Lctr32le_enc_entry_192
  b.hi .Lctr32le_enc_entry_256

#define CTR32LE_ENC(bits) \
  .Lctr32le_enc_entry_##bits: \
    cmp x4, #4; \
    b.lo .Lctr32le_enc_loop_##bits; \
    \
    st1 {v8.16b-v11.16b}, [sp]; /* store callee saved registers */ \
    add v9.4s, v16.4s, v16.4s; /* 2 */ \
    cmp x4, #8; \
    add v10.4s, v9.4s, v16.4s; /* 3 */ \
    sub x4, x4, #4; \
    add v11.4s, v9.4s, v9.4s; /* 4 */ \
    \
    ld1 {v5.16b-v8.16b}, [x2], #64; /* preload ciphertext */ \
    \
    mov v1.16b, v0.16b; \
    add v2.4s, v0.4s, v16.4s; \
    add v3.4s, v0.4s, v9.4s; \
    add v4.4s, v0.4s, v10.4s; \
    add v0.4s, v0.4s, v11.4s; \
    \
    do_aes_4_part1(e, mc, v1, v2, v3, v4, vk0); \
    b.lo .Lctr32le_enc_done4_##bits; \
    \
    st1 {v12.16b-v15.16b}, [x8]; /* store callee saved registers */ \
    \
  .Lctr32le_enc_loop4_##bits: \
    eor v5.16b, v5.16b, vklast.16b; \
    eor v6.16b, v6.16b, vklast.16b; \
    eor v7.16b, v7.16b, vklast.16b; \
    eor v8.16b, v8.16b, vklast.16b; \
    do_aes_4_part2_##bits(e, mc, v12, v13, v14, v15, v1, v2, v3, v4, v5, v6, v7, v8); \
    ld1 {v5.16b-v8.16b}, [x2], #64; /* preload ciphertext */ \
    \
    cmp x4, #8; \
    sub x4, x4, #4; \
    \
    mov v1.16b, v0.16b; \
    add v2.4s, v0.4s, v16.4s; \
    add v3.4s, v0.4s, v9.4s; \
    add v4.4s, v0.4s, v10.4s; \
    add v0.4s, v0.4s, v11.4s; \
    \
    do_aes_4_part1(e, mc, v1, v2, v3, v4, vk0); \
    st1 {v12.16b-v15.16b}, [x1], #64; /* store plaintext */ \
    \
    b.hs .Lctr32le_enc_loop4_##bits; \
    \
    ld1 {v12.16b-v15.16b}, [x8]; /* restore callee saved registers */ \
    \
  .Lctr32le_enc_done4_##bits: \
    eor v5.16b, v5.16b, vklast.16b; \
    eor v6.16b, v6.16b, vklast.16b; \
    eor v7.16b, v7.16b, vklast.16b; \
    eor v8.16b, v8.16b, vklast.16b; \
    do_aes_4_part2_##bits(e, mc, v5, v6, v7, v8, v1, v2, v3, v4, v5, v6, v7, v8); \
    \
    st1 {v5.16b-v8.16b}, [x1], #64; /* store plaintext */ \
    CLEAR_REG(v3); \
    CLEAR_REG(v4); \
    ld1 {v8.16b-v11.16b}, [sp]; /* restore callee saved registers */ \
    CLEAR_REG(v5); \
    CLEAR_REG(v6); \
    CLEAR_REG(v7); \
    cbz x4, .Lctr32le_enc_done; \
    \
  .Lctr32le_enc_loop_##bits: \
    \
    mov v1.16b, v0.16b; \
    ld1 {v2.16b}, [x2], #16; /* load ciphertext */ \
    sub x4, x4, #1; \
    add v0.4s, v0.4s, v16.4s; \
    \
    do_aes_one_part1(e, mc, v1, vk0); \
    eor v2.16b, v2.16b, vklast.16b; \
    do_aes_one_part2_##bits(e, mc, v1, __, __); \
    \
    eor v1.16b, v1.16b, v2.16b; \
    st1 {v1.16b}, [x1], #16; /* store plaintext */ \
    \
    cbnz x4, .Lctr32le_enc_loop_##bits; \
    b .Lctr32le_enc_done;

CTR32LE_ENC(128)
CTR32LE_ENC(192)
CTR32LE_ENC(256)

#undef CTR32LE_ENC

.Lctr32le_enc_done:
  aes_clear_keys(w5)

  st1 {v0.16b}, [x3] /* store IV */

  CLEAR_REG(v0)
  CLEAR_REG(v1)
  CLEAR_REG(v2)
  CLEAR_REG(v16)

  add sp, sp, #128;
  CFI_ADJUST_CFA_OFFSET(-128);

.Lctr32le_enc_skip:
  ret_spec_stop
  CFI_ENDPROC();
ELF(.size _gcry_aes_ctr32le_enc_armv8_ce,.-_gcry_aes_ctr32le_enc_armv8_ce;)

/*
 * void _gcry_aes_cfb_enc_armv8_ce (const void *keysched,
 *                                  unsigned char *outbuf,
 *                                  const unsigned char *inbuf,
 *                                  unsigned char *iv,
 *                                  size_t nblocks, unsigned int nrounds);
 */

.align 3
.globl _gcry_aes_cfb_enc_armv8_ce
ELF(.type _gcry_aes_cfb_enc_armv8_ce,%function;)
_gcry_aes_cfb_enc_armv8_ce:
  /* input:
   *    r0: keysched
   *    r1: outbuf
   *    r2: inbuf
   *    r3: iv
   *    x4: nblocks
   *    w5: nrounds
   */
  CFI_STARTPROC();

  cbz x4, .Lcfb_enc_skip

  /* load IV */
  ld1 {v0.16b}, [x3]

  aes_preload_keys(x0, w5);

  ld1 {v1.16b}, [x2], #16; /* load plaintext */
  eor v3.16b, vk0.16b, vklast.16b;
  eor v0.16b, v0.16b, vklast.16b;
  sub x4, x4, #1;
  mov v4.16b, v3.16b;
  do_aes_one_part1(e, mc, v0, v4);

  b.eq .Lcfb_enc_entry_192
  b.hi .Lcfb_enc_entry_256

#define CFB_ENC(bits) \
  .Lcfb_enc_entry_##bits: \
    cbz x4, .Lcfb_enc_done_##bits; \
    \
  .Lcfb_enc_loop_##bits: \
    eor v2.16b, v1.16b, vklast.16b; \
    do_aes_one_part2_##bits(e, mc, v0, \
                            _(eor v4.16b, v3.16b, v1.16b), \
                            _(ld1 {v1.16b}, [x2], #16 /* load plaintext */)); \
    sub x4, x4, #1; \
    eor v2.16b, v2.16b, v0.16b; \
    do_aes_one_part1(e, mc, v0, v4); \
    st1 {v2.16b}, [x1], #16; /* store ciphertext */ \
    cbnz x4, .Lcfb_enc_loop_##bits; \
    \
  .Lcfb_enc_done_##bits: \
    eor v2.16b, v1.16b, vklast.16b; \
    do_aes_one_part2_##bits(e, mc, v0, __, __); \
    b .Lcfb_enc_done;

CFB_ENC(128)
CFB_ENC(192)
CFB_ENC(256)

#undef CFB_ENC

.Lcfb_enc_done:
  eor v2.16b, v2.16b, v0.16b;
  st1 {v2.16b}, [x1]; /* store ciphertext */
  aes_clear_keys(w5)
  st1 {v2.16b}, [x3] /* store IV */

  CLEAR_REG(v0)
  CLEAR_REG(v1)
  CLEAR_REG(v2)
  CLEAR_REG(v3)
  CLEAR_REG(v4)

.Lcfb_enc_skip:
  ret_spec_stop
  CFI_ENDPROC();
ELF(.size _gcry_aes_cfb_enc_armv8_ce,.-_gcry_aes_cfb_enc_armv8_ce;)

/*
 * void _gcry_aes_cfb_dec_armv8_ce (const void *keysched,
 *                                  unsigned char *outbuf,
 *                                  const unsigned char *inbuf,
 *                                  unsigned char *iv,
 *                                  size_t nblocks, unsigned int nrounds);
 */

.align 3
.globl _gcry_aes_cfb_dec_armv8_ce
ELF(.type _gcry_aes_cfb_dec_armv8_ce,%function;)
_gcry_aes_cfb_dec_armv8_ce:
  /* input:
   *    r0: keysched
   *    r1: outbuf
   *    r2: inbuf
   *    r3: iv
   *    x4: nblocks
   *    w5: nrounds
   */
  CFI_STARTPROC();

  cbz x4, .Lcfb_dec_skip

  add sp, sp, #-64;
  CFI_ADJUST_CFA_OFFSET(64);

  /* load IV */
  ld1 {v0.16b}, [x3]

  aes_preload_keys(x0, w5);

  b.eq .Lcfb_dec_entry_192
  b.hi .Lcfb_dec_entry_256

#define CFB_DEC(bits) \
  .Lcfb_dec_entry_##bits: \
    cmp x4, #4; \
    b.lo .Lcfb_dec_loop_##bits; \
    \
    ld1 {v2.16b-v5.16b}, [x2], #64; /* load ciphertext */ \
    cmp x4, #8; \
    mov v1.16b, v0.16b; \
    sub x4, x4, #4; \
    eor v6.16b, v2.16b, vklast.16b; \
    eor v7.16b, v3.16b, vklast.16b; \
    eor v16.16b, v4.16b, vklast.16b; \
    mov v0.16b, v5.16b; /* next IV */ \
    eor v5.16b, v5.16b, vklast.16b; \
    \
    do_aes_4_part1(e, mc, v1, v2, v3, v4, vk0); \
    b.lo .Lcfb_dec_done4_##bits; \
    \
    st1 {v8.16b-v11.16b}, [sp]; /* store callee saved registers */ \
    \
  .Lcfb_dec_loop4_##bits: \
    do_aes_4_part2_##bits(e, mc, v8, v9, v10, v11, v1, v2, v3, v4, v6, v7, v16, v5); \
    ld1 {v2.16b-v5.16b}, [x2], #64; /* load ciphertext */ \
    cmp x4, #8; \
    mov v1.16b, v0.16b; \
    sub x4, x4, #4; \
    eor v6.16b, v2.16b, vklast.16b; \
    eor v7.16b, v3.16b, vklast.16b; \
    eor v16.16b, v4.16b, vklast.16b; \
    mov v0.16b, v5.16b; /* next IV */ \
    eor v5.16b, v5.16b, vklast.16b; \
    \
    do_aes_4_part1(e, mc, v1, v2, v3, v4, vk0); \
    st1 {v8.16b-v11.16b}, [x1], #64; /* store plaintext */ \
    \
    b.hs .Lcfb_dec_loop4_##bits; \
    \
    ld1 {v8.16b-v11.16b}, [sp]; /* restore callee saved registers */ \
    \
  .Lcfb_dec_done4_##bits: \
    do_aes_4_part2_##bits(e, mc, v1, v2, v3, v4, v1, v2, v3, v4, v6, v7, v16, v5); \
    \
    CLEAR_REG(v5); \
    CLEAR_REG(v6); \
    CLEAR_REG(v7); \
    st1 {v1.16b-v4.16b}, [x1], #64; /* store plaintext */ \
    CLEAR_REG(v3); \
    CLEAR_REG(v4); \
    cbz x4, .Lcfb_dec_done; \
    \
  .Lcfb_dec_loop_##bits: \
    ld1 {v1.16b}, [x2], #16; /* load ciphertext */ \
    sub x4, x4, #1; \
    \
    do_aes_one_part1(e, mc, v0, vk0); \
    eor v2.16b, v1.16b, vklast.16b; \
    do_aes_one_part2_##bits(e, mc, v0, __, __); \
    eor v2.16b, v2.16b, v0.16b; \
    \
    mov v0.16b, v1.16b; \
    st1 {v2.16b}, [x1], #16; /* store plaintext */ \
    \
    cbnz x4, .Lcfb_dec_loop_##bits; \
    b .Lcfb_dec_done;

CFB_DEC(128)
CFB_DEC(192)
CFB_DEC(256)

#undef CFB_DEC

.Lcfb_dec_done:
  aes_clear_keys(w5)

  st1 {v0.16b}, [x3] /* store IV */

  CLEAR_REG(v0)
  CLEAR_REG(v1)
  CLEAR_REG(v2)
  CLEAR_REG(v16)

  add sp, sp, #64;
  CFI_ADJUST_CFA_OFFSET(-64);

.Lcfb_dec_skip:
  ret_spec_stop
  CFI_ENDPROC();
ELF(.size _gcry_aes_cfb_dec_armv8_ce,.-_gcry_aes_cfb_dec_armv8_ce;)

/*
 * long _gcry_aes_ocb_enc_armv8_ce (const void *keysched,
 *                                  unsigned char *outbuf,
 *                                  const unsigned char *inbuf,
 *                                  unsigned char *offset,
 *                                  unsigned char *checksum,
 *                                  unsigned char *L_table,
 *                                  size_t nblocks,
 *                                  unsigned int nrounds,
 *                                  unsigned int blkn);
 */

.align 3
.globl _gcry_aes_ocb_enc_armv8_ce
ELF(.type _gcry_aes_ocb_enc_armv8_ce,%function;)
_gcry_aes_ocb_enc_armv8_ce:
  /* input:
   *    x0: keysched
   *    x1: outbuf
   *    x2: inbuf
   *    x3: offset
   *    x4: checksum
   *    x5: Ltable
   *    x6: nblocks (0 < nblocks)
   *    w7: nrounds
   *    %st+0: blkn => w12
   */
  CFI_STARTPROC();

  ldr w12, [sp]
  ld1 {v0.16b}, [x3] /* load offset */
  ld1 {v16.16b}, [x4] /* load checksum */

  add x16, sp, #-64;
  add sp, sp, #-128;
  CFI_ADJUST_CFA_OFFSET(128);

  aes_preload_keys(x0, w7);

  st1 {v8.16b-v11.16b}, [sp]; /* store callee saved registers */

  eor v0.16b, v0.16b, vk0.16b; /* offset ^ first key */
  eor v9.16b, vk0.16b, vklast.16b; /* first key ^ last key */

  b.eq .Locb_ecry_entry_192
  b.hi .Locb_ecry_entry_256

/* OCB bulk body shared by encryption and decryption; ENC()/DEC() select the
 * direction-specific checksum updates before each set of expansions. */
#define OCB_CRYPT(bits, ed, mcimc) \
  .Locb_##ed##cry_entry_##bits: \
    /* Get number of blocks to align nblk to 4. */ \
    neg x13, x12; \
    add x12, x12, #1; /* Pre-increment nblk for ntz calculation */ \
    and x13, x13, #(4-1); \
    cmp x13, x6; \
    csel x13, x6, x13, hi; \
    cbz x13, .Locb_##ed##cry_alignment_ok_##bits; \
    \
    /* Number of blocks after alignment. */ \
    sub x14, x6, x13; \
    \
    /* If number after alignment is less than 4, skip aligned handling \
     * completely. */ \
    cmp x14, #4; \
    csel x13, x6, x13, lo; \
    \
  .Locb_##ed##cry_unaligned_entry_##bits: \
    cmp x13, #4; \
    \
  .Locb_##ed##cry_loop1_##bits: \
    \
    /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ \
    /* Checksum_i = Checksum_{i-1} xor P_i */ \
    /* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i) */ \
    \
    rbit x8, x12; \
    add x12, x12, #1; \
    clz x8, x8; /* ntz(i) */ \
    add x8, x5, x8, lsl #4; \
    \
    ld1 {v1.16b}, [x2], #16; /* load plaintext */ \
    ld1 {v2.16b}, [x8]; /* load L_{ntz(i)} */ \
    eor v0.16b, v0.16b, v2.16b; \
    sub x13, x13, #1; \
    ENC(eor v16.16b, v16.16b, v1.16b); \
    sub x6, x6, #1; \
    \
    do_aes_one_part1(ed, mcimc, v1, v0); \
    eor v2.16b, v0.16b, v9.16b; \
    do_aes_one_part2_##bits(ed, mcimc, v1, __, __); \
    eor v1.16b, v1.16b, v2.16b; \
    st1 {v1.16b}, [x1], #16; /* store ciphertext */ \
    DEC(eor v16.16b, v16.16b, v1.16b); \
    \
    cbnz x13, .Locb_##ed##cry_loop1_##bits; \
    \
    cbz x6, .Locb_##ed##cry_done; \
    \
    /* nblk is now aligned and we have 4 or more blocks. So jump directly to \
     * aligned processing. */ \
    b .Locb_##ed##cry_aligned_entry_##bits; \
    \
  .Locb_##ed##cry_alignment_ok_##bits: \
    cbz x6, .Locb_##ed##cry_done; \
    \
    /* Short buffers do not benefit from L-array optimization. */ \
    cmp x6, #4; \
    mov x13, x6; \
    b.lo .Locb_##ed##cry_unaligned_entry_##bits; \
    \
  .Locb_##ed##cry_aligned_entry_##bits: \
    /* Prepare L-array optimization. \
     * Since nblk is aligned to 4, offsets will have following construction: \
     *  - block1 = ntz{0} = offset ^ L[0] \
     *  - block2 = ntz{1} = offset ^ L[0] ^ L[1] \
     *  - block3 = ntz{0} = offset ^ L[1] \
     *  - block4 = ntz{x} = offset ^ L[1] ^ L[ntz{x}] \
     */ \
    ld1 {v10.16b-v11.16b}, [x5]; /* preload L[0] && L[1] */ \
    mov x15, #4; \
    \
    st1 {v12.16b-v15.16b}, [x16]; /* store callee saved registers */ \
    \
    /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ \
    /* Checksum_i = Checksum_{i-1} xor P_i */ \
    /* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i) */ \
    \
    add x11, x12, #3; \
    ld1 {v1.16b-v4.16b}, [x2], #64; /* load P_i+<0-3> */ \
    rbit x11, x11; \
    eor v6.16b, v10.16b, v11.16b; /* L[0] ^ L[1] */ \
    ENC(eor v16.16b, v16.16b, v1.16b); /* Checksum_i+0 */ \
    add x12, x12, #4; \
    clz x11, x11; /* ntz(i+3) */ \
    add x15, x15, #4; \
    add x11, x5, x11, lsl #4; \
    \
    eor v5.16b, v0.16b, v10.16b; /* Offset_i+0 */ \
    ENC(eor v16.16b, v16.16b, v2.16b); /* Checksum_i+1 */ \
    ld1 {v8.16b}, [x11]; /* load L_{ntz(i+3)} */ \
    ENC(eor v16.16b, v16.16b, v3.16b); /* Checksum_i+2 */ \
    eor v6.16b, v0.16b, v6.16b; /* Offset_i+1 */ \
    ENC(eor v16.16b, v16.16b, v4.16b); /* Checksum_i+3 */ \
    eor v7.16b, v0.16b, v11.16b; /* Offset_i+2 */ \
    eor v8.16b, v8.16b, v11.16b; /* L[1] ^ L[ntz{x}] */ \
    cmp x15, x13; \
    eor v0.16b, v0.16b, v8.16b; /* Offset_i+3 */ \
    \
    do_aes_4_part1_multikey(ed, mcimc, v1, v2, v3, v4, v5, v6, v7, v0); /* P_i+j xor Offset_i+j */ \
    b.hi .Locb_##ed##cry_aligned_done4_##bits; \
    \
  .Locb_##ed##cry_aligned_loop4_##bits: \
    add x11, x12, #3; \
    eor v5.16b, v5.16b, v9.16b; \
    eor v6.16b, v6.16b, v9.16b; \
    rbit x11, x11; \
    eor v7.16b, v7.16b, v9.16b; \
    eor v8.16b, v0.16b, v9.16b; \
    clz x11, x11; /* ntz(i+3) */ \
    do_aes_4_part2_##bits(ed, mcimc, v12, v13, v14, v15, v1, v2, v3, v4, v5, v6, v7, v8); /* xor Offset_i+j */ \
    \
    /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ \
    /* Checksum_i = Checksum_{i-1} xor P_i */ \
    /* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i) */ \
    \
    add x12, x12, #4; \
    ld1 {v1.16b-v4.16b}, [x2], #64; /* load P_i+<0-3> */ \
    eor v6.16b, v10.16b, v11.16b; /* L[0] ^ L[1] */ \
    add x15, x15, #4; \
    DEC(eor v16.16b, v16.16b, v12.16b); /* Checksum_i+0 */ \
    ENC(eor v16.16b, v16.16b, v1.16b); /* Checksum_i+0 */ \
    add x11, x5, x11, lsl #4; \
    \
    eor v5.16b, v0.16b, v10.16b; /* Offset_i+0 */ \
    ENC(eor v16.16b, v16.16b, v2.16b); /* Checksum_i+1 */ \
    DEC(eor v16.16b, v16.16b, v13.16b); /* Checksum_1+2 */ \
    ld1 {v8.16b}, [x11]; /* load L_{ntz(i+3)} */ \
    ENC(eor v16.16b, v16.16b, v3.16b); /* Checksum_i+2 */ \
    DEC(eor v16.16b, v16.16b, v14.16b); /* Checksum_i+0+3 */ \
    eor v6.16b, v0.16b, v6.16b; /* Offset_i+1 */ \
    ENC(eor v16.16b, v16.16b, v4.16b); /* Checksum_i+3 */ \
    DEC(eor v16.16b, v16.16b, v15.16b); /* Checksum_i+0+1+2 */ \
    eor v7.16b, v0.16b, v11.16b; /* Offset_i+2 */ \
    eor v8.16b, v8.16b, v11.16b; /* L[1] ^ L[ntz{x}] */ \
    cmp x15, x13; \
    eor v0.16b, v0.16b, v8.16b; /* Offset_i+3 */ \
    \
    do_aes_4_part1_multikey(ed, mcimc, v1, v2, v3, v4, v5, v6, v7, v0); /* P_i+j xor Offset_i+j */ \
    st1 {v12.16b-v15.16b}, [x1], #64; \
    \
    b.ls .Locb_##ed##cry_aligned_loop4_##bits; \
    \
  .Locb_##ed##cry_aligned_done4_##bits: \
    eor v5.16b, v5.16b, v9.16b; \
    eor v6.16b, v6.16b, v9.16b; \
    eor v7.16b, v7.16b, v9.16b; \
    eor v8.16b, v0.16b, v9.16b; \
    do_aes_4_part2_##bits(ed, mcimc, v1, v2, v3, v4, v1, v2, v3, v4, v5, v6, v7, v8); /* xor Offset_i+j */ \
    DEC(eor v16.16b, v16.16b, v1.16b); /* Checksum_i+0 */ \
    DEC(eor v5.16b, v2.16b, v3.16b); /* Checksum_1+2 */ \
    DEC(eor v16.16b, v16.16b, v4.16b); /* Checksum_i+0+3 */ \
    st1 {v1.16b-v4.16b}, [x1], #64; \
    DEC(eor v16.16b, v16.16b, v5.16b); /* Checksum_i+0+1+2 */ \
    \
    sub x15, x15, #4; \
    CLEAR_REG(v3); \
    CLEAR_REG(v4); \
    ld1 {v12.16b-v15.16b}, [x16]; /* restore callee saved registers */ \
    sub x13, x13, x15; \
    sub x6, x6, x15; \
    CLEAR_REG(v5); \
    CLEAR_REG(v6); \
    \
    /* Handle tailing 1…3 blocks in unaligned loop. */ \
    mov x13, x6; \
    cbnz x6, .Locb_##ed##cry_unaligned_entry_##bits; \
    \
    b .Locb_##ed##cry_done;

#define ENC(...) __VA_ARGS__
#define DEC(...) /*_*/
OCB_CRYPT(128, e, mc)
OCB_CRYPT(192, e, mc)
OCB_CRYPT(256, e, mc)
#undef ENC
#undef DEC

.Locb_ecry_done:
  eor v0.16b, v0.16b, vk0.16b; /* restore offset */

  ld1 {v8.16b-v11.16b}, [sp]; /* restore callee saved registers */
  aes_clear_keys(w7)

  st1 {v16.16b}, [x4] /* store checksum */
  st1 {v0.16b}, [x3] /* store offset */

  CLEAR_REG(v0)
  CLEAR_REG(v1)
  CLEAR_REG(v2)
  CLEAR_REG(v7)
  CLEAR_REG(v16)

  add sp, sp, #128;
  CFI_ADJUST_CFA_OFFSET(-128);

  mov x0, #0
  ret_spec_stop
  CFI_ENDPROC();
ELF(.size _gcry_aes_ocb_enc_armv8_ce,.-_gcry_aes_ocb_enc_armv8_ce;)

/*
 * long _gcry_aes_ocb_dec_armv8_ce (const void *keysched,
 *                                  unsigned char *outbuf,
 *                                  const unsigned char *inbuf,
 *                                  unsigned char *offset,
 *                                  unsigned char *checksum,
 *                                  unsigned char *L_table,
 *                                  size_t nblocks,
 *                                  unsigned int nrounds,
 *                                  unsigned int blkn);
 */

.align 3
.globl _gcry_aes_ocb_dec_armv8_ce
ELF(.type _gcry_aes_ocb_dec_armv8_ce,%function;)
_gcry_aes_ocb_dec_armv8_ce:
  /* input:
   *    x0: keysched
   *    x1: outbuf
   *    x2: inbuf
   *    x3: offset
   *    x4: checksum
   *    x5: Ltable
   *    x6: nblocks (0 < nblocks)
   *    w7: nrounds
   *    %st+0: blkn => w12
   */
  CFI_STARTPROC();

  ldr w12, [sp]
  ld1 {v0.16b}, [x3] /* load offset */
  ld1 {v16.16b}, [x4] /* load checksum */

  add x16, sp, #-64;
  add sp, sp, #-128;
  CFI_ADJUST_CFA_OFFSET(128);

  aes_preload_keys(x0, w7);

  st1 {v8.16b-v11.16b}, [sp]; /* store callee saved registers */

  eor v0.16b, v0.16b, vk0.16b; /* offset ^ first key */
  eor v9.16b, vk0.16b, vklast.16b; /* first key ^ last key */

  b.eq .Locb_dcry_entry_192
  b.hi .Locb_dcry_entry_256

#define ENC(...) /*_*/
#define DEC(...) __VA_ARGS__
OCB_CRYPT(128, d, imc)
OCB_CRYPT(192, d, imc)
OCB_CRYPT(256, d, imc)
#undef ENC
#undef DEC
#undef OCB_CRYPT

.Locb_dcry_done:
  eor v0.16b, v0.16b, vk0.16b; /* restore offset */

  ld1 {v8.16b-v11.16b}, [sp]; /* restore callee saved registers */
  aes_clear_keys(w7)

  st1 {v16.16b}, [x4] /* store checksum */
  st1 {v0.16b}, [x3] /* store offset */

  CLEAR_REG(v0)
  CLEAR_REG(v1)
  CLEAR_REG(v2)
  CLEAR_REG(v16)

  add sp, sp, #128;
  CFI_ADJUST_CFA_OFFSET(-128);

  mov x0, #0
  ret_spec_stop
  CFI_ENDPROC();
ELF(.size _gcry_aes_ocb_dec_armv8_ce,.-_gcry_aes_ocb_dec_armv8_ce;)

/*
 * long _gcry_aes_ocb_auth_armv8_ce (const void *keysched,
 *                                   const unsigned char *abuf,
 *                                   unsigned char *offset,
 *                                   unsigned char *checksum,
 *                                   unsigned char *L_table,
 *                                   size_t nblocks,
 *                                   unsigned int nrounds,
 *                                   unsigned int blkn);
 */

.align 3
.globl _gcry_aes_ocb_auth_armv8_ce
ELF(.type _gcry_aes_ocb_auth_armv8_ce,%function;)
_gcry_aes_ocb_auth_armv8_ce:
  /* input:
   *    x0: keysched
   *    x1: abuf
   *    x2: offset => x3
   *    x3: checksum => x4
   *    x4: Ltable => x5
   *    x5: nblocks => x6  (0 < nblocks <= 32)
   *    w6: nrounds => w7
   *    w7: blkn => w12
   */
  CFI_STARTPROC();

  /* Shuffle arguments up by one register so this body can share register
   * assignments with the ocb_enc/ocb_dec code above. */
  mov w12, w7
  mov w7, w6
  mov x6, x5
  mov x5, x4
  mov x4, x3
  mov x3, x2

  aes_preload_keys(x0, w7);

  ld1 {v0.16b}, [x3] /* load offset */
  ld1 {v16.16b}, [x4] /* load checksum */

  b.eq .Locb_auth_entry_192
  b.hi .Locb_auth_entry_256

#define OCB_AUTH(bits) \
  .Locb_auth_entry_##bits: \
    cmp x6, #4; \
    add w12, w12, #1; \
    b.lo .Locb_auth_loop_##bits; \
    \
  .Locb_auth_loop4_##bits: \
    \
    /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ \
    /* Sum_i = Sum_{i-1} xor ENCIPHER(K, A_i xor Offset_i) */ \
    \
    add w9, w12, #1; \
    add w10, w12, #2; \
    add w11, w12, #3; \
    rbit w8, w12; \
    add w12, w12, #4; \
    rbit w9, w9; \
    rbit w10, w10; \
    rbit w11, w11; \
    clz w8, w8; /* ntz(i+0) */ \
    clz w9, w9; /* ntz(i+1) */ \
    clz w10, w10; /* ntz(i+2) */ \
    clz w11, w11; /* ntz(i+3) */ \
    add x8, x5, x8, lsl #4; \
    ld1 {v1.16b-v4.16b}, [x1], #64; /* load A_i+<0-3> */ \
    add x9, x5, x9, lsl #4; \
    add x10, x5, x10, lsl #4; \
    add x11, x5, x11, lsl #4; \
    \
    sub x6, x6, #4; \
    \
    ld1 {v5.16b}, [x8]; /* load L_{ntz(i+0)} */ \
    ld1 {v6.16b}, [x9]; /* load L_{ntz(i+1)} */ \
    ld1 {v7.16b}, [x10]; /* load L_{ntz(i+2)} */ \
    eor v5.16b, v5.16b, v0.16b; /* Offset_i+0 */ \
    ld1 {v0.16b}, [x11]; /* load L_{ntz(i+3)} */ \
    eor v6.16b, v6.16b, v5.16b; /* Offset_i+1 */ \
    eor v1.16b, v1.16b, v5.16b; /* A_i+0 xor Offset_i+0 */ \
    eor v7.16b, v7.16b, v6.16b; /* Offset_i+2 */ \
    eor v2.16b, v2.16b, v6.16b; /* A_i+1 xor Offset_i+1 */ \
    eor v0.16b, v0.16b, v7.16b; /* Offset_i+3 */ \
    cmp x6, #4; \
    eor v3.16b, v3.16b, v7.16b; /* A_i+2 xor Offset_i+2 */ \
    eor v4.16b, v4.16b, v0.16b; /* A_i+3 xor Offset_i+3 */ \
    \
    do_aes_4_##bits(e, mc, v1, v2, v3, v4); \
    \
    eor v1.16b, v1.16b, v2.16b; \
    eor v16.16b, v16.16b, v3.16b; \
    eor v1.16b, v1.16b, v4.16b; \
    eor v16.16b, v16.16b, v1.16b; \
    \
    b.hs .Locb_auth_loop4_##bits; \
    CLEAR_REG(v3); \
    CLEAR_REG(v4); \
    CLEAR_REG(v5); \
    CLEAR_REG(v6); \
    CLEAR_REG(v7); \
    cbz x6, .Locb_auth_done; \
    \
  .Locb_auth_loop_##bits: \
    \
    /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ \
    /* Sum_i = Sum_{i-1} xor ENCIPHER(K, A_i xor Offset_i) */ \
    \
    rbit w8, w12; \
    add w12, w12, #1; \
    clz w8, w8; /* ntz(i) */ \
    add x8, x5, x8, lsl #4; \
    \
    ld1 {v1.16b}, [x1], #16; /* load aadtext */ \
    ld1 {v2.16b}, [x8]; /* load L_{ntz(i)} */ \
    sub x6, x6, #1; \
    eor v0.16b, v0.16b, v2.16b; \
    eor v1.16b, v1.16b, v0.16b; \
    \
    do_aes_one##bits(e, mc, v1, v1, vk0) \
    \
    eor v16.16b, v16.16b, v1.16b; \
    \
    cbnz x6, .Locb_auth_loop_##bits; \
    b .Locb_auth_done;

OCB_AUTH(128)
OCB_AUTH(192)
OCB_AUTH(256)

#undef OCB_AUTH

.Locb_auth_done:
  aes_clear_keys(w7)

  st1 {v16.16b}, [x4] /* store checksum */
  st1 {v0.16b}, [x3] /* store offset */

  CLEAR_REG(v0)
  CLEAR_REG(v1)
  CLEAR_REG(v2)
  CLEAR_REG(v16)

  mov x0, #0
  ret_spec_stop
  CFI_ENDPROC();
ELF(.size _gcry_aes_ocb_auth_armv8_ce,.-_gcry_aes_ocb_auth_armv8_ce;)

/*
 * void _gcry_aes_xts_enc_armv8_ce (const void *keysched,
 *                                  unsigned char *outbuf,
 *                                  const
unsigned char *inbuf, * unsigned char *tweak, * size_t nblocks, * unsigned int nrounds); */ .align 3 .globl _gcry_aes_xts_enc_armv8_ce ELF(.type _gcry_aes_xts_enc_armv8_ce,%function;) _gcry_aes_xts_enc_armv8_ce: /* input: * r0: keysched * r1: outbuf * r2: inbuf * r3: tweak * x4: nblocks * w5: nrounds */ CFI_STARTPROC(); cbz x4, .Lxts_enc_skip add x16, sp, #-64; add sp, sp, #-128; CFI_ADJUST_CFA_OFFSET(128); /* load tweak */ ld1 {v0.16b}, [x3] /* load gfmul mask */ mov x6, #0x87 mov x7, #0x01 mov v16.D[0], x6 mov v16.D[1], x7 aes_preload_keys(x0, w5); eor vklast.16b, vklast.16b, vk0.16b; b.eq .Lxts_ecry_entry_192 b.hi .Lxts_ecry_entry_256 #define XTS_CRYPT(bits, ed, mcimc) \ .Lxts_##ed##cry_entry_##bits: \ cmp x4, #4; \ b.lo .Lxts_##ed##cry_loop_##bits; \ \ st1 {v8.16b}, [sp]; /* store callee saved registers */ \ ext v4.16b, v0.16b, v0.16b, #8; \ mov v8.16b, v0.16b; \ \ sshr v2.2d, v4.2d, #63; \ add v5.2d, v0.2d, v0.2d; \ and v2.16b, v2.16b, v16.16b; \ add v4.2d, v4.2d, v4.2d; \ eor v5.16b, v5.16b, v2.16b; \ \ sshr v2.2d, v4.2d, #63; \ add v6.2d, v5.2d, v5.2d; \ and v2.16b, v2.16b, v16.16b; \ add v4.2d, v4.2d, v4.2d; \ eor v6.16b, v6.16b, v2.16b; \ \ sshr v2.2d, v4.2d, #63; \ add v7.2d, v6.2d, v6.2d; \ and v2.16b, v2.16b, v16.16b; \ add v4.2d, v4.2d, v4.2d; \ eor v7.16b, v7.16b, v2.16b; \ \ sshr v2.2d, v4.2d, #63; \ add v3.2d, v7.2d, v7.2d; \ and v2.16b, v2.16b, v16.16b; \ add v4.2d, v4.2d, v4.2d; \ eor v0.16b, v3.16b, v2.16b; \ ld1 {v1.16b-v4.16b}, [x2], #64; /* load plaintext */ \ cmp x4, #8; \ sub x4, x4, #4; \ \ eor v8.16b, v8.16b, vk0.16b; \ eor v5.16b, v5.16b, vk0.16b; \ eor v6.16b, v6.16b, vk0.16b; \ eor v7.16b, v7.16b, vk0.16b; \ \ do_aes_4_part1_multikey(ed, mcimc, v1, v2, v3, v4, v8, v5, v6, v7); \ b.lo .Lxts_##ed##cry_done4_##bits; \ \ st1 {v9.16b-v12.16b}, [x16]; /* store callee saved registers */ \ \ .Lxts_##ed##cry_loop4_##bits: \ eor v8.16b, v8.16b, vklast.16b; \ eor v5.16b, v5.16b, vklast.16b; \ eor v6.16b, v6.16b, vklast.16b; \ eor v7.16b, v7.16b, 
vklast.16b; \ do_aes_4_part2_##bits(ed, mcimc, v9, v10, v11, v12, v1, v2, v3, v4, v8, v5, v6, v7); \ \ ext v4.16b, v0.16b, v0.16b, #8; \ mov v8.16b, v0.16b; \ \ sshr v2.2d, v4.2d, #63; \ add v5.2d, v0.2d, v0.2d; \ and v2.16b, v2.16b, v16.16b; \ add v4.2d, v4.2d, v4.2d; \ eor v5.16b, v5.16b, v2.16b; \ \ sshr v2.2d, v4.2d, #63; \ add v6.2d, v5.2d, v5.2d; \ and v2.16b, v2.16b, v16.16b; \ add v4.2d, v4.2d, v4.2d; \ eor v6.16b, v6.16b, v2.16b; \ \ sshr v2.2d, v4.2d, #63; \ add v7.2d, v6.2d, v6.2d; \ and v2.16b, v2.16b, v16.16b; \ add v4.2d, v4.2d, v4.2d; \ eor v7.16b, v7.16b, v2.16b; \ \ sshr v2.2d, v4.2d, #63; \ add v3.2d, v7.2d, v7.2d; \ and v2.16b, v2.16b, v16.16b; \ add v4.2d, v4.2d, v4.2d; \ eor v0.16b, v3.16b, v2.16b; \ ld1 {v1.16b-v4.16b}, [x2], #64; /* load plaintext */ \ cmp x4, #8; \ sub x4, x4, #4; \ \ eor v8.16b, v8.16b, vk0.16b; \ eor v5.16b, v5.16b, vk0.16b; \ eor v6.16b, v6.16b, vk0.16b; \ eor v7.16b, v7.16b, vk0.16b; \ \ do_aes_4_part1_multikey(ed, mcimc, v1, v2, v3, v4, v8, v5, v6, v7); \ \ st1 {v9.16b-v12.16b}, [x1], #64; /* store plaintext */ \ \ b.hs .Lxts_##ed##cry_loop4_##bits; \ \ ld1 {v9.16b-v12.16b}, [x16]; /* restore callee saved registers */ \ \ .Lxts_##ed##cry_done4_##bits: \ eor v8.16b, v8.16b, vklast.16b; \ eor v5.16b, v5.16b, vklast.16b; \ eor v6.16b, v6.16b, vklast.16b; \ eor v7.16b, v7.16b, vklast.16b; \ do_aes_4_part2_##bits(ed, mcimc, v1, v2, v3, v4, v1, v2, v3, v4, v8, v5, v6, v7); \ \ st1 {v1.16b-v4.16b}, [x1], #64; /* store plaintext */ \ \ CLEAR_REG(v4); \ ld1 {v8.16b}, [sp]; /* restore callee saved registers */ \ CLEAR_REG(v5); \ CLEAR_REG(v6); \ CLEAR_REG(v7); \ cbz x4, .Lxts_##ed##cry_done; \ \ .Lxts_##ed##cry_loop_##bits: \ \ ld1 {v1.16b}, [x2], #16; /* load plaintext */ \ ext v3.16b, v0.16b, v0.16b, #8; \ eor v2.16b, v0.16b, vk0.16b; \ sshr v3.2d, v3.2d, #63; \ add v0.2d, v0.2d, v0.2d; \ and v3.16b, v3.16b, v16.16b; \ sub x4, x4, #1; \ eor v0.16b, v0.16b, v3.16b; \ \ do_aes_one_part1(ed, mcimc, v1, v2); \ eor v2.16b, v2.16b, 
vklast.16b; \ do_aes_one_part2_##bits(ed, mcimc, v1, __, __); \ eor v1.16b, v1.16b, v2.16b; \ \ st1 {v1.16b}, [x1], #16; /* store ciphertext */ \ \ cbnz x4, .Lxts_##ed##cry_loop_##bits; \ b .Lxts_##ed##cry_done; XTS_CRYPT(128, e, mc) XTS_CRYPT(192, e, mc) XTS_CRYPT(256, e, mc) .Lxts_ecry_done: aes_clear_keys(w5) st1 {v0.16b}, [x3] /* store tweak */ CLEAR_REG(v0) CLEAR_REG(v1) CLEAR_REG(v2) CLEAR_REG(v3) CLEAR_REG(v16) add sp, sp, 128; CFI_ADJUST_CFA_OFFSET(-128); .Lxts_enc_skip: ret_spec_stop CFI_ENDPROC(); ELF(.size _gcry_aes_xts_enc_armv8_ce,.-_gcry_aes_xts_enc_armv8_ce;) /* * void _gcry_aes_xts_dec_armv8_ce (const void *keysched, * unsigned char *outbuf, * const unsigned char *inbuf, * unsigned char *tweak, * size_t nblocks, * unsigned int nrounds); */ .align 3 .globl _gcry_aes_xts_dec_armv8_ce ELF(.type _gcry_aes_xts_dec_armv8_ce,%function;) _gcry_aes_xts_dec_armv8_ce: /* input: * r0: keysched * r1: outbuf * r2: inbuf * r3: tweak * x4: nblocks * w5: nrounds */ CFI_STARTPROC(); cbz x4, .Lxts_dec_skip add x16, sp, #-64; add sp, sp, #-128; CFI_ADJUST_CFA_OFFSET(128); /* load tweak */ ld1 {v0.16b}, [x3] /* load gfmul mask */ mov x6, #0x87 mov x7, #0x01 mov v16.D[0], x6 mov v16.D[1], x7 aes_preload_keys(x0, w5); eor vklast.16b, vklast.16b, vk0.16b; b.eq .Lxts_dcry_entry_192 b.hi .Lxts_dcry_entry_256 XTS_CRYPT(128, d, imc) XTS_CRYPT(192, d, imc) XTS_CRYPT(256, d, imc) #undef XTS_CRYPT .Lxts_dcry_done: aes_clear_keys(w5) st1 {v0.16b}, [x3] /* store tweak */ CLEAR_REG(v0) CLEAR_REG(v1) CLEAR_REG(v2) add sp, sp, 128; CFI_ADJUST_CFA_OFFSET(-128); .Lxts_dec_skip: ret_spec_stop CFI_ENDPROC(); ELF(.size _gcry_aes_xts_dec_armv8_ce,.-_gcry_aes_xts_dec_armv8_ce;) /* * u32 _gcry_aes_sbox4_armv8_ce(u32 in4b); */ .align 3 .globl _gcry_aes_sbox4_armv8_ce ELF(.type _gcry_aes_sbox4_armv8_ce,%function;) _gcry_aes_sbox4_armv8_ce: /* See "Gouvêa, C. P. L. & López, J. Implementing GCM on ARMv8. Topics in * Cryptology — CT-RSA 2015" for details. 
*/ CFI_STARTPROC(); movi v0.16b, #0x52 movi v1.16b, #0 mov v0.S[0], w0 aese v0.16b, v1.16b addv s0, v0.4s mov w0, v0.S[0] CLEAR_REG(v0) ret_spec_stop CFI_ENDPROC(); ELF(.size _gcry_aes_sbox4_armv8_ce,.-_gcry_aes_sbox4_armv8_ce;) /* * void _gcry_aes_invmixcol_armv8_ce(void *dst, const void *src); */ .align 3 .globl _gcry_aes_invmixcol_armv8_ce ELF(.type _gcry_aes_invmixcol_armv8_ce,%function;) _gcry_aes_invmixcol_armv8_ce: CFI_STARTPROC(); ld1 {v0.16b}, [x1] aesimc v0.16b, v0.16b st1 {v0.16b}, [x0] CLEAR_REG(v0) ret_spec_stop CFI_ENDPROC(); ELF(.size _gcry_aes_invmixcol_armv8_ce,.-_gcry_aes_invmixcol_armv8_ce;) #endif diff --git a/cipher/rijndael-armv8-ce.c b/cipher/rijndael-armv8-ce.c index c9c37654..042b7d42 100644 --- a/cipher/rijndael-armv8-ce.c +++ b/cipher/rijndael-armv8-ce.c @@ -1,374 +1,396 @@ /* ARMv8 Crypto Extension AES for Libgcrypt * Copyright (C) 2016, 2022 Jussi Kivilinna * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . 
* */ #include #include #include #include /* for memcmp() */ #include "types.h" /* for byte and u32 typedefs */ #include "g10lib.h" #include "cipher.h" #include "bufhelp.h" #include "rijndael-internal.h" #include "./cipher-internal.h" #ifdef USE_ARM_CE typedef struct u128_s { u32 a, b, c, d; } u128_t; extern u32 _gcry_aes_sbox4_armv8_ce(u32 in4b); extern void _gcry_aes_invmixcol_armv8_ce(u128_t *dst, const u128_t *src); extern unsigned int _gcry_aes_enc_armv8_ce(const void *keysched, byte *dst, const byte *src, unsigned int nrounds); extern unsigned int _gcry_aes_dec_armv8_ce(const void *keysched, byte *dst, const byte *src, unsigned int nrounds); extern void _gcry_aes_cbc_enc_armv8_ce (const void *keysched, unsigned char *outbuf, const unsigned char *inbuf, unsigned char *iv, size_t nblocks, int cbc_mac, unsigned int nrounds); extern void _gcry_aes_cbc_dec_armv8_ce (const void *keysched, unsigned char *outbuf, const unsigned char *inbuf, unsigned char *iv, size_t nblocks, unsigned int nrounds); extern void _gcry_aes_cfb_enc_armv8_ce (const void *keysched, unsigned char *outbuf, const unsigned char *inbuf, unsigned char *iv, size_t nblocks, unsigned int nrounds); extern void _gcry_aes_cfb_dec_armv8_ce (const void *keysched, unsigned char *outbuf, const unsigned char *inbuf, unsigned char *iv, size_t nblocks, unsigned int nrounds); extern void _gcry_aes_ctr_enc_armv8_ce (const void *keysched, unsigned char *outbuf, const unsigned char *inbuf, unsigned char *iv, size_t nblocks, unsigned int nrounds); extern void _gcry_aes_ctr32le_enc_armv8_ce (const void *keysched, unsigned char *outbuf, const unsigned char *inbuf, unsigned char *iv, size_t nblocks, unsigned int nrounds); -extern void _gcry_aes_ocb_enc_armv8_ce (const void *keysched, - unsigned char *outbuf, - const unsigned char *inbuf, - unsigned char *offset, - unsigned char *checksum, - unsigned char *L_table, - size_t nblocks, - unsigned int nrounds, - unsigned int blkn); -extern void _gcry_aes_ocb_dec_armv8_ce 
(const void *keysched, - unsigned char *outbuf, - const unsigned char *inbuf, - unsigned char *offset, - unsigned char *checksum, - unsigned char *L_table, - size_t nblocks, - unsigned int nrounds, - unsigned int blkn); -extern void _gcry_aes_ocb_auth_armv8_ce (const void *keysched, - const unsigned char *abuf, - unsigned char *offset, - unsigned char *checksum, - unsigned char *L_table, - size_t nblocks, - unsigned int nrounds, - unsigned int blkn); +extern size_t _gcry_aes_ocb_enc_armv8_ce (const void *keysched, + unsigned char *outbuf, + const unsigned char *inbuf, + unsigned char *offset, + unsigned char *checksum, + unsigned char *L_table, + size_t nblocks, + unsigned int nrounds, + unsigned int blkn); +extern size_t _gcry_aes_ocb_dec_armv8_ce (const void *keysched, + unsigned char *outbuf, + const unsigned char *inbuf, + unsigned char *offset, + unsigned char *checksum, + unsigned char *L_table, + size_t nblocks, + unsigned int nrounds, + unsigned int blkn); +extern size_t _gcry_aes_ocb_auth_armv8_ce (const void *keysched, + const unsigned char *abuf, + unsigned char *offset, + unsigned char *checksum, + unsigned char *L_table, + size_t nblocks, + unsigned int nrounds, + unsigned int blkn); extern void _gcry_aes_xts_enc_armv8_ce (const void *keysched, unsigned char *outbuf, const unsigned char *inbuf, unsigned char *tweak, size_t nblocks, unsigned int nrounds); extern void _gcry_aes_xts_dec_armv8_ce (const void *keysched, unsigned char *outbuf, const unsigned char *inbuf, unsigned char *tweak, size_t nblocks, unsigned int nrounds); - -typedef void (*ocb_crypt_fn_t) (const void *keysched, unsigned char *outbuf, - const unsigned char *inbuf, - unsigned char *offset, unsigned char *checksum, - unsigned char *L_table, size_t nblocks, - unsigned int nrounds, unsigned int blkn); - -typedef void (*xts_crypt_fn_t) (const void *keysched, unsigned char *outbuf, - const unsigned char *inbuf, - unsigned char *tweak, size_t nblocks, - unsigned int nrounds); +extern void 
_gcry_aes_ecb_enc_armv8_ce (const void *keysched, + unsigned char *outbuf, + const unsigned char *inbuf, + size_t nblocks, unsigned int nrounds); +extern void _gcry_aes_ecb_dec_armv8_ce (const void *keysched, + unsigned char *outbuf, + const unsigned char *inbuf, + size_t nblocks, unsigned int nrounds); void _gcry_aes_armv8_ce_setkey (RIJNDAEL_context *ctx, const byte *key) { unsigned int rounds = ctx->rounds; unsigned int KC = rounds - 6; u32 *W_u32 = ctx->keyschenc32b; unsigned int i, j; u32 W_prev; byte rcon = 1; for (i = 0; i < KC; i += 2) { W_u32[i + 0] = buf_get_le32(key + i * 4 + 0); W_u32[i + 1] = buf_get_le32(key + i * 4 + 4); } for (i = KC, j = KC, W_prev = W_u32[KC - 1]; i < 4 * (rounds + 1); i += 2, j += 2) { u32 temp0 = W_prev; u32 temp1; if (j == KC) { j = 0; temp0 = _gcry_aes_sbox4_armv8_ce(rol(temp0, 24)) ^ rcon; rcon = ((rcon << 1) ^ (-(rcon >> 7) & 0x1b)) & 0xff; } else if (KC == 8 && j == 4) { temp0 = _gcry_aes_sbox4_armv8_ce(temp0); } temp1 = W_u32[i - KC + 0]; W_u32[i + 0] = temp0 ^ temp1; W_u32[i + 1] = W_u32[i - KC + 1] ^ temp0 ^ temp1; W_prev = W_u32[i + 1]; } } /* Make a decryption key from an encryption key. 
*/ void _gcry_aes_armv8_ce_prepare_decryption (RIJNDAEL_context *ctx) { u128_t *ekey = (u128_t *)(void *)ctx->keyschenc; u128_t *dkey = (u128_t *)(void *)ctx->keyschdec; int rounds = ctx->rounds; int rr; int r; #define DO_AESIMC() _gcry_aes_invmixcol_armv8_ce(&dkey[r], &ekey[rr]) dkey[0] = ekey[rounds]; r = 1; rr = rounds-1; DO_AESIMC(); r++; rr--; /* round 1 */ DO_AESIMC(); r++; rr--; /* round 2 */ DO_AESIMC(); r++; rr--; /* round 3 */ DO_AESIMC(); r++; rr--; /* round 4 */ DO_AESIMC(); r++; rr--; /* round 5 */ DO_AESIMC(); r++; rr--; /* round 6 */ DO_AESIMC(); r++; rr--; /* round 7 */ DO_AESIMC(); r++; rr--; /* round 8 */ DO_AESIMC(); r++; rr--; /* round 9 */ if (rounds >= 12) { if (rounds > 12) { DO_AESIMC(); r++; rr--; /* round 10 */ DO_AESIMC(); r++; rr--; /* round 11 */ } DO_AESIMC(); r++; rr--; /* round 12 / 10 */ DO_AESIMC(); r++; rr--; /* round 13 / 11 */ } dkey[r] = ekey[0]; #undef DO_AESIMC } unsigned int _gcry_aes_armv8_ce_encrypt (const RIJNDAEL_context *ctx, unsigned char *dst, const unsigned char *src) { const void *keysched = ctx->keyschenc32; unsigned int nrounds = ctx->rounds; return _gcry_aes_enc_armv8_ce(keysched, dst, src, nrounds); } unsigned int _gcry_aes_armv8_ce_decrypt (const RIJNDAEL_context *ctx, unsigned char *dst, const unsigned char *src) { const void *keysched = ctx->keyschdec32; unsigned int nrounds = ctx->rounds; return _gcry_aes_dec_armv8_ce(keysched, dst, src, nrounds); } void _gcry_aes_armv8_ce_cbc_enc (const RIJNDAEL_context *ctx, unsigned char *iv, unsigned char *outbuf, const unsigned char *inbuf, size_t nblocks, int cbc_mac) { const void *keysched = ctx->keyschenc32; unsigned int nrounds = ctx->rounds; _gcry_aes_cbc_enc_armv8_ce(keysched, outbuf, inbuf, iv, nblocks, cbc_mac, nrounds); } void _gcry_aes_armv8_ce_cbc_dec (RIJNDAEL_context *ctx, unsigned char *iv, unsigned char *outbuf, const unsigned char *inbuf, size_t nblocks) { const void *keysched = ctx->keyschdec32; unsigned int nrounds = ctx->rounds; if ( 
!ctx->decryption_prepared ) { _gcry_aes_armv8_ce_prepare_decryption ( ctx ); ctx->decryption_prepared = 1; } _gcry_aes_cbc_dec_armv8_ce(keysched, outbuf, inbuf, iv, nblocks, nrounds); } void _gcry_aes_armv8_ce_cfb_enc (RIJNDAEL_context *ctx, unsigned char *iv, unsigned char *outbuf, const unsigned char *inbuf, size_t nblocks) { const void *keysched = ctx->keyschenc32; unsigned int nrounds = ctx->rounds; _gcry_aes_cfb_enc_armv8_ce(keysched, outbuf, inbuf, iv, nblocks, nrounds); } void _gcry_aes_armv8_ce_cfb_dec (RIJNDAEL_context *ctx, unsigned char *iv, unsigned char *outbuf, const unsigned char *inbuf, size_t nblocks) { const void *keysched = ctx->keyschenc32; unsigned int nrounds = ctx->rounds; _gcry_aes_cfb_dec_armv8_ce(keysched, outbuf, inbuf, iv, nblocks, nrounds); } void _gcry_aes_armv8_ce_ctr_enc (RIJNDAEL_context *ctx, unsigned char *iv, unsigned char *outbuf, const unsigned char *inbuf, size_t nblocks) { const void *keysched = ctx->keyschenc32; unsigned int nrounds = ctx->rounds; _gcry_aes_ctr_enc_armv8_ce(keysched, outbuf, inbuf, iv, nblocks, nrounds); } void _gcry_aes_armv8_ce_ctr32le_enc (RIJNDAEL_context *ctx, unsigned char *iv, unsigned char *outbuf, const unsigned char *inbuf, size_t nblocks) { const void *keysched = ctx->keyschenc32; unsigned int nrounds = ctx->rounds; _gcry_aes_ctr32le_enc_armv8_ce(keysched, outbuf, inbuf, iv, nblocks, nrounds); } size_t _gcry_aes_armv8_ce_ocb_crypt (gcry_cipher_hd_t c, void *outbuf_arg, const void *inbuf_arg, size_t nblocks, int encrypt) { RIJNDAEL_context *ctx = (void *)&c->context.c; const void *keysched = encrypt ? ctx->keyschenc32 : ctx->keyschdec32; - ocb_crypt_fn_t crypt_fn = encrypt ? 
_gcry_aes_ocb_enc_armv8_ce - : _gcry_aes_ocb_dec_armv8_ce; unsigned char *outbuf = outbuf_arg; const unsigned char *inbuf = inbuf_arg; unsigned int nrounds = ctx->rounds; u64 blkn = c->u_mode.ocb.data_nblocks; if ( !encrypt && !ctx->decryption_prepared ) { _gcry_aes_armv8_ce_prepare_decryption ( ctx ); ctx->decryption_prepared = 1; } c->u_mode.ocb.data_nblocks = blkn + nblocks; - crypt_fn(keysched, outbuf, inbuf, c->u_iv.iv, c->u_ctr.ctr, - c->u_mode.ocb.L[0], nblocks, nrounds, (unsigned int)blkn); - - return 0; + if (encrypt) + return _gcry_aes_ocb_enc_armv8_ce (keysched, outbuf, inbuf, + c->u_iv.iv, c->u_ctr.ctr, + c->u_mode.ocb.L[0], nblocks, nrounds, + (unsigned int)blkn); + else + return _gcry_aes_ocb_dec_armv8_ce (keysched, outbuf, inbuf, + c->u_iv.iv, c->u_ctr.ctr, + c->u_mode.ocb.L[0], nblocks, nrounds, + (unsigned int)blkn); } size_t _gcry_aes_armv8_ce_ocb_auth (gcry_cipher_hd_t c, void *abuf_arg, size_t nblocks) { RIJNDAEL_context *ctx = (void *)&c->context.c; const void *keysched = ctx->keyschenc32; const unsigned char *abuf = abuf_arg; unsigned int nrounds = ctx->rounds; u64 blkn = c->u_mode.ocb.aad_nblocks; c->u_mode.ocb.aad_nblocks = blkn + nblocks; - _gcry_aes_ocb_auth_armv8_ce(keysched, abuf, c->u_mode.ocb.aad_offset, - c->u_mode.ocb.aad_sum, c->u_mode.ocb.L[0], - nblocks, nrounds, (unsigned int)blkn); - - return 0; + return _gcry_aes_ocb_auth_armv8_ce (keysched, abuf, c->u_mode.ocb.aad_offset, + c->u_mode.ocb.aad_sum, c->u_mode.ocb.L[0], + nblocks, nrounds, (unsigned int)blkn); } void _gcry_aes_armv8_ce_xts_crypt (RIJNDAEL_context *ctx, unsigned char *tweak, unsigned char *outbuf, const unsigned char *inbuf, size_t nblocks, int encrypt) { const void *keysched = encrypt ? ctx->keyschenc32 : ctx->keyschdec32; - xts_crypt_fn_t crypt_fn = encrypt ? 
_gcry_aes_xts_enc_armv8_ce - : _gcry_aes_xts_dec_armv8_ce; unsigned int nrounds = ctx->rounds; if ( !encrypt && !ctx->decryption_prepared ) { _gcry_aes_armv8_ce_prepare_decryption ( ctx ); ctx->decryption_prepared = 1; } - crypt_fn(keysched, outbuf, inbuf, tweak, nblocks, nrounds); + if (encrypt) + _gcry_aes_xts_enc_armv8_ce (keysched, outbuf, inbuf, tweak, + nblocks, nrounds); + else + _gcry_aes_xts_dec_armv8_ce (keysched, outbuf, inbuf, tweak, + nblocks, nrounds); } +void +_gcry_aes_armv8_ce_ecb_crypt (void *context, void *outbuf, + const void *inbuf, size_t nblocks, + int encrypt) +{ + RIJNDAEL_context *ctx = context; + const void *keysched = encrypt ? ctx->keyschenc32 : ctx->keyschdec32; + unsigned int nrounds = ctx->rounds; + + if ( !encrypt && !ctx->decryption_prepared ) + { + _gcry_aes_armv8_ce_prepare_decryption ( ctx ); + ctx->decryption_prepared = 1; + } + + if (encrypt) + _gcry_aes_ecb_enc_armv8_ce (keysched, outbuf, inbuf, nblocks, nrounds); + else + _gcry_aes_ecb_dec_armv8_ce (keysched, outbuf, inbuf, nblocks, nrounds); +} #endif /* USE_ARM_CE */ diff --git a/cipher/rijndael-vaes-avx2-amd64.S b/cipher/rijndael-vaes-avx2-amd64.S index e36e82a0..655fdf55 100644 --- a/cipher/rijndael-vaes-avx2-amd64.S +++ b/cipher/rijndael-vaes-avx2-amd64.S @@ -1,2971 +1,3401 @@ /* VAES/AVX2 AMD64 accelerated AES for Libgcrypt * Copyright (C) 2021 Jussi Kivilinna * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . */ #if defined(__x86_64__) #include #if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && \ defined(ENABLE_AESNI_SUPPORT) && defined(ENABLE_AVX2_SUPPORT) && \ defined(HAVE_GCC_INLINE_ASM_VAES_VPCLMUL) #include "asm-common-amd64.h" .text /********************************************************************** helper macros **********************************************************************/ #define no(...) /*_*/ #define yes(...) __VA_ARGS__ #define AES_OP8(op, key, b0, b1, b2, b3, b4, b5, b6, b7) \ op key, b0, b0; \ op key, b1, b1; \ op key, b2, b2; \ op key, b3, b3; \ op key, b4, b4; \ op key, b5, b5; \ op key, b6, b6; \ op key, b7, b7; #define VAESENC8(key, b0, b1, b2, b3, b4, b5, b6, b7) \ AES_OP8(vaesenc, key, b0, b1, b2, b3, b4, b5, b6, b7) #define VAESDEC8(key, b0, b1, b2, b3, b4, b5, b6, b7) \ AES_OP8(vaesdec, key, b0, b1, b2, b3, b4, b5, b6, b7) #define XOR8(key, b0, b1, b2, b3, b4, b5, b6, b7) \ AES_OP8(vpxor, key, b0, b1, b2, b3, b4, b5, b6, b7) #define AES_OP4(op, key, b0, b1, b2, b3) \ op key, b0, b0; \ op key, b1, b1; \ op key, b2, b2; \ op key, b3, b3; #define VAESENC4(key, b0, b1, b2, b3) \ AES_OP4(vaesenc, key, b0, b1, b2, b3) #define VAESDEC4(key, b0, b1, b2, b3) \ AES_OP4(vaesdec, key, b0, b1, b2, b3) #define XOR4(key, b0, b1, b2, b3) \ AES_OP4(vpxor, key, b0, b1, b2, b3) #define AES_OP2(op, key, b0, b1) \ op key, b0, b0; \ op key, b1, b1; #define VAESENC2(key, b0, b1) \ AES_OP2(vaesenc, key, b0, b1) #define VAESDEC2(key, b0, b1) \ AES_OP2(vaesdec, key, b0, b1) #define XOR2(key, b0, b1) \ AES_OP2(vpxor, key, b0, b1) /********************************************************************** CBC-mode decryption **********************************************************************/ ELF(.type _gcry_vaes_avx2_cbc_dec_amd64,@function) .globl _gcry_vaes_avx2_cbc_dec_amd64 
_gcry_vaes_avx2_cbc_dec_amd64: /* input: * %rdi: round keys * %rsi: iv * %rdx: dst * %rcx: src * %r8: nblocks * %r9: nrounds */ CFI_STARTPROC(); /* Load IV. */ vmovdqu (%rsi), %xmm15; /* Process 16 blocks per loop. */ .align 8 .Lcbc_dec_blk16: cmpq $16, %r8; jb .Lcbc_dec_blk8; leaq -16(%r8), %r8; /* Load input and xor first key. Update IV. */ vbroadcasti128 (0 * 16)(%rdi), %ymm8; vmovdqu (0 * 16)(%rcx), %ymm0; vmovdqu (2 * 16)(%rcx), %ymm1; vmovdqu (4 * 16)(%rcx), %ymm2; vmovdqu (6 * 16)(%rcx), %ymm3; vmovdqu (8 * 16)(%rcx), %ymm4; vmovdqu (10 * 16)(%rcx), %ymm5; vmovdqu (12 * 16)(%rcx), %ymm6; vmovdqu (14 * 16)(%rcx), %ymm7; vpxor %ymm8, %ymm0, %ymm0; vpxor %ymm8, %ymm1, %ymm1; vpxor %ymm8, %ymm2, %ymm2; vpxor %ymm8, %ymm3, %ymm3; vpxor %ymm8, %ymm4, %ymm4; vpxor %ymm8, %ymm5, %ymm5; vpxor %ymm8, %ymm6, %ymm6; vpxor %ymm8, %ymm7, %ymm7; vbroadcasti128 (1 * 16)(%rdi), %ymm8; vinserti128 $1, (0 * 16)(%rcx), %ymm15, %ymm9; vmovdqu (1 * 16)(%rcx), %ymm10; vmovdqu (3 * 16)(%rcx), %ymm11; vmovdqu (5 * 16)(%rcx), %ymm12; vmovdqu (7 * 16)(%rcx), %ymm13; vmovdqu (9 * 16)(%rcx), %ymm14; vmovdqu (15 * 16)(%rcx), %xmm15; leaq (16 * 16)(%rcx), %rcx; /* AES rounds */ VAESDEC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (2 * 16)(%rdi), %ymm8; VAESDEC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (3 * 16)(%rdi), %ymm8; VAESDEC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (4 * 16)(%rdi), %ymm8; VAESDEC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (5 * 16)(%rdi), %ymm8; VAESDEC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (6 * 16)(%rdi), %ymm8; VAESDEC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (7 * 16)(%rdi), %ymm8; VAESDEC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (8 * 16)(%rdi), %ymm8; VAESDEC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, 
%ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (9 * 16)(%rdi), %ymm8; VAESDEC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (10 * 16)(%rdi), %ymm8; cmpl $12, %r9d; jb .Lcbc_dec_blk16_last; VAESDEC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (11 * 16)(%rdi), %ymm8; VAESDEC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (12 * 16)(%rdi), %ymm8; jz .Lcbc_dec_blk16_last; VAESDEC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (13 * 16)(%rdi), %ymm8; VAESDEC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (14 * 16)(%rdi), %ymm8; /* Last round and output handling. */ .Lcbc_dec_blk16_last: vpxor %ymm8, %ymm9, %ymm9; vpxor %ymm8, %ymm10, %ymm10; vpxor %ymm8, %ymm11, %ymm11; vpxor %ymm8, %ymm12, %ymm12; vpxor %ymm8, %ymm13, %ymm13; vpxor %ymm8, %ymm14, %ymm14; vaesdeclast %ymm9, %ymm0, %ymm0; vaesdeclast %ymm10, %ymm1, %ymm1; vpxor (-5 * 16)(%rcx), %ymm8, %ymm9; vpxor (-3 * 16)(%rcx), %ymm8, %ymm10; vaesdeclast %ymm11, %ymm2, %ymm2; vaesdeclast %ymm12, %ymm3, %ymm3; vaesdeclast %ymm13, %ymm4, %ymm4; vaesdeclast %ymm14, %ymm5, %ymm5; vaesdeclast %ymm9, %ymm6, %ymm6; vaesdeclast %ymm10, %ymm7, %ymm7; vmovdqu %ymm0, (0 * 16)(%rdx); vmovdqu %ymm1, (2 * 16)(%rdx); vmovdqu %ymm2, (4 * 16)(%rdx); vmovdqu %ymm3, (6 * 16)(%rdx); vmovdqu %ymm4, (8 * 16)(%rdx); vmovdqu %ymm5, (10 * 16)(%rdx); vmovdqu %ymm6, (12 * 16)(%rdx); vmovdqu %ymm7, (14 * 16)(%rdx); leaq (16 * 16)(%rdx), %rdx; jmp .Lcbc_dec_blk16; /* Handle trailing eight blocks. */ .align 8 .Lcbc_dec_blk8: cmpq $8, %r8; jb .Lcbc_dec_blk4; leaq -8(%r8), %r8; /* Load input and xor first key. Update IV. 
*/ vbroadcasti128 (0 * 16)(%rdi), %ymm4; vmovdqu (0 * 16)(%rcx), %ymm0; vmovdqu (2 * 16)(%rcx), %ymm1; vmovdqu (4 * 16)(%rcx), %ymm2; vmovdqu (6 * 16)(%rcx), %ymm3; vpxor %ymm4, %ymm0, %ymm0; vpxor %ymm4, %ymm1, %ymm1; vpxor %ymm4, %ymm2, %ymm2; vpxor %ymm4, %ymm3, %ymm3; vbroadcasti128 (1 * 16)(%rdi), %ymm4; vinserti128 $1, (0 * 16)(%rcx), %ymm15, %ymm10; vmovdqu (1 * 16)(%rcx), %ymm11; vmovdqu (3 * 16)(%rcx), %ymm12; vmovdqu (5 * 16)(%rcx), %ymm13; vmovdqu (7 * 16)(%rcx), %xmm15; leaq (8 * 16)(%rcx), %rcx; /* AES rounds */ VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (2 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (3 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (4 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (5 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (6 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (7 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (8 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (9 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (10 * 16)(%rdi), %ymm4; cmpl $12, %r9d; jb .Lcbc_dec_blk8_last; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (11 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (12 * 16)(%rdi), %ymm4; jz .Lcbc_dec_blk8_last; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (13 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (14 * 16)(%rdi), %ymm4; /* Last round and output handling. 
*/ .Lcbc_dec_blk8_last: vpxor %ymm4, %ymm10, %ymm10; vpxor %ymm4, %ymm11, %ymm11; vpxor %ymm4, %ymm12, %ymm12; vpxor %ymm4, %ymm13, %ymm13; vaesdeclast %ymm10, %ymm0, %ymm0; vaesdeclast %ymm11, %ymm1, %ymm1; vaesdeclast %ymm12, %ymm2, %ymm2; vaesdeclast %ymm13, %ymm3, %ymm3; vmovdqu %ymm0, (0 * 16)(%rdx); vmovdqu %ymm1, (2 * 16)(%rdx); vmovdqu %ymm2, (4 * 16)(%rdx); vmovdqu %ymm3, (6 * 16)(%rdx); leaq (8 * 16)(%rdx), %rdx; /* Handle trailing four blocks. */ .align 8 .Lcbc_dec_blk4: cmpq $4, %r8; jb .Lcbc_dec_blk1; leaq -4(%r8), %r8; /* Load input and xor first key. Update IV. */ vbroadcasti128 (0 * 16)(%rdi), %ymm4; vmovdqu (0 * 16)(%rcx), %ymm0; vmovdqu (2 * 16)(%rcx), %ymm1; vpxor %ymm4, %ymm0, %ymm0; vpxor %ymm4, %ymm1, %ymm1; vbroadcasti128 (1 * 16)(%rdi), %ymm4; vinserti128 $1, (0 * 16)(%rcx), %ymm15, %ymm10; vmovdqu (1 * 16)(%rcx), %ymm11; vmovdqu (3 * 16)(%rcx), %xmm15; leaq (4 * 16)(%rcx), %rcx; /* AES rounds */ VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (2 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (3 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (4 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (5 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (6 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (7 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (8 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (9 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (10 * 16)(%rdi), %ymm4; cmpl $12, %r9d; jb .Lcbc_dec_blk4_last; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (11 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (12 * 16)(%rdi), %ymm4; jz .Lcbc_dec_blk4_last; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (13 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (14 * 16)(%rdi), %ymm4; /* Last round and output handling. 
*/ .Lcbc_dec_blk4_last: vpxor %ymm4, %ymm10, %ymm10; vpxor %ymm4, %ymm11, %ymm11; vaesdeclast %ymm10, %ymm0, %ymm0; vaesdeclast %ymm11, %ymm1, %ymm1; vmovdqu %ymm0, (0 * 16)(%rdx); vmovdqu %ymm1, (2 * 16)(%rdx); leaq (4 * 16)(%rdx), %rdx; /* Process trailing one to three blocks, one per loop. */ .align 8 .Lcbc_dec_blk1: cmpq $1, %r8; jb .Ldone_cbc_dec; leaq -1(%r8), %r8; /* Load input. */ vmovdqu (%rcx), %xmm2; leaq 16(%rcx), %rcx; /* Xor first key. */ vpxor (0 * 16)(%rdi), %xmm2, %xmm0; /* AES rounds. */ vaesdec (1 * 16)(%rdi), %xmm0, %xmm0; vaesdec (2 * 16)(%rdi), %xmm0, %xmm0; vaesdec (3 * 16)(%rdi), %xmm0, %xmm0; vaesdec (4 * 16)(%rdi), %xmm0, %xmm0; vaesdec (5 * 16)(%rdi), %xmm0, %xmm0; vaesdec (6 * 16)(%rdi), %xmm0, %xmm0; vaesdec (7 * 16)(%rdi), %xmm0, %xmm0; vaesdec (8 * 16)(%rdi), %xmm0, %xmm0; vaesdec (9 * 16)(%rdi), %xmm0, %xmm0; vmovdqa (10 * 16)(%rdi), %xmm1; cmpl $12, %r9d; jb .Lcbc_dec_blk1_last; vaesdec %xmm1, %xmm0, %xmm0; vaesdec (11 * 16)(%rdi), %xmm0, %xmm0; vmovdqa (12 * 16)(%rdi), %xmm1; jz .Lcbc_dec_blk1_last; vaesdec %xmm1, %xmm0, %xmm0; vaesdec (13 * 16)(%rdi), %xmm0, %xmm0; vmovdqa (14 * 16)(%rdi), %xmm1; /* Last round and output handling. */ .Lcbc_dec_blk1_last: vpxor %xmm1, %xmm15, %xmm15; vaesdeclast %xmm15, %xmm0, %xmm0; vmovdqa %xmm2, %xmm15; vmovdqu %xmm0, (%rdx); leaq 16(%rdx), %rdx; jmp .Lcbc_dec_blk1; .align 8 .Ldone_cbc_dec: /* Store IV. */ vmovdqu %xmm15, (%rsi); vzeroall; ret_spec_stop CFI_ENDPROC(); ELF(.size _gcry_vaes_avx2_cbc_dec_amd64,.-_gcry_vaes_avx2_cbc_dec_amd64) /********************************************************************** CFB-mode decryption **********************************************************************/ ELF(.type _gcry_vaes_avx2_cfb_dec_amd64,@function) .globl _gcry_vaes_avx2_cfb_dec_amd64 _gcry_vaes_avx2_cfb_dec_amd64: /* input: * %rdi: round keys * %rsi: iv * %rdx: dst * %rcx: src * %r8: nblocks * %r9: nrounds */ CFI_STARTPROC(); /* Load IV. 
*/
	vmovdqu (%rsi), %xmm15;

	/* Process 16 blocks per loop. */
.align 8
.Lcfb_dec_blk16:
	cmpq $16, %r8;
	jb .Lcfb_dec_blk8;

	leaq -16(%r8), %r8;

	/* Load input and xor first key.  Update IV.  For CFB decryption the
	 * cipher input for block i is ciphertext block i-1, so the lanes of
	 * %ymm0..%ymm7 are [IV,C0], [C1,C2], ..., [C13,C14] and the final
	 * ciphertext block C15 becomes the next IV in %xmm15. */
	vbroadcasti128 (0 * 16)(%rdi), %ymm8;
	vinserti128 $1, (0 * 16)(%rcx), %ymm15, %ymm0;
	vmovdqu (1 * 16)(%rcx), %ymm1;
	vmovdqu (3 * 16)(%rcx), %ymm2;
	vmovdqu (5 * 16)(%rcx), %ymm3;
	vmovdqu (7 * 16)(%rcx), %ymm4;
	vmovdqu (9 * 16)(%rcx), %ymm5;
	vmovdqu (11 * 16)(%rcx), %ymm6;
	vmovdqu (13 * 16)(%rcx), %ymm7;
	vmovdqu (15 * 16)(%rcx), %xmm15;
	vpxor %ymm8, %ymm0, %ymm0;
	vpxor %ymm8, %ymm1, %ymm1;
	vpxor %ymm8, %ymm2, %ymm2;
	vpxor %ymm8, %ymm3, %ymm3;
	vpxor %ymm8, %ymm4, %ymm4;
	vpxor %ymm8, %ymm5, %ymm5;
	vpxor %ymm8, %ymm6, %ymm6;
	vpxor %ymm8, %ymm7, %ymm7;
	vbroadcasti128 (1 * 16)(%rdi), %ymm8;
	/* Preload ciphertext blocks C0..C11 for the final xor. */
	vmovdqu (0 * 16)(%rcx), %ymm9;
	vmovdqu (2 * 16)(%rcx), %ymm10;
	vmovdqu (4 * 16)(%rcx), %ymm11;
	vmovdqu (6 * 16)(%rcx), %ymm12;
	vmovdqu (8 * 16)(%rcx), %ymm13;
	vmovdqu (10 * 16)(%rcx), %ymm14;
	leaq (16 * 16)(%rcx), %rcx;

	/* AES rounds */
	VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7);
	vbroadcasti128 (2 * 16)(%rdi), %ymm8;
	VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7);
	vbroadcasti128 (3 * 16)(%rdi), %ymm8;
	VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7);
	vbroadcasti128 (4 * 16)(%rdi), %ymm8;
	VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7);
	vbroadcasti128 (5 * 16)(%rdi), %ymm8;
	VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7);
	vbroadcasti128 (6 * 16)(%rdi), %ymm8;
	VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7);
	vbroadcasti128 (7 * 16)(%rdi), %ymm8;
	VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7);
	vbroadcasti128 (8 * 16)(%rdi), %ymm8;
	VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7);
	vbroadcasti128 (9 * 16)(%rdi), %ymm8;
	VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7);
	vbroadcasti128 (10 * 16)(%rdi), %ymm8;
	/* nrounds < 12 => AES-128; == 12 => AES-192; else AES-256. */
	cmpl $12, %r9d;
	jb .Lcfb_dec_blk16_last;
	VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7);
	vbroadcasti128 (11 * 16)(%rdi), %ymm8;
	VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7);
	vbroadcasti128 (12 * 16)(%rdi), %ymm8;
	jz .Lcfb_dec_blk16_last;
	VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7);
	vbroadcasti128 (13 * 16)(%rdi), %ymm8;
	VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7);
	vbroadcasti128 (14 * 16)(%rdi), %ymm8;

	/* Last round and output handling.  Ciphertext is xored into the last
	 * round key so vaesenclast yields plaintext directly. */
.Lcfb_dec_blk16_last:
	vpxor %ymm8, %ymm9, %ymm9;
	vpxor %ymm8, %ymm10, %ymm10;
	vpxor %ymm8, %ymm11, %ymm11;
	vpxor %ymm8, %ymm12, %ymm12;
	vpxor %ymm8, %ymm13, %ymm13;
	vpxor %ymm8, %ymm14, %ymm14;
	vaesenclast %ymm9, %ymm0, %ymm0;
	vaesenclast %ymm10, %ymm1, %ymm1;
	vpxor (-4 * 16)(%rcx), %ymm8, %ymm9;  /* C12..C13 (src already advanced) */
	vpxor (-2 * 16)(%rcx), %ymm8, %ymm10; /* C14..C15 */
	vaesenclast %ymm11, %ymm2, %ymm2;
	vaesenclast %ymm12, %ymm3, %ymm3;
	vaesenclast %ymm13, %ymm4, %ymm4;
	vaesenclast %ymm14, %ymm5, %ymm5;
	vaesenclast %ymm9, %ymm6, %ymm6;
	vaesenclast %ymm10, %ymm7, %ymm7;
	vmovdqu %ymm0, (0 * 16)(%rdx);
	vmovdqu %ymm1, (2 * 16)(%rdx);
	vmovdqu %ymm2, (4 * 16)(%rdx);
	vmovdqu %ymm3, (6 * 16)(%rdx);
	vmovdqu %ymm4, (8 * 16)(%rdx);
	vmovdqu %ymm5, (10 * 16)(%rdx);
	vmovdqu %ymm6, (12 * 16)(%rdx);
	vmovdqu %ymm7, (14 * 16)(%rdx);
	leaq (16 * 16)(%rdx), %rdx;

	jmp .Lcfb_dec_blk16;

	/* Handle trailing eight blocks. */
.align 8
.Lcfb_dec_blk8:
	cmpq $8, %r8;
	jb .Lcfb_dec_blk4;

	leaq -8(%r8), %r8;

	/* Load input and xor first key.  Update IV. */
	vbroadcasti128 (0 * 16)(%rdi), %ymm4;
	vinserti128 $1, (0 * 16)(%rcx), %ymm15, %ymm0;
	vmovdqu (1 * 16)(%rcx), %ymm1;
	vmovdqu (3 * 16)(%rcx), %ymm2;
	vmovdqu (5 * 16)(%rcx), %ymm3;
	vmovdqu (7 * 16)(%rcx), %xmm15; /* C7 becomes the next IV. */
	vpxor %ymm4, %ymm0, %ymm0;
	vpxor %ymm4, %ymm1, %ymm1;
	vpxor %ymm4, %ymm2, %ymm2;
	vpxor %ymm4, %ymm3, %ymm3;
	vbroadcasti128 (1 * 16)(%rdi), %ymm4;
	/* Preload ciphertext C0..C7 for the final xor. */
	vmovdqu (0 * 16)(%rcx), %ymm10;
	vmovdqu (2 * 16)(%rcx), %ymm11;
	vmovdqu (4 * 16)(%rcx), %ymm12;
	vmovdqu (6 * 16)(%rcx), %ymm13;
	leaq (8 * 16)(%rcx), %rcx;

	/* AES rounds */
	VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3);
	vbroadcasti128 (2 * 16)(%rdi), %ymm4;
	VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3);
	vbroadcasti128 (3 * 16)(%rdi), %ymm4;
	VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3);
	vbroadcasti128 (4 * 16)(%rdi), %ymm4;
	VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3);
	vbroadcasti128 (5 * 16)(%rdi), %ymm4;
	VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3);
	vbroadcasti128 (6 * 16)(%rdi), %ymm4;
	VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3);
	vbroadcasti128 (7 * 16)(%rdi), %ymm4;
	VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3);
	vbroadcasti128 (8 * 16)(%rdi), %ymm4;
	VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3);
	vbroadcasti128 (9 * 16)(%rdi), %ymm4;
	VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3);
	vbroadcasti128 (10 * 16)(%rdi), %ymm4;
	cmpl $12, %r9d;
	jb .Lcfb_dec_blk8_last;
	VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3);
	vbroadcasti128 (11 * 16)(%rdi), %ymm4;
	VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3);
	vbroadcasti128 (12 * 16)(%rdi), %ymm4;
	jz .Lcfb_dec_blk8_last;
	VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3);
	vbroadcasti128 (13 * 16)(%rdi), %ymm4;
	VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3);
	vbroadcasti128 (14 * 16)(%rdi), %ymm4;

	/* Last round and output handling. */
.Lcfb_dec_blk8_last:
	vpxor %ymm4, %ymm10, %ymm10;
	vpxor %ymm4, %ymm11, %ymm11;
	vpxor %ymm4, %ymm12, %ymm12;
	vpxor %ymm4, %ymm13, %ymm13;
	vaesenclast %ymm10, %ymm0, %ymm0;
	vaesenclast %ymm11, %ymm1, %ymm1;
	vaesenclast %ymm12, %ymm2, %ymm2;
	vaesenclast %ymm13, %ymm3, %ymm3;
	vmovdqu %ymm0, (0 * 16)(%rdx);
	vmovdqu %ymm1, (2 * 16)(%rdx);
	vmovdqu %ymm2, (4 * 16)(%rdx);
	vmovdqu %ymm3, (6 * 16)(%rdx);
	leaq (8 * 16)(%rdx), %rdx;

	/* Handle trailing four blocks. */
.align 8
.Lcfb_dec_blk4:
	cmpq $4, %r8;
	jb .Lcfb_dec_blk1;

	leaq -4(%r8), %r8;

	/* Load input and xor first key.  Update IV. */
	vbroadcasti128 (0 * 16)(%rdi), %ymm4;
	vinserti128 $1, (0 * 16)(%rcx), %ymm15, %ymm0;
	vmovdqu (1 * 16)(%rcx), %ymm1;
	vmovdqu (3 * 16)(%rcx), %xmm15; /* C3 becomes the next IV. */
	vpxor %ymm4, %ymm0, %ymm0;
	vpxor %ymm4, %ymm1, %ymm1;
	vbroadcasti128 (1 * 16)(%rdi), %ymm4;
	vmovdqu (0 * 16)(%rcx), %ymm10;
	vmovdqu (2 * 16)(%rcx), %ymm11;
	leaq (4 * 16)(%rcx), %rcx;

	/* AES rounds */
	VAESENC2(%ymm4, %ymm0, %ymm1);
	vbroadcasti128 (2 * 16)(%rdi), %ymm4;
	VAESENC2(%ymm4, %ymm0, %ymm1);
	vbroadcasti128 (3 * 16)(%rdi), %ymm4;
	VAESENC2(%ymm4, %ymm0, %ymm1);
	vbroadcasti128 (4 * 16)(%rdi), %ymm4;
	VAESENC2(%ymm4, %ymm0, %ymm1);
	vbroadcasti128 (5 * 16)(%rdi), %ymm4;
	VAESENC2(%ymm4, %ymm0, %ymm1);
	vbroadcasti128 (6 * 16)(%rdi), %ymm4;
	VAESENC2(%ymm4, %ymm0, %ymm1);
	vbroadcasti128 (7 * 16)(%rdi), %ymm4;
	VAESENC2(%ymm4, %ymm0, %ymm1);
	vbroadcasti128 (8 * 16)(%rdi), %ymm4;
	VAESENC2(%ymm4, %ymm0, %ymm1);
	vbroadcasti128 (9 * 16)(%rdi), %ymm4;
	VAESENC2(%ymm4, %ymm0, %ymm1);
	vbroadcasti128 (10 * 16)(%rdi), %ymm4;
	cmpl $12, %r9d;
	jb .Lcfb_dec_blk4_last;
	VAESENC2(%ymm4, %ymm0, %ymm1);
	vbroadcasti128 (11 * 16)(%rdi), %ymm4;
	VAESENC2(%ymm4, %ymm0, %ymm1);
	vbroadcasti128 (12 * 16)(%rdi), %ymm4;
	jz .Lcfb_dec_blk4_last;
	VAESENC2(%ymm4, %ymm0, %ymm1);
	vbroadcasti128 (13 * 16)(%rdi), %ymm4;
	VAESENC2(%ymm4, %ymm0, %ymm1);
	vbroadcasti128 (14 * 16)(%rdi), %ymm4;

	/* Last round and output handling. */
.Lcfb_dec_blk4_last:
	vpxor %ymm4, %ymm10, %ymm10;
	vpxor %ymm4, %ymm11, %ymm11;
	vaesenclast %ymm10, %ymm0, %ymm0;
	vaesenclast %ymm11, %ymm1, %ymm1;
	vmovdqu %ymm0, (0 * 16)(%rdx);
	vmovdqu %ymm1, (2 * 16)(%rdx);
	leaq (4 * 16)(%rdx), %rdx;

	/* Process trailing one to three blocks, one per loop. */
.align 8
.Lcfb_dec_blk1:
	cmpq $1, %r8;
	jb .Ldone_cfb_dec;

	leaq -1(%r8), %r8;

	/* Xor first key. */
	vpxor (0 * 16)(%rdi), %xmm15, %xmm0;

	/* Load input as next IV. */
	vmovdqu (%rcx), %xmm15;
	leaq 16(%rcx), %rcx;

	/* AES rounds. */
	vaesenc (1 * 16)(%rdi), %xmm0, %xmm0;
	vaesenc (2 * 16)(%rdi), %xmm0, %xmm0;
	vaesenc (3 * 16)(%rdi), %xmm0, %xmm0;
	vaesenc (4 * 16)(%rdi), %xmm0, %xmm0;
	vaesenc (5 * 16)(%rdi), %xmm0, %xmm0;
	vaesenc (6 * 16)(%rdi), %xmm0, %xmm0;
	vaesenc (7 * 16)(%rdi), %xmm0, %xmm0;
	vaesenc (8 * 16)(%rdi), %xmm0, %xmm0;
	vaesenc (9 * 16)(%rdi), %xmm0, %xmm0;
	vmovdqa (10 * 16)(%rdi), %xmm1;
	cmpl $12, %r9d;
	jb .Lcfb_dec_blk1_last;
	vaesenc %xmm1, %xmm0, %xmm0;
	vaesenc (11 * 16)(%rdi), %xmm0, %xmm0;
	vmovdqa (12 * 16)(%rdi), %xmm1;
	jz .Lcfb_dec_blk1_last;
	vaesenc %xmm1, %xmm0, %xmm0;
	vaesenc (13 * 16)(%rdi), %xmm0, %xmm0;
	vmovdqa (14 * 16)(%rdi), %xmm1;

	/* Last round and output handling.  The new IV (current ciphertext,
	 * %xmm15) is folded into the last round key so vaesenclast also
	 * performs the CFB xor. */
.Lcfb_dec_blk1_last:
	vpxor %xmm15, %xmm1, %xmm1;
	vaesenclast %xmm1, %xmm0, %xmm0;
	vmovdqu %xmm0, (%rdx);
	leaq 16(%rdx), %rdx;
	jmp .Lcfb_dec_blk1;

.align 8
.Ldone_cfb_dec:
	/* Store IV. */
	vmovdqu %xmm15, (%rsi);
	vzeroall;
	ret_spec_stop
	CFI_ENDPROC();
ELF(.size _gcry_vaes_avx2_cfb_dec_amd64,.-_gcry_vaes_avx2_cfb_dec_amd64)

/**********************************************************************
  CTR-mode encryption
 **********************************************************************/
ELF(.type _gcry_vaes_avx2_ctr_enc_amd64,@function)
.globl _gcry_vaes_avx2_ctr_enc_amd64
_gcry_vaes_avx2_ctr_enc_amd64:
	/* input:
	 *	%rdi: round keys
	 *	%rsi: counter
	 *	%rdx: dst
	 *	%rcx: src
	 *	%r8:  nblocks
	 *	%r9:  nrounds
	 */
	CFI_STARTPROC();

	/* Keep a byte-swapped (little-endian) copy of the 128-bit big-endian
	 * counter in %r11:%r10 for fast scalar arithmetic. */
	movq 8(%rsi), %r10;
	movq 0(%rsi), %r11;
	bswapq %r10;
	bswapq %r11;
	vpcmpeqd %ymm15, %ymm15, %ymm15;
	vpsrldq $8, %ymm15, %ymm15; // 0:-1
	vpaddq %ymm15, %ymm15, %ymm14; // 0:-2
	vbroadcasti128 .Lbswap128_mask rRIP, %ymm13;

/* 128-bit little-endian increment: subtracting -1 adds one; the vpcmpeqq
 * detects the low-qword wrap and propagates the carry to the high qword. */
#define inc_le128(x, minus_one, tmp) \
	vpcmpeqq minus_one, x, tmp; \
	vpsubq minus_one, x, x; \
	vpslldq $8, tmp, tmp; \
	vpsubq tmp, x, x;

/* Same idea, adding two per 128-bit lane (carry if low qword was -1 or -2). */
#define add2_le128(x, minus_one, minus_two, tmp1, tmp2) \
	vpcmpeqq minus_one, x, tmp1; \
	vpcmpeqq minus_two, x, tmp2; \
	vpor tmp1, tmp2, tmp2; \
	vpsubq minus_two, x, x; \
	vpslldq $8, tmp2, tmp2; \
	vpsubq tmp2, x, x;

	/* Process 16 blocks per loop. */
.align 8
.Lctr_enc_blk16:
	cmpq $16, %r8;
	jb .Lctr_enc_blk8;

	leaq -16(%r8), %r8;

	vbroadcasti128 (%rsi), %ymm7;
	vbroadcasti128 (0 * 16)(%rdi), %ymm8;

	/* detect if carry handling is needed: bump the counter's low byte in
	 * memory; CF set means the addition overflows into higher bytes. */
	addb $16, 15(%rsi);
	jc .Lctr_enc_blk16_handle_carry;

	/* Increment counters.
*/
	/* No carry: per-block big-endian byte-add constants (.Lbige_addb_N,
	 * defined elsewhere in this file) produce counter+0..counter+15. */
	vpaddb .Lbige_addb_0 rRIP, %ymm7, %ymm0;
	vpaddb .Lbige_addb_2 rRIP, %ymm7, %ymm1;
	vpaddb .Lbige_addb_4 rRIP, %ymm7, %ymm2;
	vpaddb .Lbige_addb_6 rRIP, %ymm7, %ymm3;
	vpaddb .Lbige_addb_8 rRIP, %ymm7, %ymm4;
	vpaddb .Lbige_addb_10 rRIP, %ymm7, %ymm5;
	vpaddb .Lbige_addb_12 rRIP, %ymm7, %ymm6;
	vpaddb .Lbige_addb_14 rRIP, %ymm7, %ymm7;
	leaq 16(%r10), %r10;

.Lctr_enc_blk16_rounds:
	/* AES rounds */
	XOR8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7);
	vbroadcasti128 (1 * 16)(%rdi), %ymm8;
	VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7);
	vbroadcasti128 (2 * 16)(%rdi), %ymm8;
	VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7);
	vbroadcasti128 (3 * 16)(%rdi), %ymm8;
	VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7);
	vbroadcasti128 (4 * 16)(%rdi), %ymm8;
	VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7);
	vbroadcasti128 (5 * 16)(%rdi), %ymm8;
	VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7);
	vbroadcasti128 (6 * 16)(%rdi), %ymm8;
	VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7);
	vbroadcasti128 (7 * 16)(%rdi), %ymm8;
	VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7);
	vbroadcasti128 (8 * 16)(%rdi), %ymm8;
	VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7);
	vbroadcasti128 (9 * 16)(%rdi), %ymm8;
	VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7);
	vbroadcasti128 (10 * 16)(%rdi), %ymm8;
	cmpl $12, %r9d;
	jb .Lctr_enc_blk16_last;
	VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7);
	vbroadcasti128 (11 * 16)(%rdi), %ymm8;
	VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7);
	vbroadcasti128 (12 * 16)(%rdi), %ymm8;
	jz .Lctr_enc_blk16_last;
	VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7);
	vbroadcasti128 (13 * 16)(%rdi), %ymm8;
	VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7);
	vbroadcasti128 (14 * 16)(%rdi), %ymm8;

	/* Last round and output handling. */
.Lctr_enc_blk16_last:
	vpxor (0 * 16)(%rcx), %ymm8, %ymm9; /* Xor src to last round key. */
	vpxor (2 * 16)(%rcx), %ymm8, %ymm10;
	vpxor (4 * 16)(%rcx), %ymm8, %ymm11;
	vpxor (6 * 16)(%rcx), %ymm8, %ymm12;
	vaesenclast %ymm9, %ymm0, %ymm0;
	vaesenclast %ymm10, %ymm1, %ymm1;
	vaesenclast %ymm11, %ymm2, %ymm2;
	vaesenclast %ymm12, %ymm3, %ymm3;
	vpxor (8 * 16)(%rcx), %ymm8, %ymm9;
	vpxor (10 * 16)(%rcx), %ymm8, %ymm10;
	vpxor (12 * 16)(%rcx), %ymm8, %ymm11;
	vpxor (14 * 16)(%rcx), %ymm8, %ymm8;
	leaq (16 * 16)(%rcx), %rcx;
	vaesenclast %ymm9, %ymm4, %ymm4;
	vaesenclast %ymm10, %ymm5, %ymm5;
	vaesenclast %ymm11, %ymm6, %ymm6;
	vaesenclast %ymm8, %ymm7, %ymm7;
	vmovdqu %ymm0, (0 * 16)(%rdx);
	vmovdqu %ymm1, (2 * 16)(%rdx);
	vmovdqu %ymm2, (4 * 16)(%rdx);
	vmovdqu %ymm3, (6 * 16)(%rdx);
	vmovdqu %ymm4, (8 * 16)(%rdx);
	vmovdqu %ymm5, (10 * 16)(%rdx);
	vmovdqu %ymm6, (12 * 16)(%rdx);
	vmovdqu %ymm7, (14 * 16)(%rdx);
	leaq (16 * 16)(%rdx), %rdx;

	jmp .Lctr_enc_blk16;

.align 8
.Lctr_enc_blk16_handle_carry:
	/* Increment counters (handle carry).  Slow path: byte-swap to
	 * little-endian and do full 128-bit SIMD increments. */
	vpshufb %xmm13, %xmm7, %xmm1; /* be => le */
	vmovdqa %xmm1, %xmm0;
	inc_le128(%xmm1, %xmm15, %xmm5);
	vinserti128 $1, %xmm1, %ymm0, %ymm7; /* ctr: +1:+0 */
	vpshufb %ymm13, %ymm7, %ymm0;
	/* Update the counter in memory from the scalar copy. */
	addq $16, %r10;
	adcq $0, %r11;
	bswapq %r10;
	bswapq %r11;
	movq %r10, 8(%rsi);
	movq %r11, 0(%rsi);
	bswapq %r10;
	bswapq %r11;
	add2_le128(%ymm7, %ymm15, %ymm14, %ymm9, %ymm10); /* ctr: +3:+2 */
	vpshufb %ymm13, %ymm7, %ymm1;
	add2_le128(%ymm7, %ymm15, %ymm14, %ymm9, %ymm10); /* ctr: +5:+4 */
	vpshufb %ymm13, %ymm7, %ymm2;
	add2_le128(%ymm7, %ymm15, %ymm14, %ymm9, %ymm10); /* ctr: +7:+6 */
	vpshufb %ymm13, %ymm7, %ymm3;
	add2_le128(%ymm7, %ymm15, %ymm14, %ymm9, %ymm10); /* ctr: +9:+8 */
	vpshufb %ymm13, %ymm7, %ymm4;
	add2_le128(%ymm7, %ymm15, %ymm14, %ymm9, %ymm10); /* ctr: +11:+10 */
	vpshufb %ymm13, %ymm7, %ymm5;
	add2_le128(%ymm7, %ymm15, %ymm14, %ymm9, %ymm10); /* ctr: +13:+12 */
	vpshufb %ymm13, %ymm7, %ymm6;
	add2_le128(%ymm7, %ymm15, %ymm14, %ymm9, %ymm10); /* ctr: +15:+14 */
	vpshufb %ymm13, %ymm7, %ymm7;

	jmp .Lctr_enc_blk16_rounds;

	/* Handle trailing eight blocks. */
.align 8
.Lctr_enc_blk8:
	cmpq $8, %r8;
	jb .Lctr_enc_blk4;

	leaq -8(%r8), %r8;

	vbroadcasti128 (%rsi), %ymm3;
	vbroadcasti128 (0 * 16)(%rdi), %ymm4;

	/* detect if carry handling is needed */
	addb $8, 15(%rsi);
	jc .Lctr_enc_blk8_handle_carry;

	/* Increment counters. */
	vpaddb .Lbige_addb_0 rRIP, %ymm3, %ymm0;
	vpaddb .Lbige_addb_2 rRIP, %ymm3, %ymm1;
	vpaddb .Lbige_addb_4 rRIP, %ymm3, %ymm2;
	vpaddb .Lbige_addb_6 rRIP, %ymm3, %ymm3;
	leaq 8(%r10), %r10;

.Lctr_enc_blk8_rounds:
	/* AES rounds */
	XOR4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3);
	vbroadcasti128 (1 * 16)(%rdi), %ymm4;
	VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3);
	vbroadcasti128 (2 * 16)(%rdi), %ymm4;
	VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3);
	vbroadcasti128 (3 * 16)(%rdi), %ymm4;
	VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3);
	vbroadcasti128 (4 * 16)(%rdi), %ymm4;
	VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3);
	vbroadcasti128 (5 * 16)(%rdi), %ymm4;
	VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3);
	vbroadcasti128 (6 * 16)(%rdi), %ymm4;
	VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3);
	vbroadcasti128 (7 * 16)(%rdi), %ymm4;
	VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3);
	vbroadcasti128 (8 * 16)(%rdi), %ymm4;
	VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3);
	vbroadcasti128 (9 * 16)(%rdi), %ymm4;
	VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3);
	vbroadcasti128 (10 * 16)(%rdi), %ymm4;
	cmpl $12, %r9d;
	jb .Lctr_enc_blk8_last;
	VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3);
	vbroadcasti128 (11 * 16)(%rdi), %ymm4;
	VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3);
	vbroadcasti128 (12 * 16)(%rdi), %ymm4;
	jz .Lctr_enc_blk8_last;
	VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3);
	vbroadcasti128 (13 * 16)(%rdi), %ymm4;
	VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3);
	vbroadcasti128 (14 * 16)(%rdi), %ymm4;

	/* Last round and output handling. */
.Lctr_enc_blk8_last:
	vpxor (0 * 16)(%rcx), %ymm4, %ymm5; /* Xor src to last round key. */
	vpxor (2 * 16)(%rcx), %ymm4, %ymm6;
	vpxor (4 * 16)(%rcx), %ymm4, %ymm7;
	vpxor (6 * 16)(%rcx), %ymm4, %ymm4;
	leaq (8 * 16)(%rcx), %rcx;
	vaesenclast %ymm5, %ymm0, %ymm0;
	vaesenclast %ymm6, %ymm1, %ymm1;
	vaesenclast %ymm7, %ymm2, %ymm2;
	vaesenclast %ymm4, %ymm3, %ymm3;
	vmovdqu %ymm0, (0 * 16)(%rdx);
	vmovdqu %ymm1, (2 * 16)(%rdx);
	vmovdqu %ymm2, (4 * 16)(%rdx);
	vmovdqu %ymm3, (6 * 16)(%rdx);
	leaq (8 * 16)(%rdx), %rdx;

	jmp .Lctr_enc_blk4;

.align 8
.Lctr_enc_blk8_handle_carry:
	/* Increment counters (handle carry). */
	vpshufb %xmm13, %xmm3, %xmm1; /* be => le */
	vmovdqa %xmm1, %xmm0;
	inc_le128(%xmm1, %xmm15, %xmm5);
	vinserti128 $1, %xmm1, %ymm0, %ymm3; /* ctr: +1:+0 */
	vpshufb %ymm13, %ymm3, %ymm0;
	addq $8, %r10;
	adcq $0, %r11;
	bswapq %r10;
	bswapq %r11;
	movq %r10, 8(%rsi);
	movq %r11, 0(%rsi);
	bswapq %r10;
	bswapq %r11;
	add2_le128(%ymm3, %ymm15, %ymm14, %ymm5, %ymm6); /* ctr: +3:+2 */
	vpshufb %ymm13, %ymm3, %ymm1;
	add2_le128(%ymm3, %ymm15, %ymm14, %ymm5, %ymm6); /* ctr: +5:+4 */
	vpshufb %ymm13, %ymm3, %ymm2;
	add2_le128(%ymm3, %ymm15, %ymm14, %ymm5, %ymm6); /* ctr: +7:+6 */
	vpshufb %ymm13, %ymm3, %ymm3;

	jmp .Lctr_enc_blk8_rounds;

	/* Handle trailing four blocks. */
.align 8
.Lctr_enc_blk4:
	cmpq $4, %r8;
	jb .Lctr_enc_blk1;

	leaq -4(%r8), %r8;

	vbroadcasti128 (%rsi), %ymm3;
	vbroadcasti128 (0 * 16)(%rdi), %ymm4;

	/* detect if carry handling is needed */
	addb $4, 15(%rsi);
	jc .Lctr_enc_blk4_handle_carry;

	/* Increment counters. */
	vpaddb .Lbige_addb_0 rRIP, %ymm3, %ymm0;
	vpaddb .Lbige_addb_2 rRIP, %ymm3, %ymm1;
	leaq 4(%r10), %r10;

.Lctr_enc_blk4_rounds:
	/* AES rounds */
	XOR2(%ymm4, %ymm0, %ymm1);
	vbroadcasti128 (1 * 16)(%rdi), %ymm4;
	VAESENC2(%ymm4, %ymm0, %ymm1);
	vbroadcasti128 (2 * 16)(%rdi), %ymm4;
	VAESENC2(%ymm4, %ymm0, %ymm1);
	vbroadcasti128 (3 * 16)(%rdi), %ymm4;
	VAESENC2(%ymm4, %ymm0, %ymm1);
	vbroadcasti128 (4 * 16)(%rdi), %ymm4;
	VAESENC2(%ymm4, %ymm0, %ymm1);
	vbroadcasti128 (5 * 16)(%rdi), %ymm4;
	VAESENC2(%ymm4, %ymm0, %ymm1);
	vbroadcasti128 (6 * 16)(%rdi), %ymm4;
	VAESENC2(%ymm4, %ymm0, %ymm1);
	vbroadcasti128 (7 * 16)(%rdi), %ymm4;
	VAESENC2(%ymm4, %ymm0, %ymm1);
	vbroadcasti128 (8 * 16)(%rdi), %ymm4;
	VAESENC2(%ymm4, %ymm0, %ymm1);
	vbroadcasti128 (9 * 16)(%rdi), %ymm4;
	VAESENC2(%ymm4, %ymm0, %ymm1);
	vbroadcasti128 (10 * 16)(%rdi), %ymm4;
	cmpl $12, %r9d;
	jb .Lctr_enc_blk4_last;
	VAESENC2(%ymm4, %ymm0, %ymm1);
	vbroadcasti128 (11 * 16)(%rdi), %ymm4;
	VAESENC2(%ymm4, %ymm0, %ymm1);
	vbroadcasti128 (12 * 16)(%rdi), %ymm4;
	jz .Lctr_enc_blk4_last;
	VAESENC2(%ymm4, %ymm0, %ymm1);
	vbroadcasti128 (13 * 16)(%rdi), %ymm4;
	VAESENC2(%ymm4, %ymm0, %ymm1);
	vbroadcasti128 (14 * 16)(%rdi), %ymm4;

	/* Last round and output handling. */
.Lctr_enc_blk4_last:
	vpxor (0 * 16)(%rcx), %ymm4, %ymm5; /* Xor src to last round key. */
	vpxor (2 * 16)(%rcx), %ymm4, %ymm6;
	leaq (4 * 16)(%rcx), %rcx;
	vaesenclast %ymm5, %ymm0, %ymm0;
	vaesenclast %ymm6, %ymm1, %ymm1;
	vmovdqu %ymm0, (0 * 16)(%rdx);
	vmovdqu %ymm1, (2 * 16)(%rdx);
	leaq (4 * 16)(%rdx), %rdx;

	jmp .Lctr_enc_blk1;

.align 8
.Lctr_enc_blk4_handle_carry:
	/* Increment counters (handle carry).
*/
	vmovdqu %xmm0, (%rdx);
	leaq 16(%rdx), %rdx;

	jmp .Lctr_enc_blk1;

.align 8
.Ldone_ctr_enc:
	vzeroall;
	/* Clear scalar counter copies (key-dependent data). */
	xorl %r10d, %r10d;
	xorl %r11d, %r11d;
	ret_spec_stop
	CFI_ENDPROC();
ELF(.size _gcry_vaes_avx2_ctr_enc_amd64,.-_gcry_vaes_avx2_ctr_enc_amd64)

/**********************************************************************
  Little-endian 32-bit CTR-mode encryption (GCM-SIV)
 **********************************************************************/
ELF(.type _gcry_vaes_avx2_ctr32le_enc_amd64,@function)
.globl _gcry_vaes_avx2_ctr32le_enc_amd64
_gcry_vaes_avx2_ctr32le_enc_amd64:
	/* input:
	 *	%rdi: round keys
	 *	%rsi: counter
	 *	%rdx: dst
	 *	%rcx: src
	 *	%r8:  nblocks
	 *	%r9:  nrounds
	 */
	CFI_STARTPROC();

	vbroadcasti128 (%rsi), %ymm15; // CTR

	/* Process 16 blocks per loop. */
.align 8
.Lctr32le_enc_blk16:
	cmpq $16, %r8;
	jb .Lctr32le_enc_blk8;

	leaq -16(%r8), %r8;

	vbroadcasti128 (0 * 16)(%rdi), %ymm8;

	/* Increment counters.  Only the low 32 bits are added
	 * (little-endian dword add; .Lle_addd_N tables defined elsewhere
	 * in this file), so no cross-dword carry handling is needed. */
	vpaddd .Lle_addd_0 rRIP, %ymm15, %ymm0;
	vpaddd .Lle_addd_2 rRIP, %ymm15, %ymm1;
	vpaddd .Lle_addd_4 rRIP, %ymm15, %ymm2;
	vpaddd .Lle_addd_6 rRIP, %ymm15, %ymm3;
	vpaddd .Lle_addd_8 rRIP, %ymm15, %ymm4;
	vpaddd .Lle_addd_10 rRIP, %ymm15, %ymm5;
	vpaddd .Lle_addd_12 rRIP, %ymm15, %ymm6;
	vpaddd .Lle_addd_14 rRIP, %ymm15, %ymm7;
	vpaddd .Lle_addd_16_2 rRIP, %ymm15, %ymm15;

	/* AES rounds */
	XOR8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7);
	vbroadcasti128 (1 * 16)(%rdi), %ymm8;
	VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7);
	vbroadcasti128 (2 * 16)(%rdi), %ymm8;
	VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7);
	vbroadcasti128 (3 * 16)(%rdi), %ymm8;
	VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7);
	vbroadcasti128 (4 * 16)(%rdi), %ymm8;
	VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7);
	vbroadcasti128 (5 * 16)(%rdi), %ymm8;
	VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7);
	vbroadcasti128 (6 * 16)(%rdi), %ymm8;
	VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7);
	vbroadcasti128 (7 * 16)(%rdi), %ymm8;
	VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7);
	vbroadcasti128 (8 * 16)(%rdi), %ymm8;
	VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7);
	vbroadcasti128 (9 * 16)(%rdi), %ymm8;
	VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7);
	vbroadcasti128 (10 * 16)(%rdi), %ymm8;
	cmpl $12, %r9d;
	jb .Lctr32le_enc_blk16_last;
	VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7);
	vbroadcasti128 (11 * 16)(%rdi), %ymm8;
	VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7);
	vbroadcasti128 (12 * 16)(%rdi), %ymm8;
	jz .Lctr32le_enc_blk16_last;
	VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7);
	vbroadcasti128 (13 * 16)(%rdi), %ymm8;
	VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7);
	vbroadcasti128 (14 * 16)(%rdi), %ymm8;

	/* Last round and output handling. */
.Lctr32le_enc_blk16_last:
	vpxor (0 * 16)(%rcx), %ymm8, %ymm9; /* Xor src to last round key. */
	vpxor (2 * 16)(%rcx), %ymm8, %ymm10;
	vpxor (4 * 16)(%rcx), %ymm8, %ymm11;
	vpxor (6 * 16)(%rcx), %ymm8, %ymm12;
	vaesenclast %ymm9, %ymm0, %ymm0;
	vaesenclast %ymm10, %ymm1, %ymm1;
	vaesenclast %ymm11, %ymm2, %ymm2;
	vaesenclast %ymm12, %ymm3, %ymm3;
	vpxor (8 * 16)(%rcx), %ymm8, %ymm9;
	vpxor (10 * 16)(%rcx), %ymm8, %ymm10;
	vpxor (12 * 16)(%rcx), %ymm8, %ymm11;
	vpxor (14 * 16)(%rcx), %ymm8, %ymm8;
	leaq (16 * 16)(%rcx), %rcx;
	vaesenclast %ymm9, %ymm4, %ymm4;
	vaesenclast %ymm10, %ymm5, %ymm5;
	vaesenclast %ymm11, %ymm6, %ymm6;
	vaesenclast %ymm8, %ymm7, %ymm7;
	vmovdqu %ymm0, (0 * 16)(%rdx);
	vmovdqu %ymm1, (2 * 16)(%rdx);
	vmovdqu %ymm2, (4 * 16)(%rdx);
	vmovdqu %ymm3, (6 * 16)(%rdx);
	vmovdqu %ymm4, (8 * 16)(%rdx);
	vmovdqu %ymm5, (10 * 16)(%rdx);
	vmovdqu %ymm6, (12 * 16)(%rdx);
	vmovdqu %ymm7, (14 * 16)(%rdx);
	leaq (16 * 16)(%rdx), %rdx;

	jmp .Lctr32le_enc_blk16;

	/* Handle trailing eight blocks. */
.align 8
.Lctr32le_enc_blk8:
	cmpq $8, %r8;
	jb .Lctr32le_enc_blk4;

	leaq -8(%r8), %r8;

	vbroadcasti128 (0 * 16)(%rdi), %ymm4;

	/* Increment counters. */
	vpaddd .Lle_addd_0 rRIP, %ymm15, %ymm0;
	vpaddd .Lle_addd_2 rRIP, %ymm15, %ymm1;
	vpaddd .Lle_addd_4 rRIP, %ymm15, %ymm2;
	vpaddd .Lle_addd_6 rRIP, %ymm15, %ymm3;
	vpaddd .Lle_addd_8_2 rRIP, %ymm15, %ymm15;

	/* AES rounds */
	XOR4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3);
	vbroadcasti128 (1 * 16)(%rdi), %ymm4;
	VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3);
	vbroadcasti128 (2 * 16)(%rdi), %ymm4;
	VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3);
	vbroadcasti128 (3 * 16)(%rdi), %ymm4;
	VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3);
	vbroadcasti128 (4 * 16)(%rdi), %ymm4;
	VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3);
	vbroadcasti128 (5 * 16)(%rdi), %ymm4;
	VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3);
	vbroadcasti128 (6 * 16)(%rdi), %ymm4;
	VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3);
	vbroadcasti128 (7 * 16)(%rdi), %ymm4;
	VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3);
	vbroadcasti128 (8 * 16)(%rdi), %ymm4;
	VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3);
	vbroadcasti128 (9 * 16)(%rdi), %ymm4;
	VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3);
	vbroadcasti128 (10 * 16)(%rdi), %ymm4;
	cmpl $12, %r9d;
	jb .Lctr32le_enc_blk8_last;
	VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3);
	vbroadcasti128 (11 * 16)(%rdi), %ymm4;
	VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3);
	vbroadcasti128 (12 * 16)(%rdi), %ymm4;
	jz .Lctr32le_enc_blk8_last;
	VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3);
	vbroadcasti128 (13 * 16)(%rdi), %ymm4;
	VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3);
	vbroadcasti128 (14 * 16)(%rdi), %ymm4;

	/* Last round and output handling. */
.Lctr32le_enc_blk8_last:
	vpxor (0 * 16)(%rcx), %ymm4, %ymm5; /* Xor src to last round key. */
	vpxor (2 * 16)(%rcx), %ymm4, %ymm6;
	vpxor (4 * 16)(%rcx), %ymm4, %ymm7;
	vpxor (6 * 16)(%rcx), %ymm4, %ymm4;
	leaq (8 * 16)(%rcx), %rcx;
	vaesenclast %ymm5, %ymm0, %ymm0;
	vaesenclast %ymm6, %ymm1, %ymm1;
	vaesenclast %ymm7, %ymm2, %ymm2;
	vaesenclast %ymm4, %ymm3, %ymm3;
	vmovdqu %ymm0, (0 * 16)(%rdx);
	vmovdqu %ymm1, (2 * 16)(%rdx);
	vmovdqu %ymm2, (4 * 16)(%rdx);
	vmovdqu %ymm3, (6 * 16)(%rdx);
	leaq (8 * 16)(%rdx), %rdx;

	/* Handle trailing four blocks. */
.align 8
.Lctr32le_enc_blk4:
	cmpq $4, %r8;
	jb .Lctr32le_enc_blk1;

	leaq -4(%r8), %r8;

	vbroadcasti128 (0 * 16)(%rdi), %ymm4;

	/* Increment counters. */
	vpaddd .Lle_addd_0 rRIP, %ymm15, %ymm0;
	vpaddd .Lle_addd_2 rRIP, %ymm15, %ymm1;
	vpaddd .Lle_addd_4_2 rRIP, %ymm15, %ymm15;

	/* AES rounds */
	XOR2(%ymm4, %ymm0, %ymm1);
	vbroadcasti128 (1 * 16)(%rdi), %ymm4;
	VAESENC2(%ymm4, %ymm0, %ymm1);
	vbroadcasti128 (2 * 16)(%rdi), %ymm4;
	VAESENC2(%ymm4, %ymm0, %ymm1);
	vbroadcasti128 (3 * 16)(%rdi), %ymm4;
	VAESENC2(%ymm4, %ymm0, %ymm1);
	vbroadcasti128 (4 * 16)(%rdi), %ymm4;
	VAESENC2(%ymm4, %ymm0, %ymm1);
	vbroadcasti128 (5 * 16)(%rdi), %ymm4;
	VAESENC2(%ymm4, %ymm0, %ymm1);
	vbroadcasti128 (6 * 16)(%rdi), %ymm4;
	VAESENC2(%ymm4, %ymm0, %ymm1);
	vbroadcasti128 (7 * 16)(%rdi), %ymm4;
	VAESENC2(%ymm4, %ymm0, %ymm1);
	vbroadcasti128 (8 * 16)(%rdi), %ymm4;
	VAESENC2(%ymm4, %ymm0, %ymm1);
	vbroadcasti128 (9 * 16)(%rdi), %ymm4;
	VAESENC2(%ymm4, %ymm0, %ymm1);
	vbroadcasti128 (10 * 16)(%rdi), %ymm4;
	cmpl $12, %r9d;
	jb .Lctr32le_enc_blk4_last;
	VAESENC2(%ymm4, %ymm0, %ymm1);
	vbroadcasti128 (11 * 16)(%rdi), %ymm4;
	VAESENC2(%ymm4, %ymm0, %ymm1);
	vbroadcasti128 (12 * 16)(%rdi), %ymm4;
	jz .Lctr32le_enc_blk4_last;
	VAESENC2(%ymm4, %ymm0, %ymm1);
	vbroadcasti128 (13 * 16)(%rdi), %ymm4;
	VAESENC2(%ymm4, %ymm0, %ymm1);
	vbroadcasti128 (14 * 16)(%rdi), %ymm4;

	/* Last round and output handling. */
.Lctr32le_enc_blk4_last:
	vpxor (0 * 16)(%rcx), %ymm4, %ymm5; /* Xor src to last round key. */
	vpxor (2 * 16)(%rcx), %ymm4, %ymm6;
	leaq (4 * 16)(%rcx), %rcx;
	vaesenclast %ymm5, %ymm0, %ymm0;
	vaesenclast %ymm6, %ymm1, %ymm1;
	vmovdqu %ymm0, (0 * 16)(%rdx);
	vmovdqu %ymm1, (2 * 16)(%rdx);
	leaq (4 * 16)(%rdx), %rdx;

	/* Process trailing one to three blocks, one per loop. */
.align 8
.Lctr32le_enc_blk1:
	cmpq $1, %r8;
	jb .Ldone_ctr32le_enc;

	leaq -1(%r8), %r8;

	/* Load and increment counter. */
	vmovdqu %xmm15, %xmm0;
	vpaddd .Lle_addd_1 rRIP, %xmm15, %xmm15;

	/* AES rounds. */
	vpxor (0 * 16)(%rdi), %xmm0, %xmm0;
	vaesenc (1 * 16)(%rdi), %xmm0, %xmm0;
	vaesenc (2 * 16)(%rdi), %xmm0, %xmm0;
	vaesenc (3 * 16)(%rdi), %xmm0, %xmm0;
	vaesenc (4 * 16)(%rdi), %xmm0, %xmm0;
	vaesenc (5 * 16)(%rdi), %xmm0, %xmm0;
	vaesenc (6 * 16)(%rdi), %xmm0, %xmm0;
	vaesenc (7 * 16)(%rdi), %xmm0, %xmm0;
	vaesenc (8 * 16)(%rdi), %xmm0, %xmm0;
	vaesenc (9 * 16)(%rdi), %xmm0, %xmm0;
	vmovdqa (10 * 16)(%rdi), %xmm1;
	cmpl $12, %r9d;
	jb .Lctr32le_enc_blk1_last;
	vaesenc %xmm1, %xmm0, %xmm0;
	vaesenc (11 * 16)(%rdi), %xmm0, %xmm0;
	vmovdqa (12 * 16)(%rdi), %xmm1;
	jz .Lctr32le_enc_blk1_last;
	vaesenc %xmm1, %xmm0, %xmm0;
	vaesenc (13 * 16)(%rdi), %xmm0, %xmm0;
	vmovdqa (14 * 16)(%rdi), %xmm1;

	/* Last round and output handling. */
.Lctr32le_enc_blk1_last:
	vpxor (%rcx), %xmm1, %xmm1; /* Xor src to last round key. */
	leaq 16(%rcx), %rcx;
	vaesenclast %xmm1, %xmm0, %xmm0; /* Last round and xor with xmm1. */
	vmovdqu %xmm0, (%rdx);
	leaq 16(%rdx), %rdx;

	jmp .Lctr32le_enc_blk1;

.align 8
.Ldone_ctr32le_enc:
	vmovdqu %xmm15, (%rsi);
	vzeroall;
	ret_spec_stop
	CFI_ENDPROC();
ELF(.size _gcry_vaes_avx2_ctr32le_enc_amd64,.-_gcry_vaes_avx2_ctr32le_enc_amd64)

/**********************************************************************
  OCB-mode encryption/decryption
 **********************************************************************/
ELF(.type _gcry_vaes_avx2_ocb_crypt_amd64,@function)
.globl _gcry_vaes_avx2_ocb_crypt_amd64
_gcry_vaes_avx2_ocb_crypt_amd64:
	/* input:
	 *	%rdi:     round keys
	 *	%esi:     nblk
	 *	%rdx:     dst
	 *	%rcx:     src
	 *	%r8:      nblocks
	 *	%r9:      nrounds
	 *	16(%rbp): offset
	 *	24(%rbp): checksum
	 *	32(%rbp): L-array
	 *	40(%rbp): encrypt (%r15d)
	 */
	CFI_STARTPROC();

#define STACK_REGS_POS (16 * 16 + 4 * 16 + 2 * 16)
#define STACK_ALLOC (STACK_REGS_POS + 5 * 8)
#define OFFSET_PTR_Q 16(%rbp)
#define CHECKSUM_PTR_Q 24(%rbp)
#define L_ARRAY_PTR_L 32(%rbp)
#define ENCRYPT_FLAG_L 40(%rbp)

	pushq %rbp;
	CFI_PUSH(%rbp);
	movq %rsp, %rbp;
	CFI_DEF_CFA_REGISTER(%rbp);

	/* Reserve and 64-byte-align stack space; save callee-saved regs. */
	subq $STACK_ALLOC, %rsp;
	andq $~63, %rsp;
	movq %r12, (STACK_REGS_POS + 0 * 8)(%rsp);
	CFI_REG_ON_STACK(r12, STACK_REGS_POS + 0 * 8);
	movq %r13, (STACK_REGS_POS + 1 * 8)(%rsp);
	CFI_REG_ON_STACK(r13, STACK_REGS_POS + 1 * 8);
	movq %r14, (STACK_REGS_POS + 2 * 8)(%rsp);
	CFI_REG_ON_STACK(r14, STACK_REGS_POS + 2 * 8);
	movq %r15, (STACK_REGS_POS + 3 * 8)(%rsp);
	CFI_REG_ON_STACK(r15, STACK_REGS_POS + 3 * 8);
	movq %rbx, (STACK_REGS_POS + 4 * 8)(%rsp);
	CFI_REG_ON_STACK(rbx, STACK_REGS_POS + 4 * 8);

	movl ENCRYPT_FLAG_L, %r15d; /* encrypt-flag. */
	movq OFFSET_PTR_Q, %r14; /* offset ptr. */
	movq CHECKSUM_PTR_Q, %rbx; /* checksum ptr. */

	leal (, %r9d, 4), %eax;
	vmovdqu (%r14), %xmm15; /* Load offset. */
	movq L_ARRAY_PTR_L, %r14; /* L-array ptr.
*/ vmovdqa (0 * 16)(%rdi), %xmm0; /* first key */ vpxor %xmm14, %xmm14, %xmm14; vpxor %xmm13, %xmm13, %xmm13; vpxor (%rdi, %rax, 4), %xmm0, %xmm0; /* first key ^ last key */ vpxor (0 * 16)(%rdi), %xmm15, %xmm15; /* offset ^ first key */ vmovdqa %xmm0, (14 * 16)(%rsp); vmovdqa %xmm0, (15 * 16)(%rsp); .align 8 .Lhandle_unaligned_ocb: /* Get number of blocks to align nblk to 16 (and L-array optimization). */ movl %esi, %r10d; negl %r10d; andl $15, %r10d; cmpq %r8, %r10; cmovaq %r8, %r10; cmpq $1, %r10; jb .Lunaligned_ocb_done; /* Number of blocks after alignment. */ movq %r8, %r11; subq %r10, %r11; /* If number after alignment is less than 16, skip aligned handling * completely. */ cmp $16, %r11; cmovbq %r8, %r10; /* Unaligned: Process eight blocks per loop. */ .align 8 .Locb_unaligned_blk8: cmpq $8, %r10; jb .Locb_unaligned_blk4; leaq -8(%r8), %r8; leaq -8(%r10), %r10; leal 1(%esi), %r11d; leal 2(%esi), %r12d; leal 3(%esi), %r13d; leal 4(%esi), %eax; tzcntl %r11d, %r11d; tzcntl %r12d, %r12d; tzcntl %r13d, %r13d; tzcntl %eax, %eax; shll $4, %r11d; shll $4, %r12d; shll $4, %r13d; shll $4, %eax; vpxor (%r14, %r11), %xmm15, %xmm5; vpxor (%r14, %r12), %xmm5, %xmm6; vpxor (%r14, %r13), %xmm6, %xmm7; vpxor (%r14, %rax), %xmm7, %xmm8; leal 5(%esi), %r11d; leal 6(%esi), %r12d; leal 7(%esi), %r13d; leal 8(%esi), %esi; tzcntl %r11d, %r11d; tzcntl %r12d, %r12d; tzcntl %r13d, %r13d; tzcntl %esi, %eax; shll $4, %r11d; shll $4, %r12d; shll $4, %r13d; shll $4, %eax; vpxor (%r14, %r11), %xmm8, %xmm9; vpxor (%r14, %r12), %xmm9, %xmm10; vpxor (%r14, %r13), %xmm10, %xmm11; vpxor (%r14, %rax), %xmm11, %xmm15; vinserti128 $1, %xmm6, %ymm5, %ymm5; vinserti128 $1, %xmm8, %ymm7, %ymm6; vinserti128 $1, %xmm10, %ymm9, %ymm7; vinserti128 $1, %xmm15, %ymm11, %ymm8; testl %r15d, %r15d; jz .Locb_unaligned_blk8_dec; vmovdqu (0 * 16)(%rcx), %ymm0; vmovdqu (2 * 16)(%rcx), %ymm1; vmovdqu (4 * 16)(%rcx), %ymm2; vmovdqu (6 * 16)(%rcx), %ymm3; leaq (8 * 16)(%rcx), %rcx; vpxor %ymm0, %ymm14, %ymm14; vpxor 
%ymm1, %ymm13, %ymm13; vpxor %ymm2, %ymm14, %ymm14; vpxor %ymm3, %ymm13, %ymm13; vpxor %ymm5, %ymm0, %ymm0; vpxor %ymm6, %ymm1, %ymm1; vpxor %ymm7, %ymm2, %ymm2; vpxor %ymm8, %ymm3, %ymm3; vmovdqa (14 * 16)(%rsp), %ymm9; /* AES rounds */ vbroadcasti128 (1 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (2 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (3 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (4 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (5 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (6 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (7 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (8 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (9 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); cmpl $12, %r9d; jb .Locb_unaligned_blk8_enc_last; vbroadcasti128 (10 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (11 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); jz .Locb_unaligned_blk8_enc_last; vbroadcasti128 (12 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (13 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); /* Last round and output handling. */ .Locb_unaligned_blk8_enc_last: vpxor %ymm5, %ymm9, %ymm5; /* Xor src to last round key. 
*/ vpxor %ymm6, %ymm9, %ymm6; vpxor %ymm7, %ymm9, %ymm7; vpxor %ymm8, %ymm9, %ymm4; vaesenclast %ymm5, %ymm0, %ymm0; vaesenclast %ymm6, %ymm1, %ymm1; vaesenclast %ymm7, %ymm2, %ymm2; vaesenclast %ymm4, %ymm3, %ymm3; vmovdqu %ymm0, (0 * 16)(%rdx); vmovdqu %ymm1, (2 * 16)(%rdx); vmovdqu %ymm2, (4 * 16)(%rdx); vmovdqu %ymm3, (6 * 16)(%rdx); leaq (8 * 16)(%rdx), %rdx; jmp .Locb_unaligned_blk8; .align 8 .Locb_unaligned_blk8_dec: vpxor (0 * 16)(%rcx), %ymm5, %ymm0; vpxor (2 * 16)(%rcx), %ymm6, %ymm1; vpxor (4 * 16)(%rcx), %ymm7, %ymm2; vpxor (6 * 16)(%rcx), %ymm8, %ymm3; leaq (8 * 16)(%rcx), %rcx; vmovdqa (14 * 16)(%rsp), %ymm9; /* AES rounds */ vbroadcasti128 (1 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (2 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (3 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (4 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (5 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (6 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (7 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (8 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (9 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); cmpl $12, %r9d; jb .Locb_unaligned_blk8_dec_last; vbroadcasti128 (10 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (11 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); jz .Locb_unaligned_blk8_dec_last; vbroadcasti128 (12 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (13 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); /* Last round and output handling. */ .Locb_unaligned_blk8_dec_last: vpxor %ymm5, %ymm9, %ymm5; /* Xor src to last round key. 
*/ vpxor %ymm6, %ymm9, %ymm6; vpxor %ymm7, %ymm9, %ymm7; vpxor %ymm8, %ymm9, %ymm4; vaesdeclast %ymm5, %ymm0, %ymm0; vaesdeclast %ymm6, %ymm1, %ymm1; vaesdeclast %ymm7, %ymm2, %ymm2; vaesdeclast %ymm4, %ymm3, %ymm3; vpxor %ymm0, %ymm14, %ymm14; vpxor %ymm1, %ymm13, %ymm13; vpxor %ymm2, %ymm14, %ymm14; vpxor %ymm3, %ymm13, %ymm13; vmovdqu %ymm0, (0 * 16)(%rdx); vmovdqu %ymm1, (2 * 16)(%rdx); vmovdqu %ymm2, (4 * 16)(%rdx); vmovdqu %ymm3, (6 * 16)(%rdx); leaq (8 * 16)(%rdx), %rdx; jmp .Locb_unaligned_blk8; /* Unaligned: Process four blocks. */ .align 8 .Locb_unaligned_blk4: cmpq $4, %r10; jb .Locb_unaligned_blk1; leaq -4(%r8), %r8; leaq -4(%r10), %r10; leal 1(%esi), %r11d; leal 2(%esi), %r12d; leal 3(%esi), %r13d; leal 4(%esi), %esi; tzcntl %r11d, %r11d; tzcntl %r12d, %r12d; tzcntl %r13d, %r13d; tzcntl %esi, %eax; shll $4, %r11d; shll $4, %r12d; shll $4, %r13d; shll $4, %eax; vpxor (%r14, %r11), %xmm15, %xmm5; vpxor (%r14, %r12), %xmm5, %xmm6; vinserti128 $1, %xmm6, %ymm5, %ymm5; vpxor (%r14, %r13), %xmm6, %xmm7; vpxor (%r14, %rax), %xmm7, %xmm15; vinserti128 $1, %xmm15, %ymm7, %ymm6; testl %r15d, %r15d; jz .Locb_unaligned_blk4_dec; vmovdqu (0 * 16)(%rcx), %ymm0; vmovdqu (2 * 16)(%rcx), %ymm1; leaq (4 * 16)(%rcx), %rcx; vpxor %ymm0, %ymm14, %ymm14; vpxor %ymm1, %ymm13, %ymm13; vpxor %ymm5, %ymm0, %ymm0; vpxor %ymm6, %ymm1, %ymm1; /* AES rounds */ vbroadcasti128 (1 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (2 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (3 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (4 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (5 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (6 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (7 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (8 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (9 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); cmpl $12, 
%r9d; jb .Locb_unaligned_blk4_enc_last; vbroadcasti128 (10 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (11 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); jz .Locb_unaligned_blk4_enc_last; vbroadcasti128 (12 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (13 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); /* Last round and output handling. */ .Locb_unaligned_blk4_enc_last: vmovdqa (14 * 16)(%rsp), %ymm8; vpxor %ymm5, %ymm8, %ymm5; /* Xor src to last round key. */ vpxor %ymm6, %ymm8, %ymm6; vaesenclast %ymm5, %ymm0, %ymm0; vaesenclast %ymm6, %ymm1, %ymm1; vmovdqu %ymm0, (0 * 16)(%rdx); vmovdqu %ymm1, (2 * 16)(%rdx); leaq (4 * 16)(%rdx), %rdx; jmp .Locb_unaligned_blk1; .align 8 .Locb_unaligned_blk4_dec: vpxor (0 * 16)(%rcx), %ymm5, %ymm0; vpxor (2 * 16)(%rcx), %ymm6, %ymm1; leaq (4 * 16)(%rcx), %rcx; /* AES rounds */ vbroadcasti128 (1 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (2 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (3 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (4 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (5 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (6 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (7 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (8 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (9 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); cmpl $12, %r9d; jb .Locb_unaligned_blk4_dec_last; vbroadcasti128 (10 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (11 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); jz .Locb_unaligned_blk4_dec_last; vbroadcasti128 (12 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (13 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); /* Last round and output handling. 
*/ .Locb_unaligned_blk4_dec_last: vmovdqa (14 * 16)(%rsp), %ymm8; vpxor %ymm5, %ymm8, %ymm5; /* Xor src to last round key. */ vpxor %ymm6, %ymm8, %ymm6; vaesdeclast %ymm5, %ymm0, %ymm0; vaesdeclast %ymm6, %ymm1, %ymm1; vpxor %ymm0, %ymm14, %ymm14; vpxor %ymm1, %ymm13, %ymm13; vmovdqu %ymm0, (0 * 16)(%rdx); vmovdqu %ymm1, (2 * 16)(%rdx); leaq (4 * 16)(%rdx), %rdx; /* Unaligned: Process one block per loop. */ .align 8 .Locb_unaligned_blk1: cmpq $1, %r10; jb .Lunaligned_ocb_done; leaq -1(%r8), %r8; leaq -1(%r10), %r10; leal 1(%esi), %esi; tzcntl %esi, %r11d; shll $4, %r11d; vpxor (%r14, %r11), %xmm15, %xmm15; testl %r15d, %r15d; jz .Locb_unaligned_blk1_dec; vmovdqu (%rcx), %xmm0; vpxor %ymm0, %ymm14, %ymm14; vpxor %xmm15, %xmm0, %xmm0; leaq 16(%rcx), %rcx; /* AES rounds. */ vaesenc (1 * 16)(%rdi), %xmm0, %xmm0; vaesenc (2 * 16)(%rdi), %xmm0, %xmm0; vaesenc (3 * 16)(%rdi), %xmm0, %xmm0; vaesenc (4 * 16)(%rdi), %xmm0, %xmm0; vaesenc (5 * 16)(%rdi), %xmm0, %xmm0; vaesenc (6 * 16)(%rdi), %xmm0, %xmm0; vaesenc (7 * 16)(%rdi), %xmm0, %xmm0; vaesenc (8 * 16)(%rdi), %xmm0, %xmm0; vaesenc (9 * 16)(%rdi), %xmm0, %xmm0; cmpl $12, %r9d; jb .Locb_unaligned_blk1_enc_last; vaesenc (10 * 16)(%rdi), %xmm0, %xmm0; vaesenc (11 * 16)(%rdi), %xmm0, %xmm0; jz .Locb_unaligned_blk1_enc_last; vaesenc (12 * 16)(%rdi), %xmm0, %xmm0; vaesenc (13 * 16)(%rdi), %xmm0, %xmm0; /* Last round and output handling. */ .Locb_unaligned_blk1_enc_last: vpxor (14 * 16)(%rsp), %xmm15, %xmm1; vaesenclast %xmm1, %xmm0, %xmm0; vmovdqu %xmm0, (%rdx); leaq 16(%rdx), %rdx; jmp .Locb_unaligned_blk1; .align 8 .Locb_unaligned_blk1_dec: vpxor (%rcx), %xmm15, %xmm0; leaq 16(%rcx), %rcx; /* AES rounds. 
*/ vaesdec (1 * 16)(%rdi), %xmm0, %xmm0; vaesdec (2 * 16)(%rdi), %xmm0, %xmm0; vaesdec (3 * 16)(%rdi), %xmm0, %xmm0; vaesdec (4 * 16)(%rdi), %xmm0, %xmm0; vaesdec (5 * 16)(%rdi), %xmm0, %xmm0; vaesdec (6 * 16)(%rdi), %xmm0, %xmm0; vaesdec (7 * 16)(%rdi), %xmm0, %xmm0; vaesdec (8 * 16)(%rdi), %xmm0, %xmm0; vaesdec (9 * 16)(%rdi), %xmm0, %xmm0; cmpl $12, %r9d; jb .Locb_unaligned_blk1_dec_last; vaesdec (10 * 16)(%rdi), %xmm0, %xmm0; vaesdec (11 * 16)(%rdi), %xmm0, %xmm0; jz .Locb_unaligned_blk1_dec_last; vaesdec (12 * 16)(%rdi), %xmm0, %xmm0; vaesdec (13 * 16)(%rdi), %xmm0, %xmm0; /* Last round and output handling. */ .Locb_unaligned_blk1_dec_last: vpxor (14 * 16)(%rsp), %xmm15, %xmm1; vaesdeclast %xmm1, %xmm0, %xmm0; vpxor %ymm0, %ymm14, %ymm14; vmovdqu %xmm0, (%rdx); leaq 16(%rdx), %rdx; jmp .Locb_unaligned_blk1; .align 8 .Lunaligned_ocb_done: cmpq $1, %r8; jb .Ldone_ocb; /* Short buffers do not benefit from L-array optimization. */ movq %r8, %r10; cmpq $16, %r8; jb .Locb_unaligned_blk8; vinserti128 $1, %xmm15, %ymm15, %ymm15; /* Prepare L-array optimization. 
* Since nblk is aligned to 16, offsets will have following * construction: * - block1 = ntz{0} = offset ^ L[0] * - block2 = ntz{1} = offset ^ L[0] ^ L[1] * - block3 = ntz{0} = offset ^ L[1] * - block4 = ntz{2} = offset ^ L[1] ^ L[2] * - block5 = ntz{0} = offset ^ L[0] ^ L[1] ^ L[2] * - block6 = ntz{1} = offset ^ L[0] ^ L[2] * - block7 = ntz{0} = offset ^ L[2] * - block8 = ntz{3} = offset ^ L[2] ^ L[3] * - block9 = ntz{0} = offset ^ L[0] ^ L[2] ^ L[3] * - block10 = ntz{1} = offset ^ L[0] ^ L[1] ^ L[2] ^ L[3] * - block11 = ntz{0} = offset ^ L[1] ^ L[2] ^ L[3] * - block12 = ntz{2} = offset ^ L[1] ^ L[3] * - block13 = ntz{0} = offset ^ L[0] ^ L[1] ^ L[3] * - block14 = ntz{1} = offset ^ L[0] ^ L[3] * - block15 = ntz{0} = offset ^ L[3] * - block16 = ntz{x} = offset ^ L[3] ^ L[ntz{x}] */ vmovdqu (0 * 16)(%r14), %xmm0; vmovdqu (1 * 16)(%r14), %xmm1; vmovdqu (2 * 16)(%r14), %xmm2; vmovdqu (3 * 16)(%r14), %xmm3; vpxor %ymm13, %ymm14, %ymm14; vmovdqa %ymm14, (20 * 16)(%rsp); vpxor %xmm0, %xmm1, %xmm4; /* L[0] ^ L[1] */ vpxor %xmm0, %xmm2, %xmm5; /* L[0] ^ L[2] */ vpxor %xmm0, %xmm3, %xmm6; /* L[0] ^ L[3] */ vpxor %xmm1, %xmm2, %xmm7; /* L[1] ^ L[2] */ vpxor %xmm1, %xmm3, %xmm8; /* L[1] ^ L[3] */ vpxor %xmm2, %xmm3, %xmm9; /* L[2] ^ L[3] */ vpxor %xmm4, %xmm2, %xmm10; /* L[0] ^ L[1] ^ L[2] */ vpxor %xmm5, %xmm3, %xmm11; /* L[0] ^ L[2] ^ L[3] */ vpxor %xmm7, %xmm3, %xmm12; /* L[1] ^ L[2] ^ L[3] */ vpxor %xmm0, %xmm8, %xmm13; /* L[0] ^ L[1] ^ L[3] */ vpxor %xmm4, %xmm9, %xmm14; /* L[0] ^ L[1] ^ L[2] ^ L[3] */ vinserti128 $1, %xmm4, %ymm0, %ymm0; vinserti128 $1, %xmm7, %ymm1, %ymm1; vinserti128 $1, %xmm5, %ymm10, %ymm10; vinserti128 $1, %xmm9, %ymm2, %ymm2; vinserti128 $1, %xmm14, %ymm11, %ymm11; vinserti128 $1, %xmm8, %ymm12, %ymm12; vinserti128 $1, %xmm6, %ymm13, %ymm13; vmovdqa %ymm0, (0 * 16)(%rsp); vmovdqa %ymm1, (2 * 16)(%rsp); vmovdqa %ymm10, (4 * 16)(%rsp); vmovdqa %ymm2, (6 * 16)(%rsp); vmovdqa %ymm11, (8 * 16)(%rsp); vmovdqa %ymm12, (10 * 16)(%rsp); vmovdqa %ymm13, (12 
* 16)(%rsp); /* Aligned: Process 16 blocks per loop. */ .align 8 .Locb_aligned_blk16: cmpq $16, %r8; jb .Locb_aligned_blk8; leaq -16(%r8), %r8; leal 16(%esi), %esi; tzcntl %esi, %eax; shll $4, %eax; vpxor (0 * 16)(%rsp), %ymm15, %ymm8; vpxor (2 * 16)(%rsp), %ymm15, %ymm9; vpxor (4 * 16)(%rsp), %ymm15, %ymm10; vpxor (6 * 16)(%rsp), %ymm15, %ymm11; vpxor (8 * 16)(%rsp), %ymm15, %ymm12; vpxor (3 * 16)(%r14), %xmm15, %xmm13; /* offset ^ first key ^ L[3] */ vpxor (%r14, %rax), %xmm13, %xmm14; /* offset ^ first key ^ L[3] ^ L[ntz{nblk+16}] */ vinserti128 $1, %xmm14, %ymm13, %ymm14; testl %r15d, %r15d; jz .Locb_aligned_blk16_dec; vmovdqu (0 * 16)(%rcx), %ymm0; vmovdqu (2 * 16)(%rcx), %ymm1; vmovdqu (4 * 16)(%rcx), %ymm2; vmovdqu (6 * 16)(%rcx), %ymm3; vpxor (8 * 16)(%rcx), %ymm0, %ymm4; vpxor (10 * 16)(%rcx), %ymm1, %ymm5; vpxor (12 * 16)(%rcx), %ymm2, %ymm6; vpxor (14 * 16)(%rcx), %ymm3, %ymm7; vpxor %ymm4, %ymm5, %ymm5; vpxor %ymm6, %ymm7, %ymm7; vpxor %ymm5, %ymm7, %ymm7; vpxor (20 * 16)(%rsp), %ymm7, %ymm7; vmovdqa %ymm7, (20 * 16)(%rsp); vpxor (10 * 16)(%rsp), %ymm15, %ymm13; vpxor (14 * 16)(%rcx), %ymm14, %ymm7; vpxor %ymm8, %ymm0, %ymm0; vpxor %ymm9, %ymm1, %ymm1; vpxor %ymm10, %ymm2, %ymm2; vpxor %ymm11, %ymm3, %ymm3; vpxor (8 * 16)(%rcx), %ymm12, %ymm4; vpxor (10 * 16)(%rcx), %ymm13, %ymm5; vmovdqa %ymm13, (16 * 16)(%rsp); vpxor (12 * 16)(%rsp), %ymm15, %ymm13; vpxor (12 * 16)(%rcx), %ymm13, %ymm6; vmovdqa %ymm13, (18 * 16)(%rsp); leaq (16 * 16)(%rcx), %rcx; vperm2i128 $0x11, %ymm14, %ymm14, %ymm15; /* AES rounds */ vbroadcasti128 (1 * 16)(%rdi), %ymm13; VAESENC8(%ymm13, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (2 * 16)(%rdi), %ymm13; VAESENC8(%ymm13, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (3 * 16)(%rdi), %ymm13; VAESENC8(%ymm13, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (4 * 16)(%rdi), %ymm13; VAESENC8(%ymm13, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); 
vbroadcasti128 (5 * 16)(%rdi), %ymm13; VAESENC8(%ymm13, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (6 * 16)(%rdi), %ymm13; VAESENC8(%ymm13, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (7 * 16)(%rdi), %ymm13; VAESENC8(%ymm13, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (8 * 16)(%rdi), %ymm13; VAESENC8(%ymm13, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (9 * 16)(%rdi), %ymm13; VAESENC8(%ymm13, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); cmpl $12, %r9d; jb .Locb_aligned_blk16_enc_last; vbroadcasti128 (10 * 16)(%rdi), %ymm13; VAESENC8(%ymm13, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (11 * 16)(%rdi), %ymm13; VAESENC8(%ymm13, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); jz .Locb_aligned_blk16_enc_last; vbroadcasti128 (12 * 16)(%rdi), %ymm13; VAESENC8(%ymm13, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (13 * 16)(%rdi), %ymm13; VAESENC8(%ymm13, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); /* Last round and output handling. 
*/ .Locb_aligned_blk16_enc_last: vmovdqa (14 * 16)(%rsp), %ymm13; vpxor %ymm8, %ymm13, %ymm8; vpxor %ymm9, %ymm13, %ymm9; vpxor %ymm10, %ymm13, %ymm10; vpxor %ymm11, %ymm13, %ymm11; vaesenclast %ymm8, %ymm0, %ymm0; vaesenclast %ymm9, %ymm1, %ymm1; vaesenclast %ymm10, %ymm2, %ymm2; vaesenclast %ymm11, %ymm3, %ymm3; vpxor %ymm12, %ymm13, %ymm12; vpxor (16 * 16)(%rsp), %ymm13, %ymm8; vpxor (18 * 16)(%rsp), %ymm13, %ymm9; vpxor %ymm14, %ymm13, %ymm13; vaesenclast %ymm12, %ymm4, %ymm4; vaesenclast %ymm8, %ymm5, %ymm5; vaesenclast %ymm9, %ymm6, %ymm6; vaesenclast %ymm13, %ymm7, %ymm7; vmovdqu %ymm0, (0 * 16)(%rdx); vmovdqu %ymm1, (2 * 16)(%rdx); vmovdqu %ymm2, (4 * 16)(%rdx); vmovdqu %ymm3, (6 * 16)(%rdx); vmovdqu %ymm4, (8 * 16)(%rdx); vmovdqu %ymm5, (10 * 16)(%rdx); vmovdqu %ymm6, (12 * 16)(%rdx); vmovdqu %ymm7, (14 * 16)(%rdx); leaq (16 * 16)(%rdx), %rdx; jmp .Locb_aligned_blk16; .align 8 .Locb_aligned_blk16_dec: vpxor (10 * 16)(%rsp), %ymm15, %ymm13; vpxor (14 * 16)(%rcx), %ymm14, %ymm7; vpxor (0 * 16)(%rcx), %ymm8, %ymm0; vpxor (2 * 16)(%rcx), %ymm9, %ymm1; vpxor (4 * 16)(%rcx), %ymm10, %ymm2; vpxor (6 * 16)(%rcx), %ymm11, %ymm3; vpxor (8 * 16)(%rcx), %ymm12, %ymm4; vpxor (10 * 16)(%rcx), %ymm13, %ymm5; vmovdqa %ymm13, (16 * 16)(%rsp); vpxor (12 * 16)(%rsp), %ymm15, %ymm13; vpxor (12 * 16)(%rcx), %ymm13, %ymm6; vmovdqa %ymm13, (18 * 16)(%rsp); leaq (16 * 16)(%rcx), %rcx; vperm2i128 $0x11, %ymm14, %ymm14, %ymm15; /* AES rounds */ vbroadcasti128 (1 * 16)(%rdi), %ymm13; VAESDEC8(%ymm13, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (2 * 16)(%rdi), %ymm13; VAESDEC8(%ymm13, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (3 * 16)(%rdi), %ymm13; VAESDEC8(%ymm13, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (4 * 16)(%rdi), %ymm13; VAESDEC8(%ymm13, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (5 * 16)(%rdi), %ymm13; VAESDEC8(%ymm13, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, 
%ymm5, %ymm6, %ymm7); vbroadcasti128 (6 * 16)(%rdi), %ymm13; VAESDEC8(%ymm13, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (7 * 16)(%rdi), %ymm13; VAESDEC8(%ymm13, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (8 * 16)(%rdi), %ymm13; VAESDEC8(%ymm13, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (9 * 16)(%rdi), %ymm13; VAESDEC8(%ymm13, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); cmpl $12, %r9d; jb .Locb_aligned_blk16_dec_last; vbroadcasti128 (10 * 16)(%rdi), %ymm13; VAESDEC8(%ymm13, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (11 * 16)(%rdi), %ymm13; VAESDEC8(%ymm13, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); jz .Locb_aligned_blk16_dec_last; vbroadcasti128 (12 * 16)(%rdi), %ymm13; VAESDEC8(%ymm13, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); vbroadcasti128 (13 * 16)(%rdi), %ymm13; VAESDEC8(%ymm13, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); /* Last round and output handling. 
*/ .Locb_aligned_blk16_dec_last: vmovdqa (14 * 16)(%rsp), %ymm13; vpxor %ymm8, %ymm13, %ymm8; vpxor %ymm9, %ymm13, %ymm9; vpxor %ymm10, %ymm13, %ymm10; vpxor %ymm11, %ymm13, %ymm11; vaesdeclast %ymm8, %ymm0, %ymm0; vaesdeclast %ymm9, %ymm1, %ymm1; vaesdeclast %ymm10, %ymm2, %ymm2; vaesdeclast %ymm11, %ymm3, %ymm3; vpxor %ymm12, %ymm13, %ymm12; vpxor (16 * 16)(%rsp), %ymm13, %ymm8; vpxor (18 * 16)(%rsp), %ymm13, %ymm9; vpxor %ymm14, %ymm13, %ymm13; vaesdeclast %ymm12, %ymm4, %ymm4; vaesdeclast %ymm8, %ymm5, %ymm5; vaesdeclast %ymm9, %ymm6, %ymm6; vaesdeclast %ymm13, %ymm7, %ymm7; vmovdqu %ymm0, (0 * 16)(%rdx); vmovdqu %ymm1, (2 * 16)(%rdx); vmovdqu %ymm2, (4 * 16)(%rdx); vmovdqu %ymm3, (6 * 16)(%rdx); vpxor %ymm1, %ymm0, %ymm0; vpxor %ymm3, %ymm2, %ymm2; vmovdqu %ymm4, (8 * 16)(%rdx); vmovdqu %ymm5, (10 * 16)(%rdx); vmovdqu %ymm6, (12 * 16)(%rdx); vmovdqu %ymm7, (14 * 16)(%rdx); vpxor %ymm5, %ymm4, %ymm4; vpxor %ymm7, %ymm6, %ymm6; leaq (16 * 16)(%rdx), %rdx; vpxor %ymm4, %ymm0, %ymm0; vpxor %ymm6, %ymm2, %ymm2; vpxor %ymm2, %ymm0, %ymm0; vpxor (20 * 16)(%rsp), %ymm0, %ymm0; vmovdqa %ymm0, (20 * 16)(%rsp); jmp .Locb_aligned_blk16; /* Aligned: Process trailing eight blocks. 
*/ .align 8 .Locb_aligned_blk8: cmpq $8, %r8; jb .Locb_aligned_done; leaq -8(%r8), %r8; leal 8(%esi), %esi; tzcntl %esi, %eax; shll $4, %eax; vpxor (0 * 16)(%rsp), %ymm15, %ymm5; vpxor (2 * 16)(%rsp), %ymm15, %ymm6; vpxor (4 * 16)(%rsp), %ymm15, %ymm7; vpxor (2 * 16)(%r14), %xmm15, %xmm13; /* offset ^ first key ^ L[2] */ vpxor (%r14, %rax), %xmm13, %xmm14; /* offset ^ first key ^ L[2] ^ L[ntz{nblk+8}] */ vinserti128 $1, %xmm14, %ymm13, %ymm14; testl %r15d, %r15d; jz .Locb_aligned_blk8_dec; vmovdqu (0 * 16)(%rcx), %ymm0; vmovdqu (2 * 16)(%rcx), %ymm1; vmovdqu (4 * 16)(%rcx), %ymm2; vmovdqu (6 * 16)(%rcx), %ymm3; vpxor %ymm2, %ymm0, %ymm10; vpxor %ymm3, %ymm1, %ymm11; vpxor %ymm11, %ymm10, %ymm10; vpxor (20 * 16)(%rsp), %ymm10, %ymm10; vmovdqa %ymm10, (20 * 16)(%rsp); vpxor %ymm5, %ymm0, %ymm0; vpxor %ymm6, %ymm1, %ymm1; vpxor %ymm7, %ymm2, %ymm2; vpxor %ymm14, %ymm3, %ymm3; leaq (8 * 16)(%rcx), %rcx; vperm2i128 $0x11, %ymm14, %ymm14, %ymm15; vmovdqa (14 * 16)(%rsp), %ymm8; /* AES rounds */ vbroadcasti128 (1 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (2 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (3 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (4 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (5 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (6 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (7 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (8 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (9 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); cmpl $12, %r9d; jb .Locb_aligned_blk8_enc_last; vbroadcasti128 (10 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (11 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); jz .Locb_aligned_blk8_enc_last; 
vbroadcasti128 (12 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (13 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); /* Last round and output handling. */ .Locb_aligned_blk8_enc_last: vpxor %ymm5, %ymm8, %ymm5; vpxor %ymm6, %ymm8, %ymm6; vpxor %ymm7, %ymm8, %ymm7; vpxor %ymm14, %ymm8, %ymm4; vaesenclast %ymm5, %ymm0, %ymm0; vaesenclast %ymm6, %ymm1, %ymm1; vaesenclast %ymm7, %ymm2, %ymm2; vaesenclast %ymm4, %ymm3, %ymm3; vmovdqu %ymm0, (0 * 16)(%rdx); vmovdqu %ymm1, (2 * 16)(%rdx); vmovdqu %ymm2, (4 * 16)(%rdx); vmovdqu %ymm3, (6 * 16)(%rdx); leaq (8 * 16)(%rdx), %rdx; jmp .Locb_aligned_done; .align 8 .Locb_aligned_blk8_dec: vpxor (0 * 16)(%rcx), %ymm5, %ymm0; vpxor (2 * 16)(%rcx), %ymm6, %ymm1; vpxor (4 * 16)(%rcx), %ymm7, %ymm2; vpxor (6 * 16)(%rcx), %ymm14, %ymm3; leaq (8 * 16)(%rcx), %rcx; vperm2i128 $0x11, %ymm14, %ymm14, %ymm15; vmovdqa (14 * 16)(%rsp), %ymm8; /* AES rounds */ vbroadcasti128 (1 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (2 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (3 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (4 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (5 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (6 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (7 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (8 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (9 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); cmpl $12, %r9d; jb .Locb_aligned_blk8_dec_last; vbroadcasti128 (10 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (11 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); jz .Locb_aligned_blk8_dec_last; vbroadcasti128 (12 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, 
%ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (13 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (14 * 16)(%rdi), %ymm4; /* Last round and output handling. */ .Locb_aligned_blk8_dec_last: vpxor %ymm5, %ymm8, %ymm5; vpxor %ymm6, %ymm8, %ymm6; vpxor %ymm7, %ymm8, %ymm7; vpxor %ymm14, %ymm8, %ymm4; vaesdeclast %ymm5, %ymm0, %ymm0; vaesdeclast %ymm6, %ymm1, %ymm1; vaesdeclast %ymm7, %ymm2, %ymm2; vaesdeclast %ymm4, %ymm3, %ymm3; vmovdqu %ymm0, (0 * 16)(%rdx); vmovdqu %ymm1, (2 * 16)(%rdx); vmovdqu %ymm2, (4 * 16)(%rdx); vmovdqu %ymm3, (6 * 16)(%rdx); leaq (8 * 16)(%rdx), %rdx; vpxor %ymm1, %ymm0, %ymm0; vpxor %ymm3, %ymm2, %ymm2; vpxor %ymm2, %ymm0, %ymm0; vpxor (20 * 16)(%rsp), %ymm0, %ymm0; vmovdqa %ymm0, (20 * 16)(%rsp); .align 8 .Locb_aligned_done: vmovdqa (20 * 16)(%rsp), %ymm14; vpxor %xmm13, %xmm13, %xmm13; /* Burn stack. */ vmovdqa %ymm13, (0 * 16)(%rsp); vmovdqa %ymm13, (2 * 16)(%rsp); vmovdqa %ymm13, (4 * 16)(%rsp); vmovdqa %ymm13, (6 * 16)(%rsp); vmovdqa %ymm13, (8 * 16)(%rsp); vmovdqa %ymm13, (10 * 16)(%rsp); vmovdqa %ymm13, (12 * 16)(%rsp); vmovdqa %ymm13, (16 * 16)(%rsp); vmovdqa %ymm13, (18 * 16)(%rsp); vmovdqa %ymm13, (20 * 16)(%rsp); /* Handle tailing 1…7 blocks in nblk-unaligned loop. */ movq %r8, %r10; cmpq $1, %r8; jnb .Locb_unaligned_blk8; .align 8 .Ldone_ocb: vpxor %ymm13, %ymm14, %ymm14; vextracti128 $1, %ymm14, %xmm13; vpxor (%rbx), %xmm14, %xmm14; vpxor %xmm13, %xmm14, %xmm14; vmovdqu %xmm14, (%rbx); movq OFFSET_PTR_Q, %r14; /* offset ptr. */ vpxor (0 * 16)(%rdi), %xmm15, %xmm15; /* offset ^ first key ^ first key */ vmovdqu %xmm15, (%r14); /* Store offset. */ /* Burn stack. 
*/ vpxor %ymm0, %ymm0, %ymm0; vmovdqa %ymm0, (14 * 16)(%rsp); vzeroall; movq (STACK_REGS_POS + 0 * 8)(%rsp), %r12; CFI_RESTORE(%r12); movq (STACK_REGS_POS + 1 * 8)(%rsp), %r13; CFI_RESTORE(%r13); movq (STACK_REGS_POS + 2 * 8)(%rsp), %r14; CFI_RESTORE(%r14); movq (STACK_REGS_POS + 3 * 8)(%rsp), %r15; CFI_RESTORE(%r15); movq (STACK_REGS_POS + 4 * 8)(%rsp), %rbx; CFI_RESTORE(%rbx); leave; CFI_LEAVE(); ret_spec_stop #undef STACK_REGS_POS #undef STACK_ALLOC CFI_ENDPROC(); ELF(.size _gcry_vaes_avx2_ocb_crypt_amd64,.-_gcry_vaes_avx2_ocb_crypt_amd64) /********************************************************************** - CTR-mode encryption + XTS-mode encryption **********************************************************************/ ELF(.type _gcry_vaes_avx2_xts_crypt_amd64,@function) .globl _gcry_vaes_avx2_xts_crypt_amd64 _gcry_vaes_avx2_xts_crypt_amd64: /* input: * %rdi: round keys * %rsi: tweak * %rdx: dst * %rcx: src * %r8: nblocks * %r9: nrounds * 8(%rsp): encrypt */ CFI_STARTPROC(); movl 8(%rsp), %eax; #define tweak_clmul(shift, out, tweak, hi_tweak, tmp1, tmp2) \ vpsrld $(32-(shift)), hi_tweak, tmp2; \ vpsllq $(shift), tweak, out; \ vpclmulqdq $0, .Lxts_gfmul_clmul rRIP, tmp2, tmp1; \ vpunpckhqdq tmp2, tmp1, tmp1; \ vpxor tmp1, out, out; /* Prepare tweak. */ vmovdqu (%rsi), %xmm15; vpshufb .Lxts_high_bit_shuf rRIP, %xmm15, %xmm13; tweak_clmul(1, %xmm11, %xmm15, %xmm13, %xmm0, %xmm1); vinserti128 $1, %xmm11, %ymm15, %ymm15; /* tweak:tweak1 */ vpshufb .Lxts_high_bit_shuf rRIP, %ymm15, %ymm13; cmpq $8, %r8; jb .Lxts_crypt_blk4; /* Process eight blocks per loop. 
*/ leaq -8(%r8), %r8; vmovdqa %ymm15, %ymm5; tweak_clmul(2, %ymm6, %ymm15, %ymm13, %ymm0, %ymm1); tweak_clmul(4, %ymm7, %ymm15, %ymm13, %ymm0, %ymm1); tweak_clmul(6, %ymm8, %ymm15, %ymm13, %ymm0, %ymm1); tweak_clmul(8, %ymm15, %ymm15, %ymm13, %ymm0, %ymm1); vpshufb .Lxts_high_bit_shuf rRIP, %ymm15, %ymm13; vbroadcasti128 (0 * 16)(%rdi), %ymm4; vpxor (0 * 16)(%rcx), %ymm5, %ymm0; vpxor (2 * 16)(%rcx), %ymm6, %ymm1; vpxor (4 * 16)(%rcx), %ymm7, %ymm2; vpxor (6 * 16)(%rcx), %ymm8, %ymm3; leaq (8 * 16)(%rcx), %rcx; .align 8 .Lxts_crypt_blk8_loop: cmpq $8, %r8; jb .Lxts_crypt_blk8_tail; leaq -8(%r8), %r8; testl %eax, %eax; jz .Lxts_dec_blk8; /* AES rounds */ XOR4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (1 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (2 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (3 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (4 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vmovdqa %ymm15, %ymm9; tweak_clmul(2, %ymm10, %ymm15, %ymm13, %ymm12, %ymm14); tweak_clmul(4, %ymm11, %ymm15, %ymm13, %ymm12, %ymm14); vbroadcasti128 (5 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (6 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (7 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (8 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (9 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (10 * 16)(%rdi), %ymm4; cmpl $12, %r9d; jb .Lxts_enc_blk8_last; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (11 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (12 * 16)(%rdi), %ymm4; jz .Lxts_enc_blk8_last; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (13 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 
(14 * 16)(%rdi), %ymm4; /* Last round and output handling. */ .Lxts_enc_blk8_last: vpxor %ymm4, %ymm5, %ymm5; /* Xor tweak to last round key. */ vpxor %ymm4, %ymm6, %ymm6; vpxor %ymm4, %ymm7, %ymm7; vpxor %ymm4, %ymm8, %ymm4; tweak_clmul(6, %ymm8, %ymm15, %ymm13, %ymm12, %ymm14); tweak_clmul(8, %ymm15, %ymm15, %ymm13, %ymm12, %ymm14); vpshufb .Lxts_high_bit_shuf rRIP, %ymm15, %ymm13; vaesenclast %ymm5, %ymm0, %ymm0; vaesenclast %ymm6, %ymm1, %ymm1; vaesenclast %ymm7, %ymm2, %ymm2; vaesenclast %ymm4, %ymm3, %ymm3; vmovdqu %ymm0, (0 * 16)(%rdx); vmovdqu %ymm1, (2 * 16)(%rdx); vmovdqu %ymm2, (4 * 16)(%rdx); vmovdqu %ymm3, (6 * 16)(%rdx); leaq (8 * 16)(%rdx), %rdx; vbroadcasti128 (0 * 16)(%rdi), %ymm4; vpxor (0 * 16)(%rcx), %ymm9, %ymm0; vpxor (2 * 16)(%rcx), %ymm10, %ymm1; vpxor (4 * 16)(%rcx), %ymm11, %ymm2; vpxor (6 * 16)(%rcx), %ymm8, %ymm3; vmovdqa %ymm9, %ymm5; vmovdqa %ymm10, %ymm6; vmovdqa %ymm11, %ymm7; leaq (8 * 16)(%rcx), %rcx; jmp .Lxts_crypt_blk8_loop; .align 8 .Lxts_dec_blk8: /* AES rounds */ XOR4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (1 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (2 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (3 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (4 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vmovdqa %ymm15, %ymm9; tweak_clmul(2, %ymm10, %ymm15, %ymm13, %ymm12, %ymm14); tweak_clmul(4, %ymm11, %ymm15, %ymm13, %ymm12, %ymm14); vbroadcasti128 (5 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (6 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (7 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (8 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (9 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (10 * 16)(%rdi), %ymm4; cmpl $12, %r9d; jb 
.Lxts_dec_blk8_last; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (11 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (12 * 16)(%rdi), %ymm4; jz .Lxts_dec_blk8_last; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (13 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (14 * 16)(%rdi), %ymm4; /* Last round and output handling. */ .Lxts_dec_blk8_last: vpxor %ymm4, %ymm5, %ymm5; /* Xor tweak to last round key. */ vpxor %ymm4, %ymm6, %ymm6; vpxor %ymm4, %ymm7, %ymm7; vpxor %ymm4, %ymm8, %ymm4; tweak_clmul(6, %ymm8, %ymm15, %ymm13, %ymm12, %ymm14); tweak_clmul(8, %ymm15, %ymm15, %ymm13, %ymm12, %ymm14); vpshufb .Lxts_high_bit_shuf rRIP, %ymm15, %ymm13; vaesdeclast %ymm5, %ymm0, %ymm0; vaesdeclast %ymm6, %ymm1, %ymm1; vaesdeclast %ymm7, %ymm2, %ymm2; vaesdeclast %ymm4, %ymm3, %ymm3; vmovdqu %ymm0, (0 * 16)(%rdx); vmovdqu %ymm1, (2 * 16)(%rdx); vmovdqu %ymm2, (4 * 16)(%rdx); vmovdqu %ymm3, (6 * 16)(%rdx); leaq (8 * 16)(%rdx), %rdx; vbroadcasti128 (0 * 16)(%rdi), %ymm4; vpxor (0 * 16)(%rcx), %ymm9, %ymm0; vpxor (2 * 16)(%rcx), %ymm10, %ymm1; vpxor (4 * 16)(%rcx), %ymm11, %ymm2; vpxor (6 * 16)(%rcx), %ymm8, %ymm3; vmovdqa %ymm9, %ymm5; vmovdqa %ymm10, %ymm6; vmovdqa %ymm11, %ymm7; leaq (8 * 16)(%rcx), %rcx; jmp .Lxts_crypt_blk8_loop; .align 8 .Lxts_crypt_blk8_tail: testl %eax, %eax; jz .Lxts_dec_tail_blk8; /* AES rounds */ XOR4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (1 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (2 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (3 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (4 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (5 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (6 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (7 * 16)(%rdi), 
%ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (8 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (9 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (10 * 16)(%rdi), %ymm4; cmpl $12, %r9d; jb .Lxts_enc_blk8_tail_last; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (11 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (12 * 16)(%rdi), %ymm4; jz .Lxts_enc_blk8_tail_last; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (13 * 16)(%rdi), %ymm4; VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (14 * 16)(%rdi), %ymm4; /* Last round and output handling. */ .Lxts_enc_blk8_tail_last: vpxor %ymm4, %ymm5, %ymm5; /* Xor tweak to last round key. */ vpxor %ymm4, %ymm6, %ymm6; vpxor %ymm4, %ymm7, %ymm7; vpxor %ymm4, %ymm8, %ymm4; vaesenclast %ymm5, %ymm0, %ymm0; vaesenclast %ymm6, %ymm1, %ymm1; vaesenclast %ymm7, %ymm2, %ymm2; vaesenclast %ymm4, %ymm3, %ymm3; vmovdqu %ymm0, (0 * 16)(%rdx); vmovdqu %ymm1, (2 * 16)(%rdx); vmovdqu %ymm2, (4 * 16)(%rdx); vmovdqu %ymm3, (6 * 16)(%rdx); leaq (8 * 16)(%rdx), %rdx; jmp .Lxts_crypt_blk4; .align 8 .Lxts_dec_tail_blk8: /* AES rounds */ XOR4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (1 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (2 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (3 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (4 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (5 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (6 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (7 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (8 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (9 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, 
%ymm2, %ymm3); vbroadcasti128 (10 * 16)(%rdi), %ymm4; cmpl $12, %r9d; jb .Lxts_dec_blk8_tail_last; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (11 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (12 * 16)(%rdi), %ymm4; jz .Lxts_dec_blk8_tail_last; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (13 * 16)(%rdi), %ymm4; VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); vbroadcasti128 (14 * 16)(%rdi), %ymm4; /* Last round and output handling. */ .Lxts_dec_blk8_tail_last: vpxor %ymm4, %ymm5, %ymm5; /* Xor tweak to last round key. */ vpxor %ymm4, %ymm6, %ymm6; vpxor %ymm4, %ymm7, %ymm7; vpxor %ymm4, %ymm8, %ymm4; vaesdeclast %ymm5, %ymm0, %ymm0; vaesdeclast %ymm6, %ymm1, %ymm1; vaesdeclast %ymm7, %ymm2, %ymm2; vaesdeclast %ymm4, %ymm3, %ymm3; vmovdqu %ymm0, (0 * 16)(%rdx); vmovdqu %ymm1, (2 * 16)(%rdx); vmovdqu %ymm2, (4 * 16)(%rdx); vmovdqu %ymm3, (6 * 16)(%rdx); leaq (8 * 16)(%rdx), %rdx; /* Handle trailing four blocks. */ .align 8 .Lxts_crypt_blk4: /* Try exit early as typically input length is large power of 2. 
*/ cmpq $0, %r8; jb .Ldone_xts_crypt; cmpq $4, %r8; jb .Lxts_crypt_blk1; leaq -4(%r8), %r8; vmovdqa %ymm15, %ymm5; tweak_clmul(2, %ymm6, %ymm15, %ymm13, %ymm0, %ymm1); tweak_clmul(4, %ymm15, %ymm15, %ymm13, %ymm0, %ymm1); vpshufb .Lxts_high_bit_shuf rRIP, %ymm15, %ymm13; vbroadcasti128 (0 * 16)(%rdi), %ymm4; vpxor (0 * 16)(%rcx), %ymm5, %ymm0; vpxor (2 * 16)(%rcx), %ymm6, %ymm1; leaq (4 * 16)(%rcx), %rcx; testl %eax, %eax; jz .Lxts_dec_blk4; /* AES rounds */ XOR2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (1 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (2 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (3 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (4 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (5 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (6 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (7 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (8 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (9 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (10 * 16)(%rdi), %ymm4; cmpl $12, %r9d; jb .Lxts_enc_blk4_last; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (11 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (12 * 16)(%rdi), %ymm4; jz .Lxts_enc_blk4_last; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (13 * 16)(%rdi), %ymm4; VAESENC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (14 * 16)(%rdi), %ymm4; /* Last round and output handling. */ .Lxts_enc_blk4_last: vpxor %ymm4, %ymm5, %ymm5; /* Xor tweak to last round key. 
*/ vpxor %ymm4, %ymm6, %ymm6; vaesenclast %ymm5, %ymm0, %ymm0; vaesenclast %ymm6, %ymm1, %ymm1; vmovdqu %ymm0, (0 * 16)(%rdx); vmovdqu %ymm1, (2 * 16)(%rdx); leaq (4 * 16)(%rdx), %rdx; jmp .Lxts_crypt_blk1; .align 8 .Lxts_dec_blk4: /* AES rounds */ XOR2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (1 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (2 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (3 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (4 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (5 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (6 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (7 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (8 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (9 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (10 * 16)(%rdi), %ymm4; cmpl $12, %r9d; jb .Lxts_dec_blk4_last; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (11 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (12 * 16)(%rdi), %ymm4; jz .Lxts_dec_blk4_last; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (13 * 16)(%rdi), %ymm4; VAESDEC2(%ymm4, %ymm0, %ymm1); vbroadcasti128 (14 * 16)(%rdi), %ymm4; /* Last round and output handling. */ .Lxts_dec_blk4_last: vpxor %ymm4, %ymm5, %ymm5; /* Xor tweak to last round key. */ vpxor %ymm4, %ymm6, %ymm6; vaesdeclast %ymm5, %ymm0, %ymm0; vaesdeclast %ymm6, %ymm1, %ymm1; vmovdqu %ymm0, (0 * 16)(%rdx); vmovdqu %ymm1, (2 * 16)(%rdx); leaq (4 * 16)(%rdx), %rdx; /* Process trailing one to three blocks, one per loop. */ .align 8 .Lxts_crypt_blk1: cmpq $1, %r8; jb .Ldone_xts_crypt; leaq -1(%r8), %r8; vpxor (%rcx), %xmm15, %xmm0; vmovdqa %xmm15, %xmm5; tweak_clmul(1, %xmm15, %xmm15, %xmm13, %xmm2, %xmm3); vpshufb .Lxts_high_bit_shuf rRIP, %xmm15, %xmm13; leaq 16(%rcx), %rcx; testl %eax, %eax; jz .Lxts_dec_blk1; /* AES rounds. 
*/ vpxor (0 * 16)(%rdi), %xmm0, %xmm0; vaesenc (1 * 16)(%rdi), %xmm0, %xmm0; vaesenc (2 * 16)(%rdi), %xmm0, %xmm0; vaesenc (3 * 16)(%rdi), %xmm0, %xmm0; vaesenc (4 * 16)(%rdi), %xmm0, %xmm0; vaesenc (5 * 16)(%rdi), %xmm0, %xmm0; vaesenc (6 * 16)(%rdi), %xmm0, %xmm0; vaesenc (7 * 16)(%rdi), %xmm0, %xmm0; vaesenc (8 * 16)(%rdi), %xmm0, %xmm0; vaesenc (9 * 16)(%rdi), %xmm0, %xmm0; vmovdqa (10 * 16)(%rdi), %xmm1; cmpl $12, %r9d; jb .Lxts_enc_blk1_last; vaesenc %xmm1, %xmm0, %xmm0; vaesenc (11 * 16)(%rdi), %xmm0, %xmm0; vmovdqa (12 * 16)(%rdi), %xmm1; jz .Lxts_enc_blk1_last; vaesenc %xmm1, %xmm0, %xmm0; vaesenc (13 * 16)(%rdi), %xmm0, %xmm0; vmovdqa (14 * 16)(%rdi), %xmm1; /* Last round and output handling. */ .Lxts_enc_blk1_last: vpxor %xmm1, %xmm5, %xmm5; /* Xor tweak to last round key. */ vaesenclast %xmm5, %xmm0, %xmm0; vmovdqu %xmm0, (%rdx); leaq 16(%rdx), %rdx; jmp .Lxts_crypt_blk1; .align 8 .Lxts_dec_blk1: /* AES rounds. */ vpxor (0 * 16)(%rdi), %xmm0, %xmm0; vaesdec (1 * 16)(%rdi), %xmm0, %xmm0; vaesdec (2 * 16)(%rdi), %xmm0, %xmm0; vaesdec (3 * 16)(%rdi), %xmm0, %xmm0; vaesdec (4 * 16)(%rdi), %xmm0, %xmm0; vaesdec (5 * 16)(%rdi), %xmm0, %xmm0; vaesdec (6 * 16)(%rdi), %xmm0, %xmm0; vaesdec (7 * 16)(%rdi), %xmm0, %xmm0; vaesdec (8 * 16)(%rdi), %xmm0, %xmm0; vaesdec (9 * 16)(%rdi), %xmm0, %xmm0; vmovdqa (10 * 16)(%rdi), %xmm1; cmpl $12, %r9d; jb .Lxts_dec_blk1_last; vaesdec %xmm1, %xmm0, %xmm0; vaesdec (11 * 16)(%rdi), %xmm0, %xmm0; vmovdqa (12 * 16)(%rdi), %xmm1; jz .Lxts_dec_blk1_last; vaesdec %xmm1, %xmm0, %xmm0; vaesdec (13 * 16)(%rdi), %xmm0, %xmm0; vmovdqa (14 * 16)(%rdi), %xmm1; /* Last round and output handling. */ .Lxts_dec_blk1_last: vpxor %xmm1, %xmm5, %xmm5; /* Xor tweak to last round key. */ vaesdeclast %xmm5, %xmm0, %xmm0; vmovdqu %xmm0, (%rdx); leaq 16(%rdx), %rdx; jmp .Lxts_crypt_blk1; .align 8 .Ldone_xts_crypt: /* Store IV. 
*/ vmovdqu %xmm15, (%rsi); vzeroall; xorl %eax, %eax ret_spec_stop CFI_ENDPROC(); ELF(.size _gcry_vaes_avx2_xts_crypt_amd64,.-_gcry_vaes_avx2_xts_crypt_amd64) +/********************************************************************** + ECB-mode encryption + **********************************************************************/ +ELF(.type _gcry_vaes_avx2_ecb_crypt_amd64,@function) +.globl _gcry_vaes_avx2_ecb_crypt_amd64 +_gcry_vaes_avx2_ecb_crypt_amd64: + /* input: + * %rdi: round keys + * %esi: encrypt + * %rdx: dst + * %rcx: src + * %r8: nblocks + * %r9: nrounds + */ + CFI_STARTPROC(); + + /* Process 16 blocks per loop. */ +.align 8 +.Lecb_blk16: + cmpq $16, %r8; + jb .Lecb_blk8; + + leaq -16(%r8), %r8; + + /* Load input and xor first key. */ + vbroadcasti128 (0 * 16)(%rdi), %ymm8; + vmovdqu (0 * 16)(%rcx), %ymm0; + vmovdqu (2 * 16)(%rcx), %ymm1; + vmovdqu (4 * 16)(%rcx), %ymm2; + vmovdqu (6 * 16)(%rcx), %ymm3; + vmovdqu (8 * 16)(%rcx), %ymm4; + vmovdqu (10 * 16)(%rcx), %ymm5; + vmovdqu (12 * 16)(%rcx), %ymm6; + vmovdqu (14 * 16)(%rcx), %ymm7; + vpxor %ymm8, %ymm0, %ymm0; + vpxor %ymm8, %ymm1, %ymm1; + vpxor %ymm8, %ymm2, %ymm2; + vpxor %ymm8, %ymm3, %ymm3; + vpxor %ymm8, %ymm4, %ymm4; + vpxor %ymm8, %ymm5, %ymm5; + vpxor %ymm8, %ymm6, %ymm6; + vpxor %ymm8, %ymm7, %ymm7; + vbroadcasti128 (1 * 16)(%rdi), %ymm8; + leaq (16 * 16)(%rcx), %rcx; + + testl %esi, %esi; + jz .Lecb_dec_blk16; + /* AES rounds */ + VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); + vbroadcasti128 (2 * 16)(%rdi), %ymm8; + VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); + vbroadcasti128 (3 * 16)(%rdi), %ymm8; + VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); + vbroadcasti128 (4 * 16)(%rdi), %ymm8; + VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); + vbroadcasti128 (5 * 16)(%rdi), %ymm8; + VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); + vbroadcasti128 (6 * 16)(%rdi), %ymm8; 
+ VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); + vbroadcasti128 (7 * 16)(%rdi), %ymm8; + VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); + vbroadcasti128 (8 * 16)(%rdi), %ymm8; + VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); + vbroadcasti128 (9 * 16)(%rdi), %ymm8; + VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); + vbroadcasti128 (10 * 16)(%rdi), %ymm8; + cmpl $12, %r9d; + jb .Lecb_enc_blk16_last; + VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); + vbroadcasti128 (11 * 16)(%rdi), %ymm8; + VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); + vbroadcasti128 (12 * 16)(%rdi), %ymm8; + jz .Lecb_enc_blk16_last; + VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); + vbroadcasti128 (13 * 16)(%rdi), %ymm8; + VAESENC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); + vbroadcasti128 (14 * 16)(%rdi), %ymm8; + .Lecb_enc_blk16_last: + vaesenclast %ymm8, %ymm0, %ymm0; + vaesenclast %ymm8, %ymm1, %ymm1; + vaesenclast %ymm8, %ymm2, %ymm2; + vaesenclast %ymm8, %ymm3, %ymm3; + vaesenclast %ymm8, %ymm4, %ymm4; + vaesenclast %ymm8, %ymm5, %ymm5; + vaesenclast %ymm8, %ymm6, %ymm6; + vaesenclast %ymm8, %ymm7, %ymm7; + jmp .Lecb_blk16_end; + + .align 8 + .Lecb_dec_blk16: + /* AES rounds */ + VAESDEC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); + vbroadcasti128 (2 * 16)(%rdi), %ymm8; + VAESDEC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); + vbroadcasti128 (3 * 16)(%rdi), %ymm8; + VAESDEC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); + vbroadcasti128 (4 * 16)(%rdi), %ymm8; + VAESDEC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); + vbroadcasti128 (5 * 16)(%rdi), %ymm8; + VAESDEC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7); + vbroadcasti128 (6 * 16)(%rdi), %ymm8; + VAESDEC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, 
%ymm4, %ymm5, %ymm6, %ymm7);
+	vbroadcasti128 (7 * 16)(%rdi), %ymm8;
+	VAESDEC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7);
+	vbroadcasti128 (8 * 16)(%rdi), %ymm8;
+	VAESDEC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7);
+	vbroadcasti128 (9 * 16)(%rdi), %ymm8;
+	VAESDEC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7);
+	vbroadcasti128 (10 * 16)(%rdi), %ymm8;
+	cmpl $12, %r9d;
+	jb .Lecb_dec_blk16_last;
+	VAESDEC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7);
+	vbroadcasti128 (11 * 16)(%rdi), %ymm8;
+	VAESDEC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7);
+	vbroadcasti128 (12 * 16)(%rdi), %ymm8;
+	jz .Lecb_dec_blk16_last;
+	VAESDEC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7);
+	vbroadcasti128 (13 * 16)(%rdi), %ymm8;
+	VAESDEC8(%ymm8, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7);
+	vbroadcasti128 (14 * 16)(%rdi), %ymm8;
+  .Lecb_dec_blk16_last:
+	vaesdeclast %ymm8, %ymm0, %ymm0;
+	vaesdeclast %ymm8, %ymm1, %ymm1;
+	vaesdeclast %ymm8, %ymm2, %ymm2;
+	vaesdeclast %ymm8, %ymm3, %ymm3;
+	vaesdeclast %ymm8, %ymm4, %ymm4;
+	vaesdeclast %ymm8, %ymm5, %ymm5;
+	vaesdeclast %ymm8, %ymm6, %ymm6;
+	vaesdeclast %ymm8, %ymm7, %ymm7;
+	jmp .Lecb_blk16_end;
+
+  .align 8
+  .Lecb_blk16_end:
+	vmovdqu %ymm0, (0 * 16)(%rdx);
+	vmovdqu %ymm1, (2 * 16)(%rdx);
+	vmovdqu %ymm2, (4 * 16)(%rdx);
+	vmovdqu %ymm3, (6 * 16)(%rdx);
+	vmovdqu %ymm4, (8 * 16)(%rdx);
+	vmovdqu %ymm5, (10 * 16)(%rdx);
+	vmovdqu %ymm6, (12 * 16)(%rdx);
+	vmovdqu %ymm7, (14 * 16)(%rdx);
+	leaq (16 * 16)(%rdx), %rdx;
+
+	jmp .Lecb_blk16;
+
+	/* Handle trailing eight blocks. */
+.align 8
+.Lecb_blk8:
+	cmpq $8, %r8;
+	jb .Lecb_blk4; /* Fix: was unconditional "jmp", which discarded the
+			* "cmpq" result and made this whole 8-block VAES path
+			* dead code (remainders of 8..15 blocks fell through to
+			* the slower 4-block/1-block paths). "jb" matches the
+			* .Lecb_blk16 / .Lecb_blk4 tier structure. */
+
+	leaq -8(%r8), %r8;
+
+	/* Load input and xor first key.
*/ + vbroadcasti128 (0 * 16)(%rdi), %ymm4; + vmovdqu (0 * 16)(%rcx), %ymm0; + vmovdqu (2 * 16)(%rcx), %ymm1; + vmovdqu (4 * 16)(%rcx), %ymm2; + vmovdqu (6 * 16)(%rcx), %ymm3; + vpxor %ymm4, %ymm0, %ymm0; + vpxor %ymm4, %ymm1, %ymm1; + vpxor %ymm4, %ymm2, %ymm2; + vpxor %ymm4, %ymm3, %ymm3; + vbroadcasti128 (1 * 16)(%rdi), %ymm4; + leaq (8 * 16)(%rcx), %rcx; + + testl %esi, %esi; + jz .Lecb_dec_blk8; + /* AES rounds */ + VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); + vbroadcasti128 (2 * 16)(%rdi), %ymm4; + VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); + vbroadcasti128 (3 * 16)(%rdi), %ymm4; + VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); + vbroadcasti128 (4 * 16)(%rdi), %ymm4; + VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); + vbroadcasti128 (5 * 16)(%rdi), %ymm4; + VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); + vbroadcasti128 (6 * 16)(%rdi), %ymm4; + VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); + vbroadcasti128 (7 * 16)(%rdi), %ymm4; + VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); + vbroadcasti128 (8 * 16)(%rdi), %ymm4; + VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); + vbroadcasti128 (9 * 16)(%rdi), %ymm4; + VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); + vbroadcasti128 (10 * 16)(%rdi), %ymm4; + cmpl $12, %r9d; + jb .Lecb_enc_blk8_last; + VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); + vbroadcasti128 (11 * 16)(%rdi), %ymm4; + VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); + vbroadcasti128 (12 * 16)(%rdi), %ymm4; + jz .Lecb_enc_blk8_last; + VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); + vbroadcasti128 (13 * 16)(%rdi), %ymm4; + VAESENC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); + vbroadcasti128 (14 * 16)(%rdi), %ymm4; + .Lecb_enc_blk8_last: + vaesenclast %ymm4, %ymm0, %ymm0; + vaesenclast %ymm4, %ymm1, %ymm1; + vaesenclast %ymm4, %ymm2, %ymm2; + vaesenclast %ymm4, %ymm3, %ymm3; + vmovdqu %ymm0, (0 * 16)(%rdx); + vmovdqu %ymm1, (2 * 16)(%rdx); + vmovdqu %ymm2, (4 * 16)(%rdx); + vmovdqu %ymm3, (6 * 16)(%rdx); + leaq (8 * 16)(%rdx), %rdx; + jmp .Lecb_blk4; + + .align 8 + 
.Lecb_dec_blk8: + /* AES rounds */ + VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); + vbroadcasti128 (2 * 16)(%rdi), %ymm4; + VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); + vbroadcasti128 (3 * 16)(%rdi), %ymm4; + VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); + vbroadcasti128 (4 * 16)(%rdi), %ymm4; + VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); + vbroadcasti128 (5 * 16)(%rdi), %ymm4; + VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); + vbroadcasti128 (6 * 16)(%rdi), %ymm4; + VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); + vbroadcasti128 (7 * 16)(%rdi), %ymm4; + VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); + vbroadcasti128 (8 * 16)(%rdi), %ymm4; + VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); + vbroadcasti128 (9 * 16)(%rdi), %ymm4; + VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); + vbroadcasti128 (10 * 16)(%rdi), %ymm4; + cmpl $12, %r9d; + jb .Lecb_dec_blk8_last; + VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); + vbroadcasti128 (11 * 16)(%rdi), %ymm4; + VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); + vbroadcasti128 (12 * 16)(%rdi), %ymm4; + jz .Lecb_dec_blk8_last; + VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); + vbroadcasti128 (13 * 16)(%rdi), %ymm4; + VAESDEC4(%ymm4, %ymm0, %ymm1, %ymm2, %ymm3); + vbroadcasti128 (14 * 16)(%rdi), %ymm4; + .Lecb_dec_blk8_last: + vaesdeclast %ymm4, %ymm0, %ymm0; + vaesdeclast %ymm4, %ymm1, %ymm1; + vaesdeclast %ymm4, %ymm2, %ymm2; + vaesdeclast %ymm4, %ymm3, %ymm3; + vmovdqu %ymm0, (0 * 16)(%rdx); + vmovdqu %ymm1, (2 * 16)(%rdx); + vmovdqu %ymm2, (4 * 16)(%rdx); + vmovdqu %ymm3, (6 * 16)(%rdx); + leaq (8 * 16)(%rdx), %rdx; + + /* Handle trailing four blocks. */ +.align 8 +.Lecb_blk4: + cmpq $4, %r8; + jb .Lecb_blk1; + + leaq -4(%r8), %r8; + + /* Load input and xor first key. 
*/ + vbroadcasti128 (0 * 16)(%rdi), %ymm4; + vmovdqu (0 * 16)(%rcx), %ymm0; + vmovdqu (2 * 16)(%rcx), %ymm1; + vpxor %ymm4, %ymm0, %ymm0; + vpxor %ymm4, %ymm1, %ymm1; + vbroadcasti128 (1 * 16)(%rdi), %ymm4; + leaq (4 * 16)(%rcx), %rcx; + + testl %esi, %esi; + jz .Lecb_dec_blk4; + /* AES rounds */ + VAESENC2(%ymm4, %ymm0, %ymm1); + vbroadcasti128 (2 * 16)(%rdi), %ymm4; + VAESENC2(%ymm4, %ymm0, %ymm1); + vbroadcasti128 (3 * 16)(%rdi), %ymm4; + VAESENC2(%ymm4, %ymm0, %ymm1); + vbroadcasti128 (4 * 16)(%rdi), %ymm4; + VAESENC2(%ymm4, %ymm0, %ymm1); + vbroadcasti128 (5 * 16)(%rdi), %ymm4; + VAESENC2(%ymm4, %ymm0, %ymm1); + vbroadcasti128 (6 * 16)(%rdi), %ymm4; + VAESENC2(%ymm4, %ymm0, %ymm1); + vbroadcasti128 (7 * 16)(%rdi), %ymm4; + VAESENC2(%ymm4, %ymm0, %ymm1); + vbroadcasti128 (8 * 16)(%rdi), %ymm4; + VAESENC2(%ymm4, %ymm0, %ymm1); + vbroadcasti128 (9 * 16)(%rdi), %ymm4; + VAESENC2(%ymm4, %ymm0, %ymm1); + vbroadcasti128 (10 * 16)(%rdi), %ymm4; + cmpl $12, %r9d; + jb .Lecb_enc_blk4_last; + VAESENC2(%ymm4, %ymm0, %ymm1); + vbroadcasti128 (11 * 16)(%rdi), %ymm4; + VAESENC2(%ymm4, %ymm0, %ymm1); + vbroadcasti128 (12 * 16)(%rdi), %ymm4; + jz .Lecb_enc_blk4_last; + VAESENC2(%ymm4, %ymm0, %ymm1); + vbroadcasti128 (13 * 16)(%rdi), %ymm4; + VAESENC2(%ymm4, %ymm0, %ymm1); + vbroadcasti128 (14 * 16)(%rdi), %ymm4; + .Lecb_enc_blk4_last: + vaesenclast %ymm4, %ymm0, %ymm0; + vaesenclast %ymm4, %ymm1, %ymm1; + vmovdqu %ymm0, (0 * 16)(%rdx); + vmovdqu %ymm1, (2 * 16)(%rdx); + leaq (4 * 16)(%rdx), %rdx; + jmp .Lecb_blk1; + + .align 8 + .Lecb_dec_blk4: + /* AES rounds */ + VAESDEC2(%ymm4, %ymm0, %ymm1); + vbroadcasti128 (2 * 16)(%rdi), %ymm4; + VAESDEC2(%ymm4, %ymm0, %ymm1); + vbroadcasti128 (3 * 16)(%rdi), %ymm4; + VAESDEC2(%ymm4, %ymm0, %ymm1); + vbroadcasti128 (4 * 16)(%rdi), %ymm4; + VAESDEC2(%ymm4, %ymm0, %ymm1); + vbroadcasti128 (5 * 16)(%rdi), %ymm4; + VAESDEC2(%ymm4, %ymm0, %ymm1); + vbroadcasti128 (6 * 16)(%rdi), %ymm4; + VAESDEC2(%ymm4, %ymm0, %ymm1); + vbroadcasti128 (7 * 
16)(%rdi), %ymm4; + VAESDEC2(%ymm4, %ymm0, %ymm1); + vbroadcasti128 (8 * 16)(%rdi), %ymm4; + VAESDEC2(%ymm4, %ymm0, %ymm1); + vbroadcasti128 (9 * 16)(%rdi), %ymm4; + VAESDEC2(%ymm4, %ymm0, %ymm1); + vbroadcasti128 (10 * 16)(%rdi), %ymm4; + cmpl $12, %r9d; + jb .Lecb_dec_blk4_last; + VAESDEC2(%ymm4, %ymm0, %ymm1); + vbroadcasti128 (11 * 16)(%rdi), %ymm4; + VAESDEC2(%ymm4, %ymm0, %ymm1); + vbroadcasti128 (12 * 16)(%rdi), %ymm4; + jz .Lecb_dec_blk4_last; + VAESDEC2(%ymm4, %ymm0, %ymm1); + vbroadcasti128 (13 * 16)(%rdi), %ymm4; + VAESDEC2(%ymm4, %ymm0, %ymm1); + vbroadcasti128 (14 * 16)(%rdi), %ymm4; + .Lecb_dec_blk4_last: + vaesdeclast %ymm4, %ymm0, %ymm0; + vaesdeclast %ymm4, %ymm1, %ymm1; + vmovdqu %ymm0, (0 * 16)(%rdx); + vmovdqu %ymm1, (2 * 16)(%rdx); + leaq (4 * 16)(%rdx), %rdx; + + /* Process trailing one to three blocks, one per loop. */ +.align 8 +.Lecb_blk1: + cmpq $1, %r8; + jb .Ldone_ecb; + + leaq -1(%r8), %r8; + + /* Load input. */ + vmovdqu (%rcx), %xmm2; + leaq 16(%rcx), %rcx; + + /* Xor first key. */ + vpxor (0 * 16)(%rdi), %xmm2, %xmm0; + + testl %esi, %esi; + jz .Lecb_dec_blk1; + /* AES rounds. */ + vaesenc (1 * 16)(%rdi), %xmm0, %xmm0; + vaesenc (2 * 16)(%rdi), %xmm0, %xmm0; + vaesenc (3 * 16)(%rdi), %xmm0, %xmm0; + vaesenc (4 * 16)(%rdi), %xmm0, %xmm0; + vaesenc (5 * 16)(%rdi), %xmm0, %xmm0; + vaesenc (6 * 16)(%rdi), %xmm0, %xmm0; + vaesenc (7 * 16)(%rdi), %xmm0, %xmm0; + vaesenc (8 * 16)(%rdi), %xmm0, %xmm0; + vaesenc (9 * 16)(%rdi), %xmm0, %xmm0; + vmovdqa (10 * 16)(%rdi), %xmm1; + cmpl $12, %r9d; + jb .Lecb_enc_blk1_last; + vaesenc %xmm1, %xmm0, %xmm0; + vaesenc (11 * 16)(%rdi), %xmm0, %xmm0; + vmovdqa (12 * 16)(%rdi), %xmm1; + jz .Lecb_enc_blk1_last; + vaesenc %xmm1, %xmm0, %xmm0; + vaesenc (13 * 16)(%rdi), %xmm0, %xmm0; + vmovdqa (14 * 16)(%rdi), %xmm1; + .Lecb_enc_blk1_last: + vaesenclast %xmm1, %xmm0, %xmm0; + jmp .Lecb_blk1_end; + + .align 8 + .Lecb_dec_blk1: + /* AES rounds. 
*/ + vaesdec (1 * 16)(%rdi), %xmm0, %xmm0; + vaesdec (2 * 16)(%rdi), %xmm0, %xmm0; + vaesdec (3 * 16)(%rdi), %xmm0, %xmm0; + vaesdec (4 * 16)(%rdi), %xmm0, %xmm0; + vaesdec (5 * 16)(%rdi), %xmm0, %xmm0; + vaesdec (6 * 16)(%rdi), %xmm0, %xmm0; + vaesdec (7 * 16)(%rdi), %xmm0, %xmm0; + vaesdec (8 * 16)(%rdi), %xmm0, %xmm0; + vaesdec (9 * 16)(%rdi), %xmm0, %xmm0; + vmovdqa (10 * 16)(%rdi), %xmm1; + cmpl $12, %r9d; + jb .Lecb_dec_blk1_last; + vaesdec %xmm1, %xmm0, %xmm0; + vaesdec (11 * 16)(%rdi), %xmm0, %xmm0; + vmovdqa (12 * 16)(%rdi), %xmm1; + jz .Lecb_dec_blk1_last; + vaesdec %xmm1, %xmm0, %xmm0; + vaesdec (13 * 16)(%rdi), %xmm0, %xmm0; + vmovdqa (14 * 16)(%rdi), %xmm1; + .Lecb_dec_blk1_last: + vaesdeclast %xmm1, %xmm0, %xmm0; + jmp .Lecb_blk1_end; + + .align 8 + .Lecb_blk1_end: + vmovdqu %xmm0, (%rdx); + leaq 16(%rdx), %rdx; + + jmp .Lecb_blk1; + +.align 8 +.Ldone_ecb: + vzeroall; + ret_spec_stop + CFI_ENDPROC(); +ELF(.size _gcry_vaes_avx2_ecb_crypt_amd64,.-_gcry_vaes_avx2_ecb_crypt_amd64) + /********************************************************************** constants **********************************************************************/ ELF(.type _gcry_vaes_consts,@object) _gcry_vaes_consts: .align 32 .Lbige_addb_0: .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 .Lbige_addb_1: .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 .Lbige_addb_2: .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2 .Lbige_addb_3: .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3 .Lbige_addb_4: .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4 .Lbige_addb_5: .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5 .Lbige_addb_6: .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6 .Lbige_addb_7: .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7 .Lbige_addb_8: .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8 .Lbige_addb_9: .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9 .Lbige_addb_10: .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10 .Lbige_addb_11: .byte 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11 .Lbige_addb_12: .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12 .Lbige_addb_13: .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 13 .Lbige_addb_14: .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 14 .Lbige_addb_15: .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15 .Lle_addd_0: .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 .Lle_addd_1: .byte 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 .Lle_addd_2: .byte 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 .Lle_addd_3: .byte 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 .Lle_addd_4: .byte 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 .Lle_addd_5: .byte 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 .Lle_addd_6: .byte 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 .Lle_addd_7: .byte 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 .Lle_addd_8: .byte 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 .Lle_addd_9: .byte 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 .Lle_addd_10: .byte 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 .Lle_addd_11: .byte 11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 .Lle_addd_12: .byte 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 .Lle_addd_13: .byte 13, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 .Lle_addd_14: .byte 14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 .Lle_addd_15: .byte 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 .Lle_addd_4_2: .byte 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 .byte 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 .Lle_addd_8_2: .byte 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 .byte 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 .Lle_addd_16_2: .byte 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 .byte 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 .Lxts_gfmul_clmul: .long 0x00, 0x87, 0x00, 0x00 .long 0x00, 0x87, 0x00, 0x00 .Lxts_high_bit_shuf: .byte -1, -1, -1, -1, 12, 13, 14, 15 .byte 4, 5, 6, 7, -1, -1, -1, -1 .byte -1, -1, -1, -1, 12, 13, 14, 15 .byte 4, 5, 6, 7, -1, -1, -1, -1 
.Lbswap128_mask: .byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 ELF(.size _gcry_vaes_consts,.-_gcry_vaes_consts) #endif /* HAVE_GCC_INLINE_ASM_VAES */ #endif /* __x86_64__ */ diff --git a/cipher/rijndael-vaes.c b/cipher/rijndael-vaes.c index dbcf9afa..978c86da 100644 --- a/cipher/rijndael-vaes.c +++ b/cipher/rijndael-vaes.c @@ -1,196 +1,222 @@ /* VAES/AVX2 accelerated AES for Libgcrypt * Copyright (C) 2021 Jussi Kivilinna * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . 
* */ #include #include #include #include "types.h" /* for byte and u32 typedefs */ #include "g10lib.h" #include "cipher.h" #include "bufhelp.h" #include "rijndael-internal.h" #include "./cipher-internal.h" #ifdef USE_VAES # ifdef HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS # define ASM_FUNC_ABI __attribute__((sysv_abi)) # else # define ASM_FUNC_ABI # endif extern void _gcry_aes_aesni_prepare_decryption(RIJNDAEL_context *ctx); extern void _gcry_vaes_avx2_cbc_dec_amd64 (const void *keysched, unsigned char *iv, void *outbuf_arg, const void *inbuf_arg, size_t nblocks, unsigned int nrounds) ASM_FUNC_ABI; extern void _gcry_vaes_avx2_cfb_dec_amd64 (const void *keysched, unsigned char *iv, void *outbuf_arg, const void *inbuf_arg, size_t nblocks, unsigned int nrounds) ASM_FUNC_ABI; extern void _gcry_vaes_avx2_ctr_enc_amd64 (const void *keysched, unsigned char *ctr, void *outbuf_arg, const void *inbuf_arg, size_t nblocks, unsigned int nrounds) ASM_FUNC_ABI; extern void _gcry_vaes_avx2_ctr32le_enc_amd64 (const void *keysched, unsigned char *ctr, void *outbuf_arg, const void *inbuf_arg, size_t nblocks, unsigned int nrounds) ASM_FUNC_ABI; extern void _gcry_vaes_avx2_ocb_crypt_amd64 (const void *keysched, unsigned int blkn, void *outbuf_arg, const void *inbuf_arg, size_t nblocks, unsigned int nrounds, unsigned char *offset, unsigned char *checksum, unsigned char *L_table, int encrypt) ASM_FUNC_ABI; extern void _gcry_vaes_avx2_xts_crypt_amd64 (const void *keysched, unsigned char *tweak, void *outbuf_arg, const void *inbuf_arg, size_t nblocks, unsigned int nrounds, int encrypt) ASM_FUNC_ABI; +extern void _gcry_vaes_avx2_ecb_crypt_amd64 (const void *keysched, + int encrypt, + void *outbuf_arg, + const void *inbuf_arg, + size_t nblocks, + unsigned int nrounds) ASM_FUNC_ABI; + + +void +_gcry_aes_vaes_ecb_crypt (void *context, void *outbuf, + const void *inbuf, size_t nblocks, + int encrypt) +{ + RIJNDAEL_context *ctx = context; + const void *keysched = encrypt ? 
ctx->keyschenc32 : ctx->keyschdec32; + unsigned int nrounds = ctx->rounds; + + if (!encrypt && !ctx->decryption_prepared) + { + _gcry_aes_aesni_prepare_decryption (ctx); + ctx->decryption_prepared = 1; + } + + _gcry_vaes_avx2_ecb_crypt_amd64 (keysched, encrypt, outbuf, inbuf, + nblocks, nrounds); +} void _gcry_aes_vaes_cbc_dec (void *context, unsigned char *iv, void *outbuf, const void *inbuf, size_t nblocks) { RIJNDAEL_context *ctx = context; const void *keysched = ctx->keyschdec32; unsigned int nrounds = ctx->rounds; if (!ctx->decryption_prepared) { _gcry_aes_aesni_prepare_decryption (ctx); ctx->decryption_prepared = 1; } _gcry_vaes_avx2_cbc_dec_amd64 (keysched, iv, outbuf, inbuf, nblocks, nrounds); } void _gcry_aes_vaes_cfb_dec (void *context, unsigned char *iv, void *outbuf, const void *inbuf, size_t nblocks) { RIJNDAEL_context *ctx = context; const void *keysched = ctx->keyschenc32; unsigned int nrounds = ctx->rounds; _gcry_vaes_avx2_cfb_dec_amd64 (keysched, iv, outbuf, inbuf, nblocks, nrounds); } void _gcry_aes_vaes_ctr_enc (void *context, unsigned char *iv, void *outbuf, const void *inbuf, size_t nblocks) { RIJNDAEL_context *ctx = context; const void *keysched = ctx->keyschenc32; unsigned int nrounds = ctx->rounds; _gcry_vaes_avx2_ctr_enc_amd64 (keysched, iv, outbuf, inbuf, nblocks, nrounds); } void _gcry_aes_vaes_ctr32le_enc (void *context, unsigned char *iv, void *outbuf, const void *inbuf, size_t nblocks) { RIJNDAEL_context *ctx = context; const void *keysched = ctx->keyschenc32; unsigned int nrounds = ctx->rounds; _gcry_vaes_avx2_ctr32le_enc_amd64 (keysched, iv, outbuf, inbuf, nblocks, nrounds); } size_t _gcry_aes_vaes_ocb_crypt (gcry_cipher_hd_t c, void *outbuf_arg, const void *inbuf_arg, size_t nblocks, int encrypt) { RIJNDAEL_context *ctx = (void *)&c->context.c; const void *keysched = encrypt ? 
ctx->keyschenc32 : ctx->keyschdec32; unsigned char *outbuf = outbuf_arg; const unsigned char *inbuf = inbuf_arg; unsigned int nrounds = ctx->rounds; u64 blkn = c->u_mode.ocb.data_nblocks; if (!encrypt && !ctx->decryption_prepared) { _gcry_aes_aesni_prepare_decryption (ctx); ctx->decryption_prepared = 1; } c->u_mode.ocb.data_nblocks = blkn + nblocks; _gcry_vaes_avx2_ocb_crypt_amd64 (keysched, (unsigned int)blkn, outbuf, inbuf, nblocks, nrounds, c->u_iv.iv, c->u_ctr.ctr, c->u_mode.ocb.L[0], encrypt); return 0; } void _gcry_aes_vaes_xts_crypt (void *context, unsigned char *tweak, void *outbuf, const void *inbuf, size_t nblocks, int encrypt) { RIJNDAEL_context *ctx = context; const void *keysched = encrypt ? ctx->keyschenc32 : ctx->keyschdec32; unsigned int nrounds = ctx->rounds; if (!encrypt && !ctx->decryption_prepared) { _gcry_aes_aesni_prepare_decryption (ctx); ctx->decryption_prepared = 1; } _gcry_vaes_avx2_xts_crypt_amd64 (keysched, tweak, outbuf, inbuf, nblocks, nrounds, encrypt); } #endif /* USE_VAES */ diff --git a/cipher/rijndael.c b/cipher/rijndael.c index f3060ea5..84cb7109 100644 --- a/cipher/rijndael.c +++ b/cipher/rijndael.c @@ -1,1984 +1,1996 @@ /* Rijndael (AES) for GnuPG * Copyright (C) 2000, 2001, 2002, 2003, 2007, * 2008, 2011, 2012 Free Software Foundation, Inc. * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . 
******************************************************************* * The code here is based on the optimized implementation taken from * http://www.esat.kuleuven.ac.be/~rijmen/rijndael/ on Oct 2, 2000, * which carries this notice: *------------------------------------------ * rijndael-alg-fst.c v2.3 April '2000 * * Optimised ANSI C code * * authors: v1.0: Antoon Bosselaers * v2.0: Vincent Rijmen * v2.3: Paulo Barreto * * This code is placed in the public domain. *------------------------------------------ * * The SP800-38a document is available at: * http://csrc.nist.gov/publications/nistpubs/800-38a/sp800-38a.pdf * */ #include #include #include #include /* for memcmp() */ #include "types.h" /* for byte and u32 typedefs */ #include "g10lib.h" #include "cipher.h" #include "bufhelp.h" #include "rijndael-internal.h" #include "./cipher-internal.h" #ifdef USE_AMD64_ASM /* AMD64 assembly implementations of AES */ extern unsigned int _gcry_aes_amd64_encrypt_block(const void *keysched_enc, unsigned char *out, const unsigned char *in, int rounds, const void *encT); extern unsigned int _gcry_aes_amd64_decrypt_block(const void *keysched_dec, unsigned char *out, const unsigned char *in, int rounds, const void *decT); #endif /*USE_AMD64_ASM*/ #ifdef USE_AESNI /* AES-NI (AMD64 & i386) accelerated implementations of AES */ extern void _gcry_aes_aesni_do_setkey(RIJNDAEL_context *ctx, const byte *key); extern void _gcry_aes_aesni_prepare_decryption(RIJNDAEL_context *ctx); extern unsigned int _gcry_aes_aesni_encrypt (const RIJNDAEL_context *ctx, unsigned char *dst, const unsigned char *src); extern unsigned int _gcry_aes_aesni_decrypt (const RIJNDAEL_context *ctx, unsigned char *dst, const unsigned char *src); extern void _gcry_aes_aesni_cfb_enc (void *context, unsigned char *iv, void *outbuf_arg, const void *inbuf_arg, size_t nblocks); extern void _gcry_aes_aesni_cbc_enc (void *context, unsigned char *iv, void *outbuf_arg, const void *inbuf_arg, size_t nblocks, int cbc_mac); 
extern void _gcry_aes_aesni_ctr_enc (void *context, unsigned char *ctr, void *outbuf_arg, const void *inbuf_arg, size_t nblocks); extern void _gcry_aes_aesni_ctr32le_enc (void *context, unsigned char *ctr, void *outbuf_arg, const void *inbuf_arg, size_t nblocks); extern void _gcry_aes_aesni_cfb_dec (void *context, unsigned char *iv, void *outbuf_arg, const void *inbuf_arg, size_t nblocks); extern void _gcry_aes_aesni_cbc_dec (void *context, unsigned char *iv, void *outbuf_arg, const void *inbuf_arg, size_t nblocks); extern size_t _gcry_aes_aesni_ocb_crypt (gcry_cipher_hd_t c, void *outbuf_arg, const void *inbuf_arg, size_t nblocks, int encrypt); extern size_t _gcry_aes_aesni_ocb_auth (gcry_cipher_hd_t c, const void *abuf_arg, size_t nblocks); extern void _gcry_aes_aesni_xts_crypt (void *context, unsigned char *tweak, void *outbuf_arg, const void *inbuf_arg, size_t nblocks, int encrypt); +extern void _gcry_aes_aesni_ecb_crypt (void *context, void *outbuf_arg, + const void *inbuf_arg, size_t nblocks, + int encrypt); #endif #ifdef USE_VAES /* VAES (AMD64) accelerated implementation of AES */ extern void _gcry_aes_vaes_cfb_dec (void *context, unsigned char *iv, void *outbuf_arg, const void *inbuf_arg, size_t nblocks); extern void _gcry_aes_vaes_cbc_dec (void *context, unsigned char *iv, void *outbuf_arg, const void *inbuf_arg, size_t nblocks); extern void _gcry_aes_vaes_ctr_enc (void *context, unsigned char *ctr, void *outbuf_arg, const void *inbuf_arg, size_t nblocks); extern void _gcry_aes_vaes_ctr32le_enc (void *context, unsigned char *ctr, void *outbuf_arg, const void *inbuf_arg, size_t nblocks); extern size_t _gcry_aes_vaes_ocb_crypt (gcry_cipher_hd_t c, void *outbuf_arg, const void *inbuf_arg, size_t nblocks, int encrypt); extern void _gcry_aes_vaes_xts_crypt (void *context, unsigned char *tweak, void *outbuf_arg, const void *inbuf_arg, size_t nblocks, int encrypt); +extern void _gcry_aes_vaes_ecb_crypt (void *context, void *outbuf_arg, + const void *inbuf_arg, 
size_t nblocks, + int encrypt); #endif #ifdef USE_SSSE3 /* SSSE3 (AMD64) vector permutation implementation of AES */ extern void _gcry_aes_ssse3_do_setkey(RIJNDAEL_context *ctx, const byte *key); extern void _gcry_aes_ssse3_prepare_decryption(RIJNDAEL_context *ctx); extern unsigned int _gcry_aes_ssse3_encrypt (const RIJNDAEL_context *ctx, unsigned char *dst, const unsigned char *src); extern unsigned int _gcry_aes_ssse3_decrypt (const RIJNDAEL_context *ctx, unsigned char *dst, const unsigned char *src); extern void _gcry_aes_ssse3_cfb_enc (void *context, unsigned char *iv, void *outbuf_arg, const void *inbuf_arg, size_t nblocks); extern void _gcry_aes_ssse3_cbc_enc (void *context, unsigned char *iv, void *outbuf_arg, const void *inbuf_arg, size_t nblocks, int cbc_mac); extern void _gcry_aes_ssse3_ctr_enc (void *context, unsigned char *ctr, void *outbuf_arg, const void *inbuf_arg, size_t nblocks); extern void _gcry_aes_ssse3_cfb_dec (void *context, unsigned char *iv, void *outbuf_arg, const void *inbuf_arg, size_t nblocks); extern void _gcry_aes_ssse3_cbc_dec (void *context, unsigned char *iv, void *outbuf_arg, const void *inbuf_arg, size_t nblocks); extern size_t _gcry_aes_ssse3_ocb_crypt (gcry_cipher_hd_t c, void *outbuf_arg, const void *inbuf_arg, size_t nblocks, int encrypt); extern size_t _gcry_aes_ssse3_ocb_auth (gcry_cipher_hd_t c, const void *abuf_arg, size_t nblocks); #endif #ifdef USE_PADLOCK extern unsigned int _gcry_aes_padlock_encrypt (const RIJNDAEL_context *ctx, unsigned char *bx, const unsigned char *ax); extern unsigned int _gcry_aes_padlock_decrypt (const RIJNDAEL_context *ctx, unsigned char *bx, const unsigned char *ax); extern void _gcry_aes_padlock_prepare_decryption (RIJNDAEL_context *ctx); #endif #ifdef USE_ARM_ASM /* ARM assembly implementations of AES */ extern unsigned int _gcry_aes_arm_encrypt_block(const void *keysched_enc, unsigned char *out, const unsigned char *in, int rounds, const void *encT); extern unsigned int 
_gcry_aes_arm_decrypt_block(const void *keysched_dec, unsigned char *out, const unsigned char *in, int rounds, const void *decT); #endif /*USE_ARM_ASM*/ #ifdef USE_ARM_CE /* ARMv8 Crypto Extension implementations of AES */ extern void _gcry_aes_armv8_ce_setkey(RIJNDAEL_context *ctx, const byte *key); extern void _gcry_aes_armv8_ce_prepare_decryption(RIJNDAEL_context *ctx); extern unsigned int _gcry_aes_armv8_ce_encrypt(const RIJNDAEL_context *ctx, unsigned char *dst, const unsigned char *src); extern unsigned int _gcry_aes_armv8_ce_decrypt(const RIJNDAEL_context *ctx, unsigned char *dst, const unsigned char *src); extern void _gcry_aes_armv8_ce_cfb_enc (void *context, unsigned char *iv, void *outbuf_arg, const void *inbuf_arg, size_t nblocks); extern void _gcry_aes_armv8_ce_cbc_enc (void *context, unsigned char *iv, void *outbuf_arg, const void *inbuf_arg, size_t nblocks, int cbc_mac); extern void _gcry_aes_armv8_ce_ctr_enc (void *context, unsigned char *ctr, void *outbuf_arg, const void *inbuf_arg, size_t nblocks); extern void _gcry_aes_armv8_ce_ctr32le_enc (void *context, unsigned char *ctr, void *outbuf_arg, const void *inbuf_arg, size_t nblocks); extern void _gcry_aes_armv8_ce_cfb_dec (void *context, unsigned char *iv, void *outbuf_arg, const void *inbuf_arg, size_t nblocks); extern void _gcry_aes_armv8_ce_cbc_dec (void *context, unsigned char *iv, void *outbuf_arg, const void *inbuf_arg, size_t nblocks); extern size_t _gcry_aes_armv8_ce_ocb_crypt (gcry_cipher_hd_t c, void *outbuf_arg, const void *inbuf_arg, size_t nblocks, int encrypt); extern size_t _gcry_aes_armv8_ce_ocb_auth (gcry_cipher_hd_t c, const void *abuf_arg, size_t nblocks); extern void _gcry_aes_armv8_ce_xts_crypt (void *context, unsigned char *tweak, void *outbuf_arg, const void *inbuf_arg, size_t nblocks, int encrypt); +extern void _gcry_aes_armv8_ce_ecb_crypt (void *context, void *outbuf_arg, + const void *inbuf_arg, size_t nblocks, + int encrypt); #endif /*USE_ARM_ASM*/ #ifdef USE_PPC_CRYPTO 
/* PowerPC Crypto implementations of AES */ extern void _gcry_aes_ppc8_setkey(RIJNDAEL_context *ctx, const byte *key); extern void _gcry_aes_ppc8_prepare_decryption(RIJNDAEL_context *ctx); extern unsigned int _gcry_aes_ppc8_encrypt(const RIJNDAEL_context *ctx, unsigned char *dst, const unsigned char *src); extern unsigned int _gcry_aes_ppc8_decrypt(const RIJNDAEL_context *ctx, unsigned char *dst, const unsigned char *src); extern void _gcry_aes_ppc8_cfb_enc (void *context, unsigned char *iv, void *outbuf_arg, const void *inbuf_arg, size_t nblocks); extern void _gcry_aes_ppc8_cbc_enc (void *context, unsigned char *iv, void *outbuf_arg, const void *inbuf_arg, size_t nblocks, int cbc_mac); extern void _gcry_aes_ppc8_ctr_enc (void *context, unsigned char *ctr, void *outbuf_arg, const void *inbuf_arg, size_t nblocks); extern void _gcry_aes_ppc8_cfb_dec (void *context, unsigned char *iv, void *outbuf_arg, const void *inbuf_arg, size_t nblocks); extern void _gcry_aes_ppc8_cbc_dec (void *context, unsigned char *iv, void *outbuf_arg, const void *inbuf_arg, size_t nblocks); extern size_t _gcry_aes_ppc8_ocb_crypt (gcry_cipher_hd_t c, void *outbuf_arg, const void *inbuf_arg, size_t nblocks, int encrypt); extern size_t _gcry_aes_ppc8_ocb_auth (gcry_cipher_hd_t c, const void *abuf_arg, size_t nblocks); extern void _gcry_aes_ppc8_xts_crypt (void *context, unsigned char *tweak, void *outbuf_arg, const void *inbuf_arg, size_t nblocks, int encrypt); #endif /*USE_PPC_CRYPTO*/ #ifdef USE_PPC_CRYPTO_WITH_PPC9LE /* Power9 little-endian crypto implementations of AES */ extern unsigned int _gcry_aes_ppc9le_encrypt(const RIJNDAEL_context *ctx, unsigned char *dst, const unsigned char *src); extern unsigned int _gcry_aes_ppc9le_decrypt(const RIJNDAEL_context *ctx, unsigned char *dst, const unsigned char *src); extern void _gcry_aes_ppc9le_cfb_enc (void *context, unsigned char *iv, void *outbuf_arg, const void *inbuf_arg, size_t nblocks); extern void _gcry_aes_ppc9le_cbc_enc (void *context, 
unsigned char *iv, void *outbuf_arg, const void *inbuf_arg, size_t nblocks, int cbc_mac); extern void _gcry_aes_ppc9le_ctr_enc (void *context, unsigned char *ctr, void *outbuf_arg, const void *inbuf_arg, size_t nblocks); extern void _gcry_aes_ppc9le_cfb_dec (void *context, unsigned char *iv, void *outbuf_arg, const void *inbuf_arg, size_t nblocks); extern void _gcry_aes_ppc9le_cbc_dec (void *context, unsigned char *iv, void *outbuf_arg, const void *inbuf_arg, size_t nblocks); extern size_t _gcry_aes_ppc9le_ocb_crypt (gcry_cipher_hd_t c, void *outbuf_arg, const void *inbuf_arg, size_t nblocks, int encrypt); extern size_t _gcry_aes_ppc9le_ocb_auth (gcry_cipher_hd_t c, const void *abuf_arg, size_t nblocks); extern void _gcry_aes_ppc9le_xts_crypt (void *context, unsigned char *tweak, void *outbuf_arg, const void *inbuf_arg, size_t nblocks, int encrypt); extern size_t _gcry_aes_p10le_gcm_crypt (gcry_cipher_hd_t c, void *outbuf_arg, const void *inbuf_arg, size_t nblocks, int encrypt); #endif /*USE_PPC_CRYPTO_WITH_PPC9LE*/ #ifdef USE_S390X_CRYPTO /* zSeries crypto implementations of AES */ extern int _gcry_aes_s390x_setup_acceleration(RIJNDAEL_context *ctx, unsigned int keylen, unsigned int hwfeatures, cipher_bulk_ops_t *bulk_ops); extern void _gcry_aes_s390x_setkey(RIJNDAEL_context *ctx, const byte *key); extern void _gcry_aes_s390x_prepare_decryption(RIJNDAEL_context *ctx); extern unsigned int _gcry_aes_s390x_encrypt(const RIJNDAEL_context *ctx, unsigned char *dst, const unsigned char *src); extern unsigned int _gcry_aes_s390x_decrypt(const RIJNDAEL_context *ctx, unsigned char *dst, const unsigned char *src); #endif /*USE_S390X_CRYPTO*/ static unsigned int do_encrypt (const RIJNDAEL_context *ctx, unsigned char *bx, const unsigned char *ax); static unsigned int do_decrypt (const RIJNDAEL_context *ctx, unsigned char *bx, const unsigned char *ax); static void _gcry_aes_cfb_enc (void *context, unsigned char *iv, void *outbuf, const void *inbuf, size_t nblocks); static void 
_gcry_aes_cfb_dec (void *context, unsigned char *iv, void *outbuf_arg, const void *inbuf_arg, size_t nblocks); static void _gcry_aes_cbc_enc (void *context, unsigned char *iv, void *outbuf_arg, const void *inbuf_arg, size_t nblocks, int cbc_mac); static void _gcry_aes_cbc_dec (void *context, unsigned char *iv, void *outbuf_arg, const void *inbuf_arg, size_t nblocks); static void _gcry_aes_ctr_enc (void *context, unsigned char *ctr, void *outbuf_arg, const void *inbuf_arg, size_t nblocks); static size_t _gcry_aes_ocb_crypt (gcry_cipher_hd_t c, void *outbuf_arg, const void *inbuf_arg, size_t nblocks, int encrypt); static size_t _gcry_aes_ocb_auth (gcry_cipher_hd_t c, const void *abuf_arg, size_t nblocks); static void _gcry_aes_xts_crypt (void *context, unsigned char *tweak, void *outbuf_arg, const void *inbuf_arg, size_t nblocks, int encrypt); /* All the numbers. */ #include "rijndael-tables.h" /* Function prototypes. */ static const char *selftest(void); static void prepare_decryption(RIJNDAEL_context *ctx); /* Prefetching for encryption/decryption tables. */ static inline void prefetch_table(const volatile byte *tab, size_t len) { size_t i; for (i = 0; len - i >= 8 * 32; i += 8 * 32) { (void)tab[i + 0 * 32]; (void)tab[i + 1 * 32]; (void)tab[i + 2 * 32]; (void)tab[i + 3 * 32]; (void)tab[i + 4 * 32]; (void)tab[i + 5 * 32]; (void)tab[i + 6 * 32]; (void)tab[i + 7 * 32]; } for (; i < len; i += 32) { (void)tab[i]; } (void)tab[len - 1]; } static void prefetch_enc(void) { /* Modify counters to trigger copy-on-write and unsharing if physical pages * of look-up table are shared between processes. Modifying counters also * causes checksums for pages to change and hint same-page merging algorithm * that these pages are frequently changing. */ enc_tables.counter_head++; enc_tables.counter_tail++; /* Prefetch look-up tables to cache. 
*/ prefetch_table((const void *)&enc_tables, sizeof(enc_tables)); } static void prefetch_dec(void) { /* Modify counters to trigger copy-on-write and unsharing if physical pages * of look-up table are shared between processes. Modifying counters also * causes checksums for pages to change and hint same-page merging algorithm * that these pages are frequently changing. */ dec_tables.counter_head++; dec_tables.counter_tail++; /* Prefetch look-up tables to cache. */ prefetch_table((const void *)&dec_tables, sizeof(dec_tables)); } static inline u32 sbox4(u32 inb4) { u32 out; out = (encT[(inb4 >> 0) & 0xffU] & 0xff00U) >> 8; out |= (encT[(inb4 >> 8) & 0xffU] & 0xff00U) >> 0; out |= (encT[(inb4 >> 16) & 0xffU] & 0xff0000U) << 0; out |= (encT[(inb4 >> 24) & 0xffU] & 0xff0000U) << 8; return out; } /* Perform the key setup. */ static gcry_err_code_t do_setkey (RIJNDAEL_context *ctx, const byte *key, const unsigned keylen, cipher_bulk_ops_t *bulk_ops) { static int initialized = 0; static const char *selftest_failed = 0; void (*hw_setkey)(RIJNDAEL_context *ctx, const byte *key) = NULL; int rounds; unsigned int KC; unsigned int hwfeatures; /* The on-the-fly self tests are only run in non-fips mode. In fips mode explicit self-tests are required. Actually the on-the-fly self-tests are not fully thread-safe and it might happen that a failed self-test won't get noticed in another thread. FIXME: We might want to have a central registry of succeeded self-tests. */ if (!fips_mode () && !initialized) { initialized = 1; selftest_failed = selftest (); if (selftest_failed) log_error ("%s\n", selftest_failed ); } if (selftest_failed) return GPG_ERR_SELFTEST_FAILED; if( keylen == 128/8 ) { rounds = 10; KC = 4; } else if ( keylen == 192/8 ) { rounds = 12; KC = 6; } else if ( keylen == 256/8 ) { rounds = 14; KC = 8; } else return GPG_ERR_INV_KEYLEN; ctx->rounds = rounds; hwfeatures = _gcry_get_hw_features (); ctx->decryption_prepared = 0; /* Setup default bulk encryption routines. 
*/ memset (bulk_ops, 0, sizeof(*bulk_ops)); bulk_ops->cfb_enc = _gcry_aes_cfb_enc; bulk_ops->cfb_dec = _gcry_aes_cfb_dec; bulk_ops->cbc_enc = _gcry_aes_cbc_enc; bulk_ops->cbc_dec = _gcry_aes_cbc_dec; bulk_ops->ctr_enc = _gcry_aes_ctr_enc; bulk_ops->ocb_crypt = _gcry_aes_ocb_crypt; bulk_ops->ocb_auth = _gcry_aes_ocb_auth; bulk_ops->xts_crypt = _gcry_aes_xts_crypt; (void)hwfeatures; if (0) { ; } #ifdef USE_AESNI else if (hwfeatures & HWF_INTEL_AESNI) { hw_setkey = _gcry_aes_aesni_do_setkey; ctx->encrypt_fn = _gcry_aes_aesni_encrypt; ctx->decrypt_fn = _gcry_aes_aesni_decrypt; ctx->prefetch_enc_fn = NULL; ctx->prefetch_dec_fn = NULL; ctx->prepare_decryption = _gcry_aes_aesni_prepare_decryption; ctx->use_avx = !!(hwfeatures & HWF_INTEL_AVX); ctx->use_avx2 = !!(hwfeatures & HWF_INTEL_AVX2); /* Setup AES-NI bulk encryption routines. */ bulk_ops->cfb_enc = _gcry_aes_aesni_cfb_enc; bulk_ops->cfb_dec = _gcry_aes_aesni_cfb_dec; bulk_ops->cbc_enc = _gcry_aes_aesni_cbc_enc; bulk_ops->cbc_dec = _gcry_aes_aesni_cbc_dec; bulk_ops->ctr_enc = _gcry_aes_aesni_ctr_enc; bulk_ops->ctr32le_enc = _gcry_aes_aesni_ctr32le_enc; bulk_ops->ocb_crypt = _gcry_aes_aesni_ocb_crypt; bulk_ops->ocb_auth = _gcry_aes_aesni_ocb_auth; bulk_ops->xts_crypt = _gcry_aes_aesni_xts_crypt; + bulk_ops->ecb_crypt = _gcry_aes_aesni_ecb_crypt; #ifdef USE_VAES if ((hwfeatures & HWF_INTEL_VAES_VPCLMUL) && (hwfeatures & HWF_INTEL_AVX2)) { /* Setup VAES bulk encryption routines. 
*/ bulk_ops->cfb_dec = _gcry_aes_vaes_cfb_dec; bulk_ops->cbc_dec = _gcry_aes_vaes_cbc_dec; bulk_ops->ctr_enc = _gcry_aes_vaes_ctr_enc; bulk_ops->ctr32le_enc = _gcry_aes_vaes_ctr32le_enc; bulk_ops->ocb_crypt = _gcry_aes_vaes_ocb_crypt; bulk_ops->xts_crypt = _gcry_aes_vaes_xts_crypt; + bulk_ops->ecb_crypt = _gcry_aes_vaes_ecb_crypt; } #endif } #endif #ifdef USE_PADLOCK else if ((hwfeatures & HWF_PADLOCK_AES) && keylen == 128/8) { ctx->encrypt_fn = _gcry_aes_padlock_encrypt; ctx->decrypt_fn = _gcry_aes_padlock_decrypt; ctx->prefetch_enc_fn = NULL; ctx->prefetch_dec_fn = NULL; ctx->prepare_decryption = _gcry_aes_padlock_prepare_decryption; memcpy (ctx->padlockkey, key, keylen); } #endif #ifdef USE_SSSE3 else if (hwfeatures & HWF_INTEL_SSSE3) { hw_setkey = _gcry_aes_ssse3_do_setkey; ctx->encrypt_fn = _gcry_aes_ssse3_encrypt; ctx->decrypt_fn = _gcry_aes_ssse3_decrypt; ctx->prefetch_enc_fn = NULL; ctx->prefetch_dec_fn = NULL; ctx->prepare_decryption = _gcry_aes_ssse3_prepare_decryption; /* Setup SSSE3 bulk encryption routines. */ bulk_ops->cfb_enc = _gcry_aes_ssse3_cfb_enc; bulk_ops->cfb_dec = _gcry_aes_ssse3_cfb_dec; bulk_ops->cbc_enc = _gcry_aes_ssse3_cbc_enc; bulk_ops->cbc_dec = _gcry_aes_ssse3_cbc_dec; bulk_ops->ctr_enc = _gcry_aes_ssse3_ctr_enc; bulk_ops->ocb_crypt = _gcry_aes_ssse3_ocb_crypt; bulk_ops->ocb_auth = _gcry_aes_ssse3_ocb_auth; } #endif #ifdef USE_ARM_CE else if (hwfeatures & HWF_ARM_AES) { hw_setkey = _gcry_aes_armv8_ce_setkey; ctx->encrypt_fn = _gcry_aes_armv8_ce_encrypt; ctx->decrypt_fn = _gcry_aes_armv8_ce_decrypt; ctx->prefetch_enc_fn = NULL; ctx->prefetch_dec_fn = NULL; ctx->prepare_decryption = _gcry_aes_armv8_ce_prepare_decryption; /* Setup ARM-CE bulk encryption routines. 
*/ bulk_ops->cfb_enc = _gcry_aes_armv8_ce_cfb_enc; bulk_ops->cfb_dec = _gcry_aes_armv8_ce_cfb_dec; bulk_ops->cbc_enc = _gcry_aes_armv8_ce_cbc_enc; bulk_ops->cbc_dec = _gcry_aes_armv8_ce_cbc_dec; bulk_ops->ctr_enc = _gcry_aes_armv8_ce_ctr_enc; bulk_ops->ctr32le_enc = _gcry_aes_armv8_ce_ctr32le_enc; bulk_ops->ocb_crypt = _gcry_aes_armv8_ce_ocb_crypt; bulk_ops->ocb_auth = _gcry_aes_armv8_ce_ocb_auth; bulk_ops->xts_crypt = _gcry_aes_armv8_ce_xts_crypt; + bulk_ops->ecb_crypt = _gcry_aes_armv8_ce_ecb_crypt; } #endif #ifdef USE_PPC_CRYPTO_WITH_PPC9LE else if ((hwfeatures & HWF_PPC_VCRYPTO) && (hwfeatures & HWF_PPC_ARCH_3_00)) { hw_setkey = _gcry_aes_ppc8_setkey; ctx->encrypt_fn = _gcry_aes_ppc9le_encrypt; ctx->decrypt_fn = _gcry_aes_ppc9le_decrypt; ctx->prefetch_enc_fn = NULL; ctx->prefetch_dec_fn = NULL; ctx->prepare_decryption = _gcry_aes_ppc8_prepare_decryption; /* Setup PPC9LE bulk encryption routines. */ bulk_ops->cfb_enc = _gcry_aes_ppc9le_cfb_enc; bulk_ops->cfb_dec = _gcry_aes_ppc9le_cfb_dec; bulk_ops->cbc_enc = _gcry_aes_ppc9le_cbc_enc; bulk_ops->cbc_dec = _gcry_aes_ppc9le_cbc_dec; bulk_ops->ctr_enc = _gcry_aes_ppc9le_ctr_enc; bulk_ops->ocb_crypt = _gcry_aes_ppc9le_ocb_crypt; bulk_ops->ocb_auth = _gcry_aes_ppc9le_ocb_auth; bulk_ops->xts_crypt = _gcry_aes_ppc9le_xts_crypt; if (hwfeatures & HWF_PPC_ARCH_3_10) /* for P10 */ bulk_ops->gcm_crypt = _gcry_aes_p10le_gcm_crypt; # ifdef ENABLE_FORCE_SOFT_HWFEATURES /* HWF_PPC_ARCH_3_10 above is used as soft HW-feature indicator for P10. * Actual implementation works with HWF_PPC_ARCH_3_00 also. 
*/ if (hwfeatures & HWF_PPC_ARCH_3_00) bulk_ops->gcm_crypt = _gcry_aes_p10le_gcm_crypt; # endif } #endif #ifdef USE_PPC_CRYPTO else if (hwfeatures & HWF_PPC_VCRYPTO) { hw_setkey = _gcry_aes_ppc8_setkey; ctx->encrypt_fn = _gcry_aes_ppc8_encrypt; ctx->decrypt_fn = _gcry_aes_ppc8_decrypt; ctx->prefetch_enc_fn = NULL; ctx->prefetch_dec_fn = NULL; ctx->prepare_decryption = _gcry_aes_ppc8_prepare_decryption; /* Setup PPC8 bulk encryption routines. */ bulk_ops->cfb_enc = _gcry_aes_ppc8_cfb_enc; bulk_ops->cfb_dec = _gcry_aes_ppc8_cfb_dec; bulk_ops->cbc_enc = _gcry_aes_ppc8_cbc_enc; bulk_ops->cbc_dec = _gcry_aes_ppc8_cbc_dec; bulk_ops->ctr_enc = _gcry_aes_ppc8_ctr_enc; bulk_ops->ocb_crypt = _gcry_aes_ppc8_ocb_crypt; bulk_ops->ocb_auth = _gcry_aes_ppc8_ocb_auth; bulk_ops->xts_crypt = _gcry_aes_ppc8_xts_crypt; } #endif #ifdef USE_S390X_CRYPTO else if (_gcry_aes_s390x_setup_acceleration (ctx, keylen, hwfeatures, bulk_ops)) { hw_setkey = _gcry_aes_s390x_setkey; ctx->encrypt_fn = _gcry_aes_s390x_encrypt; ctx->decrypt_fn = _gcry_aes_s390x_decrypt; ctx->prefetch_enc_fn = NULL; ctx->prefetch_dec_fn = NULL; ctx->prepare_decryption = _gcry_aes_s390x_prepare_decryption; } #endif else { ctx->encrypt_fn = do_encrypt; ctx->decrypt_fn = do_decrypt; ctx->prefetch_enc_fn = prefetch_enc; ctx->prefetch_dec_fn = prefetch_dec; ctx->prepare_decryption = prepare_decryption; } /* NB: We don't yet support Padlock hardware key generation. 
*/ if (hw_setkey) { hw_setkey (ctx, key); } else { u32 W_prev; u32 *W_u32 = ctx->keyschenc32b; byte rcon = 1; unsigned int i, j; prefetch_enc(); for (i = 0; i < KC; i += 2) { W_u32[i + 0] = buf_get_le32(key + i * 4 + 0); W_u32[i + 1] = buf_get_le32(key + i * 4 + 4); } for (i = KC, j = KC, W_prev = W_u32[KC - 1]; i < 4 * (rounds + 1); i += 2, j += 2) { u32 temp0 = W_prev; u32 temp1; if (j == KC) { j = 0; temp0 = sbox4(rol(temp0, 24)) ^ rcon; rcon = ((rcon << 1) ^ (-(rcon >> 7) & 0x1b)) & 0xff; } else if (KC == 8 && j == 4) { temp0 = sbox4(temp0); } temp1 = W_u32[i - KC + 0]; W_u32[i + 0] = temp0 ^ temp1; W_u32[i + 1] = W_u32[i - KC + 1] ^ temp0 ^ temp1; W_prev = W_u32[i + 1]; } } return 0; } static gcry_err_code_t rijndael_setkey (void *context, const byte *key, const unsigned keylen, cipher_bulk_ops_t *bulk_ops) { RIJNDAEL_context *ctx = context; return do_setkey (ctx, key, keylen, bulk_ops); } /* Make a decryption key from an encryption key. */ static void prepare_decryption( RIJNDAEL_context *ctx ) { const byte *sbox = ((const byte *)encT) + 1; int r; prefetch_enc(); prefetch_dec(); ctx->keyschdec32[0][0] = ctx->keyschenc32[0][0]; ctx->keyschdec32[0][1] = ctx->keyschenc32[0][1]; ctx->keyschdec32[0][2] = ctx->keyschenc32[0][2]; ctx->keyschdec32[0][3] = ctx->keyschenc32[0][3]; for (r = 1; r < ctx->rounds; r++) { u32 *wi = ctx->keyschenc32[r]; u32 *wo = ctx->keyschdec32[r]; u32 wt; wt = wi[0]; wo[0] = rol(decT[sbox[(byte)(wt >> 0) * 4]], 8 * 0) ^ rol(decT[sbox[(byte)(wt >> 8) * 4]], 8 * 1) ^ rol(decT[sbox[(byte)(wt >> 16) * 4]], 8 * 2) ^ rol(decT[sbox[(byte)(wt >> 24) * 4]], 8 * 3); wt = wi[1]; wo[1] = rol(decT[sbox[(byte)(wt >> 0) * 4]], 8 * 0) ^ rol(decT[sbox[(byte)(wt >> 8) * 4]], 8 * 1) ^ rol(decT[sbox[(byte)(wt >> 16) * 4]], 8 * 2) ^ rol(decT[sbox[(byte)(wt >> 24) * 4]], 8 * 3); wt = wi[2]; wo[2] = rol(decT[sbox[(byte)(wt >> 0) * 4]], 8 * 0) ^ rol(decT[sbox[(byte)(wt >> 8) * 4]], 8 * 1) ^ rol(decT[sbox[(byte)(wt >> 16) * 4]], 8 * 2) ^ rol(decT[sbox[(byte)(wt >> 
24) * 4]], 8 * 3); wt = wi[3]; wo[3] = rol(decT[sbox[(byte)(wt >> 0) * 4]], 8 * 0) ^ rol(decT[sbox[(byte)(wt >> 8) * 4]], 8 * 1) ^ rol(decT[sbox[(byte)(wt >> 16) * 4]], 8 * 2) ^ rol(decT[sbox[(byte)(wt >> 24) * 4]], 8 * 3); } ctx->keyschdec32[r][0] = ctx->keyschenc32[r][0]; ctx->keyschdec32[r][1] = ctx->keyschenc32[r][1]; ctx->keyschdec32[r][2] = ctx->keyschenc32[r][2]; ctx->keyschdec32[r][3] = ctx->keyschenc32[r][3]; } #if !defined(USE_ARM_ASM) && !defined(USE_AMD64_ASM) /* Encrypt one block. A and B may be the same. */ static unsigned int do_encrypt_fn (const RIJNDAEL_context *ctx, unsigned char *b, const unsigned char *a) { #define rk (ctx->keyschenc32) const byte *sbox = ((const byte *)encT) + 1; int rounds = ctx->rounds; int r; u32 sa[4]; u32 sb[4]; sb[0] = buf_get_le32(a + 0); sb[1] = buf_get_le32(a + 4); sb[2] = buf_get_le32(a + 8); sb[3] = buf_get_le32(a + 12); sa[0] = sb[0] ^ rk[0][0]; sa[1] = sb[1] ^ rk[0][1]; sa[2] = sb[2] ^ rk[0][2]; sa[3] = sb[3] ^ rk[0][3]; sb[0] = rol(encT[(byte)(sa[0] >> (0 * 8))], (0 * 8)); sb[3] = rol(encT[(byte)(sa[0] >> (1 * 8))], (1 * 8)); sb[2] = rol(encT[(byte)(sa[0] >> (2 * 8))], (2 * 8)); sb[1] = rol(encT[(byte)(sa[0] >> (3 * 8))], (3 * 8)); sa[0] = rk[1][0] ^ sb[0]; sb[1] ^= rol(encT[(byte)(sa[1] >> (0 * 8))], (0 * 8)); sa[0] ^= rol(encT[(byte)(sa[1] >> (1 * 8))], (1 * 8)); sb[3] ^= rol(encT[(byte)(sa[1] >> (2 * 8))], (2 * 8)); sb[2] ^= rol(encT[(byte)(sa[1] >> (3 * 8))], (3 * 8)); sa[1] = rk[1][1] ^ sb[1]; sb[2] ^= rol(encT[(byte)(sa[2] >> (0 * 8))], (0 * 8)); sa[1] ^= rol(encT[(byte)(sa[2] >> (1 * 8))], (1 * 8)); sa[0] ^= rol(encT[(byte)(sa[2] >> (2 * 8))], (2 * 8)); sb[3] ^= rol(encT[(byte)(sa[2] >> (3 * 8))], (3 * 8)); sa[2] = rk[1][2] ^ sb[2]; sb[3] ^= rol(encT[(byte)(sa[3] >> (0 * 8))], (0 * 8)); sa[2] ^= rol(encT[(byte)(sa[3] >> (1 * 8))], (1 * 8)); sa[1] ^= rol(encT[(byte)(sa[3] >> (2 * 8))], (2 * 8)); sa[0] ^= rol(encT[(byte)(sa[3] >> (3 * 8))], (3 * 8)); sa[3] = rk[1][3] ^ sb[3]; for (r = 2; r < rounds; r++) { 
sb[0] = rol(encT[(byte)(sa[0] >> (0 * 8))], (0 * 8)); sb[3] = rol(encT[(byte)(sa[0] >> (1 * 8))], (1 * 8)); sb[2] = rol(encT[(byte)(sa[0] >> (2 * 8))], (2 * 8)); sb[1] = rol(encT[(byte)(sa[0] >> (3 * 8))], (3 * 8)); sa[0] = rk[r][0] ^ sb[0]; sb[1] ^= rol(encT[(byte)(sa[1] >> (0 * 8))], (0 * 8)); sa[0] ^= rol(encT[(byte)(sa[1] >> (1 * 8))], (1 * 8)); sb[3] ^= rol(encT[(byte)(sa[1] >> (2 * 8))], (2 * 8)); sb[2] ^= rol(encT[(byte)(sa[1] >> (3 * 8))], (3 * 8)); sa[1] = rk[r][1] ^ sb[1]; sb[2] ^= rol(encT[(byte)(sa[2] >> (0 * 8))], (0 * 8)); sa[1] ^= rol(encT[(byte)(sa[2] >> (1 * 8))], (1 * 8)); sa[0] ^= rol(encT[(byte)(sa[2] >> (2 * 8))], (2 * 8)); sb[3] ^= rol(encT[(byte)(sa[2] >> (3 * 8))], (3 * 8)); sa[2] = rk[r][2] ^ sb[2]; sb[3] ^= rol(encT[(byte)(sa[3] >> (0 * 8))], (0 * 8)); sa[2] ^= rol(encT[(byte)(sa[3] >> (1 * 8))], (1 * 8)); sa[1] ^= rol(encT[(byte)(sa[3] >> (2 * 8))], (2 * 8)); sa[0] ^= rol(encT[(byte)(sa[3] >> (3 * 8))], (3 * 8)); sa[3] = rk[r][3] ^ sb[3]; r++; sb[0] = rol(encT[(byte)(sa[0] >> (0 * 8))], (0 * 8)); sb[3] = rol(encT[(byte)(sa[0] >> (1 * 8))], (1 * 8)); sb[2] = rol(encT[(byte)(sa[0] >> (2 * 8))], (2 * 8)); sb[1] = rol(encT[(byte)(sa[0] >> (3 * 8))], (3 * 8)); sa[0] = rk[r][0] ^ sb[0]; sb[1] ^= rol(encT[(byte)(sa[1] >> (0 * 8))], (0 * 8)); sa[0] ^= rol(encT[(byte)(sa[1] >> (1 * 8))], (1 * 8)); sb[3] ^= rol(encT[(byte)(sa[1] >> (2 * 8))], (2 * 8)); sb[2] ^= rol(encT[(byte)(sa[1] >> (3 * 8))], (3 * 8)); sa[1] = rk[r][1] ^ sb[1]; sb[2] ^= rol(encT[(byte)(sa[2] >> (0 * 8))], (0 * 8)); sa[1] ^= rol(encT[(byte)(sa[2] >> (1 * 8))], (1 * 8)); sa[0] ^= rol(encT[(byte)(sa[2] >> (2 * 8))], (2 * 8)); sb[3] ^= rol(encT[(byte)(sa[2] >> (3 * 8))], (3 * 8)); sa[2] = rk[r][2] ^ sb[2]; sb[3] ^= rol(encT[(byte)(sa[3] >> (0 * 8))], (0 * 8)); sa[2] ^= rol(encT[(byte)(sa[3] >> (1 * 8))], (1 * 8)); sa[1] ^= rol(encT[(byte)(sa[3] >> (2 * 8))], (2 * 8)); sa[0] ^= rol(encT[(byte)(sa[3] >> (3 * 8))], (3 * 8)); sa[3] = rk[r][3] ^ sb[3]; } /* Last round is special. 
*/
  /* Final round: SubBytes (via sbox, stride 4) + ShiftRows + AddRoundKey;
     no MixColumns, hence plain S-box lookups instead of encT. */
  sb[0] = ((u32)sbox[(byte)(sa[0] >> (0 * 8)) * 4]) << (0 * 8);
  sb[3] = ((u32)sbox[(byte)(sa[0] >> (1 * 8)) * 4]) << (1 * 8);
  sb[2] = ((u32)sbox[(byte)(sa[0] >> (2 * 8)) * 4]) << (2 * 8);
  sb[1] = ((u32)sbox[(byte)(sa[0] >> (3 * 8)) * 4]) << (3 * 8);
  sa[0] = rk[r][0] ^ sb[0];

  sb[1] ^= ((u32)sbox[(byte)(sa[1] >> (0 * 8)) * 4]) << (0 * 8);
  sa[0] ^= ((u32)sbox[(byte)(sa[1] >> (1 * 8)) * 4]) << (1 * 8);
  sb[3] ^= ((u32)sbox[(byte)(sa[1] >> (2 * 8)) * 4]) << (2 * 8);
  sb[2] ^= ((u32)sbox[(byte)(sa[1] >> (3 * 8)) * 4]) << (3 * 8);
  sa[1] = rk[r][1] ^ sb[1];

  sb[2] ^= ((u32)sbox[(byte)(sa[2] >> (0 * 8)) * 4]) << (0 * 8);
  sa[1] ^= ((u32)sbox[(byte)(sa[2] >> (1 * 8)) * 4]) << (1 * 8);
  sa[0] ^= ((u32)sbox[(byte)(sa[2] >> (2 * 8)) * 4]) << (2 * 8);
  sb[3] ^= ((u32)sbox[(byte)(sa[2] >> (3 * 8)) * 4]) << (3 * 8);
  sa[2] = rk[r][2] ^ sb[2];

  sb[3] ^= ((u32)sbox[(byte)(sa[3] >> (0 * 8)) * 4]) << (0 * 8);
  sa[2] ^= ((u32)sbox[(byte)(sa[3] >> (1 * 8)) * 4]) << (1 * 8);
  sa[1] ^= ((u32)sbox[(byte)(sa[3] >> (2 * 8)) * 4]) << (2 * 8);
  sa[0] ^= ((u32)sbox[(byte)(sa[3] >> (3 * 8)) * 4]) << (3 * 8);
  sa[3] = rk[r][3] ^ sb[3];

  /* Store the ciphertext block little-endian. */
  buf_put_le32(b + 0, sa[0]);
  buf_put_le32(b + 4, sa[1]);
  buf_put_le32(b + 8, sa[2]);
  buf_put_le32(b + 12, sa[3]);
#undef rk

  /* Return value is the stack burn depth for _gcry_burn_stack. */
  return (56 + 2*sizeof(int));
}
#endif /*!USE_ARM_ASM && !USE_AMD64_ASM*/


/* Encrypt one block with the generic (non-SIMD) implementation,
   dispatching to the AMD64 or ARM assembly version when one was
   compiled in, otherwise to the portable C code above.  Returns the
   stack burn depth. */
static unsigned int
do_encrypt (const RIJNDAEL_context *ctx,
            unsigned char *bx, const unsigned char *ax)
{
#ifdef USE_AMD64_ASM
  return _gcry_aes_amd64_encrypt_block(ctx->keyschenc, bx, ax, ctx->rounds,
                                       enc_tables.T);
#elif defined(USE_ARM_ASM)
  return _gcry_aes_arm_encrypt_block(ctx->keyschenc, bx, ax, ctx->rounds,
                                     enc_tables.T);
#else
  return do_encrypt_fn (ctx, bx, ax);
#endif /* !USE_ARM_ASM && !USE_AMD64_ASM*/
}


/* Single-block encrypt entry point used by cipher.c.  Prefetches the
   look-up tables first when the selected implementation needs it
   (table-based software path), then calls the configured encrypt_fn.
   Returns the stack burn depth reported by the implementation. */
static unsigned int
rijndael_encrypt (void *context, byte *b, const byte *a)
{
  RIJNDAEL_context *ctx = context;

  if (ctx->prefetch_enc_fn)
    ctx->prefetch_enc_fn();

  return ctx->encrypt_fn (ctx, b, a);
}


/* Bulk encryption of complete blocks in CFB mode.
   Caller needs to make sure that IV is aligned on an unsigned long
   boundary.  This function is only intended for the bulk encryption
   feature of cipher.c. */
static void
_gcry_aes_cfb_enc (void *context, unsigned char *iv,
                   void *outbuf_arg, const void *inbuf_arg,
                   size_t nblocks)
{
  RIJNDAEL_context *ctx = context;
  unsigned char *outbuf = outbuf_arg;
  const unsigned char *inbuf = inbuf_arg;
  unsigned int burn_depth = 0;   /* Stack burn depth from encrypt_fn. */
  rijndael_cryptfn_t encrypt_fn = ctx->encrypt_fn;

  if (ctx->prefetch_enc_fn)
    ctx->prefetch_enc_fn();

  for ( ;nblocks; nblocks-- )
    {
      /* Encrypt the IV. */
      burn_depth = encrypt_fn (ctx, iv, iv);
      /* XOR the input with the IV and store input into IV. */
      cipher_block_xor_2dst(outbuf, iv, inbuf, BLOCKSIZE);
      outbuf += BLOCKSIZE;
      inbuf += BLOCKSIZE;
    }

  if (burn_depth)
    _gcry_burn_stack (burn_depth + 4 * sizeof(void *));
}


/* Bulk encryption of complete blocks in CBC mode.  Caller needs to
   make sure that IV is aligned on an unsigned long boundary.  This
   function is only intended for the bulk encryption feature of
   cipher.c. */
static void
_gcry_aes_cbc_enc (void *context, unsigned char *iv,
                   void *outbuf_arg, const void *inbuf_arg,
                   size_t nblocks, int cbc_mac)
{
  RIJNDAEL_context *ctx = context;
  unsigned char *outbuf = outbuf_arg;
  const unsigned char *inbuf = inbuf_arg;
  unsigned char *last_iv;      /* Points at the most recent ciphertext block. */
  unsigned int burn_depth = 0; /* Stack burn depth from encrypt_fn. */
  rijndael_cryptfn_t encrypt_fn = ctx->encrypt_fn;

  if (ctx->prefetch_enc_fn)
    ctx->prefetch_enc_fn();

  last_iv = iv;

  for ( ;nblocks; nblocks-- )
    {
      cipher_block_xor(outbuf, inbuf, last_iv, BLOCKSIZE);
      burn_depth = encrypt_fn (ctx, outbuf, outbuf);
      last_iv = outbuf;
      inbuf += BLOCKSIZE;
      /* In CBC-MAC mode every block is written to the same output
         position, so only the final MAC value remains. */
      if (!cbc_mac)
        outbuf += BLOCKSIZE;
    }

  /* Write back the chaining value so the caller can continue. */
  if (last_iv != iv)
    cipher_block_cpy (iv, last_iv, BLOCKSIZE);

  if (burn_depth)
    _gcry_burn_stack (burn_depth + 4 * sizeof(void *));
}


/* Bulk encryption of complete blocks in CTR mode.  Caller needs to
   make sure that CTR is aligned on a 16 byte boundary if AESNI; the
   minimum alignment is for an u32.
   This function is only intended for the bulk encryption feature of
   cipher.c.  CTR is expected to be of size BLOCKSIZE. */
static void
_gcry_aes_ctr_enc (void *context, unsigned char *ctr,
                   void *outbuf_arg, const void *inbuf_arg,
                   size_t nblocks)
{
  RIJNDAEL_context *ctx = context;
  unsigned char *outbuf = outbuf_arg;
  const unsigned char *inbuf = inbuf_arg;
  unsigned int burn_depth = 0;   /* Stack burn depth from encrypt_fn. */
  /* Aligned scratch buffer for the encrypted counter (keystream). */
  union { unsigned char x1[16] ATTR_ALIGNED_16; u32 x32[4]; } tmp;
  rijndael_cryptfn_t encrypt_fn = ctx->encrypt_fn;

  if (ctx->prefetch_enc_fn)
    ctx->prefetch_enc_fn();

  for ( ;nblocks; nblocks-- )
    {
      /* Encrypt the counter. */
      burn_depth = encrypt_fn (ctx, tmp.x1, ctr);
      /* XOR the input with the encrypted counter and store in output. */
      cipher_block_xor(outbuf, tmp.x1, inbuf, BLOCKSIZE);
      outbuf += BLOCKSIZE;
      inbuf += BLOCKSIZE;
      /* Increment the counter. */
      cipher_block_add(ctr, 1, BLOCKSIZE);
    }

  /* Do not leave keystream material on the stack. */
  wipememory(&tmp, sizeof(tmp));

  if (burn_depth)
    _gcry_burn_stack (burn_depth + 4 * sizeof(void *));
}


#if !defined(USE_ARM_ASM) && !defined(USE_AMD64_ASM)
/* Decrypt one block.  A and B may be the same.
*/
/* Portable C implementation of one-block AES decryption using the
   decT look-up table (equivalent inverse cipher form).  Works on the
   prepared decryption key schedule keyschdec32; round keys are
   consumed from rk[rounds] down to rk[0].  Returns the stack burn
   depth for _gcry_burn_stack. */
static unsigned int
do_decrypt_fn (const RIJNDAEL_context *ctx, unsigned char *b,
               const unsigned char *a)
{
#define rk (ctx->keyschdec32)
  int rounds = ctx->rounds;
  int r;
  u32 sa[4];   /* Current state columns. */
  u32 sb[4];   /* Scratch for the next state. */

  /* Load ciphertext little-endian and apply the initial AddRoundKey. */
  sb[0] = buf_get_le32(a + 0);
  sb[1] = buf_get_le32(a + 4);
  sb[2] = buf_get_le32(a + 8);
  sb[3] = buf_get_le32(a + 12);

  sa[0] = sb[0] ^ rk[rounds][0];
  sa[1] = sb[1] ^ rk[rounds][1];
  sa[2] = sb[2] ^ rk[rounds][2];
  sa[3] = sb[3] ^ rk[rounds][3];

  /* Middle rounds, manually unrolled two at a time; each quarter
     performs InvShiftRows+InvSubBytes+InvMixColumns via decT and then
     adds the round key. */
  for (r = rounds - 1; r > 1; r--)
    {
      sb[0] = rol(decT[(byte)(sa[0] >> (0 * 8))], (0 * 8));
      sb[1] = rol(decT[(byte)(sa[0] >> (1 * 8))], (1 * 8));
      sb[2] = rol(decT[(byte)(sa[0] >> (2 * 8))], (2 * 8));
      sb[3] = rol(decT[(byte)(sa[0] >> (3 * 8))], (3 * 8));
      sa[0] = rk[r][0] ^ sb[0];

      sb[1] ^= rol(decT[(byte)(sa[1] >> (0 * 8))], (0 * 8));
      sb[2] ^= rol(decT[(byte)(sa[1] >> (1 * 8))], (1 * 8));
      sb[3] ^= rol(decT[(byte)(sa[1] >> (2 * 8))], (2 * 8));
      sa[0] ^= rol(decT[(byte)(sa[1] >> (3 * 8))], (3 * 8));
      sa[1] = rk[r][1] ^ sb[1];

      sb[2] ^= rol(decT[(byte)(sa[2] >> (0 * 8))], (0 * 8));
      sb[3] ^= rol(decT[(byte)(sa[2] >> (1 * 8))], (1 * 8));
      sa[0] ^= rol(decT[(byte)(sa[2] >> (2 * 8))], (2 * 8));
      sa[1] ^= rol(decT[(byte)(sa[2] >> (3 * 8))], (3 * 8));
      sa[2] = rk[r][2] ^ sb[2];

      sb[3] ^= rol(decT[(byte)(sa[3] >> (0 * 8))], (0 * 8));
      sa[0] ^= rol(decT[(byte)(sa[3] >> (1 * 8))], (1 * 8));
      sa[1] ^= rol(decT[(byte)(sa[3] >> (2 * 8))], (2 * 8));
      sa[2] ^= rol(decT[(byte)(sa[3] >> (3 * 8))], (3 * 8));
      sa[3] = rk[r][3] ^ sb[3];

      r--;

      sb[0] = rol(decT[(byte)(sa[0] >> (0 * 8))], (0 * 8));
      sb[1] = rol(decT[(byte)(sa[0] >> (1 * 8))], (1 * 8));
      sb[2] = rol(decT[(byte)(sa[0] >> (2 * 8))], (2 * 8));
      sb[3] = rol(decT[(byte)(sa[0] >> (3 * 8))], (3 * 8));
      sa[0] = rk[r][0] ^ sb[0];

      sb[1] ^= rol(decT[(byte)(sa[1] >> (0 * 8))], (0 * 8));
      sb[2] ^= rol(decT[(byte)(sa[1] >> (1 * 8))], (1 * 8));
      sb[3] ^= rol(decT[(byte)(sa[1] >> (2 * 8))], (2 * 8));
      sa[0] ^= rol(decT[(byte)(sa[1] >> (3 * 8))], (3 * 8));
      sa[1] = rk[r][1] ^ sb[1];

      sb[2] ^= rol(decT[(byte)(sa[2] >> (0 * 8))], (0 * 8));
      sb[3] ^= rol(decT[(byte)(sa[2] >> (1 * 8))], (1 * 8));
      sa[0] ^= rol(decT[(byte)(sa[2] >> (2 * 8))], (2 * 8));
      sa[1] ^= rol(decT[(byte)(sa[2] >> (3 * 8))], (3 * 8));
      sa[2] = rk[r][2] ^ sb[2];

      sb[3] ^= rol(decT[(byte)(sa[3] >> (0 * 8))], (0 * 8));
      sa[0] ^= rol(decT[(byte)(sa[3] >> (1 * 8))], (1 * 8));
      sa[1] ^= rol(decT[(byte)(sa[3] >> (2 * 8))], (2 * 8));
      sa[2] ^= rol(decT[(byte)(sa[3] >> (3 * 8))], (3 * 8));
      sa[3] = rk[r][3] ^ sb[3];
    }

  /* Round 1 (the loop stops at r > 1). */
  sb[0] = rol(decT[(byte)(sa[0] >> (0 * 8))], (0 * 8));
  sb[1] = rol(decT[(byte)(sa[0] >> (1 * 8))], (1 * 8));
  sb[2] = rol(decT[(byte)(sa[0] >> (2 * 8))], (2 * 8));
  sb[3] = rol(decT[(byte)(sa[0] >> (3 * 8))], (3 * 8));
  sa[0] = rk[1][0] ^ sb[0];

  sb[1] ^= rol(decT[(byte)(sa[1] >> (0 * 8))], (0 * 8));
  sb[2] ^= rol(decT[(byte)(sa[1] >> (1 * 8))], (1 * 8));
  sb[3] ^= rol(decT[(byte)(sa[1] >> (2 * 8))], (2 * 8));
  sa[0] ^= rol(decT[(byte)(sa[1] >> (3 * 8))], (3 * 8));
  sa[1] = rk[1][1] ^ sb[1];

  sb[2] ^= rol(decT[(byte)(sa[2] >> (0 * 8))], (0 * 8));
  sb[3] ^= rol(decT[(byte)(sa[2] >> (1 * 8))], (1 * 8));
  sa[0] ^= rol(decT[(byte)(sa[2] >> (2 * 8))], (2 * 8));
  sa[1] ^= rol(decT[(byte)(sa[2] >> (3 * 8))], (3 * 8));
  sa[2] = rk[1][2] ^ sb[2];

  sb[3] ^= rol(decT[(byte)(sa[3] >> (0 * 8))], (0 * 8));
  sa[0] ^= rol(decT[(byte)(sa[3] >> (1 * 8))], (1 * 8));
  sa[1] ^= rol(decT[(byte)(sa[3] >> (2 * 8))], (2 * 8));
  sa[2] ^= rol(decT[(byte)(sa[3] >> (3 * 8))], (3 * 8));
  sa[3] = rk[1][3] ^ sb[3];

  /* Last round is special: InvSubBytes via inv_sbox (no InvMixColumns)
     followed by the final AddRoundKey with rk[0]. */
  sb[0] = (u32)inv_sbox[(byte)(sa[0] >> (0 * 8))] << (0 * 8);
  sb[1] = (u32)inv_sbox[(byte)(sa[0] >> (1 * 8))] << (1 * 8);
  sb[2] = (u32)inv_sbox[(byte)(sa[0] >> (2 * 8))] << (2 * 8);
  sb[3] = (u32)inv_sbox[(byte)(sa[0] >> (3 * 8))] << (3 * 8);
  sa[0] = sb[0] ^ rk[0][0];

  sb[1] ^= (u32)inv_sbox[(byte)(sa[1] >> (0 * 8))] << (0 * 8);
  sb[2] ^= (u32)inv_sbox[(byte)(sa[1] >> (1 * 8))] << (1 * 8);
  sb[3] ^= (u32)inv_sbox[(byte)(sa[1] >> (2 * 8))] << (2 * 8);
  sa[0] ^= (u32)inv_sbox[(byte)(sa[1] >> (3 * 8))] << (3 * 8);
  sa[1] = sb[1] ^ rk[0][1];

  sb[2] ^= (u32)inv_sbox[(byte)(sa[2] >> (0 * 8))] << (0 * 8);
  sb[3] ^= (u32)inv_sbox[(byte)(sa[2] >> (1 * 8))] << (1 * 8);
  sa[0] ^= (u32)inv_sbox[(byte)(sa[2] >> (2 * 8))] << (2 * 8);
  sa[1] ^= (u32)inv_sbox[(byte)(sa[2] >> (3 * 8))] << (3 * 8);
  sa[2] = sb[2] ^ rk[0][2];

  sb[3] ^= (u32)inv_sbox[(byte)(sa[3] >> (0 * 8))] << (0 * 8);
  sa[0] ^= (u32)inv_sbox[(byte)(sa[3] >> (1 * 8))] << (1 * 8);
  sa[1] ^= (u32)inv_sbox[(byte)(sa[3] >> (2 * 8))] << (2 * 8);
  sa[2] ^= (u32)inv_sbox[(byte)(sa[3] >> (3 * 8))] << (3 * 8);
  sa[3] = sb[3] ^ rk[0][3];

  /* Store the plaintext block little-endian. */
  buf_put_le32(b + 0, sa[0]);
  buf_put_le32(b + 4, sa[1]);
  buf_put_le32(b + 8, sa[2]);
  buf_put_le32(b + 12, sa[3]);
#undef rk

  /* Return value is the stack burn depth for _gcry_burn_stack. */
  return (56+2*sizeof(int));
}
#endif /*!USE_ARM_ASM && !USE_AMD64_ASM*/


/* Decrypt one block.  AX and BX may be the same.
*/ static unsigned int do_decrypt (const RIJNDAEL_context *ctx, unsigned char *bx, const unsigned char *ax) { #ifdef USE_AMD64_ASM return _gcry_aes_amd64_decrypt_block(ctx->keyschdec, bx, ax, ctx->rounds, dec_tables.T); #elif defined(USE_ARM_ASM) return _gcry_aes_arm_decrypt_block(ctx->keyschdec, bx, ax, ctx->rounds, dec_tables.T); #else return do_decrypt_fn (ctx, bx, ax); #endif /*!USE_ARM_ASM && !USE_AMD64_ASM*/ } static inline void check_decryption_preparation (RIJNDAEL_context *ctx) { if ( !ctx->decryption_prepared ) { ctx->prepare_decryption ( ctx ); ctx->decryption_prepared = 1; } } static unsigned int rijndael_decrypt (void *context, byte *b, const byte *a) { RIJNDAEL_context *ctx = context; check_decryption_preparation (ctx); if (ctx->prefetch_dec_fn) ctx->prefetch_dec_fn(); return ctx->decrypt_fn (ctx, b, a); } /* Bulk decryption of complete blocks in CFB mode. Caller needs to make sure that IV is aligned on an unsigned long boundary. This function is only intended for the bulk encryption feature of cipher.c. */ static void _gcry_aes_cfb_dec (void *context, unsigned char *iv, void *outbuf_arg, const void *inbuf_arg, size_t nblocks) { RIJNDAEL_context *ctx = context; unsigned char *outbuf = outbuf_arg; const unsigned char *inbuf = inbuf_arg; unsigned int burn_depth = 0; rijndael_cryptfn_t encrypt_fn = ctx->encrypt_fn; if (ctx->prefetch_enc_fn) ctx->prefetch_enc_fn(); for ( ;nblocks; nblocks-- ) { burn_depth = encrypt_fn (ctx, iv, iv); cipher_block_xor_n_copy(outbuf, iv, inbuf, BLOCKSIZE); outbuf += BLOCKSIZE; inbuf += BLOCKSIZE; } if (burn_depth) _gcry_burn_stack (burn_depth + 4 * sizeof(void *)); } /* Bulk decryption of complete blocks in CBC mode. Caller needs to make sure that IV is aligned on an unsigned long boundary. This function is only intended for the bulk encryption feature of cipher.c. 
*/
static void
_gcry_aes_cbc_dec (void *context, unsigned char *iv,
                   void *outbuf_arg, const void *inbuf_arg,
                   size_t nblocks)
{
  RIJNDAEL_context *ctx = context;
  unsigned char *outbuf = outbuf_arg;
  const unsigned char *inbuf = inbuf_arg;
  unsigned int burn_depth = 0;
  unsigned char savebuf[BLOCKSIZE] ATTR_ALIGNED_16;
  rijndael_cryptfn_t decrypt_fn = ctx->decrypt_fn;

  check_decryption_preparation (ctx);

  if (ctx->prefetch_dec_fn)
    ctx->prefetch_dec_fn();

  for ( ;nblocks; nblocks-- )
    {
      /* INBUF is needed later and it may be identical to OUTBUF, so store
         the intermediate result to SAVEBUF.  */
      burn_depth = decrypt_fn (ctx, savebuf, inbuf);

      /* P_i = D(C_i) xor IV; then IV <- C_i (copied before OUTBUF may
         overwrite it).  */
      cipher_block_xor_n_copy_2(outbuf, savebuf, iv, inbuf, BLOCKSIZE);
      inbuf += BLOCKSIZE;
      outbuf += BLOCKSIZE;
    }

  /* SAVEBUF held plaintext; clear it before leaving.  */
  wipememory(savebuf, sizeof(savebuf));

  if (burn_depth)
    _gcry_burn_stack (burn_depth + 4 * sizeof(void *));
}


/* Bulk encryption/decryption of complete blocks in OCB mode.
   C->u_iv.iv is the running Offset, C->u_ctr.ctr the running Checksum;
   returns 0 (all blocks processed).  */
static size_t
_gcry_aes_ocb_crypt (gcry_cipher_hd_t c, void *outbuf_arg,
                     const void *inbuf_arg, size_t nblocks, int encrypt)
{
  RIJNDAEL_context *ctx = (void *)&c->context.c;
  unsigned char *outbuf = outbuf_arg;
  const unsigned char *inbuf = inbuf_arg;
  unsigned int burn_depth = 0;

  if (encrypt)
    {
      union { unsigned char x1[16] ATTR_ALIGNED_16; u32 x32[4]; } l_tmp;
      rijndael_cryptfn_t encrypt_fn = ctx->encrypt_fn;

      if (ctx->prefetch_enc_fn)
        ctx->prefetch_enc_fn();

      for ( ;nblocks; nblocks-- )
        {
          u64 i = ++c->u_mode.ocb.data_nblocks;
          const unsigned char *l = ocb_get_l(c, i);

          /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
          cipher_block_xor_1 (c->u_iv.iv, l, BLOCKSIZE);
          cipher_block_cpy (l_tmp.x1, inbuf, BLOCKSIZE);
          /* Checksum_i = Checksum_{i-1} xor P_i  */
          cipher_block_xor_1 (c->u_ctr.ctr, l_tmp.x1, BLOCKSIZE);
          /* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i)  */
          cipher_block_xor_1 (l_tmp.x1, c->u_iv.iv, BLOCKSIZE);
          burn_depth = encrypt_fn (ctx, l_tmp.x1, l_tmp.x1);
          cipher_block_xor_1 (l_tmp.x1, c->u_iv.iv, BLOCKSIZE);
          cipher_block_cpy (outbuf, l_tmp.x1, BLOCKSIZE);

          inbuf += BLOCKSIZE;
          outbuf += BLOCKSIZE;
        }
    }
  else
    {
      union { unsigned char x1[16] ATTR_ALIGNED_16; u32 x32[4]; } l_tmp;
      rijndael_cryptfn_t decrypt_fn = ctx->decrypt_fn;

      check_decryption_preparation (ctx);

      if (ctx->prefetch_dec_fn)
        ctx->prefetch_dec_fn();

      for ( ;nblocks; nblocks-- )
        {
          u64 i = ++c->u_mode.ocb.data_nblocks;
          const unsigned char *l = ocb_get_l(c, i);

          /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
          cipher_block_xor_1 (c->u_iv.iv, l, BLOCKSIZE);
          cipher_block_cpy (l_tmp.x1, inbuf, BLOCKSIZE);
          /* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i)  */
          cipher_block_xor_1 (l_tmp.x1, c->u_iv.iv, BLOCKSIZE);
          burn_depth = decrypt_fn (ctx, l_tmp.x1, l_tmp.x1);
          cipher_block_xor_1 (l_tmp.x1, c->u_iv.iv, BLOCKSIZE);
          /* Checksum_i = Checksum_{i-1} xor P_i  (after recovering P_i) */
          cipher_block_xor_1 (c->u_ctr.ctr, l_tmp.x1, BLOCKSIZE);
          cipher_block_cpy (outbuf, l_tmp.x1, BLOCKSIZE);

          inbuf += BLOCKSIZE;
          outbuf += BLOCKSIZE;
        }
    }

  if (burn_depth)
    _gcry_burn_stack (burn_depth + 4 * sizeof(void *));

  return 0;
}


/* Bulk authentication of complete blocks in OCB mode.
*/
static size_t
_gcry_aes_ocb_auth (gcry_cipher_hd_t c, const void *abuf_arg, size_t nblocks)
{
  RIJNDAEL_context *ctx = (void *)&c->context.c;
  const unsigned char *abuf = abuf_arg;
  unsigned int burn_depth = 0;
  union { unsigned char x1[16] ATTR_ALIGNED_16; u32 x32[4]; } l_tmp;
  rijndael_cryptfn_t encrypt_fn = ctx->encrypt_fn;

  if (ctx->prefetch_enc_fn)
    ctx->prefetch_enc_fn();

  for ( ;nblocks; nblocks-- )
    {
      u64 i = ++c->u_mode.ocb.aad_nblocks;
      const unsigned char *l = ocb_get_l(c, i);

      /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
      cipher_block_xor_1 (c->u_mode.ocb.aad_offset, l, BLOCKSIZE);
      /* Sum_i = Sum_{i-1} xor ENCIPHER(K, A_i xor Offset_i)  */
      cipher_block_xor (l_tmp.x1, c->u_mode.ocb.aad_offset, abuf, BLOCKSIZE);
      burn_depth = encrypt_fn (ctx, l_tmp.x1, l_tmp.x1);
      cipher_block_xor_1 (c->u_mode.ocb.aad_sum, l_tmp.x1, BLOCKSIZE);

      abuf += BLOCKSIZE;
    }

  wipememory(&l_tmp, sizeof(l_tmp));

  if (burn_depth)
    _gcry_burn_stack (burn_depth + 4 * sizeof(void *));

  return 0;
}


/* Bulk encryption/decryption of complete blocks in XTS mode.
   TWEAK is read as two little-endian 64-bit halves and updated in
   place for the next call.  */
static void
_gcry_aes_xts_crypt (void *context, unsigned char *tweak,
                     void *outbuf_arg, const void *inbuf_arg,
                     size_t nblocks, int encrypt)
{
  RIJNDAEL_context *ctx = context;
  unsigned char *outbuf = outbuf_arg;
  const unsigned char *inbuf = inbuf_arg;
  unsigned int burn_depth = 0;
  rijndael_cryptfn_t crypt_fn;
  u64 tweak_lo, tweak_hi, tweak_next_lo, tweak_next_hi, tmp_lo, tmp_hi, carry;

  if (encrypt)
    {
      if (ctx->prefetch_enc_fn)
        ctx->prefetch_enc_fn();

      crypt_fn = ctx->encrypt_fn;
    }
  else
    {
      check_decryption_preparation (ctx);

      if (ctx->prefetch_dec_fn)
        ctx->prefetch_dec_fn();

      crypt_fn = ctx->decrypt_fn;
    }

  tweak_next_lo = buf_get_le64 (tweak + 0);
  tweak_next_hi = buf_get_le64 (tweak + 8);

  while (nblocks)
    {
      tweak_lo = tweak_next_lo;
      tweak_hi = tweak_next_hi;

      /* Xor-Encrypt/Decrypt-Xor block. */
      tmp_lo = buf_get_le64 (inbuf + 0) ^ tweak_lo;
      tmp_hi = buf_get_le64 (inbuf + 8) ^ tweak_hi;

      buf_put_le64 (outbuf + 0, tmp_lo);
      buf_put_le64 (outbuf + 8, tmp_hi);

      /* Generate next tweak: multiply by x in GF(2^128) with the XTS
         reduction polynomial 0x87, branch-free via the carry mask.  */
      carry = -(tweak_next_hi >> 63) & 0x87;
      tweak_next_hi = (tweak_next_hi << 1) + (tweak_next_lo >> 63);
      tweak_next_lo = (tweak_next_lo << 1) ^ carry;

      burn_depth = crypt_fn (ctx, outbuf, outbuf);

      buf_put_le64 (outbuf + 0, buf_get_le64 (outbuf + 0) ^ tweak_lo);
      buf_put_le64 (outbuf + 8, buf_get_le64 (outbuf + 8) ^ tweak_hi);

      outbuf += GCRY_XTS_BLOCK_LEN;
      inbuf += GCRY_XTS_BLOCK_LEN;
      nblocks--;
    }

  /* Save the tweak for the next invocation.  */
  buf_put_le64 (tweak + 0, tweak_next_lo);
  buf_put_le64 (tweak + 8, tweak_next_hi);

  if (burn_depth)
    _gcry_burn_stack (burn_depth + 5 * sizeof(void *));
}


/* Run the self-tests for AES 128.  Returns NULL on success. */
static const char*
selftest_basic_128 (void)
{
  RIJNDAEL_context *ctx;
  /* Over-allocate so the context can be placed on a 16-byte boundary.  */
  unsigned char ctxmem[sizeof(*ctx) + 16];
  unsigned char scratch[16];
  cipher_bulk_ops_t bulk_ops;

  /* The test vectors are from the AES supplied ones; more or less
     randomly taken from ecb_tbl.txt (I=42,81,14) */
#if 1
  static const unsigned char plaintext_128[16] =
    {
      0x01,0x4B,0xAF,0x22,0x78,0xA6,0x9D,0x33,
      0x1D,0x51,0x80,0x10,0x36,0x43,0xE9,0x9A
    };
  static const unsigned char key_128[16] =
    {
      0xE8,0xE9,0xEA,0xEB,0xED,0xEE,0xEF,0xF0,
      0xF2,0xF3,0xF4,0xF5,0xF7,0xF8,0xF9,0xFA
    };
  static const unsigned char ciphertext_128[16] =
    {
      0x67,0x43,0xC3,0xD1,0x51,0x9A,0xB4,0xF2,
      0xCD,0x9A,0x78,0xAB,0x09,0xA5,0x11,0xBD
    };
#else
  /* Test vectors from fips-197, appendix C. */
# warning debug test vectors in use
  static const unsigned char plaintext_128[16] =
    {
      0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77,
      0x88,0x99,0xaa,0xbb,0xcc,0xdd,0xee,0xff
    };
  static const unsigned char key_128[16] =
    {
      0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,
      0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f
      /* 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6, */
      /* 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c */
    };
  static const unsigned char ciphertext_128[16] =
    {
      0x69,0xc4,0xe0,0xd8,0x6a,0x7b,0x04,0x30,
      0xd8,0xcd,0xb7,0x80,0x70,0xb4,0xc5,0x5a
    };
#endif

  /* Align CTX to the next 16-byte boundary inside CTXMEM.  */
  ctx = (void *)(ctxmem + ((16 - ((uintptr_t)ctxmem & 15)) & 15));

  rijndael_setkey (ctx, key_128, sizeof (key_128), &bulk_ops);
  rijndael_encrypt (ctx, scratch, plaintext_128);
  if (memcmp (scratch, ciphertext_128, sizeof (ciphertext_128)))
    {
      return "AES-128 test encryption failed.";
    }
  rijndael_decrypt (ctx, scratch, scratch);
  if (memcmp (scratch, plaintext_128, sizeof (plaintext_128)))
    return "AES-128 test decryption failed.";

  return NULL;
}

/* Run the self-tests for AES 192.  Returns NULL on success.
*/
static const char*
selftest_basic_192 (void)
{
  static unsigned char plaintext_192[16] =
    {
      0x76,0x77,0x74,0x75,0xF1,0xF2,0xF3,0xF4,
      0xF8,0xF9,0xE6,0xE7,0x77,0x70,0x71,0x72
    };
  static unsigned char key_192[24] =
    {
      0x04,0x05,0x06,0x07,0x09,0x0A,0x0B,0x0C,
      0x0E,0x0F,0x10,0x11,0x13,0x14,0x15,0x16,
      0x18,0x19,0x1A,0x1B,0x1D,0x1E,0x1F,0x20
    };
  static const unsigned char ciphertext_192[16] =
    {
      0x5D,0x1E,0xF2,0x0D,0xCE,0xD6,0xBC,0xBC,
      0x12,0x13,0x1A,0xC7,0xC5,0x47,0x88,0xAA
    };
  RIJNDAEL_context *ctx;
  unsigned char ctxmem[sizeof(*ctx) + 16];
  unsigned char tmp[16];
  cipher_bulk_ops_t bulk_ops;

  /* Place the context on the next 16-byte boundary inside CTXMEM.  */
  ctx = (void *)(ctxmem + ((16 - ((uintptr_t)ctxmem & 15)) & 15));

  /* Known-answer test: encrypt and check, then decrypt in place and
     check that the original plaintext comes back.  */
  rijndael_setkey (ctx, key_192, sizeof(key_192), &bulk_ops);
  rijndael_encrypt (ctx, tmp, plaintext_192);
  if (memcmp (tmp, ciphertext_192, sizeof (ciphertext_192)) != 0)
    return "AES-192 test encryption failed.";

  rijndael_decrypt (ctx, tmp, tmp);
  if (memcmp (tmp, plaintext_192, sizeof (plaintext_192)) != 0)
    return "AES-192 test decryption failed.";

  return NULL;
}

/* Run the self-tests for AES 256.  Returns NULL on success.
*/ static const char* selftest_basic_256 (void) { RIJNDAEL_context *ctx; unsigned char ctxmem[sizeof(*ctx) + 16]; unsigned char scratch[16]; cipher_bulk_ops_t bulk_ops; static unsigned char plaintext_256[16] = { 0x06,0x9A,0x00,0x7F,0xC7,0x6A,0x45,0x9F, 0x98,0xBA,0xF9,0x17,0xFE,0xDF,0x95,0x21 }; static unsigned char key_256[32] = { 0x08,0x09,0x0A,0x0B,0x0D,0x0E,0x0F,0x10, 0x12,0x13,0x14,0x15,0x17,0x18,0x19,0x1A, 0x1C,0x1D,0x1E,0x1F,0x21,0x22,0x23,0x24, 0x26,0x27,0x28,0x29,0x2B,0x2C,0x2D,0x2E }; static const unsigned char ciphertext_256[16] = { 0x08,0x0E,0x95,0x17,0xEB,0x16,0x77,0x71, 0x9A,0xCF,0x72,0x80,0x86,0x04,0x0A,0xE3 }; ctx = (void *)(ctxmem + ((16 - ((uintptr_t)ctxmem & 15)) & 15)); rijndael_setkey (ctx, key_256, sizeof(key_256), &bulk_ops); rijndael_encrypt (ctx, scratch, plaintext_256); if (memcmp (scratch, ciphertext_256, sizeof (ciphertext_256))) { return "AES-256 test encryption failed."; } rijndael_decrypt (ctx, scratch, scratch); if (memcmp (scratch, plaintext_256, sizeof (plaintext_256))) return "AES-256 test decryption failed."; return NULL; } /* Run all the self-tests and return NULL on success. This function is used for the on-the-fly self-tests. */ static const char * selftest (void) { const char *r; if ( (r = selftest_basic_128 ()) || (r = selftest_basic_192 ()) || (r = selftest_basic_256 ()) ) return r; return r; } /* SP800-38a.pdf for AES-128. 
*/
static const char *
selftest_fips_128_38a (int requested_mode)
{
  /* Known-answer test vectors for CFB128-AES128 and OFB-AES128 from
     NIST SP 800-38A.  Each entry: mode, key, IV, four (input, output)
     block pairs processed in sequence.  */
  static const struct tv
  {
    int mode;
    const unsigned char key[16];
    const unsigned char iv[16];
    struct
    {
      const unsigned char input[16];
      const unsigned char output[16];
    } data[4];
  } tv[2] =
    {
      {
        GCRY_CIPHER_MODE_CFB,  /* F.3.13, CFB128-AES128 */
        { 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6,
          0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c },
        { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
          0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f },
        {
          { { 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96,
              0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a },
            { 0x3b, 0x3f, 0xd9, 0x2e, 0xb7, 0x2d, 0xad, 0x20,
              0x33, 0x34, 0x49, 0xf8, 0xe8, 0x3c, 0xfb, 0x4a } },

          { { 0xae, 0x2d, 0x8a, 0x57, 0x1e, 0x03, 0xac, 0x9c,
              0x9e, 0xb7, 0x6f, 0xac, 0x45, 0xaf, 0x8e, 0x51 },
            { 0xc8, 0xa6, 0x45, 0x37, 0xa0, 0xb3, 0xa9, 0x3f,
              0xcd, 0xe3, 0xcd, 0xad, 0x9f, 0x1c, 0xe5, 0x8b } },

          { { 0x30, 0xc8, 0x1c, 0x46, 0xa3, 0x5c, 0xe4, 0x11,
              0xe5, 0xfb, 0xc1, 0x19, 0x1a, 0x0a, 0x52, 0xef },
            { 0x26, 0x75, 0x1f, 0x67, 0xa3, 0xcb, 0xb1, 0x40,
              0xb1, 0x80, 0x8c, 0xf1, 0x87, 0xa4, 0xf4, 0xdf } },

          { { 0xf6, 0x9f, 0x24, 0x45, 0xdf, 0x4f, 0x9b, 0x17,
              0xad, 0x2b, 0x41, 0x7b, 0xe6, 0x6c, 0x37, 0x10 },
            { 0xc0, 0x4b, 0x05, 0x35, 0x7c, 0x5d, 0x1c, 0x0e,
              0xea, 0xc4, 0xc6, 0x6f, 0x9f, 0xf7, 0xf2, 0xe6 } }
        }
      },
      {
        GCRY_CIPHER_MODE_OFB,
        { 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6,
          0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c },
        { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
          0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f },
        {
          { { 0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96,
              0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a },
            { 0x3b, 0x3f, 0xd9, 0x2e, 0xb7, 0x2d, 0xad, 0x20,
              0x33, 0x34, 0x49, 0xf8, 0xe8, 0x3c, 0xfb, 0x4a } },

          { { 0xae, 0x2d, 0x8a, 0x57, 0x1e, 0x03, 0xac, 0x9c,
              0x9e, 0xb7, 0x6f, 0xac, 0x45, 0xaf, 0x8e, 0x51 },
            { 0x77, 0x89, 0x50, 0x8d, 0x16, 0x91, 0x8f, 0x03,
              0xf5, 0x3c, 0x52, 0xda, 0xc5, 0x4e, 0xd8, 0x25 } },

          { { 0x30, 0xc8, 0x1c, 0x46, 0xa3, 0x5c, 0xe4, 0x11,
              0xe5, 0xfb, 0xc1, 0x19, 0x1a, 0x0a, 0x52, 0xef },
            { 0x97, 0x40, 0x05, 0x1e, 0x9c, 0x5f, 0xec, 0xf6,
              0x43, 0x44, 0xf7, 0xa8, 0x22, 0x60, 0xed, 0xcc } },

          { { 0xf6, 0x9f, 0x24, 0x45, 0xdf, 0x4f, 0x9b, 0x17,
              0xad, 0x2b, 0x41, 0x7b, 0xe6, 0x6c, 0x37, 0x10 },
            { 0x30, 0x4c, 0x65, 0x28, 0xf6, 0x59, 0xc7, 0x78,
              0x66, 0xa5, 0x10, 0xd9, 0xc1, 0xd6, 0xae, 0x5e } },
        }
      }
    };
  unsigned char scratch[16];
  gpg_error_t err;
  int tvi, idx;
  gcry_cipher_hd_t hdenc = NULL;
  gcry_cipher_hd_t hddec = NULL;

  /* Close both handles and bail out with the given error string.  */
#define Fail(a) do {            \
    _gcry_cipher_close (hdenc); \
    _gcry_cipher_close (hddec); \
    return a;                   \
  } while (0)

  gcry_assert (sizeof tv[0].data[0].input == sizeof scratch);
  gcry_assert (sizeof tv[0].data[0].output == sizeof scratch);

  /* Pick the test vector matching REQUESTED_MODE.  */
  for (tvi=0; tvi < DIM (tv); tvi++)
    if (tv[tvi].mode == requested_mode)
      break;
  if (tvi == DIM (tv))
    Fail ("no test data for this mode");

  err = _gcry_cipher_open (&hdenc, GCRY_CIPHER_AES, tv[tvi].mode, 0);
  if (err)
    Fail ("open");
  err = _gcry_cipher_open (&hddec, GCRY_CIPHER_AES, tv[tvi].mode, 0);
  if (err)
    Fail ("open");
  err = _gcry_cipher_setkey (hdenc, tv[tvi].key, sizeof tv[tvi].key);
  if (!err)
    err = _gcry_cipher_setkey (hddec, tv[tvi].key, sizeof tv[tvi].key);
  if (err)
    Fail ("set key");
  err = _gcry_cipher_setiv (hdenc, tv[tvi].iv, sizeof tv[tvi].iv);
  if (!err)
    err = _gcry_cipher_setiv (hddec, tv[tvi].iv, sizeof tv[tvi].iv);
  if (err)
    Fail ("set IV");

  /* Run the four chained blocks through both directions.  */
  for (idx=0; idx < DIM (tv[tvi].data); idx++)
    {
      err = _gcry_cipher_encrypt (hdenc, scratch, sizeof scratch,
                                  tv[tvi].data[idx].input,
                                  sizeof tv[tvi].data[idx].input);
      if (err)
        Fail ("encrypt command");
      if (memcmp (scratch, tv[tvi].data[idx].output, sizeof scratch))
        Fail ("encrypt mismatch");
      err = _gcry_cipher_decrypt (hddec, scratch, sizeof scratch,
                                  tv[tvi].data[idx].output,
                                  sizeof tv[tvi].data[idx].output);
      if (err)
        Fail ("decrypt command");
      if (memcmp (scratch, tv[tvi].data[idx].input, sizeof scratch))
        Fail ("decrypt mismatch");
    }
#undef Fail
  _gcry_cipher_close (hdenc);
  _gcry_cipher_close (hddec);
  return NULL;
}


/* Complete selftest for AES-128 with all modes and driver code.  */
static gpg_err_code_t
selftest_fips_128 (int extended, selftest_report_func_t report)
{
  const char *what;
  const char *errtxt;

  what = "low-level";
  errtxt = selftest_basic_128 ();
  if (errtxt)
    goto failed;

  if (extended)
    {
      what = "cfb";
      errtxt = selftest_fips_128_38a (GCRY_CIPHER_MODE_CFB);
      if (errtxt)
        goto failed;

      what = "ofb";
      errtxt = selftest_fips_128_38a (GCRY_CIPHER_MODE_OFB);
      if (errtxt)
        goto failed;
    }

  return 0; /* Succeeded. */

 failed:
  if (report)
    report ("cipher", GCRY_CIPHER_AES128, what, errtxt);
  return GPG_ERR_SELFTEST_FAILED;
}

/* Complete selftest for AES-192.  */
static gpg_err_code_t
selftest_fips_192 (int extended, selftest_report_func_t report)
{
  const char *what;
  const char *errtxt;

  (void)extended; /* No extended tests available.  */

  what = "low-level";
  errtxt = selftest_basic_192 ();
  if (errtxt)
    goto failed;

  return 0; /* Succeeded. */

 failed:
  if (report)
    report ("cipher", GCRY_CIPHER_AES192, what, errtxt);
  return GPG_ERR_SELFTEST_FAILED;
}


/* Complete selftest for AES-256.  */
static gpg_err_code_t
selftest_fips_256 (int extended, selftest_report_func_t report)
{
  const char *what;
  const char *errtxt;

  (void)extended; /* No extended tests available.  */

  what = "low-level";
  errtxt = selftest_basic_256 ();
  if (errtxt)
    goto failed;

  return 0; /* Succeeded. */

 failed:
  if (report)
    report ("cipher", GCRY_CIPHER_AES256, what, errtxt);
  return GPG_ERR_SELFTEST_FAILED;
}


/* Run a full self-test for ALGO and return 0 on success.
*/
static gpg_err_code_t
run_selftests (int algo, int extended, selftest_report_func_t report)
{
  gpg_err_code_t ec;

  switch (algo)
    {
    case GCRY_CIPHER_AES128:
      ec = selftest_fips_128 (extended, report);
      break;
    case GCRY_CIPHER_AES192:
      ec = selftest_fips_192 (extended, report);
      break;
    case GCRY_CIPHER_AES256:
      ec = selftest_fips_256 (extended, report);
      break;
    default:
      ec = GPG_ERR_CIPHER_ALGO;
      break;
    }

  return ec;
}


/* Alias names accepted for AES-128.  */
static const char *rijndael_names[] =
  {
    "RIJNDAEL",
    "AES128",
    "AES-128",
    NULL
  };

/* NIST algorithm OIDs (2.16.840.1.101.3.4.1.x) mapping to AES-128
   cipher modes.  */
static const gcry_cipher_oid_spec_t rijndael_oids[] =
  {
    { "2.16.840.1.101.3.4.1.1", GCRY_CIPHER_MODE_ECB },
    { "2.16.840.1.101.3.4.1.2", GCRY_CIPHER_MODE_CBC },
    { "2.16.840.1.101.3.4.1.3", GCRY_CIPHER_MODE_OFB },
    { "2.16.840.1.101.3.4.1.4", GCRY_CIPHER_MODE_CFB },
    { "2.16.840.1.101.3.4.1.6", GCRY_CIPHER_MODE_GCM },
    { "2.16.840.1.101.3.4.1.7", GCRY_CIPHER_MODE_CCM },
    { NULL }
  };

/* Cipher specification for AES-128: block size 16, key size 128 bits.  */
gcry_cipher_spec_t _gcry_cipher_spec_aes =
  {
    GCRY_CIPHER_AES, {0, 1},
    "AES", rijndael_names, rijndael_oids, 16, 128,
    sizeof (RIJNDAEL_context),
    rijndael_setkey, rijndael_encrypt, rijndael_decrypt,
    NULL, NULL,
    run_selftests
  };


/* Alias names accepted for AES-192.  */
static const char *rijndael192_names[] =
  {
    "RIJNDAEL192",
    "AES-192",
    NULL
  };

/* NIST algorithm OIDs mapping to AES-192 cipher modes.  */
static const gcry_cipher_oid_spec_t rijndael192_oids[] =
  {
    { "2.16.840.1.101.3.4.1.21", GCRY_CIPHER_MODE_ECB },
    { "2.16.840.1.101.3.4.1.22", GCRY_CIPHER_MODE_CBC },
    { "2.16.840.1.101.3.4.1.23", GCRY_CIPHER_MODE_OFB },
    { "2.16.840.1.101.3.4.1.24", GCRY_CIPHER_MODE_CFB },
    { "2.16.840.1.101.3.4.1.26", GCRY_CIPHER_MODE_GCM },
    { "2.16.840.1.101.3.4.1.27", GCRY_CIPHER_MODE_CCM },
    { NULL }
  };

/* Cipher specification for AES-192: block size 16, key size 192 bits.  */
gcry_cipher_spec_t _gcry_cipher_spec_aes192 =
  {
    GCRY_CIPHER_AES192, {0, 1},
    "AES192", rijndael192_names, rijndael192_oids, 16, 192,
    sizeof (RIJNDAEL_context),
    rijndael_setkey, rijndael_encrypt, rijndael_decrypt,
    NULL, NULL,
    run_selftests
  };


/* Alias names accepted for AES-256.  */
static const char *rijndael256_names[] =
  {
    "RIJNDAEL256",
    "AES-256",
    NULL
  };

/* NIST algorithm OIDs mapping to AES-256 cipher modes.  */
static const gcry_cipher_oid_spec_t rijndael256_oids[] =
  {
    { "2.16.840.1.101.3.4.1.41", GCRY_CIPHER_MODE_ECB },
    { "2.16.840.1.101.3.4.1.42", GCRY_CIPHER_MODE_CBC },
    { "2.16.840.1.101.3.4.1.43", GCRY_CIPHER_MODE_OFB },
    { "2.16.840.1.101.3.4.1.44", GCRY_CIPHER_MODE_CFB },
    { "2.16.840.1.101.3.4.1.46", GCRY_CIPHER_MODE_GCM },
    { "2.16.840.1.101.3.4.1.47", GCRY_CIPHER_MODE_CCM },
    { NULL }
  };

/* Cipher specification for AES-256: block size 16, key size 256 bits.  */
gcry_cipher_spec_t _gcry_cipher_spec_aes256 =
  {
    GCRY_CIPHER_AES256, {0, 1},
    "AES256", rijndael256_names, rijndael256_oids, 16, 256,
    sizeof (RIJNDAEL_context),
    rijndael_setkey, rijndael_encrypt, rijndael_decrypt,
    NULL, NULL,
    run_selftests
  };