diff --git a/cipher/blake2.c b/cipher/blake2.c index 0243ed33..750c00c3 100644 --- a/cipher/blake2.c +++ b/cipher/blake2.c @@ -1,985 +1,986 @@ /* blake2.c - BLAKE2b and BLAKE2s hash functions (RFC 7693) * Copyright (C) 2017 Jussi Kivilinna * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser general Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . */ /* The code is based on public-domain/CC0 BLAKE2 reference implementation * by Samual Neves, at https://github.com/BLAKE2/BLAKE2/tree/master/ref * Copyright 2012, Samuel Neves */ #include #include #include "g10lib.h" #include "bithelp.h" #include "bufhelp.h" #include "cipher.h" #include "hash-common.h" /* USE_AVX indicates whether to compile with Intel AVX code. */ #undef USE_AVX #if defined(__x86_64__) && defined(HAVE_GCC_INLINE_ASM_AVX) && \ (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) # define USE_AVX 1 #endif /* USE_AVX2 indicates whether to compile with Intel AVX2 code. */ #undef USE_AVX2 #if defined(__x86_64__) && defined(HAVE_GCC_INLINE_ASM_AVX2) && \ (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) # define USE_AVX2 1 #endif /* AMD64 assembly implementations use SystemV ABI, ABI conversion and additional * stack to store XMM6-XMM15 needed on Win64. 
 */
#undef ASM_FUNC_ABI
#undef ASM_EXTRA_STACK
#if defined(USE_AVX2) && defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)
# define ASM_FUNC_ABI __attribute__((sysv_abi))
# define ASM_EXTRA_STACK (10 * 16)
#else
# define ASM_FUNC_ABI
# define ASM_EXTRA_STACK 0
#endif

/* Block, digest and key sizes fixed by RFC 7693. */
#define BLAKE2B_BLOCKBYTES 128
#define BLAKE2B_OUTBYTES 64
#define BLAKE2B_KEYBYTES 64
#define BLAKE2S_BLOCKBYTES 64
#define BLAKE2S_OUTBYTES 32
#define BLAKE2S_KEYBYTES 32

/* BLAKE2b chaining state: hash words h, 128-bit byte counter t, and
 * finalization flags f (f[0] set to all-ones for the last block). */
typedef struct
{
  u64 h[8];
  u64 t[2];
  u64 f[2];
} BLAKE2B_STATE;

/* BLAKE2b parameter block, laid out byte-for-byte as it is hashed
 * (RFC 7693, section 2.5). */
struct blake2b_param_s
{
  byte digest_length;
  byte key_length;
  byte fanout;
  byte depth;
  byte leaf_length[4];
  byte node_offset[4];
  byte xof_length[4];
  byte node_depth;
  byte inner_length;
  byte reserved[14];
  byte salt[16];
  byte personal[16];
};

/* NOTE: 'state' must remain the first member — blake2b_write passes
 * &ctx->state as the callback argument and blake2b_transform casts it
 * back to the enclosing context. */
typedef struct BLAKE2B_CONTEXT_S
{
  BLAKE2B_STATE state;
  byte buf[BLAKE2B_BLOCKBYTES];   /* partial input block / final digest */
  size_t buflen;                  /* bytes currently buffered in buf */
  size_t outlen;                  /* requested digest length in bytes */
#ifdef USE_AVX2
  unsigned int use_avx2:1;
#endif
} BLAKE2B_CONTEXT;

/* BLAKE2s chaining state; 32-bit analogue of BLAKE2B_STATE. */
typedef struct
{
  u32 h[8];
  u32 t[2];
  u32 f[2];
} BLAKE2S_STATE;

/* BLAKE2s parameter block (RFC 7693, section 2.5). */
struct blake2s_param_s
{
  byte digest_length;
  byte key_length;
  byte fanout;
  byte depth;
  byte leaf_length[4];
  byte node_offset[4];
  byte xof_length[2];
  byte node_depth;
  byte inner_length;
  /* byte reserved[0]; */
  byte salt[8];
  byte personal[8];
};

/* See the BLAKE2b context note above: 'state' must stay first. */
typedef struct BLAKE2S_CONTEXT_S
{
  BLAKE2S_STATE state;
  byte buf[BLAKE2S_BLOCKBYTES];
  size_t buflen;
  size_t outlen;
#ifdef USE_AVX
  unsigned int use_avx:1;
#endif
} BLAKE2S_CONTEXT;

/* Compression callback; returns a stack-burn estimate in bytes. */
typedef unsigned int (*blake2_transform_t)(void *S, const void *inblk,
                                           size_t nblks);

static const u64 blake2b_IV[8] =
{
  U64_C(0x6a09e667f3bcc908), U64_C(0xbb67ae8584caa73b),
  U64_C(0x3c6ef372fe94f82b), U64_C(0xa54ff53a5f1d36f1),
  U64_C(0x510e527fade682d1), U64_C(0x9b05688c2b3e6c1f),
  U64_C(0x1f83d9abfb41bd6b), U64_C(0x5be0cd19137e2179)
};

static const u32 blake2s_IV[8] =
{
  0x6A09E667UL, 0xBB67AE85UL, 0x3C6EF372UL, 0xA54FF53AUL,
  0x510E527FUL, 0x9B05688CUL, 0x1F83D9ABUL, 0x5BE0CD19UL
};

/* All-zero block used to pad a key up to one full input block. */
static byte zero_block[BLAKE2B_BLOCKBYTES] = { 0, };

/* Shared buffered-write helper for both variants; buffers partial
 * blocks in tmpbuf and feeds full blocks to transform_fn.  (Signature
 * continues on the next source region.) */
static void blake2_write(void *S, const void
 *inbuf, size_t inlen,
                         byte *tmpbuf, size_t *tmpbuflen, size_t blkbytes,
                         blake2_transform_t transform_fn)
{
  const byte* in = inbuf;
  unsigned int burn = 0;

  if (inlen > 0)
    {
      size_t left = *tmpbuflen;
      size_t fill = blkbytes - left;
      size_t nblks;

      if (inlen > fill)
        {
          if (fill > 0)
            buf_cpy (tmpbuf + left, in, fill); /* Fill buffer */
          left = 0;

          burn = transform_fn (S, tmpbuf, 1); /* Increment counter + Compress */

          in += fill;
          inlen -= fill;

          /* Compress all remaining full blocks, EXCEPT the last one when
           * the input ends exactly on a block boundary: the final block
           * must stay buffered so the finalizer can compress it with the
           * last-block flag set. */
          nblks = inlen / blkbytes - !(inlen % blkbytes);
          if (nblks)
            {
              burn = transform_fn(S, in, nblks);
              in += blkbytes * nblks;
              inlen -= blkbytes * nblks;
            }
        }

      /* The block-counting above guarantees 1..blkbytes bytes remain. */
      gcry_assert (inlen > 0);

      buf_cpy (tmpbuf + left, in, inlen);
      *tmpbuflen = left + inlen;
    }

  if (burn)
    _gcry_burn_stack (burn);

  return;
}

/* Mark the state finalized; checked by blake2b_is_lastblock. */
static inline void blake2b_set_lastblock(BLAKE2B_STATE *S)
{
  S->f[0] = U64_C(0xffffffffffffffff);
}

static inline int blake2b_is_lastblock(const BLAKE2B_STATE *S)
{
  return S->f[0] != 0;
}

/* 128-bit counter add.  'inc' may be negative at finalization
 * (buflen - BLOCKBYTES); the (inc < 0) term borrows from t[1]. */
static inline void blake2b_increment_counter(BLAKE2B_STATE *S, const int inc)
{
  S->t[0] += (u64)inc;
  S->t[1] += (S->t[0] < (u64)inc) - (inc < 0);
}

/* Rotate right; masking with 63 avoids UB for n == 0 or n == 64. */
static inline u64 rotr64(u64 x, u64 n)
{
  return ((x >> (n & 63)) | (x << ((64 - n) & 63)));
}

/* Generic (portable C) BLAKE2b compression of nblks consecutive
 * 128-byte blocks.  Returns a stack-burn estimate. */
static unsigned int blake2b_transform_generic(BLAKE2B_STATE *S,
                                              const void *inblks,
                                              size_t nblks)
{
  /* Message word schedule; rounds 10 and 11 repeat rounds 0 and 1
   * (RFC 7693, section 2.7). */
  static const byte blake2b_sigma[12][16] =
  {
    {  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15 },
    { 14, 10,  4,  8,  9, 15, 13,  6,  1, 12,  0,  2, 11,  7,  5,  3 },
    { 11,  8, 12,  0,  5,  2, 15, 13, 10, 14,  3,  6,  7,  1,  9,  4 },
    {  7,  9,  3,  1, 13, 12, 11, 14,  2,  6,  5, 10,  4,  0, 15,  8 },
    {  9,  0,  5,  7,  2,  4, 10, 15, 14,  1, 11, 12,  6,  8,  3, 13 },
    {  2, 12,  6, 10,  0, 11,  8,  3,  4, 13,  7,  5, 15, 14,  1,  9 },
    { 12,  5,  1, 15, 14, 13,  4, 10,  0,  7,  6,  3,  9,  2,  8, 11 },
    { 13, 11,  7, 14, 12,  1,  3,  9,  5,  0, 15,  4,  8,  6,  2, 10 },
    {  6, 15, 14,  9, 11,  3,  0,  8, 12,  2, 13,  7,  1,  4, 10,  5 },
    { 10,  2,  8,  4,  7,  6,  1,  5, 15, 11,  9, 14,  3, 12, 13,  0 },
    {  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15 },
    { 14, 10,  4,  8,  9, 15, 13,  6,  1, 12,  0,  2, 11,  7,  5,  3 }
  };
  const byte* in = inblks;
  u64 m[16];
  u64 v[16];

  while (nblks--)
    {
      /* Increment counter */
      blake2b_increment_counter (S, BLAKE2B_BLOCKBYTES);

      /* Compress: load the 16 little-endian message words... */
      m[0] = buf_get_le64 (in + 0 * sizeof(m[0]));
      m[1] = buf_get_le64 (in + 1 * sizeof(m[0]));
      m[2] = buf_get_le64 (in + 2 * sizeof(m[0]));
      m[3] = buf_get_le64 (in + 3 * sizeof(m[0]));
      m[4] = buf_get_le64 (in + 4 * sizeof(m[0]));
      m[5] = buf_get_le64 (in + 5 * sizeof(m[0]));
      m[6] = buf_get_le64 (in + 6 * sizeof(m[0]));
      m[7] = buf_get_le64 (in + 7 * sizeof(m[0]));
      m[8] = buf_get_le64 (in + 8 * sizeof(m[0]));
      m[9] = buf_get_le64 (in + 9 * sizeof(m[0]));
      m[10] = buf_get_le64 (in + 10 * sizeof(m[0]));
      m[11] = buf_get_le64 (in + 11 * sizeof(m[0]));
      m[12] = buf_get_le64 (in + 12 * sizeof(m[0]));
      m[13] = buf_get_le64 (in + 13 * sizeof(m[0]));
      m[14] = buf_get_le64 (in + 14 * sizeof(m[0]));
      m[15] = buf_get_le64 (in + 15 * sizeof(m[0]));

      /* ...initialize the work vector: chaining value in v[0..7],
       * IV mixed with counter/flags in v[8..15] (RFC 7693, 3.2). */
      v[ 0] = S->h[0];
      v[ 1] = S->h[1];
      v[ 2] = S->h[2];
      v[ 3] = S->h[3];
      v[ 4] = S->h[4];
      v[ 5] = S->h[5];
      v[ 6] = S->h[6];
      v[ 7] = S->h[7];
      v[ 8] = blake2b_IV[0];
      v[ 9] = blake2b_IV[1];
      v[10] = blake2b_IV[2];
      v[11] = blake2b_IV[3];
      v[12] = blake2b_IV[4] ^ S->t[0];
      v[13] = blake2b_IV[5] ^ S->t[1];
      v[14] = blake2b_IV[6] ^ S->f[0];
      v[15] = blake2b_IV[7] ^ S->f[1];

      /* Quarter-round mixing function G (rotations 32/24/16/63). */
#define G(r,i,a,b,c,d)                      \
  do {                                      \
    a = a + b + m[blake2b_sigma[r][2*i+0]]; \
    d = rotr64(d ^ a, 32);                  \
    c = c + d;                              \
    b = rotr64(b ^ c, 24);                  \
    a = a + b + m[blake2b_sigma[r][2*i+1]]; \
    d = rotr64(d ^ a, 16);                  \
    c = c + d;                              \
    b = rotr64(b ^ c, 63);                  \
  } while(0)

#define ROUND(r)                    \
  do {                              \
    G(r,0,v[ 0],v[ 4],v[ 8],v[12]); \
    G(r,1,v[ 1],v[ 5],v[ 9],v[13]); \
    G(r,2,v[ 2],v[ 6],v[10],v[14]); \
    G(r,3,v[ 3],v[ 7],v[11],v[15]); \
    G(r,4,v[ 0],v[ 5],v[10],v[15]); \
    G(r,5,v[ 1],v[ 6],v[11],v[12]); \
    G(r,6,v[ 2],v[ 7],v[ 8],v[13]); \
    G(r,7,v[ 3],v[ 4],v[ 9],v[14]); \
  } while(0)

      /* BLAKE2b uses 12 rounds. */
      ROUND(0);
      ROUND(1);
      ROUND(2);
      ROUND(3);
      ROUND(4);
      ROUND(5);
      ROUND(6);
      ROUND(7);
      ROUND(8);
      ROUND(9);
      ROUND(10);
      ROUND(11);

#undef G
#undef ROUND

      /* Feed-forward into the chaining value. */
      S->h[0] = S->h[0] ^ v[0] ^ v[0 + 8];
      S->h[1] = S->h[1] ^ v[1] ^ v[1 + 8];
      S->h[2] = S->h[2] ^ v[2] ^ v[2 + 8];
      S->h[3] = S->h[3] ^ v[3] ^ v[3 + 8];
      S->h[4] = S->h[4] ^ v[4] ^ v[4 + 8];
      S->h[5] = S->h[5] ^ v[5] ^ v[5 + 8];
      S->h[6] = S->h[6] ^ v[6] ^ v[6 + 8];
      S->h[7] = S->h[7] ^ v[7] ^ v[7 + 8];

      in += BLAKE2B_BLOCKBYTES;
    }

  /* Stack-burn estimate: locals of this function. */
  return sizeof(void *) * 4 + sizeof(u64) * 16 * 2;
}

#ifdef USE_AVX2
unsigned int _gcry_blake2b_transform_amd64_avx2(BLAKE2B_STATE *S,
                                                const void *inblks,
                                                size_t nblks) ASM_FUNC_ABI;
#endif

/* Dispatch to the fastest available compression routine.  'ctx' is a
 * BLAKE2B_CONTEXT (callers may pass &ctx->state since state is the
 * context's first member). */
static unsigned int blake2b_transform(void *ctx, const void *inblks,
                                      size_t nblks)
{
  BLAKE2B_CONTEXT *c = ctx;
  unsigned int nburn;

  if (0)
    {}
#ifdef USE_AVX2
  if (c->use_avx2)
    nburn = _gcry_blake2b_transform_amd64_avx2(&c->state, inblks, nblks);
#endif
  else
    nburn = blake2b_transform_generic(&c->state, inblks, nblks);

  /* Win64 assembly needs extra stack wiped for saved XMM registers. */
  if (nburn)
    nburn += ASM_EXTRA_STACK;

  return nburn;
}

/* Pad and compress the final buffered block, then serialize the full
 * 64-byte digest little-endian into c->buf.  Idempotent. */
static void blake2b_final(void *ctx)
{
  BLAKE2B_CONTEXT *c = ctx;
  BLAKE2B_STATE *S = &c->state;
  unsigned int burn;
  size_t i;

  gcry_assert (sizeof(c->buf) >= c->outlen);
  if (blake2b_is_lastblock(S))
    return;

  if (c->buflen < BLAKE2B_BLOCKBYTES)
    memset (c->buf + c->buflen, 0, BLAKE2B_BLOCKBYTES - c->buflen); /* Padding */
  blake2b_set_lastblock (S);
  /* Negative increment corrects the counter for padding bytes. */
  blake2b_increment_counter (S, (int)c->buflen - BLAKE2B_BLOCKBYTES);
  burn = blake2b_transform (ctx, c->buf, 1);

  /* Output full hash to buffer */
  for (i = 0; i < 8; ++i)
    buf_put_le64 (c->buf + sizeof(S->h[i]) * i, S->h[i]);

  /* Zero out extra buffer bytes.
   */
  if (c->outlen < sizeof(c->buf))
    memset (c->buf + c->outlen, 0, sizeof(c->buf) - c->outlen);

  if (burn)
    _gcry_burn_stack (burn);
}

/* Return the digest computed by blake2b_final (stored in c->buf). */
static byte *blake2b_read(void *ctx)
{
  BLAKE2B_CONTEXT *c = ctx;
  return c->buf;
}

/* Absorb inlen bytes of message.  Passing S (== ctx, since state is
 * the first context member) lets blake2_write call blake2b_transform. */
static void blake2b_write(void *ctx, const void *inbuf, size_t inlen)
{
  BLAKE2B_CONTEXT *c = ctx;
  BLAKE2B_STATE *S = &c->state;
  blake2_write(S, inbuf, inlen, c->buf, &c->buflen, BLAKE2B_BLOCKBYTES,
               blake2b_transform);
}

static inline void blake2b_init_param(BLAKE2B_STATE *S,
                                      const struct blake2b_param_s *P)
{
  const byte *p = (const byte *)P;
  size_t i;

  /* init xors IV with input parameter block */

  /* IV XOR ParamBlock */
  for (i = 0; i < 8; ++i)
    S->h[i] = blake2b_IV[i] ^ buf_get_le64(p + sizeof(S->h[i]) * i);
}

/* Set up the parameter block for a sequential hash, then absorb an
 * optional key padded to one full input block (RFC 7693, 3.3). */
static inline gcry_err_code_t blake2b_init(BLAKE2B_CONTEXT *ctx,
                                           const byte *key, size_t keylen)
{
  struct blake2b_param_s P[1] = { { 0, } };
  BLAKE2B_STATE *S = &ctx->state;

  if (!ctx->outlen || ctx->outlen > BLAKE2B_OUTBYTES)
    return GPG_ERR_INV_ARG;
  /* The param block is hashed as 8 LE 64-bit words; layout must match. */
  if (sizeof(P[0]) != sizeof(u64) * 8)
    return GPG_ERR_INTERNAL;
  if (keylen && (!key || keylen > BLAKE2B_KEYBYTES))
    return GPG_ERR_INV_KEYLEN;

  P->digest_length = ctx->outlen;
  P->key_length = keylen;
  P->fanout = 1;
  P->depth = 1;

  blake2b_init_param (S, P);
  wipememory (P, sizeof(P));

  if (key)
    {
      blake2b_write (ctx, key, keylen);
      blake2b_write (ctx, zero_block, BLAKE2B_BLOCKBYTES - keylen);
    }

  return 0;
}

/* libgcrypt md entry point: zero the context, detect HW features and
 * delegate to blake2b_init.  dbits is the digest length in bits. */
static gcry_err_code_t blake2b_init_ctx(void *ctx, unsigned int flags,
                                        const byte *key, size_t keylen,
                                        unsigned int dbits)
{
  BLAKE2B_CONTEXT *c = ctx;
  unsigned int features = _gcry_get_hw_features ();

  (void)features;
  (void)flags;

  memset (c, 0, sizeof (*c));

#ifdef USE_AVX2
  c->use_avx2 = !!(features & HWF_INTEL_AVX2);
#endif

  c->outlen = dbits / 8;
  c->buflen = 0;
  return blake2b_init(c, key, keylen);
}

/* Mark the BLAKE2s state finalized. */
static inline void blake2s_set_lastblock(BLAKE2S_STATE *S)
{
  S->f[0] = 0xFFFFFFFFUL;
}

static inline int blake2s_is_lastblock(BLAKE2S_STATE *S)
{
  return S->f[0] != 0;
}

/* 64-bit counter add across two u32 words; see the BLAKE2b variant. */
static inline void
blake2s_increment_counter(BLAKE2S_STATE *S, const int inc)
{
  S->t[0] += (u32)inc;
  /* Carry into t[1]; the (inc < 0) term borrows for the negative
   * adjustment made at finalization. */
  S->t[1] += (S->t[0] < (u32)inc) - (inc < 0);
}

/* Generic (portable C) BLAKE2s compression of nblks 64-byte blocks. */
static unsigned int blake2s_transform_generic(BLAKE2S_STATE *S,
                                              const void *inblks,
                                              size_t nblks)
{
  /* Message word schedule (RFC 7693, section 2.7); BLAKE2s uses the
   * first 10 rows only. */
  static const byte blake2s_sigma[10][16] =
  {
    {  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15 },
    { 14, 10,  4,  8,  9, 15, 13,  6,  1, 12,  0,  2, 11,  7,  5,  3 },
    { 11,  8, 12,  0,  5,  2, 15, 13, 10, 14,  3,  6,  7,  1,  9,  4 },
    {  7,  9,  3,  1, 13, 12, 11, 14,  2,  6,  5, 10,  4,  0, 15,  8 },
    {  9,  0,  5,  7,  2,  4, 10, 15, 14,  1, 11, 12,  6,  8,  3, 13 },
    {  2, 12,  6, 10,  0, 11,  8,  3,  4, 13,  7,  5, 15, 14,  1,  9 },
    { 12,  5,  1, 15, 14, 13,  4, 10,  0,  7,  6,  3,  9,  2,  8, 11 },
    { 13, 11,  7, 14, 12,  1,  3,  9,  5,  0, 15,  4,  8,  6,  2, 10 },
    {  6, 15, 14,  9, 11,  3,  0,  8, 12,  2, 13,  7,  1,  4, 10,  5 },
    { 10,  2,  8,  4,  7,  6,  1,  5, 15, 11,  9, 14,  3, 12, 13,  0 },
  };
  /* NOTE(review): this always returns 0 burn, unlike
   * blake2b_transform_generic which reports a stack estimate —
   * confirm this is intended. */
  unsigned int burn = 0;
  const byte* in = inblks;
  u32 m[16];
  u32 v[16];

  while (nblks--)
    {
      /* Increment counter */
      blake2s_increment_counter (S, BLAKE2S_BLOCKBYTES);

      /* Compress: load the 16 little-endian message words... */
      m[0] = buf_get_le32 (in + 0 * sizeof(m[0]));
      m[1] = buf_get_le32 (in + 1 * sizeof(m[0]));
      m[2] = buf_get_le32 (in + 2 * sizeof(m[0]));
      m[3] = buf_get_le32 (in + 3 * sizeof(m[0]));
      m[4] = buf_get_le32 (in + 4 * sizeof(m[0]));
      m[5] = buf_get_le32 (in + 5 * sizeof(m[0]));
      m[6] = buf_get_le32 (in + 6 * sizeof(m[0]));
      m[7] = buf_get_le32 (in + 7 * sizeof(m[0]));
      m[8] = buf_get_le32 (in + 8 * sizeof(m[0]));
      m[9] = buf_get_le32 (in + 9 * sizeof(m[0]));
      m[10] = buf_get_le32 (in + 10 * sizeof(m[0]));
      m[11] = buf_get_le32 (in + 11 * sizeof(m[0]));
      m[12] = buf_get_le32 (in + 12 * sizeof(m[0]));
      m[13] = buf_get_le32 (in + 13 * sizeof(m[0]));
      m[14] = buf_get_le32 (in + 14 * sizeof(m[0]));
      m[15] = buf_get_le32 (in + 15 * sizeof(m[0]));

      /* ...work vector: chaining value, then IV mixed with counter
       * and finalization flags. */
      v[ 0] = S->h[0];
      v[ 1] = S->h[1];
      v[ 2] = S->h[2];
      v[ 3] = S->h[3];
      v[ 4] = S->h[4];
      v[ 5] = S->h[5];
      v[ 6] = S->h[6];
      v[ 7] = S->h[7];
      v[ 8] = blake2s_IV[0];
      v[ 9] = blake2s_IV[1];
      v[10] = blake2s_IV[2];
      v[11] = blake2s_IV[3];
      v[12] = S->t[0] ^ blake2s_IV[4];
      v[13] = S->t[1] ^ blake2s_IV[5];
      v[14] = S->f[0] ^ blake2s_IV[6];
      v[15] = S->f[1] ^ blake2s_IV[7];

      /* Quarter-round mixing function G (rotations 16/12/8/7). */
#define G(r,i,a,b,c,d)                      \
  do {                                      \
    a = a + b + m[blake2s_sigma[r][2*i+0]]; \
    d = ror(d ^ a, 16);                     \
    c = c + d;                              \
    b = ror(b ^ c, 12);                     \
    a = a + b + m[blake2s_sigma[r][2*i+1]]; \
    d = ror(d ^ a, 8);                      \
    c = c + d;                              \
    b = ror(b ^ c, 7);                      \
  } while(0)

#define ROUND(r)                    \
  do {                              \
    G(r,0,v[ 0],v[ 4],v[ 8],v[12]); \
    G(r,1,v[ 1],v[ 5],v[ 9],v[13]); \
    G(r,2,v[ 2],v[ 6],v[10],v[14]); \
    G(r,3,v[ 3],v[ 7],v[11],v[15]); \
    G(r,4,v[ 0],v[ 5],v[10],v[15]); \
    G(r,5,v[ 1],v[ 6],v[11],v[12]); \
    G(r,6,v[ 2],v[ 7],v[ 8],v[13]); \
    G(r,7,v[ 3],v[ 4],v[ 9],v[14]); \
  } while(0)

      /* BLAKE2s uses 10 rounds. */
      ROUND(0);
      ROUND(1);
      ROUND(2);
      ROUND(3);
      ROUND(4);
      ROUND(5);
      ROUND(6);
      ROUND(7);
      ROUND(8);
      ROUND(9);

#undef G
#undef ROUND

      /* Feed-forward into the chaining value. */
      S->h[0] = S->h[0] ^ v[0] ^ v[0 + 8];
      S->h[1] = S->h[1] ^ v[1] ^ v[1 + 8];
      S->h[2] = S->h[2] ^ v[2] ^ v[2 + 8];
      S->h[3] = S->h[3] ^ v[3] ^ v[3 + 8];
      S->h[4] = S->h[4] ^ v[4] ^ v[4 + 8];
      S->h[5] = S->h[5] ^ v[5] ^ v[5 + 8];
      S->h[6] = S->h[6] ^ v[6] ^ v[6 + 8];
      S->h[7] = S->h[7] ^ v[7] ^ v[7 + 8];

      in += BLAKE2S_BLOCKBYTES;
    }

  return burn;
}

#ifdef USE_AVX
unsigned int _gcry_blake2s_transform_amd64_avx(BLAKE2S_STATE *S,
                                               const void *inblks,
                                               size_t nblks) ASM_FUNC_ABI;
#endif

/* Dispatch to the fastest available BLAKE2s compression routine. */
static unsigned int blake2s_transform(void *ctx, const void *inblks,
                                      size_t nblks)
{
  BLAKE2S_CONTEXT *c = ctx;
  unsigned int nburn;

  if (0)
    {}
#ifdef USE_AVX
  if (c->use_avx)
    nburn = _gcry_blake2s_transform_amd64_avx(&c->state, inblks, nblks);
#endif
  else
    nburn = blake2s_transform_generic(&c->state, inblks, nblks);

  if (nburn)
    nburn += ASM_EXTRA_STACK;

  return nburn;
}

/* Pad and compress the final block; mirrors blake2b_final. */
static void blake2s_final(void *ctx)
{
  BLAKE2S_CONTEXT *c = ctx;
  BLAKE2S_STATE *S = &c->state;
  unsigned int burn;
  size_t i;

  gcry_assert (sizeof(c->buf) >= c->outlen);
  if (blake2s_is_lastblock(S))
    return;

  if (c->buflen < BLAKE2S_BLOCKBYTES)
    memset (c->buf + c->buflen, 0, BLAKE2S_BLOCKBYTES - c->buflen); /* Padding */
  blake2s_set_lastblock (S);
  /* Negative increment corrects the counter for the padding bytes. */
  blake2s_increment_counter (S, (int)c->buflen - BLAKE2S_BLOCKBYTES);
  burn = blake2s_transform (ctx, c->buf, 1);

  /* Output full hash to buffer */
  for (i = 0; i < 8; ++i)
    buf_put_le32 (c->buf + sizeof(S->h[i]) * i, S->h[i]);

  /* Zero out extra buffer bytes. */
  if (c->outlen < sizeof(c->buf))
    memset (c->buf + c->outlen, 0, sizeof(c->buf) - c->outlen);

  if (burn)
    _gcry_burn_stack (burn);
}

/* Return the digest computed by blake2s_final (stored in c->buf). */
static byte *blake2s_read(void *ctx)
{
  BLAKE2S_CONTEXT *c = ctx;
  return c->buf;
}

/* Absorb inlen bytes; &c->state aliases c (state is first member). */
static void blake2s_write(void *ctx, const void *inbuf, size_t inlen)
{
  BLAKE2S_CONTEXT *c = ctx;
  BLAKE2S_STATE *S = &c->state;
  blake2_write(S, inbuf, inlen, c->buf, &c->buflen, BLAKE2S_BLOCKBYTES,
               blake2s_transform);
}

static inline void blake2s_init_param(BLAKE2S_STATE *S,
                                      const struct blake2s_param_s *P)
{
  const byte *p = (const byte *)P;
  size_t i;

  /* init2 xors IV with input parameter block */

  /* IV XOR ParamBlock.  Uses ^= over h[], which the caller zeroed via
   * memset in blake2s_init_ctx, so the result equals plain assignment. */
  for (i = 0; i < 8; ++i)
    S->h[i] ^= blake2s_IV[i] ^ buf_get_le32(&p[i * 4]);
}

/* Set up the parameter block for a sequential hash, then absorb an
 * optional key padded to one full input block; mirrors blake2b_init. */
static inline gcry_err_code_t blake2s_init(BLAKE2S_CONTEXT *ctx,
                                           const byte *key, size_t keylen)
{
  struct blake2s_param_s P[1] = { { 0, } };
  BLAKE2S_STATE *S = &ctx->state;

  if (!ctx->outlen || ctx->outlen > BLAKE2S_OUTBYTES)
    return GPG_ERR_INV_ARG;
  /* Param block is hashed as 8 LE 32-bit words; layout must match. */
  if (sizeof(P[0]) != sizeof(u32) * 8)
    return GPG_ERR_INTERNAL;
  if (keylen && (!key || keylen > BLAKE2S_KEYBYTES))
    return GPG_ERR_INV_KEYLEN;

  P->digest_length = ctx->outlen;
  P->key_length = keylen;
  P->fanout = 1;
  P->depth = 1;

  blake2s_init_param (S, P);
  wipememory (P, sizeof(P));

  if (key)
    {
      blake2s_write (ctx, key, keylen);
      blake2s_write (ctx, zero_block, BLAKE2S_BLOCKBYTES - keylen);
    }

  return 0;
}

/* libgcrypt md entry point; dbits is the digest length in bits. */
static gcry_err_code_t blake2s_init_ctx(void *ctx, unsigned int flags,
                                        const byte *key, size_t keylen,
                                        unsigned int dbits)
{
  BLAKE2S_CONTEXT *c = ctx;
  unsigned int features = _gcry_get_hw_features ();

  (void)features;
  (void)flags;

  memset (c, 0, sizeof (*c));

#ifdef USE_AVX
  c->use_avx = !!(features & HWF_INTEL_AVX);
#endif

  c->outlen = dbits / 8;
  c->buflen = 0;
  return
blake2s_init(c, key, keylen);
}

/* Selftests from "RFC 7693, Appendix E. BLAKE2b and BLAKE2s Self-Test
 * Module C Source". */

/* Deterministic pseudo-random test-vector generator (Fibonacci-style
 * sequence seeded by 'seed'); taken verbatim from RFC 7693 App. E. */
static void selftest_seq(byte *out, size_t len, u32 seed)
{
  size_t i;
  u32 t, a, b;

  a = 0xDEAD4BAD * seed;
  b = 1;

  for (i = 0; i < len; i++)
    {
      t = a + b;
      a = b;
      b = t;
      out[i] = (t >> 24) & 0xFF;
    }
}

/* Hash-of-hashes self-test for BLAKE2b over all digest lengths and a
 * set of input lengths, keyed and unkeyed, compared against the RFC
 * 7693 Appendix E reference result. */
static gpg_err_code_t
selftests_blake2b (int algo, int extended, selftest_report_func_t report)
{
  static const byte blake2b_res[32] =
  {
    0xC2, 0x3A, 0x78, 0x00, 0xD9, 0x81, 0x23, 0xBD,
    0x10, 0xF5, 0x06, 0xC6, 0x1E, 0x29, 0xDA, 0x56,
    0x03, 0xD7, 0x63, 0xB8, 0xBB, 0xAD, 0x2E, 0x73,
    0x7F, 0x5E, 0x76, 0x5A, 0x7B, 0xCC, 0xD4, 0x75
  };
  static const size_t b2b_md_len[4] = { 20, 32, 48, 64 };
  static const size_t b2b_in_len[6] = { 0, 3, 128, 129, 255, 1024 };
  size_t i, j, outlen, inlen;
  byte in[1024], key[64];
  BLAKE2B_CONTEXT ctx;
  BLAKE2B_CONTEXT ctx2;
  const char *what;
  const char *errtxt;

  (void)extended;

  what = "rfc7693 BLAKE2b selftest";

  /* 256-bit hash for testing */
  if (blake2b_init_ctx(&ctx, 0, NULL, 0, 32 * 8))
    {
      errtxt = "init failed";
      goto failed;
    }

  for (i = 0; i < 4; i++)
    {
      outlen = b2b_md_len[i];
      for (j = 0; j < 6; j++)
	{
	  inlen = b2b_in_len[j];

	  selftest_seq(in, inlen, inlen); /* unkeyed hash */
	  blake2b_init_ctx(&ctx2, 0, NULL, 0, outlen * 8);
	  blake2b_write(&ctx2, in, inlen);
	  blake2b_final(&ctx2);
	  blake2b_write(&ctx, ctx2.buf, outlen); /* hash the hash */

	  selftest_seq(key, outlen, outlen); /* keyed hash */
	  blake2b_init_ctx(&ctx2, 0, key, outlen, outlen * 8);
	  blake2b_write(&ctx2, in, inlen);
	  blake2b_final(&ctx2);
	  blake2b_write(&ctx, ctx2.buf, outlen); /* hash the hash */
	}
    }

  /* compute and compare the hash of hashes */
  blake2b_final(&ctx);
  for (i = 0; i < 32; i++)
    {
      if (ctx.buf[i] != blake2b_res[i])
	{
	  errtxt = "digest mismatch";
	  goto failed;
	}
    }

  return 0;

failed:
  if (report)
    report ("digest", algo, what, errtxt);
  return GPG_ERR_SELFTEST_FAILED;
}

/* BLAKE2s analogue of selftests_blake2b (RFC 7693 Appendix E). */
static gpg_err_code_t
selftests_blake2s (int algo, int extended, selftest_report_func_t report)
{
  static const byte blake2s_res[32] =
  {
    0x6A, 0x41, 0x1F, 0x08, 0xCE, 0x25, 0xAD, 0xCD,
    0xFB, 0x02, 0xAB, 0xA6, 0x41, 0x45, 0x1C, 0xEC,
    0x53, 0xC5, 0x98, 0xB2, 0x4F, 0x4F, 0xC7, 0x87,
    0xFB, 0xDC, 0x88, 0x79, 0x7F, 0x4C, 0x1D, 0xFE
  };
  static const size_t b2s_md_len[4] = { 16, 20, 28, 32 };
  static const size_t b2s_in_len[6] = { 0, 3, 64, 65, 255, 1024 };
  size_t i, j, outlen, inlen;
  byte in[1024], key[32];
  BLAKE2S_CONTEXT ctx;
  BLAKE2S_CONTEXT ctx2;
  const char *what;
  const char *errtxt;

  (void)extended;

  what = "rfc7693 BLAKE2s selftest";

  /* 256-bit hash for testing */
  if (blake2s_init_ctx(&ctx, 0, NULL, 0, 32 * 8))
    {
      errtxt = "init failed";
      goto failed;
    }

  for (i = 0; i < 4; i++)
    {
      outlen = b2s_md_len[i];
      for (j = 0; j < 6; j++)
	{
	  inlen = b2s_in_len[j];

	  selftest_seq(in, inlen, inlen); /* unkeyed hash */
	  blake2s_init_ctx(&ctx2, 0, NULL, 0, outlen * 8);
	  blake2s_write(&ctx2, in, inlen);
	  blake2s_final(&ctx2);
	  blake2s_write(&ctx, ctx2.buf, outlen); /* hash the hash */

	  selftest_seq(key, outlen, outlen); /* keyed hash */
	  blake2s_init_ctx(&ctx2, 0, key, outlen, outlen * 8);
	  blake2s_write(&ctx2, in, inlen);
	  blake2s_final(&ctx2);
	  blake2s_write(&ctx, ctx2.buf, outlen); /* hash the hash */
	}
    }

  /* compute and compare the hash of hashes */
  blake2s_final(&ctx);
  for (i = 0; i < 32; i++)
    {
      if (ctx.buf[i] != blake2s_res[i])
	{
	  errtxt = "digest mismatch";
	  goto failed;
	}
    }

  return 0;

failed:
  if (report)
    report ("digest", algo, what, errtxt);
  return GPG_ERR_SELFTEST_FAILED;
}

/* Public keyed-init entry point: map the algorithm id to the matching
 * variant/digest-size initializer. */
gcry_err_code_t _gcry_blake2_init_with_key(void *ctx, unsigned int flags,
					   const unsigned char *key,
					   size_t keylen, int algo)
{
  gcry_err_code_t rc;
  switch (algo)
    {
    case GCRY_MD_BLAKE2B_512:
      rc = blake2b_init_ctx (ctx, flags, key, keylen, 512);
      break;
    case GCRY_MD_BLAKE2B_384:
      rc = blake2b_init_ctx (ctx, flags, key, keylen, 384);
      break;
    case GCRY_MD_BLAKE2B_256:
      rc = blake2b_init_ctx (ctx, flags, key, keylen, 256);
      break;
    case GCRY_MD_BLAKE2B_160:
      rc = blake2b_init_ctx (ctx, flags, key, keylen, 160);
      break;
    case GCRY_MD_BLAKE2S_256:
rc = blake2s_init_ctx (ctx, flags, key, keylen, 256); break; case GCRY_MD_BLAKE2S_224: rc = blake2s_init_ctx (ctx, flags, key, keylen, 224); break; case GCRY_MD_BLAKE2S_160: rc = blake2s_init_ctx (ctx, flags, key, keylen, 160); break; case GCRY_MD_BLAKE2S_128: rc = blake2s_init_ctx (ctx, flags, key, keylen, 128); break; default: rc = GPG_ERR_DIGEST_ALGO; break; } return rc; } #define DEFINE_BLAKE2_VARIANT(bs, BS, dbits, oid_branch) \ static void blake2##bs##_##dbits##_init(void *ctx, unsigned int flags) \ { \ int err = blake2##bs##_init_ctx (ctx, flags, NULL, 0, dbits); \ gcry_assert (err == 0); \ } \ static void \ - _gcry_blake2##bs##_##dbits##_hash_buffers(void *outbuf, \ + _gcry_blake2##bs##_##dbits##_hash_buffers(void *outbuf, size_t nbytes, \ const gcry_buffer_t *iov, int iovcnt) \ { \ BLAKE2##BS##_CONTEXT hd; \ + (void)nbytes; \ blake2##bs##_##dbits##_init (&hd, 0); \ for (;iovcnt > 0; iov++, iovcnt--) \ blake2##bs##_write (&hd, (const char*)iov[0].data + iov[0].off, \ iov[0].len); \ blake2##bs##_final (&hd); \ memcpy (outbuf, blake2##bs##_read (&hd), dbits / 8); \ } \ static byte blake2##bs##_##dbits##_asn[] = { 0x30 }; \ static gcry_md_oid_spec_t oid_spec_blake2##bs##_##dbits[] = \ { \ { " 1.3.6.1.4.1.1722.12.2." 
oid_branch }, \ { NULL } \ }; \ gcry_md_spec_t _gcry_digest_spec_blake2##bs##_##dbits = \ { \ GCRY_MD_BLAKE2##BS##_##dbits, {0, 0}, \ "BLAKE2" #BS "_" #dbits, blake2##bs##_##dbits##_asn, \ DIM (blake2##bs##_##dbits##_asn), oid_spec_blake2##bs##_##dbits, \ dbits / 8, blake2##bs##_##dbits##_init, blake2##bs##_write, \ blake2##bs##_final, blake2##bs##_read, NULL, \ _gcry_blake2##bs##_##dbits##_hash_buffers, \ sizeof (BLAKE2##BS##_CONTEXT), selftests_blake2##bs \ }; DEFINE_BLAKE2_VARIANT(b, B, 512, "1.16") DEFINE_BLAKE2_VARIANT(b, B, 384, "1.12") DEFINE_BLAKE2_VARIANT(b, B, 256, "1.8") DEFINE_BLAKE2_VARIANT(b, B, 160, "1.5") DEFINE_BLAKE2_VARIANT(s, S, 256, "2.8") DEFINE_BLAKE2_VARIANT(s, S, 224, "2.7") DEFINE_BLAKE2_VARIANT(s, S, 160, "2.5") DEFINE_BLAKE2_VARIANT(s, S, 128, "2.4") diff --git a/cipher/keccak.c b/cipher/keccak.c index bb643e3d..00593bdb 100644 --- a/cipher/keccak.c +++ b/cipher/keccak.c @@ -1,1538 +1,1545 @@ /* keccak.c - SHA3 hash functions * Copyright (C) 2015 g10 Code GmbH * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser general Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . */ #include #include #include "g10lib.h" #include "bithelp.h" #include "bufhelp.h" #include "cipher.h" #include "hash-common.h" /* USE_64BIT indicates whether to use 64-bit generic implementation. * USE_32BIT indicates whether to use 32-bit generic implementation. 
 */
#undef USE_64BIT
#if defined(__x86_64__) || SIZEOF_UNSIGNED_LONG == 8
# define USE_64BIT 1
#else
# define USE_32BIT 1
#endif

/* USE_64BIT_BMI2 indicates whether to compile with 64-bit Intel BMI2 code. */
#undef USE_64BIT_BMI2
#if defined(USE_64BIT) && defined(HAVE_GCC_INLINE_ASM_BMI2) && \
    defined(HAVE_CPU_ARCH_X86)
# define USE_64BIT_BMI2 1
#endif

/* USE_64BIT_SHLD indicates whether to compile with 64-bit Intel SHLD code. */
#undef USE_64BIT_SHLD
#if defined(USE_64BIT) && defined (__GNUC__) && defined(__x86_64__) && \
    defined(HAVE_CPU_ARCH_X86)
# define USE_64BIT_SHLD 1
#endif

/* USE_32BIT_BMI2 indicates whether to compile with 32-bit Intel BMI2 code. */
#undef USE_32BIT_BMI2
#if defined(USE_32BIT) && defined(HAVE_GCC_INLINE_ASM_BMI2) && \
    defined(HAVE_CPU_ARCH_X86)
# define USE_32BIT_BMI2 1
#endif

/* USE_64BIT_ARM_NEON indicates whether to enable 64-bit ARM/NEON assembly
 * code. */
#undef USE_64BIT_ARM_NEON
#ifdef ENABLE_NEON_SUPPORT
# if defined(HAVE_ARM_ARCH_V6) && defined(__ARMEL__) \
     && defined(HAVE_COMPATIBLE_GCC_ARM_PLATFORM_AS) \
     && defined(HAVE_GCC_INLINE_ASM_NEON)
#  define USE_64BIT_ARM_NEON 1
# endif
#endif /*ENABLE_NEON_SUPPORT*/

/* USE_S390X_CRYPTO indicates whether to enable zSeries code. */
#undef USE_S390X_CRYPTO
#if defined(HAVE_GCC_INLINE_ASM_S390X)
# define USE_S390X_CRYPTO 1
#endif /* USE_S390X_CRYPTO */

#if defined(USE_64BIT) || defined(USE_64BIT_ARM_NEON)
# define NEED_COMMON64 1
#endif

#ifdef USE_32BIT
# define NEED_COMMON32BI 1
#endif

/* Domain-separation suffix bits appended before padding (FIPS 202). */
#define SHA3_DELIMITED_SUFFIX 0x06
#define SHAKE_DELIMITED_SUFFIX 0x1F

/* 1600-bit Keccak state, either as 25 64-bit lanes or, for the 32-bit
 * build, as 50 bit-interleaved 32-bit words. */
typedef struct
{
  union
  {
#ifdef NEED_COMMON64
    u64 state64[25];
#endif
#ifdef NEED_COMMON32BI
    u32 state32bi[50];
#endif
  } u;
} KECCAK_STATE;

/* Implementation vtable: permute the state, absorb message lanes,
 * extract output bytes.  Each returns a stack-burn estimate. */
typedef struct
{
  unsigned int (*permute)(KECCAK_STATE *hd);
  unsigned int (*absorb)(KECCAK_STATE *hd, int pos, const byte *lanes,
			 unsigned int nlanes, int blocklanes);
  unsigned int (*extract) (KECCAK_STATE *hd, unsigned int pos, byte *outbuf,
			   unsigned int outlen);
} keccak_ops_t;

typedef struct KECCAK_CONTEXT_S
{
  KECCAK_STATE state;
  unsigned int outlen;     /* digest length in bytes (0 for SHAKE) */
  unsigned int blocksize;  /* rate in bytes */
  unsigned int count;      /* bytes absorbed into the current block */
  unsigned int suffix;     /* domain-separation suffix */
  const keccak_ops_t *ops;
#ifdef USE_S390X_CRYPTO
  unsigned int kimd_func;
  unsigned int buf_pos;
  byte buf[1344 / 8];   /* SHAKE128 requires biggest buffer, 1344 bits. */
#endif
} KECCAK_CONTEXT;


#ifdef NEED_COMMON64

/* Keccak-f[1600] round constants plus an all-ones terminator used by
 * the shared permutation code in keccak_permute_64.h. */
const u64 _gcry_keccak_round_consts_64bit[24 + 1] =
{
  U64_C(0x0000000000000001), U64_C(0x0000000000008082),
  U64_C(0x800000000000808A), U64_C(0x8000000080008000),
  U64_C(0x000000000000808B), U64_C(0x0000000080000001),
  U64_C(0x8000000080008081), U64_C(0x8000000000008009),
  U64_C(0x000000000000008A), U64_C(0x0000000000000088),
  U64_C(0x0000000080008009), U64_C(0x000000008000000A),
  U64_C(0x000000008000808B), U64_C(0x800000000000008B),
  U64_C(0x8000000000008089), U64_C(0x8000000000008003),
  U64_C(0x8000000000008002), U64_C(0x8000000000000080),
  U64_C(0x000000000000800A), U64_C(0x800000008000000A),
  U64_C(0x8000000080008081), U64_C(0x8000000000008080),
  U64_C(0x0000000080000001), U64_C(0x8000000080008008),
  U64_C(0xFFFFFFFFFFFFFFFF)
};

/* Squeeze outlen bytes, starting at lane 'pos', little-endian. */
static unsigned int keccak_extract64(KECCAK_STATE *hd, unsigned int pos,
				     byte *outbuf, unsigned int outlen)
{
  unsigned int i;

  /* NOTE: when pos == 0, hd and outbuf may point to same memory (SHA-3). */

  for (i = pos; i < pos + outlen / 8 + !!(outlen % 8); i++)
    {
      u64 tmp = hd->u.state64[i];
      buf_put_le64(outbuf, tmp);
      outbuf += 8;
    }

  return 0;
}

#endif /* NEED_COMMON64 */


#ifdef NEED_COMMON32BI

/* Round constants pre-split into (even, odd) bit-interleaved halves. */
static const u32 round_consts_32bit[2 * 24] =
{
  0x00000001UL, 0x00000000UL, 0x00000000UL, 0x00000089UL,
  0x00000000UL, 0x8000008bUL, 0x00000000UL, 0x80008080UL,
  0x00000001UL, 0x0000008bUL, 0x00000001UL, 0x00008000UL,
  0x00000001UL, 0x80008088UL, 0x00000001UL, 0x80000082UL,
  0x00000000UL, 0x0000000bUL, 0x00000000UL, 0x0000000aUL,
  0x00000001UL, 0x00008082UL, 0x00000000UL, 0x00008003UL,
  0x00000001UL, 0x0000808bUL, 0x00000001UL, 0x8000000bUL,
  0x00000001UL, 0x8000008aUL, 0x00000001UL, 0x80000081UL,
  0x00000000UL, 0x80000081UL, 0x00000000UL, 0x80000008UL,
  0x00000000UL, 0x00000083UL, 0x00000000UL, 0x80008003UL,
  0x00000001UL, 0x80008088UL, 0x00000000UL, 0x80000088UL,
  0x00000001UL, 0x00008000UL, 0x00000000UL, 0x80008082UL
};

/* Squeeze outlen bytes from the bit-interleaved state: recombine each
 * (even, odd) word pair back into one 64-bit lane (un-interleave). */
static unsigned int keccak_extract32bi(KECCAK_STATE *hd, unsigned int pos,
				       byte *outbuf, unsigned int outlen)
{
  unsigned int i;
  u32 x0;
  u32 x1;
  u32 t;

  /* NOTE: when pos == 0, hd and outbuf may point to same memory (SHA-3). */

  for (i = pos; i < pos + outlen / 8 + !!(outlen % 8); i++)
    {
      x0 = hd->u.state32bi[i * 2 + 0];
      x1 = hd->u.state32bi[i * 2 + 1];

      t = (x0 & 0x0000FFFFUL) + (x1 << 16);
      x1 = (x0 >> 16) + (x1 & 0xFFFF0000UL);
      x0 = t;
      t = (x0 ^ (x0 >> 8)) & 0x0000FF00UL; x0 = x0 ^ t ^ (t << 8);
      t = (x0 ^ (x0 >> 4)) & 0x00F000F0UL; x0 = x0 ^ t ^ (t << 4);
      t = (x0 ^ (x0 >> 2)) & 0x0C0C0C0CUL; x0 = x0 ^ t ^ (t << 2);
      t = (x0 ^ (x0 >> 1)) & 0x22222222UL; x0 = x0 ^ t ^ (t << 1);
      t = (x1 ^ (x1 >> 8)) & 0x0000FF00UL; x1 = x1 ^ t ^ (t << 8);
      t = (x1 ^ (x1 >> 4)) & 0x00F000F0UL; x1 = x1 ^ t ^ (t << 4);
      t = (x1 ^ (x1 >> 2)) & 0x0C0C0C0CUL; x1 = x1 ^ t ^ (t << 2);
      t = (x1 ^ (x1 >> 1)) & 0x22222222UL; x1 = x1 ^ t ^ (t << 1);

      buf_put_le32(&outbuf[0], x0);
      buf_put_le32(&outbuf[4], x1);
      outbuf += 8;
    }

  return 0;
}

/* XOR one 64-bit message lane (given as two LE 32-bit halves) into the
 * state after splitting it into even/odd interleaved bit planes. */
static inline void keccak_absorb_lane32bi(u32 *lane, u32 x0, u32 x1)
{
  u32 t;

  t = (x0 ^ (x0 >> 1)) & 0x22222222UL; x0 = x0 ^ t ^ (t << 1);
  t = (x0 ^ (x0 >> 2)) & 0x0C0C0C0CUL; x0 = x0 ^ t ^ (t << 2);
  t = (x0 ^ (x0 >> 4)) & 0x00F000F0UL; x0 = x0 ^ t ^ (t << 4);
  t = (x0 ^ (x0 >> 8)) & 0x0000FF00UL; x0 = x0 ^ t ^ (t << 8);
  t = (x1 ^ (x1 >> 1)) & 0x22222222UL; x1 = x1 ^ t ^ (t << 1);
  t = (x1 ^ (x1 >> 2)) & 0x0C0C0C0CUL; x1 = x1 ^ t ^ (t << 2);
  t = (x1 ^ (x1 >> 4)) & 0x00F000F0UL; x1 = x1 ^ t ^ (t << 4);
  t = (x1 ^ (x1 >> 8)) & 0x0000FF00UL; x1 = x1 ^ t ^ (t << 8);
  lane[0] ^= (x0 & 0x0000FFFFUL) + (x1 << 16);
  lane[1] ^= (x0 >> 16) + (x1 & 0xFFFF0000UL);
}

#endif /* NEED_COMMON32BI */


/* Construct generic 64-bit implementation.
 */
#ifdef USE_64BIT

#if __GNUC__ >= 4 && defined(__x86_64__)

/* SSE2 path: XOR 8/4/2 input lanes into the state 16 bytes at a time.
 * movdqu handles the unaligned input buffer. */
static inline void absorb_lanes64_8(u64 *dst, const byte *in)
{
  asm ("movdqu 0*16(%[dst]), %%xmm0\n\t"
       "movdqu 0*16(%[in]), %%xmm4\n\t"
       "movdqu 1*16(%[dst]), %%xmm1\n\t"
       "movdqu 1*16(%[in]), %%xmm5\n\t"
       "movdqu 2*16(%[dst]), %%xmm2\n\t"
       "movdqu 3*16(%[dst]), %%xmm3\n\t"
       "pxor %%xmm4, %%xmm0\n\t"
       "pxor %%xmm5, %%xmm1\n\t"
       "movdqu 2*16(%[in]), %%xmm4\n\t"
       "movdqu 3*16(%[in]), %%xmm5\n\t"
       "movdqu %%xmm0, 0*16(%[dst])\n\t"
       "pxor %%xmm4, %%xmm2\n\t"
       "movdqu %%xmm1, 1*16(%[dst])\n\t"
       "pxor %%xmm5, %%xmm3\n\t"
       "movdqu %%xmm2, 2*16(%[dst])\n\t"
       "movdqu %%xmm3, 3*16(%[dst])\n\t"
       :
       : [dst] "r" (dst), [in] "r" (in)
       : "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "memory");
}

static inline void absorb_lanes64_4(u64 *dst, const byte *in)
{
  asm ("movdqu 0*16(%[dst]), %%xmm0\n\t"
       "movdqu 0*16(%[in]), %%xmm4\n\t"
       "movdqu 1*16(%[dst]), %%xmm1\n\t"
       "movdqu 1*16(%[in]), %%xmm5\n\t"
       "pxor %%xmm4, %%xmm0\n\t"
       "pxor %%xmm5, %%xmm1\n\t"
       "movdqu %%xmm0, 0*16(%[dst])\n\t"
       "movdqu %%xmm1, 1*16(%[dst])\n\t"
       :
       : [dst] "r" (dst), [in] "r" (in)
       : "xmm0", "xmm1", "xmm4", "xmm5", "memory");
}

static inline void absorb_lanes64_2(u64 *dst, const byte *in)
{
  asm ("movdqu 0*16(%[dst]), %%xmm0\n\t"
       "movdqu 0*16(%[in]), %%xmm4\n\t"
       "pxor %%xmm4, %%xmm0\n\t"
       "movdqu %%xmm0, 0*16(%[dst])\n\t"
       :
       : [dst] "r" (dst), [in] "r" (in)
       : "xmm0", "xmm4", "memory");
}

#else /* __x86_64__ */

/* Portable fallbacks: XOR little-endian input lanes into the state. */
static inline void absorb_lanes64_8(u64 *dst, const byte *in)
{
  dst[0] ^= buf_get_le64(in + 8 * 0);
  dst[1] ^= buf_get_le64(in + 8 * 1);
  dst[2] ^= buf_get_le64(in + 8 * 2);
  dst[3] ^= buf_get_le64(in + 8 * 3);
  dst[4] ^= buf_get_le64(in + 8 * 4);
  dst[5] ^= buf_get_le64(in + 8 * 5);
  dst[6] ^= buf_get_le64(in + 8 * 6);
  dst[7] ^= buf_get_le64(in + 8 * 7);
}

static inline void absorb_lanes64_4(u64 *dst, const byte *in)
{
  dst[0] ^= buf_get_le64(in + 8 * 0);
  dst[1] ^= buf_get_le64(in + 8 * 1);
  dst[2] ^= buf_get_le64(in + 8 * 2);
  dst[3] ^= buf_get_le64(in + 8 * 3);
}

static inline void absorb_lanes64_2(u64 *dst, const byte *in)
{
  dst[0] ^= buf_get_le64(in + 8 * 0);
  dst[1] ^= buf_get_le64(in + 8 * 1);
}

#endif /* !__x86_64__ */

static inline void absorb_lanes64_1(u64 *dst, const byte *in)
{
  dst[0] ^= buf_get_le64(in + 8 * 0);
}


/* Instantiate the shared Keccak-f[1600] permutation template with plain
 * C primitives; masking in ROL64 avoids UB for shifts of 0/64. */
# define ANDN64(x, y) (~(x) & (y))
# define ROL64(x, n) (((x) << ((unsigned int)n & 63)) | \
		      ((x) >> ((64 - (unsigned int)(n)) & 63)))

# define KECCAK_F1600_PERMUTE_FUNC_NAME keccak_f1600_state_permute64
# define KECCAK_F1600_ABSORB_FUNC_NAME keccak_absorb_lanes64
# include "keccak_permute_64.h"

# undef ANDN64
# undef ROL64
# undef KECCAK_F1600_PERMUTE_FUNC_NAME
# undef KECCAK_F1600_ABSORB_FUNC_NAME

static const keccak_ops_t keccak_generic64_ops =
{
  .permute = keccak_f1600_state_permute64,
  .absorb = keccak_absorb_lanes64,
  .extract = keccak_extract64,
};

#endif /* USE_64BIT */


/* Construct 64-bit Intel SHLD implementation. */
#ifdef USE_64BIT_SHLD

/* Same template, but rotates via the SHLD instruction. */
# define ANDN64(x, y) (~(x) & (y))
# define ROL64(x, n) ({ \
			u64 tmp = (x); \
			asm ("shldq %1, %0, %0" \
			     : "+r" (tmp) \
			     : "J" ((n) & 63) \
			     : "cc"); \
			tmp; })

# define KECCAK_F1600_PERMUTE_FUNC_NAME keccak_f1600_state_permute64_shld
# define KECCAK_F1600_ABSORB_FUNC_NAME keccak_absorb_lanes64_shld
# include "keccak_permute_64.h"

# undef ANDN64
# undef ROL64
# undef KECCAK_F1600_PERMUTE_FUNC_NAME
# undef KECCAK_F1600_ABSORB_FUNC_NAME

static const keccak_ops_t keccak_shld_64_ops =
{
  .permute = keccak_f1600_state_permute64_shld,
  .absorb = keccak_absorb_lanes64_shld,
  .extract = keccak_extract64,
};

#endif /* USE_64BIT_SHLD */


/* Construct 64-bit Intel BMI2 implementation. */
#ifdef USE_64BIT_BMI2

/* Same template with BMI2 ANDN and RORX (non-destructive, flag-free). */
# define ANDN64(x, y) ({ \
			u64 tmp; \
			asm ("andnq %2, %1, %0" \
			     : "=r" (tmp) \
			     : "r0" (x), "rm" (y)); \
			tmp; })

# define ROL64(x, n) ({ \
		       u64 tmp; \
		       asm ("rorxq %2, %1, %0" \
			    : "=r" (tmp) \
			    : "rm0" (x), "J" (64 - ((n) & 63))); \
		       tmp; })

# define KECCAK_F1600_PERMUTE_FUNC_NAME keccak_f1600_state_permute64_bmi2
# define KECCAK_F1600_ABSORB_FUNC_NAME keccak_absorb_lanes64_bmi2
# include "keccak_permute_64.h"

# undef ANDN64
# undef ROL64
# undef KECCAK_F1600_PERMUTE_FUNC_NAME
# undef KECCAK_F1600_ABSORB_FUNC_NAME

static const keccak_ops_t keccak_bmi2_64_ops =
{
  .permute = keccak_f1600_state_permute64_bmi2,
  .absorb = keccak_absorb_lanes64_bmi2,
  .extract = keccak_extract64,
};

#endif /* USE_64BIT_BMI2 */


/* 64-bit ARMv7/NEON implementation. */
#ifdef USE_64BIT_ARM_NEON

unsigned int _gcry_keccak_permute_armv7_neon(u64 *state);
unsigned int _gcry_keccak_absorb_lanes64_armv7_neon(u64 *state, int pos,
						    const byte *lanes,
						    unsigned int nlanes,
						    int blocklanes);

static unsigned int keccak_permute64_armv7_neon(KECCAK_STATE *hd)
{
  return _gcry_keccak_permute_armv7_neon(hd->u.state64);
}

static unsigned int keccak_absorb_lanes64_armv7_neon(KECCAK_STATE *hd,
						     int pos, const byte *lanes,
						     unsigned int nlanes,
						     int blocklanes)
{
  if (blocklanes < 0)
    {
      /* blocklanes == -1, permutationless absorb from keccak_final. */
      /* NOTE(review): 'pos' is not advanced inside this loop, so all
       * lanes XOR into the same state word — harmless only if callers
       * always pass nlanes == 1 here; verify against keccak_final. */
      while (nlanes)
	{
	  hd->u.state64[pos] ^= buf_get_le64(lanes);
	  lanes += 8;
	  nlanes--;
	}

      return 0;
    }
  else
    {
      return _gcry_keccak_absorb_lanes64_armv7_neon(hd->u.state64, pos,
						    lanes, nlanes, blocklanes);
    }
}

static const keccak_ops_t keccak_armv7_neon_64_ops =
{
  .permute = keccak_permute64_armv7_neon,
  .absorb = keccak_absorb_lanes64_armv7_neon,
  .extract = keccak_extract64,
};

#endif /* USE_64BIT_ARM_NEON */


/* Construct generic 32-bit implementation.
*/ #ifdef USE_32BIT # define ANDN32(x, y) (~(x) & (y)) # define ROL32(x, n) (((x) << ((unsigned int)n & 31)) | \ ((x) >> ((32 - (unsigned int)(n)) & 31))) # define KECCAK_F1600_PERMUTE_FUNC_NAME keccak_f1600_state_permute32bi # include "keccak_permute_32.h" # undef ANDN32 # undef ROL32 # undef KECCAK_F1600_PERMUTE_FUNC_NAME static unsigned int keccak_absorb_lanes32bi(KECCAK_STATE *hd, int pos, const byte *lanes, unsigned int nlanes, int blocklanes) { unsigned int burn = 0; while (nlanes) { keccak_absorb_lane32bi(&hd->u.state32bi[pos * 2], buf_get_le32(lanes + 0), buf_get_le32(lanes + 4)); lanes += 8; nlanes--; if (++pos == blocklanes) { burn = keccak_f1600_state_permute32bi(hd); pos = 0; } } return burn; } static const keccak_ops_t keccak_generic32bi_ops = { .permute = keccak_f1600_state_permute32bi, .absorb = keccak_absorb_lanes32bi, .extract = keccak_extract32bi, }; #endif /* USE_32BIT */ /* Construct 32-bit Intel BMI2 implementation. */ #ifdef USE_32BIT_BMI2 # define ANDN32(x, y) ({ \ u32 tmp; \ asm ("andnl %2, %1, %0" \ : "=r" (tmp) \ : "r0" (x), "rm" (y)); \ tmp; }) # define ROL32(x, n) ({ \ u32 tmp; \ asm ("rorxl %2, %1, %0" \ : "=r" (tmp) \ : "rm0" (x), "J" (32 - ((n) & 31))); \ tmp; }) # define KECCAK_F1600_PERMUTE_FUNC_NAME keccak_f1600_state_permute32bi_bmi2 # include "keccak_permute_32.h" # undef ANDN32 # undef ROL32 # undef KECCAK_F1600_PERMUTE_FUNC_NAME static inline u32 pext(u32 x, u32 mask) { u32 tmp; asm ("pextl %2, %1, %0" : "=r" (tmp) : "r0" (x), "rm" (mask)); return tmp; } static inline u32 pdep(u32 x, u32 mask) { u32 tmp; asm ("pdepl %2, %1, %0" : "=r" (tmp) : "r0" (x), "rm" (mask)); return tmp; } static inline void keccak_absorb_lane32bi_bmi2(u32 *lane, u32 x0, u32 x1) { x0 = pdep(pext(x0, 0x55555555), 0x0000ffff) | (pext(x0, 0xaaaaaaaa) << 16); x1 = pdep(pext(x1, 0x55555555), 0x0000ffff) | (pext(x1, 0xaaaaaaaa) << 16); lane[0] ^= (x0 & 0x0000FFFFUL) + (x1 << 16); lane[1] ^= (x0 >> 16) + (x1 & 0xFFFF0000UL); } static unsigned int 
/* Continuation: body of keccak_absorb_lanes32bi_bmi2; the storage class and
 * return type ("static unsigned int") are on the preceding line.  Absorbs
 * NLANES 64-bit lanes starting at lane index POS, permuting every time a
 * full rate block (BLOCKLANES lanes) has been absorbed.  Returns the stack
 * burn depth reported by the permutation (0 if no permutation ran).  */
keccak_absorb_lanes32bi_bmi2(KECCAK_STATE *hd, int pos, const byte *lanes,
                             unsigned int nlanes, int blocklanes)
{
  unsigned int burn = 0;

  while (nlanes)
    {
      /* Absorb one 64-bit lane as two bit-interleaved 32-bit halves.  */
      keccak_absorb_lane32bi_bmi2(&hd->u.state32bi[pos * 2],
                                  buf_get_le32(lanes + 0),
                                  buf_get_le32(lanes + 4));
      lanes += 8;
      nlanes--;

      if (++pos == blocklanes)
        {
          /* A full rate block was absorbed; run the Keccak-f permutation
           * and restart at lane 0.  (blocklanes == -1 never matches, giving
           * the permutationless absorb used by keccak_final.)  */
          burn = keccak_f1600_state_permute32bi_bmi2(hd);
          pos = 0;
        }
    }

  return burn;
}

/* Extract OUTLEN bytes of squeezed output into OUTBUF, starting at lane
 * index POS, de-interleaving the 32-bit bit-interleaved state back into
 * little-endian 64-bit lanes using the BMI2 PDEP/PEXT instructions.  */
static unsigned int
keccak_extract32bi_bmi2(KECCAK_STATE *hd, unsigned int pos, byte *outbuf,
                        unsigned int outlen)
{
  unsigned int i;
  u32 x0;
  u32 x1;
  u32 t;

  /* NOTE: when pos == 0, hd and outbuf may point to same memory (SHA-3). */

  for (i = pos; i < pos + outlen / 8 + !!(outlen % 8); i++)
    {
      x0 = hd->u.state32bi[i * 2 + 0];
      x1 = hd->u.state32bi[i * 2 + 1];

      t = (x0 & 0x0000FFFFUL) + (x1 << 16);
      x1 = (x0 >> 16) + (x1 & 0xFFFF0000UL);
      x0 = t;

      /* Undo the bit-interleaving: gather the bits selected by the masks
       * and scatter them back to even/odd positions.  */
      x0 = pdep(pext(x0, 0x55555555), 0x0000ffff) | (pext(x0, 0xaaaaaaaa) << 16) ? 0 : 0, x0 = pdep(pext(x0, 0xffff0001), 0xaaaaaaab) | pdep(x0 >> 1, 0x55555554);
      x1 = pdep(pext(x1, 0xffff0001), 0xaaaaaaab) | pdep(x1 >> 1, 0x55555554);

      buf_put_le32(&outbuf[0], x0);
      buf_put_le32(&outbuf[4], x1);
      outbuf += 8;
    }

  return 0;
}

/* Ops vtable for the 32-bit bit-interleaved BMI2 implementation.  */
static const keccak_ops_t keccak_bmi2_32bi_ops =
{
  .permute = keccak_f1600_state_permute32bi_bmi2,
  .absorb = keccak_absorb_lanes32bi_bmi2,
  .extract = keccak_extract32bi_bmi2,
};

#endif /* USE_32BIT_BMI2 */


#ifdef USE_S390X_CRYPTO
#include "asm-inline-s390x.h"

/* Feed INLEN bytes (a whole number of blocks) to the s390x KIMD
 * instruction, which updates the hash state in ctx->state directly.  */
static inline void
keccak_bwrite_s390x (void *context, const byte *in, size_t inlen)
{
  KECCAK_CONTEXT *ctx = context;

  /* Write full-blocks.
*/ kimd_execute (ctx->kimd_func, &ctx->state, in, inlen); return; } static inline void keccak_final_s390x (void *context) { KECCAK_CONTEXT *ctx = context; if (ctx->suffix == SHA3_DELIMITED_SUFFIX) { klmd_execute (ctx->kimd_func, &ctx->state, ctx->buf, ctx->count); } else { klmd_shake_execute (ctx->kimd_func, &ctx->state, NULL, 0, ctx->buf, ctx->count); ctx->count = 0; ctx->buf_pos = 0; } return; } static inline void keccak_bextract_s390x (void *context, byte *out, size_t outlen) { KECCAK_CONTEXT *ctx = context; /* Extract full-blocks. */ klmd_shake_execute (ctx->kimd_func | KLMD_PADDING_STATE, &ctx->state, out, outlen, NULL, 0); return; } static void keccak_write_s390x (void *context, const byte *inbuf, size_t inlen) { KECCAK_CONTEXT *hd = context; const size_t blocksize = hd->blocksize; size_t inblocks; size_t copylen; while (hd->count) { if (hd->count == blocksize) /* Flush the buffer. */ { keccak_bwrite_s390x (hd, hd->buf, blocksize); hd->count = 0; } else { copylen = inlen; if (copylen > blocksize - hd->count) copylen = blocksize - hd->count; if (copylen == 0) break; buf_cpy (&hd->buf[hd->count], inbuf, copylen); hd->count += copylen; inbuf += copylen; inlen -= copylen; } } if (inlen == 0) return; if (inlen >= blocksize) { inblocks = inlen / blocksize; keccak_bwrite_s390x (hd, inbuf, inblocks * blocksize); hd->count = 0; inlen -= inblocks * blocksize; inbuf += inblocks * blocksize; } if (inlen) { buf_cpy (hd->buf, inbuf, inlen); hd->count = inlen; } } static void keccak_extract_s390x (void *context, void *outbuf_arg, size_t outlen) { KECCAK_CONTEXT *hd = context; const size_t blocksize = hd->blocksize; byte *outbuf = outbuf_arg; while (outlen) { gcry_assert(hd->count == 0 || hd->buf_pos < hd->count); if (hd->buf_pos < hd->count && outlen) { size_t copylen = hd->count - hd->buf_pos; if (copylen > outlen) copylen = outlen; buf_cpy (outbuf, &hd->buf[hd->buf_pos], copylen); outbuf += copylen; outlen -= copylen; hd->buf_pos += copylen; } if (hd->buf_pos == 
hd->count) { hd->buf_pos = 0; hd->count = 0; } if (outlen == 0) return; if (outlen >= blocksize) { size_t outblocks = outlen / blocksize; keccak_bextract_s390x (context, outbuf, outblocks * blocksize); outlen -= outblocks * blocksize; outbuf += outblocks * blocksize; if (outlen == 0) return; } keccak_bextract_s390x (context, hd->buf, blocksize); hd->count = blocksize; } } #endif /* USE_S390X_CRYPTO */ static void keccak_write (void *context, const void *inbuf_arg, size_t inlen) { KECCAK_CONTEXT *ctx = context; const size_t bsize = ctx->blocksize; const size_t blocklanes = bsize / 8; const byte *inbuf = inbuf_arg; unsigned int nburn, burn = 0; unsigned int count, i; unsigned int pos, nlanes; #ifdef USE_S390X_CRYPTO if (ctx->kimd_func) { keccak_write_s390x (context, inbuf, inlen); return; } #endif count = ctx->count; if (inlen && (count % 8)) { byte lane[8] = { 0, }; /* Complete absorbing partial input lane. */ pos = count / 8; for (i = count % 8; inlen && i < 8; i++) { lane[i] = *inbuf++; inlen--; count++; } if (count == bsize) count = 0; nburn = ctx->ops->absorb(&ctx->state, pos, lane, 1, (count % 8) ? -1 : blocklanes); burn = nburn > burn ? nburn : burn; } /* Absorb full input lanes. */ pos = count / 8; nlanes = inlen / 8; if (nlanes > 0) { nburn = ctx->ops->absorb(&ctx->state, pos, inbuf, nlanes, blocklanes); burn = nburn > burn ? nburn : burn; inlen -= nlanes * 8; inbuf += nlanes * 8; count += nlanes * 8; count = count % bsize; } if (inlen) { byte lane[8] = { 0, }; /* Absorb remaining partial input lane. */ pos = count / 8; for (i = count % 8; inlen && i < 8; i++) { lane[i] = *inbuf++; inlen--; count++; } nburn = ctx->ops->absorb(&ctx->state, pos, lane, 1, -1); burn = nburn > burn ? 
nburn : burn;  /* continuation of "burn = nburn > burn ?" on the previous line */
      gcry_assert(count < bsize);
    }

  ctx->count = count;

  if (burn)
    _gcry_burn_stack (burn);
}

/* Initialize CONTEXT for hash algorithm ALGO, selecting the fastest
 * available Keccak backend for the running CPU and setting the rate
 * (block size) and output length for the chosen SHA3/SHAKE variant.  */
static void
keccak_init (int algo, void *context, unsigned int flags)
{
  KECCAK_CONTEXT *ctx = context;
  KECCAK_STATE *hd = &ctx->state;
  unsigned int features = _gcry_get_hw_features ();

  (void)flags;
  (void)features;  /* May be unused when no optimized backend is compiled in. */

  memset (hd, 0, sizeof *hd);

  ctx->count = 0;

  /* Select generic implementation. */
#ifdef USE_64BIT
  ctx->ops = &keccak_generic64_ops;
#elif defined USE_32BIT
  ctx->ops = &keccak_generic32bi_ops;
#endif

  /* Select optimized implementation based on HW features.  Note that the
   * "if (0) {}" head lets each branch below be compiled out independently. */
  if (0) {}
#ifdef USE_64BIT_ARM_NEON
  else if (features & HWF_ARM_NEON)
    ctx->ops = &keccak_armv7_neon_64_ops;
#endif
#ifdef USE_64BIT_BMI2
  else if (features & HWF_INTEL_BMI2)
    ctx->ops = &keccak_bmi2_64_ops;
#endif
#ifdef USE_32BIT_BMI2
  else if (features & HWF_INTEL_BMI2)
    ctx->ops = &keccak_bmi2_32bi_ops;
#endif
#ifdef USE_64BIT_SHLD
  else if (features & HWF_INTEL_FAST_SHLD)
    ctx->ops = &keccak_shld_64_ops;
#endif

  /* Set input block size, in Keccak terms this is called 'rate'.
*/ switch (algo) { case GCRY_MD_SHA3_224: ctx->suffix = SHA3_DELIMITED_SUFFIX; ctx->blocksize = 1152 / 8; ctx->outlen = 224 / 8; break; case GCRY_MD_SHA3_256: ctx->suffix = SHA3_DELIMITED_SUFFIX; ctx->blocksize = 1088 / 8; ctx->outlen = 256 / 8; break; case GCRY_MD_SHA3_384: ctx->suffix = SHA3_DELIMITED_SUFFIX; ctx->blocksize = 832 / 8; ctx->outlen = 384 / 8; break; case GCRY_MD_SHA3_512: ctx->suffix = SHA3_DELIMITED_SUFFIX; ctx->blocksize = 576 / 8; ctx->outlen = 512 / 8; break; case GCRY_MD_SHAKE128: ctx->suffix = SHAKE_DELIMITED_SUFFIX; ctx->blocksize = 1344 / 8; ctx->outlen = 0; break; case GCRY_MD_SHAKE256: ctx->suffix = SHAKE_DELIMITED_SUFFIX; ctx->blocksize = 1088 / 8; ctx->outlen = 0; break; default: BUG(); } #ifdef USE_S390X_CRYPTO ctx->kimd_func = 0; if ((features & HWF_S390X_MSA) != 0) { unsigned int kimd_func = 0; switch (algo) { case GCRY_MD_SHA3_224: kimd_func = KMID_FUNCTION_SHA3_224; break; case GCRY_MD_SHA3_256: kimd_func = KMID_FUNCTION_SHA3_256; break; case GCRY_MD_SHA3_384: kimd_func = KMID_FUNCTION_SHA3_384; break; case GCRY_MD_SHA3_512: kimd_func = KMID_FUNCTION_SHA3_512; break; case GCRY_MD_SHAKE128: kimd_func = KMID_FUNCTION_SHAKE128; break; case GCRY_MD_SHAKE256: kimd_func = KMID_FUNCTION_SHAKE256; break; } if ((kimd_query () & km_function_to_mask (kimd_func)) && (klmd_query () & km_function_to_mask (kimd_func))) { ctx->kimd_func = kimd_func; } } #endif } static void sha3_224_init (void *context, unsigned int flags) { keccak_init (GCRY_MD_SHA3_224, context, flags); } static void sha3_256_init (void *context, unsigned int flags) { keccak_init (GCRY_MD_SHA3_256, context, flags); } static void sha3_384_init (void *context, unsigned int flags) { keccak_init (GCRY_MD_SHA3_384, context, flags); } static void sha3_512_init (void *context, unsigned int flags) { keccak_init (GCRY_MD_SHA3_512, context, flags); } static void shake128_init (void *context, unsigned int flags) { keccak_init (GCRY_MD_SHAKE128, context, flags); } static void shake256_init 
(void *context, unsigned int flags) { keccak_init (GCRY_MD_SHAKE256, context, flags); } /* The routine final terminates the computation and * returns the digest. * The handle is prepared for a new cycle, but adding bytes to the * handle will the destroy the returned buffer. * Returns: 64 bytes representing the digest. When used for sha384, * we take the leftmost 48 of those bytes. */ static void keccak_final (void *context) { KECCAK_CONTEXT *ctx = context; KECCAK_STATE *hd = &ctx->state; const size_t bsize = ctx->blocksize; const byte suffix = ctx->suffix; unsigned int nburn, burn = 0; unsigned int lastbytes; byte lane[8]; #ifdef USE_S390X_CRYPTO if (ctx->kimd_func) { keccak_final_s390x (context); return; } #endif lastbytes = ctx->count; /* Do the padding and switch to the squeezing phase */ /* Absorb the last few bits and add the first bit of padding (which coincides with the delimiter in delimited suffix) */ buf_put_le64(lane, (u64)suffix << ((lastbytes % 8) * 8)); nburn = ctx->ops->absorb(&ctx->state, lastbytes / 8, lane, 1, -1); burn = nburn > burn ? nburn : burn; /* Add the second bit of padding. */ buf_put_le64(lane, (u64)0x80 << (((bsize - 1) % 8) * 8)); nburn = ctx->ops->absorb(&ctx->state, (bsize - 1) / 8, lane, 1, -1); burn = nburn > burn ? nburn : burn; if (suffix == SHA3_DELIMITED_SUFFIX) { /* Switch to the squeezing phase. */ nburn = ctx->ops->permute(hd); burn = nburn > burn ? nburn : burn; /* Squeeze out the SHA3 digest. */ nburn = ctx->ops->extract(hd, 0, (void *)hd, ctx->outlen); burn = nburn > burn ? nburn : burn; } else { /* Output for SHAKE can now be read with md_extract(). 
*/ ctx->count = 0; } wipememory(lane, sizeof(lane)); if (burn) _gcry_burn_stack (burn); } static byte * keccak_read (void *context) { KECCAK_CONTEXT *ctx = (KECCAK_CONTEXT *) context; KECCAK_STATE *hd = &ctx->state; return (byte *)&hd->u; } static void keccak_extract (void *context, void *out, size_t outlen) { KECCAK_CONTEXT *ctx = context; KECCAK_STATE *hd = &ctx->state; const size_t bsize = ctx->blocksize; unsigned int nburn, burn = 0; byte *outbuf = out; unsigned int nlanes; unsigned int nleft; unsigned int count; unsigned int i; byte lane[8]; #ifdef USE_S390X_CRYPTO if (ctx->kimd_func) { keccak_extract_s390x (context, out, outlen); return; } #endif count = ctx->count; while (count && outlen && (outlen < 8 || count % 8)) { /* Extract partial lane. */ nburn = ctx->ops->extract(hd, count / 8, lane, 8); burn = nburn > burn ? nburn : burn; for (i = count % 8; outlen && i < 8; i++) { *outbuf++ = lane[i]; outlen--; count++; } gcry_assert(count <= bsize); if (count == bsize) count = 0; } if (outlen >= 8 && count) { /* Extract tail of partial block. */ nlanes = outlen / 8; nleft = (bsize - count) / 8; nlanes = nlanes < nleft ? nlanes : nleft; nburn = ctx->ops->extract(hd, count / 8, outbuf, nlanes * 8); burn = nburn > burn ? nburn : burn; outlen -= nlanes * 8; outbuf += nlanes * 8; count += nlanes * 8; gcry_assert(count <= bsize); if (count == bsize) count = 0; } while (outlen >= bsize) { gcry_assert(count == 0); /* Squeeze more. */ nburn = ctx->ops->permute(hd); burn = nburn > burn ? nburn : burn; /* Extract full block. */ nburn = ctx->ops->extract(hd, 0, outbuf, bsize); burn = nburn > burn ? nburn : burn; outlen -= bsize; outbuf += bsize; } if (outlen) { gcry_assert(outlen < bsize); if (count == 0) { /* Squeeze more. */ nburn = ctx->ops->permute(hd); burn = nburn > burn ? nburn : burn; } if (outlen >= 8) { /* Extract head of partial block. */ nlanes = outlen / 8; nburn = ctx->ops->extract(hd, count / 8, outbuf, nlanes * 8); burn = nburn > burn ? 
nburn : burn; outlen -= nlanes * 8; outbuf += nlanes * 8; count += nlanes * 8; gcry_assert(count < bsize); } if (outlen) { /* Extract head of partial lane. */ nburn = ctx->ops->extract(hd, count / 8, lane, 8); burn = nburn > burn ? nburn : burn; for (i = count % 8; outlen && i < 8; i++) { *outbuf++ = lane[i]; outlen--; count++; } gcry_assert(count < bsize); } } ctx->count = count; if (burn) _gcry_burn_stack (burn); } /* Variant of the above shortcut function using multiple buffers. */ static void -_gcry_sha3_hash_buffers (void *outbuf, const gcry_buffer_t *iov, int iovcnt, - const gcry_md_spec_t *spec) +_gcry_sha3_hash_buffers (void *outbuf, size_t nbytes, const gcry_buffer_t *iov, + int iovcnt, const gcry_md_spec_t *spec) { KECCAK_CONTEXT hd; spec->init (&hd, 0); for (;iovcnt > 0; iov++, iovcnt--) keccak_write (&hd, (const char*)iov[0].data + iov[0].off, iov[0].len); keccak_final (&hd); - memcpy (outbuf, keccak_read (&hd), spec->mdlen); + if (spec->mdlen > 0) + memcpy (outbuf, keccak_read (&hd), spec->mdlen); + else + keccak_extract (&hd, outbuf, nbytes); } static void -_gcry_sha3_224_hash_buffers (void *outbuf, const gcry_buffer_t *iov, - int iovcnt) +_gcry_sha3_224_hash_buffers (void *outbuf, size_t nbytes, + const gcry_buffer_t *iov, int iovcnt) { - _gcry_sha3_hash_buffers (outbuf, iov, iovcnt, &_gcry_digest_spec_sha3_224); + _gcry_sha3_hash_buffers (outbuf, nbytes, iov, iovcnt, + &_gcry_digest_spec_sha3_224); } static void -_gcry_sha3_256_hash_buffers (void *outbuf, const gcry_buffer_t *iov, - int iovcnt) +_gcry_sha3_256_hash_buffers (void *outbuf, size_t nbytes, + const gcry_buffer_t *iov, int iovcnt) { - _gcry_sha3_hash_buffers (outbuf, iov, iovcnt, &_gcry_digest_spec_sha3_256); + _gcry_sha3_hash_buffers (outbuf, nbytes, iov, iovcnt, + &_gcry_digest_spec_sha3_256); } static void -_gcry_sha3_384_hash_buffers (void *outbuf, const gcry_buffer_t *iov, - int iovcnt) +_gcry_sha3_384_hash_buffers (void *outbuf, size_t nbytes, + const gcry_buffer_t *iov, int iovcnt) 
{ - _gcry_sha3_hash_buffers (outbuf, iov, iovcnt, &_gcry_digest_spec_sha3_384); + _gcry_sha3_hash_buffers (outbuf, nbytes, iov, iovcnt, + &_gcry_digest_spec_sha3_384); } static void -_gcry_sha3_512_hash_buffers (void *outbuf, const gcry_buffer_t *iov, - int iovcnt) +_gcry_sha3_512_hash_buffers (void *outbuf, size_t nbytes, + const gcry_buffer_t *iov, int iovcnt) { - _gcry_sha3_hash_buffers (outbuf, iov, iovcnt, &_gcry_digest_spec_sha3_512); + _gcry_sha3_hash_buffers (outbuf, nbytes, iov, iovcnt, + &_gcry_digest_spec_sha3_512); } /* Self-test section. */ static gpg_err_code_t selftests_keccak (int algo, int extended, selftest_report_func_t report) { const char *what; const char *errtxt; const char *short_hash; const char *long_hash; const char *one_million_a_hash; int hash_len; switch (algo) { default: BUG(); case GCRY_MD_SHA3_224: short_hash = "\xe6\x42\x82\x4c\x3f\x8c\xf2\x4a\xd0\x92\x34\xee\x7d\x3c\x76\x6f" "\xc9\xa3\xa5\x16\x8d\x0c\x94\xad\x73\xb4\x6f\xdf"; long_hash = "\x54\x3e\x68\x68\xe1\x66\x6c\x1a\x64\x36\x30\xdf\x77\x36\x7a\xe5" "\xa6\x2a\x85\x07\x0a\x51\xc1\x4c\xbf\x66\x5c\xbc"; one_million_a_hash = "\xd6\x93\x35\xb9\x33\x25\x19\x2e\x51\x6a\x91\x2e\x6d\x19\xa1\x5c" "\xb5\x1c\x6e\xd5\xc1\x52\x43\xe7\xa7\xfd\x65\x3c"; hash_len = 28; break; case GCRY_MD_SHA3_256: short_hash = "\x3a\x98\x5d\xa7\x4f\xe2\x25\xb2\x04\x5c\x17\x2d\x6b\xd3\x90\xbd" "\x85\x5f\x08\x6e\x3e\x9d\x52\x5b\x46\xbf\xe2\x45\x11\x43\x15\x32"; long_hash = "\x91\x6f\x60\x61\xfe\x87\x97\x41\xca\x64\x69\xb4\x39\x71\xdf\xdb" "\x28\xb1\xa3\x2d\xc3\x6c\xb3\x25\x4e\x81\x2b\xe2\x7a\xad\x1d\x18"; one_million_a_hash = "\x5c\x88\x75\xae\x47\x4a\x36\x34\xba\x4f\xd5\x5e\xc8\x5b\xff\xd6" "\x61\xf3\x2a\xca\x75\xc6\xd6\x99\xd0\xcd\xcb\x6c\x11\x58\x91\xc1"; hash_len = 32; break; case GCRY_MD_SHA3_384: short_hash = "\xec\x01\x49\x82\x88\x51\x6f\xc9\x26\x45\x9f\x58\xe2\xc6\xad\x8d" "\xf9\xb4\x73\xcb\x0f\xc0\x8c\x25\x96\xda\x7c\xf0\xe4\x9b\xe4\xb2" 
"\x98\xd8\x8c\xea\x92\x7a\xc7\xf5\x39\xf1\xed\xf2\x28\x37\x6d\x25"; long_hash = "\x79\x40\x7d\x3b\x59\x16\xb5\x9c\x3e\x30\xb0\x98\x22\x97\x47\x91" "\xc3\x13\xfb\x9e\xcc\x84\x9e\x40\x6f\x23\x59\x2d\x04\xf6\x25\xdc" "\x8c\x70\x9b\x98\xb4\x3b\x38\x52\xb3\x37\x21\x61\x79\xaa\x7f\xc7"; one_million_a_hash = "\xee\xe9\xe2\x4d\x78\xc1\x85\x53\x37\x98\x34\x51\xdf\x97\xc8\xad" "\x9e\xed\xf2\x56\xc6\x33\x4f\x8e\x94\x8d\x25\x2d\x5e\x0e\x76\x84" "\x7a\xa0\x77\x4d\xdb\x90\xa8\x42\x19\x0d\x2c\x55\x8b\x4b\x83\x40"; hash_len = 48; break; case GCRY_MD_SHA3_512: short_hash = "\xb7\x51\x85\x0b\x1a\x57\x16\x8a\x56\x93\xcd\x92\x4b\x6b\x09\x6e" "\x08\xf6\x21\x82\x74\x44\xf7\x0d\x88\x4f\x5d\x02\x40\xd2\x71\x2e" "\x10\xe1\x16\xe9\x19\x2a\xf3\xc9\x1a\x7e\xc5\x76\x47\xe3\x93\x40" "\x57\x34\x0b\x4c\xf4\x08\xd5\xa5\x65\x92\xf8\x27\x4e\xec\x53\xf0"; long_hash = "\xaf\xeb\xb2\xef\x54\x2e\x65\x79\xc5\x0c\xad\x06\xd2\xe5\x78\xf9" "\xf8\xdd\x68\x81\xd7\xdc\x82\x4d\x26\x36\x0f\xee\xbf\x18\xa4\xfa" "\x73\xe3\x26\x11\x22\x94\x8e\xfc\xfd\x49\x2e\x74\xe8\x2e\x21\x89" "\xed\x0f\xb4\x40\xd1\x87\xf3\x82\x27\x0c\xb4\x55\xf2\x1d\xd1\x85"; one_million_a_hash = "\x3c\x3a\x87\x6d\xa1\x40\x34\xab\x60\x62\x7c\x07\x7b\xb9\x8f\x7e" "\x12\x0a\x2a\x53\x70\x21\x2d\xff\xb3\x38\x5a\x18\xd4\xf3\x88\x59" "\xed\x31\x1d\x0a\x9d\x51\x41\xce\x9c\xc5\xc6\x6e\xe6\x89\xb2\x66" "\xa8\xaa\x18\xac\xe8\x28\x2a\x0e\x0d\xb5\x96\xc9\x0b\x0a\x7b\x87"; hash_len = 64; break; case GCRY_MD_SHAKE128: short_hash = "\x58\x81\x09\x2d\xd8\x18\xbf\x5c\xf8\xa3\xdd\xb7\x93\xfb\xcb\xa7" "\x40\x97\xd5\xc5\x26\xa6\xd3\x5f\x97\xb8\x33\x51\x94\x0f\x2c\xc8"; long_hash = "\x7b\x6d\xf6\xff\x18\x11\x73\xb6\xd7\x89\x8d\x7f\xf6\x3f\xb0\x7b" "\x7c\x23\x7d\xaf\x47\x1a\x5a\xe5\x60\x2a\xdb\xcc\xef\x9c\xcf\x4b"; one_million_a_hash = "\x9d\x22\x2c\x79\xc4\xff\x9d\x09\x2c\xf6\xca\x86\x14\x3a\xa4\x11" "\xe3\x69\x97\x38\x08\xef\x97\x09\x32\x55\x82\x6c\x55\x72\xef\x58"; hash_len = 32; break; case GCRY_MD_SHAKE256: short_hash = 
"\x48\x33\x66\x60\x13\x60\xa8\x77\x1c\x68\x63\x08\x0c\xc4\x11\x4d" "\x8d\xb4\x45\x30\xf8\xf1\xe1\xee\x4f\x94\xea\x37\xe7\x8b\x57\x39"; long_hash = "\x98\xbe\x04\x51\x6c\x04\xcc\x73\x59\x3f\xef\x3e\xd0\x35\x2e\xa9" "\xf6\x44\x39\x42\xd6\x95\x0e\x29\xa3\x72\xa6\x81\xc3\xde\xaf\x45"; one_million_a_hash = "\x35\x78\xa7\xa4\xca\x91\x37\x56\x9c\xdf\x76\xed\x61\x7d\x31\xbb" "\x99\x4f\xca\x9c\x1b\xbf\x8b\x18\x40\x13\xde\x82\x34\xdf\xd1\x3a"; hash_len = 32; break; } what = "short string"; errtxt = _gcry_hash_selftest_check_one (algo, 0, "abc", 3, short_hash, hash_len); if (errtxt) goto failed; if (extended) { what = "long string"; errtxt = _gcry_hash_selftest_check_one (algo, 0, "abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmn" "hijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu", 112, long_hash, hash_len); if (errtxt) goto failed; what = "one million \"a\""; errtxt = _gcry_hash_selftest_check_one (algo, 1, NULL, 0, one_million_a_hash, hash_len); if (errtxt) goto failed; } return 0; /* Succeeded. */ failed: if (report) report ("digest", algo, what, errtxt); return GPG_ERR_SELFTEST_FAILED; } /* Run a full self-test for ALGO and return 0 on success. */ static gpg_err_code_t run_selftests (int algo, int extended, selftest_report_func_t report) { gpg_err_code_t ec; switch (algo) { case GCRY_MD_SHA3_224: case GCRY_MD_SHA3_256: case GCRY_MD_SHA3_384: case GCRY_MD_SHA3_512: case GCRY_MD_SHAKE128: case GCRY_MD_SHAKE256: ec = selftests_keccak (algo, extended, report); break; default: ec = GPG_ERR_DIGEST_ALGO; break; } return ec; } static byte sha3_224_asn[] = { 0x30 }; static gcry_md_oid_spec_t oid_spec_sha3_224[] = { { "2.16.840.1.101.3.4.2.7" }, /* PKCS#1 sha3_224WithRSAEncryption */ { "?" }, { NULL } }; static byte sha3_256_asn[] = { 0x30 }; static gcry_md_oid_spec_t oid_spec_sha3_256[] = { { "2.16.840.1.101.3.4.2.8" }, /* PKCS#1 sha3_256WithRSAEncryption */ { "?" 
}, { NULL } }; static byte sha3_384_asn[] = { 0x30 }; static gcry_md_oid_spec_t oid_spec_sha3_384[] = { { "2.16.840.1.101.3.4.2.9" }, /* PKCS#1 sha3_384WithRSAEncryption */ { "?" }, { NULL } }; static byte sha3_512_asn[] = { 0x30 }; static gcry_md_oid_spec_t oid_spec_sha3_512[] = { { "2.16.840.1.101.3.4.2.10" }, /* PKCS#1 sha3_512WithRSAEncryption */ { "?" }, { NULL } }; static byte shake128_asn[] = { 0x30 }; static gcry_md_oid_spec_t oid_spec_shake128[] = { { "2.16.840.1.101.3.4.2.11" }, /* PKCS#1 shake128WithRSAEncryption */ { "?" }, { NULL } }; static byte shake256_asn[] = { 0x30 }; static gcry_md_oid_spec_t oid_spec_shake256[] = { { "2.16.840.1.101.3.4.2.12" }, /* PKCS#1 shake256WithRSAEncryption */ { "?" }, { NULL } }; gcry_md_spec_t _gcry_digest_spec_sha3_224 = { GCRY_MD_SHA3_224, {0, 1}, "SHA3-224", sha3_224_asn, DIM (sha3_224_asn), oid_spec_sha3_224, 28, sha3_224_init, keccak_write, keccak_final, keccak_read, NULL, _gcry_sha3_224_hash_buffers, sizeof (KECCAK_CONTEXT), run_selftests }; gcry_md_spec_t _gcry_digest_spec_sha3_256 = { GCRY_MD_SHA3_256, {0, 1}, "SHA3-256", sha3_256_asn, DIM (sha3_256_asn), oid_spec_sha3_256, 32, sha3_256_init, keccak_write, keccak_final, keccak_read, NULL, _gcry_sha3_256_hash_buffers, sizeof (KECCAK_CONTEXT), run_selftests }; gcry_md_spec_t _gcry_digest_spec_sha3_384 = { GCRY_MD_SHA3_384, {0, 1}, "SHA3-384", sha3_384_asn, DIM (sha3_384_asn), oid_spec_sha3_384, 48, sha3_384_init, keccak_write, keccak_final, keccak_read, NULL, _gcry_sha3_384_hash_buffers, sizeof (KECCAK_CONTEXT), run_selftests }; gcry_md_spec_t _gcry_digest_spec_sha3_512 = { GCRY_MD_SHA3_512, {0, 1}, "SHA3-512", sha3_512_asn, DIM (sha3_512_asn), oid_spec_sha3_512, 64, sha3_512_init, keccak_write, keccak_final, keccak_read, NULL, _gcry_sha3_512_hash_buffers, sizeof (KECCAK_CONTEXT), run_selftests }; gcry_md_spec_t _gcry_digest_spec_shake128 = { GCRY_MD_SHAKE128, {0, 1}, "SHAKE128", shake128_asn, DIM (shake128_asn), oid_spec_shake128, 0, shake128_init, keccak_write, 
keccak_final, NULL, keccak_extract, NULL, sizeof (KECCAK_CONTEXT), run_selftests }; gcry_md_spec_t _gcry_digest_spec_shake256 = { GCRY_MD_SHAKE256, {0, 1}, "SHAKE256", shake256_asn, DIM (shake256_asn), oid_spec_shake256, 0, shake256_init, keccak_write, keccak_final, NULL, keccak_extract, NULL, sizeof (KECCAK_CONTEXT), run_selftests }; diff --git a/cipher/md.c b/cipher/md.c index f142e9c3..6497ab41 100644 --- a/cipher/md.c +++ b/cipher/md.c @@ -1,1662 +1,1662 @@ /* md.c - message digest dispatcher * Copyright (C) 1998, 1999, 2002, 2003, 2006, * 2008 Free Software Foundation, Inc. * Copyright (C) 2013, 2014 g10 Code GmbH * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser general Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . */ #include #include #include #include #include #include "g10lib.h" #include "cipher.h" /* This is the list of the digest implementations included in libgcrypt. 
*/ static gcry_md_spec_t * const digest_list[] = { #if USE_CRC &_gcry_digest_spec_crc32, &_gcry_digest_spec_crc32_rfc1510, &_gcry_digest_spec_crc24_rfc2440, #endif #if USE_SHA1 &_gcry_digest_spec_sha1, #endif #if USE_SHA256 &_gcry_digest_spec_sha256, &_gcry_digest_spec_sha224, #endif #if USE_SHA512 &_gcry_digest_spec_sha512, &_gcry_digest_spec_sha384, &_gcry_digest_spec_sha512_256, &_gcry_digest_spec_sha512_224, #endif #if USE_SHA3 &_gcry_digest_spec_sha3_224, &_gcry_digest_spec_sha3_256, &_gcry_digest_spec_sha3_384, &_gcry_digest_spec_sha3_512, &_gcry_digest_spec_shake128, &_gcry_digest_spec_shake256, #endif #if USE_GOST_R_3411_94 &_gcry_digest_spec_gost3411_94, &_gcry_digest_spec_gost3411_cp, #endif #if USE_GOST_R_3411_12 &_gcry_digest_spec_stribog_256, &_gcry_digest_spec_stribog_512, #endif #if USE_WHIRLPOOL &_gcry_digest_spec_whirlpool, #endif #if USE_RMD160 &_gcry_digest_spec_rmd160, #endif #if USE_TIGER &_gcry_digest_spec_tiger, &_gcry_digest_spec_tiger1, &_gcry_digest_spec_tiger2, #endif #if USE_MD5 &_gcry_digest_spec_md5, #endif #if USE_MD4 &_gcry_digest_spec_md4, #endif #if USE_MD2 &_gcry_digest_spec_md2, #endif #if USE_BLAKE2 &_gcry_digest_spec_blake2b_512, &_gcry_digest_spec_blake2b_384, &_gcry_digest_spec_blake2b_256, &_gcry_digest_spec_blake2b_160, &_gcry_digest_spec_blake2s_256, &_gcry_digest_spec_blake2s_224, &_gcry_digest_spec_blake2s_160, &_gcry_digest_spec_blake2s_128, #endif #if USE_SM3 &_gcry_digest_spec_sm3, #endif NULL }; /* Digest implementations starting with index 0 (enum gcry_md_algos) */ static gcry_md_spec_t * const digest_list_algo0[] = { NULL, /* GCRY_MD_NONE */ #if USE_MD5 &_gcry_digest_spec_md5, #else NULL, #endif #if USE_SHA1 &_gcry_digest_spec_sha1, #else NULL, #endif #if USE_RMD160 &_gcry_digest_spec_rmd160, #else NULL, #endif NULL, /* Unused index 4 */ #if USE_MD2 &_gcry_digest_spec_md2, #else NULL, #endif #if USE_TIGER &_gcry_digest_spec_tiger, #else NULL, #endif NULL, /* GCRY_MD_HAVAL */ #if USE_SHA256 
&_gcry_digest_spec_sha256, #else NULL, #endif #if USE_SHA512 &_gcry_digest_spec_sha384, &_gcry_digest_spec_sha512, #else NULL, NULL, #endif #if USE_SHA256 &_gcry_digest_spec_sha224 #else NULL #endif }; /* Digest implementations starting with index 301 (enum gcry_md_algos) */ static gcry_md_spec_t * const digest_list_algo301[] = { #if USE_MD4 &_gcry_digest_spec_md4, #else NULL, #endif #if USE_CRC &_gcry_digest_spec_crc32, &_gcry_digest_spec_crc32_rfc1510, &_gcry_digest_spec_crc24_rfc2440, #else NULL, NULL, NULL, #endif #if USE_WHIRLPOOL &_gcry_digest_spec_whirlpool, #else NULL, #endif #if USE_TIGER &_gcry_digest_spec_tiger1, &_gcry_digest_spec_tiger2, #else NULL, NULL, #endif #if USE_GOST_R_3411_94 &_gcry_digest_spec_gost3411_94, #else NULL, #endif #if USE_GOST_R_3411_12 &_gcry_digest_spec_stribog_256, &_gcry_digest_spec_stribog_512, #else NULL, NULL, #endif #if USE_GOST_R_3411_94 &_gcry_digest_spec_gost3411_cp, #else NULL, #endif #if USE_SHA3 &_gcry_digest_spec_sha3_224, &_gcry_digest_spec_sha3_256, &_gcry_digest_spec_sha3_384, &_gcry_digest_spec_sha3_512, &_gcry_digest_spec_shake128, &_gcry_digest_spec_shake256, #else NULL, NULL, NULL, NULL, NULL, NULL, #endif #if USE_BLAKE2 &_gcry_digest_spec_blake2b_512, &_gcry_digest_spec_blake2b_384, &_gcry_digest_spec_blake2b_256, &_gcry_digest_spec_blake2b_160, &_gcry_digest_spec_blake2s_256, &_gcry_digest_spec_blake2s_224, &_gcry_digest_spec_blake2s_160, &_gcry_digest_spec_blake2s_128, #else NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, #endif #if USE_SM3 &_gcry_digest_spec_sm3, #else NULL, #endif #if USE_SHA512 &_gcry_digest_spec_sha512_256, &_gcry_digest_spec_sha512_224, #else NULL, NULL, #endif }; typedef struct gcry_md_list { gcry_md_spec_t *spec; struct gcry_md_list *next; size_t actual_struct_size; /* Allocated size of this structure. */ PROPERLY_ALIGNED_TYPE context[1]; } GcryDigestEntry; /* This structure is put right after the gcry_md_hd_t buffer, so that * only one memory block is needed. 
*/ struct gcry_md_context { int magic; size_t actual_handle_size; /* Allocated size of this handle. */ FILE *debug; struct { unsigned int secure:1; unsigned int finalized:1; unsigned int bugemu1:1; unsigned int hmac:1; } flags; GcryDigestEntry *list; }; #define CTX_MAGIC_NORMAL 0x11071961 #define CTX_MAGIC_SECURE 0x16917011 static gcry_err_code_t md_enable (gcry_md_hd_t hd, int algo); static void md_close (gcry_md_hd_t a); static void md_write (gcry_md_hd_t a, const void *inbuf, size_t inlen); static byte *md_read( gcry_md_hd_t a, int algo ); static int md_get_algo( gcry_md_hd_t a ); static int md_digest_length( int algo ); static void md_start_debug ( gcry_md_hd_t a, const char *suffix ); static void md_stop_debug ( gcry_md_hd_t a ); static int map_algo (int algo) { return algo; } /* Return the spec structure for the hash algorithm ALGO. For an unknown algorithm NULL is returned. */ static gcry_md_spec_t * spec_from_algo (int algo) { gcry_md_spec_t *spec = NULL; algo = map_algo (algo); if (algo >= 0 && algo < DIM(digest_list_algo0)) spec = digest_list_algo0[algo]; else if (algo >= 301 && algo < 301 + DIM(digest_list_algo301)) spec = digest_list_algo301[algo - 301]; if (spec) gcry_assert (spec->algo == algo); return spec; } /* Lookup a hash's spec by its name. */ static gcry_md_spec_t * spec_from_name (const char *name) { gcry_md_spec_t *spec; int idx; for (idx=0; (spec = digest_list[idx]); idx++) { if (!stricmp (name, spec->name)) return spec; } return NULL; } /* Lookup a hash's spec by its OID. 
*/ static gcry_md_spec_t * spec_from_oid (const char *oid) { gcry_md_spec_t *spec; gcry_md_oid_spec_t *oid_specs; int idx, j; for (idx=0; (spec = digest_list[idx]); idx++) { oid_specs = spec->oids; if (oid_specs) { for (j = 0; oid_specs[j].oidstring; j++) if (!stricmp (oid, oid_specs[j].oidstring)) return spec; } } return NULL; } static gcry_md_spec_t * search_oid (const char *oid, gcry_md_oid_spec_t *oid_spec) { gcry_md_spec_t *spec; int i; if (!oid) return NULL; if (!strncmp (oid, "oid.", 4) || !strncmp (oid, "OID.", 4)) oid += 4; spec = spec_from_oid (oid); if (spec && spec->oids) { for (i = 0; spec->oids[i].oidstring; i++) if (!stricmp (oid, spec->oids[i].oidstring)) { if (oid_spec) *oid_spec = spec->oids[i]; return spec; } } return NULL; } /**************** * Map a string to the digest algo */ int _gcry_md_map_name (const char *string) { gcry_md_spec_t *spec; if (!string) return 0; /* If the string starts with a digit (optionally prefixed with either "OID." or "oid."), we first look into our table of ASN.1 object identifiers to figure out the algorithm */ spec = search_oid (string, NULL); if (spec) return spec->algo; /* Not found, search a matching digest name. */ spec = spec_from_name (string); if (spec) return spec->algo; return 0; } /**************** * This function simply returns the name of the algorithm or some constant * string when there is no algo. It will never return NULL. * Use the macro gcry_md_test_algo() to check whether the algorithm * is valid. */ const char * _gcry_md_algo_name (int algorithm) { gcry_md_spec_t *spec; spec = spec_from_algo (algorithm); return spec ? spec->name : "?"; } static gcry_err_code_t check_digest_algo (int algorithm) { gcry_md_spec_t *spec; spec = spec_from_algo (algorithm); if (spec && !spec->flags.disabled) return 0; return GPG_ERR_DIGEST_ALGO; } /**************** * Open a message digest handle for use with algorithm ALGO. * More algorithms may be added by md_enable(). The initial algorithm * may be 0. 
*/ static gcry_err_code_t md_open (gcry_md_hd_t *h, int algo, unsigned int flags) { gcry_err_code_t err = 0; int secure = !!(flags & GCRY_MD_FLAG_SECURE); int hmac = !!(flags & GCRY_MD_FLAG_HMAC); int bufsize = secure ? 512 : 1024; struct gcry_md_context *ctx; gcry_md_hd_t hd; size_t n; /* Allocate a memory area to hold the caller visible buffer with it's * control information and the data required by this module. Set the * context pointer at the beginning to this area. * We have to use this strange scheme because we want to hide the * internal data but have a variable sized buffer. * * +---+------+---........------+-------------+ * !ctx! bctl ! buffer ! private ! * +---+------+---........------+-------------+ * ! ^ * !---------------------------! * * We have to make sure that private is well aligned. */ n = sizeof (struct gcry_md_handle) + bufsize; n = ((n + sizeof (PROPERLY_ALIGNED_TYPE) - 1) / sizeof (PROPERLY_ALIGNED_TYPE)) * sizeof (PROPERLY_ALIGNED_TYPE); /* Allocate and set the Context pointer to the private data */ if (secure) hd = xtrymalloc_secure (n + sizeof (struct gcry_md_context)); else hd = xtrymalloc (n + sizeof (struct gcry_md_context)); if (! hd) err = gpg_err_code_from_errno (errno); if (! err) { hd->ctx = ctx = (void *) ((char *) hd + n); /* Setup the globally visible data (bctl in the diagram).*/ hd->bufsize = n - sizeof (struct gcry_md_handle) + 1; hd->bufpos = 0; /* Initialize the private data. */ memset (hd->ctx, 0, sizeof *hd->ctx); ctx->magic = secure ? CTX_MAGIC_SECURE : CTX_MAGIC_NORMAL; ctx->actual_handle_size = n + sizeof (struct gcry_md_context); ctx->flags.secure = secure; ctx->flags.hmac = hmac; ctx->flags.bugemu1 = !!(flags & GCRY_MD_FLAG_BUGEMU1); } if (! err) { /* Hmmm, should we really do that? - yes [-wk] */ _gcry_fast_random_poll (); if (algo) { err = md_enable (hd, algo); if (err) md_close (hd); } } if (! err) *h = hd; return err; } /* Create a message digest object for algorithm ALGO. 
FLAGS may be given as an bitwise OR of the gcry_md_flags values. ALGO may be given as 0 if the algorithms to be used are later set using gcry_md_enable. H is guaranteed to be a valid handle or NULL on error. */ gcry_err_code_t _gcry_md_open (gcry_md_hd_t *h, int algo, unsigned int flags) { gcry_err_code_t rc; gcry_md_hd_t hd; if ((flags & ~(GCRY_MD_FLAG_SECURE | GCRY_MD_FLAG_HMAC | GCRY_MD_FLAG_BUGEMU1))) rc = GPG_ERR_INV_ARG; else rc = md_open (&hd, algo, flags); *h = rc? NULL : hd; return rc; } static gcry_err_code_t md_enable (gcry_md_hd_t hd, int algorithm) { struct gcry_md_context *h = hd->ctx; gcry_md_spec_t *spec; GcryDigestEntry *entry; gcry_err_code_t err = 0; for (entry = h->list; entry; entry = entry->next) if (entry->spec->algo == algorithm) return 0; /* Already enabled */ spec = spec_from_algo (algorithm); if (!spec) { log_debug ("md_enable: algorithm %d not available\n", algorithm); err = GPG_ERR_DIGEST_ALGO; } if (!err && algorithm == GCRY_MD_MD5 && fips_mode ()) { _gcry_inactivate_fips_mode ("MD5 used"); if (_gcry_enforced_fips_mode () ) { /* We should never get to here because we do not register MD5 in enforced fips mode. But better throw an error. */ err = GPG_ERR_DIGEST_ALGO; } } if (!err && h->flags.hmac && spec->read == NULL) { /* Expandable output function cannot act as part of HMAC. */ err = GPG_ERR_DIGEST_ALGO; } if (!err) { size_t size = (sizeof (*entry) + spec->contextsize * (h->flags.hmac? 3 : 1) - sizeof (entry->context)); /* And allocate a new list entry. */ if (h->flags.secure) entry = xtrymalloc_secure (size); else entry = xtrymalloc (size); if (! entry) err = gpg_err_code_from_errno (errno); else { entry->spec = spec; entry->next = h->list; entry->actual_struct_size = size; h->list = entry; /* And init this instance. */ entry->spec->init (entry->context, h->flags.bugemu1? 
GCRY_MD_FLAG_BUGEMU1:0); } } return err; } gcry_err_code_t _gcry_md_enable (gcry_md_hd_t hd, int algorithm) { return md_enable (hd, algorithm); } static gcry_err_code_t md_copy (gcry_md_hd_t ahd, gcry_md_hd_t *b_hd) { gcry_err_code_t err = 0; struct gcry_md_context *a = ahd->ctx; struct gcry_md_context *b; GcryDigestEntry *ar, *br; gcry_md_hd_t bhd; size_t n; if (ahd->bufpos) md_write (ahd, NULL, 0); n = (char *) ahd->ctx - (char *) ahd; if (a->flags.secure) bhd = xtrymalloc_secure (n + sizeof (struct gcry_md_context)); else bhd = xtrymalloc (n + sizeof (struct gcry_md_context)); if (!bhd) { err = gpg_err_code_from_syserror (); goto leave; } bhd->ctx = b = (void *) ((char *) bhd + n); /* No need to copy the buffer due to the write above. */ gcry_assert (ahd->bufsize == (n - sizeof (struct gcry_md_handle) + 1)); bhd->bufsize = ahd->bufsize; bhd->bufpos = 0; gcry_assert (! ahd->bufpos); memcpy (b, a, sizeof *a); b->list = NULL; b->debug = NULL; /* Copy the complete list of algorithms. The copied list is reversed, but that doesn't matter. */ for (ar = a->list; ar; ar = ar->next) { if (a->flags.secure) br = xtrymalloc_secure (ar->actual_struct_size); else br = xtrymalloc (ar->actual_struct_size); if (!br) { err = gpg_err_code_from_syserror (); md_close (bhd); goto leave; } memcpy (br, ar, ar->actual_struct_size); br->next = b->list; b->list = br; } if (a->debug) md_start_debug (bhd, "unknown"); *b_hd = bhd; leave: return err; } gcry_err_code_t _gcry_md_copy (gcry_md_hd_t *handle, gcry_md_hd_t hd) { gcry_err_code_t rc; rc = md_copy (hd, handle); if (rc) *handle = NULL; return rc; } /* * Reset all contexts and discard any buffered stuff. This may be used * instead of a md_close(); md_open(). */ void _gcry_md_reset (gcry_md_hd_t a) { GcryDigestEntry *r; /* Note: We allow this even in fips non operational mode. 
*/ a->bufpos = a->ctx->flags.finalized = 0; if (a->ctx->flags.hmac) for (r = a->ctx->list; r; r = r->next) { memcpy (r->context, (char *)r->context + r->spec->contextsize, r->spec->contextsize); } else for (r = a->ctx->list; r; r = r->next) { memset (r->context, 0, r->spec->contextsize); (*r->spec->init) (r->context, a->ctx->flags.bugemu1? GCRY_MD_FLAG_BUGEMU1:0); } } static void md_close (gcry_md_hd_t a) { GcryDigestEntry *r, *r2; if (! a) return; if (a->ctx->debug) md_stop_debug (a); for (r = a->ctx->list; r; r = r2) { r2 = r->next; wipememory (r, r->actual_struct_size); xfree (r); } wipememory (a, a->ctx->actual_handle_size); xfree(a); } void _gcry_md_close (gcry_md_hd_t hd) { /* Note: We allow this even in fips non operational mode. */ md_close (hd); } static void md_write (gcry_md_hd_t a, const void *inbuf, size_t inlen) { GcryDigestEntry *r; if (a->ctx->debug) { if (a->bufpos && fwrite (a->buf, a->bufpos, 1, a->ctx->debug) != 1) BUG(); if (inlen && fwrite (inbuf, inlen, 1, a->ctx->debug) != 1) BUG(); } for (r = a->ctx->list; r; r = r->next) { if (a->bufpos) (*r->spec->write) (r->context, a->buf, a->bufpos); (*r->spec->write) (r->context, inbuf, inlen); } a->bufpos = 0; } /* Note that this function may be used after finalize and read to keep on writing to the transform function so to mitigate timing attacks. 
*/ void _gcry_md_write (gcry_md_hd_t hd, const void *inbuf, size_t inlen) { md_write (hd, inbuf, inlen); } static void md_final (gcry_md_hd_t a) { GcryDigestEntry *r; if (a->ctx->flags.finalized) return; if (a->bufpos) md_write (a, NULL, 0); for (r = a->ctx->list; r; r = r->next) (*r->spec->final) (r->context); a->ctx->flags.finalized = 1; if (!a->ctx->flags.hmac) return; for (r = a->ctx->list; r; r = r->next) { byte *p; size_t dlen = r->spec->mdlen; byte *hash; gcry_err_code_t err; if (r->spec->read == NULL) continue; p = r->spec->read (r->context); if (a->ctx->flags.secure) hash = xtrymalloc_secure (dlen); else hash = xtrymalloc (dlen); if (!hash) { err = gpg_err_code_from_errno (errno); _gcry_fatal_error (err, NULL); } memcpy (hash, p, dlen); memcpy (r->context, (char *)r->context + r->spec->contextsize * 2, r->spec->contextsize); (*r->spec->write) (r->context, hash, dlen); (*r->spec->final) (r->context); xfree (hash); } } static gcry_err_code_t md_setkey (gcry_md_hd_t h, const unsigned char *key, size_t keylen) { gcry_err_code_t rc = 0; GcryDigestEntry *r; int algo_had_setkey = 0; if (!h->ctx->list) return GPG_ERR_DIGEST_ALGO; /* Might happen if no algo is enabled. */ if (h->ctx->flags.hmac) return GPG_ERR_DIGEST_ALGO; /* Tried md_setkey for HMAC md. */ for (r = h->ctx->list; r; r = r->next) { switch (r->spec->algo) { #if USE_BLAKE2 /* TODO? add spec->init_with_key? */ case GCRY_MD_BLAKE2B_512: case GCRY_MD_BLAKE2B_384: case GCRY_MD_BLAKE2B_256: case GCRY_MD_BLAKE2B_160: case GCRY_MD_BLAKE2S_256: case GCRY_MD_BLAKE2S_224: case GCRY_MD_BLAKE2S_160: case GCRY_MD_BLAKE2S_128: algo_had_setkey = 1; memset (r->context, 0, r->spec->contextsize); rc = _gcry_blake2_init_with_key (r->context, h->ctx->flags.bugemu1 ? GCRY_MD_FLAG_BUGEMU1:0, key, keylen, r->spec->algo); break; #endif default: rc = GPG_ERR_DIGEST_ALGO; break; } if (rc) break; } if (rc && !algo_had_setkey) { /* None of algorithms had setkey implementation, so contexts were not * modified. Just return error. 
*/ return rc; } else if (rc && algo_had_setkey) { /* Some of the contexts have been modified, but got error. Reset * all contexts. */ _gcry_md_reset (h); return rc; } /* Successful md_setkey implies reset. */ h->bufpos = h->ctx->flags.finalized = 0; return 0; } static gcry_err_code_t prepare_macpads (gcry_md_hd_t a, const unsigned char *key, size_t keylen) { GcryDigestEntry *r; if (!a->ctx->list) return GPG_ERR_DIGEST_ALGO; /* Might happen if no algo is enabled. */ if (!a->ctx->flags.hmac) return GPG_ERR_DIGEST_ALGO; /* Tried prepare_macpads for non-HMAC md. */ for (r = a->ctx->list; r; r = r->next) { const unsigned char *k; size_t k_len; unsigned char *key_allocated = NULL; int macpad_Bsize; int i; switch (r->spec->algo) { /* TODO: add spec->blocksize */ case GCRY_MD_SHA3_224: macpad_Bsize = 1152 / 8; break; case GCRY_MD_SHA3_256: macpad_Bsize = 1088 / 8; break; case GCRY_MD_SHA3_384: macpad_Bsize = 832 / 8; break; case GCRY_MD_SHA3_512: macpad_Bsize = 576 / 8; break; case GCRY_MD_SHA384: case GCRY_MD_SHA512: case GCRY_MD_SHA512_256: case GCRY_MD_SHA512_224: case GCRY_MD_BLAKE2B_512: case GCRY_MD_BLAKE2B_384: case GCRY_MD_BLAKE2B_256: case GCRY_MD_BLAKE2B_160: macpad_Bsize = 128; break; case GCRY_MD_GOSTR3411_94: case GCRY_MD_GOSTR3411_CP: macpad_Bsize = 32; break; default: macpad_Bsize = 64; break; } if ( keylen > macpad_Bsize ) { k = key_allocated = xtrymalloc_secure (r->spec->mdlen); if (!k) return gpg_err_code_from_errno (errno); _gcry_md_hash_buffer (r->spec->algo, key_allocated, key, keylen); k_len = r->spec->mdlen; gcry_assert ( k_len <= macpad_Bsize ); } else { k = key; k_len = keylen; } (*r->spec->init) (r->context, a->ctx->flags.bugemu1? 
GCRY_MD_FLAG_BUGEMU1:0); a->bufpos = 0; for (i=0; i < k_len; i++ ) _gcry_md_putc (a, k[i] ^ 0x36); for (; i < macpad_Bsize; i++ ) _gcry_md_putc (a, 0x36); (*r->spec->write) (r->context, a->buf, a->bufpos); memcpy ((char *)r->context + r->spec->contextsize, r->context, r->spec->contextsize); (*r->spec->init) (r->context, a->ctx->flags.bugemu1? GCRY_MD_FLAG_BUGEMU1:0); a->bufpos = 0; for (i=0; i < k_len; i++ ) _gcry_md_putc (a, k[i] ^ 0x5c); for (; i < macpad_Bsize; i++ ) _gcry_md_putc (a, 0x5c); (*r->spec->write) (r->context, a->buf, a->bufpos); memcpy ((char *)r->context + r->spec->contextsize*2, r->context, r->spec->contextsize); xfree (key_allocated); } a->bufpos = 0; return 0; } gcry_err_code_t _gcry_md_ctl (gcry_md_hd_t hd, int cmd, void *buffer, size_t buflen) { gcry_err_code_t rc = 0; (void)buflen; /* Currently not used. */ switch (cmd) { case GCRYCTL_FINALIZE: md_final (hd); break; case GCRYCTL_START_DUMP: md_start_debug (hd, buffer); break; case GCRYCTL_STOP_DUMP: md_stop_debug ( hd ); break; default: rc = GPG_ERR_INV_OP; } return rc; } gcry_err_code_t _gcry_md_setkey (gcry_md_hd_t hd, const void *key, size_t keylen) { gcry_err_code_t rc; if (hd->ctx->flags.hmac) { rc = prepare_macpads (hd, key, keylen); if (!rc) _gcry_md_reset (hd); } else { rc = md_setkey (hd, key, keylen); } return rc; } /* The new debug interface. If SUFFIX is a string it creates an debug file for the context HD. IF suffix is NULL, the file is closed and debugging is stopped. */ void _gcry_md_debug (gcry_md_hd_t hd, const char *suffix) { if (suffix) md_start_debug (hd, suffix); else md_stop_debug (hd); } /**************** * If ALGO is null get the digest for the used algo (which should be * only one) */ static byte * md_read( gcry_md_hd_t a, int algo ) { GcryDigestEntry *r = a->ctx->list; if (! 
algo) { /* Return the first algorithm */ if (r) { if (r->next) log_debug ("more than one algorithm in md_read(0)\n"); if (r->spec->read) return r->spec->read (r->context); } } else { for (r = a->ctx->list; r; r = r->next) if (r->spec->algo == algo) { if (r->spec->read) return r->spec->read (r->context); break; } } if (r && !r->spec->read) _gcry_fatal_error (GPG_ERR_DIGEST_ALGO, "requested algo has no fixed digest length"); else _gcry_fatal_error (GPG_ERR_DIGEST_ALGO, "requested algo not in md context"); return NULL; } /* * Read out the complete digest, this function implictly finalizes * the hash. */ byte * _gcry_md_read (gcry_md_hd_t hd, int algo) { /* This function is expected to always return a digest, thus we can't return an error which we actually should do in non-operational state. */ _gcry_md_ctl (hd, GCRYCTL_FINALIZE, NULL, 0); return md_read (hd, algo); } /**************** * If ALGO is null get the digest for the used algo (which should be * only one) */ static gcry_err_code_t md_extract(gcry_md_hd_t a, int algo, void *out, size_t outlen) { GcryDigestEntry *r = a->ctx->list; if (!algo) { /* Return the first algorithm */ if (r && r->spec->extract) { if (r->next) log_debug ("more than one algorithm in md_extract(0)\n"); r->spec->extract (r->context, out, outlen); return 0; } } else { for (r = a->ctx->list; r; r = r->next) if (r->spec->algo == algo && r->spec->extract) { r->spec->extract (r->context, out, outlen); return 0; } } return GPG_ERR_DIGEST_ALGO; } /* * Expand the output from XOF class digest, this function implictly finalizes * the hash. */ gcry_err_code_t _gcry_md_extract (gcry_md_hd_t hd, int algo, void *out, size_t outlen) { _gcry_md_ctl (hd, GCRYCTL_FINALIZE, NULL, 0); return md_extract (hd, algo, out, outlen); } /* * Read out an intermediate digest. Not yet functional. */ gcry_err_code_t _gcry_md_get (gcry_md_hd_t hd, int algo, byte *buffer, int buflen) { (void)hd; (void)algo; (void)buffer; (void)buflen; /*md_digest ... 
*/ fips_signal_error ("unimplemented function called"); return GPG_ERR_INTERNAL; } /* * Shortcut function to hash a buffer with a given algo. The only * guaranteed supported algorithms are RIPE-MD160 and SHA-1. The * supplied digest buffer must be large enough to store the resulting * hash. No error is returned, the function will abort on an invalid * algo. DISABLED_ALGOS are ignored here. */ void _gcry_md_hash_buffer (int algo, void *digest, const void *buffer, size_t length) { gcry_md_spec_t *spec; spec = spec_from_algo (algo); if (!spec) { log_debug ("md_hash_buffer: algorithm %d not available\n", algo); return; } if (algo == GCRY_MD_MD5 && fips_mode ()) { _gcry_inactivate_fips_mode ("MD5 used"); if (_gcry_enforced_fips_mode () ) { /* We should never get to here because we do not register MD5 in enforced fips mode. */ _gcry_fips_noreturn (); } } if (spec->hash_buffers != NULL) { gcry_buffer_t iov; iov.size = 0; iov.data = (void *)buffer; iov.off = 0; iov.len = length; - spec->hash_buffers (digest, &iov, 1); + spec->hash_buffers (digest, spec->mdlen, &iov, 1); } else { /* For the others we do not have a fast function, so we use the normal functions. */ gcry_md_hd_t h; gpg_err_code_t err; err = md_open (&h, algo, 0); if (err) log_bug ("gcry_md_open failed for algo %d: %s", algo, gpg_strerror (gcry_error(err))); md_write (h, (byte *) buffer, length); md_final (h); memcpy (digest, md_read (h, algo), md_digest_length (algo)); md_close (h); } } /* Shortcut function to hash multiple buffers with a given algo. In contrast to gcry_md_hash_buffer, this function returns an error on invalid arguments or on other problems; disabled algorithms are _not_ ignored but flagged as an error. The data to sign is taken from the array IOV which has IOVCNT items. The only supported flag in FLAGS is GCRY_MD_FLAG_HMAC which turns this function into a HMAC function; the first item in IOV is then used as the key. On success 0 is returned and resulting hash or HMAC is stored at DIGEST. 
DIGESTLEN may be given as -1, in which case DIGEST must have been provided by the caller with an appropriate length. DIGESTLEN may also be the appropriate length or, in case of XOF algorithms, DIGESTLEN indicates number bytes to extract from XOF to DIGEST. */ gpg_err_code_t _gcry_md_hash_buffers_extract (int algo, unsigned int flags, void *digest, int digestlen, const gcry_buffer_t *iov, int iovcnt) { gcry_md_spec_t *spec; int hmac; if (!iov || iovcnt < 0) return GPG_ERR_INV_ARG; if (flags & ~(GCRY_MD_FLAG_HMAC)) return GPG_ERR_INV_ARG; hmac = !!(flags & GCRY_MD_FLAG_HMAC); if (hmac && iovcnt < 1) return GPG_ERR_INV_ARG; spec = spec_from_algo (algo); if (!spec) { log_debug ("md_hash_buffers: algorithm %d not available\n", algo); return GPG_ERR_DIGEST_ALGO; } if (algo == GCRY_MD_MD5 && fips_mode ()) { _gcry_inactivate_fips_mode ("MD5 used"); if (_gcry_enforced_fips_mode () ) { /* We should never get to here because we do not register MD5 in enforced fips mode. */ _gcry_fips_noreturn (); } } if (spec->mdlen > 0 && digestlen != -1 && digestlen != spec->mdlen) return GPG_ERR_DIGEST_ALGO; if (spec->mdlen == 0 && digestlen == -1) return GPG_ERR_DIGEST_ALGO; if (!hmac && spec->hash_buffers) { - spec->hash_buffers (digest, iov, iovcnt); + spec->hash_buffers (digest, digestlen, iov, iovcnt); } else { /* For the others we do not have a fast function, so we use the normal functions. */ gcry_md_hd_t h; gpg_err_code_t rc; rc = md_open (&h, algo, (hmac? GCRY_MD_FLAG_HMAC:0)); if (rc) return rc; if (hmac) { rc = _gcry_md_setkey (h, (const char*)iov[0].data + iov[0].off, iov[0].len); if (rc) { md_close (h); return rc; } iov++; iovcnt--; } for (;iovcnt; iov++, iovcnt--) md_write (h, (const char*)iov[0].data + iov[0].off, iov[0].len); md_final (h); if (spec->mdlen > 0) memcpy (digest, md_read (h, algo), spec->mdlen); else if (digestlen > 0) md_extract (h, algo, digest, digestlen); md_close (h); } return 0; } /* Shortcut function to hash multiple buffers with a given algo. 
In contrast to gcry_md_hash_buffer, this function returns an error on invalid arguments or on other problems; disabled algorithms are _not_ ignored but flagged as an error. The data to sign is taken from the array IOV which has IOVCNT items. The only supported flag in FLAGS is GCRY_MD_FLAG_HMAC which turns this function into a HMAC function; the first item in IOV is then used as the key. On success 0 is returned and resulting hash or HMAC is stored at DIGEST which must have been provided by the caller with an appropriate length. */ gpg_err_code_t _gcry_md_hash_buffers (int algo, unsigned int flags, void *digest, const gcry_buffer_t *iov, int iovcnt) { return _gcry_md_hash_buffers_extract(algo, flags, digest, -1, iov, iovcnt); } static int md_get_algo (gcry_md_hd_t a) { GcryDigestEntry *r = a->ctx->list; if (r && r->next) { fips_signal_error ("possible usage error"); log_error ("WARNING: more than one algorithm in md_get_algo()\n"); } return r ? r->spec->algo : 0; } int _gcry_md_get_algo (gcry_md_hd_t hd) { return md_get_algo (hd); } /**************** * Return the length of the digest */ static int md_digest_length (int algorithm) { gcry_md_spec_t *spec; spec = spec_from_algo (algorithm); return spec? spec->mdlen : 0; } /**************** * Return the length of the digest in bytes. * This function will return 0 in case of errors. 
*/ unsigned int _gcry_md_get_algo_dlen (int algorithm) { return md_digest_length (algorithm); } /* Hmmm: add a mode to enumerate the OIDs * to make g10/sig-check.c more portable */ static const byte * md_asn_oid (int algorithm, size_t *asnlen, size_t *mdlen) { gcry_md_spec_t *spec; const byte *asnoid = NULL; spec = spec_from_algo (algorithm); if (spec) { if (asnlen) *asnlen = spec->asnlen; if (mdlen) *mdlen = spec->mdlen; asnoid = spec->asnoid; } else log_bug ("no ASN.1 OID for md algo %d\n", algorithm); return asnoid; } /**************** * Return information about the given cipher algorithm * WHAT select the kind of information returned: * GCRYCTL_TEST_ALGO: * Returns 0 when the specified algorithm is available for use. * buffer and nbytes must be zero. * GCRYCTL_GET_ASNOID: * Return the ASNOID of the algorithm in buffer. if buffer is NULL, only * the required length is returned. * GCRYCTL_SELFTEST * Helper for the regression tests - shall not be used by applications. * * Note: Because this function is in most cases used to return an * integer value, we can make it easier for the caller to just look at * the return value. The caller will in all cases consult the value * and thereby detecting whether a error occurred or not (i.e. while checking * the block size) */ gcry_err_code_t _gcry_md_algo_info (int algo, int what, void *buffer, size_t *nbytes) { gcry_err_code_t rc; switch (what) { case GCRYCTL_TEST_ALGO: if (buffer || nbytes) rc = GPG_ERR_INV_ARG; else rc = check_digest_algo (algo); break; case GCRYCTL_GET_ASNOID: /* We need to check that the algo is available because md_asn_oid would otherwise raise an assertion. 
*/ rc = check_digest_algo (algo); if (!rc) { const char unsigned *asn; size_t asnlen; asn = md_asn_oid (algo, &asnlen, NULL); if (buffer && (*nbytes >= asnlen)) { memcpy (buffer, asn, asnlen); *nbytes = asnlen; } else if (!buffer && nbytes) *nbytes = asnlen; else { if (buffer) rc = GPG_ERR_TOO_SHORT; else rc = GPG_ERR_INV_ARG; } } break; case GCRYCTL_SELFTEST: /* Helper function for the regression tests. */ rc = gpg_err_code (_gcry_md_selftest (algo, nbytes? (int)*nbytes : 0, NULL)); break; default: rc = GPG_ERR_INV_OP; break; } return rc; } static void md_start_debug ( gcry_md_hd_t md, const char *suffix ) { static int idx=0; char buf[50]; if (fips_mode ()) return; if ( md->ctx->debug ) { log_debug("Oops: md debug already started\n"); return; } idx++; snprintf (buf, DIM(buf)-1, "dbgmd-%05d.%.10s", idx, suffix ); md->ctx->debug = fopen(buf, "w"); if ( !md->ctx->debug ) log_debug("md debug: can't open %s\n", buf ); } static void md_stop_debug( gcry_md_hd_t md ) { if ( md->ctx->debug ) { if ( md->bufpos ) md_write ( md, NULL, 0 ); fclose (md->ctx->debug); md->ctx->debug = NULL; } { /* a kludge to pull in the __muldi3 for Solaris */ volatile u32 a = (u32)(uintptr_t)md; volatile u64 b = 42; volatile u64 c; c = a * b; (void)c; } } /* * Return information about the digest handle. * GCRYCTL_IS_SECURE: * Returns 1 when the handle works on secured memory * otherwise 0 is returned. There is no error return. * GCRYCTL_IS_ALGO_ENABLED: * Returns 1 if the algo is enabled for that handle. * The algo must be passed as the address of an int. 
*/ gcry_err_code_t _gcry_md_info (gcry_md_hd_t h, int cmd, void *buffer, size_t *nbytes) { gcry_err_code_t rc = 0; switch (cmd) { case GCRYCTL_IS_SECURE: *nbytes = h->ctx->flags.secure; break; case GCRYCTL_IS_ALGO_ENABLED: { GcryDigestEntry *r; int algo; if ( !buffer || !nbytes || *nbytes != sizeof (int)) rc = GPG_ERR_INV_ARG; else { algo = *(int*)buffer; *nbytes = 0; for(r=h->ctx->list; r; r = r->next ) { if (r->spec->algo == algo) { *nbytes = 1; break; } } } break; } default: rc = GPG_ERR_INV_OP; } return rc; } /* Explicitly initialize this module. */ gcry_err_code_t _gcry_md_init (void) { if (fips_mode()) { /* disable algorithms that are disallowed in fips */ int idx; gcry_md_spec_t *spec; for (idx = 0; (spec = digest_list[idx]); idx++) if (!spec->flags.fips) spec->flags.disabled = 1; } return 0; } int _gcry_md_is_secure (gcry_md_hd_t a) { size_t value; if (_gcry_md_info (a, GCRYCTL_IS_SECURE, NULL, &value)) value = 1; /* It seems to be better to assume secure memory on error. */ return value; } int _gcry_md_is_enabled (gcry_md_hd_t a, int algo) { size_t value; value = sizeof algo; if (_gcry_md_info (a, GCRYCTL_IS_ALGO_ENABLED, &algo, &value)) value = 0; return value; } /* Run the selftests for digest algorithm ALGO with optional reporting function REPORT. */ gpg_error_t _gcry_md_selftest (int algo, int extended, selftest_report_func_t report) { gcry_err_code_t ec = 0; gcry_md_spec_t *spec; spec = spec_from_algo (algo); if (spec && !spec->flags.disabled && spec->selftest) ec = spec->selftest (algo, extended, report); else { ec = (spec && spec->selftest) ? GPG_ERR_DIGEST_ALGO /* */ : GPG_ERR_NOT_IMPLEMENTED; if (report) report ("digest", algo, "module", (spec && !spec->flags.disabled)? "no selftest available" : spec? 
"algorithm disabled" : "algorithm not found"); } return gpg_error (ec); } diff --git a/cipher/rmd160.c b/cipher/rmd160.c index 37462130..1861205e 100644 --- a/cipher/rmd160.c +++ b/cipher/rmd160.c @@ -1,517 +1,520 @@ /* rmd160.c - RIPE-MD160 * Copyright (C) 1998, 2001, 2002, 2003 Free Software Foundation, Inc. * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA */ #include #include #include #include #include "g10lib.h" #include "hash-common.h" #include "cipher.h" /* Only used for the rmd160_hash_buffer() prototype. */ #include "bithelp.h" #include "bufhelp.h" /********************************* * RIPEMD-160 is not patented, see (as of 25.10.97) * http://www.esat.kuleuven.ac.be/~bosselae/ripemd160.html * Note that the code uses Little Endian byteorder, which is good for * 386 etc, but we must add some conversion when used on a big endian box. * * * Pseudo-code for RIPEMD-160 * * RIPEMD-160 is an iterative hash function that operates on 32-bit words. * The round function takes as input a 5-word chaining variable and a 16-word * message block and maps this to a new chaining variable. All operations are * defined on 32-bit words. Padding is identical to that of MD4. 
* * * RIPEMD-160: definitions * * * nonlinear functions at bit level: exor, mux, -, mux, - * * f(j, x, y, z) = x XOR y XOR z (0 <= j <= 15) * f(j, x, y, z) = (x AND y) OR (NOT(x) AND z) (16 <= j <= 31) * f(j, x, y, z) = (x OR NOT(y)) XOR z (32 <= j <= 47) * f(j, x, y, z) = (x AND z) OR (y AND NOT(z)) (48 <= j <= 63) * f(j, x, y, z) = x XOR (y OR NOT(z)) (64 <= j <= 79) * * * added constants (hexadecimal) * * K(j) = 0x00000000 (0 <= j <= 15) * K(j) = 0x5A827999 (16 <= j <= 31) int(2**30 x sqrt(2)) * K(j) = 0x6ED9EBA1 (32 <= j <= 47) int(2**30 x sqrt(3)) * K(j) = 0x8F1BBCDC (48 <= j <= 63) int(2**30 x sqrt(5)) * K(j) = 0xA953FD4E (64 <= j <= 79) int(2**30 x sqrt(7)) * K'(j) = 0x50A28BE6 (0 <= j <= 15) int(2**30 x cbrt(2)) * K'(j) = 0x5C4DD124 (16 <= j <= 31) int(2**30 x cbrt(3)) * K'(j) = 0x6D703EF3 (32 <= j <= 47) int(2**30 x cbrt(5)) * K'(j) = 0x7A6D76E9 (48 <= j <= 63) int(2**30 x cbrt(7)) * K'(j) = 0x00000000 (64 <= j <= 79) * * * selection of message word * * r(j) = j (0 <= j <= 15) * r(16..31) = 7, 4, 13, 1, 10, 6, 15, 3, 12, 0, 9, 5, 2, 14, 11, 8 * r(32..47) = 3, 10, 14, 4, 9, 15, 8, 1, 2, 7, 0, 6, 13, 11, 5, 12 * r(48..63) = 1, 9, 11, 10, 0, 8, 12, 4, 13, 3, 7, 15, 14, 5, 6, 2 * r(64..79) = 4, 0, 5, 9, 7, 12, 2, 10, 14, 1, 3, 8, 11, 6, 15, 13 * r0(0..15) = 5, 14, 7, 0, 9, 2, 11, 4, 13, 6, 15, 8, 1, 10, 3, 12 * r0(16..31)= 6, 11, 3, 7, 0, 13, 5, 10, 14, 15, 8, 12, 4, 9, 1, 2 * r0(32..47)= 15, 5, 1, 3, 7, 14, 6, 9, 11, 8, 12, 2, 10, 0, 4, 13 * r0(48..63)= 8, 6, 4, 1, 3, 11, 15, 0, 5, 12, 2, 13, 9, 7, 10, 14 * r0(64..79)= 12, 15, 10, 4, 1, 5, 8, 7, 6, 2, 13, 14, 0, 3, 9, 11 * * * amount for rotate left (rol) * * s(0..15) = 11, 14, 15, 12, 5, 8, 7, 9, 11, 13, 14, 15, 6, 7, 9, 8 * s(16..31) = 7, 6, 8, 13, 11, 9, 7, 15, 7, 12, 15, 9, 11, 7, 13, 12 * s(32..47) = 11, 13, 6, 7, 14, 9, 13, 15, 14, 8, 13, 6, 5, 12, 7, 5 * s(48..63) = 11, 12, 14, 15, 14, 15, 9, 8, 9, 14, 5, 6, 8, 6, 5, 12 * s(64..79) = 9, 15, 5, 11, 6, 8, 13, 12, 5, 12, 13, 14, 11, 8, 5, 6 * s'(0..15) = 
8, 9, 9, 11, 13, 15, 15, 5, 7, 7, 8, 11, 14, 14, 12, 6 * s'(16..31)= 9, 13, 15, 7, 12, 8, 9, 11, 7, 7, 12, 7, 6, 15, 13, 11 * s'(32..47)= 9, 7, 15, 11, 8, 6, 6, 14, 12, 13, 5, 14, 13, 13, 7, 5 * s'(48..63)= 15, 5, 8, 11, 14, 14, 6, 14, 6, 9, 12, 9, 12, 5, 15, 8 * s'(64..79)= 8, 5, 12, 9, 12, 5, 14, 6, 8, 13, 6, 5, 15, 13, 11, 11 * * * initial value (hexadecimal) * * h0 = 0x67452301; h1 = 0xEFCDAB89; h2 = 0x98BADCFE; h3 = 0x10325476; * h4 = 0xC3D2E1F0; * * * RIPEMD-160: pseudo-code * * It is assumed that the message after padding consists of t 16-word blocks * that will be denoted with X[i][j], with 0 <= i <= t-1 and 0 <= j <= 15. * The symbol [+] denotes addition modulo 2**32 and rol_s denotes cyclic left * shift (rotate) over s positions. * * * for i := 0 to t-1 { * A := h0; B := h1; C := h2; D = h3; E = h4; * A' := h0; B' := h1; C' := h2; D' = h3; E' = h4; * for j := 0 to 79 { * T := rol_s(j)(A [+] f(j, B, C, D) [+] X[i][r(j)] [+] K(j)) [+] E; * A := E; E := D; D := rol_10(C); C := B; B := T; * T := rol_s'(j)(A' [+] f(79-j, B', C', D') [+] X[i][r'(j)] [+] K'(j)) [+] E'; * A' := E'; E' := D'; D' := rol_10(C'); C' := B'; B' := T; * } * T := h1 [+] C [+] D'; h1 := h2 [+] D [+] E'; h2 := h3 [+] E [+] A'; * h3 := h4 [+] A [+] B'; h4 := h0 [+] B [+] C'; h0 := T; * } */ /* Some examples: * "" 9c1185a5c5e9fc54612808977ee8f548b2258d31 * "a" 0bdc9d2d256b3ee9daae347be6f4dc835a467ffe * "abc" 8eb208f7e05d987a9b044a8e98c6b087f15a0bfc * "message digest" 5d0689ef49d2fae572b881b123a85ffa21595f36 * "a...z" f71c27109c692c1b56bbdceb5b9d2865b3708dbc * "abcdbcde...nopq" 12a053384a9c0c88e405a06c27dcf49ada62eb2b * "A...Za...z0...9" b0e20b6e3116640286ed3a87a5713079b21f5189 * 8 times "1234567890" 9b752e45573d4b39f4dbd3323cab82bf63326bfb * 1 million times "a" 52783243c1697bdbe16d37f97f68f08325dc1528 */ typedef struct { gcry_md_block_ctx_t bctx; u32 h0,h1,h2,h3,h4; } RMD160_CONTEXT; static unsigned int transform ( void *ctx, const unsigned char *data, size_t nblks ); static void rmd160_init 
(void *context, unsigned int flags) { RMD160_CONTEXT *hd = context; (void)flags; hd->h0 = 0x67452301; hd->h1 = 0xEFCDAB89; hd->h2 = 0x98BADCFE; hd->h3 = 0x10325476; hd->h4 = 0xC3D2E1F0; hd->bctx.nblocks = 0; hd->bctx.nblocks_high = 0; hd->bctx.count = 0; hd->bctx.blocksize_shift = _gcry_ctz(64); hd->bctx.bwrite = transform; } /**************** * Transform the message X which consists of 16 32-bit-words */ static unsigned int transform_blk ( void *ctx, const unsigned char *data ) { RMD160_CONTEXT *hd = ctx; register u32 al, ar, bl, br, cl, cr, dl, dr, el, er; u32 x[16]; int i; for ( i = 0; i < 16; i++ ) x[i] = buf_get_le32(data + i * 4); #define K0 0x00000000 #define K1 0x5A827999 #define K2 0x6ED9EBA1 #define K3 0x8F1BBCDC #define K4 0xA953FD4E #define KK0 0x50A28BE6 #define KK1 0x5C4DD124 #define KK2 0x6D703EF3 #define KK3 0x7A6D76E9 #define KK4 0x00000000 #define F0(x,y,z) ( (x) ^ (y) ^ (z) ) #define F1(x,y,z) ( ((x) & (y)) | (~(x) & (z)) ) #define F2(x,y,z) ( ((x) | ~(y)) ^ (z) ) #define F3(x,y,z) ( ((x) & (z)) | ((y) & ~(z)) ) #define F4(x,y,z) ( (x) ^ ((y) | ~(z)) ) #define R(a,b,c,d,e,f,k,r,s) do { a += f(b,c,d) + k + x[r]; \ a = rol(a,s) + e; \ c = rol(c,10); \ } while(0) /* left lane and right lanes interleaved */ al = ar = hd->h0; bl = br = hd->h1; cl = cr = hd->h2; dl = dr = hd->h3; el = er = hd->h4; R( al, bl, cl, dl, el, F0, K0, 0, 11 ); R( ar, br, cr, dr, er, F4, KK0, 5, 8); R( el, al, bl, cl, dl, F0, K0, 1, 14 ); R( er, ar, br, cr, dr, F4, KK0, 14, 9); R( dl, el, al, bl, cl, F0, K0, 2, 15 ); R( dr, er, ar, br, cr, F4, KK0, 7, 9); R( cl, dl, el, al, bl, F0, K0, 3, 12 ); R( cr, dr, er, ar, br, F4, KK0, 0, 11); R( bl, cl, dl, el, al, F0, K0, 4, 5 ); R( br, cr, dr, er, ar, F4, KK0, 9, 13); R( al, bl, cl, dl, el, F0, K0, 5, 8 ); R( ar, br, cr, dr, er, F4, KK0, 2, 15); R( el, al, bl, cl, dl, F0, K0, 6, 7 ); R( er, ar, br, cr, dr, F4, KK0, 11, 15); R( dl, el, al, bl, cl, F0, K0, 7, 9 ); R( dr, er, ar, br, cr, F4, KK0, 4, 5); R( cl, dl, el, al, bl, F0, K0, 8, 
11 ); R( cr, dr, er, ar, br, F4, KK0, 13, 7); R( bl, cl, dl, el, al, F0, K0, 9, 13 ); R( br, cr, dr, er, ar, F4, KK0, 6, 7); R( al, bl, cl, dl, el, F0, K0, 10, 14 ); R( ar, br, cr, dr, er, F4, KK0, 15, 8); R( el, al, bl, cl, dl, F0, K0, 11, 15 ); R( er, ar, br, cr, dr, F4, KK0, 8, 11); R( dl, el, al, bl, cl, F0, K0, 12, 6 ); R( dr, er, ar, br, cr, F4, KK0, 1, 14); R( cl, dl, el, al, bl, F0, K0, 13, 7 ); R( cr, dr, er, ar, br, F4, KK0, 10, 14); R( bl, cl, dl, el, al, F0, K0, 14, 9 ); R( br, cr, dr, er, ar, F4, KK0, 3, 12); R( al, bl, cl, dl, el, F0, K0, 15, 8 ); R( ar, br, cr, dr, er, F4, KK0, 12, 6); R( el, al, bl, cl, dl, F1, K1, 7, 7 ); R( er, ar, br, cr, dr, F3, KK1, 6, 9); R( dl, el, al, bl, cl, F1, K1, 4, 6 ); R( dr, er, ar, br, cr, F3, KK1, 11, 13); R( cl, dl, el, al, bl, F1, K1, 13, 8 ); R( cr, dr, er, ar, br, F3, KK1, 3, 15); R( bl, cl, dl, el, al, F1, K1, 1, 13 ); R( br, cr, dr, er, ar, F3, KK1, 7, 7); R( al, bl, cl, dl, el, F1, K1, 10, 11 ); R( ar, br, cr, dr, er, F3, KK1, 0, 12); R( el, al, bl, cl, dl, F1, K1, 6, 9 ); R( er, ar, br, cr, dr, F3, KK1, 13, 8); R( dl, el, al, bl, cl, F1, K1, 15, 7 ); R( dr, er, ar, br, cr, F3, KK1, 5, 9); R( cl, dl, el, al, bl, F1, K1, 3, 15 ); R( cr, dr, er, ar, br, F3, KK1, 10, 11); R( bl, cl, dl, el, al, F1, K1, 12, 7 ); R( br, cr, dr, er, ar, F3, KK1, 14, 7); R( al, bl, cl, dl, el, F1, K1, 0, 12 ); R( ar, br, cr, dr, er, F3, KK1, 15, 7); R( el, al, bl, cl, dl, F1, K1, 9, 15 ); R( er, ar, br, cr, dr, F3, KK1, 8, 12); R( dl, el, al, bl, cl, F1, K1, 5, 9 ); R( dr, er, ar, br, cr, F3, KK1, 12, 7); R( cl, dl, el, al, bl, F1, K1, 2, 11 ); R( cr, dr, er, ar, br, F3, KK1, 4, 6); R( bl, cl, dl, el, al, F1, K1, 14, 7 ); R( br, cr, dr, er, ar, F3, KK1, 9, 15); R( al, bl, cl, dl, el, F1, K1, 11, 13 ); R( ar, br, cr, dr, er, F3, KK1, 1, 13); R( el, al, bl, cl, dl, F1, K1, 8, 12 ); R( er, ar, br, cr, dr, F3, KK1, 2, 11); R( dl, el, al, bl, cl, F2, K2, 3, 11 ); R( dr, er, ar, br, cr, F2, KK2, 15, 9); R( cl, dl, el, al, bl, F2, K2, 10, 
13 ); R( cr, dr, er, ar, br, F2, KK2, 5, 7); R( bl, cl, dl, el, al, F2, K2, 14, 6 ); R( br, cr, dr, er, ar, F2, KK2, 1, 15); R( al, bl, cl, dl, el, F2, K2, 4, 7 ); R( ar, br, cr, dr, er, F2, KK2, 3, 11); R( el, al, bl, cl, dl, F2, K2, 9, 14 ); R( er, ar, br, cr, dr, F2, KK2, 7, 8); R( dl, el, al, bl, cl, F2, K2, 15, 9 ); R( dr, er, ar, br, cr, F2, KK2, 14, 6); R( cl, dl, el, al, bl, F2, K2, 8, 13 ); R( cr, dr, er, ar, br, F2, KK2, 6, 6); R( bl, cl, dl, el, al, F2, K2, 1, 15 ); R( br, cr, dr, er, ar, F2, KK2, 9, 14); R( al, bl, cl, dl, el, F2, K2, 2, 14 ); R( ar, br, cr, dr, er, F2, KK2, 11, 12); R( el, al, bl, cl, dl, F2, K2, 7, 8 ); R( er, ar, br, cr, dr, F2, KK2, 8, 13); R( dl, el, al, bl, cl, F2, K2, 0, 13 ); R( dr, er, ar, br, cr, F2, KK2, 12, 5); R( cl, dl, el, al, bl, F2, K2, 6, 6 ); R( cr, dr, er, ar, br, F2, KK2, 2, 14); R( bl, cl, dl, el, al, F2, K2, 13, 5 ); R( br, cr, dr, er, ar, F2, KK2, 10, 13); R( al, bl, cl, dl, el, F2, K2, 11, 12 ); R( ar, br, cr, dr, er, F2, KK2, 0, 13); R( el, al, bl, cl, dl, F2, K2, 5, 7 ); R( er, ar, br, cr, dr, F2, KK2, 4, 7); R( dl, el, al, bl, cl, F2, K2, 12, 5 ); R( dr, er, ar, br, cr, F2, KK2, 13, 5); R( cl, dl, el, al, bl, F3, K3, 1, 11 ); R( cr, dr, er, ar, br, F1, KK3, 8, 15); R( bl, cl, dl, el, al, F3, K3, 9, 12 ); R( br, cr, dr, er, ar, F1, KK3, 6, 5); R( al, bl, cl, dl, el, F3, K3, 11, 14 ); R( ar, br, cr, dr, er, F1, KK3, 4, 8); R( el, al, bl, cl, dl, F3, K3, 10, 15 ); R( er, ar, br, cr, dr, F1, KK3, 1, 11); R( dl, el, al, bl, cl, F3, K3, 0, 14 ); R( dr, er, ar, br, cr, F1, KK3, 3, 14); R( cl, dl, el, al, bl, F3, K3, 8, 15 ); R( cr, dr, er, ar, br, F1, KK3, 11, 14); R( bl, cl, dl, el, al, F3, K3, 12, 9 ); R( br, cr, dr, er, ar, F1, KK3, 15, 6); R( al, bl, cl, dl, el, F3, K3, 4, 8 ); R( ar, br, cr, dr, er, F1, KK3, 0, 14); R( el, al, bl, cl, dl, F3, K3, 13, 9 ); R( er, ar, br, cr, dr, F1, KK3, 5, 6); R( dl, el, al, bl, cl, F3, K3, 3, 14 ); R( dr, er, ar, br, cr, F1, KK3, 12, 9); R( cl, dl, el, al, bl, F3, K3, 7, 5 ); 
R( cr, dr, er, ar, br, F1, KK3, 2, 12); R( bl, cl, dl, el, al, F3, K3, 15, 6 ); R( br, cr, dr, er, ar, F1, KK3, 13, 9); R( al, bl, cl, dl, el, F3, K3, 14, 8 ); R( ar, br, cr, dr, er, F1, KK3, 9, 12); R( el, al, bl, cl, dl, F3, K3, 5, 6 ); R( er, ar, br, cr, dr, F1, KK3, 7, 5); R( dl, el, al, bl, cl, F3, K3, 6, 5 ); R( dr, er, ar, br, cr, F1, KK3, 10, 15); R( cl, dl, el, al, bl, F3, K3, 2, 12 ); R( cr, dr, er, ar, br, F1, KK3, 14, 8); R( bl, cl, dl, el, al, F4, K4, 4, 9 ); R( br, cr, dr, er, ar, F0, KK4, 12, 8); R( al, bl, cl, dl, el, F4, K4, 0, 15 ); R( ar, br, cr, dr, er, F0, KK4, 15, 5); R( el, al, bl, cl, dl, F4, K4, 5, 5 ); R( er, ar, br, cr, dr, F0, KK4, 10, 12); R( dl, el, al, bl, cl, F4, K4, 9, 11 ); R( dr, er, ar, br, cr, F0, KK4, 4, 9); R( cl, dl, el, al, bl, F4, K4, 7, 6 ); R( cr, dr, er, ar, br, F0, KK4, 1, 12); R( bl, cl, dl, el, al, F4, K4, 12, 8 ); R( br, cr, dr, er, ar, F0, KK4, 5, 5); R( al, bl, cl, dl, el, F4, K4, 2, 13 ); R( ar, br, cr, dr, er, F0, KK4, 8, 14); R( el, al, bl, cl, dl, F4, K4, 10, 12 ); R( er, ar, br, cr, dr, F0, KK4, 7, 6); R( dl, el, al, bl, cl, F4, K4, 14, 5 ); R( dr, er, ar, br, cr, F0, KK4, 6, 8); R( cl, dl, el, al, bl, F4, K4, 1, 12 ); R( cr, dr, er, ar, br, F0, KK4, 2, 13); R( bl, cl, dl, el, al, F4, K4, 3, 13 ); R( br, cr, dr, er, ar, F0, KK4, 13, 6); R( al, bl, cl, dl, el, F4, K4, 8, 14 ); R( ar, br, cr, dr, er, F0, KK4, 14, 5); R( el, al, bl, cl, dl, F4, K4, 11, 11 ); R( er, ar, br, cr, dr, F0, KK4, 0, 15); R( dl, el, al, bl, cl, F4, K4, 6, 8 ); R( dr, er, ar, br, cr, F0, KK4, 3, 13); R( cl, dl, el, al, bl, F4, K4, 15, 5 ); R( cr, dr, er, ar, br, F0, KK4, 9, 11); R( bl, cl, dl, el, al, F4, K4, 13, 6 ); R( br, cr, dr, er, ar, F0, KK4, 11, 11); dr += cl + hd->h1; hd->h1 = hd->h2 + dl + er; hd->h2 = hd->h3 + el + ar; hd->h3 = hd->h4 + al + br; hd->h4 = hd->h0 + bl + cr; hd->h0 = dr; return /*burn_stack*/ 104+5*sizeof(void*); } static unsigned int transform ( void *c, const unsigned char *data, size_t nblks ) { unsigned int 
burn; do { burn = transform_blk (c, data); data += 64; } while (--nblks); return burn; } /* * The routine terminates the computation */ static void rmd160_final( void *context ) { RMD160_CONTEXT *hd = context; u32 t, th, msb, lsb; byte *p; unsigned int burn; t = hd->bctx.nblocks; if (sizeof t == sizeof hd->bctx.nblocks) th = hd->bctx.nblocks_high; else th = hd->bctx.nblocks >> 32; /* multiply by 64 to make a byte count */ lsb = t << 6; msb = (th << 6) | (t >> 26); /* add the count */ t = lsb; if( (lsb += hd->bctx.count) < t ) msb++; /* multiply by 8 to make a bit count */ t = lsb; lsb <<= 3; msb <<= 3; msb |= t >> 29; if (hd->bctx.count < 56) /* enough room */ { hd->bctx.buf[hd->bctx.count++] = 0x80; /* pad */ if (hd->bctx.count < 56) memset (&hd->bctx.buf[hd->bctx.count], 0, 56 - hd->bctx.count); /* append the 64 bit count */ buf_put_le32(hd->bctx.buf + 56, lsb); buf_put_le32(hd->bctx.buf + 60, msb); burn = transform (hd, hd->bctx.buf, 1); } else /* need one extra block */ { hd->bctx.buf[hd->bctx.count++] = 0x80; /* pad character */ /* fill pad and next block with zeroes */ memset (&hd->bctx.buf[hd->bctx.count], 0, 64 - hd->bctx.count + 56); /* append the 64 bit count */ buf_put_le32(hd->bctx.buf + 64 + 56, lsb); buf_put_le32(hd->bctx.buf + 64 + 60, msb); burn = transform (hd, hd->bctx.buf, 2); } p = hd->bctx.buf; #define X(a) do { buf_put_le32(p, hd->h##a); p += 4; } while(0) X(0); X(1); X(2); X(3); X(4); #undef X hd->bctx.count = 0; _gcry_burn_stack (burn); } static byte * rmd160_read( void *context ) { RMD160_CONTEXT *hd = context; return hd->bctx.buf; } /**************** * Shortcut functions which puts the hash value of the supplied buffer iov * into outbuf which must have a size of 20 bytes. 
*/ static void -_gcry_rmd160_hash_buffers (void *outbuf, const gcry_buffer_t *iov, int iovcnt) +_gcry_rmd160_hash_buffers (void *outbuf, size_t nbytes, + const gcry_buffer_t *iov, int iovcnt) { RMD160_CONTEXT hd; + (void)nbytes; + rmd160_init (&hd, 0); for (;iovcnt > 0; iov++, iovcnt--) _gcry_md_block_write (&hd, (const char*)iov[0].data + iov[0].off, iov[0].len); rmd160_final ( &hd ); memcpy ( outbuf, hd.bctx.buf, 20 ); } static byte asn[15] = /* Object ID is 1.3.36.3.2.1 */ { 0x30, 0x21, 0x30, 0x09, 0x06, 0x05, 0x2b, 0x24, 0x03, 0x02, 0x01, 0x05, 0x00, 0x04, 0x14 }; static gcry_md_oid_spec_t oid_spec_rmd160[] = { /* rsaSignatureWithripemd160 */ { "1.3.36.3.3.1.2" }, /* TeleTrust hash algorithm. */ { "1.3.36.3.2.1" }, { NULL } }; gcry_md_spec_t _gcry_digest_spec_rmd160 = { GCRY_MD_RMD160, {0, 0}, "RIPEMD160", asn, DIM (asn), oid_spec_rmd160, 20, rmd160_init, _gcry_md_block_write, rmd160_final, rmd160_read, NULL, _gcry_rmd160_hash_buffers, sizeof (RMD160_CONTEXT) }; diff --git a/cipher/sha1.c b/cipher/sha1.c index 48fdbb33..3bb24c7e 100644 --- a/cipher/sha1.c +++ b/cipher/sha1.c @@ -1,765 +1,768 @@ /* sha1.c - SHA1 hash function * Copyright (C) 1998, 2001, 2002, 2003, 2008 Free Software Foundation, Inc. * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . 
*/ /* Test vectors: * * "abc" * A999 3E36 4706 816A BA3E 2571 7850 C26C 9CD0 D89D * * "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq" * 8498 3E44 1C3B D26E BAAE 4AA1 F951 29E5 E546 70F1 */ #include #include #include #include #ifdef HAVE_STDINT_H # include #endif #include "g10lib.h" #include "bithelp.h" #include "bufhelp.h" #include "cipher.h" #include "sha1.h" /* USE_SSSE3 indicates whether to compile with Intel SSSE3 code. */ #undef USE_SSSE3 #if defined(__x86_64__) && defined(HAVE_GCC_INLINE_ASM_SSSE3) && \ (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) # define USE_SSSE3 1 #endif /* USE_AVX indicates whether to compile with Intel AVX code. */ #undef USE_AVX #if defined(__x86_64__) && defined(HAVE_GCC_INLINE_ASM_AVX) && \ (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) # define USE_AVX 1 #endif /* USE_BMI2 indicates whether to compile with Intel AVX/BMI2 code. */ #undef USE_BMI2 #if defined(__x86_64__) && defined(HAVE_GCC_INLINE_ASM_AVX) && \ defined(HAVE_GCC_INLINE_ASM_BMI2) && \ (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) # define USE_BMI2 1 #endif /* USE_AVX2 indicates whether to compile with Intel AVX2/BMI2 code. */ #undef USE_AVX2 #if defined(USE_BMI2) && defined(HAVE_GCC_INLINE_ASM_AVX2) # define USE_AVX2 1 #endif /* USE_SHAEXT indicates whether to compile with Intel SHA Extension code. */ #undef USE_SHAEXT #if defined(HAVE_GCC_INLINE_ASM_SHAEXT) && \ defined(HAVE_GCC_INLINE_ASM_SSE41) && \ defined(ENABLE_SHAEXT_SUPPORT) # define USE_SHAEXT 1 #endif /* USE_NEON indicates whether to enable ARM NEON assembly code. 
*/ #undef USE_NEON #ifdef ENABLE_NEON_SUPPORT # if defined(HAVE_ARM_ARCH_V6) && defined(__ARMEL__) \ && defined(HAVE_COMPATIBLE_GCC_ARM_PLATFORM_AS) \ && defined(HAVE_GCC_INLINE_ASM_NEON) # define USE_NEON 1 # endif #endif /* USE_ARM_CE indicates whether to enable ARMv8 Crypto Extension assembly * code. */ #undef USE_ARM_CE #ifdef ENABLE_ARM_CRYPTO_SUPPORT # if defined(HAVE_ARM_ARCH_V6) && defined(__ARMEL__) \ && defined(HAVE_COMPATIBLE_GCC_ARM_PLATFORM_AS) \ && defined(HAVE_GCC_INLINE_ASM_AARCH32_CRYPTO) # define USE_ARM_CE 1 # elif defined(__AARCH64EL__) \ && defined(HAVE_COMPATIBLE_GCC_AARCH64_PLATFORM_AS) \ && defined(HAVE_GCC_INLINE_ASM_AARCH64_CRYPTO) # define USE_ARM_CE 1 # endif #endif /* A macro to test whether P is properly aligned for an u32 type. Note that config.h provides a suitable replacement for uintptr_t if it does not exist in stdint.h. */ /* #if __GNUC__ >= 2 */ /* # define U32_ALIGNED_P(p) (!(((uintptr_t)p) % __alignof__ (u32))) */ /* #else */ /* # define U32_ALIGNED_P(p) (!(((uintptr_t)p) % sizeof (u32))) */ /* #endif */ /* Assembly implementations use SystemV ABI, ABI conversion and additional * stack to store XMM6-XMM15 needed on Win64. 
*/ #undef ASM_FUNC_ABI #undef ASM_EXTRA_STACK #if defined(USE_SSSE3) || defined(USE_AVX) || defined(USE_BMI2) || \ defined(USE_SHAEXT) # ifdef HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS # define ASM_FUNC_ABI __attribute__((sysv_abi)) # define ASM_EXTRA_STACK (10 * 16 + sizeof(void *) * 4) # else # define ASM_FUNC_ABI # define ASM_EXTRA_STACK 0 # endif #endif #ifdef USE_SSSE3 unsigned int _gcry_sha1_transform_amd64_ssse3 (void *state, const unsigned char *data, size_t nblks) ASM_FUNC_ABI; static unsigned int do_sha1_transform_amd64_ssse3 (void *ctx, const unsigned char *data, size_t nblks) { SHA1_CONTEXT *hd = ctx; return _gcry_sha1_transform_amd64_ssse3 (&hd->h0, data, nblks) + ASM_EXTRA_STACK; } #endif #ifdef USE_AVX unsigned int _gcry_sha1_transform_amd64_avx (void *state, const unsigned char *data, size_t nblks) ASM_FUNC_ABI; static unsigned int do_sha1_transform_amd64_avx (void *ctx, const unsigned char *data, size_t nblks) { SHA1_CONTEXT *hd = ctx; return _gcry_sha1_transform_amd64_avx (&hd->h0, data, nblks) + ASM_EXTRA_STACK; } #endif #ifdef USE_BMI2 unsigned int _gcry_sha1_transform_amd64_avx_bmi2 (void *state, const unsigned char *data, size_t nblks) ASM_FUNC_ABI; static unsigned int do_sha1_transform_amd64_avx_bmi2 (void *ctx, const unsigned char *data, size_t nblks) { SHA1_CONTEXT *hd = ctx; return _gcry_sha1_transform_amd64_avx_bmi2 (&hd->h0, data, nblks) + ASM_EXTRA_STACK; } #ifdef USE_AVX2 unsigned int _gcry_sha1_transform_amd64_avx2_bmi2 (void *state, const unsigned char *data, size_t nblks) ASM_FUNC_ABI; static unsigned int do_sha1_transform_amd64_avx2_bmi2 (void *ctx, const unsigned char *data, size_t nblks) { SHA1_CONTEXT *hd = ctx; /* AVX2/BMI2 function only handles pair of blocks so nblks needs to be * multiple of 2 and function does not handle zero nblks. Use AVX/BMI2 * code to handle these cases. 
*/ if (nblks <= 1) return do_sha1_transform_amd64_avx_bmi2 (ctx, data, nblks); if (nblks & 1) { (void)_gcry_sha1_transform_amd64_avx_bmi2 (&hd->h0, data, 1); nblks--; data += 64; } return _gcry_sha1_transform_amd64_avx2_bmi2 (&hd->h0, data, nblks) + ASM_EXTRA_STACK; } #endif /* USE_AVX2 */ #endif /* USE_BMI2 */ #ifdef USE_SHAEXT /* Does not need ASM_FUNC_ABI */ unsigned int _gcry_sha1_transform_intel_shaext (void *state, const unsigned char *data, size_t nblks); static unsigned int do_sha1_transform_intel_shaext (void *ctx, const unsigned char *data, size_t nblks) { SHA1_CONTEXT *hd = ctx; return _gcry_sha1_transform_intel_shaext (&hd->h0, data, nblks); } #endif #ifdef USE_NEON unsigned int _gcry_sha1_transform_armv7_neon (void *state, const unsigned char *data, size_t nblks); static unsigned int do_sha1_transform_armv7_neon (void *ctx, const unsigned char *data, size_t nblks) { SHA1_CONTEXT *hd = ctx; return _gcry_sha1_transform_armv7_neon (&hd->h0, data, nblks); } #endif #ifdef USE_ARM_CE unsigned int _gcry_sha1_transform_armv8_ce (void *state, const unsigned char *data, size_t nblks); static unsigned int do_sha1_transform_armv8_ce (void *ctx, const unsigned char *data, size_t nblks) { SHA1_CONTEXT *hd = ctx; return _gcry_sha1_transform_armv8_ce (&hd->h0, data, nblks); } #endif #ifdef SHA1_USE_S390X_CRYPTO #include "asm-inline-s390x.h" static unsigned int do_sha1_transform_s390x (void *ctx, const unsigned char *data, size_t nblks) { SHA1_CONTEXT *hd = ctx; kimd_execute (KMID_FUNCTION_SHA1, &hd->h0, data, nblks * 64); return 0; } static unsigned int do_sha1_final_s390x (void *ctx, const unsigned char *data, size_t datalen, u32 len_msb, u32 len_lsb) { SHA1_CONTEXT *hd = ctx; /* Make sure that 'final_len' is positioned at correct offset relative * to 'h0'. This is because we are passing 'h0' pointer as start of * parameter block to 'klmd' instruction. 
*/ gcry_assert (offsetof (SHA1_CONTEXT, final_len_msb) - offsetof (SHA1_CONTEXT, h0) == 5 * sizeof(u32)); gcry_assert (offsetof (SHA1_CONTEXT, final_len_lsb) - offsetof (SHA1_CONTEXT, final_len_msb) == 1 * sizeof(u32)); hd->final_len_msb = len_msb; hd->final_len_lsb = len_lsb; klmd_execute (KMID_FUNCTION_SHA1, &hd->h0, data, datalen); return 0; } #endif static unsigned int do_transform_generic (void *c, const unsigned char *data, size_t nblks); static void sha1_init (void *context, unsigned int flags) { SHA1_CONTEXT *hd = context; unsigned int features = _gcry_get_hw_features (); (void)flags; hd->h0 = 0x67452301; hd->h1 = 0xefcdab89; hd->h2 = 0x98badcfe; hd->h3 = 0x10325476; hd->h4 = 0xc3d2e1f0; hd->bctx.nblocks = 0; hd->bctx.nblocks_high = 0; hd->bctx.count = 0; hd->bctx.blocksize_shift = _gcry_ctz(64); /* Order of feature checks is important here; last match will be * selected. Keep slower implementations at the top and faster at * the bottom. */ hd->bctx.bwrite = do_transform_generic; #ifdef USE_SSSE3 if ((features & HWF_INTEL_SSSE3) != 0) hd->bctx.bwrite = do_sha1_transform_amd64_ssse3; #endif #ifdef USE_AVX /* AVX implementation uses SHLD which is known to be slow on non-Intel CPUs. * Therefore use this implementation on Intel CPUs only. 
*/ if ((features & HWF_INTEL_AVX) && (features & HWF_INTEL_FAST_SHLD)) hd->bctx.bwrite = do_sha1_transform_amd64_avx; #endif #ifdef USE_BMI2 if ((features & HWF_INTEL_AVX) && (features & HWF_INTEL_BMI2)) hd->bctx.bwrite = do_sha1_transform_amd64_avx_bmi2; #endif #ifdef USE_AVX2 if ((features & HWF_INTEL_AVX2) && (features & HWF_INTEL_AVX) && (features & HWF_INTEL_BMI2)) hd->bctx.bwrite = do_sha1_transform_amd64_avx2_bmi2; #endif #ifdef USE_SHAEXT if ((features & HWF_INTEL_SHAEXT) && (features & HWF_INTEL_SSE4_1)) hd->bctx.bwrite = do_sha1_transform_intel_shaext; #endif #ifdef USE_NEON if ((features & HWF_ARM_NEON) != 0) hd->bctx.bwrite = do_sha1_transform_armv7_neon; #endif #ifdef USE_ARM_CE if ((features & HWF_ARM_SHA1) != 0) hd->bctx.bwrite = do_sha1_transform_armv8_ce; #endif #ifdef SHA1_USE_S390X_CRYPTO hd->use_s390x_crypto = 0; if ((features & HWF_S390X_MSA) != 0) { if ((kimd_query () & km_function_to_mask (KMID_FUNCTION_SHA1)) && (klmd_query () & km_function_to_mask (KMID_FUNCTION_SHA1))) { hd->bctx.bwrite = do_sha1_transform_s390x; hd->use_s390x_crypto = 1; } } #endif (void)features; } /* * Initialize the context HD. This is used to prepare the use of * _gcry_sha1_mixblock. WARNING: This is a special purpose function * for exclusive use by random-csprng.c. */ void _gcry_sha1_mixblock_init (SHA1_CONTEXT *hd) { sha1_init (hd, 0); } /* Round function macros. */ #define K1 0x5A827999L #define K2 0x6ED9EBA1L #define K3 0x8F1BBCDCL #define K4 0xCA62C1D6L #define F1(x,y,z) ( z ^ ( x & ( y ^ z ) ) ) #define F2(x,y,z) ( x ^ y ^ z ) #define F3(x,y,z) ( ( x & y ) | ( z & ( x | y ) ) ) #define F4(x,y,z) ( x ^ y ^ z ) #define M(i) ( tm = x[ i &0x0f] \ ^ x[(i-14)&0x0f] \ ^ x[(i-8) &0x0f] \ ^ x[(i-3) &0x0f], \ (x[i&0x0f] = rol(tm, 1))) #define R(a,b,c,d,e,f,k,m) do { e += rol( a, 5 ) \ + f( b, c, d ) \ + k \ + m; \ b = rol( b, 30 ); \ } while(0) /* * Transform NBLOCKS of each 64 bytes (16 32-bit words) at DATA. 
*/ static unsigned int do_transform_generic (void *ctx, const unsigned char *data, size_t nblks) { SHA1_CONTEXT *hd = ctx; do { const u32 *idata = (const void *)data; u32 a, b, c, d, e; /* Local copies of the chaining variables. */ u32 tm; /* Helper. */ u32 x[16]; /* The array we work on. */ #define I(i) (x[i] = buf_get_be32(idata + i)) /* Get the values of the chaining variables. */ a = hd->h0; b = hd->h1; c = hd->h2; d = hd->h3; e = hd->h4; /* Transform. */ R( a, b, c, d, e, F1, K1, I( 0) ); R( e, a, b, c, d, F1, K1, I( 1) ); R( d, e, a, b, c, F1, K1, I( 2) ); R( c, d, e, a, b, F1, K1, I( 3) ); R( b, c, d, e, a, F1, K1, I( 4) ); R( a, b, c, d, e, F1, K1, I( 5) ); R( e, a, b, c, d, F1, K1, I( 6) ); R( d, e, a, b, c, F1, K1, I( 7) ); R( c, d, e, a, b, F1, K1, I( 8) ); R( b, c, d, e, a, F1, K1, I( 9) ); R( a, b, c, d, e, F1, K1, I(10) ); R( e, a, b, c, d, F1, K1, I(11) ); R( d, e, a, b, c, F1, K1, I(12) ); R( c, d, e, a, b, F1, K1, I(13) ); R( b, c, d, e, a, F1, K1, I(14) ); R( a, b, c, d, e, F1, K1, I(15) ); R( e, a, b, c, d, F1, K1, M(16) ); R( d, e, a, b, c, F1, K1, M(17) ); R( c, d, e, a, b, F1, K1, M(18) ); R( b, c, d, e, a, F1, K1, M(19) ); R( a, b, c, d, e, F2, K2, M(20) ); R( e, a, b, c, d, F2, K2, M(21) ); R( d, e, a, b, c, F2, K2, M(22) ); R( c, d, e, a, b, F2, K2, M(23) ); R( b, c, d, e, a, F2, K2, M(24) ); R( a, b, c, d, e, F2, K2, M(25) ); R( e, a, b, c, d, F2, K2, M(26) ); R( d, e, a, b, c, F2, K2, M(27) ); R( c, d, e, a, b, F2, K2, M(28) ); R( b, c, d, e, a, F2, K2, M(29) ); R( a, b, c, d, e, F2, K2, M(30) ); R( e, a, b, c, d, F2, K2, M(31) ); R( d, e, a, b, c, F2, K2, M(32) ); R( c, d, e, a, b, F2, K2, M(33) ); R( b, c, d, e, a, F2, K2, M(34) ); R( a, b, c, d, e, F2, K2, M(35) ); R( e, a, b, c, d, F2, K2, M(36) ); R( d, e, a, b, c, F2, K2, M(37) ); R( c, d, e, a, b, F2, K2, M(38) ); R( b, c, d, e, a, F2, K2, M(39) ); R( a, b, c, d, e, F3, K3, M(40) ); R( e, a, b, c, d, F3, K3, M(41) ); R( d, e, a, b, c, F3, K3, M(42) ); R( c, d, e, a, b, F3, K3, 
M(43) ); R( b, c, d, e, a, F3, K3, M(44) ); R( a, b, c, d, e, F3, K3, M(45) ); R( e, a, b, c, d, F3, K3, M(46) ); R( d, e, a, b, c, F3, K3, M(47) ); R( c, d, e, a, b, F3, K3, M(48) ); R( b, c, d, e, a, F3, K3, M(49) ); R( a, b, c, d, e, F3, K3, M(50) ); R( e, a, b, c, d, F3, K3, M(51) ); R( d, e, a, b, c, F3, K3, M(52) ); R( c, d, e, a, b, F3, K3, M(53) ); R( b, c, d, e, a, F3, K3, M(54) ); R( a, b, c, d, e, F3, K3, M(55) ); R( e, a, b, c, d, F3, K3, M(56) ); R( d, e, a, b, c, F3, K3, M(57) ); R( c, d, e, a, b, F3, K3, M(58) ); R( b, c, d, e, a, F3, K3, M(59) ); R( a, b, c, d, e, F4, K4, M(60) ); R( e, a, b, c, d, F4, K4, M(61) ); R( d, e, a, b, c, F4, K4, M(62) ); R( c, d, e, a, b, F4, K4, M(63) ); R( b, c, d, e, a, F4, K4, M(64) ); R( a, b, c, d, e, F4, K4, M(65) ); R( e, a, b, c, d, F4, K4, M(66) ); R( d, e, a, b, c, F4, K4, M(67) ); R( c, d, e, a, b, F4, K4, M(68) ); R( b, c, d, e, a, F4, K4, M(69) ); R( a, b, c, d, e, F4, K4, M(70) ); R( e, a, b, c, d, F4, K4, M(71) ); R( d, e, a, b, c, F4, K4, M(72) ); R( c, d, e, a, b, F4, K4, M(73) ); R( b, c, d, e, a, F4, K4, M(74) ); R( a, b, c, d, e, F4, K4, M(75) ); R( e, a, b, c, d, F4, K4, M(76) ); R( d, e, a, b, c, F4, K4, M(77) ); R( c, d, e, a, b, F4, K4, M(78) ); R( b, c, d, e, a, F4, K4, M(79) ); /* Update the chaining variables. */ hd->h0 += a; hd->h1 += b; hd->h2 += c; hd->h3 += d; hd->h4 += e; data += 64; } while (--nblks); return 88+4*sizeof(void*); } /* * Apply the SHA-1 transform function on the buffer BLOCKOF64BYTE * which must have a length 64 bytes. BLOCKOF64BYTE must be 32-bit * aligned. Updates the 20 bytes in BLOCKOF64BYTE with its mixed * content. Returns the number of bytes which should be burned on the * stack. You need to use _gcry_sha1_mixblock_init to initialize the * context. * WARNING: This is a special purpose function for exclusive use by * random-csprng.c. 
*/ unsigned int _gcry_sha1_mixblock (SHA1_CONTEXT *hd, void *blockof64byte) { u32 *p = blockof64byte; unsigned int nburn; nburn = (*hd->bctx.bwrite) (hd, blockof64byte, 1); p[0] = hd->h0; p[1] = hd->h1; p[2] = hd->h2; p[3] = hd->h3; p[4] = hd->h4; return nburn; } /* The routine final terminates the computation and * returns the digest. * The handle is prepared for a new cycle, but adding bytes to the * handle will the destroy the returned buffer. * Returns: 20 bytes representing the digest. */ static void sha1_final(void *context) { SHA1_CONTEXT *hd = context; u32 t, th, msb, lsb; unsigned char *p; unsigned int burn; t = hd->bctx.nblocks; if (sizeof t == sizeof hd->bctx.nblocks) th = hd->bctx.nblocks_high; else th = hd->bctx.nblocks >> 32; /* multiply by 64 to make a byte count */ lsb = t << 6; msb = (th << 6) | (t >> 26); /* add the count */ t = lsb; if( (lsb += hd->bctx.count) < t ) msb++; /* multiply by 8 to make a bit count */ t = lsb; lsb <<= 3; msb <<= 3; msb |= t >> 29; if (0) { } #ifdef SHA1_USE_S390X_CRYPTO else if (hd->use_s390x_crypto) { burn = do_sha1_final_s390x (hd, hd->bctx.buf, hd->bctx.count, msb, lsb); } #endif else if (hd->bctx.count < 56) /* enough room */ { hd->bctx.buf[hd->bctx.count++] = 0x80; /* pad */ if (hd->bctx.count < 56) memset (&hd->bctx.buf[hd->bctx.count], 0, 56 - hd->bctx.count); /* append the 64 bit count */ buf_put_be32(hd->bctx.buf + 56, msb); buf_put_be32(hd->bctx.buf + 60, lsb); burn = (*hd->bctx.bwrite) ( hd, hd->bctx.buf, 1 ); } else /* need one extra block */ { hd->bctx.buf[hd->bctx.count++] = 0x80; /* pad character */ /* fill pad and next block with zeroes */ memset (&hd->bctx.buf[hd->bctx.count], 0, 64 - hd->bctx.count + 56); /* append the 64 bit count */ buf_put_be32(hd->bctx.buf + 64 + 56, msb); buf_put_be32(hd->bctx.buf + 64 + 60, lsb); burn = (*hd->bctx.bwrite) ( hd, hd->bctx.buf, 2 ); } p = hd->bctx.buf; #define X(a) do { buf_put_be32(p, hd->h##a); p += 4; } while(0) X(0); X(1); X(2); X(3); X(4); #undef X 
hd->bctx.count = 0; _gcry_burn_stack (burn); } static unsigned char * sha1_read( void *context ) { SHA1_CONTEXT *hd = context; return hd->bctx.buf; } /**************** * Shortcut functions which puts the hash value of the supplied buffer iov * into outbuf which must have a size of 20 bytes. */ static void -_gcry_sha1_hash_buffers (void *outbuf, const gcry_buffer_t *iov, int iovcnt) +_gcry_sha1_hash_buffers (void *outbuf, size_t nbytes, + const gcry_buffer_t *iov, int iovcnt) { SHA1_CONTEXT hd; + (void)nbytes; + sha1_init (&hd, 0); for (;iovcnt > 0; iov++, iovcnt--) _gcry_md_block_write (&hd, (const char*)iov[0].data + iov[0].off, iov[0].len); sha1_final (&hd); memcpy (outbuf, hd.bctx.buf, 20); } /* Variant of the above shortcut function using a single buffer. */ void _gcry_sha1_hash_buffer (void *outbuf, const void *buffer, size_t length) { gcry_buffer_t iov = { 0 }; iov.data = (void *)buffer; iov.len = length; - _gcry_sha1_hash_buffers (outbuf, &iov, 1); + _gcry_sha1_hash_buffers (outbuf, 20, &iov, 1); } /* Self-test section. */ static gpg_err_code_t selftests_sha1 (int extended, selftest_report_func_t report) { const char *what; const char *errtxt; what = "short string"; errtxt = _gcry_hash_selftest_check_one (GCRY_MD_SHA1, 0, "abc", 3, "\xA9\x99\x3E\x36\x47\x06\x81\x6A\xBA\x3E" "\x25\x71\x78\x50\xC2\x6C\x9C\xD0\xD8\x9D", 20); if (errtxt) goto failed; if (extended) { what = "long string"; errtxt = _gcry_hash_selftest_check_one (GCRY_MD_SHA1, 0, "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq", 56, "\x84\x98\x3E\x44\x1C\x3B\xD2\x6E\xBA\xAE" "\x4A\xA1\xF9\x51\x29\xE5\xE5\x46\x70\xF1", 20); if (errtxt) goto failed; what = "one million \"a\""; errtxt = _gcry_hash_selftest_check_one (GCRY_MD_SHA1, 1, NULL, 0, "\x34\xAA\x97\x3C\xD4\xC4\xDA\xA4\xF6\x1E" "\xEB\x2B\xDB\xAD\x27\x31\x65\x34\x01\x6F", 20); if (errtxt) goto failed; } return 0; /* Succeeded. 
*/ failed: if (report) report ("digest", GCRY_MD_SHA1, what, errtxt); return GPG_ERR_SELFTEST_FAILED; } /* Run a full self-test for ALGO and return 0 on success. */ static gpg_err_code_t run_selftests (int algo, int extended, selftest_report_func_t report) { gpg_err_code_t ec; switch (algo) { case GCRY_MD_SHA1: ec = selftests_sha1 (extended, report); break; default: ec = GPG_ERR_DIGEST_ALGO; break; } return ec; } static unsigned char asn[15] = /* Object ID is 1.3.14.3.2.26 */ { 0x30, 0x21, 0x30, 0x09, 0x06, 0x05, 0x2b, 0x0e, 0x03, 0x02, 0x1a, 0x05, 0x00, 0x04, 0x14 }; static gcry_md_oid_spec_t oid_spec_sha1[] = { /* iso.member-body.us.rsadsi.pkcs.pkcs-1.5 (sha1WithRSAEncryption) */ { "1.2.840.113549.1.1.5" }, /* iso.member-body.us.x9-57.x9cm.3 (dsaWithSha1)*/ { "1.2.840.10040.4.3" }, /* from NIST's OIW (sha1) */ { "1.3.14.3.2.26" }, /* from NIST OIW (sha-1WithRSAEncryption) */ { "1.3.14.3.2.29" }, /* iso.member-body.us.ansi-x9-62.signatures.ecdsa-with-sha1 */ { "1.2.840.10045.4.1" }, { NULL }, }; gcry_md_spec_t _gcry_digest_spec_sha1 = { GCRY_MD_SHA1, {0, 1}, "SHA1", asn, DIM (asn), oid_spec_sha1, 20, sha1_init, _gcry_md_block_write, sha1_final, sha1_read, NULL, _gcry_sha1_hash_buffers, sizeof (SHA1_CONTEXT), run_selftests }; diff --git a/cipher/sha256.c b/cipher/sha256.c index 871615b6..7b2c78f7 100644 --- a/cipher/sha256.c +++ b/cipher/sha256.c @@ -1,831 +1,837 @@ /* sha256.c - SHA256 hash function * Copyright (C) 2003, 2006, 2008, 2009 Free Software Foundation, Inc. * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . */ /* Test vectors: "abc" SHA224: 23097d22 3405d822 8642a477 bda255b3 2aadbce4 bda0b3f7 e36c9da7 SHA256: ba7816bf 8f01cfea 414140de 5dae2223 b00361a3 96177a9c b410ff61 f20015ad "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq" SHA224: 75388b16 512776cc 5dba5da1 fd890150 b0c6455c b4f58b19 52522525 SHA256: 248d6a61 d20638b8 e5c02693 0c3e6039 a33ce459 64ff2167 f6ecedd4 19db06c1 "a" one million times SHA224: 20794655 980c91d8 bbb4c1ea 97618a4b f03f4258 1948b2ee 4ee7ad67 SHA256: cdc76e5c 9914fb92 81a1c7e2 84d73e67 f1809a48 a497200e 046d39cc c7112cd0 */ #include #include #include #include #include "g10lib.h" #include "bithelp.h" #include "bufhelp.h" #include "cipher.h" #include "hash-common.h" /* USE_SSSE3 indicates whether to compile with Intel SSSE3 code. */ #undef USE_SSSE3 #if defined(__x86_64__) && defined(HAVE_GCC_INLINE_ASM_SSSE3) && \ defined(HAVE_INTEL_SYNTAX_PLATFORM_AS) && \ (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) # define USE_SSSE3 1 #endif /* USE_AVX indicates whether to compile with Intel AVX code. */ #undef USE_AVX #if defined(__x86_64__) && defined(HAVE_GCC_INLINE_ASM_AVX) && \ defined(HAVE_INTEL_SYNTAX_PLATFORM_AS) && \ (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) # define USE_AVX 1 #endif /* USE_AVX2 indicates whether to compile with Intel AVX2/BMI2 code. */ #undef USE_AVX2 #if defined(__x86_64__) && defined(HAVE_GCC_INLINE_ASM_AVX2) && \ defined(HAVE_GCC_INLINE_ASM_BMI2) && \ defined(HAVE_INTEL_SYNTAX_PLATFORM_AS) && \ (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) # define USE_AVX2 1 #endif /* USE_SHAEXT indicates whether to compile with Intel SHA Extension code. 
*/ #undef USE_SHAEXT #if defined(HAVE_GCC_INLINE_ASM_SHAEXT) && \ defined(HAVE_GCC_INLINE_ASM_SSE41) && \ defined(ENABLE_SHAEXT_SUPPORT) # define USE_SHAEXT 1 #endif /* USE_ARM_CE indicates whether to enable ARMv8 Crypto Extension assembly * code. */ #undef USE_ARM_CE #ifdef ENABLE_ARM_CRYPTO_SUPPORT # if defined(HAVE_ARM_ARCH_V6) && defined(__ARMEL__) \ && defined(HAVE_COMPATIBLE_GCC_ARM_PLATFORM_AS) \ && defined(HAVE_GCC_INLINE_ASM_AARCH32_CRYPTO) # define USE_ARM_CE 1 # elif defined(__AARCH64EL__) \ && defined(HAVE_COMPATIBLE_GCC_AARCH64_PLATFORM_AS) \ && defined(HAVE_GCC_INLINE_ASM_AARCH64_CRYPTO) # define USE_ARM_CE 1 # endif #endif /* USE_PPC_CRYPTO indicates whether to enable PowerPC vector crypto * accelerated code. */ #undef USE_PPC_CRYPTO #ifdef ENABLE_PPC_CRYPTO_SUPPORT # if defined(HAVE_COMPATIBLE_CC_PPC_ALTIVEC) && \ defined(HAVE_GCC_INLINE_ASM_PPC_ALTIVEC) # if __GNUC__ >= 4 # define USE_PPC_CRYPTO 1 # endif # endif #endif /* USE_S390X_CRYPTO indicates whether to enable zSeries code. */ #undef USE_S390X_CRYPTO #if defined(HAVE_GCC_INLINE_ASM_S390X) # define USE_S390X_CRYPTO 1 #endif /* USE_S390X_CRYPTO */ typedef struct { gcry_md_block_ctx_t bctx; u32 h0,h1,h2,h3,h4,h5,h6,h7; #ifdef USE_S390X_CRYPTO u32 final_len_msb, final_len_lsb; /* needs to be right after h7. */ int use_s390x_crypto; #endif } SHA256_CONTEXT; /* Assembly implementations use SystemV ABI, ABI conversion and additional * stack to store XMM6-XMM15 needed on Win64. 
 */
#undef ASM_FUNC_ABI
#undef ASM_EXTRA_STACK
#if defined(USE_SSSE3) || defined(USE_AVX) || defined(USE_AVX2) || \
    defined(USE_SHAEXT)
# ifdef HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS
   /* On Win64 the assembly keeps SysV ABI; the compiler-generated ABI
    * wrapper spills XMM6-XMM15 (10 * 16 bytes) plus four pointer-sized
    * slots, which is accounted for in the stack-burn estimate below. */
#  define ASM_FUNC_ABI __attribute__((sysv_abi))
#  define ASM_EXTRA_STACK (10 * 16 + sizeof(void *) * 4)
# else
#  define ASM_FUNC_ABI
#  define ASM_EXTRA_STACK 0
# endif
#endif


#ifdef USE_SSSE3
unsigned int _gcry_sha256_transform_amd64_ssse3(const void *input_data,
                                                u32 state[8],
                                                size_t num_blks) ASM_FUNC_ABI;

/* Adapter with the generic 'bwrite' callback signature.  Returns the
 * stack-burn depth reported by the assembly plus any ABI-wrapper
 * overhead. */
static unsigned int
do_sha256_transform_amd64_ssse3(void *ctx, const unsigned char *data,
                                size_t nblks)
{
  SHA256_CONTEXT *hd = ctx;
  return _gcry_sha256_transform_amd64_ssse3 (data, &hd->h0, nblks)
         + ASM_EXTRA_STACK;
}
#endif

#ifdef USE_AVX
unsigned int _gcry_sha256_transform_amd64_avx(const void *input_data,
                                              u32 state[8],
                                              size_t num_blks) ASM_FUNC_ABI;

/* Adapter for the AVX implementation; same contract as the SSSE3 one. */
static unsigned int
do_sha256_transform_amd64_avx(void *ctx, const unsigned char *data,
                              size_t nblks)
{
  SHA256_CONTEXT *hd = ctx;
  return _gcry_sha256_transform_amd64_avx (data, &hd->h0, nblks)
         + ASM_EXTRA_STACK;
}
#endif

#ifdef USE_AVX2
unsigned int _gcry_sha256_transform_amd64_avx2(const void *input_data,
                                               u32 state[8],
                                               size_t num_blks) ASM_FUNC_ABI;

/* Adapter for the AVX2/BMI2 implementation; same contract as above. */
static unsigned int
do_sha256_transform_amd64_avx2(void *ctx, const unsigned char *data,
                               size_t nblks)
{
  SHA256_CONTEXT *hd = ctx;
  return _gcry_sha256_transform_amd64_avx2 (data, &hd->h0, nblks)
         + ASM_EXTRA_STACK;
}
#endif

#ifdef USE_SHAEXT
/* Does not need ASM_FUNC_ABI */
unsigned int
_gcry_sha256_transform_intel_shaext(u32 state[8],
                                    const unsigned char *input_data,
                                    size_t num_blks);

/* Adapter for the SHA extension implementation.  Note the swapped
 * (state, data) argument order compared with the SSSE3/AVX entry
 * points, and no ASM_EXTRA_STACK term. */
static unsigned int
do_sha256_transform_intel_shaext(void *ctx, const unsigned char *data,
                                 size_t nblks)
{
  SHA256_CONTEXT *hd = ctx;
  return _gcry_sha256_transform_intel_shaext (&hd->h0, data, nblks);
}
#endif

#ifdef USE_ARM_CE
unsigned int _gcry_sha256_transform_armv8_ce(u32 state[8],
                                             const void *input_data,
                                             size_t num_blks);

static unsigned int
do_sha256_transform_armv8_ce(void *ctx, const unsigned char
*data, size_t nblks) { SHA256_CONTEXT *hd = ctx; return _gcry_sha256_transform_armv8_ce (&hd->h0, data, nblks); } #endif #ifdef USE_PPC_CRYPTO unsigned int _gcry_sha256_transform_ppc8(u32 state[8], const unsigned char *input_data, size_t num_blks); unsigned int _gcry_sha256_transform_ppc9(u32 state[8], const unsigned char *input_data, size_t num_blks); static unsigned int do_sha256_transform_ppc8(void *ctx, const unsigned char *data, size_t nblks) { SHA256_CONTEXT *hd = ctx; return _gcry_sha256_transform_ppc8 (&hd->h0, data, nblks); } static unsigned int do_sha256_transform_ppc9(void *ctx, const unsigned char *data, size_t nblks) { SHA256_CONTEXT *hd = ctx; return _gcry_sha256_transform_ppc9 (&hd->h0, data, nblks); } #endif #ifdef USE_S390X_CRYPTO #include "asm-inline-s390x.h" static unsigned int do_sha256_transform_s390x (void *ctx, const unsigned char *data, size_t nblks) { SHA256_CONTEXT *hd = ctx; kimd_execute (KMID_FUNCTION_SHA256, &hd->h0, data, nblks * 64); return 0; } static unsigned int do_sha256_final_s390x (void *ctx, const unsigned char *data, size_t datalen, u32 len_msb, u32 len_lsb) { SHA256_CONTEXT *hd = ctx; /* Make sure that 'final_len' is positioned at correct offset relative * to 'h0'. This is because we are passing 'h0' pointer as start of * parameter block to 'klmd' instruction. 
   */
  gcry_assert (offsetof (SHA256_CONTEXT, final_len_msb)
	       - offsetof (SHA256_CONTEXT, h0) == 8 * sizeof(u32));
  gcry_assert (offsetof (SHA256_CONTEXT, final_len_lsb)
	       - offsetof (SHA256_CONTEXT, final_len_msb) == 1 * sizeof(u32));

  hd->final_len_msb = len_msb;
  hd->final_len_lsb = len_lsb;

  /* klmd performs the final padding/length encoding in hardware, so no
   * stack burning is needed here. */
  klmd_execute (KMID_FUNCTION_SHA256, &hd->h0, data, datalen);
  return 0;
}
#endif


static unsigned int
do_transform_generic (void *ctx, const unsigned char *data, size_t nblks);


/* Initialize the block-context fields shared by SHA-224 and SHA-256 and
 * select the fastest transform function available on this CPU.  The
 * caller is expected to have set the chaining values h0..h7 already. */
static void
sha256_common_init (SHA256_CONTEXT *hd)
{
  unsigned int features = _gcry_get_hw_features ();

  hd->bctx.nblocks = 0;
  hd->bctx.nblocks_high = 0;
  hd->bctx.count = 0;
  hd->bctx.blocksize_shift = _gcry_ctz(64); /* block size is 64 bytes */

  /* Order of feature checks is important here; last match will be
   * selected. Keep slower implementations at the top and faster at
   * the bottom.  */
  hd->bctx.bwrite = do_transform_generic;
#ifdef USE_SSSE3
  if ((features & HWF_INTEL_SSSE3) != 0)
    hd->bctx.bwrite = do_sha256_transform_amd64_ssse3;
#endif
#ifdef USE_AVX
  /* AVX implementation uses SHLD which is known to be slow on non-Intel CPUs.
   * Therefore use this implementation on Intel CPUs only. */
  if ((features & HWF_INTEL_AVX) && (features & HWF_INTEL_FAST_SHLD))
    hd->bctx.bwrite = do_sha256_transform_amd64_avx;
#endif
#ifdef USE_AVX2
  if ((features & HWF_INTEL_AVX2) && (features & HWF_INTEL_BMI2))
    hd->bctx.bwrite = do_sha256_transform_amd64_avx2;
#endif
#ifdef USE_SHAEXT
  if ((features & HWF_INTEL_SHAEXT) && (features & HWF_INTEL_SSE4_1))
    hd->bctx.bwrite = do_sha256_transform_intel_shaext;
#endif
#ifdef USE_ARM_CE
  if ((features & HWF_ARM_SHA2) != 0)
    hd->bctx.bwrite = do_sha256_transform_armv8_ce;
#endif
#ifdef USE_PPC_CRYPTO
  if ((features & HWF_PPC_VCRYPTO) != 0)
    hd->bctx.bwrite = do_sha256_transform_ppc8;
  if ((features & HWF_PPC_VCRYPTO) != 0 && (features & HWF_PPC_ARCH_3_00) != 0)
    hd->bctx.bwrite = do_sha256_transform_ppc9;
#endif
#ifdef USE_S390X_CRYPTO
  hd->use_s390x_crypto = 0;
  if ((features & HWF_S390X_MSA) != 0)
    {
      /* Use CPACF only when both the block transform (kimd) and the
       * finalization (klmd) support SHA-256. */
      if ((kimd_query () & km_function_to_mask (KMID_FUNCTION_SHA256)) &&
	  (klmd_query () & km_function_to_mask (KMID_FUNCTION_SHA256)))
	{
	  hd->bctx.bwrite = do_sha256_transform_s390x;
	  hd->use_s390x_crypto = 1;
	}
    }
#endif
  (void)features;
}


static void
sha256_init (void *context, unsigned int flags)
{
  SHA256_CONTEXT *hd = context;

  (void)flags;

  /* SHA-256 initial hash values (FIPS 180-2, section 5.3). */
  hd->h0 = 0x6a09e667;
  hd->h1 = 0xbb67ae85;
  hd->h2 = 0x3c6ef372;
  hd->h3 = 0xa54ff53a;
  hd->h4 = 0x510e527f;
  hd->h5 = 0x9b05688c;
  hd->h6 = 0x1f83d9ab;
  hd->h7 = 0x5be0cd19;

  sha256_common_init (hd);
}


static void
sha224_init (void *context, unsigned int flags)
{
  SHA256_CONTEXT *hd = context;

  (void)flags;

  /* SHA-224 initial hash values (FIPS 180-2 change notice / RFC 3874). */
  hd->h0 = 0xc1059ed8;
  hd->h1 = 0x367cd507;
  hd->h2 = 0x3070dd17;
  hd->h3 = 0xf70e5939;
  hd->h4 = 0xffc00b31;
  hd->h5 = 0x68581511;
  hd->h6 = 0x64f98fa7;
  hd->h7 = 0xbefa4fa4;

  sha256_common_init (hd);
}


/*
  Transform the message X which consists of 16 32-bit-words. See FIPS
  180-2 for details.  */
#define R(a,b,c,d,e,f,g,h,k,w) do                                 \
          {                                                       \
            t1 = (h) + Sum1((e)) + Cho((e),(f),(g)) + (k) + (w);  \
            t2 = Sum0((a)) + Maj((a),(b),(c));                    \
            d += t1;                                              \
            h = t1 + t2;                                          \
          } while (0)

/* (4.2) same as SHA-1's F1.
*/ #define Cho(x, y, z) (z ^ (x & (y ^ z))) /* (4.3) same as SHA-1's F3 */ #define Maj(x, y, z) ((x & y) + (z & (x ^ y))) /* (4.4) */ #define Sum0(x) (ror (x, 2) ^ ror (x, 13) ^ ror (x, 22)) /* (4.5) */ #define Sum1(x) (ror (x, 6) ^ ror (x, 11) ^ ror (x, 25)) /* Message expansion */ #define S0(x) (ror ((x), 7) ^ ror ((x), 18) ^ ((x) >> 3)) /* (4.6) */ #define S1(x) (ror ((x), 17) ^ ror ((x), 19) ^ ((x) >> 10)) /* (4.7) */ #define I(i) ( w[i] = buf_get_be32(data + i * 4) ) #define W(i) ( w[i&0x0f] = S1(w[(i-2) &0x0f]) \ + w[(i-7) &0x0f] \ + S0(w[(i-15)&0x0f]) \ + w[(i-16)&0x0f] ) static unsigned int do_transform_generic (void *ctx, const unsigned char *data, size_t nblks) { SHA256_CONTEXT *hd = ctx; static const u32 K[64] = { 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5, 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da, 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967, 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070, 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3, 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2 }; do { u32 a,b,c,d,e,f,g,h,t1,t2; u32 w[16]; a = hd->h0; b = hd->h1; c = hd->h2; d = hd->h3; e = hd->h4; f = hd->h5; g = hd->h6; h = hd->h7; R(a, b, c, d, e, f, g, h, K[0], I(0)); R(h, a, b, c, d, e, f, g, K[1], I(1)); R(g, h, a, b, c, d, e, f, K[2], I(2)); R(f, g, h, a, b, c, d, e, K[3], I(3)); R(e, f, g, h, a, b, c, d, K[4], I(4)); R(d, e, f, g, h, a, b, c, K[5], I(5)); R(c, d, e, f, g, h, a, b, K[6], I(6)); R(b, c, d, e, f, g, h, a, K[7], I(7)); R(a, b, c, d, e, f, g, h, K[8], I(8)); R(h, 
a, b, c, d, e, f, g, K[9], I(9)); R(g, h, a, b, c, d, e, f, K[10], I(10)); R(f, g, h, a, b, c, d, e, K[11], I(11)); R(e, f, g, h, a, b, c, d, K[12], I(12)); R(d, e, f, g, h, a, b, c, K[13], I(13)); R(c, d, e, f, g, h, a, b, K[14], I(14)); R(b, c, d, e, f, g, h, a, K[15], I(15)); R(a, b, c, d, e, f, g, h, K[16], W(16)); R(h, a, b, c, d, e, f, g, K[17], W(17)); R(g, h, a, b, c, d, e, f, K[18], W(18)); R(f, g, h, a, b, c, d, e, K[19], W(19)); R(e, f, g, h, a, b, c, d, K[20], W(20)); R(d, e, f, g, h, a, b, c, K[21], W(21)); R(c, d, e, f, g, h, a, b, K[22], W(22)); R(b, c, d, e, f, g, h, a, K[23], W(23)); R(a, b, c, d, e, f, g, h, K[24], W(24)); R(h, a, b, c, d, e, f, g, K[25], W(25)); R(g, h, a, b, c, d, e, f, K[26], W(26)); R(f, g, h, a, b, c, d, e, K[27], W(27)); R(e, f, g, h, a, b, c, d, K[28], W(28)); R(d, e, f, g, h, a, b, c, K[29], W(29)); R(c, d, e, f, g, h, a, b, K[30], W(30)); R(b, c, d, e, f, g, h, a, K[31], W(31)); R(a, b, c, d, e, f, g, h, K[32], W(32)); R(h, a, b, c, d, e, f, g, K[33], W(33)); R(g, h, a, b, c, d, e, f, K[34], W(34)); R(f, g, h, a, b, c, d, e, K[35], W(35)); R(e, f, g, h, a, b, c, d, K[36], W(36)); R(d, e, f, g, h, a, b, c, K[37], W(37)); R(c, d, e, f, g, h, a, b, K[38], W(38)); R(b, c, d, e, f, g, h, a, K[39], W(39)); R(a, b, c, d, e, f, g, h, K[40], W(40)); R(h, a, b, c, d, e, f, g, K[41], W(41)); R(g, h, a, b, c, d, e, f, K[42], W(42)); R(f, g, h, a, b, c, d, e, K[43], W(43)); R(e, f, g, h, a, b, c, d, K[44], W(44)); R(d, e, f, g, h, a, b, c, K[45], W(45)); R(c, d, e, f, g, h, a, b, K[46], W(46)); R(b, c, d, e, f, g, h, a, K[47], W(47)); R(a, b, c, d, e, f, g, h, K[48], W(48)); R(h, a, b, c, d, e, f, g, K[49], W(49)); R(g, h, a, b, c, d, e, f, K[50], W(50)); R(f, g, h, a, b, c, d, e, K[51], W(51)); R(e, f, g, h, a, b, c, d, K[52], W(52)); R(d, e, f, g, h, a, b, c, K[53], W(53)); R(c, d, e, f, g, h, a, b, K[54], W(54)); R(b, c, d, e, f, g, h, a, K[55], W(55)); R(a, b, c, d, e, f, g, h, K[56], W(56)); R(h, a, b, c, d, e, f, g, K[57], 
W(57)); R(g, h, a, b, c, d, e, f, K[58], W(58)); R(f, g, h, a, b, c, d, e, K[59], W(59)); R(e, f, g, h, a, b, c, d, K[60], W(60)); R(d, e, f, g, h, a, b, c, K[61], W(61)); R(c, d, e, f, g, h, a, b, K[62], W(62)); R(b, c, d, e, f, g, h, a, K[63], W(63)); hd->h0 += a; hd->h1 += b; hd->h2 += c; hd->h3 += d; hd->h4 += e; hd->h5 += f; hd->h6 += g; hd->h7 += h; data += 64; } while (--nblks); return 26*4 + 32 + 3 * sizeof(void*); } #undef S0 #undef S1 #undef R /* The routine finally terminates the computation and returns the digest. The handle is prepared for a new cycle, but adding bytes to the handle will the destroy the returned buffer. Returns: 32 bytes with the message the digest. */ static void sha256_final(void *context) { SHA256_CONTEXT *hd = context; u32 t, th, msb, lsb; byte *p; unsigned int burn; t = hd->bctx.nblocks; if (sizeof t == sizeof hd->bctx.nblocks) th = hd->bctx.nblocks_high; else th = hd->bctx.nblocks >> 32; /* multiply by 64 to make a byte count */ lsb = t << 6; msb = (th << 6) | (t >> 26); /* add the count */ t = lsb; if ((lsb += hd->bctx.count) < t) msb++; /* multiply by 8 to make a bit count */ t = lsb; lsb <<= 3; msb <<= 3; msb |= t >> 29; if (0) { } #ifdef USE_S390X_CRYPTO else if (hd->use_s390x_crypto) { burn = do_sha256_final_s390x (hd, hd->bctx.buf, hd->bctx.count, msb, lsb); } #endif else if (hd->bctx.count < 56) /* enough room */ { hd->bctx.buf[hd->bctx.count++] = 0x80; /* pad */ if (hd->bctx.count < 56) memset (&hd->bctx.buf[hd->bctx.count], 0, 56 - hd->bctx.count); /* append the 64 bit count */ buf_put_be32(hd->bctx.buf + 56, msb); buf_put_be32(hd->bctx.buf + 60, lsb); burn = (*hd->bctx.bwrite) (hd, hd->bctx.buf, 1); } else /* need one extra block */ { hd->bctx.buf[hd->bctx.count++] = 0x80; /* pad character */ /* fill pad and next block with zeroes */ memset (&hd->bctx.buf[hd->bctx.count], 0, 64 - hd->bctx.count + 56); /* append the 64 bit count */ buf_put_be32(hd->bctx.buf + 64 + 56, msb); buf_put_be32(hd->bctx.buf + 64 + 60, lsb); burn 
= (*hd->bctx.bwrite) (hd, hd->bctx.buf, 2); } p = hd->bctx.buf; #define X(a) do { buf_put_be32(p, hd->h##a); p += 4; } while(0) X(0); X(1); X(2); X(3); X(4); X(5); X(6); X(7); #undef X hd->bctx.count = 0; _gcry_burn_stack (burn); } static byte * sha256_read (void *context) { SHA256_CONTEXT *hd = context; return hd->bctx.buf; } /* Shortcut functions which puts the hash value of the supplied buffer iov * into outbuf which must have a size of 32 bytes. */ static void -_gcry_sha256_hash_buffers (void *outbuf, const gcry_buffer_t *iov, int iovcnt) +_gcry_sha256_hash_buffers (void *outbuf, size_t nbytes, + const gcry_buffer_t *iov, int iovcnt) { SHA256_CONTEXT hd; + (void)nbytes; + sha256_init (&hd, 0); for (;iovcnt > 0; iov++, iovcnt--) _gcry_md_block_write (&hd, (const char*)iov[0].data + iov[0].off, iov[0].len); sha256_final (&hd); memcpy (outbuf, hd.bctx.buf, 32); } /* Shortcut functions which puts the hash value of the supplied buffer iov * into outbuf which must have a size of 28 bytes. */ static void -_gcry_sha224_hash_buffers (void *outbuf, const gcry_buffer_t *iov, int iovcnt) +_gcry_sha224_hash_buffers (void *outbuf, size_t nbytes, + const gcry_buffer_t *iov, int iovcnt) { SHA256_CONTEXT hd; + (void)nbytes; + sha224_init (&hd, 0); for (;iovcnt > 0; iov++, iovcnt--) _gcry_md_block_write (&hd, (const char*)iov[0].data + iov[0].off, iov[0].len); sha256_final (&hd); memcpy (outbuf, hd.bctx.buf, 28); } /* Self-test section. 
*/ static gpg_err_code_t selftests_sha224 (int extended, selftest_report_func_t report) { const char *what; const char *errtxt; what = "short string"; errtxt = _gcry_hash_selftest_check_one (GCRY_MD_SHA224, 0, "abc", 3, "\x23\x09\x7d\x22\x34\x05\xd8\x22\x86\x42\xa4\x77\xbd\xa2\x55\xb3" "\x2a\xad\xbc\xe4\xbd\xa0\xb3\xf7\xe3\x6c\x9d\xa7", 28); if (errtxt) goto failed; if (extended) { what = "long string"; errtxt = _gcry_hash_selftest_check_one (GCRY_MD_SHA224, 0, "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq", 56, "\x75\x38\x8b\x16\x51\x27\x76\xcc\x5d\xba\x5d\xa1\xfd\x89\x01\x50" "\xb0\xc6\x45\x5c\xb4\xf5\x8b\x19\x52\x52\x25\x25", 28); if (errtxt) goto failed; what = "one million \"a\""; errtxt = _gcry_hash_selftest_check_one (GCRY_MD_SHA224, 1, NULL, 0, "\x20\x79\x46\x55\x98\x0c\x91\xd8\xbb\xb4\xc1\xea\x97\x61\x8a\x4b" "\xf0\x3f\x42\x58\x19\x48\xb2\xee\x4e\xe7\xad\x67", 28); if (errtxt) goto failed; } return 0; /* Succeeded. */ failed: if (report) report ("digest", GCRY_MD_SHA224, what, errtxt); return GPG_ERR_SELFTEST_FAILED; } static gpg_err_code_t selftests_sha256 (int extended, selftest_report_func_t report) { const char *what; const char *errtxt; what = "short string"; errtxt = _gcry_hash_selftest_check_one (GCRY_MD_SHA256, 0, "abc", 3, "\xba\x78\x16\xbf\x8f\x01\xcf\xea\x41\x41\x40\xde\x5d\xae\x22\x23" "\xb0\x03\x61\xa3\x96\x17\x7a\x9c\xb4\x10\xff\x61\xf2\x00\x15\xad", 32); if (errtxt) goto failed; if (extended) { what = "long string"; errtxt = _gcry_hash_selftest_check_one (GCRY_MD_SHA256, 0, "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq", 56, "\x24\x8d\x6a\x61\xd2\x06\x38\xb8\xe5\xc0\x26\x93\x0c\x3e\x60\x39" "\xa3\x3c\xe4\x59\x64\xff\x21\x67\xf6\xec\xed\xd4\x19\xdb\x06\xc1", 32); if (errtxt) goto failed; what = "one million \"a\""; errtxt = _gcry_hash_selftest_check_one (GCRY_MD_SHA256, 1, NULL, 0, "\xcd\xc7\x6e\x5c\x99\x14\xfb\x92\x81\xa1\xc7\xe2\x84\xd7\x3e\x67" "\xf1\x80\x9a\x48\xa4\x97\x20\x0e\x04\x6d\x39\xcc\xc7\x11\x2c\xd0", 32); 
if (errtxt) goto failed; } return 0; /* Succeeded. */ failed: if (report) report ("digest", GCRY_MD_SHA256, what, errtxt); return GPG_ERR_SELFTEST_FAILED; } /* Run a full self-test for ALGO and return 0 on success. */ static gpg_err_code_t run_selftests (int algo, int extended, selftest_report_func_t report) { gpg_err_code_t ec; switch (algo) { case GCRY_MD_SHA224: ec = selftests_sha224 (extended, report); break; case GCRY_MD_SHA256: ec = selftests_sha256 (extended, report); break; default: ec = GPG_ERR_DIGEST_ALGO; break; } return ec; } static byte asn224[19] = /* Object ID is 2.16.840.1.101.3.4.2.4 */ { 0x30, 0x2D, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x04, 0x05, 0x00, 0x04, 0x1C }; static gcry_md_oid_spec_t oid_spec_sha224[] = { /* From RFC3874, Section 4 */ { "2.16.840.1.101.3.4.2.4" }, { NULL }, }; static byte asn256[19] = /* Object ID is 2.16.840.1.101.3.4.2.1 */ { 0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, 0x05, 0x00, 0x04, 0x20 }; static gcry_md_oid_spec_t oid_spec_sha256[] = { /* According to the OpenPGP draft rfc2440-bis06 */ { "2.16.840.1.101.3.4.2.1" }, /* PKCS#1 sha256WithRSAEncryption */ { "1.2.840.113549.1.1.11" }, { NULL }, }; gcry_md_spec_t _gcry_digest_spec_sha224 = { GCRY_MD_SHA224, {0, 1}, "SHA224", asn224, DIM (asn224), oid_spec_sha224, 28, sha224_init, _gcry_md_block_write, sha256_final, sha256_read, NULL, _gcry_sha224_hash_buffers, sizeof (SHA256_CONTEXT), run_selftests }; gcry_md_spec_t _gcry_digest_spec_sha256 = { GCRY_MD_SHA256, {0, 1}, "SHA256", asn256, DIM (asn256), oid_spec_sha256, 32, sha256_init, _gcry_md_block_write, sha256_final, sha256_read, NULL, _gcry_sha256_hash_buffers, sizeof (SHA256_CONTEXT), run_selftests }; diff --git a/cipher/sha512.c b/cipher/sha512.c index a23d5bda..6859cf52 100644 --- a/cipher/sha512.c +++ b/cipher/sha512.c @@ -1,1265 +1,1274 @@ /* sha512.c - SHA384 and SHA512 hash functions * Copyright (C) 2003, 2008, 2009 Free Software 
Foundation, Inc. * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser general Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . */ /* Test vectors from FIPS-180-2: * * "abc" * 384: * CB00753F 45A35E8B B5A03D69 9AC65007 272C32AB 0EDED163 * 1A8B605A 43FF5BED 8086072B A1E7CC23 58BAECA1 34C825A7 * 512: * DDAF35A1 93617ABA CC417349 AE204131 12E6FA4E 89A97EA2 0A9EEEE6 4B55D39A * 2192992A 274FC1A8 36BA3C23 A3FEEBBD 454D4423 643CE80E 2A9AC94F A54CA49F * * "abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu" * 384: * 09330C33 F71147E8 3D192FC7 82CD1B47 53111B17 3B3B05D2 * 2FA08086 E3B0F712 FCC7C71A 557E2DB9 66C3E9FA 91746039 * 512: * 8E959B75 DAE313DA 8CF4F728 14FC143F 8F7779C6 EB9F7FA1 7299AEAD B6889018 * 501D289E 4900F7E4 331B99DE C4B5433A C7D329EE B6DD2654 5E96E55B 874BE909 * * "a" x 1000000 * 384: * 9D0E1809 716474CB 086E834E 310A4A1C ED149E9C 00F24852 * 7972CEC5 704C2A5B 07B8B3DC 38ECC4EB AE97DDD8 7F3D8985 * 512: * E718483D 0CE76964 4E2E42C7 BC15B463 8E1F98B1 3B204428 5632A803 AFA973EB * DE0FF244 877EA60A 4CB0432C E577C31B EB009C5C 2C49AA2E 4EADB217 AD8CC09B */ #include #include #include "g10lib.h" #include "bithelp.h" #include "bufhelp.h" #include "cipher.h" #include "hash-common.h" /* USE_ARM_NEON_ASM indicates whether to enable ARM NEON assembly code. 
*/ #undef USE_ARM_NEON_ASM #ifdef ENABLE_NEON_SUPPORT # if defined(HAVE_ARM_ARCH_V6) && defined(__ARMEL__) \ && defined(HAVE_COMPATIBLE_GCC_ARM_PLATFORM_AS) \ && defined(HAVE_GCC_INLINE_ASM_NEON) # define USE_ARM_NEON_ASM 1 # endif #endif /*ENABLE_NEON_SUPPORT*/ /* USE_ARM_ASM indicates whether to enable ARM assembly code. */ #undef USE_ARM_ASM #if defined(__ARMEL__) && defined(HAVE_COMPATIBLE_GCC_ARM_PLATFORM_AS) # define USE_ARM_ASM 1 #endif /* USE_SSSE3 indicates whether to compile with Intel SSSE3 code. */ #undef USE_SSSE3 #if defined(__x86_64__) && defined(HAVE_GCC_INLINE_ASM_SSSE3) && \ defined(HAVE_INTEL_SYNTAX_PLATFORM_AS) && \ (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) # define USE_SSSE3 1 #endif /* USE_AVX indicates whether to compile with Intel AVX code. */ #undef USE_AVX #if defined(__x86_64__) && defined(HAVE_GCC_INLINE_ASM_AVX) && \ defined(HAVE_INTEL_SYNTAX_PLATFORM_AS) && \ (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) # define USE_AVX 1 #endif /* USE_AVX2 indicates whether to compile with Intel AVX2/rorx code. */ #undef USE_AVX2 #if defined(__x86_64__) && defined(HAVE_GCC_INLINE_ASM_AVX2) && \ defined(HAVE_GCC_INLINE_ASM_BMI2) && \ defined(HAVE_INTEL_SYNTAX_PLATFORM_AS) && \ (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \ defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) # define USE_AVX2 1 #endif /* USE_SSSE3_I386 indicates whether to compile with Intel SSSE3/i386 code. */ #undef USE_SSSE3_I386 #if defined(__i386__) && SIZEOF_UNSIGNED_LONG == 4 && __GNUC__ >= 4 && \ defined(HAVE_GCC_INLINE_ASM_SSSE3) # define USE_SSSE3_I386 1 #endif /* USE_PPC_CRYPTO indicates whether to enable PowerPC vector crypto * accelerated code. 
*/ #undef USE_PPC_CRYPTO #ifdef ENABLE_PPC_CRYPTO_SUPPORT # if defined(HAVE_COMPATIBLE_CC_PPC_ALTIVEC) && \ defined(HAVE_GCC_INLINE_ASM_PPC_ALTIVEC) # if __GNUC__ >= 4 # define USE_PPC_CRYPTO 1 # endif # endif #endif /* USE_S390X_CRYPTO indicates whether to enable zSeries code. */ #undef USE_S390X_CRYPTO #if defined(HAVE_GCC_INLINE_ASM_S390X) # define USE_S390X_CRYPTO 1 #endif /* USE_S390X_CRYPTO */ typedef struct { u64 h0, h1, h2, h3, h4, h5, h6, h7; } SHA512_STATE; typedef struct { gcry_md_block_ctx_t bctx; SHA512_STATE state; #ifdef USE_S390X_CRYPTO u64 final_len_msb, final_len_lsb; /* needs to be right after state.h7. */ int use_s390x_crypto; #endif } SHA512_CONTEXT; static const u64 k[] = { U64_C(0x428a2f98d728ae22), U64_C(0x7137449123ef65cd), U64_C(0xb5c0fbcfec4d3b2f), U64_C(0xe9b5dba58189dbbc), U64_C(0x3956c25bf348b538), U64_C(0x59f111f1b605d019), U64_C(0x923f82a4af194f9b), U64_C(0xab1c5ed5da6d8118), U64_C(0xd807aa98a3030242), U64_C(0x12835b0145706fbe), U64_C(0x243185be4ee4b28c), U64_C(0x550c7dc3d5ffb4e2), U64_C(0x72be5d74f27b896f), U64_C(0x80deb1fe3b1696b1), U64_C(0x9bdc06a725c71235), U64_C(0xc19bf174cf692694), U64_C(0xe49b69c19ef14ad2), U64_C(0xefbe4786384f25e3), U64_C(0x0fc19dc68b8cd5b5), U64_C(0x240ca1cc77ac9c65), U64_C(0x2de92c6f592b0275), U64_C(0x4a7484aa6ea6e483), U64_C(0x5cb0a9dcbd41fbd4), U64_C(0x76f988da831153b5), U64_C(0x983e5152ee66dfab), U64_C(0xa831c66d2db43210), U64_C(0xb00327c898fb213f), U64_C(0xbf597fc7beef0ee4), U64_C(0xc6e00bf33da88fc2), U64_C(0xd5a79147930aa725), U64_C(0x06ca6351e003826f), U64_C(0x142929670a0e6e70), U64_C(0x27b70a8546d22ffc), U64_C(0x2e1b21385c26c926), U64_C(0x4d2c6dfc5ac42aed), U64_C(0x53380d139d95b3df), U64_C(0x650a73548baf63de), U64_C(0x766a0abb3c77b2a8), U64_C(0x81c2c92e47edaee6), U64_C(0x92722c851482353b), U64_C(0xa2bfe8a14cf10364), U64_C(0xa81a664bbc423001), U64_C(0xc24b8b70d0f89791), U64_C(0xc76c51a30654be30), U64_C(0xd192e819d6ef5218), U64_C(0xd69906245565a910), U64_C(0xf40e35855771202a), 
U64_C(0x106aa07032bbd1b8), U64_C(0x19a4c116b8d2d0c8), U64_C(0x1e376c085141ab53), U64_C(0x2748774cdf8eeb99), U64_C(0x34b0bcb5e19b48a8), U64_C(0x391c0cb3c5c95a63), U64_C(0x4ed8aa4ae3418acb), U64_C(0x5b9cca4f7763e373), U64_C(0x682e6ff3d6b2b8a3), U64_C(0x748f82ee5defb2fc), U64_C(0x78a5636f43172f60), U64_C(0x84c87814a1f0ab72), U64_C(0x8cc702081a6439ec), U64_C(0x90befffa23631e28), U64_C(0xa4506cebde82bde9), U64_C(0xbef9a3f7b2c67915), U64_C(0xc67178f2e372532b), U64_C(0xca273eceea26619c), U64_C(0xd186b8c721c0c207), U64_C(0xeada7dd6cde0eb1e), U64_C(0xf57d4f7fee6ed178), U64_C(0x06f067aa72176fba), U64_C(0x0a637dc5a2c898a6), U64_C(0x113f9804bef90dae), U64_C(0x1b710b35131c471b), U64_C(0x28db77f523047d84), U64_C(0x32caab7b40c72493), U64_C(0x3c9ebe0a15c9bebc), U64_C(0x431d67c49c100d4c), U64_C(0x4cc5d4becb3e42b6), U64_C(0x597f299cfc657e2a), U64_C(0x5fcb6fab3ad6faec), U64_C(0x6c44198c4a475817) }; /* AMD64 assembly implementations use SystemV ABI, ABI conversion and additional * stack to store XMM6-XMM15 needed on Win64. 
*/ #undef ASM_FUNC_ABI #undef ASM_EXTRA_STACK #if defined(USE_SSSE3) || defined(USE_AVX) || defined(USE_AVX2) # ifdef HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS # define ASM_FUNC_ABI __attribute__((sysv_abi)) # define ASM_EXTRA_STACK (10 * 16 + 4 * sizeof(void *)) # else # define ASM_FUNC_ABI # define ASM_EXTRA_STACK 0 # endif #endif #ifdef USE_ARM_NEON_ASM unsigned int _gcry_sha512_transform_armv7_neon (SHA512_STATE *hd, const unsigned char *data, const u64 k[], size_t num_blks); static unsigned int do_sha512_transform_armv7_neon(void *ctx, const unsigned char *data, size_t nblks) { SHA512_CONTEXT *hd = ctx; return _gcry_sha512_transform_armv7_neon (&hd->state, data, k, nblks); } #endif #ifdef USE_SSSE3 unsigned int _gcry_sha512_transform_amd64_ssse3(const void *input_data, void *state, size_t num_blks) ASM_FUNC_ABI; static unsigned int do_sha512_transform_amd64_ssse3(void *ctx, const unsigned char *data, size_t nblks) { SHA512_CONTEXT *hd = ctx; return _gcry_sha512_transform_amd64_ssse3 (data, &hd->state, nblks) + ASM_EXTRA_STACK; } #endif #ifdef USE_AVX unsigned int _gcry_sha512_transform_amd64_avx(const void *input_data, void *state, size_t num_blks) ASM_FUNC_ABI; static unsigned int do_sha512_transform_amd64_avx(void *ctx, const unsigned char *data, size_t nblks) { SHA512_CONTEXT *hd = ctx; return _gcry_sha512_transform_amd64_avx (data, &hd->state, nblks) + ASM_EXTRA_STACK; } #endif #ifdef USE_AVX2 unsigned int _gcry_sha512_transform_amd64_avx2(const void *input_data, void *state, size_t num_blks) ASM_FUNC_ABI; static unsigned int do_sha512_transform_amd64_avx2(void *ctx, const unsigned char *data, size_t nblks) { SHA512_CONTEXT *hd = ctx; return _gcry_sha512_transform_amd64_avx2 (data, &hd->state, nblks) + ASM_EXTRA_STACK; } #endif #ifdef USE_SSSE3_I386 unsigned int _gcry_sha512_transform_i386_ssse3(u64 state[8], const unsigned char *input_data, size_t num_blks); static unsigned int do_sha512_transform_i386_ssse3(void *ctx, const unsigned char *data, size_t nblks) 
{ SHA512_CONTEXT *hd = ctx; return _gcry_sha512_transform_i386_ssse3 (&hd->state.h0, data, nblks); } #endif #ifdef USE_ARM_ASM unsigned int _gcry_sha512_transform_arm (SHA512_STATE *hd, const unsigned char *data, const u64 k[], size_t num_blks); static unsigned int do_transform_generic (void *context, const unsigned char *data, size_t nblks) { SHA512_CONTEXT *hd = context; return _gcry_sha512_transform_arm (&hd->state, data, k, nblks); } #else static unsigned int do_transform_generic (void *context, const unsigned char *data, size_t nblks); #endif #ifdef USE_PPC_CRYPTO unsigned int _gcry_sha512_transform_ppc8(u64 state[8], const unsigned char *input_data, size_t num_blks); unsigned int _gcry_sha512_transform_ppc9(u64 state[8], const unsigned char *input_data, size_t num_blks); static unsigned int do_sha512_transform_ppc8(void *ctx, const unsigned char *data, size_t nblks) { SHA512_CONTEXT *hd = ctx; return _gcry_sha512_transform_ppc8 (&hd->state.h0, data, nblks); } static unsigned int do_sha512_transform_ppc9(void *ctx, const unsigned char *data, size_t nblks) { SHA512_CONTEXT *hd = ctx; return _gcry_sha512_transform_ppc9 (&hd->state.h0, data, nblks); } #endif #ifdef USE_S390X_CRYPTO #include "asm-inline-s390x.h" static unsigned int do_sha512_transform_s390x (void *ctx, const unsigned char *data, size_t nblks) { SHA512_CONTEXT *hd = ctx; kimd_execute (KMID_FUNCTION_SHA512, &hd->state.h0, data, nblks * 128); return 0; } static unsigned int do_sha512_final_s390x (void *ctx, const unsigned char *data, size_t datalen, u64 len_msb, u64 len_lsb) { SHA512_CONTEXT *hd = ctx; /* Make sure that 'final_len' is positioned at correct offset relative * to 'state.h0'. This is because we are passing 'state.h0' pointer as start of * parameter block to 'klmd' instruction. 
*/ gcry_assert (offsetof (SHA512_CONTEXT, final_len_msb) - offsetof (SHA512_CONTEXT, state.h0) == 8 * sizeof(u64)); gcry_assert (offsetof (SHA512_CONTEXT, final_len_lsb) - offsetof (SHA512_CONTEXT, final_len_msb) == 1 * sizeof(u64)); hd->final_len_msb = len_msb; hd->final_len_lsb = len_lsb; klmd_execute (KMID_FUNCTION_SHA512, &hd->state.h0, data, datalen); return 0; } #endif static void sha512_init_common (SHA512_CONTEXT *ctx, unsigned int flags) { unsigned int features = _gcry_get_hw_features (); (void)flags; (void)k; ctx->bctx.nblocks = 0; ctx->bctx.nblocks_high = 0; ctx->bctx.count = 0; ctx->bctx.blocksize_shift = _gcry_ctz(128); /* Order of feature checks is important here; last match will be * selected. Keep slower implementations at the top and faster at * the bottom. */ ctx->bctx.bwrite = do_transform_generic; #ifdef USE_ARM_NEON_ASM if ((features & HWF_ARM_NEON) != 0) ctx->bctx.bwrite = do_sha512_transform_armv7_neon; #endif #ifdef USE_SSSE3 if ((features & HWF_INTEL_SSSE3) != 0) ctx->bctx.bwrite = do_sha512_transform_amd64_ssse3; #endif #ifdef USE_AVX if ((features & HWF_INTEL_AVX) && (features & HWF_INTEL_FAST_SHLD)) ctx->bctx.bwrite = do_sha512_transform_amd64_avx; #endif #ifdef USE_AVX2 if ((features & HWF_INTEL_AVX2) && (features & HWF_INTEL_BMI2)) ctx->bctx.bwrite = do_sha512_transform_amd64_avx2; #endif #ifdef USE_PPC_CRYPTO if ((features & HWF_PPC_VCRYPTO) != 0) ctx->bctx.bwrite = do_sha512_transform_ppc8; if ((features & HWF_PPC_VCRYPTO) != 0 && (features & HWF_PPC_ARCH_3_00) != 0) ctx->bctx.bwrite = do_sha512_transform_ppc9; #endif #ifdef USE_SSSE3_I386 if ((features & HWF_INTEL_SSSE3) != 0) ctx->bctx.bwrite = do_sha512_transform_i386_ssse3; #endif #ifdef USE_S390X_CRYPTO ctx->use_s390x_crypto = 0; if ((features & HWF_S390X_MSA) != 0) { if ((kimd_query () & km_function_to_mask (KMID_FUNCTION_SHA512)) && (klmd_query () & km_function_to_mask (KMID_FUNCTION_SHA512))) { ctx->bctx.bwrite = do_sha512_transform_s390x; ctx->use_s390x_crypto = 1; } } 
#endif (void)features; } static void sha512_init (void *context, unsigned int flags) { SHA512_CONTEXT *ctx = context; SHA512_STATE *hd = &ctx->state; hd->h0 = U64_C(0x6a09e667f3bcc908); hd->h1 = U64_C(0xbb67ae8584caa73b); hd->h2 = U64_C(0x3c6ef372fe94f82b); hd->h3 = U64_C(0xa54ff53a5f1d36f1); hd->h4 = U64_C(0x510e527fade682d1); hd->h5 = U64_C(0x9b05688c2b3e6c1f); hd->h6 = U64_C(0x1f83d9abfb41bd6b); hd->h7 = U64_C(0x5be0cd19137e2179); sha512_init_common (ctx, flags); } static void sha384_init (void *context, unsigned int flags) { SHA512_CONTEXT *ctx = context; SHA512_STATE *hd = &ctx->state; hd->h0 = U64_C(0xcbbb9d5dc1059ed8); hd->h1 = U64_C(0x629a292a367cd507); hd->h2 = U64_C(0x9159015a3070dd17); hd->h3 = U64_C(0x152fecd8f70e5939); hd->h4 = U64_C(0x67332667ffc00b31); hd->h5 = U64_C(0x8eb44a8768581511); hd->h6 = U64_C(0xdb0c2e0d64f98fa7); hd->h7 = U64_C(0x47b5481dbefa4fa4); sha512_init_common (ctx, flags); } static void sha512_256_init (void *context, unsigned int flags) { SHA512_CONTEXT *ctx = context; SHA512_STATE *hd = &ctx->state; hd->h0 = U64_C(0x22312194fc2bf72c); hd->h1 = U64_C(0x9f555fa3c84c64c2); hd->h2 = U64_C(0x2393b86b6f53b151); hd->h3 = U64_C(0x963877195940eabd); hd->h4 = U64_C(0x96283ee2a88effe3); hd->h5 = U64_C(0xbe5e1e2553863992); hd->h6 = U64_C(0x2b0199fc2c85b8aa); hd->h7 = U64_C(0x0eb72ddc81c52ca2); sha512_init_common (ctx, flags); } static void sha512_224_init (void *context, unsigned int flags) { SHA512_CONTEXT *ctx = context; SHA512_STATE *hd = &ctx->state; hd->h0 = U64_C(0x8c3d37c819544da2); hd->h1 = U64_C(0x73e1996689dcd4d6); hd->h2 = U64_C(0x1dfab7ae32ff9c82); hd->h3 = U64_C(0x679dd514582f9fcf); hd->h4 = U64_C(0x0f6d2b697bd44da8); hd->h5 = U64_C(0x77e36f7304c48942); hd->h6 = U64_C(0x3f9d85a86a1d36c8); hd->h7 = U64_C(0x1112e6ad91d692a1); sha512_init_common (ctx, flags); } #ifndef USE_ARM_ASM static inline u64 ROTR (u64 x, u64 n) { return ((x >> n) | (x << (64 - n))); } static inline u64 Ch (u64 x, u64 y, u64 z) { return ((x & y) ^ ( ~x & z)); 
} static inline u64 Maj (u64 x, u64 y, u64 z) { return ((x & y) ^ (x & z) ^ (y & z)); } static inline u64 Sum0 (u64 x) { return (ROTR (x, 28) ^ ROTR (x, 34) ^ ROTR (x, 39)); } static inline u64 Sum1 (u64 x) { return (ROTR (x, 14) ^ ROTR (x, 18) ^ ROTR (x, 41)); } /**************** * Transform the message W which consists of 16 64-bit-words */ static unsigned int do_transform_generic (void *context, const unsigned char *data, size_t nblks) { SHA512_CONTEXT *ctx = context; SHA512_STATE *hd = &ctx->state; do { u64 a, b, c, d, e, f, g, h; u64 w[16]; int t; /* get values from the chaining vars */ a = hd->h0; b = hd->h1; c = hd->h2; d = hd->h3; e = hd->h4; f = hd->h5; g = hd->h6; h = hd->h7; for ( t = 0; t < 16; t++ ) w[t] = buf_get_be64(data + t * 8); #define S0(x) (ROTR((x),1) ^ ROTR((x),8) ^ ((x)>>7)) #define S1(x) (ROTR((x),19) ^ ROTR((x),61) ^ ((x)>>6)) for (t = 0; t < 80 - 16; ) { u64 t1, t2; t1 = h + Sum1 (e) + Ch (e, f, g) + k[t] + w[0]; w[0] += S1 (w[14]) + w[9] + S0 (w[1]); t2 = Sum0 (a) + Maj (a, b, c); d += t1; h = t1 + t2; t1 = g + Sum1 (d) + Ch (d, e, f) + k[t+1] + w[1]; w[1] += S1 (w[15]) + w[10] + S0 (w[2]); t2 = Sum0 (h) + Maj (h, a, b); c += t1; g = t1 + t2; t1 = f + Sum1 (c) + Ch (c, d, e) + k[t+2] + w[2]; w[2] += S1 (w[0]) + w[11] + S0 (w[3]); t2 = Sum0 (g) + Maj (g, h, a); b += t1; f = t1 + t2; t1 = e + Sum1 (b) + Ch (b, c, d) + k[t+3] + w[3]; w[3] += S1 (w[1]) + w[12] + S0 (w[4]); t2 = Sum0 (f) + Maj (f, g, h); a += t1; e = t1 + t2; t1 = d + Sum1 (a) + Ch (a, b, c) + k[t+4] + w[4]; w[4] += S1 (w[2]) + w[13] + S0 (w[5]); t2 = Sum0 (e) + Maj (e, f, g); h += t1; d = t1 + t2; t1 = c + Sum1 (h) + Ch (h, a, b) + k[t+5] + w[5]; w[5] += S1 (w[3]) + w[14] + S0 (w[6]); t2 = Sum0 (d) + Maj (d, e, f); g += t1; c = t1 + t2; t1 = b + Sum1 (g) + Ch (g, h, a) + k[t+6] + w[6]; w[6] += S1 (w[4]) + w[15] + S0 (w[7]); t2 = Sum0 (c) + Maj (c, d, e); f += t1; b = t1 + t2; t1 = a + Sum1 (f) + Ch (f, g, h) + k[t+7] + w[7]; w[7] += S1 (w[5]) + w[0] + S0 (w[8]); t2 = Sum0 
(b) + Maj (b, c, d); e += t1; a = t1 + t2; t1 = h + Sum1 (e) + Ch (e, f, g) + k[t+8] + w[8]; w[8] += S1 (w[6]) + w[1] + S0 (w[9]); t2 = Sum0 (a) + Maj (a, b, c); d += t1; h = t1 + t2; t1 = g + Sum1 (d) + Ch (d, e, f) + k[t+9] + w[9]; w[9] += S1 (w[7]) + w[2] + S0 (w[10]); t2 = Sum0 (h) + Maj (h, a, b); c += t1; g = t1 + t2; t1 = f + Sum1 (c) + Ch (c, d, e) + k[t+10] + w[10]; w[10] += S1 (w[8]) + w[3] + S0 (w[11]); t2 = Sum0 (g) + Maj (g, h, a); b += t1; f = t1 + t2; t1 = e + Sum1 (b) + Ch (b, c, d) + k[t+11] + w[11]; w[11] += S1 (w[9]) + w[4] + S0 (w[12]); t2 = Sum0 (f) + Maj (f, g, h); a += t1; e = t1 + t2; t1 = d + Sum1 (a) + Ch (a, b, c) + k[t+12] + w[12]; w[12] += S1 (w[10]) + w[5] + S0 (w[13]); t2 = Sum0 (e) + Maj (e, f, g); h += t1; d = t1 + t2; t1 = c + Sum1 (h) + Ch (h, a, b) + k[t+13] + w[13]; w[13] += S1 (w[11]) + w[6] + S0 (w[14]); t2 = Sum0 (d) + Maj (d, e, f); g += t1; c = t1 + t2; t1 = b + Sum1 (g) + Ch (g, h, a) + k[t+14] + w[14]; w[14] += S1 (w[12]) + w[7] + S0 (w[15]); t2 = Sum0 (c) + Maj (c, d, e); f += t1; b = t1 + t2; t1 = a + Sum1 (f) + Ch (f, g, h) + k[t+15] + w[15]; w[15] += S1 (w[13]) + w[8] + S0 (w[0]); t2 = Sum0 (b) + Maj (b, c, d); e += t1; a = t1 + t2; t += 16; } for (; t < 80; ) { u64 t1, t2; t1 = h + Sum1 (e) + Ch (e, f, g) + k[t] + w[0]; t2 = Sum0 (a) + Maj (a, b, c); d += t1; h = t1 + t2; t1 = g + Sum1 (d) + Ch (d, e, f) + k[t+1] + w[1]; t2 = Sum0 (h) + Maj (h, a, b); c += t1; g = t1 + t2; t1 = f + Sum1 (c) + Ch (c, d, e) + k[t+2] + w[2]; t2 = Sum0 (g) + Maj (g, h, a); b += t1; f = t1 + t2; t1 = e + Sum1 (b) + Ch (b, c, d) + k[t+3] + w[3]; t2 = Sum0 (f) + Maj (f, g, h); a += t1; e = t1 + t2; t1 = d + Sum1 (a) + Ch (a, b, c) + k[t+4] + w[4]; t2 = Sum0 (e) + Maj (e, f, g); h += t1; d = t1 + t2; t1 = c + Sum1 (h) + Ch (h, a, b) + k[t+5] + w[5]; t2 = Sum0 (d) + Maj (d, e, f); g += t1; c = t1 + t2; t1 = b + Sum1 (g) + Ch (g, h, a) + k[t+6] + w[6]; t2 = Sum0 (c) + Maj (c, d, e); f += t1; b = t1 + t2; t1 = a + Sum1 (f) + Ch (f, g, h) + 
k[t+7] + w[7]; t2 = Sum0 (b) + Maj (b, c, d); e += t1; a = t1 + t2; t1 = h + Sum1 (e) + Ch (e, f, g) + k[t+8] + w[8]; t2 = Sum0 (a) + Maj (a, b, c); d += t1; h = t1 + t2; t1 = g + Sum1 (d) + Ch (d, e, f) + k[t+9] + w[9]; t2 = Sum0 (h) + Maj (h, a, b); c += t1; g = t1 + t2; t1 = f + Sum1 (c) + Ch (c, d, e) + k[t+10] + w[10]; t2 = Sum0 (g) + Maj (g, h, a); b += t1; f = t1 + t2; t1 = e + Sum1 (b) + Ch (b, c, d) + k[t+11] + w[11]; t2 = Sum0 (f) + Maj (f, g, h); a += t1; e = t1 + t2; t1 = d + Sum1 (a) + Ch (a, b, c) + k[t+12] + w[12]; t2 = Sum0 (e) + Maj (e, f, g); h += t1; d = t1 + t2; t1 = c + Sum1 (h) + Ch (h, a, b) + k[t+13] + w[13]; t2 = Sum0 (d) + Maj (d, e, f); g += t1; c = t1 + t2; t1 = b + Sum1 (g) + Ch (g, h, a) + k[t+14] + w[14]; t2 = Sum0 (c) + Maj (c, d, e); f += t1; b = t1 + t2; t1 = a + Sum1 (f) + Ch (f, g, h) + k[t+15] + w[15]; t2 = Sum0 (b) + Maj (b, c, d); e += t1; a = t1 + t2; t += 16; } /* Update chaining vars. */ hd->h0 += a; hd->h1 += b; hd->h2 += c; hd->h3 += d; hd->h4 += e; hd->h5 += f; hd->h6 += g; hd->h7 += h; data += 128; } while (--nblks); return (8 + 16) * sizeof(u64) + sizeof(u32) + 3 * sizeof(void*); } #endif /*!USE_ARM_ASM*/ /* The routine final terminates the computation and * returns the digest. * The handle is prepared for a new cycle, but adding bytes to the * handle will the destroy the returned buffer. * Returns: 64 bytes representing the digest. When used for sha384, * we take the leftmost 48 of those bytes. 
*/
/* Terminate the SHA-512 family computation: append the standard MD-style
 * padding (a 0x80 byte, zeros, then the 128-bit big-endian bit count),
 * run the compression function one last time, and serialize the eight
 * state words big-endian into bctx.buf.  On s390x with MSA the whole
 * finalization is delegated to the KLMD instruction instead. */
static void
sha512_final (void *context)
{
  SHA512_CONTEXT *hd = context;
  unsigned int burn;
  u64 t, th, msb, lsb;
  byte *p;

  t = hd->bctx.nblocks;
  /* if (sizeof t == sizeof hd->bctx.nblocks) */
  th = hd->bctx.nblocks_high;
  /* else */
  /* th = hd->bctx.nblocks >> 64; In case we ever use u128 */

  /* multiply by 128 to make a byte count; t >> 57 carries the bits that
   * overflow the low word into the high word */
  lsb = t << 7;
  msb = (th << 7) | (t >> 57);
  /* add the count */
  t = lsb;
  if ((lsb += hd->bctx.count) < t)
    msb++;                      /* carry from low into high word */
  /* multiply by 8 to make a bit count */
  t = lsb;
  lsb <<= 3;
  msb <<= 3;
  msb |= t >> 61;

  if (0)
    { }
#ifdef USE_S390X_CRYPTO
  else if (hd->use_s390x_crypto)
    {
      /* KLMD performs padding and the final compression in hardware. */
      burn = do_sha512_final_s390x (hd, hd->bctx.buf, hd->bctx.count, msb, lsb);
    }
#endif
  else
    {
      if (hd->bctx.count < 112)
	{			/* enough room */
	  hd->bctx.buf[hd->bctx.count++] = 0x80; /* pad */
	  if (hd->bctx.count < 112)
	    memset (&hd->bctx.buf[hd->bctx.count], 0, 112 - hd->bctx.count);
	}
      else
	{			/* need one extra block */
	  hd->bctx.buf[hd->bctx.count++] = 0x80; /* pad character */
	  if (hd->bctx.count < 128)
	    memset (&hd->bctx.buf[hd->bctx.count], 0, 128 - hd->bctx.count);
	  hd->bctx.count = 128;
	  _gcry_md_block_write (context, NULL, 0); /* flush */
	  memset (hd->bctx.buf, 0, 112); /* fill next block with zeroes */
	}
      /* append the 128 bit count */
      buf_put_be64(hd->bctx.buf + 112, msb);
      buf_put_be64(hd->bctx.buf + 120, lsb);
      burn = (*hd->bctx.bwrite) (hd, hd->bctx.buf, 1);
    }

  p = hd->bctx.buf;
#define X(a) do { buf_put_be64(p, hd->state.h##a); p += 8; } while (0)
  X (0);
  X (1);
  X (2);
  X (3);
  X (4);
  X (5);
  /* Note that these last two chunks are included even for SHA384.
     We just ignore them. */
  X (6);
  X (7);
#undef X

  hd->bctx.count = 0;

  _gcry_burn_stack (burn);
}

/* Return a pointer to the digest previously written into bctx.buf by
 * sha512_final.  The truncated variants (SHA-384, SHA-512/256, ...)
 * simply read fewer leading bytes of this buffer. */
static byte *
sha512_read (void *context)
{
  SHA512_CONTEXT *hd = (SHA512_CONTEXT *) context;
  return hd->bctx.buf;
}

/* Shortcut functions which puts the hash value of the supplied buffer iov
 * into outbuf which must have a size of 64 bytes.
*/ static void -_gcry_sha512_hash_buffers (void *outbuf, const gcry_buffer_t *iov, int iovcnt) +_gcry_sha512_hash_buffers (void *outbuf, size_t nbytes, + const gcry_buffer_t *iov, int iovcnt) { SHA512_CONTEXT hd; + (void)nbytes; + sha512_init (&hd, 0); for (;iovcnt > 0; iov++, iovcnt--) _gcry_md_block_write (&hd, (const char*)iov[0].data + iov[0].off, iov[0].len); sha512_final (&hd); memcpy (outbuf, hd.bctx.buf, 64); } /* Shortcut functions which puts the hash value of the supplied buffer iov * into outbuf which must have a size of 48 bytes. */ static void -_gcry_sha384_hash_buffers (void *outbuf, const gcry_buffer_t *iov, int iovcnt) +_gcry_sha384_hash_buffers (void *outbuf, size_t nbytes, + const gcry_buffer_t *iov, int iovcnt) { SHA512_CONTEXT hd; + (void)nbytes; + sha384_init (&hd, 0); for (;iovcnt > 0; iov++, iovcnt--) _gcry_md_block_write (&hd, (const char*)iov[0].data + iov[0].off, iov[0].len); sha512_final (&hd); memcpy (outbuf, hd.bctx.buf, 48); } /* Shortcut functions which puts the hash value of the supplied buffer iov * into outbuf which must have a size of 32 bytes. */ static void -_gcry_sha512_256_hash_buffers (void *outbuf, const gcry_buffer_t *iov, - int iovcnt) +_gcry_sha512_256_hash_buffers (void *outbuf, size_t nbytes, + const gcry_buffer_t *iov, int iovcnt) { SHA512_CONTEXT hd; + (void)nbytes; sha512_256_init (&hd, 0); for (;iovcnt > 0; iov++, iovcnt--) _gcry_md_block_write (&hd, (const char*)iov[0].data + iov[0].off, iov[0].len); sha512_final (&hd); memcpy (outbuf, hd.bctx.buf, 32); } /* Shortcut functions which puts the hash value of the supplied buffer iov * into outbuf which must have a size of 28 bytes. 
*/ static void -_gcry_sha512_224_hash_buffers (void *outbuf, const gcry_buffer_t *iov, - int iovcnt) +_gcry_sha512_224_hash_buffers (void *outbuf, size_t nbytes, + const gcry_buffer_t *iov, int iovcnt) { SHA512_CONTEXT hd; + (void)nbytes; + sha512_224_init (&hd, 0); for (;iovcnt > 0; iov++, iovcnt--) _gcry_md_block_write (&hd, (const char*)iov[0].data + iov[0].off, iov[0].len); sha512_final (&hd); memcpy (outbuf, hd.bctx.buf, 28); } /* Self-test section. */ static gpg_err_code_t selftests_sha384 (int extended, selftest_report_func_t report) { const char *what; const char *errtxt; what = "short string"; errtxt = _gcry_hash_selftest_check_one (GCRY_MD_SHA384, 0, "abc", 3, "\xcb\x00\x75\x3f\x45\xa3\x5e\x8b\xb5\xa0\x3d\x69\x9a\xc6\x50\x07" "\x27\x2c\x32\xab\x0e\xde\xd1\x63\x1a\x8b\x60\x5a\x43\xff\x5b\xed" "\x80\x86\x07\x2b\xa1\xe7\xcc\x23\x58\xba\xec\xa1\x34\xc8\x25\xa7", 48); if (errtxt) goto failed; if (extended) { what = "long string"; errtxt = _gcry_hash_selftest_check_one (GCRY_MD_SHA384, 0, "abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmn" "hijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu", 112, "\x09\x33\x0C\x33\xF7\x11\x47\xE8\x3D\x19\x2F\xC7\x82\xCD\x1B\x47" "\x53\x11\x1B\x17\x3B\x3B\x05\xD2\x2F\xA0\x80\x86\xE3\xB0\xF7\x12" "\xFC\xC7\xC7\x1A\x55\x7E\x2D\xB9\x66\xC3\xE9\xFA\x91\x74\x60\x39", 48); if (errtxt) goto failed; what = "one million \"a\""; errtxt = _gcry_hash_selftest_check_one (GCRY_MD_SHA384, 1, NULL, 0, "\x9D\x0E\x18\x09\x71\x64\x74\xCB\x08\x6E\x83\x4E\x31\x0A\x4A\x1C" "\xED\x14\x9E\x9C\x00\xF2\x48\x52\x79\x72\xCE\xC5\x70\x4C\x2A\x5B" "\x07\xB8\xB3\xDC\x38\xEC\xC4\xEB\xAE\x97\xDD\xD8\x7F\x3D\x89\x85", 48); if (errtxt) goto failed; } return 0; /* Succeeded. 
*/ failed: if (report) report ("digest", GCRY_MD_SHA384, what, errtxt); return GPG_ERR_SELFTEST_FAILED; } static gpg_err_code_t selftests_sha512 (int extended, selftest_report_func_t report) { const char *what; const char *errtxt; what = "short string"; errtxt = _gcry_hash_selftest_check_one (GCRY_MD_SHA512, 0, "abc", 3, "\xDD\xAF\x35\xA1\x93\x61\x7A\xBA\xCC\x41\x73\x49\xAE\x20\x41\x31" "\x12\xE6\xFA\x4E\x89\xA9\x7E\xA2\x0A\x9E\xEE\xE6\x4B\x55\xD3\x9A" "\x21\x92\x99\x2A\x27\x4F\xC1\xA8\x36\xBA\x3C\x23\xA3\xFE\xEB\xBD" "\x45\x4D\x44\x23\x64\x3C\xE8\x0E\x2A\x9A\xC9\x4F\xA5\x4C\xA4\x9F", 64); if (errtxt) goto failed; if (extended) { what = "long string"; errtxt = _gcry_hash_selftest_check_one (GCRY_MD_SHA512, 0, "abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmn" "hijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu", 112, "\x8E\x95\x9B\x75\xDA\xE3\x13\xDA\x8C\xF4\xF7\x28\x14\xFC\x14\x3F" "\x8F\x77\x79\xC6\xEB\x9F\x7F\xA1\x72\x99\xAE\xAD\xB6\x88\x90\x18" "\x50\x1D\x28\x9E\x49\x00\xF7\xE4\x33\x1B\x99\xDE\xC4\xB5\x43\x3A" "\xC7\xD3\x29\xEE\xB6\xDD\x26\x54\x5E\x96\xE5\x5B\x87\x4B\xE9\x09", 64); if (errtxt) goto failed; what = "one million \"a\""; errtxt = _gcry_hash_selftest_check_one (GCRY_MD_SHA512, 1, NULL, 0, "\xE7\x18\x48\x3D\x0C\xE7\x69\x64\x4E\x2E\x42\xC7\xBC\x15\xB4\x63" "\x8E\x1F\x98\xB1\x3B\x20\x44\x28\x56\x32\xA8\x03\xAF\xA9\x73\xEB" "\xDE\x0F\xF2\x44\x87\x7E\xA6\x0A\x4C\xB0\x43\x2C\xE5\x77\xC3\x1B" "\xEB\x00\x9C\x5C\x2C\x49\xAA\x2E\x4E\xAD\xB2\x17\xAD\x8C\xC0\x9B", 64); if (errtxt) goto failed; } return 0; /* Succeeded. 
*/ failed: if (report) report ("digest", GCRY_MD_SHA512, what, errtxt); return GPG_ERR_SELFTEST_FAILED; } static gpg_err_code_t selftests_sha512_224 (int extended, selftest_report_func_t report) { const char *what; const char *errtxt; what = "short string"; errtxt = _gcry_hash_selftest_check_one (GCRY_MD_SHA512_224, 0, "abc", 3, "\x46\x34\x27\x0F\x70\x7B\x6A\x54\xDA\xAE\x75\x30\x46\x08\x42\xE2" "\x0E\x37\xED\x26\x5C\xEE\xE9\xA4\x3E\x89\x24\xAA", 28); if (errtxt) goto failed; if (extended) { what = "long string"; errtxt = _gcry_hash_selftest_check_one (GCRY_MD_SHA512_224, 0, "abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmn" "hijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu", 112, "\x23\xFE\xC5\xBB\x94\xD6\x0B\x23\x30\x81\x92\x64\x0B\x0C\x45\x33" "\x35\xD6\x64\x73\x4F\xE4\x0E\x72\x68\x67\x4A\xF9", 28); if (errtxt) goto failed; what = "one million \"a\""; errtxt = _gcry_hash_selftest_check_one (GCRY_MD_SHA512_224, 1, NULL, 0, "\x37\xab\x33\x1d\x76\xf0\xd3\x6d\xe4\x22\xbd\x0e\xde\xb2\x2a\x28" "\xac\xcd\x48\x7b\x7a\x84\x53\xae\x96\x5d\xd2\x87", 28); if (errtxt) goto failed; } return 0; /* Succeeded. 
*/ failed: if (report) report ("digest", GCRY_MD_SHA512_224, what, errtxt); return GPG_ERR_SELFTEST_FAILED; } static gpg_err_code_t selftests_sha512_256 (int extended, selftest_report_func_t report) { const char *what; const char *errtxt; what = "short string"; errtxt = _gcry_hash_selftest_check_one (GCRY_MD_SHA512_256, 0, "abc", 3, "\x53\x04\x8E\x26\x81\x94\x1E\xF9\x9B\x2E\x29\xB7\x6B\x4C\x7D\xAB" "\xE4\xC2\xD0\xC6\x34\xFC\x6D\x46\xE0\xE2\xF1\x31\x07\xE7\xAF\x23", 32); if (errtxt) goto failed; if (extended) { what = "long string"; errtxt = _gcry_hash_selftest_check_one (GCRY_MD_SHA512_256, 0, "abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmn" "hijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu", 112, "\x39\x28\xE1\x84\xFB\x86\x90\xF8\x40\xDA\x39\x88\x12\x1D\x31\xBE" "\x65\xCB\x9D\x3E\xF8\x3E\xE6\x14\x6F\xEA\xC8\x61\xE1\x9B\x56\x3A", 32); if (errtxt) goto failed; what = "one million \"a\""; errtxt = _gcry_hash_selftest_check_one (GCRY_MD_SHA512_256, 1, NULL, 0, "\x9a\x59\xa0\x52\x93\x01\x87\xa9\x70\x38\xca\xe6\x92\xf3\x07\x08" "\xaa\x64\x91\x92\x3e\xf5\x19\x43\x94\xdc\x68\xd5\x6c\x74\xfb\x21", 32); if (errtxt) goto failed; } return 0; /* Succeeded. */ failed: if (report) report ("digest", GCRY_MD_SHA512_256, what, errtxt); return GPG_ERR_SELFTEST_FAILED; } /* Run a full self-test for ALGO and return 0 on success. 
*/
/* Dispatch the self-test for ALGO to the matching selftests_* routine.
 * EXTENDED enables the longer test vectors; failures are reported through
 * REPORT.  Returns 0 on success or GPG_ERR_DIGEST_ALGO for an algorithm
 * this module does not implement. */
static gpg_err_code_t
run_selftests (int algo, int extended, selftest_report_func_t report)
{
  gpg_err_code_t ec;

  switch (algo)
    {
    case GCRY_MD_SHA384:
      ec = selftests_sha384 (extended, report);
      break;
    case GCRY_MD_SHA512:
      ec = selftests_sha512 (extended, report);
      break;
    case GCRY_MD_SHA512_224:
      ec = selftests_sha512_224 (extended, report);
      break;
    case GCRY_MD_SHA512_256:
      ec = selftests_sha512_256 (extended, report);
      break;
    default:
      ec = GPG_ERR_DIGEST_ALGO;
      break;
    }

  return ec;
}

/* DER prefix for a PKCS#1 DigestInfo wrapping a 64-byte SHA-512 value. */
static byte sha512_asn[] =	/* Object ID is 2.16.840.1.101.3.4.2.3 */
  {
    0x30, 0x51, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03,
    0x04, 0x02, 0x03, 0x05, 0x00, 0x04, 0x40
  };

static gcry_md_oid_spec_t oid_spec_sha512[] =
  {
    { "2.16.840.1.101.3.4.2.3" },

    /* PKCS#1 sha512WithRSAEncryption */
    { "1.2.840.113549.1.1.13" },

    { NULL }
  };

/* Module descriptor registering SHA-512 with the md subsystem. */
gcry_md_spec_t _gcry_digest_spec_sha512 =
  {
    GCRY_MD_SHA512, {0, 1},
    "SHA512", sha512_asn, DIM (sha512_asn), oid_spec_sha512, 64,
    sha512_init, _gcry_md_block_write, sha512_final, sha512_read, NULL,
    _gcry_sha512_hash_buffers,
    sizeof (SHA512_CONTEXT),
    run_selftests
  };

/* DER prefix for a PKCS#1 DigestInfo wrapping a 48-byte SHA-384 value. */
static byte sha384_asn[] =	/* Object ID is 2.16.840.1.101.3.4.2.2 */
  {
    0x30, 0x41, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03,
    0x04, 0x02, 0x02, 0x05, 0x00, 0x04, 0x30
  };

static gcry_md_oid_spec_t oid_spec_sha384[] =
  {
    { "2.16.840.1.101.3.4.2.2" },

    /* PKCS#1 sha384WithRSAEncryption */
    { "1.2.840.113549.1.1.12" },

    /* SHA384WithECDSA: RFC 7427 (A.3.3.)
*/ { "1.2.840.10045.4.3.3" }, { NULL }, }; gcry_md_spec_t _gcry_digest_spec_sha384 = { GCRY_MD_SHA384, {0, 1}, "SHA384", sha384_asn, DIM (sha384_asn), oid_spec_sha384, 48, sha384_init, _gcry_md_block_write, sha512_final, sha512_read, NULL, _gcry_sha384_hash_buffers, sizeof (SHA512_CONTEXT), run_selftests }; static byte sha512_256_asn[] = { 0x30 }; static gcry_md_oid_spec_t oid_spec_sha512_256[] = { { "2.16.840.1.101.3.4.2.6" }, { NULL }, }; gcry_md_spec_t _gcry_digest_spec_sha512_256 = { GCRY_MD_SHA512_256, {0, 1}, "SHA512_256", sha512_256_asn, DIM (sha512_256_asn), oid_spec_sha512_256, 32, sha512_256_init, _gcry_md_block_write, sha512_final, sha512_read, NULL, _gcry_sha512_256_hash_buffers, sizeof (SHA512_CONTEXT), run_selftests }; static byte sha512_224_asn[] = { 0x30 }; static gcry_md_oid_spec_t oid_spec_sha512_224[] = { { "2.16.840.1.101.3.4.2.5" }, { NULL }, }; gcry_md_spec_t _gcry_digest_spec_sha512_224 = { GCRY_MD_SHA512_224, {0, 1}, "SHA512_224", sha512_224_asn, DIM (sha512_224_asn), oid_spec_sha512_224, 28, sha512_224_init, _gcry_md_block_write, sha512_final, sha512_read, NULL, _gcry_sha512_224_hash_buffers, sizeof (SHA512_CONTEXT), run_selftests }; diff --git a/cipher/sm3.c b/cipher/sm3.c index c687cdf0..d52a7494 100644 --- a/cipher/sm3.c +++ b/cipher/sm3.c @@ -1,460 +1,463 @@ /* sm3.c - SM3 hash function * Copyright (C) 2017 Jia Zhang * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . */ /* Test vectors: "abc" SM3: 66c7f0f4 62eeedd9 d1f2d46b dc10e4e2 4167c487 5cf2f7a2 297da02b 8f4ba8e0 "abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd" SM3: debe9ff9 2275b8a1 38604889 c18e5a4d 6fdb70e5 387e5765 293dcba3 9c0c5732 "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq" SM3: 639b6cc5 e64d9e37 a390b192 df4fa1ea 0720ab74 7ff692b9 f38c4e66 ad7b8c05 "a" one million times SM3: c8aaf894 29554029 e231941a 2acc0ad6 1ff2a5ac d8fadd25 847a3a73 2b3b02c3 */ #include #include #include #include #include "g10lib.h" #include "bithelp.h" #include "bufhelp.h" #include "cipher.h" #include "hash-common.h" typedef struct { gcry_md_block_ctx_t bctx; u32 h0,h1,h2,h3,h4,h5,h6,h7; } SM3_CONTEXT; static unsigned int transform (void *c, const unsigned char *data, size_t nblks); static void sm3_init (void *context, unsigned int flags) { SM3_CONTEXT *hd = context; unsigned int features = _gcry_get_hw_features (); (void)flags; hd->h0 = 0x7380166f; hd->h1 = 0x4914b2b9; hd->h2 = 0x172442d7; hd->h3 = 0xda8a0600; hd->h4 = 0xa96f30bc; hd->h5 = 0x163138aa; hd->h6 = 0xe38dee4d; hd->h7 = 0xb0fb0e4e; hd->bctx.nblocks = 0; hd->bctx.nblocks_high = 0; hd->bctx.count = 0; hd->bctx.blocksize_shift = _gcry_ctz(64); hd->bctx.bwrite = transform; (void)features; } /* Transform the message X which consists of 16 32-bit-words. See GM/T 004-2012 for details. 
*/ #define R(i,a,b,c,d,e,f,g,h,t,w1,w2) do \ { \ ss1 = rol ((rol ((a), 12) + (e) + (t)), 7); \ ss2 = ss1 ^ rol ((a), 12); \ d += FF##i(a,b,c) + ss2 + ((w1) ^ (w2)); \ h += GG##i(e,f,g) + ss1 + (w1); \ b = rol ((b), 9); \ f = rol ((f), 19); \ h = P0 ((h)); \ } while (0) #define R1(a,b,c,d,e,f,g,h,t,w1,w2) R(1,a,b,c,d,e,f,g,h,t,w1,w2) #define R2(a,b,c,d,e,f,g,h,t,w1,w2) R(2,a,b,c,d,e,f,g,h,t,w1,w2) #define FF1(x, y, z) (x ^ y ^ z) #define FF2(x, y, z) ((x & y) | (x & z) | (y & z)) #define GG1(x, y, z) (x ^ y ^ z) #define GG2(x, y, z) ((x & y) | ( ~x & z)) /* Message expansion */ #define P0(x) ((x) ^ rol ((x), 9) ^ rol ((x), 17)) #define P1(x) ((x) ^ rol ((x), 15) ^ rol ((x), 23)) #define I(i) ( w[i] = buf_get_be32(data + i * 4) ) #define W1(i) ( w[i&0x0f] ) #define W2(i) ( w[i&0x0f] = P1(w[i &0x0f] \ ^ w[(i-9)&0x0f] \ ^ rol (w[(i-3)&0x0f], 15)) \ ^ rol (w[(i-13)&0x0f], 7) \ ^ w[(i-6)&0x0f] ) static unsigned int transform_blk (void *ctx, const unsigned char *data) { SM3_CONTEXT *hd = ctx; static const u32 K[64] = { 0x79cc4519, 0xf3988a32, 0xe7311465, 0xce6228cb, 0x9cc45197, 0x3988a32f, 0x7311465e, 0xe6228cbc, 0xcc451979, 0x988a32f3, 0x311465e7, 0x6228cbce, 0xc451979c, 0x88a32f39, 0x11465e73, 0x228cbce6, 0x9d8a7a87, 0x3b14f50f, 0x7629ea1e, 0xec53d43c, 0xd8a7a879, 0xb14f50f3, 0x629ea1e7, 0xc53d43ce, 0x8a7a879d, 0x14f50f3b, 0x29ea1e76, 0x53d43cec, 0xa7a879d8, 0x4f50f3b1, 0x9ea1e762, 0x3d43cec5, 0x7a879d8a, 0xf50f3b14, 0xea1e7629, 0xd43cec53, 0xa879d8a7, 0x50f3b14f, 0xa1e7629e, 0x43cec53d, 0x879d8a7a, 0x0f3b14f5, 0x1e7629ea, 0x3cec53d4, 0x79d8a7a8, 0xf3b14f50, 0xe7629ea1, 0xcec53d43, 0x9d8a7a87, 0x3b14f50f, 0x7629ea1e, 0xec53d43c, 0xd8a7a879, 0xb14f50f3, 0x629ea1e7, 0xc53d43ce, 0x8a7a879d, 0x14f50f3b, 0x29ea1e76, 0x53d43cec, 0xa7a879d8, 0x4f50f3b1, 0x9ea1e762, 0x3d43cec5 }; u32 a,b,c,d,e,f,g,h,ss1,ss2; u32 w[16]; a = hd->h0; b = hd->h1; c = hd->h2; d = hd->h3; e = hd->h4; f = hd->h5; g = hd->h6; h = hd->h7; R1(a, b, c, d, e, f, g, h, K[0], I(0), I(4)); R1(d, a, b, c, h, 
e, f, g, K[1], I(1), I(5)); R1(c, d, a, b, g, h, e, f, K[2], I(2), I(6)); R1(b, c, d, a, f, g, h, e, K[3], I(3), I(7)); R1(a, b, c, d, e, f, g, h, K[4], W1(4), I(8)); R1(d, a, b, c, h, e, f, g, K[5], W1(5), I(9)); R1(c, d, a, b, g, h, e, f, K[6], W1(6), I(10)); R1(b, c, d, a, f, g, h, e, K[7], W1(7), I(11)); R1(a, b, c, d, e, f, g, h, K[8], W1(8), I(12)); R1(d, a, b, c, h, e, f, g, K[9], W1(9), I(13)); R1(c, d, a, b, g, h, e, f, K[10], W1(10), I(14)); R1(b, c, d, a, f, g, h, e, K[11], W1(11), I(15)); R1(a, b, c, d, e, f, g, h, K[12], W1(12), W2(16)); R1(d, a, b, c, h, e, f, g, K[13], W1(13), W2(17)); R1(c, d, a, b, g, h, e, f, K[14], W1(14), W2(18)); R1(b, c, d, a, f, g, h, e, K[15], W1(15), W2(19)); R2(a, b, c, d, e, f, g, h, K[16], W1(16), W2(20)); R2(d, a, b, c, h, e, f, g, K[17], W1(17), W2(21)); R2(c, d, a, b, g, h, e, f, K[18], W1(18), W2(22)); R2(b, c, d, a, f, g, h, e, K[19], W1(19), W2(23)); R2(a, b, c, d, e, f, g, h, K[20], W1(20), W2(24)); R2(d, a, b, c, h, e, f, g, K[21], W1(21), W2(25)); R2(c, d, a, b, g, h, e, f, K[22], W1(22), W2(26)); R2(b, c, d, a, f, g, h, e, K[23], W1(23), W2(27)); R2(a, b, c, d, e, f, g, h, K[24], W1(24), W2(28)); R2(d, a, b, c, h, e, f, g, K[25], W1(25), W2(29)); R2(c, d, a, b, g, h, e, f, K[26], W1(26), W2(30)); R2(b, c, d, a, f, g, h, e, K[27], W1(27), W2(31)); R2(a, b, c, d, e, f, g, h, K[28], W1(28), W2(32)); R2(d, a, b, c, h, e, f, g, K[29], W1(29), W2(33)); R2(c, d, a, b, g, h, e, f, K[30], W1(30), W2(34)); R2(b, c, d, a, f, g, h, e, K[31], W1(31), W2(35)); R2(a, b, c, d, e, f, g, h, K[32], W1(32), W2(36)); R2(d, a, b, c, h, e, f, g, K[33], W1(33), W2(37)); R2(c, d, a, b, g, h, e, f, K[34], W1(34), W2(38)); R2(b, c, d, a, f, g, h, e, K[35], W1(35), W2(39)); R2(a, b, c, d, e, f, g, h, K[36], W1(36), W2(40)); R2(d, a, b, c, h, e, f, g, K[37], W1(37), W2(41)); R2(c, d, a, b, g, h, e, f, K[38], W1(38), W2(42)); R2(b, c, d, a, f, g, h, e, K[39], W1(39), W2(43)); R2(a, b, c, d, e, f, g, h, K[40], W1(40), W2(44)); R2(d, a, b, c, 
h, e, f, g, K[41], W1(41), W2(45)); R2(c, d, a, b, g, h, e, f, K[42], W1(42), W2(46)); R2(b, c, d, a, f, g, h, e, K[43], W1(43), W2(47)); R2(a, b, c, d, e, f, g, h, K[44], W1(44), W2(48)); R2(d, a, b, c, h, e, f, g, K[45], W1(45), W2(49)); R2(c, d, a, b, g, h, e, f, K[46], W1(46), W2(50)); R2(b, c, d, a, f, g, h, e, K[47], W1(47), W2(51)); R2(a, b, c, d, e, f, g, h, K[48], W1(48), W2(52)); R2(d, a, b, c, h, e, f, g, K[49], W1(49), W2(53)); R2(c, d, a, b, g, h, e, f, K[50], W1(50), W2(54)); R2(b, c, d, a, f, g, h, e, K[51], W1(51), W2(55)); R2(a, b, c, d, e, f, g, h, K[52], W1(52), W2(56)); R2(d, a, b, c, h, e, f, g, K[53], W1(53), W2(57)); R2(c, d, a, b, g, h, e, f, K[54], W1(54), W2(58)); R2(b, c, d, a, f, g, h, e, K[55], W1(55), W2(59)); R2(a, b, c, d, e, f, g, h, K[56], W1(56), W2(60)); R2(d, a, b, c, h, e, f, g, K[57], W1(57), W2(61)); R2(c, d, a, b, g, h, e, f, K[58], W1(58), W2(62)); R2(b, c, d, a, f, g, h, e, K[59], W1(59), W2(63)); R2(a, b, c, d, e, f, g, h, K[60], W1(60), W2(64)); R2(d, a, b, c, h, e, f, g, K[61], W1(61), W2(65)); R2(c, d, a, b, g, h, e, f, K[62], W1(62), W2(66)); R2(b, c, d, a, f, g, h, e, K[63], W1(63), W2(67)); hd->h0 ^= a; hd->h1 ^= b; hd->h2 ^= c; hd->h3 ^= d; hd->h4 ^= e; hd->h5 ^= f; hd->h6 ^= g; hd->h7 ^= h; return /*burn_stack*/ 26*4+32; } #undef P0 #undef P1 #undef R #undef R1 #undef R2 static unsigned int transform (void *ctx, const unsigned char *data, size_t nblks) { SM3_CONTEXT *hd = ctx; unsigned int burn; do { burn = transform_blk (hd, data); data += 64; } while (--nblks); return burn; } /* The routine finally terminates the computation and returns the digest. The handle is prepared for a new cycle, but adding bytes to the handle will the destroy the returned buffer. Returns: 32 bytes with the message the digest. 
*/
/* Terminate the SM3 computation: append the padding defined by the SM3
 * specification (a 0x80 byte, zeros, then the 64-bit big-endian bit
 * count), run the compression function over the one or two remaining
 * blocks, and serialize the eight chaining variables big-endian into
 * bctx.buf for sm3_read. */
static void
sm3_final(void *context)
{
  SM3_CONTEXT *hd = context;
  u32 t, th, msb, lsb;
  byte *p;
  unsigned int burn;

  t = hd->bctx.nblocks;
  if (sizeof t == sizeof hd->bctx.nblocks)
    th = hd->bctx.nblocks_high;
  else
    th = hd->bctx.nblocks >> 32;

  /* multiply by 64 to make a byte count; t >> 26 carries the overflow
   * bits into the high word */
  lsb = t << 6;
  msb = (th << 6) | (t >> 26);
  /* add the count */
  t = lsb;
  if ((lsb += hd->bctx.count) < t)
    msb++;                      /* carry from low into high word */
  /* multiply by 8 to make a bit count */
  t = lsb;
  lsb <<= 3;
  msb <<= 3;
  msb |= t >> 29;

  if (hd->bctx.count < 56)  /* enough room */
    {
      hd->bctx.buf[hd->bctx.count++] = 0x80; /* pad */
      if (hd->bctx.count < 56)
	memset (&hd->bctx.buf[hd->bctx.count], 0, 56 - hd->bctx.count);

      /* append the 64 bit count */
      buf_put_be32(hd->bctx.buf + 56, msb);
      buf_put_be32(hd->bctx.buf + 60, lsb);
      burn = (*hd->bctx.bwrite) ( hd, hd->bctx.buf, 1 );
    }
  else  /* need one extra block */
    {
      hd->bctx.buf[hd->bctx.count++] = 0x80; /* pad character */
      /* fill pad and next block with zeroes */
      memset (&hd->bctx.buf[hd->bctx.count], 0, 64 - hd->bctx.count + 56);

      /* append the 64 bit count */
      buf_put_be32(hd->bctx.buf + 64 + 56, msb);
      buf_put_be32(hd->bctx.buf + 64 + 60, lsb);
      burn = (*hd->bctx.bwrite) ( hd, hd->bctx.buf, 2 );
    }

  p = hd->bctx.buf;
#define X(a) do { buf_put_be32(p, hd->h##a); p += 4; } while(0)
  X(0);
  X(1);
  X(2);
  X(3);
  X(4);
  X(5);
  X(6);
  X(7);
#undef X

  hd->bctx.count = 0;

  _gcry_burn_stack (burn);
}

/* Return a pointer to the 32-byte digest written into bctx.buf by
 * sm3_final. */
static byte *
sm3_read (void *context)
{
  SM3_CONTEXT *hd = context;

  return hd->bctx.buf;
}

/* Shortcut functions which puts the hash value of the supplied buffer iov
 * into outbuf which must have a size of 32 bytes.
*/ static void -_gcry_sm3_hash_buffers (void *outbuf, const gcry_buffer_t *iov, int iovcnt) +_gcry_sm3_hash_buffers (void *outbuf, size_t nbytes, + const gcry_buffer_t *iov, int iovcnt) { SM3_CONTEXT hd; + (void)nbytes; + sm3_init (&hd, 0); for (;iovcnt > 0; iov++, iovcnt--) _gcry_md_block_write (&hd, (const char*)iov[0].data + iov[0].off, iov[0].len); sm3_final (&hd); memcpy (outbuf, hd.bctx.buf, 32); } /* Self-test section. */ static gpg_err_code_t selftests_sm3 (int extended, selftest_report_func_t report) { const char *what; const char *errtxt; what = "short string (spec example 1)"; errtxt = _gcry_hash_selftest_check_one (GCRY_MD_SM3, 0, "abc", 3, "\x66\xc7\xf0\xf4\x62\xee\xed\xd9\xd1\xf2\xd4\x6b\xdc\x10\xe4\xe2" "\x41\x67\xc4\x87\x5c\xf2\xf7\xa2\x29\x7d\xa0\x2b\x8f\x4b\xa8\xe0", 32); if (errtxt) goto failed; if (extended) { what = "long string (spec example 2)"; errtxt = _gcry_hash_selftest_check_one (GCRY_MD_SM3, 0, "abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd", 64, "\xde\xbe\x9f\xf9\x22\x75\xb8\xa1\x38\x60\x48\x89\xc1\x8e\x5a\x4d" "\x6f\xdb\x70\xe5\x38\x7e\x57\x65\x29\x3d\xcb\xa3\x9c\x0c\x57\x32", 32); if (errtxt) goto failed; what = "long string"; errtxt = _gcry_hash_selftest_check_one (GCRY_MD_SM3, 0, "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq", 56, "\x63\x9b\x6c\xc5\xe6\x4d\x9e\x37\xa3\x90\xb1\x92\xdf\x4f\xa1\xea" "\x07\x20\xab\x74\x7f\xf6\x92\xb9\xf3\x8c\x4e\x66\xad\x7b\x8c\x05", 32); if (errtxt) goto failed; what = "one million \"a\""; errtxt = _gcry_hash_selftest_check_one (GCRY_MD_SM3, 1, NULL, 0, "\xc8\xaa\xf8\x94\x29\x55\x40\x29\xe2\x31\x94\x1a\x2a\xcc\x0a\xd6" "\x1f\xf2\xa5\xac\xd8\xfa\xdd\x25\x84\x7a\x3a\x73\x2b\x3b\x02\xc3", 32); if (errtxt) goto failed; } return 0; /* Succeeded. */ failed: if (report) report ("digest", GCRY_MD_SM3, what, errtxt); return GPG_ERR_SELFTEST_FAILED; } /* Run a full self-test for ALGO and return 0 on success. 
*/ static gpg_err_code_t run_selftests (int algo, int extended, selftest_report_func_t report) { gpg_err_code_t ec; switch (algo) { case GCRY_MD_SM3: ec = selftests_sm3 (extended, report); break; default: ec = GPG_ERR_DIGEST_ALGO; break; } return ec; } static byte asn_sm3[] = /* Object ID is 1.2.156.10197.401 */ { 0x30, 0x2F, 0x30, 0x0B, 0x06, 0x07, 0x2A, 0x81, 0x1C, 0xCF, 0x55, 0x83, 0x11, 0x05, 0x00, 0x04, 0x20 }; static gcry_md_oid_spec_t oid_spec_sm3[] = { /* China Electronics Standardization Instutute, OID White paper (2015), Table 6 */ { "1.2.156.10197.401" }, { NULL }, }; gcry_md_spec_t _gcry_digest_spec_sm3 = { GCRY_MD_SM3, {0, 0}, "SM3", asn_sm3, DIM (asn_sm3), oid_spec_sm3, 32, sm3_init, _gcry_md_block_write, sm3_final, sm3_read, NULL, _gcry_sm3_hash_buffers, sizeof (SM3_CONTEXT), run_selftests }; diff --git a/src/cipher-proto.h b/src/cipher-proto.h index dde56d30..b730fd06 100644 --- a/src/cipher-proto.h +++ b/src/cipher-proto.h @@ -1,275 +1,276 @@ /* cipher-proto.h - Internal declarations * Copyright (C) 2008, 2011 Free Software Foundation, Inc. * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser general Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . */ /* This file has been factored out from cipher.h so that it can be used standalone in visibility.c . */ #ifndef G10_CIPHER_PROTO_H #define G10_CIPHER_PROTO_H enum pk_encoding; /* Definition of a function used to report selftest failures. 
DOMAIN is a string describing the function block: "cipher", "digest", "pubkey or "random", ALGO is the algorithm under test, WHAT is a string describing what has been tested, DESC is a string describing the error. */ typedef void (*selftest_report_func_t)(const char *domain, int algo, const char *what, const char *errdesc); /* Definition of the selftest functions. */ typedef gpg_err_code_t (*selftest_func_t) (int algo, int extended, selftest_report_func_t report); /* * * Public key related definitions. * */ /* Type for the pk_generate function. */ typedef gcry_err_code_t (*gcry_pk_generate_t) (gcry_sexp_t genparms, gcry_sexp_t *r_skey); /* Type for the pk_check_secret_key function. */ typedef gcry_err_code_t (*gcry_pk_check_secret_key_t) (gcry_sexp_t keyparms); /* Type for the pk_encrypt function. */ typedef gcry_err_code_t (*gcry_pk_encrypt_t) (gcry_sexp_t *r_ciph, gcry_sexp_t s_data, gcry_sexp_t keyparms); /* Type for the pk_decrypt function. */ typedef gcry_err_code_t (*gcry_pk_decrypt_t) (gcry_sexp_t *r_plain, gcry_sexp_t s_data, gcry_sexp_t keyparms); /* Type for the pk_sign function. */ typedef gcry_err_code_t (*gcry_pk_sign_t) (gcry_sexp_t *r_sig, gcry_sexp_t s_data, gcry_sexp_t keyparms); /* Type for the pk_verify function. */ typedef gcry_err_code_t (*gcry_pk_verify_t) (gcry_sexp_t s_sig, gcry_sexp_t s_data, gcry_sexp_t keyparms); /* Type for the pk_get_nbits function. */ typedef unsigned (*gcry_pk_get_nbits_t) (gcry_sexp_t keyparms); /* The type used to compute the keygrip. */ typedef gpg_err_code_t (*pk_comp_keygrip_t) (gcry_md_hd_t md, gcry_sexp_t keyparm); /* The type used to query an ECC curve name. */ typedef const char *(*pk_get_curve_t)(gcry_sexp_t keyparms, int iterator, unsigned int *r_nbits); /* The type used to query ECC curve parameters by name. */ typedef gcry_sexp_t (*pk_get_curve_param_t)(const char *name); /* Module specification structure for public key algorithms. 
*/ typedef struct gcry_pk_spec { int algo; struct { unsigned int disabled:1; unsigned int fips:1; } flags; int use; const char *name; const char **aliases; const char *elements_pkey; const char *elements_skey; const char *elements_enc; const char *elements_sig; const char *elements_grip; gcry_pk_generate_t generate; gcry_pk_check_secret_key_t check_secret_key; gcry_pk_encrypt_t encrypt; gcry_pk_decrypt_t decrypt; gcry_pk_sign_t sign; gcry_pk_verify_t verify; gcry_pk_get_nbits_t get_nbits; selftest_func_t selftest; pk_comp_keygrip_t comp_keygrip; pk_get_curve_t get_curve; pk_get_curve_param_t get_curve_param; } gcry_pk_spec_t; /* * * Symmetric cipher related definitions. * */ struct cipher_bulk_ops; /* Type for the cipher_setkey function. */ typedef gcry_err_code_t (*gcry_cipher_setkey_t) (void *c, const unsigned char *key, unsigned keylen, struct cipher_bulk_ops *bulk_ops); /* Type for the cipher_encrypt function. */ typedef unsigned int (*gcry_cipher_encrypt_t) (void *c, unsigned char *outbuf, const unsigned char *inbuf); /* Type for the cipher_decrypt function. */ typedef unsigned int (*gcry_cipher_decrypt_t) (void *c, unsigned char *outbuf, const unsigned char *inbuf); /* Type for the cipher_stencrypt function. */ typedef void (*gcry_cipher_stencrypt_t) (void *c, unsigned char *outbuf, const unsigned char *inbuf, size_t n); /* Type for the cipher_stdecrypt function. */ typedef void (*gcry_cipher_stdecrypt_t) (void *c, unsigned char *outbuf, const unsigned char *inbuf, size_t n); /* The type used to convey additional information to a cipher. */ typedef gpg_err_code_t (*cipher_set_extra_info_t) (void *c, int what, const void *buffer, size_t buflen); /* The type used to set an IV directly in the algorithm module. */ typedef void (*cipher_setiv_func_t)(void *c, const byte *iv, size_t ivlen); /* A structure to map OIDs to encryption modes. 
*/ typedef struct gcry_cipher_oid_spec { const char *oid; int mode; } gcry_cipher_oid_spec_t; /* Module specification structure for ciphers. */ typedef struct gcry_cipher_spec { int algo; struct { unsigned int disabled:1; unsigned int fips:1; } flags; const char *name; const char **aliases; gcry_cipher_oid_spec_t *oids; size_t blocksize; size_t keylen; size_t contextsize; gcry_cipher_setkey_t setkey; gcry_cipher_encrypt_t encrypt; gcry_cipher_decrypt_t decrypt; gcry_cipher_stencrypt_t stencrypt; gcry_cipher_stdecrypt_t stdecrypt; selftest_func_t selftest; cipher_set_extra_info_t set_extra_info; cipher_setiv_func_t setiv; } gcry_cipher_spec_t; /* * * Message digest related definitions. * */ /* Type for the md_init function. */ typedef void (*gcry_md_init_t) (void *c, unsigned int flags); /* Type for the md_write function. */ typedef void (*gcry_md_write_t) (void *c, const void *buf, size_t nbytes); /* Type for the md_final function. */ typedef void (*gcry_md_final_t) (void *c); /* Type for the md_read function. */ typedef unsigned char *(*gcry_md_read_t) (void *c); /* Type for the md_extract function. */ typedef void (*gcry_md_extract_t) (void *c, void *outbuf, size_t nbytes); /* Type for the md_hash_buffers function. */ -typedef void (*gcry_md_hash_buffers_t) (void *outbuf, const gcry_buffer_t *iov, - int iovcnt); +typedef void (*gcry_md_hash_buffers_t) (void *outbuf, size_t nbytes, + const gcry_buffer_t *iov, + int iovcnt); typedef struct gcry_md_oid_spec { const char *oidstring; } gcry_md_oid_spec_t; /* Module specification structure for message digests. 
*/ typedef struct gcry_md_spec { int algo; struct { unsigned int disabled:1; unsigned int fips:1; } flags; const char *name; unsigned char *asnoid; int asnlen; gcry_md_oid_spec_t *oids; int mdlen; gcry_md_init_t init; gcry_md_write_t write; gcry_md_final_t final; gcry_md_read_t read; gcry_md_extract_t extract; gcry_md_hash_buffers_t hash_buffers; size_t contextsize; /* allocate this amount of context */ selftest_func_t selftest; } gcry_md_spec_t; /* The selftest functions. */ gcry_error_t _gcry_cipher_selftest (int algo, int extended, selftest_report_func_t report); gcry_error_t _gcry_md_selftest (int algo, int extended, selftest_report_func_t report); gcry_error_t _gcry_pk_selftest (int algo, int extended, selftest_report_func_t report); gcry_error_t _gcry_mac_selftest (int algo, int extended, selftest_report_func_t report); gcry_error_t _gcry_kdf_selftest (int algo, int extended, selftest_report_func_t report); gcry_error_t _gcry_random_selftest (selftest_report_func_t report); #endif /*G10_CIPHER_PROTO_H*/