diff --git a/cipher/md.c b/cipher/md.c
index 8df54feb..c1f585f8 100644
--- a/cipher/md.c
+++ b/cipher/md.c
@@ -1,1452 +1,1478 @@
/* md.c - message digest dispatcher
* Copyright (C) 1998, 1999, 2002, 2003, 2006,
* 2008 Free Software Foundation, Inc.
* Copyright (C) 2013, 2014 g10 Code GmbH
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <config.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include "g10lib.h"
#include "cipher.h"
/* This is the list of the digest implementations included in
libgcrypt. */
static gcry_md_spec_t *digest_list[] =
{
#if USE_CRC
&_gcry_digest_spec_crc32,
&_gcry_digest_spec_crc32_rfc1510,
&_gcry_digest_spec_crc24_rfc2440,
#endif
#if USE_SHA1
&_gcry_digest_spec_sha1,
#endif
#if USE_SHA256
&_gcry_digest_spec_sha256,
&_gcry_digest_spec_sha224,
#endif
#if USE_SHA512
&_gcry_digest_spec_sha512,
&_gcry_digest_spec_sha384,
#endif
#if USE_SHA3
&_gcry_digest_spec_sha3_224,
&_gcry_digest_spec_sha3_256,
&_gcry_digest_spec_sha3_384,
&_gcry_digest_spec_sha3_512,
&_gcry_digest_spec_shake128,
&_gcry_digest_spec_shake256,
#endif
#if USE_GOST_R_3411_94
&_gcry_digest_spec_gost3411_94,
&_gcry_digest_spec_gost3411_cp,
#endif
#if USE_GOST_R_3411_12
&_gcry_digest_spec_stribog_256,
&_gcry_digest_spec_stribog_512,
#endif
#if USE_WHIRLPOOL
&_gcry_digest_spec_whirlpool,
#endif
#if USE_RMD160
&_gcry_digest_spec_rmd160,
#endif
#if USE_TIGER
&_gcry_digest_spec_tiger,
&_gcry_digest_spec_tiger1,
&_gcry_digest_spec_tiger2,
#endif
#if USE_MD5
&_gcry_digest_spec_md5,
#endif
#if USE_MD4
&_gcry_digest_spec_md4,
#endif
#if USE_MD2
&_gcry_digest_spec_md2,
#endif
#if USE_BLAKE2
&_gcry_digest_spec_blake2b_512,
&_gcry_digest_spec_blake2b_384,
&_gcry_digest_spec_blake2b_256,
&_gcry_digest_spec_blake2b_160,
&_gcry_digest_spec_blake2s_256,
&_gcry_digest_spec_blake2s_224,
&_gcry_digest_spec_blake2s_160,
&_gcry_digest_spec_blake2s_128,
#endif
NULL
};
typedef struct gcry_md_list
{
gcry_md_spec_t *spec;
struct gcry_md_list *next;
size_t actual_struct_size; /* Allocated size of this structure. */
PROPERLY_ALIGNED_TYPE context;
} GcryDigestEntry;
/* This structure is put right after the gcry_md_hd_t buffer, so that
* only one memory block is needed. */
struct gcry_md_context
{
int magic;
size_t actual_handle_size; /* Allocated size of this handle. */
FILE *debug;
struct {
unsigned int secure: 1;
unsigned int finalized:1;
unsigned int bugemu1:1;
unsigned int hmac:1;
} flags;
GcryDigestEntry *list;
};
#define CTX_MAGIC_NORMAL 0x11071961
#define CTX_MAGIC_SECURE 0x16917011
static gcry_err_code_t md_enable (gcry_md_hd_t hd, int algo);
static void md_close (gcry_md_hd_t a);
static void md_write (gcry_md_hd_t a, const void *inbuf, size_t inlen);
static byte *md_read( gcry_md_hd_t a, int algo );
static int md_get_algo( gcry_md_hd_t a );
static int md_digest_length( int algo );
static void md_start_debug ( gcry_md_hd_t a, const char *suffix );
static void md_stop_debug ( gcry_md_hd_t a );
static int
map_algo (int algo)
{
return algo;
}
/* Return the spec structure for the hash algorithm ALGO. For an
unknown algorithm NULL is returned. */
static gcry_md_spec_t *
spec_from_algo (int algo)
{
int idx;
gcry_md_spec_t *spec;
algo = map_algo (algo);
for (idx = 0; (spec = digest_list[idx]); idx++)
if (algo == spec->algo)
return spec;
return NULL;
}
/* Lookup a hash's spec by its name. */
static gcry_md_spec_t *
spec_from_name (const char *name)
{
gcry_md_spec_t *spec;
int idx;
for (idx=0; (spec = digest_list[idx]); idx++)
{
if (!stricmp (name, spec->name))
return spec;
}
return NULL;
}
/* Lookup a hash's spec by its OID. */
static gcry_md_spec_t *
spec_from_oid (const char *oid)
{
gcry_md_spec_t *spec;
gcry_md_oid_spec_t *oid_specs;
int idx, j;
for (idx=0; (spec = digest_list[idx]); idx++)
{
oid_specs = spec->oids;
if (oid_specs)
{
for (j = 0; oid_specs[j].oidstring; j++)
if (!stricmp (oid, oid_specs[j].oidstring))
return spec;
}
}
return NULL;
}
static gcry_md_spec_t *
search_oid (const char *oid, gcry_md_oid_spec_t *oid_spec)
{
gcry_md_spec_t *spec;
int i;
if (!oid)
return NULL;
if (!strncmp (oid, "oid.", 4) || !strncmp (oid, "OID.", 4))
oid += 4;
spec = spec_from_oid (oid);
if (spec && spec->oids)
{
for (i = 0; spec->oids[i].oidstring; i++)
if (!stricmp (oid, spec->oids[i].oidstring))
{
if (oid_spec)
*oid_spec = spec->oids[i];
return spec;
}
}
return NULL;
}
/****************
* Map a string to the digest algo
*/
int
_gcry_md_map_name (const char *string)
{
gcry_md_spec_t *spec;
if (!string)
return 0;
/* If the string starts with a digit (optionally prefixed with
either "OID." or "oid."), we first look into our table of ASN.1
object identifiers to figure out the algorithm */
spec = search_oid (string, NULL);
if (spec)
return spec->algo;
/* Not found, search a matching digest name. */
spec = spec_from_name (string);
if (spec)
return spec->algo;
return 0;
}
/****************
* This function simply returns the name of the algorithm or some constant
* string when there is no algo. It will never return NULL.
* Use the macro gcry_md_test_algo() to check whether the algorithm
* is valid.
*/
const char *
_gcry_md_algo_name (int algorithm)
{
gcry_md_spec_t *spec;
spec = spec_from_algo (algorithm);
return spec ? spec->name : "?";
}
static gcry_err_code_t
check_digest_algo (int algorithm)
{
gcry_md_spec_t *spec;
spec = spec_from_algo (algorithm);
if (spec && !spec->flags.disabled)
return 0;
return GPG_ERR_DIGEST_ALGO;
}
/****************
* Open a message digest handle for use with algorithm ALGO.
* More algorithms may be added by md_enable(). The initial algorithm
* may be 0.
*/
static gcry_err_code_t
md_open (gcry_md_hd_t *h, int algo, unsigned int flags)
{
gcry_err_code_t err = 0;
int secure = !!(flags & GCRY_MD_FLAG_SECURE);
int hmac = !!(flags & GCRY_MD_FLAG_HMAC);
int bufsize = secure ? 512 : 1024;
struct gcry_md_context *ctx;
gcry_md_hd_t hd;
size_t n;
/* Allocate a memory area to hold the caller visible buffer with it's
* control information and the data required by this module. Set the
* context pointer at the beginning to this area.
* We have to use this strange scheme because we want to hide the
* internal data but have a variable sized buffer.
*
* +---+------+---........------+-------------+
* !ctx! bctl ! buffer ! private !
* +---+------+---........------+-------------+
* ! ^
* !---------------------------!
*
* We have to make sure that private is well aligned.
*/
n = sizeof (struct gcry_md_handle) + bufsize;
n = ((n + sizeof (PROPERLY_ALIGNED_TYPE) - 1)
/ sizeof (PROPERLY_ALIGNED_TYPE)) * sizeof (PROPERLY_ALIGNED_TYPE);
/* Allocate and set the Context pointer to the private data */
if (secure)
hd = xtrymalloc_secure (n + sizeof (struct gcry_md_context));
else
hd = xtrymalloc (n + sizeof (struct gcry_md_context));
if (! hd)
err = gpg_err_code_from_errno (errno);
if (! err)
{
hd->ctx = ctx = (void *) ((char *) hd + n);
/* Setup the globally visible data (bctl in the diagram).*/
hd->bufsize = n - sizeof (struct gcry_md_handle) + 1;
hd->bufpos = 0;
/* Initialize the private data. */
memset (hd->ctx, 0, sizeof *hd->ctx);
ctx->magic = secure ? CTX_MAGIC_SECURE : CTX_MAGIC_NORMAL;
ctx->actual_handle_size = n + sizeof (struct gcry_md_context);
ctx->flags.secure = secure;
ctx->flags.hmac = hmac;
ctx->flags.bugemu1 = !!(flags & GCRY_MD_FLAG_BUGEMU1);
}
if (! err)
{
/* Hmmm, should we really do that? - yes [-wk] */
_gcry_fast_random_poll ();
if (algo)
{
err = md_enable (hd, algo);
if (err)
md_close (hd);
}
}
if (! err)
*h = hd;
return err;
}
/* Create a message digest object for algorithm ALGO. FLAGS may be
given as an bitwise OR of the gcry_md_flags values. ALGO may be
given as 0 if the algorithms to be used are later set using
gcry_md_enable. H is guaranteed to be a valid handle or NULL on
error. */
gcry_err_code_t
_gcry_md_open (gcry_md_hd_t *h, int algo, unsigned int flags)
{
gcry_err_code_t rc;
gcry_md_hd_t hd;
if ((flags & ~(GCRY_MD_FLAG_SECURE
| GCRY_MD_FLAG_HMAC
| GCRY_MD_FLAG_BUGEMU1)))
rc = GPG_ERR_INV_ARG;
else
rc = md_open (&hd, algo, flags);
*h = rc? NULL : hd;
return rc;
}
static gcry_err_code_t
md_enable (gcry_md_hd_t hd, int algorithm)
{
struct gcry_md_context *h = hd->ctx;
gcry_md_spec_t *spec;
GcryDigestEntry *entry;
gcry_err_code_t err = 0;
for (entry = h->list; entry; entry = entry->next)
if (entry->spec->algo == algorithm)
return 0; /* Already enabled */
spec = spec_from_algo (algorithm);
if (!spec)
{
log_debug ("md_enable: algorithm %d not available\n", algorithm);
err = GPG_ERR_DIGEST_ALGO;
}
if (!err && algorithm == GCRY_MD_MD5 && fips_mode ())
{
_gcry_inactivate_fips_mode ("MD5 used");
if (_gcry_enforced_fips_mode () )
{
/* We should never get to here because we do not register
MD5 in enforced fips mode. But better throw an error. */
err = GPG_ERR_DIGEST_ALGO;
}
}
if (!err && h->flags.hmac && spec->read == NULL)
{
/* Expandable output function cannot act as part of HMAC. */
err = GPG_ERR_DIGEST_ALGO;
}
if (!err)
{
size_t size = (sizeof (*entry)
+ spec->contextsize * (h->flags.hmac? 3 : 1)
- sizeof (entry->context));
/* And allocate a new list entry. */
if (h->flags.secure)
entry = xtrymalloc_secure (size);
else
entry = xtrymalloc (size);
if (! entry)
err = gpg_err_code_from_errno (errno);
else
{
entry->spec = spec;
entry->next = h->list;
entry->actual_struct_size = size;
h->list = entry;
/* And init this instance. */
entry->spec->init (&entry->context.c,
h->flags.bugemu1? GCRY_MD_FLAG_BUGEMU1:0);
}
}
return err;
}
gcry_err_code_t
_gcry_md_enable (gcry_md_hd_t hd, int algorithm)
{
return md_enable (hd, algorithm);
}
static gcry_err_code_t
md_copy (gcry_md_hd_t ahd, gcry_md_hd_t *b_hd)
{
gcry_err_code_t err = 0;
struct gcry_md_context *a = ahd->ctx;
struct gcry_md_context *b;
GcryDigestEntry *ar, *br;
gcry_md_hd_t bhd;
size_t n;
if (ahd->bufpos)
md_write (ahd, NULL, 0);
n = (char *) ahd->ctx - (char *) ahd;
if (a->flags.secure)
bhd = xtrymalloc_secure (n + sizeof (struct gcry_md_context));
else
bhd = xtrymalloc (n + sizeof (struct gcry_md_context));
if (!bhd)
{
err = gpg_err_code_from_syserror ();
goto leave;
}
bhd->ctx = b = (void *) ((char *) bhd + n);
/* No need to copy the buffer due to the write above. */
gcry_assert (ahd->bufsize == (n - sizeof (struct gcry_md_handle) + 1));
bhd->bufsize = ahd->bufsize;
bhd->bufpos = 0;
gcry_assert (! ahd->bufpos);
memcpy (b, a, sizeof *a);
b->list = NULL;
b->debug = NULL;
/* Copy the complete list of algorithms. The copied list is
reversed, but that doesn't matter. */
for (ar = a->list; ar; ar = ar->next)
{
if (a->flags.secure)
br = xtrymalloc_secure (ar->actual_struct_size);
else
br = xtrymalloc (ar->actual_struct_size);
if (!br)
{
err = gpg_err_code_from_syserror ();
md_close (bhd);
goto leave;
}
memcpy (br, ar, ar->actual_struct_size);
br->next = b->list;
b->list = br;
}
if (a->debug)
md_start_debug (bhd, "unknown");
*b_hd = bhd;
leave:
return err;
}
gcry_err_code_t
_gcry_md_copy (gcry_md_hd_t *handle, gcry_md_hd_t hd)
{
gcry_err_code_t rc;
rc = md_copy (hd, handle);
if (rc)
*handle = NULL;
return rc;
}
/*
* Reset all contexts and discard any buffered stuff. This may be used
* instead of a md_close(); md_open().
*/
void
_gcry_md_reset (gcry_md_hd_t a)
{
GcryDigestEntry *r;
/* Note: We allow this even in fips non operational mode. */
a->bufpos = a->ctx->flags.finalized = 0;
if (a->ctx->flags.hmac)
for (r = a->ctx->list; r; r = r->next)
{
memcpy (r->context.c, r->context.c + r->spec->contextsize,
r->spec->contextsize);
}
else
for (r = a->ctx->list; r; r = r->next)
{
memset (r->context.c, 0, r->spec->contextsize);
(*r->spec->init) (&r->context.c,
a->ctx->flags.bugemu1? GCRY_MD_FLAG_BUGEMU1:0);
}
}
static void
md_close (gcry_md_hd_t a)
{
GcryDigestEntry *r, *r2;
if (! a)
return;
if (a->ctx->debug)
md_stop_debug (a);
for (r = a->ctx->list; r; r = r2)
{
r2 = r->next;
wipememory (r, r->actual_struct_size);
xfree (r);
}
wipememory (a, a->ctx->actual_handle_size);
xfree(a);
}
void
_gcry_md_close (gcry_md_hd_t hd)
{
/* Note: We allow this even in fips non operational mode. */
md_close (hd);
}
static void
md_write (gcry_md_hd_t a, const void *inbuf, size_t inlen)
{
GcryDigestEntry *r;
if (a->ctx->debug)
{
if (a->bufpos && fwrite (a->buf, a->bufpos, 1, a->ctx->debug) != 1)
BUG();
if (inlen && fwrite (inbuf, inlen, 1, a->ctx->debug) != 1)
BUG();
}
for (r = a->ctx->list; r; r = r->next)
{
if (a->bufpos)
(*r->spec->write) (&r->context.c, a->buf, a->bufpos);
(*r->spec->write) (&r->context.c, inbuf, inlen);
}
a->bufpos = 0;
}
/* Note that this function may be used after finalize and read to keep
on writing to the transform function so to mitigate timing
attacks. */
void
_gcry_md_write (gcry_md_hd_t hd, const void *inbuf, size_t inlen)
{
md_write (hd, inbuf, inlen);
}
static void
md_final (gcry_md_hd_t a)
{
GcryDigestEntry *r;
if (a->ctx->flags.finalized)
return;
if (a->bufpos)
md_write (a, NULL, 0);
for (r = a->ctx->list; r; r = r->next)
(*r->spec->final) (&r->context.c);
a->ctx->flags.finalized = 1;
if (!a->ctx->flags.hmac)
return;
for (r = a->ctx->list; r; r = r->next)
{
byte *p;
size_t dlen = r->spec->mdlen;
byte *hash;
gcry_err_code_t err;
if (r->spec->read == NULL)
continue;
p = r->spec->read (&r->context.c);
if (a->ctx->flags.secure)
hash = xtrymalloc_secure (dlen);
else
hash = xtrymalloc (dlen);
if (!hash)
{
err = gpg_err_code_from_errno (errno);
_gcry_fatal_error (err, NULL);
}
memcpy (hash, p, dlen);
memcpy (r->context.c, r->context.c + r->spec->contextsize * 2,
r->spec->contextsize);
(*r->spec->write) (&r->context.c, hash, dlen);
(*r->spec->final) (&r->context.c);
xfree (hash);
}
}
static gcry_err_code_t
md_setkey (gcry_md_hd_t h, const unsigned char *key, size_t keylen)
{
gcry_err_code_t rc = 0;
GcryDigestEntry *r;
int algo_had_setkey = 0;
if (!h->ctx->list)
return GPG_ERR_DIGEST_ALGO; /* Might happen if no algo is enabled. */
if (h->ctx->flags.hmac)
return GPG_ERR_DIGEST_ALGO; /* Tried md_setkey for HMAC md. */
for (r = h->ctx->list; r; r = r->next)
{
switch (r->spec->algo)
{
/* TODO? add spec->init_with_key? */
case GCRY_MD_BLAKE2B_512:
case GCRY_MD_BLAKE2B_384:
case GCRY_MD_BLAKE2B_256:
case GCRY_MD_BLAKE2B_160:
case GCRY_MD_BLAKE2S_256:
case GCRY_MD_BLAKE2S_224:
case GCRY_MD_BLAKE2S_160:
case GCRY_MD_BLAKE2S_128:
algo_had_setkey = 1;
memset (r->context.c, 0, r->spec->contextsize);
rc = _gcry_blake2_init_with_key (r->context.c,
h->ctx->flags.bugemu1
? GCRY_MD_FLAG_BUGEMU1:0,
key, keylen, r->spec->algo);
break;
default:
rc = GPG_ERR_DIGEST_ALGO;
break;
}
if (rc)
break;
}
if (rc && !algo_had_setkey)
{
/* None of algorithms had setkey implementation, so contexts were not
* modified. Just return error. */
return rc;
}
else if (rc && algo_had_setkey)
{
/* Some of the contexts have been modified, but got error. Reset
* all contexts. */
_gcry_md_reset (h);
return rc;
}
/* Successful md_setkey implies reset. */
h->bufpos = h->ctx->flags.finalized = 0;
return 0;
}
static gcry_err_code_t
prepare_macpads (gcry_md_hd_t a, const unsigned char *key, size_t keylen)
{
GcryDigestEntry *r;
if (!a->ctx->list)
return GPG_ERR_DIGEST_ALGO; /* Might happen if no algo is enabled. */
if (!a->ctx->flags.hmac)
return GPG_ERR_DIGEST_ALGO; /* Tried prepare_macpads for non-HMAC md. */
for (r = a->ctx->list; r; r = r->next)
{
const unsigned char *k;
size_t k_len;
unsigned char *key_allocated = NULL;
int macpad_Bsize;
int i;
switch (r->spec->algo)
{
/* TODO: add spec->blocksize */
case GCRY_MD_SHA3_224:
macpad_Bsize = 1152 / 8;
break;
case GCRY_MD_SHA3_256:
macpad_Bsize = 1088 / 8;
break;
case GCRY_MD_SHA3_384:
macpad_Bsize = 832 / 8;
break;
case GCRY_MD_SHA3_512:
macpad_Bsize = 576 / 8;
break;
case GCRY_MD_SHA384:
case GCRY_MD_SHA512:
case GCRY_MD_BLAKE2B_512:
case GCRY_MD_BLAKE2B_384:
case GCRY_MD_BLAKE2B_256:
case GCRY_MD_BLAKE2B_160:
macpad_Bsize = 128;
break;
case GCRY_MD_GOSTR3411_94:
case GCRY_MD_GOSTR3411_CP:
macpad_Bsize = 32;
break;
default:
macpad_Bsize = 64;
break;
}
if ( keylen > macpad_Bsize )
{
k = key_allocated = xtrymalloc_secure (r->spec->mdlen);
if (!k)
return gpg_err_code_from_errno (errno);
_gcry_md_hash_buffer (r->spec->algo, key_allocated, key, keylen);
k_len = r->spec->mdlen;
gcry_assert ( k_len <= macpad_Bsize );
}
else
{
k = key;
k_len = keylen;
}
(*r->spec->init) (&r->context.c,
a->ctx->flags.bugemu1? GCRY_MD_FLAG_BUGEMU1:0);
a->bufpos = 0;
for (i=0; i < k_len; i++ )
_gcry_md_putc (a, k[i] ^ 0x36);
for (; i < macpad_Bsize; i++ )
_gcry_md_putc (a, 0x36);
(*r->spec->write) (&r->context.c, a->buf, a->bufpos);
memcpy (r->context.c + r->spec->contextsize, r->context.c,
r->spec->contextsize);
(*r->spec->init) (&r->context.c,
a->ctx->flags.bugemu1? GCRY_MD_FLAG_BUGEMU1:0);
a->bufpos = 0;
for (i=0; i < k_len; i++ )
_gcry_md_putc (a, k[i] ^ 0x5c);
for (; i < macpad_Bsize; i++ )
_gcry_md_putc (a, 0x5c);
(*r->spec->write) (&r->context.c, a->buf, a->bufpos);
memcpy (r->context.c + r->spec->contextsize*2, r->context.c,
r->spec->contextsize);
xfree (key_allocated);
}
a->bufpos = 0;
return 0;
}
gcry_err_code_t
_gcry_md_ctl (gcry_md_hd_t hd, int cmd, void *buffer, size_t buflen)
{
gcry_err_code_t rc = 0;
(void)buflen; /* Currently not used. */
switch (cmd)
{
case GCRYCTL_FINALIZE:
md_final (hd);
break;
case GCRYCTL_START_DUMP:
md_start_debug (hd, buffer);
break;
case GCRYCTL_STOP_DUMP:
md_stop_debug ( hd );
break;
default:
rc = GPG_ERR_INV_OP;
}
return rc;
}
gcry_err_code_t
_gcry_md_setkey (gcry_md_hd_t hd, const void *key, size_t keylen)
{
gcry_err_code_t rc;
if (hd->ctx->flags.hmac)
{
rc = prepare_macpads (hd, key, keylen);
if (!rc)
_gcry_md_reset (hd);
}
else
{
rc = md_setkey (hd, key, keylen);
}
return rc;
}
/* The new debug interface. If SUFFIX is a string it creates an debug
file for the context HD. IF suffix is NULL, the file is closed and
debugging is stopped. */
void
_gcry_md_debug (gcry_md_hd_t hd, const char *suffix)
{
if (suffix)
md_start_debug (hd, suffix);
else
md_stop_debug (hd);
}
/****************
* If ALGO is null get the digest for the used algo (which should be
* only one)
*/
static byte *
md_read( gcry_md_hd_t a, int algo )
{
GcryDigestEntry *r = a->ctx->list;
if (! algo)
{
/* Return the first algorithm */
if (r)
{
if (r->next)
log_debug ("more than one algorithm in md_read(0)\n");
if (r->spec->read)
return r->spec->read (&r->context.c);
}
}
else
{
for (r = a->ctx->list; r; r = r->next)
if (r->spec->algo == algo)
{
if (r->spec->read)
return r->spec->read (&r->context.c);
break;
}
}
if (r && !r->spec->read)
_gcry_fatal_error (GPG_ERR_DIGEST_ALGO,
"requested algo has no fixed digest length");
else
_gcry_fatal_error (GPG_ERR_DIGEST_ALGO, "requested algo not in md context");
return NULL;
}
/*
* Read out the complete digest, this function implictly finalizes
* the hash.
*/
byte *
_gcry_md_read (gcry_md_hd_t hd, int algo)
{
/* This function is expected to always return a digest, thus we
can't return an error which we actually should do in
non-operational state. */
_gcry_md_ctl (hd, GCRYCTL_FINALIZE, NULL, 0);
return md_read (hd, algo);
}
/****************
* If ALGO is null get the digest for the used algo (which should be
* only one)
*/
static gcry_err_code_t
md_extract(gcry_md_hd_t a, int algo, void *out, size_t outlen)
{
GcryDigestEntry *r = a->ctx->list;
if (!algo)
{
/* Return the first algorithm */
if (r && r->spec->extract)
{
if (r->next)
log_debug ("more than one algorithm in md_extract(0)\n");
r->spec->extract (&r->context.c, out, outlen);
return 0;
}
}
else
{
for (r = a->ctx->list; r; r = r->next)
if (r->spec->algo == algo && r->spec->extract)
{
r->spec->extract (&r->context.c, out, outlen);
return 0;
}
}
return GPG_ERR_DIGEST_ALGO;
}
/*
* Expand the output from XOF class digest, this function implictly finalizes
* the hash.
*/
gcry_err_code_t
_gcry_md_extract (gcry_md_hd_t hd, int algo, void *out, size_t outlen)
{
_gcry_md_ctl (hd, GCRYCTL_FINALIZE, NULL, 0);
return md_extract (hd, algo, out, outlen);
}
/*
* Read out an intermediate digest. Not yet functional.
*/
gcry_err_code_t
_gcry_md_get (gcry_md_hd_t hd, int algo, byte *buffer, int buflen)
{
(void)hd;
(void)algo;
(void)buffer;
(void)buflen;
/*md_digest ... */
fips_signal_error ("unimplemented function called");
return GPG_ERR_INTERNAL;
}
/*
* Shortcut function to hash a buffer with a given algo. The only
* guaranteed supported algorithms are RIPE-MD160 and SHA-1. The
* supplied digest buffer must be large enough to store the resulting
* hash. No error is returned, the function will abort on an invalid
* algo. DISABLED_ALGOS are ignored here. */
void
_gcry_md_hash_buffer (int algo, void *digest,
const void *buffer, size_t length)
{
- if (algo == GCRY_MD_SHA1)
+ if (0)
+ ;
+#if USE_SHA256
+ else if (algo == GCRY_MD_SHA256)
+ _gcry_sha256_hash_buffer (digest, buffer, length);
+#endif
+#if USE_SHA512
+ else if (algo == GCRY_MD_SHA512)
+ _gcry_sha512_hash_buffer (digest, buffer, length);
+#endif
+#if USE_SHA1
+ else if (algo == GCRY_MD_SHA1)
_gcry_sha1_hash_buffer (digest, buffer, length);
+#endif
+#if USE_RMD160
else if (algo == GCRY_MD_RMD160 && !fips_mode () )
_gcry_rmd160_hash_buffer (digest, buffer, length);
+#endif
else
{
/* For the others we do not have a fast function, so we use the
normal functions. */
gcry_md_hd_t h;
gpg_err_code_t err;
if (algo == GCRY_MD_MD5 && fips_mode ())
{
_gcry_inactivate_fips_mode ("MD5 used");
if (_gcry_enforced_fips_mode () )
{
/* We should never get to here because we do not register
MD5 in enforced fips mode. */
_gcry_fips_noreturn ();
}
}
err = md_open (&h, algo, 0);
if (err)
log_bug ("gcry_md_open failed for algo %d: %s",
algo, gpg_strerror (gcry_error(err)));
md_write (h, (byte *) buffer, length);
md_final (h);
memcpy (digest, md_read (h, algo), md_digest_length (algo));
md_close (h);
}
}
/* Shortcut function to hash multiple buffers with a given algo. In
contrast to gcry_md_hash_buffer, this function returns an error on
invalid arguments or on other problems; disabled algorithms are
_not_ ignored but flagged as an error.
The data to sign is taken from the array IOV which has IOVCNT items.
The only supported flag in FLAGS is GCRY_MD_FLAG_HMAC which turns
this function into a HMAC function; the first item in IOV is then
used as the key.
On success 0 is returned and resulting hash or HMAC is stored at
DIGEST which must have been provided by the caller with an
appropriate length. */
gpg_err_code_t
_gcry_md_hash_buffers (int algo, unsigned int flags, void *digest,
const gcry_buffer_t *iov, int iovcnt)
{
int hmac;
if (!iov || iovcnt < 0)
return GPG_ERR_INV_ARG;
if (flags & ~(GCRY_MD_FLAG_HMAC))
return GPG_ERR_INV_ARG;
hmac = !!(flags & GCRY_MD_FLAG_HMAC);
if (hmac && iovcnt < 1)
return GPG_ERR_INV_ARG;
- if (algo == GCRY_MD_SHA1 && !hmac)
+ if (0)
+ ;
+#if USE_SHA256
+ else if (algo == GCRY_MD_SHA256 && !hmac)
+ _gcry_sha256_hash_buffers (digest, iov, iovcnt);
+#endif
+#if USE_SHA512
+ else if (algo == GCRY_MD_SHA512 && !hmac)
+ _gcry_sha512_hash_buffers (digest, iov, iovcnt);
+#endif
+#if USE_SHA1
+ else if (algo == GCRY_MD_SHA1 && !hmac)
_gcry_sha1_hash_buffers (digest, iov, iovcnt);
+#endif
else
{
/* For the others we do not have a fast function, so we use the
- normal functions. */
+ normal functions. */
gcry_md_hd_t h;
gpg_err_code_t rc;
int dlen;
if (algo == GCRY_MD_MD5 && fips_mode ())
{
_gcry_inactivate_fips_mode ("MD5 used");
if (_gcry_enforced_fips_mode () )
{
/* We should never get to here because we do not register
MD5 in enforced fips mode. */
_gcry_fips_noreturn ();
}
}
/* Detect SHAKE128 like algorithms which we can't use because
* our API does not allow for a variable length digest. */
dlen = md_digest_length (algo);
if (!dlen)
return GPG_ERR_DIGEST_ALGO;
rc = md_open (&h, algo, (hmac? GCRY_MD_FLAG_HMAC:0));
if (rc)
return rc;
if (hmac)
{
rc = _gcry_md_setkey (h,
(const char*)iov[0].data + iov[0].off,
iov[0].len);
if (rc)
{
md_close (h);
return rc;
}
iov++; iovcnt--;
}
for (;iovcnt; iov++, iovcnt--)
md_write (h, (const char*)iov[0].data + iov[0].off, iov[0].len);
md_final (h);
memcpy (digest, md_read (h, algo), dlen);
md_close (h);
}
return 0;
}
static int
md_get_algo (gcry_md_hd_t a)
{
GcryDigestEntry *r = a->ctx->list;
if (r && r->next)
{
fips_signal_error ("possible usage error");
log_error ("WARNING: more than one algorithm in md_get_algo()\n");
}
return r ? r->spec->algo : 0;
}
int
_gcry_md_get_algo (gcry_md_hd_t hd)
{
return md_get_algo (hd);
}
/****************
* Return the length of the digest
*/
static int
md_digest_length (int algorithm)
{
gcry_md_spec_t *spec;
spec = spec_from_algo (algorithm);
return spec? spec->mdlen : 0;
}
/****************
* Return the length of the digest in bytes.
* This function will return 0 in case of errors.
*/
unsigned int
_gcry_md_get_algo_dlen (int algorithm)
{
return md_digest_length (algorithm);
}
/* Hmmm: add a mode to enumerate the OIDs
* to make g10/sig-check.c more portable */
static const byte *
md_asn_oid (int algorithm, size_t *asnlen, size_t *mdlen)
{
gcry_md_spec_t *spec;
const byte *asnoid = NULL;
spec = spec_from_algo (algorithm);
if (spec)
{
if (asnlen)
*asnlen = spec->asnlen;
if (mdlen)
*mdlen = spec->mdlen;
asnoid = spec->asnoid;
}
else
log_bug ("no ASN.1 OID for md algo %d\n", algorithm);
return asnoid;
}
/****************
* Return information about the given cipher algorithm
* WHAT select the kind of information returned:
* GCRYCTL_TEST_ALGO:
* Returns 0 when the specified algorithm is available for use.
* buffer and nbytes must be zero.
* GCRYCTL_GET_ASNOID:
* Return the ASNOID of the algorithm in buffer. if buffer is NULL, only
* the required length is returned.
* GCRYCTL_SELFTEST
* Helper for the regression tests - shall not be used by applications.
*
* Note: Because this function is in most cases used to return an
* integer value, we can make it easier for the caller to just look at
* the return value. The caller will in all cases consult the value
* and thereby detecting whether a error occurred or not (i.e. while checking
* the block size)
*/
gcry_err_code_t
_gcry_md_algo_info (int algo, int what, void *buffer, size_t *nbytes)
{
gcry_err_code_t rc;
switch (what)
{
case GCRYCTL_TEST_ALGO:
if (buffer || nbytes)
rc = GPG_ERR_INV_ARG;
else
rc = check_digest_algo (algo);
break;
case GCRYCTL_GET_ASNOID:
/* We need to check that the algo is available because
md_asn_oid would otherwise raise an assertion. */
rc = check_digest_algo (algo);
if (!rc)
{
const char unsigned *asn;
size_t asnlen;
asn = md_asn_oid (algo, &asnlen, NULL);
if (buffer && (*nbytes >= asnlen))
{
memcpy (buffer, asn, asnlen);
*nbytes = asnlen;
}
else if (!buffer && nbytes)
*nbytes = asnlen;
else
{
if (buffer)
rc = GPG_ERR_TOO_SHORT;
else
rc = GPG_ERR_INV_ARG;
}
}
break;
case GCRYCTL_SELFTEST:
/* Helper function for the regression tests. */
rc = gpg_err_code (_gcry_md_selftest (algo, nbytes? (int)*nbytes : 0,
NULL));
break;
default:
rc = GPG_ERR_INV_OP;
break;
}
return rc;
}
static void
md_start_debug ( gcry_md_hd_t md, const char *suffix )
{
static int idx=0;
char buf[50];
if (fips_mode ())
return;
if ( md->ctx->debug )
{
log_debug("Oops: md debug already started\n");
return;
}
idx++;
snprintf (buf, DIM(buf)-1, "dbgmd-%05d.%.10s", idx, suffix );
md->ctx->debug = fopen(buf, "w");
if ( !md->ctx->debug )
log_debug("md debug: can't open %s\n", buf );
}
static void
md_stop_debug( gcry_md_hd_t md )
{
if ( md->ctx->debug )
{
if ( md->bufpos )
md_write ( md, NULL, 0 );
fclose (md->ctx->debug);
md->ctx->debug = NULL;
}
{ /* a kludge to pull in the __muldi3 for Solaris */
volatile u32 a = (u32)(uintptr_t)md;
volatile u64 b = 42;
volatile u64 c;
c = a * b;
(void)c;
}
}
/*
* Return information about the digest handle.
* GCRYCTL_IS_SECURE:
* Returns 1 when the handle works on secured memory
* otherwise 0 is returned. There is no error return.
* GCRYCTL_IS_ALGO_ENABLED:
* Returns 1 if the algo is enabled for that handle.
* The algo must be passed as the address of an int.
*/
gcry_err_code_t
_gcry_md_info (gcry_md_hd_t h, int cmd, void *buffer, size_t *nbytes)
{
gcry_err_code_t rc = 0;
switch (cmd)
{
case GCRYCTL_IS_SECURE:
*nbytes = h->ctx->flags.secure;
break;
case GCRYCTL_IS_ALGO_ENABLED:
{
GcryDigestEntry *r;
int algo;
if ( !buffer || !nbytes || *nbytes != sizeof (int))
rc = GPG_ERR_INV_ARG;
else
{
algo = *(int*)buffer;
*nbytes = 0;
for(r=h->ctx->list; r; r = r->next ) {
if (r->spec->algo == algo)
{
*nbytes = 1;
break;
}
}
}
break;
}
default:
rc = GPG_ERR_INV_OP;
}
return rc;
}
/* Explicitly initialize this module. */
gcry_err_code_t
_gcry_md_init (void)
{
if (fips_mode())
{
/* disable algorithms that are disallowed in fips */
int idx;
gcry_md_spec_t *spec;
for (idx = 0; (spec = digest_list[idx]); idx++)
if (!spec->flags.fips)
spec->flags.disabled = 1;
}
return 0;
}
int
_gcry_md_is_secure (gcry_md_hd_t a)
{
size_t value;
if (_gcry_md_info (a, GCRYCTL_IS_SECURE, NULL, &value))
value = 1; /* It seems to be better to assume secure memory on
error. */
return value;
}
int
_gcry_md_is_enabled (gcry_md_hd_t a, int algo)
{
size_t value;
value = sizeof algo;
if (_gcry_md_info (a, GCRYCTL_IS_ALGO_ENABLED, &algo, &value))
value = 0;
return value;
}
/* Run the selftests for digest algorithm ALGO with optional reporting
function REPORT. */
gpg_error_t
_gcry_md_selftest (int algo, int extended, selftest_report_func_t report)
{
gcry_err_code_t ec = 0;
gcry_md_spec_t *spec;
spec = spec_from_algo (algo);
if (spec && !spec->flags.disabled && spec->selftest)
ec = spec->selftest (algo, extended, report);
else
{
ec = (spec && spec->selftest) ? GPG_ERR_DIGEST_ALGO
/* */ : GPG_ERR_NOT_IMPLEMENTED;
if (report)
report ("digest", algo, "module",
(spec && !spec->flags.disabled)?
"no selftest available" :
spec? "algorithm disabled" : "algorithm not found");
}
return gpg_error (ec);
}
diff --git a/cipher/sha256.c b/cipher/sha256.c
index b450a125..d174321d 100644
--- a/cipher/sha256.c
+++ b/cipher/sha256.c
@@ -1,678 +1,707 @@
/* sha256.c - SHA256 hash function
* Copyright (C) 2003, 2006, 2008, 2009 Free Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/* Test vectors:
"abc"
SHA224: 23097d22 3405d822 8642a477 bda255b3 2aadbce4 bda0b3f7 e36c9da7
SHA256: ba7816bf 8f01cfea 414140de 5dae2223 b00361a3 96177a9c b410ff61 f20015ad
"abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"
SHA224: 75388b16 512776cc 5dba5da1 fd890150 b0c6455c b4f58b19 52522525
SHA256: 248d6a61 d20638b8 e5c02693 0c3e6039 a33ce459 64ff2167 f6ecedd4 19db06c1
"a" one million times
SHA224: 20794655 980c91d8 bbb4c1ea 97618a4b f03f4258 1948b2ee 4ee7ad67
SHA256: cdc76e5c 9914fb92 81a1c7e2 84d73e67 f1809a48 a497200e 046d39cc c7112cd0
*/
#include <config.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "g10lib.h"
#include "bithelp.h"
#include "bufhelp.h"
#include "cipher.h"
#include "hash-common.h"
/* USE_SSSE3 indicates whether to compile with Intel SSSE3 code. */
#undef USE_SSSE3
#if defined(__x86_64__) && defined(HAVE_GCC_INLINE_ASM_SSSE3) && \
defined(HAVE_INTEL_SYNTAX_PLATFORM_AS) && \
(defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \
defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS))
# define USE_SSSE3 1
#endif
/* USE_AVX indicates whether to compile with Intel AVX code. */
#undef USE_AVX
#if defined(__x86_64__) && defined(HAVE_GCC_INLINE_ASM_AVX) && \
defined(HAVE_INTEL_SYNTAX_PLATFORM_AS) && \
(defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \
defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS))
# define USE_AVX 1
#endif
/* USE_AVX2 indicates whether to compile with Intel AVX2/BMI2 code. */
#undef USE_AVX2
#if defined(__x86_64__) && defined(HAVE_GCC_INLINE_ASM_AVX2) && \
defined(HAVE_GCC_INLINE_ASM_BMI2) && \
defined(HAVE_INTEL_SYNTAX_PLATFORM_AS) && \
(defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \
defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS))
# define USE_AVX2 1
#endif
/* USE_ARM_CE indicates whether to enable ARMv8 Crypto Extension assembly
* code. */
#undef USE_ARM_CE
#ifdef ENABLE_ARM_CRYPTO_SUPPORT
# if defined(HAVE_ARM_ARCH_V6) && defined(__ARMEL__) \
&& defined(HAVE_COMPATIBLE_GCC_ARM_PLATFORM_AS) \
&& defined(HAVE_GCC_INLINE_ASM_AARCH32_CRYPTO)
# define USE_ARM_CE 1
# elif defined(__AARCH64EL__) \
&& defined(HAVE_COMPATIBLE_GCC_AARCH64_PLATFORM_AS) \
&& defined(HAVE_GCC_INLINE_ASM_AARCH64_CRYPTO)
# define USE_ARM_CE 1
# endif
#endif
typedef struct {
  gcry_md_block_ctx_t bctx;     /* Common block-buffer state (buf, counts, bwrite). */
  u32 h0,h1,h2,h3,h4,h5,h6,h7;  /* SHA-256 chaining variables; kept adjacent so
                                 * &h0 can be passed as u32 state[8] to the
                                 * assembly transforms.  */
#ifdef USE_SSSE3
  unsigned int use_ssse3:1;     /* Use the AMD64 SSSE3 transform. */
#endif
#ifdef USE_AVX
  unsigned int use_avx:1;       /* Use the AMD64 AVX transform. */
#endif
#ifdef USE_AVX2
  unsigned int use_avx2:1;      /* Use the AMD64 AVX2/BMI2 transform. */
#endif
#ifdef USE_ARM_CE
  unsigned int use_arm_ce:1;    /* Use the ARMv8 Crypto Extension transform. */
#endif
} SHA256_CONTEXT;
static unsigned int
transform (void *c, const unsigned char *data, size_t nblks);
static void
sha256_init (void *context, unsigned int flags)
{
  SHA256_CONTEXT *hd = context;
  unsigned int features = _gcry_get_hw_features ();

  (void)flags;

  /* Initial hash value H(0) for SHA-256 (FIPS 180-4, section 5.3.3). */
  hd->h0 = 0x6a09e667;
  hd->h1 = 0xbb67ae85;
  hd->h2 = 0x3c6ef372;
  hd->h3 = 0xa54ff53a;
  hd->h4 = 0x510e527f;
  hd->h5 = 0x9b05688c;
  hd->h6 = 0x1f83d9ab;
  hd->h7 = 0x5be0cd19;

  hd->bctx.nblocks = 0;
  hd->bctx.nblocks_high = 0;
  hd->bctx.count = 0;
  hd->bctx.blocksize = 64;      /* SHA-256 processes 64-byte blocks. */
  hd->bctx.bwrite = transform;

  /* Select the fastest transform implementation supported by the CPU. */
#ifdef USE_SSSE3
  hd->use_ssse3 = (features & HWF_INTEL_SSSE3) != 0;
#endif
#ifdef USE_AVX
  /* AVX implementation uses SHLD which is known to be slow on non-Intel CPUs.
   * Therefore use this implementation on Intel CPUs only. */
  hd->use_avx = (features & HWF_INTEL_AVX) && (features & HWF_INTEL_FAST_SHLD);
#endif
#ifdef USE_AVX2
  hd->use_avx2 = (features & HWF_INTEL_AVX2) && (features & HWF_INTEL_BMI2);
#endif
#ifdef USE_ARM_CE
  hd->use_arm_ce = (features & HWF_ARM_SHA2) != 0;
#endif
  (void)features;               /* Silence -Wunused when no backend is compiled. */
}
static void
sha224_init (void *context, unsigned int flags)
{
  SHA256_CONTEXT *hd = context;
  unsigned int features = _gcry_get_hw_features ();

  (void)flags;

  /* Initial hash value H(0) for SHA-224 (FIPS 180-4, section 5.3.2).
   * SHA-224 shares the SHA-256 compression function and context; only
   * the initial state and the output length (28 bytes) differ.  */
  hd->h0 = 0xc1059ed8;
  hd->h1 = 0x367cd507;
  hd->h2 = 0x3070dd17;
  hd->h3 = 0xf70e5939;
  hd->h4 = 0xffc00b31;
  hd->h5 = 0x68581511;
  hd->h6 = 0x64f98fa7;
  hd->h7 = 0xbefa4fa4;

  hd->bctx.nblocks = 0;
  hd->bctx.nblocks_high = 0;
  hd->bctx.count = 0;
  hd->bctx.blocksize = 64;      /* Same 64-byte block size as SHA-256. */
  hd->bctx.bwrite = transform;

  /* Backend selection mirrors sha256_init. */
#ifdef USE_SSSE3
  hd->use_ssse3 = (features & HWF_INTEL_SSSE3) != 0;
#endif
#ifdef USE_AVX
  /* AVX implementation uses SHLD which is known to be slow on non-Intel CPUs.
   * Therefore use this implementation on Intel CPUs only. */
  hd->use_avx = (features & HWF_INTEL_AVX) && (features & HWF_INTEL_FAST_SHLD);
#endif
#ifdef USE_AVX2
  hd->use_avx2 = (features & HWF_INTEL_AVX2) && (features & HWF_INTEL_BMI2);
#endif
#ifdef USE_ARM_CE
  hd->use_arm_ce = (features & HWF_ARM_SHA2) != 0;
#endif
  (void)features;               /* Silence -Wunused when no backend is compiled. */
}
/*
Transform the message X which consists of 16 32-bit-words. See FIPS
180-2 for details. */
#define R(a,b,c,d,e,f,g,h,k,w) do \
{ \
t1 = (h) + Sum1((e)) + Cho((e),(f),(g)) + (k) + (w); \
t2 = Sum0((a)) + Maj((a),(b),(c)); \
d += t1; \
h = t1 + t2; \
} while (0)
/* (4.2) same as SHA-1's F1. */
#define Cho(x, y, z) (z ^ (x & (y ^ z)))
/* (4.3) same as SHA-1's F3 */
#define Maj(x, y, z) ((x & y) + (z & (x ^ y)))
/* (4.4) */
#define Sum0(x) (ror (x, 2) ^ ror (x, 13) ^ ror (x, 22))
/* (4.5) */
#define Sum1(x) (ror (x, 6) ^ ror (x, 11) ^ ror (x, 25))
/* Message expansion */
#define S0(x) (ror ((x), 7) ^ ror ((x), 18) ^ ((x) >> 3)) /* (4.6) */
#define S1(x) (ror ((x), 17) ^ ror ((x), 19) ^ ((x) >> 10)) /* (4.7) */
#define I(i) ( w[i] = buf_get_be32(data + i * 4) )
#define W(i) ( w[i&0x0f] = S1(w[(i-2) &0x0f]) \
+ w[(i-7) &0x0f] \
+ S0(w[(i-15)&0x0f]) \
+ w[(i-16)&0x0f] )
static unsigned int
transform_blk (void *ctx, const unsigned char *data)
{
  SHA256_CONTEXT *hd = ctx;
  /* SHA-256 round constants K (FIPS 180-4, section 4.2.2). */
  static const u32 K[64] = {
    0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
    0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
    0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
    0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
    0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
    0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
    0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
    0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
    0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
    0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
    0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
    0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
    0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
    0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
    0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
    0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
  };

  u32 a,b,c,d,e,f,g,h,t1,t2;
  u32 w[16];                    /* 16-word rolling window of the message schedule. */

  /* Load the chaining state. */
  a = hd->h0;
  b = hd->h1;
  c = hd->h2;
  d = hd->h3;
  e = hd->h4;
  f = hd->h5;
  g = hd->h6;
  h = hd->h7;

  /* Rounds 0..15 load the message block big-endian via I(); the R macro
   * is fully unrolled with the working variables rotated by argument
   * position instead of shuffling them each round.  */
  R(a, b, c, d, e, f, g, h, K[0], I(0));
  R(h, a, b, c, d, e, f, g, K[1], I(1));
  R(g, h, a, b, c, d, e, f, K[2], I(2));
  R(f, g, h, a, b, c, d, e, K[3], I(3));
  R(e, f, g, h, a, b, c, d, K[4], I(4));
  R(d, e, f, g, h, a, b, c, K[5], I(5));
  R(c, d, e, f, g, h, a, b, K[6], I(6));
  R(b, c, d, e, f, g, h, a, K[7], I(7));
  R(a, b, c, d, e, f, g, h, K[8], I(8));
  R(h, a, b, c, d, e, f, g, K[9], I(9));
  R(g, h, a, b, c, d, e, f, K[10], I(10));
  R(f, g, h, a, b, c, d, e, K[11], I(11));
  R(e, f, g, h, a, b, c, d, K[12], I(12));
  R(d, e, f, g, h, a, b, c, K[13], I(13));
  R(c, d, e, f, g, h, a, b, K[14], I(14));
  R(b, c, d, e, f, g, h, a, K[15], I(15));
  /* Rounds 16..63 expand the schedule in place via W(). */
  R(a, b, c, d, e, f, g, h, K[16], W(16));
  R(h, a, b, c, d, e, f, g, K[17], W(17));
  R(g, h, a, b, c, d, e, f, K[18], W(18));
  R(f, g, h, a, b, c, d, e, K[19], W(19));
  R(e, f, g, h, a, b, c, d, K[20], W(20));
  R(d, e, f, g, h, a, b, c, K[21], W(21));
  R(c, d, e, f, g, h, a, b, K[22], W(22));
  R(b, c, d, e, f, g, h, a, K[23], W(23));
  R(a, b, c, d, e, f, g, h, K[24], W(24));
  R(h, a, b, c, d, e, f, g, K[25], W(25));
  R(g, h, a, b, c, d, e, f, K[26], W(26));
  R(f, g, h, a, b, c, d, e, K[27], W(27));
  R(e, f, g, h, a, b, c, d, K[28], W(28));
  R(d, e, f, g, h, a, b, c, K[29], W(29));
  R(c, d, e, f, g, h, a, b, K[30], W(30));
  R(b, c, d, e, f, g, h, a, K[31], W(31));
  R(a, b, c, d, e, f, g, h, K[32], W(32));
  R(h, a, b, c, d, e, f, g, K[33], W(33));
  R(g, h, a, b, c, d, e, f, K[34], W(34));
  R(f, g, h, a, b, c, d, e, K[35], W(35));
  R(e, f, g, h, a, b, c, d, K[36], W(36));
  R(d, e, f, g, h, a, b, c, K[37], W(37));
  R(c, d, e, f, g, h, a, b, K[38], W(38));
  R(b, c, d, e, f, g, h, a, K[39], W(39));
  R(a, b, c, d, e, f, g, h, K[40], W(40));
  R(h, a, b, c, d, e, f, g, K[41], W(41));
  R(g, h, a, b, c, d, e, f, K[42], W(42));
  R(f, g, h, a, b, c, d, e, K[43], W(43));
  R(e, f, g, h, a, b, c, d, K[44], W(44));
  R(d, e, f, g, h, a, b, c, K[45], W(45));
  R(c, d, e, f, g, h, a, b, K[46], W(46));
  R(b, c, d, e, f, g, h, a, K[47], W(47));
  R(a, b, c, d, e, f, g, h, K[48], W(48));
  R(h, a, b, c, d, e, f, g, K[49], W(49));
  R(g, h, a, b, c, d, e, f, K[50], W(50));
  R(f, g, h, a, b, c, d, e, K[51], W(51));
  R(e, f, g, h, a, b, c, d, K[52], W(52));
  R(d, e, f, g, h, a, b, c, K[53], W(53));
  R(c, d, e, f, g, h, a, b, K[54], W(54));
  R(b, c, d, e, f, g, h, a, K[55], W(55));
  R(a, b, c, d, e, f, g, h, K[56], W(56));
  R(h, a, b, c, d, e, f, g, K[57], W(57));
  R(g, h, a, b, c, d, e, f, K[58], W(58));
  R(f, g, h, a, b, c, d, e, K[59], W(59));
  R(e, f, g, h, a, b, c, d, K[60], W(60));
  R(d, e, f, g, h, a, b, c, K[61], W(61));
  R(c, d, e, f, g, h, a, b, K[62], W(62));
  R(b, c, d, e, f, g, h, a, K[63], W(63));

  /* Fold the result back into the chaining state. */
  hd->h0 += a;
  hd->h1 += b;
  hd->h2 += c;
  hd->h3 += d;
  hd->h4 += e;
  hd->h5 += f;
  hd->h6 += g;
  hd->h7 += h;

  /* Estimated stack bytes holding sensitive data, for _gcry_burn_stack. */
  return /*burn_stack*/ 26*4+32;
}
#undef S0
#undef S1
#undef R
/* Assembly implementations use SystemV ABI, ABI conversion and additional
* stack to store XMM6-XMM15 needed on Win64. */
#undef ASM_FUNC_ABI
#undef ASM_EXTRA_STACK
#if defined(USE_SSSE3) || defined(USE_AVX) || defined(USE_AVX2)
# ifdef HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS
# define ASM_FUNC_ABI __attribute__((sysv_abi))
# define ASM_EXTRA_STACK (10 * 16)
# else
# define ASM_FUNC_ABI
# define ASM_EXTRA_STACK 0
# endif
#endif
#ifdef USE_SSSE3
unsigned int _gcry_sha256_transform_amd64_ssse3(const void *input_data,
u32 state[8],
size_t num_blks) ASM_FUNC_ABI;
#endif
#ifdef USE_AVX
unsigned int _gcry_sha256_transform_amd64_avx(const void *input_data,
u32 state[8],
size_t num_blks) ASM_FUNC_ABI;
#endif
#ifdef USE_AVX2
unsigned int _gcry_sha256_transform_amd64_avx2(const void *input_data,
u32 state[8],
size_t num_blks) ASM_FUNC_ABI;
#endif
#ifdef USE_ARM_CE
unsigned int _gcry_sha256_transform_armv8_ce(u32 state[8],
const void *input_data,
size_t num_blks);
#endif
/* Process NBLKS consecutive 64-byte blocks starting at DATA, dispatching
 * to the fastest transform selected at init time.  Returns the stack
 * burn depth for _gcry_burn_stack.  */
static unsigned int
transform (void *ctx, const unsigned char *data, size_t nblks)
{
  SHA256_CONTEXT *hd = ctx;
  unsigned int burn;

  /* The AMD64 backends take the state as u32[8]; &hd->h0 relies on
   * h0..h7 being laid out contiguously in SHA256_CONTEXT.  The added
   * 4 * sizeof(void*) + ASM_EXTRA_STACK accounts for the call frame
   * (and, on Win64, the XMM6-XMM15 save area) used by the asm code.  */
#ifdef USE_AVX2
  if (hd->use_avx2)
    return _gcry_sha256_transform_amd64_avx2 (data, &hd->h0, nblks)
           + 4 * sizeof(void*) + ASM_EXTRA_STACK;
#endif

#ifdef USE_AVX
  if (hd->use_avx)
    return _gcry_sha256_transform_amd64_avx (data, &hd->h0, nblks)
           + 4 * sizeof(void*) + ASM_EXTRA_STACK;
#endif

#ifdef USE_SSSE3
  if (hd->use_ssse3)
    return _gcry_sha256_transform_amd64_ssse3 (data, &hd->h0, nblks)
           + 4 * sizeof(void*) + ASM_EXTRA_STACK;
#endif

#ifdef USE_ARM_CE
  if (hd->use_arm_ce)
    return _gcry_sha256_transform_armv8_ce (&hd->h0, data, nblks);
#endif

  /* Generic C fallback: one block at a time. */
  do
    {
      burn = transform_blk (hd, data);
      data += 64;
    }
  while (--nblks);

#ifdef ASM_EXTRA_STACK
  /* 'transform_blk' is typically inlined and XMM6-XMM15 are stored at
   * the prologue of this function. Therefore need to add ASM_EXTRA_STACK to
   * here too.
   */
  burn += ASM_EXTRA_STACK;
#endif

  return burn;
}
/*
The routine finally terminates the computation and returns the
digest. The handle is prepared for a new cycle, but adding bytes
to the handle will the destroy the returned buffer. Returns: 32
bytes with the message the digest. */
static void
sha256_final(void *context)
{
  SHA256_CONTEXT *hd = context;
  u32 t, th, msb, lsb;
  byte *p;
  unsigned int burn;

  _gcry_md_block_write (hd, NULL, 0); /* flush */;

  /* Build the 64-bit message bit count in MSB:LSB from the 32-bit block
   * counters, propagating carries manually at each step.  */
  t = hd->bctx.nblocks;
  if (sizeof t == sizeof hd->bctx.nblocks)
    th = hd->bctx.nblocks_high;
  else
    th = hd->bctx.nblocks >> 32;

  /* multiply by 64 to make a byte count */
  lsb = t << 6;
  msb = (th << 6) | (t >> 26);  /* t >> 26 = the 6 bits shifted out of lsb. */
  /* add the count */
  t = lsb;
  if ((lsb += hd->bctx.count) < t)
    msb++;                      /* Carry from the unsigned wraparound. */
  /* multiply by 8 to make a bit count */
  t = lsb;
  lsb <<= 3;
  msb <<= 3;
  msb |= t >> 29;

  /* FIPS 180-4 padding: 0x80, zeros up to byte 56 of the final block,
   * then the 64-bit big-endian bit count in bytes 56..63.  */
  if (hd->bctx.count < 56)
    { /* enough room */
      hd->bctx.buf[hd->bctx.count++] = 0x80; /* pad */
      while (hd->bctx.count < 56)
        hd->bctx.buf[hd->bctx.count++] = 0;  /* pad */
    }
  else
    { /* need one extra block */
      hd->bctx.buf[hd->bctx.count++] = 0x80; /* pad character */
      while (hd->bctx.count < 64)
        hd->bctx.buf[hd->bctx.count++] = 0;
      _gcry_md_block_write (hd, NULL, 0);  /* flush */;
      memset (hd->bctx.buf, 0, 56 ); /* fill next block with zeroes */
    }
  /* append the 64 bit count */
  buf_put_be32(hd->bctx.buf + 56, msb);
  buf_put_be32(hd->bctx.buf + 60, lsb);
  burn = transform (hd, hd->bctx.buf, 1);
  _gcry_burn_stack (burn);

  /* Serialize the digest big-endian into the block buffer, where
   * sha256_read will pick it up.  */
  p = hd->bctx.buf;
#define X(a) do { buf_put_be32(p, hd->h##a); p += 4; } while(0)
  X(0);
  X(1);
  X(2);
  X(3);
  X(4);
  X(5);
  X(6);
  X(7);
#undef X
}
static byte *
sha256_read (void *context)
{
SHA256_CONTEXT *hd = context;
return hd->bctx.buf;
}
+/* Shortcut functions which puts the hash value of the supplied buffer
+ * into outbuf which must have a size of 32 bytes. */
+void
+_gcry_sha256_hash_buffer (void *outbuf, const void *buffer, size_t length)
+{
+  SHA256_CONTEXT hd;
+
+  /* One-shot hash: init, absorb LENGTH bytes, finalize, then copy the
+   * 32-byte digest out of the context's block buffer.  */
+  sha256_init (&hd, 0);
+  _gcry_md_block_write (&hd, buffer, length);
+  sha256_final (&hd);
+  memcpy (outbuf, hd.bctx.buf, 32);
+}
+
+
+/* Variant of the above shortcut function using multiple buffers.
+ * OUTBUF must have room for the 32-byte digest; IOV points to IOVCNT
+ * buffer descriptors which are hashed in order.  */
+void
+_gcry_sha256_hash_buffers (void *outbuf, const gcry_buffer_t *iov, int iovcnt)
+{
+  SHA256_CONTEXT hd;
+
+  sha256_init (&hd, 0);
+  /* Absorb each I/O vector entry, honoring its offset into the data.  */
+  for (;iovcnt > 0; iov++, iovcnt--)
+    _gcry_md_block_write (&hd,
+                          (const char*)iov[0].data + iov[0].off, iov[0].len);
+  sha256_final (&hd);
+  memcpy (outbuf, hd.bctx.buf, 32);
+}
+
+
/*
Self-test section.
*/
/* Power-up self-tests for SHA-224 using the FIPS 180-2 test vectors
 * quoted at the top of this file.  EXTENDED enables the slower long
 * and one-million-"a" vectors.  Returns 0 on success.  */
static gpg_err_code_t
selftests_sha224 (int extended, selftest_report_func_t report)
{
  const char *what;
  const char *errtxt;

  what = "short string";
  errtxt = _gcry_hash_selftest_check_one
    (GCRY_MD_SHA224, 0,
     "abc", 3,
     "\x23\x09\x7d\x22\x34\x05\xd8\x22\x86\x42\xa4\x77\xbd\xa2\x55\xb3"
     "\x2a\xad\xbc\xe4\xbd\xa0\xb3\xf7\xe3\x6c\x9d\xa7", 28);
  if (errtxt)
    goto failed;

  if (extended)
    {
      what = "long string";
      errtxt = _gcry_hash_selftest_check_one
        (GCRY_MD_SHA224, 0,
         "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq", 56,
         "\x75\x38\x8b\x16\x51\x27\x76\xcc\x5d\xba\x5d\xa1\xfd\x89\x01\x50"
         "\xb0\xc6\x45\x5c\xb4\xf5\x8b\x19\x52\x52\x25\x25", 28);
      if (errtxt)
        goto failed;

      /* Mode 1 makes the helper generate the million-"a" input itself. */
      what = "one million \"a\"";
      errtxt = _gcry_hash_selftest_check_one
        (GCRY_MD_SHA224, 1,
         NULL, 0,
         "\x20\x79\x46\x55\x98\x0c\x91\xd8\xbb\xb4\xc1\xea\x97\x61\x8a\x4b"
         "\xf0\x3f\x42\x58\x19\x48\xb2\xee\x4e\xe7\xad\x67", 28);
      if (errtxt)
        goto failed;
    }

  return 0; /* Succeeded. */

 failed:
  if (report)
    report ("digest", GCRY_MD_SHA224, what, errtxt);
  return GPG_ERR_SELFTEST_FAILED;
}
/* Power-up self-tests for SHA-256 using the FIPS 180-2 test vectors
 * quoted at the top of this file.  EXTENDED enables the slower long
 * and one-million-"a" vectors.  Returns 0 on success.  */
static gpg_err_code_t
selftests_sha256 (int extended, selftest_report_func_t report)
{
  const char *what;
  const char *errtxt;

  what = "short string";
  errtxt = _gcry_hash_selftest_check_one
    (GCRY_MD_SHA256, 0,
     "abc", 3,
     "\xba\x78\x16\xbf\x8f\x01\xcf\xea\x41\x41\x40\xde\x5d\xae\x22\x23"
     "\xb0\x03\x61\xa3\x96\x17\x7a\x9c\xb4\x10\xff\x61\xf2\x00\x15\xad", 32);
  if (errtxt)
    goto failed;

  if (extended)
    {
      what = "long string";
      errtxt = _gcry_hash_selftest_check_one
        (GCRY_MD_SHA256, 0,
         "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq", 56,
         "\x24\x8d\x6a\x61\xd2\x06\x38\xb8\xe5\xc0\x26\x93\x0c\x3e\x60\x39"
         "\xa3\x3c\xe4\x59\x64\xff\x21\x67\xf6\xec\xed\xd4\x19\xdb\x06\xc1",
         32);
      if (errtxt)
        goto failed;

      /* Mode 1 makes the helper generate the million-"a" input itself. */
      what = "one million \"a\"";
      errtxt = _gcry_hash_selftest_check_one
        (GCRY_MD_SHA256, 1,
         NULL, 0,
         "\xcd\xc7\x6e\x5c\x99\x14\xfb\x92\x81\xa1\xc7\xe2\x84\xd7\x3e\x67"
         "\xf1\x80\x9a\x48\xa4\x97\x20\x0e\x04\x6d\x39\xcc\xc7\x11\x2c\xd0",
         32);
      if (errtxt)
        goto failed;
    }

  return 0; /* Succeeded. */

 failed:
  if (report)
    report ("digest", GCRY_MD_SHA256, what, errtxt);
  return GPG_ERR_SELFTEST_FAILED;
}
/* Run a full self-test for ALGO and return 0 on success. */
static gpg_err_code_t
run_selftests (int algo, int extended, selftest_report_func_t report)
{
  /* Dispatch to the per-algorithm self-test; any other algorithm is not
   * handled by this module.  */
  if (algo == GCRY_MD_SHA224)
    return selftests_sha224 (extended, report);
  if (algo == GCRY_MD_SHA256)
    return selftests_sha256 (extended, report);
  return GPG_ERR_DIGEST_ALGO;
}
/* DER-encoded DigestInfo prefix for SHA-224; the 28 digest bytes are
 * appended after the final 0x1C length octet.  */
static byte asn224[19] = /* Object ID is 2.16.840.1.101.3.4.2.4 */
  { 0x30, 0x2D, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48,
    0x01, 0x65, 0x03, 0x04, 0x02, 0x04, 0x05, 0x00, 0x04,
    0x1C
  };

static gcry_md_oid_spec_t oid_spec_sha224[] =
  {
    /* From RFC3874, Section 4 */
    { "2.16.840.1.101.3.4.2.4" },
    { NULL },
  };

/* DER-encoded DigestInfo prefix for SHA-256; the 32 digest bytes are
 * appended after the final 0x20 length octet.  */
static byte asn256[19] = /* Object ID is  2.16.840.1.101.3.4.2.1 */
  { 0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86,
    0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, 0x05,
    0x00, 0x04, 0x20 };

static gcry_md_oid_spec_t oid_spec_sha256[] =
  {
    /* According to the OpenPGP draft rfc2440-bis06 */
    { "2.16.840.1.101.3.4.2.1" },
    /* PKCS#1 sha256WithRSAEncryption */
    { "1.2.840.113549.1.1.11" },

    { NULL },
  };

gcry_md_spec_t _gcry_digest_spec_sha224 =
  {
    GCRY_MD_SHA224, {0, 1},
    "SHA224", asn224, DIM (asn224), oid_spec_sha224, 28,
    sha224_init, _gcry_md_block_write, sha256_final, sha256_read, NULL,
    sizeof (SHA256_CONTEXT),
    run_selftests
  };

gcry_md_spec_t _gcry_digest_spec_sha256 =
  {
    GCRY_MD_SHA256, {0, 1},
    "SHA256", asn256, DIM (asn256), oid_spec_sha256, 32,
    sha256_init, _gcry_md_block_write, sha256_final, sha256_read, NULL,
    sizeof (SHA256_CONTEXT),
    run_selftests
  };
diff --git a/cipher/sha512.c b/cipher/sha512.c
index 5b259650..2ddc485c 100644
--- a/cipher/sha512.c
+++ b/cipher/sha512.c
@@ -1,927 +1,956 @@
/* sha512.c - SHA384 and SHA512 hash functions
* Copyright (C) 2003, 2008, 2009 Free Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser general Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, see .
*/
/* Test vectors from FIPS-180-2:
*
* "abc"
* 384:
* CB00753F 45A35E8B B5A03D69 9AC65007 272C32AB 0EDED163
* 1A8B605A 43FF5BED 8086072B A1E7CC23 58BAECA1 34C825A7
* 512:
* DDAF35A1 93617ABA CC417349 AE204131 12E6FA4E 89A97EA2 0A9EEEE6 4B55D39A
* 2192992A 274FC1A8 36BA3C23 A3FEEBBD 454D4423 643CE80E 2A9AC94F A54CA49F
*
* "abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu"
* 384:
* 09330C33 F71147E8 3D192FC7 82CD1B47 53111B17 3B3B05D2
* 2FA08086 E3B0F712 FCC7C71A 557E2DB9 66C3E9FA 91746039
* 512:
* 8E959B75 DAE313DA 8CF4F728 14FC143F 8F7779C6 EB9F7FA1 7299AEAD B6889018
* 501D289E 4900F7E4 331B99DE C4B5433A C7D329EE B6DD2654 5E96E55B 874BE909
*
* "a" x 1000000
* 384:
* 9D0E1809 716474CB 086E834E 310A4A1C ED149E9C 00F24852
* 7972CEC5 704C2A5B 07B8B3DC 38ECC4EB AE97DDD8 7F3D8985
* 512:
* E718483D 0CE76964 4E2E42C7 BC15B463 8E1F98B1 3B204428 5632A803 AFA973EB
* DE0FF244 877EA60A 4CB0432C E577C31B EB009C5C 2C49AA2E 4EADB217 AD8CC09B
*/
#include
#include
#include "g10lib.h"
#include "bithelp.h"
#include "bufhelp.h"
#include "cipher.h"
#include "hash-common.h"
/* USE_ARM_NEON_ASM indicates whether to enable ARM NEON assembly code. */
#undef USE_ARM_NEON_ASM
#ifdef ENABLE_NEON_SUPPORT
# if defined(HAVE_ARM_ARCH_V6) && defined(__ARMEL__) \
&& defined(HAVE_COMPATIBLE_GCC_ARM_PLATFORM_AS) \
&& defined(HAVE_GCC_INLINE_ASM_NEON)
# define USE_ARM_NEON_ASM 1
# endif
#endif /*ENABLE_NEON_SUPPORT*/
/* USE_ARM_ASM indicates whether to enable ARM assembly code. */
#undef USE_ARM_ASM
#if defined(__ARMEL__) && defined(HAVE_COMPATIBLE_GCC_ARM_PLATFORM_AS)
# define USE_ARM_ASM 1
#endif
/* USE_SSSE3 indicates whether to compile with Intel SSSE3 code. */
#undef USE_SSSE3
#if defined(__x86_64__) && defined(HAVE_GCC_INLINE_ASM_SSSE3) && \
defined(HAVE_INTEL_SYNTAX_PLATFORM_AS) && \
(defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \
defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS))
# define USE_SSSE3 1
#endif
/* USE_AVX indicates whether to compile with Intel AVX code. */
#undef USE_AVX
#if defined(__x86_64__) && defined(HAVE_GCC_INLINE_ASM_AVX) && \
defined(HAVE_INTEL_SYNTAX_PLATFORM_AS) && \
(defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \
defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS))
# define USE_AVX 1
#endif
/* USE_AVX2 indicates whether to compile with Intel AVX2/rorx code. */
#undef USE_AVX2
#if defined(__x86_64__) && defined(HAVE_GCC_INLINE_ASM_AVX2) && \
defined(HAVE_GCC_INLINE_ASM_BMI2) && \
defined(HAVE_INTEL_SYNTAX_PLATFORM_AS) && \
(defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \
defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS))
# define USE_AVX2 1
#endif
typedef struct
{
  /* SHA-512 chaining variables, kept in a separate struct so the
   * assembly transforms can be handed just the state.  */
  u64 h0, h1, h2, h3, h4, h5, h6, h7;
} SHA512_STATE;

typedef struct
{
  gcry_md_block_ctx_t bctx;     /* Common block-buffer state (buf, counts, bwrite). */
  SHA512_STATE state;
#ifdef USE_ARM_NEON_ASM
  unsigned int use_neon:1;      /* Use the ARMv7 NEON transform. */
#endif
#ifdef USE_SSSE3
  unsigned int use_ssse3:1;     /* Use the AMD64 SSSE3 transform. */
#endif
#ifdef USE_AVX
  unsigned int use_avx:1;       /* Use the AMD64 AVX transform. */
#endif
#ifdef USE_AVX2
  unsigned int use_avx2:1;      /* Use the AMD64 AVX2/BMI2 transform. */
#endif
} SHA512_CONTEXT;
static unsigned int
transform (void *context, const unsigned char *data, size_t nblks);
static void
sha512_init (void *context, unsigned int flags)
{
  SHA512_CONTEXT *ctx = context;
  SHA512_STATE *hd = &ctx->state;
  unsigned int features = _gcry_get_hw_features ();

  (void)flags;

  /* Initial hash value H(0) for SHA-512 (FIPS 180-4, section 5.3.5). */
  hd->h0 = U64_C(0x6a09e667f3bcc908);
  hd->h1 = U64_C(0xbb67ae8584caa73b);
  hd->h2 = U64_C(0x3c6ef372fe94f82b);
  hd->h3 = U64_C(0xa54ff53a5f1d36f1);
  hd->h4 = U64_C(0x510e527fade682d1);
  hd->h5 = U64_C(0x9b05688c2b3e6c1f);
  hd->h6 = U64_C(0x1f83d9abfb41bd6b);
  hd->h7 = U64_C(0x5be0cd19137e2179);

  ctx->bctx.nblocks = 0;
  ctx->bctx.nblocks_high = 0;
  ctx->bctx.count = 0;
  ctx->bctx.blocksize = 128;    /* SHA-512 processes 128-byte blocks. */
  ctx->bctx.bwrite = transform;

  /* Select the fastest transform implementation supported by the CPU. */
#ifdef USE_ARM_NEON_ASM
  ctx->use_neon = (features & HWF_ARM_NEON) != 0;
#endif
#ifdef USE_SSSE3
  ctx->use_ssse3 = (features & HWF_INTEL_SSSE3) != 0;
#endif
#ifdef USE_AVX
  /* See sha256.c: the AVX code relies on fast SHLD, so Intel only. */
  ctx->use_avx = (features & HWF_INTEL_AVX) && (features & HWF_INTEL_FAST_SHLD);
#endif
#ifdef USE_AVX2
  ctx->use_avx2 = (features & HWF_INTEL_AVX2) && (features & HWF_INTEL_BMI2);
#endif
  (void)features;               /* Silence -Wunused when no backend is compiled. */
}
static void
sha384_init (void *context, unsigned int flags)
{
  SHA512_CONTEXT *ctx = context;
  SHA512_STATE *hd = &ctx->state;
  unsigned int features = _gcry_get_hw_features ();

  (void)flags;

  /* Initial hash value H(0) for SHA-384 (FIPS 180-4, section 5.3.4).
   * SHA-384 shares the SHA-512 machinery; only the initial state and
   * the truncated 48-byte output differ.  */
  hd->h0 = U64_C(0xcbbb9d5dc1059ed8);
  hd->h1 = U64_C(0x629a292a367cd507);
  hd->h2 = U64_C(0x9159015a3070dd17);
  hd->h3 = U64_C(0x152fecd8f70e5939);
  hd->h4 = U64_C(0x67332667ffc00b31);
  hd->h5 = U64_C(0x8eb44a8768581511);
  hd->h6 = U64_C(0xdb0c2e0d64f98fa7);
  hd->h7 = U64_C(0x47b5481dbefa4fa4);

  ctx->bctx.nblocks = 0;
  ctx->bctx.nblocks_high = 0;
  ctx->bctx.count = 0;
  ctx->bctx.blocksize = 128;    /* Same 128-byte block size as SHA-512. */
  ctx->bctx.bwrite = transform;

  /* Backend selection mirrors sha512_init. */
#ifdef USE_ARM_NEON_ASM
  ctx->use_neon = (features & HWF_ARM_NEON) != 0;
#endif
#ifdef USE_SSSE3
  ctx->use_ssse3 = (features & HWF_INTEL_SSSE3) != 0;
#endif
#ifdef USE_AVX
  ctx->use_avx = (features & HWF_INTEL_AVX) && (features & HWF_INTEL_FAST_SHLD);
#endif
#ifdef USE_AVX2
  ctx->use_avx2 = (features & HWF_INTEL_AVX2) && (features & HWF_INTEL_BMI2);
#endif
  (void)features;               /* Silence -Wunused when no backend is compiled. */
}
static const u64 k[] =
{
U64_C(0x428a2f98d728ae22), U64_C(0x7137449123ef65cd),
U64_C(0xb5c0fbcfec4d3b2f), U64_C(0xe9b5dba58189dbbc),
U64_C(0x3956c25bf348b538), U64_C(0x59f111f1b605d019),
U64_C(0x923f82a4af194f9b), U64_C(0xab1c5ed5da6d8118),
U64_C(0xd807aa98a3030242), U64_C(0x12835b0145706fbe),
U64_C(0x243185be4ee4b28c), U64_C(0x550c7dc3d5ffb4e2),
U64_C(0x72be5d74f27b896f), U64_C(0x80deb1fe3b1696b1),
U64_C(0x9bdc06a725c71235), U64_C(0xc19bf174cf692694),
U64_C(0xe49b69c19ef14ad2), U64_C(0xefbe4786384f25e3),
U64_C(0x0fc19dc68b8cd5b5), U64_C(0x240ca1cc77ac9c65),
U64_C(0x2de92c6f592b0275), U64_C(0x4a7484aa6ea6e483),
U64_C(0x5cb0a9dcbd41fbd4), U64_C(0x76f988da831153b5),
U64_C(0x983e5152ee66dfab), U64_C(0xa831c66d2db43210),
U64_C(0xb00327c898fb213f), U64_C(0xbf597fc7beef0ee4),
U64_C(0xc6e00bf33da88fc2), U64_C(0xd5a79147930aa725),
U64_C(0x06ca6351e003826f), U64_C(0x142929670a0e6e70),
U64_C(0x27b70a8546d22ffc), U64_C(0x2e1b21385c26c926),
U64_C(0x4d2c6dfc5ac42aed), U64_C(0x53380d139d95b3df),
U64_C(0x650a73548baf63de), U64_C(0x766a0abb3c77b2a8),
U64_C(0x81c2c92e47edaee6), U64_C(0x92722c851482353b),
U64_C(0xa2bfe8a14cf10364), U64_C(0xa81a664bbc423001),
U64_C(0xc24b8b70d0f89791), U64_C(0xc76c51a30654be30),
U64_C(0xd192e819d6ef5218), U64_C(0xd69906245565a910),
U64_C(0xf40e35855771202a), U64_C(0x106aa07032bbd1b8),
U64_C(0x19a4c116b8d2d0c8), U64_C(0x1e376c085141ab53),
U64_C(0x2748774cdf8eeb99), U64_C(0x34b0bcb5e19b48a8),
U64_C(0x391c0cb3c5c95a63), U64_C(0x4ed8aa4ae3418acb),
U64_C(0x5b9cca4f7763e373), U64_C(0x682e6ff3d6b2b8a3),
U64_C(0x748f82ee5defb2fc), U64_C(0x78a5636f43172f60),
U64_C(0x84c87814a1f0ab72), U64_C(0x8cc702081a6439ec),
U64_C(0x90befffa23631e28), U64_C(0xa4506cebde82bde9),
U64_C(0xbef9a3f7b2c67915), U64_C(0xc67178f2e372532b),
U64_C(0xca273eceea26619c), U64_C(0xd186b8c721c0c207),
U64_C(0xeada7dd6cde0eb1e), U64_C(0xf57d4f7fee6ed178),
U64_C(0x06f067aa72176fba), U64_C(0x0a637dc5a2c898a6),
U64_C(0x113f9804bef90dae), U64_C(0x1b710b35131c471b),
U64_C(0x28db77f523047d84), U64_C(0x32caab7b40c72493),
U64_C(0x3c9ebe0a15c9bebc), U64_C(0x431d67c49c100d4c),
U64_C(0x4cc5d4becb3e42b6), U64_C(0x597f299cfc657e2a),
U64_C(0x5fcb6fab3ad6faec), U64_C(0x6c44198c4a475817)
};
#ifndef USE_ARM_ASM
/* Rotate the 64-bit value X right by N bits.  The shift counts are
 * masked to 0..63 so that N == 0 does not shift by the full type width,
 * which is undefined behavior (C99 6.5.7p3).  For the counts actually
 * used by SHA-512 (1..61) the result is unchanged.  */
static inline u64
ROTR (u64 x, u64 n)
{
  return ((x >> (n & 63)) | (x << ((64 - n) & 63)));
}
/* Choice function (FIPS 180-4, eq. 4.8): each result bit is taken from
 * Y where the X bit is set and from Z otherwise.  This one-AND form is
 * bitwise identical to (x & y) ^ (~x & z).  */
static inline u64
Ch (u64 x, u64 y, u64 z)
{
  return (z ^ (x & (y ^ z)));
}
/* Majority function (FIPS 180-4, eq. 4.9): a result bit is set when at
 * least two of the corresponding input bits are set.  This form is
 * bitwise identical to (x & y) ^ (x & z) ^ (y & z).  */
static inline u64
Maj (u64 x, u64 y, u64 z)
{
  return ((x & y) | (z & (x | y)));
}
/* Big sigma-0 (FIPS 180-4, eq. 4.10). */
static inline u64
Sum0 (u64 x)
{
  return (ROTR (x, 28) ^ ROTR (x, 34) ^ ROTR (x, 39));
}
/* Big sigma-1 (FIPS 180-4, eq. 4.11). */
static inline u64
Sum1 (u64 x)
{
  return (ROTR (x, 14) ^ ROTR (x, 18) ^ ROTR (x, 41));
}
/****************
* Transform the message W which consists of 16 64-bit-words
*/
static unsigned int
transform_blk (SHA512_STATE *hd, const unsigned char *data)
{
  u64 a, b, c, d, e, f, g, h;
  u64 w[16];                    /* 16-word rolling window of the message schedule. */
  int t;

  /* get values from the chaining vars */
  a = hd->h0;
  b = hd->h1;
  c = hd->h2;
  d = hd->h3;
  e = hd->h4;
  f = hd->h5;
  g = hd->h6;
  h = hd->h7;

  /* Load the 128-byte block as 16 big-endian 64-bit words. */
  for ( t = 0; t < 16; t++ )
    w[t] = buf_get_be64(data + t * 8);

/* Small sigma-0 / sigma-1 message expansion (FIPS 180-4, eq. 4.12/4.13). */
#define S0(x) (ROTR((x),1) ^ ROTR((x),8) ^ ((x)>>7))
#define S1(x) (ROTR((x),19) ^ ROTR((x),61) ^ ((x)>>6))

  /* Rounds 0..63: each round also expands one schedule word in place. */
  for (t = 0; t < 80 - 16; )
    {
      u64 t1, t2;

      /* Performance on a AMD Athlon(tm) Dual Core Processor 4050e
         with gcc 4.3.3 using gcry_md_hash_buffer of each 10000 bytes
         initialized to 0,1,2,3...255,0,...  and 1000 iterations:

         Not unrolled with macros:  440ms
         Unrolled with macros:      350ms
         Unrolled with inline:      330ms
      */
#if 0 /* Not unrolled.  */
      t1 = h + Sum1 (e) + Ch (e, f, g) + k[t] + w[t%16];
      w[t%16] += S1 (w[(t - 2)%16]) + w[(t - 7)%16] + S0 (w[(t - 15)%16]);
      t2 = Sum0 (a) + Maj (a, b, c);
      h = g;
      g = f;
      f = e;
      e = d + t1;
      d = c;
      c = b;
      b = a;
      a = t1 + t2;
      t++;
#else /* Unrolled to interweave the chain variables.  */
      t1 = h + Sum1 (e) + Ch (e, f, g) + k[t] + w[0];
      w[0] += S1 (w[14]) + w[9] + S0 (w[1]);
      t2 = Sum0 (a) + Maj (a, b, c);
      d += t1;
      h = t1 + t2;

      t1 = g + Sum1 (d) + Ch (d, e, f) + k[t+1] + w[1];
      w[1] += S1 (w[15]) + w[10] + S0 (w[2]);
      t2 = Sum0 (h) + Maj (h, a, b);
      c += t1;
      g  = t1 + t2;

      t1 = f + Sum1 (c) + Ch (c, d, e) + k[t+2] + w[2];
      w[2] += S1 (w[0]) + w[11] + S0 (w[3]);
      t2 = Sum0 (g) + Maj (g, h, a);
      b += t1;
      f  = t1 + t2;

      t1 = e + Sum1 (b) + Ch (b, c, d) + k[t+3] + w[3];
      w[3] += S1 (w[1]) + w[12] + S0 (w[4]);
      t2 = Sum0 (f) + Maj (f, g, h);
      a += t1;
      e  = t1 + t2;

      t1 = d + Sum1 (a) + Ch (a, b, c) + k[t+4] + w[4];
      w[4] += S1 (w[2]) + w[13] + S0 (w[5]);
      t2 = Sum0 (e) + Maj (e, f, g);
      h += t1;
      d  = t1 + t2;

      t1 = c + Sum1 (h) + Ch (h, a, b) + k[t+5] + w[5];
      w[5] += S1 (w[3]) + w[14] + S0 (w[6]);
      t2 = Sum0 (d) + Maj (d, e, f);
      g += t1;
      c  = t1 + t2;

      t1 = b + Sum1 (g) + Ch (g, h, a) + k[t+6] + w[6];
      w[6] += S1 (w[4]) + w[15] + S0 (w[7]);
      t2 = Sum0 (c) + Maj (c, d, e);
      f += t1;
      b  = t1 + t2;

      t1 = a + Sum1 (f) + Ch (f, g, h) + k[t+7] + w[7];
      w[7] += S1 (w[5]) + w[0] + S0 (w[8]);
      t2 = Sum0 (b) + Maj (b, c, d);
      e += t1;
      a  = t1 + t2;

      t1 = h + Sum1 (e) + Ch (e, f, g) + k[t+8] + w[8];
      w[8] += S1 (w[6]) + w[1] + S0 (w[9]);
      t2 = Sum0 (a) + Maj (a, b, c);
      d += t1;
      h  = t1 + t2;

      t1 = g + Sum1 (d) + Ch (d, e, f) + k[t+9] + w[9];
      w[9] += S1 (w[7]) + w[2] + S0 (w[10]);
      t2 = Sum0 (h) + Maj (h, a, b);
      c += t1;
      g  = t1 + t2;

      t1 = f + Sum1 (c) + Ch (c, d, e) + k[t+10] + w[10];
      w[10] += S1 (w[8]) + w[3] + S0 (w[11]);
      t2 = Sum0 (g) + Maj (g, h, a);
      b += t1;
      f  = t1 + t2;

      t1 = e + Sum1 (b) + Ch (b, c, d) + k[t+11] + w[11];
      w[11] += S1 (w[9]) + w[4] + S0 (w[12]);
      t2 = Sum0 (f) + Maj (f, g, h);
      a += t1;
      e  = t1 + t2;

      t1 = d + Sum1 (a) + Ch (a, b, c) + k[t+12] + w[12];
      w[12] += S1 (w[10]) + w[5] + S0 (w[13]);
      t2 = Sum0 (e) + Maj (e, f, g);
      h += t1;
      d  = t1 + t2;

      t1 = c + Sum1 (h) + Ch (h, a, b) + k[t+13] + w[13];
      w[13] += S1 (w[11]) + w[6] + S0 (w[14]);
      t2 = Sum0 (d) + Maj (d, e, f);
      g += t1;
      c  = t1 + t2;

      t1 = b + Sum1 (g) + Ch (g, h, a) + k[t+14] + w[14];
      w[14] += S1 (w[12]) + w[7] + S0 (w[15]);
      t2 = Sum0 (c) + Maj (c, d, e);
      f += t1;
      b  = t1 + t2;

      t1 = a + Sum1 (f) + Ch (f, g, h) + k[t+15] + w[15];
      w[15] += S1 (w[13]) + w[8] + S0 (w[0]);
      t2 = Sum0 (b) + Maj (b, c, d);
      e += t1;
      a  = t1 + t2;

      t += 16;
#endif
    }

  /* Rounds 64..79: the schedule window is complete, no more expansion. */
  for (; t < 80; )
    {
      u64 t1, t2;

#if 0 /* Not unrolled.  */
      t1 = h + Sum1 (e) + Ch (e, f, g) + k[t] + w[t%16];
      t2 = Sum0 (a) + Maj (a, b, c);
      h = g;
      g = f;
      f = e;
      e = d + t1;
      d = c;
      c = b;
      b = a;
      a = t1 + t2;
      t++;
#else /* Unrolled to interweave the chain variables.  */
      t1 = h + Sum1 (e) + Ch (e, f, g) + k[t] + w[0];
      t2 = Sum0 (a) + Maj (a, b, c);
      d += t1;
      h  = t1 + t2;

      t1 = g + Sum1 (d) + Ch (d, e, f) + k[t+1] + w[1];
      t2 = Sum0 (h) + Maj (h, a, b);
      c += t1;
      g  = t1 + t2;

      t1 = f + Sum1 (c) + Ch (c, d, e) + k[t+2] + w[2];
      t2 = Sum0 (g) + Maj (g, h, a);
      b += t1;
      f  = t1 + t2;

      t1 = e + Sum1 (b) + Ch (b, c, d) + k[t+3] + w[3];
      t2 = Sum0 (f) + Maj (f, g, h);
      a += t1;
      e  = t1 + t2;

      t1 = d + Sum1 (a) + Ch (a, b, c) + k[t+4] + w[4];
      t2 = Sum0 (e) + Maj (e, f, g);
      h += t1;
      d  = t1 + t2;

      t1 = c + Sum1 (h) + Ch (h, a, b) + k[t+5] + w[5];
      t2 = Sum0 (d) + Maj (d, e, f);
      g += t1;
      c  = t1 + t2;

      t1 = b + Sum1 (g) + Ch (g, h, a) + k[t+6] + w[6];
      t2 = Sum0 (c) + Maj (c, d, e);
      f += t1;
      b  = t1 + t2;

      t1 = a + Sum1 (f) + Ch (f, g, h) + k[t+7] + w[7];
      t2 = Sum0 (b) + Maj (b, c, d);
      e += t1;
      a  = t1 + t2;

      t1 = h + Sum1 (e) + Ch (e, f, g) + k[t+8] + w[8];
      t2 = Sum0 (a) + Maj (a, b, c);
      d += t1;
      h  = t1 + t2;

      t1 = g + Sum1 (d) + Ch (d, e, f) + k[t+9] + w[9];
      t2 = Sum0 (h) + Maj (h, a, b);
      c += t1;
      g  = t1 + t2;

      t1 = f + Sum1 (c) + Ch (c, d, e) + k[t+10] + w[10];
      t2 = Sum0 (g) + Maj (g, h, a);
      b += t1;
      f  = t1 + t2;

      t1 = e + Sum1 (b) + Ch (b, c, d) + k[t+11] + w[11];
      t2 = Sum0 (f) + Maj (f, g, h);
      a += t1;
      e  = t1 + t2;

      t1 = d + Sum1 (a) + Ch (a, b, c) + k[t+12] + w[12];
      t2 = Sum0 (e) + Maj (e, f, g);
      h += t1;
      d  = t1 + t2;

      t1 = c + Sum1 (h) + Ch (h, a, b) + k[t+13] + w[13];
      t2 = Sum0 (d) + Maj (d, e, f);
      g += t1;
      c  = t1 + t2;

      t1 = b + Sum1 (g) + Ch (g, h, a) + k[t+14] + w[14];
      t2 = Sum0 (c) + Maj (c, d, e);
      f += t1;
      b  = t1 + t2;

      t1 = a + Sum1 (f) + Ch (f, g, h) + k[t+15] + w[15];
      t2 = Sum0 (b) + Maj (b, c, d);
      e += t1;
      a  = t1 + t2;

      t += 16;
#endif
    }

  /* Update chaining vars.  */
  hd->h0 += a;
  hd->h1 += b;
  hd->h2 += c;
  hd->h3 += d;
  hd->h4 += e;
  hd->h5 += f;
  hd->h6 += g;
  hd->h7 += h;

  /* Estimated stack bytes holding sensitive data, for _gcry_burn_stack. */
  return /* burn_stack */ (8 + 16) * sizeof(u64) + sizeof(u32) +
                          3 * sizeof(void*);
}
#endif /*!USE_ARM_ASM*/
/* AMD64 assembly implementations use SystemV ABI, ABI conversion and additional
* stack to store XMM6-XMM15 needed on Win64. */
#undef ASM_FUNC_ABI
#undef ASM_EXTRA_STACK
#if defined(USE_SSSE3) || defined(USE_AVX) || defined(USE_AVX2)
# ifdef HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS
# define ASM_FUNC_ABI __attribute__((sysv_abi))
# define ASM_EXTRA_STACK (10 * 16)
# else
# define ASM_FUNC_ABI
# define ASM_EXTRA_STACK 0
# endif
#endif
#ifdef USE_ARM_NEON_ASM
void _gcry_sha512_transform_armv7_neon (SHA512_STATE *hd,
const unsigned char *data,
const u64 k[], size_t num_blks);
#endif
#ifdef USE_ARM_ASM
unsigned int _gcry_sha512_transform_arm (SHA512_STATE *hd,
const unsigned char *data,
const u64 k[], size_t num_blks);
#endif
#ifdef USE_SSSE3
unsigned int _gcry_sha512_transform_amd64_ssse3(const void *input_data,
void *state,
size_t num_blks) ASM_FUNC_ABI;
#endif
#ifdef USE_AVX
unsigned int _gcry_sha512_transform_amd64_avx(const void *input_data,
void *state,
size_t num_blks) ASM_FUNC_ABI;
#endif
#ifdef USE_AVX2
unsigned int _gcry_sha512_transform_amd64_avx2(const void *input_data,
void *state,
size_t num_blks) ASM_FUNC_ABI;
#endif
/* Process NBLKS consecutive 128-byte blocks starting at DATA, dispatching
 * to the fastest transform selected at init time.  Returns the stack
 * burn depth for _gcry_burn_stack.  */
static unsigned int
transform (void *context, const unsigned char *data, size_t nblks)
{
  SHA512_CONTEXT *ctx = context;
  unsigned int burn;

  /* The added 4 * sizeof(void*) + ASM_EXTRA_STACK accounts for the call
   * frame (and, on Win64, the XMM6-XMM15 save area) used by the asm code. */
#ifdef USE_AVX2
  if (ctx->use_avx2)
    return _gcry_sha512_transform_amd64_avx2 (data, &ctx->state, nblks)
           + 4 * sizeof(void*) + ASM_EXTRA_STACK;
#endif

#ifdef USE_AVX
  if (ctx->use_avx)
    return _gcry_sha512_transform_amd64_avx (data, &ctx->state, nblks)
           + 4 * sizeof(void*) + ASM_EXTRA_STACK;
#endif

#ifdef USE_SSSE3
  if (ctx->use_ssse3)
    return _gcry_sha512_transform_amd64_ssse3 (data, &ctx->state, nblks)
           + 4 * sizeof(void*) + ASM_EXTRA_STACK;
#endif

#ifdef USE_ARM_NEON_ASM
  if (ctx->use_neon)
    {
      _gcry_sha512_transform_armv7_neon (&ctx->state, data, k, nblks);

      /* _gcry_sha512_transform_armv7_neon does not store sensitive data
       * to stack.  */
      return /* no burn_stack */ 0;
    }
#endif

#ifdef USE_ARM_ASM
  burn = _gcry_sha512_transform_arm (&ctx->state, data, k, nblks);
#else
  /* Generic C fallback: one block at a time. */
  do
    {
      burn = transform_blk (&ctx->state, data) + 3 * sizeof(void*);
      data += 128;
    }
  while (--nblks);

#ifdef ASM_EXTRA_STACK
  /* 'transform_blk' is typically inlined and XMM6-XMM15 are stored at
   * the prologue of this function. Therefore need to add ASM_EXTRA_STACK to
   * here too.
   */
  burn += ASM_EXTRA_STACK;
#endif
#endif

  return burn;
}
/* The routine final terminates the computation and
 * returns the digest.
 * The handle is prepared for a new cycle, but adding bytes to the
 * handle will destroy the returned buffer.
 * Returns: 64 bytes representing the digest.  When used for sha384,
 * we take the leftmost 48 of those bytes.
 */
/* Finalize the SHA-512 computation: flush pending input, append the
 * 0x80 pad byte, zero padding and the 128-bit message bit count, run
 * the last transform, and serialize the chaining state big-endian into
 * hd->bctx.buf for sha512_read.  */
static void
sha512_final (void *context)
{
  SHA512_CONTEXT *hd = context;
  unsigned int stack_burn_depth;
  u64 t, th, msb, lsb;
  byte *p;

  _gcry_md_block_write (context, NULL, 0); /* flush */ ;

  /* Total blocks processed so far, as a 128-bit value (T/TH).  */
  t = hd->bctx.nblocks;
  /* if (sizeof t == sizeof hd->bctx.nblocks) */
  th = hd->bctx.nblocks_high;
  /* else */
  /* th = hd->bctx.nblocks >> 64; In case we ever use u128 */

  /* multiply by 128 to make a byte count; the 7 bits shifted out of
   * the low word (t >> 57) carry into the high word.  */
  lsb = t << 7;
  msb = (th << 7) | (t >> 57);
  /* add the count of bytes still buffered */
  t = lsb;
  if ((lsb += hd->bctx.count) < t)
    msb++;                              /* carry into the high word */
  /* multiply by 8 to make a bit count */
  t = lsb;
  lsb <<= 3;
  msb <<= 3;
  msb |= t >> 61;

  if (hd->bctx.count < 112)
    { /* enough room */
      hd->bctx.buf[hd->bctx.count++] = 0x80; /* pad */
      while (hd->bctx.count < 112)
        hd->bctx.buf[hd->bctx.count++] = 0; /* pad */
    }
  else
    { /* need one extra block */
      hd->bctx.buf[hd->bctx.count++] = 0x80; /* pad character */
      while (hd->bctx.count < 128)
        hd->bctx.buf[hd->bctx.count++] = 0;
      _gcry_md_block_write (context, NULL, 0); /* flush */ ;
      memset (hd->bctx.buf, 0, 112); /* fill next block with zeroes */
    }
  /* append the 128 bit count */
  buf_put_be64(hd->bctx.buf + 112, msb);
  buf_put_be64(hd->bctx.buf + 120, lsb);
  stack_burn_depth = transform (hd, hd->bctx.buf, 1);
  _gcry_burn_stack (stack_burn_depth);

  /* Serialize the eight chaining words big-endian into the buffer.  */
  p = hd->bctx.buf;
#define X(a) do { buf_put_be64(p, hd->state.h##a); p += 8; } while (0)
  X (0);
  X (1);
  X (2);
  X (3);
  X (4);
  X (5);
  /* Note that these last two chunks are included even for SHA384.
     We just ignore them. */
  X (6);
  X (7);
#undef X
}
/* Return a pointer to the digest produced by sha512_final.  The bytes
 * live inside the context's block buffer, so further writes to the
 * handle invalidate them.  */
static byte *
sha512_read (void *context)
{
  SHA512_CONTEXT *ctx = context;

  return ctx->bctx.buf;
}
+/* Shortcut function which computes the SHA-512 hash of the LENGTH
+ * bytes at BUFFER and puts the 64-byte digest into OUTBUF, which must
+ * provide at least 64 bytes of space.  */
+void
+_gcry_sha512_hash_buffer (void *outbuf, const void *buffer, size_t length)
+{
+  SHA512_CONTEXT hd;
+
+  sha512_init (&hd, 0);
+  _gcry_md_block_write (&hd, buffer, length);
+  sha512_final (&hd);
+  memcpy (outbuf, hd.bctx.buf, 64);
+}
+
+
+/* Variant of the above shortcut function hashing IOVCNT buffers
+ * described by IOV: for each entry, the LEN bytes starting OFF bytes
+ * into its DATA are fed to the hash.  The 64-byte digest is stored at
+ * OUTBUF.  */
+void
+_gcry_sha512_hash_buffers (void *outbuf, const gcry_buffer_t *iov, int iovcnt)
+{
+  SHA512_CONTEXT hd;
+
+  sha512_init (&hd, 0);
+  for (;iovcnt > 0; iov++, iovcnt--)
+    _gcry_md_block_write (&hd,
+                          (const char*)iov[0].data + iov[0].off, iov[0].len);
+  sha512_final (&hd);
+  memcpy (outbuf, hd.bctx.buf, 64);
+}
+
+
/*
Self-test section.
*/
/* Run the SHA-384 known-answer self-tests.  The basic "abc" vector is
 * always checked; with EXTENDED also the two-block and the
 * one-million-'a' FIPS vectors.  On failure the optional REPORT
 * callback is invoked with the failing test's description.  Returns 0
 * on success, GPG_ERR_SELFTEST_FAILED otherwise.  */
static gpg_err_code_t
selftests_sha384 (int extended, selftest_report_func_t report)
{
  const char *what = "short string";
  const char *errtxt;

  errtxt = _gcry_hash_selftest_check_one
    (GCRY_MD_SHA384, 0,
     "abc", 3,
     "\xcb\x00\x75\x3f\x45\xa3\x5e\x8b\xb5\xa0\x3d\x69\x9a\xc6\x50\x07"
     "\x27\x2c\x32\xab\x0e\xde\xd1\x63\x1a\x8b\x60\x5a\x43\xff\x5b\xed"
     "\x80\x86\x07\x2b\xa1\xe7\xcc\x23\x58\xba\xec\xa1\x34\xc8\x25\xa7", 48);

  if (!errtxt && extended)
    {
      what = "long string";
      errtxt = _gcry_hash_selftest_check_one
        (GCRY_MD_SHA384, 0,
         "abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmn"
         "hijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu", 112,
         "\x09\x33\x0C\x33\xF7\x11\x47\xE8\x3D\x19\x2F\xC7\x82\xCD\x1B\x47"
         "\x53\x11\x1B\x17\x3B\x3B\x05\xD2\x2F\xA0\x80\x86\xE3\xB0\xF7\x12"
         "\xFC\xC7\xC7\x1A\x55\x7E\x2D\xB9\x66\xC3\xE9\xFA\x91\x74\x60\x39",
         48);
    }

  if (!errtxt && extended)
    {
      what = "one million \"a\"";
      errtxt = _gcry_hash_selftest_check_one
        (GCRY_MD_SHA384, 1,
         NULL, 0,
         "\x9D\x0E\x18\x09\x71\x64\x74\xCB\x08\x6E\x83\x4E\x31\x0A\x4A\x1C"
         "\xED\x14\x9E\x9C\x00\xF2\x48\x52\x79\x72\xCE\xC5\x70\x4C\x2A\x5B"
         "\x07\xB8\xB3\xDC\x38\xEC\xC4\xEB\xAE\x97\xDD\xD8\x7F\x3D\x89\x85",
         48);
    }

  if (!errtxt)
    return 0; /* Succeeded. */

  if (report)
    report ("digest", GCRY_MD_SHA384, what, errtxt);
  return GPG_ERR_SELFTEST_FAILED;
}
/* Run the SHA-512 known-answer self-tests.  The basic "abc" vector is
 * always checked; with EXTENDED also the two-block and the
 * one-million-'a' FIPS vectors.  On failure the optional REPORT
 * callback is invoked with the failing test's description.  Returns 0
 * on success, GPG_ERR_SELFTEST_FAILED otherwise.  */
static gpg_err_code_t
selftests_sha512 (int extended, selftest_report_func_t report)
{
  const char *what = "short string";
  const char *errtxt;

  errtxt = _gcry_hash_selftest_check_one
    (GCRY_MD_SHA512, 0,
     "abc", 3,
     "\xDD\xAF\x35\xA1\x93\x61\x7A\xBA\xCC\x41\x73\x49\xAE\x20\x41\x31"
     "\x12\xE6\xFA\x4E\x89\xA9\x7E\xA2\x0A\x9E\xEE\xE6\x4B\x55\xD3\x9A"
     "\x21\x92\x99\x2A\x27\x4F\xC1\xA8\x36\xBA\x3C\x23\xA3\xFE\xEB\xBD"
     "\x45\x4D\x44\x23\x64\x3C\xE8\x0E\x2A\x9A\xC9\x4F\xA5\x4C\xA4\x9F", 64);

  if (!errtxt && extended)
    {
      what = "long string";
      errtxt = _gcry_hash_selftest_check_one
        (GCRY_MD_SHA512, 0,
         "abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmn"
         "hijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu", 112,
         "\x8E\x95\x9B\x75\xDA\xE3\x13\xDA\x8C\xF4\xF7\x28\x14\xFC\x14\x3F"
         "\x8F\x77\x79\xC6\xEB\x9F\x7F\xA1\x72\x99\xAE\xAD\xB6\x88\x90\x18"
         "\x50\x1D\x28\x9E\x49\x00\xF7\xE4\x33\x1B\x99\xDE\xC4\xB5\x43\x3A"
         "\xC7\xD3\x29\xEE\xB6\xDD\x26\x54\x5E\x96\xE5\x5B\x87\x4B\xE9\x09",
         64);
    }

  if (!errtxt && extended)
    {
      what = "one million \"a\"";
      errtxt = _gcry_hash_selftest_check_one
        (GCRY_MD_SHA512, 1,
         NULL, 0,
         "\xE7\x18\x48\x3D\x0C\xE7\x69\x64\x4E\x2E\x42\xC7\xBC\x15\xB4\x63"
         "\x8E\x1F\x98\xB1\x3B\x20\x44\x28\x56\x32\xA8\x03\xAF\xA9\x73\xEB"
         "\xDE\x0F\xF2\x44\x87\x7E\xA6\x0A\x4C\xB0\x43\x2C\xE5\x77\xC3\x1B"
         "\xEB\x00\x9C\x5C\x2C\x49\xAA\x2E\x4E\xAD\xB2\x17\xAD\x8C\xC0\x9B",
         64);
    }

  if (!errtxt)
    return 0; /* Succeeded. */

  if (report)
    report ("digest", GCRY_MD_SHA512, what, errtxt);
  return GPG_ERR_SELFTEST_FAILED;
}
/* Run a full self-test for ALGO and return 0 on success.  Unknown
 * algorithms yield GPG_ERR_DIGEST_ALGO.  */
static gpg_err_code_t
run_selftests (int algo, int extended, selftest_report_func_t report)
{
  switch (algo)
    {
    case GCRY_MD_SHA384:
      return selftests_sha384 (extended, report);
    case GCRY_MD_SHA512:
      return selftests_sha512 (extended, report);
    default:
      return GPG_ERR_DIGEST_ALGO;
    }
}
/* DER-encoded DigestInfo prefix for SHA-512; the OID is
 * 2.16.840.1.101.3.4.2.3 and the trailing 0x04 0x40 opens the
 * 64-byte OCTET STRING that holds the digest.  */
static byte sha512_asn[] = /* Object ID is 2.16.840.1.101.3.4.2.3 */
  {
    0x30, 0x51, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86,
    0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03, 0x05,
    0x00, 0x04, 0x40
  };

/* Object identifiers under which SHA-512 is known.  */
static gcry_md_oid_spec_t oid_spec_sha512[] =
  {
    { "2.16.840.1.101.3.4.2.3" },

    /* PKCS#1 sha512WithRSAEncryption */
    { "1.2.840.113549.1.1.13" },

    { NULL }
  };

/* Algorithm descriptor registered with the MD dispatcher: id, flags,
 * name, ASN prefix, OIDs, 64-byte digest, the handler functions above
 * and the context size.  */
gcry_md_spec_t _gcry_digest_spec_sha512 =
  {
    GCRY_MD_SHA512, {0, 1},
    "SHA512", sha512_asn, DIM (sha512_asn), oid_spec_sha512, 64,
    sha512_init, _gcry_md_block_write, sha512_final, sha512_read, NULL,
    sizeof (SHA512_CONTEXT),
    run_selftests
  };
/* DER-encoded DigestInfo prefix for SHA-384; the OID is
 * 2.16.840.1.101.3.4.2.2 and the trailing 0x04 0x30 opens the
 * 48-byte OCTET STRING that holds the digest.  */
static byte sha384_asn[] = /* Object ID is 2.16.840.1.101.3.4.2.2 */
  {
    0x30, 0x41, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86,
    0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x02, 0x05,
    0x00, 0x04, 0x30
  };

/* Object identifiers under which SHA-384 is known.  */
static gcry_md_oid_spec_t oid_spec_sha384[] =
  {
    { "2.16.840.1.101.3.4.2.2" },

    /* PKCS#1 sha384WithRSAEncryption */
    { "1.2.840.113549.1.1.12" },

    { NULL },
  };

/* Algorithm descriptor for SHA-384.  It shares the SHA-512 context,
 * final and read handlers; only the init function and the 48-byte
 * digest length differ.  */
gcry_md_spec_t _gcry_digest_spec_sha384 =
  {
    GCRY_MD_SHA384, {0, 1},
    "SHA384", sha384_asn, DIM (sha384_asn), oid_spec_sha384, 48,
    sha384_init, _gcry_md_block_write, sha512_final, sha512_read, NULL,
    sizeof (SHA512_CONTEXT),
    run_selftests
  };
diff --git a/src/cipher.h b/src/cipher.h
index 725cc73d..f2acb556 100644
--- a/src/cipher.h
+++ b/src/cipher.h
@@ -1,325 +1,338 @@
/* cipher.h
* Copyright (C) 1998, 2002, 2003, 2009 Free Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser general Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*/
#ifndef G10_CIPHER_H
#define G10_CIPHER_H
#include "gcrypt-int.h"
#define DBG_CIPHER _gcry_get_debug_flag( 1 )
#include "../random/random.h"
#define PUBKEY_FLAG_NO_BLINDING (1 << 0)
#define PUBKEY_FLAG_RFC6979 (1 << 1)
#define PUBKEY_FLAG_FIXEDLEN (1 << 2)
#define PUBKEY_FLAG_LEGACYRESULT (1 << 3)
#define PUBKEY_FLAG_RAW_FLAG (1 << 4)
#define PUBKEY_FLAG_TRANSIENT_KEY (1 << 5)
#define PUBKEY_FLAG_USE_X931 (1 << 6)
#define PUBKEY_FLAG_USE_FIPS186 (1 << 7)
#define PUBKEY_FLAG_USE_FIPS186_2 (1 << 8)
#define PUBKEY_FLAG_PARAM (1 << 9)
#define PUBKEY_FLAG_COMP (1 << 10)
#define PUBKEY_FLAG_NOCOMP (1 << 11)
#define PUBKEY_FLAG_EDDSA (1 << 12)
#define PUBKEY_FLAG_GOST (1 << 13)
#define PUBKEY_FLAG_NO_KEYTEST (1 << 14)
#define PUBKEY_FLAG_DJB_TWEAK (1 << 15)
/* The public-key operation being performed; selects behavior in the
 * encoding/decoding and dispatch code.  */
enum pk_operation
  {
    PUBKEY_OP_ENCRYPT,
    PUBKEY_OP_DECRYPT,
    PUBKEY_OP_SIGN,
    PUBKEY_OP_VERIFY
  };
/* The padding/encoding scheme applied to public-key input or output
 * data (raw, PKCS#1 block types, OAEP, PSS).  */
enum pk_encoding
  {
    PUBKEY_ENC_RAW,
    PUBKEY_ENC_PKCS1,
    PUBKEY_ENC_PKCS1_RAW,
    PUBKEY_ENC_OAEP,
    PUBKEY_ENC_PSS,
    PUBKEY_ENC_UNKNOWN
  };
/* State carried through a public-key operation: the requested
 * operation, encoding and related parameters.  */
struct pk_encoding_ctx
{
  enum pk_operation op;         /* The operation being performed.  */
  unsigned int nbits;           /* Key size in bits -- presumably used to
                                 * size the encoded block; confirm in
                                 * pubkey.c.  */
  enum pk_encoding encoding;    /* Padding/encoding scheme in use.  */
  int flags;                    /* PUBKEY_FLAG_* bits.  */
  int hash_algo;                /* Hash algorithm for encodings that
                                 * embed a digest.  */

  /* for OAEP */
  unsigned char *label;         /* Optional OAEP label ... */
  size_t labellen;              /* ... and its length in bytes.  */

  /* for PSS */
  size_t saltlen;               /* PSS salt length in bytes.  */

  /* Callback and opaque argument used during verification --
   * NOTE(review): semantics of the MPI comparison are defined by the
   * caller in pubkey.c.  */
  int (* verify_cmp) (void *opaque, gcry_mpi_t tmp);
  void *verify_arg;
};
#define CIPHER_INFO_NO_WEAK_KEY 1
#include "cipher-proto.h"
/* The internal encryption modes. */
/* Cipher modes used only inside the library; they start above the
 * public GCRY_CIPHER_MODE_* range so they can never clash with values
 * passed in by applications.  */
enum gcry_cipher_internal_modes
  {
    GCRY_CIPHER_MODE_INTERNAL = 0x10000,
    GCRY_CIPHER_MODE_CMAC     = 0x10000 + 1   /* Cipher-based MAC. */
  };
/*-- cipher.c --*/
gcry_err_code_t _gcry_cipher_open_internal (gcry_cipher_hd_t *handle,
int algo, int mode,
unsigned int flags);
/*-- cipher-cmac.c --*/
gcry_err_code_t _gcry_cipher_cmac_authenticate
/* */ (gcry_cipher_hd_t c, const unsigned char *abuf, size_t abuflen);
gcry_err_code_t _gcry_cipher_cmac_get_tag
/* */ (gcry_cipher_hd_t c,
unsigned char *outtag, size_t taglen);
gcry_err_code_t _gcry_cipher_cmac_check_tag
/* */ (gcry_cipher_hd_t c,
const unsigned char *intag, size_t taglen);
gcry_err_code_t _gcry_cipher_cmac_set_subkeys
/* */ (gcry_cipher_hd_t c);
/*-- rmd160.c --*/
void _gcry_rmd160_hash_buffer (void *outbuf,
const void *buffer, size_t length);
/*-- sha1.c --*/
void _gcry_sha1_hash_buffer (void *outbuf,
const void *buffer, size_t length);
void _gcry_sha1_hash_buffers (void *outbuf,
const gcry_buffer_t *iov, int iovcnt);
+
+/*-- sha256.c --*/
+void _gcry_sha256_hash_buffer (void *outbuf,
+ const void *buffer, size_t length);
+void _gcry_sha256_hash_buffers (void *outbuf,
+ const gcry_buffer_t *iov, int iovcnt);
+
+/*-- sha512.c --*/
+void _gcry_sha512_hash_buffer (void *outbuf,
+ const void *buffer, size_t length);
+void _gcry_sha512_hash_buffers (void *outbuf,
+ const gcry_buffer_t *iov, int iovcnt);
+
/*-- blake2.c --*/
gcry_err_code_t _gcry_blake2_init_with_key(void *ctx, unsigned int flags,
const unsigned char *key,
size_t keylen, int algo);
/*-- rijndael.c --*/
void _gcry_aes_cfb_enc (void *context, unsigned char *iv,
void *outbuf, const void *inbuf,
size_t nblocks);
void _gcry_aes_cfb_dec (void *context, unsigned char *iv,
void *outbuf_arg, const void *inbuf_arg,
size_t nblocks);
void _gcry_aes_cbc_enc (void *context, unsigned char *iv,
void *outbuf_arg, const void *inbuf_arg,
size_t nblocks, int cbc_mac);
void _gcry_aes_cbc_dec (void *context, unsigned char *iv,
void *outbuf_arg, const void *inbuf_arg,
size_t nblocks);
void _gcry_aes_ctr_enc (void *context, unsigned char *ctr,
void *outbuf_arg, const void *inbuf_arg,
size_t nblocks);
size_t _gcry_aes_ocb_crypt (gcry_cipher_hd_t c, void *outbuf_arg,
const void *inbuf_arg, size_t nblocks, int encrypt);
size_t _gcry_aes_ocb_auth (gcry_cipher_hd_t c, const void *abuf_arg,
size_t nblocks);
/*-- blowfish.c --*/
void _gcry_blowfish_cfb_dec (void *context, unsigned char *iv,
void *outbuf_arg, const void *inbuf_arg,
size_t nblocks);
void _gcry_blowfish_cbc_dec (void *context, unsigned char *iv,
void *outbuf_arg, const void *inbuf_arg,
size_t nblocks);
void _gcry_blowfish_ctr_enc (void *context, unsigned char *ctr,
void *outbuf_arg, const void *inbuf_arg,
size_t nblocks);
/*-- cast5.c --*/
void _gcry_cast5_cfb_dec (void *context, unsigned char *iv,
void *outbuf_arg, const void *inbuf_arg,
size_t nblocks);
void _gcry_cast5_cbc_dec (void *context, unsigned char *iv,
void *outbuf_arg, const void *inbuf_arg,
size_t nblocks);
void _gcry_cast5_ctr_enc (void *context, unsigned char *ctr,
void *outbuf_arg, const void *inbuf_arg,
size_t nblocks);
/*-- camellia-glue.c --*/
void _gcry_camellia_ctr_enc (void *context, unsigned char *ctr,
void *outbuf_arg, const void *inbuf_arg,
size_t nblocks);
void _gcry_camellia_cbc_dec (void *context, unsigned char *iv,
void *outbuf_arg, const void *inbuf_arg,
size_t nblocks);
void _gcry_camellia_cfb_dec (void *context, unsigned char *iv,
void *outbuf_arg, const void *inbuf_arg,
size_t nblocks);
size_t _gcry_camellia_ocb_crypt (gcry_cipher_hd_t c, void *outbuf_arg,
const void *inbuf_arg, size_t nblocks,
int encrypt);
size_t _gcry_camellia_ocb_auth (gcry_cipher_hd_t c, const void *abuf_arg,
size_t nblocks);
/*-- des.c --*/
void _gcry_3des_ctr_enc (void *context, unsigned char *ctr,
void *outbuf_arg, const void *inbuf_arg,
size_t nblocks);
void _gcry_3des_cbc_dec (void *context, unsigned char *iv,
void *outbuf_arg, const void *inbuf_arg,
size_t nblocks);
void _gcry_3des_cfb_dec (void *context, unsigned char *iv,
void *outbuf_arg, const void *inbuf_arg,
size_t nblocks);
/*-- serpent.c --*/
void _gcry_serpent_ctr_enc (void *context, unsigned char *ctr,
void *outbuf_arg, const void *inbuf_arg,
size_t nblocks);
void _gcry_serpent_cbc_dec (void *context, unsigned char *iv,
void *outbuf_arg, const void *inbuf_arg,
size_t nblocks);
void _gcry_serpent_cfb_dec (void *context, unsigned char *iv,
void *outbuf_arg, const void *inbuf_arg,
size_t nblocks);
size_t _gcry_serpent_ocb_crypt (gcry_cipher_hd_t c, void *outbuf_arg,
const void *inbuf_arg, size_t nblocks,
int encrypt);
size_t _gcry_serpent_ocb_auth (gcry_cipher_hd_t c, const void *abuf_arg,
size_t nblocks);
/*-- twofish.c --*/
void _gcry_twofish_ctr_enc (void *context, unsigned char *ctr,
void *outbuf_arg, const void *inbuf_arg,
size_t nblocks);
void _gcry_twofish_cbc_dec (void *context, unsigned char *iv,
void *outbuf_arg, const void *inbuf_arg,
size_t nblocks);
void _gcry_twofish_cfb_dec (void *context, unsigned char *iv,
void *outbuf_arg, const void *inbuf_arg,
size_t nblocks);
size_t _gcry_twofish_ocb_crypt (gcry_cipher_hd_t c, void *outbuf_arg,
const void *inbuf_arg, size_t nblocks,
int encrypt);
size_t _gcry_twofish_ocb_auth (gcry_cipher_hd_t c, const void *abuf_arg,
size_t nblocks);
/*-- dsa.c --*/
void _gcry_register_pk_dsa_progress (gcry_handler_progress_t cbc, void *cb_data);
/*-- elgamal.c --*/
void _gcry_register_pk_elg_progress (gcry_handler_progress_t cb,
void *cb_data);
/*-- ecc.c --*/
void _gcry_register_pk_ecc_progress (gcry_handler_progress_t cbc,
void *cb_data);
/*-- primegen.c --*/
void _gcry_register_primegen_progress (gcry_handler_progress_t cb,
void *cb_data);
/*-- pubkey.c --*/
/* Declarations for the cipher specifications. */
extern gcry_cipher_spec_t _gcry_cipher_spec_blowfish;
extern gcry_cipher_spec_t _gcry_cipher_spec_des;
extern gcry_cipher_spec_t _gcry_cipher_spec_tripledes;
extern gcry_cipher_spec_t _gcry_cipher_spec_arcfour;
extern gcry_cipher_spec_t _gcry_cipher_spec_cast5;
extern gcry_cipher_spec_t _gcry_cipher_spec_aes;
extern gcry_cipher_spec_t _gcry_cipher_spec_aes192;
extern gcry_cipher_spec_t _gcry_cipher_spec_aes256;
extern gcry_cipher_spec_t _gcry_cipher_spec_twofish;
extern gcry_cipher_spec_t _gcry_cipher_spec_twofish128;
extern gcry_cipher_spec_t _gcry_cipher_spec_serpent128;
extern gcry_cipher_spec_t _gcry_cipher_spec_serpent192;
extern gcry_cipher_spec_t _gcry_cipher_spec_serpent256;
extern gcry_cipher_spec_t _gcry_cipher_spec_rfc2268_40;
extern gcry_cipher_spec_t _gcry_cipher_spec_rfc2268_128;
extern gcry_cipher_spec_t _gcry_cipher_spec_seed;
extern gcry_cipher_spec_t _gcry_cipher_spec_camellia128;
extern gcry_cipher_spec_t _gcry_cipher_spec_camellia192;
extern gcry_cipher_spec_t _gcry_cipher_spec_camellia256;
extern gcry_cipher_spec_t _gcry_cipher_spec_idea;
extern gcry_cipher_spec_t _gcry_cipher_spec_salsa20;
extern gcry_cipher_spec_t _gcry_cipher_spec_salsa20r12;
extern gcry_cipher_spec_t _gcry_cipher_spec_gost28147;
extern gcry_cipher_spec_t _gcry_cipher_spec_chacha20;
/* Declarations for the digest specifications. */
extern gcry_md_spec_t _gcry_digest_spec_crc32;
extern gcry_md_spec_t _gcry_digest_spec_crc32_rfc1510;
extern gcry_md_spec_t _gcry_digest_spec_crc24_rfc2440;
extern gcry_md_spec_t _gcry_digest_spec_gost3411_94;
extern gcry_md_spec_t _gcry_digest_spec_gost3411_cp;
extern gcry_md_spec_t _gcry_digest_spec_stribog_256;
extern gcry_md_spec_t _gcry_digest_spec_stribog_512;
extern gcry_md_spec_t _gcry_digest_spec_md2;
extern gcry_md_spec_t _gcry_digest_spec_md4;
extern gcry_md_spec_t _gcry_digest_spec_md5;
extern gcry_md_spec_t _gcry_digest_spec_rmd160;
extern gcry_md_spec_t _gcry_digest_spec_sha1;
extern gcry_md_spec_t _gcry_digest_spec_sha224;
extern gcry_md_spec_t _gcry_digest_spec_sha256;
extern gcry_md_spec_t _gcry_digest_spec_sha384;
extern gcry_md_spec_t _gcry_digest_spec_sha512;
extern gcry_md_spec_t _gcry_digest_spec_sha3_224;
extern gcry_md_spec_t _gcry_digest_spec_sha3_256;
extern gcry_md_spec_t _gcry_digest_spec_sha3_512;
extern gcry_md_spec_t _gcry_digest_spec_sha3_384;
extern gcry_md_spec_t _gcry_digest_spec_shake128;
extern gcry_md_spec_t _gcry_digest_spec_shake256;
extern gcry_md_spec_t _gcry_digest_spec_tiger;
extern gcry_md_spec_t _gcry_digest_spec_tiger1;
extern gcry_md_spec_t _gcry_digest_spec_tiger2;
extern gcry_md_spec_t _gcry_digest_spec_whirlpool;
extern gcry_md_spec_t _gcry_digest_spec_blake2b_512;
extern gcry_md_spec_t _gcry_digest_spec_blake2b_384;
extern gcry_md_spec_t _gcry_digest_spec_blake2b_256;
extern gcry_md_spec_t _gcry_digest_spec_blake2b_160;
extern gcry_md_spec_t _gcry_digest_spec_blake2s_256;
extern gcry_md_spec_t _gcry_digest_spec_blake2s_224;
extern gcry_md_spec_t _gcry_digest_spec_blake2s_160;
extern gcry_md_spec_t _gcry_digest_spec_blake2s_128;
/* Declarations for the pubkey cipher specifications. */
extern gcry_pk_spec_t _gcry_pubkey_spec_rsa;
extern gcry_pk_spec_t _gcry_pubkey_spec_elg;
extern gcry_pk_spec_t _gcry_pubkey_spec_elg_e;
extern gcry_pk_spec_t _gcry_pubkey_spec_dsa;
extern gcry_pk_spec_t _gcry_pubkey_spec_ecc;
#endif /*G10_CIPHER_H*/