Page MenuHome GnuPG

No OneTemporary

diff --git a/mpi/mpiutil.c b/mpi/mpiutil.c
index 9625693c..b254e12f 100644
--- a/mpi/mpiutil.c
+++ b/mpi/mpiutil.c
@@ -1,792 +1,790 @@
/* mpiutil.ac - Utility functions for MPI
* Copyright (C) 1998, 2000, 2001, 2002, 2003,
* 2007 Free Software Foundation, Inc.
* Copyright (C) 2013 g10 Code GmbH
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <config.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "g10lib.h"
#include "mpi-internal.h"
#include "mod-source-info.h"
#include "const-time.h"
/* Largest value representable in an `unsigned int', selected at
   compile time from the configure-detected SIZEOF_UNSIGNED_INT.
   Used by _gcry_mpi_get_ui to reject limb values that do not fit
   into an unsigned int.  */
#if SIZEOF_UNSIGNED_INT == 2
# define MY_UINT_MAX 0xffff
/* (visual check:  0123 ) */
#elif SIZEOF_UNSIGNED_INT == 4
# define MY_UINT_MAX 0xffffffff
/* (visual check:  01234567 ) */
#elif SIZEOF_UNSIGNED_INT == 8
# define MY_UINT_MAX 0xffffffffffffffff
/* (visual check:  0123456789abcdef ) */
#else
# error Need MY_UINT_MAX for this limb size
#endif

/* Constants allocated right away at startup.  Filled by
   _gcry_mpi_init and handed out by _gcry_mpi_const; they are flagged
   immutable+constant and are never released.  */
static gcry_mpi_t constants[MPI_NUMBER_OF_CONSTANTS];
/* Return a static string describing the source/hardware configuration
   of the MPI code.  The first byte of mod_source_info is skipped here
   (presumably a marker byte -- TODO confirm against mod-source-info.h).
   The returned string must not be modified or freed.  */
const char *
_gcry_mpi_get_hw_config (void)
{
  return mod_source_info + 1;
}
/* Initialize the MPI subsystem.  This is called early and allows
   doing some initialization without taking care of threading issues.
   It allocates the small constant MPIs (0,1,2,3,4,8) and marks them
   immutable (flag 16) and constant (flag 32) so that _gcry_mpi_free
   never releases them.  Returns 0 on success.  */
gcry_err_code_t
_gcry_mpi_init (void)
{
  int idx;
  unsigned long value;

  for (idx=0; idx < MPI_NUMBER_OF_CONSTANTS; idx++)
    {
      /* Map the constant selector to its numeric value.  log_bug does
         not return, so VALUE is always set when we fall through.  */
      switch (idx)
        {
        case MPI_C_ZERO:  value = 0; break;
        case MPI_C_ONE:   value = 1; break;
        case MPI_C_TWO:   value = 2; break;
        case MPI_C_THREE: value = 3; break;
        case MPI_C_FOUR:  value = 4; break;
        case MPI_C_EIGHT: value = 8; break;
        default: log_bug ("invalid mpi_const selector %d\n", idx);
        }
      constants[idx] = mpi_alloc_set_ui (value);
      /* 16 = immutable, 32 = constant.  */
      constants[idx]->flags = (16|32);
    }
  return 0;
}
/****************
 * Allocate a new MPI descriptor with room for NLIMBS limbs in
 * standard (non-secure) memory.  With NLIMBS==0 no limb space is
 * allocated at all.  Historical note: sizing by limbs is awkward
 * because limb width is platform dependent; new code should prefer
 * mpi_new, which takes a bit count.
 */
gcry_mpi_t
_gcry_mpi_alloc( unsigned nlimbs )
{
  gcry_mpi_t r;

  r = xmalloc (sizeof *r);
  r->alloced = nlimbs;
  r->nlimbs = 0;
  r->sign = 0;
  r->flags = 0;
  r->d = nlimbs ? mpi_alloc_limb_space (nlimbs, 0) : NULL;
  return r;
}
/* Like _gcry_mpi_alloc but the limb space is taken from secure
   memory and the MPI is flagged secure (flag bit 1).  */
gcry_mpi_t
_gcry_mpi_alloc_secure( unsigned nlimbs )
{
  gcry_mpi_t r;

  r = xmalloc (sizeof *r);
  r->alloced = nlimbs;
  r->nlimbs = 0;
  r->sign = 0;
  r->flags = 1;   /* Secure memory.  */
  r->d = nlimbs ? mpi_alloc_limb_space (nlimbs, 1) : NULL;
  return r;
}
/* Allocate space for NLIMBS limbs, from secure memory if SECURE is
   true.  At least one limb is always allocated; when NLIMBS is zero
   that single limb is cleared so callers see a defined value.  */
mpi_ptr_t
_gcry_mpi_alloc_limb_space( unsigned int nlimbs, int secure )
{
  size_t nbytes;
  mpi_ptr_t limbs;

  nbytes = (nlimbs ? nlimbs : 1) * sizeof (mpi_limb_t);
  limbs = secure ? xmalloc_secure (nbytes) : xmalloc (nbytes);
  if (!nlimbs)
    *limbs = 0;
  return limbs;
}
/* Release limb space A of NLIMBS limbs.  The area is wiped before
   being freed: this is a failsafe in case secure memory has been
   disabled or user-provided allocators do not scrub on free.  */
void
_gcry_mpi_free_limb_space( mpi_ptr_t a, unsigned int nlimbs)
{
  size_t nbytes;

  if (!a)
    return;
  nbytes = nlimbs * sizeof (mpi_limb_t);
  if (nbytes)
    wipememory (a, nbytes);
  xfree (a);
}
/* Replace the limb space of A by AP (of NLIMBS limbs).  The old limb
   space is wiped and released; ownership of AP transfers to A.  */
void
_gcry_mpi_assign_limb_space( gcry_mpi_t a, mpi_ptr_t ap, unsigned int nlimbs )
{
  _gcry_mpi_free_limb_space (a->d, a->alloced);
  a->d = ap;
  a->alloced = nlimbs;
}
/****************
 * Resize the array of A to NLIMBS.  The additional space is cleared
 * (set to 0).  If the current allocation already suffices, only the
 * limbs between A->nlimbs and A->alloced are zeroed.  Secure MPIs
 * (flag bit 1) get their new space from secure memory.
 */
void
_gcry_mpi_resize (gcry_mpi_t a, unsigned nlimbs)
{
  size_t i;

  if (nlimbs <= a->alloced)
    {
      /* We only need to clear the new space (this is a nop if the
         limb space is already of the correct size.  */
      for (i=a->nlimbs; i < a->alloced; i++)
        a->d[i] = 0;
      return;
    }

  /* Actually resize the limb space.  */
  if (a->d)
    {
      /* Grow in place; xrealloc does not clear, so zero the tail.  */
      a->d = xrealloc (a->d, nlimbs * sizeof (mpi_limb_t));
      for (i=a->nlimbs; i < nlimbs; i++)
        a->d[i] = 0;
    }
  else
    {
      if (a->flags & 1)
        /* Secure memory is wanted.  */
        a->d = xcalloc_secure (nlimbs , sizeof (mpi_limb_t));
      else
        /* Standard memory.  */
        a->d = xcalloc (nlimbs , sizeof (mpi_limb_t));
    }
  a->alloced = nlimbs;
}
/* Set A to zero by dropping all used limbs and clearing the flags.
   The limb space itself is kept (not wiped here).  Immutable MPIs
   are left unchanged after reporting the violation.  */
void
_gcry_mpi_clear( gcry_mpi_t a )
{
  if (mpi_is_immutable (a))
    {
      mpi_immutable_failed ();
      return;
    }
  a->nlimbs = 0;
  a->flags = 0;
}
/* Release the MPI A and its limb space.  Constant MPIs (flag 32) are
   intentionally never freed; they are reported to the leak checker
   as intentional leaks instead.  Opaque MPIs (flag 4) carry a plain
   byte buffer in D rather than limb space.
   NOTE(review): this block contained diff-marker residue ("-#if",
   "-#endif") that is not valid C; resolved to the patched version,
   which calls gpgrt_annotate_leaked_object unconditionally.  */
void
_gcry_mpi_free( gcry_mpi_t a )
{
  if (!a )
    return;
  if ((a->flags & 32))
    {
      gpgrt_annotate_leaked_object(a);
      return; /* Never release a constant. */
    }
  if ((a->flags & 4))
    xfree( a->d );            /* Opaque: plain buffer.  */
  else
    {
      _gcry_mpi_free_limb_space(a->d, a->alloced);
    }
  /* Check that the flags make sense.  We better allow for bit 1
     (value 2) for backward ABI compatibility.  */
  if ((a->flags & ~(1|2|4|16
                    |GCRYMPI_FLAG_USER1
                    |GCRYMPI_FLAG_USER2
                    |GCRYMPI_FLAG_USER3
                    |GCRYMPI_FLAG_USER4)))
    log_bug("invalid flag value in mpi_free\n");
  xfree (a);
}
/* Central warning hook invoked whenever a mutating operation is
   attempted on an MPI flagged immutable.  Logs and returns; the
   caller is expected to abort the mutation.  */
void
_gcry_mpi_immutable_failed (void)
{
  log_info ("Warning: trying to change an immutable MPI\n");
}
/* Move the limb space of A into secure memory and set the secure
   flag (bit 1).  A no-op when A is already secure.  An empty MPI
   (nlimbs==0) only gets the flag; it must have no limb space.  */
static void
mpi_set_secure( gcry_mpi_t a )
{
  mpi_ptr_t ap, bp;

  if ( (a->flags & 1) )
    return;
  a->flags |= 1;
  ap = a->d;
  if (!a->nlimbs)
    {
      gcry_assert (!ap);
      return;
    }
  /* Allocate the full ALLOCED size but only the used NLIMBS are
     copied; the tail of the new buffer is left undefined.  */
  bp = mpi_alloc_limb_space (a->alloced, 1);
  MPN_COPY( bp, ap, a->nlimbs );
  a->d = bp;
  /* The old (non-secure) space is wiped on release.  */
  _gcry_mpi_free_limb_space (ap, a->alloced);
}
/* Turn A into an opaque MPI holding the buffer P of NBITS bits.
   Ownership of P transfers to A.  If A is NULL a fresh MPI is
   allocated.  The bit count is stored in the SIGN field (opaque MPIs
   reuse it) and flag 4 marks the MPI opaque; user flags are kept.
   Returns A (unchanged if it was immutable).  */
gcry_mpi_t
_gcry_mpi_set_opaque (gcry_mpi_t a, void *p, unsigned int nbits)
{
  if (!a)
    a = mpi_alloc(0);

  if (mpi_is_immutable (a))
    {
      mpi_immutable_failed ();
      return a;
    }

  /* Release the previous payload: opaque buffer or limb space.  */
  if( a->flags & 4 )
    xfree (a->d);
  else
    _gcry_mpi_free_limb_space (a->d, a->alloced);

  a->d = p;
  a->alloced = 0;
  a->nlimbs = 0;
  a->sign  = nbits;   /* Opaque MPIs keep the bit count here.  */
  a->flags = 4 | (a->flags & (GCRYMPI_FLAG_USER1|GCRYMPI_FLAG_USER2
                              |GCRYMPI_FLAG_USER3|GCRYMPI_FLAG_USER4));
  /* Inherit the secure flag from the buffer's memory class.  */
  if (_gcry_is_secure (a->d))
    a->flags |= 1;
  return a;
}
/* Like _gcry_mpi_set_opaque but store a copy of the NBITS-bit buffer
   P instead of taking ownership.  The copy lives in secure memory if
   P does.  Returns NULL on allocation failure.  */
gcry_mpi_t
_gcry_mpi_set_opaque_copy (gcry_mpi_t a, const void *p, unsigned int nbits)
{
  unsigned int nbytes = (nbits+7)/8;
  void *buf;

  buf = _gcry_is_secure (p) ? xtrymalloc_secure (nbytes)
                            : xtrymalloc (nbytes);
  if (!buf)
    return NULL;
  memcpy (buf, p, nbytes);
  return mpi_set_opaque (a, buf, nbits);
}
/* Return the internal buffer of the opaque MPI A and, if NBITS is
   not NULL, store its size in bits there.  The buffer remains owned
   by A.  Calling this on a non-opaque MPI is a bug.  */
void *
_gcry_mpi_get_opaque (gcry_mpi_t a, unsigned int *nbits)
{
  if( !(a->flags & 4) )
    log_bug("mpi_get_opaque on normal mpi\n");
  if( nbits )
    *nbits = a->sign;   /* Opaque MPIs keep the bit count in SIGN.  */
  return a->d;
}
/* Return a freshly allocated copy of the opaque MPI A's buffer and
   store its size in bits at NBITS.  The copy lives in secure memory
   if the original does.  Returns NULL if there is no data, if NBITS
   is NULL (we would have no way to report the size), or on
   allocation failure.  The caller must xfree the result.
   Fix: the old guard (!s && nbits) still dereferenced NBITS below
   when NBITS was NULL; require both S and NBITS to be non-NULL.  */
void *
_gcry_mpi_get_opaque_copy (gcry_mpi_t a, unsigned int *nbits)
{
  const void *s;
  void *d;
  unsigned int n;

  s = mpi_get_opaque (a, nbits);
  if (!s || !nbits)
    return NULL;
  n = (*nbits+7)/8;
  d = _gcry_is_secure (s)? xtrymalloc_secure (n) : xtrymalloc (n);
  if (d)
    memcpy (d, s, n);
  return d;
}
/****************
 * Note: This copy function should not interpret the MPI
 * but copy it transparently.  Opaque MPIs get their byte buffer
 * duplicated (same memory class); normal MPIs get a limb-wise copy.
 * The immutable and constant flags are dropped so the copy is
 * writable.  Returns NULL iff A is NULL.
 */
gcry_mpi_t
_gcry_mpi_copy (gcry_mpi_t a)
{
  int i;
  gcry_mpi_t b;

  if( a && (a->flags & 4) ) {
      /* Opaque MPI: duplicate the raw buffer of a->sign bits.  */
      void *p = NULL;
      if (a->sign) {
        p = _gcry_is_secure(a->d)? xmalloc_secure ((a->sign+7)/8)
                                 : xmalloc ((a->sign+7)/8);
        if (a->d)
          memcpy( p, a->d, (a->sign+7)/8 );
      }
      b = mpi_set_opaque( NULL, p, a->sign );
      b->flags = a->flags;
      b->flags &= ~(16|32); /* Reset the immutable and constant flags. */
  }
  else if( a ) {
      /* Normal MPI: allocate from the same memory class and copy the
         used limbs one by one.  */
      b = mpi_is_secure(a)? mpi_alloc_secure( a->nlimbs )
                          : mpi_alloc( a->nlimbs );
      b->nlimbs = a->nlimbs;
      b->sign = a->sign;
      b->flags = a->flags;
      b->flags &= ~(16|32); /* Reset the immutable and constant flags. */
      for(i=0; i < b->nlimbs; i++ )
        b->d[i] = a->d[i];
  }
  else
    b = NULL;
  return b;
}
/* Return true if A is negative.  A value of zero is never negative,
   even when its sign field happens to be set.  */
int
_gcry_mpi_is_neg (gcry_mpi_t a)
{
  return (a->sign && _gcry_mpi_cmp_ui (a, 0)) ? 1 : 0;
}
/* W = - U */
void
_gcry_mpi_neg (gcry_mpi_t w, gcry_mpi_t u)
{
  /* Copy first when the operands differ; the immutability check is
     only done for in-place negation (mpi_set performs its own check
     in the copying case).  */
  if (w != u)
    mpi_set (w, u);
  else if (mpi_is_immutable (w))
    {
      mpi_immutable_failed ();
      return;
    }

  /* NOTE(review): this flips the sign unconditionally, so negating
     zero yields a "negative zero" sign field -- long-standing
    behavior; callers appear to rely on _gcry_mpi_is_neg to mask it.  */
  w->sign = !u->sign;
}
/* W = |W| : clear the sign of W in place.  Immutable MPIs are left
   untouched after reporting the violation.  */
void
_gcry_mpi_abs (gcry_mpi_t w)
{
  if (!mpi_is_immutable (w))
    w->sign = 0;
  else
    mpi_immutable_failed ();
}
/****************
 * This function allocates an MPI which is optimized to hold
 * a value as large as the one given in the argument and allocates it
 * with the same flags as A.  For an opaque A the byte buffer is
 * duplicated as well.  Returns NULL if A is NULL or - new behavior -
 * if the buffer for an opaque copy cannot be allocated (previously
 * an out-of-memory here crashed in memcpy on a NULL pointer).
 */
gcry_mpi_t
_gcry_mpi_alloc_like( gcry_mpi_t a )
{
  gcry_mpi_t b;

  if( a && (a->flags & 4) ) {
      int n = (a->sign+7)/8;
      void *p = _gcry_is_secure(a->d)? xtrymalloc_secure (n)
                                     : xtrymalloc (n);
      if (!p)
        return NULL;  /* Fix: do not memcpy into a failed allocation.  */
      if (a->d)
        memcpy( p, a->d, n );
      b = mpi_set_opaque( NULL, p, a->sign );
  }
  else if( a ) {
      b = mpi_is_secure(a)? mpi_alloc_secure( a->nlimbs )
                          : mpi_alloc( a->nlimbs );
      /* Same capacity as A but logically empty.  */
      b->nlimbs = 0;
      b->sign = 0;
      b->flags = a->flags;
  }
  else
    b = NULL;
  return b;
}
/* Set U into W and release U.  If W is NULL only U will be released.
   The limb space of U is moved into W (no copy); U's descriptor is
   then emptied and freed.  If W is immutable nothing happens and U
   is kept alive (the violation is only logged).  */
void
_gcry_mpi_snatch (gcry_mpi_t w, gcry_mpi_t u)
{
  if (w)
    {
      if (mpi_is_immutable (w))
        {
          mpi_immutable_failed ();
          return;
        }
      /* Transfer ownership of U's limbs to W.  */
      _gcry_mpi_assign_limb_space (w, u->d, u->alloced);
      w->nlimbs = u->nlimbs;
      w->sign   = u->sign;
      w->flags  = u->flags;
      /* Detach the limbs from U so freeing U below cannot touch them.  */
      u->alloced = 0;
      u->nlimbs = 0;
      u->d = NULL;
    }
  _gcry_mpi_free (u);
}
/* W = U.  Allocate W if it is NULL.  The used limbs, sign and flags
   of U are copied; the immutable and constant flags are cleared on W
   so the result is writable.  Returns W (unchanged if immutable).  */
gcry_mpi_t
_gcry_mpi_set (gcry_mpi_t w, gcry_mpi_t u)
{
  mpi_ptr_t wp, up;
  mpi_size_t usize = u->nlimbs;
  int usign = u->sign;

  if (!w)
    w = _gcry_mpi_alloc( mpi_get_nlimbs(u) );
  if (mpi_is_immutable (w))
    {
      mpi_immutable_failed ();
      return w;
    }
  RESIZE_IF_NEEDED(w, usize);
  wp = w->d;
  up = u->d;
  MPN_COPY( wp, up, usize );
  w->nlimbs = usize;
  w->flags = u->flags;
  w->flags &= ~(16|32); /* Reset the immutable and constant flags.  */
  w->sign = usign;
  return w;
}
/****************
 * Set the value of W by the one of U, when SET is 1.
 * Leave the value when SET is 0.
 * This implementation should be constant-time regardless of SET.
 * Both operands must have the same ALLOCED size; all alloced limbs
 * are touched either way so the memory access pattern does not
 * depend on SET.  Do not "simplify" the masking below.
 */
gcry_mpi_t
_gcry_mpi_set_cond (gcry_mpi_t w, const gcry_mpi_t u, unsigned long set)
{
  /* Note: dual mask with AND/OR used for EM leakage mitigation */
  mpi_limb_t mask1 = ct_limb_gen_mask(set);      /* all-ones iff SET */
  mpi_limb_t mask2 = ct_limb_gen_inv_mask(set);  /* complement mask */
  mpi_size_t i;
  mpi_size_t nlimbs = u->alloced;
  mpi_limb_t xu;
  mpi_limb_t xw;
  mpi_limb_t *uu = u->d;
  mpi_limb_t *uw = w->d;

  if (w->alloced != u->alloced)
    log_bug ("mpi_set_cond: different sizes\n");

  /* Limb-wise select: uw[i] stays (mask2) or becomes uu[i] (mask1).  */
  for (i = 0; i < nlimbs; i++)
    {
      xu = uu[i];
      xw = uw[i];
      uw[i] = (xw & mask2) | (xu & mask1);
    }

  /* Select the metadata with the same masks.  */
  xu = u->nlimbs;
  xw = w->nlimbs;
  w->nlimbs = (xw & mask2) | (xu & mask1);

  xu = u->sign;
  xw = w->sign;
  w->sign = (xw & mask2) | (xu & mask1);
  return w;
}
/* W = U for an unsigned long U.  Allocate W if it is NULL; clear
   sign and all flags on the result.  Returns W (unchanged if
   immutable).  */
gcry_mpi_t
_gcry_mpi_set_ui (gcry_mpi_t w, unsigned long u)
{
  if (!w)
    w = _gcry_mpi_alloc (1);
  /* FIXME: If U is 0 we have no need to resize and thus possibly
     allocating the limbs.  */
  if (mpi_is_immutable (w))
    {
      mpi_immutable_failed ();
      return w;
    }
  RESIZE_IF_NEEDED(w, 1);
  w->d[0] = u;
  w->nlimbs = u? 1:0;   /* Zero is represented with no used limbs.  */
  w->sign = 0;
  w->flags = 0;
  return w;
}
/* If U is non-negative and small enough store it as an unsigned int
 * at W.  If the value does not fit into an unsigned int or is
 * negative return GPG_ERR_ERANGE.  Note that we return an unsigned
 * int so that the value can be used with the bit test functions; in
 * contrast the other _ui functions take an unsigned long so that on
 * some platforms they may accept a larger value.  On error the value
 * at W is not changed. */
gcry_err_code_t
_gcry_mpi_get_ui (unsigned int *w, gcry_mpi_t u)
{
  mpi_limb_t x;

  /* More than one used limb can never fit; negative values are
     rejected outright.  */
  if (u->nlimbs > 1 || u->sign)
    return GPG_ERR_ERANGE;

  x = (u->nlimbs == 1) ? u->d[0] : 0;
  /* When a limb is wider than an unsigned int, also reject values
     exceeding MY_UINT_MAX (compile-time constant, see above).  */
  if (sizeof (x) > sizeof (unsigned int) && x > MY_UINT_MAX)
    return GPG_ERR_ERANGE;

  *w = x;
  return 0;
}
/* Allocate a fresh one-limb MPI and initialize it to the unsigned
   value U.  Zero is stored with no used limbs.  */
gcry_mpi_t
_gcry_mpi_alloc_set_ui( unsigned long u)
{
  gcry_mpi_t w;

  w = mpi_alloc (1);
  w->d[0] = u;
  w->nlimbs = !u ? 0 : 1;
  w->sign = 0;
  return w;
}
/* Exchange the contents of A and B by swapping the descriptor
   structs; the limb pointers travel with them, so no limb data is
   copied.  */
void
_gcry_mpi_swap (gcry_mpi_t a, gcry_mpi_t b)
{
  struct gcry_mpi t;

  t = *a;
  *a = *b;
  *b = t;
}
/****************
 * Swap the value of A and B, when SWAP is 1.
 * Leave the value when SWAP is 0.
 * This implementation should be constant-time regardless of SWAP.
 * All limbs up to the smaller ALLOCED size are touched either way so
 * the access pattern is independent of SWAP.  Do not restructure the
 * masking below.
 */
void
_gcry_mpi_swap_cond (gcry_mpi_t a, gcry_mpi_t b, unsigned long swap)
{
  /* Note: dual mask with AND/OR used for EM leakage mitigation */
  mpi_limb_t mask1 = ct_limb_gen_mask(swap);      /* all-ones iff SWAP */
  mpi_limb_t mask2 = ct_limb_gen_inv_mask(swap);  /* complement mask */
  mpi_size_t i;
  mpi_size_t nlimbs;
  mpi_limb_t *ua = a->d;
  mpi_limb_t *ub = b->d;
  mpi_limb_t xa;
  mpi_limb_t xb;

  /* Operate over the smaller allocation; both values must fit in it.  */
  if (a->alloced > b->alloced)
    nlimbs = b->alloced;
  else
    nlimbs = a->alloced;
  if (a->nlimbs > nlimbs || b->nlimbs > nlimbs)
    log_bug ("mpi_swap_cond: different sizes\n");

  /* Limb-wise conditional exchange via masked select.  */
  for (i = 0; i < nlimbs; i++)
    {
      xa = ua[i];
      xb = ub[i];
      ua[i] = (xa & mask2) | (xb & mask1);
      ub[i] = (xa & mask1) | (xb & mask2);
    }

  /* Conditionally exchange the metadata with the same masks.  */
  xa = a->nlimbs;
  xb = b->nlimbs;
  a->nlimbs = (xa & mask2) | (xb & mask1);
  b->nlimbs = (xa & mask1) | (xb & mask2);

  xa = a->sign;
  xb = b->sign;
  a->sign = (xa & mask2) | (xb & mask1);
  b->sign = (xa & mask1) | (xb & mask2);
}
/****************
 * Set bit N of A, when SET is 1.
 * This implementation should be constant-time regardless of SET.
 * When SET is 0 the OR below is with zero, i.e. a no-op without a
 * branch.  NOTE(review): no bounds check -- assumes N falls within
 * A's allocated limbs; caller must guarantee this (TODO confirm).
 */
void
_gcry_mpi_set_bit_cond (gcry_mpi_t a, unsigned int n, unsigned long set)
{
  unsigned int limbno, bitno;
  mpi_limb_t set_the_bit = !!set;   /* Normalize SET to 0 or 1.  */

  limbno = n / BITS_PER_MPI_LIMB;
  bitno  = n % BITS_PER_MPI_LIMB;

  a->d[limbno] |= (set_the_bit<<bitno);
}
/* Allocate a new MPI large enough for NBITS bits (rounded up to
   whole limbs) in standard memory.  */
gcry_mpi_t
_gcry_mpi_new (unsigned int nbits)
{
  unsigned int nlimbs;

  nlimbs = (nbits + BITS_PER_MPI_LIMB - 1) / BITS_PER_MPI_LIMB;
  return _gcry_mpi_alloc (nlimbs);
}
/* Allocate a new MPI large enough for NBITS bits (rounded up to
   whole limbs) in secure memory.  */
gcry_mpi_t
_gcry_mpi_snew (unsigned int nbits)
{
  unsigned int nlimbs;

  nlimbs = (nbits + BITS_PER_MPI_LIMB - 1) / BITS_PER_MPI_LIMB;
  return _gcry_mpi_alloc_secure (nlimbs);
}
/* Public release function; a thin alias for _gcry_mpi_free (which
   handles NULL, opaque buffers and never-freed constants).  */
void
_gcry_mpi_release( gcry_mpi_t a )
{
  _gcry_mpi_free( a );
}
/* Fill W with NBITS bits of random data of quality LEVEL.  The
   random bytes are gathered into a temporary buffer (secure memory
   if W is secure) and then loaded into W as an unsigned big-endian
   value.  Immutable W is left unchanged.  */
void
_gcry_mpi_randomize (gcry_mpi_t w,
                     unsigned int nbits, enum gcry_random_level level)
{
  unsigned char *p;
  size_t nbytes = (nbits+7)/8;

  if (mpi_is_immutable (w))
    {
      mpi_immutable_failed ();
      return;
    }
  if (level == GCRY_WEAK_RANDOM)
    {
      /* Weak quality: a nonce is sufficient; allocate ourselves.  */
      p = mpi_is_secure(w) ? xmalloc_secure (nbytes)
                           : xmalloc (nbytes);
      _gcry_create_nonce (p, nbytes);
    }
  else
    {
      /* Stronger levels: the RNG returns an allocated buffer.  */
      p = mpi_is_secure(w) ? _gcry_random_bytes_secure (nbytes, level)
                           : _gcry_random_bytes (nbytes, level);
    }
  _gcry_mpi_set_buffer( w, p, nbytes, 0 );
  xfree (p);
}
/* Set FLAG on the MPI A.  SECURE moves the limbs into secure memory;
   CONST implies IMMUTABLE (bits 16|32); USER1..4 are caller-defined.
   OPAQUE cannot be set this way (use mpi_set_opaque) and any other
   value is a bug.  */
void
_gcry_mpi_set_flag (gcry_mpi_t a, enum gcry_mpi_flag flag)
{
  switch (flag)
    {
    case GCRYMPI_FLAG_SECURE:    mpi_set_secure(a); break;
    case GCRYMPI_FLAG_CONST:     a->flags |= (16|32); break;
    case GCRYMPI_FLAG_IMMUTABLE: a->flags |= 16; break;

    case GCRYMPI_FLAG_USER1:
    case GCRYMPI_FLAG_USER2:
    case GCRYMPI_FLAG_USER3:
    case GCRYMPI_FLAG_USER4:     a->flags |= flag; break;

    case GCRYMPI_FLAG_OPAQUE:
    default: log_bug("invalid flag value\n");
    }
}
/* Clear FLAG on the MPI A.  Only IMMUTABLE and the USER flags may be
   cleared; IMMUTABLE is kept when the MPI is a constant (bit 32),
   since constants must stay immutable.  Clearing SECURE, CONST or
   OPAQUE this way is a bug.
   Fix: dropped the stale `(void)a; / * Not yet used. * /` -- A is
   used in every reachable arm of the switch.  */
void
_gcry_mpi_clear_flag (gcry_mpi_t a, enum gcry_mpi_flag flag)
{
  switch (flag)
    {
    case GCRYMPI_FLAG_IMMUTABLE:
      /* Never un-protect a constant.  */
      if (!(a->flags & 32))
        a->flags &= ~16;
      break;

    case GCRYMPI_FLAG_USER1:
    case GCRYMPI_FLAG_USER2:
    case GCRYMPI_FLAG_USER3:
    case GCRYMPI_FLAG_USER4:
      a->flags &= ~flag;
      break;

    case GCRYMPI_FLAG_CONST:
    case GCRYMPI_FLAG_SECURE:
    case GCRYMPI_FLAG_OPAQUE:
    default: log_bug("invalid flag value\n");
    }
}
/* Return 1 if FLAG is set on A, else 0.  Flag bits: 1=secure,
   4=opaque, 16=immutable, 32=constant; USER1..4 are tested by their
   own bit values.  Any other selector is a bug.  */
int
_gcry_mpi_get_flag (gcry_mpi_t a, enum gcry_mpi_flag flag)
{
  switch (flag)
    {
    case GCRYMPI_FLAG_SECURE:    return !!(a->flags & 1);
    case GCRYMPI_FLAG_OPAQUE:    return !!(a->flags & 4);
    case GCRYMPI_FLAG_IMMUTABLE: return !!(a->flags & 16);
    case GCRYMPI_FLAG_CONST:     return !!(a->flags & 32);
    case GCRYMPI_FLAG_USER1:
    case GCRYMPI_FLAG_USER2:
    case GCRYMPI_FLAG_USER3:
    case GCRYMPI_FLAG_USER4:     return !!(a->flags & flag);
    default: log_bug("invalid flag value\n");
    }
  /*NOTREACHED*/
  return 0;
}
/* Return a constant MPI described by NO which is one of the
   MPI_C_xxx macros.  There is no need to copy this returned value; it
   may be used directly.
   Fix: the bounds check used `no > MPI_NUMBER_OF_CONSTANTS`, which
   accepts NO == MPI_NUMBER_OF_CONSTANTS and reads one past the end
   of the constants[] array (it has exactly that many elements);
   use >= instead.  Also fixed the "descripbed" typo.  */
gcry_mpi_t
_gcry_mpi_const (enum gcry_mpi_constants no)
{
  if ((int)no < 0 || no >= MPI_NUMBER_OF_CONSTANTS)
    log_bug("invalid mpi_const selector %d\n", no);
  if (!constants[no])
    log_bug("MPI subsystem not initialized\n");
  return constants[no];
}
diff --git a/random/jitterentropy-base.c b/random/jitterentropy-base.c
index cf7630d9..0da2d78e 100644
--- a/random/jitterentropy-base.c
+++ b/random/jitterentropy-base.c
@@ -1,734 +1,737 @@
/*
* Non-physical true random number generator based on timing jitter.
*
* Copyright Stephan Mueller <smueller@chronox.de>, 2014 - 2021
*
* Design
* ======
*
* See documentation in doc/ folder.
*
* Interface
* =========
*
* See documentation in jitterentropy(3) man page.
*
* License: see LICENSE file in root directory
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
* WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
* USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
#include "jitterentropy.h"
#include "jitterentropy-base.h"
#include "jitterentropy-gcd.h"
#include "jitterentropy-health.h"
#include "jitterentropy-noise.h"
#include "jitterentropy-timer.h"
#include "jitterentropy-sha3.h"
#define MAJVERSION 3 /* API / ABI incompatible changes, functional changes that
		      * require consumer to be updated (as long as this number
		      * is zero, the API is not considered stable and can
		      * change without a bump of the major version) */
#define MINVERSION 3 /* API compatible, ABI may change, functional
		      * enhancements only, consumer can be left unchanged if
		      * enhancements are not considered */
#define PATCHLEVEL 0 /* API / ABI compatible, no functional changes, no
		      * enhancements, bug fixes only */

/***************************************************************************
 * Jitter RNG Static Definitions
 *
 * None of the following should be altered
 ***************************************************************************/

/* The entropy source relies on execution-timing jitter; compiler
 * optimization would distort the timing behavior it measures.  */
#ifdef __OPTIMIZE__
 #error "The CPU Jitter random number generator must not be compiled with optimizations. See documentation. Use the compiler switch -O0 for compiling jitterentropy.c."
#endif

/*
 * JENT_POWERUP_TESTLOOPCOUNT needs some loops to identify edge
 * systems. 100 is definitely too little.
 *
 * SP800-90B requires at least 1024 initial test cycles.
 */
#define JENT_POWERUP_TESTLOOPCOUNT 1024
/**
 * jent_version() - Return machine-usable version number of jent library
 *
 * The returned number increases monotonically with newer releases and
 * is a multiple of 100: major * 1000000 + minor * 10000 + patch * 100
 * (e.g. 1.2.3 -> 1020300); the last two digits are reserved for
 * future use.  Callers can compare it when version-specific behavior
 * is needed.
 *
 * @return Version number of jitterentropy library
 */
JENT_PRIVATE_STATIC
unsigned int jent_version(void)
{
	return MAJVERSION * 1000000 + MINVERSION * 10000 + PATCHLEVEL * 100;
}
/***************************************************************************
* Helper
***************************************************************************/
/* Calculate log2 of given value assuming that the value is a power of 2;
 * i.e. return the position of the highest set bit (0 for inputs 0 and 1). */
static inline unsigned int jent_log2_simple(unsigned int val)
{
	unsigned int bits = 0;

	for (val >>= 1; val; val >>= 1)
		bits++;
	return bits;
}
/* Increase the memory size by one step
 *
 * The maximum memory size is encoded in FLAGS.  If no maximum was set
 * yet, start from the size we allocated last round (JENT_MEMORY_SIZE)
 * converted to the flag encoding; otherwise bump the encoded step by
 * one.  The result is capped at the global maximum and written back
 * into the returned flags value.
 */
static inline unsigned int jent_update_memsize(unsigned int flags)
{
	unsigned int global_max = JENT_FLAGS_TO_MAX_MEMSIZE(
						JENT_MAX_MEMSIZE_MAX);
	unsigned int max;

	max = JENT_FLAGS_TO_MAX_MEMSIZE(flags);

	if (!max) {
		/*
		 * The safe starting value is the amount of memory we allocated
		 * last round.
		 */
		max = jent_log2_simple(JENT_MEMORY_SIZE);
		/* Adjust offset */
		max = (max > JENT_MAX_MEMSIZE_OFFSET) ?
		       max - JENT_MAX_MEMSIZE_OFFSET : 0;
	} else {
		max++;
	}

	/* Cap at the globally allowed maximum step.  */
	max = (max > global_max) ? global_max : max;

	/* Clear out the max size */
	flags &= ~JENT_MAX_MEMSIZE_MASK;
	/* Set the freshly calculated max size */
	flags |= JENT_MAX_MEMSIZE_TO_FLAGS(max);

	return flags;
}
/***************************************************************************
 * Random Number Generation
 ***************************************************************************/

/**
 * Entry function: Obtain entropy for the caller.
 *
 * This function invokes the entropy gathering logic as often to generate
 * as many bytes as requested by the caller. The entropy gathering logic
 * creates 64 bit per invocation.
 *
 * This function truncates the last 64 bit entropy value output to the exact
 * size specified by the caller.
 *
 * @ec [in] Reference to entropy collector
 * @data [out] pointer to buffer for storing random data -- buffer must
 *	       already exist
 * @len [in] size of the buffer, specifying also the requested number of random
 *	     in bytes
 *
 * @return number of bytes returned when request is fulfilled or an error
 *
 * The following error codes can occur:
 *	-1	entropy_collector is NULL
 *	-2	RCT failed
 *	-3	APT test failed
 *	-4	The timer cannot be initialized
 *	-5	LAG failure
 */
JENT_PRIVATE_STATIC
ssize_t jent_read_entropy(struct rand_data *ec, char *data, size_t len)
{
	char *p = data;
	size_t orig_len = len;
	int ret = 0;

	if (NULL == ec)
		return -1;

	if (jent_notime_settick(ec))
		return -4;

	while (len > 0) {
		size_t tocopy;
		unsigned int health_test_result;

		/* Produce the next 64 bits into ec->data.  */
		jent_random_data(ec);

		/* Abort with the most specific failure code; RCT takes
		 * precedence over APT, anything else is a lag failure.  */
		if ((health_test_result = jent_health_failure(ec))) {
			if (health_test_result & JENT_RCT_FAILURE)
				ret = -2;
			else if (health_test_result & JENT_APT_FAILURE)
				ret = -3;
			else
				ret = -5;

			goto err;
		}

		/* Copy a full 64-bit block, or the remaining tail.  */
		if ((DATA_SIZE_BITS / 8) < len)
			tocopy = (DATA_SIZE_BITS / 8);
		else
			tocopy = len;

		memcpy(p, &ec->data, tocopy);

		len -= tocopy;
		p += tocopy;
	}

	/*
	 * To be on the safe side, we generate one more round of entropy
	 * which we do not give out to the caller. That round shall ensure
	 * that in case the calling application crashes, memory dumps, pages
	 * out, or due to the CPU Jitter RNG lingering in memory for long
	 * time without being moved and an attacker cracks the application,
	 * all he reads in the entropy pool is a value that is NEVER EVER
	 * being used for anything. Thus, he does NOT see the previous value
	 * that was returned to the caller for cryptographic purposes.
	 */
	/*
	 * If we use secured memory, do not use that precaution as the secure
	 * memory protects the entropy pool. Moreover, note that using this
	 * call reduces the speed of the RNG by up to half
	 */
#ifndef JENT_CPU_JITTERENTROPY_SECURE_MEMORY
	jent_random_data(ec);
#endif

err:
	jent_notime_unsettick(ec);
	return ret ? ret : (ssize_t)orig_len;
}
static struct rand_data *_jent_entropy_collector_alloc(unsigned int osr,
						       unsigned int flags);

/**
 * Entry function: Obtain entropy for the caller.
 *
 * This is a service function to jent_read_entropy() with the difference
 * that it automatically re-allocates the entropy collector if a health
 * test failure is observed. Before reallocation, a new power-on health test
 * is performed. The allocation of the new entropy collector automatically
 * increases the OSR by one. This is done based on the idea that a health
 * test failure indicates that the assumed entropy rate is too high.
 *
 * Note the function returns with an health test error if the OSR is
 * getting too large. If an error is returned by this function, the Jitter RNG
 * is not safe to be used on the current system.
 *
 * @ec [in] Reference to entropy collector - this is a double pointer as
 *	    The entropy collector may be freed and reallocated.
 * @data [out] pointer to buffer for storing random data -- buffer must
 *	       already exist
 * @len [in] size of the buffer, specifying also the requested number of random
 *	     in bytes
 *
 * @return see jent_read_entropy()
 */
JENT_PRIVATE_STATIC
ssize_t jent_read_entropy_safe(struct rand_data **ec, char *data, size_t len)
{
	char *p = data;
	size_t orig_len = len;
	ssize_t ret = 0;

	if (!ec)
		return -1;

	while (len > 0) {
		unsigned int osr, flags, max_mem_set;

		ret = jent_read_entropy(*ec, p, len);

		switch (ret) {
		case -1:	/* NULL collector: not recoverable here.  */
		case -4:	/* Timer init failure: not recoverable.  */
			return ret;
		case -2:	/* RCT failure */
		case -3:	/* APT failure */
		case -5:	/* LAG failure: retry with higher OSR.  */
			osr = (*ec)->osr + 1;
			flags = (*ec)->flags;
			max_mem_set = (*ec)->max_mem_set;

			/* generic arbitrary cutoff */
			if (osr > 20)
				return ret;

			/*
			 * If the caller did not set any specific maximum value
			 * let the Jitter RNG increase the maximum memory by
			 * one step.
			 */
			if (!max_mem_set)
				flags = jent_update_memsize(flags);

			/*
			 * re-allocate entropy collector with higher OSR and
			 * memory size
			 */
			jent_entropy_collector_free(*ec);

			/* Perform new health test with updated OSR */
			if (jent_entropy_init_ex(osr, flags))
				return -1;

			*ec = _jent_entropy_collector_alloc(osr, flags);
			if (!*ec)
				return -1;

			/* Remember whether caller configured memory size */
			(*ec)->max_mem_set = !!max_mem_set;
			break;

		default:
			/* Success: advance past the bytes delivered.  */
			len -= (size_t)ret;
			p += (size_t)ret;
		}
	}

	return (ssize_t)orig_len;
}
/***************************************************************************
 * Initialization logic
 ***************************************************************************/

/*
 * Obtain memory size to allocate for memory access variations.
 *
 * The maximum variations we can get from the memory access is when we allocate
 * a bit more memory than we have as data cache. But allocating as much
 * memory as we have as data cache might strain the resources on the system
 * more than necessary.
 *
 * On a lot of systems it is not necessary to need so much memory as the
 * variations coming from the general Jitter RNG execution commonly provide
 * large amount of variations.
 *
 * Thus, the default is:
 *
 * min(JENT_MEMORY_SIZE, data cache size)
 *
 * In case the data cache size cannot be obtained, use JENT_MEMORY_SIZE.
 *
 * If the caller provides a maximum memory size, use
 * min(provided max memory, data cache size).
 */
static inline uint32_t jent_memsize(unsigned int flags)
{
	uint32_t memsize, max_memsize;

	max_memsize = JENT_FLAGS_TO_MAX_MEMSIZE(flags);

	if (max_memsize == 0) {
		max_memsize = JENT_MEMORY_SIZE;
	} else {
		/* Decode the flag-encoded step into a byte count.  */
		max_memsize = UINT32_C(1) << (max_memsize +
					      JENT_MAX_MEMSIZE_OFFSET);
	}

	/* Allocate memory for adding variations based on memory access */
	memsize = jent_cache_size_roundup();

	/* Limit the memory as defined by caller */
	memsize = (memsize > max_memsize) ? max_memsize : memsize;

	/* Set a value if none was found */
	if (!memsize)
		memsize = JENT_MEMORY_SIZE;

	return memsize;
}
/* Nonzero once the power-up self tests have been run successfully.  */
static int jent_selftest_run = 0;

/* Allocate and initialize an entropy collector with oversampling rate
 * OSR and configuration FLAGS.  Runs the power-up self tests on first
 * use, allocates the memory-access scratch region (unless disabled),
 * seeds the health tests and the common timer GCD, and enables the
 * timer-less noise source when permitted.  Returns NULL on any
 * conflict or allocation/initialization failure.
 *
 * Fixes relative to the pasted version:
 *  - removed diff "+" marker residue around the gpgrt annotation;
 *  - check the mem allocation for NULL before deriving the mask /
 *    block layout from it;
 *  - the error path now frees mem with the actual allocation size
 *    jent_memsize(flags) instead of the constant JENT_MEMORY_SIZE,
 *    which can differ when the caller configured a maximum size.  */
static struct rand_data
*jent_entropy_collector_alloc_internal(unsigned int osr, unsigned int flags)
{
	struct rand_data *entropy_collector;

	/*
	 * Requesting disabling and forcing of internal timer
	 * makes no sense.
	 */
	if ((flags & JENT_DISABLE_INTERNAL_TIMER) &&
	    (flags & JENT_FORCE_INTERNAL_TIMER))
		return NULL;

	/* Force the self test to be run */
	if (!jent_selftest_run && jent_entropy_init_ex(osr, flags))
		return NULL;

	/*
	 * If the initial test code concludes to force the internal timer
	 * and the user requests it not to be used, do not allocate
	 * the Jitter RNG instance.
	 */
	if (jent_notime_forced() && (flags & JENT_DISABLE_INTERNAL_TIMER))
		return NULL;

	entropy_collector = jent_zalloc(sizeof(struct rand_data));
	if (NULL == entropy_collector)
		return NULL;

	if (!(flags & JENT_DISABLE_MEMORY_ACCESS)) {
		uint32_t memsize = jent_memsize(flags);

		entropy_collector->mem = _gcry_calloc (1, memsize);
		if (entropy_collector->mem == NULL)
			goto err;
#ifdef _GCRYPT_IN_LIBGCRYPT
		/* The scratch region lives for the process lifetime;
		 * tell leak checkers the "leak" is intentional.  */
		gpgrt_annotate_leaked_object (entropy_collector->mem);
#endif

#ifdef JENT_RANDOM_MEMACCESS
		/*
		 * Transform the size into a mask - it is assumed that size is
		 * a power of 2.
		 */
		entropy_collector->memmask = memsize - 1;
#else /* JENT_RANDOM_MEMACCESS */
		entropy_collector->memblocksize = memsize / JENT_MEMORY_BLOCKS;
		entropy_collector->memblocks = JENT_MEMORY_BLOCKS;

		/* sanity check */
		if (entropy_collector->memblocksize *
		    entropy_collector->memblocks != memsize)
			goto err;
#endif /* JENT_RANDOM_MEMACCESS */

		entropy_collector->memaccessloops = JENT_MEMORY_ACCESSLOOPS;
	}

	/* verify and set the oversampling rate */
	if (osr < JENT_MIN_OSR)
		osr = JENT_MIN_OSR;
	entropy_collector->osr = osr;
	entropy_collector->flags = flags;

	if (jent_fips_enabled() || (flags & JENT_FORCE_FIPS))
		entropy_collector->fips_enabled = 1;

	/* Initialize the APT */
	jent_apt_init(entropy_collector, osr);

	/* Initialize the Lag Predictor Test */
	jent_lag_init(entropy_collector, osr);

	/* Was jent_entropy_init run (establishing the common GCD)? */
	if (jent_gcd_get(&entropy_collector->jent_common_timer_gcd)) {
		/*
		 * It was not. This should probably be an error, but this
		 * behavior breaks the test code. Set the gcd to a value that
		 * won't hurt anything.
		 */
		entropy_collector->jent_common_timer_gcd = 1;
	}

	/*
	 * Use timer-less noise source - note, OSR must be set in
	 * entropy_collector!
	 */
	if (!(flags & JENT_DISABLE_INTERNAL_TIMER)) {
		if (jent_notime_enable(entropy_collector, flags))
			goto err;
	}

	return entropy_collector;

err:
	if (entropy_collector->mem != NULL)
		jent_zfree(entropy_collector->mem, jent_memsize(flags));
	jent_zfree(entropy_collector, sizeof(struct rand_data));
	return NULL;
}
/* Allocate an entropy collector (see
 * jent_entropy_collector_alloc_internal) and warm it up by running
 * one round of noise collection so the data pad starts non-zero.
 * Returns NULL if allocation or timer setup fails.  */
static struct rand_data *_jent_entropy_collector_alloc(unsigned int osr,
						       unsigned int flags)
{
	struct rand_data *ec = jent_entropy_collector_alloc_internal(osr,
								     flags);

	if (!ec)
		return ec;

	/* fill the data pad with non-zero values */
	if (jent_notime_settick(ec)) {
		jent_entropy_collector_free(ec);
		return NULL;
	}
	jent_random_data(ec);
	jent_notime_unsettick(ec);

	return ec;
}
/* Public allocation entry point.  In addition to the internal
 * allocator, records whether the caller supplied an explicit maximum
 * memory size via FLAGS so later recovery logic knows not to grow it
 * automatically.  */
JENT_PRIVATE_STATIC
struct rand_data *jent_entropy_collector_alloc(unsigned int osr,
					       unsigned int flags)
{
	struct rand_data *ec = _jent_entropy_collector_alloc(osr, flags);

	/* Remember that the caller provided a maximum size flag */
	if (ec)
		ec->max_mem_set = !!JENT_FLAGS_TO_MAX_MEMSIZE(flags);

	return ec;
}
/* Release an entropy collector: disable the timer-less noise thread,
 * free the memory-access scratch region (sized via the collector's
 * own flags), then the collector itself.  NULL is ignored.  */
JENT_PRIVATE_STATIC
void jent_entropy_collector_free(struct rand_data *entropy_collector)
{
	if (entropy_collector == NULL)
		return;

	jent_notime_disable(entropy_collector);

	if (entropy_collector->mem != NULL) {
		jent_zfree(entropy_collector->mem,
			   jent_memsize(entropy_collector->flags));
		entropy_collector->mem = NULL;
	}

	jent_zfree(entropy_collector, sizeof(struct rand_data));
}
/* Power-up test of the timing source with oversampling rate OSR and
 * configuration FLAGS.  Runs JENT_POWERUP_TESTLOOPCOUNT measurement
 * rounds and verifies: the timer works, is fine-grained and
 * monotonic (up to 3 backward steps tolerated for NTP/adjtime), the
 * SP800-90B health tests pass, the deltas share no common GCD, and
 * not too many measurements are "stuck".  Returns 0 on success or
 * one of the E* error codes.  */
int jent_time_entropy_init(unsigned int osr, unsigned int flags)
{
	struct rand_data *ec;
	uint64_t *delta_history;
	int i, time_backwards = 0, count_stuck = 0, ret = 0;
	unsigned int health_test_result;

	delta_history = jent_gcd_init(JENT_POWERUP_TESTLOOPCOUNT);
	if (!delta_history)
		return EMEM;

	if (flags & JENT_FORCE_INTERNAL_TIMER)
		jent_notime_force();
	else
		flags |= JENT_DISABLE_INTERNAL_TIMER;

	/*
	 * If the start-up health tests (including the APT and RCT) are not
	 * run, then the entropy source is not 90B compliant. We could test if
	 * fips_enabled should be set using the jent_fips_enabled() function,
	 * but this can be overridden using the JENT_FORCE_FIPS flag, which
	 * isn't passed in yet. It is better to run the tests on the small
	 * amount of data that we have, which should not fail unless things
	 * are really bad.
	 */
	flags |= JENT_FORCE_FIPS;
	ec = jent_entropy_collector_alloc_internal(osr, flags);
	if (!ec) {
		ret = EMEM;
		goto out;
	}

	if (jent_notime_settick(ec)) {
		ret = EMEM;
		goto out;
	}

	/* To initialize the prior time. */
	jent_measure_jitter(ec, 0, NULL);

	/* We could perform statistical tests here, but the problem is
	 * that we only have a few loop counts to do testing. These
	 * loop counts may show some slight skew leading to false positives.
	 */

	/*
	 * We could add a check for system capabilities such as clock_getres or
	 * check for CONFIG_X86_TSC, but it does not make much sense as the
	 * following sanity checks verify that we have a high-resolution
	 * timer.
	 */

	/* The first CLEARCACHE iterations only warm caches and branch
	 * predictors; their results are discarded below (i < 0).  */
#define CLEARCACHE 100
	for (i = -CLEARCACHE; i < JENT_POWERUP_TESTLOOPCOUNT; i++) {
		uint64_t start_time = 0, end_time = 0, delta = 0;
		unsigned int stuck;

		/* Invoke core entropy collection logic */
		stuck = jent_measure_jitter(ec, 0, &delta);
		end_time = ec->prev_time;
		start_time = ec->prev_time - delta;

		/* test whether timer works */
		if (!start_time || !end_time) {
			ret = ENOTIME;
			goto out;
		}

		/*
		 * test whether timer is fine grained enough to provide
		 * delta even when called shortly after each other -- this
		 * implies that we also have a high resolution timer
		 */
		if (!delta || (end_time == start_time)) {
			ret = ECOARSETIME;
			goto out;
		}

		/*
		 * up to here we did not modify any variable that will be
		 * evaluated later, but we already performed some work. Thus we
		 * already have had an impact on the caches, branch prediction,
		 * etc. with the goal to clear it to get the worst case
		 * measurements.
		 */
		if (i < 0)
			continue;

		if (stuck)
			count_stuck++;

		/* test whether we have an increasing timer */
		if (!(end_time > start_time))
			time_backwards++;

		/* Watch for common adjacent GCD values */
		jent_gcd_add_value(delta_history, delta, i);
	}

	/*
	 * we allow up to three times the time running backwards.
	 * CLOCK_REALTIME is affected by adjtime and NTP operations. Thus,
	 * if such an operation just happens to interfere with our test, it
	 * should not fail. The value of 3 should cover the NTP case being
	 * performed during our test run.
	 */
	if (time_backwards > 3) {
		ret = ENOMONOTONIC;
		goto out;
	}

	/* First, did we encounter a health test failure? */
	if ((health_test_result = jent_health_failure(ec))) {
		ret = (health_test_result & JENT_RCT_FAILURE) ? ERCT : EHEALTH;
		goto out;
	}

	ret = jent_gcd_analyze(delta_history, JENT_POWERUP_TESTLOOPCOUNT);
	if (ret)
		goto out;

	/*
	 * If we have more than 90% stuck results, then this Jitter RNG is
	 * likely to not work well.
	 */
	if (JENT_STUCK_INIT_THRES(JENT_POWERUP_TESTLOOPCOUNT) < count_stuck)
		ret = ESTUCK;

out:
	jent_gcd_fini(delta_history, JENT_POWERUP_TESTLOOPCOUNT);

	if ((flags & JENT_FORCE_INTERNAL_TIMER) && ec)
		jent_notime_unsettick(ec);

	jent_entropy_collector_free(ec);

	return ret;
}
/* Common prologue for the jent_entropy_init* variants: lock out
 * notime-implementation switching, self-test the SHA-3 code and the
 * GCD logic, and mark the self tests as run.  Returns 0 on success
 * or the failing test's error code.  */
static inline int jent_entropy_init_common_pre(void)
{
	int ret;

	jent_notime_block_switch();

	if (sha3_tester())
		return EHASH;

	ret = jent_gcd_selftest();

	jent_selftest_run = 1;

	return ret;
}
/* Common epilogue for the jent_entropy_init* variants: on failure,
 * unmark the self-test state so the tests are re-run on the next
 * attempt.  Passes RET through unchanged.  */
static inline int jent_entropy_init_common_post(int ret)
{
	if (ret != 0)
		jent_selftest_run = 0;

	return ret;
}
/* Run the power-up tests with default parameters: first with the
 * external timer; if that fails and the internal timer is compiled
 * in, retry with the forced internal timer.  Returns 0 on success.  */
JENT_PRIVATE_STATIC
int jent_entropy_init(void)
{
	int ret = jent_entropy_init_common_pre();

	if (ret)
		return ret;

	ret = jent_time_entropy_init(0, JENT_DISABLE_INTERNAL_TIMER);

#ifdef JENT_CONF_ENABLE_INTERNAL_TIMER
	if (ret)
		ret = jent_time_entropy_init(0, JENT_FORCE_INTERNAL_TIMER);
#endif /* JENT_CONF_ENABLE_INTERNAL_TIMER */

	return jent_entropy_init_common_post(ret);
}
/* Like jent_entropy_init but with explicit oversampling rate OSR and
 * FLAGS.  Tries the external timer first (unless the caller forces
 * the internal one) and falls back to the internal timer when
 * enabled and not disabled by the caller.  Returns 0 on success.  */
JENT_PRIVATE_STATIC
int jent_entropy_init_ex(unsigned int osr, unsigned int flags)
{
	int ret = jent_entropy_init_common_pre();

	if (ret)
		return ret;

	/* Test without internal timer unless caller does not want it */
	if (!(flags & JENT_FORCE_INTERNAL_TIMER))
		ret = jent_time_entropy_init(osr,
					flags | JENT_DISABLE_INTERNAL_TIMER);

#ifdef JENT_CONF_ENABLE_INTERNAL_TIMER
	/* Test with internal timer unless caller does not want it */
	if (ret && !(flags & JENT_DISABLE_INTERNAL_TIMER))
		ret = jent_time_entropy_init(osr,
					     flags | JENT_FORCE_INTERNAL_TIMER);
#endif /* JENT_CONF_ENABLE_INTERNAL_TIMER */

	return jent_entropy_init_common_post(ret);
}
#ifdef JENT_CONF_ENABLE_INTERNAL_TIMER
/* Install a caller-provided thread implementation for the timer-less
 * (notime) noise source.  Only possible before the implementation is
 * locked by jent_notime_block_switch; forwards the result of
 * jent_notime_switch.  */
JENT_PRIVATE_STATIC
int jent_entropy_switch_notime_impl(struct jent_notime_thread *new_thread)
{
	return jent_notime_switch(new_thread);
}
#endif

File Metadata

Mime Type
text/x-diff
Expires
Sat, Dec 6, 10:42 PM (1 d, 13 h)
Storage Engine
local-disk
Storage Format
Raw Data
Storage Handle
ac/e3/f2e2d86ec941117a4b7d89e6f456

Event Timeline