diff --git a/cipher/chacha20-ppc.c b/cipher/chacha20-ppc.c
index 243c12ff..f135a32f 100644
--- a/cipher/chacha20-ppc.c
+++ b/cipher/chacha20-ppc.c
@@ -1,747 +1,747 @@
/* chacha20-ppc.c - PowerPC vector implementation of ChaCha20
 * Copyright (C) 2019 Jussi Kivilinna
 *
 * This file is part of Libgcrypt.
 *
 * Libgcrypt is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as
 * published by the Free Software Foundation; either version 2.1 of
 * the License, or (at your option) any later version.
 *
 * Libgcrypt is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <https://www.gnu.org/licenses/>.
 */

#include <config.h>

#if defined(ENABLE_PPC_CRYPTO_SUPPORT) && \
    defined(HAVE_COMPATIBLE_CC_PPC_ALTIVEC) && \
    defined(HAVE_GCC_INLINE_ASM_PPC_ALTIVEC) && \
    defined(USE_CHACHA20) && \
    __GNUC__ >= 4

#include <altivec.h>
#include "bufhelp.h"
#include "poly1305-internal.h"
#include "mpi-internal.h"
#include "longlong.h"

typedef vector unsigned char vector16x_u8;
typedef vector unsigned int vector4x_u32;
typedef vector unsigned long long vector2x_u64;

#define ALWAYS_INLINE inline __attribute__((always_inline))
#define NO_INLINE __attribute__((noinline))
#define NO_INSTRUMENT_FUNCTION __attribute__((no_instrument_function))

#define ASM_FUNC_ATTR          NO_INSTRUMENT_FUNCTION
#define ASM_FUNC_ATTR_INLINE   ASM_FUNC_ATTR ALWAYS_INLINE
#define ASM_FUNC_ATTR_NOINLINE ASM_FUNC_ATTR NO_INLINE

#ifdef WORDS_BIGENDIAN
static const vector16x_u8 le_bswap_const =
  { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 };
#endif

static ASM_FUNC_ATTR_INLINE vector4x_u32
vec_rol_elems(vector4x_u32 v, unsigned int idx)
{
#ifndef WORDS_BIGENDIAN
  return vec_sld (v, v, (16 - (4 * idx)) & 15);
#else
  return vec_sld (v, v, (4 * idx) & 15);
#endif
}

static ASM_FUNC_ATTR_INLINE vector4x_u32
vec_load_le(unsigned long offset, const unsigned char *ptr)
{
  vector4x_u32 vec;
  vec = vec_vsx_ld (offset, (const u32 *)ptr);
#ifdef WORDS_BIGENDIAN
  vec = (vector4x_u32)vec_perm((vector16x_u8)vec, (vector16x_u8)vec,
                               le_bswap_const);
#endif
  return vec;
}

static ASM_FUNC_ATTR_INLINE void
vec_store_le(vector4x_u32 vec, unsigned long offset, unsigned char *ptr)
{
#ifdef WORDS_BIGENDIAN
  vec = (vector4x_u32)vec_perm((vector16x_u8)vec, (vector16x_u8)vec,
                               le_bswap_const);
#endif
  vec_vsx_st (vec, offset, (u32 *)ptr);
}

static ASM_FUNC_ATTR_INLINE vector4x_u32
vec_add_ctr_u64(vector4x_u32 v, vector4x_u32 a)
{
#ifdef WORDS_BIGENDIAN
  static const vector16x_u8 swap32 =
    { 4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11 };
  vector2x_u64 vec, add, sum;

  vec = (vector2x_u64)vec_perm((vector16x_u8)v, (vector16x_u8)v, swap32);
  add = (vector2x_u64)vec_perm((vector16x_u8)a, (vector16x_u8)a, swap32);
  sum = vec + add;
  return (vector4x_u32)vec_perm((vector16x_u8)sum, (vector16x_u8)sum, swap32);
#else
  return (vector4x_u32)((vector2x_u64)(v) + (vector2x_u64)(a));
#endif
}


/**********************************************************************
  2-way && 1-way chacha20
 **********************************************************************/

#define ROTATE(v1,rolv) \
        __asm__ ("vrlw %0,%1,%2\n\t" : "=v" (v1) : "v" (v1), "v" (rolv))

#define WORD_ROL(v1,c) \
        ((v1) = vec_rol_elems((v1), (c)))

#define XOR(ds,s) \
        ((ds) ^= (s))

#define PLUS(ds,s) \
        ((ds) += (s))

#define
QUARTERROUND4(x0,x1,x2,x3,rol_x1,rol_x2,rol_x3) \ PLUS(x0, x1); XOR(x3, x0); ROTATE(x3, rotate_16); \ PLUS(x2, x3); XOR(x1, x2); ROTATE(x1, rotate_12); \ PLUS(x0, x1); XOR(x3, x0); ROTATE(x3, rotate_8); \ PLUS(x2, x3); \ WORD_ROL(x3, rol_x3); \ XOR(x1, x2); \ WORD_ROL(x2, rol_x2); \ ROTATE(x1, rotate_7); \ WORD_ROL(x1, rol_x1); #define ADD_U64(v,a) \ (v = vec_add_ctr_u64(v, a)) -static unsigned int ASM_FUNC_ATTR_INLINE +static ASM_FUNC_ATTR_INLINE unsigned int chacha20_ppc_blocks1(u32 *state, byte *dst, const byte *src, size_t nblks) { vector4x_u32 counter_1 = { 1, 0, 0, 0 }; vector4x_u32 rotate_16 = { 16, 16, 16, 16 }; vector4x_u32 rotate_12 = { 12, 12, 12, 12 }; vector4x_u32 rotate_8 = { 8, 8, 8, 8 }; vector4x_u32 rotate_7 = { 7, 7, 7, 7 }; vector4x_u32 state0, state1, state2, state3; vector4x_u32 v0, v1, v2, v3; vector4x_u32 v4, v5, v6, v7; int i; /* force preload of constants to vector registers */ __asm__ ("": "+v" (counter_1) :: "memory"); __asm__ ("": "+v" (rotate_16) :: "memory"); __asm__ ("": "+v" (rotate_12) :: "memory"); __asm__ ("": "+v" (rotate_8) :: "memory"); __asm__ ("": "+v" (rotate_7) :: "memory"); state0 = vec_vsx_ld(0 * 16, state); state1 = vec_vsx_ld(1 * 16, state); state2 = vec_vsx_ld(2 * 16, state); state3 = vec_vsx_ld(3 * 16, state); while (nblks >= 2) { v0 = state0; v1 = state1; v2 = state2; v3 = state3; v4 = state0; v5 = state1; v6 = state2; v7 = state3; ADD_U64(v7, counter_1); for (i = 20; i > 0; i -= 2) { QUARTERROUND4(v0, v1, v2, v3, 1, 2, 3); QUARTERROUND4(v4, v5, v6, v7, 1, 2, 3); QUARTERROUND4(v0, v1, v2, v3, 3, 2, 1); QUARTERROUND4(v4, v5, v6, v7, 3, 2, 1); } v0 += state0; v1 += state1; v2 += state2; v3 += state3; ADD_U64(state3, counter_1); /* update counter */ v4 += state0; v5 += state1; v6 += state2; v7 += state3; ADD_U64(state3, counter_1); /* update counter */ v0 ^= vec_load_le(0 * 16, src); v1 ^= vec_load_le(1 * 16, src); v2 ^= vec_load_le(2 * 16, src); v3 ^= vec_load_le(3 * 16, src); vec_store_le(v0, 0 * 16, dst); vec_store_le(v1, 1 * 16, dst); vec_store_le(v2, 2 * 16, dst); vec_store_le(v3, 3 * 16, dst); src += 64; dst += 64; v4 ^= vec_load_le(0 * 16, src); v5 ^= vec_load_le(1 * 16, src); v6 ^= vec_load_le(2 * 16, src); v7 ^= vec_load_le(3 * 16, src); vec_store_le(v4, 0 * 16, dst); vec_store_le(v5, 1 * 16, dst); vec_store_le(v6, 2 * 16, dst); vec_store_le(v7, 3 * 16, dst); src += 64; dst += 64; nblks -= 2; } while (nblks) { v0 = state0; v1 = state1; v2 = state2; v3 = state3; for (i = 20; i > 0; i -= 2) { QUARTERROUND4(v0, v1, v2, v3, 1, 2, 3); QUARTERROUND4(v0, v1, v2, v3, 3, 2, 1); } v0 += state0; v1 += state1; v2 += state2; v3 += state3; ADD_U64(state3, counter_1); /* update counter */ v0 ^= vec_load_le(0 * 16, src); v1 ^= vec_load_le(1 * 16, src); v2 ^= vec_load_le(2 * 16, src); v3 ^= vec_load_le(3 * 16, src); vec_store_le(v0, 0 * 16, dst); vec_store_le(v1, 1 * 16, dst); vec_store_le(v2, 2 * 16, dst); vec_store_le(v3, 3 * 16, dst); src += 64; dst += 64; nblks--; } vec_vsx_st(state3, 3 * 16, state); /* store counter */ return 0; } /********************************************************************** 4-way chacha20 **********************************************************************/ /* 4x4 32-bit integer matrix transpose */ #define transpose_4x4(x0, x1, x2, x3) ({ \ vector4x_u32 t1 = vec_mergeh(x0, x2); \ vector4x_u32 t2 = vec_mergel(x0, x2); \ vector4x_u32 t3 = vec_mergeh(x1, x3); \ x3 = vec_mergel(x1, x3); \ x0 = vec_mergeh(t1, t3); \ x1 = vec_mergel(t1, t3); \ x2 = vec_mergeh(t2, x3); \ x3 = vec_mergel(t2, x3); \ }) #define 
QUARTERROUND2(a1,b1,c1,d1,a2,b2,c2,d2) \ PLUS(a1,b1); PLUS(a2,b2); XOR(d1,a1); XOR(d2,a2); \ ROTATE(d1, rotate_16); ROTATE(d2, rotate_16); \ PLUS(c1,d1); PLUS(c2,d2); XOR(b1,c1); XOR(b2,c2); \ ROTATE(b1, rotate_12); ROTATE(b2, rotate_12); \ PLUS(a1,b1); PLUS(a2,b2); XOR(d1,a1); XOR(d2,a2); \ ROTATE(d1, rotate_8); ROTATE(d2, rotate_8); \ PLUS(c1,d1); PLUS(c2,d2); XOR(b1,c1); XOR(b2,c2); \ ROTATE(b1, rotate_7); ROTATE(b2, rotate_7); -static unsigned int ASM_FUNC_ATTR_INLINE +static ASM_FUNC_ATTR_INLINE unsigned int chacha20_ppc_blocks4(u32 *state, byte *dst, const byte *src, size_t nblks) { vector4x_u32 counters_0123 = { 0, 1, 2, 3 }; vector4x_u32 counter_4 = { 4, 0, 0, 0 }; vector4x_u32 rotate_16 = { 16, 16, 16, 16 }; vector4x_u32 rotate_12 = { 12, 12, 12, 12 }; vector4x_u32 rotate_8 = { 8, 8, 8, 8 }; vector4x_u32 rotate_7 = { 7, 7, 7, 7 }; vector4x_u32 state0, state1, state2, state3; vector4x_u32 v0, v1, v2, v3, v4, v5, v6, v7; vector4x_u32 v8, v9, v10, v11, v12, v13, v14, v15; vector4x_u32 tmp; int i; /* force preload of constants to vector registers */ __asm__ ("": "+v" (counters_0123) :: "memory"); __asm__ ("": "+v" (counter_4) :: "memory"); __asm__ ("": "+v" (rotate_16) :: "memory"); __asm__ ("": "+v" (rotate_12) :: "memory"); __asm__ ("": "+v" (rotate_8) :: "memory"); __asm__ ("": "+v" (rotate_7) :: "memory"); state0 = vec_vsx_ld(0 * 16, state); state1 = vec_vsx_ld(1 * 16, state); state2 = vec_vsx_ld(2 * 16, state); state3 = vec_vsx_ld(3 * 16, state); do { v0 = vec_splat(state0, 0); v1 = vec_splat(state0, 1); v2 = vec_splat(state0, 2); v3 = vec_splat(state0, 3); v4 = vec_splat(state1, 0); v5 = vec_splat(state1, 1); v6 = vec_splat(state1, 2); v7 = vec_splat(state1, 3); v8 = vec_splat(state2, 0); v9 = vec_splat(state2, 1); v10 = vec_splat(state2, 2); v11 = vec_splat(state2, 3); v12 = vec_splat(state3, 0); v13 = vec_splat(state3, 1); v14 = vec_splat(state3, 2); v15 = vec_splat(state3, 3); v12 += counters_0123; v13 -= vec_cmplt(v12, counters_0123); for (i = 20; i > 0; i -= 2) { QUARTERROUND2(v0, v4, v8, v12, v1, v5, v9, v13) QUARTERROUND2(v2, v6, v10, v14, v3, v7, v11, v15) QUARTERROUND2(v0, v5, v10, v15, v1, v6, v11, v12) QUARTERROUND2(v2, v7, v8, v13, v3, v4, v9, v14) } v0 += vec_splat(state0, 0); v1 += vec_splat(state0, 1); v2 += vec_splat(state0, 2); v3 += vec_splat(state0, 3); v4 += vec_splat(state1, 0); v5 += vec_splat(state1, 1); v6 += vec_splat(state1, 2); v7 += vec_splat(state1, 3); v8 += vec_splat(state2, 0); v9 += vec_splat(state2, 1); v10 += vec_splat(state2, 2); v11 += vec_splat(state2, 3); tmp = vec_splat(state3, 0); tmp += counters_0123; v12 += tmp; v13 += vec_splat(state3, 1) - vec_cmplt(tmp, counters_0123); v14 += vec_splat(state3, 2); v15 += vec_splat(state3, 3); ADD_U64(state3, counter_4); /* update counter */ transpose_4x4(v0, v1, v2, v3); transpose_4x4(v4, v5, v6, v7); transpose_4x4(v8, v9, v10, v11); transpose_4x4(v12, v13, v14, v15); v0 ^= vec_load_le((64 * 0 + 16 * 0), src); v1 ^= vec_load_le((64 * 1 + 16 * 0), src); v2 ^= vec_load_le((64 * 2 + 16 * 0), src); v3 ^= vec_load_le((64 * 3 + 16 * 0), src); v4 ^= vec_load_le((64 * 0 + 16 * 1), src); v5 ^= vec_load_le((64 * 1 + 16 * 1), src); v6 ^= vec_load_le((64 * 2 + 16 * 1), src); v7 ^= vec_load_le((64 * 3 + 16 * 1), src); v8 ^= vec_load_le((64 * 0 + 16 * 2), src); v9 ^= vec_load_le((64 * 1 + 16 * 2), src); v10 ^= vec_load_le((64 * 2 + 16 * 2), src); v11 ^= vec_load_le((64 * 3 + 16 * 2), src); v12 ^= vec_load_le((64 * 0 + 16 * 3), src); v13 ^= vec_load_le((64 * 1 + 16 * 3), src); v14 ^= vec_load_le((64 * 2 + 16 * 3), 
src); v15 ^= vec_load_le((64 * 3 + 16 * 3), src); vec_store_le(v0, (64 * 0 + 16 * 0), dst); vec_store_le(v1, (64 * 1 + 16 * 0), dst); vec_store_le(v2, (64 * 2 + 16 * 0), dst); vec_store_le(v3, (64 * 3 + 16 * 0), dst); vec_store_le(v4, (64 * 0 + 16 * 1), dst); vec_store_le(v5, (64 * 1 + 16 * 1), dst); vec_store_le(v6, (64 * 2 + 16 * 1), dst); vec_store_le(v7, (64 * 3 + 16 * 1), dst); vec_store_le(v8, (64 * 0 + 16 * 2), dst); vec_store_le(v9, (64 * 1 + 16 * 2), dst); vec_store_le(v10, (64 * 2 + 16 * 2), dst); vec_store_le(v11, (64 * 3 + 16 * 2), dst); vec_store_le(v12, (64 * 0 + 16 * 3), dst); vec_store_le(v13, (64 * 1 + 16 * 3), dst); vec_store_le(v14, (64 * 2 + 16 * 3), dst); vec_store_le(v15, (64 * 3 + 16 * 3), dst); src += 4*64; dst += 4*64; nblks -= 4; } while (nblks); vec_vsx_st(state3, 3 * 16, state); /* store counter */ return 0; } #if SIZEOF_UNSIGNED_LONG == 8 /********************************************************************** 4-way stitched chacha20-poly1305 **********************************************************************/ #define ADD_1305_64(A2, A1, A0, B2, B1, B0) \ __asm__ ("addc %0, %3, %0\n" \ "adde %1, %4, %1\n" \ "adde %2, %5, %2\n" \ : "+r" (A0), "+r" (A1), "+r" (A2) \ : "r" (B0), "r" (B1), "r" (B2) \ : "cc" ) #define MUL_MOD_1305_64_PART1(H2, H1, H0, R1, R0, R1_MULT5) do { \ /* x = a * r (partial mod 2^130-5) */ \ umul_ppmm(x0_hi, x0_lo, H0, R0); /* h0 * r0 */ \ umul_ppmm(x1_hi, x1_lo, H0, R1); /* h0 * r1 */ \ \ umul_ppmm(t0_hi, t0_lo, H1, R1_MULT5); /* h1 * r1 mod 2^130-5 */ \ } while (0) #define MUL_MOD_1305_64_PART2(H2, H1, H0, R1, R0, R1_MULT5) do { \ add_ssaaaa(x0_hi, x0_lo, x0_hi, x0_lo, t0_hi, t0_lo); \ umul_ppmm(t1_hi, t1_lo, H1, R0); /* h1 * r0 */ \ add_ssaaaa(x1_hi, x1_lo, x1_hi, x1_lo, t1_hi, t1_lo); \ \ t1_lo = H2 * R1_MULT5; /* h2 * r1 mod 2^130-5 */ \ t1_hi = H2 * R0; /* h2 * r0 */ \ add_ssaaaa(H0, H1, x1_hi, x1_lo, t1_hi, t1_lo); \ \ /* carry propagation */ \ H2 = H0 & 3; \ H0 = (H0 >> 2) * 5; /* msb mod 2^130-5 */ \ ADD_1305_64(H2, H1, H0, (u64)0, x0_hi, x0_lo); \ } while (0) #define POLY1305_BLOCK_PART1(in_pos) do { \ m0 = buf_get_le64(poly1305_src + (in_pos) + 0); \ m1 = buf_get_le64(poly1305_src + (in_pos) + 8); \ /* a = h + m */ \ ADD_1305_64(h2, h1, h0, m2, m1, m0); \ /* h = a * r (partial mod 2^130-5) */ \ MUL_MOD_1305_64_PART1(h2, h1, h0, r1, r0, r1_mult5); \ } while (0) #define POLY1305_BLOCK_PART2(in_pos) do { \ MUL_MOD_1305_64_PART2(h2, h1, h0, r1, r0, r1_mult5); \ } while (0) -static unsigned int ASM_FUNC_ATTR_INLINE +static ASM_FUNC_ATTR_INLINE unsigned int chacha20_poly1305_ppc_blocks4(u32 *state, byte *dst, const byte *src, size_t nblks, POLY1305_STATE *st, const byte *poly1305_src) { vector4x_u32 counters_0123 = { 0, 1, 2, 3 }; vector4x_u32 counter_4 = { 4, 0, 0, 0 }; vector4x_u32 rotate_16 = { 16, 16, 16, 16 }; vector4x_u32 rotate_12 = { 12, 12, 12, 12 }; vector4x_u32 rotate_8 = { 8, 8, 8, 8 }; vector4x_u32 rotate_7 = { 7, 7, 7, 7 }; vector4x_u32 state0, state1, state2, state3; vector4x_u32 v0, v1, v2, v3, v4, v5, v6, v7; vector4x_u32 v8, v9, v10, v11, v12, v13, v14, v15; vector4x_u32 tmp; u64 r0, r1, r1_mult5; u64 h0, h1, h2; u64 m0, m1, m2; u64 x0_lo, x0_hi, x1_lo, x1_hi; u64 t0_lo, t0_hi, t1_lo, t1_hi; unsigned int i, o; /* load poly1305 state */ m2 = 1; h0 = st->h[0] + ((u64)st->h[1] << 32); h1 = st->h[2] + ((u64)st->h[3] << 32); h2 = st->h[4]; r0 = st->r[0] + ((u64)st->r[1] << 32); r1 = st->r[2] + ((u64)st->r[3] << 32); r1_mult5 = (r1 >> 2) + r1; /* force preload of constants to vector registers */ __asm__ ("": "+v" 
(counters_0123) :: "memory"); __asm__ ("": "+v" (counter_4) :: "memory"); __asm__ ("": "+v" (rotate_16) :: "memory"); __asm__ ("": "+v" (rotate_12) :: "memory"); __asm__ ("": "+v" (rotate_8) :: "memory"); __asm__ ("": "+v" (rotate_7) :: "memory"); state0 = vec_vsx_ld(0 * 16, state); state1 = vec_vsx_ld(1 * 16, state); state2 = vec_vsx_ld(2 * 16, state); state3 = vec_vsx_ld(3 * 16, state); do { v0 = vec_splat(state0, 0); v1 = vec_splat(state0, 1); v2 = vec_splat(state0, 2); v3 = vec_splat(state0, 3); v4 = vec_splat(state1, 0); v5 = vec_splat(state1, 1); v6 = vec_splat(state1, 2); v7 = vec_splat(state1, 3); v8 = vec_splat(state2, 0); v9 = vec_splat(state2, 1); v10 = vec_splat(state2, 2); v11 = vec_splat(state2, 3); v12 = vec_splat(state3, 0); v13 = vec_splat(state3, 1); v14 = vec_splat(state3, 2); v15 = vec_splat(state3, 3); v12 += counters_0123; v13 -= vec_cmplt(v12, counters_0123); for (o = 20; o; o -= 10) { for (i = 8; i; i -= 2) { POLY1305_BLOCK_PART1(0 * 16); QUARTERROUND2(v0, v4, v8, v12, v1, v5, v9, v13) POLY1305_BLOCK_PART2(); QUARTERROUND2(v2, v6, v10, v14, v3, v7, v11, v15) POLY1305_BLOCK_PART1(1 * 16); poly1305_src += 2 * 16; QUARTERROUND2(v0, v5, v10, v15, v1, v6, v11, v12) POLY1305_BLOCK_PART2(); QUARTERROUND2(v2, v7, v8, v13, v3, v4, v9, v14) } QUARTERROUND2(v0, v4, v8, v12, v1, v5, v9, v13) QUARTERROUND2(v2, v6, v10, v14, v3, v7, v11, v15) QUARTERROUND2(v0, v5, v10, v15, v1, v6, v11, v12) QUARTERROUND2(v2, v7, v8, v13, v3, v4, v9, v14) } v0 += vec_splat(state0, 0); v1 += vec_splat(state0, 1); v2 += vec_splat(state0, 2); v3 += vec_splat(state0, 3); v4 += vec_splat(state1, 0); v5 += vec_splat(state1, 1); v6 += vec_splat(state1, 2); v7 += vec_splat(state1, 3); v8 += vec_splat(state2, 0); v9 += vec_splat(state2, 1); v10 += vec_splat(state2, 2); v11 += vec_splat(state2, 3); tmp = vec_splat(state3, 0); tmp += counters_0123; v12 += tmp; v13 += vec_splat(state3, 1) - vec_cmplt(tmp, counters_0123); v14 += vec_splat(state3, 2); v15 += vec_splat(state3, 3); ADD_U64(state3, counter_4); /* update counter */ transpose_4x4(v0, v1, v2, v3); transpose_4x4(v4, v5, v6, v7); transpose_4x4(v8, v9, v10, v11); transpose_4x4(v12, v13, v14, v15); v0 ^= vec_load_le((64 * 0 + 16 * 0), src); v1 ^= vec_load_le((64 * 1 + 16 * 0), src); v2 ^= vec_load_le((64 * 2 + 16 * 0), src); v3 ^= vec_load_le((64 * 3 + 16 * 0), src); v4 ^= vec_load_le((64 * 0 + 16 * 1), src); v5 ^= vec_load_le((64 * 1 + 16 * 1), src); v6 ^= vec_load_le((64 * 2 + 16 * 1), src); v7 ^= vec_load_le((64 * 3 + 16 * 1), src); v8 ^= vec_load_le((64 * 0 + 16 * 2), src); v9 ^= vec_load_le((64 * 1 + 16 * 2), src); v10 ^= vec_load_le((64 * 2 + 16 * 2), src); v11 ^= vec_load_le((64 * 3 + 16 * 2), src); v12 ^= vec_load_le((64 * 0 + 16 * 3), src); v13 ^= vec_load_le((64 * 1 + 16 * 3), src); v14 ^= vec_load_le((64 * 2 + 16 * 3), src); v15 ^= vec_load_le((64 * 3 + 16 * 3), src); vec_store_le(v0, (64 * 0 + 16 * 0), dst); vec_store_le(v1, (64 * 1 + 16 * 0), dst); vec_store_le(v2, (64 * 2 + 16 * 0), dst); vec_store_le(v3, (64 * 3 + 16 * 0), dst); vec_store_le(v4, (64 * 0 + 16 * 1), dst); vec_store_le(v5, (64 * 1 + 16 * 1), dst); vec_store_le(v6, (64 * 2 + 16 * 1), dst); vec_store_le(v7, (64 * 3 + 16 * 1), dst); vec_store_le(v8, (64 * 0 + 16 * 2), dst); vec_store_le(v9, (64 * 1 + 16 * 2), dst); vec_store_le(v10, (64 * 2 + 16 * 2), dst); vec_store_le(v11, (64 * 3 + 16 * 2), dst); vec_store_le(v12, (64 * 0 + 16 * 3), dst); vec_store_le(v13, (64 * 1 + 16 * 3), dst); vec_store_le(v14, (64 * 2 + 16 * 3), dst); vec_store_le(v15, (64 * 3 + 16 * 3), dst); src += 
4*64; dst += 4*64; nblks -= 4; } while (nblks); vec_vsx_st(state3, 3 * 16, state); /* store counter */ /* store poly1305 state */ st->h[0] = h0; st->h[1] = h0 >> 32; st->h[2] = h1; st->h[3] = h1 >> 32; st->h[4] = h2; return 0; } #else -static unsigned int ASM_FUNC_ATTR_INLINE +static ASM_FUNC_ATTR_INLINE unsigned int chacha20_poly1305_ppc_blocks4(u32 *state, byte *dst, const byte *src, size_t nblks, POLY1305_STATE *st, const byte *poly1305_src) { } #endif /* SIZEOF_UNSIGNED_LONG == 8 */ #ifdef HAVE_GCC_ATTRIBUTE_OPTIMIZE # define FUNC_ATTR_OPT_O2 __attribute__((optimize("-O2"))) #else # define FUNC_ATTR_OPT_O2 #endif #if defined(__clang__) && defined(HAVE_CLANG_ATTRIBUTE_PPC_TARGET) # define FUNC_ATTR_TARGET_P8 __attribute__((target("arch=pwr8"))) # define FUNC_ATTR_TARGET_P9 __attribute__((target("arch=pwr9"))) #elif defined(HAVE_GCC_ATTRIBUTE_PPC_TARGET) # define FUNC_ATTR_TARGET_P8 __attribute__((target("cpu=power8"))) # define FUNC_ATTR_TARGET_P9 __attribute__((target("cpu=power9"))) #else # define FUNC_ATTR_TARGET_P8 # define FUNC_ATTR_TARGET_P9 #endif /* Functions targetting POWER8. */ unsigned int ASM_FUNC_ATTR FUNC_ATTR_TARGET_P8 FUNC_ATTR_OPT_O2 _gcry_chacha20_ppc8_blocks1(u32 *state, byte *dst, const byte *src, size_t nblks) { return chacha20_ppc_blocks1(state, dst, src, nblks); } unsigned int ASM_FUNC_ATTR FUNC_ATTR_TARGET_P8 FUNC_ATTR_OPT_O2 _gcry_chacha20_ppc8_blocks4(u32 *state, byte *dst, const byte *src, size_t nblks) { return chacha20_ppc_blocks4(state, dst, src, nblks); } unsigned int ASM_FUNC_ATTR FUNC_ATTR_TARGET_P8 FUNC_ATTR_OPT_O2 _gcry_chacha20_poly1305_ppc8_blocks4(u32 *state, byte *dst, const byte *src, size_t nblks, POLY1305_STATE *st, const byte *poly1305_src) { return chacha20_poly1305_ppc_blocks4(state, dst, src, nblks, st, poly1305_src); } #ifdef HAVE_GCC_ATTRIBUTE_PPC_TARGET /* Functions targetting POWER9. */ unsigned int ASM_FUNC_ATTR FUNC_ATTR_TARGET_P9 FUNC_ATTR_OPT_O2 _gcry_chacha20_ppc9_blocks1(u32 *state, byte *dst, const byte *src, size_t nblks) { return chacha20_ppc_blocks1(state, dst, src, nblks); } unsigned int ASM_FUNC_ATTR FUNC_ATTR_TARGET_P9 FUNC_ATTR_OPT_O2 _gcry_chacha20_ppc9_blocks4(u32 *state, byte *dst, const byte *src, size_t nblks) { return chacha20_ppc_blocks4(state, dst, src, nblks); } unsigned int ASM_FUNC_ATTR FUNC_ATTR_TARGET_P9 FUNC_ATTR_OPT_O2 _gcry_chacha20_poly1305_ppc9_blocks4(u32 *state, byte *dst, const byte *src, size_t nblks, POLY1305_STATE *st, const byte *poly1305_src) { return chacha20_poly1305_ppc_blocks4(state, dst, src, nblks, st, poly1305_src); } #else /* Compiler does not support target attribute, use same functions for POWER9 * as for POWER8. 
 */
unsigned int ASM_FUNC_ATTR FUNC_ATTR_TARGET_P9 FUNC_ATTR_OPT_O2
_gcry_chacha20_ppc9_blocks1(u32 *state, byte *dst, const byte *src,
                            size_t nblks)
{
  return _gcry_chacha20_ppc8_blocks1(state, dst, src, nblks);
}

unsigned int ASM_FUNC_ATTR FUNC_ATTR_TARGET_P9 FUNC_ATTR_OPT_O2
_gcry_chacha20_ppc9_blocks4(u32 *state, byte *dst, const byte *src,
                            size_t nblks)
{
  return _gcry_chacha20_ppc8_blocks4(state, dst, src, nblks);
}

unsigned int ASM_FUNC_ATTR FUNC_ATTR_TARGET_P9 FUNC_ATTR_OPT_O2
_gcry_chacha20_poly1305_ppc9_blocks4(u32 *state, byte *dst, const byte *src,
                                     size_t nblks, POLY1305_STATE *st,
                                     const byte *poly1305_src)
{
  return _gcry_chacha20_poly1305_ppc8_blocks4(state, dst, src, nblks, st,
                                              poly1305_src);
}
#endif /* HAVE_GCC_ATTRIBUTE_PPC_TARGET */

#endif /* ENABLE_PPC_CRYPTO_SUPPORT */
diff --git a/cipher/sha256-ppc.c b/cipher/sha256-ppc.c
index fd69380f..e5839a84 100644
--- a/cipher/sha256-ppc.c
+++ b/cipher/sha256-ppc.c
@@ -1,610 +1,610 @@
/* sha256-ppc.c - PowerPC vcrypto implementation of SHA-256 transform
 * Copyright (C) 2019,2023 Jussi Kivilinna
 *
 * This file is part of Libgcrypt.
 *
 * Libgcrypt is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as
 * published by the Free Software Foundation; either version 2.1 of
 * the License, or (at your option) any later version.
 *
 * Libgcrypt is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <https://www.gnu.org/licenses/>.
 */

#include <config.h>

#if defined(ENABLE_PPC_CRYPTO_SUPPORT) && \
    defined(HAVE_COMPATIBLE_CC_PPC_ALTIVEC) && \
    defined(HAVE_GCC_INLINE_ASM_PPC_ALTIVEC) && \
    defined(USE_SHA256) && \
    __GNUC__ >= 4

#include <altivec.h>
#include "bufhelp.h"

typedef vector unsigned char vector16x_u8;
typedef vector unsigned int vector4x_u32;
typedef vector unsigned long long vector2x_u64;

#define ALWAYS_INLINE inline __attribute__((always_inline))
#define NO_INLINE __attribute__((noinline))
#define NO_INSTRUMENT_FUNCTION __attribute__((no_instrument_function))

#define ASM_FUNC_ATTR          NO_INSTRUMENT_FUNCTION
#define ASM_FUNC_ATTR_INLINE   ASM_FUNC_ATTR ALWAYS_INLINE
#define ASM_FUNC_ATTR_NOINLINE ASM_FUNC_ATTR NO_INLINE

#ifdef HAVE_GCC_ATTRIBUTE_OPTIMIZE
# define FUNC_ATTR_OPT_O2 __attribute__((optimize("-O2")))
#else
# define FUNC_ATTR_OPT_O2
#endif

#if defined(__clang__) && defined(HAVE_CLANG_ATTRIBUTE_PPC_TARGET)
# define FUNC_ATTR_TARGET_P8 __attribute__((target("arch=pwr8")))
# define FUNC_ATTR_TARGET_P9 __attribute__((target("arch=pwr9")))
#elif defined(HAVE_GCC_ATTRIBUTE_PPC_TARGET)
# define FUNC_ATTR_TARGET_P8 __attribute__((target("cpu=power8")))
# define FUNC_ATTR_TARGET_P9 __attribute__((target("cpu=power9")))
#else
# define FUNC_ATTR_TARGET_P8
# define FUNC_ATTR_TARGET_P9
#endif

static const vector4x_u32 K[64 / 4] =
  {
#define TBL(v) v
    { TBL(0x428a2f98), TBL(0x71374491), TBL(0xb5c0fbcf), TBL(0xe9b5dba5) },
    { TBL(0x3956c25b), TBL(0x59f111f1), TBL(0x923f82a4), TBL(0xab1c5ed5) },
    { TBL(0xd807aa98), TBL(0x12835b01), TBL(0x243185be), TBL(0x550c7dc3) },
    { TBL(0x72be5d74), TBL(0x80deb1fe), TBL(0x9bdc06a7), TBL(0xc19bf174) },
    { TBL(0xe49b69c1), TBL(0xefbe4786), TBL(0x0fc19dc6), TBL(0x240ca1cc) },
    { TBL(0x2de92c6f), TBL(0x4a7484aa), TBL(0x5cb0a9dc), TBL(0x76f988da) },
    { TBL(0x983e5152), TBL(0xa831c66d), TBL(0xb00327c8), TBL(0xbf597fc7) },
    {
TBL(0xc6e00bf3), TBL(0xd5a79147), TBL(0x06ca6351), TBL(0x14292967) }, { TBL(0x27b70a85), TBL(0x2e1b2138), TBL(0x4d2c6dfc), TBL(0x53380d13) }, { TBL(0x650a7354), TBL(0x766a0abb), TBL(0x81c2c92e), TBL(0x92722c85) }, { TBL(0xa2bfe8a1), TBL(0xa81a664b), TBL(0xc24b8b70), TBL(0xc76c51a3) }, { TBL(0xd192e819), TBL(0xd6990624), TBL(0xf40e3585), TBL(0x106aa070) }, { TBL(0x19a4c116), TBL(0x1e376c08), TBL(0x2748774c), TBL(0x34b0bcb5) }, { TBL(0x391c0cb3), TBL(0x4ed8aa4a), TBL(0x5b9cca4f), TBL(0x682e6ff3) }, { TBL(0x748f82ee), TBL(0x78a5636f), TBL(0x84c87814), TBL(0x8cc70208) }, { TBL(0x90befffa), TBL(0xa4506ceb), TBL(0xbef9a3f7), TBL(0xc67178f2) } #undef TBL }; static ASM_FUNC_ATTR_INLINE vector4x_u32 vec_rol_elems(vector4x_u32 v, unsigned int idx) { #ifndef WORDS_BIGENDIAN return vec_sld (v, v, (16 - (4 * idx)) & 15); #else return vec_sld (v, v, (4 * idx) & 15); #endif } static ASM_FUNC_ATTR_INLINE vector4x_u32 vec_merge_idx0_elems(vector4x_u32 v0, vector4x_u32 v1, vector4x_u32 v2, vector4x_u32 v3) { return (vector4x_u32)vec_mergeh ((vector2x_u64) vec_mergeh(v0, v1), (vector2x_u64) vec_mergeh(v2, v3)); } static ASM_FUNC_ATTR_INLINE vector4x_u32 vec_vshasigma_u32(vector4x_u32 v, unsigned int a, unsigned int b) { asm ("vshasigmaw %0,%1,%2,%3" : "=v" (v) : "v" (v), "g" (a), "g" (b) : "memory"); return v; } static ASM_FUNC_ATTR_INLINE vector4x_u32 vec_add_u32(vector4x_u32 v, vector4x_u32 w) { __asm__ ("vadduwm %0,%1,%2" : "=v" (v) : "v" (v), "v" (w) : "memory"); return v; } static ASM_FUNC_ATTR_INLINE vector4x_u32 vec_u32_load_be(unsigned long offset, const void *ptr) { vector4x_u32 vecu32; #if __GNUC__ >= 4 if (__builtin_constant_p (offset) && offset == 0) __asm__ volatile ("lxvw4x %x0,0,%1\n\t" : "=wa" (vecu32) : "r" ((uintptr_t)ptr) : "memory"); else #endif __asm__ volatile ("lxvw4x %x0,%1,%2\n\t" : "=wa" (vecu32) : "r" (offset), "r" ((uintptr_t)ptr) : "memory", "r0"); #ifndef WORDS_BIGENDIAN return (vector4x_u32)vec_reve((vector16x_u8)vecu32); #else return vecu32; #endif } /* SHA2 round in vector registers */ #define R(a,b,c,d,e,f,g,h,ki,w) do \ { \ t1 = vec_add_u32((h), (w)); \ t2 = Cho((e),(f),(g)); \ t1 = vec_add_u32(t1, GETK(ki)); \ t1 = vec_add_u32(t1, t2); \ t1 = Sum1add(t1, e); \ t2 = Maj((a),(b),(c)); \ t2 = Sum0add(t2, a); \ h = vec_add_u32(t1, t2); \ d += t1; \ } while (0) #define GETK(kidx) \ ({ \ vector4x_u32 rk; \ if (((kidx) % 4) == 0) \ { \ rk = ktmp = *(kptr++); \ if ((kidx) < 63) \ asm volatile("" : "+r" (kptr) :: "memory"); \ } \ else if (((kidx) % 4) == 1) \ { \ rk = vec_mergeo(ktmp, ktmp); \ } \ else \ { \ rk = vec_rol_elems(ktmp, ((kidx) % 4)); \ } \ rk; \ }) #define Cho(b, c, d) (vec_sel(d, c, b)) #define Maj(c, d, b) (vec_sel(c, b, c ^ d)) #define Sum0(x) (vec_vshasigma_u32(x, 1, 0)) #define Sum1(x) (vec_vshasigma_u32(x, 1, 15)) #define S0(x) (vec_vshasigma_u32(x, 0, 0)) #define S1(x) (vec_vshasigma_u32(x, 0, 15)) #define Xadd(X, d, x) vec_add_u32(d, X(x)) #define Sum0add(d, x) Xadd(Sum0, d, x) #define Sum1add(d, x) Xadd(Sum1, d, x) #define S0add(d, x) Xadd(S0, d, x) #define S1add(d, x) Xadd(S1, d, x) #define I(i) \ ({ \ if (((i) % 4) == 0) \ { \ w[i] = vec_u32_load_be(0, data); \ data += 4 * 4; \ if ((i) / 4 < 3) \ asm volatile("" : "+r"(data) :: "memory"); \ } \ else if (((i) % 4) == 1) \ { \ w[i] = vec_mergeo(w[(i) - 1], w[(i) - 1]); \ } \ else \ { \ w[i] = vec_rol_elems(w[(i) - (i) % 4], (i)); \ } \ }) #define WN(i) ({ w[(i)&0x0f] += w[((i)-7) &0x0f]; \ w[(i)&0x0f] = S0add(w[(i)&0x0f], w[((i)-15)&0x0f]); \ w[(i)&0x0f] = S1add(w[(i)&0x0f], w[((i)-2) &0x0f]); }) #define W(i) 
({ vector4x_u32 r = w[(i)&0x0f]; WN(i); r; }) #define L(i) w[(i)&0x0f] #define I2(i) \ ({ \ if ((i) % 4 == 0) \ { \ vector4x_u32 iw = vec_u32_load_be(0, data); \ vector4x_u32 iw2 = vec_u32_load_be(64, data); \ if ((i) / 4 < 3) \ { \ data += 4 * 4; \ asm volatile("" : "+r"(data) :: "memory"); \ } \ else \ { \ data += 4 * 4 + 64; \ asm volatile("" : "+r"(data) :: "memory"); \ } \ w[(i) + 0] = vec_mergeh(iw, iw2); \ w[(i) + 1] = vec_rol_elems(w[(i) + 0], 2); \ w[(i) + 2] = vec_mergel(iw, iw2); \ w[(i) + 3] = vec_rol_elems(w[(i) + 2], 2); \ } \ }) #define W2(i) \ ({ \ vector4x_u32 wt1 = w[(i)&0x0f]; \ WN(i); \ w2[(i) / 2] = (((i) % 2) == 0) ? wt1 : vec_mergeo(w2[(i) / 2], wt1); \ wt1; \ }) #define L2(i) \ ({ \ vector4x_u32 lt1 = w[(i)&0x0f]; \ w2[(i) / 2] = (((i) % 2) == 0) ? lt1 : vec_mergeo(w2[(i) / 2], lt1); \ lt1; \ }) #define WL(i) \ ({ \ vector4x_u32 wlt1 = w2[(i) / 2]; \ if (((i) % 2) == 0 && (i) < 63) \ w2[(i) / 2] = vec_mergeo(wlt1, wlt1); \ wlt1; \ }) -static unsigned int ASM_FUNC_ATTR ASM_FUNC_ATTR_INLINE FUNC_ATTR_OPT_O2 +static ASM_FUNC_ATTR_INLINE FUNC_ATTR_OPT_O2 unsigned int sha256_transform_ppc(u32 state[8], const unsigned char *data, size_t nblks) { vector4x_u32 h0, h1, h2, h3, h4, h5, h6, h7; vector4x_u32 h0_h3, h4_h7; vector4x_u32 a, b, c, d, e, f, g, h, t1, t2; vector4x_u32 w[16]; vector4x_u32 w2[64 / 2]; h0_h3 = vec_vsx_ld (4 * 0, state); h4_h7 = vec_vsx_ld (4 * 4, state); h0 = h0_h3; h1 = vec_mergeo (h0_h3, h0_h3); h2 = vec_rol_elems (h0_h3, 2); h3 = vec_rol_elems (h0_h3, 3); h4 = h4_h7; h5 = vec_mergeo (h4_h7, h4_h7); h6 = vec_rol_elems (h4_h7, 2); h7 = vec_rol_elems (h4_h7, 3); while (nblks >= 2) { const vector4x_u32 *kptr = K; vector4x_u32 ktmp; a = h0; b = h1; c = h2; d = h3; e = h4; f = h5; g = h6; h = h7; I2(0); I2(1); I2(2); I2(3); I2(4); I2(5); I2(6); I2(7); I2(8); I2(9); I2(10); I2(11); I2(12); I2(13); I2(14); I2(15); R(a, b, c, d, e, f, g, h, 0, W2(0)); R(h, a, b, c, d, e, f, g, 1, W2(1)); R(g, h, a, b, c, d, e, f, 2, W2(2)); R(f, g, h, a, b, c, d, e, 3, W2(3)); R(e, f, g, h, a, b, c, d, 4, W2(4)); R(d, e, f, g, h, a, b, c, 5, W2(5)); R(c, d, e, f, g, h, a, b, 6, W2(6)); R(b, c, d, e, f, g, h, a, 7, W2(7)); R(a, b, c, d, e, f, g, h, 8, W2(8)); R(h, a, b, c, d, e, f, g, 9, W2(9)); R(g, h, a, b, c, d, e, f, 10, W2(10)); R(f, g, h, a, b, c, d, e, 11, W2(11)); R(e, f, g, h, a, b, c, d, 12, W2(12)); R(d, e, f, g, h, a, b, c, 13, W2(13)); R(c, d, e, f, g, h, a, b, 14, W2(14)); R(b, c, d, e, f, g, h, a, 15, W2(15)); R(a, b, c, d, e, f, g, h, 16, W2(16)); R(h, a, b, c, d, e, f, g, 17, W2(17)); R(g, h, a, b, c, d, e, f, 18, W2(18)); R(f, g, h, a, b, c, d, e, 19, W2(19)); R(e, f, g, h, a, b, c, d, 20, W2(20)); R(d, e, f, g, h, a, b, c, 21, W2(21)); R(c, d, e, f, g, h, a, b, 22, W2(22)); R(b, c, d, e, f, g, h, a, 23, W2(23)); R(a, b, c, d, e, f, g, h, 24, W2(24)); R(h, a, b, c, d, e, f, g, 25, W2(25)); R(g, h, a, b, c, d, e, f, 26, W2(26)); R(f, g, h, a, b, c, d, e, 27, W2(27)); R(e, f, g, h, a, b, c, d, 28, W2(28)); R(d, e, f, g, h, a, b, c, 29, W2(29)); R(c, d, e, f, g, h, a, b, 30, W2(30)); R(b, c, d, e, f, g, h, a, 31, W2(31)); R(a, b, c, d, e, f, g, h, 32, W2(32)); R(h, a, b, c, d, e, f, g, 33, W2(33)); R(g, h, a, b, c, d, e, f, 34, W2(34)); R(f, g, h, a, b, c, d, e, 35, W2(35)); R(e, f, g, h, a, b, c, d, 36, W2(36)); R(d, e, f, g, h, a, b, c, 37, W2(37)); R(c, d, e, f, g, h, a, b, 38, W2(38)); R(b, c, d, e, f, g, h, a, 39, W2(39)); R(a, b, c, d, e, f, g, h, 40, W2(40)); R(h, a, b, c, d, e, f, g, 41, W2(41)); R(g, h, a, b, c, d, e, f, 42, W2(42)); R(f, g, h, a, b, c, 
d, e, 43, W2(43)); R(e, f, g, h, a, b, c, d, 44, W2(44)); R(d, e, f, g, h, a, b, c, 45, W2(45)); R(c, d, e, f, g, h, a, b, 46, W2(46)); R(b, c, d, e, f, g, h, a, 47, W2(47)); R(a, b, c, d, e, f, g, h, 48, L2(48)); R(h, a, b, c, d, e, f, g, 49, L2(49)); R(g, h, a, b, c, d, e, f, 50, L2(50)); R(f, g, h, a, b, c, d, e, 51, L2(51)); R(e, f, g, h, a, b, c, d, 52, L2(52)); R(d, e, f, g, h, a, b, c, 53, L2(53)); R(c, d, e, f, g, h, a, b, 54, L2(54)); R(b, c, d, e, f, g, h, a, 55, L2(55)); R(a, b, c, d, e, f, g, h, 56, L2(56)); R(h, a, b, c, d, e, f, g, 57, L2(57)); R(g, h, a, b, c, d, e, f, 58, L2(58)); R(f, g, h, a, b, c, d, e, 59, L2(59)); R(e, f, g, h, a, b, c, d, 60, L2(60)); R(d, e, f, g, h, a, b, c, 61, L2(61)); R(c, d, e, f, g, h, a, b, 62, L2(62)); R(b, c, d, e, f, g, h, a, 63, L2(63)); h0 += a; h1 += b; h2 += c; h3 += d; h4 += e; h5 += f; h6 += g; h7 += h; kptr = K; a = h0; b = h1; c = h2; d = h3; e = h4; f = h5; g = h6; h = h7; R(a, b, c, d, e, f, g, h, 0, WL(0)); R(h, a, b, c, d, e, f, g, 1, WL(1)); R(g, h, a, b, c, d, e, f, 2, WL(2)); R(f, g, h, a, b, c, d, e, 3, WL(3)); R(e, f, g, h, a, b, c, d, 4, WL(4)); R(d, e, f, g, h, a, b, c, 5, WL(5)); R(c, d, e, f, g, h, a, b, 6, WL(6)); R(b, c, d, e, f, g, h, a, 7, WL(7)); R(a, b, c, d, e, f, g, h, 8, WL(8)); R(h, a, b, c, d, e, f, g, 9, WL(9)); R(g, h, a, b, c, d, e, f, 10, WL(10)); R(f, g, h, a, b, c, d, e, 11, WL(11)); R(e, f, g, h, a, b, c, d, 12, WL(12)); R(d, e, f, g, h, a, b, c, 13, WL(13)); R(c, d, e, f, g, h, a, b, 14, WL(14)); R(b, c, d, e, f, g, h, a, 15, WL(15)); R(a, b, c, d, e, f, g, h, 16, WL(16)); R(h, a, b, c, d, e, f, g, 17, WL(17)); R(g, h, a, b, c, d, e, f, 18, WL(18)); R(f, g, h, a, b, c, d, e, 19, WL(19)); R(e, f, g, h, a, b, c, d, 20, WL(20)); R(d, e, f, g, h, a, b, c, 21, WL(21)); R(c, d, e, f, g, h, a, b, 22, WL(22)); R(b, c, d, e, f, g, h, a, 23, WL(23)); R(a, b, c, d, e, f, g, h, 24, WL(24)); R(h, a, b, c, d, e, f, g, 25, WL(25)); R(g, h, a, b, c, d, e, f, 26, WL(26)); R(f, g, h, a, b, c, d, e, 27, WL(27)); R(e, f, g, h, a, b, c, d, 28, WL(28)); R(d, e, f, g, h, a, b, c, 29, WL(29)); R(c, d, e, f, g, h, a, b, 30, WL(30)); R(b, c, d, e, f, g, h, a, 31, WL(31)); R(a, b, c, d, e, f, g, h, 32, WL(32)); R(h, a, b, c, d, e, f, g, 33, WL(33)); R(g, h, a, b, c, d, e, f, 34, WL(34)); R(f, g, h, a, b, c, d, e, 35, WL(35)); R(e, f, g, h, a, b, c, d, 36, WL(36)); R(d, e, f, g, h, a, b, c, 37, WL(37)); R(c, d, e, f, g, h, a, b, 38, WL(38)); R(b, c, d, e, f, g, h, a, 39, WL(39)); R(a, b, c, d, e, f, g, h, 40, WL(40)); R(h, a, b, c, d, e, f, g, 41, WL(41)); R(g, h, a, b, c, d, e, f, 42, WL(42)); R(f, g, h, a, b, c, d, e, 43, WL(43)); R(e, f, g, h, a, b, c, d, 44, WL(44)); R(d, e, f, g, h, a, b, c, 45, WL(45)); R(c, d, e, f, g, h, a, b, 46, WL(46)); R(b, c, d, e, f, g, h, a, 47, WL(47)); R(a, b, c, d, e, f, g, h, 48, WL(48)); R(h, a, b, c, d, e, f, g, 49, WL(49)); R(g, h, a, b, c, d, e, f, 50, WL(50)); R(f, g, h, a, b, c, d, e, 51, WL(51)); R(e, f, g, h, a, b, c, d, 52, WL(52)); R(d, e, f, g, h, a, b, c, 53, WL(53)); R(c, d, e, f, g, h, a, b, 54, WL(54)); R(b, c, d, e, f, g, h, a, 55, WL(55)); R(a, b, c, d, e, f, g, h, 56, WL(56)); R(h, a, b, c, d, e, f, g, 57, WL(57)); R(g, h, a, b, c, d, e, f, 58, WL(58)); R(f, g, h, a, b, c, d, e, 59, WL(59)); R(e, f, g, h, a, b, c, d, 60, WL(60)); R(d, e, f, g, h, a, b, c, 61, WL(61)); R(c, d, e, f, g, h, a, b, 62, WL(62)); R(b, c, d, e, f, g, h, a, 63, WL(63)); h0 += a; h1 += b; h2 += c; h3 += d; h4 += e; h5 += f; h6 += g; h7 += h; nblks -= 2; } if (nblks) { const vector4x_u32 *kptr = K; 
vector4x_u32 ktmp; a = h0; b = h1; c = h2; d = h3; e = h4; f = h5; g = h6; h = h7; I(0); I(1); I(2); I(3); I(4); I(5); I(6); I(7); I(8); I(9); I(10); I(11); I(12); I(13); I(14); I(15); R(a, b, c, d, e, f, g, h, 0, W(0)); R(h, a, b, c, d, e, f, g, 1, W(1)); R(g, h, a, b, c, d, e, f, 2, W(2)); R(f, g, h, a, b, c, d, e, 3, W(3)); R(e, f, g, h, a, b, c, d, 4, W(4)); R(d, e, f, g, h, a, b, c, 5, W(5)); R(c, d, e, f, g, h, a, b, 6, W(6)); R(b, c, d, e, f, g, h, a, 7, W(7)); R(a, b, c, d, e, f, g, h, 8, W(8)); R(h, a, b, c, d, e, f, g, 9, W(9)); R(g, h, a, b, c, d, e, f, 10, W(10)); R(f, g, h, a, b, c, d, e, 11, W(11)); R(e, f, g, h, a, b, c, d, 12, W(12)); R(d, e, f, g, h, a, b, c, 13, W(13)); R(c, d, e, f, g, h, a, b, 14, W(14)); R(b, c, d, e, f, g, h, a, 15, W(15)); R(a, b, c, d, e, f, g, h, 16, W(16)); R(h, a, b, c, d, e, f, g, 17, W(17)); R(g, h, a, b, c, d, e, f, 18, W(18)); R(f, g, h, a, b, c, d, e, 19, W(19)); R(e, f, g, h, a, b, c, d, 20, W(20)); R(d, e, f, g, h, a, b, c, 21, W(21)); R(c, d, e, f, g, h, a, b, 22, W(22)); R(b, c, d, e, f, g, h, a, 23, W(23)); R(a, b, c, d, e, f, g, h, 24, W(24)); R(h, a, b, c, d, e, f, g, 25, W(25)); R(g, h, a, b, c, d, e, f, 26, W(26)); R(f, g, h, a, b, c, d, e, 27, W(27)); R(e, f, g, h, a, b, c, d, 28, W(28)); R(d, e, f, g, h, a, b, c, 29, W(29)); R(c, d, e, f, g, h, a, b, 30, W(30)); R(b, c, d, e, f, g, h, a, 31, W(31)); R(a, b, c, d, e, f, g, h, 32, W(32)); R(h, a, b, c, d, e, f, g, 33, W(33)); R(g, h, a, b, c, d, e, f, 34, W(34)); R(f, g, h, a, b, c, d, e, 35, W(35)); R(e, f, g, h, a, b, c, d, 36, W(36)); R(d, e, f, g, h, a, b, c, 37, W(37)); R(c, d, e, f, g, h, a, b, 38, W(38)); R(b, c, d, e, f, g, h, a, 39, W(39)); R(a, b, c, d, e, f, g, h, 40, W(40)); R(h, a, b, c, d, e, f, g, 41, W(41)); R(g, h, a, b, c, d, e, f, 42, W(42)); R(f, g, h, a, b, c, d, e, 43, W(43)); R(e, f, g, h, a, b, c, d, 44, W(44)); R(d, e, f, g, h, a, b, c, 45, W(45)); R(c, d, e, f, g, h, a, b, 46, W(46)); R(b, c, d, e, f, g, h, a, 47, W(47)); R(a, b, c, d, e, f, g, h, 48, L(48)); R(h, a, b, c, d, e, f, g, 49, L(49)); R(g, h, a, b, c, d, e, f, 50, L(50)); R(f, g, h, a, b, c, d, e, 51, L(51)); R(e, f, g, h, a, b, c, d, 52, L(52)); R(d, e, f, g, h, a, b, c, 53, L(53)); R(c, d, e, f, g, h, a, b, 54, L(54)); R(b, c, d, e, f, g, h, a, 55, L(55)); R(a, b, c, d, e, f, g, h, 56, L(56)); R(h, a, b, c, d, e, f, g, 57, L(57)); R(g, h, a, b, c, d, e, f, 58, L(58)); R(f, g, h, a, b, c, d, e, 59, L(59)); R(e, f, g, h, a, b, c, d, 60, L(60)); R(d, e, f, g, h, a, b, c, 61, L(61)); R(c, d, e, f, g, h, a, b, 62, L(62)); R(b, c, d, e, f, g, h, a, 63, L(63)); h0 += a; h1 += b; h2 += c; h3 += d; h4 += e; h5 += f; h6 += g; h7 += h; nblks--; } h0_h3 = vec_merge_idx0_elems (h0, h1, h2, h3); h4_h7 = vec_merge_idx0_elems (h4, h5, h6, h7); vec_vsx_st (h0_h3, 4 * 0, state); vec_vsx_st (h4_h7, 4 * 4, state); return sizeof(w2) + sizeof(w); } unsigned int ASM_FUNC_ATTR FUNC_ATTR_TARGET_P8 FUNC_ATTR_OPT_O2 _gcry_sha256_transform_ppc8(u32 state[8], const unsigned char *data, size_t nblks) { return sha256_transform_ppc(state, data, nblks); } unsigned int ASM_FUNC_ATTR FUNC_ATTR_TARGET_P9 FUNC_ATTR_OPT_O2 _gcry_sha256_transform_ppc9(u32 state[8], const unsigned char *data, size_t nblks) { return sha256_transform_ppc(state, data, nblks); } #endif /* ENABLE_PPC_CRYPTO_SUPPORT */ diff --git a/cipher/sha512-ppc.c b/cipher/sha512-ppc.c index 6e69ddb9..d213c241 100644 --- a/cipher/sha512-ppc.c +++ b/cipher/sha512-ppc.c @@ -1,725 +1,725 @@ /* sha512-ppc.c - PowerPC vcrypto implementation of SHA-512 transform * Copyright 
(C) 2019,2023 Jussi Kivilinna * * This file is part of Libgcrypt. * * Libgcrypt is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * Libgcrypt is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this program; if not, see . */ #include #if defined(ENABLE_PPC_CRYPTO_SUPPORT) && \ defined(HAVE_COMPATIBLE_CC_PPC_ALTIVEC) && \ defined(HAVE_GCC_INLINE_ASM_PPC_ALTIVEC) && \ defined(USE_SHA512) && \ __GNUC__ >= 4 #include #include "bufhelp.h" typedef vector unsigned char vector16x_u8; typedef vector unsigned long long vector2x_u64; #define ALWAYS_INLINE inline __attribute__((always_inline)) #define NO_INLINE __attribute__((noinline)) #define NO_INSTRUMENT_FUNCTION __attribute__((no_instrument_function)) #define ASM_FUNC_ATTR NO_INSTRUMENT_FUNCTION #define ASM_FUNC_ATTR_INLINE ASM_FUNC_ATTR ALWAYS_INLINE #define ASM_FUNC_ATTR_NOINLINE ASM_FUNC_ATTR NO_INLINE #ifdef HAVE_GCC_ATTRIBUTE_OPTIMIZE # define FUNC_ATTR_OPT_O2 __attribute__((optimize("-O2"))) #else # define FUNC_ATTR_OPT_O2 #endif #if defined(__clang__) && defined(HAVE_CLANG_ATTRIBUTE_PPC_TARGET) # define FUNC_ATTR_TARGET_P8 __attribute__((target("arch=pwr8"))) # define FUNC_ATTR_TARGET_P9 __attribute__((target("arch=pwr9"))) #elif defined(HAVE_GCC_ATTRIBUTE_PPC_TARGET) # define FUNC_ATTR_TARGET_P8 __attribute__((target("cpu=power8"))) # define FUNC_ATTR_TARGET_P9 __attribute__((target("cpu=power9"))) #else # define FUNC_ATTR_TARGET_P8 # define FUNC_ATTR_TARGET_P9 #endif static const vector2x_u64 K[80] = { { U64_C(0x428a2f98d728ae22), U64_C(0x7137449123ef65cd) }, { U64_C(0xb5c0fbcfec4d3b2f), U64_C(0xe9b5dba58189dbbc) }, { U64_C(0x3956c25bf348b538), U64_C(0x59f111f1b605d019) }, { U64_C(0x923f82a4af194f9b), U64_C(0xab1c5ed5da6d8118) }, { U64_C(0xd807aa98a3030242), U64_C(0x12835b0145706fbe) }, { U64_C(0x243185be4ee4b28c), U64_C(0x550c7dc3d5ffb4e2) }, { U64_C(0x72be5d74f27b896f), U64_C(0x80deb1fe3b1696b1) }, { U64_C(0x9bdc06a725c71235), U64_C(0xc19bf174cf692694) }, { U64_C(0xe49b69c19ef14ad2), U64_C(0xefbe4786384f25e3) }, { U64_C(0x0fc19dc68b8cd5b5), U64_C(0x240ca1cc77ac9c65) }, { U64_C(0x2de92c6f592b0275), U64_C(0x4a7484aa6ea6e483) }, { U64_C(0x5cb0a9dcbd41fbd4), U64_C(0x76f988da831153b5) }, { U64_C(0x983e5152ee66dfab), U64_C(0xa831c66d2db43210) }, { U64_C(0xb00327c898fb213f), U64_C(0xbf597fc7beef0ee4) }, { U64_C(0xc6e00bf33da88fc2), U64_C(0xd5a79147930aa725) }, { U64_C(0x06ca6351e003826f), U64_C(0x142929670a0e6e70) }, { U64_C(0x27b70a8546d22ffc), U64_C(0x2e1b21385c26c926) }, { U64_C(0x4d2c6dfc5ac42aed), U64_C(0x53380d139d95b3df) }, { U64_C(0x650a73548baf63de), U64_C(0x766a0abb3c77b2a8) }, { U64_C(0x81c2c92e47edaee6), U64_C(0x92722c851482353b) }, { U64_C(0xa2bfe8a14cf10364), U64_C(0xa81a664bbc423001) }, { U64_C(0xc24b8b70d0f89791), U64_C(0xc76c51a30654be30) }, { U64_C(0xd192e819d6ef5218), U64_C(0xd69906245565a910) }, { U64_C(0xf40e35855771202a), U64_C(0x106aa07032bbd1b8) }, { U64_C(0x19a4c116b8d2d0c8), U64_C(0x1e376c085141ab53) }, { U64_C(0x2748774cdf8eeb99), U64_C(0x34b0bcb5e19b48a8) }, { U64_C(0x391c0cb3c5c95a63), U64_C(0x4ed8aa4ae3418acb) }, { U64_C(0x5b9cca4f7763e373), 
U64_C(0x682e6ff3d6b2b8a3) }, { U64_C(0x748f82ee5defb2fc), U64_C(0x78a5636f43172f60) }, { U64_C(0x84c87814a1f0ab72), U64_C(0x8cc702081a6439ec) }, { U64_C(0x90befffa23631e28), U64_C(0xa4506cebde82bde9) }, { U64_C(0xbef9a3f7b2c67915), U64_C(0xc67178f2e372532b) }, { U64_C(0xca273eceea26619c), U64_C(0xd186b8c721c0c207) }, { U64_C(0xeada7dd6cde0eb1e), U64_C(0xf57d4f7fee6ed178) }, { U64_C(0x06f067aa72176fba), U64_C(0x0a637dc5a2c898a6) }, { U64_C(0x113f9804bef90dae), U64_C(0x1b710b35131c471b) }, { U64_C(0x28db77f523047d84), U64_C(0x32caab7b40c72493) }, { U64_C(0x3c9ebe0a15c9bebc), U64_C(0x431d67c49c100d4c) }, { U64_C(0x4cc5d4becb3e42b6), U64_C(0x597f299cfc657e2a) }, { U64_C(0x5fcb6fab3ad6faec), U64_C(0x6c44198c4a475817) } }; static ASM_FUNC_ATTR_INLINE vector2x_u64 vec_rol_elems(vector2x_u64 v, unsigned int idx) { #ifndef WORDS_BIGENDIAN return vec_sld (v, v, (16 - (8 * idx)) & 15); #else return vec_sld (v, v, (8 * idx) & 15); #endif } static ASM_FUNC_ATTR_INLINE vector2x_u64 vec_merge_idx0_elems(vector2x_u64 v0, vector2x_u64 v1) { return vec_mergeh (v0, v1); } static ASM_FUNC_ATTR_INLINE vector2x_u64 vec_vshasigma_u64(vector2x_u64 v, unsigned int a, unsigned int b) { __asm__ ("vshasigmad %0,%1,%2,%3" : "=v" (v) : "v" (v), "g" (a), "g" (b) : "memory"); return v; } static ASM_FUNC_ATTR_INLINE vector2x_u64 vec_add_u64(vector2x_u64 v, vector2x_u64 w) { __asm__ ("vaddudm %0,%1,%2" : "=v" (v) : "v" (v), "v" (w) : "memory"); return v; } static ASM_FUNC_ATTR_INLINE vector2x_u64 vec_u64_load(unsigned long offset, const void *ptr) { vector2x_u64 vecu64; #if __GNUC__ >= 4 if (__builtin_constant_p (offset) && offset == 0) __asm__ ("lxvd2x %x0,0,%1\n\t" : "=wa" (vecu64) : "r" ((uintptr_t)ptr) : "memory"); else #endif __asm__ ("lxvd2x %x0,%1,%2\n\t" : "=wa" (vecu64) : "r" (offset), "r" ((uintptr_t)ptr) : "memory", "r0"); #ifndef WORDS_BIGENDIAN __asm__ ("xxswapd %x0, %x1" : "=wa" (vecu64) : "wa" (vecu64)); #endif return vecu64; } static ASM_FUNC_ATTR_INLINE void vec_u64_store(vector2x_u64 vecu64, unsigned long offset, void *ptr) { #ifndef WORDS_BIGENDIAN __asm__ ("xxswapd %x0, %x1" : "=wa" (vecu64) : "wa" (vecu64)); #endif #if __GNUC__ >= 4 if (__builtin_constant_p (offset) && offset == 0) __asm__ ("stxvd2x %x0,0,%1\n\t" : : "wa" (vecu64), "r" ((uintptr_t)ptr) : "memory"); else #endif __asm__ ("stxvd2x %x0,%1,%2\n\t" : : "wa" (vecu64), "r" (offset), "r" ((uintptr_t)ptr) : "memory", "r0"); } static ASM_FUNC_ATTR_INLINE vector2x_u64 vec_u64_load_be(unsigned long offset, const void *ptr) { vector2x_u64 vecu64; #if __GNUC__ >= 4 if (__builtin_constant_p (offset) && offset == 0) __asm__ volatile ("lxvd2x %x0,0,%1\n\t" : "=wa" (vecu64) : "r" ((uintptr_t)ptr) : "memory"); else #endif __asm__ volatile ("lxvd2x %x0,%1,%2\n\t" : "=wa" (vecu64) : "r" (offset), "r" ((uintptr_t)ptr) : "memory", "r0"); #ifndef WORDS_BIGENDIAN return (vector2x_u64)vec_reve((vector16x_u8)vecu64); #else return vecu64; #endif } /* SHA2 round in vector registers */ #define R(a,b,c,d,e,f,g,h,ki,w) do \ { \ t1 = vec_add_u64((h), (w)); \ t2 = Cho((e),(f),(g)); \ t1 = vec_add_u64(t1, GETK(ki)); \ t1 = vec_add_u64(t1, t2); \ t1 = Sum1add(t1, e); \ t2 = Maj((a),(b),(c)); \ t2 = Sum0add(t2, a); \ h = vec_add_u64(t1, t2); \ d += t1; \ } while (0) #define GETK(kidx) \ ({ \ if (((kidx) % 2) == 0) \ { \ ktmp = *(kptr++); \ if ((kidx) < 79) \ asm volatile("" : "+r" (kptr) :: "memory"); \ } \ else \ { \ ktmp = vec_mergel(ktmp, ktmp); \ } \ ktmp; \ }) #define Cho(b, c, d) (vec_sel(d, c, b)) #define Maj(c, d, b) (vec_sel(c, b, c ^ d)) #define Sum0(x) 
(vec_vshasigma_u64(x, 1, 0)) #define Sum1(x) (vec_vshasigma_u64(x, 1, 15)) #define S0(x) (vec_vshasigma_u64(x, 0, 0)) #define S1(x) (vec_vshasigma_u64(x, 0, 15)) #define Xadd(X, d, x) vec_add_u64(d, X(x)) #define Sum0add(d, x) Xadd(Sum0, d, x) #define Sum1add(d, x) Xadd(Sum1, d, x) #define S0add(d, x) Xadd(S0, d, x) #define S1add(d, x) Xadd(S1, d, x) #define I(i) \ ({ \ if (((i) % 2) == 0) \ { \ w[i] = vec_u64_load_be(0, data); \ data += 2 * 8; \ if ((i) / 2 < 7) \ asm volatile("" : "+r"(data) :: "memory"); \ } \ else \ { \ w[i] = vec_mergel(w[(i) - 1], w[(i) - 1]); \ } \ }) #define WN(i) ({ w[(i)&0x0f] += w[((i)-7) &0x0f]; \ w[(i)&0x0f] = S0add(w[(i)&0x0f], w[((i)-15)&0x0f]); \ w[(i)&0x0f] = S1add(w[(i)&0x0f], w[((i)-2) &0x0f]); }) #define W(i) ({ vector2x_u64 r = w[(i)&0x0f]; WN(i); r; }) #define L(i) w[(i)&0x0f] #define I2(i) \ ({ \ if (((i) % 2) == 0) \ { \ w[i] = vec_u64_load_be(0, data); \ } \ else \ { \ vector2x_u64 it1 = vec_u64_load_be(128, data); \ vector2x_u64 it2 = vec_mergeh(w[(i) - 1], it1); \ w[i] = vec_mergel(w[(i) - 1], it1); \ w[(i) - 1] = it2; \ if ((i) < 15) \ { \ data += 2 * 8; \ asm volatile("" : "+r"(data) :: "memory"); \ } \ else \ { \ data += 2 * 8 + 128; \ asm volatile("" : "+r"(data) :: "memory"); \ } \ } \ }) #define W2(i) \ ({ \ vector2x_u64 wt1 = w[(i)&0x0f]; \ WN(i); \ w2[(i) / 2] = (((i) % 2) == 0) ? wt1 : vec_mergel(w2[(i) / 2], wt1); \ wt1; \ }) #define L2(i) \ ({ \ vector2x_u64 lt1 = w[(i)&0x0f]; \ w2[(i) / 2] = (((i) % 2) == 0) ? lt1 : vec_mergel(w2[(i) / 2], lt1); \ lt1; \ }) #define WL(i) \ ({ \ vector2x_u64 wlt1 = w2[(i) / 2]; \ if (((i) % 2) == 0 && (i) < 79) \ w2[(i) / 2] = vec_mergel(wlt1, wlt1); \ wlt1; \ }) -static unsigned int ASM_FUNC_ATTR_INLINE FUNC_ATTR_OPT_O2 +static ASM_FUNC_ATTR_INLINE FUNC_ATTR_OPT_O2 unsigned int sha512_transform_ppc(u64 state[8], const unsigned char *data, size_t nblks) { vector2x_u64 h0, h1, h2, h3, h4, h5, h6, h7; vector2x_u64 a, b, c, d, e, f, g, h, t1, t2; vector2x_u64 w[16]; vector2x_u64 w2[80 / 2]; h0 = vec_u64_load (8 * 0, (unsigned long long *)state); h1 = vec_rol_elems (h0, 1); h2 = vec_u64_load (8 * 2, (unsigned long long *)state); h3 = vec_rol_elems (h2, 1); h4 = vec_u64_load (8 * 4, (unsigned long long *)state); h5 = vec_rol_elems (h4, 1); h6 = vec_u64_load (8 * 6, (unsigned long long *)state); h7 = vec_rol_elems (h6, 1); while (nblks >= 2) { const vector2x_u64 *kptr = K; vector2x_u64 ktmp; a = h0; b = h1; c = h2; d = h3; e = h4; f = h5; g = h6; h = h7; I2(0); I2(1); I2(2); I2(3); I2(4); I2(5); I2(6); I2(7); I2(8); I2(9); I2(10); I2(11); I2(12); I2(13); I2(14); I2(15); R(a, b, c, d, e, f, g, h, 0, W2(0)); R(h, a, b, c, d, e, f, g, 1, W2(1)); R(g, h, a, b, c, d, e, f, 2, W2(2)); R(f, g, h, a, b, c, d, e, 3, W2(3)); R(e, f, g, h, a, b, c, d, 4, W2(4)); R(d, e, f, g, h, a, b, c, 5, W2(5)); R(c, d, e, f, g, h, a, b, 6, W2(6)); R(b, c, d, e, f, g, h, a, 7, W2(7)); R(a, b, c, d, e, f, g, h, 8, W2(8)); R(h, a, b, c, d, e, f, g, 9, W2(9)); R(g, h, a, b, c, d, e, f, 10, W2(10)); R(f, g, h, a, b, c, d, e, 11, W2(11)); R(e, f, g, h, a, b, c, d, 12, W2(12)); R(d, e, f, g, h, a, b, c, 13, W2(13)); R(c, d, e, f, g, h, a, b, 14, W2(14)); R(b, c, d, e, f, g, h, a, 15, W2(15)); R(a, b, c, d, e, f, g, h, 16, W2(16)); R(h, a, b, c, d, e, f, g, 17, W2(17)); R(g, h, a, b, c, d, e, f, 18, W2(18)); R(f, g, h, a, b, c, d, e, 19, W2(19)); R(e, f, g, h, a, b, c, d, 20, W2(20)); R(d, e, f, g, h, a, b, c, 21, W2(21)); R(c, d, e, f, g, h, a, b, 22, W2(22)); R(b, c, d, e, f, g, h, a, 23, W2(23)); R(a, b, c, d, e, f, g, h, 24, W2(24)); 
R(h, a, b, c, d, e, f, g, 25, W2(25)); R(g, h, a, b, c, d, e, f, 26, W2(26)); R(f, g, h, a, b, c, d, e, 27, W2(27)); R(e, f, g, h, a, b, c, d, 28, W2(28)); R(d, e, f, g, h, a, b, c, 29, W2(29)); R(c, d, e, f, g, h, a, b, 30, W2(30)); R(b, c, d, e, f, g, h, a, 31, W2(31)); R(a, b, c, d, e, f, g, h, 32, W2(32)); R(h, a, b, c, d, e, f, g, 33, W2(33)); R(g, h, a, b, c, d, e, f, 34, W2(34)); R(f, g, h, a, b, c, d, e, 35, W2(35)); R(e, f, g, h, a, b, c, d, 36, W2(36)); R(d, e, f, g, h, a, b, c, 37, W2(37)); R(c, d, e, f, g, h, a, b, 38, W2(38)); R(b, c, d, e, f, g, h, a, 39, W2(39)); R(a, b, c, d, e, f, g, h, 40, W2(40)); R(h, a, b, c, d, e, f, g, 41, W2(41)); R(g, h, a, b, c, d, e, f, 42, W2(42)); R(f, g, h, a, b, c, d, e, 43, W2(43)); R(e, f, g, h, a, b, c, d, 44, W2(44)); R(d, e, f, g, h, a, b, c, 45, W2(45)); R(c, d, e, f, g, h, a, b, 46, W2(46)); R(b, c, d, e, f, g, h, a, 47, W2(47)); R(a, b, c, d, e, f, g, h, 48, W2(48)); R(h, a, b, c, d, e, f, g, 49, W2(49)); R(g, h, a, b, c, d, e, f, 50, W2(50)); R(f, g, h, a, b, c, d, e, 51, W2(51)); R(e, f, g, h, a, b, c, d, 52, W2(52)); R(d, e, f, g, h, a, b, c, 53, W2(53)); R(c, d, e, f, g, h, a, b, 54, W2(54)); R(b, c, d, e, f, g, h, a, 55, W2(55)); R(a, b, c, d, e, f, g, h, 56, W2(56)); R(h, a, b, c, d, e, f, g, 57, W2(57)); R(g, h, a, b, c, d, e, f, 58, W2(58)); R(f, g, h, a, b, c, d, e, 59, W2(59)); R(e, f, g, h, a, b, c, d, 60, W2(60)); R(d, e, f, g, h, a, b, c, 61, W2(61)); R(c, d, e, f, g, h, a, b, 62, W2(62)); R(b, c, d, e, f, g, h, a, 63, W2(63)); R(a, b, c, d, e, f, g, h, 64, L2(64)); R(h, a, b, c, d, e, f, g, 65, L2(65)); R(g, h, a, b, c, d, e, f, 66, L2(66)); R(f, g, h, a, b, c, d, e, 67, L2(67)); R(e, f, g, h, a, b, c, d, 68, L2(68)); R(d, e, f, g, h, a, b, c, 69, L2(69)); R(c, d, e, f, g, h, a, b, 70, L2(70)); R(b, c, d, e, f, g, h, a, 71, L2(71)); R(a, b, c, d, e, f, g, h, 72, L2(72)); R(h, a, b, c, d, e, f, g, 73, L2(73)); R(g, h, a, b, c, d, e, f, 74, L2(74)); R(f, g, h, a, b, c, d, e, 75, L2(75)); R(e, f, g, h, a, b, c, d, 76, L2(76)); R(d, e, f, g, h, a, b, c, 77, L2(77)); R(c, d, e, f, g, h, a, b, 78, L2(78)); R(b, c, d, e, f, g, h, a, 79, L2(79)); h0 += a; h1 += b; h2 += c; h3 += d; h4 += e; h5 += f; h6 += g; h7 += h; kptr = K; a = h0; b = h1; c = h2; d = h3; e = h4; f = h5; g = h6; h = h7; R(a, b, c, d, e, f, g, h, 0, WL(0)); R(h, a, b, c, d, e, f, g, 1, WL(1)); R(g, h, a, b, c, d, e, f, 2, WL(2)); R(f, g, h, a, b, c, d, e, 3, WL(3)); R(e, f, g, h, a, b, c, d, 4, WL(4)); R(d, e, f, g, h, a, b, c, 5, WL(5)); R(c, d, e, f, g, h, a, b, 6, WL(6)); R(b, c, d, e, f, g, h, a, 7, WL(7)); R(a, b, c, d, e, f, g, h, 8, WL(8)); R(h, a, b, c, d, e, f, g, 9, WL(9)); R(g, h, a, b, c, d, e, f, 10, WL(10)); R(f, g, h, a, b, c, d, e, 11, WL(11)); R(e, f, g, h, a, b, c, d, 12, WL(12)); R(d, e, f, g, h, a, b, c, 13, WL(13)); R(c, d, e, f, g, h, a, b, 14, WL(14)); R(b, c, d, e, f, g, h, a, 15, WL(15)); R(a, b, c, d, e, f, g, h, 16, WL(16)); R(h, a, b, c, d, e, f, g, 17, WL(17)); R(g, h, a, b, c, d, e, f, 18, WL(18)); R(f, g, h, a, b, c, d, e, 19, WL(19)); R(e, f, g, h, a, b, c, d, 20, WL(20)); R(d, e, f, g, h, a, b, c, 21, WL(21)); R(c, d, e, f, g, h, a, b, 22, WL(22)); R(b, c, d, e, f, g, h, a, 23, WL(23)); R(a, b, c, d, e, f, g, h, 24, WL(24)); R(h, a, b, c, d, e, f, g, 25, WL(25)); R(g, h, a, b, c, d, e, f, 26, WL(26)); R(f, g, h, a, b, c, d, e, 27, WL(27)); R(e, f, g, h, a, b, c, d, 28, WL(28)); R(d, e, f, g, h, a, b, c, 29, WL(29)); R(c, d, e, f, g, h, a, b, 30, WL(30)); R(b, c, d, e, f, g, h, a, 31, WL(31)); R(a, b, c, d, e, f, g, h, 32, 
WL(32)); R(h, a, b, c, d, e, f, g, 33, WL(33)); R(g, h, a, b, c, d, e, f, 34, WL(34)); R(f, g, h, a, b, c, d, e, 35, WL(35)); R(e, f, g, h, a, b, c, d, 36, WL(36)); R(d, e, f, g, h, a, b, c, 37, WL(37)); R(c, d, e, f, g, h, a, b, 38, WL(38)); R(b, c, d, e, f, g, h, a, 39, WL(39)); R(a, b, c, d, e, f, g, h, 40, WL(40)); R(h, a, b, c, d, e, f, g, 41, WL(41)); R(g, h, a, b, c, d, e, f, 42, WL(42)); R(f, g, h, a, b, c, d, e, 43, WL(43)); R(e, f, g, h, a, b, c, d, 44, WL(44)); R(d, e, f, g, h, a, b, c, 45, WL(45)); R(c, d, e, f, g, h, a, b, 46, WL(46)); R(b, c, d, e, f, g, h, a, 47, WL(47)); R(a, b, c, d, e, f, g, h, 48, WL(48)); R(h, a, b, c, d, e, f, g, 49, WL(49)); R(g, h, a, b, c, d, e, f, 50, WL(50)); R(f, g, h, a, b, c, d, e, 51, WL(51)); R(e, f, g, h, a, b, c, d, 52, WL(52)); R(d, e, f, g, h, a, b, c, 53, WL(53)); R(c, d, e, f, g, h, a, b, 54, WL(54)); R(b, c, d, e, f, g, h, a, 55, WL(55)); R(a, b, c, d, e, f, g, h, 56, WL(56)); R(h, a, b, c, d, e, f, g, 57, WL(57)); R(g, h, a, b, c, d, e, f, 58, WL(58)); R(f, g, h, a, b, c, d, e, 59, WL(59)); R(e, f, g, h, a, b, c, d, 60, WL(60)); R(d, e, f, g, h, a, b, c, 61, WL(61)); R(c, d, e, f, g, h, a, b, 62, WL(62)); R(b, c, d, e, f, g, h, a, 63, WL(63)); R(a, b, c, d, e, f, g, h, 64, WL(64)); R(h, a, b, c, d, e, f, g, 65, WL(65)); R(g, h, a, b, c, d, e, f, 66, WL(66)); R(f, g, h, a, b, c, d, e, 67, WL(67)); R(e, f, g, h, a, b, c, d, 68, WL(68)); R(d, e, f, g, h, a, b, c, 69, WL(69)); R(c, d, e, f, g, h, a, b, 70, WL(70)); R(b, c, d, e, f, g, h, a, 71, WL(71)); R(a, b, c, d, e, f, g, h, 72, WL(72)); R(h, a, b, c, d, e, f, g, 73, WL(73)); R(g, h, a, b, c, d, e, f, 74, WL(74)); R(f, g, h, a, b, c, d, e, 75, WL(75)); R(e, f, g, h, a, b, c, d, 76, WL(76)); R(d, e, f, g, h, a, b, c, 77, WL(77)); R(c, d, e, f, g, h, a, b, 78, WL(78)); R(b, c, d, e, f, g, h, a, 79, WL(79)); h0 += a; h1 += b; h2 += c; h3 += d; h4 += e; h5 += f; h6 += g; h7 += h; nblks -= 2; } if (nblks) { const vector2x_u64 *kptr = K; vector2x_u64 ktmp; a = h0; b = h1; c = h2; d = h3; e = h4; f = h5; g = h6; h = h7; I(0); I(1); I(2); I(3); I(4); I(5); I(6); I(7); I(8); I(9); I(10); I(11); I(12); I(13); I(14); I(15); R(a, b, c, d, e, f, g, h, 0, W(0)); R(h, a, b, c, d, e, f, g, 1, W(1)); R(g, h, a, b, c, d, e, f, 2, W(2)); R(f, g, h, a, b, c, d, e, 3, W(3)); R(e, f, g, h, a, b, c, d, 4, W(4)); R(d, e, f, g, h, a, b, c, 5, W(5)); R(c, d, e, f, g, h, a, b, 6, W(6)); R(b, c, d, e, f, g, h, a, 7, W(7)); R(a, b, c, d, e, f, g, h, 8, W(8)); R(h, a, b, c, d, e, f, g, 9, W(9)); R(g, h, a, b, c, d, e, f, 10, W(10)); R(f, g, h, a, b, c, d, e, 11, W(11)); R(e, f, g, h, a, b, c, d, 12, W(12)); R(d, e, f, g, h, a, b, c, 13, W(13)); R(c, d, e, f, g, h, a, b, 14, W(14)); R(b, c, d, e, f, g, h, a, 15, W(15)); R(a, b, c, d, e, f, g, h, 16, W(16)); R(h, a, b, c, d, e, f, g, 17, W(17)); R(g, h, a, b, c, d, e, f, 18, W(18)); R(f, g, h, a, b, c, d, e, 19, W(19)); R(e, f, g, h, a, b, c, d, 20, W(20)); R(d, e, f, g, h, a, b, c, 21, W(21)); R(c, d, e, f, g, h, a, b, 22, W(22)); R(b, c, d, e, f, g, h, a, 23, W(23)); R(a, b, c, d, e, f, g, h, 24, W(24)); R(h, a, b, c, d, e, f, g, 25, W(25)); R(g, h, a, b, c, d, e, f, 26, W(26)); R(f, g, h, a, b, c, d, e, 27, W(27)); R(e, f, g, h, a, b, c, d, 28, W(28)); R(d, e, f, g, h, a, b, c, 29, W(29)); R(c, d, e, f, g, h, a, b, 30, W(30)); R(b, c, d, e, f, g, h, a, 31, W(31)); R(a, b, c, d, e, f, g, h, 32, W(32)); R(h, a, b, c, d, e, f, g, 33, W(33)); R(g, h, a, b, c, d, e, f, 34, W(34)); R(f, g, h, a, b, c, d, e, 35, W(35)); R(e, f, g, h, a, b, c, d, 36, W(36)); R(d, e, f, 
g, h, a, b, c, 37, W(37)); R(c, d, e, f, g, h, a, b, 38, W(38)); R(b, c, d, e, f, g, h, a, 39, W(39)); R(a, b, c, d, e, f, g, h, 40, W(40)); R(h, a, b, c, d, e, f, g, 41, W(41)); R(g, h, a, b, c, d, e, f, 42, W(42)); R(f, g, h, a, b, c, d, e, 43, W(43)); R(e, f, g, h, a, b, c, d, 44, W(44)); R(d, e, f, g, h, a, b, c, 45, W(45)); R(c, d, e, f, g, h, a, b, 46, W(46)); R(b, c, d, e, f, g, h, a, 47, W(47)); R(a, b, c, d, e, f, g, h, 48, W(48)); R(h, a, b, c, d, e, f, g, 49, W(49)); R(g, h, a, b, c, d, e, f, 50, W(50)); R(f, g, h, a, b, c, d, e, 51, W(51)); R(e, f, g, h, a, b, c, d, 52, W(52)); R(d, e, f, g, h, a, b, c, 53, W(53)); R(c, d, e, f, g, h, a, b, 54, W(54)); R(b, c, d, e, f, g, h, a, 55, W(55)); R(a, b, c, d, e, f, g, h, 56, W(56)); R(h, a, b, c, d, e, f, g, 57, W(57)); R(g, h, a, b, c, d, e, f, 58, W(58)); R(f, g, h, a, b, c, d, e, 59, W(59)); R(e, f, g, h, a, b, c, d, 60, W(60)); R(d, e, f, g, h, a, b, c, 61, W(61)); R(c, d, e, f, g, h, a, b, 62, W(62)); R(b, c, d, e, f, g, h, a, 63, W(63)); R(a, b, c, d, e, f, g, h, 64, L(64)); R(h, a, b, c, d, e, f, g, 65, L(65)); R(g, h, a, b, c, d, e, f, 66, L(66)); R(f, g, h, a, b, c, d, e, 67, L(67)); R(e, f, g, h, a, b, c, d, 68, L(68)); R(d, e, f, g, h, a, b, c, 69, L(69)); R(c, d, e, f, g, h, a, b, 70, L(70)); R(b, c, d, e, f, g, h, a, 71, L(71)); R(a, b, c, d, e, f, g, h, 72, L(72)); R(h, a, b, c, d, e, f, g, 73, L(73)); R(g, h, a, b, c, d, e, f, 74, L(74)); R(f, g, h, a, b, c, d, e, 75, L(75)); R(e, f, g, h, a, b, c, d, 76, L(76)); R(d, e, f, g, h, a, b, c, 77, L(77)); R(c, d, e, f, g, h, a, b, 78, L(78)); R(b, c, d, e, f, g, h, a, 79, L(79)); h0 += a; h1 += b; h2 += c; h3 += d; h4 += e; h5 += f; h6 += g; h7 += h; nblks--; } h0 = vec_merge_idx0_elems (h0, h1); h2 = vec_merge_idx0_elems (h2, h3); h4 = vec_merge_idx0_elems (h4, h5); h6 = vec_merge_idx0_elems (h6, h7); vec_u64_store (h0, 8 * 0, (unsigned long long *)state); vec_u64_store (h2, 8 * 2, (unsigned long long *)state); vec_u64_store (h4, 8 * 4, (unsigned long long *)state); vec_u64_store (h6, 8 * 6, (unsigned long long *)state); return sizeof(w) + sizeof(w2); } unsigned int ASM_FUNC_ATTR FUNC_ATTR_TARGET_P8 FUNC_ATTR_OPT_O2 _gcry_sha512_transform_ppc8(u64 state[8], const unsigned char *data, size_t nblks) { return sha512_transform_ppc(state, data, nblks); } unsigned int ASM_FUNC_ATTR FUNC_ATTR_TARGET_P9 FUNC_ATTR_OPT_O2 _gcry_sha512_transform_ppc9(u64 state[8], const unsigned char *data, size_t nblks) { return sha512_transform_ppc(state, data, nblks); } #endif /* ENABLE_PPC_CRYPTO_SUPPORT */