F40151152 (169 KB)
diff --git a/cipher/chacha20-aarch64.S b/cipher/chacha20-aarch64.S
index b8f9724a..4f76834b 100644
--- a/cipher/chacha20-aarch64.S
+++ b/cipher/chacha20-aarch64.S
@@ -1,648 +1,648 @@
/* chacha20-aarch64.S - ARMv8/AArch64 accelerated chacha20 blocks function
*
* Copyright (C) 2017-2019 Jussi Kivilinna <jussi.kivilinna@iki.fi>
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
* Based on D. J. Bernstein reference implementation at
* http://cr.yp.to/chacha.html:
*
* chacha-regs.c version 20080118
* D. J. Bernstein
* Public domain.
*/
#include "asm-common-aarch64.h"
#if defined(__AARCH64EL__) && \
defined(HAVE_COMPATIBLE_GCC_AARCH64_PLATFORM_AS) && \
defined(HAVE_GCC_INLINE_ASM_AARCH64_NEON) && \
defined(USE_CHACHA20)
.cpu generic+simd
.text
#include "asm-poly1305-aarch64.h"
/* register macros */
#define INPUT x0
#define DST x1
#define SRC x2
#define NBLKS x3
#define ROUND x4
#define INPUT_CTR x5
#define INPUT_POS x6
#define CTR x7
/* vector registers */
#define X0 v16
#define X1 v17
#define X2 v18
#define X3 v19
#define X4 v20
#define X5 v21
#define X6 v22
#define X7 v23
#define X8 v24
#define X9 v25
#define X10 v26
#define X11 v27
#define X12 v28
#define X13 v29
#define X14 v30
#define X15 v31
#define VCTR v0
#define VTMP0 v1
#define VTMP1 v2
#define VTMP2 v3
#define VTMP3 v4
#define X12_TMP v5
#define X13_TMP v6
#define ROT8 v7
/**********************************************************************
helper macros
**********************************************************************/
#define _(...) __VA_ARGS__
#define vpunpckldq(s1, s2, dst) \
zip1 dst.4s, s2.4s, s1.4s;
#define vpunpckhdq(s1, s2, dst) \
zip2 dst.4s, s2.4s, s1.4s;
#define vpunpcklqdq(s1, s2, dst) \
zip1 dst.2d, s2.2d, s1.2d;
#define vpunpckhqdq(s1, s2, dst) \
zip2 dst.2d, s2.2d, s1.2d;
/* 4x4 32-bit integer matrix transpose */
#define transpose_4x4(x0, x1, x2, x3, t1, t2, t3) \
vpunpckhdq(x1, x0, t2); \
vpunpckldq(x1, x0, x0); \
\
vpunpckldq(x3, x2, t1); \
vpunpckhdq(x3, x2, x2); \
\
vpunpckhqdq(t1, x0, x1); \
vpunpcklqdq(t1, x0, x0); \
\
vpunpckhqdq(x2, t2, x3); \
vpunpcklqdq(x2, t2, x2);
#define clear(x) \
- eor x.16b, x.16b, x.16b;
+ movi x.16b, #0;
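/* Note (editorial, not part of the original patch text): 'movi' writes an
 * immediate zero without reading the register, whereas the replaced
 * 'eor x, x, x' takes the old register contents as an input operand, so the
 * wipe no longer depends on the previous value. The same substitution is
 * made for the clear/CLEAR_REG macros in the other files below. */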
/**********************************************************************
4-way chacha20
**********************************************************************/
#define XOR(d,s1,s2) \
eor d.16b, s2.16b, s1.16b;
#define PLUS(ds,s) \
add ds.4s, ds.4s, s.4s;
#define ROTATE4(dst1,dst2,dst3,dst4,c,src1,src2,src3,src4,iop1,iop2,iop3) \
shl dst1.4s, src1.4s, #(c); \
shl dst2.4s, src2.4s, #(c); \
iop1; \
shl dst3.4s, src3.4s, #(c); \
shl dst4.4s, src4.4s, #(c); \
iop2; \
sri dst1.4s, src1.4s, #(32 - (c)); \
sri dst2.4s, src2.4s, #(32 - (c)); \
iop3; \
sri dst3.4s, src3.4s, #(32 - (c)); \
sri dst4.4s, src4.4s, #(32 - (c));
#define ROTATE4_8(dst1,dst2,dst3,dst4,src1,src2,src3,src4,iop1,iop2,iop3) \
tbl dst1.16b, {src1.16b}, ROT8.16b; \
iop1; \
tbl dst2.16b, {src2.16b}, ROT8.16b; \
iop2; \
tbl dst3.16b, {src3.16b}, ROT8.16b; \
iop3; \
tbl dst4.16b, {src4.16b}, ROT8.16b;
#define ROTATE4_16(dst1,dst2,dst3,dst4,src1,src2,src3,src4,iop1) \
rev32 dst1.8h, src1.8h; \
rev32 dst2.8h, src2.8h; \
iop1; \
rev32 dst3.8h, src3.8h; \
rev32 dst4.8h, src4.8h;
#define QUARTERROUND4(a1,b1,c1,d1,a2,b2,c2,d2,a3,b3,c3,d3,a4,b4,c4,d4,ign,tmp1,tmp2,tmp3,tmp4,\
iop1,iop2,iop3,iop4,iop5,iop6,iop7,iop8,iop9,iop10,iop11,iop12,iop13,iop14,\
iop15,iop16,iop17,iop18,iop19,iop20,iop21,iop22,iop23,iop24,iop25,iop26,\
iop27,iop28,iop29) \
PLUS(a1,b1); PLUS(a2,b2); iop1; \
PLUS(a3,b3); PLUS(a4,b4); iop2; \
XOR(tmp1,d1,a1); XOR(tmp2,d2,a2); iop3; \
XOR(tmp3,d3,a3); XOR(tmp4,d4,a4); iop4; \
ROTATE4_16(d1, d2, d3, d4, tmp1, tmp2, tmp3, tmp4, _(iop5)); \
iop6; \
PLUS(c1,d1); PLUS(c2,d2); iop7; \
PLUS(c3,d3); PLUS(c4,d4); iop8; \
XOR(tmp1,b1,c1); XOR(tmp2,b2,c2); iop9; \
XOR(tmp3,b3,c3); XOR(tmp4,b4,c4); iop10; \
ROTATE4(b1, b2, b3, b4, 12, tmp1, tmp2, tmp3, tmp4, \
_(iop11), _(iop12), _(iop13)); iop14; \
PLUS(a1,b1); PLUS(a2,b2); iop15; \
PLUS(a3,b3); PLUS(a4,b4); iop16; \
XOR(tmp1,d1,a1); XOR(tmp2,d2,a2); iop17; \
XOR(tmp3,d3,a3); XOR(tmp4,d4,a4); iop18; \
ROTATE4_8(d1, d2, d3, d4, tmp1, tmp2, tmp3, tmp4, \
_(iop19), _(iop20), _(iop21)); iop22; \
PLUS(c1,d1); PLUS(c2,d2); iop23; \
PLUS(c3,d3); PLUS(c4,d4); iop24; \
XOR(tmp1,b1,c1); XOR(tmp2,b2,c2); iop25; \
XOR(tmp3,b3,c3); XOR(tmp4,b4,c4); iop26; \
ROTATE4(b1, b2, b3, b4, 7, tmp1, tmp2, tmp3, tmp4, \
_(iop27), _(iop28), _(iop29));
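/* Each QUARTERROUND4 invocation performs the standard ChaCha20 quarter-round
 * on four (a,b,c,d) sets at once:
 *   a += b; d ^= a; d <<<= 16;
 *   c += d; b ^= c; b <<<= 12;
 *   a += b; d ^= a; d <<<= 8;
 *   c += d; b ^= c; b <<<= 7;
 * The iopN arguments are slots for independent instructions that are
 * interleaved between the vector operations (left empty in the plain
 * blocks4 function below, filled in the stitched variant). */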
.align 4
.globl _gcry_chacha20_aarch64_blocks4_data_inc_counter
_gcry_chacha20_aarch64_blocks4_data_inc_counter:
.long 0,1,2,3
.align 4
.globl _gcry_chacha20_aarch64_blocks4_data_rot8
_gcry_chacha20_aarch64_blocks4_data_rot8:
.byte 3,0,1,2
.byte 7,4,5,6
.byte 11,8,9,10
.byte 15,12,13,14
.align 3
.globl _gcry_chacha20_aarch64_blocks4
ELF(.type _gcry_chacha20_aarch64_blocks4,%function;)
_gcry_chacha20_aarch64_blocks4:
/* input:
* x0: input
* x1: dst
* x2: src
* x3: nblks (multiple of 4)
*/
CFI_STARTPROC()
GET_DATA_POINTER(CTR, _gcry_chacha20_aarch64_blocks4_data_rot8);
add INPUT_CTR, INPUT, #(12*4);
ld1 {ROT8.16b}, [CTR];
GET_DATA_POINTER(CTR, _gcry_chacha20_aarch64_blocks4_data_inc_counter);
mov INPUT_POS, INPUT;
ld1 {VCTR.16b}, [CTR];
.Loop4:
/* Construct counter vectors X12 and X13 */
ld1 {X15.16b}, [INPUT_CTR];
mov ROUND, #20;
ld1 {VTMP1.16b-VTMP3.16b}, [INPUT_POS];
dup X12.4s, X15.s[0];
dup X13.4s, X15.s[1];
ldr CTR, [INPUT_CTR];
add X12.4s, X12.4s, VCTR.4s;
dup X0.4s, VTMP1.s[0];
dup X1.4s, VTMP1.s[1];
dup X2.4s, VTMP1.s[2];
dup X3.4s, VTMP1.s[3];
dup X14.4s, X15.s[2];
cmhi VTMP0.4s, VCTR.4s, X12.4s;
dup X15.4s, X15.s[3];
add CTR, CTR, #4; /* Update counter */
dup X4.4s, VTMP2.s[0];
dup X5.4s, VTMP2.s[1];
dup X6.4s, VTMP2.s[2];
dup X7.4s, VTMP2.s[3];
sub X13.4s, X13.4s, VTMP0.4s;
dup X8.4s, VTMP3.s[0];
dup X9.4s, VTMP3.s[1];
dup X10.4s, VTMP3.s[2];
dup X11.4s, VTMP3.s[3];
mov X12_TMP.16b, X12.16b;
mov X13_TMP.16b, X13.16b;
str CTR, [INPUT_CTR];
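/* The cmhi/sub pair above propagates the carry of the 32-bit block counter:
 * cmhi sets a lane to all-ones where adding {0,1,2,3} wrapped the low
 * counter word, and subtracting that -1 increments the corresponding high
 * counter word in X13. */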
.Lround2:
subs ROUND, ROUND, #2
QUARTERROUND4(X0, X4, X8, X12, X1, X5, X9, X13,
X2, X6, X10, X14, X3, X7, X11, X15,
tmp:=,VTMP0,VTMP1,VTMP2,VTMP3,
,,,,,,,,,,,,,,,,,,,,,,,,,,,,)
QUARTERROUND4(X0, X5, X10, X15, X1, X6, X11, X12,
X2, X7, X8, X13, X3, X4, X9, X14,
tmp:=,VTMP0,VTMP1,VTMP2,VTMP3,
,,,,,,,,,,,,,,,,,,,,,,,,,,,,)
b.ne .Lround2;
ld1 {VTMP0.16b, VTMP1.16b}, [INPUT_POS], #32;
PLUS(X12, X12_TMP); /* INPUT + 12 * 4 + counter */
PLUS(X13, X13_TMP); /* INPUT + 13 * 4 + counter */
dup VTMP2.4s, VTMP0.s[0]; /* INPUT + 0 * 4 */
dup VTMP3.4s, VTMP0.s[1]; /* INPUT + 1 * 4 */
dup X12_TMP.4s, VTMP0.s[2]; /* INPUT + 2 * 4 */
dup X13_TMP.4s, VTMP0.s[3]; /* INPUT + 3 * 4 */
PLUS(X0, VTMP2);
PLUS(X1, VTMP3);
PLUS(X2, X12_TMP);
PLUS(X3, X13_TMP);
dup VTMP2.4s, VTMP1.s[0]; /* INPUT + 4 * 4 */
dup VTMP3.4s, VTMP1.s[1]; /* INPUT + 5 * 4 */
dup X12_TMP.4s, VTMP1.s[2]; /* INPUT + 6 * 4 */
dup X13_TMP.4s, VTMP1.s[3]; /* INPUT + 7 * 4 */
ld1 {VTMP0.16b, VTMP1.16b}, [INPUT_POS];
mov INPUT_POS, INPUT;
PLUS(X4, VTMP2);
PLUS(X5, VTMP3);
PLUS(X6, X12_TMP);
PLUS(X7, X13_TMP);
dup VTMP2.4s, VTMP0.s[0]; /* INPUT + 8 * 4 */
dup VTMP3.4s, VTMP0.s[1]; /* INPUT + 9 * 4 */
dup X12_TMP.4s, VTMP0.s[2]; /* INPUT + 10 * 4 */
dup X13_TMP.4s, VTMP0.s[3]; /* INPUT + 11 * 4 */
dup VTMP0.4s, VTMP1.s[2]; /* INPUT + 14 * 4 */
dup VTMP1.4s, VTMP1.s[3]; /* INPUT + 15 * 4 */
PLUS(X8, VTMP2);
PLUS(X9, VTMP3);
PLUS(X10, X12_TMP);
PLUS(X11, X13_TMP);
PLUS(X14, VTMP0);
PLUS(X15, VTMP1);
transpose_4x4(X0, X1, X2, X3, VTMP0, VTMP1, VTMP2);
transpose_4x4(X4, X5, X6, X7, VTMP0, VTMP1, VTMP2);
transpose_4x4(X8, X9, X10, X11, VTMP0, VTMP1, VTMP2);
transpose_4x4(X12, X13, X14, X15, VTMP0, VTMP1, VTMP2);
subs NBLKS, NBLKS, #4;
ld1 {VTMP0.16b-VTMP3.16b}, [SRC], #64;
ld1 {X12_TMP.16b-X13_TMP.16b}, [SRC], #32;
eor VTMP0.16b, X0.16b, VTMP0.16b;
eor VTMP1.16b, X4.16b, VTMP1.16b;
eor VTMP2.16b, X8.16b, VTMP2.16b;
eor VTMP3.16b, X12.16b, VTMP3.16b;
eor X12_TMP.16b, X1.16b, X12_TMP.16b;
eor X13_TMP.16b, X5.16b, X13_TMP.16b;
st1 {VTMP0.16b-VTMP3.16b}, [DST], #64;
ld1 {VTMP0.16b-VTMP3.16b}, [SRC], #64;
st1 {X12_TMP.16b-X13_TMP.16b}, [DST], #32;
ld1 {X12_TMP.16b-X13_TMP.16b}, [SRC], #32;
eor VTMP0.16b, X9.16b, VTMP0.16b;
eor VTMP1.16b, X13.16b, VTMP1.16b;
eor VTMP2.16b, X2.16b, VTMP2.16b;
eor VTMP3.16b, X6.16b, VTMP3.16b;
eor X12_TMP.16b, X10.16b, X12_TMP.16b;
eor X13_TMP.16b, X14.16b, X13_TMP.16b;
st1 {VTMP0.16b-VTMP3.16b}, [DST], #64;
ld1 {VTMP0.16b-VTMP3.16b}, [SRC], #64;
st1 {X12_TMP.16b-X13_TMP.16b}, [DST], #32;
eor VTMP0.16b, X3.16b, VTMP0.16b;
eor VTMP1.16b, X7.16b, VTMP1.16b;
eor VTMP2.16b, X11.16b, VTMP2.16b;
eor VTMP3.16b, X15.16b, VTMP3.16b;
st1 {VTMP0.16b-VTMP3.16b}, [DST], #64;
b.ne .Loop4;
/* clear the used vector registers and stack */
clear(VTMP0);
clear(VTMP1);
clear(VTMP2);
clear(VTMP3);
clear(X12_TMP);
clear(X13_TMP);
clear(X0);
clear(X1);
clear(X2);
clear(X3);
clear(X4);
clear(X5);
clear(X6);
clear(X7);
clear(X8);
clear(X9);
clear(X10);
clear(X11);
clear(X12);
clear(X13);
clear(X14);
clear(X15);
eor x0, x0, x0
ret
CFI_ENDPROC()
ELF(.size _gcry_chacha20_aarch64_blocks4, .-_gcry_chacha20_aarch64_blocks4;)
/**********************************************************************
4-way stitched chacha20-poly1305
**********************************************************************/
.align 3
.globl _gcry_chacha20_poly1305_aarch64_blocks4
ELF(.type _gcry_chacha20_poly1305_aarch64_blocks4,%function;)
_gcry_chacha20_poly1305_aarch64_blocks4:
/* input:
* x0: input
* x1: dst
* x2: src
* x3: nblks (multiple of 4)
* x4: poly1305-state
* x5: poly1305-src
*/
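/* The ChaCha20 rounds below run entirely in the SIMD unit, while the
 * Poly1305 block computation (general-purpose registers) is slotted into the
 * otherwise empty iop arguments of QUARTERROUND4, keeping the scalar and
 * vector pipelines busy in parallel ("stitching"). */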
CFI_STARTPROC()
POLY1305_PUSH_REGS()
mov POLY_RSTATE, x4;
mov POLY_RSRC, x5;
GET_DATA_POINTER(CTR, _gcry_chacha20_aarch64_blocks4_data_rot8);
add INPUT_CTR, INPUT, #(12*4);
ld1 {ROT8.16b}, [CTR];
GET_DATA_POINTER(CTR, _gcry_chacha20_aarch64_blocks4_data_inc_counter);
mov INPUT_POS, INPUT;
ld1 {VCTR.16b}, [CTR];
POLY1305_LOAD_STATE()
.Loop_poly4:
/* Construct counter vectors X12 and X13 */
ld1 {X15.16b}, [INPUT_CTR];
ld1 {VTMP1.16b-VTMP3.16b}, [INPUT_POS];
dup X12.4s, X15.s[0];
dup X13.4s, X15.s[1];
ldr CTR, [INPUT_CTR];
add X12.4s, X12.4s, VCTR.4s;
dup X0.4s, VTMP1.s[0];
dup X1.4s, VTMP1.s[1];
dup X2.4s, VTMP1.s[2];
dup X3.4s, VTMP1.s[3];
dup X14.4s, X15.s[2];
cmhi VTMP0.4s, VCTR.4s, X12.4s;
dup X15.4s, X15.s[3];
add CTR, CTR, #4; /* Update counter */
dup X4.4s, VTMP2.s[0];
dup X5.4s, VTMP2.s[1];
dup X6.4s, VTMP2.s[2];
dup X7.4s, VTMP2.s[3];
sub X13.4s, X13.4s, VTMP0.4s;
dup X8.4s, VTMP3.s[0];
dup X9.4s, VTMP3.s[1];
dup X10.4s, VTMP3.s[2];
dup X11.4s, VTMP3.s[3];
mov X12_TMP.16b, X12.16b;
mov X13_TMP.16b, X13.16b;
str CTR, [INPUT_CTR];
mov ROUND, #20
.Lround4_with_poly1305_outer:
mov POLY_CHACHA_ROUND, #6;
.Lround4_with_poly1305_inner1:
POLY1305_BLOCK_PART1(0 * 16)
QUARTERROUND4(X0, X4, X8, X12, X1, X5, X9, X13,
X2, X6, X10, X14, X3, X7, X11, X15,
tmp:=,VTMP0,VTMP1,VTMP2,VTMP3,
POLY1305_BLOCK_PART2(0 * 16),
POLY1305_BLOCK_PART3(),
POLY1305_BLOCK_PART4(),
POLY1305_BLOCK_PART5(),
POLY1305_BLOCK_PART6(),
POLY1305_BLOCK_PART7(),
POLY1305_BLOCK_PART8(),
POLY1305_BLOCK_PART9(),
POLY1305_BLOCK_PART10(),
POLY1305_BLOCK_PART11(),
POLY1305_BLOCK_PART12(),
POLY1305_BLOCK_PART13(),
POLY1305_BLOCK_PART14(),
POLY1305_BLOCK_PART15(),
POLY1305_BLOCK_PART16(),
POLY1305_BLOCK_PART17(),
POLY1305_BLOCK_PART18(),
POLY1305_BLOCK_PART19(),
POLY1305_BLOCK_PART20(),
POLY1305_BLOCK_PART21(),
POLY1305_BLOCK_PART22(),
POLY1305_BLOCK_PART23(),
POLY1305_BLOCK_PART24(),
POLY1305_BLOCK_PART25(),
POLY1305_BLOCK_PART26(),
POLY1305_BLOCK_PART27(),
POLY1305_BLOCK_PART28(),
POLY1305_BLOCK_PART29(),
POLY1305_BLOCK_PART1(1 * 16))
POLY1305_BLOCK_PART2(1 * 16)
QUARTERROUND4(X0, X5, X10, X15, X1, X6, X11, X12,
X2, X7, X8, X13, X3, X4, X9, X14,
tmp:=,VTMP0,VTMP1,VTMP2,VTMP3,
_(add POLY_RSRC, POLY_RSRC, #(2*16)),
POLY1305_BLOCK_PART3(),
POLY1305_BLOCK_PART4(),
POLY1305_BLOCK_PART5(),
POLY1305_BLOCK_PART6(),
POLY1305_BLOCK_PART7(),
POLY1305_BLOCK_PART8(),
POLY1305_BLOCK_PART9(),
POLY1305_BLOCK_PART10(),
POLY1305_BLOCK_PART11(),
POLY1305_BLOCK_PART12(),
POLY1305_BLOCK_PART13(),
POLY1305_BLOCK_PART14(),
POLY1305_BLOCK_PART15(),
POLY1305_BLOCK_PART16(),
POLY1305_BLOCK_PART17(),
POLY1305_BLOCK_PART18(),
POLY1305_BLOCK_PART19(),
POLY1305_BLOCK_PART20(),
POLY1305_BLOCK_PART21(),
POLY1305_BLOCK_PART22(),
POLY1305_BLOCK_PART23(),
POLY1305_BLOCK_PART24(),
POLY1305_BLOCK_PART25(),
POLY1305_BLOCK_PART26(),
POLY1305_BLOCK_PART27(),
POLY1305_BLOCK_PART28(),
POLY1305_BLOCK_PART29(),
_(subs POLY_CHACHA_ROUND, POLY_CHACHA_ROUND, #2));
b.ne .Lround4_with_poly1305_inner1;
mov POLY_CHACHA_ROUND, #4;
.Lround4_with_poly1305_inner2:
POLY1305_BLOCK_PART1(0 * 16)
QUARTERROUND4(X0, X4, X8, X12, X1, X5, X9, X13,
X2, X6, X10, X14, X3, X7, X11, X15,
tmp:=,VTMP0,VTMP1,VTMP2,VTMP3,,
POLY1305_BLOCK_PART2(0 * 16),,
_(add POLY_RSRC, POLY_RSRC, #(1*16)),,
POLY1305_BLOCK_PART3(),,
POLY1305_BLOCK_PART4(),,
POLY1305_BLOCK_PART5(),,
POLY1305_BLOCK_PART6(),,
POLY1305_BLOCK_PART7(),,
POLY1305_BLOCK_PART8(),,
POLY1305_BLOCK_PART9(),,
POLY1305_BLOCK_PART10(),,
POLY1305_BLOCK_PART11(),,
POLY1305_BLOCK_PART12(),,
POLY1305_BLOCK_PART13(),,
POLY1305_BLOCK_PART14(),)
POLY1305_BLOCK_PART15()
QUARTERROUND4(X0, X5, X10, X15, X1, X6, X11, X12,
X2, X7, X8, X13, X3, X4, X9, X14,
tmp:=,VTMP0,VTMP1,VTMP2,VTMP3,
POLY1305_BLOCK_PART16(),,
POLY1305_BLOCK_PART17(),,
POLY1305_BLOCK_PART18(),,
POLY1305_BLOCK_PART19(),,
POLY1305_BLOCK_PART20(),,
POLY1305_BLOCK_PART21(),,
POLY1305_BLOCK_PART22(),,
POLY1305_BLOCK_PART23(),,
POLY1305_BLOCK_PART24(),,
POLY1305_BLOCK_PART25(),,
POLY1305_BLOCK_PART26(),,
POLY1305_BLOCK_PART27(),,
POLY1305_BLOCK_PART28(),,
POLY1305_BLOCK_PART29(),
_(subs POLY_CHACHA_ROUND, POLY_CHACHA_ROUND, #2),)
b.ne .Lround4_with_poly1305_inner2;
subs ROUND, ROUND, #10
b.ne .Lround4_with_poly1305_outer;
ld1 {VTMP0.16b, VTMP1.16b}, [INPUT_POS], #32;
PLUS(X12, X12_TMP); /* INPUT + 12 * 4 + counter */
PLUS(X13, X13_TMP); /* INPUT + 13 * 4 + counter */
dup VTMP2.4s, VTMP0.s[0]; /* INPUT + 0 * 4 */
dup VTMP3.4s, VTMP0.s[1]; /* INPUT + 1 * 4 */
dup X12_TMP.4s, VTMP0.s[2]; /* INPUT + 2 * 4 */
dup X13_TMP.4s, VTMP0.s[3]; /* INPUT + 3 * 4 */
PLUS(X0, VTMP2);
PLUS(X1, VTMP3);
PLUS(X2, X12_TMP);
PLUS(X3, X13_TMP);
dup VTMP2.4s, VTMP1.s[0]; /* INPUT + 4 * 4 */
dup VTMP3.4s, VTMP1.s[1]; /* INPUT + 5 * 4 */
dup X12_TMP.4s, VTMP1.s[2]; /* INPUT + 6 * 4 */
dup X13_TMP.4s, VTMP1.s[3]; /* INPUT + 7 * 4 */
ld1 {VTMP0.16b, VTMP1.16b}, [INPUT_POS];
mov INPUT_POS, INPUT;
PLUS(X4, VTMP2);
PLUS(X5, VTMP3);
PLUS(X6, X12_TMP);
PLUS(X7, X13_TMP);
dup VTMP2.4s, VTMP0.s[0]; /* INPUT + 8 * 4 */
dup VTMP3.4s, VTMP0.s[1]; /* INPUT + 9 * 4 */
dup X12_TMP.4s, VTMP0.s[2]; /* INPUT + 10 * 4 */
dup X13_TMP.4s, VTMP0.s[3]; /* INPUT + 11 * 4 */
dup VTMP0.4s, VTMP1.s[2]; /* INPUT + 14 * 4 */
dup VTMP1.4s, VTMP1.s[3]; /* INPUT + 15 * 4 */
PLUS(X8, VTMP2);
PLUS(X9, VTMP3);
PLUS(X10, X12_TMP);
PLUS(X11, X13_TMP);
PLUS(X14, VTMP0);
PLUS(X15, VTMP1);
transpose_4x4(X0, X1, X2, X3, VTMP0, VTMP1, VTMP2);
transpose_4x4(X4, X5, X6, X7, VTMP0, VTMP1, VTMP2);
transpose_4x4(X8, X9, X10, X11, VTMP0, VTMP1, VTMP2);
transpose_4x4(X12, X13, X14, X15, VTMP0, VTMP1, VTMP2);
subs NBLKS, NBLKS, #4;
ld1 {VTMP0.16b-VTMP3.16b}, [SRC], #64;
ld1 {X12_TMP.16b-X13_TMP.16b}, [SRC], #32;
eor VTMP0.16b, X0.16b, VTMP0.16b;
eor VTMP1.16b, X4.16b, VTMP1.16b;
eor VTMP2.16b, X8.16b, VTMP2.16b;
eor VTMP3.16b, X12.16b, VTMP3.16b;
eor X12_TMP.16b, X1.16b, X12_TMP.16b;
eor X13_TMP.16b, X5.16b, X13_TMP.16b;
st1 {VTMP0.16b-VTMP3.16b}, [DST], #64;
ld1 {VTMP0.16b-VTMP3.16b}, [SRC], #64;
st1 {X12_TMP.16b-X13_TMP.16b}, [DST], #32;
ld1 {X12_TMP.16b-X13_TMP.16b}, [SRC], #32;
eor VTMP0.16b, X9.16b, VTMP0.16b;
eor VTMP1.16b, X13.16b, VTMP1.16b;
eor VTMP2.16b, X2.16b, VTMP2.16b;
eor VTMP3.16b, X6.16b, VTMP3.16b;
eor X12_TMP.16b, X10.16b, X12_TMP.16b;
eor X13_TMP.16b, X14.16b, X13_TMP.16b;
st1 {VTMP0.16b-VTMP3.16b}, [DST], #64;
ld1 {VTMP0.16b-VTMP3.16b}, [SRC], #64;
st1 {X12_TMP.16b-X13_TMP.16b}, [DST], #32;
eor VTMP0.16b, X3.16b, VTMP0.16b;
eor VTMP1.16b, X7.16b, VTMP1.16b;
eor VTMP2.16b, X11.16b, VTMP2.16b;
eor VTMP3.16b, X15.16b, VTMP3.16b;
st1 {VTMP0.16b-VTMP3.16b}, [DST], #64;
b.ne .Loop_poly4;
POLY1305_STORE_STATE()
/* clear the used vector registers and stack */
clear(VTMP0);
clear(VTMP1);
clear(VTMP2);
clear(VTMP3);
clear(X12_TMP);
clear(X13_TMP);
clear(X0);
clear(X1);
clear(X2);
clear(X3);
clear(X4);
clear(X5);
clear(X6);
clear(X7);
clear(X8);
clear(X9);
clear(X10);
clear(X11);
clear(X12);
clear(X13);
clear(X14);
clear(X15);
eor x0, x0, x0
POLY1305_POP_REGS()
ret
CFI_ENDPROC()
ELF(.size _gcry_chacha20_poly1305_aarch64_blocks4, .-_gcry_chacha20_poly1305_aarch64_blocks4;)
#endif
diff --git a/cipher/chacha20-armv7-neon.S b/cipher/chacha20-armv7-neon.S
index 33a43df1..a862be4e 100644
--- a/cipher/chacha20-armv7-neon.S
+++ b/cipher/chacha20-armv7-neon.S
@@ -1,393 +1,393 @@
/* chacha20-armv7-neon.S - ARMv7 NEON implementation of ChaCha20 cipher
*
* Copyright (C) 2017,2018 Jussi Kivilinna <jussi.kivilinna@iki.fi>
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
* Based on D. J. Bernstein reference implementation at
* http://cr.yp.to/chacha.html:
*
* chacha-regs.c version 20080118
* D. J. Bernstein
* Public domain.
*/
#include <config.h>
#if defined(HAVE_ARM_ARCH_V6) && defined(__ARMEL__) && \
defined(HAVE_COMPATIBLE_GCC_ARM_PLATFORM_AS) && \
defined(HAVE_GCC_INLINE_ASM_NEON)
.syntax unified
.fpu neon
.arm
.text
#ifdef __PIC__
# define GET_DATA_POINTER(reg, name, rtmp) \
ldr reg, 1f; \
ldr rtmp, 2f; \
b 3f; \
1: .word _GLOBAL_OFFSET_TABLE_-(3f+8); \
2: .word name(GOT); \
3: add reg, pc, reg; \
ldr reg, [reg, rtmp];
#else
# define GET_DATA_POINTER(reg, name, rtmp) ldr reg, =name
#endif
/* register macros */
#define INPUT r0
#define DST r1
#define SRC r2
#define NBLKS r3
#define ROUND r4
/* stack structure */
#define STACK_VEC_X12 (16)
#define STACK_VEC_X13 (STACK_VEC_X12 + 16)
#define STACK_TMP (STACK_VEC_X13 + 16)
#define STACK_TMP1 (16 + STACK_TMP)
#define STACK_TMP2 (16 + STACK_TMP1)
#define STACK_MAX (16 + STACK_TMP2)
/* vector registers */
#define X0 q0
#define X1 q1
#define X2 q2
#define X3 q3
#define X4 q4
#define X5 q5
#define X6 q6
#define X7 q7
#define X8 q8
#define X9 q9
#define X10 q10
#define X11 q11
#define X12 q12
#define X13 q13
#define X14 q14
#define X15 q15
#define X0l d0
#define X1l d2
#define X2l d4
#define X3l d6
#define X4l d8
#define X5l d10
#define X6l d12
#define X7l d14
#define X8l d16
#define X9l d18
#define X10l d20
#define X11l d22
#define X12l d24
#define X13l d26
#define X14l d28
#define X15l d30
#define X0h d1
#define X1h d3
#define X2h d5
#define X3h d7
#define X4h d9
#define X5h d11
#define X6h d13
#define X7h d15
#define X8h d17
#define X9h d19
#define X10h d21
#define X11h d23
#define X12h d25
#define X13h d27
#define X14h d29
#define X15h d31
/**********************************************************************
helper macros
**********************************************************************/
/* 4x4 32-bit integer matrix transpose */
#define transpose_4x4_part1(_q0, _q1, _q2, _q3) \
vtrn.32 _q0, _q1; \
vtrn.32 _q2, _q3;
#define transpose_4x4_part2(_q0, _q1, _q2, _q3) \
vswp _q0##h, _q2##l; \
vswp _q1##h, _q3##l;
-#define clear(x) veor x,x,x;
+#define clear(x) vmov.i8 x, #0;
/**********************************************************************
4-way chacha20
**********************************************************************/
#define ROTATE2(dst1,dst2,c,src1,src2) \
vshl.u32 dst1, src1, #(c); \
vshl.u32 dst2, src2, #(c); \
vsri.u32 dst1, src1, #(32 - (c)); \
vsri.u32 dst2, src2, #(32 - (c));
#define ROTATE2_16(dst1,dst2,src1,src2) \
vrev32.16 dst1, src1; \
vrev32.16 dst2, src2;
#define XOR(d,s1,s2) \
veor d, s2, s1;
#define PLUS(ds,s) \
vadd.u32 ds, ds, s;
#define QUARTERROUND2(a1,b1,c1,d1,a2,b2,c2,d2,ign,tmp1,tmp2) \
PLUS(a1,b1); PLUS(a2,b2); XOR(tmp1,d1,a1); XOR(tmp2,d2,a2); \
ROTATE2_16(d1, d2, tmp1, tmp2); \
PLUS(c1,d1); PLUS(c2,d2); XOR(tmp1,b1,c1); XOR(tmp2,b2,c2); \
ROTATE2(b1, b2, 12, tmp1, tmp2); \
PLUS(a1,b1); PLUS(a2,b2); XOR(tmp1,d1,a1); XOR(tmp2,d2,a2); \
ROTATE2(d1, d2, 8, tmp1, tmp2); \
PLUS(c1,d1); PLUS(c2,d2); XOR(tmp1,b1,c1); XOR(tmp2,b2,c2); \
ROTATE2(b1, b2, 7, tmp1, tmp2);
chacha20_data:
.align 4
.Linc_counter:
.long 0,1,2,3
.align 3
.globl _gcry_chacha20_armv7_neon_blocks4
.type _gcry_chacha20_armv7_neon_blocks4,%function;
_gcry_chacha20_armv7_neon_blocks4:
/* input:
* r0: input
* r1: dst
* r2: src
* r3: nblks (multiple of 4)
*/
vpush {q4-q7};
push {r4-r12,lr};
mov r12, sp
mov r6, sp;
sub r6, r6, #(STACK_MAX);
and r6, r6, #(~15);
mov sp, r6;
GET_DATA_POINTER(r9, .Linc_counter, lr);
add lr, INPUT, #(12*4);
add r8, sp, #STACK_VEC_X12;
.Loop4:
mov ROUND, #20;
/* Construct counter vectors X12 and X13 */
vld1.8 {X15}, [lr];
mov lr, INPUT;
vld1.8 {X8}, [r9];
vdup.32 X12, X15l[0];
vdup.32 X13, X15l[1];
vld1.8 {X3}, [lr]!;
vadd.u32 X12, X12, X8;
vdup.32 X0, X3l[0];
vdup.32 X1, X3l[1];
vdup.32 X2, X3h[0];
vcgt.u32 X8, X8, X12;
vdup.32 X3, X3h[1];
vdup.32 X14, X15h[0];
vdup.32 X15, X15h[1];
vsub.u32 X13, X13, X8;
vld1.8 {X7}, [lr]!;
vld1.8 {X11}, [lr];
vst1.8 {X12, X13}, [r8];
vdup.32 X4, X7l[0];
vdup.32 X5, X7l[1];
vdup.32 X6, X7h[0];
vdup.32 X7, X7h[1];
vdup.32 X8, X11l[0];
vdup.32 X9, X11l[1];
vdup.32 X10, X11h[0];
vdup.32 X11, X11h[1];
add r7, sp, #STACK_TMP2;
add r6, sp, #STACK_TMP1;
add r5, sp, #STACK_TMP;
vst1.8 {X15}, [r6];
vst1.8 {X11}, [r5];
mov lr, INPUT;
.Lround2:
subs ROUND, ROUND, #2
QUARTERROUND2(X0, X4, X8, X12, X1, X5, X9, X13, tmp:=,X11,X15)
vld1.8 {X11}, [r5];
vld1.8 {X15}, [r6];
vst1.8 {X8}, [r5];
vst1.8 {X9}, [r6];
QUARTERROUND2(X2, X6, X10, X14, X3, X7, X11, X15, tmp:=,X8,X9)
QUARTERROUND2(X0, X5, X10, X15, X1, X6, X11, X12, tmp:=,X8,X9)
vld1.8 {X8}, [r5];
vld1.8 {X9}, [r6];
vst1.8 {X11}, [r5];
vst1.8 {X15}, [r6];
QUARTERROUND2(X2, X7, X8, X13, X3, X4, X9, X14, tmp:=,X11,X15)
bne .Lround2;
vld1.8 {X11}, [lr]!;
vst1.8 {X14}, [r7];
vdup.32 X14, X11l[0]; /* INPUT + 0 * 4 */
vdup.32 X15, X11l[1]; /* INPUT + 1 * 4 */
PLUS(X0, X14);
PLUS(X1, X15);
vdup.32 X14, X11h[0]; /* INPUT + 2 * 4 */
vdup.32 X15, X11h[1]; /* INPUT + 3 * 4 */
PLUS(X2, X14);
PLUS(X3, X15);
vld1.8 {X11}, [r5];
vld1.8 {X15}, [r6];
vst1.8 {X0}, [r5];
vld1.8 {X0}, [lr]!;
vst1.8 {X1}, [r6];
vdup.32 X14, X0l[0]; /* INPUT + 4 * 4 */
vdup.32 X1, X0l[1]; /* INPUT + 5 * 4 */
PLUS(X4, X14);
PLUS(X5, X1);
vdup.32 X14, X0h[0]; /* INPUT + 6 * 4 */
vdup.32 X1, X0h[1]; /* INPUT + 7 * 4 */
PLUS(X6, X14);
PLUS(X7, X1);
vld1.8 {X0}, [lr]!;
vdup.32 X14, X0l[0]; /* INPUT + 8 * 4 */
vdup.32 X1, X0l[1]; /* INPUT + 9 * 4 */
PLUS(X8, X14);
PLUS(X9, X1);
vdup.32 X14, X0h[0]; /* INPUT + 10 * 4 */
vdup.32 X1, X0h[1]; /* INPUT + 11 * 4 */
PLUS(X10, X14);
PLUS(X11, X1);
vld1.8 {X0}, [lr];
add lr, INPUT, #(12*4)
vld1.8 {X14}, [r7];
vdup.32 X1, X0h[0]; /* INPUT + 14 * 4 */
ldm lr, {r10, r11}; /* Update counter */
vdup.32 X0, X0h[1]; /* INPUT + 15 * 4 */
PLUS(X14, X1);
PLUS(X15, X0);
adds r10, r10, #4; /* Update counter */
vld1.8 {X0, X1}, [r8];
PLUS(X12, X0);
vld1.8 {X0}, [r5];
PLUS(X13, X1);
adc r11, r11, #0; /* Update counter */
vld1.8 {X1}, [r6];
stm lr, {r10, r11}; /* Update counter */
transpose_4x4_part1(X0, X1, X2, X3);
transpose_4x4_part1(X4, X5, X6, X7);
transpose_4x4_part1(X8, X9, X10, X11);
transpose_4x4_part1(X12, X13, X14, X15);
transpose_4x4_part2(X0, X1, X2, X3);
transpose_4x4_part2(X4, X5, X6, X7);
transpose_4x4_part2(X8, X9, X10, X11);
transpose_4x4_part2(X12, X13, X14, X15);
subs NBLKS, NBLKS, #4;
vst1.8 {X10}, [r5];
add lr, INPUT, #(12*4)
vst1.8 {X11}, [r6];
vld1.8 {X10, X11}, [SRC]!;
veor X10, X0, X10;
vld1.8 {X0}, [SRC]!;
veor X11, X4, X11;
vld1.8 {X4}, [SRC]!;
vst1.8 {X10, X11}, [DST]!;
vld1.8 {X10, X11}, [SRC]!;
veor X0, X8, X0;
veor X4, X12, X4;
veor X10, X1, X10;
veor X11, X5, X11;
vst1.8 {X0}, [DST]!;
vld1.8 {X0, X1}, [SRC]!;
vst1.8 {X4}, [DST]!;
vld1.8 {X4, X5}, [SRC]!;
vst1.8 {X10, X11}, [DST]!;
vld1.8 {X10}, [r5];
vld1.8 {X11}, [r6];
veor X0, X9, X0;
vld1.8 {X8, X9}, [SRC]!;
veor X1, X13, X1;
vld1.8 {X12, X13}, [SRC]!;
veor X4, X2, X4;
veor X5, X6, X5;
vst1.8 {X0, X1}, [DST]!;
vld1.8 {X0, X1}, [SRC]!;
vst1.8 {X4, X5}, [DST]!;
veor X8, X10, X8;
veor X9, X14, X9;
veor X12, X3, X12;
veor X13, X7, X13;
veor X0, X11, X0;
veor X1, X15, X1;
vst1.8 {X8, X9}, [DST]!;
vst1.8 {X12, X13}, [DST]!;
vst1.8 {X0, X1}, [DST]!;
bne .Loop4;
/* clear the used vector registers and stack */
clear(X0);
vst1.8 {X0}, [r5];
vst1.8 {X0}, [r6];
vst1.8 {X0}, [r7];
vst1.8 {X0}, [r8]!;
vst1.8 {X0}, [r8];
mov sp, r12
clear(X1);
clear(X2);
clear(X3);
clear(X4);
clear(X5);
clear(X6);
clear(X7);
clear(X8);
clear(X9);
clear(X10);
clear(X11);
clear(X12);
clear(X13);
clear(X14);
clear(X15);
pop {r4-r12,lr}
vpop {q4-q7}
eor r0, r0, r0
bx lr
.size _gcry_chacha20_armv7_neon_blocks4, .-_gcry_chacha20_armv7_neon_blocks4;
#endif
diff --git a/cipher/cipher-gcm-armv7-neon.S b/cipher/cipher-gcm-armv7-neon.S
index a801a5e5..16502b4a 100644
--- a/cipher/cipher-gcm-armv7-neon.S
+++ b/cipher/cipher-gcm-armv7-neon.S
@@ -1,341 +1,341 @@
/* cipher-gcm-armv7-neon.S - ARM/NEON accelerated GHASH
* Copyright (C) 2019 Jussi Kivilinna <jussi.kivilinna@iki.fi>
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <config.h>
#if defined(HAVE_ARM_ARCH_V6) && defined(__ARMEL__) && \
defined(HAVE_COMPATIBLE_GCC_ARM_PLATFORM_AS) && \
defined(HAVE_GCC_INLINE_ASM_NEON)
.syntax unified
.fpu neon
.arm
.text
#ifdef __PIC__
# define GET_DATA_POINTER(reg, name, rtmp) \
ldr reg, 1f; \
ldr rtmp, 2f; \
b 3f; \
1: .word _GLOBAL_OFFSET_TABLE_-(3f+8); \
2: .word name(GOT); \
3: add reg, pc, reg; \
ldr reg, [reg, rtmp];
#else
# define GET_DATA_POINTER(reg, name, rtmp) ldr reg, =name
#endif
/* Constants */
.align 4
gcry_gcm_reduction_constant:
.Lrconst64:
.quad 0xc200000000000000
/* Register macros */
#define rhash q0
#define rhash_l d0
#define rhash_h d1
#define rh1 q1
#define rh1_l d2
#define rh1_h d3
#define rbuf q2
#define rbuf_l d4
#define rbuf_h d5
#define rbuf1 q3
#define rbuf1_l d6
#define rbuf1_h d7
#define t0q q4
#define t0l d8
#define t0h d9
#define t1q q5
#define t1l d10
#define t1h d11
#define t2q q6
#define t2l d12
#define t2h d13
#define t3q q7
#define t3l d14
#define t3h d15
/* q8 */
#define k16 d16
#define k32 d17
/* q9 */
#define k48 d18
#define k0 q10
#define rr0 q11
#define rr0_l d22
#define rr0_h d23
#define rr1 q12
#define rr1_l d24
#define rr1_h d25
#define rt0 q13
#define rt0_l d26
#define rt0_h d27
#define rt1 q14
#define rt1_l d28
#define rt1_h d29
#define rrconst q15
#define rrconst_l d30
#define rrconst_h d31
/* Macro for 64x64=>128 carry-less multiplication using vmull.p8 instruction.
*
* From "Câmara, D.; Gouvêa, C. P. L.; López, J. & Dahab, R. Fast Software
* Polynomial Multiplication on ARM Processors using the NEON Engine. The
* Second International Workshop on Modern Cryptography and Security
* Engineering — MoCrySEn, 2013". */
#define vmull_p64(rq, rl, rh, ad, bd) \
vext.8 t0l, ad, ad, $1; \
vmull.p8 t0q, t0l, bd; \
vext.8 rl, bd, bd, $1; \
vmull.p8 rq, ad, rl; \
vext.8 t1l, ad, ad, $2; \
vmull.p8 t1q, t1l, bd; \
vext.8 t3l, bd, bd, $2; \
vmull.p8 t3q, ad, t3l; \
vext.8 t2l, ad, ad, $3; \
vmull.p8 t2q, t2l, bd; \
veor t0q, t0q, rq; \
vext.8 rl, bd, bd, $3; \
vmull.p8 rq, ad, rl; \
veor t1q, t1q, t3q; \
vext.8 t3l, bd, bd, $4; \
vmull.p8 t3q, ad, t3l; \
veor t0l, t0l, t0h; \
vand t0h, t0h, k48; \
veor t1l, t1l, t1h; \
vand t1h, t1h, k32; \
veor t2q, t2q, rq; \
veor t0l, t0l, t0h; \
veor t1l, t1l, t1h; \
veor t2l, t2l, t2h; \
vand t2h, t2h, k16; \
veor t3l, t3l, t3h; \
vmov.i64 t3h, $0; \
vext.8 t0q, t0q, t0q, $15; \
veor t2l, t2l, t2h; \
vext.8 t1q, t1q, t1q, $14; \
vmull.p8 rq, ad, bd; \
vext.8 t2q, t2q, t2q, $13; \
vext.8 t3q, t3q, t3q, $12; \
veor t0q, t0q, t1q; \
veor t2q, t2q, t3q; \
veor rq, rq, t0q; \
veor rq, rq, t2q;
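/* The macro above assembles a 64x64->128-bit carry-less (GF(2)[x]) product
 * from 8-bit polynomial multiplies (vmull.p8), per the Câmara et al. paper
 * cited in its header: vext rotates operand bytes into position and the
 * k16/k32/k48 masks trim the partial products before they are XOR-combined
 * into rq. */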
/* GHASH macros.
*
* See "Gouvêa, C. P. L. & López, J. Implementing GCM on ARMv8. Topics in
* Cryptology — CT-RSA 2015" for details.
*/
/* Input: 'a' and 'b', Output: 'r0:r1' (low 128-bits in r0, high in r1)
* Note: 'r1' may be 'a' or 'b', 'r0' must not be either 'a' or 'b'.
*/
#define PMUL_128x128(r0, r1, a, b, t1, t2, interleave_op) \
veor t1##_h, b##_l, b##_h; \
veor t1##_l, a##_l, a##_h; \
vmull_p64( r0, r0##_l, r0##_h, a##_l, b##_l ); \
vmull_p64( r1, r1##_l, r1##_h, a##_h, b##_h ); \
vmull_p64( t2, t2##_h, t2##_l, t1##_h, t1##_l ); \
interleave_op; \
veor t2, r0; \
veor t2, r1; \
veor r0##_h, t2##_l; \
veor r1##_l, t2##_h;
/* Reduction using Xor and Shift.
* Input: 'r0:r1', Output: 'a'
*
* See "Shay Gueron, Michael E. Kounavis. Intel Carry-Less Multiplication
* Instruction and its Usage for Computing the GCM Mode" for details.
*/
#define REDUCTION(a, r0, r1, t, interleave_op) \
vshl.u32 t0q, r0, #31; \
vshl.u32 t1q, r0, #30; \
vshl.u32 t2q, r0, #25; \
veor t0q, t0q, t1q; \
veor t0q, t0q, t2q; \
vext.8 t, t0q, k0, #4; \
vext.8 t0q, k0, t0q, #(16-12); \
veor r0, r0, t0q; \
interleave_op; \
vshr.u32 t0q, r0, #1; \
vshr.u32 t1q, r0, #2; \
vshr.u32 t2q, r0, #7; \
veor t0q, t0q, t1q; \
veor t0q, t0q, t2q; \
veor t0q, t0q, t; \
veor r0, r0, t0q; \
veor a, r0, r1;
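/* The macro above reduces the 256-bit carry-less product r0:r1 modulo the
 * GHASH polynomial x^128 + x^7 + x^2 + x + 1 using only shifts and XORs,
 * following the Gueron/Kounavis method cited above; the shift amounts
 * (31/30/25 and 1/2/7) correspond to the polynomial's low terms
 * x^7 + x^2 + x. */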
#define _(...) __VA_ARGS__
#define __ _()
/* Other functional macros */
-#define CLEAR_REG(reg) veor reg, reg;
+#define CLEAR_REG(reg) vmov.i8 reg, #0;
/*
* unsigned int _gcry_ghash_armv7_neon (void *gcm_key, byte *result,
* const byte *buf, size_t nblocks);
*/
.align 3
.globl _gcry_ghash_armv7_neon
.type _gcry_ghash_armv7_neon,%function;
_gcry_ghash_armv7_neon:
/* input:
* r0: gcm_key
* r1: result/hash
* r2: buf
* r3: nblocks
*/
push {r4-r6, lr}
cmp r3, #0
beq .Ldo_nothing
vpush {q4-q7}
vld1.64 {rhash}, [r1]
vld1.64 {rh1}, [r0]
vrev64.8 rhash, rhash /* byte-swap */
vmov.i64 k0, #0x0
vmov.i64 k16, #0xffff
vmov.i64 k32, #0xffffffff
vmov.i64 k48, #0xffffffffffff
vext.8 rhash, rhash, rhash, #8
/* Handle remaining blocks. */
vld1.64 {rbuf}, [r2]!
subs r3, r3, #1
vrev64.8 rbuf, rbuf /* byte-swap */
vext.8 rbuf, rbuf, rbuf, #8
veor rhash, rhash, rbuf
beq .Lend
.Loop:
vld1.64 {rbuf}, [r2]!
PMUL_128x128(rr0, rr1, rhash, rh1, rt0, rt1, _(vrev64.8 rbuf, rbuf))
REDUCTION(rhash, rr0, rr1, rt0, _(vext.8 rbuf, rbuf, rbuf, #8))
subs r3, r3, #1
veor rhash, rhash, rbuf
bne .Loop
.Lend:
PMUL_128x128(rr0, rr1, rhash, rh1, rt0, rt1, _(CLEAR_REG(rbuf)))
REDUCTION(rhash, rr0, rr1, rt0, _(CLEAR_REG(rh1)))
.Ldone:
CLEAR_REG(rr1)
vrev64.8 rhash, rhash /* byte-swap */
CLEAR_REG(rt0)
CLEAR_REG(rr0)
vext.8 rhash, rhash, rhash, #8
CLEAR_REG(rt1)
CLEAR_REG(t0q)
CLEAR_REG(t1q)
CLEAR_REG(t2q)
CLEAR_REG(t3q)
vst1.64 {rhash}, [r1]
CLEAR_REG(rhash)
vpop {q4-q7}
.Ldo_nothing:
mov r0, #0
pop {r4-r6, pc}
.size _gcry_ghash_armv7_neon,.-_gcry_ghash_armv7_neon;
/*
* void _gcry_ghash_setup_armv7_neon (void *gcm_key);
*/
.align 3
.globl _gcry_ghash_setup_armv7_neon
.type _gcry_ghash_setup_armv7_neon,%function;
_gcry_ghash_setup_armv7_neon:
/* input:
* r0: gcm_key
*/
vpush {q4-q7}
GET_DATA_POINTER(r2, .Lrconst64, r3)
vld1.64 {rrconst_h}, [r2]
#define GCM_LSH_1(r_out, ia, ib, const_d, oa, ob, ma) \
/* H <<< 1 */ \
vshr.s64 ma, ib, #63; \
vshr.u64 oa, ib, #63; \
vshr.u64 ob, ia, #63; \
vand ma, const_d; \
vshl.u64 ib, ib, #1; \
vshl.u64 ia, ia, #1; \
vorr ob, ib; \
vorr oa, ia; \
veor ob, ma; \
vst1.64 {oa, ob}, [r_out]
vld1.64 {rhash}, [r0]
vrev64.8 rhash, rhash /* byte-swap */
vext.8 rhash, rhash, rhash, #8
vmov rbuf1, rhash
GCM_LSH_1(r0, rhash_l, rhash_h, rrconst_h, rh1_l, rh1_h, rt1_l) /* H<<<1 */
CLEAR_REG(rh1)
CLEAR_REG(rhash)
CLEAR_REG(rbuf1)
CLEAR_REG(rrconst)
vpop {q4-q7}
bx lr
.size _gcry_ghash_setup_armv7_neon,.-_gcry_ghash_setup_armv7_neon;
#endif
diff --git a/cipher/cipher-gcm-armv8-aarch32-ce.S b/cipher/cipher-gcm-armv8-aarch32-ce.S
index 1de66a16..fb51b339 100644
--- a/cipher/cipher-gcm-armv8-aarch32-ce.S
+++ b/cipher/cipher-gcm-armv8-aarch32-ce.S
@@ -1,433 +1,433 @@
/* cipher-gcm-armv8-aarch32-ce.S - ARM/CE accelerated GHASH
* Copyright (C) 2016 Jussi Kivilinna <jussi.kivilinna@iki.fi>
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <config.h>
#if defined(HAVE_ARM_ARCH_V6) && defined(__ARMEL__) && \
defined(HAVE_COMPATIBLE_GCC_ARM_PLATFORM_AS) && \
defined(HAVE_GCC_INLINE_ASM_AARCH32_CRYPTO)
.syntax unified
.arch armv8-a
.fpu crypto-neon-fp-armv8
.arm
.text
#ifdef __PIC__
# define GET_DATA_POINTER(reg, name, rtmp) \
ldr reg, 1f; \
ldr rtmp, 2f; \
b 3f; \
1: .word _GLOBAL_OFFSET_TABLE_-(3f+8); \
2: .word name(GOT); \
3: add reg, pc, reg; \
ldr reg, [reg, rtmp];
#else
# define GET_DATA_POINTER(reg, name, rtmp) ldr reg, =name
#endif
/* Constants */
.align 4
gcry_gcm_reduction_constant:
.Lrconst64:
.quad 0xc200000000000000
/* Register macros */
#define rhash q0
#define rhash_l d0
#define rhash_h d1
#define rh1 q1
#define rh1_l d2
#define rh1_h d3
#define rbuf q2
#define rbuf_l d4
#define rbuf_h d5
#define rbuf1 q3
#define rbuf1_l d6
#define rbuf1_h d7
#define rbuf2 q4
#define rbuf2_l d8
#define rbuf2_h d9
#define rbuf3 q5
#define rbuf3_l d10
#define rbuf3_h d11
#define rh2 q6
#define rh2_l d12
#define rh2_h d13
#define rh3 q7
#define rh3_l d14
#define rh3_h d15
#define rh4 q8
#define rh4_l d16
#define rh4_h d17
#define rr2 q9
#define rr2_l d18
#define rr2_h d19
#define rr3 q10
#define rr3_l d20
#define rr3_h d21
#define rr0 q11
#define rr0_l d22
#define rr0_h d23
#define rr1 q12
#define rr1_l d24
#define rr1_h d25
#define rt0 q13
#define rt0_l d26
#define rt0_h d27
#define rt1 q14
#define rt1_l d28
#define rt1_h d29
#define rrconst q15
#define rrconst_l d30
#define rrconst_h d31
/* GHASH macros */
/* See "Gouvêa, C. P. L. & López, J. Implementing GCM on ARMv8. Topics in
* Cryptology — CT-RSA 2015" for details.
*/
/* Input: 'a' and 'b', Output: 'r0:r1' (low 128-bits in r0, high in r1)
* Note: 'r1' may be 'a' or 'b', 'r0' must not be either 'a' or 'b'.
*/
#define PMUL_128x128(r0, r1, a, b, t, interleave_op) \
veor t##_h, b##_l, b##_h; \
veor t##_l, a##_l, a##_h; \
vmull.p64 r0, a##_l, b##_l; \
vmull.p64 r1, a##_h, b##_h; \
vmull.p64 t, t##_h, t##_l; \
interleave_op; \
veor t, r0; \
veor t, r1; \
veor r0##_h, t##_l; \
veor r1##_l, t##_h;
/* Input: 'aA' and 'bA', Output: 'r0A:r1A' (low 128-bits in r0A, high in r1A)
* Note: 'r1A' may be 'aA' or 'bA', 'r0A' must not be either 'aA' or 'bA'.
* Input: 'aB' and 'bB', Output: 'r0B:r1B' (low 128-bits in r0B, high in r1B)
* Note: 'r1B' may be 'aB' or 'bB', 'r0B' must not be either 'aB' or 'bB'.
*/
#define PMUL_128x128_2(r0A, r1A, aA, bA, r0B, r1B, aB, bB, tA, tB, interleave_op) \
veor tA##_h, bA##_l, bA##_h; \
veor tA##_l, aA##_l, aA##_h; \
veor tB##_h, bB##_l, bB##_h; \
veor tB##_l, aB##_l, aB##_h; \
vmull.p64 r0A, aA##_l, bA##_l; \
vmull.p64 r1A, aA##_h, bA##_h; \
vmull.p64 tA, tA##_h, tA##_l; \
vmull.p64 r0B, aB##_l, bB##_l; \
vmull.p64 r1B, aB##_h, bB##_h; \
vmull.p64 tB, tB##_h, tB##_l; \
interleave_op; \
veor tA, r0A; \
veor tA, r1A; \
veor tB, r0B; \
veor tB, r1B; \
veor r0A##_h, tA##_l; \
veor r1A##_l, tA##_h; \
veor r0B##_h, tB##_l; \
veor r1B##_l, tB##_h; \
/* Input: 'r0:r1', Output: 'a' */
#define REDUCTION(a, r0, r1, rconst, t, interleave_op) \
vmull.p64 t, r0##_l, rconst; \
veor r0##_h, t##_l; \
veor r1##_l, t##_h; \
interleave_op; \
vmull.p64 t, r0##_h, rconst; \
veor r1, t; \
veor a, r0, r1;
#define _(...) __VA_ARGS__
#define __ _()
/* Other functional macros */
-#define CLEAR_REG(reg) veor reg, reg;
+#define CLEAR_REG(reg) vmov.i8 reg, #0;
/*
* unsigned int _gcry_ghash_armv8_ce_pmull (void *gcm_key, byte *result,
* const byte *buf, size_t nblocks,
* void *gcm_table);
*/
.align 3
.globl _gcry_ghash_armv8_ce_pmull
.type _gcry_ghash_armv8_ce_pmull,%function;
_gcry_ghash_armv8_ce_pmull:
/* input:
* r0: gcm_key
* r1: result/hash
* r2: buf
* r3: nblocks
* %st+0: gcm_table
*/
push {r4-r6, lr}
cmp r3, #0
beq .Ldo_nothing
GET_DATA_POINTER(r4, .Lrconst64, lr)
vld1.64 {rhash}, [r1]
vld1.64 {rh1}, [r0]
vrev64.8 rhash, rhash /* byte-swap */
vld1.64 {rrconst_h}, [r4]
vext.8 rhash, rhash, rhash, #8
cmp r3, #4
blo .Less_than_4
/* Bulk processing of 4 blocks per loop iteration. */
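/* Aggregated reduction: each group of four blocks updates the hash as
 * Y = (in0 ^ Y) * H⁴ ^ in1 * H³ ^ in2 * H² ^ in3 * H¹, with a single
 * modular reduction per group, using the powers of H precomputed in
 * gcm_table by the setup function. */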
ldr r5, [sp, #(4*4)];
add r6, r5, #32
vpush {q4-q7}
vld1.64 {rh2-rh3}, [r5]
vld1.64 {rh4}, [r6]
vld1.64 {rbuf-rbuf1}, [r2]!
sub r3, r3, #4
vld1.64 {rbuf2-rbuf3}, [r2]!
cmp r3, #4
vrev64.8 rbuf, rbuf /* byte-swap */
vrev64.8 rbuf1, rbuf1 /* byte-swap */
vrev64.8 rbuf2, rbuf2 /* byte-swap */
vrev64.8 rbuf3, rbuf3 /* byte-swap */
vext.8 rbuf, rbuf, rbuf, #8
vext.8 rbuf1, rbuf1, rbuf1, #8
vext.8 rbuf2, rbuf2, rbuf2, #8
vext.8 rbuf3, rbuf3, rbuf3, #8
veor rhash, rhash, rbuf /* in0 ^ hash */
blo .Lend_4
.Loop_4:
/* (in0 ^ hash) * H⁴ => rr2:rr3 */
/* (in1) * H³ => rr0:rr1 */
PMUL_128x128_2(rr0, rr1, rbuf1, rh3, rr2, rr3, rhash, rh4, rt1, rt0, __)
vld1.64 {rbuf-rbuf1}, [r2]!
sub r3, r3, #4
veor rr0, rr0, rr2
veor rr1, rr1, rr3
/* (in2) * H² => rr2:rr3 */
/* (in3) * H¹ => rhash:rbuf3 */
PMUL_128x128_2(rr2, rr3, rbuf2, rh2, rhash, rbuf3, rbuf3, rh1, rt0, rt1,
_(vrev64.8 rbuf, rbuf))
vld1.64 {rbuf2}, [r2]!
vrev64.8 rbuf1, rbuf1
veor rr0, rr0, rr2
veor rr1, rr1, rr3
cmp r3, #4
vext.8 rbuf, rbuf, rbuf, #8
vext.8 rbuf1, rbuf1, rbuf1, #8
veor rr0, rr0, rhash
veor rr1, rr1, rbuf3
vld1.64 {rbuf3}, [r2]!
REDUCTION(rhash, rr0, rr1, rrconst_h, rt1,
_(vrev64.8 rbuf2, rbuf2;
vrev64.8 rbuf3, rbuf3))
vext.8 rbuf2, rbuf2, rbuf2, #8
vext.8 rbuf3, rbuf3, rbuf3, #8
veor rhash, rhash, rbuf /* in0 ^ hash */
bhs .Loop_4
.Lend_4:
/* (in0 ^ hash) * H⁴ => rr2:rr3 */
/* (in1) * H³ => rr0:rr1 */
PMUL_128x128_2(rr0, rr1, rbuf1, rh3, rr2, rr3, rhash, rh4, rt1, rt0, __)
/* (in2) * H² => rhash:rbuf */
/* (in3) * H¹ => rbuf1:rbuf2 */
PMUL_128x128_2(rhash, rbuf, rbuf2, rh2, rbuf1, rbuf2, rbuf3, rh1, rt0, rt1,
_(veor rr0, rr0, rr2;
veor rr1, rr1, rr3))
veor rr0, rr0, rhash
veor rr1, rr1, rbuf
veor rr0, rr0, rbuf1
veor rr1, rr1, rbuf2
REDUCTION(rhash, rr0, rr1, rrconst_h, rt1,
_(CLEAR_REG(rr2);
CLEAR_REG(rr3);
CLEAR_REG(rbuf1);
CLEAR_REG(rbuf2);
CLEAR_REG(rbuf3);
CLEAR_REG(rh2);
CLEAR_REG(rh3);
CLEAR_REG(rh4)))
vpop {q4-q7}
cmp r3, #0
beq .Ldone
.Less_than_4:
/* Handle remaining blocks. */
vld1.64 {rbuf}, [r2]!
subs r3, r3, #1
vrev64.8 rbuf, rbuf /* byte-swap */
vext.8 rbuf, rbuf, rbuf, #8
veor rhash, rhash, rbuf
beq .Lend
.Loop:
vld1.64 {rbuf}, [r2]!
subs r3, r3, #1
PMUL_128x128(rr0, rr1, rhash, rh1, rt0, _(vrev64.8 rbuf, rbuf))
REDUCTION(rhash, rr0, rr1, rrconst_h, rt0, _(vext.8 rbuf, rbuf, rbuf, #8))
veor rhash, rhash, rbuf
bne .Loop
.Lend:
PMUL_128x128(rr0, rr1, rhash, rh1, rt0, _(CLEAR_REG(rbuf)))
REDUCTION(rhash, rr0, rr1, rrconst_h, rt0, _(CLEAR_REG(rh1)))
.Ldone:
CLEAR_REG(rr1)
vrev64.8 rhash, rhash /* byte-swap */
CLEAR_REG(rt0)
CLEAR_REG(rr0)
vext.8 rhash, rhash, rhash, #8
CLEAR_REG(rt1)
vst1.64 {rhash}, [r1]
CLEAR_REG(rhash)
.Ldo_nothing:
mov r0, #0
pop {r4-r6, pc}
.size _gcry_ghash_armv8_ce_pmull,.-_gcry_ghash_armv8_ce_pmull;
/*
* void _gcry_ghash_setup_armv8_ce_pmull (void *gcm_key, void *gcm_table);
*/
.align 3
.globl _gcry_ghash_setup_armv8_ce_pmull
.type _gcry_ghash_setup_armv8_ce_pmull,%function;
_gcry_ghash_setup_armv8_ce_pmull:
/* input:
* r0: gcm_key
* r1: gcm_table
*/
vpush {q4-q7}
GET_DATA_POINTER(r2, .Lrconst64, r3)
vld1.64 {rrconst_h}, [r2]
#define GCM_LSH_1(r_out, ia, ib, const_d, oa, ob, ma) \
/* H <<< 1 */ \
vshr.s64 ma, ib, #63; \
vshr.u64 oa, ib, #63; \
vshr.u64 ob, ia, #63; \
vand ma, const_d; \
vshl.u64 ib, ib, #1; \
vshl.u64 ia, ia, #1; \
vorr ob, ib; \
vorr oa, ia; \
veor ob, ma; \
vst1.64 {oa, ob}, [r_out]
vld1.64 {rhash}, [r0]
vrev64.8 rhash, rhash /* byte-swap */
vext.8 rhash, rhash, rhash, #8
vmov rbuf1, rhash
GCM_LSH_1(r0, rhash_l, rhash_h, rrconst_h, rh1_l, rh1_h, rt1_l) /* H<<<1 */
/* H² */
PMUL_128x128(rr0, rr1, rbuf1, rh1, rt0, __)
REDUCTION(rh2, rr0, rr1, rrconst_h, rt0, __)
vmov rhash, rh2
GCM_LSH_1(r1, rh2_l, rh2_h, rrconst_h, rbuf1_l, rbuf1_h, rt1_l) /* H²<<<1 */
add r1, r1, #16
/* H³ */
PMUL_128x128(rr0, rr1, rhash, rh1, rt1, __)
REDUCTION(rh3, rr0, rr1, rrconst_h, rt1, __)
/* H⁴ */
PMUL_128x128(rr0, rr1, rhash, rbuf1, rt0, __)
REDUCTION(rh4, rr0, rr1, rrconst_h, rt0, __)
GCM_LSH_1(r1, rh3_l, rh3_h, rrconst_h, rt0_l, rt0_h, rt1_l) /* H³<<<1 */
add r1, r1, #16
GCM_LSH_1(r1, rh4_l, rh4_h, rrconst_h, rt0_l, rt0_h, rt1_l) /* H⁴<<<1 */
CLEAR_REG(rt0)
CLEAR_REG(rt1)
CLEAR_REG(rr1)
CLEAR_REG(rr0)
CLEAR_REG(rh1)
CLEAR_REG(rh2)
CLEAR_REG(rh3)
CLEAR_REG(rh4)
CLEAR_REG(rhash)
CLEAR_REG(rbuf1)
CLEAR_REG(rrconst)
vpop {q4-q7}
bx lr
.size _gcry_ghash_setup_armv8_ce_pmull,.-_gcry_ghash_setup_armv8_ce_pmull;
#endif
diff --git a/cipher/cipher-gcm-armv8-aarch64-ce.S b/cipher/cipher-gcm-armv8-aarch64-ce.S
index 877207d3..13ee83ed 100644
--- a/cipher/cipher-gcm-armv8-aarch64-ce.S
+++ b/cipher/cipher-gcm-armv8-aarch64-ce.S
@@ -1,424 +1,424 @@
/* cipher-gcm-armv8-aarch64-ce.S - ARM/CE accelerated GHASH
* Copyright (C) 2016 Jussi Kivilinna <jussi.kivilinna@iki.fi>
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "asm-common-aarch64.h"
#if defined(__AARCH64EL__) && \
defined(HAVE_COMPATIBLE_GCC_AARCH64_PLATFORM_AS) && \
defined(HAVE_GCC_INLINE_ASM_AARCH64_CRYPTO)
.cpu generic+simd+crypto
.text
/* Constants */
.align 4
gcry_gcm_reduction_constant:
.Lrconst:
.quad 0x87
/* Register macros */
#define rhash v0
#define rr0 v1
#define rr1 v2
#define rbuf v3
#define rbuf1 v4
#define rbuf2 v5
#define rbuf3 v6
#define rbuf4 v7
#define rbuf5 v8
#define rr2 v9
#define rr3 v10
#define rr4 v11
#define rr5 v12
#define rr6 v13
#define rr7 v14
#define rr8 v15
#define rr9 v16
#define rrconst v18
#define rh1 v19
#define rh2 v20
#define rh3 v21
#define rh4 v22
#define rh5 v23
#define rh6 v24
#define t0 v25
#define t1 v26
#define t2 v27
#define t3 v28
#define t4 v29
#define t5 v30
#define vZZ v31
/* GHASH macros */
/* See "Gouvêa, C. P. L. & López, J. Implementing GCM on ARMv8. Topics in
* Cryptology — CT-RSA 2015" for details.
*/
/* Input: 'a' and 'b', Output: 'r0:r1' (low 128-bits in r0, high in r1) */
#define PMUL_128x128(r0, r1, a, b, T0, T1, interleave_op) \
ext T0.16b, b.16b, b.16b, #8; \
pmull r0.1q, a.1d, b.1d; \
pmull2 r1.1q, a.2d, b.2d; \
pmull T1.1q, a.1d, T0.1d; \
pmull2 T0.1q, a.2d, T0.2d; \
interleave_op; \
eor T0.16b, T0.16b, T1.16b; \
ext T1.16b, vZZ.16b, T0.16b, #8; \
ext T0.16b, T0.16b, vZZ.16b, #8; \
eor r0.16b, r0.16b, T1.16b; \
eor r1.16b, r1.16b, T0.16b;
/* Input: 'aA' and 'bA', Output: 'r0A:r1A' (low 128-bits in r0A, high in r1A)
* Input: 'aB' and 'bB', Output: 'r0B:r1B' (low 128-bits in r0B, high in r1B)
* Input: 'aC' and 'bC', Output: 'r0C:r1C' (low 128-bits in r0C, high in r1C)
*/
#define PMUL_128x128_3(r0A, r1A, aA, bA, t0A, t1A, \
r0B, r1B, aB, bB, t0B, t1B, \
r0C, r1C, aC, bC, t0C, t1C, interleave_op) \
ext t0A.16b, bA.16b, bA.16b, #8; \
pmull r0A.1q, aA.1d, bA.1d; \
pmull2 r1A.1q, aA.2d, bA.2d; \
ext t0B.16b, bB.16b, bB.16b, #8; \
pmull r0B.1q, aB.1d, bB.1d; \
pmull2 r1B.1q, aB.2d, bB.2d; \
ext t0C.16b, bC.16b, bC.16b, #8; \
pmull r0C.1q, aC.1d, bC.1d; \
pmull2 r1C.1q, aC.2d, bC.2d; \
pmull t1A.1q, aA.1d, t0A.1d; \
pmull2 t0A.1q, aA.2d, t0A.2d; \
pmull t1B.1q, aB.1d, t0B.1d; \
pmull2 t0B.1q, aB.2d, t0B.2d; \
pmull t1C.1q, aC.1d, t0C.1d; \
pmull2 t0C.1q, aC.2d, t0C.2d; \
eor t0A.16b, t0A.16b, t1A.16b; \
eor t0B.16b, t0B.16b, t1B.16b; \
eor t0C.16b, t0C.16b, t1C.16b; \
interleave_op; \
ext t1A.16b, vZZ.16b, t0A.16b, #8; \
ext t0A.16b, t0A.16b, vZZ.16b, #8; \
ext t1B.16b, vZZ.16b, t0B.16b, #8; \
ext t0B.16b, t0B.16b, vZZ.16b, #8; \
ext t1C.16b, vZZ.16b, t0C.16b, #8; \
ext t0C.16b, t0C.16b, vZZ.16b, #8; \
eor r0A.16b, r0A.16b, t1A.16b; \
eor r1A.16b, r1A.16b, t0A.16b; \
eor r0B.16b, r0B.16b, t1B.16b; \
eor r1B.16b, r1B.16b, t0B.16b; \
eor r0C.16b, r0C.16b, t1C.16b; \
eor r1C.16b, r1C.16b, t0C.16b; \
/* Input: 'r0:r1', Output: 'a' */
#define REDUCTION(a, r0, r1, rconst, T0, T1, interleave_op1, interleave_op2, \
interleave_op3) \
pmull2 T0.1q, r1.2d, rconst.2d; \
interleave_op1; \
ext T1.16b, T0.16b, vZZ.16b, #8; \
ext T0.16b, vZZ.16b, T0.16b, #8; \
interleave_op2; \
eor r1.16b, r1.16b, T1.16b; \
eor r0.16b, r0.16b, T0.16b; \
pmull T0.1q, r1.1d, rconst.1d; \
interleave_op3; \
eor a.16b, r0.16b, T0.16b;
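/* The two pmull operations above fold the upper 128 bits of the product back
 * into the lower 128 bits using the constant loaded from .Lrconst; 0x87
 * encodes the low terms x^7 + x^2 + x + 1 of the GHASH polynomial
 * x^128 + x^7 + x^2 + x + 1. Unlike the 32-bit code, this implementation
 * bit-reverses all inputs with 'rbit' rather than byte-swapping them. */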
/* Other functional macros */
#define _(...) __VA_ARGS__
#define __ _()
-#define CLEAR_REG(reg) eor reg.16b, reg.16b, reg.16b;
+#define CLEAR_REG(reg) movi reg.16b, #0;
#define VPUSH_ABI \
stp d8, d9, [sp, #-16]!; \
CFI_ADJUST_CFA_OFFSET(16); \
stp d10, d11, [sp, #-16]!; \
CFI_ADJUST_CFA_OFFSET(16); \
stp d12, d13, [sp, #-16]!; \
CFI_ADJUST_CFA_OFFSET(16); \
stp d14, d15, [sp, #-16]!; \
CFI_ADJUST_CFA_OFFSET(16);
#define VPOP_ABI \
ldp d14, d15, [sp], #16; \
CFI_ADJUST_CFA_OFFSET(-16); \
ldp d12, d13, [sp], #16; \
CFI_ADJUST_CFA_OFFSET(-16); \
ldp d10, d11, [sp], #16; \
CFI_ADJUST_CFA_OFFSET(-16); \
ldp d8, d9, [sp], #16; \
CFI_ADJUST_CFA_OFFSET(-16);
/*
* unsigned int _gcry_ghash_armv8_ce_pmull (void *gcm_key, byte *result,
* const byte *buf, size_t nblocks,
* void *gcm_table);
*/
.align 3
.globl _gcry_ghash_armv8_ce_pmull
ELF(.type _gcry_ghash_armv8_ce_pmull,%function;)
_gcry_ghash_armv8_ce_pmull:
/* input:
* x0: gcm_key
* x1: result/hash
* x2: buf
* x3: nblocks
* x4: gcm_table
*/
CFI_STARTPROC();
cbz x3, .Ldo_nothing;
GET_DATA_POINTER(x5, .Lrconst)
eor vZZ.16b, vZZ.16b, vZZ.16b
ld1 {rhash.16b}, [x1]
ld1 {rh1.16b}, [x0]
rbit rhash.16b, rhash.16b /* bit-swap */
ld1r {rrconst.2d}, [x5]
cmp x3, #6
b.lo .Less_than_6
add x6, x4, #64
VPUSH_ABI
ld1 {rh2.16b-rh5.16b}, [x4]
ld1 {rh6.16b}, [x6]
sub x3, x3, #6
ld1 {rbuf.16b-rbuf2.16b}, [x2], #(3*16)
ld1 {rbuf3.16b-rbuf5.16b}, [x2], #(3*16)
rbit rbuf.16b, rbuf.16b /* bit-swap */
rbit rbuf1.16b, rbuf1.16b /* bit-swap */
rbit rbuf2.16b, rbuf2.16b /* bit-swap */
rbit rbuf3.16b, rbuf3.16b /* bit-swap */
rbit rbuf4.16b, rbuf4.16b /* bit-swap */
rbit rbuf5.16b, rbuf5.16b /* bit-swap */
eor rhash.16b, rhash.16b, rbuf.16b
cmp x3, #6
b.lo .Lend_6
.Loop_6:
/* (in1) * H⁵ => rr0:rr1 */
/* (in2) * H⁴ => rr2:rr3 */
/* (in0 ^ hash) * H⁶ => rr4:rr5 */
PMUL_128x128_3(rr0, rr1, rbuf1, rh5, t0, t1,
rr2, rr3, rbuf2, rh4, t2, t3,
rr4, rr5, rhash, rh6, t4, t5,
_(sub x3, x3, #6))
ld1 {rbuf.16b-rbuf2.16b}, [x2], #(3*16)
cmp x3, #6
eor rr0.16b, rr0.16b, rr2.16b
eor rr1.16b, rr1.16b, rr3.16b
/* (in3) * H³ => rr2:rr3 */
/* (in4) * H² => rr6:rr7 */
/* (in5) * H¹ => rr8:rr9 */
PMUL_128x128_3(rr2, rr3, rbuf3, rh3, t0, t1,
rr6, rr7, rbuf4, rh2, t2, t3,
rr8, rr9, rbuf5, rh1, t4, t5,
_(eor rr0.16b, rr0.16b, rr4.16b;
eor rr1.16b, rr1.16b, rr5.16b))
eor rr0.16b, rr0.16b, rr2.16b
eor rr1.16b, rr1.16b, rr3.16b
rbit rbuf.16b, rbuf.16b
eor rr0.16b, rr0.16b, rr6.16b
eor rr1.16b, rr1.16b, rr7.16b
rbit rbuf1.16b, rbuf1.16b
eor rr0.16b, rr0.16b, rr8.16b
eor rr1.16b, rr1.16b, rr9.16b
ld1 {rbuf3.16b-rbuf5.16b}, [x2], #(3*16)
REDUCTION(rhash, rr0, rr1, rrconst, t0, t1,
_(rbit rbuf2.16b, rbuf2.16b),
_(rbit rbuf3.16b, rbuf3.16b),
_(rbit rbuf4.16b, rbuf4.16b))
rbit rbuf5.16b, rbuf5.16b
eor rhash.16b, rhash.16b, rbuf.16b
b.hs .Loop_6
.Lend_6:
/* (in1) * H⁵ => rr0:rr1 */
/* (in0 ^ hash) * H⁶ => rr2:rr3 */
/* (in2) * H⁴ => rr4:rr5 */
PMUL_128x128_3(rr0, rr1, rbuf1, rh5, t0, t1,
rr2, rr3, rhash, rh6, t2, t3,
rr4, rr5, rbuf2, rh4, t4, t5,
__)
eor rr0.16b, rr0.16b, rr2.16b
eor rr1.16b, rr1.16b, rr3.16b
eor rr0.16b, rr0.16b, rr4.16b
eor rr1.16b, rr1.16b, rr5.16b
/* (in3) * H³ => rhash:rbuf */
/* (in4) * H² => rr6:rr7 */
/* (in5) * H¹ => rr8:rr9 */
PMUL_128x128_3(rhash, rbuf, rbuf3, rh3, t0, t1,
rr6, rr7, rbuf4, rh2, t2, t3,
rr8, rr9, rbuf5, rh1, t4, t5,
_(CLEAR_REG(rh4);
CLEAR_REG(rh5);
CLEAR_REG(rh6)))
eor rr0.16b, rr0.16b, rhash.16b
eor rr1.16b, rr1.16b, rbuf.16b
eor rr0.16b, rr0.16b, rr6.16b
eor rr1.16b, rr1.16b, rr7.16b
eor rr0.16b, rr0.16b, rr8.16b
eor rr1.16b, rr1.16b, rr9.16b
REDUCTION(rhash, rr0, rr1, rrconst, t0, t1,
_(CLEAR_REG(rh2);
CLEAR_REG(rh3);
CLEAR_REG(rr2);
CLEAR_REG(rbuf2);
CLEAR_REG(rbuf3)),
_(CLEAR_REG(rr3);
CLEAR_REG(rr4);
CLEAR_REG(rr5);
CLEAR_REG(rr6);
CLEAR_REG(rr7)),
_(CLEAR_REG(rr8);
CLEAR_REG(rr9);
CLEAR_REG(rbuf1);
CLEAR_REG(rbuf2)))
CLEAR_REG(rbuf4)
CLEAR_REG(rbuf5)
CLEAR_REG(t2)
CLEAR_REG(t3)
CLEAR_REG(t4)
CLEAR_REG(t5)
VPOP_ABI
cbz x3, .Ldone
.Less_than_6:
/* Handle remaining blocks. */
ld1 {rbuf.16b}, [x2], #16
sub x3, x3, #1
rbit rbuf.16b, rbuf.16b /* bit-swap */
eor rhash.16b, rhash.16b, rbuf.16b
cbz x3, .Lend
.Loop:
PMUL_128x128(rr0, rr1, rh1, rhash, t0, t1, _(ld1 {rbuf.16b}, [x2], #16))
REDUCTION(rhash, rr0, rr1, rrconst, t0, t1,
_(sub x3, x3, #1),
_(rbit rbuf.16b, rbuf.16b),
__)
eor rhash.16b, rhash.16b, rbuf.16b
cbnz x3, .Loop
.Lend:
PMUL_128x128(rr0, rr1, rh1, rhash, t0, t1, _(CLEAR_REG(rbuf)))
REDUCTION(rhash, rr0, rr1, rrconst, t0, t1, __, _(CLEAR_REG(rh1)), __)
.Ldone:
CLEAR_REG(rr1)
CLEAR_REG(rr0)
rbit rhash.16b, rhash.16b /* bit-swap */
CLEAR_REG(t0)
CLEAR_REG(t1)
st1 {rhash.2d}, [x1]
CLEAR_REG(rhash)
.Ldo_nothing:
mov x0, #0
ret
CFI_ENDPROC()
ELF(.size _gcry_ghash_armv8_ce_pmull,.-_gcry_ghash_armv8_ce_pmull;)
/*
* void _gcry_ghash_setup_armv8_ce_pmull (void *gcm_key, void *gcm_table);
*/
.align 3
.globl _gcry_ghash_setup_armv8_ce_pmull
ELF(.type _gcry_ghash_setup_armv8_ce_pmull,%function;)
_gcry_ghash_setup_armv8_ce_pmull:
/* input:
* x0: gcm_key
* x1: gcm_table
*/
CFI_STARTPROC()
GET_DATA_POINTER(x2, .Lrconst)
eor vZZ.16b, vZZ.16b, vZZ.16b
/* H¹ */
ld1 {rh1.16b}, [x0]
rbit rh1.16b, rh1.16b
st1 {rh1.16b}, [x0]
ld1r {rrconst.2d}, [x2]
/* H² */
PMUL_128x128(rr0, rr1, rh1, rh1, t0, t1, __)
REDUCTION(rh2, rr0, rr1, rrconst, t0, t1, __, __, __)
/* H³ */
PMUL_128x128(rr0, rr1, rh2, rh1, t0, t1, __)
REDUCTION(rh3, rr0, rr1, rrconst, t0, t1, __, __, __)
/* H⁴ */
PMUL_128x128(rr0, rr1, rh2, rh2, t0, t1, __)
REDUCTION(rh4, rr0, rr1, rrconst, t0, t1, __, __, __)
/* H⁵ */
PMUL_128x128(rr0, rr1, rh2, rh3, t0, t1, __)
REDUCTION(rh5, rr0, rr1, rrconst, t0, t1, __, __, __)
/* H⁶ */
PMUL_128x128(rr0, rr1, rh3, rh3, t0, t1, __)
REDUCTION(rh6, rr0, rr1, rrconst, t0, t1, __, __, __)
st1 {rh2.16b-rh4.16b}, [x1], #(3*16)
st1 {rh5.16b-rh6.16b}, [x1]
ret
CFI_ENDPROC()
ELF(.size _gcry_ghash_setup_armv8_ce_pmull,.-_gcry_ghash_setup_armv8_ce_pmull;)
#endif
diff --git a/cipher/rijndael-armv8-aarch32-ce.S b/cipher/rijndael-armv8-aarch32-ce.S
index 6d78af0a..1eafa93e 100644
--- a/cipher/rijndael-armv8-aarch32-ce.S
+++ b/cipher/rijndael-armv8-aarch32-ce.S
@@ -1,1988 +1,1988 @@
/* rijndael-armv8-aarch32-ce.S - ARMv8/CE accelerated AES
* Copyright (C) 2016 Jussi Kivilinna <jussi.kivilinna@iki.fi>
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <config.h>
#if defined(HAVE_ARM_ARCH_V6) && defined(__ARMEL__) && \
defined(HAVE_COMPATIBLE_GCC_ARM_PLATFORM_AS) && \
defined(HAVE_GCC_INLINE_ASM_AARCH32_CRYPTO)
.syntax unified
.arch armv8-a
.fpu crypto-neon-fp-armv8
.arm
.text
#ifdef __PIC__
# define GET_DATA_POINTER(reg, name, rtmp) \
ldr reg, 1f; \
ldr rtmp, 2f; \
b 3f; \
1: .word _GLOBAL_OFFSET_TABLE_-(3f+8); \
2: .word name(GOT); \
3: add reg, pc, reg; \
ldr reg, [reg, rtmp];
#else
# define GET_DATA_POINTER(reg, name, rtmp) ldr reg, =name
#endif
/* AES macros */
#define aes_preload_keys(keysched, rekeysched) \
vldmia keysched!, {q5-q7}; \
mov rekeysched, keysched; \
vldmialo keysched!, {q8-q15}; /* 128-bit */ \
addeq keysched, #(2*16); \
vldmiaeq keysched!, {q10-q15}; /* 192-bit */ \
addhi keysched, #(4*16); \
vldmiahi keysched!, {q12-q15}; /* 256-bit */ \
#define do_aes_one128(ed, mcimc, qo, qb) \
aes##ed.8 qb, q5; \
aes##mcimc.8 qb, qb; \
aes##ed.8 qb, q6; \
aes##mcimc.8 qb, qb; \
aes##ed.8 qb, q7; \
aes##mcimc.8 qb, qb; \
aes##ed.8 qb, q8; \
aes##mcimc.8 qb, qb; \
aes##ed.8 qb, q9; \
aes##mcimc.8 qb, qb; \
aes##ed.8 qb, q10; \
aes##mcimc.8 qb, qb; \
aes##ed.8 qb, q11; \
aes##mcimc.8 qb, qb; \
aes##ed.8 qb, q12; \
aes##mcimc.8 qb, qb; \
aes##ed.8 qb, q13; \
aes##mcimc.8 qb, qb; \
aes##ed.8 qb, q14; \
veor qo, qb, q15;
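/* One AES-128 block: aese/aesd performs AddRoundKey together with
 * SubBytes/ShiftRows (or their inverses) and aesmc/aesimc performs
 * (Inv)MixColumns; the final round has no MixColumns, so the last aes##ed
 * is followed by a plain veor with the last round key q15. */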
#define do_aes_one128re(ed, mcimc, qo, qb, keysched, rekeysched) \
vldm rekeysched, {q8-q9}; \
do_aes_one128(ed, mcimc, qo, qb);
#define do_aes_one192(ed, mcimc, qo, qb, keysched, rekeysched) \
vldm rekeysched!, {q8}; \
aes##ed.8 qb, q5; \
aes##mcimc.8 qb, qb; \
vldm rekeysched, {q9}; \
aes##ed.8 qb, q6; \
aes##mcimc.8 qb, qb; \
aes##ed.8 qb, q7; \
aes##mcimc.8 qb, qb; \
aes##ed.8 qb, q8; \
aes##mcimc.8 qb, qb; \
vldmia keysched!, {q8}; \
aes##ed.8 qb, q9; \
aes##mcimc.8 qb, qb; \
sub rekeysched, #(1*16); \
aes##ed.8 qb, q10; \
aes##mcimc.8 qb, qb; \
vldm keysched, {q9}; \
aes##ed.8 qb, q11; \
aes##mcimc.8 qb, qb; \
aes##ed.8 qb, q12; \
aes##mcimc.8 qb, qb; \
sub keysched, #16; \
aes##ed.8 qb, q13; \
aes##mcimc.8 qb, qb; \
aes##ed.8 qb, q14; \
aes##mcimc.8 qb, qb; \
aes##ed.8 qb, q15; \
aes##mcimc.8 qb, qb; \
aes##ed.8 qb, q8; \
veor qo, qb, q9; \
#define do_aes_one256(ed, mcimc, qo, qb, keysched, rekeysched) \
vldmia rekeysched!, {q8}; \
aes##ed.8 qb, q5; \
aes##mcimc.8 qb, qb; \
vldmia rekeysched!, {q9}; \
aes##ed.8 qb, q6; \
aes##mcimc.8 qb, qb; \
vldmia rekeysched!, {q10}; \
aes##ed.8 qb, q7; \
aes##mcimc.8 qb, qb; \
vldm rekeysched, {q11}; \
aes##ed.8 qb, q8; \
aes##mcimc.8 qb, qb; \
vldmia keysched!, {q8}; \
aes##ed.8 qb, q9; \
aes##mcimc.8 qb, qb; \
aes##ed.8 qb, q10; \
aes##mcimc.8 qb, qb; \
vldmia keysched!, {q9}; \
aes##ed.8 qb, q11; \
aes##mcimc.8 qb, qb; \
sub rekeysched, #(3*16); \
aes##ed.8 qb, q12; \
aes##mcimc.8 qb, qb; \
vldmia keysched!, {q10}; \
aes##ed.8 qb, q13; \
aes##mcimc.8 qb, qb; \
aes##ed.8 qb, q14; \
aes##mcimc.8 qb, qb; \
vldm keysched, {q11}; \
aes##ed.8 qb, q15; \
aes##mcimc.8 qb, qb; \
aes##ed.8 qb, q8; \
aes##mcimc.8 qb, qb; \
aes##ed.8 qb, q9; \
aes##mcimc.8 qb, qb; \
aes##ed.8 qb, q10; \
veor qo, qb, q11; \
sub keysched, #(3*16); \
#define aes_round_4(ed, mcimc, b0, b1, b2, b3, key) \
aes##ed.8 b0, key; \
aes##mcimc.8 b0, b0; \
aes##ed.8 b1, key; \
aes##mcimc.8 b1, b1; \
aes##ed.8 b2, key; \
aes##mcimc.8 b2, b2; \
aes##ed.8 b3, key; \
aes##mcimc.8 b3, b3;
#define do_aes_4_128(ed, mcimc, b0, b1, b2, b3) \
aes_round_4(ed, mcimc, b0, b1, b2, b3, q5); \
aes_round_4(ed, mcimc, b0, b1, b2, b3, q6); \
aes_round_4(ed, mcimc, b0, b1, b2, b3, q7); \
aes_round_4(ed, mcimc, b0, b1, b2, b3, q8); \
aes_round_4(ed, mcimc, b0, b1, b2, b3, q9); \
aes_round_4(ed, mcimc, b0, b1, b2, b3, q10); \
aes_round_4(ed, mcimc, b0, b1, b2, b3, q11); \
aes_round_4(ed, mcimc, b0, b1, b2, b3, q12); \
aes_round_4(ed, mcimc, b0, b1, b2, b3, q13); \
aes##ed.8 b0, q14; \
veor b0, b0, q15; \
aes##ed.8 b1, q14; \
veor b1, b1, q15; \
aes##ed.8 b2, q14; \
veor b2, b2, q15; \
aes##ed.8 b3, q14; \
veor b3, b3, q15;
#define do_aes_4_128re(ed, mcimc, b0, b1, b2, b3, keysched, rekeysched) \
vldm rekeysched, {q8-q9}; \
do_aes_4_128(ed, mcimc, b0, b1, b2, b3);
#define do_aes_4_192(ed, mcimc, b0, b1, b2, b3, keysched, rekeysched) \
vldm rekeysched!, {q8}; \
aes_round_4(ed, mcimc, b0, b1, b2, b3, q5); \
vldm rekeysched, {q9}; \
aes_round_4(ed, mcimc, b0, b1, b2, b3, q6); \
aes_round_4(ed, mcimc, b0, b1, b2, b3, q7); \
aes_round_4(ed, mcimc, b0, b1, b2, b3, q8); \
vldmia keysched!, {q8}; \
aes_round_4(ed, mcimc, b0, b1, b2, b3, q9); \
sub rekeysched, #(1*16); \
aes_round_4(ed, mcimc, b0, b1, b2, b3, q10); \
vldm keysched, {q9}; \
aes_round_4(ed, mcimc, b0, b1, b2, b3, q11); \
aes_round_4(ed, mcimc, b0, b1, b2, b3, q12); \
sub keysched, #16; \
aes_round_4(ed, mcimc, b0, b1, b2, b3, q13); \
aes_round_4(ed, mcimc, b0, b1, b2, b3, q14); \
aes_round_4(ed, mcimc, b0, b1, b2, b3, q15); \
aes##ed.8 b0, q8; \
veor b0, b0, q9; \
aes##ed.8 b1, q8; \
veor b1, b1, q9; \
aes##ed.8 b2, q8; \
veor b2, b2, q9; \
aes##ed.8 b3, q8; \
veor b3, b3, q9;
#define do_aes_4_256(ed, mcimc, b0, b1, b2, b3, keysched, rekeysched) \
vldmia rekeysched!, {q8}; \
aes_round_4(ed, mcimc, b0, b1, b2, b3, q5); \
vldmia rekeysched!, {q9}; \
aes_round_4(ed, mcimc, b0, b1, b2, b3, q6); \
vldmia rekeysched!, {q10}; \
aes_round_4(ed, mcimc, b0, b1, b2, b3, q7); \
vldm rekeysched, {q11}; \
aes_round_4(ed, mcimc, b0, b1, b2, b3, q8); \
vldmia keysched!, {q8}; \
aes_round_4(ed, mcimc, b0, b1, b2, b3, q9); \
aes_round_4(ed, mcimc, b0, b1, b2, b3, q10); \
vldmia keysched!, {q9}; \
aes_round_4(ed, mcimc, b0, b1, b2, b3, q11); \
sub rekeysched, #(3*16); \
aes_round_4(ed, mcimc, b0, b1, b2, b3, q12); \
vldmia keysched!, {q10}; \
aes_round_4(ed, mcimc, b0, b1, b2, b3, q13); \
aes_round_4(ed, mcimc, b0, b1, b2, b3, q14); \
vldm keysched, {q11}; \
aes_round_4(ed, mcimc, b0, b1, b2, b3, q15); \
aes_round_4(ed, mcimc, b0, b1, b2, b3, q8); \
aes_round_4(ed, mcimc, b0, b1, b2, b3, q9); \
sub keysched, #(3*16); \
aes##ed.8 b0, q10; \
veor b0, b0, q11; \
aes##ed.8 b1, q10; \
veor b1, b1, q11; \
aes##ed.8 b2, q10; \
veor b2, b2, q11; \
aes##ed.8 b3, q10; \
veor b3, b3, q11;
/* Other functional macros */
-#define CLEAR_REG(reg) veor reg, reg;
+#define CLEAR_REG(reg) vmov.i8 reg, #0;
/*
* unsigned int _gcry_aes_enc_armv8_ce(void *keysched, byte *dst,
* const byte *src,
* unsigned int nrounds);
*/
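/* Note: each round-key register is wiped with CLEAR_REG immediately after
 * its last use, so no key material is left in the NEON registers on
 * return. */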
.align 3
.globl _gcry_aes_enc_armv8_ce
.type _gcry_aes_enc_armv8_ce,%function;
_gcry_aes_enc_armv8_ce:
/* input:
* r0: keysched
* r1: dst
* r2: src
* r3: nrounds
*/
vldmia r0!, {q1-q3} /* load 3 round keys */
cmp r3, #12
vld1.8 {q0}, [r2]
bhi .Lenc1_256
beq .Lenc1_192
.Lenc1_128:
.Lenc1_tail:
vldmia r0, {q8-q15} /* load 8 round keys */
aese.8 q0, q1
aesmc.8 q0, q0
CLEAR_REG(q1)
aese.8 q0, q2
aesmc.8 q0, q0
CLEAR_REG(q2)
aese.8 q0, q3
aesmc.8 q0, q0
CLEAR_REG(q3)
aese.8 q0, q8
aesmc.8 q0, q0
CLEAR_REG(q8)
aese.8 q0, q9
aesmc.8 q0, q0
CLEAR_REG(q9)
aese.8 q0, q10
aesmc.8 q0, q0
CLEAR_REG(q10)
aese.8 q0, q11
aesmc.8 q0, q0
CLEAR_REG(q11)
aese.8 q0, q12
aesmc.8 q0, q0
CLEAR_REG(q12)
aese.8 q0, q13
aesmc.8 q0, q0
CLEAR_REG(q13)
aese.8 q0, q14
veor q0, q15
CLEAR_REG(q14)
CLEAR_REG(q15)
vst1.8 {q0}, [r1]
CLEAR_REG(q0)
mov r0, #0
bx lr
.Lenc1_192:
aese.8 q0, q1
aesmc.8 q0, q0
vmov q1, q3
aese.8 q0, q2
aesmc.8 q0, q0
vldm r0!, {q2-q3} /* load 3 round keys */
b .Lenc1_tail
.Lenc1_256:
vldm r0!, {q15} /* load 1 round key */
aese.8 q0, q1
aesmc.8 q0, q0
aese.8 q0, q2
aesmc.8 q0, q0
aese.8 q0, q3
aesmc.8 q0, q0
vldm r0!, {q1-q3} /* load 3 round keys */
aese.8 q0, q15
aesmc.8 q0, q0
b .Lenc1_tail
.size _gcry_aes_enc_armv8_ce,.-_gcry_aes_enc_armv8_ce;
/*
* unsigned int _gcry_aes_dec_armv8_ce(void *keysched, byte *dst,
* const byte *src,
* unsigned int nrounds);
*/
.align 3
.globl _gcry_aes_dec_armv8_ce
.type _gcry_aes_dec_armv8_ce,%function;
_gcry_aes_dec_armv8_ce:
/* input:
* r0: keysched
* r1: dst
* r2: src
* r3: nrounds
*/
vldmia r0!, {q1-q3} /* load 3 round keys */
cmp r3, #12
vld1.8 {q0}, [r2]
bhi .Ldec1_256
beq .Ldec1_192
.Ldec1_128:
.Ldec1_tail:
vldmia r0, {q8-q15} /* load 8 round keys */
aesd.8 q0, q1
aesimc.8 q0, q0
CLEAR_REG(q1)
aesd.8 q0, q2
aesimc.8 q0, q0
CLEAR_REG(q2)
aesd.8 q0, q3
aesimc.8 q0, q0
CLEAR_REG(q3)
aesd.8 q0, q8
aesimc.8 q0, q0
CLEAR_REG(q8)
aesd.8 q0, q9
aesimc.8 q0, q0
CLEAR_REG(q9)
aesd.8 q0, q10
aesimc.8 q0, q0
CLEAR_REG(q10)
aesd.8 q0, q11
aesimc.8 q0, q0
CLEAR_REG(q11)
aesd.8 q0, q12
aesimc.8 q0, q0
CLEAR_REG(q12)
aesd.8 q0, q13
aesimc.8 q0, q0
CLEAR_REG(q13)
aesd.8 q0, q14
veor q0, q15
CLEAR_REG(q14)
CLEAR_REG(q15)
vst1.8 {q0}, [r1]
CLEAR_REG(q0)
mov r0, #0
bx lr
.Ldec1_192:
aesd.8 q0, q1
aesimc.8 q0, q0
vmov q1, q3
aesd.8 q0, q2
aesimc.8 q0, q0
vldm r0!, {q2-q3} /* load 3 round keys */
b .Ldec1_tail
.Ldec1_256:
vldm r0!, {q15} /* load 1 round key */
aesd.8 q0, q1
aesimc.8 q0, q0
aesd.8 q0, q2
aesimc.8 q0, q0
aesd.8 q0, q3
aesimc.8 q0, q0
vldm r0!, {q1-q3} /* load 3 round keys */
aesd.8 q0, q15
aesimc.8 q0, q0
b .Ldec1_tail
.size _gcry_aes_dec_armv8_ce,.-_gcry_aes_dec_armv8_ce;
/*
* void _gcry_aes_cbc_enc_armv8_ce (const void *keysched,
* unsigned char *outbuf,
* const unsigned char *inbuf,
* unsigned char *iv, size_t nblocks,
* int cbc_mac, unsigned int nrounds);
*/
.align 3
.globl _gcry_aes_cbc_enc_armv8_ce
.type _gcry_aes_cbc_enc_armv8_ce,%function;
_gcry_aes_cbc_enc_armv8_ce:
/* input:
* r0: keysched
* r1: outbuf
* r2: inbuf
* r3: iv
* %st+0: nblocks => r4
* %st+4: cbc_mac => r5
* %st+8: nrounds => r6
*/
push {r4-r6,lr} /* 4*4 = 16b */
ldr r4, [sp, #(16+0)]
ldr r5, [sp, #(16+4)]
cmp r4, #0
ldr r6, [sp, #(16+8)]
beq .Lcbc_enc_skip
cmp r5, #0
vpush {q4-q7}
moveq r5, #16
movne r5, #0
cmp r6, #12
vld1.8 {q1}, [r3] /* load IV */
aes_preload_keys(r0, lr);
beq .Lcbc_enc_loop192
bhi .Lcbc_enc_loop256
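/* CBC encryption is serial: C_i = ENCIPHER(K, P_i xor C_{i-1}), so blocks
 * are processed one at a time. With cbc_mac != 0 the store stride r5 is
 * zero, so the output pointer is not advanced and only the final MAC block
 * is kept. */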
#define CBC_ENC(bits, ...) \
.Lcbc_enc_loop##bits: \
vld1.8 {q0}, [r2]!; /* load plaintext */ \
veor q1, q0, q1; \
subs r4, r4, #1; \
\
do_aes_one##bits(e, mc, q1, q1, ##__VA_ARGS__); \
\
vst1.8 {q1}, [r1], r5; /* store ciphertext */ \
\
bne .Lcbc_enc_loop##bits; \
b .Lcbc_enc_done;
CBC_ENC(128)
CBC_ENC(192, r0, lr)
CBC_ENC(256, r0, lr)
#undef CBC_ENC
.Lcbc_enc_done:
vst1.8 {q1}, [r3] /* store IV */
CLEAR_REG(q0)
CLEAR_REG(q1)
CLEAR_REG(q2)
CLEAR_REG(q3)
CLEAR_REG(q8)
CLEAR_REG(q9)
vpop {q4-q7}
CLEAR_REG(q10)
CLEAR_REG(q11)
CLEAR_REG(q12)
CLEAR_REG(q13)
CLEAR_REG(q14)
.Lcbc_enc_skip:
pop {r4-r6,pc}
.size _gcry_aes_cbc_enc_armv8_ce,.-_gcry_aes_cbc_enc_armv8_ce;
/*
* void _gcry_aes_cbc_dec_armv8_ce (const void *keysched,
* unsigned char *outbuf,
* const unsigned char *inbuf,
* unsigned char *iv, unsigned int nrounds);
*/
.align 3
.globl _gcry_aes_cbc_dec_armv8_ce
.type _gcry_aes_cbc_dec_armv8_ce,%function;
_gcry_aes_cbc_dec_armv8_ce:
/* input:
* r0: keysched
* r1: outbuf
* r2: inbuf
* r3: iv
* %st+0: nblocks => r4
* %st+4: nrounds => r5
*/
push {r4-r6,lr} /* 4*4 = 16b */
ldr r4, [sp, #(16+0)]
ldr r5, [sp, #(16+4)]
cmp r4, #0
beq .Lcbc_dec_skip
vpush {q4-q7}
cmp r5, #12
vld1.8 {q0}, [r3] /* load IV */
aes_preload_keys(r0, r6);
beq .Lcbc_dec_entry_192
bhi .Lcbc_dec_entry_256
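/* CBC decryption parallelizes: four blocks are deciphered at once and then
 * xored with the preceding ciphertext blocks, which are re-read from the
 * input buffer as the chaining values. */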
#define CBC_DEC(bits, ...) \
.Lcbc_dec_entry_##bits: \
cmp r4, #4; \
blo .Lcbc_dec_loop_##bits; \
\
.Lcbc_dec_loop4_##bits: \
\
vld1.8 {q1-q2}, [r2]!; /* load ciphertext */ \
sub r4, r4, #4; \
vld1.8 {q3-q4}, [r2]; /* load ciphertext */ \
cmp r4, #4; \
sub r2, #32; \
\
do_aes_4_##bits(d, imc, q1, q2, q3, q4, ##__VA_ARGS__); \
\
veor q1, q1, q0; \
vld1.8 {q0}, [r2]!; /* load next IV */ \
veor q2, q2, q0; \
vld1.8 {q0}, [r2]!; /* load next IV */ \
vst1.8 {q1-q2}, [r1]!; /* store plaintext */ \
veor q3, q3, q0; \
vld1.8 {q0}, [r2]!; /* load next IV */ \
veor q4, q4, q0; \
vld1.8 {q0}, [r2]!; /* load next IV */ \
vst1.8 {q3-q4}, [r1]!; /* store plaintext */ \
\
bhs .Lcbc_dec_loop4_##bits; \
cmp r4, #0; \
beq .Lcbc_dec_done; \
\
.Lcbc_dec_loop_##bits: \
vld1.8 {q1}, [r2]!; /* load ciphertext */ \
subs r4, r4, #1; \
vmov q2, q1; \
\
do_aes_one##bits(d, imc, q1, q1, ##__VA_ARGS__); \
\
veor q1, q1, q0; \
vmov q0, q2; \
vst1.8 {q1}, [r1]!; /* store plaintext */ \
\
bne .Lcbc_dec_loop_##bits; \
b .Lcbc_dec_done;
CBC_DEC(128)
CBC_DEC(192, r0, r6)
CBC_DEC(256, r0, r6)
#undef CBC_DEC
.Lcbc_dec_done:
vst1.8 {q0}, [r3] /* store IV */
CLEAR_REG(q0)
CLEAR_REG(q1)
CLEAR_REG(q2)
CLEAR_REG(q3)
CLEAR_REG(q8)
CLEAR_REG(q9)
vpop {q4-q7}
CLEAR_REG(q10)
CLEAR_REG(q11)
CLEAR_REG(q12)
CLEAR_REG(q13)
CLEAR_REG(q14)
.Lcbc_dec_skip:
pop {r4-r6,pc}
.size _gcry_aes_cbc_dec_armv8_ce,.-_gcry_aes_cbc_dec_armv8_ce;
/*
* void _gcry_aes_cfb_enc_armv8_ce (const void *keysched,
* unsigned char *outbuf,
* const unsigned char *inbuf,
* unsigned char *iv, unsigned int nrounds);
*/
.align 3
.globl _gcry_aes_cfb_enc_armv8_ce
.type _gcry_aes_cfb_enc_armv8_ce,%function;
_gcry_aes_cfb_enc_armv8_ce:
/* input:
* r0: keysched
* r1: outbuf
* r2: inbuf
* r3: iv
* %st+0: nblocks => r4
* %st+4: nrounds => r5
*/
push {r4-r6,lr} /* 4*4 = 16b */
ldr r4, [sp, #(16+0)]
ldr r5, [sp, #(16+4)]
cmp r4, #0
beq .Lcfb_enc_skip
vpush {q4-q7}
cmp r5, #12
vld1.8 {q0}, [r3] /* load IV */
aes_preload_keys(r0, r6);
beq .Lcfb_enc_entry_192
bhi .Lcfb_enc_entry_256
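/* CFB encryption is serial: C_i = P_i xor ENCIPHER(K, C_{i-1}), with the
 * previous ciphertext block kept in q0. */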
#define CFB_ENC(bits, ...) \
.Lcfb_enc_entry_##bits: \
.Lcfb_enc_loop_##bits: \
vld1.8 {q1}, [r2]!; /* load plaintext */ \
subs r4, r4, #1; \
\
do_aes_one##bits(e, mc, q0, q0, ##__VA_ARGS__); \
\
veor q0, q1, q0; \
vst1.8 {q0}, [r1]!; /* store ciphertext */ \
\
bne .Lcfb_enc_loop_##bits; \
b .Lcfb_enc_done;
CFB_ENC(128)
CFB_ENC(192, r0, r6)
CFB_ENC(256, r0, r6)
#undef CFB_ENC
.Lcfb_enc_done:
vst1.8 {q0}, [r3] /* store IV */
CLEAR_REG(q0)
CLEAR_REG(q1)
CLEAR_REG(q2)
CLEAR_REG(q3)
CLEAR_REG(q8)
CLEAR_REG(q9)
vpop {q4-q7}
CLEAR_REG(q10)
CLEAR_REG(q11)
CLEAR_REG(q12)
CLEAR_REG(q13)
CLEAR_REG(q14)
.Lcfb_enc_skip:
pop {r4-r6,pc}
.size _gcry_aes_cfb_enc_armv8_ce,.-_gcry_aes_cfb_enc_armv8_ce;
/*
* void _gcry_aes_cfb_dec_armv8_ce (const void *keysched,
* unsigned char *outbuf,
* const unsigned char *inbuf,
* unsigned char *iv, unsigned int nrounds);
*/
.align 3
.globl _gcry_aes_cfb_dec_armv8_ce
.type _gcry_aes_cfb_dec_armv8_ce,%function;
_gcry_aes_cfb_dec_armv8_ce:
/* input:
* r0: keysched
* r1: outbuf
* r2: inbuf
* r3: iv
* %st+0: nblocks => r4
* %st+4: nrounds => r5
*/
push {r4-r6,lr} /* 4*4 = 16b */
ldr r4, [sp, #(16+0)]
ldr r5, [sp, #(16+4)]
cmp r4, #0
beq .Lcfb_dec_skip
vpush {q4-q7}
cmp r5, #12
vld1.8 {q0}, [r3] /* load IV */
aes_preload_keys(r0, r6);
beq .Lcfb_dec_entry_192
bhi .Lcfb_dec_entry_256
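/* CFB decryption parallelizes: P_i = C_i xor ENCIPHER(K, C_{i-1}) takes
 * only ciphertext as cipher input, so four blocks are processed per
 * iteration and the ciphertext is re-read for the next chaining values. */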
#define CFB_DEC(bits, ...) \
.Lcfb_dec_entry_##bits: \
cmp r4, #4; \
blo .Lcfb_dec_loop_##bits; \
\
.Lcfb_dec_loop4_##bits: \
\
vld1.8 {q2-q3}, [r2]!; /* load ciphertext */ \
vmov q1, q0; \
sub r4, r4, #4; \
vld1.8 {q4}, [r2]; /* load ciphertext */ \
sub r2, #32; \
cmp r4, #4; \
\
do_aes_4_##bits(e, mc, q1, q2, q3, q4, ##__VA_ARGS__); \
\
vld1.8 {q0}, [r2]!; /* load ciphertext */ \
veor q1, q1, q0; \
vld1.8 {q0}, [r2]!; /* load ciphertext */ \
veor q2, q2, q0; \
vst1.8 {q1-q2}, [r1]!; /* store plaintext */ \
vld1.8 {q0}, [r2]!; \
veor q3, q3, q0; \
vld1.8 {q0}, [r2]!; /* load next IV / ciphertext */ \
veor q4, q4, q0; \
vst1.8 {q3-q4}, [r1]!; /* store plaintext */ \
\
bhs .Lcfb_dec_loop4_##bits; \
cmp r4, #0; \
beq .Lcfb_dec_done; \
\
.Lcfb_dec_loop_##bits: \
\
vld1.8 {q1}, [r2]!; /* load ciphertext */ \
\
subs r4, r4, #1; \
\
do_aes_one##bits(e, mc, q0, q0, ##__VA_ARGS__); \
\
veor q2, q1, q0; \
vmov q0, q1; \
vst1.8 {q2}, [r1]!; /* store plaintext */ \
\
bne .Lcfb_dec_loop_##bits; \
b .Lcfb_dec_done;
CFB_DEC(128)
CFB_DEC(192, r0, r6)
CFB_DEC(256, r0, r6)
#undef CFB_DEC
.Lcfb_dec_done:
vst1.8 {q0}, [r3] /* store IV */
CLEAR_REG(q0)
CLEAR_REG(q1)
CLEAR_REG(q2)
CLEAR_REG(q3)
CLEAR_REG(q8)
CLEAR_REG(q9)
vpop {q4-q7}
CLEAR_REG(q10)
CLEAR_REG(q11)
CLEAR_REG(q12)
CLEAR_REG(q13)
CLEAR_REG(q14)
.Lcfb_dec_skip:
pop {r4-r6,pc}
.size _gcry_aes_cfb_dec_armv8_ce,.-_gcry_aes_cfb_dec_armv8_ce;
/*
* void _gcry_aes_ctr_enc_armv8_ce (const void *keysched,
* unsigned char *outbuf,
* const unsigned char *inbuf,
* unsigned char *iv, unsigned int nrounds);
*/
.align 3
.globl _gcry_aes_ctr_enc_armv8_ce
.type _gcry_aes_ctr_enc_armv8_ce,%function;
_gcry_aes_ctr_enc_armv8_ce:
/* input:
* r0: keysched
* r1: outbuf
* r2: inbuf
* r3: iv
* %st+0: nblocks => r4
* %st+4: nrounds => r5
*/
vpush {q4-q7}
push {r4-r12,lr} /* 4*16 + 4*10 = 104b */
ldr r4, [sp, #(104+0)]
ldr r5, [sp, #(104+4)]
cmp r4, #0
beq .Lctr_enc_skip
cmp r5, #12
ldm r3, {r7-r10}
vld1.8 {q0}, [r3] /* load IV */
rev r7, r7
rev r8, r8
rev r9, r9
rev r10, r10
aes_preload_keys(r0, r6);
beq .Lctr_enc_entry_192
bhi .Lctr_enc_entry_256
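/* The big-endian counter is kept in q0 and, byte-swapped to host order, in
 * r7-r10. The 4-way loop normally advances it with 64-bit vector adds on
 * the byte-reversed value; the scalar .Lctr_overflow_one path is taken only
 * when the increment could carry out of the low 64 bits. */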
#define CTR_ENC(bits, ...) \
.Lctr_enc_entry_##bits: \
cmp r4, #4; \
blo .Lctr_enc_loop_##bits; \
\
.Lctr_enc_loop4_##bits: \
cmp r10, #0xfffffffc; \
sub r4, r4, #4; \
blo .Lctr_enc_loop4_##bits##_nocarry; \
cmp r9, #0xffffffff; \
bne .Lctr_enc_loop4_##bits##_nocarry; \
\
adds r10, #1; \
vmov q1, q0; \
blcs .Lctr_overflow_one; \
rev r11, r10; \
vmov.32 d1[1], r11; \
\
adds r10, #1; \
vmov q2, q0; \
blcs .Lctr_overflow_one; \
rev r11, r10; \
vmov.32 d1[1], r11; \
\
adds r10, #1; \
vmov q3, q0; \
blcs .Lctr_overflow_one; \
rev r11, r10; \
vmov.32 d1[1], r11; \
\
adds r10, #1; \
vmov q4, q0; \
blcs .Lctr_overflow_one; \
rev r11, r10; \
vmov.32 d1[1], r11; \
\
b .Lctr_enc_loop4_##bits##_store_ctr; \
\
.Lctr_enc_loop4_##bits##_nocarry: \
\
veor q2, q2; \
vrev64.8 q1, q0; \
vceq.u32 d5, d5; \
vadd.u64 q3, q2, q2; \
vadd.u64 q4, q3, q2; \
vadd.u64 q0, q3, q3; \
vsub.u64 q2, q1, q2; \
vsub.u64 q3, q1, q3; \
vsub.u64 q4, q1, q4; \
vsub.u64 q0, q1, q0; \
vrev64.8 q1, q1; \
vrev64.8 q2, q2; \
vrev64.8 q3, q3; \
vrev64.8 q0, q0; \
vrev64.8 q4, q4; \
add r10, #4; \
\
.Lctr_enc_loop4_##bits##_store_ctr: \
\
vst1.8 {q0}, [r3]; \
cmp r4, #4; \
vld1.8 {q0}, [r2]!; /* load ciphertext */ \
\
do_aes_4_##bits(e, mc, q1, q2, q3, q4, ##__VA_ARGS__); \
\
veor q1, q1, q0; \
vld1.8 {q0}, [r2]!; /* load ciphertext */ \
vst1.8 {q1}, [r1]!; /* store plaintext */ \
vld1.8 {q1}, [r2]!; /* load ciphertext */ \
veor q2, q2, q0; \
veor q3, q3, q1; \
vld1.8 {q0}, [r2]!; /* load ciphertext */ \
vst1.8 {q2}, [r1]!; /* store plaintext */ \
veor q4, q4, q0; \
vld1.8 {q0}, [r3]; /* reload IV */ \
vst1.8 {q3-q4}, [r1]!; /* store plaintext */ \
\
bhs .Lctr_enc_loop4_##bits; \
cmp r4, #0; \
beq .Lctr_enc_done; \
\
.Lctr_enc_loop_##bits: \
\
adds r10, #1; \
vmov q1, q0; \
blcs .Lctr_overflow_one; \
rev r11, r10; \
subs r4, r4, #1; \
vld1.8 {q2}, [r2]!; /* load ciphertext */ \
vmov.32 d1[1], r11; \
\
do_aes_one##bits(e, mc, q1, q1, ##__VA_ARGS__); \
\
veor q1, q2, q1; \
vst1.8 {q1}, [r1]!; /* store plaintext */ \
\
bne .Lctr_enc_loop_##bits; \
b .Lctr_enc_done;
CTR_ENC(128)
CTR_ENC(192, r0, r6)
CTR_ENC(256, r0, r6)
#undef CTR_ENC
.Lctr_enc_done:
vst1.8 {q0}, [r3] /* store IV */
CLEAR_REG(q0)
CLEAR_REG(q1)
CLEAR_REG(q2)
CLEAR_REG(q3)
CLEAR_REG(q8)
CLEAR_REG(q9)
CLEAR_REG(q10)
CLEAR_REG(q11)
CLEAR_REG(q12)
CLEAR_REG(q13)
CLEAR_REG(q14)
.Lctr_enc_skip:
pop {r4-r12,lr}
vpop {q4-q7}
bx lr
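/* Carry out of the lowest counter word: propagate it through r9, r8 and r7
 * and write the refreshed upper words of the big-endian counter back into
 * q0. */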
.Lctr_overflow_one:
adcs r9, #0
adcs r8, #0
adc r7, #0
rev r11, r9
rev r12, r8
vmov.32 d1[0], r11
rev r11, r7
vmov.32 d0[1], r12
vmov.32 d0[0], r11
bx lr
.size _gcry_aes_ctr_enc_armv8_ce,.-_gcry_aes_ctr_enc_armv8_ce;
/*
* void _gcry_aes_ctr32le_enc_armv8_ce (const void *keysched,
* unsigned char *outbuf,
* const unsigned char *inbuf,
* unsigned char *iv,
* unsigned int nrounds);
*/
.align 3
.globl _gcry_aes_ctr32le_enc_armv8_ce
.type _gcry_aes_ctr32le_enc_armv8_ce,%function;
_gcry_aes_ctr32le_enc_armv8_ce:
/* input:
* r0: keysched
* r1: outbuf
* r2: inbuf
* r3: iv
* %st+0: nblocks => r4
* %st+4: nrounds => r5
*/
vpush {q4-q7}
push {r4-r12,lr} /* 4*16 + 4*10 = 104b */
ldr r4, [sp, #(104+0)]
ldr r5, [sp, #(104+4)]
cmp r4, #0
beq .Lctr32le_enc_skip
cmp r5, #12
vld1.8 {q0}, [r3] /* load IV */
aes_preload_keys(r0, r6);
beq .Lctr32le_enc_entry_192
bhi .Lctr32le_enc_entry_256
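/* CTR with a little-endian 32-bit counter: only the low word of the IV is
 * incremented (mod 2^32), so no byte swapping or carry handling is needed;
 * subtracting -1..-4 in lane 0 advances the counter. */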
#define CTR_ENC(bits, ...) \
.Lctr32le_enc_entry_##bits: \
cmp r4, #4; \
blo .Lctr32le_enc_loop_##bits; \
\
.Lctr32le_enc_loop4_##bits: \
veor q2, q2; \
sub r4, r4, #4; \
vmov.i64 d4, #0xffffffff; /* q2 <= -1:0:0:0 */ \
vmov q1, q0; \
vadd.u32 q3, q2, q2; /* q3 <= -2:0:0:0 */ \
vadd.u32 q0, q3, q3; /* q0 <= -4:0:0:0 */ \
vadd.u32 q4, q3, q2; /* q4 <= -3:0:0:0 */ \
vsub.u32 q0, q1, q0; \
vsub.u32 q2, q1, q2; \
vst1.8 {q0}, [r3]; \
vsub.u32 q3, q1, q3; \
vsub.u32 q4, q1, q4; \
\
cmp r4, #4; \
vld1.8 {q0}, [r2]!; /* load ciphertext */ \
\
do_aes_4_##bits(e, mc, q1, q2, q3, q4, ##__VA_ARGS__); \
\
veor q1, q1, q0; \
vld1.8 {q0}, [r2]!; /* load ciphertext */ \
vst1.8 {q1}, [r1]!; /* store plaintext */ \
vld1.8 {q1}, [r2]!; /* load ciphertext */ \
veor q2, q2, q0; \
veor q3, q3, q1; \
vld1.8 {q0}, [r2]!; /* load ciphertext */ \
vst1.8 {q2}, [r1]!; /* store plaintext */ \
veor q4, q4, q0; \
vld1.8 {q0}, [r3]; /* reload IV */ \
vst1.8 {q3-q4}, [r1]!; /* store plaintext */ \
\
bhs .Lctr32le_enc_loop4_##bits; \
cmp r4, #0; \
beq .Lctr32le_enc_done; \
\
.Lctr32le_enc_loop_##bits: \
\
veor q2, q2; \
vmov q1, q0; \
vmov.i64 d4, #0xffffffff; /* q2 <= -1:0:0:0 */ \
subs r4, r4, #1; \
vsub.u32 q0, q0, q2; \
vld1.8 {q2}, [r2]!; /* load ciphertext */ \
\
do_aes_one##bits(e, mc, q1, q1, ##__VA_ARGS__); \
\
veor q1, q2, q1; \
vst1.8 {q1}, [r1]!; /* store plaintext */ \
\
bne .Lctr32le_enc_loop_##bits; \
b .Lctr32le_enc_done;
CTR_ENC(128)
CTR_ENC(192, r0, r6)
CTR_ENC(256, r0, r6)
#undef CTR_ENC
.Lctr32le_enc_done:
vst1.8 {q0}, [r3] /* store IV */
CLEAR_REG(q0)
CLEAR_REG(q1)
CLEAR_REG(q2)
CLEAR_REG(q3)
CLEAR_REG(q8)
CLEAR_REG(q9)
CLEAR_REG(q10)
CLEAR_REG(q11)
CLEAR_REG(q12)
CLEAR_REG(q13)
CLEAR_REG(q14)
.Lctr32le_enc_skip:
pop {r4-r12,lr}
vpop {q4-q7}
bx lr
.size _gcry_aes_ctr32le_enc_armv8_ce,.-_gcry_aes_ctr32le_enc_armv8_ce;
/*
* void _gcry_aes_ocb_enc_armv8_ce (const void *keysched,
* unsigned char *outbuf,
* const unsigned char *inbuf,
* unsigned char *offset,
* unsigned char *checksum,
* unsigned char *L_table,
* size_t nblocks,
* unsigned int nrounds,
* unsigned int blkn);
*/
.align 3
.globl _gcry_aes_ocb_enc_armv8_ce
.type _gcry_aes_ocb_enc_armv8_ce,%function;
_gcry_aes_ocb_enc_armv8_ce:
/* input:
* r0: keysched
* r1: outbuf
* r2: inbuf
* r3: offset
* %st+0: checksum => r4
* %st+4: Ls => r5
* %st+8: nblocks => r6 (0 < nblocks <= 32)
* %st+12: nrounds => r7
* %st+16: blkn => lr
*/
vpush {q4-q7}
push {r4-r12,lr} /* 4*16 + 4*10 = 104b */
ldr r7, [sp, #(104+12)]
ldr r4, [sp, #(104+0)]
ldr r5, [sp, #(104+4)]
ldr r6, [sp, #(104+8)]
ldr lr, [sp, #(104+16)]
cmp r7, #12
vld1.8 {q0}, [r3] /* load offset */
aes_preload_keys(r0, r12);
beq .Locb_enc_entry_192
bhi .Locb_enc_entry_256
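/* The running block number is kept in lr; ntz(i) is computed as
 * clz(rbit(i)) and scaled by 16 to index L_table (r5). The per-block
 * offsets are staged in the output buffer so the 4-way path can xor them
 * back in after the parallel encryption. */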
#define OCB_ENC(bits, ...) \
.Locb_enc_entry_##bits: \
cmp r6, #4; \
add lr, #1; \
blo .Locb_enc_loop_##bits; \
\
.Locb_enc_loop4_##bits: \
\
/* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ \
/* Checksum_i = Checksum_{i-1} xor P_i */ \
/* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i) */ \
\
add r9, lr, #1; \
add r10, lr, #2; \
add r11, lr, #3; \
rbit r8, lr; \
add lr, lr, #4; \
rbit r9, r9; \
rbit r10, r10; \
rbit r11, r11; \
clz r8, r8; /* ntz(i+0) */ \
clz r9, r9; /* ntz(i+1) */ \
clz r10, r10; /* ntz(i+2) */ \
clz r11, r11; /* ntz(i+3) */ \
add r8, r5, r8, lsl #4; \
add r9, r5, r9, lsl #4; \
add r10, r5, r10, lsl #4; \
add r11, r5, r11, lsl #4; \
\
sub r6, #4; \
\
vld1.8 {q9}, [r8]; /* load L_{ntz(i+0)} */ \
vld1.8 {q1-q2}, [r2]!; /* load P_i+<0-1> */ \
vld1.8 {q8}, [r4]; /* load Checksum_{i-1} */ \
veor q0, q0, q9; /* Offset_i+0 */ \
vld1.8 {q9}, [r9]; /* load L_{ntz(i+1)} */ \
veor q8, q8, q1; /* Checksum_i+0 */ \
veor q1, q1, q0; /* P_i+0 xor Offset_i+0 */\
vld1.8 {q3-q4}, [r2]!; /* load P_i+<2-3> */ \
vst1.8 {q0}, [r1]!; /* store Offset_i+0 */\
veor q0, q0, q9; /* Offset_i+1 */ \
vld1.8 {q9}, [r10]; /* load L_{ntz(i+2)} */ \
veor q8, q8, q2; /* Checksum_i+1 */ \
veor q2, q2, q0; /* P_i+1 xor Offset_i+1 */\
vst1.8 {q0}, [r1]!; /* store Offset_i+1 */\
veor q0, q0, q9; /* Offset_i+2 */ \
vld1.8 {q9}, [r11]; /* load L_{ntz(i+3)} */ \
veor q8, q8, q3; /* Checksum_i+2 */ \
veor q3, q3, q0; /* P_i+2 xor Offset_i+2 */\
vst1.8 {q0}, [r1]!; /* store Offset_i+2 */\
veor q0, q0, q9; /* Offset_i+3 */ \
veor q8, q8, q4; /* Checksum_i+3 */ \
veor q4, q4, q0; /* P_i+3 xor Offset_i+3 */\
vst1.8 {q0}, [r1]; /* store Offset_i+3 */\
sub r1, #(3*16); \
vst1.8 {q8}, [r4]; /* store Checksum_i+3 */\
\
cmp r6, #4; \
\
do_aes_4_##bits(e, mc, q1, q2, q3, q4, ##__VA_ARGS__); \
\
mov r8, r1; \
vld1.8 {q8-q9}, [r1]!; \
veor q1, q1, q8; \
veor q2, q2, q9; \
vld1.8 {q8-q9}, [r1]!; \
vst1.8 {q1-q2}, [r8]!; \
veor q3, q3, q8; \
veor q4, q4, q9; \
vst1.8 {q3-q4}, [r8]; \
\
bhs .Locb_enc_loop4_##bits; \
cmp r6, #0; \
beq .Locb_enc_done; \
\
.Locb_enc_loop_##bits: \
\
/* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ \
/* Checksum_i = Checksum_{i-1} xor P_i */ \
/* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i) */ \
\
rbit r8, lr; \
add lr, #1; \
clz r8, r8; /* ntz(i) */ \
add r8, r5, r8, lsl #4; \
\
vld1.8 {q1}, [r2]!; /* load plaintext */ \
vld1.8 {q2}, [r8]; /* load L_{ntz(i)} */ \
vld1.8 {q3}, [r4]; /* load checksum */ \
subs r6, #1; \
veor q0, q0, q2; \
veor q3, q3, q1; \
veor q1, q1, q0; \
vst1.8 {q3}, [r4]; /* store checksum */ \
\
do_aes_one##bits(e, mc, q1, q1, ##__VA_ARGS__); \
\
veor q1, q1, q0; \
vst1.8 {q1}, [r1]!; /* store ciphertext */ \
\
bne .Locb_enc_loop_##bits; \
b .Locb_enc_done;
OCB_ENC(128re, r0, r12)
OCB_ENC(192, r0, r12)
OCB_ENC(256, r0, r12)
#undef OCB_ENC
.Locb_enc_done:
vst1.8 {q0}, [r3] /* store offset */
CLEAR_REG(q0)
CLEAR_REG(q1)
CLEAR_REG(q2)
CLEAR_REG(q3)
CLEAR_REG(q8)
CLEAR_REG(q9)
CLEAR_REG(q10)
CLEAR_REG(q11)
CLEAR_REG(q12)
CLEAR_REG(q13)
CLEAR_REG(q14)
pop {r4-r12,lr}
vpop {q4-q7}
bx lr
.size _gcry_aes_ocb_enc_armv8_ce,.-_gcry_aes_ocb_enc_armv8_ce;
/*
* void _gcry_aes_ocb_dec_armv8_ce (const void *keysched,
* unsigned char *outbuf,
* const unsigned char *inbuf,
* unsigned char *offset,
* unsigned char *checksum,
* unsigned char *L_table,
* size_t nblocks,
* unsigned int nrounds,
* unsigned int blkn);
*/
.align 3
.globl _gcry_aes_ocb_dec_armv8_ce
.type _gcry_aes_ocb_dec_armv8_ce,%function;
_gcry_aes_ocb_dec_armv8_ce:
/* input:
* r0: keysched
* r1: outbuf
* r2: inbuf
* r3: offset
* %st+0: checksum => r4
* %st+4: Ls => r5
* %st+8: nblocks => r6 (0 < nblocks <= 32)
* %st+12: nrounds => r7
* %st+16: blkn => lr
*/
vpush {q4-q7}
push {r4-r12,lr} /* 4*16 + 4*10 = 104b */
ldr r7, [sp, #(104+12)]
ldr r4, [sp, #(104+0)]
ldr r5, [sp, #(104+4)]
ldr r6, [sp, #(104+8)]
ldr lr, [sp, #(104+16)]
cmp r7, #12
vld1.8 {q0}, [r3] /* load offset */
aes_preload_keys(r0, r12);
beq .Locb_dec_entry_192
bhi .Locb_dec_entry_256
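/* Same offset chaining as the encryption path, but the checksum is
 * accumulated over the recovered plaintext after the 4-way decryption. */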
#define OCB_DEC(bits, ...) \
.Locb_dec_entry_##bits: \
cmp r6, #4; \
add lr, #1; \
blo .Locb_dec_loop_##bits; \
\
.Locb_dec_loop4_##bits: \
\
/* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ \
/* P_i = Offset_i xor DECIPHER(K, C_i xor Offset_i) */ \
/* Checksum_i = Checksum_{i-1} xor P_i */ \
\
add r9, lr, #1; \
add r10, lr, #2; \
add r11, lr, #3; \
rbit r8, lr; \
add lr, lr, #4; \
rbit r9, r9; \
rbit r10, r10; \
rbit r11, r11; \
clz r8, r8; /* ntz(i+0) */ \
clz r9, r9; /* ntz(i+1) */ \
clz r10, r10; /* ntz(i+2) */ \
clz r11, r11; /* ntz(i+3) */ \
add r8, r5, r8, lsl #4; \
add r9, r5, r9, lsl #4; \
add r10, r5, r10, lsl #4; \
add r11, r5, r11, lsl #4; \
\
sub r6, #4; \
\
vld1.8 {q9}, [r8]; /* load L_{ntz(i+0)} */ \
vld1.8 {q1-q2}, [r2]!; /* load C_i+<0-1> */ \
veor q0, q0, q9; /* Offset_i+0 */ \
vld1.8 {q9}, [r9]; /* load L_{ntz(i+1)} */ \
veor q1, q1, q0; /* C_i+0 xor Offset_i+0 */\
vld1.8 {q3-q4}, [r2]!; /* load C_i+<2-3> */ \
vst1.8 {q0}, [r1]!; /* store Offset_i+0 */\
veor q0, q0, q9; /* Offset_i+1 */ \
vld1.8 {q9}, [r10]; /* load L_{ntz(i+2)} */ \
veor q2, q2, q0; /* C_i+1 xor Offset_i+1 */\
vst1.8 {q0}, [r1]!; /* store Offset_i+1 */\
veor q0, q0, q9; /* Offset_i+2 */ \
vld1.8 {q9}, [r11]; /* load L_{ntz(i+3)} */ \
veor q3, q3, q0; /* C_i+2 xor Offset_i+2 */\
vst1.8 {q0}, [r1]!; /* store Offset_i+2 */\
veor q0, q0, q9; /* Offset_i+3 */ \
veor q4, q4, q0; /* C_i+3 xor Offset_i+3 */\
vst1.8 {q0}, [r1]; /* store Offset_i+3 */\
sub r1, #(3*16); \
\
cmp r6, #4; \
\
do_aes_4_##bits(d, imc, q1, q2, q3, q4, ##__VA_ARGS__); \
\
mov r8, r1; \
vld1.8 {q8-q9}, [r1]!; \
veor q1, q1, q8; \
veor q2, q2, q9; \
vld1.8 {q8-q9}, [r1]!; \
vst1.8 {q1-q2}, [r8]!; \
veor q1, q1, q2; \
vld1.8 {q2}, [r4]; /* load Checksum_{i-1} */ \
veor q3, q3, q8; \
veor q1, q1, q3; \
veor q4, q4, q9; \
veor q1, q1, q4; \
vst1.8 {q3-q4}, [r8]; \
veor q2, q2, q1; \
vst1.8 {q2}, [r4]; /* store Checksum_i+3 */ \
\
bhs .Locb_dec_loop4_##bits; \
cmp r6, #0; \
beq .Locb_dec_done; \
\
.Locb_dec_loop_##bits: \
\
/* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ \
/* P_i = Offset_i xor DECIPHER(K, C_i xor Offset_i) */ \
/* Checksum_i = Checksum_{i-1} xor P_i */ \
\
rbit r8, lr; \
add lr, #1; \
clz r8, r8; /* ntz(i) */ \
add r8, r5, r8, lsl #4; \
\
vld1.8 {q2}, [r8]; /* load L_{ntz(i)} */ \
vld1.8 {q1}, [r2]!; /* load ciphertext */ \
subs r6, #1; \
veor q0, q0, q2; \
veor q1, q1, q0; \
\
do_aes_one##bits(d, imc, q1, q1, ##__VA_ARGS__) \
\
vld1.8 {q2}, [r4]; /* load checksum */ \
veor q1, q1, q0; \
vst1.8 {q1}, [r1]!; /* store plaintext */ \
veor q2, q2, q1; \
vst1.8 {q2}, [r4]; /* store checksum */ \
\
bne .Locb_dec_loop_##bits; \
b .Locb_dec_done;
OCB_DEC(128re, r0, r12)
OCB_DEC(192, r0, r12)
OCB_DEC(256, r0, r12)
#undef OCB_DEC
.Locb_dec_done:
vst1.8 {q0}, [r3] /* store offset */
CLEAR_REG(q0)
CLEAR_REG(q1)
CLEAR_REG(q2)
CLEAR_REG(q3)
CLEAR_REG(q8)
CLEAR_REG(q9)
CLEAR_REG(q10)
CLEAR_REG(q11)
CLEAR_REG(q12)
CLEAR_REG(q13)
CLEAR_REG(q14)
pop {r4-r12,lr}
vpop {q4-q7}
bx lr
.size _gcry_aes_ocb_dec_armv8_ce,.-_gcry_aes_ocb_dec_armv8_ce;
/*
* void _gcry_aes_ocb_auth_armv8_ce (const void *keysched,
* const unsigned char *abuf,
* unsigned char *offset,
* unsigned char *checksum,
* unsigned char *L_table,
* size_t nblocks,
* unsigned int nrounds,
* unsigned int blkn);
*/
.align 3
.globl _gcry_aes_ocb_auth_armv8_ce
.type _gcry_aes_ocb_auth_armv8_ce,%function;
_gcry_aes_ocb_auth_armv8_ce:
/* input:
* r0: keysched
* r1: abuf
* r2: offset
* r3: checksum
* %st+0: Ls => r5
* %st+4: nblocks => r6 (0 < nblocks <= 32)
* %st+8: nrounds => r7
* %st+12: blkn => lr
*/
vpush {q4-q7}
push {r4-r12,lr} /* 4*16 + 4*10 = 104b */
ldr r7, [sp, #(104+8)]
ldr r5, [sp, #(104+0)]
ldr r6, [sp, #(104+4)]
ldr lr, [sp, #(104+12)]
cmp r7, #12
vld1.8 {q0}, [r2] /* load offset */
aes_preload_keys(r0, r12);
beq .Locb_auth_entry_192
bhi .Locb_auth_entry_256
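/* OCB authentication only accumulates Sum ^= ENCIPHER(K, A_i xor Offset_i);
 * the four results of the 4-way path are folded together by xor before
 * being merged into the checksum at r3. */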
#define OCB_AUTH(bits, ...) \
.Locb_auth_entry_##bits: \
cmp r6, #4; \
add lr, #1; \
blo .Locb_auth_loop_##bits; \
\
.Locb_auth_loop4_##bits: \
\
/* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ \
/* Sum_i = Sum_{i-1} xor ENCIPHER(K, A_i xor Offset_i) */ \
\
add r9, lr, #1; \
add r10, lr, #2; \
add r11, lr, #3; \
rbit r8, lr; \
add lr, lr, #4; \
rbit r9, r9; \
rbit r10, r10; \
rbit r11, r11; \
clz r8, r8; /* ntz(i+0) */ \
clz r9, r9; /* ntz(i+1) */ \
clz r10, r10; /* ntz(i+2) */ \
clz r11, r11; /* ntz(i+3) */ \
add r8, r5, r8, lsl #4; \
add r9, r5, r9, lsl #4; \
add r10, r5, r10, lsl #4; \
add r11, r5, r11, lsl #4; \
\
sub r6, #4; \
\
vld1.8 {q9}, [r8]; /* load L_{ntz(i+0)} */ \
vld1.8 {q1-q2}, [r1]!; /* load A_i+<0-1> */ \
veor q0, q0, q9; /* Offset_i+0 */ \
vld1.8 {q9}, [r9]; /* load L_{ntz(i+1)} */ \
veor q1, q1, q0; /* A_i+0 xor Offset_i+0 */\
vld1.8 {q3-q4}, [r1]!; /* load A_i+<2-3> */ \
veor q0, q0, q9; /* Offset_i+1 */ \
vld1.8 {q9}, [r10]; /* load L_{ntz(i+2)} */ \
veor q2, q2, q0; /* A_i+1 xor Offset_i+1 */\
veor q0, q0, q9; /* Offset_i+2 */ \
vld1.8 {q9}, [r11]; /* load L_{ntz(i+3)} */ \
veor q3, q3, q0; /* A_i+2 xor Offset_i+2 */\
veor q0, q0, q9; /* Offset_i+3 */ \
veor q4, q4, q0; /* A_i+3 xor Offset_i+3 */\
\
cmp r6, #4; \
\
do_aes_4_##bits(e, mc, q1, q2, q3, q4, ##__VA_ARGS__); \
\
veor q1, q1, q2; \
veor q3, q3, q4; \
vld1.8 {q2}, [r3]; \
veor q1, q1, q3; \
veor q2, q2, q1; \
vst1.8 {q2}, [r3]; \
\
bhs .Locb_auth_loop4_##bits; \
cmp r6, #0; \
beq .Locb_auth_done; \
\
.Locb_auth_loop_##bits: \
\
/* Offset_i = Offset_{i-1} xor L_{ntz(i)} */ \
/* Sum_i = Sum_{i-1} xor ENCIPHER(K, A_i xor Offset_i) */ \
\
rbit r8, lr; \
add lr, #1; \
clz r8, r8; /* ntz(i) */ \
add r8, r5, r8, lsl #4; \
\
vld1.8 {q2}, [r8]; /* load L_{ntz(i)} */ \
vld1.8 {q1}, [r1]!; /* load aadtext */ \
subs r6, #1; \
veor q0, q0, q2; \
vld1.8 {q2}, [r3]; /* load checksum */ \
veor q1, q1, q0; \
\
do_aes_one##bits(e, mc, q1, q1, ##__VA_ARGS__) \
\
veor q2, q2, q1; \
vst1.8 {q2}, [r3]; /* store checksum */ \
\
bne .Locb_auth_loop_##bits; \
b .Locb_auth_done;
OCB_AUTH(128re, r0, r12)
OCB_AUTH(192, r0, r12)
OCB_AUTH(256, r0, r12)
#undef OCB_AUTH
.Locb_auth_done:
vst1.8 {q0}, [r2] /* store offset */
CLEAR_REG(q0)
CLEAR_REG(q1)
CLEAR_REG(q2)
CLEAR_REG(q3)
CLEAR_REG(q8)
CLEAR_REG(q9)
CLEAR_REG(q10)
CLEAR_REG(q11)
CLEAR_REG(q12)
CLEAR_REG(q13)
CLEAR_REG(q14)
pop {r4-r12,lr}
vpop {q4-q7}
bx lr
.size _gcry_aes_ocb_auth_armv8_ce,.-_gcry_aes_ocb_auth_armv8_ce;
/*
* void _gcry_aes_xts_enc_armv8_ce (const void *keysched,
* unsigned char *outbuf,
* const unsigned char *inbuf,
* unsigned char *iv, unsigned int nrounds);
*/
.align 3
.globl _gcry_aes_xts_enc_armv8_ce
.type _gcry_aes_xts_enc_armv8_ce,%function;
_gcry_aes_xts_enc_armv8_ce:
/* input:
* r0: keysched
* r1: outbuf
* r2: inbuf
* r3: iv
* %st+0: nblocks => r4
* %st+4: nrounds => r5
*/
vpush {q4-q7}
push {r4-r12,lr} /* 4*16 + 4*10 = 104b */
ldr r4, [sp, #(104+0)]
ldr r5, [sp, #(104+4)]
cmp r4, #0
beq .Lxts_enc_skip
cmp r5, #12
vld1.8 {q0}, [r3] /* load tweak */
mov r7, #0x87;
aes_preload_keys(r0, r6);
beq .Lxts_enc_entry_192
bhi .Lxts_enc_entry_256
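/* Each tweak update multiplies the 128-bit tweak by x in GF(2^128): q0 is
 * doubled with 64-bit adds, d17 carries the low-half MSB into the high
 * half, and d16 (the old top bit sign-extended and masked with 0x87 from
 * r7) folds the reduction polynomial into the low word. */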
#define CTR_XTS(bits, ...) \
.Lxts_enc_entry_##bits: \
cmp r4, #4; \
blo .Lxts_enc_loop_##bits; \
\
.Lxts_enc_loop4_##bits: \
sub r4, r4, #4; \
veor q9, q9, q9; \
\
vld1.8 {q1-q2}, [r2]!; /* load plaintext */ \
veor q1, q1, q0; \
cmp r4, #4; \
vmov.u32 d18[0], r7; \
vst1.8 {q0}, [r1]!; /* store tweak0 to temp */ \
\
vshr.s64 d16, d1, #63; \
vshr.u64 d17, d0, #63; \
vadd.u64 q0, q0, q0; \
vand d16, d16, d18; \
veor q0, q0, q8; \
\
vld1.8 {q3-q4}, [r2]!; /* load plaintext */ \
veor q2, q2, q0; \
vst1.8 {q0}, [r1]!; /* store tweak1 to temp */ \
\
vshr.s64 d16, d1, #63; \
vshr.u64 d17, d0, #63; \
vadd.u64 q0, q0, q0; \
vand d16, d16, d18; \
veor q0, q0, q8; \
\
veor q3, q3, q0; \
vst1.8 {q0}, [r1]!; /* store tweak2 to temp */ \
\
vshr.s64 d16, d1, #63; \
vshr.u64 d17, d0, #63; \
vadd.u64 q0, q0, q0; \
vand d16, d16, d18; \
veor q0, q0, q8; \
\
veor q4, q4, q0; \
vst1.8 {q0}, [r1]; /* store tweak3 to temp */ \
sub r1, r1, #48; \
\
vshr.s64 d16, d1, #63; \
vshr.u64 d17, d0, #63; \
vadd.u64 q0, q0, q0; \
vand d16, d16, d18; \
veor q0, q0, q8; \
\
do_aes_4_##bits(e, mc, q1, q2, q3, q4, ##__VA_ARGS__); \
\
vld1.8 {q8-q9}, [r1]!; /* load tweak from temp */ \
veor q1, q1, q8; \
veor q2, q2, q9; \
vld1.8 {q8-q9}, [r1]; /* load tweak from temp */ \
sub r1, r1, #32; \
veor q3, q3, q8; \
veor q4, q4, q9; \
vst1.8 {q1-q2}, [r1]!; /* store ciphertext */ \
vst1.8 {q3-q4}, [r1]!; /* store ciphertext */ \
\
bhs .Lxts_enc_loop4_##bits; \
cmp r4, #0; \
beq .Lxts_enc_done; \
\
.Lxts_enc_loop_##bits: \
\
vld1.8 {q1}, [r2]!; /* load plaintext */ \
\
veor q9, q9, q9; \
veor q1, q1, q0; \
vmov.u32 d18[0], r7; \
vmov q2, q0; \
\
vshr.s64 d16, d1, #63; \
vshr.u64 d17, d0, #63; \
vadd.u64 q0, q0, q0; \
vand d16, d16, d18; \
veor q0, q0, q8; \
subs r4, r4, #1; \
\
do_aes_one##bits(e, mc, q1, q1, ##__VA_ARGS__); \
\
veor q1, q1, q2; \
vst1.8 {q1}, [r1]!; /* store ciphertext */ \
\
bne .Lxts_enc_loop_##bits; \
b .Lxts_enc_done;
CTR_XTS(128re, r0, r6)
CTR_XTS(192, r0, r6)
CTR_XTS(256, r0, r6)
#undef CTR_XTS
.Lxts_enc_done:
vst1.8 {q0}, [r3] /* store tweak */
CLEAR_REG(q0)
CLEAR_REG(q1)
CLEAR_REG(q2)
CLEAR_REG(q3)
CLEAR_REG(q8)
CLEAR_REG(q9)
CLEAR_REG(q10)
CLEAR_REG(q11)
CLEAR_REG(q12)
CLEAR_REG(q13)
CLEAR_REG(q14)
.Lxts_enc_skip:
pop {r4-r12,lr}
vpop {q4-q7}
bx lr
.size _gcry_aes_xts_enc_armv8_ce,.-_gcry_aes_xts_enc_armv8_ce;
/*
* void _gcry_aes_xts_dec_armv8_ce (const void *keysched,
* unsigned char *outbuf,
* const unsigned char *inbuf,
* unsigned char *iv, unsigned int nrounds);
*/
.align 3
.globl _gcry_aes_xts_dec_armv8_ce
.type _gcry_aes_xts_dec_armv8_ce,%function;
_gcry_aes_xts_dec_armv8_ce:
/* input:
* r0: keysched
* r1: outbuf
* r2: inbuf
* r3: iv
* %st+0: nblocks => r4
* %st+4: nrounds => r5
*/
vpush {q4-q7}
push {r4-r12,lr} /* 4*16 + 4*10 = 104b */
ldr r4, [sp, #(104+0)]
ldr r5, [sp, #(104+4)]
cmp r4, #0
beq .Lxts_dec_skip
cmp r5, #12
vld1.8 {q0}, [r3] /* load tweak */
mov r7, #0x87;
aes_preload_keys(r0, r6);
beq .Lxts_dec_entry_192
bhi .Lxts_dec_entry_256
#define CTR_XTS(bits, ...) \
.Lxts_dec_entry_##bits: \
cmp r4, #4; \
blo .Lxts_dec_loop_##bits; \
\
.Lxts_dec_loop4_##bits: \
sub r4, r4, #4; \
veor q9, q9, q9; \
\
vld1.8 {q1-q2}, [r2]!; /* load ciphertext */ \
veor q1, q1, q0; \
cmp r4, #4; \
vmov.u32 d18[0], r7; \
vst1.8 {q0}, [r1]!; /* store tweak0 to temp */ \
\
vshr.s64 d16, d1, #63; \
vshr.u64 d17, d0, #63; \
vadd.u64 q0, q0, q0; \
vand d16, d16, d18; \
veor q0, q0, q8; \
\
vld1.8 {q3-q4}, [r2]!; /* load ciphertext */ \
veor q2, q2, q0; \
vst1.8 {q0}, [r1]!; /* store tweak1 to temp */ \
\
vshr.s64 d16, d1, #63; \
vshr.u64 d17, d0, #63; \
vadd.u64 q0, q0, q0; \
vand d16, d16, d18; \
veor q0, q0, q8; \
\
veor q3, q3, q0; \
vst1.8 {q0}, [r1]!; /* store tweak2 to temp */ \
\
vshr.s64 d16, d1, #63; \
vshr.u64 d17, d0, #63; \
vadd.u64 q0, q0, q0; \
vand d16, d16, d18; \
veor q0, q0, q8; \
\
veor q4, q4, q0; \
vst1.8 {q0}, [r1]; /* store tweak3 to temp */ \
sub r1, r1, #48; \
\
vshr.s64 d16, d1, #63; \
vshr.u64 d17, d0, #63; \
vadd.u64 q0, q0, q0; \
vand d16, d16, d18; \
veor q0, q0, q8; \
\
do_aes_4_##bits(d, imc, q1, q2, q3, q4, ##__VA_ARGS__); \
\
vld1.8 {q8-q9}, [r1]!; /* load tweak from temp */ \
veor q1, q1, q8; \
veor q2, q2, q9; \
vld1.8 {q8-q9}, [r1]; /* load tweak from temp */ \
sub r1, r1, #32; \
veor q3, q3, q8; \
veor q4, q4, q9; \
vst1.8 {q1-q2}, [r1]!; /* store plaintext */ \
vst1.8 {q3-q4}, [r1]!; /* store plaintext */ \
\
bhs .Lxts_dec_loop4_##bits; \
cmp r4, #0; \
beq .Lxts_dec_done; \
\
.Lxts_dec_loop_##bits: \
\
vld1.8 {q1}, [r2]!; /* load ciphertext */ \
\
veor q9, q9, q9; \
veor q1, q1, q0; \
vmov.u32 d18[0], r7; \
vmov q2, q0; \
\
vshr.s64 d16, d1, #63; \
vshr.u64 d17, d0, #63; \
vadd.u64 q0, q0, q0; \
vand d16, d16, d18; \
veor q0, q0, q8; \
subs r4, r4, #1; \
\
do_aes_one##bits(d, imc, q1, q1, ##__VA_ARGS__); \
\
veor q1, q1, q2; \
vst1.8 {q1}, [r1]!; /* store plaintext */ \
\
bne .Lxts_dec_loop_##bits; \
b .Lxts_dec_done;
CTR_XTS(128re, r0, r6)
CTR_XTS(192, r0, r6)
CTR_XTS(256, r0, r6)
#undef CTR_XTS
.Lxts_dec_done:
vst1.8 {q0}, [r3] /* store tweak */
CLEAR_REG(q0)
CLEAR_REG(q1)
CLEAR_REG(q2)
CLEAR_REG(q3)
CLEAR_REG(q8)
CLEAR_REG(q9)
CLEAR_REG(q10)
CLEAR_REG(q11)
CLEAR_REG(q12)
CLEAR_REG(q13)
CLEAR_REG(q14)
.Lxts_dec_skip:
pop {r4-r12,lr}
vpop {q4-q7}
bx lr
.size _gcry_aes_xts_dec_armv8_ce,.-_gcry_aes_xts_dec_armv8_ce;
/*
* u32 _gcry_aes_sbox4_armv8_ce(u32 in4b);
*/
.align 3
.globl _gcry_aes_sbox4_armv8_ce
.type _gcry_aes_sbox4_armv8_ce,%function;
_gcry_aes_sbox4_armv8_ce:
/* See "Gouvêa, C. P. L. & López, J. Implementing GCM on ARMv8. Topics in
* Cryptology — CT-RSA 2015" for details.
*/
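/* The vector is filled with 0x52, which SubBytes maps to 0x00, so after
 * AESE with an all-zero round key only the four substituted input bytes
 * remain non-zero; ShiftRows scatters them across words and the xor/add
 * folds gather them back into s0. */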
vmov.i8 q0, #0x52
vmov.i8 q1, #0
vmov s0, r0
aese.8 q0, q1
veor d0, d1
vpadd.i32 d0, d0, d1
vmov r0, s0
CLEAR_REG(q0)
bx lr
.size _gcry_aes_sbox4_armv8_ce,.-_gcry_aes_sbox4_armv8_ce;
/*
* void _gcry_aes_invmixcol_armv8_ce(void *dst, const void *src);
*/
.align 3
.globl _gcry_aes_invmixcol_armv8_ce
.type _gcry_aes_invmixcol_armv8_ce,%function;
_gcry_aes_invmixcol_armv8_ce:
vld1.8 {q0}, [r1]
aesimc.8 q0, q0
vst1.8 {q0}, [r0]
CLEAR_REG(q0)
bx lr
.size _gcry_aes_invmixcol_armv8_ce,.-_gcry_aes_invmixcol_armv8_ce;
#endif
diff --git a/cipher/sha1-armv7-neon.S b/cipher/sha1-armv7-neon.S
index 61cc541c..2de678b8 100644
--- a/cipher/sha1-armv7-neon.S
+++ b/cipher/sha1-armv7-neon.S
@@ -1,526 +1,526 @@
/* sha1-armv7-neon.S - ARM/NEON accelerated SHA-1 transform function
* Copyright (C) 2013-2014 Jussi Kivilinna <jussi.kivilinna@iki.fi>
*
* Based on sha1.c:
* Copyright (C) 1998, 2001, 2002, 2003, 2008 Free Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <config.h>
#if defined(HAVE_ARM_ARCH_V6) && defined(__ARMEL__) && \
defined(HAVE_COMPATIBLE_GCC_ARM_PLATFORM_AS) && \
defined(HAVE_GCC_INLINE_ASM_NEON) && defined(USE_SHA1)
.syntax unified
.fpu neon
.arm
.text
#ifdef __PIC__
# define GET_DATA_POINTER(reg, name, rtmp) \
ldr reg, 1f; \
ldr rtmp, 2f; \
b 3f; \
1: .word _GLOBAL_OFFSET_TABLE_-(3f+8); \
2: .word name(GOT); \
3: add reg, pc, reg; \
ldr reg, [reg, rtmp];
#else
# define GET_DATA_POINTER(reg, name, rtmp) ldr reg, =name
#endif
/* Context structure */
#define state_h0 0
#define state_h1 4
#define state_h2 8
#define state_h3 12
#define state_h4 16
/* Constants */
#define K1 0x5A827999
#define K2 0x6ED9EBA1
#define K3 0x8F1BBCDC
#define K4 0xCA62C1D6
.align 4
gcry_sha1_armv7_neon_K_VEC:
.LK_VEC:
.LK1: .long K1, K1, K1, K1
.LK2: .long K2, K2, K2, K2
.LK3: .long K3, K3, K3, K3
.LK4: .long K4, K4, K4, K4
/* Register macros */
#define RSTATE r0
#define RDATA r1
#define RNBLKS r2
#define ROLDSTACK r3
#define RWK lr
#define _a r4
#define _b r5
#define _c r6
#define _d r7
#define _e r8
#define RT0 r9
#define RT1 r10
#define RT2 r11
#define RT3 r12
#define W0 q0
#define W1 q1
#define W2 q2
#define W3 q3
#define W4 q4
#define W5 q5
#define W6 q6
#define W7 q7
#define tmp0 q8
#define tmp1 q9
#define tmp2 q10
#define tmp3 q11
#define qK1 q12
#define qK2 q13
#define qK3 q14
#define qK4 q15
/* Round function macros. */
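/* _R_F1 is the Ch function (rounds 0-19), _R_F2/_R_F4 the parity function
 * (rounds 20-39, 60-79) and _R_F3 the Maj function (rounds 40-59). Each
 * round fetches its pre-added W[i]+K from the stack and interleaves three
 * NEON message-expansion steps (pre1..pre3) with the scalar arithmetic. */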
#define WK_offs(i) (((i) & 15) * 4)
#define _R_F1(a,b,c,d,e,i,pre1,pre2,pre3,i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
ldr RT3, [sp, WK_offs(i)]; \
pre1(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
bic RT0, d, b; \
add e, e, a, ror #(32 - 5); \
and RT1, c, b; \
pre2(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
add RT0, RT0, RT3; \
add e, e, RT1; \
ror b, #(32 - 30); \
pre3(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
add e, e, RT0;
#define _R_F2(a,b,c,d,e,i,pre1,pre2,pre3,i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
ldr RT3, [sp, WK_offs(i)]; \
pre1(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
eor RT0, d, b; \
add e, e, a, ror #(32 - 5); \
eor RT0, RT0, c; \
pre2(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
add e, e, RT3; \
ror b, #(32 - 30); \
pre3(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
add e, e, RT0; \
#define _R_F3(a,b,c,d,e,i,pre1,pre2,pre3,i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
ldr RT3, [sp, WK_offs(i)]; \
pre1(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
eor RT0, b, c; \
and RT1, b, c; \
add e, e, a, ror #(32 - 5); \
pre2(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
and RT0, RT0, d; \
add RT1, RT1, RT3; \
add e, e, RT0; \
ror b, #(32 - 30); \
pre3(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
add e, e, RT1;
#define _R_F4(a,b,c,d,e,i,pre1,pre2,pre3,i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
_R_F2(a,b,c,d,e,i,pre1,pre2,pre3,i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28)
#define _R(a,b,c,d,e,f,i,pre1,pre2,pre3,i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
_R_##f(a,b,c,d,e,i,pre1,pre2,pre3,i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28)
#define R(a,b,c,d,e,f,i) \
_R_##f(a,b,c,d,e,i,dummy,dummy,dummy,i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28)
#define dummy(...)
/* Input expansion macros. */
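/* The message schedule is computed four words at a time. Rounds 16-31 use
 * W[i] = rol1(W[i-3] ^ W[i-8] ^ W[i-14] ^ W[i-16]) with a fix-up for the
 * not-yet-available W[i-3] lane; rounds 32-79 use the equivalent
 * W[i] = rol2(W[i-6] ^ W[i-16] ^ W[i-28] ^ W[i-32]) so all inputs lie at
 * least one full vector back. */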
/********* Precalc macros for rounds 0-15 *************************************/
#define W_PRECALC_00_15() \
add RWK, sp, #(WK_offs(0)); \
\
vld1.32 {tmp0, tmp1}, [RDATA]!; \
vrev32.8 W0, tmp0; /* big => little */ \
vld1.32 {tmp2, tmp3}, [RDATA]!; \
vadd.u32 tmp0, W0, curK; \
vrev32.8 W7, tmp1; /* big => little */ \
vrev32.8 W6, tmp2; /* big => little */ \
vadd.u32 tmp1, W7, curK; \
vrev32.8 W5, tmp3; /* big => little */ \
vadd.u32 tmp2, W6, curK; \
vst1.32 {tmp0, tmp1}, [RWK]!; \
vadd.u32 tmp3, W5, curK; \
vst1.32 {tmp2, tmp3}, [RWK]; \
#define WPRECALC_00_15_0(i, W, W_m04, W_m08, W_m12, W_m16, W_m20, W_m24, W_m28) \
vld1.32 {tmp0, tmp1}, [RDATA]!; \
#define WPRECALC_00_15_1(i, W, W_m04, W_m08, W_m12, W_m16, W_m20, W_m24, W_m28) \
add RWK, sp, #(WK_offs(0)); \
#define WPRECALC_00_15_2(i, W, W_m04, W_m08, W_m12, W_m16, W_m20, W_m24, W_m28) \
vrev32.8 W0, tmp0; /* big => little */ \
#define WPRECALC_00_15_3(i, W, W_m04, W_m08, W_m12, W_m16, W_m20, W_m24, W_m28) \
vld1.32 {tmp2, tmp3}, [RDATA]!; \
#define WPRECALC_00_15_4(i, W, W_m04, W_m08, W_m12, W_m16, W_m20, W_m24, W_m28) \
vadd.u32 tmp0, W0, curK; \
#define WPRECALC_00_15_5(i, W, W_m04, W_m08, W_m12, W_m16, W_m20, W_m24, W_m28) \
vrev32.8 W7, tmp1; /* big => little */ \
#define WPRECALC_00_15_6(i, W, W_m04, W_m08, W_m12, W_m16, W_m20, W_m24, W_m28) \
vrev32.8 W6, tmp2; /* big => little */ \
#define WPRECALC_00_15_7(i, W, W_m04, W_m08, W_m12, W_m16, W_m20, W_m24, W_m28) \
vadd.u32 tmp1, W7, curK; \
#define WPRECALC_00_15_8(i, W, W_m04, W_m08, W_m12, W_m16, W_m20, W_m24, W_m28) \
vrev32.8 W5, tmp3; /* big => little */ \
#define WPRECALC_00_15_9(i, W, W_m04, W_m08, W_m12, W_m16, W_m20, W_m24, W_m28) \
vadd.u32 tmp2, W6, curK; \
#define WPRECALC_00_15_10(i, W, W_m04, W_m08, W_m12, W_m16, W_m20, W_m24, W_m28) \
vst1.32 {tmp0, tmp1}, [RWK]!; \
#define WPRECALC_00_15_11(i, W, W_m04, W_m08, W_m12, W_m16, W_m20, W_m24, W_m28) \
vadd.u32 tmp3, W5, curK; \
#define WPRECALC_00_15_12(i, W, W_m04, W_m08, W_m12, W_m16, W_m20, W_m24, W_m28) \
vst1.32 {tmp2, tmp3}, [RWK]; \
/********* Precalc macros for rounds 16-31 ************************************/
#define WPRECALC_16_31_0(i, W, W_m04, W_m08, W_m12, W_m16, W_m20, W_m24, W_m28) \
veor tmp0, tmp0; \
vext.8 W, W_m16, W_m12, #8; \
#define WPRECALC_16_31_1(i, W, W_m04, W_m08, W_m12, W_m16, W_m20, W_m24, W_m28) \
add RWK, sp, #(WK_offs(i)); \
vext.8 tmp0, W_m04, tmp0, #4; \
#define WPRECALC_16_31_2(i, W, W_m04, W_m08, W_m12, W_m16, W_m20, W_m24, W_m28) \
veor tmp0, tmp0, W_m16; \
veor.32 W, W, W_m08; \
#define WPRECALC_16_31_3(i, W, W_m04, W_m08, W_m12, W_m16, W_m20, W_m24, W_m28) \
veor tmp1, tmp1; \
veor W, W, tmp0; \
#define WPRECALC_16_31_4(i, W, W_m04, W_m08, W_m12, W_m16, W_m20, W_m24, W_m28) \
vshl.u32 tmp0, W, #1; \
#define WPRECALC_16_31_5(i, W, W_m04, W_m08, W_m12, W_m16, W_m20, W_m24, W_m28) \
vext.8 tmp1, tmp1, W, #(16-12); \
vshr.u32 W, W, #31; \
#define WPRECALC_16_31_6(i, W, W_m04, W_m08, W_m12, W_m16, W_m20, W_m24, W_m28) \
vorr tmp0, tmp0, W; \
vshr.u32 W, tmp1, #30; \
#define WPRECALC_16_31_7(i, W, W_m04, W_m08, W_m12, W_m16, W_m20, W_m24, W_m28) \
vshl.u32 tmp1, tmp1, #2; \
#define WPRECALC_16_31_8(i, W, W_m04, W_m08, W_m12, W_m16, W_m20, W_m24, W_m28) \
veor tmp0, tmp0, W; \
#define WPRECALC_16_31_9(i, W, W_m04, W_m08, W_m12, W_m16, W_m20, W_m24, W_m28) \
veor W, tmp0, tmp1; \
#define WPRECALC_16_31_10(i, W, W_m04, W_m08, W_m12, W_m16, W_m20, W_m24, W_m28) \
vadd.u32 tmp0, W, curK; \
#define WPRECALC_16_31_11(i, W, W_m04, W_m08, W_m12, W_m16, W_m20, W_m24, W_m28) \
vst1.32 {tmp0}, [RWK];
/********* Precalc macros for rounds 32-79 ************************************/
#define WPRECALC_32_79_0(i, W, W_m04, W_m08, W_m12, W_m16, W_m20, W_m24, W_m28) \
veor W, W_m28; \
#define WPRECALC_32_79_1(i, W, W_m04, W_m08, W_m12, W_m16, W_m20, W_m24, W_m28) \
vext.8 tmp0, W_m08, W_m04, #8; \
#define WPRECALC_32_79_2(i, W, W_m04, W_m08, W_m12, W_m16, W_m20, W_m24, W_m28) \
veor W, W_m16; \
#define WPRECALC_32_79_3(i, W, W_m04, W_m08, W_m12, W_m16, W_m20, W_m24, W_m28) \
veor W, tmp0; \
#define WPRECALC_32_79_4(i, W, W_m04, W_m08, W_m12, W_m16, W_m20, W_m24, W_m28) \
add RWK, sp, #(WK_offs(i&~3)); \
#define WPRECALC_32_79_5(i, W, W_m04, W_m08, W_m12, W_m16, W_m20, W_m24, W_m28) \
vshl.u32 tmp1, W, #2; \
#define WPRECALC_32_79_6(i, W, W_m04, W_m08, W_m12, W_m16, W_m20, W_m24, W_m28) \
vshr.u32 tmp0, W, #30; \
#define WPRECALC_32_79_7(i, W, W_m04, W_m08, W_m12, W_m16, W_m20, W_m24, W_m28) \
vorr W, tmp0, tmp1; \
#define WPRECALC_32_79_8(i, W, W_m04, W_m08, W_m12, W_m16, W_m20, W_m24, W_m28) \
vadd.u32 tmp0, W, curK; \
#define WPRECALC_32_79_9(i, W, W_m04, W_m08, W_m12, W_m16, W_m20, W_m24, W_m28) \
vst1.32 {tmp0}, [RWK];
/* Other functional macros */
-#define CLEAR_REG(reg) veor reg, reg;
+#define CLEAR_REG(reg) vmov.i8 reg, #0;
/*
* Transform nblks*64 bytes (nblks*16 32-bit words) at DATA.
*
* unsigned int
* _gcry_sha1_transform_armv7_neon (void *ctx, const unsigned char *data,
* size_t nblks)
*/
.align 3
.globl _gcry_sha1_transform_armv7_neon
.type _gcry_sha1_transform_armv7_neon,%function;
_gcry_sha1_transform_armv7_neon:
/* input:
* r0: ctx, CTX
* r1: data (64*nblks bytes)
* r2: nblks
*/
cmp RNBLKS, #0;
beq .Ldo_nothing;
push {r4-r12, lr};
GET_DATA_POINTER(RT3, .LK_VEC, _a);
vpush {q4-q7};
mov ROLDSTACK, sp;
/* Align stack. */
sub sp, #(16*4);
and sp, #(~(16-1));
vld1.32 {qK1-qK2}, [RT3]!; /* Load K1,K2 */
/* Get the values of the chaining variables. */
ldm RSTATE, {_a-_e};
vld1.32 {qK3-qK4}, [RT3]; /* Load K3,K4 */
#undef curK
#define curK qK1
/* Precalc 0-15. */
W_PRECALC_00_15();
b .Loop;
.ltorg
.Loop:
/* Transform 0-15 + Precalc 16-31. */
_R( _a, _b, _c, _d, _e, F1, 0, WPRECALC_16_31_0, WPRECALC_16_31_1, WPRECALC_16_31_2, 16, W4, W5, W6, W7, W0, _, _, _ );
_R( _e, _a, _b, _c, _d, F1, 1, WPRECALC_16_31_3, WPRECALC_16_31_4, WPRECALC_16_31_5, 16, W4, W5, W6, W7, W0, _, _, _ );
_R( _d, _e, _a, _b, _c, F1, 2, WPRECALC_16_31_6, WPRECALC_16_31_7, WPRECALC_16_31_8, 16, W4, W5, W6, W7, W0, _, _, _ );
_R( _c, _d, _e, _a, _b, F1, 3, WPRECALC_16_31_9, WPRECALC_16_31_10,WPRECALC_16_31_11,16, W4, W5, W6, W7, W0, _, _, _ );
#undef curK
#define curK qK2
_R( _b, _c, _d, _e, _a, F1, 4, WPRECALC_16_31_0, WPRECALC_16_31_1, WPRECALC_16_31_2, 20, W3, W4, W5, W6, W7, _, _, _ );
_R( _a, _b, _c, _d, _e, F1, 5, WPRECALC_16_31_3, WPRECALC_16_31_4, WPRECALC_16_31_5, 20, W3, W4, W5, W6, W7, _, _, _ );
_R( _e, _a, _b, _c, _d, F1, 6, WPRECALC_16_31_6, WPRECALC_16_31_7, WPRECALC_16_31_8, 20, W3, W4, W5, W6, W7, _, _, _ );
_R( _d, _e, _a, _b, _c, F1, 7, WPRECALC_16_31_9, WPRECALC_16_31_10,WPRECALC_16_31_11,20, W3, W4, W5, W6, W7, _, _, _ );
_R( _c, _d, _e, _a, _b, F1, 8, WPRECALC_16_31_0, WPRECALC_16_31_1, WPRECALC_16_31_2, 24, W2, W3, W4, W5, W6, _, _, _ );
_R( _b, _c, _d, _e, _a, F1, 9, WPRECALC_16_31_3, WPRECALC_16_31_4, WPRECALC_16_31_5, 24, W2, W3, W4, W5, W6, _, _, _ );
_R( _a, _b, _c, _d, _e, F1, 10, WPRECALC_16_31_6, WPRECALC_16_31_7, WPRECALC_16_31_8, 24, W2, W3, W4, W5, W6, _, _, _ );
_R( _e, _a, _b, _c, _d, F1, 11, WPRECALC_16_31_9, WPRECALC_16_31_10,WPRECALC_16_31_11,24, W2, W3, W4, W5, W6, _, _, _ );
_R( _d, _e, _a, _b, _c, F1, 12, WPRECALC_16_31_0, WPRECALC_16_31_1, WPRECALC_16_31_2, 28, W1, W2, W3, W4, W5, _, _, _ );
_R( _c, _d, _e, _a, _b, F1, 13, WPRECALC_16_31_3, WPRECALC_16_31_4, WPRECALC_16_31_5, 28, W1, W2, W3, W4, W5, _, _, _ );
_R( _b, _c, _d, _e, _a, F1, 14, WPRECALC_16_31_6, WPRECALC_16_31_7, WPRECALC_16_31_8, 28, W1, W2, W3, W4, W5, _, _, _ );
_R( _a, _b, _c, _d, _e, F1, 15, WPRECALC_16_31_9, WPRECALC_16_31_10,WPRECALC_16_31_11,28, W1, W2, W3, W4, W5, _, _, _ );
/* Transform 16-63 + Precalc 32-79. */
_R( _e, _a, _b, _c, _d, F1, 16, WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 32, W0, W1, W2, W3, W4, W5, W6, W7);
_R( _d, _e, _a, _b, _c, F1, 17, WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 32, W0, W1, W2, W3, W4, W5, W6, W7);
_R( _c, _d, _e, _a, _b, F1, 18, WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 32, W0, W1, W2, W3, W4, W5, W6, W7);
_R( _b, _c, _d, _e, _a, F1, 19, WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 32, W0, W1, W2, W3, W4, W5, W6, W7);
_R( _a, _b, _c, _d, _e, F2, 20, WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 36, W7, W0, W1, W2, W3, W4, W5, W6);
_R( _e, _a, _b, _c, _d, F2, 21, WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 36, W7, W0, W1, W2, W3, W4, W5, W6);
_R( _d, _e, _a, _b, _c, F2, 22, WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 36, W7, W0, W1, W2, W3, W4, W5, W6);
_R( _c, _d, _e, _a, _b, F2, 23, WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 36, W7, W0, W1, W2, W3, W4, W5, W6);
#undef curK
#define curK qK3
_R( _b, _c, _d, _e, _a, F2, 24, WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 40, W6, W7, W0, W1, W2, W3, W4, W5);
_R( _a, _b, _c, _d, _e, F2, 25, WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 40, W6, W7, W0, W1, W2, W3, W4, W5);
_R( _e, _a, _b, _c, _d, F2, 26, WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 40, W6, W7, W0, W1, W2, W3, W4, W5);
_R( _d, _e, _a, _b, _c, F2, 27, WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 40, W6, W7, W0, W1, W2, W3, W4, W5);
_R( _c, _d, _e, _a, _b, F2, 28, WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 44, W5, W6, W7, W0, W1, W2, W3, W4);
_R( _b, _c, _d, _e, _a, F2, 29, WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 44, W5, W6, W7, W0, W1, W2, W3, W4);
_R( _a, _b, _c, _d, _e, F2, 30, WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 44, W5, W6, W7, W0, W1, W2, W3, W4);
_R( _e, _a, _b, _c, _d, F2, 31, WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 44, W5, W6, W7, W0, W1, W2, W3, W4);
_R( _d, _e, _a, _b, _c, F2, 32, WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 48, W4, W5, W6, W7, W0, W1, W2, W3);
_R( _c, _d, _e, _a, _b, F2, 33, WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 48, W4, W5, W6, W7, W0, W1, W2, W3);
_R( _b, _c, _d, _e, _a, F2, 34, WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 48, W4, W5, W6, W7, W0, W1, W2, W3);
_R( _a, _b, _c, _d, _e, F2, 35, WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 48, W4, W5, W6, W7, W0, W1, W2, W3);
_R( _e, _a, _b, _c, _d, F2, 36, WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 52, W3, W4, W5, W6, W7, W0, W1, W2);
_R( _d, _e, _a, _b, _c, F2, 37, WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 52, W3, W4, W5, W6, W7, W0, W1, W2);
_R( _c, _d, _e, _a, _b, F2, 38, WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 52, W3, W4, W5, W6, W7, W0, W1, W2);
_R( _b, _c, _d, _e, _a, F2, 39, WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 52, W3, W4, W5, W6, W7, W0, W1, W2);
_R( _a, _b, _c, _d, _e, F3, 40, WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 56, W2, W3, W4, W5, W6, W7, W0, W1);
_R( _e, _a, _b, _c, _d, F3, 41, WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 56, W2, W3, W4, W5, W6, W7, W0, W1);
_R( _d, _e, _a, _b, _c, F3, 42, WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 56, W2, W3, W4, W5, W6, W7, W0, W1);
_R( _c, _d, _e, _a, _b, F3, 43, WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 56, W2, W3, W4, W5, W6, W7, W0, W1);
#undef curK
#define curK qK4
_R( _b, _c, _d, _e, _a, F3, 44, WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 60, W1, W2, W3, W4, W5, W6, W7, W0);
_R( _a, _b, _c, _d, _e, F3, 45, WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 60, W1, W2, W3, W4, W5, W6, W7, W0);
_R( _e, _a, _b, _c, _d, F3, 46, WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 60, W1, W2, W3, W4, W5, W6, W7, W0);
_R( _d, _e, _a, _b, _c, F3, 47, WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 60, W1, W2, W3, W4, W5, W6, W7, W0);
_R( _c, _d, _e, _a, _b, F3, 48, WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 64, W0, W1, W2, W3, W4, W5, W6, W7);
_R( _b, _c, _d, _e, _a, F3, 49, WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 64, W0, W1, W2, W3, W4, W5, W6, W7);
_R( _a, _b, _c, _d, _e, F3, 50, WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 64, W0, W1, W2, W3, W4, W5, W6, W7);
_R( _e, _a, _b, _c, _d, F3, 51, WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 64, W0, W1, W2, W3, W4, W5, W6, W7);
_R( _d, _e, _a, _b, _c, F3, 52, WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 68, W7, W0, W1, W2, W3, W4, W5, W6);
_R( _c, _d, _e, _a, _b, F3, 53, WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 68, W7, W0, W1, W2, W3, W4, W5, W6);
_R( _b, _c, _d, _e, _a, F3, 54, WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 68, W7, W0, W1, W2, W3, W4, W5, W6);
_R( _a, _b, _c, _d, _e, F3, 55, WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 68, W7, W0, W1, W2, W3, W4, W5, W6);
_R( _e, _a, _b, _c, _d, F3, 56, WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 72, W6, W7, W0, W1, W2, W3, W4, W5);
_R( _d, _e, _a, _b, _c, F3, 57, WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 72, W6, W7, W0, W1, W2, W3, W4, W5);
_R( _c, _d, _e, _a, _b, F3, 58, WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 72, W6, W7, W0, W1, W2, W3, W4, W5);
_R( _b, _c, _d, _e, _a, F3, 59, WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 72, W6, W7, W0, W1, W2, W3, W4, W5);
subs RNBLKS, #1;
_R( _a, _b, _c, _d, _e, F4, 60, WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 76, W5, W6, W7, W0, W1, W2, W3, W4);
_R( _e, _a, _b, _c, _d, F4, 61, WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 76, W5, W6, W7, W0, W1, W2, W3, W4);
_R( _d, _e, _a, _b, _c, F4, 62, WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 76, W5, W6, W7, W0, W1, W2, W3, W4);
_R( _c, _d, _e, _a, _b, F4, 63, WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 76, W5, W6, W7, W0, W1, W2, W3, W4);
beq .Lend;
/* Transform 64-79 + Precalc 0-15 of next block. */
#undef curK
#define curK qK1
_R( _b, _c, _d, _e, _a, F4, 64, WPRECALC_00_15_0, dummy, dummy, _, _, _, _, _, _, _, _, _ );
_R( _a, _b, _c, _d, _e, F4, 65, WPRECALC_00_15_1, dummy, dummy, _, _, _, _, _, _, _, _, _ );
_R( _e, _a, _b, _c, _d, F4, 66, WPRECALC_00_15_2, dummy, dummy, _, _, _, _, _, _, _, _, _ );
_R( _d, _e, _a, _b, _c, F4, 67, WPRECALC_00_15_3, dummy, dummy, _, _, _, _, _, _, _, _, _ );
_R( _c, _d, _e, _a, _b, F4, 68, dummy, dummy, dummy, _, _, _, _, _, _, _, _, _ );
_R( _b, _c, _d, _e, _a, F4, 69, dummy, dummy, dummy, _, _, _, _, _, _, _, _, _ );
_R( _a, _b, _c, _d, _e, F4, 70, WPRECALC_00_15_4, dummy, dummy, _, _, _, _, _, _, _, _, _ );
_R( _e, _a, _b, _c, _d, F4, 71, WPRECALC_00_15_5, dummy, dummy, _, _, _, _, _, _, _, _, _ );
_R( _d, _e, _a, _b, _c, F4, 72, dummy, dummy, dummy, _, _, _, _, _, _, _, _, _ );
_R( _c, _d, _e, _a, _b, F4, 73, dummy, dummy, dummy, _, _, _, _, _, _, _, _, _ );
_R( _b, _c, _d, _e, _a, F4, 74, WPRECALC_00_15_6, dummy, dummy, _, _, _, _, _, _, _, _, _ );
_R( _a, _b, _c, _d, _e, F4, 75, WPRECALC_00_15_7, dummy, dummy, _, _, _, _, _, _, _, _, _ );
_R( _e, _a, _b, _c, _d, F4, 76, WPRECALC_00_15_8, dummy, dummy, _, _, _, _, _, _, _, _, _ );
_R( _d, _e, _a, _b, _c, F4, 77, WPRECALC_00_15_9, dummy, dummy, _, _, _, _, _, _, _, _, _ );
_R( _c, _d, _e, _a, _b, F4, 78, WPRECALC_00_15_10, dummy, dummy, _, _, _, _, _, _, _, _, _ );
_R( _b, _c, _d, _e, _a, F4, 79, WPRECALC_00_15_11, dummy, WPRECALC_00_15_12, _, _, _, _, _, _, _, _, _ );
/* Update the chaining variables. */
ldm RSTATE, {RT0-RT3};
add _a, RT0;
ldr RT0, [RSTATE, #state_h4];
add _b, RT1;
add _c, RT2;
add _d, RT3;
add _e, RT0;
stm RSTATE, {_a-_e};
b .Loop;
.ltorg
.Lend:
/* Transform 64-79 + Clear NEON registers. */
R( _b, _c, _d, _e, _a, F4, 64 );
R( _a, _b, _c, _d, _e, F4, 65 ); CLEAR_REG(tmp0);
R( _e, _a, _b, _c, _d, F4, 66 ); CLEAR_REG(tmp1);
R( _d, _e, _a, _b, _c, F4, 67 ); CLEAR_REG(W0);
R( _c, _d, _e, _a, _b, F4, 68 ); CLEAR_REG(W1);
R( _b, _c, _d, _e, _a, F4, 69 ); CLEAR_REG(W2);
R( _a, _b, _c, _d, _e, F4, 70 ); CLEAR_REG(W3);
R( _e, _a, _b, _c, _d, F4, 71 ); CLEAR_REG(W4);
R( _d, _e, _a, _b, _c, F4, 72 ); CLEAR_REG(W5);
R( _c, _d, _e, _a, _b, F4, 73 ); CLEAR_REG(W6);
R( _b, _c, _d, _e, _a, F4, 74 ); CLEAR_REG(W7);
R( _a, _b, _c, _d, _e, F4, 75 );
R( _e, _a, _b, _c, _d, F4, 76 );
R( _d, _e, _a, _b, _c, F4, 77 );
R( _c, _d, _e, _a, _b, F4, 78 );
R( _b, _c, _d, _e, _a, F4, 79 );
mov sp, ROLDSTACK;
/* Update the chaining variables. */
ldm RSTATE, {RT0-RT3};
add _a, RT0;
ldr RT0, [RSTATE, #state_h4];
add _b, RT1;
add _c, RT2;
add _d, RT3;
vpop {q4-q7};
add _e, RT0;
stm RSTATE, {_a-_e};
/* burn_stack */
mov r0, #(16*4 + 16*4 + 15);
pop {r4-r12, pc};
.Ldo_nothing:
mov r0, #0;
bx lr
.size _gcry_sha1_transform_armv7_neon,.-_gcry_sha1_transform_armv7_neon;
#endif
diff --git a/cipher/sha1-armv8-aarch32-ce.S b/cipher/sha1-armv8-aarch32-ce.S
index bf2b233b..059b9a85 100644
--- a/cipher/sha1-armv8-aarch32-ce.S
+++ b/cipher/sha1-armv8-aarch32-ce.S
@@ -1,220 +1,220 @@
/* sha1-armv8-aarch32-ce.S - ARM/CE accelerated SHA-1 transform function
* Copyright (C) 2016 Jussi Kivilinna <jussi.kivilinna@iki.fi>
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <config.h>
#if defined(HAVE_ARM_ARCH_V6) && defined(__ARMEL__) && \
defined(HAVE_COMPATIBLE_GCC_ARM_PLATFORM_AS) && \
defined(HAVE_GCC_INLINE_ASM_AARCH32_CRYPTO) && defined(USE_SHA1)
.syntax unified
.arch armv8-a
.fpu crypto-neon-fp-armv8
.arm
.text
#ifdef __PIC__
# define GET_DATA_POINTER(reg, name, rtmp) \
ldr reg, 1f; \
ldr rtmp, 2f; \
b 3f; \
1: .word _GLOBAL_OFFSET_TABLE_-(3f+8); \
2: .word name(GOT); \
3: add reg, pc, reg; \
ldr reg, [reg, rtmp];
#else
# define GET_DATA_POINTER(reg, name, rtmp) ldr reg, =name
#endif
/* Constants */
#define K1 0x5A827999
#define K2 0x6ED9EBA1
#define K3 0x8F1BBCDC
#define K4 0xCA62C1D6
.align 4
gcry_sha1_aarch32_ce_K_VEC:
.LK_VEC:
.LK1: .long K1, K1, K1, K1
.LK2: .long K2, K2, K2, K2
.LK3: .long K3, K3, K3, K3
.LK4: .long K4, K4, K4, K4
/* Register macros */
#define qH4 q0
#define sH4 s0
#define qH0123 q1
#define qABCD q2
#define qE0 q3
#define qE1 q4
#define qT0 q5
#define qT1 q6
#define qW0 q8
#define qW1 q9
#define qW2 q10
#define qW3 q11
#define qK1 q12
#define qK2 q13
#define qK3 q14
#define qK4 q15
/* Round macros */
#define _(...) /*_*/
#define do_add(dst, src0, src1) vadd.u32 dst, src0, src1;
#define do_sha1su0(w0,w1,w2) sha1su0.32 w0,w1,w2;
#define do_sha1su1(w0,w3) sha1su1.32 w0,w3;
#define do_rounds(f, e0, e1, t, k, w0, w1, w2, w3, add_fn, sha1su0_fn, sha1su1_fn) \
sha1su1_fn( w3, w2 ); \
sha1h.32 e0, qABCD; \
sha1##f.32 qABCD, e1, t; \
add_fn( t, w2, k ); \
sha1su0_fn( w0, w1, w2 );
/* Other functional macros */
-#define CLEAR_REG(reg) veor reg, reg;
+#define CLEAR_REG(reg) vmov.i8 reg, #0;
/*
* unsigned int
* _gcry_sha1_transform_armv8_ce (void *ctx, const unsigned char *data,
* size_t nblks)
*/
.align 3
.globl _gcry_sha1_transform_armv8_ce
.type _gcry_sha1_transform_armv8_ce,%function;
_gcry_sha1_transform_armv8_ce:
/* input:
* r0: ctx, CTX
* r1: data (64*nblks bytes)
* r2: nblks
*/
cmp r2, #0;
push {r4,lr};
beq .Ldo_nothing;
vpush {q4-q7};
GET_DATA_POINTER(r4, .LK_VEC, lr);
veor qH4, qH4
vld1.32 {qH0123}, [r0] /* load h0,h1,h2,h3 */
vld1.32 {qK1-qK2}, [r4]! /* load K1,K2 */
vldr sH4, [r0, #16] /* load h4 */
vld1.32 {qK3-qK4}, [r4] /* load K3,K4 */
vld1.8 {qW0-qW1}, [r1]!
vmov qABCD, qH0123
vld1.8 {qW2-qW3}, [r1]!
vrev32.8 qW0, qW0
vrev32.8 qW1, qW1
vrev32.8 qW2, qW2
do_add(qT0, qW0, qK1)
vrev32.8 qW3, qW3
do_add(qT1, qW1, qK1)
.Loop:
do_rounds(c, qE1, qH4, qT0, qK1, qW0, qW1, qW2, qW3, do_add, do_sha1su0, _)
subs r2, r2, #1
do_rounds(c, qE0, qE1, qT1, qK1, qW1, qW2, qW3, qW0, do_add, do_sha1su0, do_sha1su1)
do_rounds(c, qE1, qE0, qT0, qK1, qW2, qW3, qW0, qW1, do_add, do_sha1su0, do_sha1su1)
do_rounds(c, qE0, qE1, qT1, qK2, qW3, qW0, qW1, qW2, do_add, do_sha1su0, do_sha1su1)
do_rounds(c, qE1, qE0, qT0, qK2, qW0, qW1, qW2, qW3, do_add, do_sha1su0, do_sha1su1)
do_rounds(p, qE0, qE1, qT1, qK2, qW1, qW2, qW3, qW0, do_add, do_sha1su0, do_sha1su1)
do_rounds(p, qE1, qE0, qT0, qK2, qW2, qW3, qW0, qW1, do_add, do_sha1su0, do_sha1su1)
do_rounds(p, qE0, qE1, qT1, qK2, qW3, qW0, qW1, qW2, do_add, do_sha1su0, do_sha1su1)
do_rounds(p, qE1, qE0, qT0, qK3, qW0, qW1, qW2, qW3, do_add, do_sha1su0, do_sha1su1)
do_rounds(p, qE0, qE1, qT1, qK3, qW1, qW2, qW3, qW0, do_add, do_sha1su0, do_sha1su1)
do_rounds(m, qE1, qE0, qT0, qK3, qW2, qW3, qW0, qW1, do_add, do_sha1su0, do_sha1su1)
do_rounds(m, qE0, qE1, qT1, qK3, qW3, qW0, qW1, qW2, do_add, do_sha1su0, do_sha1su1)
do_rounds(m, qE1, qE0, qT0, qK3, qW0, qW1, qW2, qW3, do_add, do_sha1su0, do_sha1su1)
do_rounds(m, qE0, qE1, qT1, qK4, qW1, qW2, qW3, qW0, do_add, do_sha1su0, do_sha1su1)
do_rounds(m, qE1, qE0, qT0, qK4, qW2, qW3, qW0, qW1, do_add, do_sha1su0, do_sha1su1)
do_rounds(p, qE0, qE1, qT1, qK4, qW3, qW0, qW1, qW2, do_add, do_sha1su0, do_sha1su1)
beq .Lend
vld1.8 {qW0-qW1}, [r1]! /* preload */
do_rounds(p, qE1, qE0, qT0, qK4, _ , _ , qW2, qW3, do_add, _, do_sha1su1)
vrev32.8 qW0, qW0
vld1.8 {qW2}, [r1]!
vrev32.8 qW1, qW1
do_rounds(p, qE0, qE1, qT1, qK4, _ , _ , qW3, _ , do_add, _, _)
vld1.8 {qW3}, [r1]!
vrev32.8 qW2, qW2
do_rounds(p, qE1, qE0, qT0, _, _, _, _, _, _, _, _)
vrev32.8 qW3, qW3
do_rounds(p, qE0, qE1, qT1, _, _, _, _, _, _, _, _)
do_add(qT0, qW0, qK1)
vadd.u32 qH4, qE0
vadd.u32 qABCD, qH0123
do_add(qT1, qW1, qK1)
vmov qH0123, qABCD
b .Loop
.Lend:
do_rounds(p, qE1, qE0, qT0, qK4, _ , _ , qW2, qW3, do_add, _, do_sha1su1)
do_rounds(p, qE0, qE1, qT1, qK4, _ , _ , qW3, _ , do_add, _, _)
do_rounds(p, qE1, qE0, qT0, _, _, _, _, _, _, _, _)
do_rounds(p, qE0, qE1, qT1, _, _, _, _, _, _, _, _)
vadd.u32 qH4, qE0
vadd.u32 qH0123, qABCD
CLEAR_REG(qW0)
CLEAR_REG(qW1)
CLEAR_REG(qW2)
CLEAR_REG(qW3)
CLEAR_REG(qABCD)
CLEAR_REG(qE1)
CLEAR_REG(qE0)
vstr sH4, [r0, #16] /* store h4 */
vst1.32 {qH0123}, [r0] /* store h0,h1,h2,h3 */
CLEAR_REG(qH0123)
CLEAR_REG(qH4)
vpop {q4-q7}
.Ldo_nothing:
mov r0, #0
pop {r4,pc}
.size _gcry_sha1_transform_armv8_ce,.-_gcry_sha1_transform_armv8_ce;
#endif
diff --git a/cipher/sha1-armv8-aarch64-ce.S b/cipher/sha1-armv8-aarch64-ce.S
index 223268ca..8ea1486b 100644
--- a/cipher/sha1-armv8-aarch64-ce.S
+++ b/cipher/sha1-armv8-aarch64-ce.S
@@ -1,201 +1,201 @@
/* sha1-armv8-aarch64-ce.S - ARM/CE accelerated SHA-1 transform function
* Copyright (C) 2016 Jussi Kivilinna <jussi.kivilinna@iki.fi>
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "asm-common-aarch64.h"
#if defined(__AARCH64EL__) && \
defined(HAVE_COMPATIBLE_GCC_AARCH64_PLATFORM_AS) && \
defined(HAVE_GCC_INLINE_ASM_AARCH64_CRYPTO) && defined(USE_SHA1)
.cpu generic+simd+crypto
.text
/* Constants */
#define K1 0x5A827999
#define K2 0x6ED9EBA1
#define K3 0x8F1BBCDC
#define K4 0xCA62C1D6
.align 4
gcry_sha1_aarch64_ce_K_VEC:
.LK_VEC:
.LK1: .long K1, K1, K1, K1
.LK2: .long K2, K2, K2, K2
.LK3: .long K3, K3, K3, K3
.LK4: .long K4, K4, K4, K4
/* Register macros */
#define sH4 s0
#define vH4 v0
#define vH0123 v1
#define qABCD q2
#define sABCD s2
#define vABCD v2
#define sE0 s3
#define vE0 v3
#define sE1 s4
#define vE1 v4
#define vT0 v5
#define vT1 v6
#define vW0 v16
#define vW1 v17
#define vW2 v18
#define vW3 v19
#define vK1 v20
#define vK2 v21
#define vK3 v22
#define vK4 v23
/* Round macros */
#define _(...) /*_*/
#define do_add(dst, src0, src1) add dst.4s, src0.4s, src1.4s;
#define do_sha1su0(w0,w1,w2) sha1su0 w0.4s,w1.4s,w2.4s;
#define do_sha1su1(w0,w3) sha1su1 w0.4s,w3.4s;
#define do_rounds(f, e0, e1, t, k, w0, w1, w2, w3, add_fn, sha1su0_fn, sha1su1_fn) \
sha1su1_fn( v##w3, v##w2 ); \
sha1h e0, sABCD; \
sha1##f qABCD, e1, v##t.4s; \
add_fn( v##t, v##w2, v##k ); \
sha1su0_fn( v##w0, v##w1, v##w2 );
/* Other functional macros */
-#define CLEAR_REG(reg) eor reg.16b, reg.16b, reg.16b;
+#define CLEAR_REG(reg) movi reg.16b, #0;
/*
* unsigned int
* _gcry_sha1_transform_armv8_ce (void *ctx, const unsigned char *data,
* size_t nblks)
*/
.align 3
.globl _gcry_sha1_transform_armv8_ce
ELF(.type _gcry_sha1_transform_armv8_ce,%function;)
_gcry_sha1_transform_armv8_ce:
/* input:
* x0: ctx, CTX
* x1: data (64*nblks bytes)
* x2: nblks
*/
CFI_STARTPROC();
cbz x2, .Ldo_nothing;
GET_DATA_POINTER(x4, .LK_VEC);
ld1 {vH0123.4s}, [x0] /* load h0,h1,h2,h3 */
ld1 {vK1.4s-vK4.4s}, [x4] /* load K1,K2,K3,K4 */
ldr sH4, [x0, #16] /* load h4 */
ld1 {vW0.16b-vW3.16b}, [x1], #64
mov vABCD.16b, vH0123.16b
rev32 vW0.16b, vW0.16b
rev32 vW1.16b, vW1.16b
rev32 vW2.16b, vW2.16b
do_add(vT0, vW0, vK1)
rev32 vW3.16b, vW3.16b
do_add(vT1, vW1, vK1)
.Loop:
do_rounds(c, sE1, sH4, T0, K1, W0, W1, W2, W3, do_add, do_sha1su0, _)
sub x2, x2, #1
do_rounds(c, sE0, sE1, T1, K1, W1, W2, W3, W0, do_add, do_sha1su0, do_sha1su1)
do_rounds(c, sE1, sE0, T0, K1, W2, W3, W0, W1, do_add, do_sha1su0, do_sha1su1)
do_rounds(c, sE0, sE1, T1, K2, W3, W0, W1, W2, do_add, do_sha1su0, do_sha1su1)
do_rounds(c, sE1, sE0, T0, K2, W0, W1, W2, W3, do_add, do_sha1su0, do_sha1su1)
do_rounds(p, sE0, sE1, T1, K2, W1, W2, W3, W0, do_add, do_sha1su0, do_sha1su1)
do_rounds(p, sE1, sE0, T0, K2, W2, W3, W0, W1, do_add, do_sha1su0, do_sha1su1)
do_rounds(p, sE0, sE1, T1, K2, W3, W0, W1, W2, do_add, do_sha1su0, do_sha1su1)
do_rounds(p, sE1, sE0, T0, K3, W0, W1, W2, W3, do_add, do_sha1su0, do_sha1su1)
do_rounds(p, sE0, sE1, T1, K3, W1, W2, W3, W0, do_add, do_sha1su0, do_sha1su1)
do_rounds(m, sE1, sE0, T0, K3, W2, W3, W0, W1, do_add, do_sha1su0, do_sha1su1)
do_rounds(m, sE0, sE1, T1, K3, W3, W0, W1, W2, do_add, do_sha1su0, do_sha1su1)
do_rounds(m, sE1, sE0, T0, K3, W0, W1, W2, W3, do_add, do_sha1su0, do_sha1su1)
do_rounds(m, sE0, sE1, T1, K4, W1, W2, W3, W0, do_add, do_sha1su0, do_sha1su1)
do_rounds(m, sE1, sE0, T0, K4, W2, W3, W0, W1, do_add, do_sha1su0, do_sha1su1)
do_rounds(p, sE0, sE1, T1, K4, W3, W0, W1, W2, do_add, do_sha1su0, do_sha1su1)
cbz x2, .Lend
ld1 {vW0.16b-vW1.16b}, [x1], #32 /* preload */
do_rounds(p, sE1, sE0, T0, K4, _ , _ , W2, W3, do_add, _, do_sha1su1)
rev32 vW0.16b, vW0.16b
ld1 {vW2.16b}, [x1], #16
rev32 vW1.16b, vW1.16b
do_rounds(p, sE0, sE1, T1, K4, _ , _ , W3, _ , do_add, _, _)
ld1 {vW3.16b}, [x1], #16
rev32 vW2.16b, vW2.16b
do_rounds(p, sE1, sE0, T0, _, _, _, _, _, _, _, _)
rev32 vW3.16b, vW3.16b
do_rounds(p, sE0, sE1, T1, _, _, _, _, _, _, _, _)
do_add(vT0, vW0, vK1)
add vH4.2s, vH4.2s, vE0.2s
add vABCD.4s, vABCD.4s, vH0123.4s
do_add(vT1, vW1, vK1)
mov vH0123.16b, vABCD.16b
b .Loop
.Lend:
do_rounds(p, sE1, sE0, T0, K4, _ , _ , W2, W3, do_add, _, do_sha1su1)
do_rounds(p, sE0, sE1, T1, K4, _ , _ , W3, _ , do_add, _, _)
do_rounds(p, sE1, sE0, T0, _, _, _, _, _, _, _, _)
do_rounds(p, sE0, sE1, T1, _, _, _, _, _, _, _, _)
add vH4.2s, vH4.2s, vE0.2s
add vH0123.4s, vH0123.4s, vABCD.4s
CLEAR_REG(vW0)
CLEAR_REG(vW1)
CLEAR_REG(vW2)
CLEAR_REG(vW3)
CLEAR_REG(vABCD)
CLEAR_REG(vE1)
CLEAR_REG(vE0)
str sH4, [x0, #16] /* store h4 */
st1 {vH0123.4s}, [x0] /* store h0,h1,h2,h3 */
CLEAR_REG(vH0123)
CLEAR_REG(vH4)
.Ldo_nothing:
mov x0, #0
ret
CFI_ENDPROC();
ELF(.size _gcry_sha1_transform_armv8_ce,.-_gcry_sha1_transform_armv8_ce;)
#endif
diff --git a/cipher/sha256-armv8-aarch32-ce.S b/cipher/sha256-armv8-aarch32-ce.S
index 2b17ab1b..95778b40 100644
--- a/cipher/sha256-armv8-aarch32-ce.S
+++ b/cipher/sha256-armv8-aarch32-ce.S
@@ -1,231 +1,231 @@
/* sha256-armv8-aarch32-ce.S - ARM/CE accelerated SHA-256 transform function
* Copyright (C) 2016 Jussi Kivilinna <jussi.kivilinna@iki.fi>
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <config.h>
#if defined(HAVE_ARM_ARCH_V6) && defined(__ARMEL__) && \
defined(HAVE_COMPATIBLE_GCC_ARM_PLATFORM_AS) && \
defined(HAVE_GCC_INLINE_ASM_AARCH32_CRYPTO) && defined(USE_SHA256)
.syntax unified
.arch armv8-a
.fpu crypto-neon-fp-armv8
.arm
.text
#ifdef __PIC__
# define GET_DATA_POINTER(reg, name, rtmp) \
ldr reg, 1f; \
ldr rtmp, 2f; \
b 3f; \
1: .word _GLOBAL_OFFSET_TABLE_-(3f+8); \
2: .word name(GOT); \
3: add reg, pc, reg; \
ldr reg, [reg, rtmp];
#else
# define GET_DATA_POINTER(reg, name, rtmp) ldr reg, =name
#endif
/* Constants */
.align 4
gcry_sha256_aarch32_ce_K:
.LK:
.long 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5
.long 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5
.long 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3
.long 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174
.long 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc
.long 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da
.long 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7
.long 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967
.long 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13
.long 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85
.long 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3
.long 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070
.long 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5
.long 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3
.long 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208
.long 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
/* Register macros */
#define qH0123 q0
#define qH4567 q1
#define qABCD0 q2
#define qABCD1 q3
#define qEFGH q4
#define qT0 q5
#define qT1 q6
#define qW0 q8
#define qW1 q9
#define qW2 q10
#define qW3 q11
#define qK0 q12
#define qK1 q13
#define qK2 q14
#define qK3 q15
/* Round macros */
#define _(...) /*_*/
#define do_loadk(nk0, nk1) vld1.32 {nk0-nk1},[lr]!;
#define do_add(a, b) vadd.u32 a, a, b;
#define do_sha256su0(w0, w1) sha256su0.32 w0, w1;
#define do_sha256su1(w0, w2, w3) sha256su1.32 w0, w2, w3;
#define do_rounds(k, nk0, nk1, w0, w1, w2, w3, loadk_fn, add_fn, su0_fn, su1_fn) \
loadk_fn( nk0, nk1 ); \
su0_fn( w0, w1 ); \
vmov qABCD1, qABCD0; \
sha256h.32 qABCD0, qEFGH, k; \
sha256h2.32 qEFGH, qABCD1, k; \
add_fn( nk0, w2 ); \
su1_fn( w0, w2, w3 );
/* Other functional macros */
-#define CLEAR_REG(reg) veor reg, reg;
+#define CLEAR_REG(reg) vmov.i8 reg, #0;
/*
* unsigned int
* _gcry_sha256_transform_armv8_ce (u32 state[8], const void *input_data,
* size_t num_blks)
*/
.align 3
.globl _gcry_sha256_transform_armv8_ce
.type _gcry_sha256_transform_armv8_ce,%function;
_gcry_sha256_transform_armv8_ce:
/* input:
* r0: ctx, CTX
* r1: data (64*nblks bytes)
* r2: nblks
*/
cmp r2, #0;
push {r4,lr};
beq .Ldo_nothing;
vpush {q4-q7};
GET_DATA_POINTER(r4, .LK, lr);
mov lr, r4
vld1.32 {qH0123-qH4567}, [r0] /* load state */
vld1.8 {qW0-qW1}, [r1]!
do_loadk(qK0, qK1)
vld1.8 {qW2-qW3}, [r1]!
vmov qABCD0, qH0123
vmov qEFGH, qH4567
vrev32.8 qW0, qW0
vrev32.8 qW1, qW1
vrev32.8 qW2, qW2
do_add(qK0, qW0)
vrev32.8 qW3, qW3
do_add(qK1, qW1)
.Loop:
do_rounds(qK0, qK2, qK3, qW0, qW1, qW2, qW3, do_loadk, do_add, do_sha256su0, do_sha256su1)
subs r2,r2,#1
do_rounds(qK1, qK3, _ , qW1, qW2, qW3, qW0, _ , do_add, do_sha256su0, do_sha256su1)
do_rounds(qK2, qK0, qK1, qW2, qW3, qW0, qW1, do_loadk, do_add, do_sha256su0, do_sha256su1)
do_rounds(qK3, qK1, _ , qW3, qW0, qW1, qW2, _ , do_add, do_sha256su0, do_sha256su1)
do_rounds(qK0, qK2, qK3, qW0, qW1, qW2, qW3, do_loadk, do_add, do_sha256su0, do_sha256su1)
do_rounds(qK1, qK3, _ , qW1, qW2, qW3, qW0, _ , do_add, do_sha256su0, do_sha256su1)
do_rounds(qK2, qK0, qK1, qW2, qW3, qW0, qW1, do_loadk, do_add, do_sha256su0, do_sha256su1)
do_rounds(qK3, qK1, _ , qW3, qW0, qW1, qW2, _ , do_add, do_sha256su0, do_sha256su1)
do_rounds(qK0, qK2, qK3, qW0, qW1, qW2, qW3, do_loadk, do_add, do_sha256su0, do_sha256su1)
do_rounds(qK1, qK3, _ , qW1, qW2, qW3, qW0, _ , do_add, do_sha256su0, do_sha256su1)
do_rounds(qK2, qK0, qK1, qW2, qW3, qW0, qW1, do_loadk, do_add, do_sha256su0, do_sha256su1)
do_rounds(qK3, qK1, _ , qW3, qW0, qW1, qW2, _ , do_add, do_sha256su0, do_sha256su1)
beq .Lend
do_rounds(qK0, qK2, qK3, qW0, _ , qW2, qW3, do_loadk, do_add, _, _)
vld1.8 {qW0}, [r1]!
mov lr, r4
do_rounds(qK1, qK3, _ , qW1, _ , qW3, _ , _ , do_add, _, _)
vld1.8 {qW1}, [r1]!
vrev32.8 qW0, qW0
do_rounds(qK2, qK0, qK1, qW2, _ , qW0, _ , do_loadk, do_add, _, _)
vrev32.8 qW1, qW1
vld1.8 {qW2}, [r1]!
do_rounds(qK3, qK1, _ , qW3, _ , qW1, _ , _ , do_add, _, _)
vld1.8 {qW3}, [r1]!
vadd.u32 qH0123, qABCD0
vadd.u32 qH4567, qEFGH
vrev32.8 qW2, qW2
vmov qABCD0, qH0123
vrev32.8 qW3, qW3
vmov qEFGH, qH4567
b .Loop
.Lend:
do_rounds(qK0, qK2, qK3, qW0, _ , qW2, qW3, do_loadk, do_add, _, _)
do_rounds(qK1, qK3, _ , qW1, _ , qW3, _ , _ , do_add, _, _)
do_rounds(qK2, _ , _ , qW2, _ , _ , _ , _ , _, _, _)
do_rounds(qK3, _ , _ , qW3, _ , _ , _ , _ , _, _, _)
CLEAR_REG(qW0)
CLEAR_REG(qW1)
CLEAR_REG(qW2)
CLEAR_REG(qW3)
CLEAR_REG(qK0)
CLEAR_REG(qK1)
CLEAR_REG(qK2)
CLEAR_REG(qK3)
vadd.u32 qH0123, qABCD0
vadd.u32 qH4567, qEFGH
CLEAR_REG(qABCD0)
CLEAR_REG(qABCD1)
CLEAR_REG(qEFGH)
vst1.32 {qH0123-qH4567}, [r0] /* store state */
CLEAR_REG(qH0123)
CLEAR_REG(qH4567)
vpop {q4-q7}
.Ldo_nothing:
mov r0, #0
pop {r4,pc}
.size _gcry_sha256_transform_armv8_ce,.-_gcry_sha256_transform_armv8_ce;
#endif
diff --git a/cipher/sha256-armv8-aarch64-ce.S b/cipher/sha256-armv8-aarch64-ce.S
index f57cae29..5c39e83e 100644
--- a/cipher/sha256-armv8-aarch64-ce.S
+++ b/cipher/sha256-armv8-aarch64-ce.S
@@ -1,215 +1,215 @@
/* sha256-armv8-aarch64-ce.S - ARM/CE accelerated SHA-256 transform function
* Copyright (C) 2016 Jussi Kivilinna <jussi.kivilinna@iki.fi>
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "asm-common-aarch64.h"
#if defined(__AARCH64EL__) && \
defined(HAVE_COMPATIBLE_GCC_AARCH64_PLATFORM_AS) && \
defined(HAVE_GCC_INLINE_ASM_AARCH64_CRYPTO) && defined(USE_SHA256)
.cpu generic+simd+crypto
.text
/* Constants */
.align 4
gcry_sha256_aarch64_ce_K:
.LK:
.long 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5
.long 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5
.long 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3
.long 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174
.long 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc
.long 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da
.long 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7
.long 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967
.long 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13
.long 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85
.long 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3
.long 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070
.long 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5
.long 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3
.long 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208
.long 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
/* Register macros */
#define vH0123 v0
#define vH4567 v1
#define vABCD0 v2
#define qABCD0 q2
#define vABCD1 v3
#define qABCD1 q3
#define vEFGH v4
#define qEFGH q4
#define vT0 v5
#define vT1 v6
#define vW0 v16
#define vW1 v17
#define vW2 v18
#define vW3 v19
#define vK0 v20
#define vK1 v21
#define vK2 v22
#define vK3 v23
/* Round macros */
#define _(...) /*_*/
#define do_loadk(nk0, nk1) ld1 {nk0.16b-nk1.16b},[x3],#32;
#define do_add(a, b) add a.4s, a.4s, b.4s;
#define do_sha256su0(w0, w1) sha256su0 w0.4s, w1.4s;
#define do_sha256su1(w0, w2, w3) sha256su1 w0.4s, w2.4s, w3.4s;
#define do_rounds(k, nk0, nk1, w0, w1, w2, w3, loadk_fn, add_fn, su0_fn, su1_fn) \
loadk_fn( v##nk0, v##nk1 ); \
su0_fn( v##w0, v##w1 ); \
mov vABCD1.16b, vABCD0.16b; \
sha256h qABCD0, qEFGH, v##k.4s; \
sha256h2 qEFGH, qABCD1, v##k.4s; \
add_fn( v##nk0, v##w2 ); \
su1_fn( v##w0, v##w2, v##w3 );
/* Other functional macros */
-#define CLEAR_REG(reg) eor reg.16b, reg.16b, reg.16b;
+#define CLEAR_REG(reg) movi reg.16b, #0;
/*
* unsigned int
* _gcry_sha256_transform_armv8_ce (u32 state[8], const void *input_data,
* size_t num_blks)
*/
.align 3
.globl _gcry_sha256_transform_armv8_ce
ELF(.type _gcry_sha256_transform_armv8_ce,%function;)
_gcry_sha256_transform_armv8_ce:
/* input:
 * x0: ctx, CTX
 * x1: data (64*nblks bytes)
 * x2: nblks
*/
CFI_STARTPROC();
cbz x2, .Ldo_nothing;
GET_DATA_POINTER(x3, .LK);
mov x4, x3
ld1 {vH0123.4s-vH4567.4s}, [x0] /* load state */
ld1 {vW0.16b-vW1.16b}, [x1], #32
do_loadk(vK0, vK1)
ld1 {vW2.16b-vW3.16b}, [x1], #32
mov vABCD0.16b, vH0123.16b
mov vEFGH.16b, vH4567.16b
rev32 vW0.16b, vW0.16b
rev32 vW1.16b, vW1.16b
rev32 vW2.16b, vW2.16b
do_add(vK0, vW0)
rev32 vW3.16b, vW3.16b
do_add(vK1, vW1)
.Loop:
do_rounds(K0, K2, K3, W0, W1, W2, W3, do_loadk, do_add, do_sha256su0, do_sha256su1)
sub x2,x2,#1
do_rounds(K1, K3, _ , W1, W2, W3, W0, _ , do_add, do_sha256su0, do_sha256su1)
do_rounds(K2, K0, K1, W2, W3, W0, W1, do_loadk, do_add, do_sha256su0, do_sha256su1)
do_rounds(K3, K1, _ , W3, W0, W1, W2, _ , do_add, do_sha256su0, do_sha256su1)
do_rounds(K0, K2, K3, W0, W1, W2, W3, do_loadk, do_add, do_sha256su0, do_sha256su1)
do_rounds(K1, K3, _ , W1, W2, W3, W0, _ , do_add, do_sha256su0, do_sha256su1)
do_rounds(K2, K0, K1, W2, W3, W0, W1, do_loadk, do_add, do_sha256su0, do_sha256su1)
do_rounds(K3, K1, _ , W3, W0, W1, W2, _ , do_add, do_sha256su0, do_sha256su1)
do_rounds(K0, K2, K3, W0, W1, W2, W3, do_loadk, do_add, do_sha256su0, do_sha256su1)
do_rounds(K1, K3, _ , W1, W2, W3, W0, _ , do_add, do_sha256su0, do_sha256su1)
do_rounds(K2, K0, K1, W2, W3, W0, W1, do_loadk, do_add, do_sha256su0, do_sha256su1)
do_rounds(K3, K1, _ , W3, W0, W1, W2, _ , do_add, do_sha256su0, do_sha256su1)
cbz x2, .Lend
do_rounds(K0, K2, K3, W0, _ , W2, W3, do_loadk, do_add, _, _)
ld1 {vW0.16b}, [x1], #16
mov x3, x4
do_rounds(K1, K3, _ , W1, _ , W3, _ , _ , do_add, _, _)
ld1 {vW1.16b}, [x1], #16
rev32 vW0.16b, vW0.16b
do_rounds(K2, K0, K1, W2, _ , W0, _ , do_loadk, do_add, _, _)
rev32 vW1.16b, vW1.16b
ld1 {vW2.16b}, [x1], #16
do_rounds(K3, K1, _ , W3, _ , W1, _ , _ , do_add, _, _)
ld1 {vW3.16b}, [x1], #16
do_add(vH0123, vABCD0)
do_add(vH4567, vEFGH)
rev32 vW2.16b, vW2.16b
mov vABCD0.16b, vH0123.16b
rev32 vW3.16b, vW3.16b
mov vEFGH.16b, vH4567.16b
b .Loop
.Lend:
do_rounds(K0, K2, K3, W0, _ , W2, W3, do_loadk, do_add, _, _)
do_rounds(K1, K3, _ , W1, _ , W3, _ , _ , do_add, _, _)
do_rounds(K2, _ , _ , W2, _ , _ , _ , _ , _, _, _)
do_rounds(K3, _ , _ , W3, _ , _ , _ , _ , _, _, _)
CLEAR_REG(vW0)
CLEAR_REG(vW1)
CLEAR_REG(vW2)
CLEAR_REG(vW3)
CLEAR_REG(vK0)
CLEAR_REG(vK1)
CLEAR_REG(vK2)
CLEAR_REG(vK3)
do_add(vH0123, vABCD0)
do_add(vH4567, vEFGH)
CLEAR_REG(vABCD0)
CLEAR_REG(vABCD1)
CLEAR_REG(vEFGH)
st1 {vH0123.4s-vH4567.4s}, [x0] /* store state */
CLEAR_REG(vH0123)
CLEAR_REG(vH4567)
.Ldo_nothing:
mov x0, #0
ret
CFI_ENDPROC();
ELF(.size _gcry_sha256_transform_armv8_ce,.-_gcry_sha256_transform_armv8_ce;)
#endif
diff --git a/cipher/sha512-armv7-neon.S b/cipher/sha512-armv7-neon.S
index 6596f2cd..2b186b47 100644
--- a/cipher/sha512-armv7-neon.S
+++ b/cipher/sha512-armv7-neon.S
@@ -1,450 +1,452 @@
/* sha512-armv7-neon.S - ARM/NEON assembly implementation of SHA-512 transform
*
* Copyright (C) 2013 Jussi Kivilinna <jussi.kivilinna@iki.fi>
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <config.h>
#if defined(HAVE_ARM_ARCH_V6) && defined(__ARMEL__) && \
defined(HAVE_COMPATIBLE_GCC_ARM_PLATFORM_AS) && \
defined(HAVE_GCC_INLINE_ASM_NEON)
.text
.syntax unified
.fpu neon
.arm
/* structure of SHA512_CONTEXT */
#define hd_a 0
#define hd_b ((hd_a) + 8)
#define hd_c ((hd_b) + 8)
#define hd_d ((hd_c) + 8)
#define hd_e ((hd_d) + 8)
#define hd_f ((hd_e) + 8)
#define hd_g ((hd_f) + 8)
/* register macros */
#define RK %r2
#define RA d0
#define RB d1
#define RC d2
#define RD d3
#define RE d4
#define RF d5
#define RG d6
#define RH d7
#define RT0 d8
#define RT1 d9
#define RT2 d10
#define RT3 d11
#define RT4 d12
#define RT5 d13
#define RT6 d14
#define RT7 d15
#define RT01q q4
#define RT23q q5
#define RT45q q6
#define RT67q q7
#define RW0 d16
#define RW1 d17
#define RW2 d18
#define RW3 d19
#define RW4 d20
#define RW5 d21
#define RW6 d22
#define RW7 d23
#define RW8 d24
#define RW9 d25
#define RW10 d26
#define RW11 d27
#define RW12 d28
#define RW13 d29
#define RW14 d30
#define RW15 d31
#define RW01q q8
#define RW23q q9
#define RW45q q10
#define RW67q q11
#define RW89q q12
#define RW1011q q13
#define RW1213q q14
#define RW1415q q15
+#define CLEAR_REG(reg) vmov.i8 reg, #0;
+
/***********************************************************************
* ARM assembly implementation of sha512 transform
***********************************************************************/
#define rounds2_0_63(ra, rb, rc, rd, re, rf, rg, rh, rw0, rw1, rw01q, rw2, rw23q, rw1415q, rw9, rw10, interleave_op, arg1) \
/* t1 = h + Sum1 (e) + Ch (e, f, g) + k[t] + w[t]; */ \
vshr.u64 RT2, re, #14; \
vshl.u64 RT3, re, #64 - 14; \
interleave_op(arg1); \
vshr.u64 RT4, re, #18; \
vshl.u64 RT5, re, #64 - 18; \
vld1.64 {RT0}, [RK]!; \
veor.64 RT23q, RT23q, RT45q; \
vshr.u64 RT4, re, #41; \
vshl.u64 RT5, re, #64 - 41; \
vadd.u64 RT0, RT0, rw0; \
veor.64 RT23q, RT23q, RT45q; \
vmov.64 RT7, re; \
veor.64 RT1, RT2, RT3; \
vbsl.64 RT7, rf, rg; \
\
vadd.u64 RT1, RT1, rh; \
vshr.u64 RT2, ra, #28; \
vshl.u64 RT3, ra, #64 - 28; \
vadd.u64 RT1, RT1, RT0; \
vshr.u64 RT4, ra, #34; \
vshl.u64 RT5, ra, #64 - 34; \
vadd.u64 RT1, RT1, RT7; \
\
/* h = Sum0 (a) + Maj (a, b, c); */ \
veor.64 RT23q, RT23q, RT45q; \
vshr.u64 RT4, ra, #39; \
vshl.u64 RT5, ra, #64 - 39; \
veor.64 RT0, ra, rb; \
veor.64 RT23q, RT23q, RT45q; \
vbsl.64 RT0, rc, rb; \
vadd.u64 rd, rd, RT1; /* d+=t1; */ \
veor.64 rh, RT2, RT3; \
\
/* t1 = g + Sum1 (d) + Ch (d, e, f) + k[t] + w[t]; */ \
vshr.u64 RT2, rd, #14; \
vshl.u64 RT3, rd, #64 - 14; \
vadd.u64 rh, rh, RT0; \
vshr.u64 RT4, rd, #18; \
vshl.u64 RT5, rd, #64 - 18; \
vadd.u64 rh, rh, RT1; /* h+=t1; */ \
vld1.64 {RT0}, [RK]!; \
veor.64 RT23q, RT23q, RT45q; \
vshr.u64 RT4, rd, #41; \
vshl.u64 RT5, rd, #64 - 41; \
vadd.u64 RT0, RT0, rw1; \
veor.64 RT23q, RT23q, RT45q; \
vmov.64 RT7, rd; \
veor.64 RT1, RT2, RT3; \
vbsl.64 RT7, re, rf; \
\
vadd.u64 RT1, RT1, rg; \
vshr.u64 RT2, rh, #28; \
vshl.u64 RT3, rh, #64 - 28; \
vadd.u64 RT1, RT1, RT0; \
vshr.u64 RT4, rh, #34; \
vshl.u64 RT5, rh, #64 - 34; \
vadd.u64 RT1, RT1, RT7; \
\
/* g = Sum0 (h) + Maj (h, a, b); */ \
veor.64 RT23q, RT23q, RT45q; \
vshr.u64 RT4, rh, #39; \
vshl.u64 RT5, rh, #64 - 39; \
veor.64 RT0, rh, ra; \
veor.64 RT23q, RT23q, RT45q; \
vbsl.64 RT0, rb, ra; \
vadd.u64 rc, rc, RT1; /* c+=t1; */ \
veor.64 rg, RT2, RT3; \
\
/* w[0] += S1 (w[14]) + w[9] + S0 (w[1]); */ \
/* w[1] += S1 (w[15]) + w[10] + S0 (w[2]); */ \
\
/**** S0(w[1:2]) */ \
\
/* w[0:1] += w[9:10] */ \
/* RT23q = rw1:rw2 */ \
vext.u64 RT23q, rw01q, rw23q, #1; \
vadd.u64 rw0, rw9; \
vadd.u64 rg, rg, RT0; \
vadd.u64 rw1, rw10;\
vadd.u64 rg, rg, RT1; /* g+=t1; */ \
\
vshr.u64 RT45q, RT23q, #1; \
vshl.u64 RT67q, RT23q, #64 - 1; \
vshr.u64 RT01q, RT23q, #8; \
veor.u64 RT45q, RT45q, RT67q; \
vshl.u64 RT67q, RT23q, #64 - 8; \
veor.u64 RT45q, RT45q, RT01q; \
vshr.u64 RT01q, RT23q, #7; \
veor.u64 RT45q, RT45q, RT67q; \
\
/**** S1(w[14:15]) */ \
vshr.u64 RT23q, rw1415q, #6; \
veor.u64 RT01q, RT01q, RT45q; \
vshr.u64 RT45q, rw1415q, #19; \
vshl.u64 RT67q, rw1415q, #64 - 19; \
veor.u64 RT23q, RT23q, RT45q; \
vshr.u64 RT45q, rw1415q, #61; \
veor.u64 RT23q, RT23q, RT67q; \
vshl.u64 RT67q, rw1415q, #64 - 61; \
veor.u64 RT23q, RT23q, RT45q; \
vadd.u64 rw01q, RT01q; /* w[0:1] += S(w[1:2]) */ \
veor.u64 RT01q, RT23q, RT67q;
#define vadd_RT01q(rw01q) \
/* w[0:1] += S(w[14:15]) */ \
vadd.u64 rw01q, RT01q;
#define dummy(_) /*_*/
#define rounds2_64_79(ra, rb, rc, rd, re, rf, rg, rh, rw0, rw1, interleave_op1, arg1, interleave_op2, arg2) \
/* t1 = h + Sum1 (e) + Ch (e, f, g) + k[t] + w[t]; */ \
vshr.u64 RT2, re, #14; \
vshl.u64 RT3, re, #64 - 14; \
interleave_op1(arg1); \
vshr.u64 RT4, re, #18; \
vshl.u64 RT5, re, #64 - 18; \
interleave_op2(arg2); \
vld1.64 {RT0}, [RK]!; \
veor.64 RT23q, RT23q, RT45q; \
vshr.u64 RT4, re, #41; \
vshl.u64 RT5, re, #64 - 41; \
vadd.u64 RT0, RT0, rw0; \
veor.64 RT23q, RT23q, RT45q; \
vmov.64 RT7, re; \
veor.64 RT1, RT2, RT3; \
vbsl.64 RT7, rf, rg; \
\
vadd.u64 RT1, RT1, rh; \
vshr.u64 RT2, ra, #28; \
vshl.u64 RT3, ra, #64 - 28; \
vadd.u64 RT1, RT1, RT0; \
vshr.u64 RT4, ra, #34; \
vshl.u64 RT5, ra, #64 - 34; \
vadd.u64 RT1, RT1, RT7; \
\
/* h = Sum0 (a) + Maj (a, b, c); */ \
veor.64 RT23q, RT23q, RT45q; \
vshr.u64 RT4, ra, #39; \
vshl.u64 RT5, ra, #64 - 39; \
veor.64 RT0, ra, rb; \
veor.64 RT23q, RT23q, RT45q; \
vbsl.64 RT0, rc, rb; \
vadd.u64 rd, rd, RT1; /* d+=t1; */ \
veor.64 rh, RT2, RT3; \
\
/* t1 = g + Sum1 (d) + Ch (d, e, f) + k[t] + w[t]; */ \
vshr.u64 RT2, rd, #14; \
vshl.u64 RT3, rd, #64 - 14; \
vadd.u64 rh, rh, RT0; \
vshr.u64 RT4, rd, #18; \
vshl.u64 RT5, rd, #64 - 18; \
vadd.u64 rh, rh, RT1; /* h+=t1; */ \
vld1.64 {RT0}, [RK]!; \
veor.64 RT23q, RT23q, RT45q; \
vshr.u64 RT4, rd, #41; \
vshl.u64 RT5, rd, #64 - 41; \
vadd.u64 RT0, RT0, rw1; \
veor.64 RT23q, RT23q, RT45q; \
vmov.64 RT7, rd; \
veor.64 RT1, RT2, RT3; \
vbsl.64 RT7, re, rf; \
\
vadd.u64 RT1, RT1, rg; \
vshr.u64 RT2, rh, #28; \
vshl.u64 RT3, rh, #64 - 28; \
vadd.u64 RT1, RT1, RT0; \
vshr.u64 RT4, rh, #34; \
vshl.u64 RT5, rh, #64 - 34; \
vadd.u64 RT1, RT1, RT7; \
\
/* g = Sum0 (h) + Maj (h, a, b); */ \
veor.64 RT23q, RT23q, RT45q; \
vshr.u64 RT4, rh, #39; \
vshl.u64 RT5, rh, #64 - 39; \
veor.64 RT0, rh, ra; \
veor.64 RT23q, RT23q, RT45q; \
vbsl.64 RT0, rb, ra; \
vadd.u64 rc, rc, RT1; /* c+=t1; */ \
veor.64 rg, RT2, RT3;
#define vadd_rg_RT0(rg) \
vadd.u64 rg, rg, RT0;
#define vadd_rg_RT1(rg) \
vadd.u64 rg, rg, RT1; /* g+=t1; */
.align 3
.globl _gcry_sha512_transform_armv7_neon
.type _gcry_sha512_transform_armv7_neon,%function;
_gcry_sha512_transform_armv7_neon:
/* Input:
* %r0: SHA512_CONTEXT
* %r1: data
* %r2: u64 k[] constants
* %r3: nblks
*/
push {%lr};
mov %lr, #0;
/* Load context to d0-d7 */
vld1.64 {RA-RD}, [%r0]!;
vld1.64 {RE-RH}, [%r0];
sub %r0, #(4*8);
/* Load input to w[16], d16-d31 */
/* NOTE: Assumes that on ARMv7 unaligned accesses are always allowed. */
vld1.64 {RW0-RW3}, [%r1]!;
vld1.64 {RW4-RW7}, [%r1]!;
vld1.64 {RW8-RW11}, [%r1]!;
vld1.64 {RW12-RW15}, [%r1]!;
#ifdef __ARMEL__
/* byteswap */
vrev64.8 RW01q, RW01q;
vrev64.8 RW23q, RW23q;
vrev64.8 RW45q, RW45q;
vrev64.8 RW67q, RW67q;
vrev64.8 RW89q, RW89q;
vrev64.8 RW1011q, RW1011q;
vrev64.8 RW1213q, RW1213q;
vrev64.8 RW1415q, RW1415q;
#endif
/* EABI says that d8-d15 must be preserved by callee. */
vpush {RT0-RT7};
.Loop:
rounds2_0_63(RA, RB, RC, RD, RE, RF, RG, RH, RW0, RW1, RW01q, RW2, RW23q, RW1415q, RW9, RW10, dummy, _);
b .Lenter_rounds;
.Loop_rounds:
rounds2_0_63(RA, RB, RC, RD, RE, RF, RG, RH, RW0, RW1, RW01q, RW2, RW23q, RW1415q, RW9, RW10, vadd_RT01q, RW1415q);
.Lenter_rounds:
rounds2_0_63(RG, RH, RA, RB, RC, RD, RE, RF, RW2, RW3, RW23q, RW4, RW45q, RW01q, RW11, RW12, vadd_RT01q, RW01q);
rounds2_0_63(RE, RF, RG, RH, RA, RB, RC, RD, RW4, RW5, RW45q, RW6, RW67q, RW23q, RW13, RW14, vadd_RT01q, RW23q);
rounds2_0_63(RC, RD, RE, RF, RG, RH, RA, RB, RW6, RW7, RW67q, RW8, RW89q, RW45q, RW15, RW0, vadd_RT01q, RW45q);
rounds2_0_63(RA, RB, RC, RD, RE, RF, RG, RH, RW8, RW9, RW89q, RW10, RW1011q, RW67q, RW1, RW2, vadd_RT01q, RW67q);
rounds2_0_63(RG, RH, RA, RB, RC, RD, RE, RF, RW10, RW11, RW1011q, RW12, RW1213q, RW89q, RW3, RW4, vadd_RT01q, RW89q);
add %lr, #16;
rounds2_0_63(RE, RF, RG, RH, RA, RB, RC, RD, RW12, RW13, RW1213q, RW14, RW1415q, RW1011q, RW5, RW6, vadd_RT01q, RW1011q);
cmp %lr, #64;
rounds2_0_63(RC, RD, RE, RF, RG, RH, RA, RB, RW14, RW15, RW1415q, RW0, RW01q, RW1213q, RW7, RW8, vadd_RT01q, RW1213q);
bne .Loop_rounds;
subs %r3, #1;
rounds2_64_79(RA, RB, RC, RD, RE, RF, RG, RH, RW0, RW1, vadd_RT01q, RW1415q, dummy, _);
rounds2_64_79(RG, RH, RA, RB, RC, RD, RE, RF, RW2, RW3, vadd_rg_RT0, RG, vadd_rg_RT1, RG);
beq .Lhandle_tail;
vld1.64 {RW0-RW3}, [%r1]!;
rounds2_64_79(RE, RF, RG, RH, RA, RB, RC, RD, RW4, RW5, vadd_rg_RT0, RE, vadd_rg_RT1, RE);
rounds2_64_79(RC, RD, RE, RF, RG, RH, RA, RB, RW6, RW7, vadd_rg_RT0, RC, vadd_rg_RT1, RC);
#ifdef __ARMEL__
vrev64.8 RW01q, RW01q;
vrev64.8 RW23q, RW23q;
#endif
vld1.64 {RW4-RW7}, [%r1]!;
rounds2_64_79(RA, RB, RC, RD, RE, RF, RG, RH, RW8, RW9, vadd_rg_RT0, RA, vadd_rg_RT1, RA);
rounds2_64_79(RG, RH, RA, RB, RC, RD, RE, RF, RW10, RW11, vadd_rg_RT0, RG, vadd_rg_RT1, RG);
#ifdef __ARMEL__
vrev64.8 RW45q, RW45q;
vrev64.8 RW67q, RW67q;
#endif
vld1.64 {RW8-RW11}, [%r1]!;
rounds2_64_79(RE, RF, RG, RH, RA, RB, RC, RD, RW12, RW13, vadd_rg_RT0, RE, vadd_rg_RT1, RE);
rounds2_64_79(RC, RD, RE, RF, RG, RH, RA, RB, RW14, RW15, vadd_rg_RT0, RC, vadd_rg_RT1, RC);
#ifdef __ARMEL__
vrev64.8 RW89q, RW89q;
vrev64.8 RW1011q, RW1011q;
#endif
vld1.64 {RW12-RW15}, [%r1]!;
vadd_rg_RT0(RA);
vadd_rg_RT1(RA);
/* Load context */
vld1.64 {RT0-RT3}, [%r0]!;
vld1.64 {RT4-RT7}, [%r0];
sub %r0, #(4*8);
#ifdef __ARMEL__
vrev64.8 RW1213q, RW1213q;
vrev64.8 RW1415q, RW1415q;
#endif
vadd.u64 RA, RT0;
vadd.u64 RB, RT1;
vadd.u64 RC, RT2;
vadd.u64 RD, RT3;
vadd.u64 RE, RT4;
vadd.u64 RF, RT5;
vadd.u64 RG, RT6;
vadd.u64 RH, RT7;
/* Store the first half of context */
vst1.64 {RA-RD}, [%r0]!;
sub RK, $(8*80);
vst1.64 {RE-RH}, [%r0]; /* Store the last half of context */
mov %lr, #0;
sub %r0, #(4*8);
b .Loop;
.ltorg
.Lhandle_tail:
rounds2_64_79(RE, RF, RG, RH, RA, RB, RC, RD, RW4, RW5, vadd_rg_RT0, RE, vadd_rg_RT1, RE);
rounds2_64_79(RC, RD, RE, RF, RG, RH, RA, RB, RW6, RW7, vadd_rg_RT0, RC, vadd_rg_RT1, RC);
rounds2_64_79(RA, RB, RC, RD, RE, RF, RG, RH, RW8, RW9, vadd_rg_RT0, RA, vadd_rg_RT1, RA);
rounds2_64_79(RG, RH, RA, RB, RC, RD, RE, RF, RW10, RW11, vadd_rg_RT0, RG, vadd_rg_RT1, RG);
rounds2_64_79(RE, RF, RG, RH, RA, RB, RC, RD, RW12, RW13, vadd_rg_RT0, RE, vadd_rg_RT1, RE);
rounds2_64_79(RC, RD, RE, RF, RG, RH, RA, RB, RW14, RW15, vadd_rg_RT0, RC, vadd_rg_RT1, RC);
/* Load context to d16-d23 */
vld1.64 {RW0-RW3}, [%r0]!;
vadd_rg_RT0(RA);
vld1.64 {RW4-RW7}, [%r0];
vadd_rg_RT1(RA);
sub %r0, #(4*8);
vadd.u64 RA, RW0;
vadd.u64 RB, RW1;
vadd.u64 RC, RW2;
vadd.u64 RD, RW3;
vadd.u64 RE, RW4;
vadd.u64 RF, RW5;
vadd.u64 RG, RW6;
vadd.u64 RH, RW7;
/* Store the first half of context */
vst1.64 {RA-RD}, [%r0]!;
/* Clear used registers */
/* d16-d31 */
- veor.u64 RW01q, RW01q;
- veor.u64 RW23q, RW23q;
- veor.u64 RW45q, RW45q;
- veor.u64 RW67q, RW67q;
+ CLEAR_REG(RW01q);
+ CLEAR_REG(RW23q);
+ CLEAR_REG(RW45q);
+ CLEAR_REG(RW67q);
vst1.64 {RE-RH}, [%r0]; /* Store the last half of context */
- veor.u64 RW89q, RW89q;
- veor.u64 RW1011q, RW1011q;
- veor.u64 RW1213q, RW1213q;
- veor.u64 RW1415q, RW1415q;
+ CLEAR_REG(RW89q);
+ CLEAR_REG(RW1011q);
+ CLEAR_REG(RW1213q);
+ CLEAR_REG(RW1415q);
/* d8-d15 */
vpop {RT0-RT7};
/* d0-d7 (q0-q3) */
- veor.u64 %q0, %q0;
- veor.u64 %q1, %q1;
- veor.u64 %q2, %q2;
- veor.u64 %q3, %q3;
+ CLEAR_REG(%q0);
+ CLEAR_REG(%q1);
+ CLEAR_REG(%q2);
+ CLEAR_REG(%q3);
eor %r0, %r0;
pop {%pc};
.size _gcry_sha512_transform_armv7_neon,.-_gcry_sha512_transform_armv7_neon;
#endif
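The change common to every file above is the vector-register clearing idiom: the self-EOR forms (veor reg, reg on AArch32 NEON; eor reg.16b, reg.16b, reg.16b on AArch64 SIMD) are replaced with an immediate-zero move (vmov.i8 reg, #0; movi reg.16b, #0). A minimal sketch of the two idioms, assuming a scratch vector register (q0 / v0, illustration only, not taken from the patch):

    /* AArch32 NEON: old vs. new way to wipe q0 (illustration only) */
    veor     q0, q0, q0        /* XOR with itself: reads q0 before writing it  */
    vmov.i8  q0, #0            /* move immediate zero: no source register read */

    /* AArch64 SIMD: old vs. new way to wipe v0 (illustration only) */
    eor      v0.16b, v0.16b, v0.16b
    movi     v0.16b, #0

Both forms leave the register all-zero; the immediate form simply carries no dependency on the register's previous contents, which is presumably why the patch prefers it for wiping key and state material before the functions return.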