// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.

#include <ring-core/asm_base.h>

#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_ARM) && defined(__ELF__)
#include <ring-core/arm_arch.h>

#if __ARM_MAX_ARCH__>=7
.text
.fpu	neon
.code	32
#undef	__thumb2__
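
@ A note on the .byte sequences below: they are the raw encodings of the
@ polynomial-multiply instructions (VMULL.P64 on AArch32), annotated with
@ their AArch64-style "pmull"/"pmull2" names. Emitting them as bytes is a
@ CRYPTOGAMS idiom that lets the file assemble even when the assembler
@ does not know the Crypto-extension mnemonics.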
.globl	gcm_init_clmul
.hidden	gcm_init_clmul
.type	gcm_init_clmul,%function
.align	4
gcm_init_clmul:
	vld1.64	{q9},[r1]		@ load input H
	vmov.i8	q11,#0xe1
	vshl.i64	q11,q11,#57		@ 0xc2.0
	vext.8	q3,q9,q9,#8
	vshr.u64	q10,q11,#63
	vdup.32	q9,d18[1]
	vext.8	q8,q10,q11,#8		@ t0=0xc2....01
	vshr.u64	q10,q3,#63
	vshr.s32	q9,q9,#31		@ broadcast carry bit
	vand	q10,q10,q8
	vshl.i64	q3,q3,#1
	vext.8	q10,q10,q10,#8
	vand	q8,q8,q9
	vorr	q3,q3,q10		@ H<<<=1
	veor	q12,q3,q8		@ twisted H
	vst1.64	{q12},[r0]!		@ store Htable[0]
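
	@ Htable[0] now holds the "twisted" H: H shifted left by one bit and
	@ reduced modulo the GHASH polynomial x^128 + x^7 + x^2 + x + 1, with
	@ the carry out of bit 127 folded back in via the 0xc2....01 constant
	@ in q8. Keeping H in this form is what lets the two-phase reduction
	@ below work directly on the bit-reflected operands GHASH is defined on.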

	@ calculate H^2
	vext.8	q8,q12,q12,#8		@ Karatsuba pre-processing
.byte	0xa8,0x0e,0xa8,0xf2	@ pmull q0,q12,q12
	veor	q8,q8,q12
.byte	0xa9,0x4e,0xa9,0xf2	@ pmull2 q2,q12,q12
.byte	0xa0,0x2e,0xa0,0xf2	@ pmull q1,q8,q8
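
	@ Karatsuba over GF(2): a 128x128-bit carry-less product naively takes
	@ four 64x64 multiplies, but three suffice, since
	@ (a.lo^a.hi)·(b.lo^b.hi) ^ a.lo·b.lo ^ a.hi·b.hi = a.lo·b.hi ^ a.hi·b.lo;
	@ the XOR fix-up is the "post-processing" step below.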

	vext.8	q9,q0,q2,#8		@ Karatsuba post-processing
	veor	q10,q0,q2
	veor	q1,q1,q9
	veor	q1,q1,q10
.byte	0x26,0x4e,0xe0,0xf2	@ pmull q10,q0,q11		@ 1st phase

	vmov	d4,d3		@ Xh|Xm - 256-bit result
	vmov	d3,d0		@ Xm is rotated Xl
	veor	q0,q1,q10

	vext.8	q10,q0,q0,#8		@ 2nd phase
.byte	0x26,0x0e,0xa0,0xf2	@ pmull q0,q0,q11
	veor	q10,q10,q2
	veor	q14,q0,q10
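
	@ The two "phases" above fold the 256-bit product back to 128 bits:
	@ each phase multiplies the low 64-bit word by the reflected reduction
	@ constant composed in q11 and XORs the result into the upper part, so
	@ after the second fold only 128 bits remain.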

	vext.8	q9,q14,q14,#8		@ Karatsuba pre-processing
	veor	q9,q9,q14
	vext.8	q13,q8,q9,#8		@ pack Karatsuba pre-processed
	vst1.64	{q13,q14},[r0]!	@ store Htable[1..2]
	bx	lr
.size	gcm_init_clmul,.-gcm_init_clmul
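
@ On return Htable holds three vectors: [0] the twisted H, [1] the packed
@ Karatsuba sums (H.lo^H.hi in one half, H^2.lo^H^2.hi in the other), and
@ [2] the twisted H^2; this is exactly the layout that gcm_gmult_clmul and
@ gcm_ghash_clmul load below.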
.globl	gcm_gmult_clmul
.hidden	gcm_gmult_clmul
.type	gcm_gmult_clmul,%function
.align	4
gcm_gmult_clmul:
	vld1.64	{q9},[r0]		@ load Xi
	vmov.i8	q11,#0xe1
	vld1.64	{q12,q13},[r1]	@ load twisted H, ...
	vshl.u64	q11,q11,#57
#ifndef __ARMEB__
	vrev64.8	q9,q9
#endif
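	@ Xi is kept in the byte order of the GHASH specification; on
	@ little-endian targets vrev64.8 reverses each 64-bit lane so the
	@ pmull-based arithmetic below sees the lane order the reduction
	@ constant was built for.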
	vext.8	q3,q9,q9,#8

.byte	0x86,0x0e,0xa8,0xf2	@ pmull q0,q12,q3		@ H.lo·Xi.lo
	veor	q9,q9,q3		@ Karatsuba pre-processing
.byte	0x87,0x4e,0xa9,0xf2	@ pmull2 q2,q12,q3		@ H.hi·Xi.hi
.byte	0xa2,0x2e,0xaa,0xf2	@ pmull q1,q13,q9		@ (H.lo+H.hi)·(Xi.lo+Xi.hi)
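
	@ One Karatsuba multiply: q0 and q2 now hold the low and high 128-bit
	@ halves of Xi·H, and q1 the raw middle term, fixed up just below
	@ before the same two-phase reduction used in gcm_init_clmul.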

	vext.8	q9,q0,q2,#8		@ Karatsuba post-processing
	veor	q10,q0,q2
	veor	q1,q1,q9
	veor	q1,q1,q10
.byte	0x26,0x4e,0xe0,0xf2	@ pmull q10,q0,q11		@ 1st phase of reduction

	vmov	d4,d3		@ Xh|Xm - 256-bit result
	vmov	d3,d0		@ Xm is rotated Xl
	veor	q0,q1,q10

	vext.8	q10,q0,q0,#8		@ 2nd phase of reduction
.byte	0x26,0x0e,0xa0,0xf2	@ pmull q0,q0,q11
	veor	q10,q10,q2
	veor	q0,q0,q10

#ifndef __ARMEB__
	vrev64.8	q0,q0
#endif
	vext.8	q0,q0,q0,#8
	vst1.64	{q0},[r0]		@ write out Xi

	bx	lr
.size	gcm_gmult_clmul,.-gcm_gmult_clmul
.globl	gcm_ghash_clmul
.hidden	gcm_ghash_clmul
.type	gcm_ghash_clmul,%function
.align	4
gcm_ghash_clmul:
	vstmdb	sp!,{d8,d9,d10,d11,d12,d13,d14,d15}		@ 32-bit ABI says so
	vld1.64	{q0},[r0]		@ load [rotated] Xi
						@ "[rotated]" means that
						@ loaded value would have
						@ to be rotated in order to
						@ make it appear as in
						@ algorithm specification
	subs	r3,r3,#32		@ see if r3 is 32 or larger
	mov	r12,#16		@ r12 is used as post-
						@ increment for input pointer;
						@ as loop is modulo-scheduled
						@ r12 is zeroed just in time
						@ to preclude overstepping
						@ inp[len], which means that
						@ last block[s] are actually
						@ loaded twice, but last
						@ copy is not processed
	vld1.64	{q12,q13},[r1]!	@ load twisted H, ..., H^2
	vmov.i8	q11,#0xe1
	vld1.64	{q14},[r1]
	moveq	r12,#0			@ is it time to zero r12?
	vext.8	q0,q0,q0,#8		@ rotate Xi
	vld1.64	{q8},[r2]!	@ load [rotated] I[0]
	vshl.u64	q11,q11,#57		@ compose 0xc2.0 constant
#ifndef __ARMEB__
	vrev64.8	q8,q8
	vrev64.8	q0,q0
#endif
	vext.8	q3,q8,q8,#8		@ rotate I[0]
	blo	.Lodd_tail_v8		@ r3 was less than 32
	vld1.64	{q9},[r2],r12	@ load [rotated] I[1]
#ifndef __ARMEB__
	vrev64.8	q9,q9
#endif
	vext.8	q7,q9,q9,#8
	veor	q3,q3,q0		@ I[i]^=Xi
.byte	0x8e,0x8e,0xa8,0xf2	@ pmull q4,q12,q7		@ H·Ii+1
	veor	q9,q9,q7		@ Karatsuba pre-processing
.byte	0x8f,0xce,0xa9,0xf2	@ pmull2 q6,q12,q7
	b	.Loop_mod2x_v8

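@ Main-loop strategy: two blocks per iteration via the aggregated form
@ (Xi ^ I[i])·H^2 ^ I[i+1]·H, so one reduction covers two multiplies.
@ The I[i+1]·H product (q4/q6) was started above, and each iteration keeps
@ it one step ahead of the H^2 multiply.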
.align	4
.Loop_mod2x_v8:
	vext.8	q10,q3,q3,#8
	subs	r3,r3,#32		@ is there more data?
.byte	0x86,0x0e,0xac,0xf2	@ pmull q0,q14,q3		@ H^2.lo·Xi.lo
	movlo	r12,#0			@ is it time to zero r12?

.byte	0xa2,0xae,0xaa,0xf2	@ pmull q5,q13,q9
	veor	q10,q10,q3		@ Karatsuba pre-processing
.byte	0x87,0x4e,0xad,0xf2	@ pmull2 q2,q14,q3		@ H^2.hi·Xi.hi
	veor	q0,q0,q4		@ accumulate
.byte	0xa5,0x2e,0xab,0xf2	@ pmull2 q1,q13,q10		@ (H^2.lo+H^2.hi)·(Xi.lo+Xi.hi)
	vld1.64	{q8},[r2],r12	@ load [rotated] I[i+2]

	veor	q2,q2,q6
	moveq	r12,#0			@ is it time to zero r12?
	veor	q1,q1,q5

	vext.8	q9,q0,q2,#8		@ Karatsuba post-processing
	veor	q10,q0,q2
	veor	q1,q1,q9
	vld1.64	{q9},[r2],r12	@ load [rotated] I[i+3]
#ifndef __ARMEB__
	vrev64.8	q8,q8
#endif
	veor	q1,q1,q10
.byte	0x26,0x4e,0xe0,0xf2	@ pmull q10,q0,q11		@ 1st phase of reduction

#ifndef __ARMEB__
	vrev64.8	q9,q9
#endif
	vmov	d4,d3		@ Xh|Xm - 256-bit result
	vmov	d3,d0		@ Xm is rotated Xl
	vext.8	q7,q9,q9,#8
	vext.8	q3,q8,q8,#8
	veor	q0,q1,q10
.byte	0x8e,0x8e,0xa8,0xf2	@ pmull q4,q12,q7		@ H·Ii+1
	veor	q3,q3,q2		@ accumulate q3 early

	vext.8	q10,q0,q0,#8		@ 2nd phase of reduction
.byte	0x26,0x0e,0xa0,0xf2	@ pmull q0,q0,q11
	veor	q3,q3,q10
	veor	q9,q9,q7		@ Karatsuba pre-processing
	veor	q3,q3,q0
.byte	0x8f,0xce,0xa9,0xf2	@ pmull2 q6,q12,q7
	bhs	.Loop_mod2x_v8		@ there were at least 32 more bytes

	veor	q2,q2,q10
	vext.8	q3,q8,q8,#8		@ re-construct q3
	adds	r3,r3,#32		@ re-construct r3
	veor	q0,q0,q2		@ re-construct q0
	beq	.Ldone_v8		@ is r3 zero?
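	@ Loop exit: the deferred pieces are recombined into Xi above. If the
	@ remaining length is zero we are done; otherwise exactly one 16-byte
	@ block remains and falls through to the single-block tail below.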
.Lodd_tail_v8:
	vext.8	q10,q0,q0,#8
	veor	q3,q3,q0		@ inp^=Xi
	veor	q9,q8,q10		@ q9 is rotated inp^Xi

.byte	0x86,0x0e,0xa8,0xf2	@ pmull q0,q12,q3		@ H.lo·Xi.lo
	veor	q9,q9,q3		@ Karatsuba pre-processing
.byte	0x87,0x4e,0xa9,0xf2	@ pmull2 q2,q12,q3		@ H.hi·Xi.hi
.byte	0xa2,0x2e,0xaa,0xf2	@ pmull q1,q13,q9		@ (H.lo+H.hi)·(Xi.lo+Xi.hi)

	vext.8	q9,q0,q2,#8		@ Karatsuba post-processing
	veor	q10,q0,q2
	veor	q1,q1,q9
	veor	q1,q1,q10
.byte	0x26,0x4e,0xe0,0xf2	@ pmull q10,q0,q11		@ 1st phase of reduction

	vmov	d4,d3		@ Xh|Xm - 256-bit result
	vmov	d3,d0		@ Xm is rotated Xl
	veor	q0,q1,q10

	vext.8	q10,q0,q0,#8		@ 2nd phase of reduction
.byte	0x26,0x0e,0xa0,0xf2	@ pmull q0,q0,q11
	veor	q10,q10,q2
	veor	q0,q0,q10

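	@ The tail above is the same single multiply-and-reduce sequence as
	@ gcm_gmult_clmul, applied to the final block.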
.Ldone_v8:
#ifndef __ARMEB__
	vrev64.8	q0,q0
#endif
	vext.8	q0,q0,q0,#8
	vst1.64	{q0},[r0]		@ write out Xi

	vldmia	sp!,{d8,d9,d10,d11,d12,d13,d14,d15}		@ 32-bit ABI says so
	bx	lr
.size	gcm_ghash_clmul,.-gcm_ghash_clmul
.byte	71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align	2
.align	2
#endif
#endif  // !OPENSSL_NO_ASM && defined(OPENSSL_ARM) && defined(__ELF__)