// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.

#include <openssl/asm_base.h>

#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(_WIN32)
#include <openssl/arm_arch.h>

.section	.rodata


.align	7	// totally strategic alignment
_vpaes_consts:
Lk_mc_forward:	//	mc_forward
.quad	0x0407060500030201, 0x0C0F0E0D080B0A09
.quad	0x080B0A0904070605, 0x000302010C0F0E0D
.quad	0x0C0F0E0D080B0A09, 0x0407060500030201
.quad	0x000302010C0F0E0D, 0x080B0A0904070605
Lk_mc_backward:	//	mc_backward
.quad	0x0605040702010003, 0x0E0D0C0F0A09080B
.quad	0x020100030E0D0C0F, 0x0A09080B06050407
.quad	0x0E0D0C0F0A09080B, 0x0605040702010003
.quad	0x0A09080B06050407, 0x020100030E0D0C0F
Lk_sr:	//	sr
.quad	0x0706050403020100, 0x0F0E0D0C0B0A0908
.quad	0x030E09040F0A0500, 0x0B06010C07020D08
.quad	0x0F060D040B020900, 0x070E050C030A0108
.quad	0x0B0E0104070A0D00, 0x0306090C0F020508

//
// "Hot" constants
//
Lk_inv:	//	inv, inva
.quad	0x0E05060F0D080180, 0x040703090A0B0C02
.quad	0x01040A060F0B0780, 0x030D0E0C02050809
Lk_ipt:	//	input transform (lo, hi)
.quad	0xC2B2E8985A2A7000, 0xCABAE09052227808
.quad	0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81
Lk_sbo:	//	sbou, sbot
.quad	0xD0D26D176FBDC700, 0x15AABF7AC502A878
.quad	0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA
Lk_sb1:	//	sb1u, sb1t
.quad	0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF
.quad	0xB19BE18FCB503E00, 0xA5DF7A6E142AF544
Lk_sb2:	//	sb2u, sb2t
.quad	0x69EB88400AE12900, 0xC2A163C8AB82234A
.quad	0xE27A93C60B712400, 0x5EB7E955BC982FCD

//
//  Decryption stuff
//
Lk_dipt:	//	decryption input transform
.quad	0x0F505B040B545F00, 0x154A411E114E451A
.quad	0x86E383E660056500, 0x12771772F491F194
Lk_dsbo:	//	decryption sbox final output
.quad	0x1387EA537EF94000, 0xC7AA6DB9D4943E2D
.quad	0x12D7560F93441D00, 0xCA4B8159D8C58E9C
Lk_dsb9:	//	decryption sbox output *9*u, *9*t
.quad	0x851C03539A86D600, 0xCAD51F504F994CC9
.quad	0xC03B1789ECD74900, 0x725E2C9EB2FBA565
Lk_dsbd:	//	decryption sbox output *D*u, *D*t
.quad	0x7D57CCDFE6B1A200, 0xF56E9B13882A4439
.quad	0x3CE2FAF724C6CB00, 0x2931180D15DEEFD3
Lk_dsbb:	//	decryption sbox output *B*u, *B*t
.quad	0xD022649296B44200, 0x602646F6B0F2D404
.quad	0xC19498A6CD596700, 0xF3FF0C3E3255AA6B
Lk_dsbe:	//	decryption sbox output *E*u, *E*t
.quad	0x46F2929626D4D000, 0x2242600464B4F6B0
.quad	0x0C55A6CDFFAAC100, 0x9467F36B98593E32

//
//  Key schedule constants
//
Lk_dksd:	//	decryption key schedule: invskew x*D
.quad	0xFEB91A5DA3E44700, 0x0740E3A45A1DBEF9
.quad	0x41C277F4B5368300, 0x5FDC69EAAB289D1E
Lk_dksb:	//	decryption key schedule: invskew x*B
.quad	0x9A4FCA1F8550D500, 0x03D653861CC94C99
.quad	0x115BEDA7B6FC4A00, 0xD993256F7E3482C8
Lk_dkse:	//	decryption key schedule: invskew x*E + 0x63
.quad	0xD5031CCA1FC9D600, 0x53859A4C994F5086
.quad	0xA23196054FDC7BE8, 0xCD5EF96A20B31487
Lk_dks9:	//	decryption key schedule: invskew x*9
.quad	0xB6116FC87ED9A700, 0x4AED933482255BFC
.quad	0x4576516227143300, 0x8BB89FACE9DAFDCE

Lk_rcon:	//	rcon
.quad	0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81

Lk_opt:	//	output transform
.quad	0xFF9F4929D6B66000, 0xF7974121DEBE6808
.quad	0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0
Lk_deskew:	//	deskew tables: inverts the sbox's "skew"
.quad	0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A
.quad	0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77

.byte	86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105,111,110,32,65,69,83,32,102,111,114,32,65,82,77,118,56,44,32,77,105,107,101,32,72,97,109,98,117,114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105,118,101,114,115,105,116,121,41,0
.align	2

.align	6

.text
##
##  _aes_preheat
##
##  Fills register %r10 -> .aes_consts (so you can -fPIC)
##  and %xmm9-%xmm15 as specified below.
##
.def _vpaes_encrypt_preheat
   .type 32
.endef
.align	4
_vpaes_encrypt_preheat:
	adrp	x10, Lk_inv
	add	x10, x10, :lo12:Lk_inv
	movi	v17.16b, #0x0f
	ld1	{v18.2d,v19.2d}, [x10],#32	// Lk_inv
	ld1	{v20.2d,v21.2d,v22.2d,v23.2d}, [x10],#64	// Lk_ipt, Lk_sbo
	ld1	{v24.2d,v25.2d,v26.2d,v27.2d}, [x10]		// Lk_sb1, Lk_sb2
	ret


##
##  _aes_encrypt_core
##
##  AES-encrypt %xmm0.
##
##  Inputs:
##     %xmm0 = input
##     %xmm9-%xmm15 as in _vpaes_preheat
##    (%rdx) = scheduled keys
##
##  Output in %xmm0
##  Clobbers  %xmm1-%xmm5, %r9, %r10, %r11, %rax
##  Preserves %xmm6 - %xmm8 so you get some local vectors
##
##
.def _vpaes_encrypt_core
   .type 32
.endef
.align	4
_vpaes_encrypt_core:
	mov	x9, x2
	ldr	w8, [x2,#240]			// pull rounds
	adrp	x11, Lk_mc_forward+16
	add	x11, x11, :lo12:Lk_mc_forward+16
						// vmovdqa	.Lk_ipt(%rip),	%xmm2	# iptlo
	ld1	{v16.2d}, [x9], #16		// vmovdqu	(%r9),	%xmm5		# round0 key
	and	v1.16b, v7.16b, v17.16b		// vpand	%xmm9,	%xmm0,	%xmm1
	ushr	v0.16b, v7.16b, #4		// vpsrlb	$4,	%xmm0,	%xmm0
	tbl	v1.16b, {v20.16b}, v1.16b	// vpshufb	%xmm1,	%xmm2,	%xmm1
						// vmovdqa	.Lk_ipt+16(%rip), %xmm3	# ipthi
	tbl	v2.16b, {v21.16b}, v0.16b	// vpshufb	%xmm0,	%xmm3,	%xmm2
	eor	v0.16b, v1.16b, v16.16b		// vpxor	%xmm5,	%xmm1,	%xmm0
	eor	v0.16b, v0.16b, v2.16b		// vpxor	%xmm2,	%xmm0,	%xmm0
	b	Lenc_entry

.align	4
Lenc_loop:
	// middle of middle round
	add	x10, x11, #0x40
	tbl	v4.16b, {v25.16b}, v2.16b		// vpshufb	%xmm2,	%xmm13,	%xmm4	# 4 = sb1u
	ld1	{v1.2d}, [x11], #16		// vmovdqa	-0x40(%r11,%r10), %xmm1	# Lk_mc_forward[]
	tbl	v0.16b, {v24.16b}, v3.16b		// vpshufb	%xmm3,	%xmm12,	%xmm0	# 0 = sb1t
	eor	v4.16b, v4.16b, v16.16b		// vpxor	%xmm5,	%xmm4,	%xmm4	# 4 = sb1u + k
	tbl	v5.16b,	{v27.16b}, v2.16b		// vpshufb	%xmm2,	%xmm15,	%xmm5	# 4 = sb2u
	eor	v0.16b, v0.16b, v4.16b		// vpxor	%xmm4,	%xmm0,	%xmm0	# 0 = A
	tbl	v2.16b, {v26.16b}, v3.16b		// vpshufb	%xmm3,	%xmm14,	%xmm2	# 2 = sb2t
	ld1	{v4.2d}, [x10]			// vmovdqa	(%r11,%r10), %xmm4	# Lk_mc_backward[]
	tbl	v3.16b, {v0.16b}, v1.16b	// vpshufb	%xmm1,	%xmm0,	%xmm3	# 0 = B
	eor	v2.16b, v2.16b, v5.16b		// vpxor	%xmm5,	%xmm2,	%xmm2	# 2 = 2A
	tbl	v0.16b, {v0.16b}, v4.16b	// vpshufb	%xmm4,	%xmm0,	%xmm0	# 3 = D
	eor	v3.16b, v3.16b, v2.16b		// vpxor	%xmm2,	%xmm3,	%xmm3	# 0 = 2A+B
	tbl	v4.16b, {v3.16b}, v1.16b	// vpshufb	%xmm1,	%xmm3,	%xmm4	# 0 = 2B+C
	eor	v0.16b, v0.16b, v3.16b		// vpxor	%xmm3,	%xmm0,	%xmm0	# 3 = 2A+B+D
	and	x11, x11, #~(1<<6)		// and		$0x30,	%r11		# ... mod 4
	eor	v0.16b, v0.16b, v4.16b		// vpxor	%xmm4,	%xmm0, %xmm0	# 0 = 2A+3B+C+D
	sub	w8, w8, #1			// nr--

Lenc_entry:
	// top of round
	and	v1.16b, v0.16b, v17.16b		// vpand	%xmm0,	%xmm9,	%xmm1   # 0 = k
	ushr	v0.16b, v0.16b, #4		// vpsrlb	$4,	%xmm0,	%xmm0	# 1 = i
	tbl	v5.16b, {v19.16b}, v1.16b	// vpshufb	%xmm1,	%xmm11,	%xmm5	# 2 = a/k
	eor	v1.16b, v1.16b, v0.16b		// vpxor	%xmm0,	%xmm1,	%xmm1	# 0 = j
	tbl	v3.16b, {v18.16b}, v0.16b	// vpshufb	%xmm0, 	%xmm10,	%xmm3  	# 3 = 1/i
	tbl	v4.16b, {v18.16b}, v1.16b	// vpshufb	%xmm1, 	%xmm10,	%xmm4  	# 4 = 1/j
	eor	v3.16b, v3.16b, v5.16b		// vpxor	%xmm5,	%xmm3,	%xmm3	# 3 = iak = 1/i + a/k
	eor	v4.16b, v4.16b, v5.16b		// vpxor	%xmm5,	%xmm4,	%xmm4  	# 4 = jak = 1/j + a/k
	tbl	v2.16b, {v18.16b}, v3.16b	// vpshufb	%xmm3,	%xmm10,	%xmm2  	# 2 = 1/iak
	tbl	v3.16b, {v18.16b}, v4.16b	// vpshufb	%xmm4,	%xmm10,	%xmm3	# 3 = 1/jak
	eor	v2.16b, v2.16b, v1.16b		// vpxor	%xmm1,	%xmm2,	%xmm2  	# 2 = io
	eor	v3.16b, v3.16b, v0.16b		// vpxor	%xmm0,	%xmm3,	%xmm3	# 3 = jo
	ld1	{v16.2d}, [x9],#16		// vmovdqu	(%r9),	%xmm5
	cbnz	w8, Lenc_loop

	// middle of last round
	add	x10, x11, #0x80
						// vmovdqa	-0x60(%r10), %xmm4	# 3 : sbou	.Lk_sbo
						// vmovdqa	-0x50(%r10), %xmm0	# 0 : sbot	.Lk_sbo+16
	tbl	v4.16b, {v22.16b}, v2.16b		// vpshufb	%xmm2,	%xmm4,	%xmm4	# 4 = sbou
	ld1	{v1.2d}, [x10]			// vmovdqa	0x40(%r11,%r10), %xmm1	# Lk_sr[]
	tbl	v0.16b, {v23.16b}, v3.16b		// vpshufb	%xmm3,	%xmm0,	%xmm0	# 0 = sb1t
	eor	v4.16b, v4.16b, v16.16b		// vpxor	%xmm5,	%xmm4,	%xmm4	# 4 = sb1u + k
	eor	v0.16b, v0.16b, v4.16b		// vpxor	%xmm4,	%xmm0,	%xmm0	# 0 = A
	tbl	v0.16b, {v0.16b}, v1.16b	// vpshufb	%xmm1,	%xmm0,	%xmm0
	ret


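// Assumed C-level prototype for this entry point, inferred from the register
// usage below (x0 = 16-byte input block, x1 = 16-byte output block,
// x2 = expanded AES_KEY, whose round count sits at byte offset 240):
//
//   void vpaes_encrypt(const uint8_t in[16], uint8_t out[16],
//                      const AES_KEY *key);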
.globl	vpaes_encrypt

.def vpaes_encrypt
   .type 32
.endef
.align	4
vpaes_encrypt:
	AARCH64_SIGN_LINK_REGISTER
	stp	x29,x30,[sp,#-16]!
	add	x29,sp,#0

	ld1	{v7.16b}, [x0]
	bl	_vpaes_encrypt_preheat
	bl	_vpaes_encrypt_core
	st1	{v0.16b}, [x1]

	ldp	x29,x30,[sp],#16
	AARCH64_VALIDATE_LINK_REGISTER
	ret


.def _vpaes_encrypt_2x
   .type 32
.endef
.align	4
_vpaes_encrypt_2x:
	mov	x9, x2
	ldr	w8, [x2,#240]			// pull rounds
	adrp	x11, Lk_mc_forward+16
	add	x11, x11, :lo12:Lk_mc_forward+16
						// vmovdqa	.Lk_ipt(%rip),	%xmm2	# iptlo
	ld1	{v16.2d}, [x9], #16		// vmovdqu	(%r9),	%xmm5		# round0 key
	and	v1.16b,  v14.16b,  v17.16b	// vpand	%xmm9,	%xmm0,	%xmm1
	ushr	v0.16b,  v14.16b,  #4		// vpsrlb	$4,	%xmm0,	%xmm0
	and	v9.16b,  v15.16b,  v17.16b
	ushr	v8.16b,  v15.16b,  #4
	tbl	v1.16b,  {v20.16b}, v1.16b	// vpshufb	%xmm1,	%xmm2,	%xmm1
	tbl	v9.16b,  {v20.16b}, v9.16b
						// vmovdqa	.Lk_ipt+16(%rip), %xmm3	# ipthi
	tbl	v2.16b,  {v21.16b}, v0.16b	// vpshufb	%xmm0,	%xmm3,	%xmm2
	tbl	v10.16b, {v21.16b}, v8.16b
	eor	v0.16b,  v1.16b,   v16.16b	// vpxor	%xmm5,	%xmm1,	%xmm0
	eor	v8.16b,  v9.16b,   v16.16b
	eor	v0.16b,  v0.16b,   v2.16b	// vpxor	%xmm2,	%xmm0,	%xmm0
	eor	v8.16b,  v8.16b,   v10.16b
	b	Lenc_2x_entry

.align	4
Lenc_2x_loop:
	// middle of middle round
	add	x10, x11, #0x40
	tbl	v4.16b,  {v25.16b}, v2.16b	// vpshufb	%xmm2,	%xmm13,	%xmm4	# 4 = sb1u
	tbl	v12.16b, {v25.16b}, v10.16b
	ld1	{v1.2d}, [x11], #16		// vmovdqa	-0x40(%r11,%r10), %xmm1	# Lk_mc_forward[]
	tbl	v0.16b,  {v24.16b}, v3.16b	// vpshufb	%xmm3,	%xmm12,	%xmm0	# 0 = sb1t
	tbl	v8.16b,  {v24.16b}, v11.16b
	eor	v4.16b,  v4.16b,  v16.16b	// vpxor	%xmm5,	%xmm4,	%xmm4	# 4 = sb1u + k
	eor	v12.16b, v12.16b, v16.16b
	tbl	v5.16b,	 {v27.16b}, v2.16b	// vpshufb	%xmm2,	%xmm15,	%xmm5	# 4 = sb2u
	tbl	v13.16b, {v27.16b}, v10.16b
	eor	v0.16b,  v0.16b,  v4.16b	// vpxor	%xmm4,	%xmm0,	%xmm0	# 0 = A
	eor	v8.16b,  v8.16b,  v12.16b
	tbl	v2.16b,  {v26.16b}, v3.16b	// vpshufb	%xmm3,	%xmm14,	%xmm2	# 2 = sb2t
	tbl	v10.16b, {v26.16b}, v11.16b
	ld1	{v4.2d}, [x10]			// vmovdqa	(%r11,%r10), %xmm4	# Lk_mc_backward[]
	tbl	v3.16b,  {v0.16b}, v1.16b	// vpshufb	%xmm1,	%xmm0,	%xmm3	# 0 = B
	tbl	v11.16b, {v8.16b}, v1.16b
	eor	v2.16b,  v2.16b,  v5.16b	// vpxor	%xmm5,	%xmm2,	%xmm2	# 2 = 2A
	eor	v10.16b, v10.16b, v13.16b
	tbl	v0.16b,  {v0.16b}, v4.16b	// vpshufb	%xmm4,	%xmm0,	%xmm0	# 3 = D
	tbl	v8.16b,  {v8.16b}, v4.16b
	eor	v3.16b,  v3.16b,  v2.16b	// vpxor	%xmm2,	%xmm3,	%xmm3	# 0 = 2A+B
	eor	v11.16b, v11.16b, v10.16b
	tbl	v4.16b,  {v3.16b}, v1.16b	// vpshufb	%xmm1,	%xmm3,	%xmm4	# 0 = 2B+C
	tbl	v12.16b, {v11.16b},v1.16b
	eor	v0.16b,  v0.16b,  v3.16b	// vpxor	%xmm3,	%xmm0,	%xmm0	# 3 = 2A+B+D
	eor	v8.16b,  v8.16b,  v11.16b
	and	x11, x11, #~(1<<6)		// and		$0x30,	%r11		# ... mod 4
	eor	v0.16b,  v0.16b,  v4.16b	// vpxor	%xmm4,	%xmm0, %xmm0	# 0 = 2A+3B+C+D
	eor	v8.16b,  v8.16b,  v12.16b
	sub	w8, w8, #1			// nr--

Lenc_2x_entry:
	// top of round
	and	v1.16b,  v0.16b, v17.16b	// vpand	%xmm0,	%xmm9,	%xmm1   # 0 = k
	ushr	v0.16b,  v0.16b, #4		// vpsrlb	$4,	%xmm0,	%xmm0	# 1 = i
	and	v9.16b,  v8.16b, v17.16b
	ushr	v8.16b,  v8.16b, #4
	tbl	v5.16b,  {v19.16b},v1.16b	// vpshufb	%xmm1,	%xmm11,	%xmm5	# 2 = a/k
	tbl	v13.16b, {v19.16b},v9.16b
	eor	v1.16b,  v1.16b,  v0.16b	// vpxor	%xmm0,	%xmm1,	%xmm1	# 0 = j
	eor	v9.16b,  v9.16b,  v8.16b
	tbl	v3.16b,  {v18.16b},v0.16b	// vpshufb	%xmm0, 	%xmm10,	%xmm3  	# 3 = 1/i
	tbl	v11.16b, {v18.16b},v8.16b
	tbl	v4.16b,  {v18.16b},v1.16b	// vpshufb	%xmm1, 	%xmm10,	%xmm4  	# 4 = 1/j
	tbl	v12.16b, {v18.16b},v9.16b
	eor	v3.16b,  v3.16b,  v5.16b	// vpxor	%xmm5,	%xmm3,	%xmm3	# 3 = iak = 1/i + a/k
	eor	v11.16b, v11.16b, v13.16b
	eor	v4.16b,  v4.16b,  v5.16b	// vpxor	%xmm5,	%xmm4,	%xmm4  	# 4 = jak = 1/j + a/k
	eor	v12.16b, v12.16b, v13.16b
	tbl	v2.16b,  {v18.16b},v3.16b	// vpshufb	%xmm3,	%xmm10,	%xmm2  	# 2 = 1/iak
	tbl	v10.16b, {v18.16b},v11.16b
	tbl	v3.16b,  {v18.16b},v4.16b	// vpshufb	%xmm4,	%xmm10,	%xmm3	# 3 = 1/jak
	tbl	v11.16b, {v18.16b},v12.16b
	eor	v2.16b,  v2.16b,  v1.16b	// vpxor	%xmm1,	%xmm2,	%xmm2  	# 2 = io
	eor	v10.16b, v10.16b, v9.16b
	eor	v3.16b,  v3.16b,  v0.16b	// vpxor	%xmm0,	%xmm3,	%xmm3	# 3 = jo
	eor	v11.16b, v11.16b, v8.16b
	ld1	{v16.2d}, [x9],#16		// vmovdqu	(%r9),	%xmm5
	cbnz	w8, Lenc_2x_loop

	// middle of last round
	add	x10, x11, #0x80
						// vmovdqa	-0x60(%r10), %xmm4	# 3 : sbou	.Lk_sbo
						// vmovdqa	-0x50(%r10), %xmm0	# 0 : sbot	.Lk_sbo+16
	tbl	v4.16b,  {v22.16b}, v2.16b	// vpshufb	%xmm2,	%xmm4,	%xmm4	# 4 = sbou
	tbl	v12.16b, {v22.16b}, v10.16b
	ld1	{v1.2d}, [x10]			// vmovdqa	0x40(%r11,%r10), %xmm1	# Lk_sr[]
	tbl	v0.16b,  {v23.16b}, v3.16b	// vpshufb	%xmm3,	%xmm0,	%xmm0	# 0 = sb1t
	tbl	v8.16b,  {v23.16b}, v11.16b
	eor	v4.16b,  v4.16b,  v16.16b	// vpxor	%xmm5,	%xmm4,	%xmm4	# 4 = sb1u + k
	eor	v12.16b, v12.16b, v16.16b
	eor	v0.16b,  v0.16b,  v4.16b	// vpxor	%xmm4,	%xmm0,	%xmm0	# 0 = A
	eor	v8.16b,  v8.16b,  v12.16b
	tbl	v0.16b,  {v0.16b},v1.16b	// vpshufb	%xmm1,	%xmm0,	%xmm0
	tbl	v1.16b,  {v8.16b},v1.16b
	ret


.def _vpaes_decrypt_preheat
   .type 32
.endef
.align	4
_vpaes_decrypt_preheat:
	adrp	x10, Lk_inv
	add	x10, x10, :lo12:Lk_inv
	movi	v17.16b, #0x0f
	adrp	x11, Lk_dipt
	add	x11, x11, :lo12:Lk_dipt
	ld1	{v18.2d,v19.2d}, [x10],#32	// Lk_inv
	ld1	{v20.2d,v21.2d,v22.2d,v23.2d}, [x11],#64	// Lk_dipt, Lk_dsbo
	ld1	{v24.2d,v25.2d,v26.2d,v27.2d}, [x11],#64	// Lk_dsb9, Lk_dsbd
	ld1	{v28.2d,v29.2d,v30.2d,v31.2d}, [x11]		// Lk_dsbb, Lk_dsbe
	ret


##
##  Decryption core
##
##  Same API as encryption core.
##
.def _vpaes_decrypt_core
   .type 32
.endef
.align	4
_vpaes_decrypt_core:
	mov	x9, x2
	ldr	w8, [x2,#240]			// pull rounds

						// vmovdqa	.Lk_dipt(%rip), %xmm2	# iptlo
	lsl	x11, x8, #4			// mov	%rax,	%r11;	shl	$4, %r11
	eor	x11, x11, #0x30			// xor		$0x30,	%r11
	adrp	x10, Lk_sr
	add	x10, x10, :lo12:Lk_sr
	and	x11, x11, #0x30			// and		$0x30,	%r11
	add	x11, x11, x10
	adrp	x10, Lk_mc_forward+48
	add	x10, x10, :lo12:Lk_mc_forward+48

	ld1	{v16.2d}, [x9],#16		// vmovdqu	(%r9),	%xmm4		# round0 key
	and	v1.16b, v7.16b, v17.16b		// vpand	%xmm9,	%xmm0,	%xmm1
	ushr	v0.16b, v7.16b, #4		// vpsrlb	$4,	%xmm0,	%xmm0
	tbl	v2.16b, {v20.16b}, v1.16b	// vpshufb	%xmm1,	%xmm2,	%xmm2
	ld1	{v5.2d}, [x10]			// vmovdqa	Lk_mc_forward+48(%rip), %xmm5
						// vmovdqa	.Lk_dipt+16(%rip), %xmm1 # ipthi
	tbl	v0.16b, {v21.16b}, v0.16b	// vpshufb	%xmm0,	%xmm1,	%xmm0
	eor	v2.16b, v2.16b, v16.16b		// vpxor	%xmm4,	%xmm2,	%xmm2
	eor	v0.16b, v0.16b, v2.16b		// vpxor	%xmm2,	%xmm0,	%xmm0
	b	Ldec_entry

.align	4
Ldec_loop:
//
//  Inverse mix columns
//
						// vmovdqa	-0x20(%r10),%xmm4		# 4 : sb9u
						// vmovdqa	-0x10(%r10),%xmm1		# 0 : sb9t
	tbl	v4.16b, {v24.16b}, v2.16b		// vpshufb	%xmm2,	%xmm4,	%xmm4		# 4 = sb9u
	tbl	v1.16b, {v25.16b}, v3.16b		// vpshufb	%xmm3,	%xmm1,	%xmm1		# 0 = sb9t
	eor	v0.16b, v4.16b, v16.16b		// vpxor	%xmm4,	%xmm0,	%xmm0
						// vmovdqa	0x00(%r10),%xmm4		# 4 : sbdu
	eor	v0.16b, v0.16b, v1.16b		// vpxor	%xmm1,	%xmm0,	%xmm0		# 0 = ch
						// vmovdqa	0x10(%r10),%xmm1		# 0 : sbdt

	tbl	v4.16b, {v26.16b}, v2.16b		// vpshufb	%xmm2,	%xmm4,	%xmm4		# 4 = sbdu
	tbl	v0.16b, {v0.16b}, v5.16b	// vpshufb	%xmm5,	%xmm0,	%xmm0		# MC ch
	tbl	v1.16b, {v27.16b}, v3.16b		// vpshufb	%xmm3,	%xmm1,	%xmm1		# 0 = sbdt
	eor	v0.16b, v0.16b, v4.16b		// vpxor	%xmm4,	%xmm0,	%xmm0		# 4 = ch
						// vmovdqa	0x20(%r10),	%xmm4		# 4 : sbbu
	eor	v0.16b, v0.16b, v1.16b		// vpxor	%xmm1,	%xmm0,	%xmm0		# 0 = ch
						// vmovdqa	0x30(%r10),	%xmm1		# 0 : sbbt

	tbl	v4.16b, {v28.16b}, v2.16b		// vpshufb	%xmm2,	%xmm4,	%xmm4		# 4 = sbbu
	tbl	v0.16b, {v0.16b}, v5.16b	// vpshufb	%xmm5,	%xmm0,	%xmm0		# MC ch
	tbl	v1.16b, {v29.16b}, v3.16b		// vpshufb	%xmm3,	%xmm1,	%xmm1		# 0 = sbbt
	eor	v0.16b, v0.16b, v4.16b		// vpxor	%xmm4,	%xmm0,	%xmm0		# 4 = ch
						// vmovdqa	0x40(%r10),	%xmm4		# 4 : sbeu
	eor	v0.16b, v0.16b, v1.16b		// vpxor	%xmm1,	%xmm0,	%xmm0		# 0 = ch
						// vmovdqa	0x50(%r10),	%xmm1		# 0 : sbet

	tbl	v4.16b, {v30.16b}, v2.16b		// vpshufb	%xmm2,	%xmm4,	%xmm4		# 4 = sbeu
	tbl	v0.16b, {v0.16b}, v5.16b	// vpshufb	%xmm5,	%xmm0,	%xmm0		# MC ch
	tbl	v1.16b, {v31.16b}, v3.16b		// vpshufb	%xmm3,	%xmm1,	%xmm1		# 0 = sbet
	eor	v0.16b, v0.16b, v4.16b		// vpxor	%xmm4,	%xmm0,	%xmm0		# 4 = ch
	ext	v5.16b, v5.16b, v5.16b, #12	// vpalignr $12,	%xmm5,	%xmm5,	%xmm5
	eor	v0.16b, v0.16b, v1.16b		// vpxor	%xmm1,	%xmm0,	%xmm0		# 0 = ch
	sub	w8, w8, #1			// sub		$1,%rax			# nr--

Ldec_entry:
	// top of round
	and	v1.16b, v0.16b, v17.16b		// vpand	%xmm9,	%xmm0,	%xmm1	# 0 = k
	ushr	v0.16b, v0.16b, #4		// vpsrlb	$4,	%xmm0,	%xmm0	# 1 = i
	tbl	v2.16b, {v19.16b}, v1.16b	// vpshufb	%xmm1,	%xmm11,	%xmm2	# 2 = a/k
	eor	v1.16b,	v1.16b, v0.16b		// vpxor	%xmm0,	%xmm1,	%xmm1	# 0 = j
	tbl	v3.16b, {v18.16b}, v0.16b	// vpshufb	%xmm0, 	%xmm10,	%xmm3	# 3 = 1/i
	tbl	v4.16b, {v18.16b}, v1.16b	// vpshufb	%xmm1,	%xmm10,	%xmm4	# 4 = 1/j
	eor	v3.16b, v3.16b, v2.16b		// vpxor	%xmm2,	%xmm3,	%xmm3	# 3 = iak = 1/i + a/k
	eor	v4.16b, v4.16b, v2.16b		// vpxor	%xmm2, 	%xmm4,	%xmm4	# 4 = jak = 1/j + a/k
	tbl	v2.16b, {v18.16b}, v3.16b	// vpshufb	%xmm3,	%xmm10,	%xmm2	# 2 = 1/iak
	tbl	v3.16b, {v18.16b}, v4.16b	// vpshufb	%xmm4,  %xmm10,	%xmm3	# 3 = 1/jak
	eor	v2.16b, v2.16b, v1.16b		// vpxor	%xmm1,	%xmm2,	%xmm2	# 2 = io
	eor	v3.16b, v3.16b, v0.16b		// vpxor	%xmm0,  %xmm3,	%xmm3	# 3 = jo
	ld1	{v16.2d}, [x9],#16		// vmovdqu	(%r9),	%xmm0
	cbnz	w8, Ldec_loop

	// middle of last round
						// vmovdqa	0x60(%r10),	%xmm4	# 3 : sbou
	tbl	v4.16b, {v22.16b}, v2.16b		// vpshufb	%xmm2,	%xmm4,	%xmm4	# 4 = sbou
						// vmovdqa	0x70(%r10),	%xmm1	# 0 : sbot
	ld1	{v2.2d}, [x11]			// vmovdqa	-0x160(%r11),	%xmm2	# Lk_sr-Lk_dsbd=-0x160
	tbl	v1.16b, {v23.16b}, v3.16b		// vpshufb	%xmm3,	%xmm1,	%xmm1	# 0 = sb1t
	eor	v4.16b, v4.16b, v16.16b		// vpxor	%xmm0,	%xmm4,	%xmm4	# 4 = sb1u + k
	eor	v0.16b, v1.16b, v4.16b		// vpxor	%xmm4,	%xmm1,	%xmm0	# 0 = A
	tbl	v0.16b, {v0.16b}, v2.16b	// vpshufb	%xmm2,	%xmm0,	%xmm0
	ret


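// Assumed C-level prototype, mirroring vpaes_encrypt above (x0 = 16-byte
// input block, x1 = 16-byte output block, x2 = expanded AES_KEY):
//
//   void vpaes_decrypt(const uint8_t in[16], uint8_t out[16],
//                      const AES_KEY *key);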
.globl	vpaes_decrypt

.def vpaes_decrypt
   .type 32
.endef
.align	4
vpaes_decrypt:
	AARCH64_SIGN_LINK_REGISTER
	stp	x29,x30,[sp,#-16]!
	add	x29,sp,#0

	ld1	{v7.16b}, [x0]
	bl	_vpaes_decrypt_preheat
	bl	_vpaes_decrypt_core
	st1	{v0.16b}, [x1]

	ldp	x29,x30,[sp],#16
	AARCH64_VALIDATE_LINK_REGISTER
	ret


// v14-v15 input, v0-v1 output
.def _vpaes_decrypt_2x
   .type 32
.endef
.align	4
_vpaes_decrypt_2x:
	mov	x9, x2
	ldr	w8, [x2,#240]			// pull rounds

						// vmovdqa	.Lk_dipt(%rip), %xmm2	# iptlo
	lsl	x11, x8, #4			// mov	%rax,	%r11;	shl	$4, %r11
	eor	x11, x11, #0x30			// xor		$0x30,	%r11
	adrp	x10, Lk_sr
	add	x10, x10, :lo12:Lk_sr
	and	x11, x11, #0x30			// and		$0x30,	%r11
	add	x11, x11, x10
	adrp	x10, Lk_mc_forward+48
	add	x10, x10, :lo12:Lk_mc_forward+48

	ld1	{v16.2d}, [x9],#16		// vmovdqu	(%r9),	%xmm4		# round0 key
	and	v1.16b,  v14.16b, v17.16b	// vpand	%xmm9,	%xmm0,	%xmm1
	ushr	v0.16b,  v14.16b, #4		// vpsrlb	$4,	%xmm0,	%xmm0
	and	v9.16b,  v15.16b, v17.16b
	ushr	v8.16b,  v15.16b, #4
	tbl	v2.16b,  {v20.16b},v1.16b	// vpshufb	%xmm1,	%xmm2,	%xmm2
	tbl	v10.16b, {v20.16b},v9.16b
	ld1	{v5.2d}, [x10]			// vmovdqa	Lk_mc_forward+48(%rip), %xmm5
						// vmovdqa	.Lk_dipt+16(%rip), %xmm1 # ipthi
	tbl	v0.16b,  {v21.16b},v0.16b	// vpshufb	%xmm0,	%xmm1,	%xmm0
	tbl	v8.16b,  {v21.16b},v8.16b
	eor	v2.16b,  v2.16b,  v16.16b	// vpxor	%xmm4,	%xmm2,	%xmm2
	eor	v10.16b, v10.16b, v16.16b
	eor	v0.16b,  v0.16b,  v2.16b	// vpxor	%xmm2,	%xmm0,	%xmm0
	eor	v8.16b,  v8.16b,  v10.16b
	b	Ldec_2x_entry

.align	4
Ldec_2x_loop:
//
//  Inverse mix columns
//
						// vmovdqa	-0x20(%r10),%xmm4		# 4 : sb9u
						// vmovdqa	-0x10(%r10),%xmm1		# 0 : sb9t
	tbl	v4.16b,  {v24.16b}, v2.16b	// vpshufb	%xmm2,	%xmm4,	%xmm4		# 4 = sb9u
	tbl	v12.16b, {v24.16b}, v10.16b
	tbl	v1.16b,  {v25.16b}, v3.16b	// vpshufb	%xmm3,	%xmm1,	%xmm1		# 0 = sb9t
	tbl	v9.16b,  {v25.16b}, v11.16b
	eor	v0.16b,  v4.16b,  v16.16b	// vpxor	%xmm4,	%xmm0,	%xmm0
	eor	v8.16b,  v12.16b, v16.16b
						// vmovdqa	0x00(%r10),%xmm4		# 4 : sbdu
	eor	v0.16b,  v0.16b,  v1.16b	// vpxor	%xmm1,	%xmm0,	%xmm0		# 0 = ch
	eor	v8.16b,  v8.16b,  v9.16b	// vpxor	%xmm1,	%xmm0,	%xmm0		# 0 = ch
						// vmovdqa	0x10(%r10),%xmm1		# 0 : sbdt

	tbl	v4.16b,  {v26.16b}, v2.16b	// vpshufb	%xmm2,	%xmm4,	%xmm4		# 4 = sbdu
	tbl	v12.16b, {v26.16b}, v10.16b
	tbl	v0.16b,  {v0.16b},v5.16b	// vpshufb	%xmm5,	%xmm0,	%xmm0		# MC ch
	tbl	v8.16b,  {v8.16b},v5.16b
	tbl	v1.16b,  {v27.16b}, v3.16b	// vpshufb	%xmm3,	%xmm1,	%xmm1		# 0 = sbdt
	tbl	v9.16b,  {v27.16b}, v11.16b
	eor	v0.16b,  v0.16b,  v4.16b	// vpxor	%xmm4,	%xmm0,	%xmm0		# 4 = ch
	eor	v8.16b,  v8.16b,  v12.16b
						// vmovdqa	0x20(%r10),	%xmm4		# 4 : sbbu
	eor	v0.16b,  v0.16b,  v1.16b	// vpxor	%xmm1,	%xmm0,	%xmm0		# 0 = ch
	eor	v8.16b,  v8.16b,  v9.16b
						// vmovdqa	0x30(%r10),	%xmm1		# 0 : sbbt

	tbl	v4.16b,  {v28.16b}, v2.16b	// vpshufb	%xmm2,	%xmm4,	%xmm4		# 4 = sbbu
	tbl	v12.16b, {v28.16b}, v10.16b
	tbl	v0.16b,  {v0.16b},v5.16b	// vpshufb	%xmm5,	%xmm0,	%xmm0		# MC ch
	tbl	v8.16b,  {v8.16b},v5.16b
	tbl	v1.16b,  {v29.16b}, v3.16b	// vpshufb	%xmm3,	%xmm1,	%xmm1		# 0 = sbbt
	tbl	v9.16b,  {v29.16b}, v11.16b
	eor	v0.16b,  v0.16b,  v4.16b	// vpxor	%xmm4,	%xmm0,	%xmm0		# 4 = ch
	eor	v8.16b,  v8.16b,  v12.16b
						// vmovdqa	0x40(%r10),	%xmm4		# 4 : sbeu
	eor	v0.16b,  v0.16b,  v1.16b	// vpxor	%xmm1,	%xmm0,	%xmm0		# 0 = ch
	eor	v8.16b,  v8.16b,  v9.16b
						// vmovdqa	0x50(%r10),	%xmm1		# 0 : sbet

	tbl	v4.16b,  {v30.16b}, v2.16b	// vpshufb	%xmm2,	%xmm4,	%xmm4		# 4 = sbeu
	tbl	v12.16b, {v30.16b}, v10.16b
	tbl	v0.16b,  {v0.16b},v5.16b	// vpshufb	%xmm5,	%xmm0,	%xmm0		# MC ch
	tbl	v8.16b,  {v8.16b},v5.16b
	tbl	v1.16b,  {v31.16b}, v3.16b	// vpshufb	%xmm3,	%xmm1,	%xmm1		# 0 = sbet
	tbl	v9.16b,  {v31.16b}, v11.16b
	eor	v0.16b,  v0.16b,  v4.16b	// vpxor	%xmm4,	%xmm0,	%xmm0		# 4 = ch
	eor	v8.16b,  v8.16b,  v12.16b
	ext	v5.16b,  v5.16b,  v5.16b, #12	// vpalignr $12,	%xmm5,	%xmm5,	%xmm5
	eor	v0.16b,  v0.16b,  v1.16b	// vpxor	%xmm1,	%xmm0,	%xmm0		# 0 = ch
	eor	v8.16b,  v8.16b,  v9.16b
	sub	w8, w8, #1			// sub		$1,%rax			# nr--

Ldec_2x_entry:
	// top of round
	and	v1.16b,  v0.16b,  v17.16b	// vpand	%xmm9,	%xmm0,	%xmm1	# 0 = k
	ushr	v0.16b,  v0.16b,  #4		// vpsrlb	$4,	%xmm0,	%xmm0	# 1 = i
	and	v9.16b,  v8.16b,  v17.16b
	ushr	v8.16b,  v8.16b,  #4
	tbl	v2.16b,  {v19.16b},v1.16b	// vpshufb	%xmm1,	%xmm11,	%xmm2	# 2 = a/k
	tbl	v10.16b, {v19.16b},v9.16b
	eor	v1.16b,	 v1.16b,  v0.16b	// vpxor	%xmm0,	%xmm1,	%xmm1	# 0 = j
	eor	v9.16b,	 v9.16b,  v8.16b
	tbl	v3.16b,  {v18.16b},v0.16b	// vpshufb	%xmm0, 	%xmm10,	%xmm3	# 3 = 1/i
	tbl	v11.16b, {v18.16b},v8.16b
	tbl	v4.16b,  {v18.16b},v1.16b	// vpshufb	%xmm1,	%xmm10,	%xmm4	# 4 = 1/j
	tbl	v12.16b, {v18.16b},v9.16b
	eor	v3.16b,  v3.16b,  v2.16b	// vpxor	%xmm2,	%xmm3,	%xmm3	# 3 = iak = 1/i + a/k
	eor	v11.16b, v11.16b, v10.16b
	eor	v4.16b,  v4.16b,  v2.16b	// vpxor	%xmm2, 	%xmm4,	%xmm4	# 4 = jak = 1/j + a/k
	eor	v12.16b, v12.16b, v10.16b
	tbl	v2.16b,  {v18.16b},v3.16b	// vpshufb	%xmm3,	%xmm10,	%xmm2	# 2 = 1/iak
	tbl	v10.16b, {v18.16b},v11.16b
	tbl	v3.16b,  {v18.16b},v4.16b	// vpshufb	%xmm4,  %xmm10,	%xmm3	# 3 = 1/jak
	tbl	v11.16b, {v18.16b},v12.16b
	eor	v2.16b,  v2.16b,  v1.16b	// vpxor	%xmm1,	%xmm2,	%xmm2	# 2 = io
	eor	v10.16b, v10.16b, v9.16b
	eor	v3.16b,  v3.16b,  v0.16b	// vpxor	%xmm0,  %xmm3,	%xmm3	# 3 = jo
	eor	v11.16b, v11.16b, v8.16b
	ld1	{v16.2d}, [x9],#16		// vmovdqu	(%r9),	%xmm0
	cbnz	w8, Ldec_2x_loop

	// middle of last round
						// vmovdqa	0x60(%r10),	%xmm4	# 3 : sbou
	tbl	v4.16b,  {v22.16b}, v2.16b	// vpshufb	%xmm2,	%xmm4,	%xmm4	# 4 = sbou
	tbl	v12.16b, {v22.16b}, v10.16b
						// vmovdqa	0x70(%r10),	%xmm1	# 0 : sbot
	tbl	v1.16b,  {v23.16b}, v3.16b	// vpshufb	%xmm3,	%xmm1,	%xmm1	# 0 = sb1t
	tbl	v9.16b,  {v23.16b}, v11.16b
	ld1	{v2.2d}, [x11]			// vmovdqa	-0x160(%r11),	%xmm2	# Lk_sr-Lk_dsbd=-0x160
	eor	v4.16b,  v4.16b,  v16.16b	// vpxor	%xmm0,	%xmm4,	%xmm4	# 4 = sb1u + k
	eor	v12.16b, v12.16b, v16.16b
	eor	v0.16b,  v1.16b,  v4.16b	// vpxor	%xmm4,	%xmm1,	%xmm0	# 0 = A
	eor	v8.16b,  v9.16b,  v12.16b
	tbl	v0.16b,  {v0.16b},v2.16b	// vpshufb	%xmm2,	%xmm0,	%xmm0
	tbl	v1.16b,  {v8.16b},v2.16b
	ret

########################################################
##                                                    ##
##                  AES key schedule                  ##
##                                                    ##
########################################################
.def _vpaes_key_preheat
   .type 32
.endef
.align	4
_vpaes_key_preheat:
	adrp	x10, Lk_inv
	add	x10, x10, :lo12:Lk_inv
	movi	v16.16b, #0x5b			// Lk_s63
	adrp	x11, Lk_sb1
	add	x11, x11, :lo12:Lk_sb1
	movi	v17.16b, #0x0f			// Lk_s0F
	ld1	{v18.2d,v19.2d,v20.2d,v21.2d}, [x10]		// Lk_inv, Lk_ipt
	adrp	x10, Lk_dksd
	add	x10, x10, :lo12:Lk_dksd
	ld1	{v22.2d,v23.2d}, [x11]		// Lk_sb1
	adrp	x11, Lk_mc_forward
	add	x11, x11, :lo12:Lk_mc_forward
	ld1	{v24.2d,v25.2d,v26.2d,v27.2d}, [x10],#64	// Lk_dksd, Lk_dksb
	ld1	{v28.2d,v29.2d,v30.2d,v31.2d}, [x10],#64	// Lk_dkse, Lk_dks9
	ld1	{v8.2d}, [x10]			// Lk_rcon
	ld1	{v9.2d}, [x11]			// Lk_mc_forward[0]
	ret


.def _vpaes_schedule_core
   .type 32
.endef
.align	4
_vpaes_schedule_core:
	AARCH64_SIGN_LINK_REGISTER
	stp	x29, x30, [sp,#-16]!
	add	x29,sp,#0

	bl	_vpaes_key_preheat		// load the tables

	ld1	{v0.16b}, [x0],#16		// vmovdqu	(%rdi),	%xmm0		# load key (unaligned)

	// input transform
	mov	v3.16b, v0.16b			// vmovdqa	%xmm0,	%xmm3
	bl	_vpaes_schedule_transform
	mov	v7.16b, v0.16b			// vmovdqa	%xmm0,	%xmm7

	adrp	x10, Lk_sr		// lea	Lk_sr(%rip),%r10
	add	x10, x10, :lo12:Lk_sr

	add	x8, x8, x10
	cbnz	w3, Lschedule_am_decrypting

	// encrypting, output zeroth round key after transform
	st1	{v0.2d}, [x2]			// vmovdqu	%xmm0,	(%rdx)
	b	Lschedule_go

Lschedule_am_decrypting:
	// decrypting, output zeroth round key after shiftrows
	ld1	{v1.2d}, [x8]			// vmovdqa	(%r8,%r10),	%xmm1
	tbl	v3.16b, {v3.16b}, v1.16b	// vpshufb  %xmm1,	%xmm3,	%xmm3
	st1	{v3.2d}, [x2]			// vmovdqu	%xmm3,	(%rdx)
	eor	x8, x8, #0x30			// xor	$0x30, %r8

Lschedule_go:
	cmp	w1, #192			// cmp	$192,	%esi
	b.hi	Lschedule_256
	b.eq	Lschedule_192
	// 128: fall though

##
##  .schedule_128
##
##  128-bit specific part of key schedule.
##
##  This schedule is really simple, because all its parts
##  are accomplished by the subroutines.
##
Lschedule_128:
	mov	x0, #10			// mov	$10, %esi

Loop_schedule_128:
	sub	x0, x0, #1			// dec	%esi
	bl	_vpaes_schedule_round
	cbz	x0, Lschedule_mangle_last
	bl	_vpaes_schedule_mangle		// write output
	b	Loop_schedule_128

##
##  .aes_schedule_192
##
##  192-bit specific part of key schedule.
##
##  The main body of this schedule is the same as the 128-bit
##  schedule, but with more smearing.  The long, high side is
##  stored in %xmm7 as before, and the short, low side is in
##  the high bits of %xmm6.
##
##  This schedule is somewhat nastier, however, because each
##  round produces 192 bits of key material, or 1.5 round keys.
##  Therefore, on each cycle we do 2 rounds and produce 3 round
##  keys.
##
.align	4
Lschedule_192:
	sub	x0, x0, #8
	ld1	{v0.16b}, [x0]		// vmovdqu	8(%rdi),%xmm0		# load key part 2 (very unaligned)
	bl	_vpaes_schedule_transform	// input transform
	mov	v6.16b, v0.16b			// vmovdqa	%xmm0,	%xmm6		# save short part
	eor	v4.16b, v4.16b, v4.16b		// vpxor	%xmm4,	%xmm4, %xmm4	# clear 4
	ins	v6.d[0], v4.d[0]		// vmovhlps	%xmm4,	%xmm6,	%xmm6		# clobber low side with zeros
	mov	x0, #4			// mov	$4,	%esi

Loop_schedule_192:
	sub	x0, x0, #1			// dec	%esi
	bl	_vpaes_schedule_round
	ext	v0.16b, v6.16b, v0.16b, #8	// vpalignr	$8,%xmm6,%xmm0,%xmm0
	bl	_vpaes_schedule_mangle		// save key n
	bl	_vpaes_schedule_192_smear
	bl	_vpaes_schedule_mangle		// save key n+1
	bl	_vpaes_schedule_round
	cbz	x0, Lschedule_mangle_last
	bl	_vpaes_schedule_mangle		// save key n+2
	bl	_vpaes_schedule_192_smear
	b	Loop_schedule_192

##
##  .aes_schedule_256
##
##  256-bit specific part of key schedule.
##
##  The structure here is very similar to the 128-bit
##  schedule, but with an additional "low side" in
##  %xmm6.  The low side's rounds are the same as the
##  high side's, except no rcon and no rotation.
##
.align	4
Lschedule_256:
	ld1	{v0.16b}, [x0]		// vmovdqu	16(%rdi),%xmm0		# load key part 2 (unaligned)
	bl	_vpaes_schedule_transform	// input transform
	mov	x0, #7			// mov	$7, %esi

Loop_schedule_256:
	sub	x0, x0, #1			// dec	%esi
	bl	_vpaes_schedule_mangle		// output low result
	mov	v6.16b, v0.16b			// vmovdqa	%xmm0,	%xmm6		# save cur_lo in xmm6

	// high round
	bl	_vpaes_schedule_round
	cbz	x0, Lschedule_mangle_last
	bl	_vpaes_schedule_mangle

	// low round. swap xmm7 and xmm6
	dup	v0.4s, v0.s[3]			// vpshufd	$0xFF,	%xmm0,	%xmm0
	movi	v4.16b, #0
	mov	v5.16b, v7.16b			// vmovdqa	%xmm7,	%xmm5
	mov	v7.16b, v6.16b			// vmovdqa	%xmm6,	%xmm7
	bl	_vpaes_schedule_low_round
	mov	v7.16b, v5.16b			// vmovdqa	%xmm5,	%xmm7

	b	Loop_schedule_256

##
##  .aes_schedule_mangle_last
##
##  Mangler for last round of key schedule
##  Mangles %xmm0
##    when encrypting, outputs out(%xmm0) ^ 63
##    when decrypting, outputs unskew(%xmm0)
##
##  Always called right before return... jumps to cleanup and exits
##
.align	4
Lschedule_mangle_last:
	// schedule last round key from xmm0
	adrp	x11, Lk_deskew	// lea	Lk_deskew(%rip),%r11	# prepare to deskew
	add	x11, x11, :lo12:Lk_deskew

	cbnz	w3, Lschedule_mangle_last_dec

	// encrypting
	ld1	{v1.2d}, [x8]			// vmovdqa	(%r8,%r10),%xmm1
	adrp	x11, Lk_opt		// lea	Lk_opt(%rip),	%r11		# prepare to output transform
	add	x11, x11, :lo12:Lk_opt
	add	x2, x2, #32			// add	$32,	%rdx
	tbl	v0.16b, {v0.16b}, v1.16b	// vpshufb	%xmm1,	%xmm0,	%xmm0		# output permute

Lschedule_mangle_last_dec:
	ld1	{v20.2d,v21.2d}, [x11]		// reload constants
	sub	x2, x2, #16			// add	$-16,	%rdx
	eor	v0.16b, v0.16b, v16.16b		// vpxor	Lk_s63(%rip),	%xmm0,	%xmm0
	bl	_vpaes_schedule_transform	// output transform
	st1	{v0.2d}, [x2]			// vmovdqu	%xmm0,	(%rdx)		# save last key

	// cleanup
	eor	v0.16b, v0.16b, v0.16b		// vpxor	%xmm0,	%xmm0,	%xmm0
	eor	v1.16b, v1.16b, v1.16b		// vpxor	%xmm1,	%xmm1,	%xmm1
	eor	v2.16b, v2.16b, v2.16b		// vpxor	%xmm2,	%xmm2,	%xmm2
	eor	v3.16b, v3.16b, v3.16b		// vpxor	%xmm3,	%xmm3,	%xmm3
	eor	v4.16b, v4.16b, v4.16b		// vpxor	%xmm4,	%xmm4,	%xmm4
	eor	v5.16b, v5.16b, v5.16b		// vpxor	%xmm5,	%xmm5,	%xmm5
	eor	v6.16b, v6.16b, v6.16b		// vpxor	%xmm6,	%xmm6,	%xmm6
	eor	v7.16b, v7.16b, v7.16b		// vpxor	%xmm7,	%xmm7,	%xmm7
	ldp	x29, x30, [sp],#16
	AARCH64_VALIDATE_LINK_REGISTER
	ret


##
##  .aes_schedule_192_smear
##
##  Smear the short, low side in the 192-bit key schedule.
##
##  Inputs:
##    %xmm7: high side, b  a  x  y
##    %xmm6:  low side, d  c  0  0
##    %xmm13: 0
##
##  Outputs:
##    %xmm6: b+c+d  b+c  0  0
##    %xmm0: b+c+d  b+c  b  a
##
.def _vpaes_schedule_192_smear
   .type 32
.endef
.align	4
_vpaes_schedule_192_smear:
	movi	v1.16b, #0
	dup	v0.4s, v7.s[3]
	ins	v1.s[3], v6.s[2]	// vpshufd	$0x80,	%xmm6,	%xmm1	# d c 0 0 -> c 0 0 0
	ins	v0.s[0], v7.s[2]	// vpshufd	$0xFE,	%xmm7,	%xmm0	# b a _ _ -> b b b a
	eor	v6.16b, v6.16b, v1.16b	// vpxor	%xmm1,	%xmm6,	%xmm6	# -> c+d c 0 0
	eor	v1.16b, v1.16b, v1.16b	// vpxor	%xmm1,	%xmm1,	%xmm1
	eor	v6.16b, v6.16b, v0.16b	// vpxor	%xmm0,	%xmm6,	%xmm6	# -> b+c+d b+c b a
	mov	v0.16b, v6.16b		// vmovdqa	%xmm6,	%xmm0
	ins	v6.d[0], v1.d[0]	// vmovhlps	%xmm1,	%xmm6,	%xmm6	# clobber low side with zeros
	ret


##
##  .aes_schedule_round
##
##  Runs one main round of the key schedule on %xmm0, %xmm7
##
##  Specifically, runs subbytes on the high dword of %xmm0
##  then rotates it by one byte and xors into the low dword of
##  %xmm7.
##
##  Adds rcon from low byte of %xmm8, then rotates %xmm8 for
##  next rcon.
##
##  Smears the dwords of %xmm7 by xoring the low into the
##  second low, result into third, result into highest.
##
##  Returns results in %xmm7 = %xmm0.
##  Clobbers %xmm1-%xmm4, %r11.
##
.def _vpaes_schedule_round
   .type 32
.endef
.align	4
_vpaes_schedule_round:
	// extract rcon from xmm8
	movi	v4.16b, #0			// vpxor	%xmm4,	%xmm4,	%xmm4
	ext	v1.16b, v8.16b, v4.16b, #15	// vpalignr	$15,	%xmm8,	%xmm4,	%xmm1
	ext	v8.16b, v8.16b, v8.16b, #15	// vpalignr	$15,	%xmm8,	%xmm8,	%xmm8
	eor	v7.16b, v7.16b, v1.16b		// vpxor	%xmm1,	%xmm7,	%xmm7

	// rotate
	dup	v0.4s, v0.s[3]			// vpshufd	$0xFF,	%xmm0,	%xmm0
	ext	v0.16b, v0.16b, v0.16b, #1	// vpalignr	$1,	%xmm0,	%xmm0,	%xmm0

	// fall through...

	// low round: same as high round, but no rotation and no rcon.
_vpaes_schedule_low_round:
	// smear xmm7
	ext	v1.16b, v4.16b, v7.16b, #12	// vpslldq	$4,	%xmm7,	%xmm1
	eor	v7.16b, v7.16b, v1.16b		// vpxor	%xmm1,	%xmm7,	%xmm7
	ext	v4.16b, v4.16b, v7.16b, #8	// vpslldq	$8,	%xmm7,	%xmm4

	// subbytes
	and	v1.16b, v0.16b, v17.16b		// vpand	%xmm9,	%xmm0,	%xmm1		# 0 = k
	ushr	v0.16b, v0.16b, #4		// vpsrlb	$4,	%xmm0,	%xmm0		# 1 = i
	eor	v7.16b, v7.16b, v4.16b		// vpxor	%xmm4,	%xmm7,	%xmm7
	tbl	v2.16b, {v19.16b}, v1.16b	// vpshufb	%xmm1,	%xmm11,	%xmm2		# 2 = a/k
	eor	v1.16b, v1.16b, v0.16b		// vpxor	%xmm0,	%xmm1,	%xmm1		# 0 = j
	tbl	v3.16b, {v18.16b}, v0.16b	// vpshufb	%xmm0, 	%xmm10,	%xmm3		# 3 = 1/i
	eor	v3.16b, v3.16b, v2.16b		// vpxor	%xmm2,	%xmm3,	%xmm3		# 3 = iak = 1/i + a/k
	tbl	v4.16b, {v18.16b}, v1.16b	// vpshufb	%xmm1,	%xmm10,	%xmm4		# 4 = 1/j
	eor	v7.16b, v7.16b, v16.16b		// vpxor	Lk_s63(%rip),	%xmm7,	%xmm7
	tbl	v3.16b, {v18.16b}, v3.16b	// vpshufb	%xmm3,	%xmm10,	%xmm3		# 2 = 1/iak
	eor	v4.16b, v4.16b, v2.16b		// vpxor	%xmm2,	%xmm4,	%xmm4		# 4 = jak = 1/j + a/k
	tbl	v2.16b, {v18.16b}, v4.16b	// vpshufb	%xmm4,	%xmm10,	%xmm2		# 3 = 1/jak
	eor	v3.16b, v3.16b, v1.16b		// vpxor	%xmm1,	%xmm3,	%xmm3		# 2 = io
	eor	v2.16b, v2.16b, v0.16b		// vpxor	%xmm0,	%xmm2,	%xmm2		# 3 = jo
	tbl	v4.16b, {v23.16b}, v3.16b	// vpshufb	%xmm3,	%xmm13,	%xmm4		# 4 = sbou
	tbl	v1.16b, {v22.16b}, v2.16b	// vpshufb	%xmm2,	%xmm12,	%xmm1		# 0 = sb1t
	eor	v1.16b, v1.16b, v4.16b		// vpxor	%xmm4,	%xmm1,	%xmm1		# 0 = sbox output

	// add in smeared stuff
	eor	v0.16b, v1.16b, v7.16b		// vpxor	%xmm7,	%xmm1,	%xmm0
	eor	v7.16b, v1.16b, v7.16b		// vmovdqa	%xmm0,	%xmm7
	ret


##
##  .aes_schedule_transform
##
##  Linear-transform %xmm0 according to tables at (%r11)
##
##  Requires that %xmm9 = 0x0F0F... as in preheat
##  Output in %xmm0
##  Clobbers %xmm1, %xmm2
##
.def _vpaes_schedule_transform
   .type 32
.endef
.align	4
_vpaes_schedule_transform:
	and	v1.16b, v0.16b, v17.16b		// vpand	%xmm9,	%xmm0,	%xmm1
	ushr	v0.16b, v0.16b, #4		// vpsrlb	$4,	%xmm0,	%xmm0
						// vmovdqa	(%r11),	%xmm2 	# lo
	tbl	v2.16b, {v20.16b}, v1.16b	// vpshufb	%xmm1,	%xmm2,	%xmm2
						// vmovdqa	16(%r11),	%xmm1 # hi
	tbl	v0.16b, {v21.16b}, v0.16b	// vpshufb	%xmm0,	%xmm1,	%xmm0
	eor	v0.16b, v0.16b, v2.16b		// vpxor	%xmm2,	%xmm0,	%xmm0
	ret


##
##  .aes_schedule_mangle
##
##  Mangle xmm0 from (basis-transformed) standard version
##  to our version.
##
##  On encrypt,
##    xor with 0x63
##    multiply by circulant 0,1,1,1
##    apply shiftrows transform
##
##  On decrypt,
##    xor with 0x63
##    multiply by "inverse mixcolumns" circulant E,B,D,9
##    deskew
##    apply shiftrows transform
##
##
##  Writes out to (%rdx), and increments or decrements it
##  Keeps track of round number mod 4 in %r8
##  Preserves xmm0
##  Clobbers xmm1-xmm5
##
.def _vpaes_schedule_mangle
   .type 32
.endef
.align	4
_vpaes_schedule_mangle:
	mov	v4.16b, v0.16b			// vmovdqa	%xmm0,	%xmm4	# save xmm0 for later
						// vmovdqa	.Lk_mc_forward(%rip),%xmm5
	cbnz	w3, Lschedule_mangle_dec

	// encrypting
	eor	v4.16b, v0.16b, v16.16b		// vpxor	Lk_s63(%rip),	%xmm0,	%xmm4
	add	x2, x2, #16			// add	$16,	%rdx
	tbl	v4.16b, {v4.16b}, v9.16b	// vpshufb	%xmm5,	%xmm4,	%xmm4
	tbl	v1.16b, {v4.16b}, v9.16b	// vpshufb	%xmm5,	%xmm4,	%xmm1
	tbl	v3.16b, {v1.16b}, v9.16b	// vpshufb	%xmm5,	%xmm1,	%xmm3
	eor	v4.16b, v4.16b, v1.16b		// vpxor	%xmm1,	%xmm4,	%xmm4
	ld1	{v1.2d}, [x8]			// vmovdqa	(%r8,%r10),	%xmm1
	eor	v3.16b, v3.16b, v4.16b		// vpxor	%xmm4,	%xmm3,	%xmm3

	b	Lschedule_mangle_both
.align	4
Lschedule_mangle_dec:
	// inverse mix columns
						// lea	.Lk_dksd(%rip),%r11
	ushr	v1.16b, v4.16b, #4		// vpsrlb	$4,	%xmm4,	%xmm1	# 1 = hi
	and	v4.16b, v4.16b, v17.16b		// vpand	%xmm9,	%xmm4,	%xmm4	# 4 = lo

						// vmovdqa	0x00(%r11),	%xmm2
	tbl	v2.16b, {v24.16b}, v4.16b	// vpshufb	%xmm4,	%xmm2,	%xmm2
						// vmovdqa	0x10(%r11),	%xmm3
	tbl	v3.16b,	{v25.16b}, v1.16b	// vpshufb	%xmm1,	%xmm3,	%xmm3
	eor	v3.16b, v3.16b, v2.16b		// vpxor	%xmm2,	%xmm3,	%xmm3
	tbl	v3.16b, {v3.16b}, v9.16b	// vpshufb	%xmm5,	%xmm3,	%xmm3

						// vmovdqa	0x20(%r11),	%xmm2
	tbl	v2.16b, {v26.16b}, v4.16b	// vpshufb	%xmm4,	%xmm2,	%xmm2
	eor	v2.16b, v2.16b, v3.16b		// vpxor	%xmm3,	%xmm2,	%xmm2
						// vmovdqa	0x30(%r11),	%xmm3
	tbl	v3.16b, {v27.16b}, v1.16b	// vpshufb	%xmm1,	%xmm3,	%xmm3
	eor	v3.16b, v3.16b, v2.16b		// vpxor	%xmm2,	%xmm3,	%xmm3
	tbl	v3.16b, {v3.16b}, v9.16b	// vpshufb	%xmm5,	%xmm3,	%xmm3

						// vmovdqa	0x40(%r11),	%xmm2
	tbl	v2.16b, {v28.16b}, v4.16b	// vpshufb	%xmm4,	%xmm2,	%xmm2
	eor	v2.16b, v2.16b, v3.16b		// vpxor	%xmm3,	%xmm2,	%xmm2
						// vmovdqa	0x50(%r11),	%xmm3
	tbl	v3.16b, {v29.16b}, v1.16b	// vpshufb	%xmm1,	%xmm3,	%xmm3
	eor	v3.16b, v3.16b, v2.16b		// vpxor	%xmm2,	%xmm3,	%xmm3

						// vmovdqa	0x60(%r11),	%xmm2
	tbl	v2.16b, {v30.16b}, v4.16b	// vpshufb	%xmm4,	%xmm2,	%xmm2
	tbl	v3.16b, {v3.16b}, v9.16b	// vpshufb	%xmm5,	%xmm3,	%xmm3
						// vmovdqa	0x70(%r11),	%xmm4
	tbl	v4.16b, {v31.16b}, v1.16b	// vpshufb	%xmm1,	%xmm4,	%xmm4
	ld1	{v1.2d}, [x8]			// vmovdqa	(%r8,%r10),	%xmm1
	eor	v2.16b, v2.16b, v3.16b		// vpxor	%xmm3,	%xmm2,	%xmm2
	eor	v3.16b, v4.16b, v2.16b		// vpxor	%xmm2,	%xmm4,	%xmm3

	sub	x2, x2, #16			// add	$-16,	%rdx

Lschedule_mangle_both:
	tbl	v3.16b, {v3.16b}, v1.16b	// vpshufb	%xmm1,	%xmm3,	%xmm3
	add	x8, x8, #48			// add	$-16,	%r8
	and	x8, x8, #~(1<<6)		// and	$0x30,	%r8
	st1	{v3.2d}, [x2]			// vmovdqu	%xmm3,	(%rdx)
	ret


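// Assumed C-level prototype, inferred from the register usage below
// (x0 = raw key bytes, w1 = key size in bits, x2 = AES_KEY to fill;
// bits/32 + 5 rounds are stored at offset 240, and x0 is cleared before
// returning, i.e. the routine returns 0):
//
//   int vpaes_set_encrypt_key(const uint8_t *user_key, unsigned bits,
//                             AES_KEY *key);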
.globl	vpaes_set_encrypt_key

.def vpaes_set_encrypt_key
   .type 32
.endef
.align	4
vpaes_set_encrypt_key:
	AARCH64_SIGN_LINK_REGISTER
	stp	x29,x30,[sp,#-16]!
	add	x29,sp,#0
	stp	d8,d9,[sp,#-16]!	// ABI spec says so

	lsr	w9, w1, #5		// shr	$5,%eax
	add	w9, w9, #5		// $5,%eax
	str	w9, [x2,#240]		// mov	%eax,240(%rdx)	# AES_KEY->rounds = nbits/32+5;

	mov	w3, #0		// mov	$0,%ecx
	mov	x8, #0x30		// mov	$0x30,%r8d
	bl	_vpaes_schedule_core
	eor	x0, x0, x0

	ldp	d8,d9,[sp],#16
	ldp	x29,x30,[sp],#16
	AARCH64_VALIDATE_LINK_REGISTER
	ret


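// Assumed C-level prototype, matching vpaes_set_encrypt_key above
// (x0 = raw key bytes, w1 = key size in bits, x2 = AES_KEY to fill; the
// decryption schedule is written from the last round key backwards):
//
//   int vpaes_set_decrypt_key(const uint8_t *user_key, unsigned bits,
//                             AES_KEY *key);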
.globl	vpaes_set_decrypt_key

.def vpaes_set_decrypt_key
   .type 32
.endef
.align	4
vpaes_set_decrypt_key:
	AARCH64_SIGN_LINK_REGISTER
	stp	x29,x30,[sp,#-16]!
	add	x29,sp,#0
	stp	d8,d9,[sp,#-16]!	// ABI spec says so

	lsr	w9, w1, #5		// shr	$5,%eax
	add	w9, w9, #5		// $5,%eax
	str	w9, [x2,#240]		// mov	%eax,240(%rdx)	# AES_KEY->rounds = nbits/32+5;
	lsl	w9, w9, #4		// shl	$4,%eax
	add	x2, x2, #16		// lea	16(%rdx,%rax),%rdx
	add	x2, x2, x9

	mov	w3, #1		// mov	$1,%ecx
	lsr	w8, w1, #1		// shr	$1,%r8d
	and	x8, x8, #32		// and	$32,%r8d
	eor	x8, x8, #32		// xor	$32,%r8d	# nbits==192?0:32
	bl	_vpaes_schedule_core

	ldp	d8,d9,[sp],#16
	ldp	x29,x30,[sp],#16
	AARCH64_VALIDATE_LINK_REGISTER
	ret

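// Assumed C-level prototype, inferred from the register usage below
// (x0 = input, x1 = output, x2 = length in bytes, x3 = expanded AES_KEY,
// x4 = 16-byte IV, w5 = direction flag: zero selects vpaes_cbc_decrypt,
// non-zero encrypts):
//
//   void vpaes_cbc_encrypt(const uint8_t *in, uint8_t *out, size_t length,
//                          const AES_KEY *key, uint8_t ivec[16], int enc);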
.globl	vpaes_cbc_encrypt

.def vpaes_cbc_encrypt
   .type 32
.endef
.align	4
vpaes_cbc_encrypt:
	AARCH64_SIGN_LINK_REGISTER
	cbz	x2, Lcbc_abort
	cmp	w5, #0			// check direction
	b.eq	vpaes_cbc_decrypt

	stp	x29,x30,[sp,#-16]!
	add	x29,sp,#0

	mov	x17, x2		// reassign
	mov	x2,  x3		// reassign

	ld1	{v0.16b}, [x4]	// load ivec
	bl	_vpaes_encrypt_preheat
	b	Lcbc_enc_loop

.align	4
Lcbc_enc_loop:
	ld1	{v7.16b}, [x0],#16	// load input
	eor	v7.16b, v7.16b, v0.16b	// xor with ivec
	bl	_vpaes_encrypt_core
	st1	{v0.16b}, [x1],#16	// save output
	subs	x17, x17, #16
	b.hi	Lcbc_enc_loop

	st1	{v0.16b}, [x4]	// write ivec

	ldp	x29,x30,[sp],#16
Lcbc_abort:
	AARCH64_VALIDATE_LINK_REGISTER
	ret


.def vpaes_cbc_decrypt
   .type 32
.endef
.align	4
vpaes_cbc_decrypt:
	// Not adding AARCH64_SIGN_LINK_REGISTER here because vpaes_cbc_decrypt is jumped to
	// only from vpaes_cbc_encrypt which has already signed the return address.
	stp	x29,x30,[sp,#-16]!
	add	x29,sp,#0
	stp	d8,d9,[sp,#-16]!	// ABI spec says so
	stp	d10,d11,[sp,#-16]!
	stp	d12,d13,[sp,#-16]!
	stp	d14,d15,[sp,#-16]!

	mov	x17, x2		// reassign
	mov	x2,  x3		// reassign
	ld1	{v6.16b}, [x4]	// load ivec
	bl	_vpaes_decrypt_preheat
	tst	x17, #16
	b.eq	Lcbc_dec_loop2x

	ld1	{v7.16b}, [x0], #16	// load input
	bl	_vpaes_decrypt_core
	eor	v0.16b, v0.16b, v6.16b	// xor with ivec
	orr	v6.16b, v7.16b, v7.16b	// next ivec value
	st1	{v0.16b}, [x1], #16
	subs	x17, x17, #16
	b.ls	Lcbc_dec_done

.align	4
Lcbc_dec_loop2x:
	ld1	{v14.16b,v15.16b}, [x0], #32
	bl	_vpaes_decrypt_2x
	eor	v0.16b, v0.16b, v6.16b	// xor with ivec
	eor	v1.16b, v1.16b, v14.16b
	orr	v6.16b, v15.16b, v15.16b
	st1	{v0.16b,v1.16b}, [x1], #32
	subs	x17, x17, #32
	b.hi	Lcbc_dec_loop2x

Lcbc_dec_done:
	st1	{v6.16b}, [x4]

	ldp	d14,d15,[sp],#16
	ldp	d12,d13,[sp],#16
	ldp	d10,d11,[sp],#16
	ldp	d8,d9,[sp],#16
	ldp	x29,x30,[sp],#16
	AARCH64_VALIDATE_LINK_REGISTER
	ret

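// Assumed C-level prototype, inferred from the register usage below
// (x0 = input, x1 = output, x2 = number of 16-byte blocks, x3 = expanded
// AES_KEY, x4 = 16-byte IV whose final 32 bits are the big-endian counter):
//
//   void vpaes_ctr32_encrypt_blocks(const uint8_t *in, uint8_t *out,
//                                   size_t blocks, const AES_KEY *key,
//                                   const uint8_t ivec[16]);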
.globl	vpaes_ctr32_encrypt_blocks

.def vpaes_ctr32_encrypt_blocks
   .type 32
.endef
.align	4
vpaes_ctr32_encrypt_blocks:
	AARCH64_SIGN_LINK_REGISTER
	stp	x29,x30,[sp,#-16]!
	add	x29,sp,#0
	stp	d8,d9,[sp,#-16]!	// ABI spec says so
	stp	d10,d11,[sp,#-16]!
	stp	d12,d13,[sp,#-16]!
	stp	d14,d15,[sp,#-16]!

	cbz	x2, Lctr32_done

	// Note, unlike the other functions, x2 here is measured in blocks,
	// not bytes.
	mov	x17, x2
	mov	x2,  x3

	// Load the IV and counter portion.
	ldr	w6, [x4, #12]
	ld1	{v7.16b}, [x4]

	bl	_vpaes_encrypt_preheat
	tst	x17, #1
	rev	w6, w6		// The counter is big-endian.
	b.eq	Lctr32_prep_loop

	// Handle one block so the remaining block count is even for
	// _vpaes_encrypt_2x.
	ld1	{v6.16b}, [x0], #16	// Load input ahead of time
	bl	_vpaes_encrypt_core
	eor	v0.16b, v0.16b, v6.16b	// XOR input and result
	st1	{v0.16b}, [x1], #16
	subs	x17, x17, #1
	// Update the counter.
	add	w6, w6, #1
	rev	w7, w6
	mov	v7.s[3], w7
	b.ls	Lctr32_done

Lctr32_prep_loop:
	// _vpaes_encrypt_core takes its input from v7, while _vpaes_encrypt_2x
	// uses v14 and v15.
	mov	v15.16b, v7.16b
	mov	v14.16b, v7.16b
	add	w6, w6, #1
	rev	w7, w6
	mov	v15.s[3], w7

Lctr32_loop:
	ld1	{v6.16b,v7.16b}, [x0], #32	// Load input ahead of time
	bl	_vpaes_encrypt_2x
	eor	v0.16b, v0.16b, v6.16b		// XOR input and result
	eor	v1.16b, v1.16b, v7.16b		// XOR input and result (#2)
	st1	{v0.16b,v1.16b}, [x1], #32
	subs	x17, x17, #2
	// Update the counter.
	add	w7, w6, #1
	add	w6, w6, #2
	rev	w7, w7
	mov	v14.s[3], w7
	rev	w7, w6
	mov	v15.s[3], w7
	b.hi	Lctr32_loop

Lctr32_done:
	ldp	d14,d15,[sp],#16
	ldp	d12,d13,[sp],#16
	ldp	d10,d11,[sp],#16
	ldp	d8,d9,[sp],#16
	ldp	x29,x30,[sp],#16
	AARCH64_VALIDATE_LINK_REGISTER
	ret

#endif  // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(_WIN32)